/*
 * hvm/io.c: hardware virtual machine I/O emulation
 *
 * Copyright (c) 2016 Citrix Systems Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/ctype.h>
#include <xen/init.h>
#include <xen/lib.h>
#include <xen/trace.h>
#include <xen/sched.h>
#include <xen/irq.h>
#include <xen/softirq.h>
#include <xen/domain.h>
#include <xen/event.h>
#include <xen/paging.h>
#include <xen/vpci.h>

#include <asm/hvm/emulate.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/ioreq.h>
#include <asm/hvm/vmx/vmx.h>

#include <public/hvm/ioreq.h>
#include <public/hvm/params.h>

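/*
 * An ioreq server represents an external emulator (device model) to which
 * Xen forwards guest port I/O, MMIO and PCI config accesses that it does
 * not handle itself.  Up to MAX_NR_IOREQ_SERVERS may be registered per
 * domain; they live in the fixed-size table manipulated by the helpers
 * below.
 */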
static void set_ioreq_server(struct domain *d, unsigned int id,
                             struct hvm_ioreq_server *s)
{
    ASSERT(id < MAX_NR_IOREQ_SERVERS);
    ASSERT(!s || !d->arch.hvm.ioreq_server.server[id]);

    d->arch.hvm.ioreq_server.server[id] = s;
}

#define GET_IOREQ_SERVER(d, id) \
    (d)->arch.hvm.ioreq_server.server[id]

static struct hvm_ioreq_server *get_ioreq_server(const struct domain *d,
                                                 unsigned int id)
{
    if ( id >= MAX_NR_IOREQ_SERVERS )
        return NULL;

    return GET_IOREQ_SERVER(d, id);
}

/*
 * Iterate over all possible ioreq servers.
 *
 * NOTE: The iteration is backwards such that more recently created
 *       ioreq servers are favoured in hvm_select_ioreq_server().
 *       This is a semantic that previously existed when ioreq servers
 *       were held in a linked list.
 */
#define FOR_EACH_IOREQ_SERVER(d, id, s) \
    for ( (id) = MAX_NR_IOREQ_SERVERS; (id) != 0; ) \
        if ( !(s = GET_IOREQ_SERVER(d, --(id))) ) \
            continue; \
        else

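/*
 * The synchronous ioreq page is a shared_iopage_t holding one ioreq_t slot
 * per vCPU, indexed by vcpu_id; the emulator maps this page to observe
 * requests and post responses.
 */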
static ioreq_t *get_ioreq(struct hvm_ioreq_server *s, struct vcpu *v)
{
    shared_iopage_t *p = s->ioreq.va;

    ASSERT((v == current) || !vcpu_runnable(v));
    ASSERT(p != NULL);

    return &p->vcpu_ioreq[v->vcpu_id];
}

bool hvm_io_pending(struct vcpu *v)
{
    struct domain *d = v->domain;
    struct hvm_ioreq_server *s;
    unsigned int id;

    FOR_EACH_IOREQ_SERVER(d, id, s)
    {
        struct hvm_ioreq_vcpu *sv;

        list_for_each_entry ( sv,
                              &s->ioreq_vcpu_list,
                              list_entry )
        {
            if ( sv->vcpu == v && sv->pending )
                return true;
        }
    }

    return false;
}

static void hvm_io_assist(struct hvm_ioreq_vcpu *sv, uint64_t data)
{
    struct vcpu *v = sv->vcpu;
    ioreq_t *ioreq = &v->arch.hvm.hvm_io.io_req;

    if ( hvm_ioreq_needs_completion(ioreq) )
        ioreq->data = data;

    sv->pending = false;
}

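/*
 * Wait for the emulator to complete the request in *p.  The expected
 * forward progression is IOREQ_READY -> IOREQ_INPROCESS -> IORESP_READY
 * (then back to IOREQ_NONE once the response has been consumed).
 * Observing IOREQ_NONE here only happens when a dying emulator races with
 * an outstanding request, and a backwards transition crashes the domain.
 */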
static bool hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
{
    unsigned int prev_state = STATE_IOREQ_NONE;

    while ( sv->pending )
    {
        unsigned int state = p->state;

        smp_rmb();

    recheck:
        if ( unlikely(state == STATE_IOREQ_NONE) )
        {
            /*
             * The only reason we should see this case is when an
             * emulator is dying and it races with an I/O being
             * requested.
             */
            hvm_io_assist(sv, ~0ul);
            break;
        }

        if ( unlikely(state < prev_state) )
        {
            gdprintk(XENLOG_ERR, "Weird HVM ioreq state transition %u -> %u\n",
                     prev_state, state);
            sv->pending = false;
            domain_crash(sv->vcpu->domain);
            return false; /* bail */
        }

        switch ( prev_state = state )
        {
        case STATE_IORESP_READY: /* IORESP_READY -> NONE */
            p->state = STATE_IOREQ_NONE;
            hvm_io_assist(sv, p->data);
            break;
        case STATE_IOREQ_READY:  /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
        case STATE_IOREQ_INPROCESS:
            wait_on_xen_event_channel(sv->ioreq_evtchn,
                                      ({ state = p->state;
                                         smp_rmb();
                                         state != prev_state; }));
            goto recheck;
        default:
            gdprintk(XENLOG_ERR, "Weird HVM iorequest state %u\n", state);
            sv->pending = false;
            domain_crash(sv->vcpu->domain);
            return false; /* bail */
        }
    }

    return true;
}

bool handle_hvm_io_completion(struct vcpu *v)
{
    struct domain *d = v->domain;
    struct hvm_vcpu_io *vio = &v->arch.hvm.hvm_io;
    struct hvm_ioreq_server *s;
    enum hvm_io_completion io_completion;
    unsigned int id;

    if ( has_vpci(d) && vpci_process_pending(v) )
    {
        raise_softirq(SCHEDULE_SOFTIRQ);
        return false;
    }

    FOR_EACH_IOREQ_SERVER(d, id, s)
    {
        struct hvm_ioreq_vcpu *sv;

        list_for_each_entry ( sv,
                              &s->ioreq_vcpu_list,
                              list_entry )
        {
            if ( sv->vcpu == v && sv->pending )
            {
                if ( !hvm_wait_for_io(sv, get_ioreq(s, v)) )
                    return false;

                break;
            }
        }
    }

    vio->io_req.state = hvm_ioreq_needs_completion(&vio->io_req) ?
        STATE_IORESP_READY : STATE_IOREQ_NONE;

    msix_write_completion(v);
    vcpu_end_shutdown_deferral(v);

    io_completion = vio->io_completion;
    vio->io_completion = HVMIO_no_completion;

    switch ( io_completion )
    {
    case HVMIO_no_completion:
        break;

    case HVMIO_mmio_completion:
        return handle_mmio();

    case HVMIO_pio_completion:
        return handle_pio(vio->io_req.addr, vio->io_req.size,
                          vio->io_req.dir);

    case HVMIO_realmode_completion:
    {
        struct hvm_emulate_ctxt ctxt;

        hvm_emulate_init_once(&ctxt, NULL, guest_cpu_user_regs());
        vmx_realmode_emulate_one(&ctxt);
        hvm_emulate_writeback(&ctxt);

        break;
    }
    default:
        ASSERT_UNREACHABLE();
        break;
    }

    return true;
}

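/*
 * GFNs used to map ioreq pages into the guest come from two pools: the
 * range described by d->arch.hvm.ioreq_gfn.{base,mask} and, as a fallback,
 * the two 'legacy' frames nominated by HVM_PARAM_IOREQ_PFN and
 * HVM_PARAM_BUFIOREQ_PFN (tracked via legacy_mask).
 */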
static gfn_t hvm_alloc_legacy_ioreq_gfn(struct hvm_ioreq_server *s)
{
    struct domain *d = s->target;
    unsigned int i;

    BUILD_BUG_ON(HVM_PARAM_BUFIOREQ_PFN != HVM_PARAM_IOREQ_PFN + 1);

    for ( i = HVM_PARAM_IOREQ_PFN; i <= HVM_PARAM_BUFIOREQ_PFN; i++ )
    {
        if ( !test_and_clear_bit(i, &d->arch.hvm.ioreq_gfn.legacy_mask) )
            return _gfn(d->arch.hvm.params[i]);
    }

    return INVALID_GFN;
}

static gfn_t hvm_alloc_ioreq_gfn(struct hvm_ioreq_server *s)
{
    struct domain *d = s->target;
    unsigned int i;

    for ( i = 0; i < sizeof(d->arch.hvm.ioreq_gfn.mask) * 8; i++ )
    {
        if ( test_and_clear_bit(i, &d->arch.hvm.ioreq_gfn.mask) )
            return _gfn(d->arch.hvm.ioreq_gfn.base + i);
    }

    /*
     * If we are out of 'normal' GFNs then we may still have a 'legacy'
     * GFN available.
     */
    return hvm_alloc_legacy_ioreq_gfn(s);
}

static bool hvm_free_legacy_ioreq_gfn(struct hvm_ioreq_server *s,
                                      gfn_t gfn)
{
    struct domain *d = s->target;
    unsigned int i;

    for ( i = HVM_PARAM_IOREQ_PFN; i <= HVM_PARAM_BUFIOREQ_PFN; i++ )
    {
        if ( gfn_eq(gfn, _gfn(d->arch.hvm.params[i])) )
            break;
    }
    if ( i > HVM_PARAM_BUFIOREQ_PFN )
        return false;

    set_bit(i, &d->arch.hvm.ioreq_gfn.legacy_mask);
    return true;
}

static void hvm_free_ioreq_gfn(struct hvm_ioreq_server *s, gfn_t gfn)
{
    struct domain *d = s->target;
    unsigned int i = gfn_x(gfn) - d->arch.hvm.ioreq_gfn.base;

    ASSERT(!gfn_eq(gfn, INVALID_GFN));

    if ( !hvm_free_legacy_ioreq_gfn(s, gfn) )
    {
        ASSERT(i < sizeof(d->arch.hvm.ioreq_gfn.mask) * 8);
        set_bit(i, &d->arch.hvm.ioreq_gfn.mask);
    }
}

static void hvm_unmap_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
{
    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;

    if ( gfn_eq(iorp->gfn, INVALID_GFN) )
        return;

    destroy_ring_for_helper(&iorp->va, iorp->page);
    iorp->page = NULL;

    hvm_free_ioreq_gfn(s, iorp->gfn);
    iorp->gfn = INVALID_GFN;
}

static int hvm_map_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
{
    struct domain *d = s->target;
    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
    int rc;

    if ( iorp->page )
    {
        /*
         * If a page has already been allocated (which will happen on
         * demand if hvm_get_ioreq_server_frame() is called), then
         * mapping a guest frame is not permitted.
         */
        if ( gfn_eq(iorp->gfn, INVALID_GFN) )
            return -EPERM;

        return 0;
    }

    if ( d->is_dying )
        return -EINVAL;

    iorp->gfn = hvm_alloc_ioreq_gfn(s);

    if ( gfn_eq(iorp->gfn, INVALID_GFN) )
        return -ENOMEM;

    rc = prepare_ring_for_helper(d, gfn_x(iorp->gfn), &iorp->page,
                                 &iorp->va);

    if ( rc )
        hvm_unmap_ioreq_gfn(s, buf);

    return rc;
}

static int hvm_alloc_ioreq_mfn(struct hvm_ioreq_server *s, bool buf)
{
    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
    struct page_info *page;

    if ( iorp->page )
    {
        /*
         * If a guest frame has already been mapped (which may happen
         * on demand if hvm_get_ioreq_server_info() is called), then
         * allocating a page is not permitted.
         */
        if ( !gfn_eq(iorp->gfn, INVALID_GFN) )
            return -EPERM;

        return 0;
    }

    page = alloc_domheap_page(s->target, MEMF_no_refcount);

    if ( !page )
        return -ENOMEM;

    if ( !get_page_and_type(page, s->target, PGT_writable_page) )
    {
        /*
         * The domain can't possibly know about this page yet, so failure
         * here is a clear indication of something fishy going on.
         */
        domain_crash(s->emulator);
        return -ENODATA;
    }

    iorp->va = __map_domain_page_global(page);
    if ( !iorp->va )
        goto fail;

    iorp->page = page;
    clear_page(iorp->va);
    return 0;

 fail:
    put_page_alloc_ref(page);
    put_page_and_type(page);

    return -ENOMEM;
}

static void hvm_free_ioreq_mfn(struct hvm_ioreq_server *s, bool buf)
{
    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
    struct page_info *page = iorp->page;

    if ( !page )
        return;

    iorp->page = NULL;

    unmap_domain_page_global(iorp->va);
    iorp->va = NULL;

    put_page_alloc_ref(page);
    put_page_and_type(page);
}

bool is_ioreq_server_page(struct domain *d, const struct page_info *page)
{
    const struct hvm_ioreq_server *s;
    unsigned int id;
    bool found = false;

    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);

    FOR_EACH_IOREQ_SERVER(d, id, s)
    {
        if ( (s->ioreq.page == page) || (s->bufioreq.page == page) )
        {
            found = true;
            break;
        }
    }

    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);

    return found;
}

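/*
 * While a server is enabled its ioreq pages are removed from the guest
 * physmap (hvm_remove_ioreq_gfn) so that the guest cannot tamper with
 * in-flight request state; disabling the server puts them back
 * (hvm_add_ioreq_gfn).
 */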
static void hvm_remove_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
{
    struct domain *d = s->target;
    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;

    if ( gfn_eq(iorp->gfn, INVALID_GFN) )
        return;

    if ( guest_physmap_remove_page(d, iorp->gfn,
                                   page_to_mfn(iorp->page), 0) )
        domain_crash(d);
    clear_page(iorp->va);
}

static int hvm_add_ioreq_gfn(struct hvm_ioreq_server *s, bool buf)
{
    struct domain *d = s->target;
    struct hvm_ioreq_page *iorp = buf ? &s->bufioreq : &s->ioreq;
    int rc;

    if ( gfn_eq(iorp->gfn, INVALID_GFN) )
        return 0;

    clear_page(iorp->va);

    rc = guest_physmap_add_page(d, iorp->gfn,
                                page_to_mfn(iorp->page), 0);
    if ( rc == 0 )
        paging_mark_pfn_dirty(d, _pfn(gfn_x(iorp->gfn)));

    return rc;
}

static void hvm_update_ioreq_evtchn(struct hvm_ioreq_server *s,
                                    struct hvm_ioreq_vcpu *sv)
{
    ASSERT(spin_is_locked(&s->lock));

    if ( s->ioreq.va != NULL )
    {
        ioreq_t *p = get_ioreq(s, sv->vcpu);

        p->vp_eport = sv->ioreq_evtchn;
    }
}

#define HANDLE_BUFIOREQ(s) \
    ((s)->bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF)

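/*
 * Each vCPU gets its own unbound event channel towards the emulator; the
 * per-server buffered ioreq channel is allocated alongside vCPU0's when
 * buffered requests are enabled.
 */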
static int hvm_ioreq_server_add_vcpu(struct hvm_ioreq_server *s,
                                     struct vcpu *v)
{
    struct hvm_ioreq_vcpu *sv;
    int rc;

    sv = xzalloc(struct hvm_ioreq_vcpu);

    rc = -ENOMEM;
    if ( !sv )
        goto fail1;

    spin_lock(&s->lock);

    rc = alloc_unbound_xen_event_channel(v->domain, v->vcpu_id,
                                         s->emulator->domain_id, NULL);
    if ( rc < 0 )
        goto fail2;

    sv->ioreq_evtchn = rc;

    if ( v->vcpu_id == 0 && HANDLE_BUFIOREQ(s) )
    {
        rc = alloc_unbound_xen_event_channel(v->domain, 0,
                                             s->emulator->domain_id, NULL);
        if ( rc < 0 )
            goto fail3;

        s->bufioreq_evtchn = rc;
    }

    sv->vcpu = v;

    list_add(&sv->list_entry, &s->ioreq_vcpu_list);

    if ( s->enabled )
        hvm_update_ioreq_evtchn(s, sv);

    spin_unlock(&s->lock);
    return 0;

 fail3:
    free_xen_event_channel(v->domain, sv->ioreq_evtchn);

 fail2:
    spin_unlock(&s->lock);
    xfree(sv);

 fail1:
    return rc;
}

static void hvm_ioreq_server_remove_vcpu(struct hvm_ioreq_server *s,
                                         struct vcpu *v)
{
    struct hvm_ioreq_vcpu *sv;

    spin_lock(&s->lock);

    list_for_each_entry ( sv,
                          &s->ioreq_vcpu_list,
                          list_entry )
    {
        if ( sv->vcpu != v )
            continue;

        list_del(&sv->list_entry);

        if ( v->vcpu_id == 0 && HANDLE_BUFIOREQ(s) )
            free_xen_event_channel(v->domain, s->bufioreq_evtchn);

        free_xen_event_channel(v->domain, sv->ioreq_evtchn);

        xfree(sv);
        break;
    }

    spin_unlock(&s->lock);
}

static void hvm_ioreq_server_remove_all_vcpus(struct hvm_ioreq_server *s)
{
    struct hvm_ioreq_vcpu *sv, *next;

    spin_lock(&s->lock);

    list_for_each_entry_safe ( sv,
                               next,
                               &s->ioreq_vcpu_list,
                               list_entry )
    {
        struct vcpu *v = sv->vcpu;

        list_del(&sv->list_entry);

        if ( v->vcpu_id == 0 && HANDLE_BUFIOREQ(s) )
            free_xen_event_channel(v->domain, s->bufioreq_evtchn);

        free_xen_event_channel(v->domain, sv->ioreq_evtchn);

        xfree(sv);
    }

    spin_unlock(&s->lock);
}

static int hvm_ioreq_server_map_pages(struct hvm_ioreq_server *s)
{
    int rc;

    rc = hvm_map_ioreq_gfn(s, false);

    if ( !rc && HANDLE_BUFIOREQ(s) )
        rc = hvm_map_ioreq_gfn(s, true);

    if ( rc )
        hvm_unmap_ioreq_gfn(s, false);

    return rc;
}

static void hvm_ioreq_server_unmap_pages(struct hvm_ioreq_server *s)
{
    hvm_unmap_ioreq_gfn(s, true);
    hvm_unmap_ioreq_gfn(s, false);
}

static int hvm_ioreq_server_alloc_pages(struct hvm_ioreq_server *s)
{
    int rc;

    rc = hvm_alloc_ioreq_mfn(s, false);

    if ( !rc && (s->bufioreq_handling != HVM_IOREQSRV_BUFIOREQ_OFF) )
        rc = hvm_alloc_ioreq_mfn(s, true);

    if ( rc )
        hvm_free_ioreq_mfn(s, false);

    return rc;
}

static void hvm_ioreq_server_free_pages(struct hvm_ioreq_server *s)
{
    hvm_free_ioreq_mfn(s, true);
    hvm_free_ioreq_mfn(s, false);
}

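/*
 * Each server owns NR_IO_RANGE_TYPES rangesets (port, memory and PCI SBDF,
 * each capped at MAX_NR_IO_RANGES entries) describing the guest accesses
 * it has claimed; hvm_select_ioreq_server() matches requests against them.
 */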
static void hvm_ioreq_server_free_rangesets(struct hvm_ioreq_server *s)
{
    unsigned int i;

    for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
        rangeset_destroy(s->range[i]);
}

static int hvm_ioreq_server_alloc_rangesets(struct hvm_ioreq_server *s,
                                            ioservid_t id)
{
    unsigned int i;
    int rc;

    for ( i = 0; i < NR_IO_RANGE_TYPES; i++ )
    {
        char *name;

        rc = asprintf(&name, "ioreq_server %d %s", id,
                      (i == XEN_DMOP_IO_RANGE_PORT) ? "port" :
                      (i == XEN_DMOP_IO_RANGE_MEMORY) ? "memory" :
                      (i == XEN_DMOP_IO_RANGE_PCI) ? "pci" :
                      "");
        if ( rc )
            goto fail;

        s->range[i] = rangeset_new(s->target, name,
                                   RANGESETF_prettyprint_hex);

        xfree(name);

        rc = -ENOMEM;
        if ( !s->range[i] )
            goto fail;

        rangeset_limit(s->range[i], MAX_NR_IO_RANGES);
    }

    return 0;

 fail:
    hvm_ioreq_server_free_rangesets(s);

    return rc;
}

static void hvm_ioreq_server_enable(struct hvm_ioreq_server *s)
{
    struct hvm_ioreq_vcpu *sv;

    spin_lock(&s->lock);

    if ( s->enabled )
        goto done;

    hvm_remove_ioreq_gfn(s, false);
    hvm_remove_ioreq_gfn(s, true);

    s->enabled = true;

    list_for_each_entry ( sv,
                          &s->ioreq_vcpu_list,
                          list_entry )
        hvm_update_ioreq_evtchn(s, sv);

 done:
    spin_unlock(&s->lock);
}

static void hvm_ioreq_server_disable(struct hvm_ioreq_server *s)
{
    spin_lock(&s->lock);

    if ( !s->enabled )
        goto done;

    hvm_add_ioreq_gfn(s, true);
    hvm_add_ioreq_gfn(s, false);

    s->enabled = false;

 done:
    spin_unlock(&s->lock);
}

static int hvm_ioreq_server_init(struct hvm_ioreq_server *s,
                                 struct domain *d, int bufioreq_handling,
                                 ioservid_t id)
{
    struct domain *currd = current->domain;
    struct vcpu *v;
    int rc;

    s->target = d;

    get_knownalive_domain(currd);
    s->emulator = currd;

    spin_lock_init(&s->lock);
    INIT_LIST_HEAD(&s->ioreq_vcpu_list);
    spin_lock_init(&s->bufioreq_lock);

    s->ioreq.gfn = INVALID_GFN;
    s->bufioreq.gfn = INVALID_GFN;

    rc = hvm_ioreq_server_alloc_rangesets(s, id);
    if ( rc )
        return rc;

    s->bufioreq_handling = bufioreq_handling;

    for_each_vcpu ( d, v )
    {
        rc = hvm_ioreq_server_add_vcpu(s, v);
        if ( rc )
            goto fail_add;
    }

    return 0;

 fail_add:
    hvm_ioreq_server_remove_all_vcpus(s);
    hvm_ioreq_server_unmap_pages(s);

    hvm_ioreq_server_free_rangesets(s);

    put_domain(s->emulator);
    return rc;
}

static void hvm_ioreq_server_deinit(struct hvm_ioreq_server *s)
{
    ASSERT(!s->enabled);
    hvm_ioreq_server_remove_all_vcpus(s);

    /*
     * NOTE: It is safe to call both hvm_ioreq_server_unmap_pages() and
     *       hvm_ioreq_server_free_pages() in that order.
     *       This is because the former will do nothing if the pages
     *       are not mapped, leaving the page to be freed by the latter.
     *       However if the pages are mapped then the former will set
     *       the page_info pointer to NULL, meaning the latter will do
     *       nothing.
     */
    hvm_ioreq_server_unmap_pages(s);
    hvm_ioreq_server_free_pages(s);

    hvm_ioreq_server_free_rangesets(s);

    put_domain(s->emulator);
}

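/*
 * Public interface, typically reached via the device model operation
 * hypercall (XEN_DMOP_create_ioreq_server and friends): find a free slot
 * in the server table, initialise the server and hand back its id.
 */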
int hvm_create_ioreq_server(struct domain *d, int bufioreq_handling,
                            ioservid_t *id)
{
    struct hvm_ioreq_server *s;
    unsigned int i;
    int rc;

    if ( bufioreq_handling > HVM_IOREQSRV_BUFIOREQ_ATOMIC )
        return -EINVAL;

    s = xzalloc(struct hvm_ioreq_server);
    if ( !s )
        return -ENOMEM;

    domain_pause(d);
    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);

    for ( i = 0; i < MAX_NR_IOREQ_SERVERS; i++ )
    {
        if ( !GET_IOREQ_SERVER(d, i) )
            break;
    }

    rc = -ENOSPC;
    if ( i >= MAX_NR_IOREQ_SERVERS )
        goto fail;

    /*
     * It is safe to call set_ioreq_server() prior to
     * hvm_ioreq_server_init() since the target domain is paused.
     */
    set_ioreq_server(d, i, s);

    rc = hvm_ioreq_server_init(s, d, bufioreq_handling, i);
    if ( rc )
    {
        set_ioreq_server(d, i, NULL);
        goto fail;
    }

    if ( id )
        *id = i;

    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
    domain_unpause(d);

    return 0;

 fail:
    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
    domain_unpause(d);

    xfree(s);
    return rc;
}

int hvm_destroy_ioreq_server(struct domain *d, ioservid_t id)
{
    struct hvm_ioreq_server *s;
    int rc;

    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);

    s = get_ioreq_server(d, id);

    rc = -ENOENT;
    if ( !s )
        goto out;

    rc = -EPERM;
    if ( s->emulator != current->domain )
        goto out;

    domain_pause(d);

    p2m_set_ioreq_server(d, 0, s);

    hvm_ioreq_server_disable(s);

    /*
     * It is safe to call hvm_ioreq_server_deinit() prior to
     * set_ioreq_server() since the target domain is paused.
     */
    hvm_ioreq_server_deinit(s);
    set_ioreq_server(d, id, NULL);

    domain_unpause(d);

    xfree(s);

    rc = 0;

 out:
    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);

    return rc;
}

int hvm_get_ioreq_server_info(struct domain *d, ioservid_t id,
                              unsigned long *ioreq_gfn,
                              unsigned long *bufioreq_gfn,
                              evtchn_port_t *bufioreq_port)
{
    struct hvm_ioreq_server *s;
    int rc;

    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);

    s = get_ioreq_server(d, id);

    rc = -ENOENT;
    if ( !s )
        goto out;

    rc = -EPERM;
    if ( s->emulator != current->domain )
        goto out;

    if ( ioreq_gfn || bufioreq_gfn )
    {
        rc = hvm_ioreq_server_map_pages(s);
        if ( rc )
            goto out;
    }

    if ( ioreq_gfn )
        *ioreq_gfn = gfn_x(s->ioreq.gfn);

    if ( HANDLE_BUFIOREQ(s) )
    {
        if ( bufioreq_gfn )
            *bufioreq_gfn = gfn_x(s->bufioreq.gfn);

        if ( bufioreq_port )
            *bufioreq_port = s->bufioreq_evtchn;
    }

    rc = 0;

 out:
    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);

    return rc;
}

int hvm_get_ioreq_server_frame(struct domain *d, ioservid_t id,
                               unsigned long idx, mfn_t *mfn)
{
    struct hvm_ioreq_server *s;
    int rc;

    ASSERT(is_hvm_domain(d));

    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);

    s = get_ioreq_server(d, id);

    rc = -ENOENT;
    if ( !s )
        goto out;

    rc = -EPERM;
    if ( s->emulator != current->domain )
        goto out;

    rc = hvm_ioreq_server_alloc_pages(s);
    if ( rc )
        goto out;

    switch ( idx )
    {
    case XENMEM_resource_ioreq_server_frame_bufioreq:
        rc = -ENOENT;
        if ( !HANDLE_BUFIOREQ(s) )
            goto out;

        *mfn = page_to_mfn(s->bufioreq.page);
        rc = 0;
        break;

    case XENMEM_resource_ioreq_server_frame_ioreq(0):
        *mfn = page_to_mfn(s->ioreq.page);
        rc = 0;
        break;

    default:
        rc = -EINVAL;
        break;
    }

 out:
    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);

    return rc;
}

int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
                                     uint32_t type, uint64_t start,
                                     uint64_t end)
{
    struct hvm_ioreq_server *s;
    struct rangeset *r;
    int rc;

    if ( start > end )
        return -EINVAL;

    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);

    s = get_ioreq_server(d, id);

    rc = -ENOENT;
    if ( !s )
        goto out;

    rc = -EPERM;
    if ( s->emulator != current->domain )
        goto out;

    switch ( type )
    {
    case XEN_DMOP_IO_RANGE_PORT:
    case XEN_DMOP_IO_RANGE_MEMORY:
    case XEN_DMOP_IO_RANGE_PCI:
        r = s->range[type];
        break;

    default:
        r = NULL;
        break;
    }

    rc = -EINVAL;
    if ( !r )
        goto out;

    rc = -EEXIST;
    if ( rangeset_overlaps_range(r, start, end) )
        goto out;

    rc = rangeset_add_range(r, start, end);

 out:
    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);

    return rc;
}

int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
                                         uint32_t type, uint64_t start,
                                         uint64_t end)
{
    struct hvm_ioreq_server *s;
    struct rangeset *r;
    int rc;

    if ( start > end )
        return -EINVAL;

    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);

    s = get_ioreq_server(d, id);

    rc = -ENOENT;
    if ( !s )
        goto out;

    rc = -EPERM;
    if ( s->emulator != current->domain )
        goto out;

    switch ( type )
    {
    case XEN_DMOP_IO_RANGE_PORT:
    case XEN_DMOP_IO_RANGE_MEMORY:
    case XEN_DMOP_IO_RANGE_PCI:
        r = s->range[type];
        break;

    default:
        r = NULL;
        break;
    }

    rc = -EINVAL;
    if ( !r )
        goto out;

    rc = -ENOENT;
    if ( !rangeset_contains_range(r, start, end) )
        goto out;

    rc = rangeset_remove_range(r, start, end);

 out:
    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);

    return rc;
}

/*
 * Map or unmap an ioreq server to a specific memory type.  For now, only
 * HVMMEM_ioreq_server is supported, and in the future new types can be
 * introduced, e.g. HVMMEM_ioreq_serverX mapped to ioreq server X.  Also,
 * currently only write operations are forwarded to an ioreq server.
 * Support for the emulation of read operations can be added when an ioreq
 * server has such a requirement in the future.
 */
int hvm_map_mem_type_to_ioreq_server(struct domain *d, ioservid_t id,
                                     uint32_t type, uint32_t flags)
{
    struct hvm_ioreq_server *s;
    int rc;

    if ( type != HVMMEM_ioreq_server )
        return -EINVAL;

    if ( flags & ~XEN_DMOP_IOREQ_MEM_ACCESS_WRITE )
        return -EINVAL;

    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);

    s = get_ioreq_server(d, id);

    rc = -ENOENT;
    if ( !s )
        goto out;

    rc = -EPERM;
    if ( s->emulator != current->domain )
        goto out;

    rc = p2m_set_ioreq_server(d, flags, s);

 out:
    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);

    if ( rc == 0 && flags == 0 )
    {
        struct p2m_domain *p2m = p2m_get_hostp2m(d);

        if ( read_atomic(&p2m->ioreq.entry_count) )
            p2m_change_entry_type_global(d, p2m_ioreq_server, p2m_ram_rw);
    }

    return rc;
}

int hvm_set_ioreq_server_state(struct domain *d, ioservid_t id,
                               bool enabled)
{
    struct hvm_ioreq_server *s;
    int rc;

    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);

    s = get_ioreq_server(d, id);

    rc = -ENOENT;
    if ( !s )
        goto out;

    rc = -EPERM;
    if ( s->emulator != current->domain )
        goto out;

    domain_pause(d);

    if ( enabled )
        hvm_ioreq_server_enable(s);
    else
        hvm_ioreq_server_disable(s);

    domain_unpause(d);

    rc = 0;

 out:
    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
    return rc;
}

int hvm_all_ioreq_servers_add_vcpu(struct domain *d, struct vcpu *v)
{
    struct hvm_ioreq_server *s;
    unsigned int id;
    int rc;

    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);

    FOR_EACH_IOREQ_SERVER(d, id, s)
    {
        rc = hvm_ioreq_server_add_vcpu(s, v);
        if ( rc )
            goto fail;
    }

    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);

    return 0;

 fail:
    while ( ++id != MAX_NR_IOREQ_SERVERS )
    {
        s = GET_IOREQ_SERVER(d, id);

        if ( !s )
            continue;

        hvm_ioreq_server_remove_vcpu(s, v);
    }

    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);

    return rc;
}

void hvm_all_ioreq_servers_remove_vcpu(struct domain *d, struct vcpu *v)
{
    struct hvm_ioreq_server *s;
    unsigned int id;

    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);

    FOR_EACH_IOREQ_SERVER(d, id, s)
        hvm_ioreq_server_remove_vcpu(s, v);

    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
}

void hvm_destroy_all_ioreq_servers(struct domain *d)
{
    struct hvm_ioreq_server *s;
    unsigned int id;

    if ( !relocate_portio_handler(d, 0xcf8, 0xcf8, 4) )
        return;

    spin_lock_recursive(&d->arch.hvm.ioreq_server.lock);

    /* No need to domain_pause() as the domain is being torn down */

    FOR_EACH_IOREQ_SERVER(d, id, s)
    {
        hvm_ioreq_server_disable(s);

        /*
         * It is safe to call hvm_ioreq_server_deinit() prior to
         * set_ioreq_server() since the target domain is being destroyed.
         */
        hvm_ioreq_server_deinit(s);
        set_ioreq_server(d, id, NULL);

        xfree(s);
    }

    spin_unlock_recursive(&d->arch.hvm.ioreq_server.lock);
}

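/*
 * Pick the ioreq server that should handle request p.  PIO accesses to
 * 0xcfc-0xcff with CF8 enabled are decoded into PCI config cycles and
 * matched against the PCI rangesets; everything else is matched against
 * the port or memory rangesets.  Only enabled servers are considered, and
 * they are scanned newest-first (see FOR_EACH_IOREQ_SERVER).
 */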
struct hvm_ioreq_server *hvm_select_ioreq_server(struct domain *d,
                                                 ioreq_t *p)
{
    struct hvm_ioreq_server *s;
    uint32_t cf8;
    uint8_t type;
    uint64_t addr;
    unsigned int id;

    if ( p->type != IOREQ_TYPE_COPY && p->type != IOREQ_TYPE_PIO )
        return NULL;

    cf8 = d->arch.hvm.pci_cf8;

    if ( p->type == IOREQ_TYPE_PIO &&
         (p->addr & ~3) == 0xcfc &&
         CF8_ENABLED(cf8) )
    {
        uint32_t x86_fam;
        pci_sbdf_t sbdf;
        unsigned int reg;

        reg = hvm_pci_decode_addr(cf8, p->addr, &sbdf);

        /* PCI config data cycle */
        type = XEN_DMOP_IO_RANGE_PCI;
        addr = ((uint64_t)sbdf.sbdf << 32) | reg;
        /* AMD extended configuration space access? */
        if ( CF8_ADDR_HI(cf8) &&
             d->arch.cpuid->x86_vendor == X86_VENDOR_AMD &&
             (x86_fam = get_cpu_family(
                 d->arch.cpuid->basic.raw_fms, NULL, NULL)) >= 0x10 &&
             x86_fam < 0x17 )
        {
            uint64_t msr_val;

            if ( !rdmsr_safe(MSR_AMD64_NB_CFG, msr_val) &&
                 (msr_val & (1ULL << AMD64_NB_CFG_CF8_EXT_ENABLE_BIT)) )
                addr |= CF8_ADDR_HI(cf8);
        }
    }
    else
    {
        type = (p->type == IOREQ_TYPE_PIO) ?
                XEN_DMOP_IO_RANGE_PORT : XEN_DMOP_IO_RANGE_MEMORY;
        addr = p->addr;
    }

    FOR_EACH_IOREQ_SERVER(d, id, s)
    {
        struct rangeset *r;

        if ( !s->enabled )
            continue;

        r = s->range[type];

        switch ( type )
        {
            unsigned long start, end;

        case XEN_DMOP_IO_RANGE_PORT:
            start = addr;
            end = start + p->size - 1;
            if ( rangeset_contains_range(r, start, end) )
                return s;

            break;

        case XEN_DMOP_IO_RANGE_MEMORY:
            start = hvm_mmio_first_byte(p);
            end = hvm_mmio_last_byte(p);

            if ( rangeset_contains_range(r, start, end) )
                return s;

            break;

        case XEN_DMOP_IO_RANGE_PCI:
            if ( rangeset_contains_singleton(r, addr >> 32) )
            {
                p->type = IOREQ_TYPE_PCI_CONFIG;
                p->addr = addr;
                return s;
            }

            break;
        }
    }

    return NULL;
}

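/*
 * Buffered ioreqs are posted writes: the request is copied into a ring in
 * the bufioreq page and the emulator is notified, but no response is
 * awaited.  Requests the buf_ioreq_t format cannot express are not
 * buffered (see the checks below).
 */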
static int hvm_send_buffered_ioreq(struct hvm_ioreq_server *s, ioreq_t *p)
{
    struct domain *d = current->domain;
    struct hvm_ioreq_page *iorp;
    buffered_iopage_t *pg;
    buf_ioreq_t bp = { .data = p->data,
                       .addr = p->addr,
                       .type = p->type,
                       .dir = p->dir };
    /* Timeoffset sends 64b data, but no address. Use two consecutive slots. */
    int qw = 0;

    /* Ensure buffered_iopage fits in a page */
    BUILD_BUG_ON(sizeof(buffered_iopage_t) > PAGE_SIZE);

    iorp = &s->bufioreq;
    pg = iorp->va;

    if ( !pg )
        return X86EMUL_UNHANDLEABLE;

    /*
     * Return 0 for the cases we can't deal with:
     *  - 'addr' is only a 20-bit field, so we cannot address beyond 1MB
     *  - we cannot buffer accesses to guest memory buffers, as the guest
     *    may expect the memory buffer to be synchronously accessed
     *  - the count field is usually used with data_is_ptr and since we don't
     *    support data_is_ptr we do not waste space for the count field either
     */
    if ( (p->addr > 0xffffful) || p->data_is_ptr || (p->count != 1) )
        return 0;

    switch ( p->size )
    {
    case 1:
        bp.size = 0;
        break;
    case 2:
        bp.size = 1;
        break;
    case 4:
        bp.size = 2;
        break;
    case 8:
        bp.size = 3;
        qw = 1;
        break;
    default:
        gdprintk(XENLOG_WARNING, "unexpected ioreq size: %u\n", p->size);
        return X86EMUL_UNHANDLEABLE;
    }

    spin_lock(&s->bufioreq_lock);

    if ( (pg->ptrs.write_pointer - pg->ptrs.read_pointer) >=
         (IOREQ_BUFFER_SLOT_NUM - qw) )
    {
        /* The queue is full: send the iopacket through the normal path. */
        spin_unlock(&s->bufioreq_lock);
        return X86EMUL_UNHANDLEABLE;
    }

    pg->buf_ioreq[pg->ptrs.write_pointer % IOREQ_BUFFER_SLOT_NUM] = bp;

    if ( qw )
    {
        bp.data = p->data >> 32;
        pg->buf_ioreq[(pg->ptrs.write_pointer+1) % IOREQ_BUFFER_SLOT_NUM] = bp;
    }

    /* Make the ioreq_t visible /before/ write_pointer. */
    smp_wmb();
    pg->ptrs.write_pointer += qw ? 2 : 1;

    /* Canonicalize read/write pointers to prevent their overflow. */
    while ( (s->bufioreq_handling == HVM_IOREQSRV_BUFIOREQ_ATOMIC) &&
            qw++ < IOREQ_BUFFER_SLOT_NUM &&
            pg->ptrs.read_pointer >= IOREQ_BUFFER_SLOT_NUM )
    {
        union bufioreq_pointers old = pg->ptrs, new;
        unsigned int n = old.read_pointer / IOREQ_BUFFER_SLOT_NUM;

        new.read_pointer = old.read_pointer - n * IOREQ_BUFFER_SLOT_NUM;
        new.write_pointer = old.write_pointer - n * IOREQ_BUFFER_SLOT_NUM;
        cmpxchg(&pg->ptrs.full, old.full, new.full);
    }

    notify_via_xen_event_channel(d, s->bufioreq_evtchn);
    spin_unlock(&s->bufioreq_lock);

    return X86EMUL_OKAY;
}

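/*
 * Synchronous dispatch: copy the request into the issuing vCPU's slot in
 * the server's ioreq page, mark it STATE_IOREQ_READY and notify the
 * emulator's event channel.  The vCPU then retries (X86EMUL_RETRY) and
 * blocks until handle_hvm_io_completion() sees the response.
 */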
int hvm_send_ioreq(struct hvm_ioreq_server *s, ioreq_t *proto_p,
                   bool buffered)
{
    struct vcpu *curr = current;
    struct domain *d = curr->domain;
    struct hvm_ioreq_vcpu *sv;

    ASSERT(s);

    if ( buffered )
        return hvm_send_buffered_ioreq(s, proto_p);

    if ( unlikely(!vcpu_start_shutdown_deferral(curr)) )
        return X86EMUL_RETRY;

    list_for_each_entry ( sv,
                          &s->ioreq_vcpu_list,
                          list_entry )
    {
        if ( sv->vcpu == curr )
        {
            evtchn_port_t port = sv->ioreq_evtchn;
            ioreq_t *p = get_ioreq(s, curr);

            if ( unlikely(p->state != STATE_IOREQ_NONE) )
            {
                gprintk(XENLOG_ERR, "device model set bad IO state %d\n",
                        p->state);
                break;
            }

            if ( unlikely(p->vp_eport != port) )
            {
                gprintk(XENLOG_ERR, "device model set bad event channel %d\n",
                        p->vp_eport);
                break;
            }

            proto_p->state = STATE_IOREQ_NONE;
            proto_p->vp_eport = port;
            *p = *proto_p;

            prepare_wait_on_xen_event_channel(port);

            /*
             * Following happens /after/ blocking and setting up ioreq
             * contents. prepare_wait_on_xen_event_channel() is an implicit
             * barrier.
             */
            p->state = STATE_IOREQ_READY;
            notify_via_xen_event_channel(d, port);

            sv->pending = true;
            return X86EMUL_RETRY;
        }
    }

    return X86EMUL_UNHANDLEABLE;
}

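/*
 * Send p to every enabled ioreq server (used e.g. for broadcast-style
 * requests such as time offset updates); returns the number of servers
 * that failed to accept the request.
 */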
unsigned int hvm_broadcast_ioreq(ioreq_t *p, bool buffered)
{
    struct domain *d = current->domain;
    struct hvm_ioreq_server *s;
    unsigned int id, failed = 0;

    FOR_EACH_IOREQ_SERVER(d, id, s)
    {
        if ( !s->enabled )
            continue;

        if ( hvm_send_ioreq(s, p, buffered) == X86EMUL_UNHANDLEABLE )
            failed++;
    }

    return failed;
}

static int hvm_access_cf8(
    int dir, unsigned int port, unsigned int bytes, uint32_t *val)
{
    struct domain *d = current->domain;

    if ( dir == IOREQ_WRITE && bytes == 4 )
        d->arch.hvm.pci_cf8 = *val;

    /* We always need to fall through to the catch all emulator */
    return X86EMUL_UNHANDLEABLE;
}

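/*
 * Domain construction hook: intercept port 0xcf8 so that the last PCI
 * config address written by the guest is latched in pci_cf8 for use by
 * hvm_select_ioreq_server() when decoding subsequent 0xcfc data cycles.
 */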
void hvm_ioreq_init(struct domain *d)
{
    spin_lock_init(&d->arch.hvm.ioreq_server.lock);

    register_portio_handler(d, 0xcf8, 4, hvm_access_cf8);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */