/*
 * intercept.c: Handle performance-critical I/O packets in hypervisor space
 *
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2008, Citrix Systems, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; If not, see <http://www.gnu.org/licenses/>.
 */

#include <xen/types.h>
#include <xen/sched.h>
#include <asm/regs.h>
#include <asm/hvm/emulate.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
#include <asm/hvm/domain.h>
#include <xen/lib.h>
#include <xen/sched.h>
#include <asm/current.h>
#include <io_ports.h>
#include <xen/event.h>
#include <xen/iommu.h>

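/*
 * Decide whether this MMIO handler claims the access described by @p.
 * The first byte must pass the handler's check(); if the last byte then
 * fails it, the access straddles the handled region and the domain is
 * crashed.
 */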
static bool_t hvm_mmio_accept(const struct hvm_io_handler *handler,
                              const ioreq_t *p)
{
    paddr_t first = hvm_mmio_first_byte(p), last;

    BUG_ON(handler->type != IOREQ_TYPE_COPY);

    if ( !handler->mmio.ops->check(current, first) )
        return 0;

    /* Make sure the handler will accept the whole access. */
    last = hvm_mmio_last_byte(p);
    if ( last != first &&
         !handler->mmio.ops->check(current, last) )
        domain_crash(current->domain);

    return 1;
}

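/* Forward an MMIO read to the registered hvm_mmio_ops. */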
static int hvm_mmio_read(const struct hvm_io_handler *handler,
                         uint64_t addr, uint32_t size, uint64_t *data)
{
    BUG_ON(handler->type != IOREQ_TYPE_COPY);

    return handler->mmio.ops->read(current, addr, size, data);
}

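/* Forward an MMIO write to the registered hvm_mmio_ops. */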
static int hvm_mmio_write(const struct hvm_io_handler *handler,
                          uint64_t addr, uint32_t size, uint64_t data)
{
    BUG_ON(handler->type != IOREQ_TYPE_COPY);

    return handler->mmio.ops->write(current, addr, size, data);
}

static const struct hvm_io_ops mmio_ops = {
    .accept = hvm_mmio_accept,
    .read = hvm_mmio_read,
    .write = hvm_mmio_write
};

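/*
 * A port I/O handler accepts an access only if it lies entirely within
 * the registered [port, port + size) range.
 */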
static bool_t hvm_portio_accept(const struct hvm_io_handler *handler,
                                const ioreq_t *p)
{
    unsigned int start = handler->portio.port;
    unsigned int end = start + handler->portio.size;

    BUG_ON(handler->type != IOREQ_TYPE_PIO);

    return (p->addr >= start) && ((p->addr + p->size) <= end);
}

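/*
 * Port reads go through a 32-bit intermediate (defaulting to all ones)
 * before being widened into the caller's 64-bit buffer.
 */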
static int hvm_portio_read(const struct hvm_io_handler *handler,
                           uint64_t addr, uint32_t size, uint64_t *data)
{
    uint32_t val = ~0u;
    int rc;

    BUG_ON(handler->type != IOREQ_TYPE_PIO);

    rc = handler->portio.action(IOREQ_READ, addr, size, &val);
    *data = val;

    return rc;
}

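/*
 * Port writes narrow the 64-bit payload to the 32-bit value the portio
 * action expects.
 */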
static int hvm_portio_write(const struct hvm_io_handler *handler,
                            uint64_t addr, uint32_t size, uint64_t data)
{
    uint32_t val = data;

    BUG_ON(handler->type != IOREQ_TYPE_PIO);

    return handler->portio.action(IOREQ_WRITE, addr, size, &val);
}

static const struct hvm_io_ops portio_ops = {
    .accept = hvm_portio_accept,
    .read = hvm_portio_read,
    .write = hvm_portio_write
};

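/*
 * Carry out an intercepted, possibly repeated, I/O request.  MMIO
 * iterations step through guest physical addresses (forwards or
 * backwards according to the direction flag), while port I/O always
 * targets the same port.  Data is copied to or from guest memory when
 * the request carries a pointer rather than an immediate value.
 */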
int hvm_process_io_intercept(const struct hvm_io_handler *handler,
                             ioreq_t *p)
{
    const struct hvm_io_ops *ops = handler->ops;
    int rc = X86EMUL_OKAY, i, step = p->df ? -p->size : p->size;
    uint64_t data;
    uint64_t addr;

    if ( p->dir == IOREQ_READ )
    {
        for ( i = 0; i < p->count; i++ )
        {
            addr = (p->type == IOREQ_TYPE_COPY) ?
                   p->addr + step * i :
                   p->addr;
            data = 0;
            rc = ops->read(handler, addr, p->size, &data);
            if ( rc != X86EMUL_OKAY )
                break;

            if ( p->data_is_ptr )
            {
                switch ( hvm_copy_to_guest_phys(p->data + step * i,
                                                &data, p->size, current) )
                {
                case HVMTRANS_okay:
                    break;
                case HVMTRANS_bad_gfn_to_mfn:
                    /* Drop the write as real hardware would. */
                    continue;
                case HVMTRANS_bad_linear_to_gfn:
                case HVMTRANS_gfn_paged_out:
                case HVMTRANS_gfn_shared:
                case HVMTRANS_need_retry:
                    ASSERT_UNREACHABLE();
                    /* fall through */
                default:
                    domain_crash(current->domain);
                    return X86EMUL_UNHANDLEABLE;
                }
            }
            else
                p->data = data;
        }
    }
    else /* p->dir == IOREQ_WRITE */
    {
        for ( i = 0; i < p->count; i++ )
        {
            if ( p->data_is_ptr )
            {
                struct vcpu *curr = current;
                unsigned int token = hvmemul_cache_disable(curr);

                data = 0;
                switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
                                                  p->size) )
                {
                case HVMTRANS_okay:
                    break;
                case HVMTRANS_bad_gfn_to_mfn:
                    data = ~0;
                    break;
                case HVMTRANS_bad_linear_to_gfn:
                case HVMTRANS_gfn_paged_out:
                case HVMTRANS_gfn_shared:
                case HVMTRANS_need_retry:
                    ASSERT_UNREACHABLE();
                    /* fall through */
                default:
                    domain_crash(curr->domain);
                    return X86EMUL_UNHANDLEABLE;
                }

                hvmemul_cache_restore(curr, token);
            }
            else
                data = p->data;

            addr = (p->type == IOREQ_TYPE_COPY) ?
                   p->addr + step * i :
                   p->addr;
            rc = ops->write(handler, addr, p->size, data);
            if ( rc != X86EMUL_OKAY )
                break;
        }
    }

    if ( i )
    {
        p->count = i;
        rc = X86EMUL_OKAY;
    }
    else if ( rc == X86EMUL_UNHANDLEABLE )
    {
        /*
         * Don't forward entire batches to the device model: this would
         * prevent the internal handlers from seeing subsequent iterations
         * of the request.
         */
        p->count = 1;
    }

    return rc;
}

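/*
 * Scan the current domain's handler table for the first handler of the
 * matching type whose accept() callback claims the request.
 */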
static const struct hvm_io_handler *hvm_find_io_handler(const ioreq_t *p)
{
    struct domain *curr_d = current->domain;
    unsigned int i;

    BUG_ON((p->type != IOREQ_TYPE_PIO) &&
           (p->type != IOREQ_TYPE_COPY));

    for ( i = 0; i < curr_d->arch.hvm.io_handler_count; i++ )
    {
        const struct hvm_io_handler *handler =
            &curr_d->arch.hvm.io_handler[i];
        const struct hvm_io_ops *ops = handler->ops;

        if ( handler->type != p->type )
            continue;

        if ( ops->accept(handler, p) )
            return handler;
    }

    return NULL;
}

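/*
 * Try to satisfy an I/O request with an internal handler.  Returns
 * X86EMUL_UNHANDLEABLE if no registered handler accepts it.
 */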
int hvm_io_intercept(ioreq_t *p)
{
    const struct hvm_io_handler *handler;
    const struct hvm_io_ops *ops;
    int rc;

    handler = hvm_find_io_handler(p);

    if ( handler == NULL )
        return X86EMUL_UNHANDLEABLE;

    rc = hvm_process_io_intercept(handler, p);

    ops = handler->ops;
    if ( ops->complete != NULL )
        ops->complete(handler);

    return rc;
}

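/*
 * Claim the next free slot in the domain's handler table; the domain is
 * crashed if all NR_IO_HANDLERS slots are already in use.
 */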
struct hvm_io_handler *hvm_next_io_handler(struct domain *d)
{
    unsigned int i = d->arch.hvm.io_handler_count++;

    ASSERT(d->arch.hvm.io_handler);

    if ( i == NR_IO_HANDLERS )
    {
        domain_crash(d);
        return NULL;
    }

    return &d->arch.hvm.io_handler[i];
}

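/* Register an internal MMIO handler backed by the given hvm_mmio_ops. */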
void register_mmio_handler(struct domain *d,
                           const struct hvm_mmio_ops *ops)
{
    struct hvm_io_handler *handler = hvm_next_io_handler(d);

    if ( handler == NULL )
        return;

    handler->type = IOREQ_TYPE_COPY;
    handler->ops = &mmio_ops;
    handler->mmio.ops = ops;
}

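/* Register an internal handler for the port range [port, port + size). */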
void register_portio_handler(struct domain *d, unsigned int port,
                             unsigned int size, portio_action_t action)
{
    struct hvm_io_handler *handler = hvm_next_io_handler(d);

    if ( handler == NULL )
        return;

    handler->type = IOREQ_TYPE_PIO;
    handler->ops = &portio_ops;
    handler->portio.port = port;
    handler->portio.size = size;
    handler->portio.action = action;
}

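/*
 * Move an already registered port I/O handler from old_port to new_port,
 * leaving its size and action unchanged.  Returns false if no matching
 * handler is found.
 */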
bool relocate_portio_handler(struct domain *d, unsigned int old_port,
                             unsigned int new_port, unsigned int size)
{
    unsigned int i;

    for ( i = 0; i < d->arch.hvm.io_handler_count; i++ )
    {
        struct hvm_io_handler *handler =
            &d->arch.hvm.io_handler[i];

        if ( handler->type != IOREQ_TYPE_PIO )
            continue;

        if ( (handler->portio.port == old_port) &&
             (handler->portio.size == size) )
        {
            handler->portio.port = new_port;
            return true;
        }
    }

    return false;
}

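/*
 * Report whether @gpa is claimed by an internal MMIO handler, using a
 * minimal single-byte request purely to probe accept().
 */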
bool_t hvm_mmio_internal(paddr_t gpa)
{
    const struct hvm_io_handler *handler;
    const struct hvm_io_ops *ops;
    ioreq_t p = {
        .type = IOREQ_TYPE_COPY,
        .addr = gpa,
        .count = 1,
        .size = 1,
    };

    handler = hvm_find_io_handler(&p);

    if ( handler == NULL )
        return 0;

    ops = handler->ops;
    if ( ops->complete != NULL )
        ops->complete(handler);

    return 1;
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */