/*
 * Copyright (c) 2017 Citrix Systems Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; If not, see <http://www.gnu.org/licenses/>.
 */

#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include "private.h"

static int all_restrict_cb(Xentoolcore__Active_Handle *ah, domid_t domid) {
    xendevicemodel_handle *dmod = CONTAINER_OF(ah, *dmod, tc_ah);

    if (dmod->fd < 0)
        /* just in case */
        return 0;

    return xendevicemodel_restrict(dmod, domid);
}

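/*
 * Create a handle for issuing device model operations. The handle
 * registers itself with xentoolcore so that a later "restrict all"
 * request also restricts this handle (see all_restrict_cb() above).
 *
 * A minimal usage sketch (passing a NULL logger makes the library create
 * a default stderr logger; error handling is elided):
 *
 *     xendevicemodel_handle *dmod = xendevicemodel_open(NULL, 0);
 *     if (dmod) {
 *         ... issue xendevicemodel_*() calls against a domain ...
 *         xendevicemodel_close(dmod);
 *     }
 */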
xendevicemodel_handle *xendevicemodel_open(xentoollog_logger *logger,
                                           unsigned open_flags)
{
    xendevicemodel_handle *dmod = calloc(1, sizeof(*dmod));
    int rc;

    if (!dmod)
        return NULL;

    dmod->fd = -1;
    dmod->tc_ah.restrict_callback = all_restrict_cb;
    xentoolcore__register_active_handle(&dmod->tc_ah);

    dmod->flags = open_flags;
    dmod->logger = logger;
    dmod->logger_tofree = NULL;

    if (!dmod->logger) {
        dmod->logger = dmod->logger_tofree =
            (xentoollog_logger*)
            xtl_createlogger_stdiostream(stderr, XTL_PROGRESS, 0);
        if (!dmod->logger)
            goto err;
    }

    dmod->xcall = xencall_open(dmod->logger, 0);
    if (!dmod->xcall)
        goto err;

    rc = osdep_xendevicemodel_open(dmod);
    if (rc)
        goto err;

    return dmod;

err:
    osdep_xendevicemodel_close(dmod);
    xentoolcore__deregister_active_handle(&dmod->tc_ah);
    xencall_close(dmod->xcall);
    xtl_logger_destroy(dmod->logger_tofree);
    free(dmod);
    return NULL;
}

int xendevicemodel_close(xendevicemodel_handle *dmod)
{
    int rc;

    if (!dmod)
        return 0;

    rc = osdep_xendevicemodel_close(dmod);

    xentoolcore__deregister_active_handle(&dmod->tc_ah);
    xencall_close(dmod->xcall);
    xtl_logger_destroy(dmod->logger_tofree);
    free(dmod);
    return rc;
}

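/*
 * Marshal a dm_op hypercall: each caller-supplied buffer is copied into
 * a hypercall-safe bounce buffer allocated from xencall, the hypercall
 * is issued, and the (possibly updated) contents are copied back out so
 * the caller sees any data the hypervisor wrote.
 */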
int xendevicemodel_xcall(xendevicemodel_handle *dmod,
                         domid_t domid, unsigned int nr_bufs,
                         struct xendevicemodel_buf bufs[])
{
    int ret = -1;
    void **xcall_bufs;
    xen_dm_op_buf_t *op_bufs = NULL;
    unsigned int i;

    xcall_bufs = calloc(nr_bufs, sizeof(*xcall_bufs));
    if (xcall_bufs == NULL)
        goto out;

    op_bufs = xencall_alloc_buffer(dmod->xcall, sizeof(xen_dm_op_buf_t) *
                                   nr_bufs);
    if (op_bufs == NULL)
        goto out;

    for (i = 0; i < nr_bufs; i++) {
        xcall_bufs[i] = xencall_alloc_buffer(dmod->xcall, bufs[i].size);
        if (xcall_bufs[i] == NULL)
            goto out;

        memcpy(xcall_bufs[i], bufs[i].ptr, bufs[i].size);
        set_xen_guest_handle_raw(op_bufs[i].h, xcall_bufs[i]);

        op_bufs[i].size = bufs[i].size;
    }

    ret = xencall3(dmod->xcall, __HYPERVISOR_dm_op,
                   domid, nr_bufs, (unsigned long)op_bufs);
    if (ret < 0)
        goto out;

    for (i = 0; i < nr_bufs; i++)
        memcpy(bufs[i].ptr, xcall_bufs[i], bufs[i].size);

out:
    if (xcall_bufs)
        for (i = 0; i < nr_bufs; i++)
            xencall_free_buffer(dmod->xcall, xcall_bufs[i]);

    xencall_free_buffer(dmod->xcall, op_bufs);
    free(xcall_bufs);

    return ret;
}

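/*
 * Variadic helper: the trailing arguments are nr_bufs (pointer, size)
 * pairs, e.g. xendevicemodel_op(dmod, domid, 1, &op, sizeof(op)).
 * Each size must be passed with size_t width, since that is the type
 * va_arg() extracts below.
 */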
static int xendevicemodel_op(
    xendevicemodel_handle *dmod, domid_t domid, unsigned int nr_bufs, ...)
{
    struct xendevicemodel_buf *bufs;
    va_list args;
    unsigned int i;
    int ret;

    bufs = calloc(nr_bufs, sizeof(*bufs));
    if (!bufs)
        return -1;

    va_start(args, nr_bufs);
    for (i = 0; i < nr_bufs; i++) {
        bufs[i].ptr = va_arg(args, void *);
        bufs[i].size = va_arg(args, size_t);
    }
    va_end(args);

    ret = osdep_xendevicemodel_op(dmod, domid, nr_bufs, bufs);

    free(bufs);

    return ret;
}

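/*
 * Create a new ioreq server for the given domain. A typical emulator
 * brings a server up roughly as follows (a sketch only; error handling
 * and the mapping of the ioreq pages are elided):
 *
 *     ioservid_t id;
 *     xendevicemodel_create_ioreq_server(dmod, domid,
 *                                        0 /+ no buffered ioreqs +/, &id);
 *     xendevicemodel_map_io_range_to_ioreq_server(dmod, domid, id,
 *                                                 0 /+ port IO +/,
 *                                                 0x3f8, 0x3ff);
 *     xendevicemodel_set_ioreq_server_state(dmod, domid, id, 1);
 */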
int xendevicemodel_create_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, int handle_bufioreq,
    ioservid_t *id)
{
    struct xen_dm_op op;
    struct xen_dm_op_create_ioreq_server *data;
    int rc;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_create_ioreq_server;
    data = &op.u.create_ioreq_server;

    data->handle_bufioreq = handle_bufioreq;

    rc = xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
    if (rc)
        return rc;

    *id = data->id;

    return 0;
}

int xendevicemodel_get_ioreq_server_info(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    xen_pfn_t *ioreq_gfn, xen_pfn_t *bufioreq_gfn,
    evtchn_port_t *bufioreq_port)
{
    struct xen_dm_op op;
    struct xen_dm_op_get_ioreq_server_info *data;
    int rc;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_get_ioreq_server_info;
    data = &op.u.get_ioreq_server_info;

    data->id = id;

    /*
     * If the caller is not requesting gfn values then instruct the
     * hypercall not to retrieve them as this may cause them to be
     * mapped.
     */
    if (!ioreq_gfn && !bufioreq_gfn)
        data->flags |= XEN_DMOP_no_gfns;

    rc = xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
    if (rc)
        return rc;

    if (ioreq_gfn)
        *ioreq_gfn = data->ioreq_gfn;

    if (bufioreq_gfn)
        *bufioreq_gfn = data->bufioreq_gfn;

    if (bufioreq_port)
        *bufioreq_port = data->bufioreq_port;

    return 0;
}

int xendevicemodel_map_io_range_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    struct xen_dm_op op;
    struct xen_dm_op_ioreq_server_range *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_map_io_range_to_ioreq_server;
    data = &op.u.map_io_range_to_ioreq_server;

    data->id = id;
    data->type = is_mmio ? XEN_DMOP_IO_RANGE_MEMORY : XEN_DMOP_IO_RANGE_PORT;
    data->start = start;
    data->end = end;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

int xendevicemodel_unmap_io_range_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int is_mmio,
    uint64_t start, uint64_t end)
{
    struct xen_dm_op op;
    struct xen_dm_op_ioreq_server_range *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_unmap_io_range_from_ioreq_server;
    data = &op.u.unmap_io_range_from_ioreq_server;

    data->id = id;
    data->type = is_mmio ? XEN_DMOP_IO_RANGE_MEMORY : XEN_DMOP_IO_RANGE_PORT;
    data->start = start;
    data->end = end;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

int xendevicemodel_map_mem_type_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, uint16_t type,
    uint32_t flags)
{
    struct xen_dm_op op;
    struct xen_dm_op_map_mem_type_to_ioreq_server *data;

    if (type != HVMMEM_ioreq_server ||
        flags & ~XEN_DMOP_IOREQ_MEM_ACCESS_WRITE) {
        errno = EINVAL;
        return -1;
    }

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_map_mem_type_to_ioreq_server;
    data = &op.u.map_mem_type_to_ioreq_server;

    data->id = id;
    data->type = type;
    data->flags = flags;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

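/*
 * XEN_DMOP_PCI_SBDF packs the segment, bus, device and function numbers
 * into a single value; the device field is 5 bits wide and the function
 * field 3, hence the range checks below.
 */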
int xendevicemodel_map_pcidev_to_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    struct xen_dm_op op;
    struct xen_dm_op_ioreq_server_range *data;

    if (device > 0x1f || function > 0x7) {
        errno = EINVAL;
        return -1;
    }

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_map_io_range_to_ioreq_server;
    data = &op.u.map_io_range_to_ioreq_server;

    data->id = id;
    data->type = XEN_DMOP_IO_RANGE_PCI;

    /*
     * The underlying hypercall will deal with ranges of PCI SBDF
     * but, for simplicity, the API only uses singletons.
     */
    data->start = data->end = XEN_DMOP_PCI_SBDF((uint64_t)segment,
                                                (uint64_t)bus,
                                                (uint64_t)device,
                                                (uint64_t)function);

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

int xendevicemodel_unmap_pcidev_from_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id,
    uint16_t segment, uint8_t bus, uint8_t device, uint8_t function)
{
    struct xen_dm_op op;
    struct xen_dm_op_ioreq_server_range *data;

    if (device > 0x1f || function > 0x7) {
        errno = EINVAL;
        return -1;
    }

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_unmap_io_range_from_ioreq_server;
    data = &op.u.unmap_io_range_from_ioreq_server;

    data->id = id;
    data->type = XEN_DMOP_IO_RANGE_PCI;

    /*
     * The underlying hypercall will deal with ranges of PCI SBDF
     * but, for simplicity, the API only uses singletons.
     */
    data->start = data->end = XEN_DMOP_PCI_SBDF((uint64_t)segment,
                                                (uint64_t)bus,
                                                (uint64_t)device,
                                                (uint64_t)function);

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

int xendevicemodel_destroy_ioreq_server(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id)
{
    struct xen_dm_op op;
    struct xen_dm_op_destroy_ioreq_server *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_destroy_ioreq_server;
    data = &op.u.destroy_ioreq_server;

    data->id = id;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

int xendevicemodel_set_ioreq_server_state(
    xendevicemodel_handle *dmod, domid_t domid, ioservid_t id, int enabled)
{
    struct xen_dm_op op;
    struct xen_dm_op_set_ioreq_server_state *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_set_ioreq_server_state;
    data = &op.u.set_ioreq_server_state;

    data->id = id;
    data->enabled = !!enabled;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

int xendevicemodel_set_pci_intx_level(
    xendevicemodel_handle *dmod, domid_t domid, uint16_t segment,
    uint8_t bus, uint8_t device, uint8_t intx, unsigned int level)
{
    struct xen_dm_op op;
    struct xen_dm_op_set_pci_intx_level *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_set_pci_intx_level;
    data = &op.u.set_pci_intx_level;

    data->domain = segment;
    data->bus = bus;
    data->device = device;
    data->intx = intx;
    data->level = level;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

int xendevicemodel_set_isa_irq_level(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t irq,
    unsigned int level)
{
    struct xen_dm_op op;
    struct xen_dm_op_set_isa_irq_level *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_set_isa_irq_level;
    data = &op.u.set_isa_irq_level;

    data->isa_irq = irq;
    data->level = level;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

int xendevicemodel_set_pci_link_route(
    xendevicemodel_handle *dmod, domid_t domid, uint8_t link, uint8_t irq)
{
    struct xen_dm_op op;
    struct xen_dm_op_set_pci_link_route *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_set_pci_link_route;
    data = &op.u.set_pci_link_route;

    data->link = link;
    data->isa_irq = irq;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

int xendevicemodel_inject_msi(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t msi_addr,
    uint32_t msi_data)
{
    struct xen_dm_op op;
    struct xen_dm_op_inject_msi *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_inject_msi;
    data = &op.u.inject_msi;

    data->addr = msi_addr;
    data->data = msi_data;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

int xendevicemodel_track_dirty_vram(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr, unsigned long *dirty_bitmap)
{
    struct xen_dm_op op;
    struct xen_dm_op_track_dirty_vram *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_track_dirty_vram;
    data = &op.u.track_dirty_vram;

    data->first_pfn = first_pfn;
    data->nr = nr;

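    /* The dirty bitmap carries one bit per pfn, so round nr up to whole
     * bytes when sizing the second buffer. */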
    return xendevicemodel_op(dmod, domid, 2, &op, sizeof(op),
                             dirty_bitmap, (size_t)(nr + 7) / 8);
}

int xendevicemodel_modified_memory_bulk(
    xendevicemodel_handle *dmod, domid_t domid,
    struct xen_dm_op_modified_memory_extent *extents, uint32_t nr)
{
    struct xen_dm_op op;
    struct xen_dm_op_modified_memory *header;
    size_t extents_size = nr * sizeof(struct xen_dm_op_modified_memory_extent);

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_modified_memory;
    header = &op.u.modified_memory;

    header->nr_extents = nr;
    header->opaque = 0;

    return xendevicemodel_op(dmod, domid, 2, &op, sizeof(op),
                             extents, extents_size);
}

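/*
 * Convenience wrapper around xendevicemodel_modified_memory_bulk() for
 * the common case of a single contiguous extent.
 */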
int xendevicemodel_modified_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t first_pfn,
    uint32_t nr)
{
    struct xen_dm_op_modified_memory_extent extent = {
        .first_pfn = first_pfn,
        .nr = nr,
    };

    return xendevicemodel_modified_memory_bulk(dmod, domid, &extent, 1);
}

int xendevicemodel_set_mem_type(
    xendevicemodel_handle *dmod, domid_t domid, hvmmem_type_t mem_type,
    uint64_t first_pfn, uint32_t nr)
{
    struct xen_dm_op op;
    struct xen_dm_op_set_mem_type *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_set_mem_type;
    data = &op.u.set_mem_type;

    data->mem_type = mem_type;
    data->first_pfn = first_pfn;
    data->nr = nr;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

int xendevicemodel_inject_event(
    xendevicemodel_handle *dmod, domid_t domid, int vcpu, uint8_t vector,
    uint8_t type, uint32_t error_code, uint8_t insn_len, uint64_t extra)
{
    struct xen_dm_op op;
    struct xen_dm_op_inject_event *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_inject_event;
    data = &op.u.inject_event;

    data->vcpuid = vcpu;
    data->vector = vector;
    data->type = type;
    data->error_code = error_code;
    data->insn_len = insn_len;
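    /* 'extra' is type-specific data; for a page fault it supplies the
     * fault address to be loaded into %cr2. */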
    data->cr2 = extra;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

int xendevicemodel_shutdown(
    xendevicemodel_handle *dmod, domid_t domid, unsigned int reason)
{
    struct xen_dm_op op;
    struct xen_dm_op_remote_shutdown *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_remote_shutdown;
    data = &op.u.remote_shutdown;

    data->reason = reason;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

int xendevicemodel_relocate_memory(
    xendevicemodel_handle *dmod, domid_t domid, uint32_t size, uint64_t src_gfn,
    uint64_t dst_gfn)
{
    struct xen_dm_op op;
    struct xen_dm_op_relocate_memory *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_relocate_memory;
    data = &op.u.relocate_memory;

    data->size = size;
    data->pad = 0;
    data->src_gfn = src_gfn;
    data->dst_gfn = dst_gfn;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

int xendevicemodel_pin_memory_cacheattr(
    xendevicemodel_handle *dmod, domid_t domid, uint64_t start, uint64_t end,
    uint32_t type)
{
    struct xen_dm_op op;
    struct xen_dm_op_pin_memory_cacheattr *data;

    memset(&op, 0, sizeof(op));

    op.op = XEN_DMOP_pin_memory_cacheattr;
    data = &op.u.pin_memory_cacheattr;

    data->start = start;
    data->end = end;
    data->type = type;

    return xendevicemodel_op(dmod, domid, 1, &op, sizeof(op));
}

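/*
 * Restrict the handle so it may only operate on @domid; the mechanism
 * is OS-specific. This is also reached via all_restrict_cb() when
 * xentoolcore asks all registered handles to restrict themselves.
 */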
int xendevicemodel_restrict(xendevicemodel_handle *dmod, domid_t domid)
{
    return osdep_xendevicemodel_restrict(dmod, domid);
}

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */