/xen/xen/drivers/passthrough/
iommu.c
    168  struct domain_iommu *hd = dom_iommu(d);          in iommu_domain_init() local
    178  hd->node = NUMA_NO_NODE;                         in iommu_domain_init()
    185  hd->platform_ops = iommu_get_ops();              in iommu_domain_init()
    186  ret = hd->platform_ops->init(d);                 in iommu_domain_init()
    205  hd->need_sync = !iommu_use_hap_pt(d);            in iommu_domain_init()
    207  ASSERT(!(hd->need_sync && hd->hap_pt_share));    in iommu_domain_init()
    214  struct domain_iommu *hd = dom_iommu(d);          in iommu_hwdom_init() local
    221  hd->platform_ops->hwdom_init(d);                 in iommu_hwdom_init()
    226  struct domain_iommu *hd = dom_iommu(d);          in iommu_teardown() local
    228  hd->platform_ops->teardown(d);                   in iommu_teardown()
    [all …]
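The iommu.c matches show the generic passthrough layer's core pattern: fetch the per-domain IOMMU state with dom_iommu(), then dispatch through the hd->platform_ops hook table that iommu_get_ops() selected. A minimal sketch of that flow, using simplified stand-ins for Xen's types (the definitions below are illustrative, not the real ones):

    #include <stdbool.h>

    /* Simplified stand-ins for Xen's types; illustration only. */
    struct domain;                             /* opaque in this sketch */

    struct iommu_ops {
        int  (*init)(struct domain *d);        /* vendor-specific setup */
        void (*hwdom_init)(struct domain *d);  /* hardware-domain setup */
        void (*teardown)(struct domain *d);    /* vendor-specific cleanup */
    };

    struct domain_iommu {
        const struct iommu_ops *platform_ops;  /* VT-d or AMD-Vi hook table */
        int node;                              /* NUMA node for allocations */
        bool need_sync;                        /* mirror CPU P2M into IOMMU? */
    };

    extern struct domain_iommu *dom_iommu(struct domain *d);
    extern const struct iommu_ops *iommu_get_ops(void);

    static int iommu_domain_init_sketch(struct domain *d)
    {
        struct domain_iommu *hd = dom_iommu(d);

        hd->node = -1;                         /* stands in for NUMA_NO_NODE */
        hd->platform_ops = iommu_get_ops();    /* select the vendor driver */
        return hd->platform_ops->init(d);      /* vendor completes the setup */
    }
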
device_tree.c
    32   struct domain_iommu *hd = dom_iommu(d);                            in iommu_assign_dt_device() local
    46   rc = hd->platform_ops->assign_device(d, 0, dt_to_dev(dev), 0);     in iommu_assign_dt_device()
    51   list_add(&dev->domain_list, &hd->dt_devices);                      in iommu_assign_dt_device()
    62   const struct domain_iommu *hd = dom_iommu(d);                      in iommu_deassign_dt_device() local
    73   rc = hd->platform_ops->reassign_device(d, NULL, 0, dt_to_dev(dev)); in iommu_deassign_dt_device()
    109  const struct domain_iommu *hd = dom_iommu(d);                      in iommu_release_dt_devices() local
    116  list_for_each_entry_safe(dev, _dev, &hd->dt_devices, domain_list)  in iommu_release_dt_devices()
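iommu_assign_dt_device() (lines 46-51) shows the ownership discipline for device-tree passthrough: the vendor assign_device hook does the IOMMU programming, and only on success is the device linked onto hd->dt_devices, the list that iommu_release_dt_devices() later walks. A hedged sketch of that pattern, again with stand-in types:

    /* Stand-in types; the real list primitives live in xen/list.h. */
    struct list_head { struct list_head *next, *prev; };

    static void list_add(struct list_head *n, struct list_head *head)
    {
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
    }

    struct domain;
    struct dt_device { struct list_head domain_list; };

    struct iommu_ops {
        int (*assign_device)(struct domain *d, unsigned int devfn,
                             struct dt_device *dev, unsigned int flag);
    };

    struct domain_iommu {
        const struct iommu_ops *platform_ops;
        struct list_head dt_devices;      /* DT devices this domain owns */
    };

    extern struct domain_iommu *dom_iommu(struct domain *d);

    static int iommu_assign_dt_device_sketch(struct domain *d,
                                             struct dt_device *dev)
    {
        struct domain_iommu *hd = dom_iommu(d);
        int rc = hd->platform_ops->assign_device(d, 0, dev, 0);

        if ( rc )
            return rc;                    /* never track a failed assignment */

        list_add(&dev->domain_list, &hd->dt_devices);
        return 0;
    }
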
pci.c
    929   const struct domain_iommu *hd = dom_iommu(d);    in deassign_device() local
    1407  const struct domain_iommu *hd;                   in iommu_add_device() local
    1416  hd = dom_iommu(pdev->domain);                    in iommu_add_device()
    1438  const struct domain_iommu *hd;                   in iommu_enable_device() local
    1445  hd = dom_iommu(pdev->domain);                    in iommu_enable_device()
    1447  !hd->platform_ops->enable_device )               in iommu_enable_device()
    1455  const struct domain_iommu *hd;                   in iommu_remove_device() local
    1461  hd = dom_iommu(pdev->domain);                    in iommu_remove_device()
    1509  const struct domain_iommu *hd = dom_iommu(d);    in assign_device() local
    1566  const struct domain_iommu *hd = dom_iommu(d);    in iommu_get_device_group() local
    [all …]
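The iommu_enable_device() matches (lines 1445-1447) illustrate an optional-callback guard: not every vendor driver implements enable_device, so the pointer is tested before dispatching. A sketch under the same stand-in-type assumptions:

    struct domain;
    struct pci_dev { struct domain *domain; };       /* owning domain */

    struct iommu_ops {
        int (*enable_device)(struct pci_dev *pdev);  /* optional: may be NULL */
    };

    struct domain_iommu {
        const struct iommu_ops *platform_ops;
    };

    extern const struct domain_iommu *dom_iommu(struct domain *d);

    static int iommu_enable_device_sketch(struct pci_dev *pdev)
    {
        const struct domain_iommu *hd = dom_iommu(pdev->domain);

        if ( !hd->platform_ops || !hd->platform_ops->enable_device )
            return 0;                     /* hook not provided: no-op */

        return hd->platform_ops->enable_device(pdev);
    }
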
/xen/xen/drivers/passthrough/amd/
iommu_map.c
    195  table = hd->arch.root_table;             in iommu_pde_from_dfn()
    196  level = hd->arch.paging_mode;            in iommu_pde_from_dfn()
    299  spin_lock(&hd->arch.mapping_lock);       in amd_iommu_map_page()
    301  rc = amd_iommu_alloc_root(hd);           in amd_iommu_map_page()
    325  spin_unlock(&hd->arch.mapping_lock);     in amd_iommu_map_page()
    338  spin_lock(&hd->arch.mapping_lock);       in amd_iommu_unmap_page()
    340  if ( !hd->arch.root_table )              in amd_iommu_unmap_page()
    361  spin_unlock(&hd->arch.mapping_lock);     in amd_iommu_unmap_page()
    465  if ( hd->arch.root_table )               in amd_iommu_quarantine_init()
    471  spin_lock(&hd->arch.mapping_lock);       in amd_iommu_quarantine_init()
    [all …]
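amd_iommu_map_page() and amd_iommu_unmap_page() bracket every page-table mutation with hd->arch.mapping_lock, and amd_iommu_alloc_root() allocates the top-level table lazily on the first mapping. A compressed sketch of that discipline; the spinlock stubs and calloc() allocation are illustrative stand-ins, not how Xen allocates page tables:

    #include <stdlib.h>

    /* Stand-in spinlock; stubs for illustration only. */
    typedef struct { int locked; } spinlock_t;
    static void spin_lock(spinlock_t *l)   { l->locked = 1; }
    static void spin_unlock(spinlock_t *l) { l->locked = 0; }

    struct arch_iommu {
        spinlock_t mapping_lock;    /* guards root_table and all page tables */
        void *root_table;           /* top-level table, allocated lazily */
        int paging_mode;            /* number of page-table levels in use */
    };

    struct domain_iommu { struct arch_iommu arch; };

    static int alloc_root_sketch(struct domain_iommu *hd)
    {
        if ( !hd->arch.root_table )
            hd->arch.root_table = calloc(1, 4096);  /* one zeroed page */
        return hd->arch.root_table ? 0 : -1;        /* -ENOMEM in Xen */
    }

    static int map_page_sketch(struct domain_iommu *hd,
                               unsigned long dfn, unsigned long mfn)
    {
        int rc;

        (void)dfn; (void)mfn;       /* a real walk would consume these */

        spin_lock(&hd->arch.mapping_lock);
        rc = alloc_root_sketch(hd); /* first mapping allocates the root */
        if ( rc == 0 )
        {
            /* ... walk hd->arch.paging_mode levels and install the PTE ... */
        }
        spin_unlock(&hd->arch.mapping_lock);
        return rc;
    }
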
pci_amd_iommu.c
    95   BUG_ON( !hd->arch.root_table || !hd->arch.paging_mode ||                in amd_iommu_setup_domain_device()
    115  hd->arch.paging_mode, valid);                                           in amd_iommu_setup_domain_device()
    216  if ( !hd->arch.root_table )                                             in amd_iommu_alloc_root()
    227  spin_lock(&hd->arch.mapping_lock);                                      in allocate_domain_resources()
    228  rc = amd_iommu_alloc_root(hd);                                          in allocate_domain_resources()
    427  spin_lock(&hd->arch.mapping_lock);                                      in deallocate_iommu_page_tables()
    428  if ( hd->arch.root_table )                                              in deallocate_iommu_page_tables()
    430  deallocate_next_page_table(hd->arch.root_table, hd->arch.paging_mode);  in deallocate_iommu_page_tables()
    431  hd->arch.root_table = NULL;                                             in deallocate_iommu_page_tables()
    611  if ( !hd->arch.root_table )                                             in amd_dump_p2m_table()
    [all …]
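deallocate_iommu_page_tables() (lines 427-431) is the matching teardown: under the same mapping_lock, free the tree recursively from root_table using paging_mode as the depth, then clear the pointer so later code hits the !root_table checks. A sketch of the recursive free; the present-flag/next-pointer entry layout here is an assumption for illustration, not AMD-Vi's real PDE format:

    #include <stdlib.h>

    #define PTES_PER_TABLE 512     /* one 4K table of 8-byte entries */

    /* Assumed entry layout for the sketch: a present flag plus a pointer
     * to the next-level table (leaf details omitted for brevity). */
    struct table;
    struct pte { int present; struct table *next; };
    struct table { struct pte e[PTES_PER_TABLE]; };

    /* Recursively free a tree that is 'level' levels deep. */
    static void deallocate_next_sketch(struct table *t, int level)
    {
        if ( level > 1 )           /* level 1 holds leaf mappings only */
            for ( int i = 0; i < PTES_PER_TABLE; i++ )
                if ( t->e[i].present )
                    deallocate_next_sketch(t->e[i].next, level - 1);
        free(t);                   /* free this table after its children */
    }

    /* Teardown mirrors pci_amd_iommu.c: free, then clear the root pointer. */
    static void teardown_sketch(struct table **root, int paging_mode)
    {
        if ( !*root )
            return;
        deallocate_next_sketch(*root, paging_mode);
        *root = NULL;              /* later lookups now see "no table" */
    }
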
iommu.h
    229  int __must_check amd_iommu_alloc_root(struct domain_iommu *hd);
iommu_guest.c
    810  struct domain_iommu *hd = dom_iommu(d);    in guest_iommu_init() local
    826  hd->arch.g_iommu = iommu;                  in guest_iommu_init()
/xen/xen/drivers/passthrough/vtd/
iommu.c
    271   if ( !hd->arch.pgd_maddr &&                                               in addr_to_dma_page_maddr()
    273   ((hd->arch.pgd_maddr = alloc_pgtable_maddr(1, hd->node)) == 0)) )         in addr_to_dma_page_maddr()
    653   spin_lock(&hd->arch.mapping_lock);                                        in dma_pte_clear_one()
    1770  iommu_free_pagetable(hd->arch.pgd_maddr, agaw_to_level(hd->arch.agaw));   in iommu_domain_teardown()
    1771  hd->arch.pgd_maddr = 0;                                                   in iommu_domain_teardown()
    2671  const struct domain_iommu *hd;                                            in vtd_dump_p2m_table() local
    2676  hd = dom_iommu(d);                                                        in vtd_dump_p2m_table()
    2678  vtd_dump_p2m_table_level(hd->arch.pgd_maddr, agaw_to_level(hd->arch.agaw), 0, 0);  in vtd_dump_p2m_table()
    2689  if ( hd->arch.pgd_maddr )                                                 in intel_iommu_quarantine_init()
    2697  hd->arch.pgd_maddr = alloc_pgtable_maddr(1, hd->node);                    in intel_iommu_quarantine_init()
    [all …]
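VT-d tracks its top level as a machine address, hd->arch.pgd_maddr, allocated on first use in addr_to_dma_page_maddr() and left alone by intel_iommu_quarantine_init() when it already exists (line 2689). A sketch of that allocate-once pattern; alloc_pgtable_maddr() is declared here as a stand-in for Xen's NUMA-aware page-table allocator:

    #include <stdint.h>

    struct arch_iommu { uint64_t pgd_maddr; };     /* machine address; 0 = none */
    struct domain_iommu { struct arch_iommu arch; int node; };

    /* Stand-in declaration: returns the machine address of 'npages' zeroed
     * page-table pages allocated near NUMA node 'node', or 0 on failure. */
    extern uint64_t alloc_pgtable_maddr(unsigned int npages, int node);

    static int quarantine_init_sketch(struct domain_iommu *hd)
    {
        if ( hd->arch.pgd_maddr )
            return 0;                              /* already set up */

        hd->arch.pgd_maddr = alloc_pgtable_maddr(1, hd->node);

        return hd->arch.pgd_maddr ? 0 : -1;        /* -ENOMEM in Xen proper */
    }
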
/xen/xen/drivers/passthrough/x86/
iommu.c
    139  struct domain_iommu *hd = dom_iommu(d);    in arch_iommu_domain_init() local
    141  spin_lock_init(&hd->arch.mapping_lock);    in arch_iommu_domain_init()
    142  INIT_LIST_HEAD(&hd->arch.mapped_rmrrs);    in arch_iommu_domain_init()
/xen/docs/man/
xen-vbd-interface.7.pandoc
    11   (sd*); IDE or AHCI (hd*).
    13   For HVM guests, each whole-disk hd* and sd* device is made
    18   In hd* case with hdtype=ahci, disk will be AHCI via emulated
    79   3 << 8 | disk << 6 | partition         hd, disks 0..1, partitions 0..63
    80   22 << 8 | (disk-2) << 6 | partition    hd, disks 2..3, partitions 0..63
    119  supply as few hd* devices as possible, and for the rest of the disks,
    121  will map provided hd* devices to the corresponding /dev/xvd* (for
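The two encodings quoted at lines 79-80 are the classic Linux IDE device numbers: major 3 for hda/hdb, major 22 for hdc/hdd, each with 6 bits of partition number. A small worked example (the helper name is ours, not from the manpage):

    #include <stdio.h>

    static int hd_devnum(unsigned int disk, unsigned int partition)
    {
        if ( disk > 3 || partition > 63 )
            return -1;                    /* outside the hd* scheme */

        if ( disk < 2 )                   /* hda, hdb: IDE major 3 */
            return 3 << 8 | disk << 6 | partition;

        return 22 << 8 | (disk - 2) << 6 | partition;  /* hdc, hdd: major 22 */
    }

    int main(void)
    {
        printf("hda1 = %#x\n", hd_devnum(0, 1));  /* 0x301: major 3, minor 1 */
        printf("hdc  = %#x\n", hd_devnum(2, 0));  /* 0x1600: major 22, minor 0 */
        return 0;
    }
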
xl-disk-configuration.5.pod
    130  hd[x], xvd[x], sd[x] etc. Please refer to the above specification for
/xen/tools/ocaml/libs/xc/
xenctrl.ml
    203  let last_domid l = (List.hd l).domid + 1 in
/xen/stubdom/grub.patches/
00cvs
    448  + sprintf (name, "/dev/i2o/hd%c", unit);
    458  + /* This is for I2O - we have /dev/i2o/hd<logical drive><partition> */
    474  + fprintf (fp, "(hd%d)\t%s\n", num_hd, name);
/xen/tools/firmware/rombios/
rombios.c
    9166  ;; check that the hd type is really 0x0f.