map_domain_page() cross-reference (Xen source tree)

/xen/xen/include/xen/

domain_page.h
    26  void *map_domain_page(mfn_t mfn);
    47  #define __map_domain_page(pg) map_domain_page(page_to_mfn(pg))
    56  #define map_domain_page(mfn) __mfn_to_virt(mfn_x(mfn))  macro
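map_domain_page() establishes a temporary, CPU-local virtual mapping of the
machine frame 'mfn' and is paired with unmap_domain_page() (the pairing is
visible in kimage.c below).  On builds where all memory sits in a permanent
direct map, the fallback macro at line 56 collapses the call to
__mfn_to_virt().  A minimal sketch of the usual pairing; zero_mfn() is a
hypothetical helper, not code from the tree:

    /* Hypothetical helper, not from the Xen tree. */
    static void zero_mfn(mfn_t mfn)
    {
        void *va = map_domain_page(mfn);   /* temporary per-CPU mapping */

        memset(va, 0, PAGE_SIZE);          /* use it only while mapped */
        unmap_domain_page(va);             /* always release the mapping */
    }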
/xen/xen/arch/x86/

debug.c
    78   l4t = map_domain_page(mfn);  in dbg_pv_va2mfn()
    85   l3t = map_domain_page(mfn);  in dbg_pv_va2mfn()
    94   l2t = map_domain_page(mfn);  in dbg_pv_va2mfn()
    102  l1t = map_domain_page(mfn);  in dbg_pv_va2mfn()
    131  va = map_domain_page(mfn);  in dbg_rw_guest_mem()
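The four calls in dbg_pv_va2mfn() are the standard level-by-level page-table
walk: map one table page, read the entry for the address, unmap, then descend
to the MFN that entry names.  One step of that walk might look like the
sketch below; walk_l4_step() is hypothetical, while the l4e accessors are the
real ones from asm-x86/page.h:

    /* Hypothetical helper: one level of a page-table walk. */
    static mfn_t walk_l4_step(mfn_t root, unsigned int idx)
    {
        l4_pgentry_t *l4t = map_domain_page(root);  /* map this level */
        l4_pgentry_t l4e = l4t[idx];                /* read one entry */

        unmap_domain_page(l4t);                     /* unmap before descending */
        return l4e_get_mfn(l4e);                    /* next level's table MFN */
    }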
x86_emulate.c
    35  memset(((stb).ptr = map_domain_page(_mfn(this_cpu(stubs.mfn)))) + \
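The page mapped here (and in extable.c and smpboot.c below) is the per-CPU
stub page, located through this_cpu(stubs.mfn).  The shape, reduced to a
sketch with hypothetical names (patch_stub, off, buf, len):

    /* Hypothetical helper sketching the per-CPU stub-page pattern. */
    static void patch_stub(unsigned int off, const uint8_t *buf,
                           unsigned int len)
    {
        uint8_t *ptr = map_domain_page(_mfn(this_cpu(stubs.mfn)));

        memcpy(ptr + off, buf, len);   /* patch this CPU's stub bytes */
        unmap_domain_page(ptr);
    }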
tboot.c
    163  pt_vaddr = (struct dma_pte *)map_domain_page(_mfn(paddr_to_pfn(pt_maddr)));  in update_iommu_mac()
    196  void *pg = map_domain_page(_mfn(mfn));  in update_pagetable_mac()
extable.c
    155  uint8_t *ptr = map_domain_page(_mfn(this_cpu(stubs.mfn))) +  in stub_selftest()
smpboot.c
    868  l3t = map_domain_page(l3mfn);  in cleanup_cpu_root_pgt()
    881  l2t = map_domain_page(l2mfn);  in cleanup_cpu_root_pgt()
    955  unsigned char *stub_page = map_domain_page(mfn);  in cpu_smpboot_free()
traps.c
    1327  l4t = map_domain_page(_mfn(mfn));  in __page_fault_type()
    1336  l3t = map_domain_page(_mfn(mfn));  in __page_fault_type()
    1347  l2t = map_domain_page(_mfn(mfn));  in __page_fault_type()
    1358  l1t = map_domain_page(_mfn(mfn));  in __page_fault_type()
/xen/xen/arch/x86/mm/

paging.c
    88   mfn_t *node = map_domain_page(mfn);  in paging_new_log_dirty_node()
    147  l3 = map_domain_page(l4[i4]);  in paging_free_log_dirty_bitmap()
    154  l2 = map_domain_page(l3[i3]);  in paging_free_log_dirty_bitmap()
    304  l3 = map_domain_page(mfn);  in paging_mark_pfn_dirty()
    312  l2 = map_domain_page(mfn);  in paging_mark_pfn_dirty()
    320  l1 = map_domain_page(mfn);  in paging_mark_pfn_dirty()
    374  l4 = map_domain_page(mfn);  in paging_mfn_is_dirty()
    380  l3 = map_domain_page(mfn);  in paging_mfn_is_dirty()
    386  l2 = map_domain_page(mfn);  in paging_mfn_is_dirty()
    392  l1 = map_domain_page(mfn);  in paging_mfn_is_dirty()
    [all …]
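The log-dirty bitmap is a multi-level trie whose intermediate nodes are pages
of mfn_t, as line 88 shows; lines 147/154 index a mapped node directly to
reach the next level.  One descent step as a sketch (ld_child() is
hypothetical):

    /* Hypothetical helper: fetch one child MFN from a trie node. */
    static mfn_t ld_child(mfn_t node_mfn, unsigned int idx)
    {
        mfn_t *node = map_domain_page(node_mfn);  /* page-sized mfn_t array */
        mfn_t child = node[idx];

        unmap_domain_page(node);
        return child;
    }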
p2m-pt.c
    145  l1_pgentry_t *l3_table = map_domain_page(l1e_get_mfn(*p2m_entry));  in p2m_free_entry()
    223  l1_entry = map_domain_page(mfn);  in p2m_next_level()
    248  next = map_domain_page(l1e_get_mfn(*p2m_entry));  in p2m_next_level()
    278  table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));  in p2m_pt_set_recalc_range()
    346  table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));  in do_recalc()
    556  table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));  in p2m_pt_set_entry()
    743  l4_pgentry_t *l4e = map_domain_page(mfn);  in p2m_pt_get_entry()
    757  l3_pgentry_t *l3e = map_domain_page(mfn);  in p2m_pt_get_entry()
    799  l2e = map_domain_page(mfn);  in p2m_pt_get_entry()
    837  l1e = map_domain_page(mfn);  in p2m_pt_get_entry()
    [all …]
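Every walk in this file opens with the same expression: the p2m root is held
as a pagetable_t, so its MFN is extracted before mapping.  A sketch of that
opening step (map_p2m_root() is a hypothetical wrapper around the real calls):

    /* Hypothetical wrapper for the recurring root-mapping expression. */
    static void *map_p2m_root(struct p2m_domain *p2m)
    {
        /* pagetable_t -> root MFN -> temporary mapping of the root table */
        return map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
    }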
p2m-ept.c
    214   table = map_domain_page(mfn);  in ept_set_middle_entry()
    242   ept_entry_t *epte = map_domain_page(_mfn(ept_entry->mfn));  in ept_free_entry()
    366   *table = next ?: map_domain_page(_mfn(e.mfn));  in ept_next_level()
    380   ept_entry_t *epte = map_domain_page(mfn);  in ept_invalidate_emt_subtree()
    428   table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));  in ept_invalidate_emt_range()
    515   epte = map_domain_page(_mfn(mfn));  in resolve_misconfig()
    713   table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));  in ept_set_entry()
    882   map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));  in ept_get_entry()
    993   map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));  in ept_walk_table()
    1026  next = map_domain_page(_mfn(ept_entry->mfn));  in ept_walk_table()
    [all …]
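Line 366 uses GNU C's binary '?:': if the caller already holds a mapping of
the next table ('next' is non-NULL) it is reused, otherwise the next level is
mapped fresh from the entry's MFN.  The step in isolation, as a sketch
(descend() is hypothetical; the mfn field is taken from the calls above):

    /* Hypothetical helper: reuse 'next' if mapped, else map e->mfn. */
    static ept_entry_t *descend(const ept_entry_t *e, ept_entry_t *next)
    {
        return next ?: map_domain_page(_mfn(e->mfn));
    }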
p2m-pod.c
    763  map = map_domain_page(mfn_add(mfn0, i));  in p2m_pod_zero_check_superpage()
    801  map = map_domain_page(mfn_add(mfn0, i));  in p2m_pod_zero_check_superpage()
    897  map[i] = map_domain_page(mfns[i]);  in p2m_pod_zero_check()
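The populate-on-demand zero-check maps each constituent page of a candidate
superpage in turn, stepping with mfn_add() from the base MFN.  A sketch of
the loop shape, assuming SUPERPAGE_PAGES from the x86 headers; all_zero() is
hypothetical and omits the real code's locking and re-checks:

    /* Hypothetical helper: scan a superpage's frames for non-zero data. */
    static bool all_zero(mfn_t mfn0)
    {
        unsigned int i, j;

        for ( i = 0; i < SUPERPAGE_PAGES; i++ )
        {
            const unsigned long *map = map_domain_page(mfn_add(mfn0, i));

            for ( j = 0; j < PAGE_SIZE / sizeof(*map); j++ )
                if ( map[j] )
                {
                    unmap_domain_page(map);
                    return false;          /* found a non-zero word */
                }
            unmap_domain_page(map);
        }

        return true;
    }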
/xen/xen/arch/x86/x86_64/

traps.c
    197  l4t = map_domain_page(_mfn(mfn));  in show_page_walk()
    209  l3t = map_domain_page(_mfn(mfn));  in show_page_walk()
    223  l2t = map_domain_page(_mfn(mfn));  in show_page_walk()
    237  l1t = map_domain_page(_mfn(mfn));  in show_page_walk()
    319  stub_page = map_domain_page(_mfn(this_cpu(stubs.mfn)));  in subarch_percpu_traps_init()
mm.c
    58    l4t = map_domain_page(_mfn(mfn));  in do_page_walk()
    76    l2t = map_domain_page(_mfn(mfn));  in do_page_walk()
    88    l1t = map_domain_page(_mfn(mfn));  in do_page_walk()
    96    return map_domain_page(_mfn(mfn)) + (addr & ~PAGE_MASK);  in do_page_walk()
    1203  pl4e = map_domain_page(_mfn(mfn));  in handle_memadd_fault()
    1212  pl3e = map_domain_page(_mfn(mfn));  in handle_memadd_fault()
    1220  pl2e = map_domain_page(_mfn(mfn));  in handle_memadd_fault()
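Line 96 is the map-plus-offset idiom, which also appears in the PV
grant-table and descriptor-table code further down: the caller gets a pointer
aimed straight at the byte of interest and owns the mapping.
unmap_domain_page() accepts any pointer within the mapped page, so the offset
need not be stripped first.  A sketch (map_at() is hypothetical):

    /* Hypothetical helper: map mfn and point at addr's in-page offset. */
    static void *map_at(mfn_t mfn, unsigned long addr)
    {
        /* The caller later passes this (or any in-page) pointer to
         * unmap_domain_page(). */
        return map_domain_page(mfn) + (addr & ~PAGE_MASK);
    }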
/xen/xen/arch/x86/mm/shadow/

multi.c
    243   l4p = map_domain_page(gw->l4mfn);  in shadow_check_gwalk()
    246   l3p = map_domain_page(gw->l3mfn);  in shadow_check_gwalk()
    254   l2p = map_domain_page(gw->l2mfn);  in shadow_check_gwalk()
    279   l1p = map_domain_page(gw->l1mfn);  in shadow_check_gl1e()
    809   map = map_domain_page(mfn);  in shadow_write_entries()
    1546  l4e = map_domain_page(m4mfn);  in sh_make_monitor_table()
    1995  l3e = map_domain_page(m3mfn);  in sh_destroy_monitor_table()
    2287  snp = map_domain_page(snpmfn);  in sh_resync_l1()
    2288  gp = map_domain_page(gl1mfn);  in sh_resync_l1()
    2412  sl1p = map_domain_page(map_mfn);  in sh_map_and_validate()
    [all …]
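Lines 2287-2288 hold two mappings at once (the snapshot and the guest L1);
the mapcache is per-CPU but not limited to a single live mapping.  That shape
as a sketch, assuming memcmp from xen/string.h (pages_differ() is
hypothetical):

    /* Hypothetical helper: compare two frames via two live mappings. */
    static bool pages_differ(mfn_t a, mfn_t b)
    {
        const void *pa = map_domain_page(a);
        const void *pb = map_domain_page(b);
        bool diff = memcmp(pa, pb, PAGE_SIZE);

        unmap_domain_page(pb);   /* release in reverse order */
        unmap_domain_page(pa);
        return diff;
    }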
/xen/xen/drivers/passthrough/vtd/x86/

vtd.c
    42  return map_domain_page(_mfn(paddr_to_pfn(maddr)));  in map_vtd_domain_page()
/xen/misc/coverity/

model.c
    86  void *map_domain_page(unsigned long mfn)  in map_domain_page()  function
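This one is not hypervisor code but a Coverity model: a stand-in body the
static analyzer reads so it can treat map/unmap as an allocate/free pair.
The listing shows only the signature; a plausible shape, assuming Coverity's
documented __coverity_alloc__/__coverity_free__ primitives rather than
quoting model.c:

    /* Model only: read by the Coverity analyzer, never compiled into Xen.
     * Assumed shape, not the actual contents of model.c. */
    void *map_domain_page(unsigned long mfn)
    {
        return __coverity_alloc__(4096);   /* a mapping behaves like an allocation */
    }

    void unmap_domain_page(const void *va)
    {
        __coverity_free__((void *)va);     /* unmapping releases it */
    }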
/xen/xen/common/

kimage.c
    494  for ( ptr = map_domain_page(_mfn(paddr_to_pfn(image->head))); \
    497  (unmap_domain_page(ptr), map_domain_page(_mfn(paddr_to_pfn(entry)))) \
    747  dest_va = map_domain_page(_mfn(dest_mfn));  in kimage_load_crash_segment()
    865  page = map_domain_page(ind_mfn);  in kimage_build_ind()
    891  page = map_domain_page(mfn);  in kimage_build_ind()
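Lines 494/497 belong to a macro that iterates kexec indirection pages: the
loop's step is a comma expression that unmaps the current page and maps the
next, so exactly one indirection page is mapped at any time.  The step in
isolation (next_ind_page() is a hypothetical wrapper):

    /* Hypothetical helper: swap mappings in one expression -- drop 'cur',
     * return a mapping of the page holding 'next'. */
    static void *next_ind_page(void *cur, paddr_t next)
    {
        return (unmap_domain_page(cur),
                map_domain_page(_mfn(paddr_to_pfn(next))));
    }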
/xen/xen/arch/x86/pv/

grant_table.c
    81   pl1e = map_domain_page(gl1mfn) + (addr & ~PAGE_MASK);  in create_grant_pv_mapping()
    237  pl1e = map_domain_page(gl1mfn) + (addr & ~PAGE_MASK);  in replace_grant_pv_mapping()
mm.c
    55  return (l1_pgentry_t *)map_domain_page(*gl1mfn) + l1_table_offset(linear);  in map_guest_l1e()
descriptor-tables.c
    223  entry = map_domain_page(mfn) + (gaddr & ~PAGE_MASK);  in do_update_descriptor()
/xen/xen/drivers/passthrough/amd/

iommu_map.c
    40   table = map_domain_page(_mfn(l1_mfn));  in clear_iommu_pte_present()
    93   table = map_domain_page(_mfn(pt_mfn));  in set_iommu_pte_present()
    214  next_table_vaddr = map_domain_page(_mfn(next_table_mfn));  in iommu_pde_from_dfn()
iommu_guest.c
    166  log = map_domain_page(_mfn(mfn)) + (tail & ~PAGE_MASK);  in guest_iommu_add_ppr_log()
    216  log = map_domain_page(_mfn(mfn)) + (tail & ~PAGE_MASK);  in guest_iommu_add_event_log()
    341  vaddr = map_domain_page(get_gfn(d, gfn, &p2mt));  in do_completion_wait()
    386  dte_base = map_domain_page(_mfn(dte_mfn));  in do_invalidate_dte()
    464  cmd = map_domain_page(_mfn(cmd_mfn)) + (head & ~PAGE_MASK);  in guest_iommu_process_command()
/xen/xen/arch/x86/mm/hap/

guest_walk.c
    90  top_map = map_domain_page(top_mfn);  in hap_p2m_ga_to_gfn()
/xen/xen/include/asm-x86/

page.h
    196  #define map_l1t_from_l2e(x) (l1_pgentry_t *)map_domain_page(l2e_get_mfn(x))
    197  #define map_l2t_from_l3e(x) (l2_pgentry_t *)map_domain_page(l3e_get_mfn(x))
    198  #define map_l3t_from_l4e(x) (l3_pgentry_t *)map_domain_page(l4e_get_mfn(x))
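These wrappers fold the entry-to-MFN extraction and the pointer cast into one
step, so a walk reads as typed operations.  One descent using them, as a
sketch (read_l3e() is hypothetical):

    /* Hypothetical helper: read one L3 entry below a given L4 entry. */
    static l3_pgentry_t read_l3e(l4_pgentry_t l4e, unsigned int idx)
    {
        l3_pgentry_t *l3t = map_l3t_from_l4e(l4e);  /* map + cast in one go */
        l3_pgentry_t l3e = l3t[idx];

        unmap_domain_page(l3t);
        return l3e;
    }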
/xen/xen/arch/arm/

mm.c
    255   mapping = map_domain_page(mfn_add(root_mfn, root_table));  in dump_pt_walk()
    272   mapping = map_domain_page(lpae_get_mfn(pte));  in dump_pt_walk()
    426   void *map_domain_page(mfn_t mfn)  in map_domain_page()  function
    533   void *v = map_domain_page(_mfn(mfn));  in flush_page_to_ram()
    1002  return map_domain_page(mfn);  in xen_map_table()