Searched refs:page_to_mfn (Results 1 – 25 of 48) sorted by relevance

/xen/xen/include/xen/
domain_page.h 47 #define __map_domain_page(pg) map_domain_page(page_to_mfn(pg))
51 return map_domain_page_global(page_to_mfn(pg)); in __map_domain_page_global()
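Both wrappers here only lift a struct page_info * to its mfn_t before handing it to the mapping layer. A hedged usage sketch of the pattern (alloc_domheap_page(), unmap_domain_page() and PAGE_SIZE as in Xen; error handling trimmed):

    struct page_info *pg = alloc_domheap_page(NULL, 0);

    if ( pg )
    {
        void *p = __map_domain_page(pg);   /* map_domain_page(page_to_mfn(pg)) */

        memset(p, 0, PAGE_SIZE);           /* scratch use of the frame */
        unmap_domain_page(p);
        free_domheap_page(pg);
    }

(clear_domain_page(page_to_mfn(pg)), visible in the iommu.h and kimage.c hits below, shortcuts exactly this map/clear/unmap dance.)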
/xen/xen/include/asm-x86/
page.h 161 #define l1e_from_page(page, flags) l1e_from_mfn(page_to_mfn(page), flags)
162 #define l2e_from_page(page, flags) l2e_from_mfn(page_to_mfn(page), flags)
163 #define l3e_from_page(page, flags) l3e_from_mfn(page_to_mfn(page), flags)
164 #define l4e_from_page(page, flags) l4e_from_mfn(page_to_mfn(page), flags)
244 #define pagetable_from_page(pg) pagetable_from_mfn(page_to_mfn(pg))
264 #define page_to_mfn(pg) pdx_to_mfn((unsigned long)((pg) - frame_table)) macro
268 #define __page_to_maddr(pg) mfn_to_maddr(page_to_mfn(pg))
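Line 264 is the x86 definition: a page's offset within frame_table is its page index (pdx), which pdx_to_mfn() widens back into an mfn_t. A minimal sketch of the same arithmetic, assuming no PDX compression (pdx == mfn) — illustrative, not Xen's exact code:

    /* frame_table[] holds one struct page_info per machine frame. */
    static inline mfn_t page_to_mfn_sketch(const struct page_info *pg)
    {
        return _mfn((unsigned long)(pg - frame_table));
    }

    /* __page_to_maddr() (line 268) then composes this with mfn_to_maddr(),
     * a left shift by PAGE_SHIFT: maddr = mfn << PAGE_SHIFT. */
    static inline paddr_t page_to_maddr_sketch(const struct page_info *pg)
    {
        return (paddr_t)(pg - frame_table) << PAGE_SHIFT;
    }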
/xen/xen/include/asm-arm/
mm.h 145 #define is_xen_heap_page(page) is_xen_heap_mfn(page_to_mfn(page))
222 #define page_to_mfn(pg) \ macro
227 #define page_to_maddr(pg) (mfn_to_maddr(page_to_mfn(pg)))
315 return mfn_to_virt(mfn_x(page_to_mfn(pg))); in page_to_virt()
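The mfn_x()/_mfn() pair seen at line 315 comes from Xen's typesafe-integer wrapper (TYPE_SAFE(unsigned long, mfn)); with it enabled, mfn_t is a single-member struct, so raw longs and machine frame numbers cannot be mixed silently. Roughly:

    typedef struct { unsigned long mfn; } mfn_t;

    static inline mfn_t _mfn(unsigned long n) { return (mfn_t){ n }; }  /* wrap */
    static inline unsigned long mfn_x(mfn_t n) { return n.mfn; }        /* unwrap */

In NDEBUG builds the wrapper can compile down to a plain typedef, so the accessors cost nothing.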
/xen/xen/drivers/passthrough/amd/
iommu_map.c 207 next_table_mfn = mfn_x(page_to_mfn(table)); in iommu_pde_from_dfn()
240 next_table_mfn = mfn_x(page_to_mfn(table)); in iommu_pde_from_dfn()
270 next_table_mfn = mfn_x(page_to_mfn(table)); in iommu_pde_from_dfn()
500 set_iommu_pde_present(pde, mfn_x(page_to_mfn(pg)), level - 1, in amd_iommu_quarantine_init()
iommu.h 364 clear_domain_page(page_to_mfn(pg)); in alloc_amd_iommu_pgtable()
/xen/xen/common/
page_alloc.c 1005 i, mfn_x(page_to_mfn(pg + i)), in alloc_heap_pages()
1028 flush_page_to_ram(mfn_x(page_to_mfn(&pg[i])), in alloc_heap_pages()
1382 mfn_t mfn = page_to_mfn(pg); in free_heap_pages()
1420 i, mfn_x(page_to_mfn(pg + i)), in free_heap_pages()
1457 if ( (mfn_x(page_to_mfn(pg)) & mask) ) in free_heap_pages()
1462 if ( !mfn_valid(page_to_mfn(predecessor)) || in free_heap_pages()
1485 if ( !mfn_valid(page_to_mfn(successor)) || in free_heap_pages()
1778 if ( !mfn_x(page_to_mfn(pg)) ) in init_heap_pages()
1805 unsigned long s = mfn_x(page_to_mfn(pg + i)); in init_heap_pages()
2412 i, mfn_x(page_to_mfn(pg + i)), in free_domheap_pages()
[all …]
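The free_heap_pages() hits at 1457–1485 are the buddy coalescing test: at a given order, bit (1 << order) of a block's mfn says whether its buddy sits just below or just above it in frame_table. An illustrative helper capturing the idea (Xen additionally checks mfn_valid() and matching order/zone before merging, per lines 1462 and 1485):

    static struct page_info *buddy_of(struct page_info *pg, unsigned int order)
    {
        unsigned long mask = 1UL << order;

        return (mfn_x(page_to_mfn(pg)) & mask) ? pg - mask   /* predecessor */
                                               : pg + mask;  /* successor   */
    }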
vmap.c 39 map_pages_to_xen(va, page_to_mfn(pg), 1, PAGE_HYPERVISOR); in vm_init_type()
110 if ( !map_pages_to_xen(va, page_to_mfn(pg), 1, PAGE_HYPERVISOR) ) in vm_alloc()
261 mfn[i] = page_to_mfn(pg); in vmalloc_type()
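vmalloc_type() at line 261 shows the standard discontiguous-backing pattern: allocate pages one at a time, record each mfn, then map the whole array into one contiguous virtual range. A compressed sketch using the calls shown above (NR is illustrative; the unwind path is omitted):

    mfn_t mfn[NR];
    unsigned int i;
    void *va;

    for ( i = 0; i < NR; i++ )
        mfn[i] = page_to_mfn(alloc_domheap_page(NULL, 0));

    va = vmap(mfn, NR);   /* one contiguous VA range over NR scattered frames */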
kimage.c 79 clear_domain_page(page_to_mfn(page)); in kimage_alloc_zeroed_page()
408 clear_domain_page(page_to_mfn(page)); in kimage_alloc_crash_control_page()
916 copy_domain_page(page_to_mfn(xen_page), mfn); in kimage_build_ind()
memory.c 145 mfn_t mfn = page_to_mfn(page); in increase_reservation()
267 mfn = page_to_mfn(page); in populate_physmap()
712 mfn = page_to_mfn(page); in memory_exchange()
760 mfn = page_to_mfn(page); in memory_exchange()
1436 page_to_mfn(page), 0); in do_memory_op()
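The memory.c hits all follow the give-a-frame-to-a-guest shape: allocate from the domain heap, convert the struct page_info * to an mfn, then install that mfn at a guest gfn. Schematically (guest_physmap_add_page() as used in the domain.c hit below; checks omitted):

    struct page_info *page = alloc_domheap_pages(d, order, memflags);
    mfn_t mfn = page_to_mfn(page);

    rc = guest_physmap_add_page(d, gfn, mfn, order);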
/xen/xen/arch/x86/
mm.c 1440 mfn_t l2mfn = page_to_mfn(page); in promote_l2_table()
1539 mfn_t l3mfn = page_to_mfn(page); in promote_l3_table()
1765 mfn_t l4mfn = page_to_mfn(page); in promote_l4_table()
1872 mfn_t l2mfn = page_to_mfn(page); in demote_l2_table()
1924 mfn_t l3mfn = page_to_mfn(page); in demote_l3_table()
1971 mfn_t l4mfn = page_to_mfn(page); in demote_l4_table()
2661 mfn_x(page_to_mfn(page)), in validate_page()
2909 mfn_x(page_to_mfn(page))); in _get_page_type()
3017 mfn_t mfn = page_to_mfn(page); in _get_page_type()
3827 copy_domain_page(page_to_mfn(dst_page), page_to_mfn(src_page)); in do_mmuext_op()
[all …]
domain.c 229 _p(mfn_x(page_to_mfn(page))), in dump_pageframe_info()
243 _p(mfn_x(page_to_mfn(page))), in dump_pageframe_info()
250 _p(mfn_x(page_to_mfn(page))), in dump_pageframe_info()
751 mfn = page_to_mfn(page); in arch_domain_soft_reset()
790 ret = guest_physmap_add_page(d, gfn, page_to_mfn(new_page), in arch_domain_soft_reset()
1058 done = !fill_ro_mpt(page_to_mfn(page)); in arch_set_info_guest()
1189 *l4tab = l4e_from_mfn(page_to_mfn(cr3_page), in arch_set_info_guest()
/xen/xen/arch/x86/mm/
mem_sharing.c 482 mfn = page_to_mfn(pg); in audit()
774 mfn_x(page_to_mfn(page)), page->count_info, in debug_mfn()
1306 copy_domain_page(page_to_mfn(page), page_to_mfn(old_page)); in __mem_sharing_unshare_page()
1308 BUG_ON(set_shared_p2m_entry(d, gfn, page_to_mfn(page))); in __mem_sharing_unshare_page()
1322 set_gpfn_from_mfn(mfn_x(page_to_mfn(page)), gfn); in __mem_sharing_unshare_page()
1328 paging_mark_dirty(d, page_to_mfn(page)); in __mem_sharing_unshare_page()
1538 new_mfn = page_to_mfn(page); in mem_sharing_fork_page()
1601 new_vcpu_info_mfn = page_to_mfn(page); in copy_vcpu_settings()
1689 new_mfn = page_to_mfn(page); in copy_special_pages()
1813 mfn_t mfn = page_to_mfn(page); in mem_sharing_fork_reset()
p2m-pod.c 65 mfn = page_to_mfn(page); in p2m_pod_cache_add()
99 clear_domain_page(mfn_add(page_to_mfn(page), i)); in p2m_pod_cache_add()
159 mfn = mfn_x(page_to_mfn(p)); in p2m_pod_cache_get()
429 bmfn = mfn_x(page_to_mfn(p)); in p2m_pod_offline_or_broken_hit()
432 mfn = mfn_x(page_to_mfn(q)); in p2m_pod_offline_or_broken_hit()
450 mfn = mfn_x(page_to_mfn(q)); in p2m_pod_offline_or_broken_hit()
1188 mfn = page_to_mfn(p); in p2m_pod_demand_populate()
mem_paging.c 395 mfn = page_to_mfn(page); in prepare()
/xen/xen/arch/x86/mm/hap/
hap.c 255 clear_domain_page(page_to_mfn(pg)); in hap_alloc()
309 d->domain_id, mfn_x(page_to_mfn(pg)), in hap_free_p2m_page()
318 hap_free(d, page_to_mfn(pg)); in hap_free_p2m_page()
401 m4mfn = page_to_mfn(pg); in hap_make_monitor_table()
guest_walk.c 86 top_mfn = page_to_mfn(top_page); in hap_p2m_ga_to_gfn()
/xen/xen/arch/x86/mm/shadow/
common.c 946 smfn = page_to_mfn(sp); in _shadow_prealloc()
1015 smfn = page_to_mfn(sp); in shadow_blow_tables()
1133 clear_domain_page(page_to_mfn(sp)); in shadow_alloc()
1150 return page_to_mfn(sp); in shadow_alloc()
1181 == mfn_x(page_to_mfn(sp)) ) in shadow_free()
1257 d->domain_id, mfn_x(page_to_mfn(pg)), in shadow_free_p2m_page()
1269 shadow_free(d, page_to_mfn(pg)); in shadow_free_p2m_page()
1448 __backpointer(sp), mfn_x(page_to_mfn(sp)), in sh_hash_audit_bucket()
1527 return page_to_mfn(sp); in shadow_hash_lookup()
1542 return page_to_mfn(sp); in shadow_hash_lookup()
[all …]
/xen/xen/arch/x86/pv/
dom0_build.c 505 alloc_spfn = mfn_x(page_to_mfn(page)); in dom0_construct_pv()
533 initrd->mod_start = initrd_mfn = mfn_x(page_to_mfn(page)); in dom0_construct_pv()
799 mfn = mfn_x(page_to_mfn(page)); in dom0_construct_pv()
824 mfn = mfn_x(page_to_mfn(page)); in dom0_construct_pv()
descriptor-tables.c 205 mfn = page_to_mfn(page); in do_update_descriptor()
domain.c 129 mfn = page_to_mfn(pg); in setup_compat_l4()
/xen/xen/arch/arm/
p2m.c 91 p2m->root, mfn_x(page_to_mfn(p2m->root))); in dump_p2m_lookup()
640 return mfn_to_p2m_entry(page_to_mfn(page), p2m_ram_rw, p2m_access_rwx); in page_to_p2m_table()
1169 p2m_invalidate_table(p2m, page_to_mfn(p2m->root + i)); in p2m_invalidate_root()
1407 p2m->vttbr = generate_vttbr(p2m->vmid, page_to_mfn(p2m->root)); in p2m_alloc_table()
2079 empty_root_mfn = page_to_mfn(root); in setup_virt_paging()
kernel.c 287 mfn = page_to_mfn(pages); in kernel_decompress()
/xen/xen/arch/x86/hvm/
ioreq.c 456 page_to_mfn(iorp->page), 0) ) in hvm_remove_ioreq_gfn()
473 page_to_mfn(iorp->page), 0); in hvm_add_ioreq_gfn()
972 *mfn = page_to_mfn(s->bufioreq.page); in hvm_get_ioreq_server_frame()
977 *mfn = page_to_mfn(s->ioreq.page); in hvm_get_ioreq_server_frame()
/xen/xen/arch/x86/hvm/viridian/
viridian.c 254 gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN)); in enable_hypercall_page()
750 gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN)); in viridian_map_guest_page()
/xen/xen/arch/x86/x86_64/
mm.c 556 page_to_mfn(l1_pg), in paging_init()
585 page_to_mfn(l1_pg), in paging_init()
642 page_to_mfn(l1_pg), in paging_init()

Completed in 57 milliseconds
