Cross-references to mfn_to_page() in the Xen source tree, grouped by directory.
Each entry gives the line number, the referencing code, and the enclosing
function as reported by the indexer; "[all …]" marks truncated result lists.

/xen/xen/arch/x86/mm/shadow/
multi.c
   128  ASSERT(mfn_to_page(smfn)->u.sh.head);  in set_fl1_shadow_status()
   141  ASSERT(mfn_to_page(smfn)->u.sh.head);  in set_shadow_status()
   159  ASSERT(mfn_to_page(smfn)->u.sh.head);  in delete_fl1_shadow_status()
   169  ASSERT(mfn_to_page(smfn)->u.sh.head);  in delete_shadow_status()
   173  put_page(mfn_to_page(gmfn));  in delete_shadow_status()
  1438  mfn_to_page(smfn)->up = 0;  in sh_make_shadow()
  2341  sp = mfn_to_page(smfn);  in sh_safe_not_to_sync()
  2349  sp = mfn_to_page(smfn);  in sh_safe_not_to_sync()
  2357  sp = mfn_to_page(smfn);  in sh_safe_not_to_sync()
  3632  pg = mfn_to_page(gl1mfn);  in sh_invlpg()
  [all …]
|
hvm.c
   499  put_page(mfn_to_page(sh_ctxt->mfn[0]));  in sh_emulate_map_dest()
   509  put_page(mfn_to_page(sh_ctxt->mfn[0]));  in sh_emulate_map_dest()
   524  put_page(mfn_to_page(sh_ctxt->mfn[0]));  in sh_emulate_map_dest()
   525  put_page(mfn_to_page(sh_ctxt->mfn[1]));  in sh_emulate_map_dest()
   570  !(mfn_to_page(gmfn)->shadow_flags  in check_for_early_unshadow()
   640  shflags = mfn_to_page(sh_ctxt->mfn[0])->shadow_flags;  in sh_emulate_unmap_dest()
   680  put_page(mfn_to_page(sh_ctxt->mfn[0]));  in sh_emulate_unmap_dest()
   685  put_page(mfn_to_page(sh_ctxt->mfn[1]));  in sh_emulate_unmap_dest()
|
private.h
   314  return page_is_out_of_sync(mfn_to_page(gmfn));  in mfn_is_out_of_sync()
   325  return page_oos_may_write(mfn_to_page(gmfn));  in mfn_oos_may_write()
   479  struct page_info *page = mfn_to_page(gmfn);  in sh_mfn_is_a_page_table()
   507  struct page_info *sp = mfn_to_page(smfn);  in sh_get_ref()
   540  struct page_info *sp = mfn_to_page(smfn);  in sh_put_ref()
   620  sp[0] = mfn_to_page(smfn);  in sh_pin()
   668  sp = mfn_to_page(smfn);  in sh_unpin()
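Almost every shadow helper above opens the same way: convert the shadow's MFN to its `struct page_info` and work on state stored there. As a hedged sketch of the `sh_get_ref()`/`sh_put_ref()` pair, assuming a simple counter field (the field name, the overflow check, and the teardown step are illustrative guesses, not Xen's actual layout):

```c
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long mfn_t;

struct page_info {
    unsigned long sh_count;   /* assumed field: refs held on this shadow */
};

static struct page_info frame_table[64];
#define mfn_to_page(mfn) (frame_table + (mfn))   /* toy, hole-free */

/* Take one reference on shadow page smfn; fail instead of overflowing. */
static bool sh_get_ref(mfn_t smfn)
{
    struct page_info *sp = mfn_to_page(smfn);

    if ( sp->sh_count + 1 == 0 )
        return false;
    sp->sh_count++;
    return true;
}

/* Drop one reference; the last put would trigger shadow teardown. */
static void sh_put_ref(mfn_t smfn)
{
    struct page_info *sp = mfn_to_page(smfn);

    if ( --sp->sh_count == 0 )
        printf("shadow at MFN %lu now unreferenced\n", smfn);
}
```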
|
common.c
   186  pg = mfn_to_page(oos[idx]);  in sh_oos_audit()
   238  struct page_info *pg = mfn_to_page(gmfn);  in _sh_resync_l1()
   258  switch ( mfn_to_page(smfn)->u.sh.type )  in sh_remove_write_access_from_sl1p()
   416  struct page_info *pg = mfn_to_page(gmfn);  in _sh_resync()
   667  pg = mfn_to_page(gmfn);  in sh_unsync()
   896  struct page_info *sp = mfn_to_page(smfn);  in shadow_unhook_mappings()
  1570  sp = mfn_to_page(smfn);  in shadow_hash_insert()
  1594  sp = mfn_to_page(smfn);  in shadow_hash_delete()
  1724  struct page_info *sp = mfn_to_page(smfn);  in sh_destroy_shadow()
  2343  mfn_to_page(smfn)->up = 0;  in sh_clear_up_pointer()
  [all …]
|
/xen/xen/include/asm-x86/
page.h
    92  #define l1e_get_page(x) mfn_to_page(l1e_get_mfn(x))
    93  #define l2e_get_page(x) mfn_to_page(l2e_get_mfn(x))
    94  #define l3e_get_page(x) mfn_to_page(l3e_get_mfn(x))
    95  #define l4e_get_page(x) mfn_to_page(l4e_get_mfn(x))
   238  #define pagetable_get_page(x) mfn_to_page(pagetable_get_mfn(x))
   263  #define mfn_to_page(mfn) (frame_table + mfn_to_pdx(mfn))  (macro definition)
   267  #define __maddr_to_page(ma) mfn_to_page(maddr_to_mfn(ma))
   295  #define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va))
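Line 263 above is the entire x86 implementation: `frame_table` is a flat array of `struct page_info`, and `mfn_to_pdx()` compresses large unpopulated holes out of the MFN space before indexing it. Below is a minimal compilable model of that arithmetic; the identity `mfn_to_pdx()` and the tiny `struct page_info` are assumptions of this sketch, not Xen's real definitions.

```c
#include <stdio.h>

/* Toy stand-in for Xen's struct page_info; the real one carries
 * refcounts, type info, list linkage, and more. */
struct page_info {
    unsigned long count_info;
};

static struct page_info frame_table[1024];      /* one entry per frame */

/* Xen's mfn_to_pdx() squeezes unpopulated holes out of the MFN space;
 * this identity version assumes a hole-free toy machine. */
static unsigned long mfn_to_pdx(unsigned long mfn)
{
    return mfn;
}

/* Same shape as page.h line 263: pure pointer arithmetic, no lookup. */
#define mfn_to_page(mfn) (frame_table + mfn_to_pdx(mfn))

int main(void)
{
    struct page_info *pg = mfn_to_page(42UL);
    printf("MFN 42 -> frame_table[%td]\n", pg - frame_table);
    return 0;
}
```

The wrappers on lines 92-95 and 238 then reduce "the page behind this pagetable entry" to exactly this one lookup.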
|
mm.h
   304  (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn)))
   404  struct page_info *page = mfn_to_page(mfn);  in get_page_from_mfn()
   499  struct domain *d = page_get_owner(mfn_to_page(_mfn(mfn))); \
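Lines 304 and 404 show the standard guard around the translation: since `mfn_to_page()` is bare pointer arithmetic, callers check `mfn_valid()` first to be sure a frame-table entry exists. A hedged sketch of that ordering, with simplified stand-ins for Xen's helpers:

```c
#include <stdbool.h>
#include <stddef.h>

typedef unsigned long mfn_t;
struct domain;                                   /* opaque here */

struct page_info { unsigned long count_info; };

#define MAX_MFN 1024UL
static struct page_info frame_table[MAX_MFN];
#define mfn_to_page(mfn) (frame_table + (mfn))   /* toy, hole-free */
#define mfn_valid(mfn)   ((mfn) < MAX_MFN)       /* toy validity test */

/* Toy: the real get_page() also verifies the owning domain. */
static bool get_page(struct page_info *page, const struct domain *d)
{
    (void)d;
    page->count_info++;
    return true;
}

/* Check the MFN is covered by the frame table *before* touching it. */
static struct page_info *get_page_from_mfn(mfn_t mfn, const struct domain *d)
{
    struct page_info *page;

    if ( !mfn_valid(mfn) )
        return NULL;

    page = mfn_to_page(mfn);
    return get_page(page, d) ? page : NULL;
}
```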
|
p2m.h
   500  page = mfn_to_page(_mfn(gfn));  in get_page_from_gfn()
   967  if ( !page_get_owner_and_reference(mfn_to_page(nfn)) )  in p2m_entry_modify()
   989  put_page(mfn_to_page(ofn));  in p2m_entry_modify()
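Lines 967 and 989 capture the reference discipline when `p2m_entry_modify()` repoints an entry from an old frame `ofn` to a new frame `nfn`: acquire the new page's reference first, and only drop the old one once the switch can no longer fail. A hedged sketch of just that ordering, with toy helper bodies:

```c
#include <stdbool.h>

typedef unsigned long mfn_t;
struct page_info { unsigned long count_info; };

static struct page_info frame_table[64];
#define mfn_to_page(mfn) (frame_table + (mfn))   /* toy, hole-free */

/* Toy: the real helper also checks page ownership before referencing. */
static bool page_get_owner_and_reference(struct page_info *pg)
{
    pg->count_info++;
    return true;
}

static void put_page(struct page_info *pg)
{
    pg->count_info--;
}

/* Repoint a mapping from ofn to nfn without ever letting a still-mapped
 * frame drop to zero references. */
static int repoint_entry(mfn_t ofn, mfn_t nfn)
{
    if ( !page_get_owner_and_reference(mfn_to_page(nfn)) )
        return -1;    /* failure: old frame's reference is untouched */

    /* ... the p2m entry itself would be rewritten here ... */

    put_page(mfn_to_page(ofn));
    return 0;
}
```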
|
/xen/xen/include/asm-arm/
mm.h
   154  (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn)))
   220  #define mfn_to_page(mfn) \  (macro definition; body elided by the indexer)
   226  #define maddr_to_page(ma) mfn_to_page(maddr_to_mfn(ma))
   238  #define vmap_to_page(va) mfn_to_page(vmap_to_mfn(va))
|
p2m.h
   341  page = mfn_to_page(mfn);  in get_page_from_gfn()
|
/xen/xen/arch/x86/pv/
descriptor-tables.c
    77  put_page_and_type(mfn_to_page(mfn));  in pv_destroy_gdt()
   102  !get_page_and_type(mfn_to_page(mfn), d, PGT_seg_desc_page) )  in pv_set_gdt()
   122  put_page_and_type(mfn_to_page(_mfn(frames[i])));  in pv_set_gdt()
|
ro-page-fault.c
   114  page = mfn_to_page(mfn);  in ptwr_emulated_update()
   320  struct page_info *page = mfn_to_page(mfn);  in mmio_ro_do_page_fault()
|
shim.c
   172  struct page_info *page = mfn_to_page(l1e_get_mfn(*pl1e));  in replace_va_mapping()
   218  share_xen_page_with_guest(mfn_to_page(_mfn(param)), d, SHARE_rw); \  in pv_shim_setup_dom()
   245  share_xen_page_with_guest(mfn_to_page(console_mfn), d, SHARE_rw);  in pv_shim_setup_dom()
   399  put_page_and_type(mfn_to_page(_mfn(d->arch.pirq_eoi_map_mfn)));  in pv_shim_shutdown()
|
dom0_build.c
    60  page = mfn_to_page(l1e_get_mfn(*pl1e));  in mark_pv_pt_pages_rdonly()
   538  if ( assign_pages(d, mfn_to_page(_mfn(mfn++)), 0, 0) )  in dom0_construct_pv()
   666  page = mfn_to_page(_mfn(mfn));  in dom0_construct_pv()
|
/xen/xen/arch/x86/mm/
p2m-pod.c
    79  p = mfn_to_page(mfn_add(mfn, i));  in p2m_pod_cache_add()
   163  q = mfn_to_page(_mfn(mfn+i));  in p2m_pod_cache_get()
   439  q = mfn_to_page(_mfn(mfn + i));  in p2m_pod_offline_or_broken_hit()
   635  page = mfn_to_page(mfn);  in p2m_pod_decrease_reservation()
   751  for ( k = 0, page = mfn_to_page(mfn); k < n; ++k, ++page )  in p2m_pod_zero_check_superpage()
   791  if ( (mfn_to_page(mfn)->count_info & PGC_count_mask) > 1 )  in p2m_pod_zero_check_superpage()
   835  p2m_pod_cache_add(p2m, mfn_to_page(mfn0), PAGE_ORDER_2M);  in p2m_pod_zero_check_superpage()
   891  const struct page_info *pg = mfn_to_page(mfns[i]);  in p2m_pod_zero_check()
   924  if ( (mfn_to_page(mfns[i])->count_info & PGC_count_mask) > 1 )  in p2m_pod_zero_check()
   998  p2m_pod_cache_add(p2m, mfn_to_page(mfns[i]), PAGE_ORDER_4K);  in p2m_pod_zero_check()
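The PoD hits above show two interchangeable iteration idioms over a run of frames: recomputing `mfn_to_page(mfn_add(mfn, i))` each step (line 79) versus translating once and walking the `page_info` pointer (line 751). The pointer walk is only sound while the frame-table entries for the range stay contiguous, which a single aligned superpage guarantees. The toy below assumes a hole-free pdx mapping to demonstrate the equivalence:

```c
#include <assert.h>
#include <stdio.h>

typedef unsigned long mfn_t;
struct page_info { unsigned long count_info; };

static struct page_info frame_table[512];
#define mfn_to_page(mfn) (frame_table + (mfn))   /* toy, no pdx holes */

static mfn_t mfn_add(mfn_t mfn, unsigned long i) { return mfn + i; }

int main(void)
{
    mfn_t mfn = 0;                /* first frame of a 2 MiB superpage */
    struct page_info *page = mfn_to_page(mfn);

    /* Pointer increment and per-step lookup must land on the same entry. */
    for ( unsigned long k = 0; k < 512; ++k, ++page )
        assert(page == mfn_to_page(mfn_add(mfn, k)));

    puts("pointer walk and per-step lookup agree across the superpage");
    return 0;
}
```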
|
guest_walk.c
   519  put_page(mfn_to_page(gw->l3mfn));  in guest_walk_tables()
   526  put_page(mfn_to_page(gw->l2mfn));  in guest_walk_tables()
   532  put_page(mfn_to_page(gw->l1mfn));  in guest_walk_tables()
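Lines 519-532 are the release half of a guest pagetable walk: each level that was referenced while walking gets exactly one `put_page()` on the way out. A hedged sketch of that unwind shape; the `guest_walk` layout and the `INVALID_MFN` convention are assumptions for illustration:

```c
typedef unsigned long mfn_t;
#define INVALID_MFN (~0UL)        /* assumed sentinel for "level not mapped" */

struct page_info { unsigned long count_info; };
static struct page_info frame_table[64];
#define mfn_to_page(mfn) (frame_table + (mfn))   /* toy, hole-free */

static void put_page(struct page_info *pg) { pg->count_info--; }

struct guest_walk {
    mfn_t l3mfn, l2mfn, l1mfn;    /* levels referenced during the walk */
};

/* Release in the same top-down order the listing shows. */
static void guest_walk_put_levels(const struct guest_walk *gw)
{
    if ( gw->l3mfn != INVALID_MFN )
        put_page(mfn_to_page(gw->l3mfn));
    if ( gw->l2mfn != INVALID_MFN )
        put_page(mfn_to_page(gw->l2mfn));
    if ( gw->l1mfn != INVALID_MFN )
        put_page(mfn_to_page(gw->l1mfn));
}
```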
|
mem_paging.c
   245  page = mfn_to_page(mfn);  in nominate()
   300  page = mfn_to_page(mfn);  in evict()
|
p2m.c
   573  page = mfn_to_page(mfn);  in p2m_get_page_from_gfn()
   607  page = mfn_to_page(mfn);  in p2m_get_page_from_gfn()
   840  struct page_info *page = mfn_to_page(mfn);  in guest_physmap_add_page()
   967  page_get_owner(mfn_to_page(mfn_add(mfn, i))) == dom_cow )  in guest_physmap_add_entry()
   975  if ( page_get_owner(mfn_to_page(mfn_add(mfn, i))) != d )  in guest_physmap_add_entry()
  1492  pg_type = read_atomic(&(mfn_to_page(omfn)->u.inuse.type_info));  in set_shared_p2m_entry()
  1561  top = mfn_to_page(mfn);  in p2m_flush_table_locked()
  2608  if ( is_special_page(mfn_to_page(prev_mfn)) )  in p2m_add_foreign()
|
/xen/xen/arch/x86/x86_64/
mm.c
   186  struct page_info *page = mfn_to_page(mfn_add(m2p_start_mfn, i));  in share_hotadd_m2p_table()
   208  struct page_info *page = mfn_to_page(mfn_add(m2p_start_mfn, i));  in share_hotadd_m2p_table()
   703  sva = (unsigned long)mfn_to_page(spfn);  in cleanup_frame_table()
   704  eva = (unsigned long)mfn_to_page(epfn);  in cleanup_frame_table()
   807  memset(mfn_to_page(spfn), 0,  in extend_frame_table()
   808  (unsigned long)mfn_to_page(epfn) - (unsigned long)mfn_to_page(spfn));  in extend_frame_table()
   845  mfn_to_page(_mfn(m2p_start_mfn + i)), SHARE_ro);  in subarch_init_memory()
   863  mfn_to_page(_mfn(m2p_start_mfn + i)), SHARE_ro);  in subarch_init_memory()
  1282  pg = mfn_to_page(_mfn(i));  in transfer_pages_to_heap()
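Lines 703-704 and 807-808 exploit the flat layout directly: because `mfn_to_page()` is plain pointer arithmetic, the byte span of frame-table metadata covering `[spfn, epfn)` is just the difference between the two translated pointers. A compilable model of the `extend_frame_table()` arithmetic; the sizes and the identity pdx mapping are toy assumptions:

```c
#include <stdio.h>
#include <string.h>

struct page_info { unsigned long count_info; unsigned long type_info; };

static struct page_info frame_table[1 << 12];
#define mfn_to_page(mfn) (frame_table + (mfn))   /* toy, identity pdx */

/* Zero the frame-table entries backing the hot-added range [spfn, epfn),
 * mirroring the memset() on line 807 above. */
static void extend_frame_table_span(unsigned long spfn, unsigned long epfn)
{
    size_t span = (char *)mfn_to_page(epfn) - (char *)mfn_to_page(spfn);

    memset(mfn_to_page(spfn), 0, span);
    printf("cleared %zu bytes of frame table for %lu frames\n",
           span, epfn - spfn);
}

int main(void)
{
    extend_frame_table_span(16, 64);
    return 0;
}
```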
|
/xen/xen/arch/x86/
tboot.c
   188  struct page_info *page = mfn_to_page(_mfn(mfn));  in update_pagetable_mac()
   280  struct page_info *page = mfn_to_page(_mfn(mfn));  in tboot_gen_xenheap_integrity()
|
mm.c
   782  page = mfn_to_page(mfn);  in is_iomem_page()
   857  struct page_info *page = mfn_to_page(_mfn(mfn));  in get_page_from_l1e()
  1083  struct page_info *page = mfn_to_page(mfn);  in get_page_and_type_from_mfn()
  1223  page = mfn_to_page(_mfn(pfn));  in put_page_from_l1e()
  1317  return put_pt_page(l2e_get_page(l2e), mfn_to_page(l2mfn), flags);  in put_page_from_l2e()
  1330  return put_pt_page(l3e_get_page(l3e), mfn_to_page(l3mfn), flags);  in put_page_from_l3e()
  2261  struct page_info *l2pg = mfn_to_page(mfn);  in mod_l2_entry()
  3154  page = mfn_to_page(_mfn(mfn));  in vcpu_destroy_pagetables()
  3276  struct page_info *page = mfn_to_page(old_base_mfn);  in new_guest_cr3()
  3628  page = mfn_to_page(_mfn(old_mfn));  in do_mmuext_op()
  [all …]
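The `put_page_from_l2e()`/`put_page_from_l3e()` hits (lines 1317/1330) combine two translations from page.h above: `l2e_get_page()` resolves the frame a pagetable entry points at, while `mfn_to_page(l2mfn)` resolves the pagetable containing the entry. A toy model of the `l1e_get_page()` composition; the PTE layout constants are simplified x86-style assumptions:

```c
#include <stdint.h>
#include <stdio.h>

struct page_info { unsigned long count_info; };
static struct page_info frame_table[1 << 10];

typedef struct { uint64_t l1; } l1_pgentry_t;

#define PAGE_SHIFT  12
#define PADDR_MASK  ((1ULL << 52) - 1)       /* drops the high flag bits */

/* l1e_get_mfn(): strip flag bits, keep the frame number. */
static uint64_t l1e_get_mfn(l1_pgentry_t e)
{
    return (e.l1 & PADDR_MASK) >> PAGE_SHIFT;
}

#define mfn_to_page(mfn)  (frame_table + (mfn))          /* toy */
#define l1e_get_page(e)   mfn_to_page(l1e_get_mfn(e))    /* as in page.h */

int main(void)
{
    l1_pgentry_t e = { (7ULL << PAGE_SHIFT) | 0x63 };    /* frame 7 + flags */
    printf("PTE maps frame-table slot %td\n", l1e_get_page(e) - frame_table);
    return 0;
}
```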
|
/xen/xen/common/
xenoprof.c
   191  struct page_info *page = mfn_to_page(mfn_add(mfn, i));  in share_xenoprof_page_with_guest()
   203  share_xen_page_with_guest(mfn_to_page(mfn_add(mfn, i)), d, SHARE_rw);  in share_xenoprof_page_with_guest()
   216  struct page_info *page = mfn_to_page(mfn_add(mfn, i));  in unshare_xenoprof_page_with_guest()
|
page_alloc.c
  1587  pg = mfn_to_page(mfn);  in offline_page()
  1694  pg = mfn_to_page(mfn);  in online_page()
  1748  pg = mfn_to_page(mfn);  in query_page_offline()
  1861  init_heap_pages(mfn_to_page(_mfn(r->s)), r->e - r->s);  in end_boot_allocator()
  1870  init_heap_pages(mfn_to_page(_mfn(r->s)), r->e - r->s);  in end_boot_allocator()
  1926  pg = mfn_to_page(_mfn(mfn));  in smp_scrub_heap_pages()
  2248  init_heap_pages(mfn_to_page(smfn), mfn_x(emfn) - mfn_x(smfn));  in init_domheap_pages()
|
trace.c
   246  mfn_to_page(_mfn(t_info_mfn_list[offset + i])), SHARE_rw);  in alloc_trace_bufs()
   275  ASSERT(!(mfn_to_page(_mfn(mfn))->count_info & PGC_allocated));  in alloc_trace_bufs()
|
kimage.c
   644  page = mfn_to_page(old_mfn);  in kimage_alloc_page()
   901  guest_page = mfn_to_page(mfn);  in kimage_build_ind()
|
/xen/xen/arch/x86/mm/hap/
nested_ept.c
   170  put_page(mfn_to_page(lxmfn));  in nept_walk_tables()
|