mfn_valid() references in the Xen source tree, grouped by directory
(file, matched line number, matched source line, enclosing function):

/xen/xen/arch/x86/mm/

  paging.c
      75  if ( mfn_valid(mfn) )    in paging_new_log_dirty_leaf()
      85  if ( mfn_valid(mfn) )    in paging_new_log_dirty_node()
     298  if ( !mfn_valid(mfn) )    in paging_mark_pfn_dirty()
     301  if ( !mfn_valid(mfn) )    in paging_mark_pfn_dirty()
     306  if ( !mfn_valid(mfn) )    in paging_mark_pfn_dirty()
     309  if ( !mfn_valid(mfn) )    in paging_mark_pfn_dirty()
     314  if ( !mfn_valid(mfn) )    in paging_mark_pfn_dirty()
     317  if ( !mfn_valid(mfn) )    in paging_mark_pfn_dirty()
     371  if ( !mfn_valid(mfn) )    in paging_mfn_is_dirty()
     377  if ( !mfn_valid(mfn) )    in paging_mfn_is_dirty()
     [all …]

  p2m-pt.c
     256  ASSERT(rc && mfn_valid(mfn));    in p2m_next_level()
     495  ASSERT(mfn_valid(mfn) || mfn_eq(mfn, INVALID_MFN));    in check_entry()
     497  ASSERT(mfn_valid(mfn));    in check_entry()
     590  l3e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt)    in p2m_pt_set_entry()
     626  if ( mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt) )    in p2m_pt_set_entry()
     662  l2e_content = mfn_valid(mfn) || p2m_allows_invalid_mfn(p2mt)    in p2m_pt_set_entry()
     789  ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));    in p2m_pt_get_entry()
     828  ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));    in p2m_pt_get_entry()
     993  ASSERT(mfn_valid(_mfn(mfn)));    in p2m_pt_audit_p2m()
    1030  ASSERT(mfn_valid(_mfn(mfn)));    in p2m_pt_audit_p2m()
     [all …]

  mem_paging.c
     189  if ( mfn_valid(mfn) && (p2mt == p2m_ram_paging_in) )    in p2m_mem_paging_resume()
     233  if ( !mfn_valid(mfn) )    in nominate()
     292  if ( unlikely(!mfn_valid(mfn)) )    in evict()
     368  if ( !mfn_valid(mfn) )    in prepare()

  p2m.c
     603  if ( p2m_is_ram(*t) && mfn_valid(mfn) )    in p2m_get_page_from_gfn()
     805  if ( mfn_valid(mfn) )    in p2m_remove_page()
     896  if ( !mfn_valid(mfn) )    in guest_physmap_add_entry()
     948  ASSERT(mfn_valid(omfn));    in guest_physmap_add_entry()
     989  ASSERT(mfn_valid(omfn));    in guest_physmap_add_entry()
    1489  ASSERT(mfn_valid(omfn));    in set_shared_p2m_entry()
    1857  ASSERT(mfn_valid(*mfn));    in map_domain_gfn()
    2330  rc = mfn_valid(mfn)    in p2m_change_altp2m_gfn()
    2504  (mfn_valid(p2mfn)    in audit_p2m()
    2606  if ( mfn_valid(prev_mfn) )    in p2m_add_foreign()
     [all …]

  mem_sharing.c
     439  if ( !mfn_valid(_mfn(mfn)) )    in mem_sharing_lookup()
     740  if ( !mfn_valid(mfn) )    in __grab_shared_page()
     837  if ( !mfn_valid(mfn) )    in nominate_page()
     880  if ( mfn_valid(amfn) && (!mfn_eq(amfn, mfn) || ap2ma != p2ma) )    in nominate_page()
    1169  if ( mfn_valid(cmfn) )    in add_to_physmap()
    1357  if ( mfn_valid(mfn) && p2m_is_shared(t) )    in relinquish_shared_pages()
    1522  if ( mfn_valid(mfn) && p2m_is_ram(p2mt) )    in mem_sharing_fork_page()
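Note: nearly every hit in this directory is the same guard: an MFN produced by a p2m lookup, the log-dirty tree or the sharing/paging machinery is only dereferenced after mfn_valid() has confirmed the frame table covers it. A minimal sketch of that pattern, assuming Xen's mm headers are in scope; get_backing_page() is a hypothetical name for illustration, not a function from the files above.

    #include <xen/mm.h>   /* assumed to provide mfn_t, mfn_valid(), mfn_to_page() */

    /* Hypothetical helper: return the struct page_info backing an MFN, or
     * NULL when the frame table has no entry for it (MMIO holes, INVALID_MFN). */
    static struct page_info *get_backing_page(mfn_t mfn)
    {
        if ( !mfn_valid(mfn) )
            return NULL;

        return mfn_to_page(mfn);
    }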
/xen/xen/arch/x86/x86_64/

  traps.c
     201  pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid ?    in show_page_walk()
     206  !mfn_valid(_mfn(mfn)) )    in show_page_walk()
     213  pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid ?    in show_page_walk()
     220  !mfn_valid(_mfn(mfn)) )    in show_page_walk()
     227  pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid ?    in show_page_walk()
     234  !mfn_valid(_mfn(mfn)) )    in show_page_walk()
     241  pfn = mfn_valid(_mfn(mfn)) && machine_to_phys_mapping_valid ?    in show_page_walk()

  mm.c
      68  if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || !mfn_valid(_mfn(mfn)) )    in do_page_walk()
      80  if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) || !mfn_valid(_mfn(mfn)) )    in do_page_walk()
      92  if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || !mfn_valid(_mfn(mfn)) )    in do_page_walk()
     354  if ( mfn_valid(_mfn(i + n * PDX_GROUP_COUNT)) )    in setup_compat_m2p_table()
     423  if ( mfn_valid(_mfn(i + n * PDX_GROUP_COUNT)) )    in setup_m2p_table()
     540  if ( mfn_valid(_mfn(MFN(i + k) + n * PDX_GROUP_COUNT)) )    in paging_init()
     574  if ( mfn_valid(_mfn(MFN(i) + n * PDX_GROUP_COUNT)) )    in paging_init()
     633  if ( mfn_valid(_mfn(MFN(i) + n * PDX_GROUP_COUNT)) )    in paging_init()
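Note: the show_page_walk() and do_page_walk() hits repeat one per-level test at each page-table level: stop descending once the entry is not present or its MFN has no frame-table backing. A condensed sketch of that condition for one level, assuming the x86 page-table accessors (l3e_get_flags(), l3e_get_pfn(), _PAGE_PRESENT) are in scope; l3_step_ok() is a hypothetical illustration, not code from mm.c.

    /* Hypothetical condensation of one level of a debug page-table walk:
     * continue only if the L3 entry is present and its MFN is a valid frame. */
    static bool l3_step_ok(l3_pgentry_t l3e)
    {
        return (l3e_get_flags(l3e) & _PAGE_PRESENT) &&
               mfn_valid(_mfn(l3e_get_pfn(l3e)));
    }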
/xen/xen/arch/x86/mm/shadow/

  multi.c
     353  if ( mfn_valid(gw->l4mfn)    in sh_audit_gw()
     357  if ( mfn_valid(gw->l3mfn)    in sh_audit_gw()
     373  if ( mfn_valid(gw->l1mfn)    in sh_audit_gw()
     379  && mfn_valid(    in sh_audit_gw()
     865  if ( mfn_valid(mfn) )    in shadow_get_page_from_l1e()
    2249  if ( mfn_valid(gl1mfn)    in validate_gl1e()
    2345  ASSERT(mfn_valid(smfn));    in sh_safe_not_to_sync()
    2354  ASSERT(mfn_valid(smfn));    in sh_safe_not_to_sync()
    2362  ASSERT(mfn_valid(smfn));    in sh_safe_not_to_sync()
    3854  if ( !mfn_valid(gmfn) )    in sh_set_toplevel_shadow()
     [all …]

  hvm.c
     442  ASSERT(mfn_valid(mfn));    in emulate_gva_to_mfn()
     473  if ( !mfn_valid(sh_ctxt->mfn[0]) )    in sh_emulate_map_dest()
     507  if ( !mfn_valid(sh_ctxt->mfn[1]) )    in sh_emulate_map_dest()
     619  ASSERT(mfn_valid(sh_ctxt->mfn[0]));    in sh_emulate_unmap_dest()
     661  ASSERT(!mfn_valid(sh_ctxt->mfn[1]));    in sh_emulate_unmap_dest()
     666  if ( unlikely(mfn_valid(sh_ctxt->mfn[1])) )    in sh_emulate_unmap_dest()
     682  if ( unlikely(mfn_valid(sh_ctxt->mfn[1])) )    in sh_emulate_unmap_dest()

  private.h
     483  if ( !mfn_valid(gmfn) )    in sh_mfn_is_a_page_table()
     509  ASSERT(mfn_valid(smfn));    in sh_get_ref()
     542  ASSERT(mfn_valid(smfn));    in sh_put_ref()
     619  ASSERT(mfn_valid(smfn));    in sh_pin()
     667  ASSERT(mfn_valid(smfn));    in sh_unpin()

  common.c
     174  if ( !mfn_valid(oos[idx]) )    in sh_oos_audit()
     240  ASSERT(mfn_valid(gmfn));    in _sh_resync_l1()
     255  ASSERT(mfn_valid(smfn));    in sh_remove_write_access_from_sl1p()
     256  ASSERT(mfn_valid(gmfn));    in sh_remove_write_access_from_sl1p()
     463  if ( mfn_valid(oos[idx])    in oos_hash_add()
     472  if ( mfn_valid(oos[idx]) )    in oos_hash_add()
     613  if ( mfn_valid(oos[idx]) )    in sh_resync_all()
     702  ASSERT(mfn_valid(gmfn));    in shadow_promote()
     922  ASSERT(mfn_valid(smfn));    in trace_shadow_prealloc_unpin()
    2133  ASSERT(mfn_valid(pmfn));    in sh_remove_shadow_via_pointer()
     [all …]
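Note: two idioms alternate in the shadow hits above: ASSERT(mfn_valid(...)) where a valid MFN is an invariant the caller must already have established (e.g. sh_get_ref(), sh_pin()), and a plain if ( !mfn_valid(...) ) where an invalid frame is a reachable case handled at runtime (e.g. sh_mfn_is_a_page_table(), the oos[] slots). A small illustrative sketch of the two styles side by side; shadow_touch() is a hypothetical function, assuming xen/lib.h for ASSERT().

    #include <xen/lib.h>   /* ASSERT() */
    #include <xen/mm.h>    /* mfn_t, mfn_valid(), mfn_to_page() (assumed) */

    /* Hypothetical: smfn validity is a debug-build invariant, gmfn validity
     * a runtime condition that may legitimately be false. */
    static void shadow_touch(mfn_t smfn, mfn_t gmfn)
    {
        ASSERT(mfn_valid(smfn));    /* caller guarantees this, as in sh_get_ref() */

        if ( !mfn_valid(gmfn) )     /* reachable case, handled gracefully */
            return;

        /* ... operate on mfn_to_page(smfn) and mfn_to_page(gmfn) ... */
    }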
/xen/xen/drivers/passthrough/amd/

  iommu_guest.c
     164  ASSERT(mfn_valid(_mfn(mfn)));    in guest_iommu_add_ppr_log()
     214  ASSERT(mfn_valid(_mfn(mfn)));    in guest_iommu_add_event_log()
     383  ASSERT(mfn_valid(_mfn(dte_mfn)));    in do_invalidate_dte()
     404  ASSERT(mfn_valid(_mfn(gcr3_mfn)));    in do_invalidate_dte()
     462  ASSERT(mfn_valid(_mfn(cmd_mfn)));    in guest_iommu_process_command()
/xen/xen/include/asm-arm/

  mm.h
     154  (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn)))
     214  #define mfn_valid(mfn) ({ \    (macro definition)

  p2m.h
     343  if ( !mfn_valid(mfn) || !get_page(page, d) )    in get_page_from_gfn()
/xen/xen/arch/x86/pv/

  ro-page-fault.c
     118  ASSERT(mfn_valid(mfn));    in ptwr_emulated_update()
     318  if ( mfn_valid(mfn) )    in mmio_ro_do_page_fault()

  descriptor-tables.c
     101  if ( !mfn_valid(mfn) ||    in pv_set_gdt()
/xen/xen/arch/x86/mm/hap/

  guest_walk.c
      89  ASSERT(mfn_valid(top_mfn));    in hap_p2m_ga_to_gfn()

  nested_hap.c
     176  if ( !mfn_valid(mfn) )    in nestedhap_walk_L0_p2m()
/xen/xen/include/asm-x86/

  mm.h
     304  (mfn_valid(mfn) && is_xen_heap_page(mfn_to_page(mfn)))
     406  if ( unlikely(!mfn_valid(mfn)) || unlikely(!get_page(page, d)) )    in get_page_from_mfn()

  p2m.h
     501  return mfn_valid(_mfn(gfn)) && get_page(page, d) ? page : NULL;    in get_page_from_gfn()
     961  if ( !mfn_valid(nfn) )    in p2m_entry_modify()
     984  if ( !mfn_valid(ofn) )    in p2m_entry_modify()
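Note: the header hits above show the canonical validate-then-reference sequence: both get_page_from_mfn() and get_page_from_gfn() only reach mfn_to_page()/get_page() after mfn_valid() has confirmed there is a frame-table entry to reference at all. A hedged sketch of that sequence; take_page_ref() is a hypothetical stand-in, not the real helpers, and it omits the ownership and GFN-translation handling they perform.

    /* Hypothetical: validate the MFN, then take a general reference on the
     * backing page for domain d.  Returns NULL if either step fails.
     * Assumes the usual Xen mm/sched headers are in scope. */
    static struct page_info *take_page_ref(mfn_t mfn, struct domain *d)
    {
        struct page_info *page;

        if ( unlikely(!mfn_valid(mfn)) )
            return NULL;                  /* no frame-table entry to refcount */

        page = mfn_to_page(mfn);
        if ( unlikely(!get_page(page, d)) )
            return NULL;                  /* reference could not be taken */

        return page;
    }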
/xen/xen/arch/x86/

  debug.c
     107  return mfn_valid(mfn) ? mfn : INVALID_MFN;    in dbg_pv_va2mfn()

  tboot.c
     190  if ( !mfn_valid(_mfn(mfn)) )    in update_pagetable_mac()
     282  if ( !mfn_valid(_mfn(mfn)) )    in tboot_gen_xenheap_integrity()
/xen/xen/drivers/passthrough/x86/

  iommu.c
     164  if ( (pfn > max_pfn && !mfn_valid(mfn)) || xen_in_range(pfn) )    in hwdom_iommu_map()
/xen/xen/common/

  page_alloc.c
    1462  if ( !mfn_valid(page_to_mfn(predecessor)) ||    in free_heap_pages()
    1485  if ( !mfn_valid(page_to_mfn(successor)) ||    in free_heap_pages()
    1579  if ( !mfn_valid(mfn) )    in offline_page()
    1688  if ( !mfn_valid(mfn) )    in online_page()
    1739  if ( !mfn_valid(mfn) || !page_is_ram_type(mfn_x(mfn), RAM_TYPE_CONVENTIONAL) )    in query_page_offline()
    1929  if ( !mfn_valid(_mfn(mfn)) || !page_state_is(pg, free) )    in smp_scrub_heap_pages()
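Note: in the buddy allocator, mfn_valid() protects two different paths: free_heap_pages() probes a would-be buddy's struct page_info during merging, and the offline/online/query/scrub paths take an arbitrary caller-supplied MFN, so both must first confirm the frame table covers the frame. A minimal sketch of the caller-supplied case, built only from the conditions visible above; page_query_ok() is a hypothetical name and omits the further state checks the real code performs.

    /* Hypothetical guard in the style of query_page_offline(): the MFN must be
     * covered by the frame table and be conventional RAM before its page state
     * (struct page_info) is examined.  Assumes the declarations used by
     * page_alloc.c (page_is_ram_type(), RAM_TYPE_CONVENTIONAL) are in scope. */
    static bool page_query_ok(mfn_t mfn)
    {
        return mfn_valid(mfn) &&
               page_is_ram_type(mfn_x(mfn), RAM_TYPE_CONVENTIONAL);
    }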
/xen/xen/arch/arm/

  p2m.c
     464  if ( !mfn_valid(mfn) )    in p2m_get_page_from_gfn()
     734  ASSERT(mfn_valid(mfn));    in p2m_put_l3_page()
     781  ASSERT(mfn_valid(mfn));    in p2m_free_entry()
    1911  if ( !mfn_valid(mfn) )    in get_page_from_gva()