/xen/xen/arch/x86/

  debug.c
      38: return INVALID_MFN;                           in dbg_hvm_va2mfn()
      42: mfn = INVALID_MFN;                            in dbg_hvm_va2mfn()
      44: if ( mfn_eq(mfn, INVALID_MFN) )               in dbg_hvm_va2mfn()
      83: return INVALID_MFN;                           in dbg_pv_va2mfn()
      91: return INVALID_MFN;                           in dbg_pv_va2mfn()
     100: return INVALID_MFN;                           in dbg_pv_va2mfn()
     107: return mfn_valid(mfn) ? mfn : INVALID_MFN;    in dbg_pv_va2mfn()
     128: if ( mfn_eq(mfn, INVALID_MFN) )               in dbg_rw_guest_mem()
|
  mm.c
      361: if ( !mfn_eq(l3mfn, INVALID_MFN) )               in arch_init_memory()
     1686: ASSERT(!mfn_eq(l4mfn, INVALID_MFN));             in init_xen_l4_slots()
     3898: mfn_t map_mfn = INVALID_MFN, mfn;                in do_mmu_update()
     4610: mfn_t mfn = INVALID_MFN;                         in xenmem_add_to_physmap_one()
     4638: mfn = INVALID_MFN;                               in xenmem_add_to_physmap_one()
     4647: if ( mfn_eq(mfn, INVALID_MFN) )                  in xenmem_add_to_physmap_one()
     5041: mfn_t mfn = v ? virt_to_mfn(v) : INVALID_MFN;    in free_xen_pagetable()
     5297: if ( !mfn_eq(mfn, INVALID_MFN) )                 in map_pages_to_xen()
     5324: if ( !mfn_eq(mfn, INVALID_MFN) )                 in map_pages_to_xen()
     5394: if ( !mfn_eq(mfn, INVALID_MFN) )                 in map_pages_to_xen()
     [all …]
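Throughout the hypervisor these hits follow one idiom: `mfn_t` is a typesafe wrapper around `unsigned long`, so frame numbers are initialised to `INVALID_MFN` and tested with `mfn_eq()` rather than `==`. Below is a minimal sketch of the pattern; the real tree generates the wrapper via its `TYPE_SAFE()` macro, and the definitions here are simplified stand-ins:

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for Xen's typesafe MFN wrapper (the real tree
 * generates this with TYPE_SAFE(unsigned long, mfn)). */
typedef struct { unsigned long m; } mfn_t;

static inline mfn_t _mfn(unsigned long m)    { return (mfn_t){ m }; }
static inline unsigned long mfn_x(mfn_t mfn) { return mfn.m; }
static inline bool mfn_eq(mfn_t x, mfn_t y)  { return mfn_x(x) == mfn_x(y); }

#define INVALID_MFN _mfn(~0UL)   /* all-ones sentinel */

/* Hypothetical lookup in the style of dbg_hvm_va2mfn(): return
 * INVALID_MFN on failure and let callers test with mfn_eq(). */
static mfn_t lookup_mfn(bool found, unsigned long raw)
{
    if ( !found )
        return INVALID_MFN;

    return _mfn(raw);
}

int main(void)
{
    mfn_t mfn = lookup_mfn(false, 0x1234);

    if ( mfn_eq(mfn, INVALID_MFN) )   /* structs can't be compared with ==, */
        printf("lookup failed\n");    /* which is the point of the wrapper  */

    return 0;
}
```

The all-ones value can never be a real machine frame, which is what makes normalisations like the `mfn_valid(mfn) ? mfn : INVALID_MFN` return in dbg_pv_va2mfn() safe.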
|
/xen/tools/xenpaging/

  policy_default.c
      66: mru[i] = INVALID_MFN;                        in policy_init()
     132: return INVALID_MFN;                          in policy_choose_victim()
     149: if ( old_gfn != INVALID_MFN )                in policy_handle_paged_in()
     156: mru[i_mru & (mru_size - 1)] = INVALID_MFN;   in policy_handle_paged_in()
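policy_default.c keeps a most-recently-used ring of paged-in gfns, with `INVALID_MFN` marking empty slots and a power-of-two mask wrapping the index. A sketch of that ring follows; the names mirror the listing, but the size is illustrative (the real tool derives it from its configuration):

```c
#include <stdint.h>

typedef uint64_t xen_pfn_t;
#define INVALID_MFN ((xen_pfn_t)-1)   /* tools-side: a plain integer sentinel */

#define MRU_SIZE 1024                 /* illustrative; must be a power of two */

static xen_pfn_t mru[MRU_SIZE];
static unsigned int i_mru;

/* All slots start empty, as in policy_init(). */
static void mru_init(void)
{
    for ( unsigned int i = 0; i < MRU_SIZE; i++ )
        mru[i] = INVALID_MFN;
}

/* The ever-growing index is wrapped with "& (size - 1)", which is why
 * the ring size must be a power of two. */
static void mru_record(xen_pfn_t gfn)
{
    mru[i_mru++ & (MRU_SIZE - 1)] = gfn;
}
```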
|
/xen/tools/libxc/

  xc_sr_common_x86_pv.h
      58: if ( frame >= INVALID_MFN )                  in pte_to_frame()
      59: return INVALID_MFN;                          in pte_to_frame()
|
  xg_private.h
     121: return mfn == ~0U ? INVALID_MFN : mfn;       in xc_pfn_to_mfn()
|
  xc_sr_restore_x86_pv.c
      62: ctx->restore.ops.set_gfn(ctx, i, INVALID_MFN);                          in expand_p2m()
      67: ctx->x86.pv.p2m_pfns[i] = INVALID_MFN;                                  in expand_p2m()
     956: ((uint64_t *)ctx->x86.pv.p2m)[pfn] = mfn == INVALID_MFN ? ~0ULL : mfn;  in x86_pv_set_gfn()
     992: if ( pfn == INVALID_MFN )                                               in x86_pv_localise_page()
    1001: if ( pfn_to_mfn(ctx, pfn) == INVALID_MFN )                              in x86_pv_localise_page()
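The libxc hits show the tools-side convention: `INVALID_MFN` is a plain all-ones integer, so ordinary `==` and `>=` comparisons replace `mfn_eq()`. `pte_to_frame()` rejects any frame at or above the sentinel, and `xc_pfn_to_mfn()` widens a 32-bit guest's `~0U` marker to the canonical 64-bit value. A sketch of that widening, with a deliberately simplified signature (the real helper also takes the p2m array and reads the guest width from the context):

```c
#include <stdint.h>

typedef uint64_t xen_pfn_t;
#define INVALID_MFN ((xen_pfn_t)-1)

/* A 32-bit guest's p2m stores its invalid entries as ~0U, so a 4-byte
 * read must be canonicalised to the 64-bit sentinel before callers
 * compare against INVALID_MFN. Simplified from xg_private.h. */
static xen_pfn_t canonical_mfn(uint64_t raw, unsigned int guest_width)
{
    if ( guest_width == 4 )
        return raw == ~0U ? INVALID_MFN : raw;

    return raw;   /* 64-bit guests already use the all-ones sentinel */
}
```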
|
/xen/xen/arch/x86/mm/

  p2m.c
      532: mfn = INVALID_MFN;                          in __get_gfn_type_access()
      665: return INVALID_MFN;                         in p2m_alloc_ptp()
     2195: mfn_x(INVALID_MFN) )                        in p2m_init_altp2m_by_id()
     2249: mfn_x(INVALID_MFN) )                        in p2m_destroy_altp2m_by_id()
     2257: mfn_x(INVALID_MFN);                         in p2m_destroy_altp2m_by_id()
     2259: mfn_x(INVALID_MFN);                         in p2m_destroy_altp2m_by_id()
     2318: mfn_x(INVALID_MFN) )                        in p2m_change_altp2m_gfn()
     2678: mfn_x(INVALID_MFN) )                        in p2m_set_suppress_ve_multi()
     2746: mfn_x(INVALID_MFN) )                        in p2m_get_suppress_ve()
     2784: mfn_x(INVALID_MFN) )                        in p2m_set_altp2m_view_visibility()
     [all …]
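The altp2m hits compare against `mfn_x(INVALID_MFN)` rather than using `mfn_eq()` because `d->arch.altp2m_eptp[]` stores raw `unsigned long` values, not `mfn_t`. A sketch of the idiom, reusing the simplified wrapper from the first sketch (the array bound is illustrative):

```c
#include <stdbool.h>

typedef struct { unsigned long m; } mfn_t;   /* simplified, as sketched earlier */
static inline unsigned long mfn_x(mfn_t x) { return x.m; }
#define INVALID_MFN ((mfn_t){ ~0UL })

#define MAX_ALTP2M 10   /* illustrative bound, not taken from the tree */

/* The eptp arrays hold raw unsigned longs, not mfn_t, so an unused view
 * is recorded and tested as the raw sentinel value. */
static unsigned long altp2m_eptp[MAX_ALTP2M];

static void altp2m_init(void)
{
    for ( unsigned int i = 0; i < MAX_ALTP2M; i++ )
        altp2m_eptp[i] = mfn_x(INVALID_MFN);   /* as hap_enable() does below */
}

static bool altp2m_view_in_use(unsigned int idx)
{
    return altp2m_eptp[idx] != mfn_x(INVALID_MFN);
}
```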
|
  p2m-pt.c
     182: if ( mfn_eq(mfn, INVALID_MFN) )                  in p2m_next_level()
     220: if ( mfn_eq(mfn, INVALID_MFN) )                  in p2m_next_level()
     490: ASSERT(!mfn_eq(mfn, INVALID_MFN) &&              in check_entry()
     529: unsigned long old_mfn = mfn_x(INVALID_MFN);      in p2m_pt_set_entry()
     737: return INVALID_MFN;                              in p2m_pt_get_entry()
     750: return INVALID_MFN;                              in p2m_pt_get_entry()
     778: return INVALID_MFN;                              in p2m_pt_get_entry()
     790: return (p2m_is_valid(*t)) ? mfn : INVALID_MFN;   in p2m_pt_get_entry()
     819: return INVALID_MFN;                              in p2m_pt_get_entry()
     829: return (p2m_is_valid(*t)) ? mfn : INVALID_MFN;   in p2m_pt_get_entry()
     [all …]
|
  p2m-pod.c
      554: if ( p2m_set_entry(p2m, gfn, INVALID_MFN, order, p2m_invalid,         in p2m_pod_decrease_reservation()
      609: if ( p2m_set_entry(p2m, gfn_add(gfn, i), INVALID_MFN, cur_order,      in p2m_pod_decrease_reservation()
      638: if ( p2m_set_entry(p2m, gfn_add(gfn, i), INVALID_MFN, cur_order,      in p2m_pod_decrease_reservation()
      687: mfn_t mfn, mfn0 = INVALID_MFN;                                        in p2m_pod_zero_check_superpage()
      777: if ( p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_2M,              in p2m_pod_zero_check_superpage()
      916: if ( p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K,          in p2m_pod_zero_check()
     1159: return !p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M,   in p2m_pod_demand_populate()
     1249: if ( p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_4K,      in p2m_pod_demand_populate()
     1309: rc = p2m_set_entry(p2m, gfn, INVALID_MFN, order,                      in guest_physmap_mark_populate_on_demand()
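The populate-on-demand hits illustrate how p2m mappings are torn down: writing `INVALID_MFN` with type `p2m_invalid` through `p2m_set_entry()` clears the range. A sketch of that call shape with simplified stand-in types follows; the real prototype lives in the Xen headers and may differ in detail:

```c
#include <stdbool.h>

typedef struct { unsigned long m; } mfn_t;   /* simplified, as sketched earlier */
typedef struct { unsigned long g; } gfn_t;
#define INVALID_MFN ((mfn_t){ ~0UL })
#define PAGE_ORDER_4K 0

typedef enum { p2m_invalid, p2m_ram_rw /* ... */ } p2m_type_t;
typedef enum { p2m_access_rwx /* ... */ } p2m_access_t;
struct p2m_domain;                           /* opaque stand-in */

/* Stand-in declaration mirroring the calls listed above; not the real
 * prototype, just enough to show the shape. */
int p2m_set_entry(struct p2m_domain *p2m, gfn_t gfn, mfn_t mfn,
                  unsigned int order, p2m_type_t t, p2m_access_t a);

/* Removing a 4K mapping: write the all-ones sentinel with p2m_invalid,
 * the shape of the p2m-pod.c and mem_paging.c calls in this listing. */
static int clear_gfn(struct p2m_domain *p2m, gfn_t gfn)
{
    return p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_4K,
                         p2m_invalid, p2m_access_rwx);
}
```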
|
  mem_access.c
      74: if ( mfn_eq(mfn, INVALID_MFN) )              in _p2m_get_mem_access()
     370: mfn_x(INVALID_MFN) )                         in p2m_set_mem_access()
     430: mfn_x(INVALID_MFN) )                         in p2m_set_mem_access_multi()
     497: mfn_x(INVALID_MFN) )                         in p2m_get_mem_access()
|
  paging.c
      62: return INVALID_MFN;                          in paging_new_log_dirty_page()
      90: node[i] = INVALID_MFN;                       in paging_new_log_dirty_node()
     162: l3[i3] = INVALID_MFN;                        in paging_free_log_dirty_bitmap()
     177: l4[i4] = INVALID_MFN;                        in paging_free_log_dirty_bitmap()
     193: d->arch.paging.log_dirty.top = INVALID_MFN;  in paging_free_log_dirty_bitmap()
     647: d->arch.paging.log_dirty.top = INVALID_MFN;  in paging_domain_init()
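paging.c applies the same sentinel to its log-dirty tree: freshly allocated node pages have every slot set to `INVALID_MFN`, so the free path can tell allocated subtrees from empty slots, and `log_dirty.top` doubles as an "is the bitmap allocated?" flag. A sketch, assuming a node is simply a page-sized array of `mfn_t`:

```c
#include <stdbool.h>

typedef struct { unsigned long m; } mfn_t;   /* simplified, as sketched earlier */
static inline bool mfn_eq(mfn_t a, mfn_t b) { return a.m == b.m; }
#define INVALID_MFN ((mfn_t){ ~0UL })

#define SLOTS_PER_NODE 512   /* illustrative: 4K page / 8-byte slots */

/* Every slot starts as the sentinel, as paging_new_log_dirty_node() does. */
static void init_log_dirty_node(mfn_t *node)
{
    for ( unsigned int i = 0; i < SLOTS_PER_NODE; i++ )
        node[i] = INVALID_MFN;
}

/* The free path only releases slots that hold a real subtree, then
 * resets them to the sentinel, mirroring paging_free_log_dirty_bitmap(). */
static void free_log_dirty_node(mfn_t *node, void (*free_page)(mfn_t))
{
    for ( unsigned int i = 0; i < SLOTS_PER_NODE; i++ )
    {
        if ( !mfn_eq(node[i], INVALID_MFN) )
            free_page(node[i]);
        node[i] = INVALID_MFN;
    }
}
```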
|
  guest_walk.c
     226: gw->l2mfn = gw->l1mfn = INVALID_MFN;         in guest_walk_tables()
     348: gw->l1mfn = INVALID_MFN;                     in guest_walk_tables()
|
  p2m-ept.c
      211: if ( mfn_eq(mfn, INVALID_MFN) )                                           in ept_set_middle_entry()
      676: unsigned long fn_mask = !mfn_eq(mfn, INVALID_MFN) ? (gfn | mfn_x(mfn)) : gfn;  in ept_set_entry()
      889: mfn_t mfn = INVALID_MFN;                                                  in ept_get_entry()
     1185: if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )                       in ept_set_ad_sync()
     1407: if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) )                       in p2m_find_altp2m_by_eptp()
|
  mem_sharing.c
     1366: set_rc = p2m->set_entry(p2m, _gfn(gfn), INVALID_MFN, PAGE_ORDER_4K,   in relinquish_shared_pages()
     1587: if ( !mfn_eq(vcpu_info_mfn, INVALID_MFN) )                            in copy_vcpu_settings()
     1592: if ( mfn_eq(new_vcpu_info_mfn, INVALID_MFN) )                         in copy_vcpu_settings()
     1684: if ( mfn_eq(new_mfn, INVALID_MFN) )                                   in copy_special_pages()
     1713: rc = p2m->set_entry(p2m, new_gfn, INVALID_MFN, PAGE_ORDER_4K,         in copy_special_pages()
     1831: rc = p2m->set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_4K,             in mem_sharing_fork_reset()
|
  mem_paging.c
     316: ret = p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_4K,   in evict()
|
/xen/xen/arch/arm/

  mm.c
      823: if ( mfn_eq(xenheap_mfn_start, INVALID_MFN) )                        in setup_xenheap_mappings()
     1069: if ( (flags & _PAGE_PRESENT) && mfn_eq(mfn, INVALID_MFN) )           in xen_pt_check_entry()
     1097: ASSERT(!mfn_eq(mfn, INVALID_MFN));                                   in xen_pt_check_entry()
     1111: ASSERT(mfn_eq(mfn, INVALID_MFN));                                    in xen_pt_check_entry()
     1125: ASSERT(mfn_eq(mfn, INVALID_MFN));                                    in xen_pt_check_entry()
     1206: if ( !mfn_eq(mfn, INVALID_MFN) )                                     in xen_pt_update_entry()
     1277: if ( !mfn_eq(mfn, INVALID_MFN) )                                     in xen_pt_update()
     1303: return xen_pt_update(virt, INVALID_MFN, nr_mfns, _PAGE_POPULATE);    in populate_pt_range()
     1309: return xen_pt_update(v, INVALID_MFN, (e - v) >> PAGE_SHIFT, 0);      in destroy_xen_mappings()
     1315: return xen_pt_update(s, INVALID_MFN, (e - s) >> PAGE_SHIFT, flags);  in modify_xen_mappings()
     [all …]
|
  p2m.c
      359: mfn_t mfn = INVALID_MFN;                                         in p2m_get_entry()
      886: bool removing_mapping = mfn_eq(smfn, INVALID_MFN);               in __p2m_set_entry()
      996: ASSERT(!mfn_eq(INVALID_MFN, smfn) || (a == p2m_access_rwx));     in __p2m_set_entry()
     1099: mask = !mfn_eq(smfn, INVALID_MFN) ? mfn_x(smfn) : 0;             in p2m_set_entry()
     1117: if ( !mfn_eq(smfn, INVALID_MFN) )                                in p2m_set_entry()
     1307: rc = p2m_set_entry(p2m, start_gfn, nr, INVALID_MFN,              in p2m_remove_mapping()
     1600: if ( !mfn_eq(mfn, INVALID_MFN) )                                 in relinquish_p2m_mapping()
     1606: rc = __p2m_set_entry(p2m, start, order, INVALID_MFN,             in relinquish_p2m_mapping()
     1632: mfn_t mfn = INVALID_MFN;                                         in p2m_cache_flush_range()
     1694: if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_any_ram(t) || !valid )  in p2m_cache_flush_range()
     [all …]
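The `mask = !mfn_eq(smfn, INVALID_MFN) ? mfn_x(smfn) : 0;` line here, like the `fn_mask` computation in ept_set_entry() above, hides a subtle point: the usable superpage order is bounded by the low set bits of gfn | mfn | nr, and since `INVALID_MFN` is all ones, letting it into the mask would force every removal down to 4K pages. An illustrative helper showing the arithmetic under those assumptions:

```c
#include <stdbool.h>

typedef struct { unsigned long m; } mfn_t;   /* simplified, as sketched earlier */
static inline unsigned long mfn_x(mfn_t x) { return x.m; }
static inline bool mfn_eq(mfn_t a, mfn_t b) { return a.m == b.m; }
#define INVALID_MFN ((mfn_t){ ~0UL })

/* The lowest set bit of gfn | mfn | nr bounds the mapping order: gfn and
 * mfn must both be aligned to the block size and nr must cover it. A
 * removal (mfn == INVALID_MFN) only cares about gfn alignment, so the
 * all-ones sentinel must stay out of the mask. */
static unsigned int max_mapping_order(unsigned long gfn, mfn_t mfn,
                                      unsigned long nr)
{
    unsigned long mask = gfn | nr;

    if ( !mfn_eq(mfn, INVALID_MFN) )   /* the idiom from the listing */
        mask |= mfn_x(mfn);

    /* __builtin_ctzl is a GCC/Clang builtin: count trailing zero bits. */
    return mask ? __builtin_ctzl(mask)
                : 8 * sizeof(unsigned long) - 1;
}
```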
|
  mem_access.c
      76: if ( mfn_eq(mfn, INVALID_MFN) )              in __p2m_get_mem_access()
     204: if ( mfn_eq(mfn, INVALID_MFN) )              in p2m_mem_access_check_and_get_page()
     412: if ( !mfn_eq(mfn, INVALID_MFN) )             in p2m_set_mem_access()
|
/xen/xen/arch/x86/mm/shadow/

  common.c
       86: v->arch.paging.shadow.oos[i] = INVALID_MFN;   in shadow_vcpu_init()
      293: if ( !mfn_eq(fixup->smfn[i], INVALID_MFN) )   in oos_fixup_flush_gmfn()
      298: fixup->smfn[i] = INVALID_MFN;                 in oos_fixup_flush_gmfn()
      458: fixup.smfn[i] = INVALID_MFN;                  in oos_hash_add()
      508: oos[idx] = INVALID_MFN;                       in oos_hash_remove()
      564: oos[idx] = INVALID_MFN;                       in sh_resync()
      617: oos[idx] = INVALID_MFN;                       in sh_resync_all()
      651: oos[idx] = INVALID_MFN;                       in sh_resync_all()
     1549: return INVALID_MFN;                           in shadow_hash_lookup()
     3246: mfn_t map_mfn = INVALID_MFN;                  in shadow_track_dirty_vram()
     [all …]
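The shadow out-of-sync (OOS) bookkeeping uses fixed-size slot arrays in which `INVALID_MFN` means both "never used" (initialisation) and "freed" (removal), so lookups simply skip sentinel slots. A sketch with illustrative sizes; the real arrays hang off `v->arch.paging.shadow`:

```c
#include <stdbool.h>

typedef struct { unsigned long m; } mfn_t;   /* simplified, as sketched earlier */
static inline bool mfn_eq(mfn_t a, mfn_t b) { return a.m == b.m; }
#define INVALID_MFN ((mfn_t){ ~0UL })

#define OOS_SLOTS 4   /* illustrative size */

static mfn_t oos[OOS_SLOTS];

/* Every slot starts empty, as in shadow_vcpu_init(). */
static void oos_init(void)
{
    for ( unsigned int i = 0; i < OOS_SLOTS; i++ )
        oos[i] = INVALID_MFN;
}

/* Removal writes the sentinel back, so a freed slot is indistinguishable
 * from one that was never used. */
static bool oos_remove(mfn_t gmfn)
{
    for ( unsigned int i = 0; i < OOS_SLOTS; i++ )
        if ( mfn_eq(oos[i], gmfn) )
        {
            oos[i] = INVALID_MFN;
            return true;
        }

    return false;
}
```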
|
  multi.c
      760: if ( !mfn_eq(gmfn, INVALID_MFN) &&           in l1e_propagate_from_guest()
     1684: mfn_t sl3mfn = INVALID_MFN;                   in shadow_get_and_create_l2e()
     2086: mfn_t sl3mfn = INVALID_MFN;                   in validate_gl4e()
     2146: mfn_t sl2mfn = INVALID_MFN;                   in validate_gl3e()
     2180: mfn_t sl1mfn = INVALID_MFN;                   in validate_gl2e()
     2228: mfn_t gmfn = INVALID_MFN;                     in validate_gl1e()
     2298: mfn_t gmfn = INVALID_MFN;                     in sh_resync_l1()
     2590: gmfn = INVALID_MFN;                           in sh_prefetch()
     4480: smfn = INVALID_MFN;                           in sh_pagetable_dying()
     4490: ? INVALID_MFN                                 in sh_pagetable_dying()
     [all …]
|
/xen/xen/arch/x86/mm/hap/

  hap.c
     404: init_xen_l4_slots(l4e, m4mfn, d, INVALID_MFN, false);   in hap_make_monitor_table()
     412: return INVALID_MFN;                                     in hap_make_monitor_table()
     503: d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN);            in hap_enable()
     504: d->arch.altp2m_visible_eptp[i] = mfn_x(INVALID_MFN);    in hap_enable()
|
/xen/xen/arch/x86/hvm/viridian/

  viridian.c
     254: gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));   in enable_hypercall_page()
     750: gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN));   in viridian_map_guest_page()
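Both viridian hits are the same logging trick: when a page lookup may have failed, `page ? page_to_mfn(page) : INVALID_MFN` is logged so a failure shows up as the unmistakable all-ones frame number rather than garbage. A sketch with a stub standing in for `page_to_mfn()`:

```c
#include <stdio.h>

typedef struct { unsigned long m; } mfn_t;   /* simplified, as sketched earlier */
static inline unsigned long mfn_x(mfn_t x) { return x.m; }
#define INVALID_MFN ((mfn_t){ ~0UL })

struct page_info;                            /* opaque stand-in */

static mfn_t page_to_mfn(const struct page_info *pg)
{
    (void)pg;
    return (mfn_t){ 0x1234 };                /* stub for the sketch */
}

static void log_mapping(unsigned long gmfn, const struct page_info *page)
{
    /* A failed lookup prints mfn 0xffffffffffffffff, impossible for a
     * real frame and therefore easy to spot in the log. */
    printf("gmfn %#lx -> mfn %#lx\n", gmfn,
           mfn_x(page ? page_to_mfn(page) : INVALID_MFN));
}
```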
|
/xen/xen/common/

  vmap.c
     237: map_pages_to_xen(addr, INVALID_MFN, pages, _PAGE_NONE);   in vunmap()
|
  domain.c
      130: v->vcpu_info_mfn = INVALID_MFN;                in vcpu_info_reset()
     1252: if ( !mfn_eq(v->vcpu_info_mfn, INVALID_MFN) )  in map_vcpu_info()
     1319: if ( mfn_eq(mfn, INVALID_MFN) )                in unmap_vcpu_info()
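domain.c shows the sentinel doubling as state: `v->vcpu_info_mfn` is the mapped frame when valid and the "nothing mapped" flag when `INVALID_MFN`, so map refuses to run twice and unmap becomes a no-op. A sketch of that guard pattern with simplified types:

```c
#include <errno.h>
#include <stdbool.h>

typedef struct { unsigned long m; } mfn_t;   /* simplified, as sketched earlier */
static inline bool mfn_eq(mfn_t a, mfn_t b) { return a.m == b.m; }
#define INVALID_MFN ((mfn_t){ ~0UL })

struct vcpu { mfn_t vcpu_info_mfn; };        /* reduced to the one field used */

static void vcpu_info_reset(struct vcpu *v)
{
    v->vcpu_info_mfn = INVALID_MFN;          /* nothing mapped */
}

static int map_vcpu_info(struct vcpu *v, mfn_t mfn)
{
    if ( !mfn_eq(v->vcpu_info_mfn, INVALID_MFN) )
        return -EINVAL;                      /* already mapped once */

    v->vcpu_info_mfn = mfn;
    return 0;
}

static void unmap_vcpu_info(struct vcpu *v)
{
    if ( mfn_eq(v->vcpu_info_mfn, INVALID_MFN) )
        return;                              /* nothing to do */

    /* ... drop the mapping ... */
    v->vcpu_info_mfn = INVALID_MFN;
}
```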
|
/xen/tools/libxc/include/

  xc_dom.h
     434: return INVALID_MFN;                          in xc_dom_p2m()
|