Home
last modified time | relevance | path

Searched refs: INVALID_MFN (results 1–25 of 46), sorted by relevance

Pages: 1 2

/xen/xen/arch/x86/
A Ddebug.c38 return INVALID_MFN; in dbg_hvm_va2mfn()
42 mfn = INVALID_MFN; in dbg_hvm_va2mfn()
44 if ( mfn_eq(mfn, INVALID_MFN) ) in dbg_hvm_va2mfn()
83 return INVALID_MFN; in dbg_pv_va2mfn()
91 return INVALID_MFN; in dbg_pv_va2mfn()
100 return INVALID_MFN; in dbg_pv_va2mfn()
107 return mfn_valid(mfn) ? mfn : INVALID_MFN; in dbg_pv_va2mfn()
128 if ( mfn_eq(mfn, INVALID_MFN) ) in dbg_rw_guest_mem()
A Dmm.c361 if ( !mfn_eq(l3mfn, INVALID_MFN) ) in arch_init_memory()
1686 ASSERT(!mfn_eq(l4mfn, INVALID_MFN)); in init_xen_l4_slots()
3898 mfn_t map_mfn = INVALID_MFN, mfn; in do_mmu_update()
4610 mfn_t mfn = INVALID_MFN; in xenmem_add_to_physmap_one()
4638 mfn = INVALID_MFN; in xenmem_add_to_physmap_one()
4647 if ( mfn_eq(mfn, INVALID_MFN) ) in xenmem_add_to_physmap_one()
5041 mfn_t mfn = v ? virt_to_mfn(v) : INVALID_MFN; in free_xen_pagetable()
5297 if ( !mfn_eq(mfn, INVALID_MFN) ) in map_pages_to_xen()
5324 if ( !mfn_eq(mfn, INVALID_MFN) ) in map_pages_to_xen()
5394 if ( !mfn_eq(mfn, INVALID_MFN) ) in map_pages_to_xen()
[all …]
/xen/tools/xenpaging/
A Dpolicy_default.c66 mru[i] = INVALID_MFN; in policy_init()
132 return INVALID_MFN; in policy_choose_victim()
149 if ( old_gfn != INVALID_MFN ) in policy_handle_paged_in()
156 mru[i_mru & (mru_size - 1)] = INVALID_MFN; in policy_handle_paged_in()
/xen/tools/libxc/
A Dxc_sr_common_x86_pv.h58 if ( frame >= INVALID_MFN ) in pte_to_frame()
59 return INVALID_MFN; in pte_to_frame()
A Dxg_private.h121 return mfn == ~0U ? INVALID_MFN : mfn; in xc_pfn_to_mfn()
A Dxc_sr_restore_x86_pv.c62 ctx->restore.ops.set_gfn(ctx, i, INVALID_MFN); in expand_p2m()
67 ctx->x86.pv.p2m_pfns[i] = INVALID_MFN; in expand_p2m()
956 ((uint64_t *)ctx->x86.pv.p2m)[pfn] = mfn == INVALID_MFN ? ~0ULL : mfn; in x86_pv_set_gfn()
992 if ( pfn == INVALID_MFN ) in x86_pv_localise_page()
1001 if ( pfn_to_mfn(ctx, pfn) == INVALID_MFN ) in x86_pv_localise_page()
/xen/xen/arch/x86/mm/
A Dp2m.c532 mfn = INVALID_MFN; in __get_gfn_type_access()
665 return INVALID_MFN; in p2m_alloc_ptp()
2195 mfn_x(INVALID_MFN) ) in p2m_init_altp2m_by_id()
2249 mfn_x(INVALID_MFN) ) in p2m_destroy_altp2m_by_id()
2257 mfn_x(INVALID_MFN); in p2m_destroy_altp2m_by_id()
2259 mfn_x(INVALID_MFN); in p2m_destroy_altp2m_by_id()
2318 mfn_x(INVALID_MFN) ) in p2m_change_altp2m_gfn()
2678 mfn_x(INVALID_MFN) ) in p2m_set_suppress_ve_multi()
2746 mfn_x(INVALID_MFN) ) in p2m_get_suppress_ve()
2784 mfn_x(INVALID_MFN) ) in p2m_set_altp2m_view_visibility()
[all …]
A Dp2m-pt.c182 if ( mfn_eq(mfn, INVALID_MFN) ) in p2m_next_level()
220 if ( mfn_eq(mfn, INVALID_MFN) ) in p2m_next_level()
490 ASSERT(!mfn_eq(mfn, INVALID_MFN) && in check_entry()
529 unsigned long old_mfn = mfn_x(INVALID_MFN); in p2m_pt_set_entry()
737 return INVALID_MFN; in p2m_pt_get_entry()
750 return INVALID_MFN; in p2m_pt_get_entry()
778 return INVALID_MFN; in p2m_pt_get_entry()
790 return (p2m_is_valid(*t)) ? mfn : INVALID_MFN; in p2m_pt_get_entry()
819 return INVALID_MFN; in p2m_pt_get_entry()
829 return (p2m_is_valid(*t)) ? mfn : INVALID_MFN; in p2m_pt_get_entry()
[all …]
A Dp2m-pod.c554 if ( p2m_set_entry(p2m, gfn, INVALID_MFN, order, p2m_invalid, in p2m_pod_decrease_reservation()
609 if ( p2m_set_entry(p2m, gfn_add(gfn, i), INVALID_MFN, cur_order, in p2m_pod_decrease_reservation()
638 if ( p2m_set_entry(p2m, gfn_add(gfn, i), INVALID_MFN, cur_order, in p2m_pod_decrease_reservation()
687 mfn_t mfn, mfn0 = INVALID_MFN; in p2m_pod_zero_check_superpage()
777 if ( p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_2M, in p2m_pod_zero_check_superpage()
916 if ( p2m_set_entry(p2m, gfns[i], INVALID_MFN, PAGE_ORDER_4K, in p2m_pod_zero_check()
1159 return !p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_2M, in p2m_pod_demand_populate()
1249 if ( p2m_set_entry(p2m, gfn_aligned, INVALID_MFN, PAGE_ORDER_4K, in p2m_pod_demand_populate()
1309 rc = p2m_set_entry(p2m, gfn, INVALID_MFN, order, in guest_physmap_mark_populate_on_demand()
A Dmem_access.c74 if ( mfn_eq(mfn, INVALID_MFN) ) in _p2m_get_mem_access()
370 mfn_x(INVALID_MFN) ) in p2m_set_mem_access()
430 mfn_x(INVALID_MFN) ) in p2m_set_mem_access_multi()
497 mfn_x(INVALID_MFN) ) in p2m_get_mem_access()
A Dpaging.c62 return INVALID_MFN; in paging_new_log_dirty_page()
90 node[i] = INVALID_MFN; in paging_new_log_dirty_node()
162 l3[i3] = INVALID_MFN; in paging_free_log_dirty_bitmap()
177 l4[i4] = INVALID_MFN; in paging_free_log_dirty_bitmap()
193 d->arch.paging.log_dirty.top = INVALID_MFN; in paging_free_log_dirty_bitmap()
647 d->arch.paging.log_dirty.top = INVALID_MFN; in paging_domain_init()
A Dguest_walk.c226 gw->l2mfn = gw->l1mfn = INVALID_MFN; in guest_walk_tables()
348 gw->l1mfn = INVALID_MFN; in guest_walk_tables()
A Dp2m-ept.c211 if ( mfn_eq(mfn, INVALID_MFN) ) in ept_set_middle_entry()
676 unsigned long fn_mask = !mfn_eq(mfn, INVALID_MFN) ? (gfn | mfn_x(mfn)) : gfn; in ept_set_entry()
889 mfn_t mfn = INVALID_MFN; in ept_get_entry()
1185 if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) ) in ept_set_ad_sync()
1407 if ( d->arch.altp2m_eptp[i] == mfn_x(INVALID_MFN) ) in p2m_find_altp2m_by_eptp()
A Dmem_sharing.c1366 set_rc = p2m->set_entry(p2m, _gfn(gfn), INVALID_MFN, PAGE_ORDER_4K, in relinquish_shared_pages()
1587 if ( !mfn_eq(vcpu_info_mfn, INVALID_MFN) ) in copy_vcpu_settings()
1592 if ( mfn_eq(new_vcpu_info_mfn, INVALID_MFN) ) in copy_vcpu_settings()
1684 if ( mfn_eq(new_mfn, INVALID_MFN) ) in copy_special_pages()
1713 rc = p2m->set_entry(p2m, new_gfn, INVALID_MFN, PAGE_ORDER_4K, in copy_special_pages()
1831 rc = p2m->set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_4K, in mem_sharing_fork_reset()
A Dmem_paging.c316 ret = p2m_set_entry(p2m, gfn, INVALID_MFN, PAGE_ORDER_4K, in evict()
/xen/xen/arch/arm/
A Dmm.c823 if ( mfn_eq(xenheap_mfn_start, INVALID_MFN) ) in setup_xenheap_mappings()
1069 if ( (flags & _PAGE_PRESENT) && mfn_eq(mfn, INVALID_MFN) ) in xen_pt_check_entry()
1097 ASSERT(!mfn_eq(mfn, INVALID_MFN)); in xen_pt_check_entry()
1111 ASSERT(mfn_eq(mfn, INVALID_MFN)); in xen_pt_check_entry()
1125 ASSERT(mfn_eq(mfn, INVALID_MFN)); in xen_pt_check_entry()
1206 if ( !mfn_eq(mfn, INVALID_MFN) ) in xen_pt_update_entry()
1277 if ( !mfn_eq(mfn, INVALID_MFN) ) in xen_pt_update()
1303 return xen_pt_update(virt, INVALID_MFN, nr_mfns, _PAGE_POPULATE); in populate_pt_range()
1309 return xen_pt_update(v, INVALID_MFN, (e - v) >> PAGE_SHIFT, 0); in destroy_xen_mappings()
1315 return xen_pt_update(s, INVALID_MFN, (e - s) >> PAGE_SHIFT, flags); in modify_xen_mappings()
[all …]
A Dp2m.c359 mfn_t mfn = INVALID_MFN; in p2m_get_entry()
886 bool removing_mapping = mfn_eq(smfn, INVALID_MFN); in __p2m_set_entry()
996 ASSERT(!mfn_eq(INVALID_MFN, smfn) || (a == p2m_access_rwx)); in __p2m_set_entry()
1099 mask = !mfn_eq(smfn, INVALID_MFN) ? mfn_x(smfn) : 0; in p2m_set_entry()
1117 if ( !mfn_eq(smfn, INVALID_MFN) ) in p2m_set_entry()
1307 rc = p2m_set_entry(p2m, start_gfn, nr, INVALID_MFN, in p2m_remove_mapping()
1600 if ( !mfn_eq(mfn, INVALID_MFN) ) in relinquish_p2m_mapping()
1606 rc = __p2m_set_entry(p2m, start, order, INVALID_MFN, in relinquish_p2m_mapping()
1632 mfn_t mfn = INVALID_MFN; in p2m_cache_flush_range()
1694 if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_any_ram(t) || !valid ) in p2m_cache_flush_range()
[all …]
A Dmem_access.c76 if ( mfn_eq(mfn, INVALID_MFN) ) in __p2m_get_mem_access()
204 if ( mfn_eq(mfn, INVALID_MFN) ) in p2m_mem_access_check_and_get_page()
412 if ( !mfn_eq(mfn, INVALID_MFN) ) in p2m_set_mem_access()
/xen/xen/arch/x86/mm/shadow/
A Dcommon.c86 v->arch.paging.shadow.oos[i] = INVALID_MFN; in shadow_vcpu_init()
293 if ( !mfn_eq(fixup->smfn[i], INVALID_MFN) ) in oos_fixup_flush_gmfn()
298 fixup->smfn[i] = INVALID_MFN; in oos_fixup_flush_gmfn()
458 fixup.smfn[i] = INVALID_MFN; in oos_hash_add()
508 oos[idx] = INVALID_MFN; in oos_hash_remove()
564 oos[idx] = INVALID_MFN; in sh_resync()
617 oos[idx] = INVALID_MFN; in sh_resync_all()
651 oos[idx] = INVALID_MFN; in sh_resync_all()
1549 return INVALID_MFN; in shadow_hash_lookup()
3246 mfn_t map_mfn = INVALID_MFN; in shadow_track_dirty_vram()
[all …]
A Dmulti.c760 if ( !mfn_eq(gmfn, INVALID_MFN) && in l1e_propagate_from_guest()
1684 mfn_t sl3mfn = INVALID_MFN; in shadow_get_and_create_l2e()
2086 mfn_t sl3mfn = INVALID_MFN; in validate_gl4e()
2146 mfn_t sl2mfn = INVALID_MFN; in validate_gl3e()
2180 mfn_t sl1mfn = INVALID_MFN; in validate_gl2e()
2228 mfn_t gmfn = INVALID_MFN; in validate_gl1e()
2298 mfn_t gmfn = INVALID_MFN; in sh_resync_l1()
2590 gmfn = INVALID_MFN; in sh_prefetch()
4480 smfn = INVALID_MFN; in sh_pagetable_dying()
4490 ? INVALID_MFN in sh_pagetable_dying()
[all …]
/xen/xen/arch/x86/mm/hap/
A Dhap.c404 init_xen_l4_slots(l4e, m4mfn, d, INVALID_MFN, false); in hap_make_monitor_table()
412 return INVALID_MFN; in hap_make_monitor_table()
503 d->arch.altp2m_eptp[i] = mfn_x(INVALID_MFN); in hap_enable()
504 d->arch.altp2m_visible_eptp[i] = mfn_x(INVALID_MFN); in hap_enable()
/xen/xen/arch/x86/hvm/viridian/
A Dviridian.c254 gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN)); in enable_hypercall_page()
750 gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN)); in viridian_map_guest_page()
/xen/xen/common/
A Dvmap.c237 map_pages_to_xen(addr, INVALID_MFN, pages, _PAGE_NONE); in vunmap()
A Ddomain.c130 v->vcpu_info_mfn = INVALID_MFN; in vcpu_info_reset()
1252 if ( !mfn_eq(v->vcpu_info_mfn, INVALID_MFN) ) in map_vcpu_info()
1319 if ( mfn_eq(mfn, INVALID_MFN) ) in unmap_vcpu_info()
/xen/tools/libxc/include/
A Dxc_dom.h434 return INVALID_MFN; in xc_dom_p2m()

Completed in 89 milliseconds

Pages: 1 2