/xen/xen/arch/x86/mm/
p2m-pod.c
   336  pod_lock(p2m);   in p2m_pod_set_mem_target()
   428  pod_lock(p2m);   in p2m_pod_offline_or_broken_hit()
   487  pod_lock(p2m);   in p2m_pod_offline_or_broken_replace()
   528  steal_for_cache = ( p2m->pod.entry_count > p2m->pod.count );   in p2m_pod_decrease_reservation()
   659  if ( p2m->pod.entry_count < p2m->pod.count )   in p2m_pod_decrease_reservation()
   675  p2m->pod.entry_count, p2m->pod.count);   in p2m_pod_dump_data()
   722  mfn = p2m->get_entry(p2m, gfn_add(gfn, i), &type, &a, 0,   in p2m_pod_zero_check_superpage()
   881  mfns[i] = p2m->get_entry(p2m, gfns[i], types + i, &a,   in p2m_pod_zero_check()
  1024  p2m->pod.reclaim_single = p2m->pod.max_guest;   in p2m_pod_emergency_sweep()
  1164  if ( p2m->pod.entry_count > p2m->pod.count )   in p2m_pod_demand_populate()
   [all …]
p2m.c
   150  d->arch.p2m = p2m;   in p2m_init_hostp2m()
   291  p2m->change_entry_type_global(p2m, ot, nt);   in change_entry_type_global()
   337  p2m->memory_type_changed(p2m);   in _memory_type_changed()
   437  p2m->enable_hardware_log_dirty(p2m);   in p2m_enable_hardware_log_dirty()
   445  p2m->disable_hardware_log_dirty(p2m);   in p2m_disable_hardware_log_dirty()
   455  p2m->flush_hardware_cached_dirty(p2m);   in p2m_flush_hardware_cached_dirty()
   469  p2m->tlb_flush(p2m);   in p2m_tlb_flush_sync()
   481  p2m->tlb_flush(p2m);   in p2m_unlock_and_tlb_flush()
   497  if ( !p2m || !paging_mode_translate(p2m->domain) )   in __get_gfn_type_access()
  1197  rc = p2m->recalc(p2m, gfn);   in finish_type_change()
   [all …]
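Nearly every hit in p2m.c above has the same shape: a thin wrapper takes the struct p2m_domain and dispatches through a function pointer stored in it, so callers never care whether the backing implementation is EPT, the PT-based one, or an alternate p2m. The following self-contained sketch illustrates only that dispatch pattern; the struct layout, field names and the small main() harness are stand-ins invented for the example, not the actual Xen definitions.

    #include <stdio.h>

    /* Illustrative stand-in for Xen's struct p2m_domain: the real structure
     * carries many more fields; only the dispatch pattern is shown here. */
    struct p2m_domain {
        const char *name;
        /* Per-implementation hook, filled in at init time by the backend. */
        void (*tlb_flush)(struct p2m_domain *p2m);
    };

    /* Backend-specific implementation that the hook points at. */
    static void ept_tlb_flush(struct p2m_domain *p2m)
    {
        printf("flushing TLBs for %s via the EPT backend\n", p2m->name);
    }

    /* Generic wrapper in the style of p2m_tlb_flush_sync(): callers only
     * ever see the struct, never the backend. */
    static void p2m_tlb_flush_sync(struct p2m_domain *p2m)
    {
        p2m->tlb_flush(p2m);
    }

    int main(void)
    {
        struct p2m_domain p2m = { .name = "d1", .tlb_flush = ept_tlb_flush };
        p2m_tlb_flush_sync(&p2m);
        return 0;
    }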
p2m-ept.c
   248  p2m_tlb_flush_sync(p2m);   in ept_free_entry()
   650  p2m_lock(p2m);   in ept_handle_misconfig()
   656  p2m_unlock(p2m);   in ept_handle_misconfig()
  1047  if ( ept_invalidate_emt_subtree(p2m, _mfn(mfn), 1, p2m->ept.wl) )   in ept_change_entry_type_global()
  1060  if ( !p2m->ept.mfn )   in ept_change_entry_type_range()
  1105  if ( ept_invalidate_emt_subtree(p2m, _mfn(mfn), 0, p2m->ept.wl) )   in ept_memory_type_changed()
  1166  ept_sync_domain_mask(p2m, p2m->domain->dirty_cpumask);   in ept_tlb_flush()
  1190  p2m_lock(p2m);   in ept_set_ad_sync()
  1331  ept = &p2m->ept;   in ept_dump_p2m_table()
  1391  ept = &p2m->ept;   in p2m_init_altp2m_ept()
   [all …]
mem_access.c
    71  mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL, NULL);   in _p2m_get_mem_access()
    95  if ( !p2m )   in p2m_mem_access_emulate_check()
   159  if ( !p2m )   in p2m_mem_access_check()
   166  mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, NULL);   in p2m_mem_access_check()
   178  rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,   in p2m_mem_access_check()
   199  mfn = p2m->get_entry(p2m, gfn, &p2mt, &p2ma, 0, NULL, NULL);   in p2m_mem_access_check()
   205  rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K,   in p2m_mem_access_check()
   310  rc = p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, t, a, -1);   in set_mem_access()
   389  p2m_lock(p2m);   in p2m_set_mem_access()
   410  p2m_unlock(p2m);   in p2m_set_mem_access()
   [all …]
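Several of the mem_access.c hits (lines 71, 166/178, 199/205 and 310) are instances of one idiom: look the gfn up with get_entry, change only the access attribute, and write the mapping back with set_entry while the relevant lock is held. The sketch below reproduces that read-modify-write idiom against a toy in-memory table; the enum values, the table and the omitted locking are simplifications made for the example and do not mirror the real Xen types.

    #include <stdio.h>

    /* Toy stand-ins for Xen's p2m types; only the shape of the idiom matters. */
    typedef enum { p2m_access_n, p2m_access_r, p2m_access_rw, p2m_access_rwx } p2m_access_t;
    typedef enum { p2m_invalid, p2m_ram_rw } p2m_type_t;

    struct entry { unsigned long mfn; p2m_type_t t; p2m_access_t a; };

    #define NR_GFNS 16
    static struct entry table[NR_GFNS];

    /* get_entry: return the mfn and report the current type/access. */
    static unsigned long get_entry(unsigned long gfn, p2m_type_t *t, p2m_access_t *a)
    {
        *t = table[gfn].t;
        *a = table[gfn].a;
        return table[gfn].mfn;
    }

    /* set_entry: write the (possibly modified) mapping back. */
    static int set_entry(unsigned long gfn, unsigned long mfn,
                         p2m_type_t t, p2m_access_t a)
    {
        table[gfn] = (struct entry){ .mfn = mfn, .t = t, .a = a };
        return 0;
    }

    /* Read-modify-write in the style of set_mem_access(): only the access
     * attribute changes, everything else is preserved. */
    static int set_mem_access(unsigned long gfn, p2m_access_t new_a)
    {
        p2m_type_t t;
        p2m_access_t a;
        unsigned long mfn = get_entry(gfn, &t, &a);   /* look up ...        */
        return set_entry(gfn, mfn, t, new_a);         /* ... and write back */
    }

    int main(void)
    {
        table[3] = (struct entry){ .mfn = 0x1000, .t = p2m_ram_rw, .a = p2m_access_rwx };
        set_mem_access(3, p2m_access_r);
        printf("gfn 3: access now %d\n", table[3].a);
        return 0;
    }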
p2m-pt.c
   187  rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, level + 1);   in p2m_next_level()
   240  rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry,   in p2m_next_level()
   313  err = p2m->write_p2m_entry(p2m, first_gfn, pent, e, level);   in p2m_pt_set_recalc_range()
   404  err = p2m->write_p2m_entry(p2m, gfn, pent, e, level + 1);   in do_recalc()
   452  err = p2m->write_p2m_entry(p2m, gfn, pent, e, level + 1);   in do_recalc()
   476  p2m_lock(p2m);   in p2m_pt_handle_deferred_changes()
   478  p2m_unlock(p2m);   in p2m_pt_handle_deferred_changes()
   596  rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 3);   in p2m_pt_set_entry()
   633  rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 1);   in p2m_pt_set_entry()
   668  rc = p2m->write_p2m_entry(p2m, gfn, p2m_entry, entry_content, 2);   in p2m_pt_set_entry()
   [all …]
mem_paging.c
   119  gfn_lock(p2m, gfn, 0);   in p2m_mem_paging_populate()
   120  mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);   in p2m_mem_paging_populate()
   130  gfn_unlock(p2m, gfn, 0);   in p2m_mem_paging_populate()
   184  mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);   in p2m_mem_paging_resume()
   228  gfn_lock(p2m, gfn, 0);   in nominate()
   230  mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);   in nominate()
   257  gfn_unlock(p2m, gfn, 0);   in nominate()
   288  gfn_lock(p2m, gfn, 0);   in evict()
   291  mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);   in evict()
   358  gfn_lock(p2m, gfn, 0);   in prepare()
   [all …]
Makefile
    10  obj-y += p2m.o p2m-pt.o
    11  obj-$(CONFIG_HVM) += p2m-ept.o p2m-pod.o
mem_sharing.c
  1341  if ( p2m == NULL )   in relinquish_shared_pages()
  1344  p2m_lock(p2m);   in relinquish_shared_pages()
  1356  mfn = p2m->get_entry(p2m, _gfn(gfn), &t, &a, 0, NULL, NULL);   in relinquish_shared_pages()
  1366  set_rc = p2m->set_entry(p2m, _gfn(gfn), INVALID_MFN, PAGE_ORDER_4K,   in relinquish_shared_pages()
  1387  p2m_unlock(p2m);   in relinquish_shared_pages()
  1544  return p2m->set_entry(p2m, gfn, new_mfn, PAGE_ORDER_4K, p2m_ram_rw,   in mem_sharing_fork_page()
  1604  ret = p2m->set_entry(p2m, gfn, new_vcpu_info_mfn,   in copy_vcpu_settings()
  1692  rc = p2m->set_entry(p2m, _gfn(value), new_mfn, PAGE_ORDER_4K,   in copy_special_pages()
  1713  rc = p2m->set_entry(p2m, new_gfn, INVALID_MFN, PAGE_ORDER_4K,   in copy_special_pages()
  1722  rc = p2m->set_entry(p2m, old_gfn, new_mfn, PAGE_ORDER_4K,   in copy_special_pages()
   [all …]
altp2m.c
    41  struct p2m_domain *p2m;   in altp2m_vcpu_destroy() local
    46  if ( (p2m = p2m_get_altp2m(v)) )   in altp2m_vcpu_destroy()
    47  atomic_dec(&p2m->active_vcpus);   in altp2m_vcpu_destroy()
guest_walk.c
    84  guest_walk_tables(const struct vcpu *v, struct p2m_domain *p2m,   in guest_walk_tables() argument
   164  l3p = map_domain_gfn(p2m,   in guest_walk_tables()
   255  l2p = map_domain_gfn(p2m,   in guest_walk_tables()
   354  l1p = map_domain_gfn(p2m,   in guest_walk_tables()
/xen/xen/arch/arm/
p2m.c
    68  p2m_read_lock(p2m);   in p2m_dump_info()
    71  BUG_ON(p2m->stats.mappings[0] || p2m->stats.shattered[0]);   in p2m_dump_info()
    73  p2m->stats.mappings[1], p2m->stats.shattered[1]);   in p2m_dump_info()
    75  p2m->stats.mappings[2], p2m->stats.shattered[2]);   in p2m_dump_info()
    91  p2m->root, mfn_x(page_to_mfn(p2m->root)));   in dump_p2m_lookup()
   564  e->p2m.read = e->p2m.write = 0;   in p2m_set_permission()
   577  .p2m.af = 1,   in mfn_to_p2m_entry()
  1043  p2m->max_mapped_gfn = gfn_max(p2m->max_mapped_gfn,   in __p2m_set_entry()
  1169  p2m_invalidate_table(p2m, page_to_mfn(p2m->root + i));   in p2m_invalidate_root()
  1407  p2m->vttbr = generate_vttbr(p2m->vmid, page_to_mfn(p2m->root));   in p2m_alloc_table()
   [all …]
mem_access.c
    49  ASSERT(p2m_is_locked(p2m));   in __p2m_get_mem_access()
    53  if ( !p2m->mem_access_enabled )   in __p2m_get_mem_access()
   148  p2m_read_lock(p2m);   in p2m_mem_access_check_and_get_page()
   222  p2m_read_unlock(p2m);   in p2m_mem_access_check_and_get_page()
   237  if ( !p2m->mem_access_enabled )   in p2m_mem_access_check()
   384  a = p2m->default_access;   in p2m_set_mem_access()
   399  p2m->default_access = a;   in p2m_set_mem_access()
   403  p2m_write_lock(p2m);   in p2m_set_mem_access()
   429  p2m_write_unlock(p2m);   in p2m_set_mem_access()
   453  p2m_read_lock(p2m);   in p2m_get_mem_access()
   [all …]
/xen/xen/include/asm-x86/
p2m.h
   395  return p2m->p2m_class == p2m_host;   in p2m_is_hostp2m()
   400  return p2m->p2m_class == p2m_nested;   in p2m_is_nestedp2m()
   405  return p2m->p2m_class == p2m_alternate;   in p2m_is_altp2m()
   408  #define p2m_get_pagetable(p2m) ((p2m)->phys_table)   argument
   589  int p2m_alloc_table(struct p2m_domain *p2m);
   592  void p2m_teardown(struct p2m_domain *p2m);
   689  return p2m->pod.entry_count;   in p2m_pod_entry_count()
   692  void p2m_pod_init(struct p2m_domain *p2m);
   957  p2m->ioreq.entry_count++;   in p2m_entry_modify()
   979  ASSERT(p2m->ioreq.entry_count > 0);   in p2m_entry_modify()
   [all …]
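The first three hits in p2m.h (lines 395, 400 and 405) are the bodies of the p2m class predicates: each one just compares the p2m_class tag stored in struct p2m_domain against one of the three classes (host, nested, alternate). A compilable reconstruction is below; the enum and the one-field struct are minimal stand-ins added so the predicates build on their own, not the real Xen declarations.

    #include <stdbool.h>
    #include <assert.h>

    /* Stand-ins so the predicates compile on their own; in Xen the class
     * tag lives in the much larger struct p2m_domain. */
    typedef enum { p2m_host, p2m_nested, p2m_alternate } p2m_class_t;
    struct p2m_domain { p2m_class_t p2m_class; };

    /* The three class predicates seen at lines 395/400/405: each one simply
     * compares the stored class tag. */
    static inline bool p2m_is_hostp2m(const struct p2m_domain *p2m)
    {
        return p2m->p2m_class == p2m_host;
    }
    static inline bool p2m_is_nestedp2m(const struct p2m_domain *p2m)
    {
        return p2m->p2m_class == p2m_nested;
    }
    static inline bool p2m_is_altp2m(const struct p2m_domain *p2m)
    {
        return p2m->p2m_class == p2m_alternate;
    }

    int main(void)
    {
        struct p2m_domain host = { .p2m_class = p2m_host };
        assert(p2m_is_hostp2m(&host) &&
               !p2m_is_nestedp2m(&host) &&
               !p2m_is_altp2m(&host));
        return 0;
    }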
paging.h
   129  struct p2m_domain *p2m,
   133  struct p2m_domain *p2m,
   140  int (*write_p2m_entry )(struct p2m_domain *p2m,
   288  struct p2m_domain *p2m = v->domain->arch.p2m;   in paging_ga_to_gfn_cr3() local
   289  return paging_get_hostmode(v)->p2m_ga_to_gfn(v, p2m, cr3, ga, pfec,   in paging_ga_to_gfn_cr3()
   362  int paging_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,
/xen/xen/arch/x86/mm/hap/
nested_hap.c
    78  struct domain *d = p2m->domain;   in nestedp2m_write_p2m_entry()
    87  guest_flush_tlb_mask(d, p2m->dirty_cpumask);   in nestedp2m_write_p2m_entry()
    98  nestedhap_fix_p2m(struct vcpu *v, struct p2m_domain *p2m,   in nestedhap_fix_p2m() argument
   106  ASSERT(p2m);   in nestedhap_fix_p2m()
   107  ASSERT(p2m->set_entry);   in nestedhap_fix_p2m()
   108  ASSERT(p2m_locked_by_me(p2m));   in nestedhap_fix_p2m()
   125  domain_crash(p2m->domain);   in nestedhap_fix_p2m()
   183  __put_gfn(p2m, L1_gpa >> PAGE_SHIFT);   in nestedhap_walk_L0_p2m()
   199  struct p2m_domain *p2m, *nested_p2m;   in nestedhvm_hap_nested_page_fault() local
   205  p2m = p2m_get_hostp2m(d); /* L0 p2m */   in nestedhvm_hap_nested_page_fault()
   [all …]
guest_walk.c
    43  struct vcpu *v, struct p2m_domain *p2m, unsigned long gva, uint32_t *pfec)   in hap_gva_to_gfn()
    46  return hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(v, p2m, cr3, gva, pfec, NULL);   in hap_gva_to_gfn()
    50  struct vcpu *v, struct p2m_domain *p2m, unsigned long cr3,   in hap_p2m_ga_to_gfn()
    63  top_page = p2m_get_page_from_gfn(p2m, top_gfn, &p2mt, NULL,   in hap_p2m_ga_to_gfn()
    67  ASSERT(p2m_is_hostp2m(p2m));   in hap_p2m_ga_to_gfn()
    71  p2m_mem_paging_populate(p2m->domain, gaddr_to_gfn(cr3));   in hap_p2m_ga_to_gfn()
    94  walk_ok = guest_walk_tables(v, p2m, ga, &gw, *pfec,   in hap_p2m_ga_to_gfn()
   105  page = p2m_get_page_from_gfn(p2m, gfn, &p2mt, NULL,   in hap_p2m_ga_to_gfn()
   111  ASSERT(p2m_is_hostp2m(p2m));   in hap_p2m_ga_to_gfn()
   113  p2m_mem_paging_populate(p2m->domain, gfn);   in hap_p2m_ga_to_gfn()
private.h
    28  struct p2m_domain *p2m,
    32  struct p2m_domain *p2m,
    36  struct p2m_domain *p2m,
    41  struct p2m_domain *p2m, unsigned long cr3,
    44  struct p2m_domain *p2m, unsigned long cr3,
    47  struct p2m_domain *p2m, unsigned long cr3,
nested_ept.c
   156  struct p2m_domain *p2m = d->arch.p2m;   in nept_walk_tables() local
   165  lxp = map_domain_gfn(p2m, base_gfn, &lxmfn, P2M_ALLOC, &rc);   in nept_walk_tables()
hap.c
   184  struct p2m_domain *p2m = p2m_get_hostp2m(d);   in hap_enable_log_dirty() local
   190  if ( log_global && read_atomic(&p2m->ioreq.entry_count) )   in hap_enable_log_dirty()
   777  hap_write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, l1_pgentry_t *p,   in hap_write_p2m_entry() argument
   780  struct domain *d = p2m->domain;   in hap_write_p2m_entry()
   804  rc = p2m_entry_modify(p2m, p2m_flags_to_type(l1e_get_flags(new)),   in hap_write_p2m_entry()
   826  struct vcpu *v, struct p2m_domain *p2m, unsigned long gva, uint32_t *pfec)   in hap_gva_to_gfn_real_mode() argument
   832  struct vcpu *v, struct p2m_domain *p2m, unsigned long cr3,   in hap_p2m_ga_to_gfn_real_mode() argument
/xen/xen/include/asm-arm/
p2m.h
   204  write_lock(&p2m->lock);   in p2m_write_lock()
   207  void p2m_write_unlock(struct p2m_domain *p2m);
   211  read_lock(&p2m->lock);   in p2m_read_lock()
   216  read_unlock(&p2m->lock);   in p2m_read_unlock()
   221  return rw_is_locked(&p2m->lock);   in p2m_is_locked()
   226  return rw_is_write_locked(&p2m->lock);   in p2m_is_write_locked()
   229  void p2m_tlb_flush_sync(struct p2m_domain *p2m);
   238  mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
   247  int p2m_set_entry(struct p2m_domain *p2m,
   256  void p2m_invalidate_root(struct p2m_domain *p2m);
   [all …]
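The ARM p2m.h hits show that the p2m locking helpers are thin wrappers around a reader/writer lock embedded in struct p2m_domain: lookups take it for read, mapping changes take it for write, and p2m_is_locked()/p2m_is_write_locked() merely query it. The sketch below illustrates that discipline with POSIX rwlocks standing in for Xen's own rwlock primitives; the struct and the main() harness are likewise invented for the example.

    #include <pthread.h>
    #include <stdio.h>

    /* Illustrative stand-in: in Xen the lock lives in struct p2m_domain and
     * uses the hypervisor's rwlock API (read_lock/write_lock/rw_is_locked). */
    struct p2m_domain {
        pthread_rwlock_t lock;
    };

    static void p2m_read_lock(struct p2m_domain *p2m)    { pthread_rwlock_rdlock(&p2m->lock); }
    static void p2m_read_unlock(struct p2m_domain *p2m)  { pthread_rwlock_unlock(&p2m->lock); }
    static void p2m_write_lock(struct p2m_domain *p2m)   { pthread_rwlock_wrlock(&p2m->lock); }
    static void p2m_write_unlock(struct p2m_domain *p2m) { pthread_rwlock_unlock(&p2m->lock); }

    int main(void)
    {
        struct p2m_domain p2m = { .lock = PTHREAD_RWLOCK_INITIALIZER };

        p2m_read_lock(&p2m);      /* many concurrent lookups may hold this */
        puts("lookup under read lock");
        p2m_read_unlock(&p2m);

        p2m_write_lock(&p2m);     /* exclusive: changing the p2m mappings */
        puts("update under write lock");
        p2m_write_unlock(&p2m);
        return 0;
    }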
/xen/tools/libxc/
xc_resume.c
   148  xen_pfn_t *p2m = NULL;   in xc_domain_resume_any() local
   208  p2m = xc_map_foreign_pages(xch, domid, PROT_READ,   in xc_domain_resume_any()
   211  if ( p2m == NULL )   in xc_domain_resume_any()
   233  start_info->store_mfn = p2m[start_info->store_mfn];   in xc_domain_resume_any()
   234  start_info->console.domU.mfn = p2m[start_info->console.domU.mfn];   in xc_domain_resume_any()
   254  if (p2m)   in xc_domain_resume_any()
   255  munmap(p2m, P2M_FL_ENTRIES*PAGE_SIZE);   in xc_domain_resume_any()
xg_private.h
   110  static inline xen_pfn_t xc_pfn_to_mfn(xen_pfn_t pfn, xen_pfn_t *p2m,   in xc_pfn_to_mfn() argument
   115  return ((uint64_t *)p2m)[pfn];   in xc_pfn_to_mfn()
   119  uint32_t mfn = ((uint32_t *)p2m)[pfn];   in xc_pfn_to_mfn()
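The three xg_private.h hits outline xc_pfn_to_mfn(): the guest's p2m table holds either 32-bit or 64-bit entries, so the lookup has to index it with the guest's width and widen 32-bit entries on the way out. A hedged reconstruction follows; the width parameter's name and the xen_pfn_t/INVALID_MFN stand-ins are assumptions made so the snippet compiles on its own, not the verbatim libxc definitions.

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t xen_pfn_t;                 /* stand-in for the libxc type   */
    #define INVALID_MFN (~(xen_pfn_t)0)         /* stand-in for libxc's constant */

    /* Reconstruction of the xc_pfn_to_mfn() shape visible above; "gwidth"
     * (guest width in bytes) is a name assumed for this sketch. */
    static inline xen_pfn_t xc_pfn_to_mfn(xen_pfn_t pfn, xen_pfn_t *p2m,
                                          unsigned int gwidth)
    {
        if ( gwidth == sizeof(uint64_t) )
            /* 64-bit guest: entries are already full width. */
            return ((uint64_t *)p2m)[pfn];

        /* 32-bit guest: widen the entry, mapping ~0U to INVALID_MFN. */
        uint32_t mfn = ((uint32_t *)p2m)[pfn];
        return mfn == ~0U ? INVALID_MFN : mfn;
    }

    int main(void)
    {
        uint32_t p2m32[2] = { 0x1234, ~0U };
        printf("%#llx\n", (unsigned long long)
               xc_pfn_to_mfn(1, (xen_pfn_t *)p2m32, sizeof(uint32_t)));
        return 0;
    }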
xc_sr_restore_x86_pv.c
     9  return xc_pfn_to_mfn(pfn, ctx->x86.pv.p2m, ctx->x86.pv.width);   in pfn_to_mfn()
    25  xen_pfn_t *p2m = NULL, *p2m_pfns = NULL;   in expand_p2m() local
    32  p2m = realloc(ctx->x86.pv.p2m, p2msz);   in expand_p2m()
    33  if ( !p2m )   in expand_p2m()
    38  ctx->x86.pv.p2m = p2m;   in expand_p2m()
   572  memcpy(guest_p2m, ctx->x86.pv.p2m,   in update_guest_p2m()
   956  ((uint64_t *)ctx->x86.pv.p2m)[pfn] = mfn == INVALID_MFN ? ~0ULL : mfn;   in x86_pv_set_gfn()
   959  ((uint32_t *)ctx->x86.pv.p2m)[pfn] = mfn;   in x86_pv_set_gfn()
  1156  free(ctx->x86.pv.p2m);   in x86_pv_cleanup()
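The expand_p2m() hits above (lines 25, 32, 33 and 38) show the usual grow-on-demand pattern: realloc into a temporary pointer, bail out if allocation fails so the old array stays usable, then publish the new pointer back into the restore context. A self-contained sketch of that pattern follows; the context structure, the sizing and the invalid-entry fill are illustrative guesses, not the real migration-stream code.

    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>

    typedef uint64_t xen_pfn_t;

    /* Minimal stand-in for the restore context fields used here. */
    struct ctx {
        xen_pfn_t *p2m;
        size_t max_pfn;        /* highest pfn currently representable */
    };

    /* Grow-on-demand sketch in the style of expand_p2m(): realloc into a
     * temporary so the old array is not lost on failure, mark the new tail
     * invalid, then publish the new pointer. */
    static int expand_p2m(struct ctx *ctx, size_t new_max)
    {
        if ( new_max <= ctx->max_pfn )
            return 0;

        size_t p2msz = (new_max + 1) * sizeof(xen_pfn_t);
        xen_pfn_t *p2m = realloc(ctx->p2m, p2msz);
        if ( !p2m )
            return -1;                           /* old ctx->p2m still valid */

        memset(&p2m[ctx->max_pfn + 1], 0xff,     /* mark new entries invalid */
               (new_max - ctx->max_pfn) * sizeof(xen_pfn_t));

        ctx->p2m = p2m;
        ctx->max_pfn = new_max;
        return 0;
    }

    int main(void)
    {
        struct ctx ctx = { 0 };
        int rc = expand_p2m(&ctx, 1023);
        free(ctx.p2m);
        return rc;
    }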
/xen/xen/arch/x86/hvm/
nestedhvm.c
   109  nestedhvm_vmcx_flushtlb(struct p2m_domain *p2m)   in nestedhvm_vmcx_flushtlb() argument
   111  on_selected_cpus(p2m->dirty_cpumask, nestedhvm_flushtlb_ipi,   in nestedhvm_vmcx_flushtlb()
   112  p2m->domain, 1);   in nestedhvm_vmcx_flushtlb()
   113  cpumask_clear(p2m->dirty_cpumask);   in nestedhvm_vmcx_flushtlb()
/xen/xen/arch/x86/mm/shadow/
none.c
    46  static unsigned long _gva_to_gfn(struct vcpu *v, struct p2m_domain *p2m,   in _gva_to_gfn() argument
    63  static int _write_p2m_entry(struct p2m_domain *p2m, unsigned long gfn,   in _write_p2m_entry() argument