/xen/xen/include/asm-arm/

  p2m.h
    51: gfn_t max_mapped_gfn;
    58: gfn_t lowest_mapped_gfn;
    248: gfn_t sgfn,
    264: int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end);
    277: gfn_t gfn,
    283: gfn_t gfn,
    288: gfn_t gfn,
    293: gfn_t gfn,
    300: gfn_t gfn,  in guest_physmap_add_page()
    307: mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn);
    [all …]

  grant_table.h
    15: gfn_t *shared_gfn;
    16: gfn_t *status_gfn;
    54: (gt)->arch.shared_gfn = xmalloc_array(gfn_t, ngf_); \
    55: (gt)->arch.status_gfn = xmalloc_array(gfn_t, nsf_); \

  vpl011.h
    62: gfn_t gfn;

/xen/xen/include/xen/

  p2m-common.h
    8: guest_physmap_remove_page(struct domain *d, gfn_t gfn, mfn_t mfn,
    15: gfn_t start_gfn,
    19: gfn_t start_gfn,
    32: p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn,
    35: int __must_check check_get_page_from_gfn(struct domain *d, gfn_t gfn,

  grant_table.h
    55: gfn_t *gfn, uint16_t *status);
    57: int gnttab_map_frame(struct domain *d, unsigned long idx, gfn_t gfn,
    85: gfn_t *gfn, uint16_t *status)  in mem_sharing_gref_to_gfn()
    91: gfn_t gfn, mfn_t *mfn)  in gnttab_map_frame()

  mm.h
    109: #ifndef gfn_t
    110: #define gfn_t /* Grep fodder: gfn_t, _gfn() and gfn_x() are defined above */ macro
    113: #undef gfn_t
    118: static inline gfn_t gfn_add(gfn_t gfn, unsigned long i)  in gfn_add()
    123: static inline gfn_t gfn_max(gfn_t x, gfn_t y)  in gfn_max()
    128: static inline gfn_t gfn_min(gfn_t x, gfn_t y)  in gfn_min()
    133: static inline bool_t gfn_eq(gfn_t x, gfn_t y)  in gfn_eq()
    611: unsigned long idx, gfn_t gfn);

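The mm.h hits above cover the whole typesafe-gfn helper set (gfn_add, gfn_max, gfn_min, gfn_eq), and the "Grep fodder" comment names the _gfn()/gfn_x() accessors. A minimal sketch of how such a wrapper hangs together, assuming gfn_t wraps a single unsigned long (the listing shows the helper signatures, not the wrapper layout):

    #include <stdbool.h>

    /* Minimal sketch, not the literal Xen definitions: a typesafe wrapper
     * around a raw guest frame number, plus the helpers the listing shows. */
    typedef struct { unsigned long gfn; } gfn_t;

    static inline gfn_t _gfn(unsigned long g)  { return (gfn_t){ g }; }
    static inline unsigned long gfn_x(gfn_t g) { return g.gfn; }

    static inline gfn_t gfn_add(gfn_t gfn, unsigned long i)
    {
        return _gfn(gfn_x(gfn) + i);   /* arithmetic only via the raw value */
    }

    static inline gfn_t gfn_max(gfn_t x, gfn_t y)
    {
        return gfn_x(x) > gfn_x(y) ? x : y;
    }

    static inline gfn_t gfn_min(gfn_t x, gfn_t y)
    {
        return gfn_x(x) < gfn_x(y) ? x : y;
    }

    static inline bool gfn_eq(gfn_t x, gfn_t y)   /* mm.h spells this bool_t */
    {
        return gfn_x(x) == gfn_x(y);
    }

The many "gfn_t gfn = _gfn(gfn_l);" hits in arch/x86/mm/p2m.c further down are the same wrapper applied at API boundaries: a raw unsigned long frame number is wrapped once and stays typed from there on.
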
  mem_access.h
    77: long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
    91: int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access,

/xen/xen/include/asm-x86/

  guest_pt.h
    36: gfn_to_paddr(gfn_t gfn)  in gfn_to_paddr()
    77: static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)  in guest_l1e_get_gfn()
    79: static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)  in guest_l2e_get_gfn()
    92: static inline guest_l1e_t guest_l1e_from_gfn(gfn_t gfn, u32 flags)  in guest_l1e_from_gfn()
    94: static inline guest_l2e_t guest_l2e_from_gfn(gfn_t gfn, u32 flags)  in guest_l2e_from_gfn()
    150: static inline gfn_t guest_l1e_get_gfn(guest_l1e_t gl1e)  in guest_l1e_get_gfn()
    152: static inline gfn_t guest_l2e_get_gfn(guest_l2e_t gl2e)  in guest_l2e_get_gfn()
    154: static inline gfn_t guest_l3e_get_gfn(guest_l3e_t gl3e)  in guest_l3e_get_gfn()
    157: static inline gfn_t guest_l4e_get_gfn(guest_l4e_t gl4e)  in guest_l4e_get_gfn()
    369: static inline gfn_t guest_walk_to_gfn(const walk_t *gw)  in guest_walk_to_gfn()
    [all …]

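guest_pt.h:36 (gfn_to_paddr) is the usual frame-number-to-address conversion. A hedged sketch, assuming 4K pages (PAGE_SHIFT 12) and a 64-bit paddr_t; neither value is shown in the listing:

    #include <stdint.h>

    #define PAGE_SHIFT 12                          /* assumed 4K pages */
    typedef uint64_t paddr_t;                      /* assumed address width */
    typedef struct { unsigned long gfn; } gfn_t;   /* as sketched under mm.h */
    static inline unsigned long gfn_x(gfn_t g) { return g.gfn; }

    /* Guest frame number -> guest-physical address of the frame's first byte. */
    static inline paddr_t gfn_to_paddr(gfn_t gfn)
    {
        return (paddr_t)gfn_x(gfn) << PAGE_SHIFT;
    }
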
  p2m.h
    242: gfn_t gfn,
    248: gfn_t gfn,
    321: gfn_t reclaim_single; /* Last gfn of a scan */
    530: gfn_t first_gfn, second_gfn;
    535: static inline void get_two_gfns(struct domain *rd, gfn_t rgfn,  in get_two_gfns()
    596: int guest_physmap_add_entry(struct domain *d, gfn_t gfn,
    631: gfn_t first_gfn,
    641: int set_mmio_p2m_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
    737: void p2m_mem_paging_populate(struct domain *d, gfn_t gfn);
    888: gfn_t old_gfn, gfn_t new_gfn);
    [all …]

  mem_access.h
    49: int p2m_set_suppress_ve(struct domain *d, gfn_t gfn, bool suppress_ve,
    56: int p2m_get_suppress_ve(struct domain *d, gfn_t gfn, bool *suppress_ve,

  mem_sharing.h
    85: int mem_sharing_fork_page(struct domain *d, gfn_t gfn,
    146: static inline int mem_sharing_fork_page(struct domain *d, gfn_t gfn, bool lock)  in mem_sharing_fork_page()

  altp2m.h
    37: int altp2m_vcpu_enable_ve(struct vcpu *v, gfn_t gfn);

/xen/xen/arch/arm/

  p2m.c
    875: gfn_t sgfn,  in __p2m_set_entry()
    1079: gfn_t sgfn,  in p2m_set_entry()
    1315: gfn_t gfn,  in map_regions_p2mt()
    1324: gfn_t gfn,  in unmap_regions_p2mt()
    1332: gfn_t start_gfn,  in map_mmio_regions()
    1348: gfn_t gfn,  in map_dev_mmio_region()
    1574: gfn_t start, end;  in relinquish_p2m_mapping()
    1627: int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end)  in p2m_cache_flush_range()
    1630: gfn_t next_block_gfn;  in p2m_cache_flush_range()
    1631: gfn_t start = *pstart;  in p2m_cache_flush_range()
    [all …]

  mem_access.c
    27: static int __p2m_get_mem_access(struct domain *d, gfn_t gfn,  in __p2m_get_mem_access()
    108: gfn_t gfn;  in p2m_mem_access_check_and_get_page()
    354: long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,  in p2m_set_mem_access()
    444: int p2m_get_mem_access(struct domain *d, gfn_t gfn,  in p2m_get_mem_access()

  domctl.c
    55: gfn_t s = _gfn(domctl->u.cacheflush.start_pfn);  in arch_do_domctl()
    56: gfn_t e = gfn_add(s, domctl->u.cacheflush.nr_pfns);  in arch_do_domctl()

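domctl.c:55-56 show the common way a raw (start_pfn, nr_pfns) pair from a hypercall becomes a typed gfn range. A small self-contained sketch of that pattern; the struct name cacheflush_args and the flush loop are placeholders (the real code reads domctl->u.cacheflush), only the _gfn()/gfn_add() usage mirrors the listing:

    typedef struct { unsigned long gfn; } gfn_t;   /* as sketched under mm.h */
    static inline gfn_t _gfn(unsigned long g)  { return (gfn_t){ g }; }
    static inline unsigned long gfn_x(gfn_t g) { return g.gfn; }
    static inline gfn_t gfn_add(gfn_t g, unsigned long i) { return _gfn(gfn_x(g) + i); }

    /* Hypothetical stand-in for the cacheflush domctl payload. */
    struct cacheflush_args {
        unsigned long start_pfn;
        unsigned long nr_pfns;
    };

    static void cacheflush_range(const struct cacheflush_args *args)
    {
        gfn_t s = _gfn(args->start_pfn);       /* wrap the raw frame number once */
        gfn_t e = gfn_add(s, args->nr_pfns);   /* exclusive end of the range */

        /* ... walk [s, e) with gfn_add(), comparing via gfn_x() ... */
        (void)e;
    }
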
/xen/xen/arch/x86/mm/

  mem_paging.c
    39: void p2m_mem_paging_drop_page(struct domain *d, gfn_t gfn, p2m_type_t p2mt)  in p2m_mem_paging_drop_page()
    91: void p2m_mem_paging_populate(struct domain *d, gfn_t gfn)  in p2m_mem_paging_populate()
    181: gfn_t gfn = _gfn(rsp->u.mem_access.gfn);  in p2m_mem_paging_resume()
    219: static int nominate(struct domain *d, gfn_t gfn)  in nominate()
    279: static int evict(struct domain *d, gfn_t gfn)  in evict()
    345: static int prepare(struct domain *d, gfn_t gfn,  in prepare()

  p2m.c
    491: gfn_t gfn = _gfn(gfn_l);  in __get_gfn_type_access()
    553: struct p2m_domain *p2m, gfn_t gfn,  in p2m_get_page_from_gfn()
    879: gfn_t ogfn;  in guest_physmap_add_entry()
    1033: gfn_t gfn = _gfn(gfn_l);  in p2m_change_type_one()
    1272: gfn_t gfn = _gfn(gfn_l);  in set_typed_p2m_entry()
    1346: gfn_t gfn = _gfn(gfn_l);  in set_identity_p2m_entry()
    1395: gfn_t gfn = _gfn(gfn_l);  in clear_mmio_p2m_entry()
    1437: gfn_t gfn = _gfn(gfn_l);  in clear_identity_p2m_entry()
    1475: gfn_t gfn = _gfn(gfn_l);  in set_shared_p2m_entry()
    2045: gfn_t gfn;  in p2m_altp2m_get_or_propagate()
    [all …]

  mem_access.c
    41: static int _p2m_get_mem_access(struct p2m_domain *p2m, gfn_t gfn,  in _p2m_get_mem_access()
    148: gfn_t gfn = gaddr_to_gfn(gpa);  in p2m_mem_access_check()
    267: gfn_t gfn)  in p2m_set_altp2m_mem_access()
    289: gfn_t gfn)  in set_mem_access()
    355: long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,  in p2m_set_mem_access()
    482: int p2m_get_mem_access(struct domain *d, gfn_t gfn, xenmem_access_t *access,  in p2m_get_mem_access()

  p2m-pod.c
    494: p2m_pod_zero_check_superpage(struct p2m_domain *p2m, gfn_t gfn);
    507: p2m_pod_decrease_reservation(struct domain *d, gfn_t gfn, unsigned int order)  in p2m_pod_decrease_reservation()
    685: p2m_pod_zero_check_superpage(struct p2m_domain *p2m, gfn_t gfn)  in p2m_pod_zero_check_superpage()
    862: p2m_pod_zero_check(struct p2m_domain *p2m, const gfn_t *gfns, unsigned int count)  in p2m_pod_zero_check()
    1018: gfn_t gfns[POD_SWEEP_STRIDE];  in p2m_pod_emergency_sweep()
    1084: gfn_t gfn = _gfn(mrp->list[idx]);  in pod_eager_reclaim()
    1109: static void pod_eager_record(struct p2m_domain *p2m, gfn_t gfn,  in pod_eager_record()
    1122: p2m_pod_demand_populate(struct p2m_domain *p2m, gfn_t gfn,  in p2m_pod_demand_populate()
    1127: gfn_t gfn_aligned = _gfn((gfn_x(gfn) >> order) << order);  in p2m_pod_demand_populate()
    1275: gfn_t gfn = _gfn(gfn_l);  in guest_physmap_mark_populate_on_demand()

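p2m-pod.c:1127 rounds a gfn down to the start of its 2^order block before populating on demand: the shift-down/shift-up happens on the raw value and the result is re-wrapped. Sketched below with a hypothetical helper name (gfn_align_down); the expression itself is taken verbatim from the listing:

    typedef struct { unsigned long gfn; } gfn_t;   /* as sketched under mm.h */
    static inline gfn_t _gfn(unsigned long g)  { return (gfn_t){ g }; }
    static inline unsigned long gfn_x(gfn_t g) { return g.gfn; }

    /* Round gfn down to the first frame of its 2^order-sized block,
     * e.g. order 9 gives the base of a 2M superpage worth of 4K frames. */
    static inline gfn_t gfn_align_down(gfn_t gfn, unsigned int order)
    {
        return _gfn((gfn_x(gfn) >> order) << order);
    }
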
  guest_walk.c
    86: gfn_t top_gfn, mfn_t top_mfn, void *top_map)  in guest_walk_tables()
    204: gfn_t start = guest_l3e_get_gfn(gw->l3e);  in guest_walk_tables()
    318: gfn_t start = _gfn(unfold_pse36(gw->l2e.l2) >> PAGE_SHIFT);  in guest_walk_tables()
    320: gfn_t start = guest_l2e_get_gfn(gw->l2e);  in guest_walk_tables()

/xen/xen/arch/x86/mm/hap/

  guest_walk.c
    58: gfn_t top_gfn;  in hap_p2m_ga_to_gfn()
    102: gfn_t gfn = guest_walk_to_gfn(&gw);  in hap_p2m_ga_to_gfn()

/xen/xen/include/asm-x86/hvm/

  cacheattr.h
    15: int hvm_get_mem_pinned_cacheattr(struct domain *d, gfn_t gfn,

  monitor.h
    52: bool hvm_monitor_check_p2m(unsigned long gla, gfn_t gfn, uint32_t pfec,

/xen/xen/arch/x86/

  debug.c
    30: dbg_hvm_va2mfn(dbgva_t vaddr, struct domain *dp, int toaddr, gfn_t *gfn)  in dbg_hvm_va2mfn()
    120: gfn_t gfn = INVALID_GFN;  in dbg_rw_guest_mem()

/xen/xen/arch/arm/vgic/

  vgic.h
    71: int vgic_register_dist_iodev(struct domain *d, gfn_t dist_base_fn,