Searched refs:pg (Results 1–25 of 48), sorted by relevance

/xen/xen/common/
page_alloc.c
151 #define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL) argument
405 if ( pg >= r->e || pg < r->s ) in alloc_boot_pages()
445 #define page_to_zone(pg) (is_xen_heap_page(pg) ? MEMZONE_XEN : \ argument
956 if ( !pg ) in alloc_heap_pages()
1006 pg[i].count_info, pg[i].v.free.order, in alloc_heap_pages()
1065 return pg; in alloc_heap_pages()
1336 st.pg = pg; in scrub_free_pages()
1421 pg[i].count_info, pg[i].v.free.order, in free_heap_pages()
1782 pg++; in init_heap_pages()
2317 page_list_add_tail(&pg[i], page_to_list(d, &pg[i])); in assign_pages()
[all …]
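
Note: the allocator hits above reflect a Xen convention worth knowing when reading these results: an order-n allocation returns a pointer to the first of 1 << n consecutive frame_table entries, which is why call sites such as assign_pages() index pg like an array. A minimal sketch of that consumer side (owner d and order are placeholders, and the count_info/ownership bookkeeping assign_pages() also performs is elided):

    struct page_info *pg = alloc_domheap_pages(d, order, 0); /* 1 << order pages */

    if ( pg )
        for ( unsigned int i = 0; i < (1u << order); i++ )
            page_list_add_tail(&pg[i], page_to_list(d, &pg[i]));
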
vmap.c
37 struct page_info *pg = alloc_domheap_page(NULL, 0); in vm_init_type() local
65 struct page_info *pg; in vm_alloc() local
100 pg = alloc_domheap_page(NULL, 0); in vm_alloc()
101 if ( !pg ) in vm_alloc()
120 free_domheap_page(pg); in vm_alloc()
246 struct page_info *pg; in vmalloc_type() local
258 pg = alloc_domheap_page(NULL, 0); in vmalloc_type()
259 if ( pg == NULL ) in vmalloc_type()
261 mfn[i] = page_to_mfn(pg); in vmalloc_type()
305 struct page_info *pg; in vfree() local
[all …]
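
Note: vm_alloc() and vmalloc_type() above follow the standard domheap shape: allocate an anonymous page (NULL owner, no flags), bail out on NULL, and release the page on any later failure. A minimal sketch of that shape; install_backing() and the error code are hypothetical:

    struct page_info *pg = alloc_domheap_page(NULL, 0);

    if ( !pg )
        return -ENOMEM;
    if ( install_backing(page_to_mfn(pg)) )  /* hypothetical mapping step */
    {
        free_domheap_page(pg);               /* undo on error, as vm_alloc() does */
        return -ENOMEM;
    }
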
grant_table.c
1113 if ( !pg ) in map_grant_ref()
1116 if ( pg ) in map_grant_ref()
1128 if ( pg ) in map_grant_ref()
1130 put_page(pg); in map_grant_ref()
1270 put_page_type(pg); in map_grant_ref()
1273 put_page(pg); in map_grant_ref()
1508 struct page_info *pg; in unmap_common_complete() local
1533 pg = mfn_to_page(op->mfn); in unmap_common_complete()
1540 put_page(pg); in unmap_common_complete()
1559 put_page(pg); in unmap_common_complete()
[all …]
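
Note: the put_page()/put_page_type() pairs above are the two halves of Xen's page reference counting: a general reference (get_page()/put_page()) keeps the frame allocated, while a type reference (get_page_type()/put_page_type()) pins its current use. Error paths drop them in reverse order of acquisition, as in map_grant_ref():

    put_page_type(pg);   /* release the type pin taken via get_page_type() */
    put_page(pg);        /* release the general reference on the frame     */
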
/xen/xen/include/xen/
domain_page.h
47 #define __map_domain_page(pg) map_domain_page(page_to_mfn(pg)) argument
49 static inline void *__map_domain_page_global(const struct page_info *pg) in __map_domain_page_global() argument
51 return map_domain_page_global(page_to_mfn(pg)); in __map_domain_page_global()
57 #define __map_domain_page(pg) page_to_virt(pg) argument
66 static inline void *__map_domain_page_global(const struct page_info *pg) in __map_domain_page_global() argument
68 return page_to_virt(pg); in __map_domain_page_global()
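
Note: both builds of __map_domain_page() hand back a linear address for a struct page_info *: the first maps the frame on demand, the second (when a direct map covers all memory) is pure address arithmetic via page_to_virt(). Callers pair it with unmap_domain_page(), as in this one-liner from smpboot.c further down in these results:

    /* Poison a fresh stub page through a transient mapping, then drop it. */
    unmap_domain_page(memset(__map_domain_page(pg), 0xcc, PAGE_SIZE));
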
pdx.h
21 #define page_to_pdx(pg) ((pg) - frame_table) argument
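
Note: page_to_pdx() is plain pointer subtraction: a page's pdx is its entry's index within frame_table. A worked instance with made-up numbers, assuming a 32-byte struct page_info:

    /* A pg sitting 0x400 bytes into frame_table has pdx 0x400 / 32 == 32. */
    unsigned long pdx = page_to_pdx(pg);   /* expands to pg - frame_table */
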
mm.h
202 void free_domheap_pages(struct page_info *pg, unsigned int order);
217 struct page_info *pg,
538 struct page_info *pg; in page_list_remove_head() local
543 pg = page_list_first(head); in page_list_remove_head()
544 list_del(&pg->list); in page_list_remove_head()
545 return pg; in page_list_remove_head()
591 #define arch_free_heap_page(d, pg) \ argument
592 page_list_del(pg, page_to_list(d, pg))
/xen/xen/arch/x86/mm/
altp2m.c
63 struct page_info *pg; in altp2m_vcpu_enable_ve() local
70 rc = check_get_page_from_gfn(d, gfn, false, &p2mt, &pg); in altp2m_vcpu_enable_ve()
80 if ( !p2m_is_pageable(p2mt) || is_special_page(pg) ) in altp2m_vcpu_enable_ve()
92 if ( cmpxchg(&a->veinfo_pg, NULL, pg) != NULL ) in altp2m_vcpu_enable_ve()
103 put_page(pg); in altp2m_vcpu_enable_ve()
111 struct page_info *pg; in altp2m_vcpu_disable_ve() local
118 if ( (pg = xchg(&a->veinfo_pg, NULL)) ) in altp2m_vcpu_disable_ve()
122 put_page(pg); in altp2m_vcpu_disable_ve()
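
Note: the veinfo_pg handling above is a lock-free single-slot publish/steal idiom: enable installs the page only if the slot is still empty (dropping its own reference if it lost the race), and disable atomically steals the pointer before releasing it, so the two paths never double-put. Condensed from the hits:

    if ( cmpxchg(&a->veinfo_pg, NULL, pg) != NULL )  /* publish iff empty   */
        put_page(pg);
    ...
    if ( (pg = xchg(&a->veinfo_pg, NULL)) )          /* steal, then release */
        put_page(pg);
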
mem_sharing.c
171 rc = _page_lock(pg); in mem_sharing_page_lock()
188 _page_unlock(pg); in mem_sharing_page_unlock()
475 struct page_info *pg; in audit() local
481 pg = pg_shared_info->pg; in audit()
482 mfn = page_to_mfn(pg); in audit()
521 if ( (!pg->sharing) || rmap_count(pg) == 0 ) in audit()
738 struct page_info *pg = NULL; in __grab_shared_page() local
743 pg = mfn_to_page(mfn); in __grab_shared_page()
758 return pg; in __grab_shared_page()
844 if ( !pg ) in nominate_page()
[all …]
/xen/xen/arch/x86/mm/hap/
hap.c
245 struct page_info *pg; in hap_alloc() local
250 if ( unlikely(!pg) ) in hap_alloc()
257 return pg; in hap_alloc()
272 struct page_info *pg; in hap_alloc_p2m_page() local
277 pg = hap_alloc(d); in hap_alloc_p2m_page()
283 ASSERT(!page_get_owner(pg) && !(pg->count_info & PGC_count_mask)); in hap_alloc_p2m_page()
293 return pg; in hap_alloc_p2m_page()
311 pg->count_info, pg->u.inuse.type_info); in hap_free_p2m_page()
337 struct page_info *pg; in hap_set_allocation() local
370 ASSERT(pg); in hap_set_allocation()
[all …]
/xen/xen/include/asm-x86/
mm.h
79 #define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st) argument
346 static inline void *__page_to_virt(const struct page_info *pg) in __page_to_virt() argument
348 ASSERT((unsigned long)pg - FRAMETABLE_VIRT_START < FRAMETABLE_SIZE); in __page_to_virt()
356 ((unsigned long)pg - FRAMETABLE_VIRT_START) / in __page_to_virt()
357 (sizeof(*pg) / (sizeof(*pg) & -sizeof(*pg))) * in __page_to_virt()
358 (PAGE_SIZE / (sizeof(*pg) & -sizeof(*pg)))); in __page_to_virt()
633 #define arch_free_heap_page(d, pg) \ argument
634 page_list_del2(pg, page_to_list(d, pg), &(d)->arch.relmem_list)
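
Note: the odd divisor/multiplier pair in __page_to_virt() evaluates offset / sizeof(*pg) * PAGE_SIZE without an overflow-prone intermediate product: sizeof(*pg) & -sizeof(*pg) isolates the largest power of two dividing the structure size, and both operands are reduced by it before the multiply. With a (hypothetical) 32-byte page_info the division collapses to nothing:

    /* low = sizeof(*pg) & -sizeof(*pg)        == 32 when sizeof(*pg) == 32
     * offset / (sizeof(*pg) / low)            == offset / 1
     *        * (PAGE_SIZE / low)              == offset * 128
     * i.e. a single shift left by 7 instead of a divide-then-multiply.     */
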
page.h
244 #define pagetable_from_page(pg) pagetable_from_mfn(page_to_mfn(pg)) argument
264 #define page_to_mfn(pg) pdx_to_mfn((unsigned long)((pg) - frame_table)) argument
268 #define __page_to_maddr(pg) mfn_to_maddr(page_to_mfn(pg)) argument
288 #define page_to_maddr(pg) __page_to_maddr(pg) argument
290 #define page_to_virt(pg) __page_to_virt(pg) argument
/xen/stubdom/vtpmmgr/
vtpm_disk.c
150 struct mem_vtpm_page *pg = find_mem_vtpm_page(group, vtpm); in vtpm_sync() local
151 if (!pg) in vtpm_sync()
153 pg->disk_loc.value = 0; in vtpm_sync()
191 struct mem_vtpm_page *pg = find_mem_vtpm_page(group, vtpm); in delete_vtpm() local
200 pg->vtpms[vtidx] = last; in delete_vtpm()
201 pg->disk_loc.value = 0; in delete_vtpm()
224 struct mem_vtpm_page *pg = &group->data[j]; in find_vtpm() local
225 for (k = 0; k < pg->size; k++) { in find_vtpm()
226 struct mem_vtpm *vt = pg->vtpms[k]; in find_vtpm()
/xen/xen/arch/x86/mm/shadow/
common.c
166 struct page_info *pg; in sh_oos_audit() local
661 struct page_info *pg; in sh_unsync() local
667 pg = mfn_to_page(gmfn); in sh_unsync()
672 if ( pg->shadow_flags & in sh_unsync()
1214 struct page_info *pg; in shadow_alloc_p2m_page() local
1240 ASSERT(!page_get_owner(pg) && !(pg->count_info & PGC_count_mask)); in shadow_alloc_p2m_page()
1244 return pg; in shadow_alloc_p2m_page()
1259 pg->count_info, pg->u.inuse.type_info); in shadow_free_p2m_page()
2641 if ( pg == NULL ) in shadow_enable()
2651 pg->count_info = 1; in shadow_enable()
[all …]
/xen/xen/include/asm-arm/
mm.h
121 #define page_state_is(pg, st) (((pg)->count_info&PGC_state) == PGC_state_##st) argument
222 #define page_to_mfn(pg) \ argument
223 pdx_to_mfn((unsigned long)((pg) - frame_table) + frametable_base_pdx)
227 #define page_to_maddr(pg) (mfn_to_maddr(page_to_mfn(pg))) argument
313 static inline void *page_to_virt(const struct page_info *pg) in page_to_virt() argument
315 return mfn_to_virt(mfn_x(page_to_mfn(pg))); in page_to_virt()
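
Note: unlike the x86 variant above, the Arm frame table covers only RAM and does not start at pdx 0, so page_to_mfn() rebases the entry index by frametable_base_pdx before converting. A hedged round-trip check (mfn_to_page() is the inverse seen in other hits here):

    mfn_t mfn = page_to_mfn(pg);     /* (pg - frame_table) + frametable_base_pdx */
    ASSERT(mfn_to_page(mfn) == pg);  /* hypothetical sanity assertion            */
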
/xen/xen/arch/x86/
tboot.c
196 void *pg = map_domain_page(_mfn(mfn)); in update_pagetable_mac() local
198 vmac_update(pg, PAGE_SIZE, ctx); in update_pagetable_mac()
199 unmap_domain_page(pg); in update_pagetable_mac()
223 void *pg = __map_domain_page(page); in tboot_gen_domain_integrity() local
224 vmac_update(pg, PAGE_SIZE, &ctx); in tboot_gen_domain_integrity()
225 unmap_domain_page(pg); in tboot_gen_domain_integrity()
294 void *pg; in tboot_gen_xenheap_integrity() local
299 pg = mfn_to_virt(mfn); in tboot_gen_xenheap_integrity()
300 vmac_update((uint8_t *)pg, PAGE_SIZE, &ctx); in tboot_gen_xenheap_integrity()
mm.c
612 typeof(pg->linear_pt_count) nc = read_atomic(&pg->linear_pt_count), oc; in inc_linear_entries()
635 typeof(pg->linear_pt_count) oc; in dec_linear_entries()
643 typeof(pg->linear_pt_count) nc = read_atomic(&pg->linear_pt_count), oc; in inc_linear_uses()
659 typeof(pg->linear_pt_count) oc; in dec_linear_uses()
1283 put_page(pg); in put_pt_page()
5919 struct page_info *pg; in create_perdomain_mapping() local
5931 if ( !pg ) in create_perdomain_mapping()
5952 if ( !pg ) in create_perdomain_mapping()
5993 if ( !pg ) in create_perdomain_mapping()
6010 if ( pg ) in create_perdomain_mapping()
[all …]
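
Note: the linear_pt_count hits in mm.c are compare-and-swap retry loops: read the current value with read_atomic(), compute a successor, and retry if cmpxchg() reports another CPU got there first. A minimal sketch of the loop shape (the bare increment stands in for mm.c's actual sign and overflow checks):

    typeof(pg->linear_pt_count) nc = read_atomic(&pg->linear_pt_count), oc;

    do {
        oc = nc;                                        /* basis for the update */
        nc = cmpxchg(&pg->linear_pt_count, oc, oc + 1); /* attempt oc -> oc + 1 */
    } while ( nc != oc );                               /* raced: retry with nc */
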
smpboot.c
634 struct page_info *pg; in alloc_stub_page() local
639 pg = mfn_to_page(_mfn(*mfn)); in alloc_stub_page()
645 pg = alloc_domheap_page(NULL, memflags); in alloc_stub_page()
646 if ( !pg ) in alloc_stub_page()
649 unmap_domain_page(memset(__map_domain_page(pg), 0xcc, PAGE_SIZE)); in alloc_stub_page()
653 if ( map_pages_to_xen(stub_va, page_to_mfn(pg), 1, in alloc_stub_page()
657 free_domheap_page(pg); in alloc_stub_page()
661 *mfn = mfn_x(page_to_mfn(pg)); in alloc_stub_page()
/xen/xen/drivers/passthrough/amd/
iommu.h
361 struct page_info *pg = alloc_domheap_page(NULL, 0); in alloc_amd_iommu_pgtable() local
363 if ( pg ) in alloc_amd_iommu_pgtable()
364 clear_domain_page(page_to_mfn(pg)); in alloc_amd_iommu_pgtable()
366 return pg; in alloc_amd_iommu_pgtable()
369 static inline void free_amd_iommu_pgtable(struct page_info *pg) in free_amd_iommu_pgtable() argument
371 if ( pg ) in free_amd_iommu_pgtable()
372 free_domheap_page(pg); in free_amd_iommu_pgtable()
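
Note: these two wrappers package the domheap allocator for IOMMU page tables: fresh tables come back zeroed via clear_domain_page(), and the free side tolerates NULL, so teardown and error paths can release unconditionally:

    struct page_info *pg = alloc_amd_iommu_pgtable(); /* NULL on failure, else zeroed */
    ...
    free_amd_iommu_pgtable(pg);                       /* safe even when pg == NULL    */
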
pci_amd_iommu.c
385 PFN_ORDER(pg) = level; in deallocate_next_page_table()
387 page_list_add_tail(pg, &iommu_pt_cleanup_list); in deallocate_next_page_table()
391 static void deallocate_page_table(struct page_info *pg) in deallocate_page_table() argument
394 unsigned int index, level = PFN_ORDER(pg); in deallocate_page_table()
396 PFN_ORDER(pg) = 0; in deallocate_page_table()
400 free_amd_iommu_pgtable(pg); in deallocate_page_table()
404 table_vaddr = __map_domain_page(pg); in deallocate_page_table()
420 free_amd_iommu_pgtable(pg); in deallocate_page_table()
556 static void amd_dump_p2m_table_level(struct page_info* pg, int level, in amd_dump_p2m_table_level() argument
566 table_vaddr = __map_domain_page(pg); in amd_dump_p2m_table_level()
[all …]
iommu_map.c
480 struct page_info *pg; in amd_iommu_quarantine_init() local
488 pg = alloc_amd_iommu_pgtable(); in amd_iommu_quarantine_init()
489 if ( !pg ) in amd_iommu_quarantine_init()
500 set_iommu_pde_present(pde, mfn_x(page_to_mfn(pg)), level - 1, in amd_iommu_quarantine_init()
505 table = __map_domain_page(pg); in amd_iommu_quarantine_init()
/xen/xen/arch/x86/hvm/
stdvga.c
87 struct page_info *pg = s->vram_page[(a >> 12) & 0x3f]; in vram_getb() local
88 uint8_t *p = __map_domain_page(pg); in vram_getb()
94 struct page_info *pg = s->vram_page[(a >> 10) & 0x3f]; in vram_getl() local
95 uint32_t *p = __map_domain_page(pg); in vram_getl()
578 struct page_info *pg; in stdvga_init() local
589 pg = alloc_domheap_page(d, MEMF_no_owner); in stdvga_init()
590 if ( pg == NULL ) in stdvga_init()
592 s->vram_page[i] = pg; in stdvga_init()
593 clear_domain_page(page_to_mfn(pg)); in stdvga_init()
ioreq.c
1352 buffered_iopage_t *pg; in hvm_send_buffered_ioreq() local
1364 pg = iorp->va; in hvm_send_buffered_ioreq()
1366 if ( !pg ) in hvm_send_buffered_ioreq()
1402 if ( (pg->ptrs.write_pointer - pg->ptrs.read_pointer) >= in hvm_send_buffered_ioreq()
1410 pg->buf_ioreq[pg->ptrs.write_pointer % IOREQ_BUFFER_SLOT_NUM] = bp; in hvm_send_buffered_ioreq()
1415 pg->buf_ioreq[(pg->ptrs.write_pointer+1) % IOREQ_BUFFER_SLOT_NUM] = bp; in hvm_send_buffered_ioreq()
1420 pg->ptrs.write_pointer += qw ? 2 : 1; in hvm_send_buffered_ioreq()
1425 pg->ptrs.read_pointer >= IOREQ_BUFFER_SLOT_NUM ) in hvm_send_buffered_ioreq()
1427 union bufioreq_pointers old = pg->ptrs, new; in hvm_send_buffered_ioreq()
1432 cmpxchg(&pg->ptrs.full, old.full, new.full); in hvm_send_buffered_ioreq()
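
Note: hvm_send_buffered_ioreq() drives a ring whose read and write pointers grow monotonically: a slot is write_pointer % IOREQ_BUFFER_SLOT_NUM, fullness is the pointer difference (no separate count field), and a quad-word request (qw) takes two consecutive slots. A hedged sketch of the producer step; the full-test's right-hand side is truncated above, so this reconstruction is an assumption:

    if ( pg->ptrs.write_pointer - pg->ptrs.read_pointer >=
         IOREQ_BUFFER_SLOT_NUM - qw )          /* assumed full test        */
        return X86EMUL_UNHANDLEABLE;           /* assumed fallback result  */

    pg->buf_ioreq[pg->ptrs.write_pointer % IOREQ_BUFFER_SLOT_NUM] = bp;
    smp_wmb();                                 /* publish slot before pointer */
    pg->ptrs.write_pointer += qw ? 2 : 1;
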
/xen/xen/drivers/acpi/apei/
apei-io.c
80 int i, pg; in apei_range_map() local
83 pg = ((((paddr + size -1) & PAGE_MASK) in apei_range_map()
85 if (apei_range_nr + pg > FIX_APEI_RANGE_MAX) in apei_range_map()
88 start_nr = apei_range_nr + pg -1; in apei_range_map()
89 for (i = 0; i < pg; i++) { in apei_range_map()
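
Note: the truncated expression at line 83 computes how many fixmap slots a physical range spans; reading it as "align last and first byte down to a page, difference in pages, plus one" is an assumption here, since the continuation line is elided. A worked instance of that formula:

    /* paddr = 0x1ffc, size = 8: last byte is 0x2003. Page-aligned ends are
     * 0x1000 and 0x2000, one page apart, +1 => the range touches 2 pages
     * even though size < PAGE_SIZE. */
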
/xen/xen/arch/x86/pv/
domain.c
121 struct page_info *pg; in setup_compat_l4() local
125 pg = alloc_domheap_page(v->domain, MEMF_no_owner | MEMF_no_scrub); in setup_compat_l4()
126 if ( pg == NULL ) in setup_compat_l4()
129 mfn = page_to_mfn(pg); in setup_compat_l4()
136 pg->u.inuse.type_info = PGT_l4_page_table | PGT_validated | 1; in setup_compat_l4()
138 v->arch.guest_table = pagetable_from_page(pg); in setup_compat_l4()
/xen/xen/arch/arm/
guest_walk.c
130 if ( pte.pg.page ) /* Small page. */ in guest_walk_sd()
133 *ipa = ((paddr_t)pte.pg.base << L2DESC_SMALL_PAGE_SHIFT) | (gva & mask); in guest_walk_sd()
136 if ( !pte.pg.xn ) in guest_walk_sd()
150 if ( !pte.pg.ro ) in guest_walk_sd()
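
Note: guest_walk_sd() implements the Armv7 short-descriptor walk; for a 4 KiB small page the output address splices the L2 descriptor's frame base onto the low bits of the virtual address, after which the xn/ro bits gate execute and write permission. A worked instance of the splice, assuming L2DESC_SMALL_PAGE_SHIFT == 12 and mask == PAGE_SIZE - 1:

    /* pte.pg.base = 0x12345, gva low bits = 0x678:
     *   ipa = (0x12345 << 12) | 0x678 == 0x12345678 */
    *ipa = ((paddr_t)pte.pg.base << L2DESC_SMALL_PAGE_SHIFT) | (gva & mask);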
