/xen/docs/
Makefile
     70  clean: clean-man-pages
     83  .PHONY: man-pages install-man-pages clean-man-pages uninstall-man-pages
    147  .PHONY: man$(1)-pages
    152  install-man$(1)-pages: man$(1)-pages
    157  .PHONY: clean-man$(1)-pages
    158  clean-man$(1)-pages:
    163  uninstall-man$(1)-pages:
    167  man-pages: man$(1)-pages
    168  install-man-pages: install-man$(1)-pages
    169  clean-man-pages: clean-man$(1)-pages
    [all …]

/xen/xen/common/
vmap.c
    231  if ( !pages )  in vunmap()
    232  pages = vm_size(va, VMAP_XEN);  in vunmap()
    245  size_t pages, i;  in vmalloc_type() local
    251  pages = PFN_UP(size);  in vmalloc_type()
    256  for ( i = 0; i < pages; i++ )  in vmalloc_type()
    304  unsigned int i, pages;  in vfree() local
    312  pages = vm_size(va, type);  in vfree()
    313  if ( !pages )  in vfree()
    316  pages = vm_size(va, type);  in vfree()
    318  ASSERT(pages);  in vfree()
    [all …]

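A note on the PFN_UP(size) seen in vmalloc_type(): it rounds a byte count up to whole pages. A minimal sketch of the conversion, assuming Xen's usual definition of the macro (the definition itself is not among the matches):

    #include <assert.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    /* Round a byte count up to whole pages, as Xen's PFN_UP() does. */
    #define PFN_UP(x)  (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

    int main(void)
    {
        assert(PFN_UP(1) == 1);              /* any non-zero size needs a page */
        assert(PFN_UP(PAGE_SIZE) == 1);      /* exact multiple adds nothing    */
        assert(PFN_UP(PAGE_SIZE + 1) == 2);  /* one byte over rounds up        */
        return 0;
    }
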
trace.c
    143  if ( pages > max_pages )  in calculate_tbuf_size()
    147  pages, max_pages);  in calculate_tbuf_size()
    148  pages = max_pages;  in calculate_tbuf_size()
    160  return pages;  in calculate_tbuf_size()
    184  if ( pages == 0 )  in alloc_trace_bufs()
    190  pages = calculate_tbuf_size(pages, t_info_first_offset);  in alloc_trace_bufs()
    200  t_info->tbuf_size = pages;  in alloc_trace_bufs()
    211  for ( i = 0; i < pages; i++ )  in alloc_trace_bufs()
    244  for ( i = 0; i < pages; i++ )  in alloc_trace_bufs()
    256  opt_tbuf_size = pages;  in alloc_trace_bufs()
    [all …]

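calculate_tbuf_size() clamps the requested number of per-CPU trace pages to what the t_info metadata can describe. A compact sketch of that clamp; the limit constant here is invented for illustration, whereas the real max_pages is computed from the t_info layout:

    #include <stdio.h>

    #define MAX_TRACE_PAGES 1024  /* hypothetical ceiling */

    static unsigned int calculate_tbuf_size(unsigned int pages)
    {
        if ( pages > MAX_TRACE_PAGES )
        {
            printf("requested %u trace pages, limiting to %u\n",
                   pages, MAX_TRACE_PAGES);
            pages = MAX_TRACE_PAGES;
        }
        return pages;
    }
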
bitmap.c
    273  int pages = 1 << order;  in bitmap_find_free_region() local
    276  if(pages > BITS_PER_LONG)  in bitmap_find_free_region()
    280  mask = (1ul << (pages - 1));  in bitmap_find_free_region()
    284  for (i = 0; i < bits; i += pages) {  in bitmap_find_free_region()
    308  int pages = 1 << order;  in bitmap_release_region() local
    309  unsigned long mask = (1ul << (pages - 1));  in bitmap_release_region()
    319  int pages = 1 << order;  in bitmap_allocate_region() local
    320  unsigned long mask = (1ul << (pages - 1));  in bitmap_allocate_region()
    328  BUG_ON(pages > BITS_PER_LONG);  in bitmap_allocate_region()

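All three bitmap_*_region() helpers work on a contiguous run of 1 << order bits. The matches show only the first step of building that run's mask; in the classic implementation the single top bit is then extended downwards with mask += mask - 1, a step inferred here rather than visible above:

    #include <assert.h>

    /* Build a mask of (1 << order) consecutive set bits. */
    static unsigned long region_mask(int order)
    {
        int pages = 1 << order;
        unsigned long mask = 1UL << (pages - 1);  /* top bit of the run  */

        mask += mask - 1;                         /* fill the bits below */
        return mask;
    }

    int main(void)
    {
        assert(region_mask(0) == 0x1UL);
        assert(region_mask(2) == 0xfUL);
        assert(region_mask(3) == 0xffUL);
        return 0;
    }
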
/xen/tools/libxc/
xc_mem_access.c
     47  uint64_t *pages,  in xc_set_mem_access_multi() argument
     51  DECLARE_HYPERCALL_BOUNCE(pages, nr * sizeof(uint64_t),  in xc_set_mem_access_multi()
     64  if ( xc_hypercall_bounce_pre(xch, pages) ||  in xc_set_mem_access_multi()
     71  set_xen_guest_handle(mao.pfn_list, pages);  in xc_set_mem_access_multi()
     77  xc_hypercall_bounce_post(xch, pages);  in xc_set_mem_access_multi()

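These matches show libxc's hypercall bounce-buffer pattern: caller memory is copied into hypercall-safe memory before the call and released afterwards. A sketch of the whole flow under that assumption; apart from pfn_list, which is visible above, the descriptor fields and bounce direction are guesses:

    /* Sketch of the bounce pattern around a memory-op hypercall. */
    static int set_access_multi(xc_interface *xch, uint32_t domid,
                                uint64_t *pages, uint32_t nr)
    {
        int rc;
        xen_mem_access_op_t mao = { .domid = domid, .nr = nr }; /* fields guessed */
        DECLARE_HYPERCALL_BOUNCE(pages, nr * sizeof(uint64_t),
                                 XC_HYPERCALL_BUFFER_BOUNCE_IN);

        /* Copy the caller's pfn array into hypercall-safe memory. */
        if ( xc_hypercall_bounce_pre(xch, pages) )
            return -1;

        set_xen_guest_handle(mao.pfn_list, pages);

        rc = do_memory_op(xch, XENMEM_access_op, &mao, sizeof(mao));

        /* Release the bounce buffer (OUT/BOTH buffers copy back here). */
        xc_hypercall_bounce_post(xch, pages);

        return rc;
    }
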
xc_sr_restore.c
    370  if ( rec->length < sizeof(*pages) )  in handle_page_data()
    373  rec->length, sizeof(*pages));  in handle_page_data()
    377  if ( pages->count < 1 )  in handle_page_data()
    383  if ( rec->length < sizeof(*pages) + (pages->count * sizeof(uint64_t)) )  in handle_page_data()
    390  pfns = malloc(pages->count * sizeof(*pfns));  in handle_page_data()
    391  types = malloc(pages->count * sizeof(*types));  in handle_page_data()
    395  pages->count);  in handle_page_data()
    399  for ( i = 0; i < pages->count; ++i )  in handle_page_data()
    401  pfn = pages->pfn[i] & PAGE_DATA_PFN_MASK;  in handle_page_data()
    426  if ( rec->length != (sizeof(*pages) +  in handle_page_data()
    [all …]

xc_dom_hvmloader.c
    190  size_t pages = (elf->pend - elf->pstart + PAGE_SIZE - 1) >> PAGE_SHIFT;  in xc_dom_load_hvm_kernel() local
    195  entries = calloc(pages, sizeof(privcmd_mmap_entry_t));  in xc_dom_load_hvm_kernel()
    199  for ( i = 0; i < pages; i++ )  in xc_dom_load_hvm_kernel()
    203  dom->xch, dom->guest_domid, pages << PAGE_SHIFT,  in xc_dom_load_hvm_kernel()
    205  entries, pages);  in xc_dom_load_hvm_kernel()
    213  elf->dest_size = pages * XC_DOM_PAGE_SIZE(dom);  in xc_dom_load_hvm_kernel()

xc_dom_core.c
    531  xen_pfn_t pages)  in xc_dom_chk_alloc_pages() argument
    542  pages, dom->total_pages,  in xc_dom_chk_alloc_pages()
    547  dom->pfn_alloc_end += pages;  in xc_dom_chk_alloc_pages()
    548  dom->virt_alloc_end += pages * page_size;  in xc_dom_chk_alloc_pages()
    559  xen_pfn_t pages;  in xc_dom_alloc_pad() local
    575  pages = (boundary - dom->virt_alloc_end) / page_size;  in xc_dom_alloc_pad()
    585  xen_pfn_t pages;  in xc_dom_alloc_segment() local
    591  pages = (size + page_size - 1) / page_size;  in xc_dom_alloc_segment()
    595  seg->pages = pages;  in xc_dom_alloc_segment()
    597  if ( xc_dom_chk_alloc_pages(dom, name, pages) )  in xc_dom_alloc_segment()
    [all …]

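xc_dom_chk_alloc_pages() behaves like a bump allocator in page units: it validates the request against the domain's page budget, then advances the PFN and virtual-address watermarks in lock-step. A reduced, self-contained sketch of that bookkeeping (the struct is a stand-in, not the real xc_dom_image):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t xen_pfn_t;

    struct dom {                   /* stand-in for the relevant fields */
        xen_pfn_t pfn_alloc_end;   /* next free pfn                    */
        uint64_t  virt_alloc_end;  /* matching virtual watermark       */
        xen_pfn_t total_pages;     /* the guest's page budget          */
        uint64_t  page_size;
    };

    static int chk_alloc_pages(struct dom *dom, const char *name,
                               xen_pfn_t pages)
    {
        /* Refuse a segment that would run past the budget. */
        if ( pages > dom->total_pages - dom->pfn_alloc_end )
        {
            fprintf(stderr, "segment %s too large (%" PRIu64 " pages)\n",
                    name, (uint64_t)pages);
            return -1;
        }

        /* Bump both watermarks together so pfn and va stay in sync. */
        dom->pfn_alloc_end  += pages;
        dom->virt_alloc_end += pages * dom->page_size;
        return 0;
    }
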
xc_dom_elfloader.c
    207  xen_pfn_t pages;  in xc_dom_load_elf_kernel() local
    209  elf->dest_base = xc_dom_seg_to_ptr_pages(dom, &dom->kernel_seg, &pages);  in xc_dom_load_elf_kernel()
    216  elf->dest_size = pages * XC_DOM_PAGE_SIZE(dom);  in xc_dom_load_elf_kernel()

xc_tbuf.c
     82  int xc_tbuf_enable(xc_interface *xch, unsigned long pages, unsigned long *mfn,  in xc_tbuf_enable() argument
     93  (void)xc_tbuf_set_size(xch, pages);  in xc_tbuf_enable()

xc_dom_x86.c
    243  int pages, extra_pages;  in alloc_pgtables_pv() local
    250  pages = extra_pages;  in alloc_pgtables_pv()
    259  pages = map->area.pgtables + extra_pages;  in alloc_pgtables_pv()
    829  start_info->nr_p2m_frames = dom->p2m_seg.pages;  in start_info_x86_64()
   1264  uint64_t pages, super_pages;  in meminit_pv() local
   1274  super_pages = pages >> SUPERPAGE_2MB_SHIFT;  in meminit_pv()
   1277  for ( pfn = pfn_base; pfn < pfn_base+pages; pfn++ )  in meminit_pv()
   1306  for ( j = pfn_base_idx - pfn_base; j < pages; j += allocsz )  in meminit_pv()
   1308  allocsz = min_t(uint64_t, 1024 * 1024, pages - j);  in meminit_pv()
   1317  __func__, pages, i, pnode);  in meminit_pv()
    [all …]

/xen/stubdom/grub/
kexec.c
     51  static unsigned long *pages;  variable
    108  pages = realloc(pages, new_allocated * sizeof(*pages));  in kexec_allocate()
    117  pages[i] = alloc_page();  in kexec_allocate()
    118  memset((void*) pages[i], 0, PAGE_SIZE);  in kexec_allocate()
    119  new_pfn = PHYS_PFN(to_phys(pages[i]));  in kexec_allocate()
    301  memcpy((void *) pages[pfn], hypercall_page, PAGE_SIZE);  in kexec()
    346  munmap((void*) pages[pfn], PAGE_SIZE);  in kexec()
    421  do_map_frames(pages[pfn], &pages_mfns[pfn], 1, 0, 0, DOMID_SELF, 0, L1_PROT);  in kexec()
    425  free_page((void*)pages[pfn]);  in kexec()
    426  free(pages);  in kexec()
    [all …]

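kexec_allocate() keeps a realloc-grown array of page addresses and zeroes each newly added page. A sketch of that pattern in hosted C, with posix_memalign standing in for Mini-OS's alloc_page():

    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL

    static unsigned long *pages;     /* addresses of the allocated pages */
    static unsigned long allocated;  /* slots currently backed           */

    /* Grow the array to new_allocated entries, backing each new slot
     * with a zeroed page.  Returns 0 on success. */
    static int grow_pages(unsigned long new_allocated)
    {
        unsigned long i;
        unsigned long *tmp = realloc(pages, new_allocated * sizeof(*pages));

        if ( tmp == NULL )
            return -1;
        pages = tmp;

        for ( i = allocated; i < new_allocated; i++ )
        {
            void *p;

            if ( posix_memalign(&p, PAGE_SIZE, PAGE_SIZE) )
                return -1;
            memset(p, 0, PAGE_SIZE);
            pages[i] = (unsigned long)p;
        }
        allocated = new_allocated;
        return 0;
    }
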
/xen/tools/firmware/vgabios/
vbetables-gen.c
     85  int pages, pitch;  in main() local
     99  pages = vram_size / (pm->height * pitch);  in main()
    100  if (pages > 0) {  in main()
    159  printf("/*Bit8u NumberOfImagePages*/ %d,\n", (pages / 4) - 1);  in main()
    161  printf("/*Bit8u NumberOfImagePages*/ %d,\n", pages - 1);  in main()

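The NumberOfImagePages arithmetic: pages counts how many whole frames fit in video memory, and VBE reports one less than that, since the field excludes the displayed frame (planar modes first divide by four). For example, 4 MiB of VRAM with a 640x480, 8 bpp mode gives 4194304 / (480 * 640) = 13 frames, so the field is 12:

    #include <stdio.h>

    int main(void)
    {
        unsigned long vram_size = 4 * 1024 * 1024;  /* 4 MiB of VRAM      */
        int width = 640, height = 480, bpp = 8;
        int pitch = width * bpp / 8;                /* bytes per scanline */
        int pages = vram_size / (height * pitch);   /* whole frames       */

        /* VBE's NumberOfImagePages excludes the displayed frame. */
        printf("NumberOfImagePages = %d\n", pages - 1);  /* prints 12 */
        return 0;
    }
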
/xen/xen/arch/x86/mm/
paging.c
    406  unsigned long pages = 0;  in paging_log_dirty_op() local
    471  pages = d->arch.paging.preempt.log_dirty.done;  in paging_log_dirty_op()
    473  for ( ; (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES); i4++, i3 = 0 )  in paging_log_dirty_op()
    476  for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); i3++ )  in paging_log_dirty_op()
    481  (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES);  in paging_log_dirty_op()
    487  if ( unlikely(((sc->pages - pages + 7) >> 3) < bytes) )  in paging_log_dirty_op()
    488  bytes = (unsigned int)((sc->pages - pages + 7) >> 3);  in paging_log_dirty_op()
    501  pages += bytes << 3;  in paging_log_dirty_op()
    561  if ( pages < sc->pages )  in paging_log_dirty_op()
    562  sc->pages = pages;  in paging_log_dirty_op()
    [all …]

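paging_log_dirty_op() converts between page counts and dirty-bitmap bytes: one bitmap byte covers eight pages, so the pages still to report map to (remaining + 7) >> 3 bytes, and each copied byte advances the page count by bytes << 3. A minimal illustration of both directions:

    #include <assert.h>

    /* Bytes of dirty bitmap needed to cover pages_left pages. */
    static unsigned int bitmap_bytes(unsigned long pages_left)
    {
        return (pages_left + 7) >> 3;  /* round up to whole bytes */
    }

    int main(void)
    {
        assert(bitmap_bytes(1) == 1);
        assert(bitmap_bytes(8) == 1);
        assert(bitmap_bytes(9) == 2);

        /* After copying `bytes`, the loop advances by bytes << 3 pages. */
        assert((bitmap_bytes(9) << 3) == 16);
        return 0;
    }
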
/xen/tools/libs/foreignmemory/include/
xenforeignmemory.h
    107  int prot, size_t pages,
    119  void *addr, int prot, int flags, size_t pages,
    128  void *addr, size_t pages);

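All three prototypes take sizes in pages, not bytes. A hedged usage sketch of the open/map/unmap flow; consult the header's comments for the authoritative contract, and note that passing NULL for the per-page error array is an assumption here:

    #include <stdint.h>
    #include <sys/mman.h>
    #include <xenforeignmemory.h>

    /* Map nr guest frames of domain domid read-only, inspect, unmap. */
    static int peek_at_guest(uint32_t domid, const xen_pfn_t *gfns, size_t nr)
    {
        xenforeignmemory_handle *fmem = xenforeignmemory_open(NULL, 0);
        void *addr;

        if ( fmem == NULL )
            return -1;

        addr = xenforeignmemory_map(fmem, domid, PROT_READ, nr, gfns, NULL);
        if ( addr != NULL )
        {
            /* ... read the mapped pages here ... */
            xenforeignmemory_unmap(fmem, addr, nr);  /* size in pages too */
        }

        xenforeignmemory_close(fmem);
        return addr != NULL ? 0 : -1;
    }
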
/xen/xen/arch/arm/
livepatch.c
    144  int arch_livepatch_secure(const void *va, unsigned int pages, enum va_type type)  in arch_livepatch_secure() argument
    150  ASSERT(pages);  in arch_livepatch_secure()
    170  return modify_xen_mappings(start, start + pages * PAGE_SIZE, flags);  in arch_livepatch_secure()

kernel.c
    260  struct page_info *pages;  in kernel_decompress() local
    281  pages = alloc_domheap_pages(NULL, kernel_order_out, 0);  in kernel_decompress()
    282  if ( pages == NULL )  in kernel_decompress()
    287  mfn = page_to_mfn(pages);  in kernel_decompress()
    295  mod->start = page_to_maddr(pages);  in kernel_decompress()
    304  free_domheap_page(pages + i);  in kernel_decompress()

/xen/docs/misc/
xenpaging.txt
      9  xenpaging writes memory pages of a given guest to a file and moves the
     10  pages back to the pool of available memory. Once the guests wants to
     39  Now xenpaging tries to page-out as many pages to keep the overall memory

/xen/xen/arch/arm/efi/
efi-boot.h
    297  int pages;  in fdt_increase_size() local
    305  pages = PFN_UP(fdt_size + add_size);  in fdt_increase_size()
    307  pages, &fdt_addr);  in fdt_increase_size()
    316  if ( fdt_open_into(dtbfile.ptr, new_fdt, pages * EFI_PAGE_SIZE) )  in fdt_increase_size()
    328  if ( fdt_create_empty_tree(new_fdt, pages * EFI_PAGE_SIZE) )  in fdt_increase_size()
    343  dtbfile.size = pages * EFI_PAGE_SIZE;  in fdt_increase_size()

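fdt_increase_size() sizes the replacement buffer in EFI pages (PFN_UP of the current size plus the headroom) and then re-bases the existing tree into it with fdt_open_into(). A sketch of the libfdt side, with malloc standing in for the EFI page allocation the real code uses:

    #include <stdlib.h>
    #include <libfdt.h>

    #define EFI_PAGE_SIZE 4096UL

    /* Return a copy of fdt with add_size bytes of headroom, or NULL. */
    static void *fdt_grow(const void *fdt, size_t add_size)
    {
        size_t pages = (fdt_totalsize(fdt) + add_size + EFI_PAGE_SIZE - 1)
                       / EFI_PAGE_SIZE;
        void *new_fdt = malloc(pages * EFI_PAGE_SIZE);

        if ( new_fdt == NULL )
            return NULL;

        /* Copy and re-base the tree into the larger buffer. */
        if ( fdt_open_into(fdt, new_fdt, pages * EFI_PAGE_SIZE) )
        {
            free(new_fdt);
            return NULL;
        }
        return new_fdt;
    }
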
/xen/xen/arch/x86/
livepatch.c
    324  int arch_livepatch_secure(const void *va, unsigned int pages, enum va_type type)  in arch_livepatch_secure() argument
    330  ASSERT(pages);  in arch_livepatch_secure()
    339  return modify_xen_mappings(start, start + pages * PAGE_SIZE, flag);  in arch_livepatch_secure()

dom0_build.c
     42  unsigned long pages;  in get_memsize() local
     44  pages = sz->nr_pages + sz->percent * avail / 100;  in get_memsize()
     45  return sz->minus ? avail - pages : pages;  in get_memsize()

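get_memsize() resolves a dom0 memory specification that may combine an absolute page count, a percentage of available memory, and a "minus" flag (e.g. dom0_mem=-1G means "all but 1G"). The function is small enough to reconstruct almost entirely from the matches; the struct layout here is a reduced stand-in:

    #include <stdbool.h>

    struct memsize {
        unsigned long nr_pages;  /* absolute component             */
        unsigned int  percent;   /* percentage-of-available part   */
        bool          minus;     /* subtract the total from avail? */
    };

    static unsigned long get_memsize(const struct memsize *sz,
                                     unsigned long avail)
    {
        unsigned long pages = sz->nr_pages + sz->percent * avail / 100;

        return sz->minus ? avail - pages : pages;
    }
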
/xen/xen/include/asm-x86/
hap.h
     47  int hap_set_allocation(struct domain *d, unsigned int pages, bool *preempted);

shadow.h
     96  int shadow_set_allocation(struct domain *d, unsigned int pages,
    107  #define shadow_set_allocation(d, pages, preempted) \  argument

/xen/xen/arch/x86/mm/hap/
hap.c
    335  int hap_set_allocation(struct domain *d, unsigned int pages, bool *preempted)  in hap_set_allocation() argument
    341  if ( pages < d->arch.paging.hap.p2m_pages )  in hap_set_allocation()
    342  pages = 0;  in hap_set_allocation()
    344  pages -= d->arch.paging.hap.p2m_pages;  in hap_set_allocation()
    348  if ( d->arch.paging.hap.total_pages < pages )  in hap_set_allocation()
    361  else if ( d->arch.paging.hap.total_pages > pages )  in hap_set_allocation()

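hap_set_allocation() interprets the caller's pages as a target for the whole HAP pool, but the p2m pages already committed are not eligible for resizing, so they are subtracted first, clamping at zero; the pool then grows or shrinks toward the result. A sketch of just that target computation:

    /* Translate a requested pool size into the resizable portion. */
    static unsigned int hap_target(unsigned int pages, unsigned int p2m_pages)
    {
        return pages < p2m_pages ? 0 : pages - p2m_pages;
    }
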
/xen/xen/arch/x86/mm/shadow/
common.c
    983  pages,  in _shadow_prealloc()
   1103  d->arch.paging.shadow.free_pages -= pages;  in shadow_alloc()
   1120  for ( i = 0; i < pages ; i++ )  in shadow_alloc()
   1159  unsigned int pages;  in shadow_free() local
   1169  pages = shadow_size(shadow_type);  in shadow_free()
   1172  for ( i = 0; i < pages; i++ )  in shadow_free()
   1190  if ( i < pages - 1 )  in shadow_free()
   1204  d->arch.paging.shadow.free_pages += pages;  in shadow_free()
   1296  if ( pages > 0 )  in shadow_set_allocation()
   1301  if ( pages < lower_bound )  in shadow_set_allocation()
    [all …]