Cross-reference hits for the identifier nr_pages across the Xen tree, grouped by directory and file: each entry gives the line number, the matching source line, and the enclosing function; "[all …]" marks a truncated list.

/xen/tools/libs/call/

  buffer.c
       60  if ( nr_pages > 1 )  in cache_alloc()
       88  if ( nr_pages == 1 &&  in cache_free()
      130  void *p = cache_alloc(xcall, nr_pages);  in xencall_alloc_buffer_pages()
      133  p = osdep_alloc_pages(xcall, nr_pages);  in xencall_alloc_buffer_pages()
      138  memset(p, 0, nr_pages * PAGE_SIZE);  in xencall_alloc_buffer_pages()
      148  if ( !cache_free(xcall, p, nr_pages) )  in xencall_free_buffer_pages()
      149  osdep_free_pages(xcall, p, nr_pages);  in xencall_free_buffer_pages()
      153  int nr_pages;  member
      160  int nr_pages = actual_size >> PAGE_SHIFT;  in xencall_alloc_buffer()  local
      163  hdr = xencall_alloc_buffer_pages(xcall, nr_pages);  in xencall_alloc_buffer()
      [all …]

  private.h
       58  void *osdep_alloc_pages(xencall_handle *xcall, size_t nr_pages);
       59  void osdep_free_pages(xencall_handle *xcall, void *p, size_t nr_pages);

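The buffer.c hits above outline a two-level allocator: a small cache for the common single-page case, with a fall-back to the per-OS allocator declared in private.h. The sketch below is a rough reconstruction of that path from the hits alone, not the verbatim library code; the exact behaviour of cache_alloc()/cache_free() (assumed to serve only single-page buffers) and of the osdep_*_pages() backends is an assumption.

    /* Rough reconstruction from the hits above -- not the verbatim source.
     * cache_alloc()/cache_free() are assumed to only ever cache nr_pages == 1;
     * osdep_*_pages() are the per-OS allocators declared in private.h. */
    void *xencall_alloc_buffer_pages(xencall_handle *xcall, size_t nr_pages)
    {
        void *p = cache_alloc(xcall, nr_pages);      /* fast path: cached single page */

        if ( !p )
            p = osdep_alloc_pages(xcall, nr_pages);  /* slow path: ask the OS */

        if ( !p )
            return NULL;

        memset(p, 0, nr_pages * PAGE_SIZE);          /* callers get zeroed buffers */
        return p;
    }

    void xencall_free_buffer_pages(xencall_handle *xcall, void *p, size_t nr_pages)
    {
        if ( !p )
            return;

        /* Stash a single page back in the cache; return anything else to the OS. */
        if ( !cache_free(xcall, p, nr_pages) )
            osdep_free_pages(xcall, p, nr_pages);
    }
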
/xen/xen/arch/x86/

  dom0_build.c
       24  long nr_pages;  member
       36  return !sz->minus && sz->nr_pages;  in memsize_gt_zero()
       44  pages = sz->nr_pages + sz->percent * avail / 100;  in get_memsize()
      103  tmp.nr_pages = val >> PAGE_SHIFT;  in parse_amt()
      358  if ( !nr_pages )  in dom0_compute_nr_pages()
      373  nr_pages = max(nr_pages, min_pages);  in dom0_compute_nr_pages()
      374  nr_pages = min(nr_pages, max_pages);  in dom0_compute_nr_pages()
      375  nr_pages = min(nr_pages, avail);  in dom0_compute_nr_pages()
      403  end = vend + nr_pages * sizeof_long;  in dom0_compute_nr_pages()
      413  nr_pages = min_pages;  in dom0_compute_nr_pages()
      [all …]

  setup.c
      822  unsigned long nr_pages, raw_max_page, modules_headroom, module_map[1];  in __start_xen()  local
     1114  nr_pages = 0;  in __start_xen()
     1117  nr_pages += e820.map[i].size >> PAGE_SHIFT;  in __start_xen()
     1118  set_kexec_crash_area_size((u64)nr_pages << PAGE_SHIFT);  in __start_xen()
     1589  nr_pages = 0;  in __start_xen()
     1592  nr_pages += e820.map[i].size >> PAGE_SHIFT;  in __start_xen()
     1594  nr_pages >> (20 - PAGE_SHIFT),  in __start_xen()
     1595  nr_pages << (PAGE_SHIFT - 10));  in __start_xen()
     1596  total_pages = nr_pages;  in __start_xen()

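The dom0_build.c hits at lines 44 and 373-375 carry the whole sizing policy: dom0_mem can combine an absolute page count with a percentage of available memory, and the result is clamped to [min_pages, max_pages] and to what is actually free. A standalone illustration of just that arithmetic, with made-up numbers and a 4 KiB page size assumed:

    /* Standalone illustration of the sizing arithmetic visible in
     * get_memsize()/dom0_compute_nr_pages() above; the numbers are made up
     * and min()/max() are spelled out for portability. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long avail     = 0x100000;  /* free pages reported by the allocator (example) */
        unsigned long min_pages = 0x20000;   /* lower bound, e.g. dom0_mem=min:... (example)    */
        unsigned long max_pages = 0x80000;   /* upper bound, e.g. dom0_mem=max:... (example)    */

        /* get_memsize(): an absolute page count plus a percentage of what is available. */
        unsigned long nr_pages = 0x10000 + 25 * avail / 100;

        /* dom0_compute_nr_pages(): clamp into [min_pages, max_pages] and never
         * hand dom0 more than is actually available. */
        if ( nr_pages < min_pages ) nr_pages = min_pages;   /* max(nr_pages, min_pages) */
        if ( nr_pages > max_pages ) nr_pages = max_pages;   /* min(nr_pages, max_pages) */
        if ( nr_pages > avail )     nr_pages = avail;       /* min(nr_pages, avail)     */

        printf("dom0 gets %#lx pages (%lu MiB)\n", nr_pages, nr_pages >> (20 - 12));
        return 0;
    }
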
/xen/xen/arch/x86/hvm/

  dom0_build.c
       94  nr_pages -= rc;  in modify_identity_mmio()
      121  while ( nr_pages != 0 )  in pvh_populate_memory_range()
      150  start + nr_pages);  in pvh_populate_memory_range()
      186  nr_pages -= 1UL << order;  in pvh_populate_memory_range()
      361  if ( nr_pages == cur_pages )  in pvh_setup_e820()
      398  cur_pages = nr_pages;  in pvh_setup_e820()
      409  ASSERT(cur_pages == nr_pages);  in pvh_setup_e820()
      417  pvh_setup_e820(d, nr_pages);  in pvh_init_p2m()
      883  for ( i = 0 ; i < nr_pages; i++ )  in acpi_memory_banned()
     1043  unsigned long pfn, nr_pages;  in pvh_setup_acpi()  local
      [all …]

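The pvh_populate_memory_range() hits (lines 121 and 186) show the range being filled in power-of-two chunks, dropping to a smaller order when a large allocation cannot be satisfied. The sketch below reproduces only that loop shape; populate_chunk() and order_for() are hypothetical stand-ins for the real populate-physmap call and order selection, and the failure threshold is invented so the fallback path actually runs.

    /* Loop shape suggested by the pvh_populate_memory_range() hits: carve the
     * request into power-of-two chunks, retrying with a smaller order when a
     * large allocation fails.  Error handling beyond the retry is elided. */
    #include <stdio.h>

    /* Hypothetical stand-in: pretend orders above 4 cannot be allocated. */
    static int populate_chunk(unsigned long start, unsigned int order)
    {
        return order <= 4 ? 0 : -1;
    }

    /* Largest order <= max_order with 2^order <= nr_pages. */
    static unsigned int order_for(unsigned long nr_pages, unsigned int max_order)
    {
        unsigned int order = 0;

        while ( order < max_order && (2UL << order) <= nr_pages )
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long start = 0x100, nr_pages = 0x53;
        unsigned int max_order = 9;

        while ( nr_pages != 0 )
        {
            unsigned int order = order_for(nr_pages, max_order);

            if ( populate_chunk(start, order) )
            {
                max_order = order - 1;      /* shrink and retry */
                continue;
            }
            printf("populated %#lx +%lu pages (order %u)\n", start, 1UL << order, order);
            start += 1UL << order;
            nr_pages -= 1UL << order;
        }
        return 0;
    }
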
/xen/stubdom/vtpmmgr/

  vtpm_disk.c
      134  for (i = 0; i < group->nr_pages; i++)  in vtpm_sync_group()
      166  if (pgidx >= group->nr_pages) {  in create_vtpm()
      167  if (pgidx != group->nr_pages)  in create_vtpm()
      169  group->nr_pages++;  in create_vtpm()
      170  group->data = realloc(group->data, group->nr_pages * sizeof(*page));  in create_vtpm()
      192  struct mem_vtpm_page *last_pg = group->data + (group->nr_pages - 1);  in delete_vtpm()
      208  group->nr_pages--;  in delete_vtpm()
      223  for (j = 0; j < group->nr_pages; j++) {  in find_vtpm()

  disk_read.c
       40  for (i = 0; i < group->nr_pages; i++) {  in group_free()
      372  group->nr_pages = (group->nr_vtpms + VTPMS_PER_SECTOR - 1) / VTPMS_PER_SECTOR;  in load_verify_group()
      374  group->data = calloc(group->nr_pages, sizeof(group->data[0]));  in load_verify_group()
      376  rc = load_verify_vtpm_itree(dst, 0, group->nr_pages, disk.v.vtpm_hash,  in load_verify_group()

  vtpm_disk.h
      151  int nr_pages;  member

  disk_write.c
      123  for (i = 0; i < group->nr_pages; i++)  in disk_populate_used_group()
      204  disk_write_vtpm_itree(src, 0, group->nr_pages, disk.v.vtpm_hash, disk.vtpm_location,  in disk_write_group_sector()

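The disk_read.c hit at line 372 sizes the page array with the usual ceiling division, and the create_vtpm() hits grow it one page at a time. A self-contained sketch of that pattern; the struct is cut down to the fields visible above, VTPMS_PER_SECTOR's real value is not shown in these hits so the one below is a placeholder, and error checking is elided.

    /* Ceiling-division sizing plus one-page-at-a-time growth, as in the
     * vtpmmgr hits above.  Placeholder value and struct body; not the real
     * on-disk layout. */
    #include <stdio.h>
    #include <stdlib.h>

    #define VTPMS_PER_SECTOR 11   /* placeholder value for illustration only */

    struct mem_vtpm_page { int placeholder; /* real fields not shown in the hits */ };

    struct mem_group {
        int nr_vtpms;
        int nr_pages;
        struct mem_vtpm_page *data;
    };

    int main(void)
    {
        struct mem_group g = { .nr_vtpms = 25 };

        /* ceil(nr_vtpms / VTPMS_PER_SECTOR) pages hold the existing vtpms */
        g.nr_pages = (g.nr_vtpms + VTPMS_PER_SECTOR - 1) / VTPMS_PER_SECTOR;
        g.data = calloc(g.nr_pages, sizeof(g.data[0]));

        /* create_vtpm(): appending past the last page grows the array by one */
        g.nr_pages++;
        g.data = realloc(g.data, g.nr_pages * sizeof(*g.data));

        printf("%d vtpms -> %d pages\n", g.nr_vtpms, g.nr_pages);
        free(g.data);
        return 0;
    }
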
/xen/tools/libxc/

  xc_core.c
      352  int nr_vcpus, unsigned long nr_pages)  in elfnote_dump_core_header()  argument
      365  header.xch_nr_pages = nr_pages;  in elfnote_dump_core_header()
      440  unsigned long nr_pages;  in xc_domain_dumpcore_via_callback()  local
      563  nr_pages = info.nr_pages;  in xc_domain_dumpcore_via_callback()
      709  filesz = (uint64_t)nr_pages * PAGE_SIZE;  in xc_domain_dumpcore_via_callback()
      725  filesz = (uint64_t)nr_pages * sizeof(p2m_array[0]);  in xc_domain_dumpcore_via_callback()
      733  filesz = (uint64_t)nr_pages * sizeof(pfn_array[0]);  in xc_domain_dumpcore_via_callback()
      821  if ( j >= nr_pages )  in xc_domain_dumpcore_via_callback()
      885  if ( j < nr_pages )  in xc_domain_dumpcore_via_callback()
      890  DPRINTF("j (%ld) != nr_pages (%ld)", j, nr_pages);  in xc_domain_dumpcore_via_callback()
      [all …]

  xc_hcall_buf.c
       30  void *xc__hypercall_buffer_alloc_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages)  in xc__hypercall_buffer_alloc_pages()  argument
       32  void *p = xencall_alloc_buffer_pages(xch->xcall, nr_pages);  in xc__hypercall_buffer_alloc_pages()
       42  void xc__hypercall_buffer_free_pages(xc_interface *xch, xc_hypercall_buffer_t *b, int nr_pages)  in xc__hypercall_buffer_free_pages()  argument
       44  xencall_free_buffer_pages(xch->xcall, b->hbuf, nr_pages);  in xc__hypercall_buffer_free_pages()

  xc_sr_save.c
       95  unsigned int i, p, nr_pages = 0, nr_pages_mapped = 0;  in write_batch()  local
      158  mfns[nr_pages++] = mfns[i];  in write_batch()
      161  if ( nr_pages > 0 )  in write_batch()
      164  xch->fmem, ctx->domid, PROT_READ, nr_pages, mfns, errors);  in write_batch()
      170  nr_pages_mapped = nr_pages;  in write_batch()
      202  --nr_pages;  in write_batch()
      227  rec.length += nr_pages * PAGE_SIZE;  in write_batch()
      246  if ( nr_pages )  in write_batch()
      255  --nr_pages;  in write_batch()
      267  assert(nr_pages == 0);  in write_batch()

  xc_core_x86.c
       93  if ( dinfo->p2m_size < info->nr_pages )  in xc_core_arch_map_p2m_rw()
       95  ERROR("p2m_size < nr_pages -1 (%lx < %lx", dinfo->p2m_size, info->nr_pages - 1);  in xc_core_arch_map_p2m_rw()

  xc_sr_restore.c
      215  nr_pages = 0;  in process_page_data()  local
      252  mfns[nr_pages++] = ctx->restore.ops.pfn_to_gfn(ctx, pfns[i]);  in process_page_data()
      258  if ( nr_pages == 0 )  in process_page_data()
      263  nr_pages, mfns, map_errs);  in process_page_data()
      268  nr_pages, count);  in process_page_data()
      323  xenforeignmemory_unmap(xch->fmem, mapping, nr_pages);  in process_page_data()

  xc_dom_x86.c
      775  start_info->nr_pages = dom->total_pages;  in start_info_x86_32()
      821  start_info->nr_pages = dom->total_pages;  in start_info_x86_64()
     1350  unsigned long i, vmemid, nr_pages = dom->total_pages;  in meminit_hvm()  local
     1368  if ( nr_pages > target_pages )  in meminit_hvm()
     1405  if ( nr_pages > target_pages )  in meminit_hvm()
     1429  if ( total_pages != nr_pages )  in meminit_hvm()
     1432  total_pages, nr_pages);  in meminit_hvm()

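write_batch() and process_page_data() above follow the same batch pattern: collect frame numbers into an mfns[] array, map the whole batch with a single xenforeignmemory_map() call, inspect the per-page error array, and unmap with the same page count. Below is a minimal usage sketch of that pattern against libxenforeignmemory as I understand its public header (prototypes worth double-checking against your tree); the domain id and frame numbers are arbitrary examples.

    /* Batch map/unmap pattern from the save/restore hits above; example
     * values only, and the library prototypes are as recalled, not quoted. */
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>          /* PROT_READ */
    #include <xenforeignmemory.h>

    int main(void)
    {
        uint32_t domid = 1;                     /* example domain */
        xen_pfn_t mfns[] = { 0x1000, 0x1001 };  /* example frames */
        size_t nr_pages = sizeof(mfns) / sizeof(mfns[0]);
        int errors[2] = { 0, 0 };

        xenforeignmemory_handle *fmem = xenforeignmemory_open(NULL, 0);
        if ( !fmem )
            return 1;

        /* One call maps the whole batch; per-page failures land in errors[]. */
        void *mapping = xenforeignmemory_map(fmem, domid, PROT_READ,
                                             nr_pages, mfns, errors);
        if ( mapping )
        {
            for ( size_t i = 0; i < nr_pages; i++ )
                if ( errors[i] )
                    fprintf(stderr, "page %zu failed: %d\n", i, errors[i]);

            xenforeignmemory_unmap(fmem, mapping, nr_pages);
        }

        xenforeignmemory_close(fmem);
        return 0;
    }
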
/xen/xen/arch/x86/pv/

  dom0_build.c
      102  unsigned long nr_pages)  in setup_pv_physmap()  argument
      117  3 > nr_pages )  in setup_pv_physmap()
      294  unsigned long nr_pages;  in dom0_construct_pv()  local
      403  nr_pages = dom0_compute_nr_pages(d, &parms, initrd_len);  in dom0_construct_pv()
      494  if ( (1UL << order) + PFN_UP(initrd_len) > nr_pages )  in dom0_construct_pv()
      547  if ( domain_tot_pages(d) < nr_pages )  in dom0_construct_pv()
      549  nr_pages - domain_tot_pages(d));  in dom0_construct_pv()
      750  si->nr_pages = nr_pages;  in dom0_construct_pv()
      772  nr_pages);  in dom0_construct_pv()
      818  while ( pfn < nr_pages )  in dom0_construct_pv()
      [all …]

/xen/tools/libs/call/include/

  xencall.h
      120  void *xencall_alloc_buffer_pages(xencall_handle *xcall, size_t nr_pages);
      121  void xencall_free_buffer_pages(xencall_handle *xcall, void *p, size_t nr_pages);

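A minimal end-to-end sketch of the two declarations above: open a handle, allocate a hypercall-safe buffer, and release it. xencall_open()/xencall_close() come from the same xencall.h header; the flag value 0, the two-page size, and the 4 KiB page size are assumptions of the example.

    /* Minimal libxencall buffer usage sketch (link with -lxencall). */
    #include <string.h>
    #include <xencall.h>

    int main(void)
    {
        xencall_handle *xcall = xencall_open(NULL, 0);
        if ( !xcall )
            return 1;

        /* Buffers handed to hypercalls should come from this allocator so the
         * library can guarantee they are safe to pass to the hypervisor. */
        void *buf = xencall_alloc_buffer_pages(xcall, 2);
        if ( buf )
        {
            memset(buf, 0, 2 * 4096);            /* ... use the buffer ... */
            xencall_free_buffer_pages(xcall, buf, 2);
        }

        xencall_close(xcall);
        return 0;
    }
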
/xen/tools/firmware/hvmloader/

  pci.c
      381  unsigned int nr_pages = min_t(  in pci_setup()  local
      387  hvm_info->low_mem_pgend -= nr_pages;  in pci_setup()
      390  nr_pages,  in pci_setup()
      397  xatp.size = nr_pages;  in pci_setup()
      400  hvm_info->high_mem_pgend += nr_pages;  in pci_setup()

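The pci_setup() hits trace the low-memory relocation dance: shrink low_mem_pgend by nr_pages, remap that range of guest frames above 4 GiB, then grow high_mem_pgend by the same amount. The fragment below reconstructs that pattern from the hits plus recollection of hvmloader; the XENMEM_add_to_physmap/XENMAPSPACE_gmfn_range usage, the hypercall_memory_op() wrapper, and the 2^16 - 1 batch limit are assumptions to verify against the real pci.c.

    /* Reconstruction of the RAM-relocation pattern in pci_setup() -- a sketch,
     * not the verbatim firmware code.  Structure fields and the batch limit
     * are assumptions. */
    while ( (pci_mem_start >> PAGE_SHIFT) < hvm_info->low_mem_pgend )
    {
        struct xen_add_to_physmap xatp;
        unsigned int nr_pages = min_t(
            unsigned int,
            hvm_info->low_mem_pgend - (pci_mem_start >> PAGE_SHIFT),
            (1u << 16) - 1);                     /* per-call batch limit (assumed)  */

        hvm_info->low_mem_pgend -= nr_pages;     /* RAM disappears below the hole   */

        xatp.domid = DOMID_SELF;
        xatp.space = XENMAPSPACE_gmfn_range;
        xatp.idx   = hvm_info->low_mem_pgend;    /* first frame being moved         */
        xatp.gpfn  = hvm_info->high_mem_pgend;   /* ... reappears above 4 GiB       */
        xatp.size  = nr_pages;
        if ( hypercall_memory_op(XENMEM_add_to_physmap, &xatp) != 0 )
            BUG();

        hvm_info->high_mem_pgend += nr_pages;
    }
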
/xen/stubdom/grub/

  kexec.c
       89  for (source_pfn = 0; source_pfn < start_info.nr_pages; source_pfn++)  in do_exchange()
       92  ASSERT(source_pfn < start_info.nr_pages);  in do_exchange()
      279  dom->total_pages = start_info.nr_pages;  in kexec()
      386  for (nr_m2p_updates = pfn = 0; pfn < start_info.nr_pages; pfn++)  in kexec()
      391  for (i = pfn = 0; pfn < start_info.nr_pages; pfn++)  in kexec()

/xen/xen/include/asm-x86/

  dom0_build.h
       27  unsigned long nr_pages);

/xen/tools/python/xen/migration/

  libxc.py
      276  nr_pages = 0
      290  nr_pages += 1
      292  pagesz = nr_pages * 4096

/xen/tools/python/scripts/

  convert-legacy-stream
      392  nr_pages = len([x for x in pfns if (x & 0xf0000000) < 0xd0000000])
      393  pages = rdexact(nr_pages * 4096)
      512  _, nr_pages = unpack_exact("=IQ")
      513  info(" IOREQ server pages: %d" % (nr_pages, ))
      515  [public.HVM_PARAM_NR_IOREQ_SERVER_PAGES, nr_pages])

/xen/xen/include/xen/

  mm.h
      577  static inline unsigned int get_order_from_pages(unsigned long nr_pages)  in get_order_from_pages()  argument
      581  nr_pages--;  in get_order_from_pages()
      582  for ( order = 0; nr_pages; order++ )  in get_order_from_pages()
      583  nr_pages >>= 1;  in get_order_from_pages()

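The mm.h hit is essentially the whole algorithm: decrement once, then count how many right shifts it takes to reach zero, giving the smallest order such that 2^order covers nr_pages. A standalone copy with a few sample values (behaviour for nr_pages == 0 is not defined here and is not exercised):

    /* get_order_from_pages() as shown in the mm.h hits, wrapped in a tiny
     * driver so the rounding behaviour is easy to see. */
    #include <stdio.h>

    static unsigned int get_order_from_pages(unsigned long nr_pages)
    {
        unsigned int order;

        nr_pages--;
        for ( order = 0; nr_pages; order++ )
            nr_pages >>= 1;

        return order;
    }

    int main(void)
    {
        unsigned long samples[] = { 1, 2, 3, 4, 511, 512, 513 };

        for ( unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++ )
            printf("get_order_from_pages(%lu) = %u\n",
                   samples[i], get_order_from_pages(samples[i]));
        return 0;   /* prints orders 0, 1, 2, 2, 9, 9, 10 */
    }
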
/xen/tools/misc/

  xen-lowmemd.c
       67  dom0_pages = (unsigned long long) dom0_info.nr_pages;  in handle_low_mem()

/xen/xen/common/

  page_alloc.c
     1769  struct page_info *pg, unsigned long nr_pages)  in init_heap_pages()  argument
     1780  if ( nr_pages-- <= 1 )  in init_heap_pages()
     1799  for ( i = 0; i < nr_pages; i++ )  in init_heap_pages()
     1806  unsigned long e = mfn_x(mfn_add(page_to_mfn(pg + nr_pages - 1), 1));  in init_heap_pages()
     1812  n = init_node_heap(nid, mfn_x(page_to_mfn(pg + i)), nr_pages - i,  in init_heap_pages()
     1814  BUG_ON(i + n > nr_pages);  in init_heap_pages()
     1820  if ( i + n == nr_pages )  in init_heap_pages()
     1822  nr_pages -= n;  in init_heap_pages()