Lines matching refs: nr_pages
66 static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size; variable
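The declaration on line 66 holds the four globals that size every mapping below: each area is nr_pages * page_size bytes, and nr_pages itself is derived per CPU (line 1673). A minimal sketch of that derivation, assuming the usual sysconf(3) sources and a hypothetical bytes argument:

    #include <unistd.h>

    static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;

    static void init_sizes(unsigned long bytes)
    {
        page_size = sysconf(_SC_PAGE_SIZE);
        nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
        /* split the requested test size evenly across CPUs */
        nr_pages_per_cpu = bytes / page_size / nr_cpus;
        nr_pages = nr_pages_per_cpu * nr_cpus;      /* as on line 1673 */
    }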
208 if (madvise(rel_area, nr_pages * page_size, MADV_DONTNEED)) in anon_release_pages()
214 *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, in anon_allocate_area()
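Lines 208 and 214 are the anonymous-memory backend: release is MADV_DONTNEED across the whole area, allocation is one private anonymous mapping of the same length. A hedged sketch of the pair, using the file's globals and simplified error handling:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    static void anon_release_pages(char *rel_area)
    {
        /* zap the pages so the next access faults from scratch */
        if (madvise(rel_area, nr_pages * page_size, MADV_DONTNEED))
            perror("madvise(MADV_DONTNEED)"), exit(1);
    }

    static void anon_allocate_area(void **alloc_area)
    {
        *alloc_area = mmap(NULL, nr_pages * page_size,
                           PROT_READ | PROT_WRITE,
                           MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        if (*alloc_area == MAP_FAILED)
            perror("mmap"), exit(1);
    }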
230 rel_area == huge_fd_off0 ? 0 : nr_pages * page_size, in hugetlb_release_pages()
231 nr_pages * page_size)) in hugetlb_release_pages()
240 *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, in hugetlb_allocate_area()
245 *alloc_area == area_src ? 0 : nr_pages * page_size); in hugetlb_allocate_area()
250 area_alias = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, in hugetlb_allocate_area()
253 nr_pages * page_size); in hugetlb_allocate_area()
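The hugetlb backend (lines 230-253) keeps both halves of the test in one hugetlbfs file: the source area sits at offset 0 and the destination at offset nr_pages * page_size, which is why the release path picks its fallocate() offset by comparing the area against huge_fd_off0. The allocation side then maps the matching half of the same fd, plus an alias mapping of it. A sketch of the release path under those assumptions, with huge_fd and huge_fd_off0 taken to be the file's globals:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void hugetlb_release_pages(char *rel_area)
    {
        /* punch a hole in whichever half this area occupies */
        if (fallocate(huge_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                      rel_area == huge_fd_off0 ? 0 : nr_pages * page_size,
                      nr_pages * page_size))
            perror("fallocate"), exit(1);
    }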
283 if (madvise(rel_area, nr_pages * page_size, MADV_REMOVE)) in shmem_release_pages()
291 unsigned long offset = is_src ? 0 : nr_pages * page_size; in shmem_allocate_area()
293 *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, in shmem_allocate_area()
298 area_alias = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, in shmem_allocate_area()
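The shmem backend mirrors that layout (lines 283-298): MADV_REMOVE frees the backing shmem pages rather than just the mappings, and allocation maps the src or dst half of a shared fd by offset, plus an alias of the same half. A sketch, assuming shm_fd is the shared file descriptor set up in main():

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    static void shmem_release_pages(char *rel_area)
    {
        /* MADV_REMOVE drops the pages from the shmem file itself */
        if (madvise(rel_area, nr_pages * page_size, MADV_REMOVE))
            perror("madvise(MADV_REMOVE)"), exit(1);
    }

    static void shmem_allocate_area(void **alloc_area, bool is_src)
    {
        unsigned long offset = is_src ? 0 : nr_pages * page_size;

        *alloc_area = mmap(NULL, nr_pages * page_size,
                           PROT_READ | PROT_WRITE, MAP_SHARED,
                           shm_fd, offset);
        if (*alloc_area == MAP_FAILED)
            perror("mmap"), exit(1);
    }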
400 if (munmap(*area, nr_pages * page_size)) in munmap_area()
448 count_verify = malloc(nr_pages * sizeof(unsigned long long)); in uffd_test_ctx_init()
452 for (nr = 0; nr < nr_pages; nr++) { in uffd_test_ctx_init()
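uffd_test_ctx_init() (lines 448-452) keeps one unsigned long long per page in count_verify and walks every page once to seed it. A hedged sketch of the idea; the exact per-page value the real test stores is not visible in this listing, so the seeding below is an assumption for illustration:

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long long *count_verify;

    static void init_count_verify(void)
    {
        unsigned long nr;

        count_verify = malloc(nr_pages * sizeof(unsigned long long));
        if (!count_verify)
            perror("malloc"), exit(1);
        for (nr = 0; nr < nr_pages; nr++) {
            /* assumed scheme: remember a nonzero counter per src page,
             * compared against the page contents after each bounce */
            count_verify[nr] = 1 + (unsigned long long)random();
        }
    }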
560 page_nr %= nr_pages; in locking_thread()
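Line 560 wraps a randomly built page number into range. One plausible construction (the widening step is an assumption, needed because rand_r(3) yields fewer bits than an unsigned long; seed is a hypothetical per-thread variable):

    #include <stdlib.h>

    static unsigned int seed;           /* hypothetical per-thread seed */

    unsigned long page_nr = rand_r(&seed);

    if (sizeof(page_nr) > sizeof(unsigned int))
        page_nr |= ((unsigned long)rand_r(&seed) << 16) << 16;
    page_nr %= nr_pages;                /* any index in [0, nr_pages) */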
607 if (offset >= nr_pages * page_size) in __copy_page()
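Line 607 is a bounds check: __copy_page() refuses offsets at or past the end of the area before resolving the fault. A hedged sketch of the resolution itself via the UFFDIO_COPY ioctl, using the file's area_src/area_dst globals (helper name is hypothetical):

    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    static void copy_page_at(int ufd, unsigned long offset)
    {
        struct uffdio_copy uffdio_copy;

        if (offset >= nr_pages * page_size)
            exit(1);                    /* line 607's guard */

        uffdio_copy.dst = (unsigned long)area_dst + offset;
        uffdio_copy.src = (unsigned long)area_src + offset;
        uffdio_copy.len = page_size;
        uffdio_copy.mode = 0;
        uffdio_copy.copy = 0;
        if (ioctl(ufd, UFFDIO_COPY, &uffdio_copy))
            perror("UFFDIO_COPY"), exit(1);
    }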
926 split_nr_pages = (nr_pages + 1) / 2; in faulting_process()
928 split_nr_pages = nr_pages; in faulting_process()
988 area_dst = mremap(area_dst, nr_pages * page_size, nr_pages * page_size, in faulting_process()
995 for (; nr < nr_pages; nr++) { in faulting_process()
1010 for (nr = 0; nr < nr_pages; nr++) in faulting_process()
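faulting_process() (lines 926-1010) faults in only the first half of the area when testing lazy resolution ((nr_pages + 1) / 2 on line 926), moves the destination with an equal-size mremap() on line 988, then touches the remaining pages and re-checks all of them. Only the equal old/new sizes appear in the listing; the mremap flags and target below, and the area_count() accessor, are assumptions for illustration:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>

    /* hypothetical accessor: where the test's counter lives in page nr */
    static unsigned long long *area_count(char *area, unsigned long nr)
    {
        return (unsigned long long *)(area + nr * page_size);
    }

    /* inside faulting_process(), after the first half is faulted in: */
    area_dst = mremap(area_dst, nr_pages * page_size, nr_pages * page_size,
                      MREMAP_MAYMOVE | MREMAP_FIXED, area_src);
    if (area_dst == MAP_FAILED)
        perror("mremap"), exit(1);

    for (nr = split_nr_pages; nr < nr_pages; nr++)
        if (*area_count(area_dst, nr) != count_verify[nr])
            exit(1);    /* first touch after the move must still resolve */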
1041 if (offset >= nr_pages * page_size) in __uffdio_zeropage()
1087 uffdio_register.range.len = nr_pages * page_size; in userfaultfd_zeropage_test()
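Lines 1041 and 1087 pair the same bounds check with the zeropage path: the test registers the whole destination for missing faults, then asks the kernel to map the zero page instead of copying. A sketch of both ioctls:

    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    struct uffdio_register uffdio_register;
    struct uffdio_zeropage uffdio_zeropage;

    uffdio_register.range.start = (unsigned long)area_dst;
    uffdio_register.range.len = nr_pages * page_size;   /* line 1087 */
    uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
    if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register))
        perror("UFFDIO_REGISTER"), exit(1);

    uffdio_zeropage.range.start = (unsigned long)area_dst + offset;
    uffdio_zeropage.range.len = page_size;
    uffdio_zeropage.mode = 0;
    if (ioctl(uffd, UFFDIO_ZEROPAGE, &uffdio_zeropage))
        perror("UFFDIO_ZEROPAGE"), exit(1);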
1124 uffdio_register.range.len = nr_pages * page_size; in userfaultfd_events_test()
1154 return stats.missing_faults != nr_pages; in userfaultfd_events_test()
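The events test (lines 1124-1154) succeeds only when the fault-handling side saw exactly one missing fault per page, hence the != nr_pages comparison. A hedged sketch of how such a reader classifies messages (stats is a hypothetical per-run counter struct):

    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    struct uffd_msg msg;

    if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
        perror("read"), exit(1);
    if (msg.event == UFFD_EVENT_PAGEFAULT) {
        if (msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_MINOR)
            stats->minor_faults++;      /* populated page, PTE missing */
        else
            stats->missing_faults++;    /* page itself missing */
    }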
1176 uffdio_register.range.len = nr_pages * page_size; in userfaultfd_sig_test()
1235 uffdio_register.range.len = nr_pages * page_size; in userfaultfd_minor_test()
1247 for (p = 0; p < nr_pages; ++p) { in userfaultfd_minor_test()
1265 for (p = 0; p < nr_pages; ++p) { in userfaultfd_minor_test()
1280 return stats.missing_faults != 0 || stats.minor_faults != nr_pages; in userfaultfd_minor_test()
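The minor-fault test (lines 1235-1280) inverts the expectation: after registering in minor mode and pre-populating every page through the alias mapping, it demands zero missing faults and exactly nr_pages minor faults, each resolved with UFFDIO_CONTINUE. A sketch of the resolution ioctl:

    #include <linux/userfaultfd.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    struct uffdio_continue uffdio_continue;

    /* tell the kernel the page contents already exist; just map them */
    uffdio_continue.range.start = (unsigned long)area_dst + offset;
    uffdio_continue.range.len = page_size;
    uffdio_continue.mode = 0;
    if (ioctl(uffd, UFFDIO_CONTINUE, &uffdio_continue))
        perror("UFFDIO_CONTINUE"), exit(1);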
1353 if (test_pgsize > nr_pages * page_size) in userfaultfd_pagemap_test()
1364 if (madvise(area_dst, nr_pages * page_size, MADV_HUGEPAGE)) in userfaultfd_pagemap_test()
1368 if (madvise(area_dst, nr_pages * page_size, MADV_NOHUGEPAGE)) in userfaultfd_pagemap_test()
1373 uffdio_register.range.len = nr_pages * page_size; in userfaultfd_pagemap_test()
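The pagemap test (lines 1353-1373) skips any test_pgsize larger than the whole area, flips MADV_HUGEPAGE or MADV_NOHUGEPAGE over the destination depending on whether THP is being exercised, and registers the full range. Checking the result means reading /proc/self/pagemap, one u64 per virtual page; a sketch of that read (helper name is hypothetical, layout is the standard pagemap format):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    static uint64_t pagemap_get_entry(int fd, char *addr)
    {
        uint64_t entry;

        /* one 8-byte entry per virtual page, indexed by page number */
        if (pread(fd, &entry, sizeof(entry),
                  ((uintptr_t)addr / page_size) * sizeof(entry)) !=
            sizeof(entry))
            perror("pread"), exit(1);
        return entry;   /* bit 63: present, bit 62: swapped, low bits: PFN */
    }

    /* usage: int fd = open("/proc/self/pagemap", O_RDONLY); */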
1459 uffdio_register.range.len = nr_pages * page_size; in userfaultfd_stress()
1510 nr_pages * page_size, false); in userfaultfd_stress()
1524 for (nr = 0; nr < nr_pages; nr++) in userfaultfd_stress()
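After each stress bounce (lines 1459-1510 register the destination and later unregister the full range), line 1524's loop walks every destination page and compares the counter it carries against count_verify. A sketch, reusing the hypothetical area_count() accessor introduced above:

    #include <stdio.h>
    #include <stdlib.h>

    unsigned long nr;

    for (nr = 0; nr < nr_pages; nr++) {
        if (*area_count(area_dst, nr) != count_verify[nr]) {
            fprintf(stderr, "page %lu: %llu != %llu\n", nr,
                    *area_count(area_dst, nr), count_verify[nr]);
            exit(1);
        }
    }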
1673 nr_pages = nr_pages_per_cpu * nr_cpus; in main()
1687 if (ftruncate(shm_fd, nr_pages * page_size * 2)) in main()
1691 nr_pages * page_size * 2)) in main()
1695 nr_pages, nr_pages_per_cpu); in main()
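main() (lines 1673-1695) fixes nr_pages as nr_pages_per_cpu * nr_cpus and, for the shmem backend, sizes the shared file at twice the area so the src and dst halves match the offsets used by shmem_allocate_area(). A sketch; memfd_create() is an assumption for how shm_fd is obtained:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <unistd.h>

    nr_pages = nr_pages_per_cpu * nr_cpus;              /* line 1673 */

    shm_fd = memfd_create("uffd-shmem", 0);     /* assumed fd source */
    if (shm_fd < 0)
        perror("memfd_create"), exit(1);

    /* 2x: [0, N) backs area_src, [N, 2N) backs area_dst */
    if (ftruncate(shm_fd, nr_pages * page_size * 2))
        perror("ftruncate"), exit(1);
    if (fallocate(shm_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0,
                  nr_pages * page_size * 2))
        perror("fallocate"), exit(1);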