/linux/tools/testing/selftests/vm/
mremap_dontunmap.c
    22: unsigned long page_size;  (variable)
    56: mremap(source_mapping, num_pages * page_size, num_pages * page_size,  in kernel_support_for_mremap_dontunmap()
    74: BUG_ON(size & (page_size - 1),  in check_region_contains_byte()
    87: memcmp(addr + (i * page_size), page_buffer, page_size);  in check_region_contains_byte()
    111: mremap(source_mapping, num_pages * page_size, num_pages * page_size,  in mremap_dontunmap_simple()
    152: mremap(source_mapping, num_pages * page_size, num_pages * page_size,  in mremap_dontunmap_simple_shmem()
    203: mremap(source_mapping, num_pages * page_size, num_pages * page_size,  in mremap_dontunmap_simple_fixed()
    254: mremap(source_mapping + (5 * page_size), 5 * page_size,  in mremap_dontunmap_partial_mapping()
    255: 5 * page_size,  in mremap_dontunmap_partial_mapping()
    264: (source_mapping + (5 * page_size), 5 * page_size, 0) != 0,  in mremap_dontunmap_partial_mapping()
    [all …]
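All of the matches above exercise mremap() with MREMAP_DONTUNMAP, which moves a mapping's pages while leaving the source range mapped and backed by fresh zero pages. A minimal sketch of that behaviour, assuming a private anonymous mapping; it is an illustration, not code from the selftest (the fallback #define covers libc headers that predate the Linux 5.7 flag):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #ifndef MREMAP_DONTUNMAP        /* Linux 5.7+; older headers may lack it */
    #define MREMAP_DONTUNMAP 4
    #endif

    int main(void)
    {
        size_t page_size = sysconf(_SC_PAGE_SIZE);
        size_t len = 5 * page_size;
        void *src, *dst;

        src = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (src == MAP_FAILED)
            return 1;
        memset(src, 0xaa, len);

        /* MREMAP_DONTUNMAP requires MREMAP_MAYMOVE and equal old/new sizes */
        dst = mremap(src, len, len, MREMAP_MAYMOVE | MREMAP_DONTUNMAP, NULL);
        if (dst == MAP_FAILED)
            return 1;

        /* src stays mapped but reads back as zeroes; dst has the old data */
        printf("src: %#x  dst: %#x\n",
               ((unsigned char *)src)[0], ((unsigned char *)dst)[0]);
        munmap(src, len);
        munmap(dst, len);
        return 0;
    }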
mlock2-tests.c
    204: if (mlock2_(map, 2 * page_size, 0)) {  in test_mlock_lock()
    217: if (munlock(map, 2 * page_size)) {  in test_mlock_lock()
    225: munmap(map, 2 * page_size);  in test_mlock_lock()
    280: if (munlock(map, 2 * page_size)) {  in test_mlock_onfault()
    291: munmap(map, 2 * page_size);  in test_mlock_onfault()
    327: munmap(map, 2 * page_size);  in test_lock_onfault_of_present()
    362: munmap(map, 2 * page_size);  in test_munlockall()
    404: munmap(map, 2 * page_size);  in test_munlockall()
    453: if (munlock(map + page_size, page_size)) {  in test_vma_management()
    472: if (munlock(map, page_size * 3)) {  in test_vma_management()
    [all …]
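mlock2_() in the test is a thin wrapper around the mlock2() syscall, whose MLOCK_ONFAULT flag locks pages only as they are faulted in. A hedged sketch of that lock/unlock/unmap pattern; the raw syscall() call and the MLOCK_ONFAULT fallback define are assumptions, not lines from the test:

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef MLOCK_ONFAULT           /* older libc headers may lack it */
    #define MLOCK_ONFAULT 0x01
    #endif

    int main(void)
    {
        long page_size = sysconf(_SC_PAGE_SIZE);
        void *map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (map == MAP_FAILED)
            return 1;

        /* pages become resident (and locked) only when first touched */
        if (syscall(SYS_mlock2, map, 2 * page_size, MLOCK_ONFAULT))
            return 1;
        ((char *)map)[0] = 1;       /* fault in the first page */

        if (munlock(map, 2 * page_size))
            return 1;
        return munmap(map, 2 * page_size);
    }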
ksm_tests.c
    311: memset(map_ptr + page_size, '+', 1);  in check_ksm_unmerge()
    396: size_t page_size)  in check_ksm_numa_merge()  (argument)
    428: memset(numa1_map_ptr, '*', page_size);  in check_ksm_numa_merge()
    429: memset(numa2_map_ptr, '*', page_size);  in check_ksm_numa_merge()
    447: numa_free(numa1_map_ptr, page_size);  in check_ksm_numa_merge()
    448: numa_free(numa2_map_ptr, page_size);  in check_ksm_numa_merge()
    453: numa_free(numa1_map_ptr, page_size);  in check_ksm_numa_merge()
    454: numa_free(numa2_map_ptr, page_size);  in check_ksm_numa_merge()
    770: page_size);  in main()
    778: merge_across_nodes, page_size);  in main()
    [all …]
map_fixed_noreplace.c
    36: page_size = sysconf(_SC_PAGE_SIZE);  in main()
    43: size = 5 * page_size;  in main()
    63: addr = BASE_ADDRESS + page_size;  in main()
    64: size = 3 * page_size;  in main()
    84: size = 5 * page_size;  in main()
    105: size = page_size;  in main()
    125: size = 2 * page_size;  in main()
    145: size = 2 * page_size;  in main()
    165: size = page_size;  in main()
    185: size = page_size;  in main()
    [all …]
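The test maps regions of various sizes with MAP_FIXED_NOREPLACE, which places a mapping at the requested address but fails with EEXIST instead of silently replacing an existing mapping (unlike plain MAP_FIXED). A minimal sketch of that behaviour; BASE_ADDRESS and the test's own checks are not reproduced here:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #ifndef MAP_FIXED_NOREPLACE     /* Linux 4.17+; older headers may lack it */
    #define MAP_FIXED_NOREPLACE 0x100000
    #endif

    int main(void)
    {
        long page_size = sysconf(_SC_PAGE_SIZE);
        /* first mapping: let the kernel pick an address */
        void *base = mmap(NULL, 5 * page_size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (base == MAP_FAILED)
            return 1;

        /* second mapping overlaps the first, so it must fail with EEXIST */
        void *p = mmap(base, page_size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
        if (p == MAP_FAILED && errno == EEXIST)
            printf("overlap correctly rejected\n");
        else if (p != MAP_FAILED)
            munmap(p, page_size);

        return munmap(base, 5 * page_size);
    }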
userfaultfd.c
    253: nr_pages * page_size);  in hugetlb_allocate_area()
    962: page_size, false);  in faulting_process()
    988: area_dst = mremap(area_dst, nr_pages * page_size, nr_pages * page_size,  in faulting_process()
    1011: if (my_bcmp(area_dst + nr * page_size, zeropage, page_size))  in faulting_process()
    1055: if (res != page_size) {  in __uffdio_zeropage()
    1249: page_size);  in userfaultfd_minor_test()
    1262: if (posix_memalign(&expected_page, page_size, page_size))  in userfaultfd_minor_test()
    1269: page_size))  in userfaultfd_minor_test()
    1427: if (posix_memalign(&area, page_size, page_size))  in userfaultfd_stress()
    1617: if (!page_size)  in set_test_type()
    [all …]
khugepaged.c
    20: static unsigned long page_size;  (variable)
    455: for (i = start / page_size; i < end / page_size; i++)  in fill_memory()
    463: for (i = start / page_size; i < end / page_size; i++) {  in validate_memory()
    566: fill_memory(p, 0, page_size);  in collapse_single_pte_entry()
    707: madvise(p + page_size, hpage_pmd_size - page_size, MADV_DONTNEED);  in collapse_single_pte_entry_compound()
    775: (i + 1) * page_size,  in collapse_compound_extreme()
    784: (i + 1) * page_size,  in collapse_compound_extreme()
    820: fill_memory(p, 0, page_size);  in collapse_fork()
    837: fill_memory(p, page_size, 2 * page_size);  in collapse_fork()
    896: fill_memory(p, 0, page_size);  in collapse_fork_compound()
    [all …]
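fill_memory() and validate_memory() walk the region one page at a time, as the loop bounds above show. A sketch of what such helpers can look like; the 0xdead0000 pattern and the exact word written per page are assumptions, only the per-page loop structure comes from the listing:

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static unsigned long page_size;

    /* write one distinctive word at the start of every page in [start, end) */
    static void fill_memory(int *p, unsigned long start, unsigned long end)
    {
        for (unsigned long i = start / page_size; i < end / page_size; i++)
            p[i * page_size / sizeof(*p)] = i + 0xdead0000;
    }

    static bool validate_memory(int *p, unsigned long start, unsigned long end)
    {
        for (unsigned long i = start / page_size; i < end / page_size; i++)
            if (p[i * page_size / sizeof(*p)] != (int)(i + 0xdead0000))
                return false;
        return true;
    }

    int main(void)
    {
        page_size = sysconf(_SC_PAGE_SIZE);
        int *p = mmap(NULL, 4 * page_size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;
        fill_memory(p, 0, 4 * page_size);
        printf("pattern intact: %d\n", validate_memory(p, 0, 4 * page_size));
        return munmap(p, 4 * page_size);
    }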
mremap_test.c
    52: #define PTE page_size
    267: int page_size;  in main()  (local)
    278: page_size = sysconf(_SC_PAGESIZE);  in main()
    281: test_cases[0] = MAKE_TEST(page_size, page_size, page_size,  in main()
    285: test_cases[1] = MAKE_TEST(page_size, page_size/4, page_size,  in main()
    288: test_cases[2] = MAKE_TEST(page_size/4, page_size, page_size,  in main()
    321: perf_test_cases[0] = MAKE_TEST(page_size, page_size, _1GB, NON_OVERLAPPING, EXPECT_SUCCESS,  in main()
hmm-tests.c
    59: unsigned int page_size;  in FIXTURE()  (local)
    67: unsigned int page_size;  in FIXTURE()  (local)
    1022: ret = munmap(buffer->ptr + self->page_size, self->page_size);  in TEST_F()
    1030: ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,  in TEST_F()
    1038: ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,  in TEST_F()
    1043: ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,  in TEST_F()
    1285: ret = munmap(buffer->ptr + self->page_size, self->page_size);  in TEST_F()
    1289: ret = mprotect(buffer->ptr + 2 * self->page_size, self->page_size,  in TEST_F()
    1297: ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,  in TEST_F()
    1302: ret = mprotect(buffer->ptr + 3 * self->page_size, self->page_size,  in TEST_F()
    [all …]
memfd_secret.c
    37: static unsigned long page_size;  (variable)
    181: mem = mmap(NULL, page_size, prot, mode, fd, 0);  in test_remote_access()
    187: ftruncate(fd, page_size);  in test_remote_access()
    188: memset(mem, PATTERN, page_size);  in test_remote_access()
    233: page_size = sysconf(_SC_PAGE_SIZE);  in prepare()
    234: if (!page_size)  in prepare()
    246: page_size, mlock_limit_cur, mlock_limit_max);  in prepare()
    248: if (page_size > mlock_limit_cur)  in prepare()
    249: mlock_limit_cur = page_size;  in prepare()
    250: if (page_size > mlock_limit_max)  in prepare()
    [all …]
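prepare() compares page_size against the current and maximum RLIMIT_MEMLOCK values, since secret-memory pages are charged against the mlock limit. A hedged sketch of that check; bumping the limit with setrlimit() is an assumption about what the test does with the adjusted values, it is not shown in the listing:

    #include <stdio.h>
    #include <sys/resource.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned long page_size = sysconf(_SC_PAGE_SIZE);
        struct rlimit rlim;

        if (!page_size || getrlimit(RLIMIT_MEMLOCK, &rlim))
            return 1;

        printf("page_size: %lu, RLIMIT_MEMLOCK cur: %llu max: %llu\n",
               page_size, (unsigned long long)rlim.rlim_cur,
               (unsigned long long)rlim.rlim_max);

        /* make sure at least one page may be locked */
        if (page_size > rlim.rlim_cur) {
            rlim.rlim_cur = page_size;
            if (page_size > rlim.rlim_max)
                rlim.rlim_max = page_size;   /* raising this needs privilege */
            if (setrlimit(RLIMIT_MEMLOCK, &rlim))
                perror("setrlimit");
        }
        return 0;
    }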
/linux/tools/testing/selftests/mincore/
mincore_selftest.c
    33: int page_size;  in TEST()  (local)
    73: munmap(addr, page_size);  in TEST()
    88: int page_size;  in TEST()  (local)
    107: mlock(addr, page_size);  in TEST()
    118: munlock(addr, page_size);  in TEST()
    125: munmap(addr, page_size);  in TEST()
    144: int page_size;  in TEST()  (local)
    165: mlock(addr, page_size);  in TEST()
    173: munmap(addr, page_size);  in TEST()
    193: int page_size;  in TEST()  (local)
    [all …]
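These TEST() cases map a region, touch or mlock() parts of it, and use mincore() to see which pages are resident; mincore() wants one status byte per page of the queried range. A minimal illustration of the pattern, not one of the actual test cases:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int page_size = sysconf(_SC_PAGE_SIZE);
        int npages = 4;
        unsigned char *vec = calloc(npages, 1);
        char *addr = mmap(NULL, npages * page_size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (!vec || addr == MAP_FAILED)
            return 1;

        addr[0] = 1;    /* fault in only the first page */
        if (mincore(addr, npages * page_size, vec))
            return 1;
        for (int i = 0; i < npages; i++)
            printf("page %d resident: %d\n", i, vec[i] & 1);

        munmap(addr, npages * page_size);
        free(vec);
        return 0;
    }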
/linux/drivers/misc/habanalabs/common/mmu/
mmu.c
    163: else if ((page_size % prop->pmmu_huge.page_size) == 0)  in hl_mmu_unmap_page()
    173: if ((page_size % mmu_prop->page_size) == 0) {  in hl_mmu_unmap_page()
    187: page_size, mmu_prop->page_size >> 10);  in hl_mmu_unmap_page()
    263: if ((page_size % mmu_prop->page_size) == 0) {  in hl_mmu_map_page()
    279: page_size, mmu_prop->page_size >> 10);  in hl_mmu_map_page()
    359: page_size = prop->dmmu.page_size;  in hl_mmu_map_contiguous()
    362: page_size = prop->pmmu.page_size;  in hl_mmu_map_contiguous()
    365: page_size = prop->pmmu_huge.page_size;  in hl_mmu_map_contiguous()
    417: page_size = prop->dmmu.page_size;  in hl_mmu_unmap_contiguous()
    420: page_size = prop->pmmu.page_size;  in hl_mmu_unmap_contiguous()
    [all …]
/linux/tools/testing/selftests/bpf/prog_tests/
ringbuf.c
    90: int page_size = getpagesize();  in test_ringbuf()  (local)
    97: skel->maps.ringbuf.max_entries = page_size;  in test_ringbuf()
    107: tmp_ptr = mremap(mmap_ptr, page_size, 2 * page_size, MREMAP_MAYMOVE);  in test_ringbuf()
    111: ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");  in test_ringbuf()
    114: mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, page_size);  in test_ringbuf()
    120: mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);  in test_ringbuf()
    124: mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 3 * page_size);  in test_ringbuf()
    126: mmap_ptr = mmap(NULL, 2 * page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);  in test_ringbuf()
    140: mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, page_size);  in test_ringbuf()
    167: CHECK(skel->bss->ring_size != page_size,  in test_ringbuf()
    [all …]
mmap.c
    12: long page_size = sysconf(_SC_PAGE_SIZE);  in roundup_page()  (local)
    13: return (sz + page_size - 1) / page_size * page_size;  in roundup_page()
    57: munmap(tmp1, page_size);  in test_mmap()
    209: munmap(tmp0, 4 * page_size);  in test_mmap()
    214: err = munmap(tmp1 + page_size, page_size);  in test_mmap()
    216: munmap(tmp1, 4 * page_size);  in test_mmap()
    221: tmp2 = mmap(tmp1 + page_size, page_size, PROT_READ,  in test_mmap()
    224: munmap(tmp1, page_size);  in test_mmap()
    225: munmap(tmp1 + 2*page_size, 2 * page_size);  in test_mmap()
    250: munmap(tmp2, 4 * page_size);  in test_mmap()
    [all …]
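The two roundup_page() lines above are the complete helper: integer round-up of a size to the next multiple of the page size. Reproduced as a standalone sketch so the arithmetic is visible in one place:

    #include <stdio.h>
    #include <unistd.h>

    /* round sz up to the next multiple of the system page size */
    static long roundup_page(long sz)
    {
        long page_size = sysconf(_SC_PAGE_SIZE);

        return (sz + page_size - 1) / page_size * page_size;
    }

    int main(void)
    {
        /* with 4 KiB pages: 4096, 4096, 8192 */
        printf("%ld %ld %ld\n", roundup_page(1), roundup_page(4096), roundup_page(4097));
        return 0;
    }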
/linux/arch/powerpc/mm/
init_64.c
    186: unsigned long page_size)  in altmap_cross_boundary()  (argument)
    207: start = ALIGN_DOWN(start, page_size);  in vmemmap_populate()
    211: for (; start < end; start += page_size) {  in vmemmap_populate()
    221: if (vmemmap_populated(start, page_size))  in vmemmap_populate()
    250: int nr_pfns = page_size >> PAGE_SHIFT;  in vmemmap_populate()
    261: start, start + page_size, p);  in vmemmap_populate()
    313: start = ALIGN_DOWN(start, page_size);  in vmemmap_free()
    322: for (; start < end; start += page_size) {  in vmemmap_free()
    332: if (vmemmap_populated(start, page_size))  in vmemmap_free()
    347: if (page_size < PAGE_SIZE) {  in vmemmap_free()
    [all …]
/linux/drivers/pci/endpoint/
pci-epc-mem.c
    54: size_t page_size;  in pci_epc_multi_mem_init()  (local)
    70: page_size = windows[i].page_size;  in pci_epc_multi_mem_init()
    71: if (page_size < PAGE_SIZE)  in pci_epc_multi_mem_init()
    72: page_size = PAGE_SIZE;  in pci_epc_multi_mem_init()
    73: page_shift = ilog2(page_size);  in pci_epc_multi_mem_init()
    94: mem->window.page_size = page_size;  in pci_epc_multi_mem_init()
    125: mem_window.page_size = page_size;  in pci_epc_mem_init()
    239: size_t page_size;  in pci_epc_mem_free_addr()  (local)
    249: page_size = mem->window.page_size;  in pci_epc_mem_free_addr()
    250: page_shift = ilog2(page_size);  in pci_epc_mem_free_addr()
    [all …]
/linux/tools/testing/selftests/powerpc/primitives/
load_unaligned_zeropad.c
    38: static int page_size;  (variable)
    43: if (mprotect(mem_region + page_size, page_size, PROT_NONE)) {  in protect_region()
    53: if (mprotect(mem_region + page_size, page_size, PROT_READ|PROT_WRITE)) {  in unprotect_region()
    125: page_size = getpagesize();  in test_body()
    126: mem_region = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,  in test_body()
    131: for (i = 0; i < page_size; i++)  in test_body()
    134: memset(mem_region+page_size, 0, page_size);  in test_body()
    138: for (i = 0; i < page_size; i++)  in test_body()
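The test builds a two-page region and flips the second page between PROT_NONE and PROT_READ|PROT_WRITE so that loads running past the end of the first page fault predictably. A stripped-down sketch of that fixture, without the unaligned-load exercise itself:

    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int page_size = getpagesize();
        char *mem_region = mmap(NULL, page_size * 2, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (mem_region == MAP_FAILED)
            return 1;

        memset(mem_region, 'x', page_size);
        memset(mem_region + page_size, 0, page_size);

        /* protect_region() equivalent: accesses to the 2nd page now fault */
        if (mprotect(mem_region + page_size, page_size, PROT_NONE))
            return 1;
        /* unprotect_region() equivalent */
        if (mprotect(mem_region + page_size, page_size, PROT_READ | PROT_WRITE))
            return 1;

        return munmap(mem_region, page_size * 2);
    }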
/linux/drivers/misc/
vmw_balloon.c
    686: ctl->page_size);  in vmballoon_alloc_page_list()
    697: ctl->page_size);  in vmballoon_alloc_page_list()
    728: page_size);  in vmballoon_handle_one_result()
    944: ctl->page_size);  in vmballoon_release_refused_pages()
    947: ctl->page_size);  in vmballoon_release_refused_pages()
    1110: .page_size = b->max_page_size,  in vmballoon_inflate()
    1145: ctl.page_size);  in vmballoon_inflate()
    1161: ctl.page_size--;  in vmballoon_inflate()
    1274: ctl.page_size);  in vmballoon_deflate()
    1279: ctl.page_size);  in vmballoon_deflate()
    [all …]
/linux/drivers/mtd/spi-nor/
xilinx.c
    33: offset = addr % nor->page_size;  in s3an_convert_addr()
    34: page = addr / nor->page_size;  in s3an_convert_addr()
    35: page <<= (nor->page_size > 512) ? 10 : 9;  in s3an_convert_addr()
    67: nor->page_size = (nor->page_size == 264) ? 256 : 512;  in xilinx_nor_setup()
    68: nor->mtd.writebufsize = nor->page_size;  in xilinx_nor_setup()
    69: nor->mtd.size = 8 * nor->page_size * nor->info->n_sectors;  in xilinx_nor_setup()
    70: nor->mtd.erasesize = 8 * nor->page_size;  in xilinx_nor_setup()
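s3an_convert_addr() splits a linear address into a page number and an in-page offset, then shifts the page number onto a 512- or 1024-byte frame depending on the native page size. A standalone sketch of that arithmetic; recombining the halves with a bitwise OR is an assumption, only the split and the shift come from the lines above:

    #include <stdint.h>
    #include <stdio.h>

    /* map a linear flash address onto the S3AN page/offset addressing */
    static uint32_t s3an_convert_addr(uint32_t addr, uint32_t page_size)
    {
        uint32_t offset = addr % page_size;
        uint32_t page = addr / page_size;

        /* pages sit on 512-byte (<= 512) or 1024-byte (> 512) boundaries */
        page <<= (page_size > 512) ? 10 : 9;
        return page | offset;
    }

    int main(void)
    {
        printf("%#x\n", s3an_convert_addr(1000, 264));
        return 0;
    }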
/linux/tools/testing/selftests/kvm/lib/s390x/
processor.c
    18: TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",  in virt_pgd_alloc()
    19: vm->page_size);  in virt_pgd_alloc()
    55: TEST_ASSERT((gva % vm->page_size) == 0,  in virt_pg_map()
    58: gva, vm->page_size);  in virt_pg_map()
    63: TEST_ASSERT((gpa % vm->page_size) == 0,  in virt_pg_map()
    66: gva, vm->page_size);  in virt_pg_map()
    70: gva, vm->max_gfn, vm->page_size);  in virt_pg_map()
    94: TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",  in addr_gva2gpa()
    95: vm->page_size);  in addr_gva2gpa()
    166: TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",  in vm_vcpu_add_default()
    [all …]
/linux/drivers/misc/habanalabs/common/
memory.c
    98: phys_pg_pack->page_size = page_size;  in alloc_device_memory()
    116: page_size);  in alloc_device_memory()
    156: page_size);  in alloc_device_memory()
    882: phys_pg_pack->page_size = page_size;  in init_phys_pg_pack_from_userptr()
    933: u32 page_size = phys_pg_pack->page_size;  in map_phys_pg_pack()  (local)
    997: u32 page_size;  in unmap_phys_pg_pack()  (local)
    1000: page_size = phys_pg_pack->page_size;  in unmap_phys_pg_pack()
    1081: u32 page_size = hdev->asic_prop.pmmu.page_size,  in map_device_va()  (local)
    1104: if (phys_pg_pack->page_size == page_size) {  in map_device_va()
    1577: size_left = page_size;  in alloc_sgt_from_device_pages()
    [all …]
/linux/drivers/net/ethernet/qlogic/qed/
qed_chain.c
    22: params->page_size);  in qed_chain_init()
    24: params->page_size,  in qed_chain_init()
    33: chain->page_size = params->page_size;  in qed_chain_init()
    88: dma_free_coherent(dev, chain->page_size, virt, phys);  in qed_chain_free_next_ptr()
    101: dma_free_coherent(&cdev->pdev->dev, chain->page_size,  in qed_chain_free_single()
    207: virt = dma_alloc_coherent(dev, chain->page_size, &phys,  in qed_chain_alloc_next_ptr()
    290: virt = dma_alloc_coherent(dev, chain->page_size, &phys,  in qed_chain_alloc_pbl()
    326: if (!params->page_size)  in qed_chain_alloc()
    327: params->page_size = QED_CHAIN_PAGE_SIZE;  in qed_chain_alloc()
    334: params->page_size,  in qed_chain_alloc()
    [all …]
/linux/arch/powerpc/mm/book3s64/
radix_tlb.c
    571: unsigned long page_size,  in __tlbie_va_range_lpid()  (argument)
    628: unsigned long page_size;  (member)
    665: unsigned long page_size,  in _tlbie_va_range_lpid()  (argument)
    677: unsigned long pid, unsigned long page_size,  in _tlbiel_va_range_multicast()  (argument)
    682: .pid = pid, .page_size = page_size,  in _tlbiel_va_range_multicast()
    1118: unsigned long page_size = 1UL << page_shift;  in __radix__flush_tlb_range()  (local)
    1228: static int radix_get_mmu_psize(int page_size)  in radix_get_mmu_psize()  (argument)
    1248: unsigned long page_size)  in radix__flush_tlb_lpid_page()  (argument)
    1250: int psize = radix_get_mmu_psize(page_size);  in radix__flush_tlb_lpid_page()
    1286: int page_size = tlb->page_size;  in radix__tlb_flush()  (local)
    [all …]
/linux/drivers/infiniband/hw/mlx5/
mem.c
    48: rdma_umem_for_each_dma_block (umem, &biter, page_size) {  in mlx5_ib_populate_pas()
    66: unsigned long page_size;  in __mlx5_umem_find_best_quantized_pgoff()  (local)
    69: page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, pgoff_bitmask);  in __mlx5_umem_find_best_quantized_pgoff()
    70: if (!page_size)  in __mlx5_umem_find_best_quantized_pgoff()
    80: page_offset = ib_umem_dma_offset(umem, page_size);  in __mlx5_umem_find_best_quantized_pgoff()
    81: while (page_offset & ~(u64)(page_offset_mask * (page_size / scale))) {  in __mlx5_umem_find_best_quantized_pgoff()
    82: page_size /= 2;  in __mlx5_umem_find_best_quantized_pgoff()
    83: page_offset = ib_umem_dma_offset(umem, page_size);  in __mlx5_umem_find_best_quantized_pgoff()
    90: if (!(pgsz_bitmap & page_size))  in __mlx5_umem_find_best_quantized_pgoff()
    94: (unsigned long)page_offset / (page_size / scale);  in __mlx5_umem_find_best_quantized_pgoff()
    [all …]
/linux/tools/power/acpi/os_specific/service_layers/
osunixmap.c
    67: acpi_size page_size;  in acpi_os_map_memory()  (local)
    78: page_size = acpi_os_get_page_size();  in acpi_os_map_memory()
    79: offset = where % page_size;  in acpi_os_map_memory()
    112: acpi_size page_size;  in acpi_os_unmap_memory()  (local)
    114: page_size = acpi_os_get_page_size();  in acpi_os_unmap_memory()
    115: offset = ACPI_TO_INTEGER(where) % page_size;  in acpi_os_unmap_memory()
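acpi_os_map_memory() takes the requested address modulo the page size because mmap() only accepts page-aligned offsets: the mapping is made at the rounded-down offset and the remainder is added back to the returned pointer. A generic sketch of that pattern; map_at_offset() is an illustrative helper, not the ACPICA function:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* map 'length' bytes of fd starting at an arbitrary byte offset 'where' */
    static void *map_at_offset(int fd, off_t where, size_t length)
    {
        long page_size = sysconf(_SC_PAGE_SIZE);
        off_t offset = where % page_size;           /* in-page remainder */
        char *base = mmap(NULL, length + offset, PROT_READ, MAP_PRIVATE,
                          fd, where - offset);      /* page-aligned offset */

        return base == MAP_FAILED ? NULL : base + offset;
    }

    int main(void)
    {
        int fd = open("/proc/self/exe", O_RDONLY);  /* any readable file */
        unsigned char *p = map_at_offset(fd, 10, 16);

        if (p)
            printf("byte at file offset 10: %#x\n", p[0]);
        return 0;
    }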
/linux/tools/testing/selftests/powerpc/copyloops/
exc_validate.c
    81: int page_size;  in test_copy_exception()  (local)
    85: page_size = getpagesize();  in test_copy_exception()
    86: p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,  in test_copy_exception()
    94: memset(p, 0, page_size);  in test_copy_exception()
    98: if (mprotect(p + page_size, page_size, PROT_NONE)) {  in test_copy_exception()
    103: q = p + page_size - MAX_LEN;  in test_copy_exception()