Cross-reference hits for the identifier "npages" across the Linux source tree. Each entry lists the file, the matching lines (with their line numbers in that file), and the enclosing function; "[all …]" marks a hit list truncated by the search tool.

/linux/tools/testing/selftests/vm/

hmm-tests.c
     135: cmd.npages = npages;  in hmm_dmirror_cmd()
     227: unsigned long npages;  in TEST_F() (local)
     235: ASSERT_NE(npages, 0);  in TEST_F()
     291: unsigned long npages;  in TEST_F() (local)
     298: ASSERT_NE(npages, 0);  in TEST_F()
     998: npages = 6;  in TEST_F()
    1111: npages);  in TEST_F()
    1165: npages);  in TEST_F()
    1234: npages);  in TEST_F()
    1265: npages = 7;  in TEST_F()
    [all …]
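
The recurring shape in these tests: each TEST_F() computes npages from a buffer size, asserts it is non-zero, and passes it to the dmirror ioctl through cmd.npages. A minimal self-contained sketch in kselftest style, with the fixture and ioctl plumbing omitted and the page size taken from sysconf() instead of the real fixture's page_shift:

#include <unistd.h>
#include "../kselftest_harness.h"

/* Sketch only: reproduces the "npages = 6" hit with a plain
 * sysconf() page size; the real tests derive it from the fixture. */
TEST(npages_nonzero)
{
        unsigned long page_size = sysconf(_SC_PAGESIZE);
        unsigned long size = 6 * page_size;
        unsigned long npages = size / page_size;

        ASSERT_NE(npages, 0);
}

TEST_HARNESS_MAIN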

/linux/drivers/gpu/drm/i915/selftests/

scatterlist.c
      70: pfn += npages;  in expect_pfn_sg()
     209: unsigned long npages)  in page_contiguous() (argument)
     211: return first + npages == last;  in page_contiguous()
     242: pfn_to_page(pfn + npages),  in alloc_table()
     243: npages)) {  in alloc_table()
     256: pfn += npages;  in alloc_table()
     288: const npages_fn_t *npages;  in igt_sg_alloc() (local)
     292: for (npages = npages_funcs; *npages; npages++) {  in igt_sg_alloc()
     330: const npages_fn_t *npages;  in igt_sg_trim() (local)
     333: for (npages = npages_funcs; *npages; npages++) {  in igt_sg_trim()
     [all …]
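
The predicate at lines 209–211 decides whether a pfn run can be folded into one sg entry. Restated as a hedged sketch, with the struct page pointer arithmetic (one step per page) made explicit; the parameter types are inferred from the pfn_to_page() call site at line 242:

#include <linux/mm.h>

static bool page_contiguous(struct page *first, struct page *last,
                            unsigned long npages)
{
        /* the run [first, first + npages) is contiguous iff it ends at last */
        return first + npages == last;
}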

/linux/drivers/net/ethernet/mellanox/mlx5/core/

pagealloc.c
      53: s32 npages;  (member)
     176: s32 *npages, int boot)  in mlx5_cmd_query_pages() (argument)
     403: int npages = 0;  in release_all_pages() (local)
     429: u32 npages)  in fwp_fill_manage_pages_out() (argument)
     439: if (!--npages)  in fwp_fill_manage_pages_out()
     454: u32 npages;  in reclaim_pages_cmd() (local)
     470: while (p && i < npages) {  in reclaim_pages_cmd()
     547: else if (req->npages < 0)  in pages_work_handler()
     575: s32 npages;  in req_pages_handler() (local)
     596: req->npages = npages;  in req_pages_handler()
     [all …]
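
Note the signed type: the s32 member at line 53 and the "req->npages < 0" test at line 547 suggest one request structure carries both directions of page traffic. A sketch (not the driver's exact code) of the implied dispatch, with give_pages()/reclaim_pages() as hypothetical stand-ins for the driver's internal helpers:

#include <linux/types.h>

struct mlx5_core_dev;

/* hypothetical helpers standing in for the driver's internals */
void give_pages(struct mlx5_core_dev *dev, int npages);
void reclaim_pages(struct mlx5_core_dev *dev, int npages);

static void handle_page_request(struct mlx5_core_dev *dev, s32 npages)
{
        if (npages > 0)
                give_pages(dev, npages);        /* firmware wants more pages */
        else if (npages < 0)
                reclaim_pages(dev, -npages);    /* firmware is returning pages */
        /* npages == 0: nothing outstanding */
}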

/linux/arch/sparc/kernel/

iommu.c
     158: unsigned long npages)  in alloc_npages() (argument)
     204: int npages, nid;  in dma_4u_alloc_coherent() (local)
     235: while (npages--) {  in dma_4u_alloc_coherent()
     251: unsigned long order, npages;  in dma_4u_free_coherent() (local)
     284: npages >>= IO_PAGE_SHIFT;  in dma_4u_map_page()
     383: vaddr, ctx, npages);  in strbuf_flush()
     405: npages >>= IO_PAGE_SHIFT;  in dma_4u_unmap_page()
     420: npages, direction);  in dma_4u_unmap_page()
     510: while (npages--) {  in dma_4u_map_sg()
     642: npages, direction);  in dma_4u_unmap_sg()
     [all …]
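
The "npages >>= IO_PAGE_SHIFT" hits are the tail end of a bytes-to-IOMMU-pages conversion: both ends of the buffer must be rounded out to IO-page boundaries first, since a mapping may start mid-page. A sketch under that assumption, using the sparc IO_PAGE_* constants; the helper name is illustrative:

#include <linux/kernel.h>
#include <asm/iommu.h>

static unsigned long io_npages(unsigned long vaddr, size_t len)
{
        unsigned long start = vaddr & IO_PAGE_MASK;             /* round down */
        unsigned long end = ALIGN(vaddr + len, IO_PAGE_SIZE);   /* round up */

        return (end - start) >> IO_PAGE_SHIFT;
}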

pci_sun4v.c
      74: p->npages = 0;  in iommu_batch_start()
      91: unsigned long npages = p->npages;  in iommu_batch_flush() (local)
     100: while (npages != 0) {  in iommu_batch_flush()
     104: npages,  in iommu_batch_flush()
     135: npages -= num;  in iommu_batch_flush()
     140: p->npages = 0;  in iommu_batch_flush()
     307: npages);  in dma_4v_iommu_demap()
     317: npages -= num;  in dma_4v_iommu_demap()
     318: } while (npages != 0);  in dma_4v_iommu_demap()
     436: unsigned long npages;  in dma_4v_unmap_page() (local)
     [all …]
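
Both iommu_batch_flush() and dma_4v_iommu_demap() show the same partial-completion loop: the sun4v hypervisor call may process fewer entries than requested, so the caller advances by the returned count and retries until npages reaches zero. A sketch of that shape, with hv_iommu_map() as a hypothetical stand-in for the pci_sun4v_iommu_map() hypercall wrapper:

/* hypothetical hypercall wrapper: returns entries mapped, or < 0 on error */
long hv_iommu_map(unsigned long devhandle, unsigned long tsbid,
                  unsigned long npages, unsigned long prot,
                  unsigned long *pglist);

static long batch_map(unsigned long devhandle, unsigned long tsbid,
                      unsigned long prot, unsigned long *pglist,
                      unsigned long npages)
{
        while (npages != 0) {
                long num = hv_iommu_map(devhandle, tsbid, npages,
                                        prot, pglist);

                if (num < 0)
                        return num;     /* caller unwinds any partial mapping */

                tsbid += num;
                pglist += num;
                npages -= num;
        }

        return 0;
}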

/linux/drivers/infiniband/hw/hfi1/

user_exp_rcv.c
     130: unsigned int npages,  in unpin_rcv_pages() (argument)
     147: fd->tid_n_pinned -= npages;  in unpin_rcv_pages()
     156: unsigned int npages;  in pin_rcv_pages() (local)
     163: if (!npages)  in pin_rcv_pages()
     192: tidbuf->npages = npages;  in pin_rcv_pages()
     526: if (!npages)  in find_phys_blocks()
     659: npages);  in program_rcvarray()
     662: mapped += npages;  in program_rcvarray()
     665: EXP_TID_SET(LEN, npages);  in program_rcvarray()
     710: node->npages = npages;  in set_rcvarray_entry()
     [all …]

user_pages.c
      30: u32 nlocked, u32 npages)  in hfi1_can_pin_pages() (argument)
      55: if (pinned + npages >= ulimit && !can_lock)  in hfi1_can_pin_pages()
      58: return ((nlocked + npages) <= size) || can_lock;  in hfi1_can_pin_pages()
      61: int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,  in hfi1_acquire_user_pages() (argument)
      67: ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);  in hfi1_acquire_user_pages()
      77: size_t npages, bool dirty)  in hfi1_release_user_pages() (argument)
      79: unpin_user_pages_dirty_lock(p, npages, dirty);  in hfi1_release_user_pages()
      82: atomic64_sub(npages, &mm->pinned_vm);  in hfi1_release_user_pages()
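
Lines 55 and 58 carry the whole pinning policy, so it is worth restating. A hedged sketch with assumed parameter meanings: pinned is the mm-wide pinned-page total, ulimit the RLIMIT_MEMLOCK cap in pages, nlocked/size the driver's own TID-cache usage and cap, and can_lock true when the task holds CAP_IPC_LOCK:

#include <linux/types.h>

static bool can_pin(u64 pinned, u64 ulimit, u32 nlocked, u32 size,
                    u32 npages, bool can_lock)
{
        /* the process-wide memlock cap applies unless CAP_IPC_LOCK */
        if (pinned + npages >= ulimit && !can_lock)
                return false;

        /* the driver's own cache cap, again waived by CAP_IPC_LOCK */
        return ((nlocked + npages) <= size) || can_lock;
}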

/linux/drivers/gpu/drm/i915/gem/selftests/

mock_dmabuf.c
      21: err = sg_alloc_table(st, mock->npages, GFP_KERNEL);  in mock_map_dma_buf()
      26: for (i = 0; i < mock->npages; i++) {  in mock_map_dma_buf()
      58: for (i = 0; i < mock->npages; i++)  in mock_dmabuf_release()
      69: vaddr = vm_map_ram(mock->pages, mock->npages, 0);  in mock_dmabuf_vmap()
      81: vm_unmap_ram(map->vaddr, mock->npages);  in mock_dmabuf_vunmap()
      98: static struct dma_buf *mock_dmabuf(int npages)  in mock_dmabuf() (argument)
     105: mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),  in mock_dmabuf()
     110: mock->npages = npages;  in mock_dmabuf()
     111: for (i = 0; i < npages; i++) {  in mock_dmabuf()
     118: exp_info.size = npages * PAGE_SIZE;  in mock_dmabuf()
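
The kmalloc() at line 105 is the classic header-plus-trailing-array allocation. A sketch of the same shape written with struct_size(), which checks the multiply and add for overflow; the struct here is assumed for illustration, not i915's actual layout:

#include <linux/overflow.h>
#include <linux/slab.h>

struct mock_buf {
        int npages;
        struct page *pages[];   /* trailing flexible array */
};

static struct mock_buf *mock_buf_alloc(int npages)
{
        struct mock_buf *mock;

        /* overflow-checked sizeof(*mock) + npages * sizeof(struct page *) */
        mock = kmalloc(struct_size(mock, pages, npages), GFP_KERNEL);
        if (!mock)
                return NULL;

        mock->npages = npages;
        return mock;
}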

/linux/drivers/gpu/drm/amd/amdkfd/

kfd_migrate.c
      64: num_bytes = npages * 8;  in svm_migrate_gart_map()
     145: while (npages) {  in svm_migrate_copy_memory_gart()
     171: npages -= size;  in svm_migrate_copy_memory_gart()
     172: if (npages) {  in svm_migrate_copy_memory_gart()
     427: size *= npages;  in svm_migrate_vma_to_vram()
     449: if (cpages != npages)  in svm_migrate_vma_to_vram()
     451: cpages, npages);  in svm_migrate_vma_to_vram()
     666: size *= npages;  in svm_migrate_vma_to_ram()
     689: if (cpages != npages)  in svm_migrate_vma_to_ram()
     691: cpages, npages);  in svm_migrate_vma_to_ram()
     [all …]

/linux/arch/powerpc/kernel/

iommu.c
     213: int largealloc = npages > 15;  in iommu_range_alloc()
     225: if (unlikely(npages == 0)) {  in iommu_range_alloc()
     315: end = n + npages;  in iommu_range_alloc()
     380: unsigned int npages)  in iommu_free_check() (argument)
     427: unsigned int npages)  in __iommu_free() (argument)
     449: unsigned int npages)  in iommu_free() (argument)
     516: npages);  in ppc_iommu_map_sg()
     617: unsigned int npages;  in ppc_iommu_unmap_sg() (local)
     846: unsigned int npages, align;  in iommu_map_page() (local)
     868: npages);  in iommu_map_page()
     [all …]
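
Line 213 encodes an allocation policy in one comparison: requests above 15 pages count as "large". A sketch of the pool choice that comparison presumably feeds, simplified from iommu_range_alloc(); routing small requests through per-CPU pools (and the modulo hashing) is an assumption here:

#include <linux/smp.h>
#include <asm/iommu.h>

static struct iommu_pool *choose_pool(struct iommu_table *tbl,
                                      unsigned long npages)
{
        bool largealloc = npages > 15;

        if (largealloc)
                return &tbl->large_pool;        /* one pool for big requests */

        /* spread small requests to reduce lock contention */
        return &tbl->pools[raw_smp_processor_id() % tbl->nr_pools];
}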

/linux/drivers/infiniband/hw/mthca/

mthca_memfree.c
      72: for (i = 0; i < chunk->npages; ++i)  in mthca_free_icm_pages()
      81: for (i = 0; i < chunk->npages; ++i) {  in mthca_free_icm_coherent()
     157: while (npages > 0) {  in mthca_alloc_icm()
     165: chunk->npages = 0;  in mthca_alloc_icm()
     170: while (1 << cur_order > npages)  in mthca_alloc_icm()
     182: ++chunk->npages;  in mthca_alloc_icm()
     189: chunk->npages,  in mthca_alloc_icm()
     199: npages -= 1 << cur_order;  in mthca_alloc_icm()
     528: int npages;  in mthca_init_user_db_tab() (local)
     540: for (i = 0; i < npages; ++i) {  in mthca_init_user_db_tab()
     [all …]
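
Lines 157–199 are an order-trimming allocator, and the mlx4 icm.c hits further down show the same loop: grab the largest power-of-two chunk that does not overshoot the remaining count, and drop the order when a chunk would overshoot or its allocation fails. A sketch of the control flow, with alloc_chunk() as a hypothetical stand-in for alloc_pages()/dma_alloc_coherent():

#include <linux/errno.h>
#include <linux/types.h>

bool alloc_chunk(int order);    /* hypothetical: allocate 2^order pages */

static int alloc_icm_sketch(int npages, int preferred_order)
{
        int cur_order = preferred_order;

        while (npages > 0) {
                /* never allocate past what is still needed */
                while (1 << cur_order > npages)
                        --cur_order;

                if (!alloc_chunk(cur_order)) {
                        /* retry smaller; give up once order 0 fails */
                        if (--cur_order < 0)
                                return -ENOMEM;
                        continue;
                }

                npages -= 1 << cur_order;
        }

        return 0;
}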

mthca_allocator.c
     200: int npages, shift;  in mthca_buf_alloc() (local)
     207: npages = 1;  in mthca_buf_alloc()
     219: npages *= 2;  in mthca_buf_alloc()
     222: dma_list = kmalloc_array(npages, sizeof(*dma_list),  in mthca_buf_alloc()
     227: for (i = 0; i < npages; ++i)  in mthca_buf_alloc()
     231: npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;  in mthca_buf_alloc()
     234: dma_list = kmalloc_array(npages, sizeof(*dma_list),  in mthca_buf_alloc()
     239: buf->page_list = kmalloc_array(npages,  in mthca_buf_alloc()
     245: for (i = 0; i < npages; ++i)  in mthca_buf_alloc()
     248: for (i = 0; i < npages; ++i) {  in mthca_buf_alloc()
     [all …]

/linux/arch/x86/mm/

cpu_entry_area.c
      63: unsigned int npages;  in percpu_setup_debug_store() (local)
      70: npages = sizeof(struct debug_store) / PAGE_SIZE;  in percpu_setup_debug_store()
      72: cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,  in percpu_setup_debug_store()
      80: npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;  in percpu_setup_debug_store()
      81: for (; npages; npages--, cea += PAGE_SIZE)  in percpu_setup_debug_store()
      89: npages = sizeof(estacks->name## _stack) / PAGE_SIZE; \
      91: estacks->name## _stack, npages, PAGE_KERNEL); \
      98: unsigned int npages;  in percpu_setup_exception_stacks() (local)
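
The sizeof()/PAGE_SIZE divisions at lines 70 and 80 silently assume the structs are exact multiples of the page size (the real layouts guarantee this with page alignment). A sketch that makes the assumption explicit with a compile-time check:

#include <linux/build_bug.h>
#include <asm/intel_ds.h>
#include <asm/page.h>

static unsigned int debug_store_npages(void)
{
        /* integer division below is only exact for page-multiple sizes */
        BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);

        return sizeof(struct debug_store) / PAGE_SIZE;
}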

/linux/drivers/fpga/

dfl-afu-dma-region.c
      37: int npages = region->length >> PAGE_SHIFT;  in afu_dma_pin_pages() (local)
      41: ret = account_locked_vm(current->mm, npages, true);  in afu_dma_pin_pages()
      45: region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);  in afu_dma_pin_pages()
      56: } else if (pinned != npages) {  in afu_dma_pin_pages()
      70: account_locked_vm(current->mm, npages, false);  in afu_dma_pin_pages()
      85: long npages = region->length >> PAGE_SHIFT;  in afu_dma_unpin_pages() (local)
      88: unpin_user_pages(region->pages, npages);  in afu_dma_unpin_pages()
      90: account_locked_vm(current->mm, npages, false);  in afu_dma_unpin_pages()
      92: dev_dbg(dev, "%ld pages unpinned\n", npages);  in afu_dma_unpin_pages()
     104: int npages = region->length >> PAGE_SHIFT;  in afu_dma_check_continuous_pages() (local)
     [all …]
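
The hits trace the standard long-term-pin sequence: charge locked_vm, allocate the page array, pin, and treat a short pin count (line 56) as failure. A sketch with the unwind paths spelled out; the region struct is assumed from the field names, and FOLL_WRITE follows the usual DMA-region usage:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>

struct region_sketch {          /* assumed shape of the pinned region */
        u64 user_addr;
        u64 length;
        struct page **pages;
};

static int region_pin(struct region_sketch *region)
{
        int npages = region->length >> PAGE_SHIFT;
        int pinned, ret;

        ret = account_locked_vm(current->mm, npages, true);
        if (ret)
                return ret;

        region->pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
        if (!region->pages) {
                ret = -ENOMEM;
                goto unlock_vm;
        }

        pinned = pin_user_pages_fast(region->user_addr, npages,
                                     FOLL_WRITE, region->pages);
        if (pinned < 0) {
                ret = pinned;
                goto free_pages;
        } else if (pinned != npages) {
                /* short pin: release what we got and fail */
                unpin_user_pages(region->pages, pinned);
                ret = -EFAULT;
                goto free_pages;
        }

        return 0;

free_pages:
        kfree(region->pages);
unlock_vm:
        account_locked_vm(current->mm, npages, false);
        return ret;
}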

/linux/drivers/infiniband/core/

umem.c
     157: unsigned long npages;  in ib_umem_get() (local)
     196: npages = ib_umem_num_pages(umem);  in ib_umem_get()
     197: if (npages == 0 || npages > UINT_MAX) {  in ib_umem_get()
     204: new_pinned = atomic64_add_return(npages, &mm->pinned_vm);  in ib_umem_get()
     206: atomic64_sub(npages, &mm->pinned_vm);  in ib_umem_get()
     216: while (npages) {  in ib_umem_get()
     219: min_t(unsigned long, npages,  in ib_umem_get()
     229: npages -= pinned;  in ib_umem_get()
     233: npages, GFP_KERNEL);  in ib_umem_get()
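
Lines 204–206 show lock-free pinned_vm accounting: add optimistically, check the limit, roll back on failure. A sketch of that step in isolation, with the RLIMIT_MEMLOCK comparison and the CAP_IPC_LOCK bypass as they appear in common GUP practice:

#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/capability.h>

static int charge_pinned_vm(struct mm_struct *mm, unsigned long npages)
{
        unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        u64 new_pinned;

        new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
        if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
                /* undo the charge so the counter cannot leak */
                atomic64_sub(npages, &mm->pinned_vm);
                return -ENOMEM;
        }

        return 0;
}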

ib_core_uverbs.c
     141: pgoff, entry->npages);  in rdma_user_mmap_entry_get_pgoff()
     171: if (entry->npages * PAGE_SIZE != vma->vm_end - vma->vm_start) {  in rdma_user_mmap_entry_get()
     191: for (i = 0; i < entry->npages; i++)  in rdma_user_mmap_entry_free()
     196: entry->start_pgoff, entry->npages);  in rdma_user_mmap_entry_free()
     269: u32 xa_first, xa_last, npages;  in rdma_user_mmap_entry_insert_range() (local)
     290: npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);  in rdma_user_mmap_entry_insert_range()
     291: entry->npages = npages;  in rdma_user_mmap_entry_insert_range()
     301: if (check_add_overflow(xa_first, npages, &xa_last))  in rdma_user_mmap_entry_insert_range()
     328: entry->start_pgoff, npages);  in rdma_user_mmap_entry_insert_range()
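
Lines 290 and 301 pair the round-up page count with an overflow-checked end-of-range computation, so an oversized mmap entry cannot wrap the 32-bit pgoff index space. The two checks in isolation, as a sketch (the xarray insertion itself is omitted):

#include <linux/overflow.h>
#include <linux/kernel.h>

static int mmap_range_bounds(u32 xa_first, u64 length, u32 *xa_last)
{
        u32 npages = (u32)DIV_ROUND_UP(length, PAGE_SIZE);

        /* *xa_last = xa_first + npages, rejecting u32 wrap-around */
        if (check_add_overflow(xa_first, npages, xa_last))
                return -ENOMEM;

        return 0;
}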

/linux/tools/testing/selftests/kvm/

memslot_perf_test.c
      95: uint64_t npages;  (member)
     269: data->npages = mempages;  in prepare_vm()
     289: uint64_t npages;  in prepare_vm() (local)
     293: npages += rempages;  in prepare_vm()
     298: guest_addr += npages * 4096;  in prepare_vm()
     303: uint64_t npages;  in prepare_vm() (local)
     308: npages += rempages;  in prepare_vm()
     600: uint64_t npages;  in test_memslot_do_unmap() (local)
     606: npages = min(npages, count - ctr);  in test_memslot_do_unmap()
     611: ctr += npages;  in test_memslot_do_unmap()
     [all …]
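
The "npages += rempages" hits imply the usual even-split-plus-remainder sizing: mempages divided across the slots, with the leftover folded into one slot so every page is covered. A userspace sketch of that arithmetic; which slot absorbs the remainder is an assumption here, since the hits only show the addition:

#include <stdint.h>

static uint64_t slot_npages(uint64_t mempages, int nslots, int slot)
{
        uint64_t npages = mempages / nslots;

        if (slot == nslots - 1)
                npages += mempages % nslots;    /* the "rempages" share */

        return npages;
}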

/linux/mm/

hmm.c
     138: for (i = 0; i < npages; ++i) {  in hmm_range_need_fault()
     153: unsigned long i, npages;  in hmm_vma_walk_hole() (local)
     157: npages = (end - addr) >> PAGE_SHIFT;  in hmm_vma_walk_hole()
     160: hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);  in hmm_vma_walk_hole()
     193: unsigned long pfn, npages, i;  in hmm_vma_handle_pmd() (local)
     197: npages = (end - addr) >> PAGE_SHIFT;  in hmm_vma_handle_pmd()
     331: unsigned long npages = (end - start) >> PAGE_SHIFT;  in hmm_vma_walk_pmd() (local)
     435: unsigned long i, npages, pfn;  in hmm_vma_walk_pud() (local)
     446: npages = (end - addr) >> PAGE_SHIFT;  in hmm_vma_walk_pud()
     451: npages, cpu_flags);  in hmm_vma_walk_pud()
     [all …]
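
Every walker derives npages the same way, from a page-aligned [addr, end) span. For the huge-entry paths (PMD/PUD) the natural follow-up is one pfn-plus-flags word per base page; a sketch of that fill, with the HMM flag encoding left abstract:

#include <linux/mm.h>

static void fill_huge_pfns(unsigned long *hmm_pfns, unsigned long addr,
                           unsigned long end, unsigned long pfn,
                           unsigned long cpu_flags)
{
        unsigned long npages = (end - addr) >> PAGE_SHIFT;
        unsigned long i;

        /* one entry per base page of the huge mapping */
        for (i = 0; i < npages; ++i)
                hmm_pfns[i] = (pfn + i) | cpu_flags;
}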

/linux/drivers/net/ethernet/mellanox/mlx4/

icm.c
      63: for (i = 0; i < chunk->npages; ++i)  in mlx4_free_icm_pages()
      72: for (i = 0; i < chunk->npages; ++i)  in mlx4_free_icm_coherent()
     159: while (npages > 0) {  in mlx4_alloc_icm()
     179: while (1 << cur_order > npages)  in mlx4_alloc_icm()
     188: &chunk->buf[chunk->npages],  in mlx4_alloc_icm()
     202: ++chunk->npages;  in mlx4_alloc_icm()
     208: chunk->sg, chunk->npages,  in mlx4_alloc_icm()
     215: if (chunk->npages == MLX4_ICM_CHUNK_LEN)  in mlx4_alloc_icm()
     218: npages -= 1 << cur_order;  in mlx4_alloc_icm()
     223: chunk->npages, DMA_BIDIRECTIONAL);  in mlx4_alloc_icm()
     [all …]

mr.c
     199: if (!npages) {  in mlx4_mtt_init()
     538: access, npages, page_shift, mr);  in mlx4_mr_alloc()
     590: u64 iova, u64 size, int npages,  in mlx4_mr_rereg_mem_write() (argument)
     707: npages * sizeof(u64), DMA_TO_DEVICE);  in mlx4_write_mtt_chunk()
     709: for (i = 0; i < npages; ++i)  in mlx4_write_mtt_chunk()
     733: while (npages > 0) {  in __mlx4_write_mtt()
     737: npages -= chunk;  in __mlx4_write_mtt()
     764: while (npages > 0) {  in mlx4_write_mtt()
     766: npages);  in mlx4_write_mtt()
     778: npages -= chunk;  in mlx4_write_mtt()
     [all …]
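
__mlx4_write_mtt() and mlx4_write_mtt() both consume the page list in bounded chunks, since one command mailbox holds only so many 64-bit entries; the GART copy loop in kfd_migrate.c above has the same shape. A sketch of the loop, with the chunk size and write_chunk() as hypothetical stand-ins for the mailbox command:

#include <linux/minmax.h>
#include <linux/types.h>

#define MTT_CHUNK 256   /* illustrative mailbox capacity, not mlx4's value */

/* hypothetical stand-in for the real mailbox write */
int write_chunk(u64 start_index, int npages, const u64 *page_list);

static int write_mtt_sketch(u64 start_index, int npages,
                            const u64 *page_list)
{
        while (npages > 0) {
                int chunk = min_t(int, MTT_CHUNK, npages);
                int err = write_chunk(start_index, chunk, page_list);

                if (err)
                        return err;

                npages -= chunk;
                start_index += chunk;
                page_list += chunk;
        }

        return 0;
}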

/linux/drivers/infiniband/hw/vmw_pvrdma/

pvrdma_misc.c
      53: u64 npages, bool alloc_pages)  in pvrdma_page_dir_init() (argument)
      57: if (npages > PVRDMA_PAGE_DIR_MAX_PAGES)  in pvrdma_page_dir_init()
      67: pdir->ntables = PVRDMA_PAGE_DIR_TABLE(npages - 1) + 1;  in pvrdma_page_dir_init()
      81: pdir->npages = npages;  in pvrdma_page_dir_init()
      84: pdir->pages = kcalloc(npages, sizeof(*pdir->pages),  in pvrdma_page_dir_init()
      89: for (i = 0; i < pdir->npages; i++) {  in pvrdma_page_dir_init()
     127: for (i = 0; i < pdir->npages && pdir->pages[i]; i++) {  in pvrdma_page_dir_cleanup_pages()
     173: if (idx >= pdir->npages)  in pvrdma_page_dir_insert_dma()
     189: if (offset >= pdir->npages)  in pvrdma_page_dir_insert_umem()
     212: if (num_pages > pdir->npages)  in pvrdma_page_dir_insert_page_list()
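
Line 67 sizes a two-level structure: the table count is the table index of the last page, plus one. A sketch of that arithmetic, assuming 4K tables of 8-byte entries (512 pages per table); the real constant comes from the PVRDMA headers:

#include <linux/types.h>

#define PAGES_PER_TABLE 512     /* PAGE_SIZE / sizeof(u64), assumed 4K pages */

static u64 ntables_for(u64 npages)
{
        /* table index of page (npages - 1), plus one table */
        return (npages - 1) / PAGES_PER_TABLE + 1;
}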

pvrdma_mr.c
     122: int ret, npages;  in pvrdma_reg_user_mr() (local)
     136: npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE);  in pvrdma_reg_user_mr()
     137: if (npages < 0 || npages > PVRDMA_PAGE_DIR_MAX_PAGES) {  in pvrdma_reg_user_mr()
     139: npages);  in pvrdma_reg_user_mr()
     154: ret = pvrdma_page_dir_init(dev, &mr->pdir, npages, false);  in pvrdma_reg_user_mr()
     171: cmd->nchunks = npages;  in pvrdma_reg_user_mr()
     306: if (mr->npages == mr->max_pages)  in pvrdma_set_page()
     309: mr->pages[mr->npages++] = addr;  in pvrdma_set_page()
     320: mr->npages = 0;  in pvrdma_map_mr_sg()
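
Lines 306–320 are the two halves of the verbs map_mr_sg contract: the core walks the scatterlist and calls the driver's set_page callback once per page, and the driver appends into a preallocated array, resetting npages before each walk. A sketch of the pairing; the mr layout is assumed:

#include <rdma/ib_verbs.h>

struct sketch_mr {              /* assumed: embeds the ib_mr, owns the array */
        struct ib_mr ibmr;
        u64 *pages;
        u32 npages;
        u32 max_pages;
};

static int sketch_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct sketch_mr *mr = container_of(ibmr, struct sketch_mr, ibmr);

        if (mr->npages == mr->max_pages)
                return -ENOMEM; /* preallocated capacity exhausted */

        mr->pages[mr->npages++] = addr;
        return 0;
}

static int sketch_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
                            int sg_nents, unsigned int *sg_offset)
{
        struct sketch_mr *mr = container_of(ibmr, struct sketch_mr, ibmr);

        mr->npages = 0;         /* restart the walk, as the line-320 hit does */
        return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, sketch_set_page);
}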

/linux/arch/powerpc/sysdev/

dart_iommu.c
     173: long npages, unsigned long uaddr,  in dart_build() (argument)
     181: DBG("dart: build at: %lx, %lx, addr: %x\n", index, npages, uaddr);  in dart_build()
     188: l = npages;  in dart_build()
     196: dart_cache_sync(orig_dp, npages);  in dart_build()
     200: while (npages--)  in dart_build()
     209: static void dart_free(struct iommu_table *tbl, long index, long npages)  in dart_free() (argument)
     212: long orig_npages = npages;  in dart_free()
     219: DBG("dart: free at: %lx, %lx\n", index, npages);  in dart_free()
     223: while (npages--)  in dart_free()

/linux/arch/powerpc/kvm/

book3s_64_vio.c
     191: for (i = 0; i < npages; i++)  in release_spapr_tce_table()
     285: unsigned long npages, size = args->size;  in kvm_vm_ioctl_create_spapr_tce() (local)
     292: npages = kvmppc_tce_pages(size);  in kvm_vm_ioctl_create_spapr_tce()
     612: unsigned long tce_list, unsigned long npages)  in kvmppc_h_put_tce_indirect() (argument)
     630: if (npages > 512)  in kvmppc_h_put_tce_indirect()
     636: ret = kvmppc_ioba_validate(stt, ioba, npages);  in kvmppc_h_put_tce_indirect()
     647: for (i = 0; i < npages; ++i) {  in kvmppc_h_put_tce_indirect()
     659: for (i = 0; i < npages; ++i) {  in kvmppc_h_put_tce_indirect()
     698: iommu_tce_kill(stit->tbl, entry, npages);  in kvmppc_h_put_tce_indirect()
     719: ret = kvmppc_ioba_validate(stt, ioba, npages);  in kvmppc_h_stuff_tce()
     [all …]
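
The "npages > 512" bound at line 630 matches the H_PUT_TCE_INDIRECT ABI: one 4K page holds 512 eight-byte TCEs, so a guest can hand over at most that many per hcall. A sketch of the entry checks; the 512 bound is from the snippet, while the alignment test on the list address is an assumption about the ABI:

#include <linux/sizes.h>
#include <asm/hvcall.h>

static long check_tce_list(unsigned long tce_list, unsigned long npages)
{
        if (npages > 512)
                return H_PARAMETER;     /* more than one page of TCEs */

        if (tce_list & (SZ_4K - 1))
                return H_PARAMETER;     /* list page must be 4K-aligned */

        return H_SUCCESS;
}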

/linux/drivers/gpu/drm/etnaviv/

etnaviv_gem_prime.c
      20: int npages = obj->size >> PAGE_SHIFT;  in etnaviv_gem_prime_get_sg_table() (local)
      25: return drm_prime_pages_to_sg(obj->dev, etnaviv_obj->pages, npages);  in etnaviv_gem_prime_get_sg_table()
     109: int ret, npages;  in etnaviv_gem_prime_import_sg_table() (local)
     118: npages = size / PAGE_SIZE;  in etnaviv_gem_prime_import_sg_table()
     121: etnaviv_obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);  in etnaviv_gem_prime_import_sg_table()
     127: ret = drm_prime_sg_to_page_array(sgt, etnaviv_obj->pages, npages);  in etnaviv_gem_prime_import_sg_table()
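
Both prime paths lean on GEM object sizes being page-aligned, so npages is a plain shift or division. A condensed sketch of the import direction (lines 109–127): size the page array from npages, then let the DRM helper expand the sg table into it; error handling is simplified:

#include <drm/drm_prime.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/slab.h>

static struct page **import_pages(struct sg_table *sgt, size_t size,
                                  int *out_npages)
{
        int npages = size / PAGE_SIZE;
        struct page **pages;

        pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return ERR_PTR(-ENOMEM);

        /* flatten the sg table into one struct page pointer per page */
        if (drm_prime_sg_to_page_array(sgt, pages, npages)) {
                kvfree(pages);
                return ERR_PTR(-EINVAL);
        }

        *out_npages = npages;
        return pages;
}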