
Searched refs:umem (Results 1 – 25 of 92) sorted by relevance


/linux/net/xdp/
xdp_umem.c  28 unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true); in xdp_umem_unpin_pages()
31 umem->pgs = NULL; in xdp_umem_unpin_pages()
37 atomic_long_sub(umem->npgs, &umem->user->locked_vm); in xdp_umem_unaccount_pages()
66 kfree(umem); in xdp_umem_release()
83 if (!umem) in xdp_put_umem()
102 umem->pgs = kvcalloc(umem->npgs, sizeof(*umem->pgs), GFP_KERNEL | __GFP_NOWARN); in xdp_umem_pin_pages()
223 err = xdp_umem_addr_map(umem, umem->pgs, umem->npgs); in xdp_umem_reg()
241 umem = kzalloc(sizeof(*umem), GFP_KERNEL); in xdp_umem_create()
242 if (!umem) in xdp_umem_create()
247 kfree(umem); in xdp_umem_create()
[all …]
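
The xdp_umem.c hits above all orbit one pattern: allocate a page array with kvcalloc(), pin the user pages, charge them against the owner's locked_vm, and undo everything in reverse on release. Below is a minimal sketch of just the pin/unpin half; struct demo_umem and both helpers are invented for illustration and only loosely mirror the real xdp_umem_pin_pages()/xdp_umem_unpin_pages() (no locked_vm accounting here).

/* Illustrative only: hypothetical names, error paths trimmed. */
#include <linux/mm.h>
#include <linux/slab.h>

struct demo_umem {                      /* stand-in for struct xdp_umem */
	struct page **pgs;
	u32 npgs;
	unsigned long address;          /* start of the user buffer */
};

static int demo_umem_pin_pages(struct demo_umem *umem)
{
	long pinned;

	umem->pgs = kvcalloc(umem->npgs, sizeof(*umem->pgs),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	pinned = pin_user_pages_fast(umem->address, umem->npgs,
				     FOLL_WRITE | FOLL_LONGTERM, umem->pgs);
	if (pinned != umem->npgs) {
		if (pinned > 0)
			unpin_user_pages(umem->pgs, pinned);
		kvfree(umem->pgs);
		umem->pgs = NULL;
		return pinned < 0 ? (int)pinned : -EFAULT;
	}
	return 0;
}

static void demo_umem_unpin_pages(struct demo_umem *umem)
{
	/* 'true' marks the pages dirty, as in the unpin hit above */
	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);
	kvfree(umem->pgs);
	umem->pgs = NULL;
}
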
xsk_buff_pool.c  45 struct xdp_umem *umem) in xp_create_and_assign_umem() argument
62 pool->addrs_cnt = umem->size; in xp_create_and_assign_umem()
63 pool->heads_cnt = umem->chunks; in xp_create_and_assign_umem()
65 pool->headroom = umem->headroom; in xp_create_and_assign_umem()
69 pool->frame_len = umem->chunk_size - umem->headroom - in xp_create_and_assign_umem()
71 pool->umem = umem; in xp_create_and_assign_umem()
72 pool->addrs = umem->addrs; in xp_create_and_assign_umem()
85 xskb->xdp.frame_sz = umem->chunk_size - umem->headroom; in xp_create_and_assign_umem()
116 if (pool->umem->zc) { in xp_disable_drv_zc()
185 pool->umem->zc = true; in xp_assign_dev()
[all …]
xsk_diag.c  50 struct xdp_umem *umem = xs->umem; in xsk_diag_put_umem() local
54 if (!umem) in xsk_diag_put_umem()
57 du.id = umem->id; in xsk_diag_put_umem()
58 du.size = umem->size; in xsk_diag_put_umem()
59 du.num_pages = umem->npgs; in xsk_diag_put_umem()
60 du.chunk_size = umem->chunk_size; in xsk_diag_put_umem()
61 du.headroom = umem->headroom; in xsk_diag_put_umem()
65 if (umem->zc) in xsk_diag_put_umem()
67 du.refs = refcount_read(&umem->users); in xsk_diag_put_umem()
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
umem.c  46 list_for_each_entry(umem, &master->umem, head) { in nvkm_umem_search()
67 if (!umem->map) in nvkm_umem_unmap()
70 if (umem->io) { in nvkm_umem_unmap()
75 umem->bar = NULL; in nvkm_umem_unmap()
79 umem->map = NULL; in nvkm_umem_unmap()
94 if (umem->map) in nvkm_umem_map()
98 int ret = nvkm_mem_map_host(umem->memory, &umem->map); in nvkm_umem_map()
131 return umem; in nvkm_umem_dtor()
164 if (!(umem = kzalloc(sizeof(*umem), GFP_KERNEL))) in nvkm_umem_new()
167 umem->mmu = mmu; in nvkm_umem_new()
[all …]
/linux/drivers/infiniband/core/
umem.c  88 if (umem->is_odp) { in ib_umem_find_best_pgsz()
175 umem = kzalloc(sizeof(*umem), GFP_KERNEL); in ib_umem_get()
176 if (!umem) in ib_umem_get()
185 umem->iova = addr; in ib_umem_get()
257 kfree(umem); in ib_umem_get()
269 if (!umem) in ib_umem_release()
273 if (umem->is_odp) in ib_umem_release()
276 __ib_umem_release(umem->ibdev, umem, 1); in ib_umem_release()
278 atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm); in ib_umem_release()
280 kfree(umem); in ib_umem_release()
[all …]
umem_odp.c  121 struct ib_umem *umem; in ib_umem_odp_alloc_implicit() local
131 umem = &umem_odp->umem; in ib_umem_odp_alloc_implicit()
132 umem->ibdev = device; in ib_umem_odp_alloc_implicit()
169 struct ib_umem *umem; in ib_umem_odp_alloc_child() local
178 umem = &odp_data->umem; in ib_umem_odp_alloc_child()
179 umem->ibdev = root->umem.ibdev; in ib_umem_odp_alloc_child()
180 umem->length = size; in ib_umem_odp_alloc_child()
182 umem->writable = root->umem.writable; in ib_umem_odp_alloc_child()
183 umem->owning_mm = root->umem.owning_mm; in ib_umem_odp_alloc_child()
200 mmput(umem->owning_mm); in ib_umem_odp_alloc_child()
[all …]
umem_dmabuf.c  36 end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length, in ib_umem_dmabuf_map_pages()
62 umem_dmabuf->umem.sgt_append.sgt.nents = nmap; in ib_umem_dmabuf_map_pages()
116 struct ib_umem *umem; in ib_umem_dmabuf_get() local
139 umem = &umem_dmabuf->umem; in ib_umem_dmabuf_get()
140 umem->ibdev = device; in ib_umem_dmabuf_get()
141 umem->length = size; in ib_umem_dmabuf_get()
142 umem->address = offset; in ib_umem_dmabuf_get()
143 umem->writable = ib_access_writable(access); in ib_umem_dmabuf_get()
144 umem->is_dmabuf = 1; in ib_umem_dmabuf_get()
146 if (!ib_umem_num_pages(umem)) in ib_umem_dmabuf_get()
[all …]
/linux/include/rdma/
ib_umem.h  33 struct ib_umem umem; member
46 return container_of(umem, struct ib_umem_dmabuf, umem); in to_ib_umem_dmabuf()
52 return umem->address & ~PAGE_MASK; in ib_umem_offset()
58 return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) & in ib_umem_dma_offset()
65 return (size_t)((ALIGN(umem->iova + umem->length, pgsz) - in ib_umem_num_dma_blocks()
66 ALIGN_DOWN(umem->iova, pgsz))) / in ib_umem_num_dma_blocks()
72 return ib_umem_num_dma_blocks(umem, PAGE_SIZE); in ib_umem_num_pages()
76 struct ib_umem *umem, in __rdma_umem_block_iter_start() argument
80 umem->sgt_append.sgt.nents, pgsz); in __rdma_umem_block_iter_start()
102 void ib_umem_release(struct ib_umem *umem);
[all …]
ib_umem_odp.h  13 struct ib_umem umem; member
47 static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem) in to_ib_umem_odp() argument
49 return container_of(umem, struct ib_umem_odp, umem); in to_ib_umem_odp()
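
Taken together, the ib_umem.h helpers listed above cover the typical driver flow: ib_umem_get() pins and DMA-maps a user range, ib_umem_find_best_pgsz() picks a hardware page size, the DMA-block iterator walks the mapping, and ib_umem_release() undoes it. The skeleton below is a hypothetical illustration of that flow; demo_reg_user_mr(), the page-size bitmap, and the "program the device" step are placeholders, not any driver's real code.

/* Sketch only: no MR object, no driver page tables, minimal error handling. */
#include <linux/sizes.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

static int demo_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			    u64 iova, int access)
{
	struct ib_umem *umem;
	struct ib_block_iter biter;
	unsigned long pgsz;

	umem = ib_umem_get(pd->device, start, length, access);
	if (IS_ERR(umem))
		return PTR_ERR(umem);

	/* Pick the largest page size both the mapping and this (made-up)
	 * hardware bitmap of 4K/2M/1G support.
	 */
	pgsz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M | SZ_1G, iova);
	if (!pgsz) {
		ib_umem_release(umem);
		return -EINVAL;
	}

	pr_info("need %zu DMA blocks of %lu bytes, offset in first page %d\n",
		ib_umem_num_dma_blocks(umem, pgsz), pgsz, ib_umem_offset(umem));

	rdma_umem_for_each_dma_block(umem, &biter, pgsz) {
		dma_addr_t dma = rdma_block_iter_dma_address(&biter);

		/* a real driver writes 'dma' into its translation table here */
		(void)dma;
	}

	ib_umem_release(umem);	/* a real driver keeps it until dereg_mr() */
	return 0;
}
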
/linux/tools/lib/bpf/
xsk.c  114 return umem ? umem->fd : -EINVAL; in xsk_umem__fd()
299 umem = calloc(1, sizeof(*umem)); in xsk_umem__create_v0_0_4()
300 if (!umem) in xsk_umem__create_v0_0_4()
326 err = xsk_create_umem_rings(umem, umem->fd, fill, comp); in xsk_umem__create_v0_0_4()
338 free(umem); in xsk_umem__create_v0_0_4()
915 struct xsk_umem *umem = ctx->umem; in xsk_put_ctx() local
966 ctx->umem = umem; in xsk_create_ctx()
1203 if (!umem) in xsk_socket__create()
1213 if (!umem) in xsk_umem__delete()
1220 free(umem); in xsk_umem__delete()
[all …]
xsk.h  255 int xsk_umem__fd(const struct xsk_umem *umem);
292 int xsk_umem__create(struct xsk_umem **umem,
298 int xsk_umem__create_v0_0_2(struct xsk_umem **umem,
304 int xsk_umem__create_v0_0_4(struct xsk_umem **umem,
312 struct xsk_umem *umem,
319 __u32 queue_id, struct xsk_umem *umem,
328 int xsk_umem__delete(struct xsk_umem *umem);
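
The xsk.h prototypes above are the whole userspace lifecycle of a UMEM. Here is a small hedged example of the create/query/delete sequence, assuming a libbpf that still ships <bpf/xsk.h>; the frame count and the absence of any fill-ring setup are arbitrary simplifications.

/* Userspace sketch: create a UMEM over an anonymous mmap, print its fd,
 * then tear it down. Passing NULL for the config selects the defaults.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <bpf/xsk.h>

#define NUM_FRAMES 4096
#define FRAME_SIZE XSK_UMEM__DEFAULT_FRAME_SIZE

int main(void)
{
	struct xsk_ring_prod fq;
	struct xsk_ring_cons cq;
	struct xsk_umem *umem = NULL;
	size_t size = (size_t)NUM_FRAMES * FRAME_SIZE;
	void *buffer;
	int ret;

	buffer = mmap(NULL, size, PROT_READ | PROT_WRITE,
		      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buffer == MAP_FAILED)
		return 1;

	ret = xsk_umem__create(&umem, buffer, size, &fq, &cq, NULL);
	if (ret) {
		fprintf(stderr, "xsk_umem__create: %d\n", ret);
		munmap(buffer, size);
		return 1;
	}

	printf("umem fd = %d\n", xsk_umem__fd(umem));

	xsk_umem__delete(umem);
	munmap(buffer, size);
	return 0;
}
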
/linux/tools/testing/selftests/bpf/
xdpxceiver.c  253 ret = xsk_umem__create(&umem->umem, buffer, size, in xsk_configure_umem()
254 &umem->fq, &umem->cq, &cfg); in xsk_configure_umem()
269 xsk->umem = umem; in xsk_configure_socket()
517 pkt_stream->pkts[i].addr = (i % umem->num_frames) * umem->frame_size; in pkt_stream_generate()
547 struct xsk_umem_info *umem = test->ifobj_tx->umem; in pkt_stream_replace_half() local
647 u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom; in is_offset_correct()
745 struct xsk_umem_info *umem = xsk->umem; in receive_pkts() local
989 munmap(ifobj->umem->buffer, ifobj->umem->num_frames * ifobj->umem->frame_size); in testapp_cleanup_xsk_res()
990 xsk_umem__delete(ifobj->umem->umem); in testapp_cleanup_xsk_res()
1139 xsk_umem__delete(ifobj_tx->umem->umem); in swap_xsk_resources()
[all …]
/linux/drivers/infiniband/sw/siw/
siw_mem.c  85 kfree(umem->page_chunk); in siw_umem_release()
86 kfree(umem); in siw_umem_release()
367 struct siw_umem *umem; in siw_umem_get() local
384 umem = kzalloc(sizeof(*umem), GFP_KERNEL); in siw_umem_get()
385 if (!umem) in siw_umem_get()
389 umem->owning_mm = mm_s; in siw_umem_get()
390 umem->writable = writable; in siw_umem_get()
407 umem->page_chunk = in siw_umem_get()
409 if (!umem->page_chunk) { in siw_umem_get()
432 umem->num_pages += rv; in siw_umem_get()
[all …]
siw_mem.h  10 void siw_umem_release(struct siw_umem *umem, bool dirty);
58 static inline struct page *siw_get_upage(struct siw_umem *umem, u64 addr) in siw_get_upage() argument
60 unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT, in siw_get_upage()
64 if (likely(page_idx < umem->num_pages)) in siw_get_upage()
65 return umem->page_chunk[chunk_idx].plist[page_in_chunk]; in siw_get_upage()
/linux/drivers/infiniband/hw/mlx4/
mr.c  77 mr->umem = NULL; in mlx4_ib_get_dma_mr()
183 struct ib_umem *umem) in mlx4_ib_umem_write_mtt() argument
421 if (IS_ERR(mr->umem)) { in mlx4_ib_reg_user_mr()
422 err = PTR_ERR(mr->umem); in mlx4_ib_reg_user_mr()
451 ib_umem_release(mr->umem); in mlx4_ib_reg_user_mr()
508 if (IS_ERR(mmr->umem)) { in mlx4_ib_rereg_user_mr()
509 err = PTR_ERR(mmr->umem); in mlx4_ib_rereg_user_mr()
511 mmr->umem = NULL; in mlx4_ib_rereg_user_mr()
607 if (mr->umem) in mlx4_ib_dereg_mr()
608 ib_umem_release(mr->umem); in mlx4_ib_dereg_mr()
[all …]
doorbell.c  40 struct ib_umem *umem; member
67 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, in mlx4_ib_db_map_user()
69 if (IS_ERR(page->umem)) { in mlx4_ib_db_map_user()
70 err = PTR_ERR(page->umem); in mlx4_ib_db_map_user()
78 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + in mlx4_ib_db_map_user()
95 ib_umem_release(db->u.user_page->umem); in mlx4_ib_db_unmap_user()
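
The doorbell.c hits here, and the near-identical mlx5 and hns_roce ones further down, follow one recipe: ib_umem_get() the single user page that holds the doorbell, then derive its DMA address from the first scatterlist entry plus the doorbell's offset within the page. A hypothetical sketch of just that step (names invented, reference counting and locking omitted):

/* Sketch of the db_map_user()/db_unmap_user() pattern. */
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

struct demo_user_db_page {
	struct ib_umem *umem;
	unsigned long user_virt;        /* page-aligned user address */
};

static int demo_db_map_user(struct ib_ucontext *uctx,
			    struct demo_user_db_page *page,
			    unsigned long virt, dma_addr_t *dma)
{
	page->user_virt = virt & PAGE_MASK;
	page->umem = ib_umem_get(uctx->device, virt & PAGE_MASK, PAGE_SIZE, 0);
	if (IS_ERR(page->umem))
		return PTR_ERR(page->umem);

	/* DMA address of the pinned page plus the doorbell's in-page offset */
	*dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) +
	       (virt & ~PAGE_MASK);
	return 0;
}

static void demo_db_unmap_user(struct demo_user_db_page *page)
{
	ib_umem_release(page->umem);
}
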
srq.c  117 srq->umem = in mlx4_ib_create_srq()
119 if (IS_ERR(srq->umem)) in mlx4_ib_create_srq()
120 return PTR_ERR(srq->umem); in mlx4_ib_create_srq()
123 dev->dev, ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE), in mlx4_ib_create_srq()
128 err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem); in mlx4_ib_create_srq()
213 if (!srq->umem) in mlx4_ib_create_srq()
215 ib_umem_release(srq->umem); in mlx4_ib_create_srq()
289 ib_umem_release(msrq->umem); in mlx4_ib_destroy_srq()
/linux/samples/bpf/
xdpsock_user.c  141 struct xsk_umem *umem; member
148 struct xsk_umem_info *umem; member
500 struct xsk_umem *umem = xsks[0]->umem->umem; in xdpsock_cleanup() local
822 umem = calloc(1, sizeof(*umem)); in xsk_configure_umem()
823 if (!umem) in xsk_configure_umem()
826 ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq, in xsk_configure_umem()
831 umem->buffer = buffer; in xsk_configure_umem()
832 return umem; in xsk_configure_umem()
863 xsk->umem = umem; in xsk_configure_socket()
875 ret = xsk_socket__create(&xsk->xsk, opt_if, opt_queue, umem->umem, in xsk_configure_socket()
[all …]
/linux/lib/
test_user_copy.c  47 static int test_check_nonzero_user(char *kmem, char __user *umem, size_t size) in test_check_nonzero_user() argument
65 umem += start; in test_check_nonzero_user()
87 ret |= test(copy_to_user(umem, kmem, size), in test_check_nonzero_user()
93 int retval = check_zeroed_user(umem + start, len); in test_check_nonzero_user()
105 static int test_copy_struct_from_user(char *kmem, char __user *umem, in test_copy_struct_from_user() argument
124 ret |= test(copy_to_user(umem, umem_src, size), in test_copy_struct_from_user()
134 ret |= test(copy_struct_from_user(kmem, ksize, umem, usize), in test_copy_struct_from_user()
147 ret |= test(copy_struct_from_user(kmem, ksize, umem, usize), in test_copy_struct_from_user()
157 ret |= test(copy_struct_from_user(kmem, ksize, umem, usize) != -E2BIG, in test_copy_struct_from_user()
165 ret |= test(clear_user(umem + ksize, usize - ksize), in test_copy_struct_from_user()
[all …]
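
lib/test_user_copy.c above is the self-test for the user-copy helpers, including copy_struct_from_user(), the routine the kernel uses to accept uAPI structs that may grow over time. A hedged sketch of the usual call pattern in an ioctl-style handler follows; struct demo_args and demo_ioctl_set() are invented.

/* Sketch only: an extensible struct copied in from userspace. */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/uaccess.h>

struct demo_args {              /* uAPI struct that grows at the tail */
	__u64 flags;
	__u64 addr;
	__u64 len;              /* newer field: older userspace omits it */
};

static long demo_ioctl_set(void __user *uarg, size_t usize)
{
	struct demo_args args;
	int ret;

	/* Copies min(usize, sizeof(args)) bytes, zero-fills the rest, and
	 * returns -E2BIG only if userspace passed trailing non-zero bytes
	 * this kernel does not understand.
	 */
	ret = copy_struct_from_user(&args, sizeof(args), uarg, usize);
	if (ret)
		return ret;

	if (args.flags)
		return -EINVAL; /* no flags defined yet */

	/* ... act on args.addr / args.len ... */
	return 0;
}
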
/linux/drivers/infiniband/hw/mlx5/
doorbell.c  41 struct ib_umem *umem; member
68 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, in mlx5_ib_db_map_user()
70 if (IS_ERR(page->umem)) { in mlx5_ib_db_map_user()
71 err = PTR_ERR(page->umem); in mlx5_ib_db_map_user()
81 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + in mlx5_ib_db_map_user()
99 ib_umem_release(db->u.user_page->umem); in mlx5_ib_db_unmap_user()
mr.c  823 mr->umem = NULL; in mlx5_ib_get_dma_mr()
928 umem->iova = iova; in mlx5_umem_dmabuf_default_pgsz()
941 if (umem->is_dmabuf) in alloc_cacheable_mr()
974 mr->umem = umem; in alloc_cacheable_mr()
1337 mr->umem = umem; in reg_create()
1564 if (IS_ERR(umem)) in mlx5_ib_reg_user_mr()
1826 umem = mr->umem; in mlx5_ib_rereg_user_mr()
1827 mr->umem = NULL; in mlx5_ib_rereg_user_mr()
1981 if (mr->umem) { in mlx5_ib_dereg_mr()
2082 mr->umem = NULL; in mlx5_ib_alloc_pi_mr()
[all …]
/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_mr.c  117 struct ib_umem *umem; in pvrdma_reg_user_mr() local
129 umem = ib_umem_get(pd->device, start, length, access_flags); in pvrdma_reg_user_mr()
130 if (IS_ERR(umem)) { in pvrdma_reg_user_mr()
133 return ERR_CAST(umem); in pvrdma_reg_user_mr()
136 npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE); in pvrdma_reg_user_mr()
152 mr->umem = umem; in pvrdma_reg_user_mr()
161 ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0); in pvrdma_reg_user_mr()
190 ib_umem_release(umem); in pvrdma_reg_user_mr()
257 mr->umem = NULL; in pvrdma_alloc_mr()
294 ib_umem_release(mr->umem); in pvrdma_dereg_mr()
pvrdma_srq.c  149 srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0); in pvrdma_create_srq()
150 if (IS_ERR(srq->umem)) { in pvrdma_create_srq()
151 ret = PTR_ERR(srq->umem); in pvrdma_create_srq()
155 srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE); in pvrdma_create_srq()
171 pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0); in pvrdma_create_srq()
209 ib_umem_release(srq->umem); in pvrdma_create_srq()
229 ib_umem_release(srq->umem); in pvrdma_free_srq()
/linux/drivers/infiniband/hw/hns/
hns_roce_db.c  33 page->umem = ib_umem_get(context->ibucontext.device, page_addr, in hns_roce_db_map_user()
35 if (IS_ERR(page->umem)) { in hns_roce_db_map_user()
36 ret = PTR_ERR(page->umem); in hns_roce_db_map_user()
45 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + offset; in hns_roce_db_map_user()
46 db->virt_addr = sg_virt(page->umem->sgt_append.sgt.sgl) + offset; in hns_roce_db_map_user()
64 ib_umem_release(db->u.user_page->umem); in hns_roce_db_unmap_user()
/linux/drivers/infiniband/sw/rxe/
rxe_mr.c  170 struct ib_umem *umem; in rxe_mr_init_user() local
176 umem = ib_umem_get(pd->ibpd.device, start, length, access); in rxe_mr_init_user()
177 if (IS_ERR(umem)) { in rxe_mr_init_user()
179 __func__, (int)PTR_ERR(umem)); in rxe_mr_init_user()
180 err = PTR_ERR(umem); in rxe_mr_init_user()
184 num_buf = ib_umem_num_pages(umem); in rxe_mr_init_user()
205 for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) { in rxe_mr_init_user()
228 mr->umem = umem; in rxe_mr_init_user()
236 set->offset = ib_umem_offset(umem); in rxe_mr_init_user()
241 ib_umem_release(umem); in rxe_mr_init_user()
[all …]
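
Finally, the rxe_mr.c hits show the soft-RoCE driver flattening an ib_umem into its own page list by walking the appended scatter-gather table. A minimal, purely illustrative walk of the same table (demo_walk_umem_pages() is invented):

/* Sketch: visit every page of a pinned ib_umem via for_each_sgtable_page(). */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <rdma/ib_umem.h>

static void demo_walk_umem_pages(struct ib_umem *umem)
{
	struct sg_page_iter sg_iter;

	for_each_sgtable_page(&umem->sgt_append.sgt, &sg_iter, 0) {
		struct page *pg = sg_page_iter_page(&sg_iter);
		void *vaddr = page_address(pg); /* direct-map address, as rxe uses */

		/* record 'vaddr' (or its PA) in the MR's buffer list here */
		(void)vaddr;
	}
}
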

Completed in 68 milliseconds
