/linux/include/rdma/

  ib_umem.h
      75: static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,  (in __rdma_umem_block_iter_start(), argument)
      79: __rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,  (in __rdma_umem_block_iter_start())
      94: #define rdma_umem_for_each_dma_block(umem, biter, pgsz) \  (argument)
      95: for (__rdma_umem_block_iter_start(biter, umem, pgsz); \
      96: __rdma_block_iter_next(biter);)

  ib_verbs.h
    2854: void __rdma_block_iter_start(struct ib_block_iter *biter,
    2858: bool __rdma_block_iter_next(struct ib_block_iter *biter);
    2866: rdma_block_iter_dma_address(struct ib_block_iter *biter)  (in rdma_block_iter_dma_address(), argument)
    2868: return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);  (in rdma_block_iter_dma_address())
    2881: #define rdma_for_each_block(sglist, biter, nents, pgsz) \  (argument)
    2882: for (__rdma_block_iter_start(biter, sglist, nents, \
    2884: __rdma_block_iter_next(biter);)

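Taken together, ib_umem.h and ib_verbs.h define the block-iterator API that the driver hits below rely on: rdma_umem_for_each_dma_block() wraps the iterator for an ib_umem, and rdma_block_iter_dma_address() returns the block-aligned DMA address at the current position. A minimal sketch of the common caller pattern follows; collect_dma_blocks() and the addrs array are illustrative names, not kernel symbols.

/*
 * Minimal usage sketch (hypothetical helper, not kernel code): walk a
 * DMA-mapped ib_umem in page_size-aligned blocks and record one DMA
 * address per block, the same pattern as hns_roce_get_umem_bufs() below.
 */
#include <linux/errno.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

static int collect_dma_blocks(struct ib_umem *umem, u64 *addrs, int max,
			      unsigned long page_size)
{
	struct ib_block_iter biter;
	int n = 0;

	rdma_umem_for_each_dma_block(umem, &biter, page_size) {
		if (n >= max)
			return -ENOSPC;
		/* Address of the current block, masked to the block boundary. */
		addrs[n++] = rdma_block_iter_dma_address(&biter);
	}
	return n;
}
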
/linux/drivers/infiniband/hw/hns/

  hns_roce_alloc.c
     161: struct ib_block_iter biter;  (in hns_roce_get_umem_bufs(), local)
     165: rdma_umem_for_each_dma_block(umem, &biter, 1 << page_shift) {  (in hns_roce_get_umem_bufs())
     166: bufs[total++] = rdma_block_iter_dma_address(&biter);  (in hns_roce_get_umem_bufs())

/linux/drivers/infiniband/core/

  verbs.c
    2954: biter->__sg = sglist;  (in __rdma_block_iter_start())
    2955: biter->__sg_nents = nents;  (in __rdma_block_iter_start())
    2958: biter->__pg_bit = __fls(pgsz);  (in __rdma_block_iter_start())
    2966: if (!biter->__sg_nents || !biter->__sg)  (in __rdma_block_iter_next())
    2969: biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;  (in __rdma_block_iter_next())
    2970: block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);  (in __rdma_block_iter_next())
    2971: biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;  (in __rdma_block_iter_next())
    2973: if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {  (in __rdma_block_iter_next())
    2974: biter->__sg_advance = 0;  (in __rdma_block_iter_next())
    2975: biter->__sg = sg_next(biter->__sg);  (in __rdma_block_iter_next())
    [all …]

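The core iterator lives in drivers/infiniband/core/verbs.c. Based only on the truncated snippets above, the advance logic can be reconstructed roughly as below; the end-of-list bookkeeping hidden behind "[all …]" is filled in with the obvious step and may not match the real function exactly.

/*
 * Simplified reconstruction of __rdma_block_iter_next() from the verbs.c
 * snippets above; not a verbatim copy. The final __sg_nents decrement is
 * an assumption covering the truncated tail of the function.
 */
static bool block_iter_next_sketch(struct ib_block_iter *biter)
{
	unsigned int block_offset;

	if (!biter->__sg_nents || !biter->__sg)
		return false;

	/* DMA address at the current offset within the current SG entry. */
	biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;

	/* Step forward to the next __pg_bit-aligned block boundary. */
	block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
	biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;

	/* Once this SG entry is consumed, move to the next one. */
	if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
		biter->__sg_advance = 0;
		biter->__sg = sg_next(biter->__sg);
		biter->__sg_nents--;
	}

	return true;
}
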
/linux/drivers/infiniband/hw/mlx5/

  mem.c
      46: struct ib_block_iter biter;  (in mlx5_ib_populate_pas(), local)
      48: rdma_umem_for_each_dma_block (umem, &biter, page_size) {  (in mlx5_ib_populate_pas())
      49: *pas = cpu_to_be64(rdma_block_iter_dma_address(&biter) |  (in mlx5_ib_populate_pas())

  mr.c
    1203: struct ib_block_iter biter;  (in mlx5_ib_update_mr_pas(), local)
    1224: rdma_for_each_block (mr->umem->sgt_append.sgt.sgl, &biter,  (in mlx5_ib_update_mr_pas())
    1240: cpu_to_be64(rdma_block_iter_dma_address(&biter) |  (in mlx5_ib_update_mr_pas())

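mlx5_ib_update_mr_pas() shows the other entry point: rdma_for_each_block() takes a raw, already DMA-mapped scatterlist and an entry count instead of the ib_umem wrapper. A hedged sketch of that variant follows; fill_pas() and its parameters are illustrative, not kernel symbols.

/*
 * Illustrative helper, not kernel code: iterate a DMA-mapped sg_table
 * directly, as the mlx5 and efa snippets do, and emit one big-endian
 * block address per entry of 'pas'.
 */
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

static void fill_pas(struct sg_table *sgt, __be64 *pas, unsigned long page_size)
{
	struct ib_block_iter biter;

	/* sgt->nents is the DMA-mapped entry count, as EFA's sg_dma_cnt is. */
	rdma_for_each_block(sgt->sgl, &biter, sgt->nents, page_size)
		*pas++ = cpu_to_be64(rdma_block_iter_dma_address(&biter));
}
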
/linux/drivers/dma/

  fsl-edma-common.c
     284: * le16_to_cpu(edesc->tcd[i].vtcd->biter);  (in fsl_edma_desc_residue())
     297: * le16_to_cpu(edesc->tcd[i].vtcd->biter);  (in fsl_edma_desc_residue())
     371: edma_writew(edma, (s16)tcd->biter, &regs->tcd[ch].biter);  (in fsl_edma_set_tcd_regs())
     389: u16 biter, u16 doff, u32 dlast_sga, bool major_int,  (in fsl_edma_fill_tcd(), argument)
     415: tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));  (in fsl_edma_fill_tcd())

  mpc512x_dma.c
     165: u32 biter:9; /* Beginning "major" iteration count */  (member)
     663: tcd->biter = 1;  (in mpc_dma_prep_memcpy())
     770: tcd->biter = 1;  (in mpc_dma_prep_slave_sg())
     784: tcd->biter = iter & 0x1ff;  (in mpc_dma_prep_slave_sg())
     786: tcd->citer = tcd->biter;  (in mpc_dma_prep_slave_sg())

  fsl-edma-common.h
      81: __le16 biter;  (member)

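The drivers/dma hits are an unrelated "biter": the eDMA transfer control descriptor (TCD) field holding the beginning major-loop iteration count (see the mpc512x comment at line 165), which the residue calculation in fsl_edma_desc_residue() multiplies back out. A rough illustration of how the iteration fields relate, using a hypothetical cut-down descriptor rather than the real TCD layouts in fsl-edma-common.h and mpc512x_dma.c:

/*
 * Hypothetical, stripped-down descriptor for illustration only.
 * biter is programmed once with the number of minor loops in the
 * transfer; citer starts equal to biter and counts down as the
 * engine completes minor loops (mpc512x_dma.c sets citer = biter
 * the same way at line 786).
 */
#include <linux/types.h>

struct tcd_sketch {
	u16 citer;	/* current major iteration count */
	u16 biter;	/* beginning major iteration count */
};

static void tcd_set_iterations(struct tcd_sketch *tcd, size_t total_len,
			       size_t nbytes_per_minor)
{
	u16 iter = total_len / nbytes_per_minor;

	tcd->biter = iter;
	tcd->citer = iter;
}
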
/linux/drivers/infiniband/hw/vmw_pvrdma/

  pvrdma_misc.c
     185: struct ib_block_iter biter;  (in pvrdma_page_dir_insert_umem(), local)
     192: rdma_umem_for_each_dma_block (umem, &biter, PAGE_SIZE) {  (in pvrdma_page_dir_insert_umem())
     194: pdir, i, rdma_block_iter_dma_address(&biter));  (in pvrdma_page_dir_insert_umem())

/linux/drivers/infiniband/hw/cxgb4/

  mem.c
     497: struct ib_block_iter biter;  (in c4iw_reg_user_mr(), local)
     548: rdma_umem_for_each_dma_block(mhp->umem, &biter, 1 << shift) {  (in c4iw_reg_user_mr())
     549: pages[i++] = cpu_to_be64(rdma_block_iter_dma_address(&biter));  (in c4iw_reg_user_mr())

/linux/drivers/infiniband/hw/bnxt_re/

  qplib_res.c
      94: struct ib_block_iter biter;  (in bnxt_qplib_fill_user_dma_pages(), local)
      97: rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {  (in bnxt_qplib_fill_user_dma_pages())
      98: pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);  (in bnxt_qplib_fill_user_dma_pages())

/linux/drivers/infiniband/hw/efa/

  efa_verbs.c
    1211: struct ib_block_iter biter;  (in umem_to_page_list(), local)
    1217: rdma_umem_for_each_dma_block(umem, &biter, BIT(hp_shift))  (in umem_to_page_list())
    1218: page_list[hp_idx++] = rdma_block_iter_dma_address(&biter);  (in umem_to_page_list())
    1260: struct ib_block_iter biter;  (in pbl_chunk_list_create(), local)
    1294: rdma_for_each_block(pages_sgl, &biter, sg_dma_cnt,  (in pbl_chunk_list_create())
    1297: rdma_block_iter_dma_address(&biter);  (in pbl_chunk_list_create())

/linux/drivers/infiniband/hw/mthca/

  mthca_provider.c
     838: struct ib_block_iter biter;  (in mthca_reg_user_mr(), local)
     887: rdma_umem_for_each_dma_block(mr->umem, &biter, PAGE_SIZE) {  (in mthca_reg_user_mr())
     888: pages[i++] = rdma_block_iter_dma_address(&biter);  (in mthca_reg_user_mr())

/linux/drivers/infiniband/hw/ocrdma/

  ocrdma_verbs.c
     817: struct ib_block_iter biter;  (in build_user_pbes(), local)
     828: rdma_umem_for_each_dma_block (mr->umem, &biter, PAGE_SIZE) {  (in build_user_pbes())
     830: pg_addr = rdma_block_iter_dma_address(&biter);  (in build_user_pbes())

/linux/drivers/infiniband/hw/irdma/

  verbs.c
    2222: struct ib_block_iter biter;  (in irdma_copy_user_pgaddrs(), local)
    2231: rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {  (in irdma_copy_user_pgaddrs())
    2232: *pbl = rdma_block_iter_dma_address(&biter);  (in irdma_copy_user_pgaddrs())

/linux/drivers/infiniband/hw/qedr/

  verbs.c
     626: struct ib_block_iter biter;  (in qedr_populate_pbls(), local)
     648: rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) {  (in qedr_populate_pbls())
     649: u64 pg_addr = rdma_block_iter_dma_address(&biter);  (in qedr_populate_pbls())