/linux/drivers/infiniband/hw/mthca/

mthca_mr.c
    217 mtt = kmalloc(sizeof *mtt, GFP_KERNEL); in __mthca_alloc_mtt()
    218 if (!mtt) in __mthca_alloc_mtt()
    224 ++mtt->order; in __mthca_alloc_mtt()
    228 kfree(mtt); in __mthca_alloc_mtt()
    232 return mtt; in __mthca_alloc_mtt()
    242 if (!mtt) in mthca_free_mtt()
    245 mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order); in mthca_free_mtt()
    249 mtt->first_seg + (1 << mtt->order) - 1); in mthca_free_mtt()
    251 kfree(mtt); in mthca_free_mtt()
    463 if (!mr->mtt) in mthca_mr_alloc()
    [all …]

mthca_provider.c
    871 mr->mtt = mthca_alloc_mtt(dev, n); in mthca_reg_user_mr()
    872 if (IS_ERR(mr->mtt)) { in mthca_reg_user_mr()
    873 err = PTR_ERR(mr->mtt); in mthca_reg_user_mr()
    895 err = mthca_write_mtt(dev, mr->mtt, n, pages, i); in mthca_reg_user_mr()
    904 err = mthca_write_mtt(dev, mr->mtt, n, pages, i); in mthca_reg_user_mr()
    919 mthca_free_mtt(dev, mr->mtt); in mthca_reg_user_mr()

mthca_provider.h
    76 struct mthca_mtt *mtt; member

mthca_dev.h
    467 void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt);
    468 int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
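The mthca hits above outline a small, self-contained MTT API: __mthca_alloc_mtt() carves a power-of-two range out of a buddy allocator, mthca_reg_user_mr() fills it with page addresses via mthca_write_mtt(), and mthca_free_mtt() returns the range. A minimal usage sketch follows; it assumes the mthca_alloc_mtt()/mthca_write_mtt()/mthca_free_mtt() signatures implied by the calls above, and example_write_translation() with its dma_pages/npages inputs is a hypothetical helper, not code from the tree.

#include <linux/err.h>
#include "mthca_dev.h"

static int example_write_translation(struct mthca_dev *dev,
                                     u64 *dma_pages, int npages,
                                     struct mthca_mtt **mtt_out)
{
        struct mthca_mtt *mtt;
        int err;

        /* Reserve an MTT range; the buddy allocator rounds npages up to a
         * power of two (the ++mtt->order loop in __mthca_alloc_mtt()). */
        mtt = mthca_alloc_mtt(dev, npages);
        if (IS_ERR(mtt))
                return PTR_ERR(mtt);

        /* Write all npages DMA addresses starting at MTT index 0. */
        err = mthca_write_mtt(dev, mtt, 0, dma_pages, npages);
        if (err) {
                mthca_free_mtt(dev, mtt);
                return err;
        }

        *mtt_out = mtt;
        return 0;
}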
/linux/drivers/net/ethernet/mellanox/mlx4/

mr.c
    200 mtt->order = -1; in mlx4_mtt_init()
    207 ++mtt->order; in mlx4_mtt_init()
    209 mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order); in mlx4_mtt_init()
    253 if (mtt->order < 0) in mlx4_mtt_cleanup()
    256 mlx4_free_mtt_range(dev, mtt->offset, mtt->order); in mlx4_mtt_cleanup()
    585 mr->mtt.order = -1; in mlx4_mr_rereg_mem_cleanup()
    609 &mr->mtt)); in mlx4_mr_rereg_mem_write()
    613 if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) { in mlx4_mr_rereg_mem_write()
    658 &mr->mtt)); in mlx4_mr_enable()
    661 if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) { in mlx4_mr_enable()
    [all …]

resource_tracker.c
    2814 mpt->mtt = mtt; in mlx4_SW2HW_MPT_wrapper()
    2858 if (mpt->mtt) in mlx4_HW2SW_MPT_wrapper()
    3022 qp->mtt = mtt; in mlx4_RST2INIT_QP_wrapper()
    3128 eq->mtt = mtt; in mlx4_SW2HW_EQ_wrapper()
    3171 *res = mtt; in get_containing_mtt()
    3172 mtt->com.from_state = mtt->com.state; in get_containing_mtt()
    3287 mtt.order = 0; in mlx4_WRITE_MTT_wrapper()
    3460 cq->mtt = mtt; in mlx4_SW2HW_CQ_wrapper()
    3556 cq->mtt = mtt; in handle_resize()
    3633 mtt); in mlx4_SW2HW_SRQ_wrapper()
    [all …]

cq.c
    192 int entries, struct mlx4_mtt *mtt) in mlx4_cq_resize() argument
    205 cq_context->log_page_size = mtt->page_shift - 12; in mlx4_cq_resize()
    206 mtt_addr = mlx4_mtt_addr(dev, mtt); in mlx4_cq_resize()
    343 struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, in mlx4_cq_alloc() argument
    385 cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; in mlx4_cq_alloc()
    387 mtt_addr = mlx4_mtt_addr(dev, mtt); in mlx4_cq_alloc()

qp.c
    87 static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, in __mlx4_qp_modify() argument
    164 u64 mtt_addr = mlx4_mtt_addr(dev, mtt); in __mlx4_qp_modify()
    167 context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; in __mlx4_qp_modify()
    208 int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, in mlx4_qp_modify() argument
    214 return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context, in mlx4_qp_modify()
    914 int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, in mlx4_qp_to_ready() argument
    932 err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], in mlx4_qp_to_ready()

srq.c
    163 struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq) in mlx4_srq_alloc() argument
    193 srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; in mlx4_srq_alloc()
    195 mtt_addr = mlx4_mtt_addr(dev, mtt); in mlx4_srq_alloc()

alloc.c
    790 &wqres->mtt); in mlx4_alloc_hwq_res()
    794 err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf); in mlx4_alloc_hwq_res()
    801 mlx4_mtt_cleanup(dev, &wqres->mtt); in mlx4_alloc_hwq_res()
    814 mlx4_mtt_cleanup(dev, &wqres->mtt); in mlx4_free_hwq_res()

eq.c
    1028 err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt); in mlx4_create_eq()
    1032 err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list); in mlx4_create_eq()
    1042 mtt_addr = mlx4_mtt_addr(dev, &eq->mtt); in mlx4_create_eq()
    1065 mlx4_mtt_cleanup(dev, &eq->mtt); in mlx4_create_eq()
    1105 mlx4_mtt_cleanup(dev, &eq->mtt); in mlx4_free_eq()
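The mlx4_create_eq() hits in eq.c above show the full core-driver MTT lifecycle: mlx4_mtt_init() reserves a range sized for npages pages, mlx4_write_mtt() fills it with DMA addresses, mlx4_mtt_addr() yields the base address that the SW2HW context commands consume, and mlx4_mtt_cleanup() releases the range. A condensed sketch of that sequence; dma_list and the context write are placeholders, and example_mtt_lifecycle() is hypothetical.

#include <linux/mlx4/device.h>

static int example_mtt_lifecycle(struct mlx4_dev *dev, u64 *dma_list,
                                 int npages, struct mlx4_mtt *mtt)
{
        u64 mtt_addr;
        int err;

        err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, mtt);
        if (err)
                return err;

        err = mlx4_write_mtt(dev, mtt, 0, npages, dma_list);
        if (err)
                goto err_cleanup;

        /* Base address carried by the SW2HW context commands; the contexts
         * also store log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT,
         * as in the cq.c/qp.c/srq.c hits above. */
        mtt_addr = mlx4_mtt_addr(dev, mtt);
        (void)mtt_addr;

        return 0;

err_cleanup:
        mlx4_mtt_cleanup(dev, mtt);
        return err;
}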
en_cq.c
    139 err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, in mlx4_en_activate_cq()
/linux/drivers/infiniband/hw/mlx4/

srq.c
    124 PAGE_SHIFT, &srq->mtt); in mlx4_ib_create_srq()
    128 err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem); in mlx4_ib_create_srq()
    164 &srq->mtt); in mlx4_ib_create_srq()
    168 err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf); in mlx4_ib_create_srq()
    186 &srq->mtt, srq->db.dma, &srq->msrq); in mlx4_ib_create_srq()
    210 mlx4_mtt_cleanup(dev->dev, &srq->mtt); in mlx4_ib_create_srq()
    274 mlx4_mtt_cleanup(dev->dev, &msrq->mtt); in mlx4_ib_destroy_srq()

cq.c
    113 &buf->mtt); in mlx4_ib_alloc_cq_buf()
    124 mlx4_mtt_cleanup(dev->dev, &buf->mtt); in mlx4_ib_alloc_cq_buf()
    153 err = mlx4_mtt_init(dev->dev, n, shift, &buf->mtt); in mlx4_ib_get_cq_umem()
    158 err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem); in mlx4_ib_get_cq_umem()
    165 mlx4_mtt_cleanup(dev->dev, &buf->mtt); in mlx4_ib_get_cq_umem()
    277 mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt); in mlx4_ib_create_cq()
    383 struct mlx4_mtt mtt; in mlx4_ib_resize_cq() local
    421 mtt = cq->buf.mtt; in mlx4_ib_resize_cq()
    427 mlx4_mtt_cleanup(dev->dev, &mtt); in mlx4_ib_resize_cq()
    461 mlx4_mtt_cleanup(dev->dev, &cq->resize_buf->buf.mtt); in mlx4_ib_resize_cq()
    [all …]

mr.c
    95 struct mlx4_mtt *mtt, in mlx4_ib_umem_write_mtt_block() argument
    134 err = mlx4_write_mtt(dev->dev, mtt, *start_index, in mlx4_ib_umem_write_mtt_block()
    182 int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt, in mlx4_ib_umem_write_mtt() argument
    200 mtt_shift = mtt->page_shift; in mlx4_ib_umem_write_mtt()
    214 err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size, in mlx4_ib_umem_write_mtt()
    232 err = mlx4_ib_umem_write_mtt_block(dev, mtt, mtt_size, in mlx4_ib_umem_write_mtt()
    241 err = mlx4_write_mtt(dev->dev, mtt, start_index, npages, pages); in mlx4_ib_umem_write_mtt()
    433 err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem); in mlx4_ib_reg_user_mr()
    527 err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem); in mlx4_ib_rereg_user_mr()
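For user memory, the mlx4 IB hits above (srq.c, cq.c, mr.c) pair mlx4_mtt_init() with mlx4_ib_umem_write_mtt(), which walks the umem and issues mlx4_write_mtt() in batches. A minimal sketch of that pairing; how n and shift are derived from the umem is not shown in the hits and is left as an input here, and example_umem_to_mtt() is hypothetical.

#include "mlx4_ib.h"

static int example_umem_to_mtt(struct mlx4_ib_dev *dev, struct ib_umem *umem,
                               int n, int shift, struct mlx4_mtt *mtt)
{
        int err;

        /* n MTT entries of 2^shift bytes each, sized from the umem. */
        err = mlx4_mtt_init(dev->dev, n, shift, mtt);
        if (err)
                return err;

        /* Walks the umem and writes the entries via mlx4_write_mtt(). */
        err = mlx4_ib_umem_write_mtt(dev, mtt, umem);
        if (err)
                mlx4_mtt_cleanup(dev->dev, mtt);

        return err;
}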
mlx4_ib.h
    106 struct mlx4_mtt mtt; member
    341 struct mlx4_mtt mtt; member
    384 struct mlx4_mtt mtt; member
    753 int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,

qp.c
    644 qp->mtt = (to_mqp( in create_qp_rss()
    645 (struct ib_qp *)init_attr->rwq_ind_tbl->ind_tbl[0]))->mtt; in create_qp_rss()
    889 err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); in create_rq()
    894 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); in create_rq()
    945 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in create_rq()
    1072 err = mlx4_mtt_init(dev->dev, n, shift, &qp->mtt); in create_qp_common()
    1077 err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem); in create_qp_common()
    1127 &qp->mtt); in create_qp_common()
    1131 err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf); in create_qp_common()
    1233 mlx4_mtt_cleanup(dev->dev, &qp->mtt); in create_qp_common()
    [all …]
/linux/drivers/infiniband/hw/mlx5/

odp.c
    136 if (mtt) { in populate_klm()
    137 pklm->key = cpu_to_be32(mtt->ibmr.lkey); in populate_klm()
    535 struct mlx5_ib_mr *mtt; in mlx5_ib_free_odp_mr() local
    544 mlx5_ib_dereg_mr(&mtt->ibmr, NULL); in mlx5_ib_free_odp_mr()
    622 struct mlx5_ib_mr *mtt; in pagefault_implicit_mr() local
    627 if (unlikely(!mtt)) { in pagefault_implicit_mr()
    630 if (IS_ERR(mtt)) { in pagefault_implicit_mr()
    631 ret = PTR_ERR(mtt); in pagefault_implicit_mr()
    637 refcount_inc(&mtt->mmkey.usecount); in pagefault_implicit_mr()
    641 umem_odp = to_ib_umem_odp(mtt->umem); in pagefault_implicit_mr()
    [all …]

mr.c
    1207 struct mlx5_mtt *mtt; in mlx5_ib_update_mr_pas() local
    1215 mtt = mlx5_ib_create_xlt_wr(mr, &wr, &sg, in mlx5_ib_update_mr_pas()
    1218 sizeof(*mtt), flags); in mlx5_ib_update_mr_pas()
    1219 if (!mtt) in mlx5_ib_update_mr_pas()
    1223 cur_mtt = mtt; in mlx5_ib_update_mr_pas()
    1227 if (cur_mtt == (void *)mtt + sg.length) { in mlx5_ib_update_mr_pas()
    1236 cur_mtt = mtt; in mlx5_ib_update_mr_pas()
    1249 final_size = (void *)cur_mtt - (void *)mtt; in mlx5_ib_update_mr_pas()
    1260 mlx5_ib_unmap_free_xlt(dev, mtt, &sg); in mlx5_ib_update_mr_pas()

devx.c
    2218 __be64 *mtt; in devx_umem_reg_cmd_alloc() local
    2246 (MLX5_ST_SZ_BYTES(mtt) * in devx_umem_reg_cmd_alloc()
    2253 mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt); in devx_umem_reg_cmd_alloc()
    2263 mlx5_ib_populate_pas(obj->umem, page_size, mtt, in devx_umem_reg_cmd_alloc()
/linux/include/linux/mlx4/

device.h
    683 struct mlx4_mtt mtt; member
    688 struct mlx4_mtt mtt; member
    834 int mtt; member
    1101 struct mlx4_mtt *mtt);
    1102 void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
    1103 u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt);
    1113 int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
    1115 int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
    1126 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
    1139 struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq);

qp.h
    477 int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
    485 int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
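The qp.h prototypes take an MTT because, per the __mlx4_qp_modify() hits earlier, the RST-to-INIT transition writes the MTT base address and log_page_size into the QP context before the firmware command is issued. A sketch of the consumer side under that reading; it assumes an already-populated struct mlx4_hwq_resources (whose mtt member shows up in the device.h hits), and example_bring_qp_up() is hypothetical.

#include <linux/mlx4/device.h>
#include <linux/mlx4/qp.h>

static int example_bring_qp_up(struct mlx4_dev *dev,
                               struct mlx4_hwq_resources *wqres,
                               struct mlx4_qp_context *context,
                               struct mlx4_qp *qp)
{
        /* Walks the QP through RST -> INIT -> RTR -> RTS; the work-queue
         * buffer's MTT is consumed on the first transition, where the core
         * driver programs mtt_addr and log_page_size into the context. */
        return mlx4_qp_to_ready(dev, &wqres->mtt, context, qp);
}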
cq.h
    185 int entries, struct mlx4_mtt *mtt);
/linux/drivers/vdpa/mlx5/core/

mr.c
    35 static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt) in populate_mtts() argument
    48 mtt[j++] = cpu_to_be64(dma_addr); in populate_mtts()
    59 inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + roundup(MLX5_ST_SZ_BYTES(mtt) * mr->nsg, 16); in create_direct_mr()
/linux/drivers/net/ethernet/mellanox/mlx5/core/diag/

fw_tracer.c
    180 __be64 *mtt; in mlx5_fw_tracer_create_mkey() local
    185 sizeof(*mtt) * round_up(TRACER_BUFFER_PAGE_NUM, 2); in mlx5_fw_tracer_create_mkey()
    193 mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); in mlx5_fw_tracer_create_mkey()
    195 mtt[i] = cpu_to_be64(tracer->buff.dma + i * PAGE_SIZE); in mlx5_fw_tracer_create_mkey()
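The mlx5-side hits (fw_tracer.c here, plus the vdpa mr.c and devx.c entries above) share one pattern: the MTT/PAS array rides inline at the tail of the command, so the buffer is sized as the base command struct plus one __be64 entry per page, and the array is located with MLX5_ADDR_OF(). A sketch of that layout for a physically contiguous buffer, as in the tracer; mkc setup and the mlx5_core_create_mkey() call itself are omitted, and example_build_mkey_cmd() with its buf_dma/npages inputs is hypothetical.

#include <linux/mlx5/driver.h>
#include <linux/slab.h>

static void *example_build_mkey_cmd(dma_addr_t buf_dma, int npages, int *inlen)
{
        __be64 *mtt;
        void *in;
        int i;

        /* Command buffer = base create_mkey struct + one MTT entry per page,
         * padded to an even entry count as in mlx5_fw_tracer_create_mkey(). */
        *inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
                 sizeof(*mtt) * round_up(npages, 2);

        in = kvzalloc(*inlen, GFP_KERNEL);
        if (!in)
                return NULL;

        /* The inline translation array lives in the klm_pas_mtt field. */
        mtt = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
        for (i = 0; i < npages; i++)
                mtt[i] = cpu_to_be64(buf_dma + i * PAGE_SIZE);

        return in;
}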