Searched refs:cqe_size (Results 1 – 25 of 30) sorted by relevance

/linux/drivers/infiniband/hw/mlx5/
  cq.c
    677   buf->cqe_size = cqe_size;  in alloc_cq_frag_buf()
    732   if ((ucmd.cqe_size != 64 && ucmd.cqe_size != 128) ||  in create_cq_user()
    736   *cqe_size = ucmd.cqe_size;  in create_cq_user()
    799   *cqe_size);  in create_cq_user()
    823   *cqe_size);  in create_cq_user()
    947   int cqe_size;  in mlx5_ib_create_cq() (local)
    990   cq->cqe_size = cqe_size;  in mlx5_ib_create_cq()
    1158  if (ucmd.cqe_size && SIZE_MAX / ucmd.cqe_size <= entries - 1)  in resize_user()
    1170  *cqe_size = ucmd.cqe_size;  in resize_user()
    1264  int cqe_size;  in mlx5_ib_resize_cq() (local)
    [all …]
  mlx5_ib.h
    537   int cqe_size;  (member)
    579   int cqe_size;  (member)
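
Note: the mlx5 hits above show two recurring checks on the user-supplied CQE size: it must be 64 or 128 bytes (cq.c:732), and its product with the entry count must not overflow before the CQ buffer is sized (the SIZE_MAX test at cq.c:1158). A minimal standalone sketch of that validation pattern, with hypothetical function and parameter names:

    #include <errno.h>
    #include <stdint.h>
    #include <stddef.h>

    /*
     * Sketch of the mlx5 validation pattern: only 64- or 128-byte CQEs are
     * accepted from userspace, and (entries - 1) * cqe_size must not be able
     * to overflow. Names are illustrative, not the kernel's.
     */
    static int check_user_cqe_size(uint32_t cqe_size, size_t entries)
    {
            if (cqe_size != 64 && cqe_size != 128)
                    return -EINVAL;

            /* Same shape as the resize_user() check at cq.c:1158. */
            if (entries && SIZE_MAX / cqe_size <= entries - 1)
                    return -EINVAL;

            return 0;
    }
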
/linux/drivers/infiniband/hw/hns/
  hns_roce_cq.c
    206   buf_attr.region[0].size = hr_cq->cq_depth * hr_cq->cqe_size;  in alloc_cq_buf()
    335   hr_cq->cqe_size = hr_dev->caps.cqe_sz;  in set_cqe_size()
    339   if (udata->inlen >= offsetofend(typeof(*ucmd), cqe_size)) {  in set_cqe_size()
    340   if (ucmd->cqe_size != HNS_ROCE_V2_CQE_SIZE &&  in set_cqe_size()
    341   ucmd->cqe_size != HNS_ROCE_V3_CQE_SIZE) {  in set_cqe_size()
    343   "invalid cqe size %u.\n", ucmd->cqe_size);  in set_cqe_size()
    347   hr_cq->cqe_size = ucmd->cqe_size;  in set_cqe_size()
    349   hr_cq->cqe_size = HNS_ROCE_V2_CQE_SIZE;  in set_cqe_size()
  hns_roce_main.c
    396   resp.cqe_size = hr_dev->caps.cqe_sz;  in hns_roce_alloc_ucontext()
  hns_roce_device.h
    458   int cqe_size;  (member)
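
Note: the set_cqe_size() hits show the selection logic: a CQE size passed from userspace must be the V2 or V3 value, otherwise the driver falls back to the V2 default (or the device capability in the kernel-only path at line 335). A hedged standalone sketch; the byte values and names below are assumptions for illustration:

    #include <errno.h>
    #include <stdint.h>

    /* Placeholder values standing in for HNS_ROCE_V2/V3_CQE_SIZE. */
    #define SKETCH_V2_CQE_SIZE 32u
    #define SKETCH_V3_CQE_SIZE 64u

    /*
     * Pick the CQE size the way set_cqe_size() does in outline: accept only
     * the V2 or V3 size from userspace, else default to V2. Simplified; the
     * real function also honours the device capability (caps.cqe_sz).
     */
    static int sketch_set_cqe_size(int user_supplied, uint32_t user_cqe_size,
                                   uint32_t *cqe_size)
    {
            if (!user_supplied) {
                    *cqe_size = SKETCH_V2_CQE_SIZE;
                    return 0;
            }

            if (user_cqe_size != SKETCH_V2_CQE_SIZE &&
                user_cqe_size != SKETCH_V3_CQE_SIZE)
                    return -EINVAL;     /* "invalid cqe size" */

            *cqe_size = user_cqe_size;
            return 0;
    }
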
/linux/drivers/net/ethernet/mellanox/mlx4/
  cq.c
    290   static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size)  in mlx4_init_user_cqes() (argument)
    292   int entries_per_copy = PAGE_SIZE / cqe_size;  in mlx4_init_user_cqes()
    318   array_size(entries, cqe_size)) ?  in mlx4_init_user_cqes()
    330   int cqe_size)  in mlx4_init_kernel_cqes() (argument)
    335   memset(buf->direct.buf, 0xcc, entries * cqe_size);  in mlx4_init_kernel_cqes()
    395   dev->caps.cqe_size);  in mlx4_cq_alloc()
    400   dev->caps.cqe_size);  in mlx4_cq_alloc()
  fw.h
    203   u16 cqe_size; /* For use only when CQE stride feature enabled */  (member)
  en_cq.c
    62    cq->buf_size = cq->size * mdev->dev->caps.cqe_size;  in mlx4_en_create_cq()
  en_tx.c
    453   cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;  in mlx4_en_process_tx_cq()
    506   cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor;  in mlx4_en_process_tx_cq()
  en_rx.c
    692   cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;  in mlx4_en_process_rx_cq()
    907   cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;  in mlx4_en_process_rx_cq()
  mlx4_en.h
    568   int cqe_size;  (member)
  fw.c
    1944  dev->caps.cqe_size = 64;  in mlx4_INIT_HCA()
    1947  dev->caps.cqe_size = 32;  in mlx4_INIT_HCA()
    1954  dev->caps.cqe_size = cache_line_size();  in mlx4_INIT_HCA()
    2178  param->cqe_size = 1 << ((byte_field &  in mlx4_QUERY_HCA()
  en_netdev.c
    1678  cqe = mlx4_en_get_cqe(cq->buf, j, priv->cqe_size) +  in mlx4_en_start_port()
    3233  priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;  in mlx4_en_init_netdev()
    3234  priv->cqe_size = mdev->dev->caps.cqe_size;  in mlx4_en_init_netdev()
  main.c
    1050  dev->caps.cqe_size = 64;  in mlx4_slave_cap()
    1053  dev->caps.cqe_size = 32;  in mlx4_slave_cap()
    1062  dev->caps.cqe_size = hca_param->cqe_size;  in mlx4_slave_cap()
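
Note: the mlx4_en hits show how cqe_size drives the CQ buffer layout: buf_size = size * cqe_size (en_cq.c:62), cqe_factor is 1 for 64-byte CQEs and 0 for 32-byte ones (en_netdev.c:3233), and a +factor offset is applied after mlx4_en_get_cqe() in the TX/RX paths. A simplified standalone sketch of that indexing arithmetic; the 32-byte CQE view and the reason for the offset are assumptions for illustration, not taken from the driver:

    #include <stdint.h>

    /* Stand-in for the 32-byte hardware CQE structure the driver parses. */
    struct sketch_cqe {
            uint8_t data[32];
    };

    /*
     * Index into a CQ buffer whose slots are cqe_size bytes wide, then apply
     * the cqe_factor offset: with 64-byte slots (factor == 1) the pointer is
     * advanced by one 32-byte CQE so it lands on the part the driver reads.
     * Simplified sketch, not the driver's mlx4_en_get_cqe().
     */
    static inline struct sketch_cqe *sketch_get_cqe(void *buf, int index,
                                                    int cqe_size, int factor)
    {
            struct sketch_cqe *cqe =
                    (struct sketch_cqe *)((uint8_t *)buf + index * cqe_size);

            return cqe + factor;
    }

    /* Mirrors en_netdev.c:3233: factor is 1 only for 64-byte CQEs. */
    static inline int sketch_cqe_factor(int cqe_size)
    {
            return cqe_size == 64 ? 1 : 0;
    }
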
/linux/include/uapi/rdma/
  hns-abi.h
    42    __u32 cqe_size;  (member)
    88    __u32 cqe_size;  (member)
  mlx5-abi.h
    288   __u32 cqe_size;  (member)
    304   __u16 cqe_size;  (member)
  mlx4-abi.h
    71    __u32 cqe_size;  (member)
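
Note: these uapi headers carry cqe_size across the user/kernel ABI, which is how userspace and the drivers above agree on the CQE stride. A minimal, hypothetical example of how such a field typically sits in a create-CQ command structure; every field and the struct name other than cqe_size are invented for illustration:

    #include <linux/types.h>

    /*
     * Hypothetical create-CQ user command. Only the presence of a cqe_size
     * field is taken from the headers above; the rest is illustrative.
     */
    struct sketch_create_cq_ucmd {
            __u64 buf_addr;     /* userspace CQ buffer address */
            __u64 db_addr;      /* doorbell record address */
            __u32 cqe_size;     /* requested CQE size in bytes */
            __u32 reserved;     /* keep the struct 8-byte aligned */
    };
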
/linux/drivers/infiniband/hw/mlx4/
  cq.c
    105   err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,  in mlx4_ib_alloc_cq_buf()
    111   buf->entry_size = dev->dev->caps.cqe_size;  in mlx4_ib_alloc_cq_buf()
    143   int cqe_size = dev->dev->caps.cqe_size;  in mlx4_ib_get_cq_umem() (local)
    147   *umem = ib_umem_get(&dev->ib_dev, buf_addr, cqe * cqe_size,  in mlx4_ib_get_cq_umem()
    358   int cqe_size = cq->buf.entry_size;  in mlx4_ib_cq_resize_copy_cqes() (local)
    359   int cqe_inc = cqe_size == 64 ? 1 : 0;  in mlx4_ib_cq_resize_copy_cqes()
    368   memcpy(new_cqe, get_cqe(cq, i & cq->ibcq.cqe), cqe_size);  in mlx4_ib_cq_resize_copy_cqes()
/linux/drivers/net/ethernet/marvell/octeontx2/nic/
  otx2_txrx.h
    126   u16 cqe_size;  (member)
  otx2_common.c
    896   cq->cqe_size = pfvf->qset.xqe_size;  in otx2_cq_init()
    899   err = qmem_alloc(pfvf->dev, &cq->cqe, cq->cqe_cnt, cq->cqe_size);  in otx2_cq_init()
/linux/drivers/net/ethernet/huawei/hinic/
  hinic_hw_qp.c
    320   size_t cqe_dma_size, cqe_size;  in alloc_rq_cqe() (local)
    324   cqe_size = wq->q_depth * sizeof(*rq->cqe);  in alloc_rq_cqe()
    325   rq->cqe = vzalloc(cqe_size);  in alloc_rq_cqe()
/linux/drivers/infiniband/hw/ocrdma/
  ocrdma_hw.c
    1790  u32 hw_pages, cqe_size, page_size, cqe_count;  in ocrdma_mbx_create_cq() (local)
    1803  cqe_size = OCRDMA_DPP_CQE_SIZE;  in ocrdma_mbx_create_cq()
    1808  cqe_size = sizeof(struct ocrdma_cqe);  in ocrdma_mbx_create_cq()
    1812  cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE);  in ocrdma_mbx_create_cq()
    1832  cqe_count = cq->len / cqe_size;  in ocrdma_mbx_create_cq()
    1861  cmd->cmd.pdid_cqecnt = (cq->len / cqe_size);  in ocrdma_mbx_create_cq()
    1863  cmd->cmd.pdid_cqecnt = (cq->len / cqe_size) - 1;  in ocrdma_mbx_create_cq()
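
Note: the ocrdma_mbx_create_cq() hits show the sizing arithmetic: the CQ length is max_hw_cqe * cqe_size rounded up to the queue page size, and the effective CQE count is recomputed from that rounded length. A standalone sketch of the same arithmetic; the page-size value below is a placeholder, not the driver's OCRDMA_MIN_Q_PAGE_SIZE:

    #include <stdint.h>

    /* Placeholder for the minimum queue page size; not the driver's value. */
    #define SKETCH_MIN_Q_PAGE_SIZE 4096u

    static inline uint32_t sketch_roundup(uint32_t len, uint32_t align)
    {
            return ((len + align - 1) / align) * align;
    }

    /*
     * Size a CQ the way ocrdma_hw.c:1812/1832 does in outline: round the raw
     * byte length up to a whole queue page, then derive how many CQEs the
     * rounded buffer holds.
     */
    static uint32_t sketch_cq_len(uint32_t max_hw_cqe, uint32_t cqe_size,
                                  uint32_t *cqe_count)
    {
            uint32_t len = sketch_roundup(max_hw_cqe * cqe_size,
                                          SKETCH_MIN_Q_PAGE_SIZE);

            *cqe_count = len / cqe_size;
            return len;
    }
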
/linux/drivers/scsi/bnx2i/
  bnx2i.h
    661   u32 cqe_size;  (member)
  bnx2i_hwi.c
    171   if (cq_index > ep->qp.cqe_size * 2)  in bnx2i_arm_cq_event_coalescing()
    172   cq_index -= ep->qp.cqe_size * 2;  in bnx2i_arm_cq_event_coalescing()
    1123  ep->qp.cqe_size = hba->max_cqes;  in bnx2i_alloc_qp_resc()
    2063  if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))  in bnx2i_process_new_cqes()
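
Note: unlike the other drivers in this listing, bnx2i uses cqe_size as a count of CQEs (it is set from hba->max_cqes at bnx2i_hwi.c:1123), not a byte size, and the index/sequence space spans twice that count. A minimal sketch of the wrap handling seen above, with hypothetical names:

    #include <stdint.h>

    /*
     * Wrap a CQ index back into range when it runs past twice the CQE count,
     * mirroring the check at bnx2i_hwi.c:171-172. Here cqe_cnt plays the
     * role of bnx2i's cqe_size (a number of entries, not bytes).
     */
    static uint32_t sketch_wrap_cq_index(uint32_t cq_index, uint32_t cqe_cnt)
    {
            if (cq_index > cqe_cnt * 2)
                    cq_index -= cqe_cnt * 2;

            return cq_index;
    }
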
/linux/drivers/scsi/lpfc/
  lpfc_sli4.h
    539   uint32_t cqe_size;  (member)
