/linux/drivers/infiniband/ulp/iser/

iser_verbs.c
  243  unsigned int max_send_wr, cq_size;   in iser_create_ib_conn_res() (local)
  257  cq_size = max_send_wr + ISER_QP_MAX_RECV_DTOS;   in iser_create_ib_conn_res()
  258  ib_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_SOFTIRQ);   in iser_create_ib_conn_res()
  263  ib_conn->cq_size = cq_size;   in iser_create_ib_conn_res()
  292  ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);   in iser_create_ib_conn_res()
  406  ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size);   in iser_free_ib_conn_res()
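The iser hits above show the shared CQ-pool pattern: the CQ is sized for every work request that can complete on the connection, taken from the device-wide pool with ib_cq_pool_get(), and the size is remembered so the same value can be handed back to ib_cq_pool_put(). A minimal sketch of that pattern follows; struct example_conn and the helper names are illustrative assumptions, not the actual iser code.

/*
 * Sketch of the CQ-pool sizing pattern seen in iser_verbs.c above.
 * Illustrative only: example_conn and the helpers are made-up names.
 */
#include <rdma/ib_verbs.h>

struct example_conn {
        struct ib_cq *cq;
        u32 cq_size;
};

static int example_get_cq(struct example_conn *conn, struct ib_device *ib_dev,
                          u32 max_send_wr, u32 max_recv_wr)
{
        u32 cq_size = max_send_wr + max_recv_wr;        /* one CQE per posted WR */

        conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_SOFTIRQ);
        if (IS_ERR(conn->cq))
                return PTR_ERR(conn->cq);

        conn->cq_size = cq_size;        /* needed again for ib_cq_pool_put() */
        return 0;
}

static void example_put_cq(struct example_conn *conn)
{
        ib_cq_pool_put(conn->cq, conn->cq_size);
}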
iscsi_iser.h
  381  u32 cq_size;   (member)
/linux/drivers/net/ethernet/microsoft/mana/

mana_en.c
   636  req.cq_size = cq_spec->queue_size;   in mana_create_wq_obj()
  1169  u32 cq_size;   in mana_create_txq() (local)
  1186  cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE;   in mana_create_txq()
  1187  cq_size = PAGE_ALIGN(cq_size);   in mana_create_txq()
  1220  spec.queue_size = cq_size;   in mana_create_txq()
  1335  *cq_size = 0;   in mana_alloc_rx_wqe()
  1369  *cq_size += COMP_ENTRY_SIZE;   in mana_alloc_rx_wqe()
  1403  u32 cq_size, rq_size;   in mana_create_rxq() (local)
  1420  err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);   in mana_create_rxq()
  1425  cq_size = PAGE_ALIGN(cq_size);   in mana_create_rxq()
  [all …]
hw_channel.c
  333  u32 eq_size, cq_size;   in mana_hwc_create_cq() (local)
  340  cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);   in mana_hwc_create_cq()
  341  if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE)   in mana_hwc_create_cq()
  342  cq_size = MINIMUM_SUPPORTED_PAGE_SIZE;   in mana_hwc_create_cq()
  355  err = mana_hwc_create_gdma_cq(hwc, cq_size, hwc_cq, mana_hwc_comp_event,   in mana_hwc_create_cq()
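hw_channel.c sizes the hardware-channel CQ in bytes rather than entries: CQE size times queue depth, rounded up to a power of two and never smaller than the minimum supported page size. A hedged sketch of just that arithmetic, with made-up constant values standing in for the mana header definitions:

/*
 * Sketch of the sizing step in mana_hwc_create_cq() above. The two
 * constants are illustrative stand-ins, not the real mana values.
 */
#include <linux/log2.h>
#include <linux/types.h>

#define EXAMPLE_CQE_SIZE        64      /* assumed bytes per CQE */
#define EXAMPLE_MIN_PAGE_SIZE   4096    /* assumed minimum supported page size */

static u32 example_hwc_cq_size(u16 q_depth)
{
        u32 cq_size = roundup_pow_of_two(EXAMPLE_CQE_SIZE * q_depth);

        if (cq_size < EXAMPLE_MIN_PAGE_SIZE)
                cq_size = EXAMPLE_MIN_PAGE_SIZE;

        return cq_size;
}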
mana.h
  472  u32 cq_size;   (member)
/linux/drivers/net/ethernet/mellanox/mlxbf_gige/

mlxbf_gige_rx.c
   84  size_t wq_size, cq_size;   in mlxbf_gige_rx_init() (local)
  118  cq_size = MLXBF_GIGE_RX_CQE_SZ * priv->rx_q_entries;   in mlxbf_gige_rx_init()
  119  priv->rx_cqe_base = dma_alloc_coherent(priv->dev, cq_size,   in mlxbf_gige_rx_init()
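mlxbf_gige_rx.c computes the RX completion ring size as the per-entry CQE size times the number of RX queue entries and backs it with coherent DMA memory. A small sketch of that shape; EXAMPLE_RX_CQE_SZ and the helper are hypothetical, not the driver's definitions:

/*
 * Sketch of the RX CQE ring allocation in mlxbf_gige_rx_init() above.
 */
#include <linux/dma-mapping.h>
#include <linux/types.h>

#define EXAMPLE_RX_CQE_SZ       8       /* assumed bytes per RX completion entry */

static u64 *example_rx_cq_alloc(struct device *dev, u16 rx_q_entries,
                                dma_addr_t *cqe_dma)
{
        size_t cq_size = EXAMPLE_RX_CQE_SZ * rx_q_entries;

        /* NULL on failure */
        return dma_alloc_coherent(dev, cq_size, cqe_dma, GFP_KERNEL);
}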
/linux/drivers/infiniband/hw/irdma/

uk.c
   926  void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int cq_size)   in irdma_uk_cq_resize() (argument)
   929  cq->cq_size = cq_size;   in irdma_uk_cq_resize()
   930  IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);   in irdma_uk_cq_resize()
  1454  cq->cq_size = info->cq_size;   in irdma_uk_cq_init()
  1459  IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);   in irdma_uk_cq_init()
user.h
  374  u32 cq_size;   (member)
  406  u32 cq_size;   (member)
puda.c
   739  set_64bit_val(wqe, 0, cq->cq_uk.cq_size);   in irdma_puda_cq_wqe()
   789  cqsize = rsrc->cq_size * (sizeof(struct irdma_cqe));   in irdma_puda_cq_create()
   801  info.shadow_read_threshold = rsrc->cq_size >> 2;   in irdma_puda_cq_create()
   806  init_info->cq_size = rsrc->cq_size;   in irdma_puda_cq_create()
  1066  rsrc->cq_size = info->rq_size + info->sq_size;   in irdma_puda_create_rsrc()
  1069  rsrc->cq_size += info->rq_size;   in irdma_puda_create_rsrc()
verbs.h
  110  u16 cq_size;   (member)
puda.h
  115  u32 cq_size;   (member)
verbs.c
  1810  info.cq_size = max(entries, 4);   in irdma_resize_cq()
  1812  if (info.cq_size == iwcq->sc_cq.cq_uk.cq_size - 1)   in irdma_resize_cq()
  1849  rsize = info.cq_size * sizeof(struct irdma_cqe);   in irdma_resize_cq()
  1901  ibcq->cqe = info.cq_size - 1;   in irdma_resize_cq()
  1964  ukinfo->cq_size = max(entries, 4);   in irdma_create_cq()
  1966  iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;   in irdma_create_cq()
  2043  ukinfo->cq_size = entries;   in irdma_create_cq()
  2045  rsize = info.cq_uk_init_info.cq_size * sizeof(struct irdma_cqe);   in irdma_create_cq()
  2072  info.shadow_read_threshold = min(info.cq_uk_init_info.cq_size / 2,   in irdma_create_cq()
  2104  resp.cq_size = info.cq_uk_init_info.cq_size;   in irdma_create_cq()
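The irdma hits above show the usual dimensioning steps: the requested entry count is clamped to a minimum of 4, the ring buffer size follows from the CQE size, and (in the resize path) the depth reported back in ibcq->cqe is one less than the ring size. A sketch of just those steps; struct example_cqe and the helper are illustrative stand-ins, not irdma types:

/*
 * Sketch of the CQ dimensioning visible in irdma verbs.c above.
 */
#include <linux/minmax.h>
#include <linux/types.h>

struct example_cqe { u64 raw[8]; };     /* assumed 64-byte CQE */

static void example_cq_dimensions(u32 entries, u32 *cq_size, size_t *rsize,
                                  int *reported_cqe)
{
        *cq_size = max(entries, 4U);                    /* minimum ring depth */
        *rsize = *cq_size * sizeof(struct example_cqe); /* bytes to allocate */
        *reported_cqe = *cq_size - 1;                   /* as in the resize path above */
}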
/linux/include/uapi/rdma/

irdma-abi.h
  85  __u32 cq_size;   (member)
/linux/drivers/net/ethernet/mellanox/mlx5/core/fpga/

conn.c
  411  static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size)   in mlx5_fpga_conn_create_cq() (argument)
  424  cq_size = roundup_pow_of_two(cq_size);   in mlx5_fpga_conn_create_cq()
  425  MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(cq_size));   in mlx5_fpga_conn_create_cq()
  455  MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size));   in mlx5_fpga_conn_create_cq()
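Here the mlx5 CQ context stores the size as a log2 value, so the requested entry count is rounded up to a power of two before ilog2() is taken. A sketch of only that conversion, under the assumption that the MLX5_SET() programming of the real cqc is done elsewhere:

/*
 * Sketch of the size conversion in mlx5_fpga_conn_create_cq() above.
 */
#include <linux/log2.h>
#include <linux/types.h>

static u8 example_log_cq_size(int requested_entries)
{
        int cq_size = roundup_pow_of_two(requested_entries);

        return ilog2(cq_size); /* value that ends up in cqc.log_cq_size */
}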
/linux/drivers/infiniband/ulp/isert/

ib_isert.h
  184  u32 cq_size;   (member)
ib_isert.c
  105  u32 cq_size = ISERT_QP_MAX_REQ_DTOS + ISERT_QP_MAX_RECV_DTOS + 2;   in isert_create_qp() (local)
  111  isert_conn->cq = ib_cq_pool_get(ib_dev, cq_size, -1, IB_POLL_WORKQUEUE);   in isert_create_qp()
  117  isert_conn->cq_size = cq_size;   in isert_create_qp()
  139  ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);   in isert_create_qp()
  411  ib_cq_pool_put(isert_conn->cq, isert_conn->cq_size);   in isert_destroy_qp()
/linux/drivers/infiniband/ulp/srpt/

ib_srpt.h
  304  u32 cq_size;   (member)
/linux/drivers/dma/

hisi_dma.c
  342  size_t cq_size = sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth;   in hisi_dma_alloc_qps_mem() (local)
  354  chan->cq = dmam_alloc_coherent(dev, cq_size, &chan->cq_dma,   in hisi_dma_alloc_qps_mem()
/linux/drivers/net/ethernet/pensando/ionic/

ionic_lif.c
   382  dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);   in ionic_qcq_free()
   568  int q_size, cq_size;   in ionic_qcq_alloc() (local)
   572  cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);   in ionic_qcq_alloc()
   574  new->q_size = PAGE_SIZE + q_size + cq_size;   in ionic_qcq_alloc()
   603  new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);   in ionic_qcq_alloc()
   604  new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,   in ionic_qcq_alloc()
   639  dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);   in ionic_qcq_alloc()
   722  memset(qcq->cq_base, 0, qcq->cq_size);   in ionic_qcq_sanitize()
  2451  swap(a->cq_size, b->cq_size);   in ionic_swap_queues()
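In ionic_qcq_alloc() the completion-ring size is the descriptor count times the per-descriptor size, plus an extra PAGE_SIZE of slack, presumably so the ring can be placed on a page boundary inside the coherent allocation. A sketch under that assumption; the helper and its arguments are illustrative, not the ionic structures:

/*
 * Sketch of the completion-ring sizing seen in ionic_qcq_alloc() above.
 */
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/types.h>

static void *example_cq_ring_alloc(struct device *dev, unsigned int num_descs,
                                   size_t cq_desc_size, u32 *cq_size,
                                   dma_addr_t *cq_base_pa)
{
        /* extra page of slack for aligning the ring start */
        *cq_size = PAGE_SIZE + num_descs * cq_desc_size;

        return dma_alloc_coherent(dev, *cq_size, cq_base_pa, GFP_KERNEL);
}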
ionic_lif.h
  69  u32 cq_size;   (member)
ionic_debugfs.c
  133  debugfs_create_x32("cq_size", 0400, qcq_dentry, &qcq->cq_size);   in ionic_debugfs_add_qcq()
/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/

dr_send.c
  919  int cq_size;   in mlx5dr_send_ring_alloc() (local)
  927  cq_size = QUEUE_SIZE + 1;   in mlx5dr_send_ring_alloc()
  928  dmn->send_ring->cq = dr_create_cq(dmn->mdev, dmn->uar, cq_size);   in mlx5dr_send_ring_alloc()
/linux/drivers/nvme/host/

rdma.c
  100  int cq_size;   (member)
  422  ib_cq_pool_put(queue->ib_cq, queue->cq_size);   in nvme_rdma_free_cq()
  481  queue->ib_cq = ib_alloc_cq(ibdev, queue, queue->cq_size,   in nvme_rdma_create_cq()
  485  queue->ib_cq = ib_cq_pool_get(ibdev, queue->cq_size,   in nvme_rdma_create_cq()
  513  queue->cq_size = cq_factor * queue->queue_size + 1;   in nvme_rdma_create_queue_ib()
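nvme-rdma sizes the CQ as a per-queue factor times the queue depth plus one, and then either allocates a dedicated CQ or takes one from the shared pool depending on how the queue will be polled. A sketch of those two paths; struct example_queue and the wants_direct_polling flag are illustrative assumptions, not the nvme-rdma types or its exact polling-context logic:

/*
 * Sketch of the sizing and the pool-vs-direct choice seen in rdma.c above.
 */
#include <rdma/ib_verbs.h>

struct example_queue {
        struct ib_cq *ib_cq;
        int cq_size;
        int queue_size;
};

static int example_queue_create_cq(struct ib_device *ibdev,
                                   struct example_queue *q,
                                   int cq_factor, bool wants_direct_polling)
{
        q->cq_size = cq_factor * q->queue_size + 1;

        if (wants_direct_polling)
                q->ib_cq = ib_alloc_cq(ibdev, q, q->cq_size, 0, IB_POLL_DIRECT);
        else
                q->ib_cq = ib_cq_pool_get(ibdev, q->cq_size, -1, IB_POLL_SOFTIRQ);

        return IS_ERR(q->ib_cq) ? PTR_ERR(q->ib_cq) : 0;
}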
/linux/include/linux/qed/

qed_rdma_if.h
  257  u32 cq_size;   (member)
/linux/drivers/infiniband/hw/mlx4/

mad.c
  1983  int ret, cq_size;   in create_pv_resources() (local)
  2010  cq_size = 2 * nmbr_bufs;   in create_pv_resources()
  2012  cq_size *= 2;   in create_pv_resources()
  2014  cq_attr.cqe = cq_size;   in create_pv_resources()