/linux/drivers/scsi/lpfc/

lpfc_debugfs.h
  645  int cq_idx;                                                in lpfc_debug_dump_cq_by_id() local
  647  for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++)   in lpfc_debug_dump_cq_by_id()
  648  if (phba->sli4_hba.hdwq[cq_idx].io_cq->queue_id == qid)    in lpfc_debug_dump_cq_by_id()
  651  if (cq_idx < phba->cfg_hdw_queue) {                        in lpfc_debug_dump_cq_by_id()
  652  pr_err("IO CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);             in lpfc_debug_dump_cq_by_id()
  653  lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].io_cq);      in lpfc_debug_dump_cq_by_id()
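Taken together, the lpfc_debug_dump_cq_by_id() hits above are a linear lookup: scan the per-hardware-queue IO CQs for the one whose queue_id matches the requested qid, and dump it if found. A minimal userspace sketch of that shape follows; the struct, the queue count, and the dump step are simplified stand-ins, not the real lpfc definitions.

    #include <stdio.h>

    struct sketch_cq {
        int queue_id;                   /* hypothetical firmware-assigned CQ id */
    };

    #define SKETCH_NUM_HDW_QUEUES 4

    static struct sketch_cq io_cq[SKETCH_NUM_HDW_QUEUES] = {
        { 10 }, { 11 }, { 12 }, { 13 },
    };

    /* Walk the per-hardware-queue CQs until one matches the requested qid. */
    static void sketch_dump_cq_by_id(int qid)
    {
        int cq_idx;

        for (cq_idx = 0; cq_idx < SKETCH_NUM_HDW_QUEUES; cq_idx++)
            if (io_cq[cq_idx].queue_id == qid)
                break;

        if (cq_idx < SKETCH_NUM_HDW_QUEUES)
            printf("IO CQ[Idx:%d|Qid:%d]\n", cq_idx, qid);
        else
            printf("no IO CQ with Qid:%d\n", qid);
    }

    int main(void)
    {
        sketch_dump_cq_by_id(12);       /* found at index 2 */
        sketch_dump_cq_by_id(99);       /* falls through to the not-found branch */
        return 0;
    }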
/linux/drivers/net/ethernet/marvell/octeontx2/nic/

otx2_txrx.c
  30    u64 incr = (u64)(cq->cq_idx) << 32;                in otx2_nix_cq_op_status()
  365   seg_size[seg], parse, cq->cq_idx))                 in otx2_rcv_pkt_handler()
  372   skb_record_rx_queue(skb, cq->cq_idx);              in otx2_rcv_pkt_handler()
  414   ((u64)cq->cq_idx << 32) | processed_cqe);          in otx2_rx_napi_handler()
  454   qidx = cq->cq_idx - pfvf->hw.rx_queues;            in otx2_tx_napi_handler()
  489   int workdone = 0, cq_idx, i;                       in otx2_napi_handler() local
  499   cq_idx = cq_poll->cq_ids[i];                       in otx2_napi_handler()
  500   if (unlikely(cq_idx == CINT_INVALID_CQ))           in otx2_napi_handler()
  502   cq = &qset->cq[cq_idx];                            in otx2_napi_handler()
  1034  otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);          in otx2_cleanup_rx_cqes()
  [all …]

otx2_txrx.h
  122   u8 cq_idx;                                         member

cn10k.c
  122   __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,       in cn10k_refill_pool_ptrs()
  130   __cn10k_aura_freeptr(pfvf, cq->cq_idx, ptrs,       in cn10k_refill_pool_ptrs()

otx2_common.c
  550   work = &pfvf->refill_wrk[cq->cq_idx];              in otx2_alloc_buffer()
  879   cq->cq_idx = qidx;                                 in otx2_cq_init()
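The otx2_napi_handler() hits show the poll-loop shape: each interrupt context carries a short list of CQ indices, unused slots are marked with CINT_INVALID_CQ and skipped, and each valid index selects a CQ from the queue set for processing. A compilable sketch of that loop, with simplified types and a hypothetical process helper in place of the driver's real RX/TX handlers:

    #include <stdint.h>
    #include <stdio.h>

    #define CQS_PER_CINT    3
    #define NUM_CQS         8
    #define CINT_INVALID_CQ 0xff            /* hypothetical sentinel value */

    struct sketch_cq { uint8_t cq_idx; int pending; };
    struct sketch_qset { struct sketch_cq cq[NUM_CQS]; };
    struct sketch_cq_poll { uint8_t cq_ids[CQS_PER_CINT]; };

    static int process_cq(struct sketch_cq *cq)
    {
        int done = cq->pending;             /* pretend we drained everything */
        cq->pending = 0;
        return done;
    }

    static int sketch_napi_handler(struct sketch_qset *qset, struct sketch_cq_poll *cq_poll)
    {
        int workdone = 0, cq_idx, i;

        for (i = 0; i < CQS_PER_CINT; i++) {
            cq_idx = cq_poll->cq_ids[i];
            if (cq_idx == CINT_INVALID_CQ)
                continue;                   /* unused slot in this interrupt group */
            workdone += process_cq(&qset->cq[cq_idx]);
        }
        return workdone;
    }

    int main(void)
    {
        struct sketch_qset qset = { .cq = { [0] = { 0, 5 }, [4] = { 4, 2 } } };
        struct sketch_cq_poll poll = { .cq_ids = { 0, CINT_INVALID_CQ, 4 } };

        printf("workdone=%d\n", sketch_napi_handler(&qset, &poll));    /* workdone=7 */
        return 0;
    }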
/linux/drivers/net/ethernet/mellanox/mlx4/

en_cq.c
  90    int cq_idx)                                        in mlx4_en_activate_cq() argument
  126   cq_idx = cq_idx % priv->rx_ring_num;               in mlx4_en_activate_cq()
  127   rx_cq = priv->rx_cq[cq_idx];                       in mlx4_en_activate_cq()

mlx4_en.h
  684   int cq_idx);
/linux/drivers/infiniband/hw/efa/

efa_verbs.c
  986   static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)     in efa_destroy_cq_idx() argument
  988   struct efa_com_destroy_cq_params params = { .cq_idx = cq_idx };    in efa_destroy_cq_idx()
  1006  cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);                in efa_destroy_cq()
  1009  efa_destroy_cq_idx(dev, cq->cq_idx);                               in efa_destroy_cq()
  1011  xa_erase(&dev->cqs_xa, cq->cq_idx);                                in efa_destroy_cq()
  1152  resp.cq_idx = result.cq_idx;                                       in efa_create_cq()
  1153  cq->cq_idx = result.cq_idx;                                        in efa_create_cq()
  1160  cq->cq_idx);                                                       in efa_create_cq()
  1168  cq->cq_idx);                                                       in efa_create_cq()
  1190  xa_erase(&dev->cqs_xa, cq->cq_idx);                                in efa_create_cq()
  [all …]

efa_com_cmd.h
  83    u16 cq_idx;                                        member
  91    u16 cq_idx;                                        member

efa_com_cmd.c
  187   result->cq_idx = cmd_completion.cq_idx;            in efa_com_create_cq()
  204   destroy_cmd.cq_idx = params->cq_idx;               in efa_com_destroy_cq()
  216   params->cq_idx, err);                              in efa_com_destroy_cq()

efa_admin_cmds_defs.h
  485   u16 cq_idx;                                        member
  503   u16 cq_idx;                                        member

efa.h
  96    u16 cq_idx;                                        member
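The efa_create_cq()/efa_destroy_cq() hits outline the lifecycle: the device's admin command returns a cq_idx, the driver records it in the CQ object and in a lookup table keyed by that index, and teardown removes both again. A simplified stand-alone version of that bookkeeping, using a flat array where the driver uses an xarray; all names and limits here are illustrative assumptions:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_CQS 16

    struct sketch_cq { uint16_t cq_idx; size_t size; };

    static struct sketch_cq *cqs_table[MAX_CQS];    /* stand-in for dev->cqs_xa */
    static uint16_t next_device_idx;                /* pretend device-side allocator */

    static int sketch_create_cq(struct sketch_cq *cq, size_t size)
    {
        uint16_t idx = next_device_idx++;           /* "result.cq_idx" from the device */

        if (idx >= MAX_CQS)
            return -1;
        cq->cq_idx = idx;                           /* cq->cq_idx = result.cq_idx */
        cq->size = size;
        cqs_table[idx] = cq;                        /* store keyed by cq_idx */
        return 0;
    }

    static void sketch_destroy_cq(struct sketch_cq *cq)
    {
        printf("destroying cq_idx=%u size=%zu\n", (unsigned int)cq->cq_idx, cq->size);
        cqs_table[cq->cq_idx] = NULL;               /* erase, like xa_erase() */
    }

    int main(void)
    {
        struct sketch_cq cq;

        if (sketch_create_cq(&cq, 256) == 0)
            sketch_destroy_cq(&cq);
        return 0;
    }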
/linux/drivers/net/ethernet/cavium/thunder/

nicvf_main.c
  853   struct cmp_queue *cq = &qs->cq[cq_idx];            in nicvf_cq_intr_handler()
  856   struct snd_queue *sq = &qs->sq[cq_idx];            in nicvf_cq_intr_handler()
  857   struct rcv_queue *rq = &qs->rq[cq_idx];            in nicvf_cq_intr_handler()
  911   cq_idx, processed_cqe);                            in nicvf_cq_intr_handler()
  921   txq_idx = nicvf_netdev_qidx(nic, cq_idx);          in nicvf_cq_intr_handler()
  925   nicvf_xdp_sq_doorbell(nic, sq, cq_idx);            in nicvf_cq_intr_handler()
  972   cq->cq_idx);                                       in nicvf_poll()
  973   nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);  in nicvf_poll()
  975   cq->cq_idx, cq_head);                              in nicvf_poll()
  1043  int qidx = cq_poll->cq_idx;                        in nicvf_intr_handler()
  [all …]

nicvf_queues.h
  245   u8 cq_idx; /* CQ index (0 to 7) in the QS */       member
  267   u8 cq_idx; /* CQ index (0 to 7) in the above QS */ member

nicvf_queues.c
  764   rq->cq_idx = qidx;                                 in nicvf_rcv_queue_config()
  780   (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |     in nicvf_rcv_queue_config()
  877   sq->cq_idx = qidx;                                 in nicvf_snd_queue_config()
  884   mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;        in nicvf_snd_queue_config()

nic.h
  123   u8 cq_idx; /* Completion queue index */            member
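The nicvf_snd_queue_config() hit packs the CQ binding into a mailbox configuration word, with the queue-set number above bit 3 and the 0-7 cq_idx in the low three bits, mirroring the (cq_qs << 3) | cq_idx expression shown above. A tiny sketch of that packing; the field values are made up for illustration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t cq_qs = 2;      /* hypothetical queue-set number */
        uint8_t cq_idx = 5;     /* CQ index within that queue set, 0..7 */
        uint64_t cfg = ((uint64_t)cq_qs << 3) | cq_idx;

        printf("sq cfg word: 0x%llx\n", (unsigned long long)cfg);    /* 0x15 */
        return 0;
    }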
/linux/drivers/net/ethernet/amazon/ena/

ena_admin_defs.h
  221   u16 cq_idx;                                        member
  309   u16 cq_idx;                                        member
  324   u16 cq_idx;                                        member
  870   u16 cq_idx;                                        member

ena_com.c
  1222  struct ena_com_io_sq *io_sq, u16 cq_idx)           in ena_com_create_io_sq() argument
  1253  create_cmd.cq_idx = cq_idx;                        in ena_com_create_io_sq()
  1315  rss->rss_ind_tbl[i].cq_idx = io_sq->idx;           in ena_com_ind_tbl_convert_to_device()
  1425  io_cq->idx = cmd_completion.cq_idx;                in ena_com_create_io_cq()
  1509  destroy_cmd.cq_idx = io_cq->idx;                   in ena_com_destroy_io_cq()
/linux/drivers/infiniband/hw/usnic/

usnic_abi.h
  83    u32 cq_idx[USNIC_QP_GRP_MAX_CQS];                  member

usnic_ib_verbs.c
  155   resp.cq_idx[i] = chunk->res[i]->vnic_idx;          in usnic_ib_fill_create_qp_resp()
/linux/include/uapi/rdma/

efa-abi.h
  76    __u16 cq_idx;                                      member
/linux/drivers/misc/habanalabs/goya/

goyaP.h
  238   u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx);
/linux/drivers/scsi/snic/

snic_scsi.c
  1198  unsigned int cq_idx,                                                 in snic_io_cmpl_handler() argument
  1281  unsigned int cq_idx;                                                 in snic_fwcq_cmpl_handler() local
  1285  for (cq_idx = snic->wq_count; cq_idx < snic->cq_count; cq_idx++) {   in snic_fwcq_cmpl_handler()
  1286  nent_per_cq = vnic_cq_fw_service(&snic->cq[cq_idx],                  in snic_fwcq_cmpl_handler()
/linux/drivers/misc/habanalabs/common/

irq.c
  122   queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);   in hl_irq_handler_cq()
/linux/drivers/scsi/qedi/

qedi_fw.c
  1995  u16 cq_idx;                                        in qedi_iscsi_send_ioreq() local
  2023  cq_idx = smp_processor_id() % qedi->num_queues;    in qedi_iscsi_send_ioreq()
  2088  task_params.cq_rss_number = cq_idx;                in qedi_iscsi_send_ioreq()
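The qedi_iscsi_send_ioreq() hits pick a completion queue by taking the submitting CPU's id modulo the number of queues, which spreads completions across the available CQs. A userspace approximation of that selection, with sched_getcpu() standing in for the kernel's smp_processor_id() and an assumed queue count:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int num_queues = 8;    /* hypothetical CQ count */
        int cpu = sched_getcpu();

        if (cpu < 0)
            cpu = 0;                    /* fall back if the CPU id is unavailable */

        unsigned int cq_idx = (unsigned int)cpu % num_queues;
        printf("cpu %d -> cq_idx %u\n", cpu, cq_idx);
        return 0;
    }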