/linux/drivers/net/ethernet/mellanox/mlx4/

A D | en_cq.c |
      98  cq->mcq.set_ci_db = cq->wqres.db.db;  in mlx4_en_activate_cq()
      99  cq->mcq.arm_db = cq->wqres.db.db + 1;  in mlx4_en_activate_cq()
     100  *cq->mcq.set_ci_db = 0;  in mlx4_en_activate_cq()
     101  *cq->mcq.arm_db = 0;  in mlx4_en_activate_cq()
     138  cq->mcq.usage = MLX4_RES_USAGE_DRIVER;  in mlx4_en_activate_cq()
     140  &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,  in mlx4_en_activate_cq()
     145  cq->mcq.event = mlx4_en_cq_event;  in mlx4_en_activate_cq()
     149  cq->mcq.comp = mlx4_en_tx_irq;  in mlx4_en_activate_cq()
     155  cq->mcq.comp = mlx4_en_rx_irq;  in mlx4_en_activate_cq()
     197  mlx4_cq_free(priv->mdev->dev, &cq->mcq);  in mlx4_en_deactivate_cq()
     [all …]

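The en_cq.c hits show mlx4_en_activate_cq() carving both CQ doorbells out of a single doorbell record: two consecutive __be32 words, consumer index first, arm counter second, both cleared before the CQ is used. A minimal sketch reassembling those fragments; allocation and error handling are elided, and `is_tx` is a hypothetical stand-in for the driver's CQ-type check:

    /* Sketch only, reassembled from the en_cq.c hits above. One doorbell
     * record supplies two consecutive __be32 words.
     */
    cq->mcq.set_ci_db  = cq->wqres.db.db;      /* consumer-index doorbell */
    cq->mcq.arm_db     = cq->wqres.db.db + 1;  /* arm doorbell */
    *cq->mcq.set_ci_db = 0;                    /* both start cleared */
    *cq->mcq.arm_db    = 0;

    cq->mcq.event = mlx4_en_cq_event;          /* async/error event callback */
    cq->mcq.comp  = is_tx ? mlx4_en_tx_irq     /* is_tx: hypothetical selector */
                          : mlx4_en_rx_irq;    /* per-direction completion IRQ */
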
A D | cq.c |
      63  struct mlx4_cq *mcq, *temp;  in mlx4_cq_tasklet_cb() local
      69  list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) {  in mlx4_cq_tasklet_cb()
      70  list_del_init(&mcq->tasklet_ctx.list);  in mlx4_cq_tasklet_cb()
      71  mcq->tasklet_ctx.comp(mcq);  in mlx4_cq_tasklet_cb()
      72  if (refcount_dec_and_test(&mcq->refcount))  in mlx4_cq_tasklet_cb()
      73  complete(&mcq->free);  in mlx4_cq_tasklet_cb()

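Those six hits are essentially the whole completion tasklet: drain the list of CQs queued from the EQ interrupt, run each CQ's completion callback, and drop the reference taken when the CQ was queued, waking anyone blocked in mlx4_cq_free(). A sketch of the loop (the locking around process_list is elided, as it is in the hits):

    struct mlx4_cq *mcq, *temp;

    list_for_each_entry_safe(mcq, temp, &ctx->process_list, tasklet_ctx.list) {
            list_del_init(&mcq->tasklet_ctx.list);  /* detach before calling out */
            mcq->tasklet_ctx.comp(mcq);             /* per-CQ completion handler */
            if (refcount_dec_and_test(&mcq->refcount))
                    complete(&mcq->free);           /* unblocks a waiting mlx4_cq_free() */
    }
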
A D | en_rx.c |
     341  ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;  in mlx4_en_activate_rx_rings()
     691  index = cq->mcq.cons_index & ring->size_mask;  in mlx4_en_process_rx_cq()
     696  cq->mcq.cons_index & cq->size)) {  in mlx4_en_process_rx_cq()
     905  ++cq->mcq.cons_index;  in mlx4_en_process_rx_cq()
     906  index = (cq->mcq.cons_index) & ring->size_mask;  in mlx4_en_process_rx_cq()
     921  mlx4_cq_set_ci(&cq->mcq);  in mlx4_en_process_rx_cq()
     923  ring->cons = cq->mcq.cons_index;  in mlx4_en_process_rx_cq()
     932  void mlx4_en_rx_irq(struct mlx4_cq *mcq)  in mlx4_en_rx_irq() argument
     934  struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);  in mlx4_en_rx_irq()

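The mlx4_en_process_rx_cq() hits outline the RX poll loop: mask the consumer index into a ring slot, test CQE ownership against the ring's wrap state (the fragment at 696 is the tail of that test, using the driver's local XNOR() helper), advance, then publish progress with mlx4_cq_set_ci(). A condensed sketch; get_cqe() is a hypothetical accessor standing in for the driver's CQE indexing, and frame processing is elided:

    u32 index = cq->mcq.cons_index & ring->size_mask;
    struct mlx4_cqe *cqe = get_cqe(cq, index);          /* hypothetical accessor */

    /* A CQE is software-owned when its owner bit matches the current
     * wrap state of the consumer index (cons_index & cq->size).
     */
    while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
                cq->mcq.cons_index & cq->size)) {
            /* ... hand the frame to the stack (elided) ... */
            ++cq->mcq.cons_index;
            index = cq->mcq.cons_index & ring->size_mask;
            cqe = get_cqe(cq, index);                   /* hypothetical accessor */
    }

    mlx4_cq_set_ci(&cq->mcq);          /* tell HW how far we have consumed */
    ring->cons = cq->mcq.cons_index;
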
A D | en_tx.c |
     429  struct mlx4_cq *mcq = &cq->mcq;  in mlx4_en_process_tx_cq() local
     435  u32 cons_index = mcq->cons_index;  in mlx4_en_process_tx_cq()
     513  mcq->cons_index = cons_index;  in mlx4_en_process_tx_cq()
     514  mlx4_cq_set_ci(mcq);  in mlx4_en_process_tx_cq()
     537  void mlx4_en_tx_irq(struct mlx4_cq *mcq)  in mlx4_en_tx_irq() argument
     539  struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);  in mlx4_en_tx_irq()

A D | mlx4_en.h |
     355  struct mlx4_cq mcq;  member
     689  void mlx4_en_tx_irq(struct mlx4_cq *mcq);
     752  void mlx4_en_rx_irq(struct mlx4_cq *mcq);

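Lines 689/752 declare the callbacks the core layer invokes with a bare struct mlx4_cq pointer; both handlers (en_rx.c:934, en_tx.c:539) recover the Ethernet wrapper via container_of(). That only works because mlx4_en_cq embeds the core CQ by value, per the member at line 355. A sketch of the pattern with the handler body elided:

    void mlx4_en_rx_irq(struct mlx4_cq *mcq)
    {
            /* mcq points into the middle of a mlx4_en_cq; subtracting the
             * member offset recovers the wrapper.
             */
            struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);

            /* ... kick NAPI for this channel (elided) ... */
    }
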
/linux/drivers/infiniband/hw/mlx4/

A D | cq.c |
      98  return mlx4_cq_modify(dev->dev, &mcq->mcq, cq_count, cq_period);  in mlx4_ib_modify_cq()
     231  *cq->mcq.set_ci_db = 0;  in mlx4_ib_create_cq()
     232  *cq->mcq.arm_db = 0;  in mlx4_ib_create_cq()
     248  &cq->mcq, vector, 0,  in mlx4_ib_create_cq()
     347  i = cq->mcq.cons_index;  in mlx4_ib_get_outstanding_cqes()
     361  i = cq->mcq.cons_index;  in mlx4_ib_cq_resize_copy_cqes()
     376  ++cq->mcq.cons_index;  in mlx4_ib_cq_resize_copy_cqes()
     482  mlx4_cq_free(dev->dev, &mcq->mcq);  in mlx4_ib_destroy_cq()
     491  &mcq->db);  in mlx4_ib_destroy_cq()
     680  ++cq->mcq.cons_index;  in mlx4_ib_poll_one()
     [all …]

A D | qp.c |
     830  struct mlx4_ib_cq *mcq;  in create_rq() local
     930  mcq = to_mcq(init_attr->send_cq);  in create_rq()
     932  mcq = to_mcq(init_attr->recv_cq);  in create_rq()
     962  struct mlx4_ib_cq *mcq;  in create_qp_common() local
    1204  mcq = to_mcq(init_attr->send_cq);  in create_qp_common()
    1206  mcq = to_mcq(init_attr->recv_cq);  in create_qp_common()
    1269  } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {  in mlx4_ib_lock_cqs()
    1284  } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {  in mlx4_ib_unlock_cqs()
    4377  if (!mcq->mcq.reset_notify_added)  in handle_drain_completion()
    4378  mcq->mcq.reset_notify_added = 1;  in handle_drain_completion()
     [all …]

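The mlx4_ib_lock_cqs()/mlx4_ib_unlock_cqs() hits at 1269/1284 show the classic two-lock ordering rule: when a QP's send and receive CQs differ, every path takes the lock of the lower CQN first, so two CPUs locking the same pair can never deadlock against each other. A sketch of the idea; the helper name is hypothetical, interrupt disabling is elided, and the single-CQ case is folded in as in the real helpers:

    /* Hypothetical condensed form of the CQN-ordered locking above. */
    static void lock_cq_pair(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
    {
            if (send_cq == recv_cq) {
                    spin_lock(&send_cq->lock);   /* one CQ, one lock */
            } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
                    spin_lock(&send_cq->lock);
                    spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
            } else {
                    spin_lock(&recv_cq->lock);
                    spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
            }
    }
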
A D | mlx4_ib.h |
     117  struct mlx4_cq mcq;  member
     693  static inline struct mlx4_ib_cq *to_mibcq(struct mlx4_cq *mcq)  in to_mibcq() argument
     695  return container_of(mcq, struct mlx4_ib_cq, mcq);  in to_mibcq()

A D | main.c |
    3064  struct mlx4_cq *mcq;  in mlx4_ib_handle_catas_error() local
    3078  if (send_mcq->mcq.comp &&  in mlx4_ib_handle_catas_error()
    3080  if (!send_mcq->mcq.reset_notify_added) {  in mlx4_ib_handle_catas_error()
    3081  send_mcq->mcq.reset_notify_added = 1;  in mlx4_ib_handle_catas_error()
    3082  list_add_tail(&send_mcq->mcq.reset_notify,  in mlx4_ib_handle_catas_error()
    3096  if (recv_mcq->mcq.comp &&  in mlx4_ib_handle_catas_error()
    3098  if (!recv_mcq->mcq.reset_notify_added) {  in mlx4_ib_handle_catas_error()
    3099  recv_mcq->mcq.reset_notify_added = 1;  in mlx4_ib_handle_catas_error()
    3100  list_add_tail(&recv_mcq->mcq.reset_notify,  in mlx4_ib_handle_catas_error()
    3111  list_for_each_entry(mcq, &cq_notify_list, reset_notify) {  in mlx4_ib_handle_catas_error()
     [all …]

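mlx4_ib_handle_catas_error() walks every QP after a catastrophic device error, queues each attached CQ onto a local list exactly once (reset_notify_added is the once-only guard), then fires each queued CQ's completion handler so consumers flush and observe the reset. A condensed sketch of the pattern, with the per-QP iteration and locking elided:

    LIST_HEAD(cq_notify_list);
    struct mlx4_cq *mcq;

    /* For each QP on the device (iteration elided): queue its CQs once. */
    if (send_mcq->mcq.comp && !send_mcq->mcq.reset_notify_added) {
            send_mcq->mcq.reset_notify_added = 1;
            list_add_tail(&send_mcq->mcq.reset_notify, &cq_notify_list);
    }
    /* ... same check-and-queue for recv_mcq ... */

    /* Then deliver one synthetic completion per queued CQ. */
    list_for_each_entry(mcq, &cq_notify_list, reset_notify)
            mcq->comp(mcq);
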
/linux/drivers/infiniband/hw/mlx5/

A D | cq.c |
      50  struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq);  in mlx5_ib_cq_event()
      57  type, mcq->cqn);  in mlx5_ib_cq_event()
     463  ++cq->mcq.cons_index;  in mlx5_poll_one()
     583  cq->mcq.cqn);  in poll_soft_wc()
     650  mlx5_cq_arm(&cq->mcq,  in mlx5_ib_arm_cq()
    1045  ret = mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);  in mlx5_ib_destroy_cq()
    1134  err = mlx5_core_modify_cq_moderation(dev->mdev, &mcq->mcq,  in mlx5_ib_modify_cq()
    1137  mlx5_ib_warn(dev, "modify cq 0x%x failed\n", mcq->mcq.cqn);  in mlx5_ib_modify_cq()
    1217  i = cq->mcq.cons_index;  in copy_resize_cqes()
    1244  cq->mcq.cqn);  in copy_resize_cqes()
     [all …]

A D | qp.c |
    2089  to_mcq(init_attr->send_cq)->mcq.cqn);  in create_dci()
    2093  to_mcq(init_attr->recv_cq)->mcq.cqn);  in create_dci()
    2480  if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {  in mlx5_ib_lock_cqs()
    2484  } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {  in mlx5_ib_lock_cqs()
    2510  if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {  in mlx5_ib_unlock_cqs()
    2513  } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {  in mlx5_ib_unlock_cqs()
    4145  MLX5_SET(qpc, qpc, cqn_snd, send_cq->mcq.cqn);  in __mlx5_ib_modify_qp()
    5562  struct mlx5_ib_cq *mcq = to_mcq(cq);  in handle_drain_completion() local
    5568  if (!mcq->mcq.reset_notify_added)  in handle_drain_completion()
    5569  mcq->mcq.reset_notify_added = 1;  in handle_drain_completion()
     [all …]

A D | restrack.c |
     156  return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_CQ, cq->mcq.cqn);  in fill_res_cq_entry_raw()

A D | srq.c |
     275  in.cqn = to_mcq(init_attr->ext.cq)->mcq.cqn;  in mlx5_ib_create_srq()
     277  in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;  in mlx5_ib_create_srq()

/linux/drivers/net/ethernet/mellanox/mlx5/core/

A D | en_dim.c |
      38  struct mlx5_core_dev *mdev, struct mlx5_core_cq *mcq)  in mlx5e_complete_dim_work() argument
      40  mlx5_core_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts);  in mlx5e_complete_dim_work()
      51  mlx5e_complete_dim_work(dim, cur_moder, rq->mdev, &rq->cq.mcq);  in mlx5e_rx_dim_work()
      61  mlx5e_complete_dim_work(dim, cur_moder, sq->cq.mdev, &sq->cq.mcq);  in mlx5e_tx_dim_work()

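The en_dim.c hits show both DIM work handlers funneling into one helper that pushes the selected moderation profile down to firmware for the channel's CQ. A sketch of that helper per the hits; the DIM state reset at the end is an assumption based on the standard dim contract, not something visible in the hits:

    #include <linux/dim.h>

    static void complete_dim_work(struct dim *dim, struct dim_cq_moder moder,
                                  struct mlx5_core_dev *mdev,
                                  struct mlx5_core_cq *mcq)
    {
            /* Apply the profile chosen by the DIM algorithm to the CQ. */
            mlx5_core_modify_cq_moderation(mdev, mcq, moder.usec, moder.pkts);
            dim->state = DIM_START_MEASURE;  /* assumed: open a new sample window */
    }
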
A D | en_txrx.c |
     245  void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)  in mlx5e_completion_event() argument
     247  struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);  in mlx5e_completion_event()
     254  void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)  in mlx5e_cq_error_event() argument
     256  struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);  in mlx5e_cq_error_event()
     260  __func__, mcq->cqn, event);  in mlx5e_cq_error_event()

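Both entry points recover the netdev CQ wrapper with container_of() before doing anything else. A sketch of the completion path; the napi_schedule() call is an assumption from the usual netdev completion pattern (only the recovery and the error-path print appear in the hits):

    void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
    {
            struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);

            napi_schedule(cq->napi);  /* assumed: defer the real work to NAPI */
    }
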
A D | cq.c |
      50  struct mlx5_core_cq *mcq;  in mlx5_cq_tasklet_cb() local
      57  list_for_each_entry_safe(mcq, temp, &ctx->process_list,  in mlx5_cq_tasklet_cb()
      59  list_del_init(&mcq->tasklet_ctx.list);  in mlx5_cq_tasklet_cb()
      60  mcq->tasklet_ctx.comp(mcq, NULL);  in mlx5_cq_tasklet_cb()
      61  mlx5_cq_put(mcq);  in mlx5_cq_tasklet_cb()

A D | en_main.c |
    1503  csp.cqn = sq->cq.mcq.cqn;  in mlx5e_open_txqsq()
    1598  csp.cqn = sq->cq.mcq.cqn;  in mlx5e_open_icosq()
    1657  csp.cqn = sq->cq.mcq.cqn;  in mlx5e_open_xdpsq()
    1723  struct mlx5_core_cq *mcq = &cq->mcq;  in mlx5e_alloc_cq_common() local
    1732  mcq->cqe_sz = 64;  in mlx5e_alloc_cq_common()
    1733  mcq->set_ci_db = cq->wq_ctrl.db.db;  in mlx5e_alloc_cq_common()
    1735  *mcq->set_ci_db = 0;  in mlx5e_alloc_cq_common()
    1736  *mcq->arm_db = 0;  in mlx5e_alloc_cq_common()
    1737  mcq->vector = param->eq_ix;  in mlx5e_alloc_cq_common()
    1739  mcq->event = mlx5e_cq_error_event;  in mlx5e_alloc_cq_common()
     [all …]

/linux/drivers/net/ethernet/mellanox/mlx5/core/en/

A D | txrx.h |
      54  void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe);
      55  void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
     245  struct mlx5_core_cq *mcq;  in mlx5e_cq_arm() local
     247  mcq = &cq->mcq;  in mlx5e_cq_arm()
     248  mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, cq->wq.cc);  in mlx5e_cq_arm()
     331  cq->mcq.cqn, ci, qn,  in mlx5e_dump_error_cqe()

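mlx5e_cq_arm() (lines 245-248) is the re-arm half of the interrupt handshake: after a poll round the driver writes its software consumer counter through the UAR with a request-one-more-event command. A usage sketch; the wrapper name is hypothetical and CQE consumption is elided:

    /* Hypothetical wrapper showing where the re-arm sits in a poll cycle. */
    static void poll_round_then_rearm(struct mlx5e_cq *cq)
    {
            /* ... consume ready CQEs, advancing cq->wq's consumer counter ... */

            mlx5_cq_arm(&cq->mcq, MLX5_CQ_DB_REQ_NOT,  /* "notify on next CQE" */
                        cq->mcq.uar->map,              /* UAR doorbell page */
                        cq->wq.cc);                    /* SW consumer counter */
    }
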
A D | reporter_rx.c |
     182  eq = rq->cq.mcq.eq;  in mlx5e_rx_reporter_timeout_recover()
     241  err = devlink_fmsg_u32_pair_put(fmsg, "cqn", icosq->cq.mcq.cqn);  in mlx5e_reporter_icosq_diagnose()
     306  err = mlx5e_health_eq_diag_fmsg(rq->cq.mcq.eq, fmsg);  in mlx5e_rx_reporter_build_diagnose_output_rq_common()
     699  rq->ix, icosq_str, rq->rqn, rq->cq.mcq.cqn);  in mlx5e_reporter_rx_timeout()

A D | health.c |
      45  err = mlx5_core_query_cq(cq->mdev, &cq->mcq, out);  in mlx5e_health_cq_diag_fmsg()
      56  err = devlink_fmsg_u32_pair_put(fmsg, "cqn", cq->mcq.cqn);  in mlx5e_health_cq_diag_fmsg()

A D | reporter_tx.c |
     106  eq = sq->cq.mcq.eq;  in mlx5e_tx_reporter_timeout_recover()
     193  return mlx5e_health_eq_diag_fmsg(sq->cq.mcq.eq, fmsg);  in mlx5e_tx_reporter_build_diagnose_output_sq_common()
     575  sq->ch_ix, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,  in mlx5e_reporter_tx_timeout()

/linux/drivers/net/ethernet/mellanox/mlx5/core/fpga/

A D | conn.c |
     361  mlx5_cq_arm(&conn->cq.mcq, MLX5_CQ_DB_REQ_NOT,  in mlx5_fpga_conn_arm_cq()
     405  conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);  in mlx5_fpga_conn_cq_complete()
     471  conn->cq.mcq.cqe_sz = 64;  in mlx5_fpga_conn_create_cq()
     474  *conn->cq.mcq.set_ci_db = 0;  in mlx5_fpga_conn_create_cq()
     475  *conn->cq.mcq.arm_db = 0;  in mlx5_fpga_conn_create_cq()
     476  conn->cq.mcq.vector = 0;  in mlx5_fpga_conn_create_cq()
     478  conn->cq.mcq.uar = fdev->conn_res.uar;  in mlx5_fpga_conn_create_cq()
     574  MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);  in mlx5_fpga_conn_create_qp()
     575  MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);  in mlx5_fpga_conn_create_qp()
     686  MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);  in mlx5_fpga_conn_init_qp()
     [all …]

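Two details in these hits are worth calling out. The FPGA connection uses a single CQ for both directions, so the same CQN lands in both QP-context fields (574-575); and the completion callback recovers its connection through a nested member path (405), which container_of() handles just like a flat one. Per the hits:

    /* One CQ serves send and receive, so both QPC fields get its CQN. */
    MLX5_SET(qpc, qpc, cqn_snd, conn->cq.mcq.cqn);
    MLX5_SET(qpc, qpc, cqn_rcv, conn->cq.mcq.cqn);

    /* container_of() with a nested member path: the embedded
     * mlx5_core_cq sits inside conn->cq, itself inside the conn.
     */
    conn = container_of(mcq, struct mlx5_fpga_conn, cq.mcq);
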
A D | conn.h |
      58  struct mlx5_core_cq mcq;  member

/linux/drivers/net/ethernet/mellanox/mlx5/core/steering/

A D | dr_send.c |
     752  pr_err("CQ completion CQ: #%u\n", mcq->cqn);  in dr_cq_complete()
     814  cq->mcq.comp = dr_cq_complete;  in dr_create_cq()
     822  cq->mcq.cqe_sz = 64;  in dr_create_cq()
     823  cq->mcq.set_ci_db = cq->wq_ctrl.db.db;  in dr_create_cq()
     824  cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;  in dr_create_cq()
     825  *cq->mcq.set_ci_db = 0;  in dr_create_cq()
     830  *cq->mcq.arm_db = cpu_to_be32(2 << 28);  in dr_create_cq()
     832  cq->mcq.vector = 0;  in dr_create_cq()
     833  cq->mcq.uar = uar;  in dr_create_cq()
     846  mlx5_core_destroy_cq(mdev, &cq->mcq);  in dr_destroy_cq()
     [all …]

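dr_create_cq() is the odd one out: every other user in this listing zeroes arm_db, but line 830 seeds it with a non-zero command value. The software-steering channel only ever polls this CQ and never arms it; the non-zero value appears intended to keep the device's doorbell-recovery logic from acting on a doorbell that is deliberately idle (that rationale is inferred, not visible in the hits). A sketch of the divergent setup:

    cq->mcq.set_ci_db  = cq->wq_ctrl.db.db;
    cq->mcq.arm_db     = cq->wq_ctrl.db.db + 1;
    *cq->mcq.set_ci_db = 0;
    /* Poll-only CQ: park the arm doorbell at a non-zero command value
     * instead of arming it (rationale inferred; see lead-in above).
     */
    *cq->mcq.arm_db    = cpu_to_be32(2 << 28);
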
/linux/drivers/vdpa/mlx5/net/

A D | mlx5_vnet.c |
      67  struct mlx5_core_cq mcq;  member
     523  vcq->mcq.cons_index++;  in mlx5_vdpa_poll_one()
     533  mlx5_cq_set_ci(&mvq->cq.mcq);  in mlx5_vdpa_handle_completions()
     546  struct mlx5_vdpa_virtqueue *mvq = container_of(mcq, struct mlx5_vdpa_virtqueue, cq.mcq);  in mlx5_vdpa_cq_comp()
     568  mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index);  in mlx5_vdpa_cq_comp()
     589  vcq->mcq.set_ci_db = vcq->db.db;  in cq_create()
     590  vcq->mcq.arm_db = vcq->db.db + 1;  in cq_create()
     591  vcq->mcq.cqe_sz = 64;  in cq_create()
     633  vcq->mcq.set_ci_db = vcq->db.db;  in cq_create()
     634  vcq->mcq.arm_db = vcq->db.db + 1;  in cq_create()
     [all …]

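The vDPA hits complete the consume/publish/re-arm cycle seen throughout this listing: poll one CQE and bump cons_index (523), publish the new consumer index with mlx5_cq_set_ci() (533), then re-arm with that same index (568). A condensed sketch; poll_one() is a hypothetical stand-in for the per-CQE work:

    /* Condensed from the mlx5_vdpa_handle_completions()/_cq_comp() hits. */
    static void handle_completions(struct mlx5_vdpa_virtqueue *mvq,
                                   void __iomem *uar_page)
    {
            while (poll_one(&mvq->cq) == 0)      /* hypothetical: consume a CQE */
                    mvq->cq.mcq.cons_index++;

            mlx5_cq_set_ci(&mvq->cq.mcq);        /* publish consumer index to HW */
            mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT,
                        uar_page, mvq->cq.mcq.cons_index);  /* request next event */
    }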