
Searched refs:ibcq (Results 1 – 25 of 58) sorted by relevance


/linux/drivers/infiniband/sw/rdmavt/
cq.c
55 head = cq->ibcq.cqe; in rvt_cq_enter()
68 if (cq->ibcq.event_handler) { in rvt_cq_enter()
72 ev.element.cq = &cq->ibcq; in rvt_cq_enter()
74 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in rvt_cq_enter()
140 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in send_complete()
249 cq->ibcq.cqe = entries; in rvt_create_cq()
394 head = (u32)cq->ibcq.cqe; in rvt_resize_cq()
396 tail = (u32)cq->ibcq.cqe; in rvt_resize_cq()
415 cq->ibcq.cqe = cqe; in rvt_resize_cq()
493 tail = (u32)cq->ibcq.cqe; in rvt_poll_cq()
[all …]
cq.h
12 int rvt_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
14 int rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
15 int rvt_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
16 int rvt_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
17 int rvt_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
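Note on the rvt_cq_enter() hits above: they show the usual shape of raising an asynchronous event on a CQ. The driver fills a struct ib_event, points event.element.cq at the embedded ib_cq, and calls the consumer's event_handler with its saved cq_context, but only if a handler was registered. Below is a minimal userspace sketch of that dispatch, using simplified stand-ins for the kernel's ib_verbs.h types; cq_raise_event() and the event code are hypothetical names for illustration.

#include <stdio.h>

/* Simplified stand-ins for the kernel's ib_verbs.h types. */
struct ib_cq;
struct ib_event {
        union { struct ib_cq *cq; } element;
        int event;                  /* e.g. IB_EVENT_CQ_ERR in the kernel */
};
struct ib_cq {
        int cqe;                    /* usable entries, set at create/resize */
        void (*event_handler)(struct ib_event *, void *);
        void *cq_context;
};

/* Mirrors the guarded dispatch in rvt_cq_enter(). */
static void cq_raise_event(struct ib_cq *ibcq, int event)
{
        if (ibcq->event_handler) {
                struct ib_event ev = { .element.cq = ibcq, .event = event };
                ibcq->event_handler(&ev, ibcq->cq_context);
        }
}

static void on_event(struct ib_event *ev, void *ctx)
{
        printf("CQ event %d on %s\n", ev->event, (char *)ctx);
}

int main(void)
{
        struct ib_cq cq = { .event_handler = on_event, .cq_context = "demo-cq" };
        cq_raise_event(&cq, 1 /* hypothetical event code */);
        return 0;
}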
/linux/drivers/infiniband/hw/mlx4/
cq.c
45 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx4_ib_cq_comp() local
46 ibcq->comp_handler(ibcq, ibcq->cq_context); in mlx4_ib_cq_comp()
52 struct ib_cq *ibcq; in mlx4_ib_cq_event() local
60 ibcq = &to_mibcq(cq)->ibcq; in mlx4_ib_cq_event()
61 if (ibcq->event_handler) { in mlx4_ib_cq_event()
64 event.element.cq = ibcq; in mlx4_ib_cq_event()
65 ibcq->event_handler(&event, ibcq->cq_context); in mlx4_ib_cq_event()
404 if (ibcq->uobject) { in mlx4_ib_resize_cq()
428 if (ibcq->uobject) { in mlx4_ib_resize_cq()
445 tmp_cqe = cq->ibcq.cqe; in mlx4_ib_resize_cq()
[all …]
mlx4_ib.h
116 struct ib_cq ibcq; member
688 static inline struct mlx4_ib_cq *to_mcq(struct ib_cq *ibcq) in to_mcq() argument
690 return container_of(ibcq, struct mlx4_ib_cq, ibcq); in to_mcq()
766 int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
767 int mlx4_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
770 int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
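A pattern repeated across every driver in these results: the provider wraps the core's struct ib_cq inside its own CQ structure (the "struct ib_cq ibcq; member" hits) and recovers the wrapper with container_of(), exactly as mlx4's to_mcq() does above. A self-contained sketch follows, with a portable container_of() standing in for the kernel macro; mlx4_ib_cq_mock and its private field are hypothetical names.

#include <stddef.h>
#include <stdio.h>

/* Portable stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct ib_cq {
        int cqe;
};

/* Hypothetical wrapper mirroring mlx4_ib_cq: the core object is embedded
 * first, driver-private state follows. */
struct mlx4_ib_cq_mock {
        struct ib_cq ibcq;
        int arm_sn;             /* hypothetical driver-private field */
};

/* Same shape as mlx4's to_mcq(): ib_cq pointer back to the wrapper. */
static struct mlx4_ib_cq_mock *to_mcq(struct ib_cq *ibcq)
{
        return container_of(ibcq, struct mlx4_ib_cq_mock, ibcq);
}

int main(void)
{
        struct mlx4_ib_cq_mock cq = { .ibcq.cqe = 255, .arm_sn = 7 };
        struct ib_cq *handle = &cq.ibcq;   /* what the core passes around */

        printf("arm_sn=%d cqe=%d\n", to_mcq(handle)->arm_sn, handle->cqe);
        return 0;
}

Because the ib_cq sits at the start of the wrapper, the conversion is a no-op at runtime; the macro exists so the code stays correct even if the layout changes.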
/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_cq.c
67 struct pvrdma_cq *cq = to_vcq(ibcq); in pvrdma_req_notify_cq()
83 cq->ibcq.cqe, &head); in pvrdma_req_notify_cq()
132 cq->ibcq.cqe = entries; in pvrdma_create_cq()
195 cq->ibcq.cqe = resp->cqe; in pvrdma_create_cq()
288 cq->ibcq.cqe, &head); in _pvrdma_flush_cqe()
293 cq->ibcq.cqe); in _pvrdma_flush_cqe()
298 (cq->ibcq.cqe - head + tail); in _pvrdma_flush_cqe()
302 curr = cq->ibcq.cqe - 1; in _pvrdma_flush_cqe()
304 tail = cq->ibcq.cqe - 1; in _pvrdma_flush_cqe()
315 cq->ibcq.cqe); in _pvrdma_flush_cqe()
[all …]
pvrdma_main.c
186 INIT_RDMA_OBJ_SIZE(ib_cq, pvrdma_cq, ibcq),
321 if (cq && cq->ibcq.event_handler) { in pvrdma_cq_event()
322 struct ib_cq *ibcq = &cq->ibcq; in pvrdma_cq_event() local
325 e.device = ibcq->device; in pvrdma_cq_event()
326 e.element.cq = ibcq; in pvrdma_cq_event()
328 ibcq->event_handler(&e, ibcq->cq_context); in pvrdma_cq_event()
492 if (cq && cq->ibcq.comp_handler) in pvrdma_intrx_handler()
493 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in pvrdma_intrx_handler()
pvrdma.h
87 struct ib_cq ibcq; member
274 static inline struct pvrdma_cq *to_vcq(struct ib_cq *ibcq) in to_vcq() argument
276 return container_of(ibcq, struct pvrdma_cq, ibcq); in to_vcq()
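The INIT_RDMA_OBJ_SIZE(ib_cq, pvrdma_cq, ibcq) hit in pvrdma_main.c is how the driver tells the RDMA core the true size of its CQ object: the core allocates the whole pvrdma_cq and passes the embedded ib_cq to the .create_cq callback, which is why the create/destroy prototypes in these results take an already-allocated struct ib_cq *. In current kernels the macro (include/rdma/ib_verbs.h) also build-asserts that the member sits at offset zero. A sketch of the allocation side under mock types; pvrdma_cq_mock and core_alloc_cq() are hypothetical.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct ib_cq {
        int cqe;
};

/* Hypothetical wrapper mirroring pvrdma_cq. */
struct pvrdma_cq_mock {
        struct ib_cq ibcq;      /* must sit at offset 0, checked below */
        int ring_slots;         /* hypothetical driver-private field */
};

/* The invariant INIT_RDMA_OBJ_SIZE() enforces at build time. */
static_assert(offsetof(struct pvrdma_cq_mock, ibcq) == 0,
              "ib_cq must be the first member");

/* Core-side view: allocate the driver-sized object and hand back the
 * embedded ib_cq (the same address, thanks to offset 0). */
static struct ib_cq *core_alloc_cq(size_t drv_size)
{
        return calloc(1, drv_size);
}

int main(void)
{
        struct ib_cq *ibcq = core_alloc_cq(sizeof(struct pvrdma_cq_mock));
        struct pvrdma_cq_mock *cq;

        if (!ibcq)
                return 1;
        cq = (struct pvrdma_cq_mock *)ibcq;
        cq->ring_slots = 64;
        cq->ibcq.cqe = 63;
        printf("cqe=%d slots=%d\n", ibcq->cqe, cq->ring_slots);
        free(ibcq);
        return 0;
}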
/linux/drivers/infiniband/sw/rxe/
rxe_cq.c
54 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in rxe_send_complete()
87 cq->ibcq.cqe = cqe; in rxe_cq_from_init()
101 cq->ibcq.cqe = cqe; in rxe_cq_resize_queue()
118 if (cq->ibcq.event_handler) { in rxe_cq_post()
119 ev.device = cq->ibcq.device; in rxe_cq_post()
120 ev.element.cq = &cq->ibcq; in rxe_cq_post()
122 cq->ibcq.event_handler(&ev, cq->ibcq.cq_context); in rxe_cq_post()
rxe_verbs.c
776 struct ib_device *dev = ibcq->device; in rxe_create_cq()
778 struct rxe_cq *cq = to_rcq(ibcq); in rxe_create_cq()
802 static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) in rxe_destroy_cq() argument
804 struct rxe_cq *cq = to_rcq(ibcq); in rxe_destroy_cq()
815 struct rxe_cq *cq = to_rcq(ibcq); in rxe_resize_cq()
816 struct rxe_dev *rxe = to_rdev(ibcq->device); in rxe_resize_cq()
842 struct rxe_cq *cq = to_rcq(ibcq); in rxe_poll_cq()
860 static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt) in rxe_peek_cq() argument
862 struct rxe_cq *cq = to_rcq(ibcq); in rxe_peek_cq()
872 struct rxe_cq *cq = to_rcq(ibcq); in rxe_req_notify_cq()
[all …]
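rxe_send_complete() above shows the other callback of the pair: when work completions land, the driver invokes comp_handler so the consumer knows to poll the CQ. Most drivers in these results guard the call (see pvrdma_intrx_handler() and qedr_irq_handler()), since registering a completion handler is optional. A mock of that guarded notification; cq_signal_completion() is a hypothetical name.

#include <stdio.h>

struct ib_cq {
        void (*comp_handler)(struct ib_cq *, void *);
        void *cq_context;
};

/* Mirrors the guarded call in rxe_send_complete() and qedr_irq_handler():
 * the completion callback is optional, so drivers test it first. */
static void cq_signal_completion(struct ib_cq *ibcq)
{
        if (ibcq->comp_handler)
                ibcq->comp_handler(ibcq, ibcq->cq_context);
}

/* Consumer side: a real handler would typically re-arm notification and
 * drain the queue (ib_req_notify_cq() + ib_poll_cq() in kernel consumers). */
static void on_completion(struct ib_cq *cq, void *ctx)
{
        (void)cq;
        printf("completion signalled on %s\n", (char *)ctx);
}

int main(void)
{
        struct ib_cq armed = { .comp_handler = on_completion,
                               .cq_context = "send-cq" };
        struct ib_cq silent = { 0 };   /* no handler: call is skipped */

        cq_signal_completion(&armed);
        cq_signal_completion(&silent);
        return 0;
}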
/linux/drivers/infiniband/hw/mthca/
mthca_cq.c
230 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in mthca_cq_completion()
254 event.element.cq = &cq->ibcq; in mthca_cq_event()
255 if (cq->ibcq.event_handler) in mthca_cq_event()
256 cq->ibcq.event_handler(&event, cq->ibcq.cq_context); in mthca_cq_event()
337 cq->cons_index &= cq->ibcq.cqe; in mthca_cq_resize_copy_cqes()
339 cq->cons_index -= cq->ibcq.cqe + 1; in mthca_cq_resize_copy_cqes()
658 struct mthca_cq *cq = to_mcq(ibcq); in mthca_poll_cq()
698 cq->cons_index &= cq->ibcq.cqe; in mthca_poll_cq()
706 tcqe = cq->ibcq.cqe; in mthca_poll_cq()
738 struct mthca_cq *cq = to_mcq(ibcq); in mthca_arbel_arm_cq()
[all …]
mthca_provider.h
184 struct ib_cq ibcq; member
301 static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq) in to_mcq() argument
303 return container_of(ibcq, struct mthca_cq, ibcq); in to_mcq()
mthca_provider.c
583 static int mthca_create_cq(struct ib_cq *ibcq, in mthca_create_cq() argument
587 struct ib_device *ibdev = ibcq->device; in mthca_create_cq()
619 cq = to_mcq(ibcq); in mthca_create_cq()
704 static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) in mthca_resize_cq() argument
706 struct mthca_dev *dev = to_mdev(ibcq->device); in mthca_resize_cq()
707 struct mthca_cq *cq = to_mcq(ibcq); in mthca_resize_cq()
718 if (entries == ibcq->cqe + 1) { in mthca_resize_cq()
758 tcqe = cq->ibcq.cqe; in mthca_resize_cq()
760 cq->ibcq.cqe = cq->resize_buf->cqe; in mthca_resize_cq()
772 ibcq->cqe = entries - 1; in mthca_resize_cq()
[all …]
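Note the two cqe conventions visible in these results: rvt, pvrdma, and rxe store the entry count itself (cq->ibcq.cqe = entries), while mthca and mlx5 store entries - 1 (ibcq->cqe = entries - 1 above). With a power-of-two ring, the latter doubles as a wrap mask, which is what mthca_poll_cq()'s cons_index &= cq->ibcq.cqe relies on. A small sketch of the mask variant, assuming a power-of-two ring size; cq_ring and cq_next_slot() are hypothetical names.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical ring mirroring the mthca/mlx5 convention: cqe holds
 * size - 1, so wrapping the consumer index is a single mask, as in
 * mthca_poll_cq()'s "cq->cons_index &= cq->ibcq.cqe". */
struct cq_ring {
        uint32_t cqe;           /* size - 1; size must be a power of two */
        uint32_t cons_index;
};

static uint32_t cq_next_slot(struct cq_ring *cq)
{
        uint32_t slot = cq->cons_index & cq->cqe;

        cq->cons_index++;
        return slot;
}

int main(void)
{
        struct cq_ring cq = { .cqe = 256 - 1, .cons_index = 254 };

        /* Crossing the wrap point prints 254, 255, 0, 1. */
        for (int i = 0; i < 4; i++)
                printf("slot %u\n", (unsigned)cq_next_slot(&cq));
        return 0;
}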
/linux/drivers/infiniband/hw/mlx5/
cq.c
43 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx5_ib_cq_comp() local
45 ibcq->comp_handler(ibcq, ibcq->cq_context); in mlx5_ib_cq_comp()
52 struct ib_cq *ibcq = &cq->ibcq; in mlx5_ib_cq_event() local
61 if (ibcq->event_handler) { in mlx5_ib_cq_event()
64 event.element.cq = ibcq; in mlx5_ib_cq_event()
65 ibcq->event_handler(&event, ibcq->cq_context); in mlx5_ib_cq_event()
931 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in notify_soft_wc_handler()
962 cq->ibcq.cqe = entries - 1; in mlx5_ib_create_cq()
1358 cq->ibcq.cqe = entries - 1; in mlx5_ib_resize_cq()
1405 if (!ibcq) in mlx5_ib_get_cqe_size()
[all …]
restrack.c
151 static int fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ibcq) in fill_res_cq_entry_raw() argument
153 struct mlx5_ib_dev *dev = to_mdev(ibcq->device); in fill_res_cq_entry_raw()
154 struct mlx5_ib_cq *cq = to_mcq(ibcq); in fill_res_cq_entry_raw()
mlx5_ib.h
565 struct ib_cq ibcq; member
1164 static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq) in to_mcq() argument
1166 return container_of(ibcq, struct mlx5_ib_cq, ibcq); in to_mcq()
1270 int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
1273 int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
1274 int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
1276 int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
1341 int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
1452 int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);
/linux/include/rdma/
rdmavt_cq.h
47 struct ib_cq ibcq; member
60 static inline struct rvt_cq *ibcq_to_rvtcq(struct ib_cq *ibcq) in ibcq_to_rvtcq() argument
62 return container_of(ibcq, struct rvt_cq, ibcq); in ibcq_to_rvtcq()
/linux/drivers/infiniband/hw/cxgb4/
ev.c
105 event.device = chp->ibcq.device; in post_qp_event()
107 event.element.cq = &chp->ibcq; in post_qp_event()
115 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); in post_qp_event()
234 (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); in c4iw_ev_handler()
iw_cxgb4.h
424 struct ib_cq ibcq; member
435 static inline struct c4iw_cq *to_c4iw_cq(struct ib_cq *ibcq) in to_c4iw_cq() argument
437 return container_of(ibcq, struct c4iw_cq, ibcq); in to_c4iw_cq()
959 int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
983 int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
985 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
1043 int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq);
/linux/drivers/infiniband/hw/hns/
hns_roce_cq.c
458 struct ib_cq *ibcq; in hns_roce_cq_completion() local
469 ibcq = &hr_cq->ib_cq; in hns_roce_cq_completion()
470 if (ibcq->comp_handler) in hns_roce_cq_completion()
471 ibcq->comp_handler(ibcq, ibcq->cq_context); in hns_roce_cq_completion()
479 struct ib_cq *ibcq; in hns_roce_cq_event() local
498 ibcq = &hr_cq->ib_cq; in hns_roce_cq_event()
499 if (ibcq->event_handler) { in hns_roce_cq_event()
500 event.device = ibcq->device; in hns_roce_cq_event()
501 event.element.cq = ibcq; in hns_roce_cq_event()
503 ibcq->event_handler(&event, ibcq->cq_context); in hns_roce_cq_event()
/linux/drivers/infiniband/hw/qedr/
qedr_roce_cm.c
82 cq->ibcq.comp_handler ? "Yes" : "No"); in qedr_ll2_complete_tx_packet()
92 if (cq->ibcq.comp_handler) in qedr_ll2_complete_tx_packet()
93 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); in qedr_ll2_complete_tx_packet()
121 if (cq->ibcq.comp_handler) in qedr_ll2_complete_rx_packet()
122 (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); in qedr_ll2_complete_rx_packet()
671 int qedr_gsi_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) in qedr_gsi_poll_cq() argument
673 struct qedr_dev *dev = get_qedr_dev(ibcq->device); in qedr_gsi_poll_cq()
674 struct qedr_cq *cq = get_qedr_cq(ibcq); in qedr_gsi_poll_cq()
main.c
233 INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
468 if (!cq->destroyed && cq->ibcq.comp_handler) in qedr_irq_handler()
469 (*cq->ibcq.comp_handler) in qedr_irq_handler()
470 (&cq->ibcq, cq->ibcq.cq_context); in qedr_irq_handler()
650 struct ib_cq *ibcq; in qedr_affiliated_event() local
721 ibcq = &cq->ibcq; in qedr_affiliated_event()
722 if (ibcq->event_handler) { in qedr_affiliated_event()
723 event.device = ibcq->device; in qedr_affiliated_event()
724 event.element.cq = ibcq; in qedr_affiliated_event()
725 ibcq->event_handler(&event, ibcq->cq_context); in qedr_affiliated_event()
verbs.h
54 int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
56 int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
57 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
/linux/drivers/infiniband/hw/efa/
efa.h
89 struct ib_cq ibcq; member
151 int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
152 int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
/linux/drivers/infiniband/hw/ocrdma/
ocrdma_verbs.h
73 int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
76 int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
ocrdma.h
316 struct ib_cq ibcq; member
471 static inline struct ocrdma_cq *get_ocrdma_cq(struct ib_cq *ibcq) in get_ocrdma_cq() argument
473 return container_of(ibcq, struct ocrdma_cq, ibcq); in get_ocrdma_cq()
