Lines Matching refs:cq

41 static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)  in mlx5_ib_cq_comp()  argument
43 struct ib_cq *ibcq = &to_mibcq(cq)->ibcq; in mlx5_ib_cq_comp()
50 struct mlx5_ib_cq *cq = container_of(mcq, struct mlx5_ib_cq, mcq); in mlx5_ib_cq_event() local
51 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_ib_cq_event()
52 struct ib_cq *ibcq = &cq->ibcq; in mlx5_ib_cq_event()
64 event.element.cq = ibcq; in mlx5_ib_cq_event()
69 static void *get_cqe(struct mlx5_ib_cq *cq, int n) in get_cqe() argument
71 return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n); in get_cqe()
79 static void *get_sw_cqe(struct mlx5_ib_cq *cq, int n) in get_sw_cqe() argument
81 void *cqe = get_cqe(cq, n & cq->ibcq.cqe); in get_sw_cqe()
84 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in get_sw_cqe()
87 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) { in get_sw_cqe()
94 static void *next_cqe_sw(struct mlx5_ib_cq *cq) in next_cqe_sw() argument
96 return get_sw_cqe(cq, cq->mcq.cons_index); in next_cqe_sw()
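
The get_cqe()/get_sw_cqe()/next_cqe_sw() hits above are the consumer-side validity test: a CQE is software-owned when its owner bit matches the phase implied by the consumer index (line 87); the real get_sw_cqe() also rejects MLX5_CQE_INVALID opcodes and offsets 128-byte CQEs by 64 (line 84). A minimal user-space model of just the ownership check, with invented simplified types (model_cqe and OWNER_MASK stand in for struct mlx5_cqe64 and MLX5_CQE_OWNER_MASK), might look like:

    #include <stdbool.h>
    #include <stdint.h>

    #define OWNER_MASK 0x1              /* models MLX5_CQE_OWNER_MASK */

    struct model_cqe { uint8_t op_own; };

    /*
     * cqe_count must be a power of two; (cqe_count - 1) plays the role of
     * cq->ibcq.cqe in the driver, and cqe_count the role of cq->ibcq.cqe + 1.
     */
    static bool cqe_is_sw_owned(const struct model_cqe *ring,
                                uint32_t cons_index, uint32_t cqe_count)
    {
        const struct model_cqe *cqe = &ring[cons_index & (cqe_count - 1)];
        /* the phase bit flips every time the consumer wraps the ring */
        int phase = !!(cons_index & cqe_count);

        /* valid for software when the owner bit equals the expected phase */
        return !((cqe->op_own & OWNER_MASK) ^ phase);
    }
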
421 static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries, in mlx5_ib_poll_sw_comp() argument
428 list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) { in mlx5_ib_poll_sw_comp()
434 list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) { in mlx5_ib_poll_sw_comp()
441 static int mlx5_poll_one(struct mlx5_ib_cq *cq, in mlx5_poll_one() argument
445 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_poll_one()
457 cqe = next_cqe_sw(cq); in mlx5_poll_one()
461 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in mlx5_poll_one()
463 ++cq->mcq.cons_index; in mlx5_poll_one()
472 if (likely(cq->resize_buf)) { in mlx5_poll_one()
473 free_cq_buf(dev, &cq->buf); in mlx5_poll_one()
474 cq->buf = *cq->resize_buf; in mlx5_poll_one()
475 kfree(cq->resize_buf); in mlx5_poll_one()
476 cq->resize_buf = NULL; in mlx5_poll_one()
520 "Requestor" : "Responder", cq->mcq.cqn); in mlx5_poll_one()
557 cq->mcq.cqn, sig->err_item.key, in mlx5_poll_one()
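
Lines 472-476 are where a pending resize is committed on the polling path: when mlx5_poll_one() consumes a resize CQE, the old buffer is freed and cq->buf adopts *cq->resize_buf. A rough sketch of that hand-off under invented types (model_cq, model_cq_buf; plain free() stands in for the driver's free_cq_buf()/kfree()):

    #include <stdlib.h>

    /* Sketch only: models the buffer swap in mlx5_poll_one(), not the CQE parsing. */
    struct model_cq_buf { void *mem; };
    struct model_cq {
        struct model_cq_buf buf;
        struct model_cq_buf *resize_buf;   /* set by the resize path, NULL otherwise */
    };

    static void adopt_resize_buf(struct model_cq *cq)
    {
        if (!cq->resize_buf)
            return;               /* the driver warns about an unexpected resize CQE */

        free(cq->buf.mem);        /* stands in for free_cq_buf(dev, &cq->buf) */
        cq->buf = *cq->resize_buf;
        free(cq->resize_buf);     /* kfree(cq->resize_buf) in the driver      */
        cq->resize_buf = NULL;    /* subsequent polls use the new ring        */
    }
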
571 static int poll_soft_wc(struct mlx5_ib_cq *cq, int num_entries, in poll_soft_wc() argument
574 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in poll_soft_wc()
578 list_for_each_entry_safe(soft_wc, next, &cq->wc_list, list) { in poll_soft_wc()
583 cq->mcq.cqn); in poll_soft_wc()
599 struct mlx5_ib_cq *cq = to_mcq(ibcq); in mlx5_ib_poll_cq() local
601 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in mlx5_ib_poll_cq()
607 spin_lock_irqsave(&cq->lock, flags); in mlx5_ib_poll_cq()
610 if (unlikely(!list_empty(&cq->wc_list))) in mlx5_ib_poll_cq()
611 soft_polled = poll_soft_wc(cq, num_entries, wc, true); in mlx5_ib_poll_cq()
613 mlx5_ib_poll_sw_comp(cq, num_entries - soft_polled, in mlx5_ib_poll_cq()
618 if (unlikely(!list_empty(&cq->wc_list))) in mlx5_ib_poll_cq()
619 soft_polled = poll_soft_wc(cq, num_entries, wc, false); in mlx5_ib_poll_cq()
622 if (mlx5_poll_one(cq, &cur_qp, wc + soft_polled + npolled)) in mlx5_ib_poll_cq()
627 mlx5_cq_set_ci(&cq->mcq); in mlx5_ib_poll_cq()
629 spin_unlock_irqrestore(&cq->lock, flags); in mlx5_ib_poll_cq()
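
The mlx5_ib_poll_cq() hits show the polling order: under cq->lock, software-generated completions on wc_list are drained first (lines 618-619; lines 610-613 cover the device-error case, where flushed completions are synthesized in software), hardware CQEs are then consumed one at a time by mlx5_poll_one() (line 622), and the consumer index is published with mlx5_cq_set_ci() before unlocking (line 627). A self-contained toy model of that ordering, where every toy_* name is invented for illustration:

    #include <string.h>

    struct toy_wc { unsigned long wr_id; };

    struct toy_cq {
        struct toy_wc soft[8];                 /* models cq->wc_list (soft completions) */
        int nsoft;
        struct toy_wc hw[64];                  /* models the hardware CQE ring          */
        unsigned int cons_index, prod_index;   /* models cq->mcq.cons_index             */
    };

    static int toy_poll_cq(struct toy_cq *cq, int num_entries, struct toy_wc *wc)
    {
        int soft_polled = 0, npolled = 0;

        /* spin_lock_irqsave(&cq->lock, flags); */

        /* 1. drain software-generated completions first (poll_soft_wc) */
        while (soft_polled < num_entries && cq->nsoft > 0) {
            wc[soft_polled++] = cq->soft[0];
            memmove(&cq->soft[0], &cq->soft[1], --cq->nsoft * sizeof(cq->soft[0]));
        }

        /* 2. then consume hardware CQEs one by one (mlx5_poll_one) */
        while (npolled < num_entries - soft_polled &&
               cq->cons_index != cq->prod_index)
            wc[soft_polled + npolled++] = cq->hw[cq->cons_index++ % 64];

        /* 3. publish the new consumer index: mlx5_cq_set_ci(&cq->mcq) */

        /* spin_unlock_irqrestore(&cq->lock, flags); */
        return soft_polled + npolled;
    }
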
637 struct mlx5_ib_cq *cq = to_mcq(ibcq); in mlx5_ib_arm_cq() local
642 spin_lock_irqsave(&cq->lock, irq_flags); in mlx5_ib_arm_cq()
643 if (cq->notify_flags != IB_CQ_NEXT_COMP) in mlx5_ib_arm_cq()
644 cq->notify_flags = flags & IB_CQ_SOLICITED_MASK; in mlx5_ib_arm_cq()
646 if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !list_empty(&cq->wc_list)) in mlx5_ib_arm_cq()
648 spin_unlock_irqrestore(&cq->lock, irq_flags); in mlx5_ib_arm_cq()
650 mlx5_cq_arm(&cq->mcq, in mlx5_ib_arm_cq()
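
mlx5_ib_arm_cq() records the requested notification mode, refuses to downgrade a pending next-completion request to solicited-only (lines 643-644), reports a missed event when soft completions are already queued (line 646), and finally rings the arm doorbell through mlx5_cq_arm() (line 650). A hedged model of those decisions with simplified flag values standing in for the IB_CQ_* constants:

    #include <stdbool.h>

    enum { TOY_CQ_SOLICITED = 1, TOY_CQ_NEXT_COMP = 2, TOY_CQ_REPORT_MISSED = 4 };

    struct toy_arm_cq {
        int  notify_flags;          /* models cq->notify_flags          */
        bool wc_list_nonempty;      /* models !list_empty(&cq->wc_list) */
    };

    static int toy_arm_cq(struct toy_arm_cq *cq, int flags)
    {
        int ret = 0;

        /* a pending NEXT_COMP request is never downgraded to SOLICITED */
        if (cq->notify_flags != TOY_CQ_NEXT_COMP)
            cq->notify_flags = flags & (TOY_CQ_SOLICITED | TOY_CQ_NEXT_COMP);

        /* tell the caller a completion is already queued in software */
        if ((flags & TOY_CQ_REPORT_MISSED) && cq->wc_list_nonempty)
            ret = 1;

        /* the driver now rings the arm doorbell: mlx5_cq_arm(&cq->mcq, ...) */
        return ret;
    }
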
706 struct mlx5_ib_cq *cq, int entries, u32 **cqb, in create_cq_user() argument
738 cq->buf.umem = in create_cq_user()
741 if (IS_ERR(cq->buf.umem)) { in create_cq_user()
742 err = PTR_ERR(cq->buf.umem); in create_cq_user()
747 cq->buf.umem, cqc, log_page_size, MLX5_ADAPTER_PAGE_SHIFT, in create_cq_user()
754 err = mlx5_ib_db_map_user(context, ucmd.db_addr, &cq->db); in create_cq_user()
758 ncont = ib_umem_num_dma_blocks(cq->buf.umem, page_size); in create_cq_user()
763 ib_umem_num_pages(cq->buf.umem), page_size, ncont); in create_cq_user()
774 mlx5_ib_populate_pas(cq->buf.umem, page_size, pas, 0); in create_cq_user()
827 cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD; in create_cq_user()
831 cq->private_flags |= MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS; in create_cq_user()
840 mlx5_ib_db_unmap_user(context, &cq->db); in create_cq_user()
843 ib_umem_release(cq->buf.umem); in create_cq_user()
847 static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_udata *udata) in destroy_cq_user() argument
852 mlx5_ib_db_unmap_user(context, &cq->db); in destroy_cq_user()
853 ib_umem_release(cq->buf.umem); in destroy_cq_user()
869 static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, in create_cq_kernel() argument
877 err = mlx5_db_alloc(dev->mdev, &cq->db); in create_cq_kernel()
881 cq->mcq.set_ci_db = cq->db.db; in create_cq_kernel()
882 cq->mcq.arm_db = cq->db.db + 1; in create_cq_kernel()
883 cq->mcq.cqe_sz = cqe_size; in create_cq_kernel()
885 err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size); in create_cq_kernel()
889 init_cq_frag_buf(&cq->buf); in create_cq_kernel()
893 cq->buf.frag_buf.npages; in create_cq_kernel()
901 mlx5_fill_page_frag_array(&cq->buf.frag_buf, pas); in create_cq_kernel()
905 cq->buf.frag_buf.page_shift - in create_cq_kernel()
913 free_cq_buf(dev, &cq->buf); in create_cq_kernel()
916 mlx5_db_free(dev->mdev, &cq->db); in create_cq_kernel()
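
Lines 881-883 show how a kernel CQ wires its doorbell record: one allocation (cq->db.db) provides two consecutive words, the consumer-index doorbell followed by the arm doorbell (in the driver these are big-endian words). A simplified sketch of that layout with invented toy_* types:

    #include <stdint.h>

    struct toy_db_record {
        uint32_t words[2];              /* models the two-word doorbell record */
    };

    struct toy_kernel_cq {
        volatile uint32_t *set_ci_db;   /* cq->mcq.set_ci_db = cq->db.db      */
        volatile uint32_t *arm_db;      /* cq->mcq.arm_db    = cq->db.db + 1  */
    };

    static void toy_bind_doorbells(struct toy_kernel_cq *cq, struct toy_db_record *db)
    {
        cq->set_ci_db = &db->words[0];  /* written by mlx5_cq_set_ci() when polling */
        cq->arm_db    = &db->words[1];  /* written by mlx5_cq_arm() when re-arming  */
    }
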
920 static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq) in destroy_cq_kernel() argument
922 free_cq_buf(dev, &cq->buf); in destroy_cq_kernel()
923 mlx5_db_free(dev->mdev, &cq->db); in destroy_cq_kernel()
928 struct mlx5_ib_cq *cq = container_of(work, struct mlx5_ib_cq, in notify_soft_wc_handler() local
931 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); in notify_soft_wc_handler()
941 struct mlx5_ib_cq *cq = to_mcq(ibcq); in mlx5_ib_create_cq() local
962 cq->ibcq.cqe = entries - 1; in mlx5_ib_create_cq()
963 mutex_init(&cq->resize_mutex); in mlx5_ib_create_cq()
964 spin_lock_init(&cq->lock); in mlx5_ib_create_cq()
965 cq->resize_buf = NULL; in mlx5_ib_create_cq()
966 cq->resize_umem = NULL; in mlx5_ib_create_cq()
967 cq->create_flags = attr->flags; in mlx5_ib_create_cq()
968 INIT_LIST_HEAD(&cq->list_send_qp); in mlx5_ib_create_cq()
969 INIT_LIST_HEAD(&cq->list_recv_qp); in mlx5_ib_create_cq()
972 err = create_cq_user(dev, udata, cq, entries, &cqb, &cqe_size, in mlx5_ib_create_cq()
978 err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, in mlx5_ib_create_cq()
983 INIT_WORK(&cq->notify_work, notify_soft_wc_handler); in mlx5_ib_create_cq()
990 cq->cqe_size = cqe_size; in mlx5_ib_create_cq()
995 cq->private_flags & in mlx5_ib_create_cq()
1000 MLX5_SET64(cqc, cqc, dbr_addr, cq->db.dma); in mlx5_ib_create_cq()
1001 if (cq->create_flags & IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN) in mlx5_ib_create_cq()
1004 err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen, out, sizeof(out)); in mlx5_ib_create_cq()
1008 mlx5_ib_dbg(dev, "cqn 0x%x\n", cq->mcq.cqn); in mlx5_ib_create_cq()
1010 cq->mcq.tasklet_ctx.comp = mlx5_ib_cq_comp; in mlx5_ib_create_cq()
1012 cq->mcq.comp = mlx5_ib_cq_comp; in mlx5_ib_create_cq()
1013 cq->mcq.event = mlx5_ib_cq_event; in mlx5_ib_create_cq()
1015 INIT_LIST_HEAD(&cq->wc_list); in mlx5_ib_create_cq()
1018 if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof(__u32))) { in mlx5_ib_create_cq()
1028 mlx5_core_destroy_cq(dev->mdev, &cq->mcq); in mlx5_ib_create_cq()
1033 destroy_cq_user(cq, udata); in mlx5_ib_create_cq()
1035 destroy_cq_kernel(dev, cq); in mlx5_ib_create_cq()
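
The mlx5_ib_create_cq() hits outline the create flow: build the CQE buffer through create_cq_user() or create_cq_kernel() (lines 972-978), hand the CQ context to firmware with mlx5_core_create_cq() (line 1004), install the completion/event callbacks (lines 1010-1013), and copy the CQ number back to userspace (line 1018); errors unwind in reverse order of creation (lines 1028-1035). A compilable toy skeleton of that ordering, where each toy_* stub merely stands in for the driver call named in its comment and is not a real API:

    #include <stdbool.h>

    static int  toy_create_buf(bool user)  { (void)user; return 0; } /* create_cq_user()/create_cq_kernel()   */
    static int  toy_create_fw_cq(void)     { return 0; }             /* mlx5_core_create_cq()                 */
    static int  toy_copy_cqn_to_user(void) { return 0; }             /* ib_copy_to_udata(..., &cq->mcq.cqn)   */
    static void toy_destroy_fw_cq(void)    { }                       /* mlx5_core_destroy_cq()                */
    static void toy_destroy_buf(bool user) { (void)user; }           /* destroy_cq_user()/destroy_cq_kernel() */

    static int toy_create_cq(bool user)
    {
        int err;

        err = toy_create_buf(user);        /* pin user memory or allocate kernel buffer */
        if (err)
            return err;

        err = toy_create_fw_cq();          /* hand the CQ context to firmware           */
        if (err)
            goto err_buf;

        if (user && toy_copy_cqn_to_user()) {
            err = -1;                      /* -EFAULT in the driver                     */
            goto err_fw;
        }
        return 0;

    err_fw:
        toy_destroy_fw_cq();               /* undo in reverse order of creation         */
    err_buf:
        toy_destroy_buf(user);
        return err;
    }
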
1039 int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata) in mlx5_ib_destroy_cq() argument
1041 struct mlx5_ib_dev *dev = to_mdev(cq->device); in mlx5_ib_destroy_cq()
1042 struct mlx5_ib_cq *mcq = to_mcq(cq); in mlx5_ib_destroy_cq()
1061 void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 rsn, struct mlx5_ib_srq *srq) in __mlx5_ib_cq_clean() argument
1069 if (!cq) in __mlx5_ib_cq_clean()
1078 for (prod_index = cq->mcq.cons_index; get_sw_cqe(cq, prod_index); prod_index++) in __mlx5_ib_cq_clean()
1079 if (prod_index == cq->mcq.cons_index + cq->ibcq.cqe) in __mlx5_ib_cq_clean()
1085 while ((int) --prod_index - (int) cq->mcq.cons_index >= 0) { in __mlx5_ib_cq_clean()
1086 cqe = get_cqe(cq, prod_index & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
1087 cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64; in __mlx5_ib_cq_clean()
1093 dest = get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe); in __mlx5_ib_cq_clean()
1094 dest64 = (cq->mcq.cqe_sz == 64) ? dest : dest + 64; in __mlx5_ib_cq_clean()
1096 memcpy(dest, cqe, cq->mcq.cqe_sz); in __mlx5_ib_cq_clean()
1103 cq->mcq.cons_index += nfreed; in __mlx5_ib_cq_clean()
1108 mlx5_cq_set_ci(&cq->mcq); in __mlx5_ib_cq_clean()
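
__mlx5_ib_cq_clean() compacts the ring when a QP or SRQ is torn down: it scans forward to the first hardware-owned slot (lines 1078-1079), then walks backwards to the consumer index, counting CQEs that belong to the given rsn and sliding the survivors forward by that count (lines 1085-1096), and finally advances cons_index and rings the doorbell (lines 1103-1108). A user-space model of the compaction, using the same signed-difference loop idiom as line 1085 but a flat ring with no ownership bits:

    #include <stdint.h>
    #include <string.h>

    struct toy_cqe { uint32_t qpn; uint64_t payload; };

    static void toy_cq_clean(struct toy_cqe *ring, uint32_t ring_mask,
                             uint32_t cons_index, uint32_t prod_index,
                             uint32_t rsn, uint32_t *new_cons_index)
    {
        uint32_t nfreed = 0;
        uint32_t i = prod_index;

        while ((int)--i - (int)cons_index >= 0) {
            struct toy_cqe *cqe = &ring[i & ring_mask];

            if (cqe->qpn == rsn) {
                nfreed++;                 /* drop this CQE                                  */
            } else if (nfreed) {
                /* move the surviving CQE nfreed slots toward the producer side */
                memcpy(&ring[(i + nfreed) & ring_mask], cqe, sizeof(*cqe));
            }
        }

        /* the driver then advances cq->mcq.cons_index by nfreed and rings the
         * consumer-index doorbell (mlx5_cq_set_ci) */
        *new_cons_index = cons_index + nfreed;
    }
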
1112 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq) in mlx5_ib_cq_clean() argument
1114 if (!cq) in mlx5_ib_cq_clean()
1117 spin_lock_irq(&cq->lock); in mlx5_ib_cq_clean()
1118 __mlx5_ib_cq_clean(cq, qpn, srq); in mlx5_ib_cq_clean()
1119 spin_unlock_irq(&cq->lock); in mlx5_ib_cq_clean()
1122 int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) in mlx5_ib_modify_cq() argument
1124 struct mlx5_ib_dev *dev = to_mdev(cq->device); in mlx5_ib_modify_cq()
1125 struct mlx5_ib_cq *mcq = to_mcq(cq); in mlx5_ib_modify_cq()
1142 static int resize_user(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, in resize_user() argument
1169 cq->resize_umem = umem; in resize_user()
1175 static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq, in resize_kernel() argument
1180 cq->resize_buf = kzalloc(sizeof(*cq->resize_buf), GFP_KERNEL); in resize_kernel()
1181 if (!cq->resize_buf) in resize_kernel()
1184 err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size); in resize_kernel()
1188 init_cq_frag_buf(cq->resize_buf); in resize_kernel()
1193 kfree(cq->resize_buf); in resize_kernel()
1197 static int copy_resize_cqes(struct mlx5_ib_cq *cq) in copy_resize_cqes() argument
1199 struct mlx5_ib_dev *dev = to_mdev(cq->ibcq.device); in copy_resize_cqes()
1210 ssize = cq->buf.cqe_size; in copy_resize_cqes()
1211 dsize = cq->resize_buf->cqe_size; in copy_resize_cqes()
1217 i = cq->mcq.cons_index; in copy_resize_cqes()
1218 scqe = get_sw_cqe(cq, i); in copy_resize_cqes()
1227 dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc, in copy_resize_cqes()
1228 (i + 1) & cq->resize_buf->nent); in copy_resize_cqes()
1230 sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent); in copy_resize_cqes()
1235 scqe = get_sw_cqe(cq, i); in copy_resize_cqes()
1244 cq->mcq.cqn); in copy_resize_cqes()
1248 ++cq->mcq.cons_index; in copy_resize_cqes()
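
copy_resize_cqes() migrates still-valid CQEs from the old ring into the resized one until it meets the resize CQE, recomputing the software-ownership bit for each entry's position in the destination ring (lines 1227-1230) so the next poll still recognizes the copies. A small sketch of that fix-up with single-byte toy CQEs; the exact form of sw_ownership_bit() and the destination masking here are assumptions, not copied from the driver:

    #include <stdint.h>
    #include <string.h>

    #define TOY_OWNER_MASK 0x1

    static uint8_t toy_sw_ownership_bit(uint32_t n, uint32_t nent)
    {
        /* nent is a power of two: the "phase" flips each time n wraps the ring */
        return (n & nent) ? 1 : 0;
    }

    static void toy_copy_one_cqe(uint8_t *dst_ring, uint32_t dst_nent,
                                 const uint8_t *src_cqe, uint32_t dst_index)
    {
        uint8_t *dst = &dst_ring[dst_index & (dst_nent - 1)];

        memcpy(dst, src_cqe, 1);    /* the driver copies the full CQE size */
        /* give the copy the owner bit that matches its slot in the new ring */
        *dst = (uint8_t)((*dst & ~TOY_OWNER_MASK) |
                         toy_sw_ownership_bit(dst_index, dst_nent));
    }
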
1255 struct mlx5_ib_cq *cq = to_mcq(ibcq); in mlx5_ib_resize_cq() local
1287 mutex_lock(&cq->resize_mutex); in mlx5_ib_resize_cq()
1291 err = resize_user(dev, cq, entries, udata, &cqe_size); in mlx5_ib_resize_cq()
1296 cq->resize_umem, cqc, log_page_size, in mlx5_ib_resize_cq()
1303 npas = ib_umem_num_dma_blocks(cq->resize_umem, page_size); in mlx5_ib_resize_cq()
1309 err = resize_kernel(dev, cq, entries, cqe_size); in mlx5_ib_resize_cq()
1312 frag_buf = &cq->resize_buf->frag_buf; in mlx5_ib_resize_cq()
1328 mlx5_ib_populate_pas(cq->resize_umem, 1UL << page_shift, pas, in mlx5_ib_resize_cq()
1331 mlx5_fill_page_frag_array(&cq->resize_buf->frag_buf, pas); in mlx5_ib_resize_cq()
1346 cq->private_flags & in mlx5_ib_resize_cq()
1351 MLX5_SET(modify_cq_in, in, cqn, cq->mcq.cqn); in mlx5_ib_resize_cq()
1353 err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen); in mlx5_ib_resize_cq()
1358 cq->ibcq.cqe = entries - 1; in mlx5_ib_resize_cq()
1359 ib_umem_release(cq->buf.umem); in mlx5_ib_resize_cq()
1360 cq->buf.umem = cq->resize_umem; in mlx5_ib_resize_cq()
1361 cq->resize_umem = NULL; in mlx5_ib_resize_cq()
1366 spin_lock_irqsave(&cq->lock, flags); in mlx5_ib_resize_cq()
1367 if (cq->resize_buf) { in mlx5_ib_resize_cq()
1368 err = copy_resize_cqes(cq); in mlx5_ib_resize_cq()
1370 tbuf = cq->buf; in mlx5_ib_resize_cq()
1371 cq->buf = *cq->resize_buf; in mlx5_ib_resize_cq()
1372 kfree(cq->resize_buf); in mlx5_ib_resize_cq()
1373 cq->resize_buf = NULL; in mlx5_ib_resize_cq()
1377 cq->ibcq.cqe = entries - 1; in mlx5_ib_resize_cq()
1378 spin_unlock_irqrestore(&cq->lock, flags); in mlx5_ib_resize_cq()
1382 mutex_unlock(&cq->resize_mutex); in mlx5_ib_resize_cq()
1391 ib_umem_release(cq->resize_umem); in mlx5_ib_resize_cq()
1393 free_cq_buf(dev, cq->resize_buf); in mlx5_ib_resize_cq()
1394 cq->resize_buf = NULL; in mlx5_ib_resize_cq()
1397 mutex_unlock(&cq->resize_mutex); in mlx5_ib_resize_cq()
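
The mlx5_ib_resize_cq() hits give the overall ordering: prepare the new buffer via resize_user() or resize_kernel() under cq->resize_mutex (lines 1287-1312), ask firmware to switch buffers with mlx5_core_modify_cq() (line 1353), then adopt the new buffer, immediately for user CQs (lines 1358-1361) and, for kernel CQs, under cq->lock after copy_resize_cqes() (lines 1366-1378). A toy skeleton of that ordering; the toy_* stubs only stand in for the calls named in the comments, and the exact error-return semantics are not asserted:

    #include <stdbool.h>

    static int  toy_prepare_new_buf(bool user) { (void)user; return 0; } /* resize_user()/resize_kernel()  */
    static int  toy_fw_modify_cq(void)         { return 0; }             /* mlx5_core_modify_cq()          */
    static int  toy_copy_old_cqes(void)        { return 0; }             /* copy_resize_cqes()             */
    static void toy_swap_buffers(void)         { }                       /* adopt resize_umem / resize_buf */
    static void toy_release_new_buf(bool user) { (void)user; }           /* release umem / free resize_buf */

    static int toy_resize_cq(bool user, int entries, int *ibcq_cqe)
    {
        int err;

        /* mutex_lock(&cq->resize_mutex); */
        err = toy_prepare_new_buf(user);
        if (err)
            goto out;

        err = toy_fw_modify_cq();          /* firmware switches to the new buffer  */
        if (err)
            goto err_buf;

        if (user) {
            toy_swap_buffers();            /* release old umem, adopt resize_umem  */
        } else {
            /* kernel CQs: under cq->lock, copy still-valid CQEs, then swap */
            if (!toy_copy_old_cqes())
                toy_swap_buffers();
        }
        *ibcq_cqe = entries - 1;           /* cq->ibcq.cqe = entries - 1           */
        goto out;

    err_buf:
        toy_release_new_buf(user);
    out:
        /* mutex_unlock(&cq->resize_mutex); */
        return err;
    }
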
1403 struct mlx5_ib_cq *cq; in mlx5_ib_get_cqe_size() local
1408 cq = to_mcq(ibcq); in mlx5_ib_get_cqe_size()
1409 return cq->cqe_size; in mlx5_ib_get_cqe_size()
1416 struct mlx5_ib_cq *cq = to_mcq(ibcq); in mlx5_ib_generate_wc() local
1424 spin_lock_irqsave(&cq->lock, flags); in mlx5_ib_generate_wc()
1425 list_add_tail(&soft_wc->list, &cq->wc_list); in mlx5_ib_generate_wc()
1426 if (cq->notify_flags == IB_CQ_NEXT_COMP || in mlx5_ib_generate_wc()
1428 cq->notify_flags = 0; in mlx5_ib_generate_wc()
1429 schedule_work(&cq->notify_work); in mlx5_ib_generate_wc()
1431 spin_unlock_irqrestore(&cq->lock, flags); in mlx5_ib_generate_wc()
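
mlx5_ib_generate_wc() queues a software completion on wc_list and, if the CQ is currently armed for it, clears notify_flags and schedules notify_work (lines 1425-1429), whose handler invokes ibcq->comp_handler (line 931 above). A minimal model with invented fields, folding the solicited-only arming case into a single "armed" check:

    #include <stdbool.h>

    struct toy_soft_cq {
        int  nsoft;           /* models the length of cq->wc_list        */
        int  notify_flags;    /* 0 = not armed                           */
        bool work_scheduled;  /* models schedule_work(&cq->notify_work)  */
    };

    static void toy_generate_wc(struct toy_soft_cq *cq)
    {
        /* spin_lock_irqsave(&cq->lock, flags); */
        cq->nsoft++;                   /* list_add_tail(&soft_wc->list, &cq->wc_list)         */
        if (cq->notify_flags) {        /* armed for NEXT_COMP (or SOLICITED and a solicited WC) */
            cq->notify_flags = 0;      /* consume the arm: one notification per request        */
            cq->work_scheduled = true; /* schedule_work(&cq->notify_work)                       */
        }
        /* spin_unlock_irqrestore(&cq->lock, flags); */
    }
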