// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/types.h>

#include <rdma/ib_verbs.h>

#include "siw.h"

static int map_wc_opcode[SIW_NUM_OPCODES] = {
	[SIW_OP_WRITE] = IB_WC_RDMA_WRITE,
	[SIW_OP_SEND] = IB_WC_SEND,
	[SIW_OP_SEND_WITH_IMM] = IB_WC_SEND,
	[SIW_OP_READ] = IB_WC_RDMA_READ,
	[SIW_OP_READ_LOCAL_INV] = IB_WC_RDMA_READ,
	[SIW_OP_COMP_AND_SWAP] = IB_WC_COMP_SWAP,
	[SIW_OP_FETCH_AND_ADD] = IB_WC_FETCH_ADD,
	[SIW_OP_INVAL_STAG] = IB_WC_LOCAL_INV,
	[SIW_OP_REG_MR] = IB_WC_REG_MR,
	[SIW_OP_RECEIVE] = IB_WC_RECV,
	[SIW_OP_READ_RESPONSE] = -1 /* not used */
};

static struct {
	enum siw_wc_status siw;
	enum ib_wc_status ib;
} map_cqe_status[SIW_NUM_WC_STATUS] = {
	{ SIW_WC_SUCCESS, IB_WC_SUCCESS },
	{ SIW_WC_LOC_LEN_ERR, IB_WC_LOC_LEN_ERR },
	{ SIW_WC_LOC_PROT_ERR, IB_WC_LOC_PROT_ERR },
	{ SIW_WC_LOC_QP_OP_ERR, IB_WC_LOC_QP_OP_ERR },
	{ SIW_WC_WR_FLUSH_ERR, IB_WC_WR_FLUSH_ERR },
	{ SIW_WC_BAD_RESP_ERR, IB_WC_BAD_RESP_ERR },
	{ SIW_WC_LOC_ACCESS_ERR, IB_WC_LOC_ACCESS_ERR },
	{ SIW_WC_REM_ACCESS_ERR, IB_WC_REM_ACCESS_ERR },
	{ SIW_WC_REM_INV_REQ_ERR, IB_WC_REM_INV_REQ_ERR },
	{ SIW_WC_GENERAL_ERR, IB_WC_GENERAL_ERR }
};

/*
 * Reap one CQE from the CQ. Only used by kernel clients
 * during normal CQ operation. May also be called during CQ
 * flush for a user-mapped CQE array.
 */
int siw_reap_cqe(struct siw_cq *cq, struct ib_wc *wc)
{
	struct siw_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->lock, flags);

	cqe = &cq->queue[cq->cq_get % cq->num_cqe];
	if (READ_ONCE(cqe->flags) & SIW_WQE_VALID) {
		memset(wc, 0, sizeof(*wc));
		wc->wr_id = cqe->id;
		wc->status = map_cqe_status[cqe->status].ib;
		wc->opcode = map_wc_opcode[cqe->opcode];
		wc->byte_len = cqe->bytes;

		/*
		 * During CQ flush, user-space CQEs may also get
		 * reaped here. Those do not hold a QP reference
		 * and do not qualify for memory extension verbs.
		 */
		if (likely(rdma_is_kernel_res(&cq->base_cq.res))) {
			if (cqe->flags & SIW_WQE_REM_INVAL) {
				wc->ex.invalidate_rkey = cqe->inval_stag;
				wc->wc_flags = IB_WC_WITH_INVALIDATE;
			}
			wc->qp = cqe->base_qp;
			siw_dbg_cq(cq,
				   "idx %u, type %d, flags %2x, id 0x%pK\n",
				   cq->cq_get % cq->num_cqe, cqe->opcode,
				   cqe->flags, (void *)(uintptr_t)cqe->id);
		}
		WRITE_ONCE(cqe->flags, 0);
		cq->cq_get++;

		spin_unlock_irqrestore(&cq->lock, flags);

		return 1;
	}
	spin_unlock_irqrestore(&cq->lock, flags);

	return 0;
}
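
/*
 * Illustrative sketch, not part of the original file: a poll_cq-style
 * kernel consumer could drain up to num_cqe completions by calling
 * siw_reap_cqe() until it returns 0. The to_siw_cq() container-of
 * helper is assumed to be provided by siw.h; the function name below
 * is hypothetical.
 */
static inline int siw_example_poll_cq(struct ib_cq *base_cq, int num_cqe,
				      struct ib_wc *wc)
{
	struct siw_cq *cq = to_siw_cq(base_cq);
	int i;

	for (i = 0; i < num_cqe; i++) {
		/* Stop as soon as no further valid CQE is available. */
		if (!siw_reap_cqe(cq, wc + i))
			break;
	}
	return i;
}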

/*
 * siw_cq_flush()
 *
 * Flush all CQ elements.
 */
void siw_cq_flush(struct siw_cq *cq)
{
	struct ib_wc wc;

	while (siw_reap_cqe(cq, &wc))
		;
}