/* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
/* Copyright (c) 2015 - 2021 Intel Corporation */
#ifndef IRDMA_VERBS_H
#define IRDMA_VERBS_H

#define IRDMA_MAX_SAVED_PHY_PGADDR	4

#define IRDMA_PKEY_TBL_SZ		1
#define IRDMA_DEFAULT_PKEY		0xFFFF

struct irdma_ucontext {
	struct ib_ucontext ibucontext;
	struct irdma_device *iwdev;
	struct rdma_user_mmap_entry *db_mmap_entry;
	struct list_head cq_reg_mem_list;
	spinlock_t cq_reg_mem_list_lock; /* protect CQ memory list */
	struct list_head qp_reg_mem_list;
	spinlock_t qp_reg_mem_list_lock; /* protect QP memory list */
	int abi_ver;
	bool legacy_mode;
};

struct irdma_pd {
	struct ib_pd ibpd;
	struct irdma_sc_pd sc_pd;
};

struct irdma_av {
	u8 macaddr[16];
	struct rdma_ah_attr attrs;
	union {
		struct sockaddr saddr;
		struct sockaddr_in saddr_in;
		struct sockaddr_in6 saddr_in6;
	} sgid_addr, dgid_addr;
	u8 net_type;
};

struct irdma_ah {
	struct ib_ah ibah;
	struct irdma_sc_ah sc_ah;
	struct irdma_pd *pd;
	struct irdma_av av;
	u8 sgid_index;
	union ib_gid dgid;
};

struct irdma_hmc_pble {
	union {
		u32 idx;
		dma_addr_t addr;
	};
};

struct irdma_cq_mr {
	struct irdma_hmc_pble cq_pbl;
	dma_addr_t shadow;
	bool split;
};

struct irdma_qp_mr {
	struct irdma_hmc_pble sq_pbl;
	struct irdma_hmc_pble rq_pbl;
	dma_addr_t shadow;
	struct page *sq_page;
};

struct irdma_cq_buf {
	struct irdma_dma_mem kmem_buf;
	struct irdma_cq_uk cq_uk;
	struct irdma_hw *hw;
	struct list_head list;
	struct work_struct work;
};

struct irdma_pbl {
	struct list_head list;
	union {
		struct irdma_qp_mr qp_mr;
		struct irdma_cq_mr cq_mr;
	};

	bool pbl_allocated:1;
	bool on_list:1;
	u64 user_base;
	struct irdma_pble_alloc pble_alloc;
	struct irdma_mr *iwmr;
};

struct irdma_mr {
	union {
		struct ib_mr ibmr;
		struct ib_mw ibmw;
	};
	struct ib_umem *region;
	u16 type;
	u32 page_cnt;
	u64 page_size;
	u32 npages;
	u32 stag;
	u64 len;
	u64 pgaddrmem[IRDMA_MAX_SAVED_PHY_PGADDR];
	struct irdma_pbl iwpbl;
};

struct irdma_cq {
	struct ib_cq ibcq;
	struct irdma_sc_cq sc_cq;
	u16 cq_head;
	u16 cq_size;
	u16 cq_num;
	bool user_mode;
	bool armed;
	enum irdma_cmpl_notify last_notify;
	u32 polled_cmpls;
	u32 cq_mem_size;
	struct irdma_dma_mem kmem;
	struct irdma_dma_mem kmem_shadow;
	spinlock_t lock; /* for poll cq */
	struct irdma_pbl *iwpbl;
	struct irdma_pbl *iwpbl_shadow;
	struct list_head resize_list;
	struct irdma_cq_poll_info cur_cqe;
};

struct disconn_work {
	struct work_struct work;
	struct irdma_qp *iwqp;
};

struct iw_cm_id;

struct irdma_qp_kmode {
	struct irdma_dma_mem dma_mem;
	struct irdma_sq_uk_wr_trk_info *sq_wrid_mem;
	u64 *rq_wrid_mem;
};

struct irdma_qp {
	struct ib_qp ibqp;
	struct irdma_sc_qp sc_qp;
	struct irdma_device *iwdev;
	struct irdma_cq *iwscq;
	struct irdma_cq *iwrcq;
	struct irdma_pd *iwpd;
	struct rdma_user_mmap_entry *push_wqe_mmap_entry;
	struct rdma_user_mmap_entry *push_db_mmap_entry;
	struct irdma_qp_host_ctx_info ctx_info;
	union {
		struct irdma_iwarp_offload_info iwarp_info;
		struct irdma_roce_offload_info roce_info;
	};

	union {
		struct irdma_tcp_offload_info tcp_info;
		struct irdma_udp_offload_info udp_info;
	};

	struct irdma_ah roce_ah;
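	/*
	 * Added commentary (not in the upstream header): only one member
	 * of each anonymous union above is live for a given QP. A RoCEv2
	 * QP uses roce_info/udp_info (RoCEv2 runs over UDP), while an
	 * iWARP QP uses iwarp_info/tcp_info (iWARP runs over TCP).
	 */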
	struct list_head teardown_entry;
	refcount_t refcnt;
	struct iw_cm_id *cm_id;
	struct irdma_cm_node *cm_node;
	struct ib_mr *lsmm_mr;
	atomic_t hw_mod_qp_pend;
	enum ib_qp_state ibqp_state;
	u32 qp_mem_size;
	u32 last_aeq;
	int max_send_wr;
	int max_recv_wr;
	atomic_t close_timer_started;
	spinlock_t lock; /* serialize posting WRs to SQ/RQ */
	struct irdma_qp_context *iwqp_context;
	void *pbl_vbase;
	dma_addr_t pbl_pbase;
	struct page *page;
	u8 active_conn : 1;
	u8 user_mode : 1;
	u8 hte_added : 1;
	u8 flush_issued : 1;
	u8 sig_all : 1;
	u8 pau_mode : 1;
	u8 rsvd : 1;
	u8 iwarp_state;
	u16 term_sq_flush_code;
	u16 term_rq_flush_code;
	u8 hw_iwarp_state;
	u8 hw_tcp_state;
	struct irdma_qp_kmode kqp;
	struct irdma_dma_mem host_ctx;
	struct timer_list terminate_timer;
	struct irdma_pbl *iwpbl;
	struct irdma_dma_mem q2_ctx_mem;
	struct irdma_dma_mem ietf_mem;
	struct completion free_qp;
	wait_queue_head_t waitq;
	wait_queue_head_t mod_qp_waitq;
	u8 rts_ae_rcvd;
};

enum irdma_mmap_flag {
	IRDMA_MMAP_IO_NC,
	IRDMA_MMAP_IO_WC,
};

struct irdma_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u64 bar_offset;
	u8 mmap_flag;
};

static inline u16 irdma_fw_major_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MAJOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}

static inline u16 irdma_fw_minor_ver(struct irdma_sc_dev *dev)
{
	return (u16)FIELD_GET(IRDMA_FW_VER_MINOR, dev->feature_info[IRDMA_FEATURE_FW_INFO]);
}

void irdma_mcast_mac(u32 *ip_addr, u8 *mac, bool ipv4);
int irdma_ib_register_device(struct irdma_device *iwdev);
void irdma_ib_unregister_device(struct irdma_device *iwdev);
void irdma_ib_dealloc_device(struct ib_device *ibdev);
void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event);
#endif /* IRDMA_VERBS_H */
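
/*
 * Illustrative sketch, not part of the upstream header: each verbs
 * structure above embeds its ib_* core object as a member (e.g. struct
 * irdma_qp embeds struct ib_qp as ibqp), so when the RDMA core hands the
 * driver an ib_* pointer, the driver-private wrapper can be recovered
 * with container_of(). A hypothetical accessor in that style (the name
 * to_iwqp and its location are assumptions here; the real driver defines
 * its equivalents elsewhere):
 *
 *	static inline struct irdma_qp *to_iwqp(struct ib_qp *ibqp)
 *	{
 *		return container_of(ibqp, struct irdma_qp, ibqp);
 *	}
 *
 * The same pattern applies to irdma_cq/ibcq, irdma_pd/ibpd,
 * irdma_ah/ibah and irdma_ucontext/ibucontext.
 */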