Lines Matching refs:rhp

720 cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]); in post_write_cmpl()
744 cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]); in post_write_cmpl()
807 PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr)>>3)); in build_tpte_memreg()
916 xa_lock_irqsave(&qhp->rhp->qps, flags); in ring_kernel_sq_db()
918 if (qhp->rhp->db_state == NORMAL) in ring_kernel_sq_db()
921 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); in ring_kernel_sq_db()
925 xa_unlock_irqrestore(&qhp->rhp->qps, flags); in ring_kernel_sq_db()
933 xa_lock_irqsave(&qhp->rhp->qps, flags); in ring_kernel_rq_db()
935 if (qhp->rhp->db_state == NORMAL) in ring_kernel_rq_db()
938 add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry); in ring_kernel_rq_db()
942 xa_unlock_irqrestore(&qhp->rhp->qps, flags); in ring_kernel_rq_db()
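
The two ring_kernel_*_db() helpers above (lines 916-942) share one idiom: the per-device qps xarray lock doubles as the doorbell flow-control lock, so db_state cannot change while a doorbell is being rung. The post paths at lines 1251 and 1331 fall back to these helpers whenever the status page marks doorbells off. A minimal sketch of the SQ variant, reconstructed from the listed lines; the qhp->lock nesting, the t4_ring_sq_db() call, and the wq_pidx_inc bookkeeping are assumptions taken from the surrounding in-tree driver, not from this listing:

static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
{
	unsigned long flags;

	/* The xarray lock also serializes db_state transitions. */
	xa_lock_irqsave(&qhp->rhp->qps, flags);
	spin_lock(&qhp->lock);
	if (qhp->rhp->db_state == NORMAL) {
		/* Doorbells enabled: ring the SQ doorbell directly. */
		t4_ring_sq_db(&qhp->wq, inc, NULL);
	} else {
		/* Doorbell recovery in progress: queue for later replay. */
		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
		qhp->wq.sq.wq_pidx_inc += inc;
	}
	spin_unlock(&qhp->lock);
	xa_unlock_irqrestore(&qhp->rhp->qps, flags);
	return 0;
}
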
1085 struct c4iw_dev *rhp; in c4iw_post_send() local
1093 rhp = qhp->rhp; in c4iw_post_send()
1121 if (qhp->rhp->rdev.lldi.write_cmpl_support && in c4iw_post_send()
1122 CHELSIO_CHIP_VERSION(qhp->rhp->rdev.lldi.adapter_type) >= in c4iw_post_send()
1164 if (unlikely(!rhp->rdev.lldi.write_w_imm_support)) { in c4iw_post_send()
1180 c4iw_invalidate_mr(rhp, wr->sg_list[0].lkey); in c4iw_post_send()
1196 if (rhp->rdev.lldi.fr_nsmr_tpte_wr_support && in c4iw_post_send()
1205 rhp->rdev.lldi.ulptx_memwrite_dsgl); in c4iw_post_send()
1218 c4iw_invalidate_mr(rhp, wr->ex.invalidate_rkey); in c4iw_post_send()
1237 rhp->rdev.lldi.ports[0]); in c4iw_post_send()
1251 if (!rhp->rdev.status_page->db_off) { in c4iw_post_send()
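
Within c4iw_post_send() (lines 1085-1251), rhp is cached once from qhp->rhp and then consulted for capability gating: optional work-request types are accepted only if the cxgb4 lower-level driver advertised them through lldi, as at lines 1164 and 1196. A hedged sketch of that gating, pulled out into c4iw_wr_supported(), a hypothetical helper invented for illustration:

/* Hypothetical helper: accept a WR opcode only if the LLD advertised it. */
static bool c4iw_wr_supported(struct c4iw_dev *rhp,
			      const struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_RDMA_WRITE_WITH_IMM:
		/* Line 1164: requires write_w_imm_support. */
		return rhp->rdev.lldi.write_w_imm_support;
	case IB_WR_REG_MR:
		/* Line 1196: fr_nsmr_tpte_wr_support only selects a faster
		 * fast-register encoding; plain REG_MR always works. */
		return true;
	default:
		return true;
	}
}
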
1312 qhp->rhp->rdev.lldi.ports[0]); in c4iw_post_receive()
1331 if (!qhp->rhp->rdev.status_page->db_off) { in c4iw_post_receive()
1590 c4iw_ofld_send(&qhp->rhp->rdev, skb); in post_terminate()
1692 static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, in rdma_fini() argument
1718 ret = c4iw_ref_send_wait(&rhp->rdev, skb, ep->com.wr_waitp, in rdma_fini()
1749 static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp) in rdma_init() argument
1763 ret = alloc_ird(rhp, qhp->attr.max_ird); in rdma_init()
1810 rhp->rdev.lldi.vr->rq.start); in rdma_init()
1821 ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp, in rdma_init()
1826 free_ird(rhp, qhp->attr.max_ird); in rdma_init()
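
rdma_fini() and rdma_init() both funnel through the send-and-wait helper visible at lines 1718 and 1821: the caller builds a FW_RI_WR in an skb, and c4iw_ref_send_wait() posts it to the firmware and sleeps on the wait object until the reply (or a timeout) arrives. A condensed sketch of the fini side; the FW_RI_WR field setup is elided and its details are assumed from the in-tree driver:

	struct fw_ri_wr *wqe;
	struct sk_buff *skb;
	int ret;

	skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	wqe = __skb_put_zero(skb, sizeof(*wqe));
	/* ... FW_RI_INIT_WR opcode, flow id, FW_RI_TYPE_FINI payload ... */
	wqe->cookie = (uintptr_t)ep->com.wr_waitp;

	/* Posts the skb, then blocks until the firmware completion. */
	ret = c4iw_ref_send_wait(&rhp->rdev, skb, ep->com.wr_waitp,
				 qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
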
1832 int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, in c4iw_modify_qp() argument
1871 if (attrs->max_ird > cur_max_read_depth(rhp)) { in c4iw_modify_qp()
1918 ret = rdma_init(rhp, qhp); in c4iw_modify_qp()
1942 ret = rdma_fini(rhp, qhp, ep); in c4iw_modify_qp()
1958 ret = rdma_fini(rhp, qhp, ep); in c4iw_modify_qp()
2072 struct c4iw_dev *rhp; in c4iw_destroy_qp() local
2078 rhp = qhp->rhp; in c4iw_destroy_qp()
2083 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); in c4iw_destroy_qp()
2085 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); in c4iw_destroy_qp()
2088 xa_lock_irq(&rhp->qps); in c4iw_destroy_qp()
2089 __xa_erase(&rhp->qps, qhp->wq.sq.qid); in c4iw_destroy_qp()
2092 xa_unlock_irq(&rhp->qps); in c4iw_destroy_qp()
2093 free_ird(rhp, qhp->attr.max_ird); in c4iw_destroy_qp()
2102 destroy_qp(&rhp->rdev, &qhp->wq, in c4iw_destroy_qp()
2103 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq); in c4iw_destroy_qp()
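
The c4iw_destroy_qp() hits (lines 2083-2103) fix the teardown order: drive the QP to ERROR via c4iw_modify_qp() (internal flag set when completing a TERMINATE), unpublish it from rhp->qps under the same lock the doorbell path takes, and only then release IRD credits and the hardware queues. A sketch of that sequence; the wait for the endpoint and the flow-control list removal (the gap at lines 2090-2091) are assumptions from the in-tree driver:

	attrs.next_state = C4IW_QP_STATE_ERROR;
	if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	else
		c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
	wait_event(qhp->wait, !qhp->ep);

	/* No doorbell path can find the QP once it leaves the xarray. */
	xa_lock_irq(&rhp->qps);
	__xa_erase(&rhp->qps, qhp->wq.sq.qid);
	if (!list_empty(&qhp->db_fc_entry))
		list_del_init(&qhp->db_fc_entry);
	xa_unlock_irq(&rhp->qps);
	free_ird(rhp, qhp->attr.max_ird);
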
2113 struct c4iw_dev *rhp; in c4iw_create_qp() local
2130 rhp = php->rhp; in c4iw_create_qp()
2131 schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid); in c4iw_create_qp()
2132 rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid); in c4iw_create_qp()
2140 if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size) in c4iw_create_qp()
2147 if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size) in c4iw_create_qp()
2159 (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * in c4iw_create_qp()
2165 (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * in c4iw_create_qp()
2176 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq, in c4iw_create_qp()
2177 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, in c4iw_create_qp()
2186 qhp->rhp = rhp; in c4iw_create_qp()
2211 ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL); in c4iw_create_qp()
2251 if (rhp->rdev.lldi.write_w_imm_support) in c4iw_create_qp()
2253 uresp.qid_mask = rhp->rdev.qpmask; in c4iw_create_qp()
2307 (pci_resource_start(rhp->rdev.lldi.pdev, 0) + in c4iw_create_qp()
2347 xa_erase_irq(&rhp->qps, qhp->wq.sq.qid); in c4iw_create_qp()
2349 destroy_qp(&rhp->rdev, &qhp->wq, in c4iw_create_qp()
2350 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq); in c4iw_create_qp()
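
c4iw_create_qp() (lines 2113-2350) acquires in order — hardware queues via create_qp() at line 2176, then lookup visibility via xa_insert_irq() at line 2211 — and the error path at lines 2347-2350 releases in reverse. A condensed sketch of that unwind shape; the trailing create_qp() arguments are assumed from the in-tree driver, the label names are invented for illustration, and the copy-out step is elided:

	ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
			ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
			qhp->wr_waitp, !attrs->srq);
	if (ret)
		return ret;		/* nothing published yet */

	ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL);
	if (ret)
		goto err_destroy_qp;

	/* ... uresp copy-out and mmap key setup; failures jump below ... */
	return 0;

err_remove_handle:	/* reached from the elided copy-out failures */
	xa_erase_irq(&rhp->qps, qhp->wq.sq.qid);
err_destroy_qp:
	destroy_qp(&rhp->rdev, &qhp->wq,
		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq);
	return ret;
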
2359 struct c4iw_dev *rhp; in c4iw_ib_modify_qp() local
2378 rhp = qhp->rhp; in c4iw_ib_modify_qp()
2403 if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) && in c4iw_ib_modify_qp()
2407 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0); in c4iw_ib_modify_qp()
2420 event.device = &srq->rhp->ibdev; in c4iw_dispatch_srq_limit_reached_event()
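
Line 2420 is the lone rhp reference in the SRQ limit-event path: rhp supplies the ib_device against which the affiliated event is raised. A sketch of the full dispatch; the element and event fields are assumed from the in-tree driver:

void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq)
{
	struct ib_event event = {};

	event.device = &srq->rhp->ibdev;
	event.element.srq = &srq->ibsrq;
	event.event = IB_EVENT_SRQ_LIMIT_REACHED;

	/* Broadcast through the core's registered event handlers. */
	ib_dispatch_event(&event);
}
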
2475 struct c4iw_rdev *rdev = &srq->rhp->rdev; in free_srq_queue()
2512 struct c4iw_rdev *rdev = &srq->rhp->rdev; in alloc_srq_queue()
2665 struct c4iw_dev *rhp; in c4iw_create_srq() local
2681 rhp = php->rhp; in c4iw_create_srq()
2683 if (!rhp->rdev.lldi.vr->srq.size) in c4iw_create_srq()
2685 if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size) in c4iw_create_srq()
2703 srq->idx = c4iw_alloc_srq_idx(&rhp->rdev); in c4iw_create_srq()
2716 srq->rhp = rhp; in c4iw_create_srq()
2721 (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) * in c4iw_create_srq()
2727 &rhp->rdev.uctx, srq->wr_waitp); in c4iw_create_srq()
2732 if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6) in c4iw_create_srq()
2748 uresp.qid_mask = rhp->rdev.qpmask; in c4iw_create_srq()
2784 free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, in c4iw_create_srq()
2789 c4iw_free_srq_idx(&rhp->rdev, srq->idx); in c4iw_create_srq()
2797 struct c4iw_dev *rhp; in c4iw_destroy_srq() local
2802 rhp = srq->rhp; in c4iw_destroy_srq()
2807 free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx, in c4iw_destroy_srq()
2809 c4iw_free_srq_idx(&rhp->rdev, srq->idx); in c4iw_destroy_srq()