Lines matching references to csk (struct cxgbi_sock *) in the Chelsio cxgb3i iSCSI driver.
156 static int push_tx_frames(struct cxgbi_sock *csk, int req_completion);
158 static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb, in send_act_open_req() argument
161 unsigned int wscale = cxgbi_sock_compute_wscale(csk->rcv_win); in send_act_open_req()
167 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, csk->atid)); in send_act_open_req()
168 req->local_port = csk->saddr.sin_port; in send_act_open_req()
169 req->peer_port = csk->daddr.sin_port; in send_act_open_req()
170 req->local_ip = csk->saddr.sin_addr.s_addr; in send_act_open_req()
171 req->peer_ip = csk->daddr.sin_addr.s_addr; in send_act_open_req()
174 V_WND_SCALE(wscale) | V_MSS_IDX(csk->mss_idx) | in send_act_open_req()
177 V_RCV_BUFSIZ(csk->rcv_win >> 10)); in send_act_open_req()
181 csk, csk->state, csk->flags, csk->atid, in send_act_open_req()
184 csk->mss_idx, e->idx, e->smt_idx); in send_act_open_req()
186 l2t_send(csk->cdev->lldev, skb, csk->l2t); in send_act_open_req()
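send_act_open_req() fills a pre-built CPL_ACT_OPEN_REQ with the connection 4-tuple, packs the window scale, MSS index, and receive buffer size (in KB) into the option word, and hands the skb to the L2T layer for transmit. Below is a minimal user-space model of a window-scale computation such as cxgbi_sock_compute_wscale() might perform; it is a sketch for illustration, not the kernel helper itself.

static unsigned int compute_wscale(unsigned int rcv_win)
{
        /* Smallest shift (capped at 14, the RFC 7323 maximum) that
         * lets a 16-bit advertised window cover rcv_win. */
        unsigned int wscale = 0;

        while (wscale < 14 && (65535U << wscale) < rcv_win)
                wscale++;
        return wscale;
}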
200 static void send_close_req(struct cxgbi_sock *csk) in send_close_req() argument
202 struct sk_buff *skb = csk->cpl_close; in send_close_req()
204 unsigned int tid = csk->tid; in send_close_req()
208 csk, csk->state, csk->flags, csk->tid); in send_close_req()
210 csk->cpl_close = NULL; in send_close_req()
214 req->rsvd = htonl(csk->write_seq); in send_close_req()
216 cxgbi_sock_skb_entail(csk, skb); in send_close_req()
217 if (csk->state >= CTP_ESTABLISHED) in send_close_req()
218 push_tx_frames(csk, 1); in send_close_req()
239 static void send_abort_req(struct cxgbi_sock *csk) in send_abort_req() argument
241 struct sk_buff *skb = csk->cpl_abort_req; in send_abort_req()
244 if (unlikely(csk->state == CTP_ABORTING || !skb)) in send_abort_req()
246 cxgbi_sock_set_state(csk, CTP_ABORTING); in send_abort_req()
247 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_PENDING); in send_abort_req()
249 cxgbi_sock_purge_write_queue(csk); in send_abort_req()
251 csk->cpl_abort_req = NULL; in send_abort_req()
256 req->wr.wr_lo = htonl(V_WR_TID(csk->tid)); in send_abort_req()
257 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, csk->tid)); in send_abort_req()
258 req->rsvd0 = htonl(csk->snd_nxt); in send_abort_req()
259 req->rsvd1 = !cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT); in send_abort_req()
264 csk, csk->state, csk->flags, csk->tid, csk->snd_nxt, in send_abort_req()
267 l2t_send(csk->cdev->lldev, skb, csk->l2t); in send_abort_req()
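send_abort_req() is careful about ordering: it first moves the socket to CTP_ABORTING and sets CTPF_ABORT_RPL_PENDING, then purges the write queue so no stale TX is flushed after the reset. The request carries snd_nxt in rsvd0, and rsvd1 records whether any TX data was ever sent (the inverse of CTPF_TX_DATA_SENT).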
275 static void send_abort_rpl(struct cxgbi_sock *csk, int rst_status) in send_abort_rpl() argument
277 struct sk_buff *skb = csk->cpl_abort_rpl; in send_abort_rpl()
282 csk, csk->state, csk->flags, csk->tid, rst_status); in send_abort_rpl()
284 csk->cpl_abort_rpl = NULL; in send_abort_rpl()
287 rpl->wr.wr_lo = htonl(V_WR_TID(csk->tid)); in send_abort_rpl()
288 OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, csk->tid)); in send_abort_rpl()
290 cxgb3_ofld_send(csk->cdev->lldev, skb); in send_abort_rpl()
298 static u32 send_rx_credits(struct cxgbi_sock *csk, u32 credits) in send_rx_credits() argument
306 csk, csk->state, csk->flags, csk->tid, credits, dack); in send_rx_credits()
310 pr_info("csk 0x%p, credit %u, OOM.\n", csk, credits); in send_rx_credits()
315 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, csk->tid)); in send_rx_credits()
319 cxgb3_ofld_send(csk->cdev->lldev, skb); in send_rx_credits()
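send_rx_credits() returns read credits to the adapter with a CPL_RX_DATA_ACK so the peer's advertised window can reopen; on skb allocation failure it logs and returns 0 credits. A user-space model of the caller-side bookkeeping is sketched below; the field names follow the driver, but the return threshold is an illustrative assumption.

struct rx_credit_state {
        unsigned int copied_seq;  /* consumed by the application */
        unsigned int rcv_wup;     /* highest seq already credited */
        unsigned int rcv_win;
};

static unsigned int credits_to_return(struct rx_credit_state *s)
{
        unsigned int pending = s->copied_seq - s->rcv_wup;

        if (pending < s->rcv_win / 4)   /* illustrative threshold */
                return 0;
        s->rcv_wup += pending;          /* assume the ACK goes out */
        return pending;
}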
351 static inline void make_tx_data_wr(struct cxgbi_sock *csk, struct sk_buff *skb, in make_tx_data_wr() argument
355 struct l2t_entry *l2t = csk->l2t; in make_tx_data_wr()
361 req->wr_lo = htonl(V_WR_TID(csk->tid)); in make_tx_data_wr()
366 V_TX_SHOVE((skb_peek(&csk->write_queue) ? 0 : 1))); in make_tx_data_wr()
367 req->sndseq = htonl(csk->snd_nxt); in make_tx_data_wr()
370 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) { in make_tx_data_wr()
372 V_TX_CPU_IDX(csk->rss_qid)); in make_tx_data_wr()
374 req->param |= htonl(V_TX_SNDBUF(csk->snd_win >> 15)); in make_tx_data_wr()
375 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT); in make_tx_data_wr()
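make_tx_data_wr() sets TX_SHOVE only when the write queue is empty after this skb, so the hardware can push the data out immediately rather than wait to coalesce more. The first WR of a connection additionally programs the RSS queue (TX_CPU_IDX from rss_qid) and the send buffer size (snd_win scaled down by 2^15), after which CTPF_TX_DATA_SENT keeps later WRs from repeating that setup.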
393 static int push_tx_frames(struct cxgbi_sock *csk, int req_completion) in push_tx_frames() argument
398 if (unlikely(csk->state < CTP_ESTABLISHED || in push_tx_frames()
399 csk->state == CTP_CLOSE_WAIT_1 || csk->state >= CTP_ABORTING)) { in push_tx_frames()
402 csk, csk->state, csk->flags, csk->tid); in push_tx_frames()
406 while (csk->wr_cred && (skb = skb_peek(&csk->write_queue)) != NULL) { in push_tx_frames()
416 if (csk->wr_cred < wrs_needed) { in push_tx_frames()
419 csk, skb->len, skb->data_len, frags, in push_tx_frames()
420 wrs_needed, csk->wr_cred); in push_tx_frames()
424 __skb_unlink(skb, &csk->write_queue); in push_tx_frames()
427 csk->wr_cred -= wrs_needed; in push_tx_frames()
428 csk->wr_una_cred += wrs_needed; in push_tx_frames()
429 cxgbi_sock_enqueue_wr(csk, skb); in push_tx_frames()
434 csk, skb->len, skb->data_len, frags, skb->csum, in push_tx_frames()
435 csk->wr_cred, csk->wr_una_cred); in push_tx_frames()
439 csk->wr_una_cred == wrs_needed) || in push_tx_frames()
440 csk->wr_una_cred >= csk->wr_max_cred / 2) { in push_tx_frames()
442 csk->wr_una_cred = 0; in push_tx_frames()
445 make_tx_data_wr(csk, skb, len, req_completion); in push_tx_frames()
446 csk->snd_nxt += len; in push_tx_frames()
452 csk, csk->tid, skb); in push_tx_frames()
454 l2t_send(csk->cdev->lldev, skb, csk->l2t); in push_tx_frames()
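push_tx_frames() only runs in states where TX is legal, then drains the write queue while work-request credits remain. The credit bookkeeping visible above is modeled below in self-contained C: each skb costs wrs_needed credits, and a completion is requested either when this WR accounts for all unacknowledged credits or when unacknowledged credits reach half the maximum, at which point wr_una_cred is reset.

struct wr_credit_state {
        unsigned int wr_cred;      /* credits still available */
        unsigned int wr_una_cred;  /* charged but not yet acked */
        unsigned int wr_max_cred;
};

/* Returns -1 when the queue must stall until a WR ack, else whether
 * this WR should request a completion. A sketch of the accounting,
 * not the driver function itself. */
static int charge_wr(struct wr_credit_state *c, unsigned int wrs_needed,
                     int req_completion)
{
        if (c->wr_cred < wrs_needed)
                return -1;
        c->wr_cred -= wrs_needed;
        c->wr_una_cred += wrs_needed;
        if ((req_completion && c->wr_una_cred == wrs_needed) ||
            c->wr_una_cred >= c->wr_max_cred / 2) {
                c->wr_una_cred = 0;
                return 1;
        }
        return 0;
}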
465 static inline void free_atid(struct cxgbi_sock *csk) in free_atid() argument
467 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) { in free_atid()
468 cxgb3_free_atid(csk->cdev->lldev, csk->atid); in free_atid()
469 cxgbi_sock_clear_flag(csk, CTPF_HAS_ATID); in free_atid()
470 cxgbi_sock_put(csk); in free_atid()
476 struct cxgbi_sock *csk = ctx; in do_act_establish() local
484 atid, atid, csk, csk->state, csk->flags, rcv_isn); in do_act_establish()
486 cxgbi_sock_get(csk); in do_act_establish()
487 cxgbi_sock_set_flag(csk, CTPF_HAS_TID); in do_act_establish()
488 csk->tid = tid; in do_act_establish()
489 cxgb3_insert_tid(csk->cdev->lldev, &t3_client, csk, tid); in do_act_establish()
491 free_atid(csk); in do_act_establish()
493 csk->rss_qid = G_QNUM(ntohs(skb->csum)); in do_act_establish()
495 spin_lock_bh(&csk->lock); in do_act_establish()
496 if (csk->retry_timer.function) { in do_act_establish()
497 del_timer(&csk->retry_timer); in do_act_establish()
498 csk->retry_timer.function = NULL; in do_act_establish()
501 if (unlikely(csk->state != CTP_ACTIVE_OPEN)) in do_act_establish()
503 csk, csk->state, csk->flags, csk->tid); in do_act_establish()
505 csk->copied_seq = csk->rcv_wup = csk->rcv_nxt = rcv_isn; in do_act_establish()
506 if (csk->rcv_win > (M_RCV_BUFSIZ << 10)) in do_act_establish()
507 csk->rcv_wup -= csk->rcv_win - (M_RCV_BUFSIZ << 10); in do_act_establish()
509 cxgbi_sock_established(csk, ntohl(req->snd_isn), ntohs(req->tcp_opt)); in do_act_establish()
511 if (unlikely(cxgbi_sock_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED))) in do_act_establish()
513 send_abort_req(csk); in do_act_establish()
515 if (skb_queue_len(&csk->write_queue)) in do_act_establish()
516 push_tx_frames(csk, 1); in do_act_establish()
517 cxgbi_conn_tx_open(csk); in do_act_establish()
520 spin_unlock_bh(&csk->lock); in do_act_establish()
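do_act_establish() switches the connection from its atid to the hardware tid, records the RSS queue from the CPL, and seeds the receive sequence state from the ISN. The clamp on rcv_wup exists because the option word can only advertise M_RCV_BUFSIZ KB; a model of the seeding follows (the M_RCV_BUFSIZ value used here is an assumption for illustration only).

#define MODEL_M_RCV_BUFSIZ 0x3FFFU      /* illustrative field maximum */

struct rx_seq_state {
        unsigned int copied_seq, rcv_wup, rcv_nxt, rcv_win;
};

static void seed_rx_state(struct rx_seq_state *s, unsigned int rcv_isn)
{
        s->copied_seq = s->rcv_wup = s->rcv_nxt = rcv_isn;
        /* If the software window exceeds what the CPL advertised,
         * pre-charge the difference so later credit returns do not
         * overrun the hardware window. */
        if (s->rcv_win > (MODEL_M_RCV_BUFSIZ << 10))
                s->rcv_wup -= s->rcv_win - (MODEL_M_RCV_BUFSIZ << 10);
}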
549 struct cxgbi_sock *csk = from_timer(csk, t, retry_timer); in act_open_retry_timer() local
554 csk, csk->state, csk->flags, csk->tid); in act_open_retry_timer()
556 cxgbi_sock_get(csk); in act_open_retry_timer()
557 spin_lock_bh(&csk->lock); in act_open_retry_timer()
560 cxgbi_sock_fail_act_open(csk, -ENOMEM); in act_open_retry_timer()
562 skb->sk = (struct sock *)csk; in act_open_retry_timer()
564 send_act_open_req(csk, skb, csk->l2t); in act_open_retry_timer()
566 spin_unlock_bh(&csk->lock); in act_open_retry_timer()
567 cxgbi_sock_put(csk); in act_open_retry_timer()
572 struct cxgbi_sock *csk = ctx; in do_act_open_rpl() local
576 csk, csk->state, csk->flags, csk->atid, rpl->status, in do_act_open_rpl()
577 &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port), in do_act_open_rpl()
578 &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port)); in do_act_open_rpl()
585 cxgbi_sock_get(csk); in do_act_open_rpl()
586 spin_lock_bh(&csk->lock); in do_act_open_rpl()
588 csk->retry_timer.function != act_open_retry_timer) { in do_act_open_rpl()
589 csk->retry_timer.function = act_open_retry_timer; in do_act_open_rpl()
590 mod_timer(&csk->retry_timer, jiffies + HZ / 2); in do_act_open_rpl()
592 cxgbi_sock_fail_act_open(csk, in do_act_open_rpl()
595 spin_unlock_bh(&csk->lock); in do_act_open_rpl()
596 cxgbi_sock_put(csk); in do_act_open_rpl()
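do_act_open_rpl() runs with the socket locked and an extra reference held (cxgbi_sock_get()/cxgbi_sock_put()), so the socket cannot disappear while the reply is processed. For a retryable status it installs act_open_retry_timer and arms it at jiffies + HZ/2; otherwise it fails the open through cxgbi_sock_fail_act_open() with an errno derived from rpl->status. The timer path simply reallocates an skb and resends the request, failing with -ENOMEM if even that allocation is refused.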
607 struct cxgbi_sock *csk = ctx; in do_peer_close() local
611 csk, csk->state, csk->flags, csk->tid); in do_peer_close()
613 cxgbi_sock_rcv_peer_close(csk); in do_peer_close()
625 struct cxgbi_sock *csk = ctx; in do_close_con_rpl() local
630 csk, csk->state, csk->flags, csk->tid, ntohl(rpl->snd_nxt)); in do_close_con_rpl()
632 cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt)); in do_close_con_rpl()
643 static int abort_status_to_errno(struct cxgbi_sock *csk, int abort_reason, in abort_status_to_errno() argument
649 return csk->state > CTP_ESTABLISHED ? -EPIPE : -ECONNRESET; in abort_status_to_errno()
663 struct cxgbi_sock *csk = ctx; in do_abort_req() local
668 csk, csk->state, csk->flags, csk->tid); in do_abort_req()
675 cxgbi_sock_get(csk); in do_abort_req()
676 spin_lock_bh(&csk->lock); in do_abort_req()
678 if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) { in do_abort_req()
679 cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD); in do_abort_req()
680 cxgbi_sock_set_state(csk, CTP_ABORTING); in do_abort_req()
684 cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); in do_abort_req()
685 send_abort_rpl(csk, rst_status); in do_abort_req()
687 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { in do_abort_req()
688 csk->err = abort_status_to_errno(csk, req->status, &rst_status); in do_abort_req()
689 cxgbi_sock_closed(csk); in do_abort_req()
693 spin_unlock_bh(&csk->lock); in do_abort_req()
694 cxgbi_sock_put(csk); in do_abort_req()
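do_abort_req() acts in two steps: the first delivery only latches CTPF_ABORT_REQ_RCVD and moves the socket to CTP_ABORTING; a later delivery clears the flag and actually sends the CPL_ABORT_RPL. Only when no locally initiated abort is outstanding (CTPF_ABORT_RPL_PENDING clear) does it translate the CPL status to an errno via abort_status_to_errno(), where anything past CTP_ESTABLISHED maps to -EPIPE and earlier states to -ECONNRESET, and close the socket.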
710 struct cxgbi_sock *csk = ctx; in do_abort_rpl() local
714 rpl->status, csk, csk ? csk->state : 0, in do_abort_rpl()
715 csk ? csk->flags : 0UL); in do_abort_rpl()
731 if (csk) in do_abort_rpl()
732 cxgbi_sock_rcv_abort_rpl(csk); in do_abort_rpl()
745 struct cxgbi_sock *csk = ctx; in do_iscsi_hdr() local
755 csk, csk->state, csk->flags, csk->tid, skb, skb->len); in do_iscsi_hdr()
757 spin_lock_bh(&csk->lock); in do_iscsi_hdr()
759 if (unlikely(csk->state >= CTP_PASSIVE_CLOSE)) { in do_iscsi_hdr()
762 csk, csk->state, csk->flags, csk->tid); in do_iscsi_hdr()
763 if (csk->state != CTP_ABORTING) in do_iscsi_hdr()
779 csk->cdev->ports[csk->port_id]->name, csk->tid, in do_iscsi_hdr()
789 csk->cdev->ports[csk->port_id]->name, csk->tid, in do_iscsi_hdr()
801 csk, skb, skb->len, cxgbi_skcb_rx_pdulen(skb), status); in do_iscsi_hdr()
814 csk->cdev->ports[csk->port_id]->name, in do_iscsi_hdr()
815 csk->tid, sizeof(data_cpl), skb->len, err); in do_iscsi_hdr()
826 csk->rcv_nxt = ntohl(ddp_cpl.seq) + cxgbi_skcb_rx_pdulen(skb); in do_iscsi_hdr()
828 __skb_queue_tail(&csk->receive_queue, skb); in do_iscsi_hdr()
829 cxgbi_conn_pdu_ready(csk); in do_iscsi_hdr()
831 spin_unlock_bh(&csk->lock); in do_iscsi_hdr()
835 send_abort_req(csk); in do_iscsi_hdr()
837 spin_unlock_bh(&csk->lock); in do_iscsi_hdr()
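do_iscsi_hdr() takes the socket lock, drops PDUs once the socket has entered a passive-close state (sending an abort unless one is already in flight), and otherwise validates the coalesced header/data/DDP completions. On success it advances rcv_nxt by the full PDU length reported in the DDP CPL, queues the skb on receive_queue, and notifies the upper layer through cxgbi_conn_pdu_ready().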
849 struct cxgbi_sock *csk = ctx; in do_wr_ack() local
854 csk, csk->state, csk->flags, csk->tid, ntohs(hdr->credits)); in do_wr_ack()
856 cxgbi_sock_rcv_wr_ack(csk, ntohs(hdr->credits), ntohl(hdr->snd_una), 1); in do_wr_ack()
865 static int alloc_cpls(struct cxgbi_sock *csk) in alloc_cpls() argument
867 csk->cpl_close = alloc_wr(sizeof(struct cpl_close_con_req), 0, in alloc_cpls()
869 if (!csk->cpl_close) in alloc_cpls()
871 csk->cpl_abort_req = alloc_wr(sizeof(struct cpl_abort_req), 0, in alloc_cpls()
873 if (!csk->cpl_abort_req) in alloc_cpls()
876 csk->cpl_abort_rpl = alloc_wr(sizeof(struct cpl_abort_rpl), 0, in alloc_cpls()
878 if (!csk->cpl_abort_rpl) in alloc_cpls()
884 cxgbi_sock_free_cpl_skbs(csk); in alloc_cpls()
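alloc_cpls() pre-allocates the close, abort-request, and abort-reply WRs at connection setup and parks them in the socket (cpl_close, cpl_abort_req, cpl_abort_rpl). The teardown paths above consume these skbs and NULL the pointers, which is why send_close_req() and send_abort_req() can never fail on memory allocation at the moment the connection must be torn down; any partial allocation is unwound with cxgbi_sock_free_cpl_skbs().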
888 static void l2t_put(struct cxgbi_sock *csk) in l2t_put() argument
890 struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; in l2t_put()
892 if (csk->l2t) { in l2t_put()
893 l2t_release(t3dev, csk->l2t); in l2t_put()
894 csk->l2t = NULL; in l2t_put()
895 cxgbi_sock_put(csk); in l2t_put()
903 static void release_offload_resources(struct cxgbi_sock *csk) in release_offload_resources() argument
905 struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; in release_offload_resources()
909 csk, csk->state, csk->flags, csk->tid); in release_offload_resources()
911 csk->rss_qid = 0; in release_offload_resources()
912 cxgbi_sock_free_cpl_skbs(csk); in release_offload_resources()
914 if (csk->wr_cred != csk->wr_max_cred) { in release_offload_resources()
915 cxgbi_sock_purge_wr_queue(csk); in release_offload_resources()
916 cxgbi_sock_reset_wr_list(csk); in release_offload_resources()
918 l2t_put(csk); in release_offload_resources()
919 if (cxgbi_sock_flag(csk, CTPF_HAS_ATID)) in release_offload_resources()
920 free_atid(csk); in release_offload_resources()
921 else if (cxgbi_sock_flag(csk, CTPF_HAS_TID)) { in release_offload_resources()
922 cxgb3_remove_tid(t3dev, (void *)csk, csk->tid); in release_offload_resources()
923 cxgbi_sock_clear_flag(csk, CTPF_HAS_TID); in release_offload_resources()
924 cxgbi_sock_put(csk); in release_offload_resources()
926 csk->dst = NULL; in release_offload_resources()
927 csk->cdev = NULL; in release_offload_resources()
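release_offload_resources() undoes init_act_open() in reverse: it frees the pre-allocated CPL skbs, purges and resets the WR list if any credits are still charged (wr_cred != wr_max_cred), releases the L2T entry, and finally frees the atid or removes the tid, dropping the socket reference taken with each resource.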
952 static int init_act_open(struct cxgbi_sock *csk) in init_act_open() argument
954 struct dst_entry *dst = csk->dst; in init_act_open()
955 struct cxgbi_device *cdev = csk->cdev; in init_act_open()
957 struct net_device *ndev = cdev->ports[csk->port_id]; in init_act_open()
958 struct cxgbi_hba *chba = cdev->hbas[csk->port_id]; in init_act_open()
963 "csk 0x%p,%u,0x%lx.\n", csk, csk->state, csk->flags); in init_act_open()
967 csk->saddr.sin_addr.s_addr = chba->ipv4addr; in init_act_open()
969 csk->rss_qid = 0; in init_act_open()
970 csk->l2t = t3_l2t_get(t3dev, dst, ndev, in init_act_open()
971 &csk->daddr.sin_addr.s_addr); in init_act_open()
972 if (!csk->l2t) { in init_act_open()
976 cxgbi_sock_get(csk); in init_act_open()
978 csk->atid = cxgb3_alloc_atid(t3dev, &t3_client, csk); in init_act_open()
979 if (csk->atid < 0) { in init_act_open()
984 cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); in init_act_open()
985 cxgbi_sock_get(csk); in init_act_open()
992 skb->sk = (struct sock *)csk; in init_act_open()
994 csk->snd_win = cxgb3i_snd_win; in init_act_open()
995 csk->rcv_win = cxgb3i_rcv_win; in init_act_open()
997 csk->wr_max_cred = csk->wr_cred = T3C_DATA(t3dev)->max_wrs - 1; in init_act_open()
998 csk->wr_una_cred = 0; in init_act_open()
999 csk->mss_idx = cxgbi_sock_select_mss(csk, dst_mtu(dst)); in init_act_open()
1000 cxgbi_sock_reset_wr_list(csk); in init_act_open()
1001 csk->err = 0; in init_act_open()
1005 csk, csk->state, csk->flags, in init_act_open()
1006 &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port), in init_act_open()
1007 &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port)); in init_act_open()
1009 cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); in init_act_open()
1010 send_act_open_req(csk, skb, csk->l2t); in init_act_open()
1014 cxgb3_free_atid(t3dev, csk->atid); in init_act_open()
1016 cxgbi_sock_put(csk); in init_act_open()
1017 l2t_release(t3dev, csk->l2t); in init_act_open()
1018 csk->l2t = NULL; in init_act_open()
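init_act_open() acquires its resources in a fixed order: the L2T entry (with a socket reference), the atid (another reference), the act-open skb, then the window, credit, and MSS parameters, before moving to CTP_ACTIVE_OPEN and sending the request. The error path unwinds in exactly the reverse order, as release_offload_resources() does for an established connection. A self-contained sketch of that idiom follows, with stub helpers standing in for t3_l2t_get(), cxgb3_alloc_atid(), and the skb allocation.

static int get_l2t(void)       { return 0; }    /* stub */
static int alloc_atid(void)    { return 0; }    /* stub */
static int alloc_req_skb(void) { return 0; }    /* stub */
static void put_atid(void)     { }
static void put_l2t(void)      { }

static int open_conn(void)
{
        if (get_l2t() < 0)
                return -1;              /* nothing to unwind yet */
        if (alloc_atid() < 0)
                goto rel_l2t;
        if (alloc_req_skb() < 0)
                goto rel_atid;
        /* set windows/credits/MSS, then send CPL_ACT_OPEN_REQ */
        return 0;

rel_atid:
        put_atid();
rel_l2t:
        put_l2t();
        return -1;
}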
1095 static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk, in ddp_set_map() argument
1152 static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, in ddp_setup_conn_pgidx() argument
1161 "csk 0x%p, tid %u, pg_idx %d.\n", csk, tid, pg_idx); in ddp_setup_conn_pgidx()
1176 cxgb3_ofld_send(csk->cdev->lldev, skb); in ddp_setup_conn_pgidx()
1188 static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid, in ddp_setup_conn_digest() argument
1197 "csk 0x%p, tid %u, crc %d,%d.\n", csk, tid, hcrc, dcrc); in ddp_setup_conn_digest()
1212 cxgb3_ofld_send(csk->cdev->lldev, skb); in ddp_setup_conn_digest()
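ddp_setup_conn_pgidx() and ddp_setup_conn_digest() both program per-connection TCB state through a CPL sent with cxgb3_ofld_send(): the former sets the DDP page-size index, the latter enables header and/or data digest offload. Below is a sketch of packing the two digest enables into a single field value, with illustrative bit names; the real field layout is hardware-defined and may differ.

#define MODEL_CRC_HEADER 0x1U   /* illustrative bit */
#define MODEL_CRC_DATA   0x2U   /* illustrative bit */

static unsigned int digest_field(int hcrc, int dcrc)
{
        return (hcrc ? MODEL_CRC_HEADER : 0) |
               (dcrc ? MODEL_CRC_DATA : 0);
}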