Lines matching refs:smc (net/smc/af_smc.c) — each entry gives the file line number, the matching source line, and the enclosing function; "local"/"argument" marks lines where `smc` is declared as a local variable or a parameter.
70 struct smc_sock *smc = smc_sk(sk); in smc_set_keepalive() local
72 smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val); in smc_set_keepalive()
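
The two matches above are essentially the whole function: an smc_sock wraps an ordinary kernel TCP socket (the "CLC socket", smc->clcsock) and forwards TCP-level knobs such as keepalive to it. A minimal sketch of the complete function, with the one elided blank line reconstructed as an assumption:

static void smc_set_keepalive(struct sock *sk, int val)
{
	struct smc_sock *smc = smc_sk(sk);			/* line 70 */

	/* delegate to the internal TCP socket's protocol hook	   line 72 */
	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
}
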
134 static void smc_restore_fallback_changes(struct smc_sock *smc) in smc_restore_fallback_changes() argument
136 if (smc->clcsock->file) { /* non-accepted sockets have no file yet */ in smc_restore_fallback_changes()
137 smc->clcsock->file->private_data = smc->sk.sk_socket; in smc_restore_fallback_changes()
138 smc->clcsock->file = NULL; in smc_restore_fallback_changes()
142 static int __smc_release(struct smc_sock *smc) in __smc_release() argument
144 struct sock *sk = &smc->sk; in __smc_release()
147 if (!smc->use_fallback) { in __smc_release()
148 rc = smc_close_active(smc); in __smc_release()
158 rc = kernel_sock_shutdown(smc->clcsock, in __smc_release()
164 smc_restore_fallback_changes(smc); in __smc_release()
170 if (smc->clcsock) { in __smc_release()
172 smc_clcsock_release(smc); in __smc_release()
175 if (!smc->use_fallback) in __smc_release()
176 smc_conn_free(&smc->conn); in __smc_release()
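
Read together, the __smc_release() matches split on smc->use_fallback: a native socket runs the SMC close protocol (smc_close_active()), a fallback socket shuts down the TCP clcsock and undoes the file-ownership switch, and the SMC connection object is freed only for native sockets. A hedged sketch of the control flow; the elided branches and state checks are paraphrased, not verbatim:

static int __smc_release(struct smc_sock *smc)
{
	struct sock *sk = &smc->sk;
	int rc = 0;

	if (!smc->use_fallback) {
		rc = smc_close_active(smc);	/* SMC-native close handshake */
		/* ... mark sock dead, set sk_shutdown (elided) ... */
	} else {
		/* fallback: TCP did the work; wake a blocked accept, if any
		 * (the listening-state condition here is an assumption) */
		if (sk->sk_state == SMC_LISTEN)
			rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
		smc_restore_fallback_changes(smc);	/* see lines 134-138 */
	}
	if (smc->clcsock)
		smc_clcsock_release(smc);	/* drop the internal TCP sock */
	if (!smc->use_fallback)
		smc_conn_free(&smc->conn);
	return rc;
}
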
185 struct smc_sock *smc; in smc_release() local
192 smc = smc_sk(sk); in smc_release()
195 if (smc->connect_nonblock && sk->sk_state == SMC_INIT) in smc_release()
196 tcp_abort(smc->clcsock->sk, ECONNABORTED); in smc_release()
198 if (cancel_work_sync(&smc->connect_work)) in smc_release()
199 sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */ in smc_release()
209 rc = __smc_release(smc); in smc_release()
235 struct smc_sock *smc; in smc_sock_alloc() local
248 smc = smc_sk(sk); in smc_sock_alloc()
249 INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work); in smc_sock_alloc()
250 INIT_WORK(&smc->connect_work, smc_connect_work); in smc_sock_alloc()
251 INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work); in smc_sock_alloc()
252 INIT_LIST_HEAD(&smc->accept_q); in smc_sock_alloc()
253 spin_lock_init(&smc->accept_q_lock); in smc_sock_alloc()
254 spin_lock_init(&smc->conn.send_lock); in smc_sock_alloc()
257 mutex_init(&smc->clcsock_release_lock); in smc_sock_alloc()
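
smc_sock_alloc() wires up everything a connection may later need: a work item for the server-side TCP listen path, one for nonblocking connects, delayed work for deferred transmission, the accept queue with its lock, the per-connection send lock, and the mutex serializing clcsock teardown. A sketch of that block in context; the surrounding allocation calls are assumptions:

	sk = sk_alloc(net, AF_SMC, GFP_KERNEL, prot, 0);	/* assumption */
	if (!sk)
		return NULL;
	sock_init_data(sock, sk);				/* assumption */

	smc = smc_sk(sk);
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);	/* server side */
	INIT_WORK(&smc->connect_work, smc_connect_work);	/* nonblocking connect */
	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);	/* deferred TX */
	INIT_LIST_HEAD(&smc->accept_q);
	spin_lock_init(&smc->accept_q_lock);
	spin_lock_init(&smc->conn.send_lock);
	mutex_init(&smc->clcsock_release_lock);
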
267 struct smc_sock *smc; in smc_bind() local
270 smc = smc_sk(sk); in smc_bind()
291 if (sk->sk_state != SMC_INIT || smc->connect_nonblock) in smc_bind()
294 smc->clcsock->sk->sk_reuse = sk->sk_reuse; in smc_bind()
295 rc = kernel_bind(smc->clcsock, uaddr, addr_len); in smc_bind()
340 static void smc_copy_sock_settings_to_clc(struct smc_sock *smc) in smc_copy_sock_settings_to_clc() argument
342 smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC); in smc_copy_sock_settings_to_clc()
350 static void smc_copy_sock_settings_to_smc(struct smc_sock *smc) in smc_copy_sock_settings_to_smc() argument
352 smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC); in smc_copy_sock_settings_to_smc()
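
The two wrappers copy SOL_SOCKET-level settings in opposite directions, each with its own flag mask (SK_FLAGS_SMC_TO_CLC vs. SK_FLAGS_CLC_TO_SMC), so options set on the SMC socket reach the TCP clcsock before connect/bind, and state learned on the clcsock flows back after fallback decisions. A sketch of the shared helper they both call; the field-by-field copy is condensed and the exact field list is an assumption:

static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
				   unsigned long mask)
{
	/* mirror plain settings (buffer sizes, timeouts, ...; condensed) */
	nsk->sk_sndbuf   = osk->sk_sndbuf;
	nsk->sk_rcvbuf   = osk->sk_rcvbuf;
	nsk->sk_sndtimeo = osk->sk_sndtimeo;
	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;

	/* copy only the flags selected by the direction-specific mask */
	nsk->sk_flags &= ~mask;
	nsk->sk_flags |= osk->sk_flags & mask;
}
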
390 static int smcr_clnt_conf_first_link(struct smc_sock *smc) in smcr_clnt_conf_first_link() argument
392 struct smc_link *link = smc->conn.lnk; in smcr_clnt_conf_first_link()
402 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc), in smcr_clnt_conf_first_link()
418 if (smcr_link_reg_rmb(link, smc->conn.rmb_desc)) in smcr_clnt_conf_first_link()
422 smc->conn.rmb_desc->is_conf_rkey = true; in smcr_clnt_conf_first_link()
438 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc), in smcr_clnt_conf_first_link()
459 static void smc_conn_save_peer_info_fce(struct smc_sock *smc, in smc_conn_save_peer_info_fce() argument
471 if (smc->conn.lgr->is_smcd) { in smc_conn_save_peer_info_fce()
472 memcpy(smc->conn.lgr->negotiated_eid, clc_v2->d1.eid, in smc_conn_save_peer_info_fce()
477 memcpy(smc->conn.lgr->negotiated_eid, clc_v2->r1.eid, in smc_conn_save_peer_info_fce()
483 smc->conn.lgr->peer_os = fce->os_type; in smc_conn_save_peer_info_fce()
484 smc->conn.lgr->peer_smc_release = fce->release; in smc_conn_save_peer_info_fce()
486 memcpy(smc->conn.lgr->peer_hostname, fce->hostname, in smc_conn_save_peer_info_fce()
490 static void smcr_conn_save_peer_info(struct smc_sock *smc, in smcr_conn_save_peer_info() argument
495 smc->conn.peer_rmbe_idx = clc->r0.rmbe_idx; in smcr_conn_save_peer_info()
496 smc->conn.local_tx_ctrl.token = ntohl(clc->r0.rmbe_alert_token); in smcr_conn_save_peer_info()
497 smc->conn.peer_rmbe_size = bufsize; in smcr_conn_save_peer_info()
498 atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size); in smcr_conn_save_peer_info()
499 smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1); in smcr_conn_save_peer_info()
502 static void smcd_conn_save_peer_info(struct smc_sock *smc, in smcd_conn_save_peer_info() argument
507 smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx; in smcd_conn_save_peer_info()
508 smc->conn.peer_token = clc->d0.token; in smcd_conn_save_peer_info()
510 smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg); in smcd_conn_save_peer_info()
511 atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size); in smcd_conn_save_peer_info()
512 smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx; in smcd_conn_save_peer_info()
515 static void smc_conn_save_peer_info(struct smc_sock *smc, in smc_conn_save_peer_info() argument
518 if (smc->conn.lgr->is_smcd) in smc_conn_save_peer_info()
519 smcd_conn_save_peer_info(smc, clc); in smc_conn_save_peer_info()
521 smcr_conn_save_peer_info(smc, clc); in smc_conn_save_peer_info()
522 smc_conn_save_peer_info_fce(smc, clc); in smc_conn_save_peer_info()
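
The dispatch above stores peer buffer parameters per transport, and the offset math differs: SMC-R multiplies by (peer_rmbe_idx - 1), SMC-D by peer_rmbe_idx, and SMC-D additionally reserves room for its CDC header inside the buffer. A worked example with illustrative values:

/* Example of the tx_off / size arithmetic above (values illustrative):
 * SMC-R: bufsize = 65536, peer_rmbe_idx = 3
 *        tx_off = 65536 * (3 - 1) = 131072	(the "- 1": indices from 1)
 *        peer_rmbe_size = 65536
 * SMC-D: bufsize = 65536, peer_rmbe_idx = 3
 *        tx_off = 65536 * 3     = 196608	(no "- 1" here)
 *        peer_rmbe_size = 65536 - sizeof(struct smcd_cdc_msg)
 */
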
536 static void smc_stat_inc_fback_rsn_cnt(struct smc_sock *smc, in smc_stat_inc_fback_rsn_cnt() argument
542 if (fback_arr[cnt].fback_code == smc->fallback_rsn) { in smc_stat_inc_fback_rsn_cnt()
547 fback_arr[cnt].fback_code = smc->fallback_rsn; in smc_stat_inc_fback_rsn_cnt()
554 static void smc_stat_fallback(struct smc_sock *smc) in smc_stat_fallback() argument
556 struct net *net = sock_net(&smc->sk); in smc_stat_fallback()
558 mutex_lock(&net->smc.mutex_fback_rsn); in smc_stat_fallback()
559 if (smc->listen_smc) { in smc_stat_fallback()
560 smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->srv); in smc_stat_fallback()
561 net->smc.fback_rsn->srv_fback_cnt++; in smc_stat_fallback()
563 smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->clnt); in smc_stat_fallback()
564 net->smc.fback_rsn->clnt_fback_cnt++; in smc_stat_fallback()
566 mutex_unlock(&net->smc.mutex_fback_rsn); in smc_stat_fallback()
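
Fallback events are counted per network namespace under net->smc.mutex_fback_rsn, into a server-side or client-side array depending on whether the socket came from a listener (smc->listen_smc). The per-reason counter helper at lines 536-547 plausibly scans for an existing slot before claiming a free one; a hedged sketch, with the array bound and field names partly assumed:

static void smc_stat_inc_fback_rsn_cnt(struct smc_sock *smc,
				       struct smc_stats_fback *fback_arr)
{
	int cnt;

	for (cnt = 0; cnt < SMC_MAX_FBACK_RSN_CNT; cnt++) {
		if (fback_arr[cnt].fback_code == smc->fallback_rsn) {
			fback_arr[cnt].count++;		/* reason seen before */
			break;
		}
		if (!fback_arr[cnt].fback_code) {	/* first free slot */
			fback_arr[cnt].fback_code = smc->fallback_rsn;
			fback_arr[cnt].count++;
			break;
		}
	}
}
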
569 static void smc_switch_to_fallback(struct smc_sock *smc, int reason_code) in smc_switch_to_fallback() argument
571 wait_queue_head_t *smc_wait = sk_sleep(&smc->sk); in smc_switch_to_fallback()
572 wait_queue_head_t *clc_wait = sk_sleep(smc->clcsock->sk); in smc_switch_to_fallback()
575 smc->use_fallback = true; in smc_switch_to_fallback()
576 smc->fallback_rsn = reason_code; in smc_switch_to_fallback()
577 smc_stat_fallback(smc); in smc_switch_to_fallback()
578 trace_smc_switch_to_fallback(smc, reason_code); in smc_switch_to_fallback()
579 if (smc->sk.sk_socket && smc->sk.sk_socket->file) { in smc_switch_to_fallback()
580 smc->clcsock->file = smc->sk.sk_socket->file; in smc_switch_to_fallback()
581 smc->clcsock->file->private_data = smc->clcsock; in smc_switch_to_fallback()
582 smc->clcsock->wq.fasync_list = in smc_switch_to_fallback()
583 smc->sk.sk_socket->wq.fasync_list; in smc_switch_to_fallback()
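
Switching to fallback hands the user-visible struct file over to the TCP clcsock: file->private_data is repointed so subsequent syscalls resolve to the TCP socket, and the fasync list moves with it (smc_restore_fallback_changes() at lines 134-138 is the inverse). The wait-queue heads fetched at lines 571-572 hint at the elided tail: sleepers parked on the SMC socket's queue must be spliced onto the clcsock's queue so they are still woken afterwards. A sketch of that tail; lock ordering details are assumptions:

	unsigned long flags;

	/* move any waiters from the SMC socket's wait queue to the
	 * clcsock's, which is the one that will be woken from now on */
	spin_lock_irqsave(&smc_wait->lock, flags);
	spin_lock_nested(&clc_wait->lock, SINGLE_DEPTH_NESTING);
	list_splice_init(&smc_wait->head, &clc_wait->head);
	spin_unlock(&clc_wait->lock);
	spin_unlock_irqrestore(&smc_wait->lock, flags);
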
598 static int smc_connect_fallback(struct smc_sock *smc, int reason_code) in smc_connect_fallback() argument
600 smc_switch_to_fallback(smc, reason_code); in smc_connect_fallback()
601 smc_copy_sock_settings_to_clc(smc); in smc_connect_fallback()
602 smc->connect_nonblock = 0; in smc_connect_fallback()
603 if (smc->sk.sk_state == SMC_INIT) in smc_connect_fallback()
604 smc->sk.sk_state = SMC_ACTIVE; in smc_connect_fallback()
609 static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code, in smc_connect_decline_fallback() argument
612 struct net *net = sock_net(&smc->sk); in smc_connect_decline_fallback()
616 this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt); in smc_connect_decline_fallback()
617 if (smc->sk.sk_state == SMC_INIT) in smc_connect_decline_fallback()
618 sock_put(&smc->sk); /* passive closing */ in smc_connect_decline_fallback()
622 rc = smc_clc_send_decline(smc, reason_code, version); in smc_connect_decline_fallback()
624 this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt); in smc_connect_decline_fallback()
625 if (smc->sk.sk_state == SMC_INIT) in smc_connect_decline_fallback()
626 sock_put(&smc->sk); /* passive closing */ in smc_connect_decline_fallback()
630 return smc_connect_fallback(smc, reason_code); in smc_connect_decline_fallback()
633 static void smc_conn_abort(struct smc_sock *smc, int local_first) in smc_conn_abort() argument
636 smc_lgr_cleanup_early(&smc->conn); in smc_conn_abort()
638 smc_conn_free(&smc->conn); in smc_conn_abort()
643 static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini) in smc_find_rdma_device() argument
649 smc_pnet_find_roce_resource(smc->clcsock->sk, ini); in smc_find_rdma_device()
659 static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini) in smc_find_ism_device() argument
662 smc_pnet_find_ism_resource(smc->clcsock->sk, ini); in smc_find_ism_device()
685 static int smc_find_ism_v2_device_clnt(struct smc_sock *smc, in smc_find_ism_v2_device_clnt() argument
703 smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) { in smc_find_ism_v2_device_clnt()
722 static int smc_connect_ism_vlan_setup(struct smc_sock *smc, in smc_connect_ism_vlan_setup() argument
730 static int smc_find_proposal_devices(struct smc_sock *smc, in smc_find_proposal_devices() argument
737 smc_find_ism_device(smc, ini) || in smc_find_proposal_devices()
738 smc_connect_ism_vlan_setup(smc, ini)) in smc_find_proposal_devices()
744 smc_find_rdma_device(smc, ini)) in smc_find_proposal_devices()
754 smc_find_ism_v2_device_clnt(smc, ini)) in smc_find_proposal_devices()
759 ini->smcrv2.saddr = smc->clcsock->sk->sk_rcv_saddr; in smc_find_proposal_devices()
761 smc->clcsock->sk->sk_family != AF_INET || in smc_find_proposal_devices()
763 smc_find_rdma_device(smc, ini)) in smc_find_proposal_devices()
780 static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc, in smc_connect_ism_vlan_cleanup() argument
796 static int smc_connect_clc(struct smc_sock *smc, in smc_connect_clc() argument
803 rc = smc_clc_send_proposal(smc, ini); in smc_connect_clc()
807 return smc_clc_wait_msg(smc, aclc2, SMC_CLC_MAX_ACCEPT_LEN, in smc_connect_clc()
839 static int smc_connect_rdma_v2_prepare(struct smc_sock *smc, in smc_connect_rdma_v2_prepare() argument
856 if (smc_ib_find_route(smc->clcsock->sk->sk_rcv_saddr, in smc_connect_rdma_v2_prepare()
870 static int smc_connect_rdma(struct smc_sock *smc, in smc_connect_rdma() argument
885 reason_code = smc_connect_rdma_v2_prepare(smc, aclc, ini); in smc_connect_rdma()
890 reason_code = smc_conn_create(smc, ini); in smc_connect_rdma()
896 smc_conn_save_peer_info(smc, aclc); in smc_connect_rdma()
899 link = smc->conn.lnk; in smc_connect_rdma()
904 struct smc_link *l = &smc->conn.lgr->lnk[i]; in smc_connect_rdma()
920 smc_switch_link_and_count(&smc->conn, link); in smc_connect_rdma()
924 if (smc_buf_create(smc, false)) { in smc_connect_rdma()
932 if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) { in smc_connect_rdma()
937 smc_close_init(smc); in smc_connect_rdma()
938 smc_rx_init(smc); in smc_connect_rdma()
946 if (smcr_lgr_reg_rmbs(link, smc->conn.rmb_desc)) { in smc_connect_rdma()
951 smc_rmb_sync_sg_for_device(&smc->conn); in smc_connect_rdma()
963 reason_code = smc_clc_send_confirm(smc, ini->first_contact_local, in smc_connect_rdma()
968 smc_tx_init(smc); in smc_connect_rdma()
973 reason_code = smcr_clnt_conf_first_link(smc); in smc_connect_rdma()
980 smc_copy_sock_settings_to_clc(smc); in smc_connect_rdma()
981 smc->connect_nonblock = 0; in smc_connect_rdma()
982 if (smc->sk.sk_state == SMC_INIT) in smc_connect_rdma()
983 smc->sk.sk_state = SMC_ACTIVE; in smc_connect_rdma()
987 smc_conn_abort(smc, ini->first_contact_local); in smc_connect_rdma()
989 smc->connect_nonblock = 0; in smc_connect_rdma()
1014 static int smc_connect_ism(struct smc_sock *smc, in smc_connect_ism() argument
1036 rc = smc_conn_create(smc, ini); in smc_connect_ism()
1043 rc = smc_buf_create(smc, true); in smc_connect_ism()
1049 smc_conn_save_peer_info(smc, aclc); in smc_connect_ism()
1050 smc_close_init(smc); in smc_connect_ism()
1051 smc_rx_init(smc); in smc_connect_ism()
1052 smc_tx_init(smc); in smc_connect_ism()
1061 rc = smc_clc_send_confirm(smc, ini->first_contact_local, in smc_connect_ism()
1067 smc_copy_sock_settings_to_clc(smc); in smc_connect_ism()
1068 smc->connect_nonblock = 0; in smc_connect_ism()
1069 if (smc->sk.sk_state == SMC_INIT) in smc_connect_ism()
1070 smc->sk.sk_state = SMC_ACTIVE; in smc_connect_ism()
1074 smc_conn_abort(smc, ini->first_contact_local); in smc_connect_ism()
1076 smc->connect_nonblock = 0; in smc_connect_ism()
1107 static int __smc_connect(struct smc_sock *smc) in __smc_connect() argument
1116 if (smc->use_fallback) in __smc_connect()
1117 return smc_connect_fallback(smc, smc->fallback_rsn); in __smc_connect()
1120 if (!tcp_sk(smc->clcsock->sk)->syn_smc) in __smc_connect()
1121 return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC); in __smc_connect()
1124 if (using_ipsec(smc)) in __smc_connect()
1125 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC, in __smc_connect()
1130 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM, in __smc_connect()
1139 if (smc_vlan_by_tcpsk(smc->clcsock, ini)) { in __smc_connect()
1149 rc = smc_find_proposal_devices(smc, ini); in __smc_connect()
1162 rc = smc_connect_clc(smc, aclc2, ini); in __smc_connect()
1175 rc = smc_connect_rdma(smc, aclc, ini); in __smc_connect()
1178 rc = smc_connect_ism(smc, aclc, ini); in __smc_connect()
1183 SMC_STAT_CLNT_SUCC_INC(sock_net(smc->clcsock->sk), aclc); in __smc_connect()
1184 smc_connect_ism_vlan_cleanup(smc, ini); in __smc_connect()
1190 smc_connect_ism_vlan_cleanup(smc, ini); in __smc_connect()
1194 return smc_connect_decline_fallback(smc, rc, version); in __smc_connect()
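
__smc_connect() is a decision ladder: cheap exits to plain TCP first (already in fallback, peer offered no SMC in the TCP handshake), hard declines next (IPsec in use, no memory for the init structure), then device discovery, the CLC proposal/accept exchange, and finally transport-specific setup. A condensed, hedged sketch of the ladder; the SMC-D vs. SMC-R dispatch condition is an assumption:

	if (smc->use_fallback)			/* already decided earlier */
		return smc_connect_fallback(smc, smc->fallback_rsn);
	if (!tcp_sk(smc->clcsock->sk)->syn_smc)	/* peer did not offer SMC */
		return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);
	if (using_ipsec(smc))
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC,
						    version);

	rc = smc_find_proposal_devices(smc, ini);	/* ISM and/or RDMA */
	if (!rc)
		rc = smc_connect_clc(smc, aclc2, ini);	/* CLC handshake */
	if (!rc)
		rc = ini->is_smcd ?			/* assumption */
			smc_connect_ism(smc, aclc, ini) :
			smc_connect_rdma(smc, aclc, ini);
	smc_connect_ism_vlan_cleanup(smc, ini);
	if (rc)
		return smc_connect_decline_fallback(smc, rc, version);
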
1199 struct smc_sock *smc = container_of(work, struct smc_sock, in smc_connect_work() local
1201 long timeo = smc->sk.sk_sndtimeo; in smc_connect_work()
1206 lock_sock(smc->clcsock->sk); in smc_connect_work()
1207 if (smc->clcsock->sk->sk_err) { in smc_connect_work()
1208 smc->sk.sk_err = smc->clcsock->sk->sk_err; in smc_connect_work()
1209 } else if ((1 << smc->clcsock->sk->sk_state) & in smc_connect_work()
1211 rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo); in smc_connect_work()
1213 ((1 << smc->clcsock->sk->sk_state) & in smc_connect_work()
1217 release_sock(smc->clcsock->sk); in smc_connect_work()
1218 lock_sock(&smc->sk); in smc_connect_work()
1219 if (rc != 0 || smc->sk.sk_err) { in smc_connect_work()
1220 smc->sk.sk_state = SMC_CLOSED; in smc_connect_work()
1222 smc->sk.sk_err = EPIPE; in smc_connect_work()
1224 smc->sk.sk_err = -sock_intr_errno(timeo); in smc_connect_work()
1225 sock_put(&smc->sk); /* passive closing */ in smc_connect_work()
1229 rc = __smc_connect(smc); in smc_connect_work()
1231 smc->sk.sk_err = -rc; in smc_connect_work()
1234 if (!sock_flag(&smc->sk, SOCK_DEAD)) { in smc_connect_work()
1235 if (smc->sk.sk_err) { in smc_connect_work()
1236 smc->sk.sk_state_change(&smc->sk); in smc_connect_work()
1238 smc->clcsock->sk->sk_write_space(smc->clcsock->sk); in smc_connect_work()
1239 smc->sk.sk_write_space(&smc->sk); in smc_connect_work()
1242 release_sock(&smc->sk); in smc_connect_work()
1249 struct smc_sock *smc; in smc_connect() local
1252 smc = smc_sk(sk); in smc_connect()
1271 smc_copy_sock_settings_to_clc(smc); in smc_connect()
1272 tcp_sk(smc->clcsock->sk)->syn_smc = 1; in smc_connect()
1273 if (smc->connect_nonblock) { in smc_connect()
1277 rc = kernel_connect(smc->clcsock, addr, alen, flags); in smc_connect()
1281 sock_hold(&smc->sk); /* sock put in passive closing */ in smc_connect()
1282 if (smc->use_fallback) in smc_connect()
1285 if (queue_work(smc_hs_wq, &smc->connect_work)) in smc_connect()
1286 smc->connect_nonblock = 1; in smc_connect()
1289 rc = __smc_connect(smc); in smc_connect()
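
smc_connect() first drives the plain TCP handshake on the clcsock, with syn_smc set so the SMC capability is advertised in the TCP options, and only then runs the SMC handshake: inline for blocking sockets, or via connect_work for nonblocking ones. A sketch of the tail; the O_NONBLOCK test is an assumption around line 1285:

	smc_copy_sock_settings_to_clc(smc);
	tcp_sk(smc->clcsock->sk)->syn_smc = 1;	/* offer SMC in TCP options */
	rc = kernel_connect(smc->clcsock, addr, alen, flags);
	/* ... error handling elided ... */
	sock_hold(&smc->sk);	/* sock put in passive closing */
	if (smc->use_fallback)
		goto out;	/* plain TCP connect is all we need */
	if (flags & O_NONBLOCK) {			/* assumption */
		if (queue_work(smc_hs_wq, &smc->connect_work))
			smc->connect_nonblock = 1;
		rc = -EINPROGRESS;			/* assumption */
	} else {
		rc = __smc_connect(smc);
	}
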
1410 struct smc_sock *smc = smc_sk(sk); in smc_close_non_accepted() local
1417 __smc_release(smc); in smc_close_non_accepted()
1423 static int smcr_serv_conf_first_link(struct smc_sock *smc) in smcr_serv_conf_first_link() argument
1425 struct smc_link *link = smc->conn.lnk; in smcr_serv_conf_first_link()
1429 if (smcr_link_reg_rmb(link, smc->conn.rmb_desc)) in smcr_serv_conf_first_link()
1443 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc), in smcr_serv_conf_first_link()
1454 smc->conn.rmb_desc->is_conf_rkey = true; in smcr_serv_conf_first_link()
1501 this_cpu_inc(net->smc.smc_stats->srv_hshake_err_cnt); in smc_listen_out_err()
2108 struct smc_sock *smc; in smc_listen() local
2111 smc = smc_sk(sk); in smc_listen()
2116 smc->connect_nonblock) in smc_listen()
2127 smc_copy_sock_settings_to_clc(smc); in smc_listen()
2128 if (!smc->use_fallback) in smc_listen()
2129 tcp_sk(smc->clcsock->sk)->syn_smc = 1; in smc_listen()
2134 smc->clcsk_data_ready = smc->clcsock->sk->sk_data_ready; in smc_listen()
2135 smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready; in smc_listen()
2136 smc->clcsock->sk->sk_user_data = in smc_listen()
2137 (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY); in smc_listen()
2138 rc = kernel_listen(smc->clcsock, backlog); in smc_listen()
2140 smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready; in smc_listen()
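
smc_listen() intercepts the clcsock's sk_data_ready: the original callback is saved (line 2134) for chaining and rollback (line 2140), and sk_user_data carries the smc_sock back-pointer, tagged SK_USER_DATA_NOCOPY so it is not copied onto accepted child sockets. The replacement callback itself is not in this listing; a hedged sketch of what it plausibly does, queueing the listen worker initialized back at line 249:

static void smc_clcsock_data_ready(struct sock *listen_clcsock)
{
	struct smc_sock *smc = smc_clcsock_user_data(listen_clcsock);
	/* ^ hypothetical helper: read sk_user_data, strip the tag bits */

	if (!smc)
		return;
	smc->clcsk_data_ready(listen_clcsock);	/* chain original callback */
	if (smc->sk.sk_state == SMC_LISTEN) {
		sock_hold(&smc->sk);	/* matching put in the worker */
		if (!queue_work(smc_hs_wq, &smc->tcp_listen_work))
			sock_put(&smc->sk);
	}
}
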
2225 struct smc_sock *smc; in smc_getname() local
2231 smc = smc_sk(sock->sk); in smc_getname()
2233 return smc->clcsock->ops->getname(smc->clcsock, addr, peer); in smc_getname()
2239 struct smc_sock *smc; in smc_sendmsg() local
2242 smc = smc_sk(sk); in smc_sendmsg()
2250 if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) { in smc_sendmsg()
2251 smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP); in smc_sendmsg()
2258 if (smc->use_fallback) { in smc_sendmsg()
2259 rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len); in smc_sendmsg()
2261 rc = smc_tx_sendmsg(smc, msg, len); in smc_sendmsg()
2262 SMC_STAT_TX_PAYLOAD(smc, len, rc); in smc_sendmsg()
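
The send path dispatches once per call: fallback sockets go straight to the TCP sendmsg, native sockets through smc_tx_sendmsg() plus TX payload statistics. The SMC_INIT check at line 2250 is plausibly the MSG_FASTOPEN special case, where an unconnected socket is forced into fallback because TCP fast open cannot be carried over SMC; a hedged sketch:

	if (msg->msg_flags & MSG_FASTOPEN) {		/* assumption */
		if (sk->sk_state == SMC_INIT && !smc->connect_nonblock)
			smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
		else
			return -EINVAL;			/* assumption */
	}
	if (smc->use_fallback) {
		rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
	} else {
		rc = smc_tx_sendmsg(smc, msg, len);
		SMC_STAT_TX_PAYLOAD(smc, len, rc);
	}
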
2273 struct smc_sock *smc; in smc_recvmsg() local
2276 smc = smc_sk(sk); in smc_recvmsg()
2293 if (smc->use_fallback) { in smc_recvmsg()
2294 rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags); in smc_recvmsg()
2297 rc = smc_rx_recvmsg(smc, msg, NULL, len, flags); in smc_recvmsg()
2298 SMC_STAT_RX_PAYLOAD(smc, rc, rc); in smc_recvmsg()
2323 struct smc_sock *smc; in smc_poll() local
2329 smc = smc_sk(sock->sk); in smc_poll()
2330 if (smc->use_fallback) { in smc_poll()
2332 mask = smc->clcsock->ops->poll(file, smc->clcsock, wait); in smc_poll()
2333 sk->sk_err = smc->clcsock->sk->sk_err; in smc_poll()
2345 } else if (smc->use_fallback) { /* as result of connect_work()*/ in smc_poll()
2346 mask |= smc->clcsock->ops->poll(file, smc->clcsock, in smc_poll()
2348 sk->sk_err = smc->clcsock->sk->sk_err; in smc_poll()
2351 atomic_read(&smc->conn.sndbuf_space)) || in smc_poll()
2358 if (atomic_read(&smc->conn.bytes_to_rcv)) in smc_poll()
2364 if (smc->conn.urg_state == SMC_URG_VALID) in smc_poll()
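
Poll splits the same way: in fallback the clcsock's poll mask is taken verbatim (with sk_err mirrored back), while native sockets derive writability from sndbuf_space, readability from bytes_to_rcv, and EPOLLPRI from the urgent-data state. A sketch of the native mask assembly; the exact EPOLL* pairings are assumptions inferred from the fields being read:

	if ((sk->sk_state != SMC_INIT &&
	     atomic_read(&smc->conn.sndbuf_space)) ||
	    sk->sk_shutdown & SEND_SHUTDOWN)
		mask |= EPOLLOUT | EPOLLWRNORM;		/* room to send */
	if (atomic_read(&smc->conn.bytes_to_rcv))
		mask |= EPOLLIN | EPOLLRDNORM;		/* data to read */
	if (smc->conn.urg_state == SMC_URG_VALID)
		mask |= EPOLLPRI;			/* urgent byte pending */
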
2376 struct smc_sock *smc; in smc_shutdown() local
2381 smc = smc_sk(sk); in smc_shutdown()
2396 if (smc->use_fallback) { in smc_shutdown()
2397 rc = kernel_sock_shutdown(smc->clcsock, how); in smc_shutdown()
2398 sk->sk_shutdown = smc->clcsock->sk->sk_shutdown; in smc_shutdown()
2406 rc = smc_close_active(smc); in smc_shutdown()
2412 rc = smc_close_shutdown_write(smc); in smc_shutdown()
2419 if (do_shutdown && smc->clcsock) in smc_shutdown()
2420 rc1 = kernel_sock_shutdown(smc->clcsock, how); in smc_shutdown()
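
Shutdown mirrors the dispatch again: fallback delegates to kernel_sock_shutdown() and copies sk_shutdown back; native sockets run the SMC close protocol, full close for SHUT_RDWR, write-side close for SHUT_WR, then optionally shut the clcsock down too. A sketch of the native switch; its shape is an assumption around the calls at lines 2406-2420:

	switch (how) {
	case SHUT_RDWR:		/* shutdown in both directions */
		rc = smc_close_active(smc);
		break;
	case SHUT_WR:
		rc = smc_close_shutdown_write(smc);
		break;
	case SHUT_RD:
		rc = 0;		/* peer is not involved in a read shutdown */
		break;
	}
	if (do_shutdown && smc->clcsock)	/* also shut down internal TCP */
		rc1 = kernel_sock_shutdown(smc->clcsock, how);
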
2433 struct smc_sock *smc; in smc_setsockopt() local
2439 smc = smc_sk(sk); in smc_setsockopt()
2444 if (unlikely(!smc->clcsock->ops->setsockopt)) in smc_setsockopt()
2447 rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname, in smc_setsockopt()
2449 if (smc->clcsock->sk->sk_err) { in smc_setsockopt()
2450 sk->sk_err = smc->clcsock->sk->sk_err; in smc_setsockopt()
2460 if (rc || smc->use_fallback) in smc_setsockopt()
2468 if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) { in smc_setsockopt()
2469 smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP); in smc_setsockopt()
2479 SMC_STAT_INC(smc, ndly_cnt); in smc_setsockopt()
2480 mod_delayed_work(smc->conn.lgr->tx_wq, in smc_setsockopt()
2481 &smc->conn.tx_work, 0); in smc_setsockopt()
2490 SMC_STAT_INC(smc, cork_cnt); in smc_setsockopt()
2491 mod_delayed_work(smc->conn.lgr->tx_wq, in smc_setsockopt()
2492 &smc->conn.tx_work, 0); in smc_setsockopt()
2497 smc->sockopt_defer_accept = val; in smc_setsockopt()
2511 struct smc_sock *smc; in smc_getsockopt() local
2513 smc = smc_sk(sock->sk); in smc_getsockopt()
2515 if (unlikely(!smc->clcsock->ops->getsockopt)) in smc_getsockopt()
2517 return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname, in smc_getsockopt()
2526 struct smc_sock *smc; in smc_ioctl() local
2529 smc = smc_sk(sock->sk); in smc_ioctl()
2530 conn = &smc->conn; in smc_ioctl()
2531 lock_sock(&smc->sk); in smc_ioctl()
2532 if (smc->use_fallback) { in smc_ioctl()
2533 if (!smc->clcsock) { in smc_ioctl()
2534 release_sock(&smc->sk); in smc_ioctl()
2537 answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg); in smc_ioctl()
2538 release_sock(&smc->sk); in smc_ioctl()
2543 if (smc->sk.sk_state == SMC_LISTEN) { in smc_ioctl()
2544 release_sock(&smc->sk); in smc_ioctl()
2547 if (smc->sk.sk_state == SMC_INIT || in smc_ioctl()
2548 smc->sk.sk_state == SMC_CLOSED) in smc_ioctl()
2551 answ = atomic_read(&smc->conn.bytes_to_rcv); in smc_ioctl()
2555 if (smc->sk.sk_state == SMC_LISTEN) { in smc_ioctl()
2556 release_sock(&smc->sk); in smc_ioctl()
2559 if (smc->sk.sk_state == SMC_INIT || in smc_ioctl()
2560 smc->sk.sk_state == SMC_CLOSED) in smc_ioctl()
2563 answ = smc->conn.sndbuf_desc->len - in smc_ioctl()
2564 atomic_read(&smc->conn.sndbuf_space); in smc_ioctl()
2568 if (smc->sk.sk_state == SMC_LISTEN) { in smc_ioctl()
2569 release_sock(&smc->sk); in smc_ioctl()
2572 if (smc->sk.sk_state == SMC_INIT || in smc_ioctl()
2573 smc->sk.sk_state == SMC_CLOSED) in smc_ioctl()
2576 answ = smc_tx_prepared_sends(&smc->conn); in smc_ioctl()
2579 if (smc->sk.sk_state == SMC_LISTEN) { in smc_ioctl()
2580 release_sock(&smc->sk); in smc_ioctl()
2583 if (smc->sk.sk_state == SMC_INIT || in smc_ioctl()
2584 smc->sk.sk_state == SMC_CLOSED) { in smc_ioctl()
2594 release_sock(&smc->sk); in smc_ioctl()
2597 release_sock(&smc->sk); in smc_ioctl()
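
The ioctl cases answer the classic TCP queue queries from the connection's cursor accounting: bytes_to_rcv for inbound data, send-buffer length minus free space for unconfirmed outbound data, and smc_tx_prepared_sends() for data staged but not yet sent; LISTEN sockets get an error, INIT/CLOSED answer 0. A skeleton of the native path; the SIOC* case labels are assumptions inferred from what each branch computes:

	switch (cmd) {
	case SIOCINQ:		/* assumption: bytes queued for reading */
		answ = atomic_read(&smc->conn.bytes_to_rcv);
		break;
	case SIOCOUTQ:		/* assumption: bytes sent but unconfirmed */
		answ = smc->conn.sndbuf_desc->len -
		       atomic_read(&smc->conn.sndbuf_space);
		break;
	case SIOCOUTQNSD:	/* assumption: bytes prepared, not yet sent */
		answ = smc_tx_prepared_sends(&smc->conn);
		break;
	default:
		release_sock(&smc->sk);
		return -ENOIOCTLCMD;
	}
	release_sock(&smc->sk);
	return put_user(answ, (int __user *)arg);	/* assumption */
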
2606 struct smc_sock *smc; in smc_sendpage() local
2609 smc = smc_sk(sk); in smc_sendpage()
2616 if (smc->use_fallback) { in smc_sendpage()
2617 rc = kernel_sendpage(smc->clcsock, page, offset, in smc_sendpage()
2620 SMC_STAT_INC(smc, sendpage_cnt); in smc_sendpage()
2639 struct smc_sock *smc; in smc_splice_read() local
2642 smc = smc_sk(sk); in smc_splice_read()
2659 if (smc->use_fallback) { in smc_splice_read()
2660 rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos, in smc_splice_read()
2671 SMC_STAT_INC(smc, splice_cnt); in smc_splice_read()
2672 rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags); in smc_splice_read()
2707 struct smc_sock *smc; in smc_create() local
2726 smc = smc_sk(sk); in smc_create()
2727 smc->use_fallback = false; /* assume rdma capability first */ in smc_create()
2728 smc->fallback_rsn = 0; in smc_create()
2730 &smc->clcsock); in smc_create()
2735 smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE); in smc_create()
2736 smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE); in smc_create()
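
Finally, creating an AF_SMC socket always creates the companion kernel TCP socket that will carry the CLC handshake and any fallback traffic, then seeds the SMC buffer sizes from it. A sketch of that step, reconstructed around the partial line 2730; the error-path call is an assumption:

	smc->use_fallback = false;	/* assume rdma capability first */
	smc->fallback_rsn = 0;
	rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
			      &smc->clcsock);
	if (rc) {
		sk_common_release(sk);			/* assumption */
		goto out;
	}
	smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
	smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);
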