Lines matching defs:smc

A cross-reference listing of lines matching the identifier smc, taken from the kernel's AF_SMC socket-family implementation (net/smc/af_smc.c); each entry is prefixed with its line number in that source file.

19 #define KMSG_COMPONENT "smc"
34 #include <net/smc.h>
41 #include "smc.h"
86 sock_net(skb->sk)->smc.limit_smc_hs))
100 sock_net(skb->sk)->smc.limit_smc_hs = true;
106 sock_net(skb->sk)->smc.limit_smc_hs = false;
112 struct smc_sock *smc = smc_sk(sk);
114 smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
124 struct smc_sock *smc;
127 smc = smc_clcsock_user_data(sk);
129 if (READ_ONCE(sk->sk_ack_backlog) + atomic_read(&smc->queued_smc_hs) >
133 if (sk_acceptq_is_full(&smc->sk)) {
139 child = smc->ori_af_ops->syn_recv_sock(sk, skb, req, dst, req_unhash,
141 /* child must not inherit smc or its ops */
147 inet_csk(child)->icsk_af_ops = smc->ori_af_ops;
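The fragments above (file lines 86-147) belong to the handshake-limiting path: when the per-netns limit_smc_hs switch is on and the accept backlog plus in-flight SMC handshakes would exceed the listen limit, the child socket is created without SMC and inherits the saved ori_af_ops. A minimal userspace sketch of that admission check; all names and numbers here are invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical model of the backlog check: refuse a new SMC
     * handshake when queued handshakes plus the regular accept
     * backlog would exceed the listen() limit, and fall back to TCP.
     */
    struct listener_model {
            int ack_backlog;     /* connections waiting in accept queue */
            int queued_smc_hs;   /* SMC handshakes still in flight */
            int max_ack_backlog; /* listen() backlog limit */
            bool limit_smc_hs;   /* per-netns limit_smc_hs switch */
    };

    static bool smc_hs_limited(const struct listener_model *l)
    {
            return l->limit_smc_hs &&
                   l->ack_backlog + l->queued_smc_hs > l->max_ack_backlog;
    }

    int main(void)
    {
            struct listener_model l = {
                    .ack_backlog = 90, .queued_smc_hs = 20,
                    .max_ack_backlog = 100, .limit_smc_hs = true,
            };

            printf("fall back to TCP: %s\n", smc_hs_limited(&l) ? "yes" : "no");
            return 0;
    }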
159 const struct smc_sock *smc;
161 smc = smc_clcsock_user_data(sk);
163 if (!smc)
213 struct smc_sock *smc = smc_sk(sk);
215 if (smc->conn.tx_in_release_sock) {
216 smc_tx_pending(&smc->conn);
217 smc->conn.tx_in_release_sock = false;
247 static void smc_fback_restore_callbacks(struct smc_sock *smc)
249 struct sock *clcsk = smc->clcsock->sk;
254 smc_clcsock_restore_cb(&clcsk->sk_state_change, &smc->clcsk_state_change);
255 smc_clcsock_restore_cb(&clcsk->sk_data_ready, &smc->clcsk_data_ready);
256 smc_clcsock_restore_cb(&clcsk->sk_write_space, &smc->clcsk_write_space);
257 smc_clcsock_restore_cb(&clcsk->sk_error_report, &smc->clcsk_error_report);
262 static void smc_restore_fallback_changes(struct smc_sock *smc)
264 if (smc->clcsock->file) { /* non-accepted sockets have no file yet */
265 smc->clcsock->file->private_data = smc->sk.sk_socket;
266 smc->clcsock->file = NULL;
267 smc_fback_restore_callbacks(smc);
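smc_fback_restore_callbacks() (file lines 247-257) undoes the fallback callback replacement: each of the four saved clcsock callbacks (state_change, data_ready, write_space, error_report) is put back from its stashed copy. A self-contained model of the save/replace/restore pattern, with simplified stand-in types:

    #include <stdio.h>

    typedef void (*sk_callback_t)(void *sk);

    struct sock_model {
            sk_callback_t data_ready;
    };

    static void tcp_data_ready(void *sk) { (void)sk; puts("tcp: data ready"); }
    static void smc_data_ready(void *sk) { (void)sk; puts("smc: forward wakeup"); }

    /* save the original handler, then install the wrapper */
    static void replace_cb(sk_callback_t *target, sk_callback_t new_cb,
                           sk_callback_t *saved)
    {
            *saved = *target;
            *target = new_cb;
    }

    /* put the saved handler back (fallback undo / release) */
    static void restore_cb(sk_callback_t *target, sk_callback_t *saved)
    {
            *target = *saved;
    }

    int main(void)
    {
            struct sock_model clcsk = { .data_ready = tcp_data_ready };
            sk_callback_t saved;

            replace_cb(&clcsk.data_ready, smc_data_ready, &saved);
            clcsk.data_ready(&clcsk);          /* wrapper runs */
            restore_cb(&clcsk.data_ready, &saved);
            clcsk.data_ready(&clcsk);          /* original runs again */
            return 0;
    }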
271 static int __smc_release(struct smc_sock *smc)
273 struct sock *sk = &smc->sk;
276 if (!smc->use_fallback) {
277 rc = smc_close_active(smc);
287 rc = kernel_sock_shutdown(smc->clcsock,
293 smc_restore_fallback_changes(smc);
299 if (smc->clcsock) {
301 smc_clcsock_release(smc);
304 if (!smc->use_fallback)
305 smc_conn_free(&smc->conn);
314 struct smc_sock *smc;
321 smc = smc_sk(sk);
326 if (smc->connect_nonblock && old_state == SMC_INIT)
327 tcp_abort(smc->clcsock->sk, ECONNABORTED);
329 if (cancel_work_sync(&smc->connect_work))
330 sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */
341 !smc->use_fallback)
342 smc_close_active_abort(smc);
344 rc = __smc_release(smc);
368 struct smc_sock *smc;
381 WRITE_ONCE(sk->sk_sndbuf, 2 * READ_ONCE(net->smc.sysctl_wmem));
382 WRITE_ONCE(sk->sk_rcvbuf, 2 * READ_ONCE(net->smc.sysctl_rmem));
383 smc = smc_sk(sk);
384 INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
385 INIT_WORK(&smc->connect_work, smc_connect_work);
386 INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
387 INIT_LIST_HEAD(&smc->accept_q);
388 spin_lock_init(&smc->accept_q_lock);
389 spin_lock_init(&smc->conn.send_lock);
391 mutex_init(&smc->clcsock_release_lock);
392 smc_init_saved_callbacks(smc);
402 struct smc_sock *smc;
405 smc = smc_sk(sk);
426 if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
429 smc->clcsock->sk->sk_reuse = sk->sk_reuse;
430 smc->clcsock->sk->sk_reuseport = sk->sk_reuseport;
431 rc = kernel_bind(smc->clcsock, uaddr, addr_len);
439 /* copy only relevant settings and flags of SOL_SOCKET level from smc to
440 * clc socket (since smc is not called for these options from net/core)
474 2 * READ_ONCE(nnet->smc.sysctl_wmem));
484 2 * READ_ONCE(nnet->smc.sysctl_rmem));
507 static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
509 smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
516 /* copy only settings and flags relevant for smc from clc to smc socket */
517 static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
519 smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
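The smc_copy_sock_settings_to_clc()/_to_smc() pair copies only a masked subset of socket flags between the SMC socket and its internal clc (TCP) socket, one mask per direction (SK_FLAGS_SMC_TO_CLC, SK_FLAGS_CLC_TO_SMC). A sketch of such a masked copy; the individual flag values are invented here:

    #include <stdio.h>

    #define SK_FLAG_KEEPOPEN (1UL << 0)
    #define SK_FLAG_LINGER   (1UL << 1)
    #define SK_FLAG_DEBUG    (1UL << 2)

    #define SK_FLAGS_SMC_TO_CLC (SK_FLAG_KEEPOPEN | SK_FLAG_LINGER)

    /* move only the bits selected by mask from src into *dst */
    static void copy_sock_settings(unsigned long *dst, unsigned long src,
                                   unsigned long mask)
    {
            *dst = (*dst & ~mask) | (src & mask);
    }

    int main(void)
    {
            unsigned long smc_flags = SK_FLAG_KEEPOPEN | SK_FLAG_DEBUG;
            unsigned long clc_flags = 0;

            copy_sock_settings(&clc_flags, smc_flags, SK_FLAGS_SMC_TO_CLC);
            printf("clc flags: %#lx\n", clc_flags); /* keepopen only */
            return 0;
    }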
595 static int smcr_clnt_conf_first_link(struct smc_sock *smc)
597 struct smc_link *link = smc->conn.lnk;
611 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
628 if (smc->conn.sndbuf_desc->is_vm) {
629 if (smcr_link_reg_buf(link, smc->conn.sndbuf_desc))
634 if (smcr_link_reg_buf(link, smc->conn.rmb_desc))
638 smc->conn.rmb_desc->is_conf_rkey = true;
655 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
677 static void smc_conn_save_peer_info_fce(struct smc_sock *smc,
689 if (smc->conn.lgr->is_smcd) {
690 memcpy(smc->conn.lgr->negotiated_eid, clc_v2->d1.eid,
695 memcpy(smc->conn.lgr->negotiated_eid, clc_v2->r1.eid,
701 smc->conn.lgr->peer_os = fce->os_type;
702 smc->conn.lgr->peer_smc_release = fce->release;
704 memcpy(smc->conn.lgr->peer_hostname, fce->hostname,
708 static void smcr_conn_save_peer_info(struct smc_sock *smc,
713 smc->conn.peer_rmbe_idx = clc->r0.rmbe_idx;
714 smc->conn.local_tx_ctrl.token = ntohl(clc->r0.rmbe_alert_token);
715 smc->conn.peer_rmbe_size = bufsize;
716 atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
717 smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
720 static void smcd_conn_save_peer_info(struct smc_sock *smc,
725 smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx;
726 smc->conn.peer_token = ntohll(clc->d0.token);
728 smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
729 atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
730 smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
733 static void smc_conn_save_peer_info(struct smc_sock *smc,
736 if (smc->conn.lgr->is_smcd)
737 smcd_conn_save_peer_info(smc, clc);
739 smcr_conn_save_peer_info(smc, clc);
740 smc_conn_save_peer_info_fce(smc, clc);
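The save_peer_info variants differ in their offset arithmetic: SMC-R RMBE indices are 1-based, so the transmit offset is bufsize * (idx - 1), while SMC-D indices are 0-based and the usable peer buffer shrinks by the CDC message header (file lines 717 and 728-730). A toy calculation; buffer and header sizes are examples, not protocol constants:

    #include <stdio.h>

    #define SMCD_CDC_MSG_SIZE 64 /* assumed header size, illustration only */

    int main(void)
    {
            unsigned int bufsize = 65536;

            unsigned int smcr_idx = 1;                 /* 1-based */
            unsigned int smcr_off = bufsize * (smcr_idx - 1);

            unsigned int smcd_idx = 1;                 /* 0-based */
            unsigned int smcd_off = bufsize * smcd_idx;
            unsigned int smcd_space = bufsize - SMCD_CDC_MSG_SIZE;

            printf("SMC-R: off=%u space=%u\n", smcr_off, bufsize);
            printf("SMC-D: off=%u space=%u\n", smcd_off, smcd_space);
            return 0;
    }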
754 static void smc_stat_inc_fback_rsn_cnt(struct smc_sock *smc,
760 if (fback_arr[cnt].fback_code == smc->fallback_rsn) {
765 fback_arr[cnt].fback_code = smc->fallback_rsn;
772 static void smc_stat_fallback(struct smc_sock *smc)
774 struct net *net = sock_net(&smc->sk);
776 mutex_lock(&net->smc.mutex_fback_rsn);
777 if (smc->listen_smc) {
778 smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->srv);
779 net->smc.fback_rsn->srv_fback_cnt++;
781 smc_stat_inc_fback_rsn_cnt(smc, net->smc.fback_rsn->clnt);
782 net->smc.fback_rsn->clnt_fback_cnt++;
784 mutex_unlock(&net->smc.mutex_fback_rsn);
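smc_stat_inc_fback_rsn_cnt() (file lines 754-765) keeps fallback statistics in a small fixed array: a slot already holding the socket's fallback_rsn is incremented, otherwise the first free slot adopts the new code. A compact model with an arbitrary array size:

    #include <stdio.h>

    #define FBACK_ARR_LEN 4 /* arbitrary for this sketch */

    struct fback_slot {
            unsigned int fback_code; /* 0 == unused */
            int count;
    };

    static void inc_fback_rsn(struct fback_slot *arr, unsigned int rsn)
    {
            for (int i = 0; i < FBACK_ARR_LEN; i++) {
                    if (arr[i].fback_code == rsn) { /* known reason */
                            arr[i].count++;
                            return;
                    }
                    if (!arr[i].fback_code) {       /* free slot: adopt */
                            arr[i].fback_code = rsn;
                            arr[i].count = 1;
                            return;
                    }
            }
            /* table full: this reason is not tracked */
    }

    int main(void)
    {
            struct fback_slot arr[FBACK_ARR_LEN] = { 0 };

            inc_fback_rsn(arr, 0x03010000); /* example reason code */
            inc_fback_rsn(arr, 0x03010000);
            printf("code %#x seen %d times\n", arr[0].fback_code, arr[0].count);
            return 0;
    }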
788 static void smc_fback_wakeup_waitqueue(struct smc_sock *smc, void *key)
793 wq = rcu_dereference(smc->sk.sk_wq);
797 /* wake up smc sk->sk_wq */
823 static void smc_fback_forward_wakeup(struct smc_sock *smc, struct sock *clcsk,
840 smc_fback_wakeup_waitqueue(smc, mark.key);
847 struct smc_sock *smc;
850 smc = smc_clcsock_user_data(clcsk);
851 if (smc)
852 smc_fback_forward_wakeup(smc, clcsk,
853 smc->clcsk_state_change);
859 struct smc_sock *smc;
862 smc = smc_clcsock_user_data(clcsk);
863 if (smc)
864 smc_fback_forward_wakeup(smc, clcsk,
865 smc->clcsk_data_ready);
871 struct smc_sock *smc;
874 smc = smc_clcsock_user_data(clcsk);
875 if (smc)
876 smc_fback_forward_wakeup(smc, clcsk,
877 smc->clcsk_write_space);
883 struct smc_sock *smc;
886 smc = smc_clcsock_user_data(clcsk);
887 if (smc)
888 smc_fback_forward_wakeup(smc, clcsk,
889 smc->clcsk_error_report);
893 static void smc_fback_replace_callbacks(struct smc_sock *smc)
895 struct sock *clcsk = smc->clcsock->sk;
898 clcsk->sk_user_data = (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
901 &smc->clcsk_state_change);
903 &smc->clcsk_data_ready);
905 &smc->clcsk_write_space);
907 &smc->clcsk_error_report);
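File line 898 stores the smc_sock pointer in the clcsock's sk_user_data with a flag OR'ed into its low bits; smc_clcsock_user_data(), used throughout this listing, masks those bits off again. The trick relies on pointer alignment keeping the low bits zero. A userspace model; the flag and mask values are illustrative, not the kernel's definitions:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SK_USER_DATA_NOCOPY  1UL    /* illustrative flag value */
    #define SK_USER_DATA_PTRMASK (~3UL) /* illustrative mask */

    struct smc_sock_model { int dummy; };

    int main(void)
    {
            static struct smc_sock_model smc; /* aligned, low bits zero */
            void *sk_user_data;
            struct smc_sock_model *back;

            /* store: OR the flag into the pointer's low bits */
            sk_user_data = (void *)((uintptr_t)&smc | SK_USER_DATA_NOCOPY);

            /* load: mask the flag bits off to recover the pointer */
            back = (void *)((uintptr_t)sk_user_data & SK_USER_DATA_PTRMASK);

            assert(back == &smc);
            printf("flag set: %lu\n",
                   (unsigned long)((uintptr_t)sk_user_data & SK_USER_DATA_NOCOPY));
            return 0;
    }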
912 static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code)
916 mutex_lock(&smc->clcsock_release_lock);
917 if (!smc->clcsock) {
922 smc->use_fallback = true;
923 smc->fallback_rsn = reason_code;
924 smc_stat_fallback(smc);
925 trace_smc_switch_to_fallback(smc, reason_code);
926 if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
927 smc->clcsock->file = smc->sk.sk_socket->file;
928 smc->clcsock->file->private_data = smc->clcsock;
929 smc->clcsock->wq.fasync_list =
930 smc->sk.sk_socket->wq.fasync_list;
933 * in smc sk->sk_wq and they should be woken up
936 smc_fback_replace_callbacks(smc);
939 mutex_unlock(&smc->clcsock_release_lock);
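The file swap in smc_switch_to_fallback() (file lines 926-930) is what makes fallback transparent: the struct file that used to resolve to the SMC socket is repointed at the clcsock, so subsequent read/write/poll calls reach TCP directly. A tiny model of that private_data repointing, with simplified stand-in types:

    #include <stdio.h>

    /* simplified stand-ins for struct socket and struct file */
    struct socket_model { const char *label; };
    struct file_model { struct socket_model *private_data; };

    int main(void)
    {
            struct socket_model smc_sock = { "smc" };
            struct socket_model clc_sock = { "tcp (clcsock)" };
            struct file_model file = { .private_data = &smc_sock };

            /* fallback: syscalls through this file now reach TCP */
            file.private_data = &clc_sock;
            printf("file resolves to: %s\n", file.private_data->label);
            return 0;
    }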
944 static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
946 struct net *net = sock_net(&smc->sk);
949 rc = smc_switch_to_fallback(smc, reason_code);
951 this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
952 if (smc->sk.sk_state == SMC_INIT)
953 sock_put(&smc->sk); /* passive closing */
956 smc_copy_sock_settings_to_clc(smc);
957 smc->connect_nonblock = 0;
958 if (smc->sk.sk_state == SMC_INIT)
959 smc->sk.sk_state = SMC_ACTIVE;
964 static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
967 struct net *net = sock_net(&smc->sk);
971 this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
972 if (smc->sk.sk_state == SMC_INIT)
973 sock_put(&smc->sk); /* passive closing */
977 rc = smc_clc_send_decline(smc, reason_code, version);
979 this_cpu_inc(net->smc.smc_stats->clnt_hshake_err_cnt);
980 if (smc->sk.sk_state == SMC_INIT)
981 sock_put(&smc->sk); /* passive closing */
985 return smc_connect_fallback(smc, reason_code);
988 static void smc_conn_abort(struct smc_sock *smc, int local_first)
990 struct smc_connection *conn = &smc->conn;
1004 static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
1010 smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
1020 static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
1023 smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
1046 static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
1064 smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) {
1083 static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
1091 static int smc_find_proposal_devices(struct smc_sock *smc,
1098 smc_find_ism_device(smc, ini) ||
1099 smc_connect_ism_vlan_setup(smc, ini))
1105 smc_find_rdma_device(smc, ini))
1115 smc_find_ism_v2_device_clnt(smc, ini))
1120 ini->smcrv2.saddr = smc->clcsock->sk->sk_rcv_saddr;
1122 smc->clcsock->sk->sk_family != AF_INET ||
1124 smc_find_rdma_device(smc, ini))
1141 static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc,
1157 static int smc_connect_clc(struct smc_sock *smc,
1164 rc = smc_clc_send_proposal(smc, ini);
1168 return smc_clc_wait_msg(smc, aclc2, SMC_CLC_MAX_ACCEPT_LEN,
1200 static int smc_connect_rdma_v2_prepare(struct smc_sock *smc,
1208 struct net *net = sock_net(&smc->sk);
1218 if (smc_ib_find_route(net, smc->clcsock->sk->sk_rcv_saddr,
1238 static int smc_connect_rdma(struct smc_sock *smc,
1255 reason_code = smc_connect_rdma_v2_prepare(smc, aclc, ini);
1260 reason_code = smc_conn_create(smc, ini);
1266 smc_conn_save_peer_info(smc, aclc);
1269 link = smc->conn.lnk;
1274 struct smc_link *l = &smc->conn.lgr->lnk[i];
1290 smc_switch_link_and_count(&smc->conn, link);
1294 if (smc_buf_create(smc, false)) {
1302 if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) {
1307 smc_close_init(smc);
1308 smc_rx_init(smc);
1317 if (smc->conn.sndbuf_desc->is_vm) {
1318 if (smcr_lgr_reg_sndbufs(link, smc->conn.sndbuf_desc)) {
1323 if (smcr_lgr_reg_rmbs(link, smc->conn.rmb_desc)) {
1339 reason_code = smc_clc_send_confirm(smc, ini->first_contact_local,
1344 smc_tx_init(smc);
1349 reason_code = smcr_clnt_conf_first_link(smc);
1356 smc_copy_sock_settings_to_clc(smc);
1357 smc->connect_nonblock = 0;
1358 if (smc->sk.sk_state == SMC_INIT)
1359 smc->sk.sk_state = SMC_ACTIVE;
1363 smc_conn_abort(smc, ini->first_contact_local);
1365 smc->connect_nonblock = 0;
1390 static int smc_connect_ism(struct smc_sock *smc,
1422 rc = smc_conn_create(smc, ini);
1429 rc = smc_buf_create(smc, true);
1435 smc_conn_save_peer_info(smc, aclc);
1436 smc_close_init(smc);
1437 smc_rx_init(smc);
1438 smc_tx_init(smc);
1447 rc = smc_clc_send_confirm(smc, ini->first_contact_local,
1453 smc_copy_sock_settings_to_clc(smc);
1454 smc->connect_nonblock = 0;
1455 if (smc->sk.sk_state == SMC_INIT)
1456 smc->sk.sk_state = SMC_ACTIVE;
1460 smc_conn_abort(smc, ini->first_contact_local);
1462 smc->connect_nonblock = 0;
1493 static int __smc_connect(struct smc_sock *smc)
1502 if (smc->use_fallback)
1503 return smc_connect_fallback(smc, smc->fallback_rsn);
1506 if (!tcp_sk(smc->clcsock->sk)->syn_smc)
1507 return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);
1510 if (using_ipsec(smc))
1511 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC,
1516 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM,
1525 if (smc_vlan_by_tcpsk(smc->clcsock, ini)) {
1535 rc = smc_find_proposal_devices(smc, ini);
1548 rc = smc_connect_clc(smc, aclc2, ini);
1553 smc->sk.sk_err = ETIMEDOUT;
1558 /* check if smc modes and versions of CLC proposal and accept match */
1567 rc = smc_connect_rdma(smc, aclc, ini);
1570 rc = smc_connect_ism(smc, aclc, ini);
1575 SMC_STAT_CLNT_SUCC_INC(sock_net(smc->clcsock->sk), aclc);
1576 smc_connect_ism_vlan_cleanup(smc, ini);
1582 smc_connect_ism_vlan_cleanup(smc, ini);
1586 return smc_connect_decline_fallback(smc, rc, version);
1591 struct smc_sock *smc = container_of(work, struct smc_sock,
1593 long timeo = smc->sk.sk_sndtimeo;
1598 lock_sock(smc->clcsock->sk);
1599 if (smc->clcsock->sk->sk_err) {
1600 smc->sk.sk_err = smc->clcsock->sk->sk_err;
1601 } else if ((1 << smc->clcsock->sk->sk_state) &
1603 rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
1605 ((1 << smc->clcsock->sk->sk_state) &
1609 release_sock(smc->clcsock->sk);
1610 lock_sock(&smc->sk);
1611 if (rc != 0 || smc->sk.sk_err) {
1612 smc->sk.sk_state = SMC_CLOSED;
1614 smc->sk.sk_err = EPIPE;
1616 smc->sk.sk_err = ECONNREFUSED;
1618 smc->sk.sk_err = -sock_intr_errno(timeo);
1619 sock_put(&smc->sk); /* passive closing */
1623 rc = __smc_connect(smc);
1625 smc->sk.sk_err = -rc;
1628 if (!sock_flag(&smc->sk, SOCK_DEAD)) {
1629 if (smc->sk.sk_err) {
1630 smc->sk.sk_state_change(&smc->sk);
1632 smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
1633 smc->sk.sk_write_space(&smc->sk);
1636 release_sock(&smc->sk);
1643 struct smc_sock *smc;
1646 smc = smc_sk(sk);
1648 /* separate smc parameter checking to be safe */
1685 smc_copy_sock_settings_to_clc(smc);
1686 tcp_sk(smc->clcsock->sk)->syn_smc = 1;
1687 if (smc->connect_nonblock) {
1691 rc = kernel_connect(smc->clcsock, addr, alen, flags);
1695 if (smc->use_fallback) {
1699 sock_hold(&smc->sk); /* sock put in passive closing */
1701 if (queue_work(smc_hs_wq, &smc->connect_work))
1702 smc->connect_nonblock = 1;
1706 rc = __smc_connect(smc);
1756 /* new clcsock has inherited the smc listen-specific sk_data_ready
1842 struct smc_sock *smc = smc_sk(sk);
1849 __smc_release(smc);
1855 static int smcr_serv_conf_first_link(struct smc_sock *smc)
1857 struct smc_link *link = smc->conn.lnk;
1862 if (smc->conn.sndbuf_desc->is_vm) {
1863 if (smcr_link_reg_buf(link, smc->conn.sndbuf_desc))
1868 if (smcr_link_reg_buf(link, smc->conn.rmb_desc))
1882 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
1893 smc->conn.rmb_desc->is_conf_rkey = true;
1946 this_cpu_inc(net->smc.smc_stats->srv_hshake_err_cnt);
2420 /* check if peer is smc capable */
2507 /* fce smc release version is needed in smc_listen_rdma_finish,
2591 struct smc_sock *smc;
2594 smc = smc_sk(sk);
2599 smc->connect_nonblock || sock->state != SS_UNCONNECTED)
2608 * them to the clc socket -- copy smc socket options to clc socket
2610 smc_copy_sock_settings_to_clc(smc);
2611 if (!smc->use_fallback)
2612 tcp_sk(smc->clcsock->sk)->syn_smc = 1;
2615 * smc-specific sk_data_ready function
2617 write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
2618 smc->clcsock->sk->sk_user_data =
2619 (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
2620 smc_clcsock_replace_cb(&smc->clcsock->sk->sk_data_ready,
2621 smc_clcsock_data_ready, &smc->clcsk_data_ready);
2622 write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
2625 smc->ori_af_ops = inet_csk(smc->clcsock->sk)->icsk_af_ops;
2627 smc->af_ops = *smc->ori_af_ops;
2628 smc->af_ops.syn_recv_sock = smc_tcp_syn_recv_sock;
2630 inet_csk(smc->clcsock->sk)->icsk_af_ops = &smc->af_ops;
2632 if (smc->limit_smc_hs)
2633 tcp_sk(smc->clcsock->sk)->smc_hs_congested = smc_hs_congested;
2635 rc = kernel_listen(smc->clcsock, backlog);
2637 write_lock_bh(&smc->clcsock->sk->sk_callback_lock);
2638 smc_clcsock_restore_cb(&smc->clcsock->sk->sk_data_ready,
2639 &smc->clcsk_data_ready);
2640 smc->clcsock->sk->sk_user_data = NULL;
2641 write_unlock_bh(&smc->clcsock->sk->sk_callback_lock);
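smc_listen() (file lines 2591-2641) prepares the clcsock before kernel_listen(): it installs an SMC sk_data_ready under sk_callback_lock and swaps in a copy of the TCP af_ops whose syn_recv_sock is smc_tcp_syn_recv_sock, keeping ori_af_ops for child sockets. A sketch of the copy-and-override pattern, types simplified:

    #include <stdio.h>

    /* simplified stand-in for inet_connection_sock_af_ops */
    struct af_ops_model {
            const char *(*syn_recv_sock)(void);
    };

    static const char *tcp_syn_recv_sock(void) { return "plain tcp child"; }
    static const char *smc_syn_recv_sock_model(void) { return "smc-aware child"; }

    int main(void)
    {
            static const struct af_ops_model tcp_ops = {
                    .syn_recv_sock = tcp_syn_recv_sock,
            };
            const struct af_ops_model *ori_af_ops = &tcp_ops; /* saved */
            struct af_ops_model smc_af_ops = *ori_af_ops;     /* copy */

            smc_af_ops.syn_recv_sock = smc_syn_recv_sock_model; /* override */

            printf("listener uses: %s\n", smc_af_ops.syn_recv_sock());
            printf("children get:  %s\n", ori_af_ops->syn_recv_sock());
            return 0;
    }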
2726 struct smc_sock *smc;
2732 smc = smc_sk(sock->sk);
2734 return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
2740 struct smc_sock *smc;
2743 smc = smc_sk(sk);
2749 if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
2750 rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
2764 if (smc->use_fallback) {
2765 rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
2767 rc = smc_tx_sendmsg(smc, msg, len);
2768 SMC_STAT_TX_PAYLOAD(smc, len, rc);
2779 struct smc_sock *smc;
2782 smc = smc_sk(sk);
2799 if (smc->use_fallback) {
2800 rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
2803 rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
2804 SMC_STAT_RX_PAYLOAD(smc, rc, rc);
2829 struct smc_sock *smc;
2835 smc = smc_sk(sock->sk);
2836 if (smc->use_fallback) {
2838 mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
2839 sk->sk_err = smc->clcsock->sk->sk_err;
2851 } else if (smc->use_fallback) { /* as result of connect_work()*/
2852 mask |= smc->clcsock->ops->poll(file, smc->clcsock,
2854 sk->sk_err = smc->clcsock->sk->sk_err;
2857 atomic_read(&smc->conn.sndbuf_space)) ||
2864 if (atomic_read(&smc->conn.bytes_to_rcv))
2870 if (smc->conn.urg_state == SMC_URG_VALID)
2882 struct smc_sock *smc;
2887 smc = smc_sk(sk);
2913 if (smc->use_fallback) {
2914 rc = kernel_sock_shutdown(smc->clcsock, how);
2915 sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
2926 rc = smc_close_active(smc);
2932 rc = smc_close_shutdown_write(smc);
2939 if (do_shutdown && smc->clcsock)
2940 rc1 = kernel_sock_shutdown(smc->clcsock, how);
2956 struct smc_sock *smc;
2959 smc = smc_sk(sock->sk);
2971 val = smc->limit_smc_hs;
2989 struct smc_sock *smc;
2992 smc = smc_sk(sk);
3006 smc->limit_smc_hs = !!val;
3022 struct smc_sock *smc;
3030 smc = smc_sk(sk);
3035 mutex_lock(&smc->clcsock_release_lock);
3036 if (!smc->clcsock) {
3037 mutex_unlock(&smc->clcsock_release_lock);
3040 if (unlikely(!smc->clcsock->ops->setsockopt))
3043 rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
3045 if (smc->clcsock->sk->sk_err) {
3046 sk->sk_err = smc->clcsock->sk->sk_err;
3049 mutex_unlock(&smc->clcsock_release_lock);
3057 if (rc || smc->use_fallback)
3065 if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
3066 rc = smc_switch_to_fallback(smc, SMC_CLC_DECL_OPTUNSUPP);
3076 SMC_STAT_INC(smc, ndly_cnt);
3077 smc_tx_pending(&smc->conn);
3078 cancel_delayed_work(&smc->conn.tx_work);
3087 SMC_STAT_INC(smc, cork_cnt);
3088 smc_tx_pending(&smc->conn);
3089 cancel_delayed_work(&smc->conn.tx_work);
3094 smc->sockopt_defer_accept = val;
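In the setsockopt path above, TCP_NODELAY and TCP_CORK are forwarded to the clcsock and, on a native SMC connection, also acted on directly: pending transmit data is pushed and the delayed tx_work is cancelled. From userspace the call is the ordinary TCP one; a usage sketch, assuming an SMC-capable kernel:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef AF_SMC
    #define AF_SMC 43
    #endif

    int main(void)
    {
            int fd = socket(AF_SMC, SOCK_STREAM, 0);
            int one = 1;

            if (fd < 0) {
                    perror("socket(AF_SMC)");
                    return 1;
            }
            /* same option as for TCP; SMC flushes its pending sends */
            if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)) < 0)
                    perror("setsockopt(TCP_NODELAY)");
            close(fd);
            return 0;
    }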
3108 struct smc_sock *smc;
3114 smc = smc_sk(sock->sk);
3115 mutex_lock(&smc->clcsock_release_lock);
3116 if (!smc->clcsock) {
3117 mutex_unlock(&smc->clcsock_release_lock);
3121 if (unlikely(!smc->clcsock->ops->getsockopt)) {
3122 mutex_unlock(&smc->clcsock_release_lock);
3125 rc = smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
3127 mutex_unlock(&smc->clcsock_release_lock);
3136 struct smc_sock *smc;
3139 smc = smc_sk(sock->sk);
3140 conn = &smc->conn;
3141 lock_sock(&smc->sk);
3142 if (smc->use_fallback) {
3143 if (!smc->clcsock) {
3144 release_sock(&smc->sk);
3147 answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
3148 release_sock(&smc->sk);
3153 if (smc->sk.sk_state == SMC_LISTEN) {
3154 release_sock(&smc->sk);
3157 if (smc->sk.sk_state == SMC_INIT ||
3158 smc->sk.sk_state == SMC_CLOSED)
3161 answ = atomic_read(&smc->conn.bytes_to_rcv);
3165 if (smc->sk.sk_state == SMC_LISTEN) {
3166 release_sock(&smc->sk);
3169 if (smc->sk.sk_state == SMC_INIT ||
3170 smc->sk.sk_state == SMC_CLOSED)
3173 answ = smc->conn.sndbuf_desc->len -
3174 atomic_read(&smc->conn.sndbuf_space);
3178 if (smc->sk.sk_state == SMC_LISTEN) {
3179 release_sock(&smc->sk);
3182 if (smc->sk.sk_state == SMC_INIT ||
3183 smc->sk.sk_state == SMC_CLOSED)
3186 answ = smc_tx_prepared_sends(&smc->conn);
3189 if (smc->sk.sk_state == SMC_LISTEN) {
3190 release_sock(&smc->sk);
3193 if (smc->sk.sk_state == SMC_INIT ||
3194 smc->sk.sk_state == SMC_CLOSED) {
3204 release_sock(&smc->sk);
3207 release_sock(&smc->sk);
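The ioctl branch reports queue fill levels much like TCP: SIOCINQ answers with conn.bytes_to_rcv, SIOCOUTQ with the unsent part of the send buffer, and a listening socket is rejected with -EINVAL. A usage sketch; on a fresh, unconnected socket both counts should come back as zero (an assumption based on the SMC_INIT/SMC_CLOSED branches above):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <linux/sockios.h> /* SIOCINQ, SIOCOUTQ */
    #include <unistd.h>

    #ifndef AF_SMC
    #define AF_SMC 43
    #endif

    int main(void)
    {
            int fd = socket(AF_SMC, SOCK_STREAM, 0);
            int inq = 0, outq = 0;

            if (fd < 0) {
                    perror("socket(AF_SMC)");
                    return 1;
            }
            if (ioctl(fd, SIOCINQ, &inq) == 0 &&
                ioctl(fd, SIOCOUTQ, &outq) == 0)
                    printf("unread=%d unsent=%d\n", inq, outq);
            else
                    perror("ioctl");
            close(fd);
            return 0;
    }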
3223 struct smc_sock *smc;
3226 smc = smc_sk(sk);
3243 if (smc->use_fallback) {
3244 rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
3255 SMC_STAT_INC(smc, splice_cnt);
3256 rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
3290 struct smc_sock *smc;
3310 smc = smc_sk(sk);
3311 smc->use_fallback = false; /* assume rdma capability first */
3312 smc->fallback_rsn = 0;
3315 smc->limit_smc_hs = net->smc.limit_smc_hs;
3320 &smc->clcsock);
3326 /* smc_clcsock_release() does not wait smc->clcsock->sk's
3328 * smc->sk is close()d, and TCP timers can be fired later,
3331 sk = smc->clcsock->sk;
3337 smc->clcsock = clcsock;
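smc_create() is the kernel half of socket(AF_SMC, ...): it allocates the SMC sock, assumes native SMC capability first (use_fallback = false), and creates the internal TCP clcsock next to it. A minimal userspace usage sketch; AF_SMC is 43 in the Linux headers, and the call fails on kernels built without CONFIG_SMC:

    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    #ifndef AF_SMC
    #define AF_SMC 43 /* reuses AF_INET/AF_INET6 addressing */
    #endif

    int main(void)
    {
            /* kernel side: smc_create() also sets up the clcsock */
            int fd = socket(AF_SMC, SOCK_STREAM, 0);

            if (fd < 0) {
                    perror("socket(AF_SMC)"); /* e.g. no CONFIG_SMC */
                    return 1;
            }
            puts("AF_SMC socket created");
            close(fd);
            return 0;
    }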
3388 /* replace tcp socket to smc */
3408 .name = "smc",
3602 MODULE_DESCRIPTION("smc socket address family");
3605 MODULE_ALIAS_TCP_ULP("smc");