Lines matching defs:smc (SMC socket layer; the leading numbers are the source line numbers of the matching lines, most likely from net/smc/af_smc.c)

19 #define KMSG_COMPONENT "smc"
33 #include <net/smc.h>
40 #include "smc.h"
67 struct smc_sock *smc = smc_sk(sk);
69 smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
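Line 69 is the body of smc_set_keepalive(), which simply forwards SO_KEEPALIVE to the internal TCP ("clc") socket. The usual TCP idiom therefore works unchanged on an AF_SMC descriptor; a minimal hedged sketch, assuming fd is an AF_SMC socket created elsewhere (helper name is illustrative):

/* Hedged sketch: SO_KEEPALIVE on an SMC socket is passed through to the
 * underlying TCP socket (smc_set_keepalive, line 69). */
#include <stdio.h>
#include <sys/socket.h>

int smc_enable_keepalive(int fd)          /* fd: AF_SMC socket (assumed) */
{
	int one = 1;

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &one, sizeof(one)) < 0) {
		perror("setsockopt(SO_KEEPALIVE)");
		return -1;
	}
	return 0;
}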
131 static void smc_restore_fallback_changes(struct smc_sock *smc)
133 if (smc->clcsock->file) { /* non-accepted sockets have no file yet */
134 smc->clcsock->file->private_data = smc->sk.sk_socket;
135 smc->clcsock->file = NULL;
139 static int __smc_release(struct smc_sock *smc)
141 struct sock *sk = &smc->sk;
144 if (!smc->use_fallback) {
145 rc = smc_close_active(smc);
155 rc = kernel_sock_shutdown(smc->clcsock,
161 smc_restore_fallback_changes(smc);
167 if (smc->clcsock) {
169 smc_clcsock_release(smc);
172 if (!smc->use_fallback)
173 smc_conn_free(&smc->conn);
182 struct smc_sock *smc;
189 smc = smc_sk(sk);
194 if (smc->connect_nonblock && old_state == SMC_INIT)
195 tcp_abort(smc->clcsock->sk, ECONNABORTED);
197 if (cancel_work_sync(&smc->connect_work))
198 sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */
209 !smc->use_fallback)
210 smc_close_active_abort(smc);
212 rc = __smc_release(smc);
238 struct smc_sock *smc;
251 smc = smc_sk(sk);
252 INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
253 INIT_WORK(&smc->connect_work, smc_connect_work);
254 INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
255 INIT_LIST_HEAD(&smc->accept_q);
256 spin_lock_init(&smc->accept_q_lock);
257 spin_lock_init(&smc->conn.send_lock);
260 mutex_init(&smc->clcsock_release_lock);
270 struct smc_sock *smc;
273 smc = smc_sk(sk);
294 if (sk->sk_state != SMC_INIT || smc->connect_nonblock)
297 smc->clcsock->sk->sk_reuse = sk->sk_reuse;
298 rc = kernel_bind(smc->clcsock, uaddr, addr_len);
340 /* copy only relevant settings and flags of SOL_SOCKET level from smc to
341 * clc socket (since smc is not called for these options from net/core)
343 static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
345 smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
352 /* copy only settings and flags relevant for smc from clc to smc socket */
353 static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
355 smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
393 static int smcr_clnt_conf_first_link(struct smc_sock *smc)
395 struct smc_link *link = smc->conn.lnk;
409 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
425 if (smcr_link_reg_rmb(link, smc->conn.rmb_desc))
429 smc->conn.rmb_desc->is_conf_rkey = true;
445 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
456 static void smcr_conn_save_peer_info(struct smc_sock *smc,
461 smc->conn.peer_rmbe_idx = clc->r0.rmbe_idx;
462 smc->conn.local_tx_ctrl.token = ntohl(clc->r0.rmbe_alert_token);
463 smc->conn.peer_rmbe_size = bufsize;
464 atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
465 smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
478 static void smcd_conn_save_peer_info(struct smc_sock *smc,
483 smc->conn.peer_rmbe_idx = clc->d0.dmbe_idx;
484 smc->conn.peer_token = clc->d0.token;
486 smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
487 atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
488 smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
497 memcpy(smc->conn.lgr->negotiated_eid, clc_v2->eid,
499 smc->conn.lgr->peer_os = fce->os_type;
500 smc->conn.lgr->peer_smc_release = fce->release;
502 memcpy(smc->conn.lgr->peer_hostname, fce->hostname,
507 static void smc_conn_save_peer_info(struct smc_sock *smc,
510 if (smc->conn.lgr->is_smcd)
511 smcd_conn_save_peer_info(smc, clc);
513 smcr_conn_save_peer_info(smc, clc);
526 static void smc_switch_to_fallback(struct smc_sock *smc)
528 wait_queue_head_t *smc_wait = sk_sleep(&smc->sk);
529 wait_queue_head_t *clc_wait = sk_sleep(smc->clcsock->sk);
532 smc->use_fallback = true;
533 if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
534 smc->clcsock->file = smc->sk.sk_socket->file;
535 smc->clcsock->file->private_data = smc->clcsock;
536 smc->clcsock->wq.fasync_list =
537 smc->sk.sk_socket->wq.fasync_list;
540 * smc socket->wq, which should be removed
552 static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
554 smc_switch_to_fallback(smc);
555 smc->fallback_rsn = reason_code;
556 smc_copy_sock_settings_to_clc(smc);
557 smc->connect_nonblock = 0;
558 if (smc->sk.sk_state == SMC_INIT)
559 smc->sk.sk_state = SMC_ACTIVE;
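Lines 526-559 show smc_switch_to_fallback() and smc_connect_fallback(): when the SMC handshake cannot be used (for example the peer is not SMC-capable, line 966), the socket transparently degrades to the internal TCP socket and the application still gets an ordinary stream connection. A minimal user-space sketch of that contract; the address and port are placeholders, and AF_SMC/SMCPROTO_SMC are defined locally in case the toolchain headers lack them (SMCPROTO_SMC6 = 1 would be used for IPv6):

/* Hedged sketch: connect through AF_SMC; the kernel falls back to plain TCP
 * (smc_connect_fallback) if SMC cannot be negotiated, so the application
 * code is identical either way. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_SMC
#define AF_SMC 43            /* value from <linux/socket.h> */
#endif
#define SMCPROTO_SMC 0       /* SMC over IPv4 */

int main(void)
{
	struct sockaddr_in peer = { .sin_family = AF_INET,
				    .sin_port = htons(12345) };    /* placeholder */
	char buf[128];
	ssize_t n;
	int fd;

	inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);           /* placeholder */

	fd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);
	if (fd < 0) {
		perror("socket(AF_SMC)");
		return 1;
	}
	/* smc_connect() runs the CLC handshake; on failure it falls back. */
	if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0) {
		perror("connect");
		return 1;
	}
	n = write(fd, "ping", 4);
	if (n == 4 && (n = read(fd, buf, sizeof(buf))) > 0)
		printf("got %zd bytes\n", n);
	close(fd);
	return 0;
}

The reason for a fallback is recorded in smc->fallback_rsn (line 555), e.g. SMC_CLC_DECL_PEERNOSMC when the peer did not signal SMC capability.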
564 static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code,
570 if (smc->sk.sk_state == SMC_INIT)
571 sock_put(&smc->sk); /* passive closing */
575 rc = smc_clc_send_decline(smc, reason_code, version);
577 if (smc->sk.sk_state == SMC_INIT)
578 sock_put(&smc->sk); /* passive closing */
582 return smc_connect_fallback(smc, reason_code);
586 static void smc_connect_abort(struct smc_sock *smc, int local_first)
589 smc_lgr_cleanup_early(&smc->conn);
591 smc_conn_free(&smc->conn);
596 static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
602 smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
610 static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
613 smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
636 static int smc_find_ism_v2_device_clnt(struct smc_sock *smc,
654 smc_pnet_is_ndev_pnetid(sock_net(&smc->sk), smcd->pnetid)) {
673 static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
681 static int smc_find_proposal_devices(struct smc_sock *smc,
688 if (smc_find_ism_device(smc, ini) ||
689 smc_connect_ism_vlan_setup(smc, ini)) {
695 if (smc_find_rdma_device(smc, ini)) {
702 if (smc_ism_v2_capable && smc_find_ism_v2_device_clnt(smc, ini))
716 static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc,
732 static int smc_connect_clc(struct smc_sock *smc,
739 rc = smc_clc_send_proposal(smc, ini);
743 return smc_clc_wait_msg(smc, aclc2, SMC_CLC_MAX_ACCEPT_LEN,
748 static int smc_connect_rdma(struct smc_sock *smc,
761 reason_code = smc_conn_create(smc, ini);
767 smc_conn_save_peer_info(smc, aclc);
770 link = smc->conn.lnk;
775 struct smc_link *l = &smc->conn.lgr->lnk[i];
790 smc->conn.lnk = link;
794 if (smc_buf_create(smc, false)) {
802 if (smc_rmb_rtoken_handling(&smc->conn, link, aclc)) {
807 smc_close_init(smc);
808 smc_rx_init(smc);
816 if (smcr_lgr_reg_rmbs(link, smc->conn.rmb_desc)) {
821 smc_rmb_sync_sg_for_device(&smc->conn);
823 reason_code = smc_clc_send_confirm(smc, ini->first_contact_local,
828 smc_tx_init(smc);
833 reason_code = smcr_clnt_conf_first_link(smc);
840 smc_copy_sock_settings_to_clc(smc);
841 smc->connect_nonblock = 0;
842 if (smc->sk.sk_state == SMC_INIT)
843 smc->sk.sk_state = SMC_ACTIVE;
847 smc_connect_abort(smc, ini->first_contact_local);
849 smc->connect_nonblock = 0;
874 static int smc_connect_ism(struct smc_sock *smc,
895 rc = smc_conn_create(smc, ini);
902 rc = smc_buf_create(smc, true);
908 smc_conn_save_peer_info(smc, aclc);
909 smc_close_init(smc);
910 smc_rx_init(smc);
911 smc_tx_init(smc);
913 rc = smc_clc_send_confirm(smc, ini->first_contact_local,
919 smc_copy_sock_settings_to_clc(smc);
920 smc->connect_nonblock = 0;
921 if (smc->sk.sk_state == SMC_INIT)
922 smc->sk.sk_state = SMC_ACTIVE;
926 smc_connect_abort(smc, ini->first_contact_local);
928 smc->connect_nonblock = 0;
952 static int __smc_connect(struct smc_sock *smc)
961 if (smc->use_fallback)
962 return smc_connect_fallback(smc, smc->fallback_rsn);
965 if (!tcp_sk(smc->clcsock->sk)->syn_smc)
966 return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);
969 if (using_ipsec(smc))
970 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC,
975 return smc_connect_decline_fallback(smc, SMC_CLC_DECL_MEM,
984 if (smc_vlan_by_tcpsk(smc->clcsock, ini)) {
993 rc = smc_find_proposal_devices(smc, ini);
1006 rc = smc_connect_clc(smc, aclc2, ini);
1010 /* check if smc modes and versions of CLC proposal and accept match */
1019 rc = smc_connect_rdma(smc, aclc, ini);
1021 rc = smc_connect_ism(smc, aclc, ini);
1025 smc_connect_ism_vlan_cleanup(smc, ini);
1031 smc_connect_ism_vlan_cleanup(smc, ini);
1035 return smc_connect_decline_fallback(smc, rc, version);
1040 struct smc_sock *smc = container_of(work, struct smc_sock,
1042 long timeo = smc->sk.sk_sndtimeo;
1047 lock_sock(smc->clcsock->sk);
1048 if (smc->clcsock->sk->sk_err) {
1049 smc->sk.sk_err = smc->clcsock->sk->sk_err;
1050 } else if ((1 << smc->clcsock->sk->sk_state) &
1052 rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
1054 ((1 << smc->clcsock->sk->sk_state) &
1058 release_sock(smc->clcsock->sk);
1059 lock_sock(&smc->sk);
1060 if (rc != 0 || smc->sk.sk_err) {
1061 smc->sk.sk_state = SMC_CLOSED;
1063 smc->sk.sk_err = EPIPE;
1065 smc->sk.sk_err = ECONNREFUSED;
1067 smc->sk.sk_err = -sock_intr_errno(timeo);
1068 sock_put(&smc->sk); /* passive closing */
1072 rc = __smc_connect(smc);
1074 smc->sk.sk_err = -rc;
1077 if (!sock_flag(&smc->sk, SOCK_DEAD)) {
1078 if (smc->sk.sk_err) {
1079 smc->sk.sk_state_change(&smc->sk);
1081 smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
1082 smc->sk.sk_write_space(&smc->sk);
1085 release_sock(&smc->sk);
1092 struct smc_sock *smc;
1095 smc = smc_sk(sk);
1097 /* separate smc parameter checking to be safe */
1115 smc_copy_sock_settings_to_clc(smc);
1116 tcp_sk(smc->clcsock->sk)->syn_smc = 1;
1117 if (smc->connect_nonblock) {
1121 rc = kernel_connect(smc->clcsock, addr, alen, flags);
1125 if (smc->use_fallback)
1127 sock_hold(&smc->sk); /* sock put in passive closing */
1129 if (queue_work(smc_hs_wq, &smc->connect_work))
1130 smc->connect_nonblock = 1;
1133 rc = __smc_connect(smc);
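Lines 1092-1133 (smc_connect) together with the worker at lines 1040-1085 (smc_connect_work) show the nonblocking path: with O_NONBLOCK the CLC handshake is queued on smc_hs_wq (lines 1129-1130) and connect() returns early, so the application uses the standard "poll for writability, then read SO_ERROR" pattern. A hedged sketch of that pattern; the helper name and the 5-second timeout are illustrative:

/* Hedged sketch: nonblocking connect on an AF_SMC socket. The kernel queues
 * connect_work and the caller waits for the socket to become writable. */
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/socket.h>

/* Returns 0 once the connection (SMC or fallback TCP) is established. */
int smc_connect_nonblock(int fd, const struct sockaddr *sa, socklen_t len)
{
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
	socklen_t elen = sizeof(int);
	int err = 0;

	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL, 0) | O_NONBLOCK);

	if (connect(fd, sa, len) == 0)
		return 0;                       /* connected immediately */
	if (errno != EINPROGRESS)
		return -errno;                  /* hard failure */

	if (poll(&pfd, 1, 5000) != 1)           /* wait up to 5s */
		return -ETIMEDOUT;
	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &elen) < 0)
		return -errno;
	return err ? -err : 0;
}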
1182 /* new clcsock has inherited the smc listen-specific sk_data_ready
1254 struct smc_sock *smc = smc_sk(sk);
1261 __smc_release(smc);
1267 static int smcr_serv_conf_first_link(struct smc_sock *smc)
1269 struct smc_link *link = smc->conn.lnk;
1273 if (smcr_link_reg_rmb(link, smc->conn.rmb_desc))
1287 rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
1298 smc->conn.rmb_desc->is_conf_rkey = true;
1717 /* check if peer is smc capable */
1863 struct smc_sock *smc;
1866 smc = smc_sk(sk);
1871 smc->connect_nonblock)
1880 * them to the clc socket -- copy smc socket options to clc socket
1882 smc_copy_sock_settings_to_clc(smc);
1883 if (!smc->use_fallback)
1884 tcp_sk(smc->clcsock->sk)->syn_smc = 1;
1887 * smc-specific sk_data_ready function
1889 smc->clcsk_data_ready = smc->clcsock->sk->sk_data_ready;
1890 smc->clcsock->sk->sk_data_ready = smc_clcsock_data_ready;
1891 smc->clcsock->sk->sk_user_data =
1892 (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY);
1893 rc = kernel_listen(smc->clcsock, backlog);
1895 smc->clcsock->sk->sk_data_ready = smc->clcsk_data_ready;
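Lines 1863-1895 (smc_listen) show the server side: the SMC-relevant socket options are copied to the clc socket, syn_smc is set, and the clc socket's sk_data_ready is replaced so incoming CLC handshakes are handled by the listen worker. From user space it is the familiar bind/listen/accept sequence on an AF_SMC socket; accepted sockets behave like ordinary stream sockets whether or not SMC was negotiated. A hedged sketch with a placeholder port (note that line 297 shows sk_reuse being copied to the clc socket before kernel_bind, so SO_REUSEADDR works as usual):

/* Hedged sketch: an AF_SMC listener. smc_listen() arms the TCP experimental
 * option (syn_smc) and hooks sk_data_ready; clients that are not SMC-capable
 * fall back to TCP transparently. */
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef AF_SMC
#define AF_SMC 43
#endif

int main(void)
{
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_port = htons(12345),          /* placeholder */
				    .sin_addr.s_addr = htonl(INADDR_ANY) };
	int lfd, cfd, one = 1;

	lfd = socket(AF_SMC, SOCK_STREAM, 0);      /* 0 == SMCPROTO_SMC (IPv4) */
	if (lfd < 0) {
		perror("socket(AF_SMC)");
		return 1;
	}
	setsockopt(lfd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));
	if (bind(lfd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(lfd, 128) < 0) {
		perror("bind/listen");
		return 1;
	}
	while ((cfd = accept(lfd, NULL, NULL)) >= 0) {
		write(cfd, "hello\n", 6);
		close(cfd);
	}
	return 0;
}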
1980 struct smc_sock *smc;
1986 smc = smc_sk(sock->sk);
1988 return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
1994 struct smc_sock *smc;
1997 smc = smc_sk(sk);
2003 if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
2004 smc_switch_to_fallback(smc);
2005 smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
2017 if (smc->use_fallback)
2018 rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
2020 rc = smc_tx_sendmsg(smc, msg, len);
2030 struct smc_sock *smc;
2033 smc = smc_sk(sk);
2050 if (smc->use_fallback) {
2051 rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
2054 rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
2079 struct smc_sock *smc;
2085 smc = smc_sk(sock->sk);
2086 if (smc->use_fallback) {
2088 mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
2089 sk->sk_err = smc->clcsock->sk->sk_err;
2101 } else if (smc->use_fallback) { /* as result of connect_work()*/
2102 mask |= smc->clcsock->ops->poll(file, smc->clcsock,
2104 sk->sk_err = smc->clcsock->sk->sk_err;
2107 atomic_read(&smc->conn.sndbuf_space)) ||
2114 if (atomic_read(&smc->conn.bytes_to_rcv))
2120 if (smc->conn.urg_state == SMC_URG_VALID)
2132 struct smc_sock *smc;
2137 smc = smc_sk(sk);
2152 if (smc->use_fallback) {
2153 rc = kernel_sock_shutdown(smc->clcsock, how);
2154 sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
2164 rc = smc_close_active(smc);
2170 rc = smc_close_shutdown_write(smc);
2177 if (do_shutdown && smc->clcsock)
2178 rc1 = kernel_sock_shutdown(smc->clcsock, how);
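Lines 2132-2178 (smc_shutdown): on a native SMC connection SHUT_WR maps to smc_close_shutdown_write() and SHUT_RDWR to smc_close_active(), while a fallback socket simply forwards to kernel_sock_shutdown() on the clc socket. The ordinary half-close idiom therefore applies; a hedged sketch, assuming fd is a connected AF_SMC socket:

/* Hedged sketch: half-close an SMC connection and drain the peer's data.
 * SHUT_WR reaches smc_close_shutdown_write() on a native connection and
 * kernel_sock_shutdown() on a fallback one (lines 2152-2170). */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

void smc_half_close_and_drain(int fd)     /* fd: connected AF_SMC socket */
{
	char buf[4096];
	ssize_t n;

	if (shutdown(fd, SHUT_WR) < 0)         /* no more outgoing data */
		perror("shutdown(SHUT_WR)");
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		;                              /* consume until the peer closes */
	close(fd);
}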
2191 struct smc_sock *smc;
2197 smc = smc_sk(sk);
2202 if (unlikely(!smc->clcsock->ops->setsockopt))
2205 rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
2207 if (smc->clcsock->sk->sk_err) {
2208 sk->sk_err = smc->clcsock->sk->sk_err;
2218 if (rc || smc->use_fallback)
2226 if (sk->sk_state == SMC_INIT && !smc->connect_nonblock) {
2227 smc_switch_to_fallback(smc);
2228 smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
2238 mod_delayed_work(smc->conn.lgr->tx_wq,
2239 &smc->conn.tx_work, 0);
2247 mod_delayed_work(smc->conn.lgr->tx_wq,
2248 &smc->conn.tx_work, 0);
2252 smc->sockopt_defer_accept = val;
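Lines 2191-2252 (smc_setsockopt): every option is first forwarded to the clc socket (line 2205); TCP_NODELAY and TCP_CORK additionally kick the connection's tx_work so queued data is flushed (lines 2238-2248), and TCP_DEFER_ACCEPT is remembered in sockopt_defer_accept (line 2252). The familiar TCP knobs therefore keep working on an AF_SMC descriptor; a hedged sketch with an illustrative helper name and values:

/* Hedged sketch: TCP-level options on an SMC socket. smc_setsockopt()
 * forwards them to the internal TCP socket and, for TCP_NODELAY/TCP_CORK,
 * also schedules the SMC tx worker (lines 2238-2248). */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

int smc_tune_socket(int fd)                /* fd: AF_SMC socket (assumed) */
{
	int one = 1, defer_secs = 5;

	if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one)) < 0)
		return -1;
	/* Only meaningful on a listening socket; stored in
	 * smc->sockopt_defer_accept (line 2252). */
	if (setsockopt(fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
		       &defer_secs, sizeof(defer_secs)) < 0)
		return -1;
	return 0;
}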
2266 struct smc_sock *smc;
2268 smc = smc_sk(sock->sk);
2270 if (unlikely(!smc->clcsock->ops->getsockopt))
2272 return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
2281 struct smc_sock *smc;
2284 smc = smc_sk(sock->sk);
2285 conn = &smc->conn;
2286 lock_sock(&smc->sk);
2287 if (smc->use_fallback) {
2288 if (!smc->clcsock) {
2289 release_sock(&smc->sk);
2292 answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
2293 release_sock(&smc->sk);
2298 if (smc->sk.sk_state == SMC_LISTEN) {
2299 release_sock(&smc->sk);
2302 if (smc->sk.sk_state == SMC_INIT ||
2303 smc->sk.sk_state == SMC_CLOSED)
2306 answ = atomic_read(&smc->conn.bytes_to_rcv);
2310 if (smc->sk.sk_state == SMC_LISTEN) {
2311 release_sock(&smc->sk);
2314 if (smc->sk.sk_state == SMC_INIT ||
2315 smc->sk.sk_state == SMC_CLOSED)
2318 answ = smc->conn.sndbuf_desc->len -
2319 atomic_read(&smc->conn.sndbuf_space);
2323 if (smc->sk.sk_state == SMC_LISTEN) {
2324 release_sock(&smc->sk);
2327 if (smc->sk.sk_state == SMC_INIT ||
2328 smc->sk.sk_state == SMC_CLOSED)
2331 answ = smc_tx_prepared_sends(&smc->conn);
2334 if (smc->sk.sk_state == SMC_LISTEN) {
2335 release_sock(&smc->sk);
2338 if (smc->sk.sk_state == SMC_INIT ||
2339 smc->sk.sk_state == SMC_CLOSED) {
2349 release_sock(&smc->sk);
2352 release_sock(&smc->sk);
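Lines 2281-2352 (smc_ioctl): SIOCINQ reports conn.bytes_to_rcv (line 2306), SIOCOUTQ reports the used part of the send buffer (lines 2318-2319), and a fallback socket simply delegates to the clc socket's ioctl (line 2292). A hedged sketch querying both counters from user space:

/* Hedged sketch: queue-depth queries on an SMC socket. SIOCINQ maps to
 * conn.bytes_to_rcv and SIOCOUTQ to the data still held in the send buffer;
 * on a fallback socket the TCP ioctl answers instead. */
#include <linux/sockios.h>   /* SIOCINQ, SIOCOUTQ */
#include <stdio.h>
#include <sys/ioctl.h>

void smc_print_queues(int fd)              /* fd: connected AF_SMC socket */
{
	int inq = 0, outq = 0;

	if (ioctl(fd, SIOCINQ, &inq) == 0 && ioctl(fd, SIOCOUTQ, &outq) == 0)
		printf("unread: %d bytes, still in send buffer: %d bytes\n",
		       inq, outq);
	else
		perror("ioctl");
}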
2361 struct smc_sock *smc;
2364 smc = smc_sk(sk);
2371 if (smc->use_fallback)
2372 rc = kernel_sendpage(smc->clcsock, page, offset,
2392 struct smc_sock *smc;
2395 smc = smc_sk(sk);
2412 if (smc->use_fallback) {
2413 rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
2424 rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
2459 struct smc_sock *smc;
2478 smc = smc_sk(sk);
2479 smc->use_fallback = false; /* assume rdma capability first */
2480 smc->fallback_rsn = 0;
2482 &smc->clcsock);
2487 smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
2488 smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);
2629 MODULE_DESCRIPTION("smc socket address family");