Lines matching refs: ssk (net/mptcp/sockopt.c)
41 * new ssk completes join, inherits options from s0 // seq 2
42 * Needs sync from mptcp join logic, but ssk->seq == msk->seq
44 * Set high-order bits to sk_state so ssk->seq == msk->seq test
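Lines 41-44 come from the comment above the sequence-reset helper. The idea: the per-socket sockopt sequence number carries the socket state in its high bits, so a subflow that inherited a seq from a listener can never spuriously compare equal to the established msk's seq. A minimal sketch, assuming the helper keeps its mainline name sockopt_seq_reset and the 24-bit shift:

/* Sketch (assumption: name and shift match mainline). Tagging the
 * sequence with sk_state in the high bits guarantees the
 * ssk->seq == msk->seq test can only pass when both counters were
 * generated in the same socket state.
 */
static u32 sockopt_seq_reset(const struct sock *sk)
{
	sock_owned_by_me(sk);

	return (u32)sk->sk_state << 24u;
}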
79 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
80 bool slow = lock_sock_fast(ssk);
84 sock_valbool_flag(ssk, SOCK_DBG, !!val);
87 if (ssk->sk_prot->keepalive)
88 ssk->sk_prot->keepalive(ssk, !!val);
89 sock_valbool_flag(ssk, SOCK_KEEPOPEN, !!val);
92 ssk->sk_priority = val;
96 ssk->sk_userlocks |= SOCK_SNDBUF_LOCK;
97 WRITE_ONCE(ssk->sk_sndbuf, sk->sk_sndbuf);
98 mptcp_subflow_ctx(ssk)->cached_sndbuf = sk->sk_sndbuf;
102 ssk->sk_userlocks |= SOCK_RCVBUF_LOCK;
103 WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf);
106 if (READ_ONCE(ssk->sk_mark) != sk->sk_mark) {
107 WRITE_ONCE(ssk->sk_mark, sk->sk_mark);
108 sk_dst_reset(ssk);
112 WRITE_ONCE(ssk->sk_incoming_cpu, val);
117 unlock_sock_fast(ssk, slow);
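Lines 79-117 all come from one per-subflow propagation loop: once an integer-valued SOL_SOCKET option has been accepted on the MPTCP socket, it is replayed onto every subflow under that subflow's fast lock. A condensed sketch assembled from the matched lines (the function name and the switch scaffolding are assumptions based on mainline; only the ssk-touching statements are verbatim):

static void mptcp_sol_socket_sync_intval(struct mptcp_sock *msk, int optname,
					 int val)
{
	struct sock *sk = (struct sock *)msk;
	struct mptcp_subflow_context *subflow;

	/* the real helper likely also bumps msk->setsockopt_seq here */
	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		bool slow = lock_sock_fast(ssk);

		switch (optname) {
		case SO_DEBUG:
			sock_valbool_flag(ssk, SOCK_DBG, !!val);
			break;
		case SO_KEEPALIVE:
			if (ssk->sk_prot->keepalive)
				ssk->sk_prot->keepalive(ssk, !!val);
			sock_valbool_flag(ssk, SOCK_KEEPOPEN, !!val);
			break;
		case SO_PRIORITY:
			ssk->sk_priority = val;
			break;
		case SO_SNDBUF:
			ssk->sk_userlocks |= SOCK_SNDBUF_LOCK;
			WRITE_ONCE(ssk->sk_sndbuf, sk->sk_sndbuf);
			mptcp_subflow_ctx(ssk)->cached_sndbuf = sk->sk_sndbuf;
			break;
		case SO_RCVBUF:
			ssk->sk_userlocks |= SOCK_RCVBUF_LOCK;
			WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf);
			break;
		case SO_MARK:
			if (READ_ONCE(ssk->sk_mark) != sk->sk_mark) {
				WRITE_ONCE(ssk->sk_mark, sk->sk_mark);
				sk_dst_reset(ssk);
			}
			break;
		case SO_INCOMING_CPU:
			WRITE_ONCE(ssk->sk_incoming_cpu, val);
			break;
		}

		unlock_sock_fast(ssk, slow);
	}
}

The bare lock/unlock skeletons at lines 161-165 and 239-243 below are the same fast-lock walk, used by the timestamp-related options.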
161 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
162 bool slow = lock_sock_fast(ssk);
165 unlock_sock_fast(ssk, slow);
239 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
240 bool slow = lock_sock_fast(ssk);
243 unlock_sock_fast(ssk, slow);
274 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
275 bool slow = lock_sock_fast(ssk);
278 sock_reset_flag(ssk, SOCK_LINGER);
280 ssk->sk_lingertime = sk->sk_lingertime;
281 sock_set_flag(ssk, SOCK_LINGER);
285 unlock_sock_fast(ssk, slow);
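Lines 274-285 are the SO_LINGER variant of the same walk: the parent's lingertime and SOCK_LINGER flag are mirrored onto each subflow. A sketch (handler name and the mirror-from-parent framing are assumptions; the ssk statements are from the matches):

static void mptcp_sol_socket_sync_linger(struct mptcp_sock *msk)
{
	struct sock *sk = (struct sock *)msk;
	struct mptcp_subflow_context *subflow;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		bool slow = lock_sock_fast(ssk);

		if (!sock_flag(sk, SOCK_LINGER)) {
			sock_reset_flag(ssk, SOCK_LINGER);
		} else {
			ssk->sk_lingertime = sk->sk_lingertime;
			sock_set_flag(ssk, SOCK_LINGER);
		}
		unlock_sock_fast(ssk, slow);
	}
}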
296 struct sock *ssk;
305 ssk = __mptcp_nmpc_sk(msk);
306 if (IS_ERR(ssk)) {
308 return PTR_ERR(ssk);
311 ret = sk_setsockopt(ssk, SOL_SOCKET, optname, optval, optlen);
314 sk->sk_reuseport = ssk->sk_reuseport;
316 sk->sk_reuse = ssk->sk_reuse;
318 sk->sk_bound_dev_if = ssk->sk_bound_dev_if;
320 sk->sk_bound_dev_if = ssk->sk_bound_dev_if;
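Lines 296-320 show the pre-connection path: options such as SO_REUSEPORT, SO_REUSEADDR and the bind-to-device pair are applied to the first (not yet connected) subflow via sk_setsockopt(), then the resulting subflow state is mirrored back into the MPTCP socket so later subflows inherit it. A sketch with a hypothetical wrapper name (caller is assumed to hold the msk-level socket lock):

static int mptcp_sol_socket_pre_connect(struct mptcp_sock *msk, int optname,
					sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = (struct sock *)msk;
	struct sock *ssk = __mptcp_nmpc_sk(msk);
	int ret;

	if (IS_ERR(ssk))
		return PTR_ERR(ssk);

	ret = sk_setsockopt(ssk, SOL_SOCKET, optname, optval, optlen);
	if (ret)
		return ret;

	/* mirror the subflow's view back into the msk */
	switch (optname) {
	case SO_REUSEPORT:
		sk->sk_reuseport = ssk->sk_reuseport;
		break;
	case SO_REUSEADDR:
		sk->sk_reuse = ssk->sk_reuse;
		break;
	case SO_BINDTODEVICE:
	case SO_BINDTOIFINDEX:
		sk->sk_bound_dev_if = ssk->sk_bound_dev_if;
		break;
	}
	return 0;
}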
394 struct sock *ssk;
401 ssk = __mptcp_nmpc_sk(msk);
402 if (IS_ERR(ssk)) {
404 return PTR_ERR(ssk);
407 ret = tcp_setsockopt(ssk, SOL_IPV6, optname, optval, optlen);
417 sk->sk_ipv6only = ssk->sk_ipv6only;
421 inet_test_bit(TRANSPARENT, ssk));
425 inet_test_bit(FREEBIND, ssk));
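Lines 394-425 are the same pre-connect pattern at SOL_IPV6: tcp_setsockopt() runs on the first subflow, then IPV6_V6ONLY is copied back, and for the transparent/freebind options the parent's inet bit is assigned from the subflow's. Sketch (hypothetical wrapper name; the inet_assign_bit() calls complete the truncated matches at lines 421 and 425):

static int mptcp_sol_ipv6_pre_connect(struct mptcp_sock *msk, int optname,
				      sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = (struct sock *)msk;
	struct sock *ssk = __mptcp_nmpc_sk(msk);
	int ret;

	if (IS_ERR(ssk))
		return PTR_ERR(ssk);

	ret = tcp_setsockopt(ssk, SOL_IPV6, optname, optval, optlen);
	if (ret)
		return ret;

	switch (optname) {
	case IPV6_V6ONLY:
		sk->sk_ipv6only = ssk->sk_ipv6only;
		break;
	case IPV6_TRANSPARENT:
		inet_assign_bit(TRANSPARENT, sk,
				inet_test_bit(TRANSPARENT, ssk));
		break;
	case IPV6_FREEBIND:
		inet_assign_bit(FREEBIND, sk,
				inet_test_bit(FREEBIND, ssk));
		break;
	}
	return 0;
}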
608 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
611 lock_sock(ssk);
612 err = tcp_set_congestion_control(ssk, name, true, cap_net_admin);
616 release_sock(ssk);
643 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
645 lock_sock(ssk);
646 __tcp_sock_set_cork(ssk, !!val);
647 release_sock(ssk);
673 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
675 lock_sock(ssk);
676 __tcp_sock_set_nodelay(ssk, !!val);
677 release_sock(ssk);
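Lines 608-677 are three sibling walks that take the full socket lock rather than the fast variant: congestion control (tcp_set_congestion_control() with load == true may sleep while loading a CC module), TCP_CORK, and TCP_NODELAY. The shared shape, written out for cork (hypothetical function name):

static void mptcp_sync_cork_to_subflows(struct mptcp_sock *msk, bool val)
{
	struct mptcp_subflow_context *subflow;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		lock_sock(ssk);
		__tcp_sock_set_cork(ssk, val);	/* nodelay walk: __tcp_sock_set_nodelay() */
		release_sock(ssk);
	}
	/* the TCP_CONGESTION walk instead does, per subflow:
	 *   err = tcp_set_congestion_control(ssk, name, true, cap_net_admin);
	 * and (assumption) caches the name in msk->ca_name so that
	 * sync_socket_options() below can replay it onto late joiners.
	 */
}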
690 struct sock *ssk;
699 ssk = __mptcp_nmpc_sk(msk);
700 if (IS_ERR(ssk)) {
702 return PTR_ERR(ssk);
707 inet_assign_bit(FREEBIND, ssk, inet_test_bit(FREEBIND, sk));
710 inet_assign_bit(TRANSPARENT, ssk,
740 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
743 slow = lock_sock_fast(ssk);
744 __ip_sock_set_tos(ssk, val);
745 unlock_sock_fast(ssk, slow);
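Lines 690-745 cover SOL_IP. IP_FREEBIND and IP_TRANSPARENT go through the pre-connect first-subflow path, but note the direction: here the bit flows from parent to subflow via inet_assign_bit(..., ssk, inet_test_bit(..., sk)), the opposite of the IPv6 mirror-back above. IP_TOS, by contrast, is replayed onto every established subflow under the fast lock, as in this sketch (hypothetical name):

static void mptcp_sync_tos_to_subflows(struct mptcp_sock *msk, int val)
{
	struct mptcp_subflow_context *subflow;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		bool slow = lock_sock_fast(ssk);

		__ip_sock_set_tos(ssk, val);
		unlock_sock_fast(ssk, slow);
	}
}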
770 struct sock *ssk;
775 ssk = __mptcp_nmpc_sk(msk);
776 if (IS_ERR(ssk)) {
777 ret = PTR_ERR(ssk);
781 ret = tcp_setsockopt(ssk, level, optname, optval, optlen);
833 struct sock *ssk;
850 ssk = __mptcp_tcp_fallback(msk);
852 if (ssk)
853 return tcp_setsockopt(ssk, level, optname, optval, optlen);
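Lines 770-781 forward TCP-level options to the first subflow through the same __mptcp_nmpc_sk() / tcp_setsockopt() pair, while lines 833-853 are the top of mptcp_setsockopt() itself: once the connection has fallen back to plain TCP, __mptcp_tcp_fallback() returns the lone surviving subflow and everything is delegated to it. A trimmed sketch of that dispatch (the level-based branching after the fallback check is elided):

int mptcp_setsockopt(struct sock *sk, int level, int optname,
		     sockptr_t optval, unsigned int optlen)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct sock *ssk;

	/* non-NULL only after fallback to plain TCP; then the one
	 * subflow handles every option wholesale
	 */
	ssk = __mptcp_tcp_fallback(msk);
	if (ssk)
		return tcp_setsockopt(ssk, level, optname, optval, optlen);

	/* otherwise dispatch on level (SOL_SOCKET/SOL_IP/SOL_IPV6/SOL_TCP);
	 * elided here
	 */
	return -EOPNOTSUPP;
}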
871 struct sock *ssk;
875 ssk = msk->first;
876 if (ssk) {
877 ret = tcp_getsockopt(ssk, level, optname, optval, optlen);
881 ssk = __mptcp_nmpc_sk(msk);
882 if (IS_ERR(ssk)) {
883 ret = PTR_ERR(ssk);
887 ret = tcp_getsockopt(ssk, level, optname, optval, optlen);
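Lines 871-887 read an option through a subflow: use msk->first when it already exists, otherwise create the initial subflow with __mptcp_nmpc_sk() and query that instead. Reconstructed skeleton (locking and labels assumed):

static int mptcp_getsockopt_first_sf(struct mptcp_sock *msk, int level,
				     int optname, char __user *optval,
				     int __user *optlen)
{
	struct sock *sk = (struct sock *)msk;
	struct sock *ssk;
	int ret;

	lock_sock(sk);
	ssk = msk->first;
	if (ssk) {
		ret = tcp_getsockopt(ssk, level, optname, optval, optlen);
		goto out;
	}

	ssk = __mptcp_nmpc_sk(msk);
	if (IS_ERR(ssk)) {
		ret = PTR_ERR(ssk);
		goto out;
	}

	ret = tcp_getsockopt(ssk, level, optname, optval, optlen);

out:
	release_sock(sk);
	return ret;
}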
1048 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1055 tcp_get_info(ssk, &info);
1140 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1147 mptcp_get_sub_addrs(ssk, &a);
1248 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1262 mptcp_get_sub_addrs(ssk, &sfinfo.addrs);
1267 tcp_get_info(ssk, &tcp_info);
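Lines 1048-1267 belong to the diagnostic getters (MPTCP_TCPINFO, MPTCP_SUBFLOW_ADDRS and the combined full-info variant): each walks the subflows and snapshots tcp_get_info() and/or the subflow's addresses into a caller-supplied buffer. The core of the walk, with the copy-to-user bookkeeping reduced to comments (buffer handling here is an assumption):

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		struct tcp_info info;

		/* stop once the user-supplied buffer is full (bookkeeping
		 * omitted), then snapshot this subflow
		 */
		tcp_get_info(ssk, &info);
		/* MPTCP_SUBFLOW_ADDRS additionally records both endpoints via
		 *   mptcp_get_sub_addrs(ssk, &addrs);
		 * each record is copied out before advancing to the next
		 * subflow
		 */
	}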
1377 struct sock *ssk;
1388 ssk = __mptcp_tcp_fallback(msk);
1390 if (ssk)
1391 return tcp_getsockopt(ssk, level, optname, optval, option);
1402 static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
1407 if (ssk->sk_prot->keepalive) {
1409 ssk->sk_prot->keepalive(ssk, 1);
1411 ssk->sk_prot->keepalive(ssk, 0);
1414 ssk->sk_priority = sk->sk_priority;
1415 ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
1416 ssk->sk_incoming_cpu = sk->sk_incoming_cpu;
1417 ssk->sk_ipv6only = sk->sk_ipv6only;
1418 __ip_sock_set_tos(ssk, inet_sk(sk)->tos);
1421 ssk->sk_userlocks |= sk->sk_userlocks & tx_rx_locks;
1423 WRITE_ONCE(ssk->sk_sndbuf, sk->sk_sndbuf);
1424 mptcp_subflow_ctx(ssk)->cached_sndbuf = sk->sk_sndbuf;
1427 WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf);
1431 ssk->sk_lingertime = sk->sk_lingertime;
1432 sock_set_flag(ssk, SOCK_LINGER);
1434 sock_reset_flag(ssk, SOCK_LINGER);
1437 if (sk->sk_mark != ssk->sk_mark) {
1438 ssk->sk_mark = sk->sk_mark;
1439 sk_dst_reset(ssk);
1442 sock_valbool_flag(ssk, SOCK_DBG, sock_flag(sk, SOCK_DBG));
1444 if (inet_csk(sk)->icsk_ca_ops != inet_csk(ssk)->icsk_ca_ops)
1445 tcp_set_congestion_control(ssk, msk->ca_name, false, true);
1446 __tcp_sock_set_cork(ssk, !!msk->cork);
1447 __tcp_sock_set_nodelay(ssk, !!msk->nodelay);
1449 inet_assign_bit(TRANSPARENT, ssk, inet_test_bit(TRANSPARENT, sk));
1450 inet_assign_bit(FREEBIND, ssk, inet_test_bit(FREEBIND, sk));
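Lines 1402-1450 are the heart of option inheritance: sync_socket_options() replays the msk's current state onto one subflow, covering everything the per-option paths above set. Assembled from the matched lines; the keepalive/linger conditionals and the tx_rx_locks mask are filled in as assumptions:

static void sync_socket_options(struct mptcp_sock *msk, struct sock *ssk)
{
	static const unsigned int tx_rx_locks = SOCK_RCVBUF_LOCK | SOCK_SNDBUF_LOCK;
	struct sock *sk = (struct sock *)msk;

	if (ssk->sk_prot->keepalive) {
		if (sock_flag(sk, SOCK_KEEPOPEN))
			ssk->sk_prot->keepalive(ssk, 1);
		else
			ssk->sk_prot->keepalive(ssk, 0);
	}

	ssk->sk_priority = sk->sk_priority;
	ssk->sk_bound_dev_if = sk->sk_bound_dev_if;
	ssk->sk_incoming_cpu = sk->sk_incoming_cpu;
	ssk->sk_ipv6only = sk->sk_ipv6only;
	__ip_sock_set_tos(ssk, inet_sk(sk)->tos);

	if (sk->sk_userlocks & tx_rx_locks) {
		ssk->sk_userlocks |= sk->sk_userlocks & tx_rx_locks;
		if (sk->sk_userlocks & SOCK_SNDBUF_LOCK) {
			WRITE_ONCE(ssk->sk_sndbuf, sk->sk_sndbuf);
			mptcp_subflow_ctx(ssk)->cached_sndbuf = sk->sk_sndbuf;
		}
		if (sk->sk_userlocks & SOCK_RCVBUF_LOCK)
			WRITE_ONCE(ssk->sk_rcvbuf, sk->sk_rcvbuf);
	}

	if (sock_flag(sk, SOCK_LINGER)) {
		ssk->sk_lingertime = sk->sk_lingertime;
		sock_set_flag(ssk, SOCK_LINGER);
	} else {
		sock_reset_flag(ssk, SOCK_LINGER);
	}

	if (sk->sk_mark != ssk->sk_mark) {
		ssk->sk_mark = sk->sk_mark;
		sk_dst_reset(ssk);
	}

	sock_valbool_flag(ssk, SOCK_DBG, sock_flag(sk, SOCK_DBG));

	if (inet_csk(sk)->icsk_ca_ops != inet_csk(ssk)->icsk_ca_ops)
		tcp_set_congestion_control(ssk, msk->ca_name, false, true);
	__tcp_sock_set_cork(ssk, !!msk->cork);
	__tcp_sock_set_nodelay(ssk, !!msk->nodelay);

	inet_assign_bit(TRANSPARENT, ssk, inet_test_bit(TRANSPARENT, sk));
	inet_assign_bit(FREEBIND, ssk, inet_test_bit(FREEBIND, sk));
}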
1453 static void __mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk)
1455 bool slow = lock_sock_fast(ssk);
1457 sync_socket_options(msk, ssk);
1459 unlock_sock_fast(ssk, slow);
1462 void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk)
1464 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1469 __mptcp_sockopt_sync(msk, ssk);
1475 void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk)
1477 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1482 sync_socket_options(msk, ssk);
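Lines 1453-1482 are the three entry points: a fast-lock wrapper, the seq-gated sync run when a subflow joins, and an already-locked variant. Reconstructed skeleton; the sequence comparison and the msk_owned_by_me() asserts are assumptions consistent with the comment at lines 41-44:

static void __mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk)
{
	bool slow = lock_sock_fast(ssk);

	sync_socket_options(msk, ssk);
	unlock_sock_fast(ssk, slow);
}

void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);

	msk_owned_by_me(msk);

	/* resync only if this subflow missed an option update; the
	 * state-tagged seq (see sockopt_seq_reset()) forces a mismatch
	 * for subflows inherited from a listener
	 */
	if (READ_ONCE(msk->setsockopt_seq) != subflow->setsockopt_seq) {
		__mptcp_sockopt_sync(msk, ssk);
		subflow->setsockopt_seq = msk->setsockopt_seq;
	}
}

void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);

	msk_owned_by_me(msk);

	/* caller already holds the subflow lock */
	if (READ_ONCE(msk->setsockopt_seq) != subflow->setsockopt_seq) {
		sync_socket_options(msk, ssk);
		subflow->setsockopt_seq = msk->setsockopt_seq;
	}
}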