Lines Matching refs:ssk
264 static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
268 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
272 __skb_unlink(skb, &ssk->sk_receive_queue);
281 if (ssk->sk_forward_alloc < amount)
284 ssk->sk_forward_alloc -= amount;
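
Lines 264-284 above come from __mptcp_move_skb(): an skb is unlinked from the subflow's receive queue and the move only proceeds if the subflow still has enough forward-allocated memory to cover it. A minimal userspace sketch of that check-then-charge pattern follows; the names (fake_sock, move_charge) are invented, and the sketch assumes the amount is re-charged to the parent MPTCP socket afterwards:

/* Check-then-charge sketch: succeed only if the subflow has enough
 * pre-reserved memory, then move that reservation to the parent socket. */
#include <stdbool.h>
#include <stdio.h>

struct fake_sock {
        int forward_alloc;      /* bytes already reserved for this socket */
};

static bool move_charge(struct fake_sock *ssk, struct fake_sock *msk, int amount)
{
        if (ssk->forward_alloc < amount)
                return false;           /* the kernel drops the skb in this case */
        ssk->forward_alloc -= amount;   /* uncharge the subflow ...              */
        msk->forward_alloc += amount;   /* ... and charge the MPTCP socket       */
        return true;
}

int main(void)
{
        struct fake_sock ssk = { .forward_alloc = 4096 };
        struct fake_sock msk = { .forward_alloc = 0 };

        printf("moved: %d\n", move_charge(&ssk, &msk, 1500));   /* 1 */
        printf("moved: %d\n", move_charge(&ssk, &msk, 4096));   /* 0, not enough left */
        return 0;
}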
383 static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
385 long tout = ssk && inet_csk(ssk)->icsk_pending ?
386 inet_csk(ssk)->icsk_timeout - jiffies : 0;
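
mptcp_set_timeout() (lines 383-386) derives the MPTCP-level timeout from the subflow: if the subflow's ICSK timer is pending, the remaining time (icsk_timeout - jiffies) is used, otherwise zero. A small standalone sketch of that computation, using plain integers in place of jiffies and the icsk fields:

/* Reuse whatever is left of the subflow's pending TCP timer, else 0. */
#include <stdio.h>

static long subflow_timeout_left(int timer_pending, unsigned long timer_expiry,
                                 unsigned long now)
{
        long tout = timer_pending ? (long)(timer_expiry - now) : 0;

        /* a non-positive value means "no usable subflow timer" */
        return tout > 0 ? tout : 0;
}

int main(void)
{
        printf("%ld\n", subflow_timeout_left(1, 1100, 1000));   /* 100 ticks left   */
        printf("%ld\n", subflow_timeout_left(0, 1100, 1000));   /* 0, no timer armed */
        return 0;
}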
443 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
445 lock_sock(ssk);
446 tcp_send_ack(ssk);
447 release_sock(ssk);
461 struct sock *ssk,
464 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
472 pr_debug("msk=%p ssk=%p", msk, ssk);
473 tp = tcp_sk(ssk);
485 skb = skb_peek(&ssk->sk_receive_queue);
518 if (__mptcp_move_skb(msk, ssk, skb, offset, len))
526 sk_eat_skb(ssk, skb);
531 more_data_avail = mptcp_subflow_data_available(ssk);
541 tcp_cleanup_rbuf(ssk, 1);
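
Lines 461-541 are the per-subflow receive loop: peek the head of the subflow's sk_receive_queue, hand the skb to __mptcp_move_skb(), consume it with sk_eat_skb(), and repeat while mptcp_subflow_data_available() reports more data, finishing with tcp_cleanup_rbuf() so the subflow can update its advertised window. A toy userspace analogy of that drain loop, with arrays standing in for the socket queues:

/* Drain-loop analogy: move entries from the subflow queue to the
 * MPTCP-level queue while data remains available. */
#include <stdio.h>

#define QLEN 4

int main(void)
{
        int subflow_q[QLEN] = { 10, 20, 30, 40 };       /* pretend skb payload sizes */
        int msk_q[QLEN];
        int head = 0, moved = 0;

        while (head < QLEN) {                   /* "more_data_avail"   */
                int skb = subflow_q[head];      /* skb_peek()          */
                msk_q[moved++] = skb;           /* __mptcp_move_skb()  */
                head++;                         /* sk_eat_skb()        */
        }
        /* here the kernel calls tcp_cleanup_rbuf() to refresh the window */
        printf("moved %d skbs\n", moved);
        return 0;
}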
592 static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
605 __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
623 void mptcp_data_ready(struct sock *sk, struct sock *ssk)
625 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
638 move_skbs_to_msk(msk, ssk))
653 move_skbs_to_msk(msk, ssk);
891 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
922 mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
926 skb = tcp_write_queue_tail(ssk);
980 ret = do_tcp_sendpages(ssk, page, offset, psize,
1006 * Note: we charge such data both to sk and ssk
1016 tail = tcp_write_queue_tail(ssk);
1023 skb = tcp_write_queue_tail(ssk);
1029 mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
1042 mptcp_subflow_ctx(ssk)->rel_write_seq += ret;
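
In mptcp_sendmsg_frag() (lines 891-1042) data is pushed onto the subflow with do_tcp_sendpages() and the DSS mapping is stamped from the subflow-relative write sequence: mpext->subflow_seq records where the mapping starts, and rel_write_seq then advances by the bytes the subflow actually accepted. A standalone sketch of that bookkeeping, with a hypothetical frag_map struct:

/* Sequence bookkeeping: each mapping starts at the current
 * subflow-relative sequence, which then advances by the accepted bytes. */
#include <stdio.h>

struct frag_map {
        unsigned int subflow_seq;       /* where this mapping starts on the subflow */
        unsigned int len;               /* bytes covered by the mapping             */
};

int main(void)
{
        unsigned int rel_write_seq = 1;         /* subflow-relative write sequence      */
        int accepted[] = { 1400, 1400, 800 };   /* bytes taken by each sendpages call   */
        struct frag_map map;

        for (int i = 0; i < 3; i++) {
                map.subflow_seq = rel_write_seq;        /* mpext->subflow_seq = rel_write_seq */
                map.len = accepted[i];
                rel_write_seq += accepted[i];           /* rel_write_seq += ret               */
                printf("mapping: seq=%u len=%u\n", map.subflow_seq, map.len);
        }
        return 0;
}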
1055 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1056 struct socket *sock = READ_ONCE(ssk->sk_socket);
1058 /* enables ssk->write_space() callbacks */
1066 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1073 return ((1 << ssk->sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT));
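
The writability test at line 1073 uses the usual TCP state-mask trick: each TCPF_* flag is (1 << TCP_*), so shifting 1 by the current sk_state and ANDing against a mask tests set membership in one step. A standalone illustration; the enum values mirror the kernel's tcp_states.h:

/* Bitmask state test: "is sk_state one of ESTABLISHED or CLOSE_WAIT?" */
#include <stdbool.h>
#include <stdio.h>

enum { TCP_ESTABLISHED = 1, TCP_FIN_WAIT1 = 4, TCP_CLOSE_WAIT = 8 };
#define TCPF_ESTABLISHED (1 << TCP_ESTABLISHED)
#define TCPF_CLOSE_WAIT  (1 << TCP_CLOSE_WAIT)

static bool subflow_writable_state(int sk_state)
{
        return (1 << sk_state) & (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT);
}

int main(void)
{
        printf("%d\n", subflow_writable_state(TCP_ESTABLISHED));        /* 1 */
        printf("%d\n", subflow_writable_state(TCP_FIN_WAIT1));          /* 0 */
        return 0;
}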
1083 struct sock *ssk;
1093 struct sock *ssk;
1115 ssk = mptcp_subflow_tcp_sock(subflow);
1116 *sndbuf = max(tcp_sk(ssk)->snd_wnd, *sndbuf);
1123 send_info[i].ssk = NULL;
1127 ssk = mptcp_subflow_tcp_sock(subflow);
1132 *sndbuf = max(tcp_sk(ssk)->snd_wnd, *sndbuf);
1136 pace = READ_ONCE(ssk->sk_pacing_rate);
1140 ratio = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32,
1143 send_info[subflow->backup].ssk = ssk;
1148 pr_debug("msk=%p nr_active=%d ssk=%p:%lld backup=%p:%lld",
1149 msk, nr_active, send_info[0].ssk, send_info[0].ratio,
1150 send_info[1].ssk, send_info[1].ratio);
1154 send_info[0].ssk = send_info[1].ssk;
1156 if (send_info[0].ssk) {
1157 msk->last_snd = send_info[0].ssk;
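
Lines 1083-1158 belong to the subflow send scheduler: for each eligible subflow it computes ratio = (sk_wmem_queued << 32) / sk_pacing_rate, an estimate of how long already-queued data will take to drain, tracks the best candidate separately for normal and backup subflows, and uses the backup slot only when no active subflow is available. A toy standalone version of that selection, with invented subflow names and rates:

/* Pick the subflow whose queued data will drain soonest; prefer
 * non-backup subflows whenever any active one exists. */
#include <stdint.h>
#include <stdio.h>

struct subflow {
        const char *name;
        uint64_t wmem_queued;   /* bytes already queued on the subflow */
        uint64_t pacing_rate;   /* bytes per second                    */
        int backup;             /* 1 = backup path                     */
};

int main(void)
{
        struct subflow sf[] = {
                { "wifi",     64000, 12500000, 0 },
                { "lte",      16000,  2500000, 0 },
                { "fallback",   100, 10000000, 1 },
        };
        uint64_t best_ratio[2] = { UINT64_MAX, UINT64_MAX };
        const char *best[2] = { NULL, NULL };
        int nr_active = 0;

        for (int i = 0; i < 3; i++) {
                uint64_t ratio = (sf[i].wmem_queued << 32) / sf[i].pacing_rate;

                nr_active += !sf[i].backup;
                if (ratio < best_ratio[sf[i].backup]) {
                        best_ratio[sf[i].backup] = ratio;
                        best[sf[i].backup] = sf[i].name;
                }
        }
        /* fall back to the backup slot only if no active subflow qualified */
        printf("selected: %s\n", nr_active && best[0] ? best[0] : best[1]);
        return 0;
}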
1177 struct sock *ssk;
1205 ssk = mptcp_subflow_get_send(msk, &sndbuf);
1207 !ssk ||
1208 !mptcp_page_frag_refill(ssk, pfrag)) {
1209 if (ssk) {
1217 mptcp_set_timeout(sk, ssk);
1229 ssk = mptcp_subflow_get_send(msk, &sndbuf);
1241 pr_debug("conn_list->subflow=%p", ssk);
1243 lock_sock(ssk);
1246 ret = mptcp_sendmsg_frag(sk, ssk, msg, NULL, &timeo, &mss_now,
1250 mptcp_set_timeout(sk, ssk);
1251 release_sock(ssk);
1267 if (!sk_stream_memory_free(ssk) ||
1268 !mptcp_page_frag_refill(ssk, pfrag) ||
1270 tcp_push(ssk, msg->msg_flags, mss_now,
1271 tcp_sk(ssk)->nonagle, size_goal);
1272 mptcp_set_timeout(sk, ssk);
1273 release_sock(ssk);
1291 tcp_push(ssk, msg->msg_flags, mss_now,
1292 tcp_sk(ssk)->nonagle, size_goal);
1300 mptcp_set_timeout(sk, ssk);
1301 release_sock(ssk);
1307 mptcp_set_timeout(sk, ssk);
1309 tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle,
1317 release_sock(ssk);
1453 * exceed ssk->sk_rcvbuf).
1456 struct sock *ssk;
1459 ssk = mptcp_subflow_tcp_sock(subflow);
1460 slow = lock_sock_fast(ssk);
1461 WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
1462 tcp_sk(ssk)->window_clamp = window_clamp;
1463 tcp_cleanup_rbuf(ssk, 1);
1464 unlock_sock_fast(ssk, slow);
1486 struct sock *ssk = mptcp_subflow_recv_lookup(msk);
1488 if (!ssk)
1491 lock_sock(ssk);
1492 done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
1493 release_sock(ssk);
1586 /* .. race-breaker: ssk might have gotten new data
1653 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1659 if (!tcp_write_queue_empty(ssk)) {
1660 if (inet_csk(ssk)->icsk_ca_state >= TCP_CA_Loss)
1667 backup = ssk;
1671 return ssk;
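
mptcp_subflow_get_retrans() (lines 1653-1671) prefers a subflow that still has data in its TCP write queue and whose congestion state is below TCP_CA_Loss; a subflow with an empty queue is only remembered as a backup and returned if nothing better turns up. A standalone sketch of that choice; field and function names are illustrative:

/* Retransmit-path choice: queued data plus a healthy congestion state
 * wins outright, otherwise fall back to the remembered backup. */
#include <stdbool.h>
#include <stdio.h>

enum { CA_OPEN = 0, CA_LOSS = 4 };      /* mirrors TCP_CA_Open / TCP_CA_Loss */

struct subflow {
        const char *name;
        bool write_queue_empty;
        int ca_state;
};

static const char *pick_retrans(struct subflow *sf, int n)
{
        const char *backup = NULL;

        for (int i = 0; i < n; i++) {
                if (!sf[i].write_queue_empty) {
                        if (sf[i].ca_state >= CA_LOSS)
                                continue;       /* already recovering, skip it        */
                        return sf[i].name;      /* best candidate: queued and healthy */
                }
                if (!backup)
                        backup = sf[i].name;
        }
        return backup;
}

int main(void)
{
        struct subflow sf[] = {
                { "lte",  false, CA_LOSS },
                { "wifi", false, CA_OPEN },
        };
        printf("retransmit on: %s\n", pick_retrans(sf, 2));     /* wifi */
        return 0;
}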
1685 void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
1689 struct socket *sock = READ_ONCE(ssk->sk_socket);
1698 tcp_close(ssk, timeout);
1739 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1741 if (inet_sk_state_load(ssk) != TCP_CLOSE)
1744 __mptcp_close_ssk((struct sock *)msk, ssk, subflow, 0);
1751 struct sock *ssk, *sk = &msk->sk.icsk_inet.sk;
1788 ssk = mptcp_subflow_get_retrans(msk);
1789 if (!ssk)
1792 lock_sock(ssk);
1798 int ret = mptcp_sendmsg_frag(sk, ssk, &msg, dfrag, &timeo,
1812 tcp_push(ssk, msg.msg_flags, mss_now, tcp_sk(ssk)->nonagle,
1819 mptcp_set_timeout(sk, ssk);
1820 release_sock(ssk);
1900 void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
1902 lock_sock(ssk);
1904 switch (ssk->sk_state) {
1910 tcp_disconnect(ssk, O_NONBLOCK);
1915 ssk->sk_shutdown |= how;
1916 tcp_shutdown(ssk, how);
1918 pr_debug("Sending DATA_FIN on subflow %p", ssk);
1919 mptcp_set_timeout(sk, ssk);
1920 tcp_send_ack(ssk);
1925 release_sock(ssk);
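
mptcp_subflow_shutdown() (lines 1900-1925) branches on the subflow state: a socket that has not completed the handshake is simply disconnected with tcp_disconnect(), while an established one has sk_shutdown updated, tcp_shutdown() called, and a DATA_FIN signalled via tcp_send_ack(). A simplified standalone sketch of that decision; the real branch also looks at the `how` argument, which is omitted here:

/* Shutdown decision: pre-handshake subflows are torn down, established
 * ones are shut down and asked to carry the DATA_FIN. */
#include <stdio.h>

enum { TCP_ESTABLISHED = 1, TCP_SYN_SENT = 2, TCP_LISTEN = 10 };

static const char *shutdown_action(int sk_state)
{
        switch (sk_state) {
        case TCP_LISTEN:
        case TCP_SYN_SENT:
                return "tcp_disconnect";                /* never carried data             */
        default:
                return "tcp_shutdown + DATA_FIN ack";   /* orderly close + msk-level FIN  */
        }
}

int main(void)
{
        printf("%s\n", shutdown_action(TCP_SYN_SENT));
        printf("%s\n", shutdown_action(TCP_ESTABLISHED));
        return 0;
}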
2004 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2005 __mptcp_close_ssk(sk, ssk, subflow, timeout);
2015 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
2018 const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
2021 msk->sk_v6_daddr = ssk->sk_v6_daddr;
2022 msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;
2030 inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
2031 inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
2032 inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
2033 inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
2034 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
2035 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
2104 void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
2106 const struct tcp_sock *tp = tcp_sk(ssk);
2142 struct sock *ssk = newsk;
2164 mptcp_copy_inaddrs(newsk, ssk);
2167 mptcp_rcv_space_init(msk, ssk);
2304 struct sock *ssk;
2321 ssk = __mptcp_tcp_fallback(msk);
2323 if (ssk)
2324 return tcp_setsockopt(ssk, level, optname, optval, optlen);
2336 struct sock *ssk;
2347 ssk = __mptcp_tcp_fallback(msk);
2349 if (ssk)
2350 return tcp_getsockopt(ssk, level, optname, optval, option);
2376 struct sock *ssk;
2378 ssk = mptcp_subflow_recv_lookup(msk);
2379 if (!ssk || !schedule_work(&msk->work))
2416 void mptcp_finish_connect(struct sock *ssk)
2423 subflow = mptcp_subflow_ctx(ssk);
2446 mptcp_rcv_space_init(msk, ssk);
2674 /* set ssk->sk_socket of accept()ed flows to mptcp socket.
2679 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2681 if (!ssk->sk_socket)
2682 mptcp_sock_graft(ssk, newsock);