Lines Matching refs:ssk

336 static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size)
354 static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
358 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
363 __skb_unlink(skb, &ssk->sk_receive_queue);
369 if (!mptcp_rmem_schedule(sk, ssk, skb->truesize))
498 const struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
500 return inet_csk(ssk)->icsk_pending && !subflow->stale_count ?
501 inet_csk(ssk)->icsk_timeout - jiffies : 0;
514 static inline bool tcp_can_send_ack(const struct sock *ssk)
516 return !((1 << inet_sk_state_load(ssk)) &
520 void __mptcp_subflow_send_ack(struct sock *ssk)
522 if (tcp_can_send_ack(ssk))
523 tcp_send_ack(ssk);
526 static void mptcp_subflow_send_ack(struct sock *ssk)
530 slow = lock_sock_fast(ssk);
531 __mptcp_subflow_send_ack(ssk);
532 unlock_sock_fast(ssk, slow);
543 static void mptcp_subflow_cleanup_rbuf(struct sock *ssk)
547 slow = lock_sock_fast(ssk);
548 if (tcp_can_send_ack(ssk))
549 tcp_cleanup_rbuf(ssk, 1);
550 unlock_sock_fast(ssk, slow);
553 static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty)
555 const struct inet_connection_sock *icsk = inet_csk(ssk);
557 const struct tcp_sock *tp = tcp_sk(ssk);
578 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
580 if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty))
581 mptcp_subflow_cleanup_rbuf(ssk);
636 struct sock *ssk,
639 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
650 int ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
658 pr_debug("msk=%p ssk=%p", msk, ssk);
659 tp = tcp_sk(ssk);
670 skb = skb_peek(&ssk->sk_receive_queue);
703 if (__mptcp_move_skb(msk, ssk, skb, offset, len))
711 sk_eat_skb(ssk, skb);
716 more_data_avail = mptcp_subflow_data_available(ssk);
773 static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk)
775 int err = sock_error(ssk);
792 ssk_state = inet_sk_state_load(ssk);
816 static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
821 __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
823 if (unlikely(ssk->sk_err)) {
840 void mptcp_data_ready(struct sock *sk, struct sock *ssk)
842 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
853 ssk_rbuf = READ_ONCE(ssk->sk_rcvbuf);
866 if (move_skbs_to_msk(msk, ssk))
872 static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk)
874 mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq);
876 mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
879 static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
889 if (sk->sk_socket && !ssk->sk_socket)
890 mptcp_sock_graft(ssk, sk->sk_socket);
892 mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++;
893 mptcp_sockopt_sync_locked(msk, ssk);
894 mptcp_subflow_joined(msk, ssk);
896 __mptcp_propagate_sndbuf(sk, ssk);
906 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
907 bool slow = lock_sock_fast(ssk);
910 if (!__mptcp_finish_join(msk, ssk))
911 mptcp_subflow_reset(ssk);
912 unlock_sock_fast(ssk, slow);
1084 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1087 tcp_enter_memory_pressure(ssk);
1088 sk_stream_moderate_sndbuf(ssk);
1135 static int mptcp_check_allowed_size(const struct mptcp_sock *msk, struct sock *ssk,
1147 if (unlikely(tcp_sk(ssk)->snd_wnd < mptcp_snd_wnd)) {
1148 tcp_sk(ssk)->snd_wnd = min_t(u64, U32_MAX, mptcp_snd_wnd);
1149 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_SNDWNDSHARED);
1184 static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp)
1192 if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
1193 tcp_skb_entail(ssk, skb);
1201 static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held)
1205 return __mptcp_alloc_tx_skb(sk, ssk, gfp);
1221 struct sock *ssk,
1230 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPTX);
1231 mptcp_subflow_ctx(ssk)->send_infinite_map = 0;
1233 mptcp_do_fallback(ssk);
1238 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
1253 pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
1254 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
1260 if (unlikely(!__tcp_can_send(ssk)))
1264 if (unlikely(ssk->sk_gso_max_size > MPTCP_MAX_GSO_SIZE))
1265 ssk->sk_gso_max_size = MPTCP_MAX_GSO_SIZE;
1266 info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags);
1269 skb = tcp_write_queue_tail(ssk);
1280 tcp_mark_push(tcp_sk(ssk), skb);
1287 tcp_mark_push(tcp_sk(ssk), skb);
1294 skb = mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held);
1304 copy = mptcp_check_allowed_size(msk, ssk, data_seq, copy);
1308 if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) {
1309 tcp_remove_empty_skb(ssk);
1319 if (!sk_wmem_schedule(ssk, copy)) {
1320 tcp_remove_empty_skb(ssk);
1334 sk_wmem_queued_add(ssk, copy);
1335 sk_mem_charge(ssk, copy);
1336 WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy);
1349 mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
1359 mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
1363 tcp_push_pending_frames(ssk);
1369 if (mptcp_subflow_ctx(ssk)->send_infinite_map)
1370 mptcp_update_infinite_map(msk, ssk, mpext);
1372 mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
1383 struct sock *ssk;
1424 struct sock *ssk;
1430 send_info[i].ssk = NULL;
1436 ssk = mptcp_subflow_tcp_sock(subflow);
1445 subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate);
1451 linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
1453 send_info[subflow->backup].ssk = ssk;
1461 send_info[SSK_MODE_ACTIVE].ssk = send_info[SSK_MODE_BACKUP].ssk;
1474 ssk = send_info[SSK_MODE_ACTIVE].ssk;
1475 if (!ssk || !sk_stream_memory_free(ssk))
1479 wmem = READ_ONCE(ssk->sk_wmem_queued);
1481 return ssk;
1483 subflow = mptcp_subflow_ctx(ssk);
1485 READ_ONCE(ssk->sk_pacing_rate) * burst,
1488 return ssk;
1491 static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info)
1493 tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal);
1494 release_sock(ssk);
1533 static int __subflow_push_pending(struct sock *sk, struct sock *ssk,
1547 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, info);
1562 !sk_stream_memory_free(ssk) ||
1563 !mptcp_subflow_active(mptcp_subflow_ctx(ssk))) {
1577 struct sock *prev_ssk = NULL, *ssk = NULL;
1598 prev_ssk = ssk;
1599 ssk = mptcp_subflow_tcp_sock(subflow);
1600 if (ssk != prev_ssk) {
1601 /* First check. If the ssk has changed since
1611 lock_sock(ssk);
1616 ret = __subflow_push_pending(sk, ssk, &info);
1619 (1 << ssk->sk_state) &
1630 if (ssk)
1631 mptcp_push_release(ssk, &info);
1640 static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk, bool first)
1652 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1660 ret = __subflow_push_pending(sk, ssk, &info);
1673 ret = __subflow_push_pending(sk, ssk, &info);
1682 if (xmit_ssk != ssk) {
1696 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
1723 struct sock *ssk;
1733 ssk = __mptcp_nmpc_sk(msk);
1734 if (IS_ERR(ssk))
1735 return PTR_ERR(ssk);
1740 ssk = msk->first;
1742 lock_sock(ssk);
1745 ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL);
1748 release_sock(ssk);
1750 /* do the blocking bits of inet_stream_connect outside the ssk socket lock */
1862 * Note: we charge such data both to sk and ssk
2031 * exceed ssk->sk_rcvbuf).
2034 struct sock *ssk;
2037 ssk = mptcp_subflow_tcp_sock(subflow);
2038 slow = lock_sock_fast(ssk);
2039 WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
2040 tcp_sk(ssk)->window_clamp = window_clamp;
2041 tcp_cleanup_rbuf(ssk, 1);
2042 unlock_sock_fast(ssk, slow);
2079 struct sock *ssk = mptcp_subflow_recv_lookup(msk);
2086 if (likely(!ssk))
2089 slowpath = lock_sock_fast(ssk);
2092 done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
2095 if (unlikely(ssk->sk_err))
2097 unlock_sock_fast(ssk, slowpath);
2291 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2297 if (!tcp_rtx_and_write_queues_empty(ssk)) {
2298 mptcp_pm_subflow_chk_stale(msk, ssk);
2305 backup = ssk;
2310 pick = ssk;
2365 static void __mptcp_subflow_disconnect(struct sock *ssk,
2369 if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
2374 WARN_ON_ONCE(tcp_disconnect(ssk, 0));
2377 tcp_shutdown(ssk, SEND_SHUTDOWN);
2389 static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
2401 if (msk->in_accept_queue && msk->first == ssk &&
2402 (sock_flag(sk, SOCK_DEAD) || sock_flag(ssk, SOCK_DEAD))) {
2406 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
2407 mptcp_subflow_drop_ctx(ssk);
2411 dispose_it = msk->free_first || ssk != msk->first;
2415 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
2421 ssk->sk_lingertime = 0;
2422 sock_set_flag(ssk, SOCK_LINGER);
2428 __mptcp_subflow_disconnect(ssk, subflow, flags);
2429 release_sock(ssk);
2436 /* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops
2437 * the ssk has been already destroyed, we just need to release the
2440 if (!inet_csk(ssk)->icsk_ulp_ops) {
2441 WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD));
2444 /* otherwise tcp will dispose of the ssk and subflow ctx */
2445 __tcp_close(ssk, 0);
2448 __sock_put(ssk);
2452 __mptcp_subflow_error_report(sk, ssk);
2453 release_sock(ssk);
2455 sock_put(ssk);
2457 if (ssk == msk->first)
2482 void mptcp_close_ssk(struct sock *sk, struct sock *ssk,
2486 mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL);
2491 mptcp_pm_subflow_check_next(mptcp_sk(sk), ssk, subflow);
2493 __mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH);
2509 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2511 if (inet_sk_state_load(ssk) != TCP_CLOSE)
2515 if (!skb_queue_empty_lockless(&ssk->sk_receive_queue))
2518 mptcp_close_ssk(sk, ssk, subflow);
2588 struct sock *ssk;
2594 /* first check ssk: need to kick "stale" logic */
2623 ssk = mptcp_subflow_tcp_sock(subflow);
2625 lock_sock(ssk);
2632 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
2642 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
2647 release_sock(ssk);
2685 struct sock *ssk = msk->first;
2688 if (!ssk)
2693 slow = lock_sock_fast(ssk);
2694 mptcp_subflow_reset(ssk);
2695 WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0);
2696 unlock_sock_fast(ssk, slow);
2846 void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how)
2848 lock_sock(ssk);
2850 switch (ssk->sk_state) {
2856 WARN_ON_ONCE(tcp_disconnect(ssk, O_NONBLOCK));
2861 ssk->sk_shutdown |= how;
2862 tcp_shutdown(ssk, how);
2870 pr_debug("Sending DATA_FIN on subflow %p", ssk);
2871 tcp_send_ack(ssk);
2878 release_sock(ssk);
3010 struct sock *ssk;
3016 ssk = mptcp_sk(sk)->first;
3017 if (WARN_ON_ONCE(!ssk || inet_sk_state_load(ssk) != TCP_LISTEN))
3020 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING);
3021 tcp_set_state(ssk, TCP_CLOSE);
3022 mptcp_subflow_queue_clean(sk, ssk);
3023 inet_csk_listen_stop(ssk);
3024 mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED);
3025 release_sock(ssk);
3058 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
3059 bool slow = lock_sock_fast_nested(ssk);
3061 subflows_alive += ssk->sk_state != TCP_CLOSE;
3066 if (ssk == msk->first)
3072 ssk->sk_socket = NULL;
3073 ssk->sk_wq = NULL;
3074 unlock_sock_fast(ssk, slow);
3113 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
3116 const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
3119 msk->sk_v6_daddr = ssk->sk_v6_daddr;
3120 msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;
3128 inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
3129 inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
3130 inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
3131 inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
3132 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
3133 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
3236 struct sock *ssk,
3272 msk->wnd_end = msk->snd_nxt + tcp_sk(ssk)->snd_wnd;
3288 WRITE_ONCE(msk->first, ssk);
3289 subflow = mptcp_subflow_ctx(ssk);
3291 sock_hold(ssk);
3301 mptcp_copy_inaddrs(nsk, ssk);
3302 __mptcp_propagate_sndbuf(nsk, ssk);
3304 mptcp_rcv_space_init(msk, ssk);
3314 void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
3316 const struct tcp_sock *tp = tcp_sk(ssk);
3331 static struct sock *mptcp_accept(struct sock *ssk, int flags, int *err,
3336 pr_debug("ssk=%p, listener=%p", ssk, mptcp_subflow_ctx(ssk));
3337 newsk = inet_csk_accept(ssk, flags, err, kern);
3358 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEPASSIVEACK);
3360 MPTCP_INC_STATS(sock_net(ssk),
3418 void __mptcp_check_push(struct sock *sk, struct sock *ssk)
3424 __mptcp_subflow_push_pending(sk, ssk, false);
3492 static void schedule_3rdack_retransmission(struct sock *ssk)
3494 struct inet_connection_sock *icsk = inet_csk(ssk);
3495 struct tcp_sock *tp = tcp_sk(ssk);
3498 if (mptcp_subflow_ctx(ssk)->fully_established)
3511 sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout);
3514 void mptcp_subflow_process_delegated(struct sock *ssk, long status)
3516 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
3522 __mptcp_subflow_push_pending(sk, ssk, true);
3536 schedule_3rdack_retransmission(ssk);
3557 pr_debug("msk=%p, ssk=%p", msk, msk->first);
3564 void mptcp_finish_connect(struct sock *ssk)
3570 subflow = mptcp_subflow_ctx(ssk);
3584 mptcp_pm_new_connection(msk, ssk, 0);
3596 bool mptcp_finish_join(struct sock *ssk)
3598 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
3613 mptcp_subflow_joined(msk, ssk);
3614 mptcp_propagate_sndbuf(parent, ssk);
3626 ret = __mptcp_finish_join(msk, ssk);
3628 sock_hold(ssk);
3632 sock_hold(ssk);
3734 struct sock *ssk;
3736 ssk = __mptcp_nmpc_sk(msk);
3737 if (IS_ERR(ssk))
3738 return PTR_ERR(ssk);
3741 subflow = mptcp_subflow_ctx(ssk);
3746 if (rcu_access_pointer(tcp_sk(ssk)->md5sig_info))
3749 if (subflow->request_mptcp && mptcp_token_new_connect(ssk)) {
3750 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_TOKENFALLBACKINIT);
3760 lock_sock(ssk);
3765 if (ssk->sk_state != TCP_CLOSE)
3768 if (BPF_CGROUP_PRE_CONNECT_ENABLED(ssk)) {
3769 err = ssk->sk_prot->pre_connect(ssk, uaddr, addr_len);
3774 err = ssk->sk_prot->connect(ssk, uaddr, addr_len);
3778 inet_assign_bit(DEFER_CONNECT, sk, inet_test_bit(DEFER_CONNECT, ssk));
3782 release_sock(ssk);
3794 mptcp_copy_inaddrs(sk, ssk);
3835 struct sock *ssk, *sk = sock->sk;
3839 ssk = __mptcp_nmpc_sk(msk);
3840 if (IS_ERR(ssk)) {
3841 err = PTR_ERR(ssk);
3846 err = inet_bind_sk(ssk, uaddr, addr_len);
3849 err = inet6_bind_sk(ssk, uaddr, addr_len);
3852 mptcp_copy_inaddrs(sk, ssk);
3863 struct sock *ssk;
3874 ssk = __mptcp_nmpc_sk(msk);
3875 if (IS_ERR(ssk)) {
3876 err = PTR_ERR(ssk);
3883 lock_sock(ssk);
3884 err = __inet_listen_sk(ssk, backlog);
3885 release_sock(ssk);
3886 mptcp_set_state(sk, inet_sk_state_load(ssk));
3890 mptcp_copy_inaddrs(sk, ssk);
3891 mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED);
3903 struct sock *ssk, *newsk;
3911 ssk = READ_ONCE(msk->first);
3912 if (!ssk)
3915 newsk = mptcp_accept(ssk, flags, &err, kern);
3929 /* set ssk->sk_socket of accept()ed flows to mptcp socket.
3933 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
3935 if (!ssk->sk_socket)
3936 mptcp_sock_graft(ssk, newsock);
3984 struct sock *ssk = READ_ONCE(msk->first);
3986 if (WARN_ON_ONCE(!ssk))
3989 return inet_csk_listen_poll(ssk);
4055 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
4057 bh_lock_sock_nested(ssk);
4058 if (!sock_owned_by_user(ssk)) {
4059 mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0));
4069 bh_unlock_sock(ssk);
4070 sock_put(ssk);
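
A pattern that recurs throughout the matches above is briefly operating on a subflow's TCP socket (ssk) under lock_sock_fast()/unlock_sock_fast(), as in mptcp_subflow_send_ack() around line 526 and mptcp_subflow_cleanup_rbuf() around line 543. The sketch below restates that pattern in isolation; it is an illustrative helper, not a function from the indexed MPTCP source. example_subflow_ack() is a hypothetical name, and the excluded-state mask inside the tcp_can_send_ack()-style check is reconstructed from memory rather than copied from the truncated match at line 516.

```c
/*
 * Minimal sketch of the lock_sock_fast()/unlock_sock_fast() pattern seen
 * in the listing above. Hypothetical helper; the TCPF_* mask below is an
 * assumption reconstructed from the truncated tcp_can_send_ack() match.
 */
#include <net/sock.h>
#include <net/tcp.h>

static void example_subflow_ack(struct sock *ssk)
{
	bool slow;

	/* Grab the subflow socket lock on the fast path if possible. */
	slow = lock_sock_fast(ssk);

	/* Only ACK in states where TCP may legitimately send one. */
	if (!((1 << inet_sk_state_load(ssk)) &
	      (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT |
	       TCPF_CLOSE | TCPF_LISTEN)))
		tcp_send_ack(ssk);

	/* Release, restoring the slow-path lock state if it was taken. */
	unlock_sock_fast(ssk, slow);
}
```

The design point the pattern illustrates: lock_sock_fast() avoids the full slow-path socket lock when the socket is uncontended, and the returned bool must be handed back to unlock_sock_fast() so the correct unlock path is taken.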