Lines Matching refs:msk
34 struct mptcp_sock msk;
53 static u64 mptcp_wnd_end(const struct mptcp_sock *msk)
55 return READ_ONCE(msk->wnd_end);
82 static int __mptcp_socket_create(struct mptcp_sock *msk)
85 struct sock *sk = (struct sock *)msk;
93 msk->scaling_ratio = tcp_sk(ssock->sk)->scaling_ratio;
94 WRITE_ONCE(msk->first, ssock->sk);
96 list_add(&subflow->node, &msk->conn_list);
99 subflow->subflow_id = msk->subflow_id++;
103 mptcp_sock_graft(msk->first, sk->sk_socket);
112 struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk)
114 struct sock *sk = (struct sock *)msk;
120 if (!msk->first) {
121 ret = __mptcp_socket_create(msk);
125 mptcp_sockopt_sync(msk, msk->first);
128 return msk->first;
174 static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
180 return mptcp_try_coalesce((struct sock *)msk, to, from);
192 struct mptcp_sock *msk = mptcp_sk(sk);
196 reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk);
225 static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
227 struct sock *sk = (struct sock *)msk;
234 max_seq = atomic64_read(&msk->rcv_wnd_sent);
236 pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
237 RB_EMPTY_ROOT(&msk->out_of_order_queue));
243 (unsigned long long)atomic64_read(&msk->rcv_wnd_sent));
248 p = &msk->out_of_order_queue.rb_node;
250 if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) {
252 rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
253 msk->ooo_last_skb = skb;
260 if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) {
267 if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) {
269 parent = &msk->ooo_last_skb->rbnode;
301 &msk->out_of_order_queue);
306 } else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) {
316 rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
323 rb_erase(&skb1->rbnode, &msk->out_of_order_queue);
329 msk->ooo_last_skb = skb;
338 struct mptcp_sock *msk = mptcp_sk(sk);
341 if (size <= msk->rmem_fwd_alloc)
344 size -= msk->rmem_fwd_alloc;
354 static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
359 struct sock *sk = (struct sock *)msk;
383 if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
385 msk->bytes_received += copy_len;
386 WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len);
394 } else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) {
395 mptcp_data_queue_ofo(msk, skb);
431 struct mptcp_sock *msk = mptcp_sk(sk);
435 msk->write_seq == READ_ONCE(msk->snd_una);
440 struct mptcp_sock *msk = mptcp_sk(sk);
444 WRITE_ONCE(msk->snd_data_fin_enable, 0);
462 struct mptcp_sock *msk = mptcp_sk(sk);
464 if (READ_ONCE(msk->rcv_data_fin) &&
467 u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);
469 if (msk->ack_seq == rcv_data_fin_seq) {
535 static void mptcp_send_ack(struct mptcp_sock *msk)
539 mptcp_for_each_subflow(msk, subflow)
566 static void mptcp_cleanup_rbuf(struct mptcp_sock *msk)
568 int old_space = READ_ONCE(msk->old_wspace);
570 struct sock *sk = (struct sock *)msk;
577 mptcp_for_each_subflow(msk, subflow) {
587 struct mptcp_sock *msk = mptcp_sk(sk);
593 * msk->rcv_data_fin was set when parsing the incoming options
594 * at the subflow level and the msk lock was not held, so this
596 * the msk state.
605 WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1);
606 WRITE_ONCE(msk->rcv_data_fin, 0);
628 if (!__mptcp_check_fallback(msk))
629 mptcp_send_ack(msk);
635 static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
640 struct sock *sk = (struct sock *)msk;
658 pr_debug("msk=%p ssk=%p", msk, ssk);
681 if (__mptcp_check_fallback(msk)) {
703 if (__mptcp_move_skb(msk, ssk, skb, offset, len))
728 static bool __mptcp_ofo_queue(struct mptcp_sock *msk)
730 struct sock *sk = (struct sock *)msk;
736 p = rb_first(&msk->out_of_order_queue);
737 pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
740 if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
744 rb_erase(&skb->rbnode, &msk->out_of_order_queue);
747 msk->ack_seq))) {
755 if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) {
756 int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
760 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
766 msk->bytes_received += end_seq - msk->ack_seq;
767 msk->ack_seq = end_seq;
790 * destroy the msk as needed.
806 struct mptcp_sock *msk = mptcp_sk(sk);
808 mptcp_for_each_subflow(msk, subflow)
816 static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
818 struct sock *sk = (struct sock *)msk;
821 __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
822 __mptcp_ofo_queue(msk);
827 __set_bit(MPTCP_ERROR_REPORT, &msk->cb_flags);
843 struct mptcp_sock *msk = mptcp_sk(sk);
847 * subflow at msk destruction time, but we must avoid enqueuing
848 * more data to the msk receive queue
858 /* over limit? can't append more skbs to msk; also, no need to wake up */
866 if (move_skbs_to_msk(msk, ssk))
872 static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk)
874 mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq);
875 WRITE_ONCE(msk->allow_infinite_fallback, false);
876 mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC);
879 static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk)
881 struct sock *sk = (struct sock *)msk;
886 /* attach to msk socket only after we are sure we will deal with it
892 mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++;
893 mptcp_sockopt_sync_locked(msk, ssk);
894 mptcp_subflow_joined(msk, ssk);
903 struct mptcp_sock *msk = mptcp_sk(sk);
909 list_move_tail(&subflow->node, &msk->conn_list);
910 if (!__mptcp_finish_join(msk, ssk))
947 static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
951 msk_owned_by_me(msk);
953 mptcp_for_each_subflow(msk, subflow) {
980 static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
987 df->data_seq + df->data_len == msk->write_seq;
1007 struct mptcp_sock *msk = mptcp_sk(sk);
1011 snd_una = msk->snd_una;
1012 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
1016 if (unlikely(dfrag == msk->first_pending)) {
1018 if (WARN_ON_ONCE(!msk->recovery))
1021 WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
1033 if (WARN_ON_ONCE(!msk->recovery))
1049 if (unlikely(msk->recovery) && after64(msk->snd_una, msk->recovery_snd_nxt))
1050 msk->recovery = false;
1053 if (snd_una == READ_ONCE(msk->snd_nxt) &&
1054 snd_una == READ_ONCE(msk->write_seq)) {
1055 if (mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk))
1080 struct mptcp_sock *msk = mptcp_sk(sk);
1083 mptcp_for_each_subflow(msk, subflow) {
1109 mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
1117 dfrag->data_seq = msk->write_seq;
1135 static int mptcp_check_allowed_size(const struct mptcp_sock *msk, struct sock *ssk,
1138 u64 window_end = mptcp_wnd_end(msk);
1141 if (__mptcp_check_fallback(msk))
1220 static void mptcp_update_infinite_map(struct mptcp_sock *msk,
1232 pr_fallback(msk);
1244 struct mptcp_sock *msk = mptcp_sk(sk);
1253 pr_debug("msk=%p ssk=%p sending dfrag at seq=%llu len=%u already sent=%u",
1254 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent);
1304 copy = mptcp_check_allowed_size(msk, ssk, data_seq, copy);
1306 u64 snd_una = READ_ONCE(msk->snd_una);
1308 if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) {
1361 if (READ_ONCE(msk->csum_enabled))
1367 if (READ_ONCE(msk->csum_enabled))
1370 mptcp_update_infinite_map(msk, ssk, mpext);
1417 struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
1421 struct sock *sk = (struct sock *)msk;
1434 mptcp_for_each_subflow(msk, subflow) {
1478 burst = min_t(int, MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt);
1487 msk->snd_burst = burst;
1497 static void mptcp_update_post_push(struct mptcp_sock *msk,
1505 msk->snd_burst -= sent;
1518 if (likely(after64(snd_nxt_new, msk->snd_nxt))) {
1519 msk->bytes_sent += snd_nxt_new - msk->snd_nxt;
1520 msk->snd_nxt = snd_nxt_new;
1536 struct mptcp_sock *msk = mptcp_sk(sk);
1557 mptcp_update_post_push(msk, dfrag, ret);
1559 WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
1561 if (msk->snd_burst <= 0 ||
1578 struct mptcp_sock *msk = mptcp_sk(sk);
1589 if (mptcp_sched_get_send(msk))
1594 mptcp_for_each_subflow(msk, subflow) {
1642 struct mptcp_sock *msk = mptcp_sk(sk);
1668 if (mptcp_sched_get_send(msk))
1679 mptcp_for_each_subflow(msk, subflow) {
1701 if (msk->snd_data_fin_enable &&
1702 msk->snd_nxt + 1 == msk->write_seq)
1722 struct mptcp_sock *msk = mptcp_sk(sk);
1733 ssk = __mptcp_nmpc_sk(msk);
1737 if (!msk->first)
1740 ssk = msk->first;
1744 msk->fastopening = 1;
1746 msk->fastopening = 0;
1776 struct mptcp_sock *msk = mptcp_sk(sk);
1823 dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
1831 dfrag = mptcp_carve_data_frag(msk, pfrag, pfrag->offset);
1859 WRITE_ONCE(msk->write_seq, msk->write_seq + psize);
1861 /* charge data on mptcp pending queue to the msk socket
1867 list_add_tail(&dfrag->list, &msk->rtx_queue);
1868 if (!msk->first_pending)
1869 WRITE_ONCE(msk->first_pending, dfrag);
1871 pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d", msk,
1900 static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
1909 skb_queue_walk_safe(&msk->receive_queue, skb, tmp) {
1942 WRITE_ONCE(msk->rmem_released, msk->rmem_released + skb->truesize);
1943 __skb_unlink(skb, &msk->receive_queue);
1958 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
1961 struct sock *sk = (struct sock *)msk;
1966 msk_owned_by_me(msk);
1971 if (!msk->rcvspace_init)
1972 mptcp_rcv_space_init(msk, msk->first);
1974 msk->rcvq_space.copied += copied;
1977 time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time);
1979 rtt_us = msk->rcvq_space.rtt_us;
1984 mptcp_for_each_subflow(msk, subflow) {
1999 msk->rcvq_space.rtt_us = rtt_us;
2000 msk->scaling_ratio = scaling_ratio;
2004 if (msk->rcvq_space.copied <= msk->rcvq_space.space)
2012 rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss;
2014 grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space);
2016 do_div(grow, msk->rcvq_space.space);
2033 mptcp_for_each_subflow(msk, subflow) {
2047 msk->rcvq_space.space = msk->rcvq_space.copied;
2049 msk->rcvq_space.copied = 0;
2050 msk->rcvq_space.time = mstamp;
2055 struct mptcp_sock *msk = mptcp_sk(sk);
2057 if (!msk->rmem_released)
2060 atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc);
2061 mptcp_rmem_uncharge(sk, msk->rmem_released);
2062 WRITE_ONCE(msk->rmem_released, 0);
2067 struct mptcp_sock *msk = mptcp_sk(sk);
2069 skb_queue_splice_tail_init(&sk->sk_receive_queue, &msk->receive_queue);
2072 static bool __mptcp_move_skbs(struct mptcp_sock *msk)
2074 struct sock *sk = (struct sock *)msk;
2079 struct sock *ssk = mptcp_subflow_recv_lookup(msk);
2082 /* we can have data pending in the subflows only if the msk
2092 done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
2102 if (!RB_EMPTY_ROOT(&msk->out_of_order_queue) ||
2106 ret |= __mptcp_ofo_queue(msk);
2111 mptcp_check_data_fin((struct sock *)msk);
2112 return !skb_queue_empty(&msk->receive_queue);
2117 const struct mptcp_sock *msk = mptcp_sk(sk);
2120 skb = skb_peek(&msk->receive_queue);
2122 u64 hint_val = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
2139 struct mptcp_sock *msk = mptcp_sk(sk);
2160 if (unlikely(msk->recvmsg_inq))
2166 bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied, flags, &tss, &cmsg_flags);
2176 mptcp_cleanup_rbuf(msk);
2178 if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk))
2204 if (__mptcp_move_skbs(msk))
2241 pr_debug("msk=%p rx queue empty=%d:%d copied=%d",
2242 msk, skb_queue_empty_lockless(&sk->sk_receive_queue),
2243 skb_queue_empty(&msk->receive_queue), copied);
2245 mptcp_rcv_space_adjust(msk, copied);
2256 struct mptcp_sock *msk = mptcp_sk(sk);
2261 if (!test_and_set_bit(MPTCP_WORK_RTX, &msk->flags))
2265 __set_bit(MPTCP_RETRANSMIT, &msk->cb_flags);
2284 struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk)
2290 mptcp_for_each_subflow(msk, subflow) {
2298 mptcp_pm_subflow_chk_stale(msk, ssk);
2323 struct mptcp_sock *msk = mptcp_sk(sk);
2325 if (__mptcp_check_fallback(msk))
2340 msk->recovery_snd_nxt = msk->snd_nxt;
2341 msk->recovery = true;
2344 msk->first_pending = rtx_head;
2345 msk->snd_burst = 0;
2348 list_for_each_entry(cur, &msk->rtx_queue, list) {
2393 struct mptcp_sock *msk = mptcp_sk(sk);
2401 if (msk->in_accept_queue && msk->first == ssk &&
2403 /* ensure later check in mptcp_worker() will dispose the msk */
2411 dispose_it = msk->free_first || ssk != msk->first;
2417 if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) {
2438 * reference owned by msk;
2457 if (ssk == msk->first)
2458 WRITE_ONCE(msk->first, NULL);
2470 if (list_is_singular(&msk->conn_list) && msk->first &&
2471 inet_sk_state_load(msk->first) == TCP_CLOSE) {
2473 msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) {
2504 struct mptcp_sock *msk = mptcp_sk(sk);
2508 mptcp_for_each_subflow_safe(msk, subflow, tmp) {
2533 static void mptcp_check_fastclose(struct mptcp_sock *msk)
2536 struct sock *sk = (struct sock *)msk;
2538 if (likely(!READ_ONCE(msk->rcv_fastclose)))
2541 mptcp_token_destroy(msk);
2543 mptcp_for_each_subflow_safe(msk, subflow, tmp) {
2572 set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags);
2584 struct mptcp_sock *msk = mptcp_sk(sk);
2595 err = mptcp_sched_get_retrans(msk);
2598 if (mptcp_data_fin_enabled(msk)) {
2603 mptcp_send_ack(msk);
2617 mptcp_for_each_subflow(msk, subflow) {
2629 info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len :
2644 WRITE_ONCE(msk->allow_infinite_fallback, false);
2651 msk->bytes_retrans += len;
2664 void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout)
2666 struct sock *sk = (struct sock *)msk;
2683 static void mptcp_mp_fail_no_response(struct mptcp_sock *msk)
2685 struct sock *ssk = msk->first;
2702 struct mptcp_sock *msk = mptcp_sk(sk);
2705 mptcp_for_each_subflow_safe(msk, subflow, tmp)
2712 struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
2713 struct sock *sk = (struct sock *)msk;
2722 mptcp_check_fastclose(msk);
2724 mptcp_pm_nl_work(msk);
2730 if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
2743 if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
2746 fail_tout = msk->first ? READ_ONCE(mptcp_subflow_ctx(msk->first)->fail_tout) : 0;
2748 mptcp_mp_fail_no_response(msk);
2757 struct mptcp_sock *msk = mptcp_sk(sk);
2759 INIT_LIST_HEAD(&msk->conn_list);
2760 INIT_LIST_HEAD(&msk->join_list);
2761 INIT_LIST_HEAD(&msk->rtx_queue);
2762 INIT_WORK(&msk->work, mptcp_worker);
2763 __skb_queue_head_init(&msk->receive_queue);
2764 msk->out_of_order_queue = RB_ROOT;
2765 msk->first_pending = NULL;
2766 msk->rmem_fwd_alloc = 0;
2767 WRITE_ONCE(msk->rmem_released, 0);
2768 msk->timer_ival = TCP_RTO_MIN;
2770 WRITE_ONCE(msk->first, NULL);
2772 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
2773 WRITE_ONCE(msk->allow_infinite_fallback, true);
2774 msk->recovery = false;
2775 msk->subflow_id = 1;
2777 mptcp_pm_data_init(msk);
2780 timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
2830 struct mptcp_sock *msk = mptcp_sk(sk);
2833 WRITE_ONCE(msk->first_pending, NULL);
2834 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
2840 struct mptcp_sock *msk = mptcp_sk(sk);
2842 if (cancel_work_sync(&msk->work))
2929 struct mptcp_sock *msk = mptcp_sk(sk);
2931 pr_debug("msk=%p snd_data_fin_enable=%d pending=%d snd_nxt=%llu write_seq=%llu",
2932 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk),
2933 msk->snd_nxt, msk->write_seq);
2938 if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq ||
2942 WRITE_ONCE(msk->snd_nxt, msk->write_seq);
2944 mptcp_for_each_subflow(msk, subflow) {
2953 struct mptcp_sock *msk = mptcp_sk(sk);
2955 pr_debug("msk=%p snd_data_fin_enable=%d shutdown=%x state=%d pending=%d",
2956 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state,
2960 WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
2961 WRITE_ONCE(msk->snd_data_fin_enable, 1);
2968 struct mptcp_sock *msk = mptcp_sk(sk);
2970 pr_debug("msk=%p", msk);
2976 msk->pm.status = 0;
2977 mptcp_release_sched(msk);
2981 WARN_ON_ONCE(msk->rmem_fwd_alloc);
2982 WARN_ON_ONCE(msk->rmem_released);
2996 static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
3001 if (skb_queue_empty_lockless(&((struct sock *)msk)->sk_receive_queue) &&
3002 skb_queue_empty_lockless(&msk->receive_queue))
3031 struct mptcp_sock *msk = mptcp_sk(sk);
3043 if (mptcp_check_readable(msk) || timeout < 0) {
3044 /* If the msk has read data, or the caller explicitly ask it,
3057 mptcp_for_each_subflow(msk, subflow) {
3066 if (ssk == msk->first)
3078 /* all the subflows are closed, only timeout can change the msk
3085 pr_debug("msk=%p state=%d", sk, sk->sk_state);
3086 if (msk->token)
3087 mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
3113 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
3117 struct ipv6_pinfo *msk6 = inet6_sk(msk);
3119 msk->sk_v6_daddr = ssk->sk_v6_daddr;
3120 msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;
3128 inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
3129 inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
3130 inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
3131 inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
3132 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
3133 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
3138 struct mptcp_sock *msk = mptcp_sk(sk);
3142 * msk->firstsocket lock).
3144 if (msk->fastopening)
3153 if (msk->token)
3154 mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
3156 /* msk->subflow is still intact, the following will not free the first
3159 mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE);
3160 WRITE_ONCE(msk->flags, 0);
3161 msk->cb_flags = 0;
3162 msk->recovery = false;
3163 msk->can_ack = false;
3164 msk->fully_established = false;
3165 msk->rcv_data_fin = false;
3166 msk->snd_data_fin_enable = false;
3167 msk->rcv_fastclose = false;
3168 msk->use_64bit_ack = false;
3169 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk)));
3170 mptcp_pm_data_reset(msk);
3172 msk->bytes_acked = 0;
3173 msk->bytes_received = 0;
3174 msk->bytes_sent = 0;
3175 msk->bytes_retrans = 0;
3176 msk->rcvspace_init = 0;
3242 struct mptcp_sock *msk;
3261 msk = mptcp_sk(nsk);
3262 msk->local_key = subflow_req->local_key;
3263 msk->token = subflow_req->token;
3264 msk->in_accept_queue = 1;
3265 WRITE_ONCE(msk->fully_established, false);
3267 WRITE_ONCE(msk->csum_enabled, true);
3269 msk->write_seq = subflow_req->idsn + 1;
3270 msk->snd_nxt = msk->write_seq;
3271 msk->snd_una = msk->write_seq;
3272 msk->wnd_end = msk->snd_nxt + tcp_sk(ssk)->snd_wnd;
3273 msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;
3274 mptcp_init_sched(msk, mptcp_sk(sk)->sched);
3276 /* passive msk is created after the first/MPC subflow */
3277 msk->subflow_id = 2;
3282 /* this can't race with mptcp_close(), as the msk is
3287 /* The msk maintain a ref to each subflow in the connections list */
3288 WRITE_ONCE(msk->first, ssk);
3290 list_add(&subflow->node, &msk->conn_list);
3296 mptcp_token_accept(subflow_req, msk);
3298 /* set msk addresses early to ensure mptcp_pm_get_local_id()
3304 mptcp_rcv_space_init(msk, ssk);
3307 __mptcp_subflow_fully_established(msk, subflow, mp_opt);
3314 void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
3318 msk->rcvspace_init = 1;
3319 msk->rcvq_space.copied = 0;
3320 msk->rcvq_space.rtt_us = 0;
3322 msk->rcvq_space.time = tp->tcp_mstamp;
3325 msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
3327 if (msk->rcvq_space.space == 0)
3328 msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
3369 void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
3372 struct sock *sk = (struct sock *)msk;
3377 mptcp_for_each_subflow_safe(msk, subflow, tmp)
3382 skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue);
3384 skb_rbtree_purge(&msk->out_of_order_queue);
3390 sk_forward_alloc_add(sk, msk->rmem_fwd_alloc);
3391 WRITE_ONCE(msk->rmem_fwd_alloc, 0);
3392 mptcp_token_destroy(msk);
3393 mptcp_pm_free_anno_list(msk);
3394 mptcp_free_local_addr_list(msk);
3399 struct mptcp_sock *msk = mptcp_sk(sk);
3402 msk->free_first = 1;
3403 mptcp_destroy_common(msk, 0);
3437 struct mptcp_sock *msk = mptcp_sk(sk);
3440 unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED);
3447 list_splice_init(&msk->join_list, &join_list);
3452 * 2) must avoid ABBA deadlock with msk socket spinlock: the RX
3453 * datapath acquires the msk socket spinlock while holding
3456 msk->cb_flags &= ~flags;
3470 if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags))
3472 if (unlikely(msk->cb_flags)) {
3473 /* be sure to sync the msk state before taking actions
3477 if (__test_and_clear_bit(MPTCP_SYNC_STATE, &msk->cb_flags) && msk->first)
3478 __mptcp_sync_state(sk, msk->pending_state);
3479 if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags))
3481 if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags))
3555 struct mptcp_sock *msk = mptcp_sk(sk);
3557 pr_debug("msk=%p, ssk=%p", msk, msk->first);
3558 if (WARN_ON_ONCE(!msk->first))
3561 return inet_csk_get_port(msk->first, snum);
3567 struct mptcp_sock *msk;
3572 msk = mptcp_sk(sk);
3574 pr_debug("msk=%p, token=%u", sk, subflow->token);
3579 /* the socket is not connected yet, no msk/subflow ops can access/race
3582 WRITE_ONCE(msk->local_key, subflow->local_key);
3584 mptcp_pm_new_connection(msk, ssk, 0);
3599 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
3600 struct sock *parent = (void *)msk;
3603 pr_debug("msk=%p, subflow=%p", msk, subflow);
3613 mptcp_subflow_joined(msk, ssk);
3618 if (!mptcp_pm_allow_new_subflow(msk))
3621 /* If we can't acquire msk socket lock here, let the release callback
3626 ret = __mptcp_finish_join(msk, ssk);
3629 list_add_tail(&subflow->node, &msk->conn_list);
3633 list_add_tail(&subflow->node, &msk->join_list);
3634 __set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags);
3661 static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
3663 const struct sock *sk = (void *)msk;
3672 delta = msk->write_seq - v;
3673 if (__mptcp_check_fallback(msk) && msk->first) {
3674 struct tcp_sock *tp = tcp_sk(msk->first);
3680 if (!((1 << msk->first->sk_state) &
3692 struct mptcp_sock *msk = mptcp_sk(sk);
3701 __mptcp_move_skbs(msk);
3707 *karg = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una));
3712 *karg = mptcp_ioctl_outq(msk, msk->snd_nxt);
3722 static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
3726 __mptcp_do_fallback(msk);
3732 struct mptcp_sock *msk = mptcp_sk(sk);
3736 ssk = __mptcp_nmpc_sk(msk);
3747 mptcp_subflow_early_fallback(msk, subflow);
3751 mptcp_subflow_early_fallback(msk, subflow);
3753 if (likely(!__mptcp_check_fallback(msk)))
3759 if (!msk->fastopening)
3781 if (!msk->fastopening)
3784 /* on successful connect, the msk state will be moved to established by
3789 mptcp_token_destroy(msk);
3834 struct mptcp_sock *msk = mptcp_sk(sock->sk);
3839 ssk = __mptcp_nmpc_sk(msk);
3861 struct mptcp_sock *msk = mptcp_sk(sock->sk);
3866 pr_debug("msk=%p", msk);
3874 ssk = __mptcp_nmpc_sk(msk);
3902 struct mptcp_sock *msk = mptcp_sk(sock->sk);
3906 pr_debug("msk=%p", msk);
3911 ssk = READ_ONCE(msk->first);
3923 struct mptcp_sock *msk = mptcp_sk(newsk);
3927 msk->in_accept_queue = 0;
3932 mptcp_for_each_subflow(msk, subflow) {
3942 if (unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) {
3943 __mptcp_close_ssk(newsk, msk->first,
3944 mptcp_subflow_ctx(msk->first), 0);
3945 if (unlikely(list_is_singular(&msk->conn_list)))
3954 static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
3956 struct sock *sk = (struct sock *)msk;
3962 smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */
3973 struct mptcp_sock *msk;
3978 msk = mptcp_sk(sk);
3982 pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
3984 struct sock *ssk = READ_ONCE(msk->first);
3999 mask |= mptcp_check_readable(msk);
4003 mask |= mptcp_check_writeable(msk);
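
Note on the recurring pattern in the hits above: almost every match converts between the two views of the same socket with "(struct sock *)msk" and "mptcp_sk(sk)", which works because struct mptcp_sock embeds the generic connection socket as its first member. The following is a minimal, self-contained sketch of that idiom only; the struct layouts and the mptcp_sk() helper below are simplified stand-ins for illustration, not the actual definitions from net/mptcp/protocol.h.

/* Stand-alone model of the msk <-> sk conversion pattern; builds with any
 * C compiler.  Only the first-member property the casts rely on is modelled.
 */
#include <stdio.h>

struct sock {                       /* stand-in for the kernel's struct sock */
	int sk_state;
};

struct mptcp_sock {                 /* stand-in: struct sock must come first */
	struct sock sk;
	unsigned long long write_seq;
};

/* simplified model of mptcp_sk(): view a struct sock as its containing mptcp_sock */
static struct mptcp_sock *mptcp_sk(struct sock *sk)
{
	return (struct mptcp_sock *)sk;
}

int main(void)
{
	struct mptcp_sock m = { .sk = { .sk_state = 1 }, .write_seq = 42 };
	struct sock *sk = (struct sock *)&m;    /* msk -> sk, as in "(struct sock *)msk" */
	struct mptcp_sock *msk = mptcp_sk(sk);  /* sk -> msk, as in "mptcp_sk(sk)"       */

	printf("state=%d write_seq=%llu\n", sk->sk_state, msk->write_seq);
	return 0;
}

Because the embedded socket sits at offset zero, both casts refer to the same address, which is why the listing freely mixes msk-level fields (write_seq, ack_seq, conn_list, ...) with sk-level operations on the same object.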