Lines matching refs:msk — cross-reference listing from the MPTCP protocol implementation; each entry is the source line number followed by the matching line.
29 struct mptcp_sock msk;
44 /* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
48 static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
50 if (!msk->subflow || READ_ONCE(msk->can_ack))
53 return msk->subflow;
80 static struct sock *__mptcp_tcp_fallback(struct mptcp_sock *msk)
82 sock_owned_by_me((const struct sock *)msk);
84 if (likely(!__mptcp_check_fallback(msk)))
87 return msk->first;
90 static int __mptcp_socket_create(struct mptcp_sock *msk)
93 struct sock *sk = (struct sock *)msk;
101 msk->first = ssock->sk;
102 msk->subflow = ssock;
104 list_add(&subflow->node, &msk->conn_list);
108 * via msk->sk_socket
110 RCU_INIT_POINTER(msk->first->sk_wq, &sk->sk_socket->wq);
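
Many of the hits below are the cast pattern struct sock *sk = (struct sock *)msk (lines 93, 156, 269, ...) and its inverse mptcp_sk(sk). This works because struct mptcp_sock starts with the generic socket (visible at line 1751 as msk->sk.icsk_inet.sk), so the two pointers refer to the same object. A minimal userspace sketch of the idiom, using stand-in types rather than the real kernel structures:

#include <stdio.h>

struct sock_like {                 /* stand-in for struct sock */
	int state;
};

struct mptcp_sock_like {           /* stand-in for struct mptcp_sock */
	struct sock_like sk;       /* must remain the first member   */
	unsigned long long write_seq;
};

static struct mptcp_sock_like *mptcp_sk_like(struct sock_like *sk)
{
	return (struct mptcp_sock_like *)sk;   /* downcast, as mptcp_sk() does */
}

int main(void)
{
	struct mptcp_sock_like msk = { .sk = { .state = 1 }, .write_seq = 100 };
	struct sock_like *sk = (struct sock_like *)&msk;   /* upcast */

	printf("state=%d write_seq=%llu\n",
	       sk->state, mptcp_sk_like(sk)->write_seq);
	return 0;
}
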
141 static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
147 return mptcp_try_coalesce((struct sock *)msk, to, from);
154 static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb)
156 struct sock *sk = (struct sock *)msk;
165 max_seq = space > 0 ? space + msk->ack_seq : msk->ack_seq;
167 pr_debug("msk=%p seq=%llx limit=%llx empty=%d", msk, seq, max_seq,
168 RB_EMPTY_ROOT(&msk->out_of_order_queue));
176 p = &msk->out_of_order_queue.rb_node;
178 if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) {
180 rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
181 msk->ooo_last_skb = skb;
188 if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) {
195 if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) {
197 parent = &msk->ooo_last_skb->rbnode;
229 &msk->out_of_order_queue);
234 } else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) {
244 rb_insert_color(&skb->rbnode, &msk->out_of_order_queue);
251 rb_erase(&skb1->rbnode, &msk->out_of_order_queue);
257 msk->ooo_last_skb = skb;
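
The ordering decisions in mptcp_data_queue_ofo() (lines 195, 306 and 558 in this listing) rely on before64()/after64(), defined elsewhere in the MPTCP headers. A self-contained sketch of the wrap-safe signed-difference idiom such helpers follow; treat it as illustrative, not a copy of the kernel definition:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool before64(uint64_t seq1, uint64_t seq2)
{
	return (int64_t)(seq1 - seq2) < 0;   /* signed diff survives wrap */
}

static bool after64(uint64_t seq1, uint64_t seq2)
{
	return before64(seq2, seq1);
}

int main(void)
{
	uint64_t ack_seq = UINT64_MAX - 5;   /* close to wrapping          */
	uint64_t map_seq = 10;               /* already wrapped around     */

	/* Despite the numeric values, map_seq is "after" ack_seq. */
	printf("after64=%d before64=%d\n",
	       after64(map_seq, ack_seq), before64(map_seq, ack_seq));
	return 0;
}
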
264 static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
269 struct sock *sk = (struct sock *)msk;
296 if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) {
298 WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len);
306 } else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) {
307 mptcp_data_queue_ofo(msk, skb);
330 struct mptcp_sock *msk = mptcp_sk(sk);
332 if (__mptcp_check_fallback(msk))
338 msk->write_seq == atomic64_read(&msk->snd_una)) {
341 WRITE_ONCE(msk->snd_data_fin_enable, 0);
365 struct mptcp_sock *msk = mptcp_sk(sk);
367 if (READ_ONCE(msk->rcv_data_fin) &&
370 u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);
372 if (msk->ack_seq == rcv_data_fin_seq) {
395 struct mptcp_sock *msk = mptcp_sk(sk);
398 if (__mptcp_check_fallback(msk) || !msk->first)
403 * msk->rcv_data_fin was set when parsing the incoming options
404 * at the subflow level and the msk lock was not held, so this
406 * the msk state.
417 WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1);
418 WRITE_ONCE(msk->rcv_data_fin, 0);
422 set_bit(MPTCP_DATA_READY, &msk->flags);
442 mptcp_for_each_subflow(msk, subflow) {
460 static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
465 struct sock *sk = (struct sock *)msk;
472 pr_debug("msk=%p ssk=%p", msk, ssk);
496 if (__mptcp_check_fallback(msk)) {
518 if (__mptcp_move_skb(msk, ssk, skb, offset, len))
546 static bool mptcp_ofo_queue(struct mptcp_sock *msk)
548 struct sock *sk = (struct sock *)msk;
554 p = rb_first(&msk->out_of_order_queue);
555 pr_debug("msk=%p empty=%d", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue));
558 if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq))
562 rb_erase(&skb->rbnode, &msk->out_of_order_queue);
565 msk->ack_seq))) {
573 if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) {
574 int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq;
578 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq,
583 msk->ack_seq = end_seq;
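
mptcp_ofo_queue() walks the out-of-order tree in order, stops at the first segment that still leaves a hole, and treats a segment straddling ack_seq as only partially new, hence the delta handling at lines 558-583. A hedged userspace sketch of that bookkeeping, using a made-up struct seg instead of skbs:

#include <stdint.h>
#include <stdio.h>

struct seg { uint64_t map_seq; uint32_t len; };

/* Returns the new ack_seq after trying to consume one queued segment. */
static uint64_t consume(uint64_t ack_seq, const struct seg *s)
{
	uint64_t end_seq = s->map_seq + s->len;
	uint64_t delta;

	if ((int64_t)(s->map_seq - ack_seq) > 0)
		return ack_seq;                 /* still a hole before it */
	if ((int64_t)(end_seq - ack_seq) <= 0)
		return ack_seq;                 /* fully duplicate, drop  */

	delta = ack_seq - s->map_seq;           /* overlapping prefix     */
	printf("new bytes=%u skipped=%llu\n",
	       (uint32_t)(s->len - delta), (unsigned long long)delta);
	return end_seq;
}

int main(void)
{
	struct seg s = { .map_seq = 95, .len = 10 };

	printf("ack_seq=%llu\n", (unsigned long long)consume(100, &s));
	return 0;
}
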
592 static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
594 struct sock *sk = (struct sock *)msk;
605 __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
606 mptcp_ofo_queue(msk);
614 schedule_work(&msk->work))
626 struct mptcp_sock *msk = mptcp_sk(sk);
635 set_bit(MPTCP_DATA_READY, &msk->flags);
638 move_skbs_to_msk(msk, ssk))
653 move_skbs_to_msk(msk, ssk);
660 static void __mptcp_flush_join_list(struct mptcp_sock *msk)
662 if (likely(list_empty(&msk->join_list)))
665 spin_lock_bh(&msk->join_list_lock);
666 list_splice_tail_init(&msk->join_list, &msk->conn_list);
667 spin_unlock_bh(&msk->join_list_lock);
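
Subflows created by MP_JOIN are appended to msk->join_list from contexts that do not hold the msk socket lock; __mptcp_flush_join_list() later splices them onto conn_list under join_list_lock (lines 660-667). A rough userspace analogue of the pattern, using plain pointers and a pthread mutex rather than the kernel list API:

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int id; };

static pthread_mutex_t join_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *join_list;   /* written by producer contexts      */
static struct node *conn_list;   /* only touched by the socket owner  */

static void flush_join_list(void)
{
	struct node *batch;

	pthread_mutex_lock(&join_lock);
	batch = join_list;
	join_list = NULL;            /* splice-and-init in one step       */
	pthread_mutex_unlock(&join_lock);

	while (batch) {              /* prepends for brevity; the kernel
				      * splices at the tail to keep order */
		struct node *n = batch;

		batch = n->next;
		n->next = conn_list;
		conn_list = n;
	}
}

int main(void)
{
	static struct node a = { .id = 1 }, b = { .id = 2 };

	pthread_mutex_lock(&join_lock);
	a.next = &b;
	join_list = &a;
	pthread_mutex_unlock(&join_lock);

	flush_join_list();
	for (struct node *n = conn_list; n; n = n->next)
		printf("subflow %d\n", n->id);
	return 0;
}
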
699 struct mptcp_sock *msk = mptcp_sk(sk);
701 if (!test_and_set_bit(MPTCP_WORK_EOF, &msk->flags) &&
702 schedule_work(&msk->work))
706 static void mptcp_check_for_eof(struct mptcp_sock *msk)
709 struct sock *sk = (struct sock *)msk;
712 mptcp_for_each_subflow(msk, subflow)
717 * to msk, when all subflows agree on it
722 set_bit(MPTCP_DATA_READY, &msk->flags);
727 static bool mptcp_ext_cache_refill(struct mptcp_sock *msk)
729 const struct sock *sk = (const struct sock *)msk;
731 if (!msk->cached_ext)
732 msk->cached_ext = __skb_ext_alloc(sk->sk_allocation);
734 return !!msk->cached_ext;
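
mptcp_ext_cache_refill() (lines 727-734) pre-allocates a single skb extension so the transmit path can later consume it via __skb_ext_set() without introducing a failure point there (lines 1024-1025). A sketch of this one-slot pre-allocation pattern, with hypothetical helper names rather than the skb extension API:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ext { int dummy; };

static struct ext *cached_ext;

static bool ext_cache_refill(void)
{
	if (!cached_ext)
		cached_ext = malloc(sizeof(*cached_ext));
	return cached_ext != NULL;
}

static struct ext *ext_cache_take(void)
{
	struct ext *e = cached_ext;   /* guaranteed by an earlier refill */

	cached_ext = NULL;
	return e;
}

int main(void)
{
	struct ext *e;

	if (!ext_cache_refill())
		return 1;             /* fail early, nothing half-built  */
	e = ext_cache_take();         /* cannot fail at this point       */
	printf("got ext %p\n", (void *)e);
	free(e);
	return 0;
}
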
737 static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
740 struct sock *sk = (struct sock *)msk;
744 mptcp_for_each_subflow(msk, subflow) {
768 static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
774 df->data_seq + df->data_len == msk->write_seq;
792 static bool mptcp_is_writeable(struct mptcp_sock *msk)
796 if (!sk_stream_is_writeable((struct sock *)msk))
799 mptcp_for_each_subflow(msk, subflow) {
808 struct mptcp_sock *msk = mptcp_sk(sk);
816 if (__mptcp_check_fallback(msk))
817 atomic64_set(&msk->snd_una, msk->write_seq);
818 snd_una = atomic64_read(&msk->snd_una);
820 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
848 if (mptcp_is_writeable(msk)) {
875 mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
883 dfrag->data_seq = msk->write_seq;
898 struct mptcp_sock *msk = mptcp_sk(sk);
914 write_seq = &msk->write_seq;
950 dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
952 dfrag = mptcp_carve_data_frag(msk, pfrag, offset);
999 list_add_tail(&dfrag->list, &msk->rtx_queue);
1024 mpext = __skb_ext_set(skb, SKB_EXT_MPTCP, msk->cached_ext);
1025 msk->cached_ext = NULL;
1047 static void mptcp_nospace(struct mptcp_sock *msk)
1051 clear_bit(MPTCP_SEND_SPACE, &msk->flags);
1052 smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */
1054 mptcp_for_each_subflow(msk, subflow) {
1087 static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk,
1097 sock_owned_by_me((struct sock *)msk);
1100 if (!mptcp_ext_cache_refill(msk))
1103 if (__mptcp_check_fallback(msk)) {
1104 if (!msk->first)
1106 *sndbuf = msk->first->sk_sndbuf;
1107 return sk_stream_memory_free(msk->first) ? msk->first : NULL;
1111 if (msk->last_snd && msk->snd_burst > 0 &&
1112 sk_stream_memory_free(msk->last_snd) &&
1113 mptcp_subflow_active(mptcp_subflow_ctx(msk->last_snd))) {
1114 mptcp_for_each_subflow(msk, subflow) {
1118 return msk->last_snd;
1126 mptcp_for_each_subflow(msk, subflow) {
1148 pr_debug("msk=%p nr_active=%d ssk=%p:%lld backup=%p:%lld",
1149 msk, nr_active, send_info[0].ssk, send_info[0].ratio,
1157 msk->last_snd = send_info[0].ssk;
1158 msk->snd_burst = min_t(int, MPTCP_SEND_BURST_SIZE,
1159 sk_stream_wspace(msk->last_snd));
1160 return msk->last_snd;
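
mptcp_subflow_get_send() first reuses msk->last_snd while msk->snd_burst lasts (lines 1111-1118), then falls back to picking the non-backup subflow whose queued data is smallest relative to its pacing rate (lines 1126-1160). A hedged sketch of that second step only; the field names and the exact ratio scaling here are illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

struct subflow {
	const char *name;
	uint32_t wmem_queued;   /* bytes already queued for transmit */
	uint32_t pacing_rate;   /* bytes/sec the path can sustain    */
	int active;
	int backup;
};

static const struct subflow *pick_subflow(const struct subflow *sf, int n)
{
	const struct subflow *best = NULL;
	uint64_t best_ratio = UINT64_MAX;

	for (int i = 0; i < n; i++) {
		uint64_t ratio;

		if (!sf[i].active || sf[i].backup || !sf[i].pacing_rate)
			continue;
		/* lower queued/pacing ratio == queued data drains sooner */
		ratio = ((uint64_t)sf[i].wmem_queued << 32) / sf[i].pacing_rate;
		if (ratio < best_ratio) {
			best_ratio = ratio;
			best = &sf[i];
		}
	}
	return best;
}

int main(void)
{
	struct subflow sf[] = {
		{ "wifi", 64000, 1000000, 1, 0 },
		{ "lte",  16000, 2000000, 1, 0 },
	};

	printf("send on %s\n", pick_subflow(sf, 2)->name);
	return 0;
}
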
1165 static void ssk_check_wmem(struct mptcp_sock *msk)
1167 if (unlikely(!mptcp_is_writeable(msk)))
1168 mptcp_nospace(msk);
1174 struct mptcp_sock *msk = mptcp_sk(sk);
1204 __mptcp_flush_join_list(msk);
1205 ssk = mptcp_subflow_get_send(msk, &sndbuf);
1222 mptcp_nospace(msk);
1229 ssk = mptcp_subflow_get_send(msk, &sndbuf);
1230 if (list_empty(&msk->conn_list)) {
1260 msk->snd_burst -= ret;
1269 !mptcp_ext_cache_refill(msk)) {
1319 ssk_check_wmem(msk);
1327 struct mptcp_sock *msk = mptcp_sk(sk);
1333 test_and_clear_bit(MPTCP_DATA_READY, &msk->flags), &wait);
1339 static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
1343 struct sock *sk = (struct sock *)msk;
1381 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
1384 struct sock *sk = (struct sock *)msk;
1393 msk->rcvq_space.copied += copied;
1396 time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time);
1398 rtt_us = msk->rcvq_space.rtt_us;
1403 mptcp_for_each_subflow(msk, subflow) {
1417 msk->rcvq_space.rtt_us = rtt_us;
1421 if (msk->rcvq_space.copied <= msk->rcvq_space.space)
1429 rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss;
1431 grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space);
1433 do_div(grow, msk->rcvq_space.space);
1455 mptcp_for_each_subflow(msk, subflow) {
1469 msk->rcvq_space.space = msk->rcvq_space.copied;
1471 msk->rcvq_space.copied = 0;
1472 msk->rcvq_space.time = mstamp;
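
mptcp_rcv_space_adjust() mirrors TCP's dynamic right-sizing: once per RTT it compares how much the application actually copied against the previous estimate and grows the window target proportionally (lines 1421-1433). A worked sketch of just that arithmetic, leaving out the clamping and the per-subflow rcvbuf update that follow:

#include <stdint.h>
#include <stdio.h>

static uint64_t new_rcvwin(uint64_t copied, uint64_t prev_space, uint32_t advmss)
{
	uint64_t rcvwin, grow;

	if (copied <= prev_space)
		return 0;                       /* no growth needed this RTT */

	rcvwin = (copied << 1) + 16 * advmss;   /* as at line 1429           */
	grow = rcvwin * (copied - prev_space) / prev_space;

	return rcvwin + grow;                   /* simplified; not the full
						 * window/rcvbuf computation */
}

int main(void)
{
	/* App consumed 96 KiB this RTT against a previous 64 KiB estimate. */
	printf("rcvwin=%llu\n",
	       (unsigned long long)new_rcvwin(96 * 1024, 64 * 1024, 1460));
	return 0;
}
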
1475 static bool __mptcp_move_skbs(struct mptcp_sock *msk)
1481 if (((struct sock *)msk)->sk_state == TCP_CLOSE)
1484 __mptcp_flush_join_list(msk);
1486 struct sock *ssk = mptcp_subflow_recv_lookup(msk);
1492 done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
1496 if (mptcp_ofo_queue(msk) || moved > 0) {
1497 mptcp_check_data_fin((struct sock *)msk);
1506 struct mptcp_sock *msk = mptcp_sk(sk);
1519 __mptcp_flush_join_list(msk);
1524 bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied);
1534 __mptcp_move_skbs(msk))
1556 if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
1557 mptcp_check_for_eof(msk);
1584 clear_bit(MPTCP_DATA_READY, &msk->flags);
1589 if (unlikely(__mptcp_move_skbs(msk)))
1590 set_bit(MPTCP_DATA_READY, &msk->flags);
1591 } else if (unlikely(!test_bit(MPTCP_DATA_READY, &msk->flags))) {
1593 set_bit(MPTCP_DATA_READY, &msk->flags);
1596 pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d",
1597 msk, test_bit(MPTCP_DATA_READY, &msk->flags),
1599 mptcp_rcv_space_adjust(msk, copied);
1607 struct mptcp_sock *msk = mptcp_sk(sk);
1609 if (atomic64_read(&msk->snd_una) == READ_ONCE(msk->write_seq)) {
1612 set_bit(MPTCP_WORK_RTX, &msk->flags);
1613 if (schedule_work(&msk->work))
1642 static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
1647 sock_owned_by_me((const struct sock *)msk);
1649 if (__mptcp_check_fallback(msk))
1650 return msk->first;
1652 mptcp_for_each_subflow(msk, subflow) {
1707 static void pm_work(struct mptcp_sock *msk)
1709 struct mptcp_pm_data *pm = &msk->pm;
1711 spin_lock_bh(&msk->pm.lock);
1713 pr_debug("msk=%p status=%x", msk, pm->status);
1716 mptcp_pm_nl_add_addr_received(msk);
1720 mptcp_pm_nl_rm_addr_received(msk);
1724 mptcp_pm_nl_fully_established(msk);
1728 mptcp_pm_nl_subflow_established(msk);
1731 spin_unlock_bh(&msk->pm.lock);
1734 static void __mptcp_close_subflow(struct mptcp_sock *msk)
1738 list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
1744 __mptcp_close_ssk((struct sock *)msk, ssk, subflow, 0);
1750 struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
1751 struct sock *ssk, *sk = &msk->sk.icsk_inet.sk;
1764 __mptcp_flush_join_list(msk);
1765 if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
1766 __mptcp_close_subflow(msk);
1768 __mptcp_move_skbs(msk);
1770 if (msk->pm.status)
1771 pm_work(msk);
1773 if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
1774 mptcp_check_for_eof(msk);
1778 if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
1785 if (!mptcp_ext_cache_refill(msk))
1788 ssk = mptcp_subflow_get_retrans(msk);
1808 if (!mptcp_ext_cache_refill(msk))
1833 struct mptcp_sock *msk = mptcp_sk(sk);
1835 spin_lock_init(&msk->join_list_lock);
1837 INIT_LIST_HEAD(&msk->conn_list);
1838 INIT_LIST_HEAD(&msk->join_list);
1839 INIT_LIST_HEAD(&msk->rtx_queue);
1840 __set_bit(MPTCP_SEND_SPACE, &msk->flags);
1841 INIT_WORK(&msk->work, mptcp_worker);
1842 msk->out_of_order_queue = RB_ROOT;
1844 msk->first = NULL;
1847 mptcp_pm_data_init(msk);
1850 timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);
1883 struct mptcp_sock *msk = mptcp_sk(sk);
1886 sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);
1888 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
1894 struct mptcp_sock *msk = mptcp_sk(sk);
1896 if (cancel_work_sync(&msk->work))
1958 struct mptcp_sock *msk = mptcp_sk(sk);
1971 if (__mptcp_check_fallback(msk)) {
1975 WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
1976 WRITE_ONCE(msk->snd_data_fin_enable, 1);
1978 mptcp_for_each_subflow(msk, subflow) {
1994 spin_lock_bh(&msk->join_list_lock);
1995 list_splice_tail_init(&msk->join_list, &msk->conn_list);
1996 spin_unlock_bh(&msk->join_list_lock);
1997 list_splice_init(&msk->conn_list, &conn_list);
2015 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
2019 struct ipv6_pinfo *msk6 = inet6_sk(msk);
2021 msk->sk_v6_daddr = ssk->sk_v6_daddr;
2022 msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;
2030 inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
2031 inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
2032 inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
2033 inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
2034 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
2035 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
2063 struct mptcp_sock *msk;
2076 msk = mptcp_sk(nsk);
2077 msk->local_key = subflow_req->local_key;
2078 msk->token = subflow_req->token;
2079 msk->subflow = NULL;
2080 WRITE_ONCE(msk->fully_established, false);
2082 msk->write_seq = subflow_req->idsn + 1;
2083 atomic64_set(&msk->snd_una, msk->write_seq);
2085 msk->can_ack = true;
2086 msk->remote_key = mp_opt->sndr_key;
2087 mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
2089 WRITE_ONCE(msk->ack_seq, ack_seq);
2104 void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
2108 msk->rcvq_space.copied = 0;
2109 msk->rcvq_space.rtt_us = 0;
2111 msk->rcvq_space.time = tp->tcp_mstamp;
2114 msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
2116 if (msk->rcvq_space.space == 0)
2117 msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
2123 struct mptcp_sock *msk = mptcp_sk(sk);
2127 listener = __mptcp_nmpc_socket(msk);
2133 pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk));
2138 pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));
2160 msk = mptcp_sk(new_mptcp_sock);
2161 msk->first = newsk;
2165 list_add(&subflow->node, &msk->conn_list);
2167 mptcp_rcv_space_init(msk, ssk);
2182 void mptcp_destroy_common(struct mptcp_sock *msk)
2184 skb_rbtree_purge(&msk->out_of_order_queue);
2185 mptcp_token_destroy(msk);
2186 mptcp_pm_free_anno_list(msk);
2191 struct mptcp_sock *msk = mptcp_sk(sk);
2193 if (msk->cached_ext)
2194 __skb_ext_put(msk->cached_ext);
2196 mptcp_destroy_common(msk);
2200 static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname,
2203 struct sock *sk = (struct sock *)msk;
2211 ssock = __mptcp_nmpc_socket(msk);
2231 static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname,
2234 struct sock *sk = (struct sock *)msk;
2241 ssock = __mptcp_nmpc_socket(msk);
2303 struct mptcp_sock *msk = mptcp_sk(sk);
2306 pr_debug("msk=%p", msk);
2312 return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen);
2321 ssk = __mptcp_tcp_fallback(msk);
2327 return mptcp_setsockopt_v6(msk, optname, optval, optlen);
2335 struct mptcp_sock *msk = mptcp_sk(sk);
2338 pr_debug("msk=%p", msk);
2347 ssk = __mptcp_tcp_fallback(msk);
2375 struct mptcp_sock *msk = mptcp_sk(sk);
2378 ssk = mptcp_subflow_recv_lookup(msk);
2379 if (!ssk || !schedule_work(&msk->work))
2405 struct mptcp_sock *msk = mptcp_sk(sk);
2408 ssock = __mptcp_nmpc_socket(msk);
2409 pr_debug("msk=%p, subflow=%p", msk, ssock);
2419 struct mptcp_sock *msk;
2425 msk = mptcp_sk(sk);
2427 pr_debug("msk=%p, token=%u", sk, subflow->token);
2434 /* the socket is not connected yet, no msk/subflow ops can access/race
2437 WRITE_ONCE(msk->remote_key, subflow->remote_key);
2438 WRITE_ONCE(msk->local_key, subflow->local_key);
2439 WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
2440 WRITE_ONCE(msk->ack_seq, ack_seq);
2441 WRITE_ONCE(msk->can_ack, 1);
2442 atomic64_set(&msk->snd_una, msk->write_seq);
2444 mptcp_pm_new_connection(msk, 0);
2446 mptcp_rcv_space_init(msk, ssk);
2461 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
2462 struct sock *parent = (void *)msk;
2466 pr_debug("msk=%p, subflow=%p", msk, subflow);
2472 if (!msk->pm.server_side)
2475 if (!mptcp_pm_allow_new_subflow(msk))
2479 * msk lock here.
2481 * msk status to avoid racing with mptcp_close()
2483 spin_lock_bh(&msk->join_list_lock);
2486 list_add_tail(&subflow->node, &msk->join_list);
2487 spin_unlock_bh(&msk->join_list_lock);
2491 /* attach to msk socket only after we are sure he will deal with us
2497 subflow->map_seq = READ_ONCE(msk->ack_seq);
2503 struct mptcp_sock *msk = mptcp_sk(sk);
2505 return wake ? test_bit(MPTCP_SEND_SPACE, &msk->flags) : true;
2539 struct mptcp_sock *msk = mptcp_sk(sock->sk);
2544 ssock = __mptcp_nmpc_socket(msk);
2559 static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
2563 __mptcp_do_fallback(msk);
2569 struct mptcp_sock *msk = mptcp_sk(sock->sk);
2575 if (sock->state != SS_UNCONNECTED && msk->subflow) {
2579 ssock = msk->subflow;
2583 ssock = __mptcp_nmpc_socket(msk);
2589 mptcp_token_destroy(msk);
2597 mptcp_subflow_early_fallback(msk, subflow);
2600 mptcp_subflow_early_fallback(msk, subflow);
2606 /* on successful connect, the msk state will be moved to established by
2621 struct mptcp_sock *msk = mptcp_sk(sock->sk);
2625 pr_debug("msk=%p", msk);
2628 ssock = __mptcp_nmpc_socket(msk);
2634 mptcp_token_destroy(msk);
2651 struct mptcp_sock *msk = mptcp_sk(sock->sk);
2655 pr_debug("msk=%p", msk);
2661 ssock = __mptcp_nmpc_socket(msk);
2665 clear_bit(MPTCP_DATA_READY, &msk->flags);
2671 struct mptcp_sock *msk = mptcp_sk(newsock->sk);
2677 __mptcp_flush_join_list(msk);
2678 mptcp_for_each_subflow(msk, subflow) {
2687 set_bit(MPTCP_DATA_READY, &msk->flags);
2696 static __poll_t mptcp_check_readable(struct mptcp_sock *msk)
2698 return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM :
2706 struct mptcp_sock *msk;
2710 msk = mptcp_sk(sk);
2714 pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags);
2716 return mptcp_check_readable(msk);
2719 mask |= mptcp_check_readable(msk);
2720 if (test_bit(MPTCP_SEND_SPACE, &msk->flags))
2731 struct mptcp_sock *msk = mptcp_sk(sock->sk);
2735 pr_debug("sk=%p, how=%d", msk, how);
2754 if (__mptcp_check_fallback(msk)) {
2758 mptcp_for_each_subflow(msk, subflow) {
2768 __mptcp_flush_join_list(msk);
2770 WRITE_ONCE(msk->write_seq, msk->write_seq + 1);
2771 WRITE_ONCE(msk->snd_data_fin_enable, 1);
2773 mptcp_for_each_subflow(msk, subflow) {