Lines matching refs:msk (identifier references to msk)
45 if (subflow_req->msk)
46 sock_put((struct sock *)subflow_req->msk);
62 static bool mptcp_can_accept_new_subflow(const struct mptcp_sock *msk)
64 return mptcp_is_fully_established((void *)msk) &&
65 ((mptcp_pm_is_userspace(msk) &&
66 mptcp_userspace_pm_active(msk)) ||
67 READ_ONCE(msk->pm.accept_subflow));
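
The matches at 62-67 form a complete predicate: a new subflow is admitted only once the MPTCP connection is fully established, and then only if either an active userspace path manager or the in-kernel PM's accept_subflow flag allows it. A minimal user-space model of that short-circuit logic; the struct fields are stand-ins for the kernel helpers the real function calls:

    #include <stdbool.h>

    /* Stand-ins for the kernel state consulted by the real helpers. */
    struct msk_model {
        bool fully_established;   /* mptcp_is_fully_established() */
        bool userspace_pm;        /* mptcp_pm_is_userspace() */
        bool userspace_pm_active; /* mptcp_userspace_pm_active() */
        bool accept_subflow;      /* READ_ONCE(msk->pm.accept_subflow) */
    };

    /* Mirrors lines 62-67: establishment is mandatory, then either
     * PM flavour may grant admission. */
    static bool can_accept_new_subflow(const struct msk_model *m)
    {
        return m->fully_established &&
               ((m->userspace_pm && m->userspace_pm_active) ||
                m->accept_subflow);
    }
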
73 struct mptcp_sock *msk = subflow_req->msk;
78 subflow_generate_hmac(msk->local_key, msk->remote_key,
88 struct mptcp_sock *msk;
91 msk = mptcp_token_get_sock(sock_net(req_to_sk(req)), subflow_req->token);
92 if (!msk) {
97 local_id = mptcp_pm_get_local_id(msk, (struct sock_common *)req);
99 sock_put((struct sock *)msk);
104 return msk;
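
Lines 88-104 are the core of the token lookup on a join request: mptcp_token_get_sock() returns the owning msk with a socket reference already held, so the error path at line 99 must drop that reference with sock_put() before bailing out. A hedged, runnable model of the acquire/validate/release pattern; the names and the toy refcount are illustrative, not the kernel's:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-in: the kernel's msk carries a socket refcount; the
     * token-table lookup returns it with that refcount already bumped. */
    struct msk { int refcnt; };

    static struct msk *token_get_sock(unsigned int token)
    {
        struct msk *msk = token ? malloc(sizeof(*msk)) : NULL;
        if (msk)
            msk->refcnt = 1;        /* lookup hands back a held reference */
        return msk;
    }

    static void msk_put(struct msk *msk)
    {
        if (--msk->refcnt == 0)
            free(msk);
    }

    /* Stub, forced to fail so the error path below is exercised. */
    static int pm_get_local_id(struct msk *msk) { (void)msk; return -1; }

    /* Mirrors lines 88-104: every failure after a successful lookup
     * must drop the reference the lookup took. */
    static struct msk *token_join_request(unsigned int token)
    {
        struct msk *msk = token_get_sock(token);

        if (!msk)
            return NULL;
        if (pm_get_local_id(msk) < 0) {
            msk_put(msk);           /* matches the sock_put() at line 99 */
            return NULL;
        }
        return msk;                 /* caller now owns the reference */
    }

    int main(void)
    {
        struct msk *msk = token_join_request(42);
        printf("join %s\n", msk ? "accepted" : "rejected");
        if (msk)
            msk_put(msk);
        return 0;
    }
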
115 subflow_req->msk = NULL;
119 static bool subflow_use_different_sport(struct mptcp_sock *msk, const struct sock *sk)
121 return inet_sk(sk)->inet_sport != inet_sk((struct sock *)msk)->inet_sport;
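
The helper at 119-121, like its dport twin at 417-419 further down, compares ports taken straight from the socket in network byte order; an equality test is byte-order agnostic, so ntohs() is only needed when a port is printed (cf. line 220). A trivial sketch with uint16_t fields standing in for inet_sk():

    #include <stdbool.h>
    #include <stdint.h>

    /* Ports as stored in the socket: network byte order. */
    struct sk_model { uint16_t sport_be; uint16_t dport_be; };

    /* Equality needs no byte-order conversion; conversion only matters
     * for human-readable output. */
    static bool use_different_sport(const struct sk_model *msk_first,
                                    const struct sk_model *sk)
    {
        return sk->sport_be != msk_first->sport_be;
    }
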
209 subflow_req->msk = subflow_token_join_request(req);
212 if (!subflow_req->msk) {
217 if (subflow_use_different_sport(subflow_req->msk, sk_listener)) {
220 ntohs(inet_sk((struct sock *)subflow_req->msk)->inet_sport));
221 if (!mptcp_pm_sport_in_anno_list(subflow_req->msk, sk_listener)) {
231 if (mptcp_can_accept_new_subflow(subflow_req->msk))
237 pr_debug("token=%u, remote_nonce=%u msk=%p", subflow_req->token,
238 subflow_req->remote_nonce, subflow_req->msk);
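
Lines 209-238 show the MP_JOIN SYN path end to end: resolve the token to an owning msk, tolerate a differing source port only if that port was announced, then gate on the acceptance predicate. A condensed, compilable outline; MIB counters and pr_debug are dropped, and the stub lookup omits the refcounting shown above:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Minimal model of the state the MP_JOIN SYN path consults. */
    struct join_msk {
        uint16_t first_sport_be;   /* source port of the initial subflow */
        bool     port_announced;   /* mptcp_pm_sport_in_anno_list() */
        bool     accept_subflow;   /* mptcp_can_accept_new_subflow() */
    };

    struct join_req {
        unsigned int     token;
        struct join_msk *msk;
    };

    /* Stub; the kernel's lookup also takes a socket reference. */
    static struct join_msk *lookup_token(unsigned int token)
    {
        (void)token;
        return NULL;
    }

    /* Condenses lines 209-238: token lookup, announce-list check for an
     * unexpected source port, then the final PM gate at line 231. */
    static bool init_join_req(struct join_req *req, uint16_t syn_sport_be)
    {
        req->msk = lookup_token(req->token);
        if (!req->msk)
            return false;                /* unknown token: reject the SYN */

        if (syn_sport_be != req->msk->first_sport_be &&
            !req->msk->port_announced)
            return false;                /* unannounced port mismatch: drop */

        return req->msk->accept_subflow;
    }
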
417 static bool subflow_use_different_dport(struct mptcp_sock *msk, const struct sock *sk)
419 return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport;
425 struct mptcp_sock *msk = mptcp_sk(sk);
426 struct sock *ssk = msk->first;
430 if (!msk->rcvspace_init)
431 mptcp_rcv_space_init(msk, ssk);
437 WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
438 WRITE_ONCE(msk->snd_nxt, msk->write_seq);
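
At connect completion of the first subflow (lines 425-438), the connection-level send counters are seeded one past the subflow's initial data sequence number; the kernel wraps the stores in WRITE_ONCE() because other paths read them locklessly. The arithmetic itself, modeled plainly:

    #include <stdint.h>

    /* Model of the connection-level send state seeded at lines 437-438. */
    struct msk_seq {
        uint64_t write_seq;  /* next data sequence number to assign */
        uint64_t snd_nxt;    /* next to send on the wire */
    };

    /* Both counters start one past the initial data sequence number. */
    static void seed_send_state(struct msk_seq *msk, uint64_t idsn)
    {
        msk->write_seq = idsn + 1;
        msk->snd_nxt   = msk->write_seq;
    }
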
444 static void subflow_set_remote_key(struct mptcp_sock *msk,
459 WRITE_ONCE(msk->remote_key, subflow->remote_key);
460 WRITE_ONCE(msk->ack_seq, subflow->iasn);
461 WRITE_ONCE(msk->can_ack, true);
462 atomic64_set(&msk->rcv_wnd_sent, subflow->iasn);
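
subflow_set_remote_key() (444-462) publishes the peer's key and the derived initial ack sequence before setting can_ack, so a reader that observes can_ack also finds valid data behind it. Below is a user-space analogue using C11 release/acquire; note that this ordering discipline is my reading of the WRITE_ONCE() sequence, since the matches alone do not show what locking the readers hold:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Data fields are written first, then a flag is set so lockless
     * readers that see the flag also see the data. */
    struct remote_state {
        uint64_t    remote_key;
        uint64_t    ack_seq;
        atomic_bool can_ack;
    };

    static void set_remote_key(struct remote_state *s, uint64_t key,
                               uint64_t iasn)
    {
        s->remote_key = key;
        s->ack_seq    = iasn;
        atomic_store_explicit(&s->can_ack, true, memory_order_release);
    }

    static int reader_can_ack(const struct remote_state *s, uint64_t *key)
    {
        if (!atomic_load_explicit(&s->can_ack, memory_order_acquire))
            return 0;
        *key = s->remote_key;  /* acquire pairs with the release above */
        return 1;
    }
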
469 struct mptcp_sock *msk = mptcp_sk(sk);
476 WRITE_ONCE(msk->snd_una, subflow->idsn + 1);
477 WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd);
478 subflow_set_remote_key(msk, subflow, mp_opt);
484 msk->pending_state = ssk->sk_state;
485 __set_bit(MPTCP_SYNC_STATE, &msk->cb_flags);
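
Lines 476-485 finish the connect: snd_una and wnd_end are seeded from the IDSN and the TCP send window, then the subflow records the state to propagate and raises MPTCP_SYNC_STATE in msk->cb_flags. The deferral is presumably because the msk socket lock cannot be taken here (cf. the locking comments near the bottom of this listing); a sketch of that record-and-flag pattern:

    #include <stdint.h>

    #define SYNC_STATE_BIT (1u << 0)

    /* Model of lines 484-485: record the target state and raise a flag
     * for the msk-level release callback to process later, under the
     * msk socket lock. */
    struct msk_cb {
        int      pending_state;  /* state to apply at the next release */
        unsigned cb_flags;       /* bitmask polled by the msk lock owner */
    };

    static void request_state_sync(struct msk_cb *msk, int ssk_state)
    {
        msk->pending_state = ssk_state;
        msk->cb_flags |= SYNC_STATE_BIT;  /* kernel uses __set_bit() */
    }
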
495 struct mptcp_sock *msk;
503 msk = mptcp_sk(parent);
515 pr_fallback(msk);
520 WRITE_ONCE(msk->csum_enabled, true);
522 WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
561 if (subflow_use_different_dport(msk, sk)) {
587 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
593 err = mptcp_pm_get_local_id(msk, (struct sock_common *)sk);
710 struct mptcp_sock *msk;
713 msk = subflow_req->msk;
714 if (!msk)
717 subflow_generate_hmac(msk->remote_key, msk->local_key,
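
Compare line 717 with line 78: the side producing its own token HMAC leads with its local key, while the side validating the peer's HMAC leads with the remote key, recomputing what the peer computed as "local first" (RFC 8684 keys the join HMAC with the concatenated keys). A toy illustration of the argument symmetry only; the real primitive is HMAC-SHA256, not this mixer:

    #include <stdint.h>

    /* Toy stand-in for subflow_generate_hmac(); only the argument
     * order matters for this illustration. */
    static uint64_t toy_hmac(uint64_t key1, uint64_t key2,
                             uint32_t nonce1, uint32_t nonce2)
    {
        return (key1 * 31 + key2) ^ (((uint64_t)nonce1 << 32) | nonce2);
    }

    /* Line 78: a side producing its own thmac leads with its local key. */
    static uint64_t make_own_thmac(uint64_t local_key, uint64_t remote_key,
                                   uint32_t local_nonce, uint32_t remote_nonce)
    {
        return toy_hmac(local_key, remote_key, local_nonce, remote_nonce);
    }

    /* Line 717: validating the peer's hmac swaps key and nonce order,
     * recomputing exactly what the peer produced. */
    static uint64_t expect_peer_hmac(uint64_t local_key, uint64_t remote_key,
                                     uint32_t local_nonce, uint32_t remote_nonce)
    {
        return toy_hmac(remote_key, local_key, remote_nonce, local_nonce);
    }
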
754 void __mptcp_subflow_fully_established(struct mptcp_sock *msk,
758 subflow_set_remote_key(msk, subflow, mp_opt);
760 WRITE_ONCE(msk->fully_established, true);
763 __mptcp_fastopen_gen_msk_ackseq(msk, subflow, mp_opt);
813 !mptcp_can_accept_new_subflow(subflow_req->msk)) {
860 owner = subflow_req->msk;
866 /* move the msk reference ownership to the subflow */
867 subflow_req->msk = NULL;
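
Lines 860-867 transfer ownership of the msk reference from the request socket to the freshly created subflow; clearing subflow_req->msk is what keeps the request destructor at lines 45-46 from dropping the same reference twice. Modeled with a toy refcount:

    #include <stdlib.h>
    #include <stddef.h>

    /* Refcounted object model (cf. the msk's socket refcount). */
    struct obj { int refcnt; };
    static void obj_put(struct obj *o) { if (o && --o->refcnt == 0) free(o); }

    struct request { struct obj *msk; };
    struct subflow { struct obj *owner; };

    /* Lines 860-867: move, don't copy, the reference.  Clearing
     * req->msk keeps the destructor below from dropping it again. */
    static void adopt_msk(struct subflow *sf, struct request *req)
    {
        sf->owner = req->msk;
        req->msk  = NULL;   /* ownership moved; no refcount change */
    }

    /* Request destructor, mirroring lines 45-46: put only what is left. */
    static void request_destroy(struct request *req)
    {
        obj_put(req->msk);
    }
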
1044 struct mptcp_sock *msk)
1047 bool csum_reqd = READ_ONCE(msk->csum_enabled);
1094 bool updated = mptcp_update_rcv_data_fin(msk, mpext->data_seq,
1107 mptcp_schedule_work((struct sock *)msk);
1120 mptcp_update_rcv_data_fin(msk, data_fin_seq, mpext->dsn64);
1129 map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64);
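
At line 1129 a possibly 32-bit mapping sequence number is expanded against the 64-bit msk->ack_seq. The usual technique, and to my understanding what mptcp_expand_seq() does, is to advance the 64-bit cursor by the 32-bit forward delta, assuming mappings never describe data far behind the cursor; sketched here, not copied from the kernel:

    #include <stdint.h>

    /* Expand a DSS sequence number to 64 bits against the current
     * connection-level cursor (ack_seq). */
    static uint64_t expand_seq(uint64_t ack_seq, uint64_t map_seq, int dsn64)
    {
        if (dsn64)
            return map_seq;                 /* already a full 64-bit DSN */

        if ((uint32_t)map_seq == (uint32_t)ack_seq)
            return ack_seq;

        /* Treat the mapping as covering data at or after the cursor. */
        return ack_seq + (uint32_t)(map_seq - ack_seq);
    }
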
1208 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
1214 !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
1215 mptcp_schedule_work((struct sock *)msk);
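
subflow_sched_work_if_closed() (1208-1215) makes work scheduling idempotent: test_and_set_bit() guarantees that of any number of racing callers, exactly one queues the msk worker. A user-space analogue with a C11 atomic bitmask:

    #include <stdatomic.h>
    #include <stdio.h>

    #define WORK_CLOSE_SUBFLOW (1u << 0)

    static atomic_uint work_flags;

    static void schedule_msk_work(void)
    {
        puts("worker queued");  /* stands in for mptcp_schedule_work() */
    }

    /* The first caller to set the bit schedules the worker; concurrent
     * repeats observe the bit already set and do nothing. */
    static void sched_work_if_closed(int subflow_closed)
    {
        unsigned int prev;

        if (!subflow_closed)
            return;
        prev = atomic_fetch_or(&work_flags, WORK_CLOSE_SUBFLOW);
        if (!(prev & WORK_CLOSE_SUBFLOW))  /* test_and_set_bit() semantics */
            schedule_msk_work();
    }
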
1220 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
1224 else if (READ_ONCE(msk->csum_enabled))
1230 static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
1236 if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
1242 if (sock_flag((struct sock *)msk, SOCK_DEAD))
1254 mptcp_reset_tout_timer(msk, subflow->fail_tout);
1261 struct mptcp_sock *msk;
1269 msk = mptcp_sk(subflow->conn);
1274 status = get_mapping_status(ssk, msk);
1287 if (unlikely(!READ_ONCE(msk->can_ack)))
1290 old_ack = READ_ONCE(msk->ack_seq);
1292 pr_debug("msk ack_seq=%llx subflow ack_seq=%llx", old_ack,
1305 subflow_sched_work_if_closed(msk, ssk);
1309 if (!__mptcp_check_fallback(msk)) {
1315 if (!READ_ONCE(msk->allow_infinite_fallback)) {
1320 mptcp_subflow_fail(msk, ssk);
1347 subflow->map_seq = READ_ONCE(msk->ack_seq);
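
Around lines 1309-1320 a mapping error forks two ways: while allow_infinite_fallback still holds (it is cleared for good once a second subflow is created, line 1579) the connection can fall back to plain TCP; otherwise the subflow is failed via mptcp_subflow_fail(). The decision, condensed:

    #include <stdbool.h>

    enum verdict { FALLBACK_TO_TCP, FAIL_SUBFLOW };

    /* Fallback can absorb an invalid mapping only while it is still
     * permitted; after that the offending subflow must be failed. */
    static enum verdict on_bad_mapping(bool allow_infinite_fallback)
    {
        return allow_infinite_fallback ? FALLBACK_TO_TCP : FAIL_SUBFLOW;
    }
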
1396 * and msk socket spinlock
1414 struct mptcp_sock *msk;
1418 msk = mptcp_sk(parent);
1430 WARN_ON_ONCE(!__mptcp_check_fallback(msk) && !subflow->mp_capable &&
1511 struct mptcp_sock *msk = mptcp_sk(sk);
1540 mptcp_pm_get_flags_and_ifindex_by_id(msk, local_id,
1543 subflow->remote_key = msk->remote_key;
1544 subflow->local_key = msk->local_key;
1545 subflow->token = msk->token;
1553 mptcp_sockopt_sync(msk, ssk);
1561 pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk,
1567 subflow->subflow_id = msk->subflow_id++;
1571 list_add_tail(&subflow->node, &msk->conn_list);
1579 WRITE_ONCE(msk->allow_infinite_fallback, false);
1595 mptcp_pm_close_subflow(msk);
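
__mptcp_subflow_connect() (1511-1595) stamps the new subflow with the connection's identity, assigns it the next subflow_id, links it into msk->conn_list and, since more than one subflow now exists, clears allow_infinite_fallback permanently. A compilable condensation: singly linked push instead of the kernel's list_add_tail(), and no locking:

    #include <stdbool.h>
    #include <stdint.h>

    struct sf_node {
        struct sf_node *next;
        uint64_t local_key, remote_key;
        uint32_t token;
        unsigned int id;
    };

    struct conn_model {
        uint64_t local_key, remote_key;
        uint32_t token;
        unsigned int next_subflow_id;
        bool allow_infinite_fallback;
        struct sf_node *conn_list;   /* kernel uses a list_head */
    };

    /* Condenses lines 1543-1579: inherit connection identity, assign an
     * id, link into the connection list, and forbid fallback from now on. */
    static void attach_subflow(struct conn_model *msk, struct sf_node *sf)
    {
        sf->local_key  = msk->local_key;
        sf->remote_key = msk->remote_key;
        sf->token      = msk->token;
        sf->id         = msk->next_subflow_id++;  /* line 1567 */

        sf->next       = msk->conn_list;          /* kernel: list_add_tail() */
        msk->conn_list = sf;

        msk->allow_infinite_fallback = false;     /* line 1579 */
    }
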
1751 struct mptcp_sock *msk;
1755 msk = mptcp_sk(parent);
1758 pr_fallback(msk);
1775 * ingress data fin, so that the msk state will follow along
1777 if (__mptcp_check_fallback(msk) && subflow_is_done(sk) && msk->first == sk &&
1778 mptcp_update_rcv_data_fin(msk, READ_ONCE(msk->ack_seq), true))
1803 /* can't acquire the msk socket lock under the subflow one,
1842 /* we are still under the listener msk socket lock */
1904 /* if the msk has been orphaned, keep the ctx