Lines Matching defs:subflow

44 /* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
45 * completed yet or has failed, return the subflow socket.
50 if (!msk->subflow || READ_ONCE(msk->can_ack))
53 return msk->subflow;
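
Together these matches outline the TCP fallback helper: while the initial subflow exists and the MP_CAPABLE handshake has not completed, callers are handed the plain subflow socket. A minimal sketch of that helper; the function name and the locking assertion are assumptions beyond the matched lines:

	static struct socket *__mptcp_tcp_fallback(struct mptcp_sock *msk)
	{
		sock_owned_by_me((const struct sock *)msk);

		/* no fallback once the MP_CAPABLE handshake has completed,
		 * and none possible without the initial subflow socket
		 */
		if (!msk->subflow || READ_ONCE(msk->can_ack))
			return NULL;

		return msk->subflow;
	}
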
92 struct mptcp_subflow_context *subflow;
102 msk->subflow = ssock;
103 subflow = mptcp_subflow_ctx(ssock->sk);
104 list_add(&subflow->node, &msk->conn_list);
105 subflow->request_mptcp = 1;
107 /* accept() will wait on first subflow sk_wq, and we always wake up
268 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
277 /* try to fetch required memory from subflow */
292 MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow);
404 * at the subflow level and the msk lock was not held, so this
415 struct mptcp_subflow_context *subflow;
442 mptcp_for_each_subflow(msk, subflow) {
443 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
464 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
482 map_remaining = subflow->map_data_len -
483 mptcp_subflow_get_map_offset(subflow);
502 subflow->map_data_len = skb->len;
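
Lines 482-483 compute how much of the current DSS mapping is still unread on this subflow; the move loop consumes at most that much per pass. In sketch form (the declarations and the clamp are assumptions beyond the matched lines):

	/* bytes of the current DSS mapping not yet pulled off the subflow */
	u32 map_remaining = subflow->map_data_len -
			    mptcp_subflow_get_map_offset(subflow);

	/* never consume past what the mapping (or the skb) covers */
	u32 len = min_t(u32, map_remaining, skb->len);
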
625 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
633 wake = subflow->data_avail == MPTCP_SUBFLOW_DATA_AVAIL;
708 struct mptcp_subflow_context *subflow;
712 mptcp_for_each_subflow(msk, subflow)
713 receivers += !subflow->rx_eof;
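
Lines 708-713 count subflows that may still deliver data; once every subflow has hit rx_eof, the msk read side is finished as well. A sketch of how that check might conclude, with the shutdown/wake-up step an assumption:

	static void mptcp_check_for_eof(struct mptcp_sock *msk)
	{
		struct mptcp_subflow_context *subflow;
		struct sock *sk = (struct sock *)msk;
		int receivers = 0;

		mptcp_for_each_subflow(msk, subflow)
			receivers += !subflow->rx_eof;

		if (!receivers && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
			/* assumed: mirror TCP and let readers see EOF */
			sk->sk_shutdown |= RCV_SHUTDOWN;
			sk->sk_data_ready(sk);
		}
	}
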
739 struct mptcp_subflow_context *subflow;
744 mptcp_for_each_subflow(msk, subflow) {
745 if (subflow->data_avail)
746 return mptcp_subflow_tcp_sock(subflow);
794 struct mptcp_subflow_context *subflow;
799 mptcp_for_each_subflow(msk, subflow) {
800 if (sk_stream_is_writeable(subflow->tcp_sock))
847 /* Only wake up writers if a subflow is ready */
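
The poll/write-space side (lines 794-847) declares the msk writeable as soon as any one subflow's TCP socket has send room, which is also the condition for waking writers. A minimal sketch, with the function name assumed:

	static bool mptcp_is_writeable(struct mptcp_sock *msk)
	{
		struct mptcp_subflow_context *subflow;

		mptcp_for_each_subflow(msk, subflow) {
			if (sk_stream_is_writeable(subflow->tcp_sock))
				return true;
		}

		return false;
	}
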
908 * from one substream to another, but do per subflow memory accounting
1049 struct mptcp_subflow_context *subflow;
1054 mptcp_for_each_subflow(msk, subflow) {
1055 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1064 static bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
1066 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1069 if (subflow->request_join && !subflow->fully_established)
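
mptcp_subflow_active() (lines 1064-1069) is the gate used by both schedulers below: an MP_JOIN subflow does not count until fully established, and only open TCP states are usable. A sketch completing the fragment; the exact state mask is an assumption:

	static bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
	{
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		/* can't send before the JOIN handshake completes */
		if (subflow->request_join && !subflow->fully_established)
			return false;

		/* only send on sockets our side has not closed yet */
		return ((1 << ssk->sk_state) &
			(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT));
	}
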
1091 struct mptcp_subflow_context *subflow;
1110 /* re-use last subflow, if the burst allows that */
1114 mptcp_for_each_subflow(msk, subflow) {
1115 ssk = mptcp_subflow_tcp_sock(subflow);
1121 /* pick the subflow with the lower wmem/wspace ratio */
1126 mptcp_for_each_subflow(msk, subflow) {
1127 ssk = mptcp_subflow_tcp_sock(subflow);
1128 if (!mptcp_subflow_active(subflow))
1131 nr_active += !subflow->backup;
1133 if (!sk_stream_memory_free(subflow->tcp_sock))
1142 if (ratio < send_info[subflow->backup].ratio) {
1143 send_info[subflow->backup].ssk = ssk;
1144 send_info[subflow->backup].ratio = ratio;
1152 /* pick the best backup if no other subflow is active */
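
Lines 1110-1152 outline the transmit scheduler: keep the last subflow while its burst allowance lasts; otherwise track, separately for backup and non-backup subflows, the candidate with the lowest queued-bytes to pacing-rate ratio, and use a backup only when no non-backup subflow is active. The selection loop might look like this (a sketch; the pacing read and the 32-bit fixed-point shift are assumptions):

	mptcp_for_each_subflow(msk, subflow) {
		ssk = mptcp_subflow_tcp_sock(subflow);
		if (!mptcp_subflow_active(subflow))
			continue;

		nr_active += !subflow->backup;
		if (!sk_stream_memory_free(subflow->tcp_sock))
			continue;

		pace = READ_ONCE(ssk->sk_pacing_rate);
		if (!pace)
			continue;

		/* lower queued/pace means this subflow drains soonest */
		ratio = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32,
				pace);
		if (ratio < send_info[subflow->backup].ratio) {
			send_info[subflow->backup].ssk = ssk;
			send_info[subflow->backup].ratio = ratio;
		}
	}
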
1241 pr_debug("conn_list->subflow=%p", ssk);
1257 /* burst can be negative, we will try to move to the next subflow
1283 * Normally, when the tcp subflow can accept more data, then
1383 struct mptcp_subflow_context *subflow;
1403 mptcp_for_each_subflow(msk, subflow) {
1408 tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));
1451 * get drops at subflow level if skbs can't be moved to
1455 mptcp_for_each_subflow(msk, subflow) {
1459 ssk = mptcp_subflow_tcp_sock(subflow);
1637 /* Find an idle subflow. Return NULL if there is unacked data at tcp
1640 * A backup subflow is returned only if that is the only kind available.
1644 struct mptcp_subflow_context *subflow;
1652 mptcp_for_each_subflow(msk, subflow) {
1653 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1655 if (!mptcp_subflow_active(subflow))
1665 if (subflow->backup) {
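
The retransmit picker (lines 1637-1665) enforces the comment above it: bail out entirely if any active subflow still has unacked data queued at the TCP level, and fall back to a backup subflow only when nothing else is available. A hedged reconstruction:

	static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
	{
		struct mptcp_subflow_context *subflow;
		struct sock *backup = NULL;

		mptcp_for_each_subflow(msk, subflow) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

			if (!mptcp_subflow_active(subflow))
				continue;

			/* data still outstanding at TCP level? leave it alone */
			if (!tcp_write_queue_empty(ssk))
				return NULL;

			if (subflow->backup) {
				if (!backup)
					backup = ssk;
				continue;
			}

			return ssk;
		}

		return backup;
	}
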
1677 /* subflow sockets can be either outgoing (connect) or incoming
1686 struct mptcp_subflow_context *subflow,
1691 list_del(&subflow->node);
1694 /* outgoing subflow */
1697 /* incoming subflow */
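
__mptcp_close_ssk() (lines 1686-1697) distinguishes the two subflow flavors named in the comment at line 1677: an outgoing subflow owns a full socket created by connect() and is released through it, while an incoming one is a bare tcp_sock closed directly. Roughly (the sk_socket comparison is an assumption):

	static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
				      struct mptcp_subflow_context *subflow,
				      long timeout)
	{
		struct socket *sock = READ_ONCE(ssk->sk_socket);

		list_del(&subflow->node);

		if (sock && sock != sk->sk_socket) {
			/* outgoing subflow */
			sock_release(sock);
		} else {
			/* incoming subflow */
			tcp_close(ssk, timeout);
		}
	}
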
1736 struct mptcp_subflow_context *subflow, *tmp;
1738 list_for_each_entry_safe(subflow, tmp, &msk->conn_list, node) {
1739 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
1744 __mptcp_close_ssk((struct sock *)msk, ssk, subflow, 0);
1918 pr_debug("Sending DATA_FIN on subflow %p", ssk);
1957 struct mptcp_subflow_context *subflow, *tmp;
1978 mptcp_for_each_subflow(msk, subflow) {
1979 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
2003 list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
2004 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2005 __mptcp_close_ssk(sk, ssk, subflow, timeout);
2042 * refers to the subflow socket, not the mptcp one.
2079 msk->subflow = NULL;
2093 /* will be fully established after successful MPC subflow creation */
2138 pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk));
2140 struct mptcp_subflow_context *subflow;
2144 subflow = mptcp_subflow_ctx(newsk);
2145 new_mptcp_sock = subflow->conn;
2147 /* is_mptcp should be false if subflow->conn is missing, see
2165 list_add(&subflow->node, &msk->conn_list);
2316 * MPTCP-level socket to configure the subflows until the subflow
2318 * to the one remaining subflow.
2342 * MPTCP-level socket to configure the subflows until the subflow
2344 * to the one remaining subflow.
2409 pr_debug("msk=%p, subflow=%p", msk, ssock);
2418 struct mptcp_subflow_context *subflow;
2423 subflow = mptcp_subflow_ctx(ssk);
2424 sk = subflow->conn;
2427 pr_debug("msk=%p, token=%u", sk, subflow->token);
2429 mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
2431 subflow->map_seq = ack_seq;
2432 subflow->map_subflow_seq = 1;
2434 /* the socket is not connected yet, no msk/subflow ops can access/race
2437 WRITE_ONCE(msk->remote_key, subflow->remote_key);
2438 WRITE_ONCE(msk->local_key, subflow->local_key);
2439 WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
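
At connect completion (lines 2418-2439) the msk inherits its state from the initial subflow: both keys are copied, the peer key is hashed to derive the initial data ACK sequence, and write_seq starts one past the IDSN. In sketch form (the ack_seq increment and the final store are assumptions beyond the matched lines):

	mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq);
	ack_seq++;		/* assumed: data sequence space starts after the IDSN */
	subflow->map_seq = ack_seq;
	subflow->map_subflow_seq = 1;

	/* the socket is not connected yet, no msk/subflow ops can access/race */
	WRITE_ONCE(msk->remote_key, subflow->remote_key);
	WRITE_ONCE(msk->local_key, subflow->local_key);
	WRITE_ONCE(msk->write_seq, subflow->idsn + 1);
	WRITE_ONCE(msk->ack_seq, ack_seq);
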
2460 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
2461 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
2466 pr_debug("msk=%p, subflow=%p", msk, subflow);
2485 if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node)))
2486 list_add_tail(&subflow->node, &msk->join_list);
2497 subflow->map_seq = READ_ONCE(msk->ack_seq);
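
mptcp_finish_join() (lines 2460-2497) attaches a freshly authenticated MP_JOIN subflow to the msk: the subflow is queued on join_list, to be spliced into conn_list later since the msk lock cannot be taken here, and its first mapping sequence is taken from the msk-level ack_seq. The queuing step, with the lock name assumed:

	spin_lock_bh(&msk->join_list_lock);
	if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node)))
		list_add_tail(&subflow->node, &msk->join_list);
	spin_unlock_bh(&msk->join_list_lock);
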
2560 struct mptcp_subflow_context *subflow)
2562 subflow->request_mptcp = 0;
2570 struct mptcp_subflow_context *subflow;
2575 if (sock->state != SS_UNCONNECTED && msk->subflow) {
2576 /* pending connection or invalid state, let existing subflow
2579 ssock = msk->subflow;
2591 subflow = mptcp_subflow_ctx(ssock->sk);
2597 mptcp_subflow_early_fallback(msk, subflow);
2599 if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk))
2600 mptcp_subflow_early_fallback(msk, subflow);
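
The connect path (lines 2575-2600) gives up on MPTCP before the SYN leaves the box when it cannot work, e.g. when no token can be allocated; "early fallback" simply clears the request flag so a plain TCP SYN goes out. Consistent with line 2562, a minimal sketch (the fallback helper name is an assumption):

	static void mptcp_subflow_early_fallback(struct mptcp_sock *msk,
						 struct mptcp_subflow_context *subflow)
	{
		subflow->request_mptcp = 0;	/* plain TCP SYN, no MP_CAPABLE */
		__mptcp_do_fallback(msk);	/* assumed helper flagging the msk */
	}
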
2672 struct mptcp_subflow_context *subflow;
2678 mptcp_for_each_subflow(msk, subflow) {
2679 struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
2732 struct mptcp_subflow_context *subflow;
2758 mptcp_for_each_subflow(msk, subflow) {
2759 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);
2773 mptcp_for_each_subflow(msk, subflow) {
2774 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow);