Lines Matching defs:subflow (net/mptcp/protocol.h)

286 bool recovery; /* closing subflow write queue reinjected */
295 u8 pending_state; /* A subflow asked to set this sk_state,
315 * ONCE annotation, the subflow outside the socket
457 /* MPTCP subflow context */
497 is_mptfo : 1, /* subflow is doing TFO */
506 u8 hmac[MPTCPOPT_HMAC_LEN]; /* MPJ subflow only */
550 mptcp_subflow_tcp_sock(const struct mptcp_subflow_context *subflow)
552 return subflow->tcp_sock;
556 mptcp_subflow_ctx_reset(struct mptcp_subflow_context *subflow)
558 memset(&subflow->reset, 0, sizeof(subflow->reset));
559 subflow->request_mptcp = 1;
560 WRITE_ONCE(subflow->local_id, -1);
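
The three statements above (lines 558-560) re-arm a subflow context for a fresh MPTCP handshake: wipe the stored reset information, request MPTCP again, and mark the local address id as unset; the WRITE_ONCE on local_id hints that other paths read it locklessly. A minimal userspace sketch of the same re-arm pattern; the trimmed struct layout is an assumption, only the three touched members come from the listing:

    #include <string.h>

    /* trimmed stand-in for struct mptcp_subflow_context; only the
     * three members touched by the reset helper are modeled here
     */
    struct subflow_ctx {
            struct {
                    unsigned int reason;    /* placeholder reset bookkeeping */
            } reset;
            unsigned int request_mptcp:1;
            int local_id;
    };

    /* mirror of mptcp_subflow_ctx_reset(): clear the reset info,
     * request MPTCP on the next handshake, mark local_id as unset
     */
    static void subflow_ctx_reset(struct subflow_ctx *sf)
    {
            memset(&sf->reset, 0, sizeof(sf->reset));
            sf->request_mptcp = 1;
            sf->local_id = -1;      /* kernel: WRITE_ONCE() for lockless readers */
    }
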
564 mptcp_subflow_get_map_offset(const struct mptcp_subflow_context *subflow)
566 return tcp_sk(mptcp_subflow_tcp_sock(subflow))->copied_seq -
567 subflow->ssn_offset -
568 subflow->map_subflow_seq;
572 mptcp_subflow_get_mapped_dsn(const struct mptcp_subflow_context *subflow)
574 return subflow->map_seq + mptcp_subflow_get_map_offset(subflow);
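
Lines 566-574 carry the subflow-to-data-sequence arithmetic: the read position inside the current mapping is copied_seq - ssn_offset - map_subflow_seq, and adding that offset to map_seq yields the data sequence number (DSN). A standalone sketch with illustrative numbers (all values made up for the example):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
            /* illustrative values only */
            uint32_t copied_seq      = 1000050; /* TCP bytes consumed so far   */
            uint32_t ssn_offset      = 1000000; /* subflow initial sequence    */
            uint32_t map_subflow_seq = 40;      /* mapping start, relative SSN */
            uint64_t map_seq         = 5000;    /* mapping start in data space */

            /* offset inside the current mapping, as computed by
             * mptcp_subflow_get_map_offset()
             */
            uint32_t map_offset = copied_seq - ssn_offset - map_subflow_seq;

            /* DSN of the next byte to consume, as computed by
             * mptcp_subflow_get_mapped_dsn()
             */
            uint64_t dsn = map_seq + map_offset;

            printf("map_offset=%" PRIu32 " dsn=%" PRIu64 "\n", map_offset, dsn);
            return 0;       /* prints: map_offset=10 dsn=5010 */
    }
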
579 static inline void mptcp_subflow_delegate(struct mptcp_subflow_context *subflow, int action)
585 /* the caller held the subflow bh socket lock */
592 old = set_mask_bits(&subflow->delegated_status, 0, set_bits);
594 if (WARN_ON_ONCE(!list_empty(&subflow->delegated_node)))
599 list_add_tail(&subflow->delegated_node, &delegated->head);
600 sock_hold(mptcp_subflow_tcp_sock(subflow));
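
mptcp_subflow_delegate() (lines 579-600) follows a set-flag/enqueue-once/pin pattern: set_mask_bits() atomically ORs the requested action into delegated_status, only the first request enqueues the subflow on the per-CPU delegated list (the WARN at line 594 guards against a double enqueue), and sock_hold() keeps the TCP socket alive until the action runs. A hedged userspace analogue using C11 atomics; the SCHEDULED-bit gating and the stub helpers are simplifications of the upstream logic, not literal kernel code:

    #include <stdatomic.h>

    enum { DELEGATE_SCHEDULED, DELEGATE_SEND, DELEGATE_SNDBUF };

    struct subflow {
            atomic_ulong delegated_status;  /* bitmask of pending actions */
            /* the per-CPU list node and the tcp socket are elided here */
    };

    /* stand-ins for the kernel's list_add_tail() and sock_hold() */
    static void enqueue_delegated(struct subflow *sf) { (void)sf; }
    static void hold_sock(struct subflow *sf) { (void)sf; }

    /* request a deferred action: atomically OR in the action bit plus
     * a SCHEDULED bit; only the transition from "nothing scheduled"
     * enqueues the subflow and pins it with a reference
     */
    static void subflow_delegate(struct subflow *sf, int action)
    {
            unsigned long set_bits = (1UL << DELEGATE_SCHEDULED) | (1UL << action);
            unsigned long old;

            /* kernel: set_mask_bits(&subflow->delegated_status, 0, set_bits) */
            old = atomic_fetch_or(&sf->delegated_status, set_bits);
            if (old & (1UL << DELEGATE_SCHEDULED))
                    return; /* already queued; the runner will see the new bit */

            enqueue_delegated(sf);  /* kernel: list_add_tail() on a per-CPU list */
            hold_sock(sf);          /* kernel: sock_hold() keeps the sock alive  */
    }
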
627 struct mptcp_subflow_context *subflow,
636 struct mptcp_subflow_context *subflow);
667 void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
680 static inline bool __mptcp_subflow_active(struct mptcp_subflow_context *subflow)
683 if (subflow->request_join && !subflow->fully_established)
686 return __tcp_can_send(mptcp_subflow_tcp_sock(subflow));
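
The predicate at lines 683-686 rejects a joining subflow until its MP_JOIN handshake is fully established, and otherwise defers to plain TCP's notion of whether the socket can still send. A compact restatement; the tcp_can_send flag stands in for the kernel's __tcp_can_send(ssk):

    #include <stdbool.h>

    struct subflow_state {
            bool request_join;      /* subflow was added via MP_JOIN    */
            bool fully_established; /* MP_JOIN handshake completed      */
            bool tcp_can_send;      /* stand-in for __tcp_can_send(ssk) */
    };

    /* mirror of __mptcp_subflow_active(): a joining subflow is unusable
     * until fully established; beyond that, usability is plain TCP's call
     */
    static bool subflow_active(const struct subflow_state *sf)
    {
            if (sf->request_join && !sf->fully_established)
                    return false;
            return sf->tcp_can_send;
    }
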
689 void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow);
691 bool mptcp_subflow_active(struct mptcp_subflow_context *subflow);
787 struct mptcp_subflow_context *subflow;
794 mptcp_for_each_subflow(mptcp_sk(sk), subflow) {
795 ssk_sndbuf = READ_ONCE(mptcp_subflow_tcp_sock(subflow)->sk_sndbuf);
797 subflow->cached_sndbuf = ssk_sndbuf;
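
The loop at lines 794-797 snapshots each subflow's live sk_sndbuf (READ_ONCE, since TCP updates it without the msk lock held) into cached_sndbuf; that cache is what the fast paths below compare against. A sketch of the resync; that the per-subflow values are summed into a new msk-level limit follows the upstream __mptcp_sync_sndbuf() and should be treated as an assumption here:

    /* per-subflow view used by the resync loop */
    struct subflow_buf {
            int sk_sndbuf;          /* live TCP value (kernel: READ_ONCE) */
            int cached_sndbuf;      /* msk-side snapshot of the above     */
    };

    /* walk the subflows, snapshot each send buffer into cached_sndbuf
     * and accumulate a new msk-level value; simplified from the loop
     * at lines 794-797
     */
    static int sync_sndbuf(struct subflow_buf *sf, int nr, int base_sndbuf)
    {
            int new_sndbuf = base_sndbuf;   /* kernel seeds this from tcp_wmem */

            for (int i = 0; i < nr; i++) {
                    int ssk_sndbuf = sf[i].sk_sndbuf;

                    sf[i].cached_sndbuf = ssk_sndbuf;
                    new_sndbuf += ssk_sndbuf;
            }
            return new_sndbuf;      /* kernel: WRITE_ONCE(sk->sk_sndbuf, ...) */
    }
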
806 /* the caller held both the msk socket and the subflow socket locks,
811 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
813 if (READ_ONCE(ssk->sk_sndbuf) != subflow->cached_sndbuf)
817 /* the caller held only the subflow socket lock, either in process or
824 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
826 if (likely(READ_ONCE(ssk->sk_sndbuf) == subflow->cached_sndbuf))
830 mptcp_subflow_delegate(subflow, MPTCP_DELEGATE_SNDBUF);
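
Lines 824-830 are the lock-constrained variant: holding only the subflow socket lock, compare the live sk_sndbuf against the cached snapshot and, on mismatch, delegate the resync (MPTCP_DELEGATE_SNDBUF) rather than taking the msk lock in a context where the lock order forbids it. A sketch of the fast/slow split, with a stub for the delegation call:

    struct subflow_snd {
            int sk_sndbuf;          /* live TCP value */
            int cached_sndbuf;      /* snapshot taken by the resync loop */
    };

    /* stub for mptcp_subflow_delegate(subflow, MPTCP_DELEGATE_SNDBUF) */
    static void delegate_sndbuf_sync(struct subflow_snd *sf) { (void)sf; }

    /* mirror of the check at lines 826-830: fast path when the snapshot
     * still matches, otherwise defer the msk-level resync to the
     * delegated-action machinery instead of taking the msk socket lock
     */
    static void propagate_sndbuf(struct subflow_snd *sf)
    {
            if (sf->sk_sndbuf == sf->cached_sndbuf) /* kernel: likely(READ_ONCE()) */
                    return;

            delegate_sndbuf_sync(sf);
    }
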
880 const struct mptcp_subflow_context *subflow);
943 void __mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
945 void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subflow,
1012 static inline u8 subflow_get_local_id(const struct mptcp_subflow_context *subflow)
1014 int local_id = READ_ONCE(subflow->local_id);
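
local_id starts out as -1 (see the reset helper at line 560) and is read with READ_ONCE because the path manager can update it concurrently; the u8 return type narrows the value once validated. A sketch of the accessor; the collapse of a still-unset id to 0 follows the upstream helper and is an assumption if your tree differs:

    #include <stdint.h>

    struct subflow_ids {
            int local_id;   /* -1 until the path manager assigns one */
    };

    /* mirror of subflow_get_local_id(): the kernel reads local_id with
     * READ_ONCE since it may be written concurrently; a still-unset id
     * (-1) collapses to 0 for the caller
     */
    static uint8_t get_local_id(const struct subflow_ids *sf)
    {
            int local_id = sf->local_id;    /* kernel: READ_ONCE() */

            if (local_id < 0)
                    return 0;
            return (uint8_t)local_id;
    }
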
1061 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1062 struct mptcp_sock *msk = mptcp_sk(subflow->conn);
1078 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1079 struct sock *sk = subflow->conn;
1110 static inline bool is_active_ssk(struct mptcp_subflow_context *subflow)
1112 return (subflow->request_mptcp || subflow->request_join);
1117 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
1121 is_active_ssk(subflow) &&
1122 !subflow->conn_finished;
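
The tail of the listing pairs two predicates: is_active_ssk() (lines 1110-1112) is true when this end actively initiated either an MP_CAPABLE or an MP_JOIN handshake, and the condition at lines 1121-1122 additionally requires that the connection-level handshake has not finished yet. A combined sketch; the TCP socket-state test that precedes line 1121 in the source is elided because it does not appear in the matches:

    #include <stdbool.h>

    struct subflow_flags {
            bool request_mptcp;     /* initiated an MP_CAPABLE handshake */
            bool request_join;      /* initiated an MP_JOIN handshake    */
            bool conn_finished;     /* connection-level handshake done   */
    };

    /* mirror of is_active_ssk(): did this end actively request MPTCP? */
    static bool is_active_ssk(const struct subflow_flags *sf)
    {
            return sf->request_mptcp || sf->request_join;
    }

    /* sketch of the conjunction at lines 1121-1122: an actively
     * requested subflow whose handshake is still in progress
     */
    static bool handshake_pending(const struct subflow_flags *sf)
    {
            return is_active_ssk(sf) && !sf->conn_finished;
    }
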