Lines matching refs: ssk (net/mptcp/subflow.c)
273 void mptcp_subflow_reset(struct sock *ssk)
275 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
278 tcp_send_active_reset(ssk, GFP_ATOMIC);
279 tcp_done(ssk);
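
Lines 273-279 are the subflow reset path: look up the subflow context from the ssk, send a TCP-level RST, then finish TCP teardown. A minimal sketch of that shape, assuming subflow.c context; the real function also wakes the parent msk's worker, and details vary by kernel version:

    void mptcp_subflow_reset(struct sock *ssk)
    {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        struct sock *sk = subflow->conn;    /* parent MPTCP socket */

        sock_hold(sk);                      /* tcp_done() may drop the last ref */
        tcp_set_state(ssk, TCP_CLOSE);
        tcp_send_active_reset(ssk, GFP_ATOMIC);    /* RST the peer */
        tcp_done(ssk);                      /* TCP-level teardown */
        sock_put(sk);
    }
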
500 static void subflow_drop_ctx(struct sock *ssk)
502 struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
507 subflow_ulp_fallback(ssk, ctx);
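
Lines 500-507 (subflow_drop_ctx) release a subflow's MPTCP state: the ULP falls back to plain TCP and the context is freed. Roughly, assuming the usual subflow.c layout:

    static void subflow_drop_ctx(struct sock *ssk)
    {
        struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);

        if (!ctx)
            return;

        subflow_ulp_fallback(ssk, ctx);    /* detach the MPTCP ULP ops */
        if (ctx->conn)
            sock_put(ctx->conn);           /* drop the parent-msk reference */

        kfree_rcu(ctx, rcu);               /* free after an RCU grace period */
    }
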
689 static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
691 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
694 skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
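
Line 694 computes how much of the skb the receiver has already consumed (copied_seq minus the skb's starting sequence number); the helper then checks whether the remaining bytes still fit inside the current DSS mapping. A sketch under that reading, using the mptcp_subflow_get_map_offset() companion helper from protocol.h:

    /* true if all the data in @skb is covered by the current mapping */
    static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
    {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        unsigned int skb_consumed;

        skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
        if (WARN_ON_ONCE(skb_consumed >= skb->len))
            return true;    /* nothing left to read; treat as covered */

        return skb->len - skb_consumed <= subflow->map_data_len -
                          mptcp_subflow_get_map_offset(subflow);
    }
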
702 static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
704 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
705 u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
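
Line 705 translates copied_seq into a relative subflow sequence number (ssn) by subtracting ssn_offset; validate_mapping() then rejects mappings that start after, or end before, the data actually queued. A sketch of that check, with pr_debug() standing in for the file's warning helper:

    static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
    {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;

        if (unlikely(before(ssn, subflow->map_subflow_seq))) {
            /* mapping covers data later in the subflow stream */
            pr_debug("bad mapping: ssn=%u map_subflow_seq=%u",
                     ssn, subflow->map_subflow_seq);
            return false;
        }
        if (unlikely(!before(ssn, subflow->map_subflow_seq +
                                  subflow->map_data_len))) {
            /* mapping ends before the data we are reading */
            pr_debug("bad mapping: ssn=%u past map end", ssn + skb->len);
            return false;
        }
        return true;
    }
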
723 static enum mapping_status get_mapping_status(struct sock *ssk,
726 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
732 skb = skb_peek(&ssk->sk_receive_queue);
736 if (mptcp_check_fallback(ssk))
751 sk_eat_skb(ssk, skb);
767 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
829 if (skb_is_fully_mapped(ssk, skb)) {
830 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
851 if (!validate_mapping(ssk, skb))
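
The matches between 723 and 851 outline get_mapping_status(), the core of DSS-mapping validation on the receive path: peek the head skb, short-circuit fallback connections, count infinite mappings and mapping mismatches in the MIB, then validate the mapping against the queued data. A heavily condensed sketch; the real function also handles DATA_FIN, 64-bit sequence expansion, and identical-map replacement, with return values taken from the file's enum mapping_status:

    static enum mapping_status get_mapping_status(struct sock *ssk,
                                                  struct mptcp_sock *msk)
    {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        struct mptcp_ext *mpext;
        struct sk_buff *skb;

        skb = skb_peek(&ssk->sk_receive_queue);
        if (!skb)
            return MAPPING_EMPTY;

        /* after fallback to plain TCP every byte is implicitly mapped */
        if (mptcp_check_fallback(ssk))
            return MAPPING_DUMMY;

        mpext = mptcp_get_ext(skb);
        if (!mpext || !mpext->use_map) {
            if (!skb->len) {
                /* pure ack: nothing to map, just drop it */
                sk_eat_skb(ssk, skb);
                return MAPPING_EMPTY;
            }
            /* data without DSS must be covered by the previous mapping */
            return subflow->map_valid ? MAPPING_OK : MAPPING_INVALID;
        }

        if (!mpext->data_len) {
            /* zero data_len == infinite mapping: the peer fell back */
            MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
            return MAPPING_INVALID;
        }

        if (subflow->map_valid) {
            /* a fully mapped skb must not carry a new, conflicting
             * mapping: caching a second map is unsupported
             */
            if (skb_is_fully_mapped(ssk, skb)) {
                MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
                return MAPPING_INVALID;
            }
        } else {
            /* install the mapping advertised by the DSS option */
            subflow->map_seq = mpext->data_seq;
            subflow->map_subflow_seq = mpext->subflow_seq;
            subflow->map_data_len = mpext->data_len;
            subflow->map_valid = 1;
        }

        /* the mapping must cover the data we are about to read */
        if (!validate_mapping(ssk, skb))
            return MAPPING_INVALID;

        return MAPPING_OK;
    }
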
858 static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
861 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
869 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
870 tcp_sk(ssk)->copied_seq += incr;
871 if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
872 sk_eat_skb(ssk, skb);
876 tcp_cleanup_rbuf(ssk, incr);
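
Lines 858-876 (mptcp_subflow_discard_data) drop data at the TCP level that the MPTCP layer has already received on another subflow: copied_seq is advanced past the duplicate bytes, a fully consumed skb is freed, and tcp_cleanup_rbuf() lets TCP send any ACK or window update it now owes the peer. Roughly:

    static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
                                           u64 limit)
    {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        bool fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
        u32 incr;

        /* discard at most @limit bytes; a FIN occupies one extra seq */
        incr = limit >= skb->len ? skb->len + fin : limit;

        pr_debug("discarding=%u len=%u seq=%u", incr, skb->len,
                 subflow->map_subflow_seq);
        MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
        tcp_sk(ssk)->copied_seq += incr;    /* skip the duplicate bytes */
        if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
            sk_eat_skb(ssk, skb);           /* whole skb consumed, free it */

        /* give TCP a chance to ACK / update the receive window */
        tcp_cleanup_rbuf(ssk, incr);
    }
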
879 static bool subflow_check_data_avail(struct sock *ssk)
881 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
886 pr_debug("msk=%p ssk=%p data_avail=%d skb=%p", subflow->conn, ssk,
887 subflow->data_avail, skb_peek(&ssk->sk_receive_queue));
888 if (!skb_peek(&ssk->sk_receive_queue))
898 status = get_mapping_status(ssk, msk);
899 pr_debug("msk=%p ssk=%p status=%d", msk, ssk, status);
901 ssk->sk_err = EBADMSG;
906 skb = skb_peek(&ssk->sk_receive_queue);
910 subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq -
919 skb = skb_peek(&ssk->sk_receive_queue);
928 ssk->sk_err = EBADMSG;
951 mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
959 ssk->sk_error_report(ssk);
960 tcp_set_state(ssk, TCP_CLOSE);
961 tcp_send_active_reset(ssk, GFP_ATOMIC);
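
Lines 879-961 are subflow_check_data_avail(); the matches at 901, 928, and 959-961 show its fatal path: when the peer violates the DSS rules, the subflow is killed with EBADMSG and a reset. That sequence, pulled out into a hypothetical helper for illustration (the real code runs it inline):

    /* hypothetical helper: fatal protocol error on this subflow */
    static void subflow_fatal_error(struct sock *ssk)
    {
        ssk->sk_err = EBADMSG;              /* recvmsg() will report this */
        ssk->sk_error_report(ssk);          /* wake anyone polling for errors */
        tcp_set_state(ssk, TCP_CLOSE);
        tcp_send_active_reset(ssk, GFP_ATOMIC);    /* RST the peer */
    }
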
984 /* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
985 * not the ssk one.
989 * Data that is still on the ssk rx queue can thus be ignored,
993 void mptcp_space(const struct sock *ssk, int *space, int *full_space)
995 const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
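
The comment at 984-989 explains why receive-buffer space is computed on the MPTCP parent rather than the subflow: the peer cares about the msk-level receive window, and data still sitting in the ssk receive queue counts as in flight until it reaches the msk. The body then just dereferences the parent; roughly (earlier kernels used tcp_space()/tcp_full_space() on the msk, later ones a dedicated __mptcp_space() helper):

    void mptcp_space(const struct sock *ssk, int *space, int *full_space)
    {
        const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        const struct sock *sk = subflow->conn;    /* parent MPTCP socket */

        *space = tcp_space(sk);                   /* free rcvbuf on the msk */
        *full_space = tcp_full_space(sk);         /* total rcvbuf on the msk */
    }
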
1105 struct sock *ssk;
1117 ssk = sf->sk;
1118 subflow = mptcp_subflow_ctx(ssk);
1124 err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk);
1141 ssk->sk_bound_dev_if = loc->ifindex;
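
Lines 1105-1141 belong to the subflow-connect path (__mptcp_subflow_connect() in this file): a new kernel TCP socket is created, its subflow context fetched, a local address id obtained from the path manager, and the socket pinned to the chosen interface before bind/connect. A condensed fragment of that flow, assuming the enclosing function provides sk, msk, loc, and a failed: cleanup label:

    struct mptcp_subflow_context *subflow;
    struct socket *sf;
    struct sock *ssk;
    int err;

    err = mptcp_subflow_create_socket(sk, &sf);    /* new TCP socket + ctx */
    if (err)
        return err;

    ssk = sf->sk;
    subflow = mptcp_subflow_ctx(ssk);

    /* ask the path manager for the local address id of this subflow */
    err = mptcp_pm_get_local_id(msk, (struct sock_common *)ssk);
    if (err < 0)
        goto failed;

    /* pin the subflow to the interface of the chosen local address */
    ssk->sk_bound_dev_if = loc->ifindex;
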
1281 /* as recvmsg() does not acquire the subflow socket for ssk selection