Lines matching refs: ssk

31 static void mptcp_subflow_ops_undo_override(struct sock *ssk);
395 void mptcp_subflow_reset(struct sock *ssk)
397 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
403 if (ssk->sk_state == TCP_CLOSE)
409 tcp_send_active_reset(ssk, GFP_ATOMIC);
410 tcp_done(ssk);
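
The hits at 395-410 cover mptcp_subflow_reset(): look up the subflow context, return early if the subflow is already TCP_CLOSE, otherwise send an active TCP reset and finish the socket with tcp_done(). The closest userspace analogue of "abort with a RST rather than a FIN teardown" is closing with SO_LINGER armed and a zero timeout; a minimal sketch of that (plain sockets API, not the kernel function above):

#include <sys/socket.h>
#include <unistd.h>

/* Userspace analogue of an active reset: with SO_LINGER armed and a zero
 * timeout, close() sends a RST and drops any unsent data instead of doing
 * the normal FIN handshake.
 */
static int abort_with_reset(int fd)
{
        struct linger lg = { .l_onoff = 1, .l_linger = 0 };

        if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg)) < 0)
                return -1;
        return close(fd);
}
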
426 struct sock *ssk = msk->first;
428 subflow = mptcp_subflow_ctx(ssk);
429 __mptcp_propagate_sndbuf(sk, ssk);
431 mptcp_rcv_space_init(msk, ssk);
465 static void mptcp_propagate_state(struct sock *sk, struct sock *ssk,
477 WRITE_ONCE(msk->wnd_end, subflow->idsn + 1 + tcp_sk(ssk)->snd_wnd);
482 __mptcp_sync_state(sk, ssk->sk_state);
484 msk->pending_state = ssk->sk_state;
737 void mptcp_subflow_drop_ctx(struct sock *ssk)
739 struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
744 list_del(&mptcp_subflow_ctx(ssk)->node);
745 if (inet_csk(ssk)->icsk_ulp_ops) {
746 subflow_ulp_fallback(ssk, ctx);
840 /* ssk inherits options of listener sk */
930 static bool skb_is_fully_mapped(struct sock *ssk, struct sk_buff *skb)
932 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
935 skb_consumed = tcp_sk(ssk)->copied_seq - TCP_SKB_CB(skb)->seq;
943 static bool validate_mapping(struct sock *ssk, struct sk_buff *skb)
945 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
946 u32 ssn = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
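
skb_is_fully_mapped() (930-935) and validate_mapping() (943-946) both derive subflow-relative positions by plain u32 subtraction (copied_seq - seq, copied_seq - ssn_offset), so wraparound falls out of the modular arithmetic, and the result is then tested against the current DSS mapping. A self-contained sketch of that wrap-safe bounds check; before() is re-implemented locally and the mapping fields are named after the MPTCP ones, but this illustrates the arithmetic rather than reproducing the kernel code:

#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe "a comes before b" for 32-bit sequence numbers, the same idea
 * as the kernel's before()/after() helpers.
 */
static inline bool seq32_before(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) < 0;
}

struct dss_mapping {
        uint32_t map_subflow_seq;       /* first subflow-relative seq covered */
        uint32_t map_data_len;          /* number of bytes the mapping covers */
};

/* Is the next byte to read (ssn = copied_seq - ssn_offset) still inside
 * the current mapping?
 */
static bool ssn_in_mapping(const struct dss_mapping *map, uint32_t ssn)
{
        if (seq32_before(ssn, map->map_subflow_seq))
                return false;   /* mapping starts later in the stream */
        if (!seq32_before(ssn, map->map_subflow_seq + map->map_data_len))
                return false;   /* mapping already fully consumed */
        return true;
}
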
964 static enum mapping_status validate_data_csum(struct sock *ssk, struct sk_buff *skb,
967 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
986 seq = tcp_sk(ssk)->copied_seq + subflow->map_csum_len;
1006 if (skb_queue_is_last(&ssk->sk_receive_queue, skb)) {
1011 if (unlikely(ssk->sk_state == TCP_CLOSE))
1012 while ((skb = skb_peek(&ssk->sk_receive_queue)))
1013 sk_eat_skb(ssk, skb);
1035 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DATACSUMERR);
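
validate_data_csum() (964-1035) resumes at copied_seq + map_csum_len and walks the receive queue, so the checksum over a DSS mapping is accumulated incrementally as segments arrive and only verified once the whole mapping is covered (MPTCP_MIB_DATACSUMERR on mismatch). A toy sketch of that accumulate-across-chunks shape, using a trivial additive sum rather than the real DSS checksum:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct csum_state {
        uint32_t sum;           /* running sum over the mapping so far */
        size_t   covered;       /* bytes of the mapping already fed in */
        size_t   map_len;       /* total bytes the mapping covers */
};

/* Feed one received chunk; returns true once the whole mapping has been
 * covered and the accumulated value can be compared with the expected
 * checksum.
 */
static bool csum_feed(struct csum_state *st, const uint8_t *buf, size_t len)
{
        size_t take = st->map_len - st->covered;

        if (take > len)
                take = len;
        for (size_t i = 0; i < take; i++)
                st->sum += buf[i];      /* stand-in for the real DSS checksum */
        st->covered += take;
        return st->covered == st->map_len;
}
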
1043 static enum mapping_status get_mapping_status(struct sock *ssk,
1046 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1053 skb = skb_peek(&ssk->sk_receive_queue);
1057 if (mptcp_check_fallback(ssk))
1072 sk_eat_skb(ssk, skb);
1087 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_INFINITEMAPRX);
1145 if (skb_is_fully_mapped(ssk, skb)) {
1146 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSNOMATCH);
1177 if (!validate_mapping(ssk, skb)) {
1178 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSTCPMISMATCH);
1185 return validate_data_csum(ssk, skb, csum_reqd);
1188 static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
1191 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1199 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DUPDATA);
1200 tcp_sk(ssk)->copied_seq += incr;
1201 if (!before(tcp_sk(ssk)->copied_seq, TCP_SKB_CB(skb)->end_seq))
1202 sk_eat_skb(ssk, skb);
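
mptcp_subflow_discard_data() (1188-1202) skips duplicate bytes by advancing tcp copied_seq by incr and frees the skb only once copied_seq has reached its end_seq, so a partial overlap merely moves the read pointer. A small sketch of that advance-then-free-when-consumed pattern (illustrative types, not the kernel's):

#include <stdbool.h>
#include <stdint.h>

struct rx_buf {
        uint32_t seq;           /* sequence number of the first byte */
        uint32_t end_seq;       /* one past the last byte */
};

/* Discard incr duplicate bytes: advance the consumer's copied_seq and
 * report whether the buffer is now fully consumed and may be freed.
 */
static bool discard_dup(uint32_t *copied_seq, const struct rx_buf *buf,
                        uint32_t incr)
{
        *copied_seq += incr;
        return (int32_t)(*copied_seq - buf->end_seq) >= 0;      /* !before() */
}
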
1208 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
1210 if (likely(ssk->sk_state != TCP_CLOSE))
1213 if (skb_queue_empty(&ssk->sk_receive_queue) &&
1230 static void mptcp_subflow_fail(struct mptcp_sock *msk, struct sock *ssk)
1232 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1236 if (WARN_ON_ONCE(ssk != READ_ONCE(msk->first)))
1252 tcp_send_ack(ssk);
1257 static bool subflow_check_data_avail(struct sock *ssk)
1259 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1264 if (!skb_peek(&ssk->sk_receive_queue))
1274 status = get_mapping_status(ssk, msk);
1275 trace_subflow_check_data_avail(status, skb_peek(&ssk->sk_receive_queue));
1283 skb = skb_peek(&ssk->sk_receive_queue);
1295 mptcp_subflow_discard_data(ssk, skb, old_ack - ack_seq);
1305 subflow_sched_work_if_closed(msk, ssk);
1320 mptcp_subflow_fail(msk, ssk);
1333 WRITE_ONCE(ssk->sk_err, EBADMSG);
1334 tcp_set_state(ssk, TCP_CLOSE);
1335 while ((skb = skb_peek(&ssk->sk_receive_queue)))
1336 sk_eat_skb(ssk, skb);
1337 tcp_send_active_reset(ssk, GFP_ATOMIC);
1342 mptcp_do_fallback(ssk);
1345 skb = skb_peek(&ssk->sk_receive_queue);
1349 subflow->map_subflow_seq = tcp_sk(ssk)->copied_seq - subflow->ssn_offset;
1372 /* If ssk has an mptcp parent socket, use the mptcp rcvbuf occupancy,
1373 * not the ssk one.
1377 * Data that is still on the ssk rx queue can thus be ignored,
1381 void mptcp_space(const struct sock *ssk, int *space, int *full_space)
1383 const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
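
The comment block starting at 1372 states the policy mptcp_space() implements: receive-buffer occupancy is accounted on the MPTCP parent socket rather than on the subflow, so data still queued on the ssk can be ignored when computing advertised space. A minimal sketch of that delegation; the structs and fields below are invented stand-ins, not the kernel's accounting:

#include <stdint.h>

struct parent_conn {
        int64_t rcvbuf;         /* configured receive buffer size */
        int64_t rmem;           /* bytes currently charged to the parent */
};

struct subflow_like {
        struct parent_conn *conn;       /* the MPTCP-level socket */
};

/* Report receive space based on the parent connection's accounting, not
 * the subflow's own queue.
 */
static void report_space(const struct subflow_like *sf, int64_t *space,
                         int64_t *full_space)
{
        const struct parent_conn *conn = sf->conn;

        *full_space = conn->rcvbuf;
        *space = conn->rcvbuf - conn->rmem;
        if (*space < 0)
                *space = 0;
}
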
1390 static void subflow_error_report(struct sock *ssk)
1392 struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
1439 static void subflow_write_space(struct sock *ssk)
1441 struct sock *sk = mptcp_subflow_ctx(ssk)->conn;
1443 mptcp_propagate_sndbuf(sk, ssk);
1518 struct sock *ssk;
1531 ssk = sf->sk;
1532 subflow = mptcp_subflow_ctx(ssk);
1546 mptcp_info2sockaddr(loc, &addr, ssk->sk_family);
1553 mptcp_sockopt_sync(msk, ssk);
1555 ssk->sk_bound_dev_if = ifindex;
1568 mptcp_info2sockaddr(remote, &addr, ssk->sk_family);
1570 sock_hold(ssk);
1577 mptcp_sock_graft(ssk, sk->sk_socket);
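
The hits at 1518-1577 are the subflow-creation path: a fresh TCP socket is created, the abstract address info is converted to a sockaddr of the matching family (mptcp_info2sockaddr), socket options are synced from the msk, and the socket is pinned to an interface via sk_bound_dev_if before being grafted and connected. The userspace counterpart of that interface pinning is SO_BINDTODEVICE; a short sketch (plain sockets API, not the kernel path):

#include <net/if.h>
#include <string.h>
#include <sys/socket.h>

/* Pin a socket to one network interface before connect(); the userspace
 * counterpart of setting sk_bound_dev_if on the new subflow socket.
 */
static int bind_to_ifindex(int fd, unsigned int ifindex)
{
        char name[IF_NAMESIZE];

        if (!if_indextoname(ifindex, name))
                return -1;
        return setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE,
                          name, strlen(name));
}
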
1623 static void mptcp_subflow_ops_override(struct sock *ssk)
1626 if (ssk->sk_prot == &tcpv6_prot)
1627 ssk->sk_prot = &tcpv6_prot_override;
1630 ssk->sk_prot = &tcp_prot_override;
1633 static void mptcp_subflow_ops_undo_override(struct sock *ssk)
1636 if (ssk->sk_prot == &tcpv6_prot_override)
1637 ssk->sk_prot = &tcpv6_prot;
1640 ssk->sk_prot = &tcp_prot;
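
mptcp_subflow_ops_override() and mptcp_subflow_ops_undo_override() (1623-1640) flip the subflow's sk_prot pointer between the stock tcp(v6)_prot table and an override copy, which is how the tcp_release_cb_override()/tcp_abort_override() hooks further down get interposed. A self-contained sketch of that ops-table swap; the table layout and names are invented for illustration:

struct proto_like {
        void (*release_cb)(void *sk);   /* one representative callback */
};

static void release_cb_plain(void *sk)
{
        (void)sk;       /* stock behaviour would run here */
}

static void release_cb_wrapped(void *sk)
{
        /* extra (MPTCP-style) work first ... */
        release_cb_plain(sk);   /* ... then always delegate to the original */
}

static const struct proto_like prot_plain    = { .release_cb = release_cb_plain };
static const struct proto_like prot_override = { .release_cb = release_cb_wrapped };

struct sock_like {
        const struct proto_like *sk_prot;       /* table all callbacks go through */
};

static void ops_override(struct sock_like *sk)
{
        if (sk->sk_prot == &prot_plain)
                sk->sk_prot = &prot_override;
}

static void ops_undo_override(struct sock_like *sk)
{
        if (sk->sk_prot == &prot_override)
                sk->sk_prot = &prot_plain;
}
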
1763 /* as recvmsg() does not acquire the subflow socket for ssk selection
1787 struct sock *sk, *ssk;
1790 * Splice the req list, so that accept() can not reach the pending ssk after
1809 ssk = req->sk;
1810 if (!sk_is_mptcp(ssk))
1813 subflow = mptcp_subflow_ctx(ssk);
1893 static void subflow_ulp_release(struct sock *ssk)
1895 struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
1919 mptcp_subflow_ops_undo_override(ssk);
1980 static void tcp_release_cb_override(struct sock *ssk)
1982 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
1991 mptcp_subflow_process_delegated(ssk, status);
1993 tcp_release_cb(ssk);
1996 static int tcp_abort_override(struct sock *ssk, int err)
2001 if (inet_sk_state_load(ssk) == TCP_LISTEN)
2004 return tcp_abort(ssk, err);
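
The final hits (1980-2004) show the interposed callbacks themselves: tcp_release_cb_override() runs the MPTCP delegated work and then falls through to the stock tcp_release_cb(), while tcp_abort_override() refuses to abort a listening subflow before delegating to tcp_abort(). A tiny sketch of that guard-then-delegate wrapper; the rejection error code is an assumption, since the listing only shows the TCP_LISTEN check:

#include <errno.h>

enum sk_state_like { ST_ESTABLISHED, ST_LISTEN, ST_CLOSE };

struct sk_like {
        enum sk_state_like state;
};

/* Stand-in for the stock abort operation being wrapped. */
static int plain_abort(struct sk_like *sk, int err)
{
        (void)err;
        sk->state = ST_CLOSE;
        return 0;
}

/* Wrapped abort: refuse to tear down a listener here, otherwise delegate
 * to the original operation unchanged.
 */
static int abort_override(struct sk_like *sk, int err)
{
        if (sk->state == ST_LISTEN)
                return -EINVAL; /* assumed rejection code; only the check is shown above */
        return plain_abort(sk, err);
}
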