Lines matching refs:req
386 void tcp_openreq_init_rwin(struct request_sock *req,
390 struct inet_request_sock *ireq = inet_rsk(req);
401 req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
405 (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
406 req->rsk_window_clamp = full_space;
408 rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
417 &req->rsk_rcv_wnd,
418 &req->rsk_window_clamp,
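
The tcp_openreq_init_rwin() matches above (lines 386-418) cover how the initial receive window is chosen while the connection still lives as a request_sock. Below is a much-reduced userspace sketch of the clamping at lines 401-406; struct mini_rwin, the dst_window argument and the rcvbuf_locked flag are invented stand-ins for the real request_sock, dst_metric(dst, RTAX_WINDOW) and the (unmatched) SOCK_RCVBUF_LOCK test, not the kernel code itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, cut-down request-sock window state. */
struct mini_rwin {
	uint32_t rsk_window_clamp;
	uint32_t rsk_rcv_wnd;
};

/* Mirrors lines 401-406: take the listener's clamp if one is set,
 * otherwise the route's cached window (dst_window here); if the
 * application pinned the receive buffer (SO_RCVBUF), never advertise
 * more than the space that buffer actually provides (full_space). */
static void openreq_init_rwin_sketch(struct mini_rwin *req,
				     uint32_t listener_clamp,
				     uint32_t dst_window,
				     uint32_t full_space,
				     bool rcvbuf_locked)
{
	req->rsk_window_clamp = listener_clamp ? listener_clamp : dst_window;

	if (rcvbuf_locked &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	/* The kernel then runs tcp_select_initial_window() (lines 417-418)
	 * to derive rsk_rcv_wnd and the final clamp; the sketch simply
	 * starts the window at the clamp. */
	req->rsk_rcv_wnd = req->rsk_window_clamp;
}

int main(void)
{
	struct mini_rwin req;

	openreq_init_rwin_sketch(&req, 0, 1 << 20, 65535, true);
	printf("clamp=%u rcv_wnd=%u\n",
	       (unsigned)req.rsk_window_clamp, (unsigned)req.rsk_rcv_wnd);
	return 0;
}
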
427 const struct request_sock *req)
429 tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
462 struct request_sock *req,
469 ireq = inet_rsk(req);
483 struct request_sock *req,
486 struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
487 const struct inet_request_sock *ireq = inet_rsk(req);
488 struct tcp_request_sock *treq = tcp_rsk(req);
501 smc_check_reset_syn_req(oldtp, req, newtp);
527 newtp->total_retrans = req->num_retrans;
538 newtp->window_clamp = req->rsk_window_clamp;
539 newtp->rcv_ssthresh = req->rsk_rcv_wnd;
540 newtp->rcv_wnd = req->rsk_rcv_wnd;
553 newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
560 if (req->num_timeout) {
571 newtp->rx_opt.mss_clamp = req->mss;
572 tcp_ecn_openreq_child(newtp, req);
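
The tcp_create_openreq_child() matches (lines 483-572) show the negotiated state being moved from the short-lived request_sock into the freshly cloned child socket, including the ECN capability set by tcp_ecn_openreq_child() at line 429. A simplified sketch of that hand-off follows; mini_req and mini_tcp_sock are hypothetical cut-down structures carrying only the fields named in the matched lines.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MINI_ECN_OK 1	/* sketch-local flag value, stands in for TCP_ECN_OK */

/* Hypothetical reduced request_sock: only fields from lines 527-572. */
struct mini_req {
	uint32_t rsk_window_clamp;
	uint32_t rsk_rcv_wnd;
	uint32_t ts_recent;
	uint16_t mss;
	uint8_t	 num_retrans;
	bool	 ecn_ok;
};

/* Hypothetical reduced child tcp_sock. */
struct mini_tcp_sock {
	uint32_t total_retrans;
	uint32_t window_clamp;
	uint32_t rcv_ssthresh;
	uint32_t rcv_wnd;
	uint32_t ts_recent;
	uint16_t mss_clamp;
	uint8_t	 ecn_flags;
};

/* Sketch of the copy tcp_create_openreq_child() performs: the window,
 * timestamp and MSS state accumulated on the request_sock becomes the
 * starting state of the new full socket. */
static void openreq_child_copy_sketch(struct mini_tcp_sock *newtp,
				      const struct mini_req *req)
{
	newtp->total_retrans = req->num_retrans;	/* line 527 */
	newtp->window_clamp  = req->rsk_window_clamp;	/* line 538 */
	newtp->rcv_ssthresh  = req->rsk_rcv_wnd;	/* line 539 */
	newtp->rcv_wnd	     = req->rsk_rcv_wnd;	/* line 540 */
	newtp->ts_recent     = req->ts_recent;		/* line 553 */
	newtp->mss_clamp     = req->mss;		/* line 571 */
	/* tcp_ecn_openreq_child(): inherit ECN capability (line 429). */
	newtp->ecn_flags     = req->ecn_ok ? MINI_ECN_OK : 0;
}

int main(void)
{
	struct mini_req req = { .rsk_rcv_wnd = 65535, .mss = 1460, .ecn_ok = true };
	struct mini_tcp_sock child = { 0 };

	openreq_child_copy_sketch(&child, &req);
	printf("rcv_wnd=%u mss_clamp=%u ecn=%u\n",
	       (unsigned)child.rcv_wnd, (unsigned)child.mss_clamp,
	       (unsigned)child.ecn_flags);
	return 0;
}
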
600 struct request_sock *req,
615 tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
617 tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
622 tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
628 if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
656 &tcp_rsk(req)->last_oow_ack_time) &&
658 !inet_rtx_syn_ack(sk, req)) {
661 expires += reqsk_timeout(req, TCP_RTO_MAX);
663 mod_timer_pending(&req->rsk_timer, expires);
665 req->rsk_timer.expires = expires;
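
Lines 628-665 are the retransmitted-SYN branch of tcp_check_req(): a SYN whose sequence number is still the original ISN means our SYN-ACK was lost, so it is answered again (rate limited via last_oow_ack_time) and the request timer is pushed further out. A rough userspace sketch of that idea follows; mini_syn_state, handle_retransmitted_syn_sketch() and the resend_syn_ack() placeholder are invented here and only approximate the kernel's jiffies/reqsk_timeout() arithmetic.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical per-request state for the retransmitted-SYN path. */
struct mini_syn_state {
	uint32_t rcv_isn;	   /* peer's initial sequence number	    */
	time_t	 last_synack_sent; /* rate limiter, like last_oow_ack_time */
	time_t	 timer_expires;	   /* when the request_sock would expire    */
};

/* A SYN carrying the original ISN is the client retransmitting its SYN:
 * answer with another SYN-ACK (rate limited) and, if one was actually
 * sent, extend the request timer so the half-open connection survives
 * another round trip (compare lines 656-665). */
void handle_retransmitted_syn_sketch(struct mini_syn_state *req,
				     uint32_t seq, bool is_syn,
				     time_t now, time_t rto_max)
{
	if (!is_syn || seq != req->rcv_isn)
		return;

	if (now - req->last_synack_sent < 1)	/* crude 1 s rate limit */
		return;

	/* resend_syn_ack(req); -- placeholder for inet_rtx_syn_ack() */
	req->last_synack_sent = now;

	/* Push the expiry out by the (capped) request timeout instead of
	 * letting the timer fire underneath a still-retrying client. */
	req->timer_expires = now + rto_max;
}

int main(void)
{
	struct mini_syn_state req = { .rcv_isn = 1000 };
	time_t now = time(NULL);

	handle_retransmitted_syn_sketch(&req, 1000, true, now, 120);
	printf("request now expires in %ld s\n",
	       (long)(req.timer_expires - now));
	return 0;
}
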
725 * than req because user data may have been sent out.
729 tcp_rsk(req)->snt_isn + 1))
740 tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
745 &tcp_rsk(req)->last_oow_ack_time))
746 req->rsk_ops->send_ack(sk, skb, req);
755 * we take ownership of @req.
757 if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
758 WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);
760 if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
762 at tcp_rsk(req)->rcv_isn + 1. */
790 if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
791 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
792 inet_rsk(req)->acked = 1;
803 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
804 req, &own_req);
808 if (own_req && rsk_drop_req(req)) {
809 reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
810 inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
815 tcp_synack_rtt_meas(child, req);
817 return inet_csk_complete_hashdance(sk, child, req, own_req);
820 if (sk != req->rsk_listener)
824 inet_rsk(req)->acked = 1;
835 req->rsk_ops->send_reset(sk, skb);
837 reqsk_fastopen_remove(sk, req, true);
841 bool unlinked = inet_csk_reqsk_queue_drop(sk, req);
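
The remaining tcp_check_req() matches (lines 725-841) validate the ACK that completes the handshake before a child socket is created: the ACK must acknowledge snt_isn + 1 (relaxed for Fast Open, where user data may already have been sent out), the segment must fall inside the window advertised from the request_sock, and with TCP_DEFER_ACCEPT a bare ACK is only remembered (acked = 1) rather than promoted to a child. A hedged sketch of those three checks follows; mini_req_state and the helper names are invented, and the real tcp_in_window() and defer-accept bookkeeping handle more edge cases than shown.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical reduced request_sock state for the checks above. */
struct mini_req_state {
	uint32_t snt_isn;	/* ISN we sent in the SYN-ACK		*/
	uint32_t rcv_isn;	/* ISN received from the peer		*/
	uint32_t rcv_nxt;	/* next sequence number we expect	*/
	uint32_t rsk_rcv_wnd;	/* window advertised to the peer	*/
	uint8_t	 num_timeout;	/* SYN-ACK retransmissions so far	*/
};

/* Wrap-safe sequence comparisons, as TCP does them. */
static bool seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static bool seq_after(uint32_t a, uint32_t b)  { return seq_before(b, a); }

/* Lines 725-729: in the non-Fast-Open case the completing ACK must
 * acknowledge exactly snt_isn + 1 (our SYN consumed one sequence
 * number); anything else is an unacceptable ACK. */
static bool ack_acceptable(const struct mini_req_state *req, uint32_t ack_seq)
{
	return ack_seq == req->snt_isn + 1;
}

/* Line 740, simplified: the segment [seq, end_seq) must overlap the
 * window [rcv_nxt, rcv_nxt + rsk_rcv_wnd) we advertised. */
static bool seg_in_window(const struct mini_req_state *req,
			  uint32_t seq, uint32_t end_seq)
{
	uint32_t wstart = req->rcv_nxt;
	uint32_t wend	= req->rcv_nxt + req->rsk_rcv_wnd;

	return seq == wstart ||
	       (seq_after(end_seq, wstart) && seq_before(seq, wend));
}

/* Lines 790-792: under TCP_DEFER_ACCEPT, a bare ACK (no data, so
 * end_seq == rcv_isn + 1) is only marked as acked while the
 * defer-accept budget has not been exhausted. */
static bool defer_bare_ack(const struct mini_req_state *req,
			   uint32_t end_seq, uint8_t defer_accept)
{
	return req->num_timeout < defer_accept &&
	       end_seq == req->rcv_isn + 1;
}

int main(void)
{
	struct mini_req_state req = {
		.snt_isn = 5000, .rcv_isn = 1000,
		.rcv_nxt = 1001, .rsk_rcv_wnd = 65535,
	};

	printf("ack ok:   %d\n", ack_acceptable(&req, 5001));
	printf("in win:   %d\n", seg_in_window(&req, 1001, 1001));
	printf("deferred: %d\n", defer_bare_ack(&req, 1001, 3));
	return 0;
}
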