Lines matching refs:req (net/ipv4/tcp_minisocks.c)
360 void tcp_openreq_init_rwin(struct request_sock *req,
364 struct inet_request_sock *ireq = inet_rsk(req);
375 req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);
379 (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
380 req->rsk_window_clamp = full_space;
382 rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
391 &req->rsk_rcv_wnd,
392 &req->rsk_window_clamp,
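The matches at 360-392 come from tcp_openreq_init_rwin(), which seeds the receive window on the request sock before the SYN-ACK is sent: the window clamp falls back to the route's RTAX_WINDOW metric when none is set, is pinned to the socket's full receive space when it is zero or too large, and rsk_rcv_wnd / rsk_window_clamp are then filled in by the initial-window selection, with a BPF hook (tcp_rwnd_init_bpf) able to override the starting window. Below is a minimal, compilable userspace sketch of just the clamping step; the struct and function names are invented for illustration and are not kernel API.

    /* Hypothetical sketch of the clamp logic around lines 375-380;
     * fake_req and rwin_clamp() are made up for this example. */
    #include <stdio.h>

    struct fake_req {
        unsigned int rsk_window_clamp;
    };

    static void rwin_clamp(struct fake_req *req, unsigned int window_clamp,
                           unsigned int route_metric, unsigned int full_space)
    {
        /* explicit clamp wins; otherwise fall back to the cached route metric
         * (the kernel line at 375 uses the GNU "?:" shorthand for this) */
        req->rsk_window_clamp = window_clamp ? window_clamp : route_metric;

        /* never advertise more window than the receive buffer can back
         * (in the kernel this clamp is additionally gated on the listener
         * having a user-locked receive buffer) */
        if (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0)
            req->rsk_window_clamp = full_space;
    }

    int main(void)
    {
        struct fake_req req;

        rwin_clamp(&req, 0, 1u << 20, 65535);
        printf("window_clamp=%u\n", req.rsk_window_clamp); /* 65535 */
        return 0;
    }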
401 const struct request_sock *req)
403 tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
436 struct request_sock *req,
443 ireq = inet_rsk(req);
457 struct request_sock *req,
460 struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
461 const struct inet_request_sock *ireq = inet_rsk(req);
462 struct tcp_request_sock *treq = tcp_rsk(req);
474 smc_check_reset_syn_req(oldtp, req, newtp);
500 newtp->total_retrans = req->num_retrans;
511 newtp->window_clamp = req->rsk_window_clamp;
512 newtp->rcv_ssthresh = req->rsk_rcv_wnd;
513 newtp->rcv_wnd = req->rsk_rcv_wnd;
526 newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
533 if (req->num_timeout) {
541 if (treq->af_specific->req_md5_lookup(sk, req_to_sk(req)))
546 newtp->rx_opt.mss_clamp = req->mss;
547 tcp_ecn_openreq_child(newtp, req);
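The matches at 457-547 come from tcp_create_openreq_child(): the listener is cloned with inet_csk_clone_lock() and the state negotiated during the handshake, which had been parked on the request sock, is copied onto the new tcp_sock: the SYN-ACK retransmit count, window clamp, initial receive window and ssthresh, the peer's last timestamp value, and the clamped MSS, with ECN enabled on the child only if the request saw it (tcp_ecn_openreq_child). A rough sketch of that hand-off, using invented structs that stand in for tcp_request_sock and tcp_sock:

    /* Hypothetical sketch of the req -> child inheritance around lines 500-547;
     * fake_req and fake_child are stand-ins, not the kernel's structures. */
    #include <stdio.h>
    #include <stdbool.h>

    struct fake_req {                 /* a few tcp_request_sock-like fields */
        unsigned int num_retrans, rsk_window_clamp, rsk_rcv_wnd, ts_recent;
        unsigned short mss;
        bool ecn_ok;
    };

    struct fake_child {               /* the matching tcp_sock-like fields */
        unsigned int total_retrans, window_clamp, rcv_ssthresh, rcv_wnd, ts_recent;
        unsigned short mss_clamp;
        bool ecn;
    };

    static void inherit_from_req(struct fake_child *tp, const struct fake_req *req)
    {
        tp->total_retrans = req->num_retrans;      /* SYN-ACK retransmits so far */
        tp->window_clamp  = req->rsk_window_clamp; /* from tcp_openreq_init_rwin() */
        tp->rcv_ssthresh  = req->rsk_rcv_wnd;
        tp->rcv_wnd       = req->rsk_rcv_wnd;
        tp->ts_recent     = req->ts_recent;        /* peer's last timestamp value */
        tp->mss_clamp     = req->mss;              /* MSS advertised in the SYN */
        tp->ecn           = req->ecn_ok;           /* what tcp_ecn_openreq_child() encodes */
    }

    int main(void)
    {
        struct fake_req req = { 1, 65535, 29200, 123456u, 1460, true };
        struct fake_child child;

        inherit_from_req(&child, &req);
        printf("mss_clamp=%u rcv_wnd=%u ecn=%d\n",
               child.mss_clamp, child.rcv_wnd, child.ecn);
        return 0;
    }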
574 struct request_sock *req,
589 tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
591 tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
596 tmp_opt.ts_recent_stamp = ktime_get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
602 if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
630 &tcp_rsk(req)->last_oow_ack_time) &&
632 !inet_rtx_syn_ack(sk, req)) {
635 expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
638 mod_timer_pending(&req->rsk_timer, expires);
640 req->rsk_timer.expires = expires;
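The matches from 574 onward are tcp_check_req(), which vets every segment that arrives for a still-embryonic connection. When timestamps are in use, the peer's last value is taken from req->ts_recent, and because no ts_recent_stamp was ever recorded for a request sock, one is approximated by backdating "now" by roughly the time the SYN-ACK retransmissions must have consumed (TCP_TIMEOUT_INIT doubled per retransmit), so PAWS can still reject stale segments. A retransmitted SYN (same sequence number as rcv_isn) triggers a rate-limited SYN-ACK retransmit via inet_rtx_syn_ack() and pushes the request sock timer out instead of restarting it. A small worked example of the backdating arithmetic on line 596, assuming TCP_TIMEOUT_INIT/HZ is one second as in mainline:

    /* Worked example of line 596:
     * ts_recent_stamp = now - ((TCP_TIMEOUT_INIT / HZ) << num_timeout).
     * TIMEOUT_INIT_SEC is an assumption standing in for TCP_TIMEOUT_INIT/HZ. */
    #include <stdio.h>
    #include <time.h>

    #define TIMEOUT_INIT_SEC 1   /* mainline TCP_TIMEOUT_INIT is 1*HZ */

    int main(void)
    {
        time_t now = time(NULL);
        int num_timeout;

        for (num_timeout = 0; num_timeout <= 3; num_timeout++) {
            time_t stamp = now - ((time_t)TIMEOUT_INIT_SEC << num_timeout);
            printf("num_timeout=%d -> stamp backdated by %ld s\n",
                   num_timeout, (long)(now - stamp));  /* 1, 2, 4, 8 */
        }
        return 0;
    }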
700 * than req because user data may have been sent out.
704 tcp_rsk(req)->snt_isn + 1))
715 tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
720 &tcp_rsk(req)->last_oow_ack_time))
721 req->rsk_ops->send_ack(sk, skb, req);
730 * we take ownership of @req.
732 if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
733 WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);
735 if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
737 at tcp_rsk(req)->rcv_isn + 1. */
765 if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
766 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
767 inet_rsk(req)->acked = 1;
778 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
779 req, &own_req);
783 if (own_req && rsk_drop_req(req)) {
784 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
785 inet_csk_reqsk_queue_drop_and_put(sk, req);
790 tcp_synack_rtt_meas(child, req);
792 return inet_csk_complete_hashdance(sk, child, req, own_req);
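The rest of tcp_check_req() handles the (usually final) ACK: the ACK number must match snt_isn + 1, the sequence must fall inside [rcv_nxt, rcv_nxt + rsk_rcv_wnd] (out-of-window segments get a rate-limited duplicate ACK via req->rsk_ops->send_ack()), ts_recent is refreshed, and unless defer_accept is holding back a pure data-less ACK, syn_recv_sock() builds the full child socket, the SYN-ACK RTT is sampled, and inet_csk_complete_hashdance() moves the child onto the accept queue. The in-window test must use wraparound-safe 32-bit sequence arithmetic; a minimal sketch of that comparison (seq_between() is an invented name; the kernel uses its own before()/after()/tcp_in_window() helpers):

    /* Hypothetical sketch of the wraparound-safe window check used around line 715. */
    #include <stdint.h>
    #include <stdio.h>

    /* true if low <= seq <= high, evaluated modulo 2^32 */
    static int seq_between(uint32_t seq, uint32_t low, uint32_t high)
    {
        return (uint32_t)(high - low) >= (uint32_t)(seq - low);
    }

    int main(void)
    {
        uint32_t rcv_nxt = 0xfffffff0u, rcv_wnd = 65535;

        /* a sequence number just past the 2^32 wrap is still in window */
        printf("%d\n", seq_between(0x00000010u, rcv_nxt, rcv_nxt + rcv_wnd)); /* 1 */
        /* a far-away sequence number is not */
        printf("%d\n", seq_between(0x80000000u, rcv_nxt, rcv_nxt + rcv_wnd)); /* 0 */
        return 0;
    }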
796 inet_rsk(req)->acked = 1;
807 req->rsk_ops->send_reset(sk, skb);
809 reqsk_fastopen_remove(sk, req, true);
813 bool unlinked = inet_csk_reqsk_queue_drop(sk, req);
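The last matches are the two failure tails: on accept-queue overflow (with tcp_abort_on_overflow left at its default) the request is only marked acked (inet_rsk(req)->acked = 1) and the ACK is ignored so the peer will retransmit it, while an invalid or reset-bearing segment takes the embryonic-reset path, sending a RST unless the offending segment was itself a RST, tearing down a TCP Fast Open request with reqsk_fastopen_remove(), and finally unlinking the request from the listener's queue with inet_csk_reqsk_queue_drop(). A compact decision sketch of that tail, with invented names; it assumes the segment has already failed validation, and the real code also bumps LINUX_MIB_EMBRYONICRSTS and special-cases Fast Open children:

    /* Hypothetical sketch of the tail-end decisions around lines 796-813;
     * enum and helper names are invented, the real code works on sk/req/skb. */
    #include <stdio.h>
    #include <stdbool.h>

    enum req_fate { REQ_KEEP_WAITING, REQ_RESET_AND_DROP, REQ_DROP_QUIETLY };

    static enum req_fate classify(bool queue_overflow, bool abort_on_overflow,
                                  bool segment_is_rst)
    {
        if (queue_overflow && !abort_on_overflow)
            return REQ_KEEP_WAITING;     /* acked = 1; wait for the peer to retry */
        if (!segment_is_rst)
            return REQ_RESET_AND_DROP;   /* send_reset(), then drop the request */
        return REQ_DROP_QUIETLY;         /* the peer reset us; just unlink */
    }

    int main(void)
    {
        printf("%d %d %d\n",
               classify(true,  false, false),   /* 0: overflow -> keep waiting */
               classify(false, false, false),   /* 1: bad ACK  -> RST and drop */
               classify(false, false, true));   /* 2: peer RST -> drop quietly */
        return 0;
    }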