Lines Matching refs:tw  (references to the struct inet_timewait_sock *tw in the TCP time-wait handling code, net/ipv4/tcp_minisocks.c)
36 tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
39 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
41 if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
49 /* We are rate-limiting, so just release the tw sock and drop skb. */
50 inet_twsk_put(tw);
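
The hits above (source lines 36-50) fall inside tcp_timewait_check_oow_rate_limit(). For context, a minimal sketch of the whole helper, reconstructed from the upstream tcp_minisocks.c logic; comment wording and minor details may differ between kernel versions:

    static enum tcp_tw_status
    tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
                                      const struct sk_buff *skb, int mib_idx)
    {
            struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

            if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
                                      &tcptw->tw_last_oow_ack_time)) {
                    /* Not rate-limited: ask the caller to send an ACK.
                     * The tw refcount is not dropped here; the caller
                     * releases it after the ACK goes out.
                     */
                    return TCP_TW_ACK;
            }

            /* We are rate-limiting, so just release the tw sock and drop skb. */
            inet_twsk_put(tw);
            return TCP_TW_SUCCESS;
    }

Note the asymmetric reference handling: on the TCP_TW_ACK path the caller still owns the tw reference, while the rate-limited path drops it before returning.
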
85 tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
89 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
94 tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);
105 if (tw->tw_substate == TCP_FIN_WAIT2) {
114 tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);
126 inet_twsk_put(tw);
138 tw->tw_substate = TCP_TIME_WAIT;
145 inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
176 if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
178 inet_twsk_deschedule_put(tw);
182 inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
190 inet_twsk_put(tw);
223 __NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
233 inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
236 tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
238 inet_twsk_put(tw);
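
The hits from source lines 85-238 are inside tcp_timewait_state_process(), which classifies a segment arriving for a FIN_WAIT2/TIME_WAIT socket and returns a verdict to the caller. The verdict type, as defined in include/net/tcp.h (newer kernels may add entries); the comments are my summary of how tcp_v4_rcv()'s do_time_wait path reacts, not the kernel's own:

    enum tcp_tw_status {
            TCP_TW_SUCCESS = 0,     /* segment handled, tw reference already dropped */
            TCP_TW_RST     = 1,     /* caller sends a RST, then inet_twsk_deschedule_put() */
            TCP_TW_ACK     = 2,     /* caller re-sends the last ACK, then drops the reference */
            TCP_TW_SYN     = 3      /* acceptable new SYN: caller re-looks up a listener */
    };

The refs above trace the main decision points: the rate-limited ACK helper for out-of-window segments (114, 236), the FIN that moves tw_substate from TCP_FIN_WAIT2 to TCP_TIME_WAIT and re-arms the timer (138, 145), the tcp_rfc1337 check that decides whether a RST may kill the TIME-WAIT sock early (176-178), and timer restarts for acceptable ACKs (182, 233).
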
285 struct inet_timewait_sock *tw;
287 tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);
289 if (tw) {
290 struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
293 tw->tw_transparent = inet_test_bit(TRANSPARENT, sk);
294 tw->tw_mark = sk->sk_mark;
295 tw->tw_priority = sk->sk_priority;
296 tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
305 tw->tw_txhash = sk->sk_txhash;
307 if (tw->tw_family == PF_INET6) {
310 tw->tw_v6_daddr = sk->sk_v6_daddr;
311 tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
312 tw->tw_tclass = np->tclass;
313 tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
314 tw->tw_ipv6only = sk->sk_ipv6only;
332 inet_twsk_schedule(tw, timeo);
334 * Note that access to tw after this point is illegal.
336 inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
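
The last group of hits is the body of tcp_time_wait(): a struct inet_timewait_sock is allocated via inet_twsk_alloc() against the per-netns tcp_death_row, the relevant fields of the closing socket are copied over (including the IPv6 addresses when tw_family == PF_INET6), and the sock is finally armed and hashed. A minimal sketch of that tail, following the pattern shown at source lines 332-336; the surrounding comment text and the exact helper used for the hash insertion can differ between kernel versions:

            /* The tw timer is pinned, so keep BHs disabled until the
             * timewait sock is fully initialized; otherwise the timer
             * handler could run before we finish setting it up.
             */
            local_bh_disable();
            inet_twsk_schedule(tw, timeo);
            /* Linkage updates.
             * Note that access to tw after this point is illegal.
             */
            inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo);
            local_bh_enable();

Once inet_twsk_hashdance() has published the timewait sock in the hash table, other CPUs can look it up and drop its reference concurrently, which is why the caller must not touch tw afterwards.
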