Lines matching refs:tw (net/ipv4/inet_timewait_sock.c)

22  *	@tw: timewait socket
29 void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
32 struct inet_bind2_bucket *tb2 = tw->tw_tb2;
33 struct inet_bind_bucket *tb = tw->tw_tb;
38 __hlist_del(&tw->tw_bind_node);
39 tw->tw_tb = NULL;
42 __hlist_del(&tw->tw_bind2_node);
43 tw->tw_tb2 = NULL;
46 __sock_put((struct sock *)tw);
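
The inet_twsk_bind_unhash() lines above unlink the timewait socket from its bind buckets with __hlist_del(), clear the back-pointers and drop the reference the buckets held via __sock_put(). Below is a small userspace sketch of that unlink-clear-put shape, and of the hlist_add_head() side used by the add helpers further down; the struct names (hnode, bucket, tw_like) and the plain-int refcount are invented for the example and are not the kernel's hlist or sock API.

/* Intrusive bucket list: the object carries its own list node, and unhashing
 * removes the node, clears the back-pointer and drops the bucket's reference.
 */
#include <assert.h>
#include <stdio.h>

struct hnode {
	struct hnode *next;
	struct hnode **pprev;	/* address of the pointer that points at us */
};

struct bucket { struct hnode *first; };

struct tw_like {
	int refcnt;		/* simplified stand-in for tw_refcnt */
	struct bucket *tb;	/* which bucket we are hashed in, if any */
	struct hnode bind_node;
};

static void hlist_add_head(struct hnode *n, struct bucket *b)
{
	n->next = b->first;
	if (b->first)
		b->first->pprev = &n->next;
	b->first = n;
	n->pprev = &b->first;
}

static void hlist_del(struct hnode *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
}

/* Mirrors the shape of the kernel lines: unlink, clear, drop one reference. */
static void bind_unhash(struct tw_like *tw)
{
	if (!tw->tb)
		return;
	hlist_del(&tw->bind_node);
	tw->tb = NULL;
	tw->refcnt--;		/* the reference the bucket held goes away */
}

int main(void)
{
	struct bucket b = { 0 };
	struct tw_like tw = { .refcnt = 1 };

	hlist_add_head(&tw.bind_node, &b);
	tw.tb = &b;
	tw.refcnt++;		/* reference taken on behalf of the bucket */

	bind_unhash(&tw);
	assert(!b.first && !tw.tb && tw.refcnt == 1);
	printf("unhashed, refcnt=%d\n", tw.refcnt);
	return 0;
}
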
50 static void inet_twsk_kill(struct inet_timewait_sock *tw)
52 struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
53 spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
57 sk_nulls_del_node_init_rcu((struct sock *)tw);
61 bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
63 bhead2 = inet_bhashfn_portaddr(hashinfo, (struct sock *)tw,
64 twsk_net(tw), tw->tw_num);
68 inet_twsk_bind_unhash(tw, hashinfo);
72 refcount_dec(&tw->tw_dr->tw_refcount);
73 inet_twsk_put(tw);
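
inet_twsk_kill() picks its locks by hashing: inet_ehash_lockp() maps tw->tw_hash onto one spinlock in an array, and inet_bhashfn() folds the bound port into a bind-bucket index (the kernel also mixes in the network namespace). The sketch below shows that lock-striping idea with pthread mutexes; the table sizes, names and the simplified hash are assumptions for illustration, not the kernel's hash functions.

/* Lock striping: keep an array of locks and pick one by hashing the key,
 * instead of serializing everything behind a single lock.
 */
#include <pthread.h>
#include <stdio.h>

#define EHASH_LOCKS 8u			/* power of two, so "hash & (N - 1)" works */

static pthread_mutex_t ehash_locks[EHASH_LOCKS];

/* Rough analogue of inet_ehash_lockp(): the hash selects the protecting stripe. */
static pthread_mutex_t *ehash_lockp(unsigned int hash)
{
	return &ehash_locks[hash & (EHASH_LOCKS - 1)];
}

/* Rough analogue of inet_bhashfn(): fold the local port into a bucket index. */
static unsigned int bhashfn(unsigned int port, unsigned int bhash_size)
{
	return port & (bhash_size - 1);
}

int main(void)
{
	unsigned int hash = 0x9e3779b9u;	/* pretend this is tw->tw_hash */
	pthread_mutex_t *lock;

	for (unsigned int i = 0; i < EHASH_LOCKS; i++)
		pthread_mutex_init(&ehash_locks[i], NULL);

	lock = ehash_lockp(hash);
	pthread_mutex_lock(lock);
	/* ...unlink the entry from its hash chain here... */
	pthread_mutex_unlock(lock);

	printf("ehash stripe %u, bind bucket %u\n",
	       hash & (EHASH_LOCKS - 1), bhashfn(12345, 64));
	return 0;
}
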
76 void inet_twsk_free(struct inet_timewait_sock *tw)
78 struct module *owner = tw->tw_prot->owner;
79 twsk_destructor((struct sock *)tw);
80 kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
84 void inet_twsk_put(struct inet_timewait_sock *tw)
86 if (refcount_dec_and_test(&tw->tw_refcnt))
87 inet_twsk_free(tw);
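
inet_twsk_put() is the classic "drop a reference, free on the last one" pattern: refcount_dec_and_test() returns true only for the caller that takes the count to zero, so exactly one path ends up in inet_twsk_free(). A userspace sketch with C11 atomics, using invented names (obj, obj_put):

/* "Put" pattern: whoever drops the last reference frees the object. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcnt;
	/* ...payload... */
};

static struct obj *obj_alloc(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (o)
		atomic_init(&o->refcnt, 1);	/* the caller starts with one reference */
	return o;
}

static void obj_get(struct obj *o)
{
	atomic_fetch_add_explicit(&o->refcnt, 1, memory_order_relaxed);
}

/* fetch_sub returns the old value, so old == 1 means this was the last ref. */
static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub_explicit(&o->refcnt, 1, memory_order_acq_rel) == 1) {
		printf("last reference gone, freeing\n");
		free(o);
	}
}

int main(void)
{
	struct obj *o = obj_alloc();

	if (!o)
		return 1;
	obj_get(o);	/* e.g. a hash chain also holds the object */
	obj_put(o);	/* chain reference dropped: object survives */
	obj_put(o);	/* caller's reference dropped: object freed here */
	return 0;
}
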
91 static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
94 hlist_nulls_add_head_rcu(&tw->tw_node, list);
97 static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
100 hlist_add_head(&tw->tw_bind_node, list);
103 static void inet_twsk_add_bind2_node(struct inet_timewait_sock *tw,
106 hlist_add_head(&tw->tw_bind2_node, list);
114 void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
127 bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
129 bhead2 = inet_bhashfn_portaddr(hashinfo, sk, twsk_net(tw), inet->inet_num);
134 tw->tw_tb = icsk->icsk_bind_hash;
136 inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
138 tw->tw_tb2 = icsk->icsk_bind2_hash;
140 inet_twsk_add_bind2_node(tw, &tw->tw_tb2->deathrow);
147 inet_twsk_add_node_rcu(tw, &ehead->chain);
160 * committed into memory all tw fields.
162 * so we are not allowed to use tw anymore.
164 refcount_set(&tw->tw_refcnt, 3);
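
inet_twsk_hashdance() links tw into the bind buckets and the ehash chain while tw_refcnt is still 0, and only as the last step sets it to 3: one reference each for the bind chain, the ehash chain and the timer (the comment fragments at file lines 160-164 above give the ordering argument; the kernel relies on the surrounding spin_lock()/spin_unlock() pairs for the memory ordering). The sketch below models that publish-last step in userspace, using a release store as a stand-in for the lock barriers; the field names are invented.

/* Publish-last: build and link the object everywhere, then store the count
 * that accounts for every holder in one go.
 */
#include <stdatomic.h>
#include <stdio.h>

struct twentry {
	atomic_uint refcnt;			/* stays 0 while the entry is built */
	int linked_bind, linked_ehash, timer_armed;
};

static void hashdance(struct twentry *tw)
{
	/* link into the bind bucket and the ehash chain, arm the timer */
	tw->linked_bind = 1;
	tw->linked_ehash = 1;
	tw->timer_armed = 1;

	/* last step: one reference per holder; a reader that sees refcnt != 0
	 * also sees the fields written above
	 */
	atomic_store_explicit(&tw->refcnt, 3, memory_order_release);
}

int main(void)
{
	struct twentry tw;

	atomic_init(&tw.refcnt, 0);
	tw.linked_bind = tw.linked_ehash = tw.timer_armed = 0;

	hashdance(&tw);
	printf("published with %u references\n",
	       atomic_load_explicit(&tw.refcnt, memory_order_acquire));
	return 0;
}

After this store the caller has given up its implicit reference, which is why the code that follows may not touch tw again (file line 162 above).
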
170 struct inet_timewait_sock *tw = from_timer(tw, t, tw_timer);
172 inet_twsk_kill(tw);
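
tw_timer_handler() only receives a pointer to the embedded tw_timer member; from_timer() is a container_of() that subtracts the member offset to get back to the enclosing inet_timewait_sock. A userspace sketch of that recovery, with invented struct names and no real timer:

/* container_of(): recover the enclosing structure from a member pointer. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer_like { long expires; };

struct tw_like {
	int id;
	struct timer_like tw_timer;
};

/* The callback gets only the member pointer, like tw_timer_handler(). */
static void timer_handler(struct timer_like *t)
{
	struct tw_like *tw = container_of(t, struct tw_like, tw_timer);

	printf("timer fired for tw id=%d\n", tw->id);
}

int main(void)
{
	struct tw_like tw = { .id = 42 };

	timer_handler(&tw.tw_timer);	/* simulate the timer expiring */
	return 0;
}
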
179 struct inet_timewait_sock *tw;
185 tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
187 if (tw) {
190 tw->tw_dr = dr;
192 tw->tw_daddr = inet->inet_daddr;
193 tw->tw_rcv_saddr = inet->inet_rcv_saddr;
194 tw->tw_bound_dev_if = sk->sk_bound_dev_if;
195 tw->tw_tos = inet->tos;
196 tw->tw_num = inet->inet_num;
197 tw->tw_state = TCP_TIME_WAIT;
198 tw->tw_substate = state;
199 tw->tw_sport = inet->inet_sport;
200 tw->tw_dport = inet->inet_dport;
201 tw->tw_family = sk->sk_family;
202 tw->tw_reuse = sk->sk_reuse;
203 tw->tw_reuseport = sk->sk_reuseport;
204 tw->tw_hash = sk->sk_hash;
205 tw->tw_ipv6only = 0;
206 tw->tw_transparent = inet_test_bit(TRANSPARENT, sk);
207 tw->tw_prot = sk->sk_prot_creator;
208 atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
209 twsk_net_set(tw, sock_net(sk));
210 timer_setup(&tw->tw_timer, tw_timer_handler, TIMER_PINNED);
216 refcount_set(&tw->tw_refcnt, 0);
218 __module_get(tw->tw_prot->owner);
221 return tw;
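
inet_twsk_alloc() copies the identity of the closing socket into tw but deliberately leaves tw_refcnt at 0 (file line 216): RCU lookups take their reference with an "increment unless zero" operation, so an entry whose count is still 0 cannot be grabbed before inet_twsk_hashdance() publishes it. A sketch of that guard, with an invented tw_try_get() built on a compare-and-swap loop (the kernel uses refcount_inc_not_zero() for this kind of check):

/* Why the count starts at 0: lookups only succeed against published entries. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct twentry { atomic_uint refcnt; };

/* Succeed only if the count is already non-zero. */
static bool tw_try_get(struct twentry *tw)
{
	unsigned int old = atomic_load_explicit(&tw->refcnt, memory_order_relaxed);

	do {
		if (old == 0)
			return false;	/* still being set up (or already dying) */
	} while (!atomic_compare_exchange_weak_explicit(&tw->refcnt, &old, old + 1,
							memory_order_acquire,
							memory_order_relaxed));
	return true;
}

int main(void)
{
	struct twentry tw;

	atomic_init(&tw.refcnt, 0);		/* freshly allocated, not hashed yet */
	printf("lookup before publish: %s\n", tw_try_get(&tw) ? "got ref" : "skipped");

	atomic_store_explicit(&tw.refcnt, 3, memory_order_release);	/* hashdance */
	printf("lookup after publish:  %s\n", tw_try_get(&tw) ? "got ref" : "skipped");
	return 0;
}
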
231 * Caller should not access tw anymore.
233 void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
235 if (del_timer_sync(&tw->tw_timer))
236 inet_twsk_kill(tw);
237 inet_twsk_put(tw);
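
inet_twsk_deschedule_put() handles early kills: del_timer_sync() returns nonzero only if it deactivated a timer that was still pending, and in that case the handler will never run, so the caller must perform the kill itself before dropping its own reference. The sketch below reduces that to the essential "exactly one side runs the teardown" decision with an atomic flag; the real del_timer_sync() also waits for a handler that is already running, and all names here are invented.

/* Early-kill handoff: either the timer fires, or whoever cancels the pending
 * timer does the teardown in its place; never both, never neither.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct twentry {
	atomic_bool timer_pending;
	int refcnt;			/* simplified, single-threaded demo */
};

static void tw_kill(struct twentry *tw)
{
	printf("teardown runs exactly once\n");
	tw->refcnt--;			/* drop the reference the timer held */
}

/* True only if we deactivated a still-pending timer. */
static bool cancel_timer(struct twentry *tw)
{
	return atomic_exchange(&tw->timer_pending, false);
}

static void deschedule_put(struct twentry *tw)
{
	if (cancel_timer(tw))
		tw_kill(tw);		/* the handler won't run; do its job here */
	tw->refcnt--;			/* and drop the caller's own reference */
}

int main(void)
{
	struct twentry tw;

	atomic_init(&tw.timer_pending, true);
	tw.refcnt = 2;			/* one for the timer, one for the caller */

	deschedule_put(&tw);
	printf("remaining references: %d\n", tw.refcnt);
	return 0;
}
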
241 void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
263 * kill tw bucket after 3.5*RTO (it is important that this number
271 __NET_INC_STATS(twsk_net(tw), kill ? LINUX_MIB_TIMEWAITKILLED :
273 BUG_ON(mod_timer(&tw->tw_timer, jiffies + timeo));
274 refcount_inc(&tw->tw_dr->tw_refcount);
276 mod_timer_pending(&tw->tw_timer, jiffies + timeo);
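
__inet_twsk_schedule() distinguishes first arming from re-arming: on the first call it arms the timer with mod_timer() (the BUG_ON() asserts it was not already pending) and takes a reference on the death row, while the rearm path uses mod_timer_pending(), which only pushes the deadline of a timer that is still armed. A single-threaded sketch of that distinction; the struct and helpers are invented stand-ins, not the kernel timer API.

/* First arming vs. re-arming a pending timer. */
#include <stdbool.h>
#include <stdio.h>

struct simple_timer {
	bool pending;
	long expires;
};

/* Analogue of mod_timer(): arm (or re-arm) and report whether it was pending. */
static bool timer_mod(struct simple_timer *t, long expires)
{
	bool was_pending = t->pending;

	t->pending = true;
	t->expires = expires;
	return was_pending;
}

/* Analogue of mod_timer_pending(): only touch a timer that is already armed. */
static void timer_mod_pending(struct simple_timer *t, long expires)
{
	if (t->pending)
		t->expires = expires;
}

static void schedule_tw(struct simple_timer *t, long now, long timeo, bool rearm)
{
	if (!rearm) {
		/* first arming: the timer must not have been pending already */
		if (timer_mod(t, now + timeo))
			printf("bug: timer was already pending\n");
		/* ...this is where the extra death-row reference is taken... */
	} else {
		timer_mod_pending(t, now + timeo);
	}
}

int main(void)
{
	struct simple_timer t = { false, 0 };

	schedule_tw(&t, 1000, 60, false);	/* initial arm */
	schedule_tw(&t, 1010, 120, true);	/* rearm: extend the deadline */
	printf("pending=%d expires=%ld\n", t.pending, t.expires);
	return 0;
}
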