Lines matching refs: tw (net/ipv4/inet_timewait_sock.c)
22 * @tw: timewait socket
29 void inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
32 struct inet_bind_bucket *tb = tw->tw_tb;
37 __hlist_del(&tw->tw_bind_node);
38 tw->tw_tb = NULL;
40 __sock_put((struct sock *)tw);
44 static void inet_twsk_kill(struct inet_timewait_sock *tw)
46 struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo;
47 spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
51 sk_nulls_del_node_init_rcu((struct sock *)tw);
55 bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
59 inet_twsk_bind_unhash(tw, hashinfo);
62 atomic_dec(&tw->tw_dr->tw_count);
63 inet_twsk_put(tw);
66 void inet_twsk_free(struct inet_timewait_sock *tw)
68 struct module *owner = tw->tw_prot->owner;
69 twsk_destructor((struct sock *)tw);
71 pr_debug("%s timewait_sock %p released\n", tw->tw_prot->name, tw);
73 kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
77 void inet_twsk_put(struct inet_timewait_sock *tw)
79 if (refcount_dec_and_test(&tw->tw_refcnt))
80 inet_twsk_free(tw);
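Callers elsewhere in the stack balance a lookup reference with inet_twsk_put(); once the count hits zero, inet_twsk_free() above runs the protocol destructor, returns the object to its slab cache and drops the module reference. A minimal hedged sketch of that caller side, assuming the inet_twsk() cast helper from include/net/inet_timewait_sock.h; the surrounding lookup logic is hypothetical:

if (sk->sk_state == TCP_TIME_WAIT) {
        struct inet_timewait_sock *tw = inet_twsk(sk);

        /* ... consult tw fields (ports, addresses, flags) ... */
        inet_twsk_put(tw);      /* drop the lookup reference; the last put frees tw */
}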
84 static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw,
87 hlist_nulls_add_head_rcu(&tw->tw_node, list);
90 static void inet_twsk_add_bind_node(struct inet_timewait_sock *tw,
93 hlist_add_head(&tw->tw_bind_node, list);
101 void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
113 bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), inet->inet_num,
116 tw->tw_tb = icsk->icsk_bind_hash;
118 inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
123 inet_twsk_add_node_rcu(tw, &ehead->chain);
136 * committed into memory all tw fields.
138 * so we are not allowed to use tw anymore.
140 refcount_set(&tw->tw_refcnt, 3);
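The reference count of 3 set at line 140 pairs with the drop sites visible in inet_twsk_kill() above; the annotation below is an interpretation of those fragments, not text from the file:

refcount_set(&tw->tw_refcnt, 3);
/* One reference each for the ehash chain, the bhash chain and the armed
 * timer. inet_twsk_kill() later drops them in turn:
 *   sk_nulls_del_node_init_rcu()            (line 51)       ehash reference
 *   inet_twsk_bind_unhash() -> __sock_put() (lines 40, 59)  bhash reference
 *   the closing inet_twsk_put()             (line 63)       timer reference
 * after which inet_twsk_free() destroys the object.
 */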
146 struct inet_timewait_sock *tw = from_timer(tw, t, tw_timer);
148 if (tw->tw_kill)
149 __NET_INC_STATS(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
151 __NET_INC_STATS(twsk_net(tw), LINUX_MIB_TIMEWAITED);
152 inet_twsk_kill(tw);
159 struct inet_timewait_sock *tw;
164 tw = kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
166 if (tw) {
169 tw->tw_dr = dr;
171 tw->tw_daddr = inet->inet_daddr;
172 tw->tw_rcv_saddr = inet->inet_rcv_saddr;
173 tw->tw_bound_dev_if = sk->sk_bound_dev_if;
174 tw->tw_tos = inet->tos;
175 tw->tw_num = inet->inet_num;
176 tw->tw_state = TCP_TIME_WAIT;
177 tw->tw_substate = state;
178 tw->tw_sport = inet->inet_sport;
179 tw->tw_dport = inet->inet_dport;
180 tw->tw_family = sk->sk_family;
181 tw->tw_reuse = sk->sk_reuse;
182 tw->tw_reuseport = sk->sk_reuseport;
183 tw->tw_hash = sk->sk_hash;
184 tw->tw_ipv6only = 0;
185 tw->tw_transparent = inet->transparent;
186 tw->tw_prot = sk->sk_prot_creator;
187 atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie));
188 twsk_net_set(tw, sock_net(sk));
189 timer_setup(&tw->tw_timer, tw_timer_handler, TIMER_PINNED);
195 refcount_set(&tw->tw_refcnt, 0);
197 __module_get(tw->tw_prot->owner);
200 return tw;
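A hedged sketch of the typical caller sequence, loosely following tcp_time_wait() in net/ipv4/tcp_minisocks.c; dr and hashinfo are placeholders for the protocol's death row and connection hash table (tcp_death_row and tcp_hashinfo for TCP), and TCP_TIMEWAIT_LEN stands in for whatever timeout the caller computed:

struct inet_timewait_sock *tw;

tw = inet_twsk_alloc(sk, dr, TCP_TIME_WAIT);
if (tw) {
        /* ... copy protocol-specific state from sk into tw ... */

        /* tw_timer is pinned (line 189), so keep BHs disabled until the
         * socket is published, otherwise the handler could fire early.
         */
        local_bh_disable();
        inet_twsk_schedule(tw, TCP_TIMEWAIT_LEN);
        inet_twsk_hashdance(tw, sk, hashinfo);
        local_bh_enable();
        /* The hash tables and the timer own tw now (refcount 3, line 140);
         * the caller must not dereference it past this point (line 138).
         */
}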
210 * Caller should not access tw anymore.
212 void inet_twsk_deschedule_put(struct inet_timewait_sock *tw)
214 if (del_timer_sync(&tw->tw_timer))
215 inet_twsk_kill(tw);
216 inet_twsk_put(tw);
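inet_twsk_deschedule_put() is how callers retire a TIME_WAIT entry before its timer fires, for instance when a new segment makes the old entry obsolete or a network namespace is torn down. A minimal hedged fragment; obtaining tw, with a reference held, from a prior lookup is assumed:

/* The old TIME_WAIT entry is no longer wanted: stop its timer and release
 * it now instead of waiting for the timeout. tw is unusable afterwards
 * (see the comment fragment at line 210).
 */
inet_twsk_deschedule_put(tw);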
220 void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
242 * kill tw bucket after 3.5*RTO (it is important that this number
247 tw->tw_kill = timeo <= 4*HZ;
249 BUG_ON(mod_timer(&tw->tw_timer, jiffies + timeo));
250 atomic_inc(&tw->tw_dr->tw_count);
252 mod_timer_pending(&tw->tw_timer, jiffies + timeo);
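The rearm flag separates the initial arming (lines 249 and 250: mod_timer() plus the tw_count increment) from a later reschedule (line 252: mod_timer_pending(), which only updates a timer that is still armed). From memory rather than from this listing, include/net/inet_timewait_sock.h exposes the two cases through thin wrappers roughly like these:

static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo)
{
        __inet_twsk_schedule(tw, timeo, false);
}

static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo)
{
        __inet_twsk_schedule(tw, timeo, true);
}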