/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 */
#include <linux/kernel.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
/* Must be called with locally disabled BHs. */
static void __inet_twsk_kill(struct inet_timewait_sock *tw,
			     struct inet_hashinfo *hashinfo)
{
	struct inet_bind_hashbucket *bhead;
	struct inet_bind_bucket *tb;
	/* Unlink from established hashes. */
	rwlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);

	write_lock(lock);
	if (hlist_unhashed(&tw->tw_node)) {
		write_unlock(lock);
		return;
	}
	__hlist_del(&tw->tw_node);
	sk_node_init(&tw->tw_node);
	write_unlock(lock);

	/* Disassociate with bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(tw->tw_num, hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tb = tw->tw_tb;
	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&bhead->lock);
#ifdef SOCK_REFCNT_DEBUG
	if (atomic_read(&tw->tw_refcnt) != 1) {
		printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
		       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
	}
#endif
	inet_twsk_put(tw);
}
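
/*
 * Drop one reference to a timewait socket.  When the last reference goes
 * away, the protocol-specific destructor runs, the netns reference is
 * released and the bucket is returned to its slab cache.
 */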
void inet_twsk_put(struct inet_timewait_sock *tw)
{
	if (atomic_dec_and_test(&tw->tw_refcnt)) {
		struct module *owner = tw->tw_prot->owner;
		twsk_destructor((struct sock *)tw);
#ifdef SOCK_REFCNT_DEBUG
		printk(KERN_DEBUG "%s timewait_sock %p released\n",
		       tw->tw_prot->name, tw);
#endif
		release_net(twsk_net(tw));
		kmem_cache_free(tw->tw_prot->twsk_prot->twsk_slab, tw);
		module_put(owner);
	}
}
EXPORT_SYMBOL_GPL(inet_twsk_put);
/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			   struct inet_hashinfo *hashinfo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
	rwlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
	struct inet_bind_hashbucket *bhead;

	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note, that any socket with inet->num != 0 MUST be bound in
	   binding cache, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(inet->num, hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tw->tw_tb = icsk->icsk_bind_hash;
	BUG_TRAP(icsk->icsk_bind_hash);
	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	write_lock(lock);

	/* Step 2: Remove SK from established hash. */
	if (__sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);

	/* Step 3: Hash TW into TIMEWAIT chain. */
	inet_twsk_add_node(tw, &ehead->twchain);
	atomic_inc(&tw->tw_refcnt);

	write_unlock(lock);
}

EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
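
/*
 * Allocate a struct inet_timewait_sock from the protocol's timewait slab and
 * seed it with the addressing and identity fields of the closing socket @sk.
 * Returns NULL if the atomic slab allocation fails.
 */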
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
	struct inet_timewait_sock *tw =
		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
				 GFP_ATOMIC);

	if (tw != NULL) {
		const struct inet_sock *inet = inet_sk(sk);

		/* Give us an identity. */
		tw->tw_daddr        = inet->daddr;
		tw->tw_rcv_saddr    = inet->rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_num          = inet->num;
		tw->tw_state        = TCP_TIME_WAIT;
		tw->tw_substate     = state;
		tw->tw_sport        = inet->sport;
		tw->tw_dport        = inet->dport;
		tw->tw_family       = sk->sk_family;
		tw->tw_reuse        = sk->sk_reuse;
		tw->tw_hash         = sk->sk_hash;
		tw->tw_prot         = sk->sk_prot_creator;
		twsk_net_set(tw, hold_net(sock_net(sk)));
		atomic_set(&tw->tw_refcnt, 1);
		inet_twsk_dead_node_init(tw);
		__module_get(tw->tw_prot->owner);
	}

	return tw;
}

EXPORT_SYMBOL_GPL(inet_twsk_alloc);
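
/*
 * TIME_WAIT sockets are reaped through a "death row"
 * (struct inet_timewait_death_row): long timeouts are hashed into one of the
 * INET_TWDR_TWKILL_SLOTS cells drained by the slow timer
 * (inet_twdr_hangman), while short, recycle-style timeouts go into twcal_row
 * and are drained by the finer-grained twcal timer (inet_twdr_twcal_tick).
 * Both paths end up in __inet_twsk_kill().
 */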
/* Returns non-zero if quota exceeded. */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
				    const int slot)
{
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version where the lock
	 * was released after detaching the chain. It was racy,
	 * because tw buckets are scheduled in a context that is not serialized
	 * in 2.3 (with netfilter), and with softnet it is common, because
	 * soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&twdr->death_lock);
		__inet_twsk_kill(tw, twdr->hashinfo);
		inet_twsk_put(tw);
		killed++;
		spin_lock(&twdr->death_lock);
		if (killed > INET_TWDR_TWKILL_QUOTA) {
			ret = 1;
			break;
		}

		/* While we dropped twdr->death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired. We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	twdr->tw_count -= killed;
	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);

	return ret;
}
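
/*
 * Slow timer handler for the death row: reaps the current slot, defers to the
 * work queue when the per-run quota is exceeded, then advances the slot hand
 * and re-arms itself while TIME_WAIT sockets remain.
 */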
void inet_twdr_hangman(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	unsigned int need_timer;

	twdr = (struct inet_timewait_death_row *)data;
	spin_lock(&twdr->death_lock);

	if (twdr->tw_count == 0)
		goto out;

	need_timer = 0;
	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
		twdr->thread_slots |= (1 << twdr->slot);
		schedule_work(&twdr->twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left? */
		if (twdr->tw_count)
			need_timer = 1;
	}
	twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
	if (need_timer)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twdr_hangman);
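
/*
 * Work-queue handler that drains every slot flagged by inet_twdr_hangman(),
 * dropping the death lock and rescheduling between quota-sized batches when
 * a resched is pending.
 */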
void inet_twdr_twkill_work(struct work_struct *work)
{
	struct inet_timewait_death_row *twdr =
		container_of(work, struct inet_timewait_death_row, twkill_work);
	int i;

	BUILD_BUG_ON((INET_TWDR_TWKILL_SLOTS - 1) >
			(sizeof(twdr->thread_slots) * 8));

	while (twdr->thread_slots) {
		spin_lock_bh(&twdr->death_lock);
		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
			if (!(twdr->thread_slots & (1 << i)))
				continue;

			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&twdr->death_lock);
					schedule();
					spin_lock_bh(&twdr->death_lock);
				}
			}

			twdr->thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&twdr->death_lock);
	}
}

EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);
/* These are always called from BH context. See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
			  struct inet_timewait_death_row *twdr)
{
	spin_lock(&twdr->death_lock);
	if (inet_twsk_del_dead_node(tw)) {
		inet_twsk_put(tw);
		if (--twdr->tw_count == 0)
			del_timer(&twdr->tw_timer);
	}
	spin_unlock(&twdr->death_lock);
	__inet_twsk_kill(tw, twdr->hashinfo);
}

EXPORT_SYMBOL(inet_twsk_deschedule);
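
/*
 * (Re)schedule a TIME_WAIT socket for destruction after @timeo jiffies,
 * choosing between the fine-grained recycle wheel and the coarse slow-timer
 * cells depending on how far away the timeout is.
 */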
void inet_twsk_schedule(struct inet_timewait_sock *tw,
			struct inet_timewait_death_row *twdr,
			const int timeo, const int timewait_len)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if FIN arrived and we entered TIME-WAIT state,
	 * our ACK acking that FIN can be lost. If N subsequent retransmitted
	 * FINs (or previous segments) are lost (probability of such event
	 * is p^(N+1), where p is probability to lose single packet and
	 * time to detect the loss is about RTO*(2^N - 1) with exponential
	 * backoff). Normal timewait length is calculated so that we
	 * waited at least for one retransmitted FIN (maximal RTO is 120sec).
	 * [ BTW Linux, following BSD, violates this requirement waiting
	 *   only for 60sec, we should wait at least for 240 secs.
	 *   Well, 240 consumes too much of resources 8)                ]
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to bounds required
	 * by RTO, rather than MSL. So, if peer understands PAWS, we
	 * kill tw bucket after 3.5*RTO (it is important that this number
	 * is greater than TS tick!) and detect old duplicates with help
	 * of PAWS.
	 */
	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;
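
	/*
	 * Illustrative example of the rounding above: the timeout is
	 * converted into a whole number of recycle ticks, rounding up.
	 * For timeo == 3 * (1 << INET_TWDR_RECYCLE_TICK) + 1 jiffies the
	 * expression yields slot == 4.
	 */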
	spin_lock(&twdr->death_lock);

	/* Unlink it, if it was scheduled */
	if (inet_twsk_del_dead_node(tw))
		twdr->tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= timewait_len) {
			slot = INET_TWDR_TWKILL_SLOTS - 1;
		} else {
			slot = DIV_ROUND_UP(timeo, twdr->period);
			if (slot >= INET_TWDR_TWKILL_SLOTS)
				slot = INET_TWDR_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
		list = &twdr->cells[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

		if (twdr->twcal_hand < 0) {
			twdr->twcal_hand = 0;
			twdr->twcal_jiffie = jiffies;
			twdr->twcal_timer.expires = twdr->twcal_jiffie +
					(slot << INET_TWDR_RECYCLE_TICK);
			add_timer(&twdr->twcal_timer);
		} else {
			if (time_after(twdr->twcal_timer.expires,
				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
				mod_timer(&twdr->twcal_timer,
					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
		}
		list = &twdr->twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (twdr->tw_count++ == 0)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twsk_schedule);
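
/*
 * Timer handler for the recycle (twcal) wheel: walks the twcal_row slots
 * that have expired, kills their TIME_WAIT sockets, and re-arms the timer
 * for the first slot that is still in the future.
 */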
void inet_twdr_twcal_tick(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	twdr = (struct inet_timewait_death_row *)data;

	spin_lock(&twdr->death_lock);
	if (twdr->twcal_hand < 0)
		goto out;

	slot = twdr->twcal_hand;
	j = twdr->twcal_jiffie;

	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct inet_timewait_sock *tw;

			inet_twsk_for_each_inmate_safe(tw, node, safe,
						       &twdr->twcal_row[slot]) {
				__inet_twsk_del_dead_node(tw);
				__inet_twsk_kill(tw, twdr->hashinfo);
				inet_twsk_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				twdr->twcal_jiffie = j;
				twdr->twcal_hand = slot;
			}

			if (!hlist_empty(&twdr->twcal_row[slot])) {
				mod_timer(&twdr->twcal_timer, j);
				goto out;
			}
		}
		j += 1 << INET_TWDR_RECYCLE_TICK;
		slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
	}
	twdr->twcal_hand = -1;

out:
	if ((twdr->tw_count -= killed) == 0)
		del_timer(&twdr->tw_timer);
	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);