net/ipv4/inet_timewait_sock.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Generic TIME_WAIT sockets functions
 *
 *		From code originally in TCP
 */

#include <linux/config.h>

#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
/* Must be called with locally disabled BHs. */
void __inet_twsk_kill(struct inet_timewait_sock *tw, struct inet_hashinfo *hashinfo)
{
	struct inet_bind_hashbucket *bhead;
	struct inet_bind_bucket *tb;
	/* Unlink from established hashes. */
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, tw->tw_hash);

	write_lock(&ehead->lock);
	if (hlist_unhashed(&tw->tw_node)) {
		write_unlock(&ehead->lock);
		return;
	}
	__hlist_del(&tw->tw_node);
	sk_node_init(&tw->tw_node);
	write_unlock(&ehead->lock);

	/* Disassociate with bind bucket. */
	bhead = &hashinfo->bhash[inet_bhashfn(tw->tw_num, hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tb = tw->tw_tb;
	__hlist_del(&tw->tw_bind_node);
	tw->tw_tb = NULL;
	inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
	spin_unlock(&bhead->lock);
#ifdef SOCK_REFCNT_DEBUG
	if (atomic_read(&tw->tw_refcnt) != 1) {
		printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",
		       tw->tw_prot->name, tw, atomic_read(&tw->tw_refcnt));
	}
#endif
	inet_twsk_put(tw);
}

EXPORT_SYMBOL_GPL(__inet_twsk_kill);
/*
 * Enter the time wait state. This is called with locally disabled BH.
 * Essentially we whip up a timewait bucket, copy the relevant info into it
 * from the SK, and mess with hash chains and list linkage.
 */
void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
			   struct inet_hashinfo *hashinfo)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct inet_ehash_bucket *ehead = inet_ehash_bucket(hashinfo, sk->sk_hash);
	struct inet_bind_hashbucket *bhead;
	/* Step 1: Put TW into bind hash. Original socket stays there too.
	   Note that any socket with inet->num != 0 MUST be bound in
	   the binding cache, even if it is closed.
	 */
	bhead = &hashinfo->bhash[inet_bhashfn(inet->num, hashinfo->bhash_size)];
	spin_lock(&bhead->lock);
	tw->tw_tb = icsk->icsk_bind_hash;
	BUG_TRAP(icsk->icsk_bind_hash);
	inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
	spin_unlock(&bhead->lock);

	write_lock(&ehead->lock);

	/* Step 2: Remove SK from established hash. */
	if (__sk_del_node_init(sk))
		sock_prot_dec_use(sk->sk_prot);

	/* Step 3: Hash TW into TIMEWAIT half of established hash table. */
	inet_twsk_add_node(tw, &(ehead + hashinfo->ehash_size)->chain);
	atomic_inc(&tw->tw_refcnt);

	write_unlock(&ehead->lock);
}

EXPORT_SYMBOL_GPL(__inet_twsk_hashdance);
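/*
 * Note on Step 3 above: in this kernel the established hash table is
 * allocated with 2 * ehash_size buckets.  Bucket i chains established
 * sockets, and bucket i + ehash_size chains the TIME_WAIT sockets that
 * hash to i; both chains are protected by the lock in bucket i, which
 * is also why __inet_twsk_kill() only takes ehead->lock above.
 */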
struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int state)
{
	struct inet_timewait_sock *tw =
		kmem_cache_alloc(sk->sk_prot_creator->twsk_prot->twsk_slab,
				 SLAB_ATOMIC);
	if (tw != NULL) {
		const struct inet_sock *inet = inet_sk(sk);

		/* Give us an identity. */
		tw->tw_daddr = inet->daddr;
		tw->tw_rcv_saddr = inet->rcv_saddr;
		tw->tw_bound_dev_if = sk->sk_bound_dev_if;
		tw->tw_num = inet->num;
		tw->tw_state = TCP_TIME_WAIT;
		tw->tw_substate = state;
		tw->tw_sport = inet->sport;
		tw->tw_dport = inet->dport;
		tw->tw_family = sk->sk_family;
		tw->tw_reuse = sk->sk_reuse;
		tw->tw_hash = sk->sk_hash;
		tw->tw_ipv6only = 0;
		tw->tw_prot = sk->sk_prot_creator;
		atomic_set(&tw->tw_refcnt, 1);
		inet_twsk_dead_node_init(tw);
		__module_get(tw->tw_prot->owner);
	}

	return tw;
}

EXPORT_SYMBOL_GPL(inet_twsk_alloc);
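/*
 * The usual caller sequence (see tcp_time_wait() in net/ipv4/tcp_minisocks.c):
 * allocate the bucket with inet_twsk_alloc(), copy any protocol-private
 * state into it, swap it into the hash tables with __inet_twsk_hashdance(),
 * then arm its timeout with inet_twsk_schedule().
 */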
/* Returns non-zero if quota exceeded. */
static int inet_twdr_do_twkill_work(struct inet_timewait_death_row *twdr,
				    const int slot)
{
	struct inet_timewait_sock *tw;
	struct hlist_node *node;
	unsigned int killed;
	int ret;

	/* NOTE: compare this to the previous version where the lock
	 * was released after detaching the chain.  It was racy,
	 * because tw buckets are scheduled in a non-serialized context
	 * in 2.3 (with netfilter), and with softnet it is common, because
	 * soft irqs are not sequenced.
	 */
	killed = 0;
	ret = 0;
rescan:
	inet_twsk_for_each_inmate(tw, node, &twdr->cells[slot]) {
		__inet_twsk_del_dead_node(tw);
		spin_unlock(&twdr->death_lock);
		__inet_twsk_kill(tw, twdr->hashinfo);
		inet_twsk_put(tw);
		killed++;
		spin_lock(&twdr->death_lock);
		if (killed > INET_TWDR_TWKILL_QUOTA) {
			ret = 1;
			break;
		}

		/* While we dropped twdr->death_lock, another cpu may have
		 * killed off the next TW bucket in the list, therefore
		 * do a fresh re-read of the hlist head node with the
		 * lock reacquired.  We still use the hlist traversal
		 * macro in order to get the prefetches.
		 */
		goto rescan;
	}

	twdr->tw_count -= killed;
	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITED, killed);

	return ret;
}
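/*
 * Timer handler for the slow timewait wheel: reap the current slot and,
 * if inet_twdr_do_twkill_work() hit its quota, mark the slot in
 * thread_slots and defer the rest of the work to inet_twdr_twkill_work()
 * via the workqueue before rearming the timer.
 */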
void inet_twdr_hangman(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	unsigned int need_timer;

	twdr = (struct inet_timewait_death_row *)data;
	spin_lock(&twdr->death_lock);

	if (twdr->tw_count == 0)
		goto out;

	need_timer = 0;
	if (inet_twdr_do_twkill_work(twdr, twdr->slot)) {
		twdr->thread_slots |= (1 << twdr->slot);
		mb();
		schedule_work(&twdr->twkill_work);
		need_timer = 1;
	} else {
		/* We purged the entire slot, anything left? */
		if (twdr->tw_count)
			need_timer = 1;
	}
	twdr->slot = ((twdr->slot + 1) & (INET_TWDR_TWKILL_SLOTS - 1));
	if (need_timer)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
out:
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twdr_hangman);
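/*
 * twkill_slots_invalid() is intentionally never defined anywhere: if the
 * sanity check below is true at compile time, the call survives constant
 * folding and the kernel fails to link, turning an oversized
 * INET_TWDR_TWKILL_SLOTS into a build-time error instead of a silent
 * runtime bug.
 */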
extern void twkill_slots_invalid(void);

void inet_twdr_twkill_work(void *data)
{
	struct inet_timewait_death_row *twdr = data;
	int i;

	if ((INET_TWDR_TWKILL_SLOTS - 1) > (sizeof(twdr->thread_slots) * 8))
		twkill_slots_invalid();

	while (twdr->thread_slots) {
		spin_lock_bh(&twdr->death_lock);
		for (i = 0; i < INET_TWDR_TWKILL_SLOTS; i++) {
			if (!(twdr->thread_slots & (1 << i)))
				continue;

			while (inet_twdr_do_twkill_work(twdr, i) != 0) {
				if (need_resched()) {
					spin_unlock_bh(&twdr->death_lock);
					schedule();
					spin_lock_bh(&twdr->death_lock);
				}
			}

			twdr->thread_slots &= ~(1 << i);
		}
		spin_unlock_bh(&twdr->death_lock);
	}
}

EXPORT_SYMBOL_GPL(inet_twdr_twkill_work);
/* These are always called from BH context.  See callers in
 * tcp_input.c to verify this.
 */

/* This is for handling early-kills of TIME_WAIT sockets. */
void inet_twsk_deschedule(struct inet_timewait_sock *tw,
			  struct inet_timewait_death_row *twdr)
{
	spin_lock(&twdr->death_lock);
	if (inet_twsk_del_dead_node(tw)) {
		inet_twsk_put(tw);
		if (--twdr->tw_count == 0)
			del_timer(&twdr->tw_timer);
	}
	spin_unlock(&twdr->death_lock);
	__inet_twsk_kill(tw, twdr->hashinfo);
}

EXPORT_SYMBOL(inet_twsk_deschedule);
void inet_twsk_schedule(struct inet_timewait_sock *tw,
			struct inet_timewait_death_row *twdr,
			const int timeo, const int timewait_len)
{
	struct hlist_head *list;
	int slot;

	/* timeout := RTO * 3.5
	 *
	 * 3.5 = 1+2+0.5 to wait for two retransmits.
	 *
	 * RATIONALE: if a FIN arrived and we entered the TIME-WAIT state,
	 * our ACK acking that FIN can be lost.  N subsequent retransmitted
	 * FINs (or previous segments) are lost with probability p^(N+1),
	 * where p is the probability of losing a single packet, and the
	 * time to detect the loss is about RTO*(2^N - 1) with exponential
	 * backoff.  The normal timewait length is chosen so that we wait
	 * at least for one retransmitted FIN (the maximal RTO is 120sec).
	 * [ BTW Linux, following BSD, violates this requirement by waiting
	 * only for 60sec; we should wait at least for 240 secs.
	 * Well, 240 consumes too much of resources 8) ]
	 *
	 * This interval is not reduced to catch old duplicates and
	 * responses to our wandering segments living for two MSLs.
	 * However, if we use PAWS to detect
	 * old duplicates, we can reduce the interval to bounds required
	 * by RTO, rather than MSL.  So, if the peer understands PAWS, we
	 * kill the tw bucket after 3.5*RTO (it is important that this
	 * number is greater than the TS tick!) and detect old duplicates
	 * with the help of PAWS.
	 */
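	/* Concretely: with p = 1% packet loss, the FIN and its first
	 * retransmission are both lost with probability p^2 = 0.01%, and
	 * 3.5*RTO = RTO*(1 + 2 + 0.5) covers the first two exponentially
	 * backed-off retransmissions plus half an RTO of slack.
	 *
	 * The shift below is a round-up division,
	 * slot = ceil(timeo / 2^INET_TWDR_RECYCLE_TICK): e.g. with HZ=1000,
	 * where INET_TWDR_RECYCLE_TICK evaluates to 7 (128-jiffie buckets),
	 * a timeo of 300 jiffies lands in slot ceil(300/128) = 3.
	 */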
	slot = (timeo + (1 << INET_TWDR_RECYCLE_TICK) - 1) >> INET_TWDR_RECYCLE_TICK;

	spin_lock(&twdr->death_lock);

	/* Unlink it, if it was scheduled */
	if (inet_twsk_del_dead_node(tw))
		twdr->tw_count--;
	else
		atomic_inc(&tw->tw_refcnt);

	if (slot >= INET_TWDR_RECYCLE_SLOTS) {
		/* Schedule to slow timer */
		if (timeo >= timewait_len) {
			slot = INET_TWDR_TWKILL_SLOTS - 1;
		} else {
			slot = (timeo + twdr->period - 1) / twdr->period;
			if (slot >= INET_TWDR_TWKILL_SLOTS)
				slot = INET_TWDR_TWKILL_SLOTS - 1;
		}
		tw->tw_ttd = jiffies + timeo;
		slot = (twdr->slot + slot) & (INET_TWDR_TWKILL_SLOTS - 1);
		list = &twdr->cells[slot];
	} else {
		tw->tw_ttd = jiffies + (slot << INET_TWDR_RECYCLE_TICK);

		if (twdr->twcal_hand < 0) {
			twdr->twcal_hand = 0;
			twdr->twcal_jiffie = jiffies;
			twdr->twcal_timer.expires = twdr->twcal_jiffie +
					(slot << INET_TWDR_RECYCLE_TICK);
			add_timer(&twdr->twcal_timer);
		} else {
			if (time_after(twdr->twcal_timer.expires,
				       jiffies + (slot << INET_TWDR_RECYCLE_TICK)))
				mod_timer(&twdr->twcal_timer,
					  jiffies + (slot << INET_TWDR_RECYCLE_TICK));
			slot = (twdr->twcal_hand + slot) & (INET_TWDR_RECYCLE_SLOTS - 1);
		}
		list = &twdr->twcal_row[slot];
	}

	hlist_add_head(&tw->tw_death_node, list);

	if (twdr->tw_count++ == 0)
		mod_timer(&twdr->tw_timer, jiffies + twdr->period);
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twsk_schedule);
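/*
 * Timer handler for the recycle-mode wheel armed above: starting at
 * twcal_hand, reap every slot whose deadline has already passed, then
 * either re-arm the timer for the first non-empty future slot or park
 * the wheel by setting twcal_hand back to -1.
 */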
void inet_twdr_twcal_tick(unsigned long data)
{
	struct inet_timewait_death_row *twdr;
	int n, slot;
	unsigned long j;
	unsigned long now = jiffies;
	int killed = 0;
	int adv = 0;

	twdr = (struct inet_timewait_death_row *)data;

	spin_lock(&twdr->death_lock);
	if (twdr->twcal_hand < 0)
		goto out;

	slot = twdr->twcal_hand;
	j = twdr->twcal_jiffie;

	for (n = 0; n < INET_TWDR_RECYCLE_SLOTS; n++) {
		if (time_before_eq(j, now)) {
			struct hlist_node *node, *safe;
			struct inet_timewait_sock *tw;

			inet_twsk_for_each_inmate_safe(tw, node, safe,
						       &twdr->twcal_row[slot]) {
				__inet_twsk_del_dead_node(tw);
				__inet_twsk_kill(tw, twdr->hashinfo);
				inet_twsk_put(tw);
				killed++;
			}
		} else {
			if (!adv) {
				adv = 1;
				twdr->twcal_jiffie = j;
				twdr->twcal_hand = slot;
			}

			if (!hlist_empty(&twdr->twcal_row[slot])) {
				mod_timer(&twdr->twcal_timer, j);
				goto out;
			}
		}
		j += 1 << INET_TWDR_RECYCLE_TICK;
		slot = (slot + 1) & (INET_TWDR_RECYCLE_SLOTS - 1);
	}
	twdr->twcal_hand = -1;

out:
	if ((twdr->tw_count -= killed) == 0)
		del_timer(&twdr->tw_timer);
	NET_ADD_STATS_BH(LINUX_MIB_TIMEWAITKILLED, killed);
	spin_unlock(&twdr->death_lock);
}

EXPORT_SYMBOL_GPL(inet_twdr_twcal_tick);