/*
 * Connection oriented routing
 * Copyright (C) 2007-2008 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

#include "cor.h"

struct kmem_cache *connretrans_slab;
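
/*
 * A conn_retrans describes one contiguous range of sent-but-unacked
 * conn data, identified by (neighbor, conn_id, seqno). Each entry sits
 * on three containers at once: the neighbor's retrans_list_conn
 * (ordered by timeout), the owning connection's retrans_list, and the
 * retransmits_conn hashtable used for ack lookup. The two lists share
 * one kref; the hashtable appears to hold its own, since htable_delete
 * is passed free_connretrans as a release function.
 */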
struct conn_retrans {
        /* timeout_list and conn_list share a single ref */
        struct kref ref;
        struct list_head timeout_list;
        struct list_head conn_list;
        struct htab_entry htab_entry;
        struct conn *rconn;
        __u32 seqno;
        __u32 length;
        unsigned long timeout;
};
static void free_connretrans(struct kref *ref)
{
        struct conn_retrans *cr = container_of(ref, struct conn_retrans, ref);
        struct conn *rconn = cr->rconn;

        /* take rconn out of cr before freeing cr, otherwise the
         * kref_put below would read freed memory */
        kmem_cache_free(connretrans_slab, cr);
        kref_put(&(rconn->ref), free_conn);
}
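
/*
 * Lookup machinery for acks: entries are hashed under the weak key
 * conn_id ^ seqno computed by rm_to_key(); collisions are resolved by
 * the full (nb, conn_id, seqno) comparison in
 * matches_connretrans_connid_seqno() at the bottom of this file.
 */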
static struct htable retransmits_conn;

struct retransmit_matchparam {
        struct neighbor *nb;
        __u32 conn_id;
        __u32 seqno;
};

static __u32 rm_to_key(struct retransmit_matchparam *rm)
{
        return rm->conn_id ^ rm->seqno;
}

static void htable_insert_connretrans(struct conn_retrans *cr)
{
        struct retransmit_matchparam rm;

        rm.conn_id = cr->rconn->target.out.conn_id;
        rm.seqno = cr->seqno;
        rm.nb = cr->rconn->target.out.nb;

        htable_insert(&retransmits_conn, (char *) cr, rm_to_key(&rm));
}

static int htable_delete_connretrans(struct conn_retrans *cr)
{
        struct retransmit_matchparam rm;

        rm.conn_id = cr->rconn->target.out.conn_id;
        rm.seqno = cr->seqno;
        rm.nb = cr->rconn->target.out.nb;

        return htable_delete(&retransmits_conn, rm_to_key(&rm), &rm,
                        free_connretrans);
}
/* static struct sk_buff *cor_dequeue(struct Qdisc *sch)
{
        struct sk_buff *ret;

        struct cor_sched_data *q = qdisc_priv(sch);

        struct list_head *ln = q->conn_list.next;
        struct conn *best = 0;

        __u64 currcost_limit = 0;
        __u64 currcost = 0;

        spin_lock(&(q->lock));

        if (!(skb_queue_empty(&(q->requeue_queue)))) {
                ret = __skb_dequeue(&(q->requeue_queue));
                goto out;
        }

        while (&(q->conn_list) != ln) {
                __u32 max1, max2, maxcost;
                struct conn *curr = (struct conn *)
                                (((char *) ln) - offsetof(struct conn,
                                target.out.queue_list));

                BUG_ON(TARGET_OUT != curr->targettype);

                max1 = (256 * ((__u64) curr->credits)) /
                                ((__u64) curr->bytes_queued + curr->avg_rate);

                max2 = (256 * ((__u64) curr->credits +
                                curr->credit_sender - curr->credit_recp)) /
                                ((__u64) curr->bytes_queued + 2*curr->avg_rate);

                maxcost = max((__u32) 0, min(max1, max2));

                if (maxcost > currcost_limit) {
                        currcost = currcost_limit;
                        currcost_limit = maxcost;
                        best = curr;
                }

                ln = ln->next;
        }

        best->credits -= currcost;

        ret = __skb_dequeue(&(best->target.out.queue));

        if (skb_queue_empty(&(best->target.out.queue))) {
                list_del(&(best->target.out.queue_list));
                best->target.out.qdisc_active = 0;
        }

out:
        spin_unlock(&(q->lock));

        if (likely(0 != ret)) {
                sch->qstats.backlog -= ret->len;
                sch->q.qlen--;
        }

        return ret;
}

static int cor_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct cor_sched_data *q = qdisc_priv(sch);
        struct conn *rconn;

        rconn = skb_pstate(skb)->rconn;

        BUG_ON(TARGET_OUT != rconn->targettype);

        spin_lock(&(rconn->target.out.qdisc_lock));

        __skb_queue_tail(&(rconn->target.out.queue), skb);

        if (unlikely(0 == rconn->target.out.qdisc_active)) {
                spin_lock(&(q->lock));
                list_add(&(rconn->target.out.queue_list), &(q->conn_list));
                rconn->target.out.qdisc_active = 1;
                spin_unlock(&(q->lock));
        }

        spin_unlock(&(rconn->target.out.qdisc_lock));

        sch->bstats.bytes += skb->len;
        sch->bstats.packets++;
        sch->q.qlen++;

        return NET_XMIT_SUCCESS;
} */
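
/*
 * Wire format of a data packet as built below, after the link layer
 * header (9 bytes of cor header, hence the "size + 9" in alloc_skb):
 *
 *      offset 0: PACKET_TYPE_DATA      (1 byte)
 *      offset 1: conn_id               (4 bytes)
 *      offset 5: seqno                 (4 bytes)
 *      offset 9: payload               (size bytes)
 *
 * put_u32() is defined elsewhere in cor; its third argument presumably
 * requests conversion to network byte order.
 */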
static struct sk_buff *create_packet(struct neighbor *nb, int size,
                gfp_t alloc_flags, __u32 conn_id, __u32 seqno)
{
        struct sk_buff *ret;
        char *dest;

        ret = alloc_skb(size + 9 + LL_ALLOCATED_SPACE(nb->dev), alloc_flags);
        if (unlikely(0 == ret))
                return 0;

        ret->protocol = htons(ETH_P_COR);
        ret->dev = nb->dev;

        skb_reserve(ret, LL_RESERVED_SPACE(nb->dev));
        if (unlikely(dev_hard_header(ret, nb->dev, ETH_P_COR, nb->mac,
                        nb->dev->dev_addr, ret->len) < 0)) {
                kfree_skb(ret); /* do not leak the skb on header errors */
                return 0;
        }
        skb_reset_network_header(ret);

        dest = skb_put(ret, 9);
        BUG_ON(0 == dest);

        dest[0] = PACKET_TYPE_DATA;
        dest += 1;

        put_u32(dest, conn_id, 1);
        dest += 4;
        put_u32(dest, seqno, 1);
        dest += 4;

        return ret;
}
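
/*
 * Kernel (control) packets travel with conn_id 0 and draw their seqnos
 * from a per-neighbor counter rather than from a connection. The skb
 * procstate is preseeded here so that schedule_retransmit() (the
 * kpacket counterpart of schedule_retransmit_conn(), defined outside
 * this file) can queue the packet for retransmission.
 */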
struct sk_buff *create_packet_kernel(struct neighbor *nb, int size,
                gfp_t alloc_flags)
{
        __u32 seqno = atomic_add_return(1, &(nb->kpacket_seqno));
        struct sk_buff *skb = create_packet(nb, size, alloc_flags, 0, seqno);
        struct skb_procstate *ps;

        if (unlikely(skb == 0))
                return 0;

        ps = skb_pstate(skb);
        memset(&(ps->funcstate.retransmit_queue), 0,
                        sizeof(ps->funcstate.retransmit_queue));
        ps->funcstate.retransmit_queue.conn_id = 0;
        ps->funcstate.retransmit_queue.seqno = seqno;
        ps->funcstate.retransmit_queue.nb = nb;
        /* the nb ref is taken in schedule_retransmit */
        return skb;
}
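
/*
 * Retransmit timeout: a fixed 300ms base plus the measured neighbor
 * latency. The division by 1000 suggests nb->latency is kept in
 * microseconds.
 */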
static void set_conn_retrans_timeout(struct conn_retrans *cr)
{
        cr->timeout = jiffies + msecs_to_jiffies(300 + ((__u32) atomic_read(
                        &(cr->rconn->target.out.nb->latency)))/1000);
}
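
/*
 * Requeue cr for another retransmission of its first "length" bytes.
 * If cr covers more than "length" bytes it is truncated and a new
 * entry covering the remainder is created, queued and returned. If it
 * covers exactly "length" bytes, it is moved to the tail of the
 * timeout list with a fresh timeout and 0 is returned. On allocation
 * failure 0 is returned as well, but cr->length stays greater than
 * "length", which is how callers tell the two 0 cases apart.
 */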
static struct conn_retrans *readd_conn_retrans(struct conn_retrans *cr,
                struct neighbor *nb, __u32 length)
{
        unsigned long iflags;

        struct conn_retrans *ret = 0;

        if (unlikely(cr->length > length)) {
                ret = kmem_cache_alloc(connretrans_slab, GFP_KERNEL);
                if (unlikely(ret == 0)) {
                        cr->timeout = jiffies + 1;
                        return 0;
                }

                memset(ret, 0, sizeof(struct conn_retrans));
                ret->rconn = cr->rconn;
                kref_get(&(cr->rconn->ref));
                ret->seqno = cr->seqno + length;
                ret->length = cr->length - length;
                kref_init(&(ret->ref));
        }

        spin_lock_irqsave(&(nb->retrans_lock), iflags);

        if (ret != 0) {
                htable_insert_connretrans(ret);
                list_add(&(ret->timeout_list), &(nb->retrans_list_conn));
                list_add(&(ret->conn_list), &(cr->conn_list));

                cr->length = length;
        } else {
                list_del(&(cr->timeout_list));
                list_add_tail(&(cr->timeout_list), &(nb->retrans_list_conn));
                set_conn_retrans_timeout(cr);

                BUG_ON(cr->length != length);
        }

        spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

        return ret;
}
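
/*
 * Resend one retransmit entry: full-MSS chunks go out as raw skbs,
 * with the entry split step by step via readd_conn_retrans(); a
 * sub-MSS tail is handed to the control message code via
 * send_conndata(). databuf_pullold() apparently re-reads data that was
 * already pulled from the connection buffer, addressed by seqno.
 */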
static void send_retrans(struct neighbor *nb, struct conn_retrans *cr)
{
        int targetmss = mss(nb);

        mutex_lock(&(cr->rconn->rcv_lock));

        while (cr->length >= targetmss) {
                struct sk_buff *skb;
                char *dst;
                struct conn_retrans *rest;

                skb = create_packet(nb, targetmss, GFP_KERNEL,
                                cr->rconn->target.out.conn_id, cr->seqno);
                if (unlikely(skb == 0)) {
                        cr->timeout = jiffies + 1;
                        goto out;
                }

                /* split cr: cr keeps the first targetmss bytes, rest
                 * covers the remainder; rest == 0 either on exact fit
                 * or on allocation failure, which leaves cr->length
                 * untouched */
                rest = readd_conn_retrans(cr, nb, targetmss);
                if (unlikely(rest == 0 && cr->length > targetmss)) {
                        kfree_skb(skb);
                        goto out;
                }

                dst = skb_put(skb, targetmss);

                databuf_pullold(&(cr->rconn->buf), cr->seqno, dst, targetmss);
                dev_queue_xmit(skb);

                if (rest == 0)
                        goto out;
                cr = rest;
        }

        if (cr->length > 0) {
                struct control_msg_out *cm;
                char *buf = kmalloc(cr->length, GFP_KERNEL);

                if (unlikely(buf == 0)) {
                        cr->timeout = jiffies + 1;
                        goto out;
                }

                databuf_pullold(&(cr->rconn->buf), cr->seqno, buf, cr->length);

                cm = alloc_control_msg();
                if (unlikely(cm == 0)) {
                        cr->timeout = jiffies + 1;
                        kfree(buf);
                        goto out;
                }

                if (unlikely(readd_conn_retrans(cr, nb, cr->length) != 0))
                        BUG();

                send_conndata(cm, nb, cr->rconn->target.out.conn_id, cr->seqno,
                                buf, buf, cr->length);
        }

out:
        mutex_unlock(&(cr->rconn->rcv_lock));
}
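
/*
 * Delayed-work handler for per-neighbor conn retransmits. Entries are
 * popped off the head of retrans_list_conn: entries of reset
 * connections (or of a killed neighbor) are discarded, due entries are
 * resent, and the first not-yet-due entry re-arms the work. Once the
 * list is empty, the running flag is cleared and the neighbor ref
 * taken in schedule_retransmit_conn() is dropped.
 */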
void retransmit_conn_timerfunc(struct work_struct *work)
{
        unsigned long iflags;

        struct neighbor *nb = container_of(to_delayed_work(work),
                        struct neighbor, retrans_timer_conn);

        struct conn_retrans *cr = 0;

        int nbstate;
        int nbput = 0;

        spin_lock_irqsave(&(nb->state_lock), iflags);
        nbstate = nb->state;
        spin_unlock_irqrestore(&(nb->state_lock), iflags);

        while (1) {
                spin_lock_irqsave(&(nb->retrans_lock), iflags);

                if (list_empty(&(nb->retrans_list_conn))) {
                        nb->retrans_timer_conn_running = 0;
                        nbput = 1;
                        break;
                }

                cr = container_of(nb->retrans_list_conn.next,
                                struct conn_retrans, timeout_list);

                BUG_ON(cr->rconn->targettype != TARGET_OUT);

                if (unlikely(unlikely(nbstate == NEIGHBOR_STATE_KILLED) ||
                                unlikely(atomic_read(
                                &(cr->rconn->isreset)) != 0))) {
                        htable_delete_connretrans(cr);

                        list_del(&(cr->timeout_list));
                        list_del(&(cr->conn_list));
                        spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

                        kref_put(&(cr->ref), free_connretrans);
                        continue;
                }

                BUG_ON(nb != cr->rconn->target.out.nb);

                if (time_after(cr->timeout, jiffies)) {
                        /* head entry is not due yet: re-arm this
                         * delayed work (it is a workqueue item, not
                         * the kernel-packet retrans_timer) */
                        schedule_delayed_work(&(nb->retrans_timer_conn),
                                        cr->timeout - jiffies);
                        break;
                }

                kref_get(&(cr->ref));
                spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
                send_retrans(nb, cr);
                kref_put(&(cr->ref), free_connretrans);
        }

        spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

        if (nbput)
                kref_put(&(nb->ref), neighbor_free);
}
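
/*
 * Handle an ack for conn data: find the matching retransmit entry and
 * release it. The lookup is by exact seqno; partially acked entries
 * and the length parameter are not handled yet (see the #warning
 * below). Acks that match nothing can be bogus, but can also be
 * duplicates for data that was retransmitted in the meantime.
 */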
void conn_ack_rcvd(struct neighbor *nb, __u32 conn_id, __u32 seqno,
                __u32 length)
{
        unsigned long iflags;
        struct retransmit_matchparam rm;
        struct conn_retrans *cr;

#warning todo length

        rm.conn_id = conn_id;
        rm.seqno = seqno;
        rm.nb = nb;

        spin_lock_irqsave(&(nb->retrans_lock), iflags);

        cr = (struct conn_retrans *) htable_get(&retransmits_conn,
                        rm_to_key(&rm), &rm);

        if (cr == 0) {
                printk(KERN_ERR "bogus/duplicate ack received\n");
                goto out_err;
        }

        BUG_ON(cr->rconn->targettype != TARGET_OUT);

        if (cr->rconn->target.out.nb != nb) {
                printk(KERN_ERR "invalid neighbor when receiving ack\n");
                goto out_err;
        }

        if (htable_delete_connretrans(cr))
                BUG();

        list_del(&(cr->timeout_list));
        list_del(&(cr->conn_list));
        spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

        kref_put(&(cr->ref), free_connretrans);

        return;

out_err:
        spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
}
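
/*
 * Register freshly sent conn data for retransmission. If the new
 * entry is the first on the neighbor's timeout list and the worker is
 * not running, the delayed work is armed and a neighbor ref is taken;
 * retransmit_conn_timerfunc() drops that ref when it stops. Returns 0
 * on success, nonzero on allocation failure.
 */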
static int schedule_retransmit_conn(struct conn *rconn, __u32 seqno, __u32 len)
{
        unsigned long iflags;

        struct neighbor *nb = rconn->target.out.nb;

        struct conn_retrans *cr;
        int first;

        BUG_ON(rconn->targettype != TARGET_OUT);

        cr = kmem_cache_alloc(connretrans_slab, GFP_KERNEL);
        if (unlikely(cr == 0))
                return 1;

        memset(cr, 0, sizeof(struct conn_retrans));
        cr->rconn = rconn;
        kref_get(&(rconn->ref));
        cr->seqno = seqno;
        cr->length = len;
        kref_init(&(cr->ref));
        set_conn_retrans_timeout(cr);

        spin_lock_irqsave(&(nb->retrans_lock), iflags);

        htable_insert_connretrans(cr);

        first = unlikely(list_empty(&(nb->retrans_list_conn)));
        list_add_tail(&(cr->timeout_list), &(nb->retrans_list_conn));

        list_add_tail(&(cr->conn_list), &(rconn->target.out.retrans_list));

        if (unlikely(unlikely(first) &&
                        unlikely(nb->retrans_timer_conn_running == 0))) {
                __u32 delay = cr->timeout - jiffies;
                schedule_delayed_work(&(nb->retrans_timer_conn), delay);
                nb->retrans_timer_conn_running = 1;
                kref_get(&(nb->ref));
        }

        spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
        return 0;
}
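
/*
 * Push the buffered data of an outgoing connection to the wire:
 * full-MSS chunks as raw data packets, the remainder through a control
 * message. target.out.seqno only advances once the matching retransmit
 * entry has been scheduled, so nothing is ever sent that could not
 * also be resent. A conn_id of 0 apparently means the connection is
 * not fully set up yet, so there is nothing to send to.
 */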
void flush_out(struct conn *rconn)
{
        int targetmss = mss(rconn->target.out.nb);
        __u32 seqno;

        BUG_ON(rconn->targettype != TARGET_OUT);

        if (unlikely(rconn->target.out.conn_id == 0))
                return;

        while (rconn->buf.read_remaining >= targetmss) {
                struct sk_buff *skb;
                char *dst;

                seqno = rconn->target.out.seqno;
                skb = create_packet(rconn->target.out.nb, targetmss, GFP_ATOMIC,
                                rconn->target.out.conn_id, seqno);
                if (unlikely(skb == 0))
                        goto oom;

                dst = skb_put(skb, targetmss);

                databuf_pull(&(rconn->buf), dst, targetmss);

                if (unlikely(schedule_retransmit_conn(rconn, seqno,
                                targetmss))) {
                        kfree_skb(skb);
                        goto oom;
                }

                rconn->target.out.seqno += targetmss;
                dev_queue_xmit(skb);
        }

        if (rconn->buf.read_remaining > 0) {
                struct control_msg_out *cm;
                __u32 len = rconn->buf.read_remaining;
                char *buf = kmalloc(len, GFP_KERNEL);

                if (unlikely(buf == 0))
                        goto oom;

                databuf_pull(&(rconn->buf), buf, len);

                cm = alloc_control_msg();
                if (unlikely(cm == 0)) {
                        kfree(buf);
                        goto oom;
                }

                seqno = rconn->target.out.seqno;
                if (schedule_retransmit_conn(rconn, seqno, len)) {
                        kfree(buf); /* buf is not handed off on failure */
                        free_control_msg(cm);
                        goto oom;
                }

                rconn->target.out.seqno += len;

                send_conndata(cm, rconn->target.out.nb,
                                rconn->target.out.conn_id, seqno, buf, buf,
                                len);
        }

        wake_sender(rconn);

        if (0) {
oom:
#warning todo flush later
        }
}
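
/*
 * Full-match callback for the hashtable; resolves the collisions that
 * the weak conn_id ^ seqno key from rm_to_key() allows. The void
 * pointer calling convention is presumably dictated by the htable
 * implementation in cor.h.
 */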
static int matches_connretrans_connid_seqno(void *htentry, void *searcheditem)
{
        struct conn_retrans *cr = (struct conn_retrans *) htentry;
        struct retransmit_matchparam *rm = (struct retransmit_matchparam *)
                        searcheditem;

        return rm->conn_id == cr->rconn->target.out.conn_id &&
                        rm->seqno == cr->seqno &&
                        rm->nb == cr->rconn->target.out.nb;
}

int __init cor_snd_init(void)
{
        connretrans_slab = kmem_cache_create("cor_connretrans",
                        sizeof(struct conn_retrans), 8, 0, 0);
        if (unlikely(connretrans_slab == 0))
                return -ENOMEM;

        htable_init(&retransmits_conn, matches_connretrans_connid_seqno,
                        offsetof(struct conn_retrans, htab_entry),
                        offsetof(struct conn_retrans, ref));

        return 0;
}

MODULE_LICENSE("GPL");