qos fixes, reset bufferusage on conn reset, conn refcnt fix
[cor_2_6_31.git] / net/cor/snd.c
/*
 * Connection oriented routing
 * Copyright (C) 2007-2010 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

#include "cor.h"

struct kmem_cache *connretrans_slab;
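
/*
 * A conn_retrans describes a range of already sent conn data that may have
 * to be retransmitted. It is linked both into the neighbor's
 * retrans_list_conn (used by the retransmit timer) and into the sending
 * conn's target.out.retrans_list (kept in seqno order).
 */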
struct conn_retrans {
	/* timeout_list and conn_list share a single ref */
	struct kref ref;
	struct list_head timeout_list;
	struct list_head conn_list;
	struct htab_entry htab_entry;
	struct conn *rconn;
	__u32 seqno;
	__u32 length;
	__u8 ackrcvd;
	unsigned long timeout;
};

static void free_connretrans(struct kref *ref)
{
	struct conn_retrans *cr = container_of(ref, struct conn_retrans, ref);
	struct conn *rconn = cr->rconn;

	kmem_cache_free(connretrans_slab, cr);
	/* put the conn ref via the saved pointer; cr is already freed */
	kref_put(&(rconn->ref), free_conn);
}

DEFINE_MUTEX(queues_lock);
LIST_HEAD(queues);
struct delayed_work qos_resume_work;
int qos_resume_scheduled;
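
/*
 * Per-device queue of senders that could not transmit because the device
 * was congested. qos_resume() works the entries off again, handling the
 * kernel packet, conn retransmit and announce queues before the waiting
 * data conns.
 */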
struct qos_queue {
	struct list_head queue_list;

	struct net_device *dev;

	struct list_head kpackets_waiting;
	struct list_head conn_retrans_waiting;
	struct list_head announce_waiting;
	struct list_head conns_waiting;
};

static int _flush_out(struct conn *rconn, int fromqos, __u32 creditsperbyte);

/* Highest bidder "pays" the credits the second has bid */
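/*
 * The comparison uses the fixed-point ratio credits/read_remaining, scaled
 * by 2^32 via multiply_div(). The winner is flushed with creditsperbyte set
 * to the runner-up's ratio (secondcredit >> 32), so it only "pays" what the
 * second highest bidder offered.
 */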
static int _resume_conns(struct qos_queue *q)
{
	struct conn *best = 0;
	__u64 bestcredit = 0;
	__u64 secondcredit = 0;

	int rc;

	struct list_head *lh = q->conns_waiting.next;

	while (lh != &(q->conns_waiting)) {
		struct conn *rconn = container_of(lh, struct conn,
				target.out.rb.lh);
		__u64 credits;

		refresh_conn_credits(rconn);

		mutex_lock(&(rconn->rcv_lock));

		BUG_ON(rconn->targettype != TARGET_OUT);

		if (atomic_read(&(rconn->isreset)) != 0) {
			lh = lh->next;

			rconn->target.out.rb.in_queue = 0;
			list_del(&(rconn->target.out.rb.lh));
			mutex_unlock(&(rconn->rcv_lock));
			kref_put(&(rconn->ref), free_conn);

			continue;
		}

		BUG_ON(rconn->buf.read_remaining == 0);

		if (rconn->credits <= 0)
			credits = 0;
		else
			credits = multiply_div(rconn->credits,
					((__u64) 1) << 32,
					rconn->buf.read_remaining);
		mutex_unlock(&(rconn->rcv_lock));

		if (best == 0 || bestcredit < credits) {
			secondcredit = bestcredit;
			best = rconn;
			bestcredit = credits;
		} else if (secondcredit < credits) {
			secondcredit = credits;
		}

		lh = lh->next;
	}

	if (best == 0)
		return RC_FLUSH_CONN_OUT_OK;

	mutex_lock(&(best->rcv_lock));
	rc = _flush_out(best, 1, (__u32) (secondcredit >> 32));

	if (rc == RC_FLUSH_CONN_OUT_OK) {
		best->target.out.rb.in_queue = 0;
		list_del(&(best->target.out.rb.lh));
	}
	mutex_unlock(&(best->rcv_lock));

	if (rc == RC_FLUSH_CONN_OUT_OK)
		kref_put(&(best->ref), free_conn);

	return rc;
}

static int resume_conns(struct qos_queue *q)
{
	while (list_empty(&(q->conns_waiting)) == 0) {
		int rc = _resume_conns(q);
		if (rc != 0)
			return rc;
	}
	return 0;
}

static int send_retrans(struct neighbor *nb, int fromqos);
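
/*
 * Drain one of the resume_block lists of a qos_queue. Entries are removed
 * and their senders resumed until one of them reports congestion (rc != 0),
 * which requeues the entry and stops processing. The reference taken in
 * qos_enqueue() is dropped once an entry leaves the queue for good.
 */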
static int _qos_resume(struct qos_queue *q, int caller)
{
	int rc = 0;

	struct list_head *lh;

	if (caller == QOS_CALLER_KPACKET)
		lh = &(q->conn_retrans_waiting);
	else if (caller == QOS_CALLER_CONN_RETRANS)
		lh = &(q->kpackets_waiting);
	else if (caller == QOS_CALLER_ANNOUNCE)
		lh = &(q->announce_waiting);
	else
		BUG();

	while (list_empty(lh) == 0) {
		struct list_head *curr = lh->next;
		struct resume_block *rb = container_of(curr,
				struct resume_block, lh);
		rb->in_queue = 0;
		list_del(curr);

		if (caller == QOS_CALLER_KPACKET) {
			struct neighbor *nb = container_of(rb, struct neighbor,
					rb_kp);
			rc = resume_send_messages(nb);
		} else if (caller == QOS_CALLER_CONN_RETRANS) {
			struct neighbor *nb = container_of(rb, struct neighbor,
					rb_cr);
#warning todo do not send if neighbor is stalled
			rc = send_retrans(nb, 1);
		} else if (caller == QOS_CALLER_ANNOUNCE) {
			struct announce_data *ann = container_of(rb,
					struct announce_data, rb);
			rc = send_announce_qos(ann);
		} else {
			BUG();
		}

		if (rc != 0 && rb->in_queue == 0) {
			rb->in_queue = 1;
			list_add(curr, lh);
		} else {
			if (caller == QOS_CALLER_KPACKET) {
				kref_put(&(container_of(rb, struct neighbor,
						rb_kp)->ref), neighbor_free);
			} else if (caller == QOS_CALLER_CONN_RETRANS) {
				kref_put(&(container_of(rb, struct neighbor,
						rb_cr)->ref), neighbor_free);
			} else if (caller == QOS_CALLER_ANNOUNCE) {
				kref_put(&(container_of(rb,
						struct announce_data, rb)->ref),
						announce_data_free);
			} else {
				BUG();
			}
		}

		if (rc != 0)
			break;
	}

	return rc;
}
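
/*
 * Work item that retries all congested senders. On continued congestion it
 * reschedules itself one jiffy later; queues whose device has gone away
 * (q->dev == 0, see destroy_queue()) are freed here.
 */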
static void qos_resume(struct work_struct *work)
{
	struct list_head *curr;

	mutex_lock(&(queues_lock));

	curr = queues.next;
	while (curr != (&queues)) {
		struct qos_queue *q = container_of(curr,
				struct qos_queue, queue_list);
		int i;

		for (i = 0; i < 4; i++) {
			int rc;
			if (i == 3)
				rc = resume_conns(q);
			else
				rc = _qos_resume(q, i);

			if (rc != 0)
				goto congested;
		}

		curr = curr->next;

		if (i == 4 && unlikely(q->dev == 0)) {
			list_del(&(q->queue_list));
			kfree(q);
		}
	}

	qos_resume_scheduled = 0;

	if (0) {
congested:
		schedule_delayed_work(&(qos_resume_work), 1);
	}

	mutex_unlock(&(queues_lock));
}

static struct qos_queue *get_queue(struct net_device *dev)
{
	struct list_head *curr = queues.next;
	while (curr != (&queues)) {
		struct qos_queue *q = container_of(curr,
				struct qos_queue, queue_list);
		if (q->dev == dev)
			return q;
		curr = curr->next;
	}
	return 0;
}
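
/*
 * destroy_queue() only detaches the device and drops the device reference;
 * the qos_queue itself stays on the list until qos_resume() sees
 * q->dev == 0 and frees it.
 */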
int destroy_queue(struct net_device *dev)
{
	struct qos_queue *q;

	mutex_lock(&(queues_lock));

	q = get_queue(dev);

	if (q == 0) {
		mutex_unlock(&(queues_lock));
		return 1;
	}

	q->dev = 0;

	dev_put(dev);

	mutex_unlock(&(queues_lock));

	return 0;
}

int create_queue(struct net_device *dev)
{
	struct qos_queue *q = kmalloc(sizeof(struct qos_queue), GFP_KERNEL);

	if (q == 0) {
		printk(KERN_ERR "cor: unable to allocate memory for device "
				"queue, not enabling device\n");
		return 1;
	}

	q->dev = dev;
	dev_hold(dev);

	INIT_LIST_HEAD(&(q->kpackets_waiting));
	INIT_LIST_HEAD(&(q->conn_retrans_waiting));
	INIT_LIST_HEAD(&(q->announce_waiting));
	INIT_LIST_HEAD(&(q->conns_waiting));

	mutex_lock(&(queues_lock));
	list_add(&(q->queue_list), &queues);
	mutex_unlock(&(queues_lock));

	return 0;
}
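
/*
 * Queue a sender that hit a full device queue. A reference on the owning
 * object (neighbor, announce_data or conn) is taken and the resume work is
 * scheduled if it is not running already.
 */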
void qos_enqueue(struct net_device *dev, struct resume_block *rb, int caller)
{
	struct qos_queue *q;

	mutex_lock(&(queues_lock));

	if (rb->in_queue)
		goto out;

	q = get_queue(dev);
	if (unlikely(q == 0))
		goto out;

	rb->in_queue = 1;

	if (caller == QOS_CALLER_KPACKET) {
		list_add(&(rb->lh), &(q->conn_retrans_waiting));
		kref_get(&(container_of(rb, struct neighbor, rb_kp)->ref));
	} else if (caller == QOS_CALLER_CONN_RETRANS) {
		list_add(&(rb->lh), &(q->kpackets_waiting));
		kref_get(&(container_of(rb, struct neighbor, rb_cr)->ref));
	} else if (caller == QOS_CALLER_ANNOUNCE) {
		list_add(&(rb->lh), &(q->announce_waiting));
		kref_get(&(container_of(rb, struct announce_data, rb)->ref));
	} else if (caller == QOS_CALLER_CONN) {
		struct conn *rconn = container_of(rb, struct conn,
				target.out.rb);
		mutex_lock(&(rconn->rcv_lock));
		BUG_ON(rconn->targettype != TARGET_OUT);
		list_add(&(rb->lh), &(q->conns_waiting));
		kref_get(&(rconn->ref));
		mutex_unlock(&(rconn->rcv_lock));
	} else {
		BUG();
	}

	if (qos_resume_scheduled == 0) {
		schedule_delayed_work(&(qos_resume_work), 1);
		qos_resume_scheduled = 1;
	}

out:
	mutex_unlock(&(queues_lock));
}

void qos_enqueue_kpacket(struct neighbor *nb)
{
	qos_enqueue(nb->dev, &(nb->rb_kp), QOS_CALLER_KPACKET);
}

static void qos_enqueue_conn_retrans(struct neighbor *nb)
{
	qos_enqueue(nb->dev, &(nb->rb_cr), QOS_CALLER_CONN_RETRANS);
}

void qos_remove_conn(struct conn *rconn)
{
	mutex_lock(&(rconn->rcv_lock));
	if (rconn->targettype != TARGET_OUT)
		goto out;

#warning todo

out:
	mutex_unlock(&(rconn->rcv_lock));
}

#warning todo oom
void qos_enqueue_conn(struct conn *rconn, int oom)
{
	BUG_ON(rconn->targettype != TARGET_OUT);
	qos_enqueue(rconn->target.out.nb->dev, &(rconn->target.out.rb),
			QOS_CALLER_CONN);
}

static int may_send_conn_retrans(struct neighbor *nb)
{
	struct qos_queue *q;
	int rc = 0;

	mutex_lock(&(queues_lock));

	q = get_queue(nb->dev);
	if (unlikely(q == 0))
		goto out;

	rc = (list_empty(&(q->kpackets_waiting)));

out:
	mutex_unlock(&(queues_lock));

	return rc;
}

static int may_send_conn(struct conn *rconn)
{
	struct qos_queue *q;
	int rc = 0;

	mutex_lock(&(queues_lock));

	q = get_queue(rconn->target.out.nb->dev);
	if (unlikely(q == 0))
		goto out;

	rc = (list_empty(&(q->kpackets_waiting)) &&
			list_empty(&(q->conn_retrans_waiting)) &&
			list_empty(&(q->announce_waiting)) &&
			list_empty(&(q->conns_waiting)));

out:
	mutex_unlock(&(queues_lock));

	return rc;
}
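
/*
 * Build an skb for a data packet: link level header, then 1 byte
 * PACKET_TYPE_DATA, 4 bytes conn_id, 4 bytes seqno (written via put_u32());
 * the caller appends the payload with skb_put().
 */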
struct sk_buff *create_packet(struct neighbor *nb, int size,
		gfp_t alloc_flags, __u32 conn_id, __u32 seqno)
{
	struct sk_buff *ret;
	char *dest;

	ret = alloc_skb(size + 9 + LL_ALLOCATED_SPACE(nb->dev), alloc_flags);
	if (unlikely(0 == ret))
		return 0;

	ret->protocol = htons(ETH_P_COR);
	ret->dev = nb->dev;

	skb_reserve(ret, LL_RESERVED_SPACE(nb->dev));
	if (unlikely(dev_hard_header(ret, nb->dev, ETH_P_COR, nb->mac,
			nb->dev->dev_addr, ret->len) < 0)) {
		/* do not leak the skb if the header cannot be built */
		kfree_skb(ret);
		return 0;
	}
	skb_reset_network_header(ret);

	dest = skb_put(ret, 9);
	BUG_ON(0 == dest);

	dest[0] = PACKET_TYPE_DATA;
	dest += 1;

	put_u32(dest, conn_id, 1);
	dest += 4;
	put_u32(dest, seqno, 1);
	dest += 4;

	return ret;
}
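
/*
 * Retransmit timeout: a fixed 100ms plus the measured neighbor latency and
 * the maximum remote control-message delay (both presumably kept in
 * microseconds, matching usecs_to_jiffies()).
 */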
static void set_conn_retrans_timeout(struct conn_retrans *cr)
{
	struct neighbor *nb = cr->rconn->target.out.nb;
	cr->timeout = jiffies + usecs_to_jiffies(100000 +
			((__u32) atomic_read(&(nb->latency))) +
			((__u32) atomic_read(&(nb->max_remote_cmsg_delay))));
}
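
/*
 * Requeue a conn_retrans that is about to be (re)sent. If only the first
 * "length" bytes are sent now, the remainder is split off into a new
 * conn_retrans, which is returned. *dontsend is set if the data was acked
 * in the meantime.
 */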
static struct conn_retrans *readd_conn_retrans(struct conn_retrans *cr,
		struct neighbor *nb, __u32 length, int *dontsend)
{
	unsigned long iflags;

	struct conn_retrans *ret = 0;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	if (unlikely(cr->ackrcvd)) {
		*dontsend = 1;
		goto out;
	} else
		*dontsend = 0;

	if (unlikely(cr->length > length)) {
		ret = kmem_cache_alloc(connretrans_slab, GFP_ATOMIC);
		if (unlikely(ret == 0)) {
			cr->timeout = jiffies + 1;
			goto out;
		}

		memset(ret, 0, sizeof (struct conn_retrans));
		ret->rconn = cr->rconn;
		kref_get(&(cr->rconn->ref));
		ret->seqno = cr->seqno + length;
		ret->length = cr->length - length;
		kref_init(&(ret->ref));

		list_add(&(ret->timeout_list), &(nb->retrans_list_conn));
		list_add(&(ret->conn_list), &(cr->conn_list));

		cr->length = length;
	} else {
		list_del(&(cr->timeout_list));
		list_add_tail(&(cr->timeout_list), &(nb->retrans_list_conn));
		set_conn_retrans_timeout(cr);

		BUG_ON(cr->length != length);
	}

out:
	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

	return ret;
}

/* rcvlock *must* be held while calling this */
void cancel_retrans(struct conn *rconn)
{
	unsigned long iflags;
	struct neighbor *nb = rconn->target.out.nb;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	while (list_empty(&(rconn->target.out.retrans_list)) == 0) {
		struct conn_retrans *cr = container_of(
				rconn->target.out.retrans_list.next,
				struct conn_retrans, conn_list);
		BUG_ON(cr->rconn != rconn);

		list_del(&(cr->timeout_list));
		list_del(&(cr->conn_list));
		cr->ackrcvd = 1;
		kref_put(&(cr->ref), free_connretrans);
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
}
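
/*
 * Retransmit one conn_retrans: full-mss chunks are sent as data packets,
 * a remaining chunk smaller than the mss goes out as conn data attached to
 * a control message. Returns nonzero if the qos queue was full.
 */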
static int _send_retrans(struct neighbor *nb, struct conn_retrans *cr)
{
	/* cr may become 0 below, keep the conn for the final unlock/kref_put */
	struct conn *rconn = cr->rconn;
	int targetmss = mss(nb);
	int dontsend;
	int queuefull = 0;

	mutex_lock(&(rconn->rcv_lock));

	BUG_ON(rconn->targettype != TARGET_OUT);
	BUG_ON(rconn->target.out.nb != nb);

	kref_get(&(rconn->ref));

	if (unlikely(atomic_read(&(rconn->isreset)) != 0)) {
		cancel_retrans(rconn);
		goto out;
	}

	while (cr->length >= targetmss) {
		struct sk_buff *skb;
		char *dst;
		struct conn_retrans *cr2;
		int rc;

		if (may_send_conn_retrans(nb) == 0)
			goto qos_enqueue;

		skb = create_packet(nb, targetmss, GFP_KERNEL,
				rconn->target.out.conn_id, cr->seqno);
		if (unlikely(skb == 0)) {
			cr->timeout = jiffies + 1;
			goto out;
		}

		cr2 = readd_conn_retrans(cr, nb, targetmss, &dontsend);
		if (unlikely(unlikely(dontsend) || unlikely(cr2 == 0 &&
				unlikely(cr->length > targetmss)))) {
			kfree_skb(skb);
			goto out;
		}

		dst = skb_put(skb, targetmss);

		databuf_pullold(&(rconn->buf), cr->seqno, dst, targetmss);
		rc = dev_queue_xmit(skb);

		if (rc != 0) {
			unsigned long iflags;

			spin_lock_irqsave(&(nb->retrans_lock), iflags);
			if (unlikely(cr->ackrcvd)) {
				dontsend = 1;
			} else {
				list_del(&(cr->timeout_list));
				list_add(&(cr->timeout_list),
						&(nb->retrans_list_conn));
			}
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
			if (dontsend == 0)
				goto qos_enqueue;
		}

		cr = cr2;

		if (likely(cr == 0))
			goto out;
	}

	if (unlikely(cr->length <= 0)) {
		BUG();
	} else {
		struct control_msg_out *cm;
		char *buf = kmalloc(cr->length, GFP_KERNEL);

		if (unlikely(buf == 0)) {
			cr->timeout = jiffies + 1;
			goto out;
		}

		cm = alloc_control_msg(nb, ACM_PRIORITY_MED);
		if (unlikely(cm == 0)) {
			cr->timeout = jiffies + 1;
			kfree(buf);
			goto out;
		}

		databuf_pullold(&(rconn->buf), cr->seqno, buf, cr->length);

		if (unlikely(readd_conn_retrans(cr, nb, cr->length, &dontsend)
				!= 0))
			BUG();

		if (likely(dontsend == 0)) {
			send_conndata(cm, rconn->target.out.conn_id,
					cr->seqno, buf, buf, cr->length);
		}
	}

	if (0) {
qos_enqueue:
		queuefull = 1;
	}
out:
	mutex_unlock(&(rconn->rcv_lock));

	kref_put(&(rconn->ref), free_conn);

	return queuefull;
}
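
/*
 * Walk the neighbor's retransmit list and resend everything whose timeout
 * has expired. The timer is rescheduled for the next pending entry; the
 * neighbor reference taken when the timer was armed is dropped unless the
 * work gets rescheduled or requeued.
 */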
static int send_retrans(struct neighbor *nb, int fromqos)
{
	unsigned long iflags;

	struct conn_retrans *cr = 0;

	int nbstate;
	int rescheduled = 0;
	int queuefull = 0;

	spin_lock_irqsave(&(nb->state_lock), iflags);
	nbstate = nb->state;
	spin_unlock_irqrestore(&(nb->state_lock), iflags);

	while (1) {
		spin_lock_irqsave(&(nb->retrans_lock), iflags);

		if (list_empty(&(nb->retrans_list_conn))) {
			nb->retrans_timer_conn_running = 0;
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
			break;
		}

		cr = container_of(nb->retrans_list_conn.next,
				struct conn_retrans, timeout_list);

		BUG_ON(cr->rconn->targettype != TARGET_OUT);

		if (unlikely(unlikely(nbstate == NEIGHBOR_STATE_KILLED) ||
				unlikely(atomic_read(
				&(cr->rconn->isreset)) != 0))) {
			list_del(&(cr->timeout_list));
			list_del(&(cr->conn_list));
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

			kref_put(&(cr->ref), free_connretrans);
			continue;
		}

		BUG_ON(nb != cr->rconn->target.out.nb);

		if (time_after(cr->timeout, jiffies)) {
			schedule_delayed_work(&(nb->retrans_timer_conn),
					cr->timeout - jiffies);
			rescheduled = 1;
			spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
			break;
		}

		kref_get(&(cr->ref));
		spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
		queuefull = _send_retrans(nb, cr);
		kref_put(&(cr->ref), free_connretrans);
		if (queuefull) {
			rescheduled = 1;
			if (fromqos == 0)
				qos_enqueue_conn_retrans(nb);
			break;
		}
	}

	if (rescheduled == 0)
		kref_put(&(nb->ref), neighbor_free);

	return queuefull;
}

void retransmit_conn_timerfunc(struct work_struct *work)
{
	struct neighbor *nb = container_of(to_delayed_work(work),
			struct neighbor, retrans_timer_conn);

	send_retrans(nb, 0);
}

static struct conn_retrans *search_seqno(struct conn *rconn, __u32 seqno)
{
	struct list_head *next = rconn->target.out.retrans_list.next;

	while (next != &(rconn->target.out.retrans_list)) {
		struct conn_retrans *cr = container_of(next,
				struct conn_retrans, conn_list);
		BUG_ON(cr->rconn != rconn);
		if (cr->seqno + cr->length > seqno)
			return cr;
		next = next->next;
	}
	return 0;
}
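
/*
 * Process an ack for outgoing conn data. "seqno" acks everything sent up to
 * that point; "seqno_ooo"/"length" additionally ack a block received out of
 * order. Fully acked conn_retrans entries are dropped, and the send window
 * is advanced if the ack carries newer window information
 * (kp_windowsetseqno).
 */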
void conn_ack_rcvd(__u32 kpacket_seqno, struct conn *rconn, __u32 seqno,
		__u8 window, __u32 seqno_ooo, __u32 length)
{
	unsigned long iflags;
	struct neighbor *nb = rconn->target.out.nb;
	struct conn_retrans *cr = 0;

	int setwindow = 0;

	BUG_ON(rconn->targettype != TARGET_OUT);

	mutex_lock(&(rconn->rcv_lock));

	if (unlikely(((__s32) (seqno - rconn->target.out.seqno_nextsend)) > 0))
		goto out;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	if (likely(length == 0))
		goto in_order;

	cr = search_seqno(rconn, seqno_ooo);

	while (cr != 0) {
		struct list_head *next = cr->conn_list.next;
		struct conn_retrans *nextcr = 0;
		if (next != &(rconn->target.out.retrans_list)) {
			nextcr = container_of(next, struct conn_retrans,
					conn_list);
		}

		if (((__s32) (cr->seqno + cr->length - seqno_ooo - length)) > 0) {
			__u32 newseqno = seqno_ooo + length;
			cr->length -= (newseqno - cr->seqno);
			cr->seqno = newseqno;
			break;
		} else {
			list_del(&(cr->timeout_list));
			list_del(&(cr->conn_list));
			cr->ackrcvd = 1;
			kref_put(&(cr->ref), free_connretrans);
		}

		cr = nextcr;
	}

	if (unlikely(list_empty(&(rconn->target.out.retrans_list))) == 0) {
		struct conn_retrans *cr = container_of(
				rconn->target.out.retrans_list.next,
				struct conn_retrans, conn_list);
		if (unlikely(((__s32) (cr->seqno -
				rconn->target.out.seqno_acked)) > 0)) {
			rconn->target.out.seqno_acked = cr->seqno;
		}
	}

in_order:
	if (likely(((__s32) (seqno - rconn->target.out.seqno_acked)) > 0)) {
		rconn->target.out.seqno_acked = seqno;
		setwindow = 1;
	}

	cr = search_seqno(rconn, seqno_ooo);

	while (cr != 0) {
		struct list_head *next = cr->conn_list.next;
		struct conn_retrans *nextcr = 0;
		if (next != &(rconn->target.out.retrans_list)) {
			nextcr = container_of(next, struct conn_retrans,
					conn_list);
		}

		if (((__s32) (cr->seqno + cr->length -
				rconn->target.out.seqno_acked)) <= 0) {
			list_del(&(cr->timeout_list));
			list_del(&(cr->conn_list));
			cr->ackrcvd = 1;
			kref_put(&(cr->ref), free_connretrans);
		}

		cr = nextcr;
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
	databuf_ack(rconn, rconn->target.out.seqno_acked);

	setwindow = setwindow || (seqno == rconn->target.out.seqno_acked &&
			(kpacket_seqno - rconn->target.out.kp_windowsetseqno >
			0));
	if (setwindow) {
		rconn->target.out.kp_windowsetseqno = kpacket_seqno;
		rconn->target.out.seqno_windowlimit = seqno +
				dec_window(window);
	}

out:
	mutex_unlock(&(rconn->rcv_lock));
}

static void schedule_retransmit_conn(struct conn_retrans *cr, struct conn *rconn,
		__u32 seqno, __u32 len)
{
	unsigned long iflags;

	struct neighbor *nb = rconn->target.out.nb;

	int first;

	BUG_ON(rconn->targettype != TARGET_OUT);

	memset(cr, 0, sizeof (struct conn_retrans));
	cr->rconn = rconn;
	kref_get(&(rconn->ref));
	cr->seqno = seqno;
	cr->length = len;
	kref_init(&(cr->ref));
	set_conn_retrans_timeout(cr);

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	first = unlikely(list_empty(&(nb->retrans_list_conn)));
	list_add_tail(&(cr->timeout_list), &(nb->retrans_list_conn));

	list_add_tail(&(cr->conn_list), &(rconn->target.out.retrans_list));

	if (unlikely(unlikely(first) &&
			unlikely(nb->retrans_timer_conn_running == 0))) {
		schedule_delayed_work(&(nb->retrans_timer_conn),
				cr->timeout - jiffies);
		nb->retrans_timer_conn_running = 1;
		kref_get(&(nb->ref));
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
}
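
/*
 * Send as much buffered data of a conn as allowed: full-mss packets go out
 * directly via dev_queue_xmit(), a smaller remainder is attached to a
 * control message. Every sent byte costs "creditsperbyte" credits and gets
 * a conn_retrans scheduled for it.
 */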
static int _flush_out(struct conn *rconn, int fromqos, __u32 creditsperbyte)
{
	int targetmss = mss(rconn->target.out.nb);
	__u32 seqno;

#warning todo honor window size, balance credits

	BUG_ON(rconn->targettype != TARGET_OUT);

	if (unlikely(rconn->target.out.conn_id == 0))
		return RC_FLUSH_CONN_OUT_OK;

	if (unlikely(atomic_read(&(rconn->isreset)) != 0))
		return RC_FLUSH_CONN_OUT_OK;

	if (fromqos == 0 && may_send_conn(rconn) == 0)
		return RC_FLUSH_CONN_OUT_CONG;

	while (rconn->buf.read_remaining >= targetmss) {
		struct conn_retrans *cr;
		struct sk_buff *skb;
		char *dst;
		int rc;

		if (unlikely(creditsperbyte * targetmss >
				rconn->credits))
			return RC_FLUSH_CONN_OUT_CREDITS;

		seqno = rconn->target.out.seqno_nextsend;
		skb = create_packet(rconn->target.out.nb, targetmss, GFP_ATOMIC,
				rconn->target.out.conn_id, seqno);
		if (unlikely(skb == 0))
			return RC_FLUSH_CONN_OUT_OOM;

		cr = kmem_cache_alloc(connretrans_slab, GFP_KERNEL);
		if (unlikely(cr == 0)) {
			kfree_skb(skb);
			return RC_FLUSH_CONN_OUT_OOM;
		}

		dst = skb_put(skb, targetmss);

		databuf_pull(&(rconn->buf), dst, targetmss);

		rc = dev_queue_xmit(skb);
		if (rc != 0) {
			databuf_unpull(&(rconn->buf), targetmss);
			kmem_cache_free(connretrans_slab, cr);
			return RC_FLUSH_CONN_OUT_CONG;
		}

		rconn->credits -= creditsperbyte * targetmss;
		rconn->target.out.seqno_nextsend += targetmss;
		schedule_retransmit_conn(cr, rconn, seqno, targetmss);
	}

	if (rconn->buf.read_remaining > 0) {
		struct control_msg_out *cm;
		struct conn_retrans *cr;
		__u32 len = rconn->buf.read_remaining;
		char *buf;

		if (unlikely(creditsperbyte * len > rconn->credits))
			return RC_FLUSH_CONN_OUT_CREDITS;

		buf = kmalloc(len, GFP_KERNEL);
		if (unlikely(buf == 0))
			return RC_FLUSH_CONN_OUT_OOM;

		cm = alloc_control_msg(rconn->target.out.nb, ACM_PRIORITY_MED);
		if (unlikely(cm == 0)) {
			kfree(buf);
			return RC_FLUSH_CONN_OUT_OOM;
		}

		cr = kmem_cache_alloc(connretrans_slab, GFP_KERNEL);
		if (unlikely(cr == 0)) {
			kfree(buf);
			free_control_msg(cm);
			return RC_FLUSH_CONN_OUT_CONG;
		}

		databuf_pull(&(rconn->buf), buf, len);

		seqno = rconn->target.out.seqno_nextsend;
		rconn->credits -= creditsperbyte * len;
		rconn->target.out.seqno_nextsend += len;

		schedule_retransmit_conn(cr, rconn, seqno, len);

		send_conndata(cm, rconn->target.out.conn_id, seqno, buf, buf,
				len);
	}

	wake_sender(rconn);

	return RC_FLUSH_CONN_OUT_OK;
}

int flush_out(struct conn *rconn)
{
	return _flush_out(rconn, 0, 0);
}

int __init cor_snd_init(void)
{
	connretrans_slab = kmem_cache_create("cor_connretrans",
			sizeof(struct conn_retrans), 8, 0, 0);

	if (unlikely(connretrans_slab == 0))
		return 1;

	INIT_DELAYED_WORK(&(qos_resume_work), qos_resume);
	qos_resume_scheduled = 0;

	return 0;
}

MODULE_LICENSE("GPL");