/*
 * Connection oriented routing
 * Copyright (C) 2007-2010 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

#include "cor.h"

struct kmem_cache *connretrans_slab;

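/*
 * One conn_retrans tracks one contiguous chunk of sent-but-unacked conn
 * data. It is linked into two lists at once: the per-neighbor
 * retrans_list_conn (ordered by timeout) and the per-conn
 * target.out.retrans_list (ordered by seqno).
 */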
struct conn_retrans {
	/* timeout_list and conn_list share a single ref */
	struct kref ref;

	struct list_head timeout_list;
	struct list_head conn_list;
	struct htab_entry htab_entry;

	struct conn *rconn;
	__u32 seqno;
	__u32 length;
	__u8 ackrcvd;

	unsigned long timeout;
};

static void free_connretrans(struct kref *ref)
{
	struct conn_retrans *cr = container_of(ref, struct conn_retrans, ref);
	struct conn *rconn = cr->rconn;

	kmem_cache_free(connretrans_slab, cr);
	kref_put(&(rconn->ref), free_conn);
}

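/*
 * queues_lock protects the global list of per-device qos queues as well
 * as the scheduling state of the qos resume work below.
 */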
DEFINE_MUTEX(queues_lock);

LIST_HEAD(queues);

struct delayed_work qos_resume_work;
int qos_resume_scheduled;

struct qos_queue {
	struct list_head queue_list;

	struct net_device *dev;

	struct list_head kpackets_waiting;
	struct list_head conn_retrans_waiting;
	struct list_head announce_waiting;
	struct list_head conns_waiting;
};

static int _flush_out(struct conn *rconn, int fromqos, __u32 creditsperbyte);

/* Highest bidder "pays" the credits the second-highest bidder has bid */
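/*
 * A conn's bid is its credits expressed per waiting byte: multiply_div()
 * scales the credits by 2^32 and divides by buf.read_remaining, giving a
 * 32.32 fixed-point credits-per-byte value. The winning conn is flushed
 * with creditsperbyte set to the integer part (>> 32) of the
 * second-highest bid.
 */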
static int _resume_conns(struct qos_queue *q)
{
	struct conn *best = 0;
	__u64 bestcredit = 0;
	__u64 secondcredit = 0;

	int rc;

	struct list_head *lh = q->conns_waiting.next;

	while (lh != &(q->conns_waiting)) {
		struct conn *rconn = container_of(lh, struct conn,
				target.out.rb.lh);
		__u64 credits;

		lh = lh->next;

		refresh_conn_credits(rconn);

		mutex_lock(&(rconn->rcv_lock));

		BUG_ON(rconn->targettype != TARGET_OUT);

		if (atomic_read(&(rconn->isreset)) != 0) {
			rconn->target.out.rb.in_queue = 0;
			list_del(&(rconn->target.out.rb.lh));
			mutex_unlock(&(rconn->rcv_lock));
			kref_put(&(rconn->ref), free_conn);

			continue;
		}

		BUG_ON(rconn->buf.read_remaining == 0);

		if (rconn->credits <= 0)
			credits = 0;
		else
			credits = multiply_div(rconn->credits,
					((__u64) 1) << 32,
					rconn->buf.read_remaining);
		mutex_unlock(&(rconn->rcv_lock));

		if (best == 0 || bestcredit < credits) {
			secondcredit = bestcredit;
			best = rconn;
			bestcredit = credits;
		} else if (secondcredit < credits) {
			secondcredit = credits;
		}
	}

	if (best == 0)
		return RC_FLUSH_CONN_OUT_OK;

	mutex_lock(&(best->rcv_lock));
	rc = _flush_out(best, 1, (__u32) (secondcredit >> 32));

	if (rc == RC_FLUSH_CONN_OUT_OK) {
		best->target.out.rb.in_queue = 0;
		list_del(&(best->target.out.rb.lh));
	}
	mutex_unlock(&(best->rcv_lock));

	if (rc == RC_FLUSH_CONN_OUT_OK)
		kref_put(&(best->ref), free_conn);

	return rc;
}

static int resume_conns(struct qos_queue *q)
{
	while (list_empty(&(q->conns_waiting)) == 0) {
		int rc = _resume_conns(q);
		if (rc != RC_FLUSH_CONN_OUT_OK)
			return 1;
	}

	return 0;
}

static int send_retrans(struct neighbor *nb, int fromqos);

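/*
 * Resumes one class of queued senders (kernel packets, conn retransmits
 * or announces, selected by "caller") until the class is empty or a send
 * reports failure. On failure the entry is put back on its queue;
 * otherwise the queue's reference on its owner is dropped.
 */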
static int _qos_resume(struct qos_queue *q, int caller)
{
	int rc = 0;

	struct list_head *lh;

	if (caller == QOS_CALLER_KPACKET)
		lh = &(q->kpackets_waiting);
	else if (caller == QOS_CALLER_CONN_RETRANS)
		lh = &(q->conn_retrans_waiting);
	else if (caller == QOS_CALLER_ANNOUNCE)
		lh = &(q->announce_waiting);
	else
		BUG();

	while (list_empty(lh) == 0) {
		struct list_head *curr = lh->next;
		struct resume_block *rb = container_of(curr,
				struct resume_block, lh);
		rb->in_queue = 0;
		list_del(curr);

		if (caller == QOS_CALLER_KPACKET) {
			struct neighbor *nb = container_of(rb, struct neighbor,
					rb_kp);
			rc = resume_send_messages(nb);
		} else if (caller == QOS_CALLER_CONN_RETRANS) {
			struct neighbor *nb = container_of(rb, struct neighbor,
					rb_cr);
#warning todo do not send if neighbor is stalled
			rc = send_retrans(nb, 1);
		} else if (caller == QOS_CALLER_ANNOUNCE) {
			struct announce_data *ann = container_of(rb,
					struct announce_data, rb);
			rc = send_announce_qos(ann);
		} else {
			BUG();
		}

		if (rc != 0 && rb->in_queue == 0) {
			rb->in_queue = 1;
			list_add(curr, lh);
		} else {
			if (caller == QOS_CALLER_KPACKET) {
				kref_put(&(container_of(rb, struct neighbor,
						rb_kp)->ref), neighbor_free);
			} else if (caller == QOS_CALLER_CONN_RETRANS) {
				kref_put(&(container_of(rb, struct neighbor,
						rb_cr)->ref), neighbor_free);
			} else if (caller == QOS_CALLER_ANNOUNCE) {
				kref_put(&(container_of(rb,
						struct announce_data, rb)->ref),
						announce_data_free);
			} else {
				BUG();
			}
		}

		if (rc != 0)
			break;
	}

	return rc;
}

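/*
 * Worker for qos_resume_work: walks all device queues and retries the
 * queued senders of each class, finishing with the queued conns. A queue
 * whose device is gone is freed once it has been drained. While anything
 * remains unsent, the work reschedules itself one jiffy later.
 */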
static void qos_resume(struct work_struct *work)
{
	struct list_head *curr;
	int congested = 0;

	mutex_lock(&(queues_lock));

	curr = queues.next;
	while (curr != (&queues)) {
		struct qos_queue *q = container_of(curr,
				struct qos_queue, queue_list);
		int i;
		int rc = 0;

		for (i=0;i<4;i++) {
			if (i == 3)
				rc = resume_conns(q);
			else
				rc = _qos_resume(q, i);

			if (rc != 0) {
				congested = 1;
				break;
			}
		}

		curr = curr->next;

		if (i == 4 && unlikely(q->dev == 0)) {
			list_del(&(q->queue_list));
			kfree(q);
		}
	}

	qos_resume_scheduled = 0;

	if (congested) {
		schedule_delayed_work(&(qos_resume_work), 1);
		qos_resume_scheduled = 1;
	}

	mutex_unlock(&(queues_lock));
}

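/* queues_lock must be held */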
static struct qos_queue *get_queue(struct net_device *dev)
{
	struct list_head *curr = queues.next;
	while (curr != (&queues)) {
		struct qos_queue *q = container_of(curr,
				struct qos_queue, queue_list);
		if (q->dev == dev)
			return q;
		curr = curr->next;
	}
	return 0;
}

int destroy_queue(struct net_device *dev)
{
	struct qos_queue *q;

	mutex_lock(&(queues_lock));

	q = get_queue(dev);
	if (q == 0) {
		mutex_unlock(&(queues_lock));
		return 1;
	}

	/* the queue itself is freed by qos_resume once it is drained */
	q->dev = 0;
	dev_put(dev);

	mutex_unlock(&(queues_lock));

	return 0;
}

int create_queue(struct net_device *dev)
{
	struct qos_queue *q = kmalloc(sizeof(struct qos_queue), GFP_KERNEL);

	if (q == 0) {
		printk(KERN_ERR "cor: unable to allocate memory for device "
				"queue, not enabling device\n");
		return 1;
	}

	q->dev = dev;
	dev_hold(dev);

	INIT_LIST_HEAD(&(q->kpackets_waiting));
	INIT_LIST_HEAD(&(q->conn_retrans_waiting));
	INIT_LIST_HEAD(&(q->announce_waiting));
	INIT_LIST_HEAD(&(q->conns_waiting));

	mutex_lock(&(queues_lock));
	list_add(&(q->queue_list), &queues);
	mutex_unlock(&(queues_lock));

	return 0;
}

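/*
 * Queues a sender on the device queue for the given caller class and
 * takes a reference on the owning object. The actual send attempt happens
 * in qos_resume(), which is scheduled here if it is not pending already.
 */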
void qos_enqueue(struct net_device *dev, struct resume_block *rb, int caller)
{
	struct qos_queue *q;

	mutex_lock(&(queues_lock));

	q = get_queue(dev);
	if (unlikely(q == 0))
		goto out;

	if (rb->in_queue)
		goto out;
	rb->in_queue = 1;

	if (caller == QOS_CALLER_KPACKET) {
		list_add(&(rb->lh), &(q->kpackets_waiting));
		kref_get(&(container_of(rb, struct neighbor, rb_kp)->ref));
	} else if (caller == QOS_CALLER_CONN_RETRANS) {
		list_add(&(rb->lh), &(q->conn_retrans_waiting));
		kref_get(&(container_of(rb, struct neighbor, rb_cr)->ref));
	} else if (caller == QOS_CALLER_ANNOUNCE) {
		list_add(&(rb->lh), &(q->announce_waiting));
		kref_get(&(container_of(rb, struct announce_data, rb)->ref));
	} else if (caller == QOS_CALLER_CONN) {
		struct conn *rconn = container_of(rb, struct conn,
				target.out.rb);
		mutex_lock(&(rconn->rcv_lock));
		BUG_ON(rconn->targettype != TARGET_OUT);
		list_add(&(rb->lh), &(q->conns_waiting));
		kref_get(&(rconn->ref));
		mutex_unlock(&(rconn->rcv_lock));
	} else {
		BUG();
	}

	if (qos_resume_scheduled == 0) {
		schedule_delayed_work(&(qos_resume_work), 1);
		qos_resume_scheduled = 1;
	}

out:
	mutex_unlock(&(queues_lock));
}

void qos_enqueue_kpacket(struct neighbor *nb)
{
	qos_enqueue(nb->dev, &(nb->rb_kp), QOS_CALLER_KPACKET);
}

static void qos_enqueue_conn_retrans(struct neighbor *nb)
{
	qos_enqueue(nb->dev, &(nb->rb_cr), QOS_CALLER_CONN_RETRANS);
}

void qos_remove_conn(struct conn *rconn)
{
	int queued = 0;

	mutex_lock(&(queues_lock));
	mutex_lock(&(rconn->rcv_lock));

	if (rconn->targettype != TARGET_OUT)
		goto out;

	queued = rconn->target.out.rb.in_queue;
	if (queued) {
		rconn->target.out.rb.in_queue = 0;
		list_del(&(rconn->target.out.rb.lh));
	}

out:
	mutex_unlock(&(rconn->rcv_lock));
	mutex_unlock(&(queues_lock));

	if (queued)
		kref_put(&(rconn->ref), free_conn);
}

void qos_enqueue_conn(struct conn *rconn, int oom)
{
	BUG_ON(rconn->targettype != TARGET_OUT);
	qos_enqueue(rconn->target.out.nb->dev, &(rconn->target.out.rb),
			QOS_CALLER_CONN);
}

static int may_send_conn_retrans(struct neighbor *nb)
{
	struct qos_queue *q;
	int rc = 0;

	mutex_lock(&(queues_lock));

	q = get_queue(nb->dev);
	if (unlikely(q == 0))
		goto out;

	rc = (list_empty(&(q->kpackets_waiting)));

out:
	mutex_unlock(&(queues_lock));

	return rc;
}

static int may_send_conn(struct conn *rconn)
{
	struct qos_queue *q;
	int rc = 0;

	mutex_lock(&(queues_lock));

	q = get_queue(rconn->target.out.nb->dev);
	if (unlikely(q == 0))
		goto out;

	rc = (list_empty(&(q->kpackets_waiting)) &&
			list_empty(&(q->conn_retrans_waiting)) &&
			list_empty(&(q->announce_waiting)) &&
			list_empty(&(q->conns_waiting)));

out:
	mutex_unlock(&(queues_lock));

	return rc;
}

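/*
 * Allocates an skb with room for the link-layer header, a 9 byte cor data
 * header (1 byte packet type, 4 bytes conn_id, 4 bytes seqno) and "size"
 * bytes of payload. The caller fills in the payload via skb_put().
 */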
struct sk_buff *create_packet(struct neighbor *nb, int size,
		gfp_t alloc_flags, __u32 conn_id, __u32 seqno)
{
	struct sk_buff *ret;
	char *dest;

	ret = alloc_skb(size + 9 + LL_ALLOCATED_SPACE(nb->dev), alloc_flags);
	if (unlikely(0 == ret))
		return 0;

	ret->protocol = htons(ETH_P_COR);
	ret->dev = nb->dev;

	skb_reserve(ret, LL_RESERVED_SPACE(nb->dev));
	if (unlikely(dev_hard_header(ret, nb->dev, ETH_P_COR, nb->mac,
			nb->dev->dev_addr, ret->len) < 0)) {
		kfree_skb(ret);
		return 0;
	}
	skb_reset_network_header(ret);

	dest = skb_put(ret, 9);
	BUG_ON(dest == 0);

	dest[0] = PACKET_TYPE_DATA;
	dest += 1;

	put_u32(dest, conn_id, 1);
	dest += 4;
	put_u32(dest, seqno, 1);
	dest += 4;

	return ret;
}

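/*
 * Retransmit timeout: a fixed 100ms plus the neighbor's measured latency
 * and the maximum control-message delay the remote side has announced
 * (both kept in microseconds on the neighbor).
 */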
static void set_conn_retrans_timeout(struct conn_retrans *cr)
{
	struct neighbor *nb = cr->rconn->target.out.nb;
	cr->timeout = jiffies + usecs_to_jiffies(100000 +
			((__u32) atomic_read(&(nb->latency))) +
			((__u32) atomic_read(&(nb->max_remote_cmsg_delay))));
}

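/*
 * Requeues cr at the tail of the neighbor's retransmit list with a fresh
 * timeout before (re)sending up to "length" bytes of it. If cr is longer
 * than that, the remainder is split off into a new conn_retrans, which is
 * returned. Sets *dontsend if the chunk was acked in the meantime.
 */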
static struct conn_retrans *readd_conn_retrans(struct conn_retrans *cr,
		struct neighbor *nb, __u32 length, int *dontsend)
{
	unsigned long iflags;

	struct conn_retrans *ret = 0;

	spin_lock_irqsave( &(nb->retrans_lock), iflags );

	if (unlikely(cr->ackrcvd)) {
		*dontsend = 1;
		goto out;
	} else {
		*dontsend = 0;
	}

	if (unlikely(cr->length > length)) {
		ret = kmem_cache_alloc(connretrans_slab, GFP_ATOMIC);
		if (unlikely(ret == 0)) {
			cr->timeout = jiffies + 1;
			goto out;
		}

		memset(ret, 0, sizeof (struct conn_retrans));
		ret->rconn = cr->rconn;
		kref_get(&(cr->rconn->ref));
		ret->seqno = cr->seqno + length;
		ret->length = cr->length - length;
		kref_init(&(ret->ref));

		list_add(&(ret->timeout_list), &(nb->retrans_list_conn));
		list_add(&(ret->conn_list), &(cr->conn_list));

		cr->length = length;
	}

	list_del(&(cr->timeout_list));
	list_add_tail(&(cr->timeout_list), &(nb->retrans_list_conn));
	set_conn_retrans_timeout(cr);

	BUG_ON(cr->length != length);

out:
	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );

	return ret;
}

/* rcvlock *must* be held while calling this */
void cancel_retrans(struct conn *rconn)
{
	unsigned long iflags;
	struct neighbor *nb = rconn->target.out.nb;

	spin_lock_irqsave( &(nb->retrans_lock), iflags );

	while (list_empty(&(rconn->target.out.retrans_list)) == 0) {
		struct conn_retrans *cr = container_of(
				rconn->target.out.retrans_list.next,
				struct conn_retrans, conn_list);
		BUG_ON(cr->rconn != rconn);

		list_del(&(cr->timeout_list));
		list_del(&(cr->conn_list));
		cr->ackrcvd = 1;
		kref_put(&(cr->ref), free_connretrans);
	}

	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
}

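/*
 * Retransmits one conn_retrans: full-MSS chunks are resent as ordinary
 * data packets (splitting cr via readd_conn_retrans() as needed); a
 * remainder shorter than the MSS is resent through the control-message
 * path via send_conndata(). Returns nonzero if the device queue was full.
 */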
static int _send_retrans(struct neighbor *nb, struct conn_retrans *cr)
{
	int targetmss = mss(nb);
	int dontsend = 0;
	int queuefull = 0;

	mutex_lock(&(cr->rconn->rcv_lock));

	BUG_ON(cr->rconn->targettype != TARGET_OUT);
	BUG_ON(cr->rconn->target.out.nb != nb);

	kref_get(&(cr->rconn->ref));

	if (unlikely(atomic_read(&(cr->rconn->isreset)) != 0)) {
		cancel_retrans(cr->rconn);
		goto out;
	}

	while (cr->length >= targetmss) {
		struct sk_buff *skb;
		char *dst;
		struct conn_retrans *cr2;
		int rc;

		if (may_send_conn_retrans(nb) == 0) {
			queuefull = 1;
			goto out;
		}

		skb = create_packet(nb, targetmss, GFP_KERNEL,
				cr->rconn->target.out.conn_id, cr->seqno);
		if (unlikely(skb == 0)) {
			cr->timeout = jiffies + 1;
			goto out;
		}

		cr2 = readd_conn_retrans(cr, nb, targetmss, &dontsend);
		if (unlikely(unlikely(dontsend) || unlikely(cr2 == 0 &&
				unlikely(cr->length > targetmss)))) {
			kfree_skb(skb);
			goto out;
		}

		dst = skb_put(skb, targetmss);

		databuf_pullold(&(cr->rconn->buf), cr->seqno, dst, targetmss);
		rc = dev_queue_xmit(skb);

		if (rc != 0) {
			unsigned long iflags;

			spin_lock_irqsave( &(nb->retrans_lock), iflags );
			if (unlikely(cr->ackrcvd)) {
				dontsend = 1;
			} else {
				list_del(&(cr->timeout_list));
				list_add(&(cr->timeout_list),
						&(nb->retrans_list_conn));
			}
			spin_unlock_irqrestore( &(nb->retrans_lock), iflags );

			queuefull = 1;
			goto out;
		}

		if (cr2 == 0)
			goto out;

		cr = cr2;
	}

	if (unlikely(cr->length <= 0)) {
		BUG();
	} else {
		struct control_msg_out *cm;
		char *buf = kmalloc(cr->length, GFP_KERNEL);

		if (unlikely(buf == 0)) {
			cr->timeout = jiffies + 1;
			goto out;
		}

		cm = alloc_control_msg(nb, ACM_PRIORITY_MED);
		if (unlikely(cm == 0)) {
			cr->timeout = jiffies + 1;
			kfree(buf);
			goto out;
		}

		databuf_pullold(&(cr->rconn->buf), cr->seqno, buf, cr->length);

		if (unlikely(readd_conn_retrans(cr, nb, cr->length, &dontsend)
				!= 0))
			BUG();

		if (likely(dontsend == 0)) {
			send_conndata(cm, cr->rconn->target.out.conn_id,
					cr->seqno, buf, buf, cr->length);
		} else {
			free_control_msg(cm);
			kfree(buf);
		}
	}

out:
	mutex_unlock(&(cr->rconn->rcv_lock));

	kref_put(&(cr->rconn->ref), free_conn);

	return queuefull;
}

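/*
 * Works through the neighbor's conn retransmit list until it is empty,
 * the next timeout lies in the future (the timer is rearmed) or the
 * device queue fills up. Called from the retransmit timer (fromqos == 0)
 * and from _qos_resume() (fromqos == 1).
 */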
static int send_retrans(struct neighbor *nb, int fromqos)
{
	unsigned long iflags;

	struct conn_retrans *cr = 0;

	int nbstate;
	int queuefull = 0;
	int rescheduled = 0;

	spin_lock_irqsave( &(nb->state_lock), iflags );
	nbstate = nb->state;
	spin_unlock_irqrestore( &(nb->state_lock), iflags );

	while (1) {
		spin_lock_irqsave( &(nb->retrans_lock), iflags );

		if (list_empty(&(nb->retrans_list_conn))) {
			nb->retrans_timer_conn_running = 0;
			spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
			break;
		}

		cr = container_of(nb->retrans_list_conn.next,
				struct conn_retrans, timeout_list);

		BUG_ON(cr->rconn->targettype != TARGET_OUT);

		if (unlikely(unlikely(nbstate == NEIGHBOR_STATE_KILLED) ||
				unlikely(atomic_read(
				&(cr->rconn->isreset)) != 0))) {
			list_del(&(cr->timeout_list));
			list_del(&(cr->conn_list));
			spin_unlock_irqrestore( &(nb->retrans_lock), iflags );

			kref_put(&(cr->ref), free_connretrans);
			continue;
		}

		BUG_ON(nb != cr->rconn->target.out.nb);

		if (time_after(cr->timeout, jiffies)) {
			schedule_delayed_work(&(nb->retrans_timer_conn),
					cr->timeout - jiffies);
			rescheduled = 1;
			spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
			break;
		}

		kref_get(&(cr->ref));
		spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
		queuefull = _send_retrans(nb, cr);
		kref_put(&(cr->ref), free_connretrans);
		if (queuefull) {
			if (fromqos == 0)
				qos_enqueue_conn_retrans(nb);
			break;
		}
	}

	if (rescheduled == 0)
		kref_put(&(nb->ref), neighbor_free);

	return queuefull;
}

void retransmit_conn_timerfunc(struct work_struct *work)
{
	struct neighbor *nb = container_of(to_delayed_work(work),
			struct neighbor, retrans_timer_conn);

	send_retrans(nb, 0);
}

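/*
 * Returns the first conn_retrans of rconn that (partly) covers data at or
 * beyond seqno, or 0 if there is none. The per-conn retrans_list is kept
 * sorted by seqno.
 */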
static struct conn_retrans *search_seqno(struct conn *rconn, __u32 seqno)
{
	struct list_head *next = rconn->target.out.retrans_list.next;

	while (next != &(rconn->target.out.retrans_list)) {
		struct conn_retrans *cr = container_of(next,
				struct conn_retrans, conn_list);
		BUG_ON(cr->rconn != rconn);
		if (((__s32) (cr->seqno + cr->length - seqno)) > 0)
			return cr;
		next = next->next;
	}
	return 0;
}

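/*
 * Processes an ack contained in a kernel packet: seqno cumulatively acks
 * all data below it, while (seqno_ooo, length) acks a range received out
 * of order. Fully acked conn_retrans entries are dropped, partly acked
 * ones are trimmed, and the send window is updated from the window field.
 */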
void conn_ack_rcvd(__u32 kpacket_seqno, struct conn *rconn, __u32 seqno,
		__u8 window, __u32 seqno_ooo, __u32 length)
{
	unsigned long iflags;
	struct neighbor *nb = rconn->target.out.nb;
	struct conn_retrans *cr = 0;

	int setwindow = 0;

	BUG_ON(rconn->targettype != TARGET_OUT);

	mutex_lock(&(rconn->rcv_lock));

	if (unlikely(((__s32) (seqno - rconn->target.out.seqno_nextsend)) > 0))
		goto out;

	spin_lock_irqsave( &(nb->retrans_lock), iflags );

	if (likely(length == 0))
		goto cumulative;

	cr = search_seqno(rconn, seqno_ooo);

	while (cr != 0) {
		struct list_head *next = cr->conn_list.next;
		struct conn_retrans *nextcr = 0;
		if (next != &(rconn->target.out.retrans_list)) {
			nextcr = container_of(next, struct conn_retrans,
					conn_list);
		}

		if (((__s32)(cr->seqno + cr->length - seqno_ooo - length)) > 0) {
			__u32 newseqno = seqno_ooo + length;
			cr->length -= (newseqno - cr->seqno);
			cr->seqno = newseqno;
			break;
		} else {
			list_del(&(cr->timeout_list));
			list_del(&(cr->conn_list));
			cr->ackrcvd = 1;
			kref_put(&(cr->ref), free_connretrans);
		}

		cr = nextcr;
	}

	if (list_empty(&(rconn->target.out.retrans_list)) == 0) {
		struct conn_retrans *cr = container_of(
				rconn->target.out.retrans_list.next,
				struct conn_retrans, conn_list);
		if (unlikely(((__s32) (cr->seqno -
				rconn->target.out.seqno_acked)) > 0)) {
			rconn->target.out.seqno_acked = cr->seqno;
		}
	}

cumulative:
	if (likely(((__s32) (seqno - rconn->target.out.seqno_acked)) > 0)) {
		rconn->target.out.seqno_acked = seqno;
		setwindow = 1;
	}

	cr = search_seqno(rconn, seqno_ooo);

	while (cr != 0) {
		struct list_head *next = cr->conn_list.next;
		struct conn_retrans *nextcr = 0;
		if (next != &(rconn->target.out.retrans_list)) {
			nextcr = container_of(next, struct conn_retrans,
					conn_list);
		}

		if (((__s32)(cr->seqno + cr->length -
				rconn->target.out.seqno_acked)) <= 0) {
			list_del(&(cr->timeout_list));
			list_del(&(cr->conn_list));
			cr->ackrcvd = 1;
			kref_put(&(cr->ref), free_connretrans);
		} else {
			break;
		}

		cr = nextcr;
	}

	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
	databuf_ack(rconn, rconn->target.out.seqno_acked);

	setwindow = setwindow || (seqno == rconn->target.out.seqno_acked &&
			((__s32) (kpacket_seqno -
			rconn->target.out.kp_windowsetseqno)) > 0);
	if (setwindow) {
		rconn->target.out.kp_windowsetseqno = kpacket_seqno;
		/* dec_window() (assumed helper) expands the log-encoded
		 * window field to a byte count */
		rconn->target.out.seqno_windowlimit = seqno +
				dec_window(window);
	}

out:
	mutex_unlock(&(rconn->rcv_lock));
}

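/*
 * Creates the retransmit state for a chunk that is about to be sent for
 * the first time: cr covers [seqno, seqno+len) and is appended to both
 * the neighbor's timeout list and the conn's retrans list. The retransmit
 * timer is started if it is not running yet.
 */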
static void schedule_retransmit_conn(struct conn_retrans *cr, struct conn *rconn,
		__u32 seqno, __u32 len)
{
	unsigned long iflags;

	struct neighbor *nb = rconn->target.out.nb;

	int first;

	BUG_ON(rconn->targettype != TARGET_OUT);

	memset(cr, 0, sizeof (struct conn_retrans));
	cr->rconn = rconn;
	kref_get(&(rconn->ref));
	cr->seqno = seqno;
	cr->length = len;
	kref_init(&(cr->ref));
	set_conn_retrans_timeout(cr);

	spin_lock_irqsave( &(nb->retrans_lock), iflags );

	first = unlikely(list_empty(&(nb->retrans_list_conn)));
	list_add_tail(&(cr->timeout_list), &(nb->retrans_list_conn));

	list_add_tail(&(cr->conn_list), &(rconn->target.out.retrans_list));

	if (unlikely(unlikely(first) &&
			unlikely(nb->retrans_timer_conn_running == 0))) {
		schedule_delayed_work(&(nb->retrans_timer_conn),
				cr->timeout - jiffies);
		nb->retrans_timer_conn_running = 1;
		kref_get(&(nb->ref));
	}

	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
}

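/*
 * Sends as much buffered data of rconn as the device queue, the credits
 * and the allocations permit: full-MSS chunks as data packets, a shorter
 * tail through the control-message path. Each sent chunk gets a
 * conn_retrans via schedule_retransmit_conn(). creditsperbyte is the
 * price determined by the qos bidding; flush_out() passes 0.
 */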
static int _flush_out(struct conn *rconn, int fromqos, __u32 creditsperbyte)
{
	int targetmss = mss(rconn->target.out.nb);

	__u32 seqno;

#warning todo honor window size, balance credits

	BUG_ON(rconn->targettype != TARGET_OUT);

	if (unlikely(rconn->target.out.conn_id == 0))
		return RC_FLUSH_CONN_OUT_OK;

	if (unlikely(atomic_read(&(rconn->isreset)) != 0))
		return RC_FLUSH_CONN_OUT_OK;

	if (fromqos == 0 && may_send_conn(rconn) == 0)
		return RC_FLUSH_CONN_OUT_CONG;

	while (rconn->buf.read_remaining >= targetmss) {
		struct conn_retrans *cr;
		struct sk_buff *skb;
		char *dst;
		int rc;

		if (unlikely(creditsperbyte * targetmss >
				rconn->credits))
			return RC_FLUSH_CONN_OUT_CREDITS;

		seqno = rconn->target.out.seqno_nextsend;
		skb = create_packet(rconn->target.out.nb, targetmss, GFP_ATOMIC,
				rconn->target.out.conn_id, seqno);
		if (unlikely(skb == 0))
			return RC_FLUSH_CONN_OUT_OOM;

		cr = kmem_cache_alloc(connretrans_slab, GFP_KERNEL);
		if (unlikely(cr == 0)) {
			kfree_skb(skb);
			return RC_FLUSH_CONN_OUT_OOM;
		}

		dst = skb_put(skb, targetmss);

		databuf_pull(&(rconn->buf), dst, targetmss);

		rc = dev_queue_xmit(skb);
		if (rc != 0) {
			databuf_unpull(&(rconn->buf), targetmss);
			kmem_cache_free(connretrans_slab, cr);
			return RC_FLUSH_CONN_OUT_CONG;
		}

		rconn->credits -= creditsperbyte * targetmss;
		rconn->target.out.seqno_nextsend += targetmss;
		schedule_retransmit_conn(cr, rconn, seqno, targetmss);
	}

	if (rconn->buf.read_remaining > 0) {
		struct control_msg_out *cm;
		struct conn_retrans *cr;
		__u32 len = rconn->buf.read_remaining;
		char *buf = kmalloc(len, GFP_KERNEL);

		if (unlikely(creditsperbyte * len > rconn->credits)) {
			kfree(buf);
			return RC_FLUSH_CONN_OUT_CREDITS;
		}

		if (unlikely(buf == 0))
			return RC_FLUSH_CONN_OUT_OOM;

		cm = alloc_control_msg(rconn->target.out.nb, ACM_PRIORITY_MED);
		if (unlikely(cm == 0)) {
			kfree(buf);
			return RC_FLUSH_CONN_OUT_OOM;
		}

		cr = kmem_cache_alloc(connretrans_slab, GFP_KERNEL);
		if (unlikely(cr == 0)) {
			kfree(buf);
			free_control_msg(cm);
			return RC_FLUSH_CONN_OUT_CONG;
		}

		databuf_pull(&(rconn->buf), buf, len);

		seqno = rconn->target.out.seqno_nextsend;
		rconn->credits -= creditsperbyte * len;
		rconn->target.out.seqno_nextsend += len;

		schedule_retransmit_conn(cr, rconn, seqno, len);

		send_conndata(cm, rconn->target.out.conn_id, seqno, buf, buf,
				len);
	}

	return RC_FLUSH_CONN_OUT_OK;
}

int flush_out(struct conn *rconn)
{
	return _flush_out(rconn, 0, 0);
}

int __init cor_snd_init(void)
{
	connretrans_slab = kmem_cache_create("cor_connretrans",
			sizeof(struct conn_retrans), 8, 0, 0);

	if (unlikely(connretrans_slab == 0))
		return 1;

	INIT_DELAYED_WORK(&(qos_resume_work), qos_resume);
	qos_resume_scheduled = 0;

	return 0;
}

MODULE_LICENSE("GPL");