/*
 * Connection oriented routing
 * Copyright (C) 2007-2008 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

#include "cor.h" /* project-local declarations (struct conn, struct neighbor, htable, ...) */
struct kmem_cache *connretrans_slab;
/* timeout_list and conn_list share a single ref */
struct conn_retrans {
	struct list_head timeout_list;
	struct list_head conn_list;
	struct htab_entry htab_entry;
	struct conn *rconn;
	__u32 seqno;
	__u32 length;
	struct kref ref;
	unsigned long timeout;
};
static void free_connretrans(struct kref *ref)
{
	struct conn_retrans *cr = container_of(ref, struct conn_retrans, ref);
	struct conn *rconn = cr->rconn;

	kmem_cache_free(connretrans_slab, cr);
	/* drop the conn ref only after cr has been returned to the slab;
	 * cr must not be dereferenced past this point */
	kref_put(&(rconn->ref), free_conn);
}
static struct htable retransmits_conn;
struct retransmit_matchparam {
	struct neighbor *nb;
	__u32 conn_id;
	__u32 seqno;
};

static __u32 rm_to_key(struct retransmit_matchparam *rm)
{
	return rm->conn_id ^ rm->seqno;
}
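/*
 * Note: the key intentionally ignores rm->nb, so entries with the same
 * conn_id/seqno on different neighbors hash to the same bucket; such
 * collisions are resolved by the full comparison in
 * matches_connretrans_connid_seqno() below.
 */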
static void htable_insert_connretrans(struct conn_retrans *cr)
{
	struct retransmit_matchparam rm;

	rm.conn_id = cr->rconn->target.out.conn_id;
	rm.seqno = cr->seqno;
	rm.nb = cr->rconn->target.out.nb;

	htable_insert(&retransmits_conn, (char *) cr, rm_to_key(&rm));
}
static int htable_delete_connretrans(struct conn_retrans *cr)
{
	struct retransmit_matchparam rm;

	rm.conn_id = cr->rconn->target.out.conn_id;
	rm.seqno = cr->seqno;
	rm.nb = cr->rconn->target.out.nb;

	return htable_delete(&retransmits_conn, rm_to_key(&rm), &rm,
			free_connretrans);
}
/* static struct sk_buff *cor_dequeue(struct Qdisc *sch)
{
	struct sk_buff *ret = 0;
	struct cor_sched_data *q = qdisc_priv(sch);
	struct list_head *ln = q->conn_list.next;
	struct conn *best = 0;
	__u64 currcost = 0;
	__u64 currcost_limit = 0;

	spin_lock(&(q->lock));

	if (!(skb_queue_empty(&(q->requeue_queue)))) {
		ret = __skb_dequeue(&(q->requeue_queue));
		goto out;
	}

	while (&(q->conn_list) != ln) {
		__u32 max1, max2, maxcost;
		struct conn *curr = (struct conn *)
				(((char *) ln) - offsetof(struct conn,
				target.out.queue_list));

		BUG_ON(TARGET_OUT != curr->targettype);

		max1 = (256 * ((__u64)curr->credits)) /
				((__u64)curr->bytes_queued + curr->avg_rate);
		max2 = (256 * ((__u64)curr->credits +
				curr->credit_sender - curr->credit_recp)) /
				((__u64)curr->bytes_queued + 2*curr->avg_rate);
		maxcost = max((__u32) 0, min((max1), (max2)));

		if (maxcost > currcost_limit) {
			currcost = currcost_limit;
			currcost_limit = maxcost;
			best = curr;
		}

		ln = ln->next;
	}

	best->credits -= currcost;

	ret = __skb_dequeue(&(best->target.out.queue));
	if (skb_queue_empty(&(best->target.out.queue))) {
		list_del(&(best->target.out.queue_list));
		best->target.out.qdisc_active = 0;
	}

out:
	spin_unlock(&(q->lock));

	if (likely(0 != ret)) {
		sch->qstats.backlog -= ret->len;
		sch->q.qlen--;
	}

	return ret;
}

static int cor_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct cor_sched_data *q = qdisc_priv(sch);
	struct conn *rconn;

	rconn = skb_pstate(skb)->rconn;

	BUG_ON(TARGET_OUT != rconn->targettype);

	spin_lock(&(rconn->target.out.qdisc_lock));

	__skb_queue_tail(&(rconn->target.out.queue), skb);

	if (unlikely(0 == rconn->target.out.qdisc_active)) {
		spin_lock(&(q->lock));
		list_add(&(rconn->target.out.queue_list), &(q->conn_list));
		rconn->target.out.qdisc_active = 1;
		spin_unlock(&(q->lock));
	}

	spin_unlock(&(rconn->target.out.qdisc_lock));

	sch->bstats.bytes += skb->len;
	sch->bstats.packets++;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
} */
static struct sk_buff *create_packet(struct neighbor *nb, int size,
		gfp_t alloc_flags, __u32 conn_id, __u32 seqno)
{
	struct sk_buff *ret;
	char *dest;

	ret = alloc_skb(size + 9 + LL_ALLOCATED_SPACE(nb->dev), alloc_flags);
	if (unlikely(0 == ret))
		return 0;

	ret->protocol = htons(ETH_P_COR);
	ret->dev = nb->dev;

	skb_reserve(ret, LL_RESERVED_SPACE(nb->dev));
	if (unlikely(dev_hard_header(ret, nb->dev, ETH_P_COR, nb->mac,
			nb->dev->dev_addr, ret->len) < 0)) {
		kfree_skb(ret);
		return 0;
	}
	skb_reset_network_header(ret);

	dest = skb_put(ret, 9);
	BUG_ON(0 == dest);

	dest[0] = PACKET_TYPE_DATA;
	dest += 1;

	put_u32(dest, conn_id, 1);
	dest += 4;
	put_u32(dest, seqno, 1);

	return ret;
}
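/*
 * Wire layout of the 9 byte header built above:
 *   offset 0: PACKET_TYPE_DATA (1 byte)
 *   offset 1: conn_id          (4 bytes, written by put_u32)
 *   offset 5: seqno            (4 bytes, written by put_u32)
 * followed by "size" bytes of payload appended by the callers.
 */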
struct sk_buff *create_packet_kernel(struct neighbor *nb, int size,
		gfp_t alloc_flags)
{
	__u32 seqno = atomic_add_return(1, &(nb->kpacket_seqno));
	struct sk_buff *skb = create_packet(nb, size, alloc_flags, 0, seqno);
	struct skb_procstate *ps;

	if (unlikely(skb == 0))
		return 0;

	ps = skb_pstate(skb);
	memset(&(ps->funcstate.retransmit_queue), 0,
			sizeof(ps->funcstate.retransmit_queue));
	ps->funcstate.retransmit_queue.conn_id = 0;
	ps->funcstate.retransmit_queue.seqno = seqno;
	ps->funcstate.retransmit_queue.nb = nb;
	/* nb_ref is in schedule_retransmit */

	return skb;
}
static void set_conn_retrans_timeout(struct conn_retrans *cr)
{
	cr->timeout = jiffies + msecs_to_jiffies(300 + ((__u32) atomic_read(
			&(cr->rconn->target.out.nb->latency)))/1000);
}
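/*
 * The retransmit timeout above is a flat 300ms plus the per-neighbor
 * latency scaled down by 1000; this assumes nb->latency is kept in
 * microseconds, which is not visible from this file.
 */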
static struct conn_retrans *readd_conn_retrans(struct conn_retrans *cr,
		struct neighbor *nb, __u32 length)
{
	unsigned long iflags;

	struct conn_retrans *ret = 0;

	if (unlikely(cr->length > length)) {
		/* split: allocate a new entry covering the tail of cr */
		ret = kmem_cache_alloc(connretrans_slab, GFP_KERNEL);
		if (unlikely(ret == 0)) {
			cr->timeout = jiffies + 1;
			return 0;
		}

		memset(ret, 0, sizeof (struct conn_retrans));
		ret->rconn = cr->rconn;
		kref_get(&(cr->rconn->ref));
		ret->seqno = cr->seqno + length;
		ret->length = cr->length - length;
		kref_init(&(ret->ref));
	}

	spin_lock_irqsave( &(nb->retrans_lock), iflags );

	if (ret != 0) {
		htable_insert_connretrans(ret);
		list_add(&(ret->timeout_list), &(nb->retrans_list_conn));
		list_add(&(ret->conn_list), &(cr->conn_list));
		cr->length = length;
	}

	list_del(&(cr->timeout_list));
	list_add_tail(&(cr->timeout_list), &(nb->retrans_list_conn));
	set_conn_retrans_timeout(cr);

	BUG_ON(cr->length != length);

	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );

	return ret;
}
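/*
 * Retransmit the range covered by cr: full-MSS chunks are resent as data
 * packets, any remainder shorter than the MSS is handed to the control
 * message path via send_conndata().
 */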
static void send_retrans(struct neighbor *nb, struct conn_retrans *cr)
{
	struct conn *rconn = cr->rconn;
	int targetmss = mss(nb);

	__u32 seqno = cr->seqno;

	mutex_lock(&(rconn->rcv_lock));

	while (cr->length >= targetmss) {
		struct sk_buff *skb;
		char *dst;

		skb = create_packet(nb, targetmss, GFP_KERNEL,
				rconn->target.out.conn_id, seqno);
		if (unlikely(skb == 0)) {
			cr->timeout = jiffies + 1;
			goto out;
		}

		dst = skb_put(skb, targetmss);
		databuf_pullold(&(rconn->buf), seqno, dst, targetmss);
		dev_queue_xmit(skb);

		/* split the tail off into its own conn_retrans and continue
		 * with it; 0 means nothing is left (or the split failed and
		 * a new timeout has already been set) */
		cr = readd_conn_retrans(cr, nb, targetmss);
		if (unlikely(cr == 0))
			goto out;

		seqno += targetmss;
	}

	if (cr->length > 0) {
		struct control_msg_out *cm;
		char *buf = kmalloc(cr->length, GFP_KERNEL);

		if (unlikely(buf == 0)) {
			cr->timeout = jiffies + 1;
			goto out;
		}

		databuf_pullold(&(rconn->buf), cr->seqno, buf, cr->length);

		cm = alloc_control_msg();
		if (unlikely(cm == 0)) {
			cr->timeout = jiffies + 1;
			kfree(buf);
			goto out;
		}

		if (unlikely(readd_conn_retrans(cr, nb, cr->length) != 0)) {
			free_control_msg(cm);
			kfree(buf);
			goto out;
		}

		send_conndata(cm, nb, rconn->target.out.conn_id, cr->seqno,
				buf, buf, cr->length);
	}

out:
	mutex_unlock(&(rconn->rcv_lock));
}
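/*
 * Work item run when the oldest conn_retrans of a neighbor may have timed
 * out: expired entries are resent (or dropped if the neighbor was killed
 * or the conn was reset), then the timer is rearmed for the next entry.
 */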
void retransmit_conn_timerfunc(struct work_struct *work)
{
	unsigned long iflags;

	struct neighbor *nb = container_of(to_delayed_work(work),
			struct neighbor, retrans_timer_conn);

	struct conn_retrans *cr = 0;

	int nbstate;

	spin_lock_irqsave( &(nb->state_lock), iflags );
	nbstate = nb->state;
	spin_unlock_irqrestore( &(nb->state_lock), iflags );

	while (1) {
		spin_lock_irqsave( &(nb->retrans_lock), iflags );

		if (list_empty(&(nb->retrans_list_conn))) {
			nb->retrans_timer_conn_running = 0;
			break;
		}

		cr = container_of(nb->retrans_list_conn.next,
				struct conn_retrans, timeout_list);

		BUG_ON(cr->rconn->targettype != TARGET_OUT);

		if (unlikely(unlikely(nbstate == NEIGHBOR_STATE_KILLED) ||
				unlikely(atomic_read(
				&(cr->rconn->isreset)) != 0))) {
			htable_delete_connretrans(cr);

			list_del(&(cr->timeout_list));
			list_del(&(cr->conn_list));
			spin_unlock_irqrestore( &(nb->retrans_lock), iflags );

			kref_put(&(cr->ref), free_connretrans);
			continue;
		}

		BUG_ON(nb != cr->rconn->target.out.nb);

		if (time_after(cr->timeout, jiffies)) {
			mod_timer(&(nb->retrans_timer), cr->timeout);
			break;
		}

		kref_get(&(cr->ref));
		spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
		send_retrans(nb, cr);
		kref_put(&(cr->ref), free_connretrans);
	}

	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );

	kref_put(&(nb->ref), neighbor_free);
}
void conn_ack_rcvd(struct neighbor *nb, __u32 conn_id, __u32 seqno,
		__u32 length)
{
	unsigned long iflags;
	struct retransmit_matchparam rm;
	struct conn_retrans *cr;

	rm.conn_id = conn_id;
	rm.seqno = seqno;
	rm.nb = nb;

	spin_lock_irqsave( &(nb->retrans_lock), iflags );

	cr = (struct conn_retrans *) htable_get(&retransmits_conn,
			rm_to_key(&rm), &rm);

	if (unlikely(cr == 0)) {
		printk(KERN_ERR "bogus/duplicate ack received");
		goto out;
	}

	BUG_ON(cr->rconn->targettype != TARGET_OUT);

	if (cr->rconn->target.out.nb != nb) {
		printk(KERN_ERR "invalid neigh when receiving ack");
		goto out;
	}

	if (htable_delete_connretrans(cr))
		goto out;

	list_del(&(cr->timeout_list));
	list_del(&(cr->conn_list));
	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );

	kref_put(&(cr->ref), free_connretrans);
	return;

out:
	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
}
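/*
 * Queue a freshly sent range [seqno, seqno + len) for retransmission and
 * start the per-neighbor retransmit work item if this entry is the first
 * one on the list.
 */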
static int schedule_retransmit_conn(struct conn *rconn, __u32 seqno, __u32 len)
{
	unsigned long iflags;

	struct neighbor *nb = rconn->target.out.nb;

	struct conn_retrans *cr;
	int first;

	BUG_ON(rconn->targettype != TARGET_OUT);

	cr = kmem_cache_alloc(connretrans_slab, GFP_KERNEL);
	if (unlikely(cr == 0))
		return 1;

	memset(cr, 0, sizeof (struct conn_retrans));
	cr->rconn = rconn;
	kref_get(&(rconn->ref));
	cr->seqno = seqno;
	cr->length = len;
	kref_init(&(cr->ref));
	set_conn_retrans_timeout(cr);

	spin_lock_irqsave( &(nb->retrans_lock), iflags );

	htable_insert_connretrans(cr);

	first = unlikely(list_empty(&(nb->retrans_list_conn)));
	list_add_tail(&(cr->timeout_list), &(nb->retrans_list_conn));

	list_add_tail(&(cr->conn_list), &(rconn->target.out.retrans_list));

	if (unlikely(unlikely(first) &&
			unlikely(nb->retrans_timer_conn_running == 0))) {
		__u32 delay = cr->timeout - jiffies;
		schedule_delayed_work(&(nb->retrans_timer_conn), delay);
		nb->retrans_timer_conn_running = 1;
		kref_get(&(nb->ref));
	}

	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );

	return 0;
}
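/*
 * Push buffered outgoing data of a conn to its neighbor: full-MSS chunks
 * as data packets, the remainder through the control message path; each
 * chunk is registered for retransmission before it is sent.
 */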
void flush_out(struct conn *rconn)
{
	int targetmss = mss(rconn->target.out.nb);
	__u32 seqno;

	BUG_ON(rconn->targettype != TARGET_OUT);

	if (unlikely(rconn->target.out.conn_id == 0))
		return;

	while (rconn->buf.read_remaining >= targetmss) {
		struct sk_buff *skb;
		char *dst;

		seqno = rconn->target.out.seqno;
		skb = create_packet(rconn->target.out.nb, targetmss,
				GFP_ATOMIC, rconn->target.out.conn_id, seqno);
		if (unlikely(skb == 0))
			return;

		dst = skb_put(skb, targetmss);
		databuf_pull(&(rconn->buf), dst, targetmss);

		if (unlikely(schedule_retransmit_conn(rconn, seqno,
				targetmss))) {
			kfree_skb(skb);
			return;
		}

		rconn->target.out.seqno += targetmss;
		dev_queue_xmit(skb);
	}

	if (rconn->buf.read_remaining > 0) {
		struct control_msg_out *cm;
		__u32 len = rconn->buf.read_remaining;
		char *buf = kmalloc(len, GFP_KERNEL);

		if (unlikely(buf == 0))
			return;

		databuf_pull(&(rconn->buf), buf, len);

		cm = alloc_control_msg();
		if (unlikely(cm == 0)) {
			kfree(buf);
			return;
		}

		seqno = rconn->target.out.seqno;
		if (schedule_retransmit_conn(rconn, seqno, len)) {
			free_control_msg(cm);
			kfree(buf);
			return;
		}

		rconn->target.out.seqno += len;

		send_conndata(cm, rconn->target.out.nb,
				rconn->target.out.conn_id, seqno, buf, buf,
				len);
	}
}

#warning todo flush later
static int matches_connretrans_connid_seqno(void *htentry, void *searcheditem)
{
	struct conn_retrans *cr = (struct conn_retrans *) htentry;
	struct retransmit_matchparam *rm = (struct retransmit_matchparam *)
			searcheditem;

	return rm->conn_id == cr->rconn->target.out.conn_id &&
			rm->seqno == cr->seqno &&
			rm->nb == cr->rconn->target.out.nb;
}
int __init cor_snd_init(void)
{
	connretrans_slab = kmem_cache_create("cor_connretrans",
			sizeof(struct conn_retrans), 8, 0, 0);
	if (unlikely(connretrans_slab == 0))
		return 1;

	htable_init(&retransmits_conn, matches_connretrans_connid_seqno,
			offsetof(struct conn_retrans, htab_entry),
			offsetof(struct conn_retrans, ref));

	return 0;
}
MODULE_LICENSE("GPL");