2 * Connection oriented routing
3 * Copyright (C) 2007-2008 Michael Blizek
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 #include <linux/gfp.h>
22 #include <linux/jiffies.h>
23 #include <linux/slab.h>
/* Slab cache for struct conn_retrans objects; created in cor_snd_init()
 * and used by schedule_retransmit_conn()/readd_conn_retrans(). */
27 struct kmem_cache
*connretrans_slab
;
/* Members of struct conn_retrans — one pending retransmit of outgoing
 * conn data.  NOTE(review): the struct's opening line and several
 * members referenced elsewhere in this file (rconn, seqno, length,
 * ackrcvd, ref) are not visible in this chunk — verify against the
 * full file. */
30 /* timeout_list and conn_list share a single ref */
32 struct list_head timeout_list
;
33 struct list_head conn_list
;
34 struct htab_entry htab_entry
;
39 unsigned long timeout
;
42 static void free_connretrans(struct kref
*ref
)
44 struct conn_retrans
*cr
= container_of(ref
, struct conn_retrans
, ref
);
45 kmem_cache_free(connretrans_slab
, cr
);
46 kref_put(&(cr
->rconn
->ref
), free_conn
);
/* Hash table of all pending conn retransmits, keyed via rm_to_key(). */
49 static struct htable retransmits_conn
;
/* Search key for retransmits_conn lookups.  NOTE(review): the member
 * list (conn_id, seqno, nb — read by rm_to_key() and the match
 * callback) is not visible in this chunk. */
51 struct retransmit_matchparam
{
57 static __u32
rm_to_key(struct retransmit_matchparam
*rm
)
59 return rm
->conn_id
^ rm
->seqno
;
/* Insert a conn_retrans into the retransmits_conn hash table, keyed by
 * its conn's outgoing conn_id (and seqno, via rm_to_key()).
 * NOTE(review): rm.seqno is read by rm_to_key() but no assignment to it
 * is visible here (original source line 67 appears to be missing from
 * this chunk) — verify against the full file. */
62 static void htable_insert_connretrans(struct conn_retrans
*cr
)
64 struct retransmit_matchparam rm
;
66 rm
.conn_id
= cr
->rconn
->target
.out
.conn_id
;
68 rm
.nb
= cr
->rconn
->target
.out
.nb
;
70 htable_insert(&retransmits_conn
, (char *) cr
, rm_to_key(&rm
));
/* Remove a conn_retrans from the retransmits_conn hash table; returns
 * htable_delete()'s status.
 * NOTE(review): the rm.seqno assignment and the final argument of the
 * htable_delete() call (original lines 78 and 82, presumably the kref
 * release function) are missing from this chunk — verify against the
 * full file. */
73 static int htable_delete_connretrans(struct conn_retrans
*cr
)
75 struct retransmit_matchparam rm
;
77 rm
.conn_id
= cr
->rconn
->target
.out
.conn_id
;
79 rm
.nb
= cr
->rconn
->target
.out
.nb
;
81 return htable_delete(&retransmits_conn
, rm_to_key(&rm
), &rm
,
85 /* static struct sk_buff * cor_dequeue(struct Qdisc *sch)
89 struct cor_sched_data *q = qdisc_priv(sch);
91 struct list_head *ln = q->conn_list.next;
92 struct conn *best = 0;
94 __u64 currcost_limit = 0;
97 spin_lock(&(q->lock));
99 if (!(skb_queue_empty(&(q->requeue_queue)))) {
100 ret = __skb_dequeue(&(q->requeue_queue));
104 while (&(q->conn_list) != ln) {
105 __u32 max1, max2, maxcost;
106 struct conn *curr = (struct conn *)
107 (((char *) ln) - offsetof(struct conn,
108 target.out.queue_list));
110 BUG_ON(TARGET_OUT != curr->targettype);
111 max1 = (256 * ((__u64)curr->credits)) /
112 ((__u64)curr->bytes_queued + curr->avg_rate);
114 max2 = (256 * ((__u64)curr->credits +
115 curr->credit_sender - curr->credit_recp)) /
116 ((__u64)curr->bytes_queued + 2*curr->avg_rate);
118 maxcost = max((__u32) 0, min((max1), (max2)));
120 if (maxcost > currcost_limit) {
121 currcost = currcost_limit;
122 currcost_limit = maxcost;
129 best->credits -= currcost;
131 ret = __skb_dequeue(&(best->target.out.queue));
133 if (skb_queue_empty(&(best->target.out.queue))) {
134 list_del(&(best->target.out.queue_list));
135 best->target.out.qdisc_active = 0;
139 spin_unlock(&(q->lock));
141 if (likely(0 != ret)) {
142 sch->qstats.backlog -= ret->len;
149 static int cor_enqueue(struct sk_buff *skb, struct Qdisc *sch)
151 struct cor_sched_data *q = qdisc_priv(sch);
154 rconn = skb_pstate(skb)->rconn;
156 BUG_ON(TARGET_OUT != rconn->targettype);
158 spin_lock(&(rconn->target.out.qdisc_lock));
160 __skb_queue_tail(&(rconn->target.out.queue), skb);
162 if (unlikely(0 == rconn->target.out.qdisc_active)) {
163 spin_lock(&(q->lock));
164 list_add(&(rconn->target.out.queue_list), &(q->conn_list));
165 rconn->target.out.qdisc_active = 1;
166 spin_unlock(&(q->lock));
169 spin_unlock(&(rconn->target.out.qdisc_lock));
171 sch->bstats.bytes += skb->len;
172 sch->bstats.packets++;
175 return NET_XMIT_SUCCESS;
/* Allocate and set up an skb for a data packet to neighbor nb:
 * link-layer header, then a 9-byte cor header (packet type byte,
 * conn_id, seqno), leaving 'size' bytes of tailroom for the payload.
 * NOTE(review): local declarations, the error-path returns, the final
 * return, and any pointer advance between the two put_u32() calls are
 * missing from this chunk — comments below cover only visible lines. */
178 struct sk_buff
*create_packet(struct neighbor
*nb
, int size
,
179 gfp_t alloc_flags
, __u32 conn_id
, __u32 seqno
)
/* room for link-layer header + 9-byte cor header + payload */
184 ret
= alloc_skb(size
+ 9 + LL_ALLOCATED_SPACE(nb
->dev
), alloc_flags
);
185 if (unlikely(0 == ret
))
188 ret
->protocol
= htons(ETH_P_COR
);
191 skb_reserve(ret
, LL_RESERVED_SPACE(nb
->dev
));
192 if(unlikely(dev_hard_header(ret
, nb
->dev
, ETH_P_COR
, nb
->mac
,
193 nb
->dev
->dev_addr
, ret
->len
) < 0))
195 skb_reset_network_header(ret
);
/* 9-byte cor header: type, conn_id, seqno */
197 dest
= skb_put(ret
, 9);
200 dest
[0] = PACKET_TYPE_DATA
;
/* third put_u32() argument presumably selects byte-order conversion —
 * confirm against the helper's definition */
203 put_u32(dest
, conn_id
, 1);
205 put_u32(dest
, seqno
, 1);
211 static void set_conn_retrans_timeout(struct conn_retrans
*cr
)
213 cr
->timeout
= jiffies
+ msecs_to_jiffies(300 + ((__u32
) atomic_read(
214 &(cr
->rconn
->target
.out
.nb
->latency
)))/1000);
/* Requeue cr on nb's conn-retransmit list after (re)sending 'length'
 * bytes of it.  If cr covers more than 'length' bytes, a new
 * conn_retrans is allocated for the remainder (seqno advanced by
 * 'length', length reduced accordingly) and linked into the htable and
 * both lists; cr itself is moved to the tail of the timeout list with a
 * fresh timeout.  *dontsend is presumably set when the caller must not
 * transmit (e.g. the ack already arrived) — confirm in the missing
 * lines.
 * NOTE(review): the goto/out labels, the ackrcvd early-exit body, and
 * the return statement (original lines 227-231, 236-238, 263-265) are
 * missing from this chunk. */
217 static struct conn_retrans
*readd_conn_retrans(struct conn_retrans
*cr
,
218 struct neighbor
*nb
, __u32 length
, int *dontsend
)
220 unsigned long iflags
;
222 struct conn_retrans
*ret
= 0;
224 spin_lock_irqsave( &(nb
->retrans_lock
), iflags
);
/* ack already received: nothing more to retransmit for cr */
226 if (unlikely(cr
->ackrcvd
)) {
/* cr covers more than we just sent: split off the remainder */
232 if (unlikely(cr
->length
> length
)) {
233 ret
= kmem_cache_alloc(connretrans_slab
, GFP_ATOMIC
);
/* allocation failed: retry cr very soon instead of splitting */
234 if (unlikely(ret
== 0)) {
235 cr
->timeout
= jiffies
+ 1;
239 memset(ret
, 0, sizeof (struct conn_retrans
));
240 ret
->rconn
= cr
->rconn
;
/* the new entry holds its own reference on the conn */
241 kref_get(&(cr
->rconn
->ref
));
242 ret
->seqno
= cr
->seqno
+ length
;
243 ret
->length
= cr
->length
- length
;
244 kref_init(&(ret
->ref
));
248 htable_insert_connretrans(ret
);
249 list_add(&(ret
->timeout_list
), &(nb
->retrans_list_conn
));
250 list_add(&(ret
->conn_list
), &(cr
->conn_list
));
/* move cr to the tail with a fresh retransmit deadline */
254 list_del(&(cr
->timeout_list
));
255 list_add_tail(&(cr
->timeout_list
), &(nb
->retrans_list_conn
));
256 set_conn_retrans_timeout(cr
);
258 BUG_ON(cr
->length
!= length
);
262 spin_unlock_irqrestore( &(nb
->retrans_lock
), iflags
);
/* Retransmit the data covered by cr to neighbor nb: full-MSS chunks are
 * sent as individual packets via create_packet(); any sub-MSS tail is
 * copied out of the conn's databuf and handed to the control-message
 * path via send_conndata().
 * NOTE(review): many lines (declarations of skb/dst/dontsend, gotos,
 * error-path bodies, dev_queue_xmit or equivalent send of skb) are
 * missing from this chunk — comments cover only what is visible. */
267 static void send_retrans(struct neighbor
*nb
, struct conn_retrans
*cr
)
269 int targetmss
= mss(nb
);
271 __u32 seqno
= cr
->seqno
;
272 __u32 length
= cr
->length
;
276 mutex_lock(&(cr
->rconn
->rcv_lock
));
/* send full-MSS packets while enough data remains */
278 while (cr
->length
>= targetmss
) {
281 skb
= create_packet(nb
, targetmss
, GFP_KERNEL
,
282 cr
->rconn
->target
.out
.conn_id
, seqno
);
/* allocation failure: retry soon */
283 if (unlikely(skb
== 0)) {
284 cr
->timeout
= jiffies
+ 1;
288 cr
= readd_conn_retrans(cr
, nb
, targetmss
, &dontsend
);
/* NOTE(review): "cr == 0 && cr->length > targetmss" dereferences cr
 * exactly when it is NULL — this looks like it should be "cr == 0 ||
 * ..."; verify against upstream before changing */
289 if (unlikely(unlikely(dontsend
) || unlikely(cr
== 0 &&
290 unlikely(cr
->length
> targetmss
)))) {
295 dst
= skb_put(skb
, targetmss
);
297 databuf_pullold(&(cr
->rconn
->buf
), cr
->seqno
, dst
, targetmss
);
/* sub-MSS remainder goes through the control-message path */
304 if (cr
->length
> 0) {
305 struct control_msg_out
*cm
;
306 char *buf
= kmalloc(cr
->length
, GFP_KERNEL
);
308 if (unlikely(buf
== 0)) {
309 cr
->timeout
= jiffies
+ 1;
313 databuf_pullold(&(cr
->rconn
->buf
), cr
->seqno
, buf
, cr
->length
);
315 cm
= alloc_control_msg();
316 if (unlikely(cm
== 0)) {
317 cr
->timeout
= jiffies
+ 1;
322 if (unlikely(readd_conn_retrans(cr
, nb
, cr
->length
, &dontsend
)
326 if (likely(dontsend
== 0))
327 send_conndata(cm
, nb
, cr
->rconn
->target
.out
.conn_id
,
328 cr
->seqno
, buf
, buf
, cr
->length
);
332 mutex_unlock(&(cr
->rconn
->rcv_lock
));
/* Delayed-work handler for nb's conn-retransmit timer: walks the head
 * of nb->retrans_list_conn, dropping entries whose neighbor/conn is
 * dead, re-arming the timer for entries not yet due, and calling
 * send_retrans() (outside the retrans_lock, holding a temporary kref)
 * for due entries.
 * NOTE(review): the loop construct, the read of nbstate under
 * state_lock, and several gotos/returns are missing from this chunk. */
335 void retransmit_conn_timerfunc(struct work_struct
*work
)
337 unsigned long iflags
;
339 struct neighbor
*nb
= container_of(to_delayed_work(work
),
340 struct neighbor
, retrans_timer_conn
);
342 struct conn_retrans
*cr
= 0;
/* nbstate is presumably sampled here under state_lock — the body
 * between lock and unlock is not visible */
347 spin_lock_irqsave( &(nb
->state_lock
), iflags
);
349 spin_unlock_irqrestore( &(nb
->state_lock
), iflags
);
352 spin_lock_irqsave( &(nb
->retrans_lock
), iflags
);
354 if (list_empty(&(nb
->retrans_list_conn
))) {
355 nb
->retrans_timer_conn_running
= 0;
360 cr
= container_of(nb
->retrans_list_conn
.next
,
361 struct conn_retrans
, timeout_list
);
363 BUG_ON(cr
->rconn
->targettype
!= TARGET_OUT
);
/* neighbor killed or conn reset: discard the retrans entry */
365 if (unlikely(unlikely(nbstate
== NEIGHBOR_STATE_KILLED
) ||
366 unlikely(atomic_read(
367 &(cr
->rconn
->isreset
)) != 0))) {
368 htable_delete_connretrans(cr
);
370 list_del(&(cr
->timeout_list
));
371 list_del(&(cr
->conn_list
));
372 spin_unlock_irqrestore( &(nb
->retrans_lock
), iflags
);
374 kref_put(&(cr
->ref
), free_connretrans
);
378 BUG_ON(nb
!= cr
->rconn
->target
.out
.nb
);
/* not yet due: re-arm and stop.
 * NOTE(review): this arms nb->retrans_timer while the rest of this
 * function manages the retrans_timer_conn delayed work — confirm
 * whether retrans_timer is the intended target */
380 if (time_after(cr
->timeout
, jiffies
)) {
381 mod_timer(&(nb
->retrans_timer
), cr
->timeout
);
/* hold a temporary ref so cr survives send_retrans() without the lock */
385 kref_get(&(cr
->ref
));
386 spin_unlock_irqrestore( &(nb
->retrans_lock
), iflags
);
387 send_retrans(nb
, cr
);
388 kref_put(&(cr
->ref
), free_connretrans
);
391 spin_unlock_irqrestore( &(nb
->retrans_lock
), iflags
);
/* drop the reference taken when the delayed work was scheduled */
394 kref_put(&(nb
->ref
), neighbor_free
);
/* Process an ack for conn data: look up the matching conn_retrans in
 * the hash table, validate it belongs to this neighbor, unlink it from
 * the htable and both lists, and drop its reference.
 * NOTE(review): the parameter list is truncated, and the rm.seqno /
 * rm.nb assignments (original lines ~407-408) are missing from this
 * chunk although rm_to_key() and the match callback read them —
 * verify against the full file. */
397 void conn_ack_rcvd(struct neighbor
*nb
, __u32 conn_id
, __u32 seqno
,
400 unsigned long iflags
;
401 struct retransmit_matchparam rm
;
402 struct conn_retrans
*cr
;
406 rm
.conn_id
= conn_id
;
410 spin_lock_irqsave( &(nb
->retrans_lock
), iflags
);
412 cr
= (struct conn_retrans
*) htable_get(&retransmits_conn
,
413 rm_to_key(&rm
), &rm
);
/* no matching retrans entry: stale or forged ack */
415 if (unlikely(cr
== 0)) {
416 printk(KERN_ERR
"bogus/duplicate ack received");
420 BUG_ON(cr
->rconn
->targettype
!= TARGET_OUT
);
/* entry exists but belongs to a different neighbor */
422 if (unlikely(cr
->rconn
->target
.out
.nb
!= nb
)) {
423 printk(KERN_ERR
"invalid neigh when receiving ack");
427 if (unlikely(htable_delete_connretrans(cr
)))
430 list_del(&(cr
->timeout_list
));
431 list_del(&(cr
->conn_list
));
433 spin_unlock_irqrestore( &(nb
->retrans_lock
), iflags
);
435 #warning todo databuf_ack
437 kref_put(&(cr
->ref
), free_connretrans
);
442 spin_unlock_irqrestore( &(nb
->retrans_lock
), iflags
);
/* Create a conn_retrans covering [seqno, seqno+len) of rconn's
 * outgoing data, insert it into the hash table and the neighbor's
 * timeout list, and start the neighbor's conn-retransmit delayed work
 * if this is the first pending entry.  Returns nonzero on allocation
 * failure (return statements are not visible in this chunk).
 * NOTE(review): the assignments initialising cr->rconn / cr->seqno /
 * cr->length (original lines ~460-463) and the declaration of 'first'
 * are missing from this chunk — verify against the full file. */
445 static int schedule_retransmit_conn(struct conn
*rconn
, __u32 seqno
, __u32 len
)
447 unsigned long iflags
;
449 struct neighbor
*nb
= rconn
->target
.out
.nb
;
451 struct conn_retrans
*cr
;
454 BUG_ON(rconn
->targettype
!= TARGET_OUT
);
456 cr
= kmem_cache_alloc(connretrans_slab
, GFP_KERNEL
);
457 if (unlikely(cr
== 0))
459 memset(cr
, 0, sizeof (struct conn_retrans
));
/* the retrans entry holds a reference on the conn */
461 kref_get(&(rconn
->ref
));
464 kref_init(&(cr
->ref
));
465 set_conn_retrans_timeout(cr
);
467 spin_lock_irqsave( &(nb
->retrans_lock
), iflags
);
469 htable_insert_connretrans(cr
);
471 first
= unlikely(list_empty(&(nb
->retrans_list_conn
)));
472 list_add_tail(&(cr
->timeout_list
), &(nb
->retrans_list_conn
));
474 list_add_tail(&(cr
->conn_list
), &(rconn
->target
.out
.retrans_list
));
/* first entry and no worker running: kick off the delayed work,
 * pinning the neighbor until the worker drops the ref */
476 if (unlikely(unlikely(first
) &&
477 unlikely(nb
->retrans_timer_conn_running
== 0))) {
478 __u32 delay
= cr
->timeout
- jiffies
;
479 schedule_delayed_work(&(nb
->retrans_timer_conn
), delay
);
480 nb
->retrans_timer_conn_running
= 1;
481 kref_get(&(nb
->ref
));
484 spin_unlock_irqrestore( &(nb
->retrans_lock
), iflags
);
/* Flush rconn's buffered outgoing data to its target neighbor: send
 * full-MSS packets directly (scheduling a retransmit for each), then
 * hand any sub-MSS remainder to the control-message path via
 * send_conndata().  Does nothing if no conn_id has been assigned yet.
 * NOTE(review): local declarations (seqno, skb, dst), the error-path
 * bodies, the actual transmit of skb, and the tail of the
 * send_conndata() argument list are missing from this chunk. */
488 void flush_out(struct conn
*rconn
)
490 int targetmss
= mss(rconn
->target
.out
.nb
);
493 BUG_ON(rconn
->targettype
!= TARGET_OUT
);
/* not connected yet: nothing can be sent */
495 if (unlikely(rconn
->target
.out
.conn_id
== 0))
498 while (rconn
->buf
.read_remaining
>= targetmss
) {
501 seqno
= rconn
->target
.out
.seqno
;
502 skb
= create_packet(rconn
->target
.out
.nb
, targetmss
, GFP_ATOMIC
,
503 rconn
->target
.out
.conn_id
, seqno
);
504 if (unlikely(skb
== 0))
507 dst
= skb_put(skb
, targetmss
);
509 databuf_pull(&(rconn
->buf
), dst
, targetmss
);
511 if (unlikely(schedule_retransmit_conn(rconn
, seqno
,
516 rconn
->target
.out
.seqno
+= targetmss
;
/* sub-MSS remainder: copy out of the databuf and send as conndata */
520 if (rconn
->buf
.read_remaining
> 0) {
521 struct control_msg_out
*cm
;
522 __u32 len
= rconn
->buf
.read_remaining
;
523 char *buf
= kmalloc(len
, GFP_KERNEL
);
525 if (unlikely(buf
== 0))
528 databuf_pull(&(rconn
->buf
), buf
, len
);
530 cm
= alloc_control_msg();
/* NOTE(review): 'buf' appears to leak if alloc_control_msg() fails —
 * the cleanup lines are not visible; confirm against the full file */
531 if (unlikely(cm
== 0)) {
536 seqno
= rconn
->target
.out
.seqno
;
537 if (unlikely(schedule_retransmit_conn(rconn
, seqno
, len
))) {
538 free_control_msg(cm
);
542 rconn
->target
.out
.seqno
+= len
;
544 send_conndata(cm
, rconn
->target
.out
.nb
,
545 rconn
->target
.out
.conn_id
, seqno
, buf
, buf
,
553 #warning todo flush later
558 static int matches_connretrans_connid_seqno(void *htentry
, void *searcheditem
)
560 struct conn_retrans
*cr
= (struct conn_retrans
*) htentry
;
561 struct retransmit_matchparam
*rm
= (struct retransmit_matchparam
*)
563 return rm
->conn_id
== cr
->rconn
->target
.out
.conn_id
&&
564 rm
->seqno
== cr
->seqno
&&
565 rm
->nb
== cr
->rconn
->target
.out
.nb
;
/* Module init for the send path: create the conn_retrans slab cache
 * (8-byte alignment, no flags, no constructor) and initialise the
 * retransmit hash table with its match callback and the offsets of the
 * htab_entry / kref members inside struct conn_retrans.
 * NOTE(review): the kmem_cache_create() result is not checked in the
 * visible lines and the function's return statement is not visible in
 * this chunk — verify against the full file. */
568 int __init
cor_snd_init(void)
570 connretrans_slab
= kmem_cache_create("cor_connretrans",
571 sizeof(struct conn_retrans
), 8, 0, 0);
572 htable_init(&retransmits_conn
, matches_connretrans_connid_seqno
,
573 offsetof(struct conn_retrans
, htab_entry
),
574 offsetof(struct conn_retrans
, ref
));
579 MODULE_LICENSE("GPL");