/*
 * Connection oriented routing
 * Copyright (C) 2007-2008 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

#include "cor.h" /* assumed name of the project header declaring htable,
		    struct neighbor, struct conn, skb_procstate, ... */
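/* All sent data packets that are still awaiting an ack, keyed by
 * conn_id ^ seqno (see rm_to_key() below) so that ack_received() can find
 * the skb an incoming ack covers. */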
static struct htable retransmits;
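/* Destructor called by the ref_counter once the last reference to a queued
 * retransmit skb is dropped: releases the connection reference taken in
 * send_packet() and disposes of the skb. */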
static void free_skb(struct ref_counter *cnt)
{
	struct skb_procstate *ps = container_of(cnt, struct skb_procstate,
			funcstate.retransmit_queue.refs);
	struct sk_buff *skb = skb_from_pstate(ps);

	ref_counter_decr(&(ps->rconn->refs));
	kfree_skb(skb); /* presumably the skb itself is released here too */
}
static struct ref_counter_def skb_refcnt = {
	.free = free_skb
};
struct retransmit_matchparam {
	__u32 conn_id;
	__u32 seqno;
	struct neighbor *nb;
};
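/* Hash key for the retransmits table; collisions are resolved by
 * matches_skb_connid_seqno(). */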
static __u32 rm_to_key(struct retransmit_matchparam *rm)
{
	return rm->conn_id ^ rm->seqno;
}
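/* The credit-based qdisc scheduler below is disabled (commented out):
 * cor_dequeue() picks the connection whose credit balance permits the
 * cheapest send, cor_enqueue() files an skb on its connection's queue and
 * activates the connection if necessary. */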
/*
static struct sk_buff *cor_dequeue(struct Qdisc *sch)
{
	struct sk_buff *ret = 0;

	struct cor_sched_data *q = qdisc_priv(sch);

	struct list_head *ln = q->conn_list.next;
	struct conn *best = 0;

	__u64 currcost = 0;
	__u64 currcost_limit = 0;

	spin_lock(&(q->lock));

	if (!(skb_queue_empty(&(q->requeue_queue)))) {
		ret = __skb_dequeue(&(q->requeue_queue));
		goto out;
	}

	while (&(q->conn_list) != ln) {
		__u32 max1, max2, maxcost;
		struct conn *curr = (struct conn *)
				(((char *) ln) - offsetof(struct conn,
				target.out.queue_list));

		BUG_ON(TARGET_OUT != curr->targettype);

		max1 = (256 * ((__u64)curr->credits)) /
				((__u64)curr->bytes_queued + curr->avg_rate);

		max2 = (256 * ((__u64)curr->credits +
				curr->credit_sender - curr->credit_recp)) /
				((__u64)curr->bytes_queued + 2*curr->avg_rate);

		maxcost = max((__u32) 0, min((max1), (max2)));

		if (maxcost > currcost_limit) {
			currcost = currcost_limit;
			currcost_limit = maxcost;
			best = curr;
		}

		ln = ln->next;
	}

	best->credits -= currcost;

	ret = __skb_dequeue(&(best->target.out.queue));

	if (skb_queue_empty(&(best->target.out.queue))) {
		list_del(&(best->target.out.queue_list));
		best->target.out.qdisc_active = 0;
	}

out:
	spin_unlock(&(q->lock));

	if (likely(0 != ret)) {
		sch->qstats.backlog -= ret->len;
	}

	return ret;
}

static int cor_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct cor_sched_data *q = qdisc_priv(sch);
	struct conn *rconn;

	rconn = skb_pstate(skb)->rconn;

	BUG_ON(TARGET_OUT != rconn->targettype);

	spin_lock(&(rconn->target.out.qdisc_lock));

	__skb_queue_tail(&(rconn->target.out.queue), skb);

	if (unlikely(0 == rconn->target.out.qdisc_active)) {
		spin_lock(&(q->lock));
		list_add(&(rconn->target.out.queue_list), &(q->conn_list));
		rconn->target.out.qdisc_active = 1;
		spin_unlock(&(q->lock));
	}

	spin_unlock(&(rconn->target.out.qdisc_lock));

	sch->bstats.bytes += skb->len;
	sch->bstats.packets++;

	return NET_XMIT_SUCCESS;
}
*/
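/* Clone the skb (the original stays queued for retransmission) and hand the
 * clone to the device layer. */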
static void cor_xmit(struct sk_buff *skb, int atomic)
{
	struct sk_buff *skb2;

	skb2 = skb_clone(skb, __GFP_DMA | (atomic ? GFP_ATOMIC : GFP_KERNEL));

	if (unlikely(skb2 == 0)) {
		printk(KERN_WARNING "cor_xmit: cannot clone skb, "
				"allocation failure?");
		return;
	}

	printk(KERN_ERR "xmit");
	dev_queue_xmit(skb2);
	printk(KERN_ERR "xmit2");
}
static void set_retrans_timeout(struct sk_buff *skb, struct neighbor *nb)
{
	struct skb_procstate *ps = skb_pstate(skb);

	ps->funcstate.retransmit_queue.timeout = jiffies +
			msecs_to_jiffies(100 + nb->latency);
}
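/* Timer handler: examine the head of the neighbor's retransmit list; if it
 * has expired, requeue it with a fresh timeout and resend it, then re-arm
 * the timer. */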
void retransmit_timerfunc(unsigned long arg)
{
	unsigned long iflags;

	struct neighbor *nb = (struct neighbor *) arg;
	struct sk_buff *skb = 0;
	unsigned long timeout;

	spin_lock_irqsave(&(nb->retrans_lock), iflags);
	skb = __skb_dequeue(&(nb->retrans_list));

	if (unlikely(skb == 0))
		goto out;

	timeout = skb_pstate(skb)->funcstate.retransmit_queue.timeout;

	if (time_after(timeout, jiffies)) {
		/* head of the list has not expired yet; put it back */
		__skb_queue_head(&(nb->retrans_list), skb);
	} else {
		/* expired; give it a fresh timeout and requeue at the tail */
		set_retrans_timeout(skb, nb);
		__skb_queue_tail(&(nb->retrans_list), skb);
	}
	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

	if (!time_after(timeout, jiffies))
		cor_xmit(skb, 1); /* resend the expired packet (clone) */

	mod_timer(&(nb->retrans_timer), timeout);
	return;

out:
	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);
}
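/* Allocate an skb for one COR data packet: link-layer header, then a 9-byte
 * protocol header (1 byte packet type, 4 bytes conn_id, 4 bytes seqno), and
 * set up the retransmit bookkeeping in the skb's procstate. */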
static struct sk_buff *create_packet(struct neighbor *nb, int size,
		gfp_t alloc_flags, __u32 conn_id, __u32 seqno,
		struct conn *rconn)
{
	struct net_device *dev = nb->dev;
	struct sk_buff *ret;
	struct skb_procstate *ps;
	char *dest;

	ret = alloc_skb(size + 9 + LL_ALLOCATED_SPACE(dev), alloc_flags);
	if (unlikely(0 == ret))
		return 0;

	ret->protocol = htons(ETH_P_COR);
	ret->dev = dev;

	skb_reserve(ret, LL_RESERVED_SPACE(dev));
	if (unlikely(dev_hard_header(ret, dev, ETH_P_COR, nb->mac,
			dev->dev_addr, ret->len) < 0)) {
		kfree_skb(ret);
		return 0;
	}
	skb_reset_network_header(ret);

	ps = skb_pstate(ret);
	ps->rconn = rconn;
	memset(&(ps->funcstate.retransmit_queue), 0,
			sizeof(ps->funcstate.retransmit_queue));
#warning todo funcstate.retransmit_queue.htab_entry
	ps->funcstate.retransmit_queue.conn_id = conn_id;
	ps->funcstate.retransmit_queue.seqno = seqno;
	ps->funcstate.retransmit_queue.nb = nb;
#warning todo decr refcnt
	ref_counter_incr(&(nb->refs));

	dest = skb_put(ret, 9);

	dest[0] = PACKET_TYPE_DATA;
	dest += 1;

	put_u32(dest, conn_id, 1);
	dest += 4;
	put_u32(dest, seqno, 1);

	return ret;
}
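/* Build a data packet for an outgoing connection and advance its send
 * sequence number by the packet's payload size. */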
struct sk_buff *create_packet_conn(struct conn *target, int size,
		gfp_t alloc_flags)
{
	__u32 connid = target->target.out.conn_id;
	__u32 seqno;

	BUG_ON(target->targettype != TARGET_OUT);

	seqno = target->target.out.seqno;
	target->target.out.seqno += size;

	return create_packet(target->target.out.nb, size, alloc_flags,
			connid, seqno, target);
}
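/* Kernel (control) packets use conn_id 0 and a per-neighbor sequence
 * counter. */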
struct sk_buff *create_packet_kernel(struct neighbor *nb, int size,
		gfp_t alloc_flags)
{
	__u32 seqno = atomic_add_return(1, &(nb->kpacket_seqno));
	return create_packet(nb, size, alloc_flags, 0, seqno, 0);
}
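/* Flush a sub-MSS chunk of buffered connection data through the
 * control-message path instead of a full data packet. */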
void send_conn_flushdata(struct conn *rconn, char *data, __u32 datalen)
{
	__u32 seqno;
	struct control_msg_out *cm = alloc_control_msg();

	seqno = rconn->target.out.seqno;
	rconn->target.out.seqno += datalen;

#warning todo retransmit/controlmsg == 0
	send_conndata(cm, rconn->target.out.nb, rconn->target.out.conn_id,
			seqno, data, data, datalen);
}
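/* Register an skb for retransmission: hash it into the retransmits table and
 * append it to the neighbor's retransmit list, arming the retransmit timer
 * if the list was empty. */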
static void schedule_retransmit(struct sk_buff *skb, struct neighbor *nb)
{
	unsigned long iflags;

	int first;
	struct skb_procstate *ps = skb_pstate(skb);
	struct retransmit_matchparam rm;

	rm.conn_id = ps->funcstate.retransmit_queue.conn_id;
	rm.seqno = ps->funcstate.retransmit_queue.seqno;
	rm.nb = nb;

	set_retrans_timeout(skb, nb);
	ref_counter_init(&(ps->funcstate.retransmit_queue.refs), &skb_refcnt);
	htable_insert(&retransmits, (char *) skb, rm_to_key(&rm));
	spin_lock_irqsave(&(nb->retrans_lock), iflags);
	first = unlikely(skb_queue_empty(&(nb->retrans_list)));
	__skb_queue_tail(&(nb->retrans_list), skb);
	if (unlikely(first))
		mod_timer(&(nb->retrans_timer),
				ps->funcstate.retransmit_queue.timeout);
	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

	ref_counter_incr(&(ps->funcstate.retransmit_queue.refs));
}
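/* Queue an skb for retransmission, take a reference on its connection, and
 * transmit it. */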
void send_packet(struct sk_buff *skb, struct neighbor *nb)
{
	struct skb_procstate *ps = skb_pstate(skb);

	schedule_retransmit(skb, nb);

	ref_counter_incr(&(ps->rconn->refs));

	cor_xmit(skb, 0); /* initial transmission (of a clone) */
}
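/* Handle an incoming ack: look up the matching unacked skb, remove it from
 * the retransmits table and the neighbor's retransmit list, and drop its
 * reference. */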
void ack_received(struct neighbor *nb, __u32 conn_id, __u32 seqno)
{
	unsigned long iflags;

	int ret;
	struct sk_buff *skb = 0;
	struct skb_procstate *ps;
	struct retransmit_matchparam rm;

	rm.conn_id = conn_id;
	rm.seqno = seqno;
	rm.nb = nb;

	skb = (struct sk_buff *) htable_get(&retransmits, rm_to_key(&rm), &rm);

	if (0 == skb) {
		printk(KERN_ERR "bogus/duplicate ack received");
		return;
	}

	printk(KERN_ERR "good ack received");

	ps = skb_pstate(skb);

	ret = htable_delete(&retransmits, rm_to_key(&rm), &rm);
	if (unlikely(ret)) {
		/* somebody else has already deleted it in the meantime */
		return;
	}

	spin_lock_irqsave(&(nb->retrans_lock), iflags);

	if (unlikely(nb->retrans_list.next == skb)) {
		/* unlink the skb from the retransmit list */
		skb->next->prev = skb->prev;
		skb->prev->next = skb->next;

		if (unlikely(skb_queue_empty(&(nb->retrans_list))))
			mod_timer(&(nb->retrans_timer),
					jiffies + nb->latency);
	}

	spin_unlock_irqrestore(&(nb->retrans_lock), iflags);

	ref_counter_decr(&(ps->funcstate.retransmit_queue.refs));
}
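/* Push buffered outgoing data: send full-MSS packets directly and flush any
 * remainder through the control-message path. */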
void flush_out(struct conn *rconn)
{
	int targetmss = mss(rconn->target.out.nb);

	__u32 len;
	char *buf;

	if (unlikely(rconn->target.out.conn_id == 0))
		return;

	while (rconn->buf.read_remaining >= targetmss) {
		struct sk_buff *newskb = create_packet_conn(rconn, targetmss,
				GFP_KERNEL);

		char *dst = skb_put(newskb, targetmss);
		databuf_pull(&(rconn->buf), dst, targetmss);
		databuf_ackread(&(rconn->buf));
		send_packet(newskb, rconn->target.out.nb);
	}

	if (rconn->buf.read_remaining == 0)
		return;

	len = rconn->buf.read_remaining;
	buf = kmalloc(len, GFP_KERNEL);

	databuf_pull(&(rconn->buf), buf, len);
	databuf_ackread(&(rconn->buf));

	send_conn_flushdata(rconn, buf, len);
}
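/* htable collision callback: an entry matches only if conn_id, seqno and
 * neighbor all agree (the key alone is just conn_id ^ seqno). */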
static int matches_skb_connid_seqno(void *htentry, void *searcheditem)
{
	struct sk_buff *skb = (struct sk_buff *) htentry;
	struct skb_procstate *ps = skb_pstate(skb);
	struct retransmit_matchparam *rm = (struct retransmit_matchparam *)
			searcheditem;

	return rm->conn_id == ps->funcstate.retransmit_queue.conn_id &&
			rm->seqno == ps->funcstate.retransmit_queue.seqno &&
			rm->nb == ps->funcstate.retransmit_queue.nb;
}
static inline __u32 retransmit_entryoffset(void)
{
	return offsetof(struct sk_buff, cb) + offsetof(struct skb_procstate,
			funcstate.retransmit_queue.htab_entry);
}
static inline __u32 retransmit_refsoffset(void)
{
	return offsetof(struct sk_buff, cb) + offsetof(struct skb_procstate,
			funcstate.retransmit_queue.refs);
}
int __init cor_snd_init(void)
{
	htable_init(&retransmits, matches_skb_connid_seqno,
			retransmit_entryoffset(), retransmit_refsoffset());

	return 0;
}
MODULE_LICENSE("GPL");