matches_skb_connid_seqno bugfix on kernel packets; the ref counter field is_active is now gone...
cor_2_6_31.git / net/cor/snd.c
/*
 * Connection oriented routing
 * Copyright (C) 2007-2008 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
#include <linux/gfp.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

#include "cor.h"
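/*
 * Every outgoing data packet is tracked in a global hash table (keyed by
 * conn_id, seqno and neighbor) and on a per-neighbor retransmit queue
 * until the corresponding ack arrives; see schedule_retransmit(),
 * retransmit_timerfunc() and ack_received() below.
 */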
static struct htable retransmits;
static void free_skb(struct ref_counter *cnt)
{
	struct skb_procstate *ps = container_of(cnt, struct skb_procstate,
			funcstate.retransmit_queue.refs);
	struct sk_buff *skb = skb_from_pstate(ps);

	/* kernel packets have no connection attached (see send_packet()) */
	if (ps->rconn != 0)
		ref_counter_decr(&(ps->rconn->refs));
	kfree_skb(skb);
}

static struct ref_counter_def skb_refcnt = {
	.free = free_skb
};
struct retransmit_matchparam {
	__u32 conn_id;
	__u32 seqno;
	struct neighbor *nb;
};

static __u32 rm_to_key(struct retransmit_matchparam *rm)
{
	return rm->conn_id ^ rm->seqno;
}
/* static struct sk_buff * cor_dequeue(struct Qdisc *sch)
{
	struct sk_buff *ret;

	struct cor_sched_data *q = qdisc_priv(sch);

	struct list_head *ln = q->conn_list.next;
	struct conn *best = 0;

	__u64 currcost_limit = 0;
	__u64 currcost = 0;

	spin_lock(&(q->lock));

	if (!(skb_queue_empty(&(q->requeue_queue)))) {
		ret = __skb_dequeue(&(q->requeue_queue));
		goto out;
	}

	while (&(q->conn_list) != ln) {
		__u32 max1, max2, maxcost;
		struct conn *curr = (struct conn *)
				(((char *) ln) - offsetof(struct conn,
				target.out.queue_list));

		BUG_ON(TARGET_OUT != curr->targettype);
		max1 = (256 * ((__u64)curr->credits)) /
				((__u64)curr->bytes_queued + curr->avg_rate);

		max2 = (256 * ((__u64)curr->credits +
				curr->credit_sender - curr->credit_recp)) /
				((__u64)curr->bytes_queued + 2*curr->avg_rate);

		maxcost = max((__u32) 0, min((max1), (max2)));

		if (maxcost > currcost_limit) {
			currcost = currcost_limit;
			currcost_limit = maxcost;
			best = curr;
		}

		ln = ln->next;
	}

	best->credits -= currcost;

	ret = __skb_dequeue(&(best->target.out.queue));

	if (skb_queue_empty(&(best->target.out.queue))) {
		list_del(&(best->target.out.queue_list));
		best->target.out.qdisc_active = 0;
	}

out:
	spin_unlock(&(q->lock));

	if (likely(0 != ret)) {
		sch->qstats.backlog -= ret->len;
		sch->q.qlen--;
	}

	return ret;
}

static int cor_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct cor_sched_data *q = qdisc_priv(sch);
	struct conn *rconn;

	rconn = skb_pstate(skb)->rconn;

	BUG_ON(TARGET_OUT != rconn->targettype);

	spin_lock(&(rconn->target.out.qdisc_lock));

	__skb_queue_tail(&(rconn->target.out.queue), skb);

	if (unlikely(0 == rconn->target.out.qdisc_active)) {
		spin_lock(&(q->lock));
		list_add(&(rconn->target.out.queue_list), &(q->conn_list));
		rconn->target.out.qdisc_active = 1;
		spin_unlock(&(q->lock));
	}

	spin_unlock(&(rconn->target.out.qdisc_lock));

	sch->bstats.bytes += skb->len;
	sch->bstats.packets++;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;
} */
static void cor_xmit(struct sk_buff *skb, int atomic)
{
	struct sk_buff *skb2;

	BUG_ON(skb == 0);

	skb2 = skb_clone(skb, __GFP_DMA | (atomic ? GFP_ATOMIC : GFP_KERNEL));

	if (skb2 == 0) {
		printk(KERN_WARNING "cor_xmit: cannot clone skb, "
				"allocation failure?\n");
		return;
	}

	printk(KERN_ERR "xmit\n");
	dev_queue_xmit(skb2);
	printk(KERN_ERR "xmit2\n");
}
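/*
 * Retransmission timeout: 100ms plus the neighbor's measured latency
 * (nb->latency is presumably in milliseconds, since it is fed through
 * msecs_to_jiffies() here).
 */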
static void set_retrans_timeout(struct sk_buff *skb, struct neighbor *nb)
{
	struct skb_procstate *ps = skb_pstate(skb);
	ps->funcstate.retransmit_queue.timeout = jiffies +
			msecs_to_jiffies(100 + nb->latency);
}
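/*
 * Per-neighbor retransmit timer. Packets whose timeout has expired are
 * moved to the tail of the retransmit list with a fresh timeout and sent
 * again; the first packet that has not yet timed out determines when the
 * timer fires next.
 */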
void retransmit_timerfunc(unsigned long arg)
{
	unsigned long iflags;

	struct neighbor *nb = (struct neighbor *) arg;
	struct sk_buff *skb = 0;
	unsigned long timeout;

	while (1) {
		spin_lock_irqsave( &(nb->retrans_lock), iflags );
		skb = __skb_dequeue(&(nb->retrans_list));

		if (0 == skb)
			goto out;

		timeout = skb_pstate(skb)->funcstate.retransmit_queue.timeout;

		if (time_after(timeout, jiffies)) {
			__skb_queue_head(&(nb->retrans_list), skb);
			goto modtimer;
		}

		set_retrans_timeout(skb, nb);

		__skb_queue_tail(&(nb->retrans_list), skb);

		spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
		cor_xmit(skb, 1);
	}

modtimer:
	mod_timer(&(nb->retrans_timer), timeout);

out:
	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
}
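/*
 * Builds an skb for one data packet. The 9 byte COR header written below
 * consists of a one byte packet type (PACKET_TYPE_DATA) followed by the
 * 32 bit conn_id and the 32 bit seqno; the payload is appended by the
 * caller via skb_put().
 */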
static struct sk_buff *create_packet(struct neighbor *nb, int size,
		gfp_t alloc_flags, __u32 conn_id, __u32 seqno,
		struct conn *target)
{
	struct net_device *dev = nb->dev;
	struct sk_buff *ret;
	struct skb_procstate *ps;
	char *dest;

	ret = alloc_skb(size + 9 + LL_ALLOCATED_SPACE(dev), alloc_flags);
	if (unlikely(0 == ret))
		return 0;

	ret->protocol = htons(ETH_P_COR);
	ret->dev = dev;

	skb_reserve(ret, LL_RESERVED_SPACE(dev));
	if (unlikely(dev_hard_header(ret, dev, ETH_P_COR, nb->mac,
			dev->dev_addr, ret->len) < 0)) {
		kfree_skb(ret); /* do not leak the skb on header failure */
		return 0;
	}
	skb_reset_network_header(ret);

	ps = skb_pstate(ret);
	ps->rconn = target;
	memset(&(ps->funcstate.retransmit_queue), 0,
			sizeof(ps->funcstate.retransmit_queue));
#warning todo funcstate.retransmit_queue.htab_entry
	ps->funcstate.retransmit_queue.conn_id = conn_id;
	ps->funcstate.retransmit_queue.seqno = seqno;
	ps->funcstate.retransmit_queue.nb = nb;
#warning todo decr refcnt
	ref_counter_incr(&(nb->refs));

	dest = skb_put(ret, 9);
	BUG_ON(0 == dest);

	dest[0] = PACKET_TYPE_DATA;
	dest += 1;

	put_u32(dest, conn_id, 1);
	dest += 4;
	put_u32(dest, seqno, 1);
	dest += 4;

	return ret;
}
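/*
 * Sequence numbers of connection packets count bytes, not packets: the
 * connection's outgoing seqno is advanced by the payload size of each
 * packet created here.
 */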
struct sk_buff *create_packet_conn(struct conn *target, int size,
		gfp_t alloc_flags)
{
	__u32 connid = target->target.out.conn_id;
	__u32 seqno;

	BUG_ON(target->targettype != TARGET_OUT);

	seqno = target->target.out.seqno;
	target->target.out.seqno += size;

	return create_packet(target->target.out.nb, size, alloc_flags,
			connid, seqno, target);
}
struct sk_buff *create_packet_kernel(struct neighbor *nb, int size,
		gfp_t alloc_flags)
{
	__u32 seqno = atomic_add_return(1, &(nb->kpacket_seqno));
	return create_packet(nb, size, alloc_flags, 0, seqno, 0);
}
void send_conn_flushdata(struct conn *rconn, char *data, __u32 datalen)
{
	__u32 seqno;
	struct control_msg_out *cm = alloc_control_msg();

	seqno = rconn->target.out.seqno;
	rconn->target.out.seqno += datalen;

#warning todo retransmit/controlmsg == 0

	send_conndata(cm, rconn->target.out.nb, rconn->target.out.conn_id,
			seqno, data, data, datalen);
}
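/*
 * Registers an skb for retransmission: it is inserted into the global
 * retransmits hash table and appended to the neighbor's retransmit list;
 * if the list was empty, the retransmit timer is armed for this packet's
 * timeout.
 */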
static void schedule_retransmit(struct sk_buff *skb, struct neighbor *nb)
{
	unsigned long iflags;

	struct skb_procstate *ps = skb_pstate(skb);
	struct retransmit_matchparam rm;
	int first;

	rm.conn_id = ps->funcstate.retransmit_queue.conn_id;
	rm.seqno = ps->funcstate.retransmit_queue.seqno;
	rm.nb = nb;

	set_retrans_timeout(skb, nb);
	ref_counter_init(&(ps->funcstate.retransmit_queue.refs), &skb_refcnt);
	htable_insert(&retransmits, (char *) skb, rm_to_key(&rm));
	spin_lock_irqsave( &(nb->retrans_lock), iflags );
	first = unlikely(skb_queue_empty(&(nb->retrans_list)));
	__skb_queue_tail(&(nb->retrans_list), skb);

	if (first) {
		mod_timer(&(nb->retrans_timer),
				ps->funcstate.retransmit_queue.timeout);
	}

	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );

	ref_counter_incr(&(ps->funcstate.retransmit_queue.refs));
}
void send_packet(struct sk_buff *skb, struct neighbor *nb)
{
	struct skb_procstate *ps = skb_pstate(skb);

	schedule_retransmit(skb, nb);

	if (ps->rconn != 0)
		ref_counter_incr(&(ps->rconn->refs));

	cor_xmit(skb, 0);
}
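/*
 * Called when an ack for (conn_id, seqno) arrives from a neighbor: the
 * matching skb is looked up in the retransmits hash table, removed from
 * it and unlinked from the neighbor's retransmit list, so it will not be
 * sent again.
 */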
void ack_received(struct neighbor *nb, __u32 conn_id, __u32 seqno)
{
	unsigned long iflags;

	struct sk_buff *skb = 0;
	struct skb_procstate *ps;
	struct retransmit_matchparam rm;

	int first = 0;

	int ret;

	rm.conn_id = conn_id;
	rm.seqno = seqno;
	rm.nb = nb;

	skb = (struct sk_buff *) htable_get(&retransmits, rm_to_key(&rm), &rm);

	if (0 == skb) {
		printk(KERN_ERR "bogus/duplicate ack received\n");
		return;
	}
	printk(KERN_ERR "good ack received\n");

	ps = skb_pstate(skb);

	ret = htable_delete(&retransmits, rm_to_key(&rm), &rm);
	if (ret) {
		/* somebody else has already deleted it in the meantime */
		return;
	}

	spin_lock_irqsave( &(nb->retrans_lock), iflags );

	if (unlikely(nb->retrans_list.next == skb))
		first = 1;
	skb->next->prev = skb->prev;
	skb->prev->next = skb->next;

	if (first) {
		if (unlikely(skb_queue_empty(&(nb->retrans_list)))) {
			mod_timer(&(nb->retrans_timer), jiffies + nb->latency);
		}
	}

	spin_unlock_irqrestore( &(nb->retrans_lock), iflags );
}
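/*
 * Pushes buffered outgoing data of a connection to the neighbor: full
 * MSS-sized chunks are sent as data packets, any remainder is handed to
 * send_conn_flushdata() and travels as connection data in a control
 * message.
 */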
void flush_out(struct conn *rconn)
{
	int targetmss = mss(rconn->target.out.nb);

	char *buf;
	__u32 len;

	if (unlikely(rconn->target.out.conn_id == 0))
		return;

	while (rconn->buf.read_remaining >= targetmss) {
		struct sk_buff *newskb = create_packet_conn(rconn, targetmss,
				GFP_ATOMIC);
		char *dst;

		/* minimal guard: skb allocation may fail under pressure */
		if (unlikely(newskb == 0))
			break;

		dst = skb_put(newskb, targetmss);
		databuf_pull(&(rconn->buf), dst, targetmss);
		databuf_ackread(&(rconn->buf));
		send_packet(newskb, rconn->target.out.nb);
	}

	if (rconn->buf.read_remaining == 0)
		return;

	len = rconn->buf.read_remaining;
	buf = kmalloc(len, GFP_KERNEL);
	if (unlikely(buf == 0)) /* minimal guard, see above */
		return;

	databuf_pull(&(rconn->buf), buf, len);
	databuf_ackread(&(rconn->buf));

	send_conn_flushdata(rconn, buf, len);

	wake_sender(rconn);
}
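/*
 * The retransmits htable stores sk_buffs directly; the hash table entry
 * and the reference counter live inside struct skb_procstate in the skb's
 * control buffer (cb), so their offsets relative to struct sk_buff are
 * computed below and passed to htable_init().
 */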
static int matches_skb_connid_seqno(void *htentry, void *searcheditem)
{
	struct sk_buff *skb = (struct sk_buff *) htentry;
	struct skb_procstate *ps = skb_pstate(skb);
	struct retransmit_matchparam *rm = (struct retransmit_matchparam *)
			searcheditem;

	return rm->conn_id == ps->funcstate.retransmit_queue.conn_id &&
			rm->seqno == ps->funcstate.retransmit_queue.seqno &&
			rm->nb == ps->funcstate.retransmit_queue.nb;
}
static inline __u32 retransmit_entryoffset(void)
{
	return offsetof(struct sk_buff, cb) + offsetof(struct skb_procstate,
			funcstate.retransmit_queue.htab_entry);
}

static inline __u32 retransmit_refsoffset(void)
{
	return offsetof(struct sk_buff, cb) + offsetof(struct skb_procstate,
			funcstate.retransmit_queue.refs);
}
int __init cor_snd_init(void)
{
	htable_init(&retransmits, matches_skb_connid_seqno,
			retransmit_entryoffset(), retransmit_refsoffset());

	return 0;
}

MODULE_LICENSE("GPL");