2 * Simple traffic shaper for Linux NET3.
4 * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
5 * http://www.redhat.com
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
12 * Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
13 * warranty for any of this software. This material is provided
14 * "AS-IS" and at no charge.
20 * Compute time length of frame at regulated speed
21 * Add frame to queue at appropriate point
22 * Adjust time length computation for followup frames
23 * Any frame that falls outside of its boundaries is freed
25 * We work to the following constants
27 * SHAPER_QLEN Maximum queued frames
28 * SHAPER_LATENCY Bounding latency on a frame. Leaving this latency
29 * window drops the frame. This stops us queueing
30 * frames for a long time and confusing a remote
32 * SHAPER_MAXSLIP Maximum time a priority frame may jump forward.
33 * That bounds the penalty we will inflict on low
35 * SHAPER_BURST Time range we call "now" in order to reduce
36 * system load. The more we make this the burstier
37 * the behaviour, the better local performance you
38 * get through packet clustering on routers and the
39 * worse the remote end gets to judge rtts.
41 * This is designed to handle lower speed links ( < 200K/second or so). We
42 * run off a 100-150Hz base clock typically. This gives us a resolution at
43 * 200Kbit/second of about 2Kbit or 256 bytes. Above that our timer
44 * resolution may start to cause much more burstiness in the traffic. We
45 * could avoid a lot of that by calling kick_shaper() at the end of the
46 * tied device transmissions. If you run above about 100K second you
47 * may need to tune the supposed speed rate for the right values.
50 * Downing the interface under the shaper before the shaper
51 * will render your machine defunct. Don't for now shape over
52 * PPP or SLIP therefore!
53 * This will be fixed in BETA4
57 * bh_atomic() SMP races fixes and rewritten the locking code to
58 * be SMP safe and irq-mask friendly.
59 * NOTE: we can't use start_bh_atomic() in kick_shaper()
60 * because it's going to be recalled from an irq handler,
61 * and synchronize_bh() is a nono if called from irq context.
62 * 1999 Andrea Arcangeli
64 * Device statistics (tx_packets, tx_bytes,
65 * tx_drops: queue_over_time and collisions: max_queue_exceeded)
66 * 1999/06/18 Jordi Murgo <savage@apostols.org>
69 #include <linux/module.h>
70 #include <linux/kernel.h>
71 #include <linux/sched.h>
72 #include <linux/ptrace.h>
73 #include <linux/fcntl.h>
75 #include <linux/malloc.h>
76 #include <linux/string.h>
77 #include <linux/errno.h>
78 #include <linux/netdevice.h>
79 #include <linux/etherdevice.h>
80 #include <linux/skbuff.h>
81 #include <linux/if_arp.h>
82 #include <linux/init.h>
85 #include <linux/if_shaper.h>
int sh_debug;		/* Debug flag: when non-zero the driver traces queueing/kick activity via printk() */

#define SHAPER_BANNER	"CymruNet Traffic Shaper BETA 0.04 for Linux 2.1\n"
95 static int shaper_lock(struct shaper
*sh
)
98 * Lock in an interrupt must fail
100 while (test_and_set_bit(0, &sh
->locked
))
103 sleep_on(&sh
->wait_queue
);
111 static void shaper_kick(struct shaper
*sh
);
113 static void shaper_unlock(struct shaper
*sh
)
115 clear_bit(0, &sh
->locked
);
116 wake_up(&sh
->wait_queue
);
121 * Compute clocks on a buffer
124 static int shaper_clocks(struct shaper
*shaper
, struct sk_buff
*skb
)
126 int t
=skb
->len
/shaper
->bytespertick
;
131 * Set the speed of a shaper. We compute this in bytes per tick since
132 * thats how the machine wants to run. Quoted input is in bits per second
133 * as is traditional (note not BAUD). We assume 8 bit bytes.
136 static void shaper_setspeed(struct shaper
*shaper
, int bitspersec
)
138 shaper
->bitspersec
=bitspersec
;
139 shaper
->bytespertick
=(bitspersec
/HZ
)/8;
140 if(!shaper
->bytespertick
)
141 shaper
->bytespertick
++;
145 * Throw a frame at a shaper.
148 static int shaper_qframe(struct shaper
*shaper
, struct sk_buff
*skb
)
153 * Get ready to work on this shaper. Lock may fail if its
154 * an interrupt and locked.
157 if(!shaper_lock(shaper
))
159 ptr
=shaper
->sendq
.prev
;
162 * Set up our packet details
166 skb
->shapeclock
=shaper
->recovery
;
167 if(time_before(skb
->shapeclock
, jiffies
))
168 skb
->shapeclock
=jiffies
;
169 skb
->priority
=0; /* short term bug fix */
170 skb
->shapestamp
=jiffies
;
173 * Time slots for this packet.
176 skb
->shapelen
= shaper_clocks(shaper
,skb
);
178 #ifdef SHAPER_COMPLEX /* and broken.. */
180 while(ptr
&& ptr
!=(struct sk_buff
*)&shaper
->sendq
)
183 && jiffies
- ptr
->shapeclock
< SHAPER_MAXSLIP
)
185 struct sk_buff
*tmp
=ptr
->prev
;
188 * It goes before us therefore we slip the length
192 ptr
->shapeclock
+=skb
->shapelen
;
193 ptr
->shapelatency
+=skb
->shapelen
;
196 * The packet may have slipped so far back it
199 if(ptr
->shapelatency
> SHAPER_LATENCY
)
209 if(ptr
==NULL
|| ptr
==(struct sk_buff
*)&shaper
->sendq
)
210 skb_queue_head(&shaper
->sendq
,skb
);
215 * Set the packet clock out time according to the
216 * frames ahead. Im sure a bit of thought could drop
219 for(tmp
=skb_peek(&shaper
->sendq
); tmp
!=NULL
&& tmp
!=ptr
; tmp
=tmp
->next
)
220 skb
->shapeclock
+=tmp
->shapelen
;
227 * Up our shape clock by the time pending on the queue
228 * (Should keep this in the shaper as a variable..)
230 for(tmp
=skb_peek(&shaper
->sendq
); tmp
!=NULL
&&
231 tmp
!=(struct sk_buff
*)&shaper
->sendq
; tmp
=tmp
->next
)
232 skb
->shapeclock
+=tmp
->shapelen
;
234 * Queue over time. Spill packet.
236 if(skb
->shapeclock
-jiffies
> SHAPER_LATENCY
) {
238 shaper
->stats
.tx_dropped
++;
240 skb_queue_tail(&shaper
->sendq
, skb
);
244 printk("Frame queued.\n");
245 if(skb_queue_len(&shaper
->sendq
)>SHAPER_QLEN
)
247 ptr
=skb_dequeue(&shaper
->sendq
);
249 shaper
->stats
.collisions
++;
251 shaper_unlock(shaper
);
256 * Transmit from a shaper
259 static void shaper_queue_xmit(struct shaper
*shaper
, struct sk_buff
*skb
)
261 struct sk_buff
*newskb
=skb_clone(skb
, GFP_ATOMIC
);
263 printk("Kick frame on %p\n",newskb
);
266 newskb
->dev
=shaper
->dev
;
269 printk("Kick new frame to %s, %d\n",
270 shaper
->dev
->name
,newskb
->priority
);
271 dev_queue_xmit(newskb
);
273 shaper
->stats
.tx_bytes
+=newskb
->len
;
274 shaper
->stats
.tx_packets
++;
277 printk("Kicked new frame out.\n");
/*
 *	Timer handler for the shaping clock.  Runs in irq context; just
 *	re-enter the kick logic for the owning shaper (data is the
 *	struct shaper * stashed by shaper_alloc()).
 */

static void shaper_timer(unsigned long data)
{
	struct shaper *sh = (struct shaper *)data;
	shaper_kick(sh);	/* drain whatever is now due */
}
293 * Kick a shaper queue and try and do something sensible with the
297 static void shaper_kick(struct shaper
*shaper
)
302 * Shaper unlock will kick
305 if (test_and_set_bit(0, &shaper
->locked
))
308 printk("Shaper locked.\n");
309 mod_timer(&shaper
->timer
, jiffies
);
315 * Walk the list (may be empty)
318 while((skb
=skb_peek(&shaper
->sendq
))!=NULL
)
321 * Each packet due to go out by now (within an error
322 * of SHAPER_BURST) gets kicked onto the link
326 printk("Clock = %d, jiffies = %ld\n", skb
->shapeclock
, jiffies
);
327 if(time_before_eq(skb
->shapeclock
- jiffies
, SHAPER_BURST
))
330 * Pull the frame and get interrupts back on.
334 if (shaper
->recovery
< skb
->shapeclock
+ skb
->shapelen
)
335 shaper
->recovery
= skb
->shapeclock
+ skb
->shapelen
;
337 * Pass on to the physical target device via
338 * our low level packet thrower.
342 shaper_queue_xmit(shaper
, skb
); /* Fire */
353 mod_timer(&shaper
->timer
, skb
->shapeclock
);
355 clear_bit(0, &shaper
->locked
);
360 * Flush the shaper queues on a closedown
363 static void shaper_flush(struct shaper
*shaper
)
366 if(!shaper_lock(shaper
))
368 printk(KERN_ERR
"shaper: shaper_flush() called by an irq!\n");
371 while((skb
=skb_dequeue(&shaper
->sendq
))!=NULL
)
373 shaper_unlock(shaper
);
377 * Bring the interface up. We just disallow this until a
381 static int shaper_open(struct net_device
*dev
)
383 struct shaper
*shaper
=dev
->priv
;
386 * Can't open until attached.
387 * Also can't open until speed is set, or we'll get
388 * a division by zero.
391 if(shaper
->dev
==NULL
)
393 if(shaper
->bitspersec
==0)
400 * Closing a shaper flushes the queues.
403 static int shaper_close(struct net_device
*dev
)
405 struct shaper
*shaper
=dev
->priv
;
406 shaper_flush(shaper
);
408 del_timer(&shaper
->timer
);
415 * Revectored calls. We alter the parameters and call the functions
416 * for our attached device. This enables us to bandwidth allocate after
417 * ARP and other resolutions and not before.
421 static int shaper_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
423 struct shaper
*sh
=dev
->priv
;
424 return shaper_qframe(sh
, skb
);
427 static struct net_device_stats
*shaper_get_stats(struct net_device
*dev
)
429 struct shaper
*sh
=dev
->priv
;
433 static int shaper_header(struct sk_buff
*skb
, struct net_device
*dev
,
434 unsigned short type
, void *daddr
, void *saddr
, unsigned len
)
436 struct shaper
*sh
=dev
->priv
;
439 printk("Shaper header\n");
441 v
=sh
->hard_header(skb
,sh
->dev
,type
,daddr
,saddr
,len
);
446 static int shaper_rebuild_header(struct sk_buff
*skb
)
448 struct shaper
*sh
=skb
->dev
->priv
;
449 struct net_device
*dev
=skb
->dev
;
452 printk("Shaper rebuild header\n");
454 v
=sh
->rebuild_header(skb
);
460 static int shaper_cache(struct neighbour
*neigh
, struct hh_cache
*hh
)
462 struct shaper
*sh
=neigh
->dev
->priv
;
463 struct net_device
*tmp
;
466 printk("Shaper header cache bind\n");
469 ret
=sh
->hard_header_cache(neigh
,hh
);
474 static void shaper_cache_update(struct hh_cache
*hh
, struct net_device
*dev
,
475 unsigned char *haddr
)
477 struct shaper
*sh
=dev
->priv
;
479 printk("Shaper cache update\n");
480 sh
->header_cache_update(hh
, sh
->dev
, haddr
);
484 static int shaper_neigh_setup(struct neighbour
*n
)
486 if (n
->nud_state
== NUD_NONE
) {
487 n
->ops
= &arp_broken_ops
;
488 n
->output
= n
->ops
->output
;
493 static int shaper_neigh_setup_dev(struct net_device
*dev
, struct neigh_parms
*p
)
495 if (p
->tbl
->family
== AF_INET
) {
496 p
->neigh_setup
= shaper_neigh_setup
;
503 static int shaper_attach(struct net_device
*shdev
, struct shaper
*sh
, struct net_device
*dev
)
506 sh
->hard_start_xmit
=dev
->hard_start_xmit
;
507 sh
->get_stats
=dev
->get_stats
;
510 sh
->hard_header
=dev
->hard_header
;
511 shdev
->hard_header
= shaper_header
;
514 shdev
->hard_header
= NULL
;
516 if(dev
->rebuild_header
)
518 sh
->rebuild_header
= dev
->rebuild_header
;
519 shdev
->rebuild_header
= shaper_rebuild_header
;
522 shdev
->rebuild_header
= NULL
;
525 if(dev
->hard_header_cache
)
527 sh
->hard_header_cache
= dev
->hard_header_cache
;
528 shdev
->hard_header_cache
= shaper_cache
;
532 shdev
->hard_header_cache
= NULL
;
535 if(dev
->header_cache_update
)
537 sh
->header_cache_update
= dev
->header_cache_update
;
538 shdev
->header_cache_update
= shaper_cache_update
;
541 shdev
->header_cache_update
= NULL
;
543 shdev
->header_cache_update
= NULL
;
544 shdev
->hard_header_cache
= NULL
;
546 shdev
->neigh_setup
= shaper_neigh_setup_dev
;
548 shdev
->hard_header_len
=dev
->hard_header_len
;
549 shdev
->type
=dev
->type
;
550 shdev
->addr_len
=dev
->addr_len
;
556 static int shaper_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
558 struct shaperconf
*ss
= (struct shaperconf
*)&ifr
->ifr_data
;
559 struct shaper
*sh
=dev
->priv
;
564 struct net_device
*them
=__dev_get_by_name(ss
->ss_name
);
569 return shaper_attach(dev
,dev
->priv
, them
);
574 strcpy(ss
->ss_name
, sh
->dev
->name
);
576 case SHAPER_SET_SPEED
:
577 shaper_setspeed(sh
,ss
->ss_speed
);
579 case SHAPER_GET_SPEED
:
580 ss
->ss_speed
=sh
->bitspersec
;
587 static struct shaper
*shaper_alloc(struct net_device
*dev
)
589 struct shaper
*sh
=kmalloc(sizeof(struct shaper
), GFP_KERNEL
);
592 memset(sh
,0,sizeof(*sh
));
593 skb_queue_head_init(&sh
->sendq
);
594 init_timer(&sh
->timer
);
595 sh
->timer
.function
=shaper_timer
;
596 sh
->timer
.data
=(unsigned long)sh
;
597 init_waitqueue_head(&sh
->wait_queue
);
602 * Add a shaper device to the system
605 int __init
shaper_probe(struct net_device
*dev
)
611 dev
->priv
= shaper_alloc(dev
);
615 dev
->open
= shaper_open
;
616 dev
->stop
= shaper_close
;
617 dev
->hard_start_xmit
= shaper_start_xmit
;
618 dev
->get_stats
= shaper_get_stats
;
619 dev
->set_multicast_list
= NULL
;
622 * Intialise the packet queues
625 dev_init_buffers(dev
);
628 * Handlers for when we attach to a device.
631 dev
->hard_header
= shaper_header
;
632 dev
->rebuild_header
= shaper_rebuild_header
;
634 dev
->hard_header_cache
= shaper_cache
;
635 dev
->header_cache_update
= shaper_cache_update
;
637 dev
->neigh_setup
= shaper_neigh_setup_dev
;
638 dev
->do_ioctl
= shaper_ioctl
;
639 dev
->hard_header_len
= 0;
640 dev
->type
= ARPHRD_ETHER
; /* initially */
641 dev
->set_mac_address
= NULL
;
644 dev
->tx_queue_len
= 10;
/* Name storage for the module's shaper device; filled in by dev_alloc_name(). */
656 static char devicename
[9];
/*
 * NOTE(review): this positional struct net_device initializer is garbled /
 * truncated in this copy.  The visible tail zeroes the base fields and sets
 * the init hook to shaper_probe; the exact field positions depend on the
 * 2.2-era struct net_device layout -- restore from the original source.
 */
658 static struct net_device dev_shape
=
663 0, 0, 0, NULL
, shaper_probe
666 int init_module(void)
668 int err
=dev_alloc_name(&dev_shape
,"shaper%d");
671 printk(SHAPER_BANNER
);
672 if (register_netdev(&dev_shape
) != 0)
674 printk("Traffic shaper initialised.\n");
678 void cleanup_module(void)
680 struct shaper
*sh
=dev_shape
.priv
;
683 * No need to check MOD_IN_USE, as sys_delete_module() checks.
684 * To be unloadable we must be closed and detached so we don't
685 * need to flush things.
688 unregister_netdev(&dev_shape
);
691 * Free up the private structure, or leak memory :-)
694 dev_shape
.priv
= NULL
;
/*
 * NOTE(review): the four static (non-module) shaper devices below use
 * positional struct net_device initializers that are garbled / truncated in
 * this copy.  Each visibly zeroes its base fields and sets the init hook to
 * shaper_probe; the name fields and exact field positions depend on the
 * 2.2-era struct net_device layout -- restore from the original source.
 */
699 static struct net_device dev_sh0
=
704 0, 0, 0, NULL
, shaper_probe
708 static struct net_device dev_sh1
=
713 0, 0, 0, NULL
, shaper_probe
717 static struct net_device dev_sh2
=
722 0, 0, 0, NULL
, shaper_probe
725 static struct net_device dev_sh3
=
730 0, 0, 0, NULL
, shaper_probe
733 void shaper_init(void)
735 register_netdev(&dev_sh0
);
736 register_netdev(&dev_sh1
);
737 register_netdev(&dev_sh2
);
738 register_netdev(&dev_sh3
);