 * Simple traffic shaper for Linux NET3.
 *
 * (c) Copyright 1996 Alan Cox <alan@redhat.com>, All Rights Reserved.
 *     http://www.redhat.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Neither Alan Cox nor CymruNet Ltd. admit liability nor provide
 * warranty for any of this software. This material is provided
 * "AS-IS" and at no charge.
 * Compute time length of frame at regulated speed
 * Add frame to queue at appropriate point
 * Adjust time length computation for followup frames
 * Any frame that falls outside of its boundaries is freed
 * We work to the following constants:
 *
 *	SHAPER_QLEN	Maximum queued frames
 *	SHAPER_LATENCY	Bounding latency on a frame. Leaving this latency
 *			window drops the frame. This stops us queueing
 *			frames for a long time and confusing a remote
 *			host.
 *	SHAPER_MAXSLIP	Maximum time a priority frame may jump forward.
 *			That bounds the penalty we will inflict on low
 *			priority traffic.
 *	SHAPER_BURST	Time range we call "now" in order to reduce
 *			system load. The more we make this the burstier
 *			the behaviour, the better local performance you
 *			get through packet clustering on routers and the
 *			worse the remote end gets to judge rtts.
 * This is designed to handle lower speed links ( < 200K/second or so). We
 * run off a 100-150Hz base clock typically. This gives us a resolution at
 * 200Kbit/second of about 2Kbit or 256 bytes. Above that our timer
 * resolution may start to cause much more burstiness in the traffic. We
 * could avoid a lot of that by calling kick_shaper() at the end of the
 * tied device transmissions. If you run above about 100K/second you
 * may need to tune the supposed speed rate for the right values.
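 *
 * As a worked sketch of that arithmetic (assuming the typical HZ=100 base
 * clock mentioned above): shaping to 200Kbit/second gives 200000/100 =
 * 2000 bits, i.e. roughly 250 bytes, of link time per tick, which is the
 * per-tick resolution quoted above.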
 *
 * Downing the interface underneath the shaper before downing the shaper
 * itself will render your machine defunct. Don't shape over PPP or SLIP
 * for now, therefore!
 * This will be fixed in BETA4
 * Fixed bh_atomic() SMP races and rewrote the locking code to
 * be SMP safe and irq-mask friendly.
 * NOTE: we can't use start_bh_atomic() in kick_shaper()
 * because it is going to be called again from an irq handler,
 * and synchronize_bh() is a no-no if called from irq context.
 *						1999 Andrea Arcangeli
 *
 * Device statistics (tx_packets, tx_bytes,
 * tx_drops: queue_over_time and collisions: max_queue_exceeded).
 *				1999/06/18 Jordi Murgo <savage@apostols.org>
 *
 * Use skb->cb for private data.
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/if_shaper.h>
	unsigned long	shapeclock;	/* Time it should go out */
	unsigned long	shapestamp;	/* Stamp for shaper */
	__u32		shapelatency;	/* Latency on frame */
	__u32		shapelen;	/* Frame length in clocks */
	__u16		shapepend;	/* Pending */
#define SHAPERCB(skb)	((struct shaper_cb *) ((skb)->cb))
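
/*
 * A minimal compile-time guard for the layout assumed by SHAPERCB(), on the
 * assumption that skb->cb[] is the usual 48-byte scratch area; this is only
 * an illustrative sketch, placed (for example) at the top of shaper_init():
 *
 *	BUILD_BUG_ON(sizeof(struct shaper_cb) > sizeof(((struct sk_buff *)0)->cb));
 */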
static int sh_debug;		/* Debug flag */

#define SHAPER_BANNER	"CymruNet Traffic Shaper BETA 0.04 for Linux 2.1\n"
static int shaper_lock(struct shaper *sh)
	/*
	 *	Lock in an interrupt must fail
	 */
	while (test_and_set_bit(0, &sh->locked))
		sleep_on(&sh->wait_queue);
static void shaper_kick(struct shaper *sh);

static void shaper_unlock(struct shaper *sh)
	clear_bit(0, &sh->locked);
	wake_up(&sh->wait_queue);
 *	Compute clocks on a buffer

static int shaper_clocks(struct shaper *shaper, struct sk_buff *skb)
	int t = skb->len / shaper->bytespertick;
 *	Set the speed of a shaper. We compute this in bytes per tick since
 *	that's how the machine wants to run. Quoted input is in bits per second
 *	as is traditional (note not BAUD). We assume 8 bit bytes.
static void shaper_setspeed(struct shaper *shaper, int bitspersec)
	shaper->bitspersec = bitspersec;
	shaper->bytespertick = (bitspersec / HZ) / 8;
	if (!shaper->bytespertick)
		shaper->bytespertick++;
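
/*
 * Worked example (figures for illustration only, assuming HZ=100):
 * shaper_setspeed(sh, 64000) yields bytespertick = (64000/100)/8 = 80,
 * so shaper_clocks() above charges a 1500 byte frame 1500/80 = 18 ticks
 * of link time.
 */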
 *	Throw a frame at a shaper.

static int shaper_qframe(struct shaper *shaper, struct sk_buff *skb)
	/*
	 *	Get ready to work on this shaper. Locking may fail if it's
	 *	an interrupt and the shaper is already locked.
	 */
	if (!shaper_lock(shaper))
	ptr = shaper->sendq.prev;
	/*
	 *	Set up our packet details
	 */
	SHAPERCB(skb)->shapelatency = 0;
	SHAPERCB(skb)->shapeclock = shaper->recovery;
	if (time_before(SHAPERCB(skb)->shapeclock, jiffies))
		SHAPERCB(skb)->shapeclock = jiffies;
	skb->priority = 0;	/* short term bug fix */
	SHAPERCB(skb)->shapestamp = jiffies;

	/*
	 *	Time slots for this packet.
	 */
	SHAPERCB(skb)->shapelen = shaper_clocks(shaper, skb);
#ifdef SHAPER_COMPLEX /* and broken.. */

	while (ptr && ptr != (struct sk_buff *)&shaper->sendq)
			&& jiffies - SHAPERCB(ptr)->shapeclock < SHAPER_MAXSLIP)
			struct sk_buff *tmp = ptr->prev;

			/*
			 *	It goes before us therefore we slip the length
			 */
			SHAPERCB(ptr)->shapeclock += SHAPERCB(skb)->shapelen;
			SHAPERCB(ptr)->shapelatency += SHAPERCB(skb)->shapelen;
			/*
			 *	The packet may have slipped so far back it
			 */
			if (SHAPERCB(ptr)->shapelatency > SHAPER_LATENCY)

	if (ptr == NULL || ptr == (struct sk_buff *)&shaper->sendq)
		skb_queue_head(&shaper->sendq, skb);
		/*
		 *	Set the packet clock out time according to the
		 *	frames ahead. I'm sure a bit of thought could drop
		 */
		for (tmp = skb_peek(&shaper->sendq); tmp != NULL && tmp != ptr; tmp = tmp->next)
			SHAPERCB(skb)->shapeclock += SHAPERCB(tmp)->shapelen;
	/*
	 *	Up our shape clock by the time pending on the queue
	 *	(Should keep this in the shaper as a variable..)
	 */
	for (tmp = skb_peek(&shaper->sendq); tmp != NULL &&
	     tmp != (struct sk_buff *)&shaper->sendq; tmp = tmp->next)
		SHAPERCB(skb)->shapeclock += SHAPERCB(tmp)->shapelen;
	/*
	 *	Queue over time. Spill packet.
	 */
	if (SHAPERCB(skb)->shapeclock - jiffies > SHAPER_LATENCY) {
		shaper->stats.tx_dropped++;
	} else
		skb_queue_tail(&shaper->sendq, skb);
		printk("Frame queued.\n");
	if (skb_queue_len(&shaper->sendq) > SHAPER_QLEN)
		ptr = skb_dequeue(&shaper->sendq);
		shaper->stats.collisions++;
	shaper_unlock(shaper);
 *	Transmit from a shaper

static void shaper_queue_xmit(struct shaper *shaper, struct sk_buff *skb)
	struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		printk("Kick frame on %p\n", newskb);

		newskb->dev = shaper->dev;

			printk("Kick new frame to %s, %d\n",
				shaper->dev->name, newskb->priority);
		dev_queue_xmit(newskb);

		shaper->stats.tx_bytes += skb->len;
		shaper->stats.tx_packets++;
			printk("Kicked new frame out.\n");

 *	Timer handler for shaping clock

static void shaper_timer(unsigned long data)
	struct shaper *sh = (struct shaper *)data;
 *	Kick a shaper queue and try and do something sensible with the

static void shaper_kick(struct shaper *shaper)
	/*
	 *	Shaper unlock will kick
	 */
	if (test_and_set_bit(0, &shaper->locked))
			printk("Shaper locked.\n");
		mod_timer(&shaper->timer, jiffies);
	/*
	 *	Walk the list (may be empty)
	 */
	while ((skb = skb_peek(&shaper->sendq)) != NULL)

		/*
		 *	Each packet due to go out by now (within an error
		 *	of SHAPER_BURST) gets kicked onto the link
		 */
			printk("Clock = %ld, jiffies = %ld\n", SHAPERCB(skb)->shapeclock, jiffies);
		if (time_before_eq(SHAPERCB(skb)->shapeclock, jiffies + SHAPER_BURST))
			/*
			 *	Pull the frame and get interrupts back on.
			 */
			if (shaper->recovery <
			    SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen)
				shaper->recovery = SHAPERCB(skb)->shapeclock + SHAPERCB(skb)->shapelen;

			/*
			 *	Pass on to the physical target device via
			 *	our low level packet thrower.
			 */
			SHAPERCB(skb)->shapepend = 0;
			shaper_queue_xmit(shaper, skb);	/* Fire */

		mod_timer(&shaper->timer, SHAPERCB(skb)->shapeclock);

	clear_bit(0, &shaper->locked);
 *	Flush the shaper queues on a closedown

static void shaper_flush(struct shaper *shaper)
	if (!shaper_lock(shaper))
		printk(KERN_ERR "shaper: shaper_flush() called by an irq!\n");
	while ((skb = skb_dequeue(&shaper->sendq)) != NULL)
	shaper_unlock(shaper);
 *	Bring the interface up. We just disallow this until a

static int shaper_open(struct net_device *dev)
	struct shaper *shaper = dev->priv;

	/*
	 *	Can't open until attached.
	 *	Also can't open until speed is set, or we'll get
	 *	a division by zero.
	 */
	if (shaper->dev == NULL)
	if (shaper->bitspersec == 0)
 *	Closing a shaper flushes the queues.

static int shaper_close(struct net_device *dev)
	struct shaper *shaper = dev->priv;
	shaper_flush(shaper);
	del_timer_sync(&shaper->timer);
 *	Revectored calls. We alter the parameters and call the functions
 *	for our attached device. This enables us to bandwidth allocate after
 *	ARP and other resolutions and not before.

static int shaper_start_xmit(struct sk_buff *skb, struct net_device *dev)
	struct shaper *sh = dev->priv;
	return shaper_qframe(sh, skb);
static struct net_device_stats *shaper_get_stats(struct net_device *dev)
	struct shaper *sh = dev->priv;
static int shaper_header(struct sk_buff *skb, struct net_device *dev,
			 unsigned short type, void *daddr, void *saddr, unsigned len)
	struct shaper *sh = dev->priv;
		printk("Shaper header\n");
	v = sh->hard_header(skb, sh->dev, type, daddr, saddr, len);
static int shaper_rebuild_header(struct sk_buff *skb)
	struct shaper *sh = skb->dev->priv;
	struct net_device *dev = skb->dev;
		printk("Shaper rebuild header\n");
	v = sh->rebuild_header(skb);
static int shaper_cache(struct neighbour *neigh, struct hh_cache *hh)
	struct shaper *sh = neigh->dev->priv;
	struct net_device *tmp;
		printk("Shaper header cache bind\n");
	ret = sh->hard_header_cache(neigh, hh);
static void shaper_cache_update(struct hh_cache *hh, struct net_device *dev,
				unsigned char *haddr)
	struct shaper *sh = dev->priv;
		printk("Shaper cache update\n");
	sh->header_cache_update(hh, sh->dev, haddr);
static int shaper_neigh_setup(struct neighbour *n)
	if (n->nud_state == NUD_NONE) {
		n->ops = &arp_broken_ops;
		n->output = n->ops->output;
static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
	if (p->tbl->family == AF_INET) {
		p->neigh_setup = shaper_neigh_setup;

#else /* !(CONFIG_INET) */

static int shaper_neigh_setup_dev(struct net_device *dev, struct neigh_parms *p)
static int shaper_attach(struct net_device *shdev, struct shaper *sh, struct net_device *dev)
	sh->hard_start_xmit = dev->hard_start_xmit;
	sh->get_stats = dev->get_stats;

		sh->hard_header = dev->hard_header;
		shdev->hard_header = shaper_header;

		shdev->hard_header = NULL;

	if (dev->rebuild_header)
		sh->rebuild_header = dev->rebuild_header;
		shdev->rebuild_header = shaper_rebuild_header;

		shdev->rebuild_header = NULL;

	if (dev->hard_header_cache)
		sh->hard_header_cache = dev->hard_header_cache;
		shdev->hard_header_cache = shaper_cache;

		shdev->hard_header_cache = NULL;

	if (dev->header_cache_update)
		sh->header_cache_update = dev->header_cache_update;
		shdev->header_cache_update = shaper_cache_update;

		shdev->header_cache_update = NULL;

	shdev->header_cache_update = NULL;
	shdev->hard_header_cache = NULL;

	shdev->neigh_setup = shaper_neigh_setup_dev;

	shdev->hard_header_len = dev->hard_header_len;
	shdev->type = dev->type;
	shdev->addr_len = dev->addr_len;
static int shaper_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
	struct shaperconf *ss = (struct shaperconf *)&ifr->ifr_ifru;
	struct shaper *sh = dev->priv;

	if (ss->ss_cmd == SHAPER_SET_DEV || ss->ss_cmd == SHAPER_SET_SPEED)
		if (!capable(CAP_NET_ADMIN))

			struct net_device *them = __dev_get_by_name(ss->ss_name);
			return shaper_attach(dev, dev->priv, them);

			strcpy(ss->ss_name, sh->dev->name);

		case SHAPER_SET_SPEED:
			shaper_setspeed(sh, ss->ss_speed);

		case SHAPER_GET_SPEED:
			ss->ss_speed = sh->bitspersec;
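
/*
 * A minimal userspace sketch of driving this ioctl with the struct shaperconf
 * layout used above (the shapecfg utility is the usual client; the request
 * number SIOCDEVPRIVATE and the lack of error handling here are illustrative
 * assumptions, not taken from this driver):
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <linux/sockios.h>
 *	#include <linux/if_shaper.h>
 *
 *	int shaper_set_speed(int sock, const char *shaperdev, unsigned int bps)
 *	{
 *		struct ifreq ifr;
 *		struct shaperconf *ss = (struct shaperconf *)&ifr.ifr_ifru;
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		strncpy(ifr.ifr_name, shaperdev, IFNAMSIZ - 1);
 *		ss->ss_cmd = SHAPER_SET_SPEED;
 *		ss->ss_speed = bps;
 *		return ioctl(sock, SIOCDEVPRIVATE, &ifr);
 *	}
 */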
static void shaper_init_priv(struct net_device *dev)
	struct shaper *sh = dev->priv;

	skb_queue_head_init(&sh->sendq);
	init_timer(&sh->timer);
	sh->timer.function = shaper_timer;
	sh->timer.data = (unsigned long)sh;
	init_waitqueue_head(&sh->wait_queue);
 *	Add a shaper device to the system

static void __init shaper_setup(struct net_device *dev)
	SET_MODULE_OWNER(dev);

	shaper_init_priv(dev);

	dev->open = shaper_open;
	dev->stop = shaper_close;
	dev->hard_start_xmit = shaper_start_xmit;
	dev->get_stats = shaper_get_stats;
	dev->set_multicast_list = NULL;
	/*
	 *	Initialise the packet queues
	 */
	/*
	 *	Handlers for when we attach to a device.
	 */
	dev->hard_header = shaper_header;
	dev->rebuild_header = shaper_rebuild_header;
	dev->hard_header_cache = shaper_cache;
	dev->header_cache_update = shaper_cache_update;
	dev->neigh_setup = shaper_neigh_setup_dev;
	dev->do_ioctl = shaper_ioctl;
	dev->hard_header_len = 0;
	dev->type = ARPHRD_ETHER;	/* initially */
	dev->set_mac_address = NULL;

	dev->tx_queue_len = 10;
static int shapers = 1;

module_param(shapers, int, 0);
MODULE_PARM_DESC(shapers, "Traffic shaper: maximum number of shapers");

static int __init set_num_shapers(char *str)
	shapers = simple_strtol(str, NULL, 0);

__setup("shapers=", set_num_shapers);
static struct net_device **devs;

static unsigned int shapers_registered = 0;

static int __init shaper_init(void)
	struct net_device *dev;

	alloc_size = sizeof(*dev) * shapers;
	devs = kmalloc(alloc_size, GFP_KERNEL);
	memset(devs, 0, alloc_size);

	for (i = 0; i < shapers; i++) {

		snprintf(name, IFNAMSIZ, "shaper%d", i);
		dev = alloc_netdev(sizeof(struct shaper), name,

		if (register_netdev(dev)) {

		shapers_registered++;

	if (!shapers_registered) {

	return (shapers_registered ? 0 : -ENODEV);
static void __exit shaper_exit(void)
	for (i = 0; i < shapers_registered; i++) {
		unregister_netdev(devs[i]);
		free_netdev(devs[i]);

module_init(shaper_init);
module_exit(shaper_exit);
MODULE_LICENSE("GPL");