/*
 * Equalizer Load-balancer for serial network interfaces.
 *
 * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
 * NCM: Network and Communications Management, Inc.
 *
 * (c) Copyright 2002 David S. Miller (davem@redhat.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 * The author may be reached as simon@ncm.com, or C/O
 *    Phone: 1-703-847-0040 ext 103
 */

/*
 * Sources:
 *   skeleton.c by Donald Becker.
 * Inspirations:
 *   The Harried and Overworked Alan Cox
 * Conspiracies:
 *   The Alan Cox and Mike McLagan plot to get someone else to do the code,
 *   which turned out to be me.
 */

/*
 * Revision 1.2  1996/04/11 17:51:52  guru
 * Added one-line eql_remove_slave patch.
 *
 * Revision 1.1  1996/04/11 17:44:17  guru
 *
 * Revision 3.13  1996/01/21 15:17:18  alan
 * tx_queue_len changes.
 *
 * Revision 3.12  1995/03/22 21:07:51  anarchy
 * Added capable() checks on configuration.
 *
 * Revision 3.11  1995/01/19 23:14:31  guru
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *		(priority_Bps) + bytes_queued * 8;
 *
 * Revision 3.10  1995/01/19 23:07:53  guru
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *		(priority_Bps) + bytes_queued;
 *
 * Revision 3.9  1995/01/19 22:38:20  guru
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *		(priority_Bps) + bytes_queued * 4;
 *
 * Revision 3.8  1995/01/19 22:30:55  guru
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *		(priority_Bps) + bytes_queued * 2;
 *
 * Revision 3.7  1995/01/19 21:52:35  guru
 * printk's trimmed out.
 *
 * Revision 3.6  1995/01/19 21:49:56  guru
 * This is working pretty well.  I gained 1 K/s in speed.. now it's just
 * robustness and printk's to be diked out.
 *
 * Revision 3.5  1995/01/18 22:29:59  guru
 * still crashes the kernel when the lock_wait thing is woken up.
 *
 * Revision 3.4  1995/01/18 21:59:47  guru
 * Broken set-bit locking snapshot
 *
 * Revision 3.3  1995/01/17 22:09:18  guru
 * infinite sleep in a lock somewhere..
 *
 * Revision 3.2  1995/01/15 16:46:06  guru
 * Log trimmed of non-pertinent 1.x branch messages
 *
 * Revision 3.1  1995/01/15 14:41:45  guru
 * New Scheduler and timer stuff...
 *
 * Revision 1.15  1995/01/15 14:29:02  guru
 * Will make 1.14 (now 1.15) the 3.0 branch, and the 1.12 the 2.0 branch, the one
 * with the dumber scheduler
 *
 * Revision 1.14  1995/01/15 02:37:08  guru
 * shock.. the kept-new-versions could have zonked working
 * stuff.. shudder
 *
 * Revision 1.13  1995/01/15 02:36:31  guru
 *	scheduler was torn out and replaced with something smarter
 *
 *	global names not prefixed with eql_ were renamed to protect
 *	against namespace collisions
 *
 *	a few more abstract interfaces were added to facilitate any
 *	potential change of datastructure.  the driver is still using
 *	a linked list of slaves.  going to a heap would be a bit of
 *	an overkill.
 *
 *	this compiles fine with no warnings.
 *
 *	the locking mechanism and timer stuff must be written however,
 *	this version will not work otherwise
 *
 * Sorry, I had to rewrite most of this for 2.5.x -DaveM
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>		/* kmalloc()/kfree() */
#include <linux/timer.h>
#include <linux/netdevice.h>

#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_eql.h>

#include <asm/uaccess.h>
static int eql_open(struct net_device *dev);
static int eql_close(struct net_device *dev);
static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *eql_get_stats(struct net_device *dev);
#define eql_is_slave(dev)	((dev->flags & IFF_SLAVE) == IFF_SLAVE)
#define eql_is_master(dev)	((dev->flags & IFF_MASTER) == IFF_MASTER)
static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
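/*
 * Periodic bookkeeping: every EQL_DEFAULT_RESCHED_IVAL jiffies the timer
 * below drains each live slave's bytes_queued estimate by its per-second
 * byte budget (priority_Bps), clamping at zero, and reaps any slave whose
 * underlying device is no longer IFF_UP.
 */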
static void eql_timer(unsigned long param)
{
	equalizer_t *eql = (equalizer_t *) param;
	struct list_head *this, *tmp, *head;

	spin_lock_bh(&eql->queue.lock);
	head = &eql->queue.all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave->bytes_queued -= slave->priority_Bps;
			if (slave->bytes_queued < 0)
				slave->bytes_queued = 0;
		} else {
			eql_kill_one_slave(&eql->queue, slave);
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
	add_timer(&eql->timer);
}
static char version[] __initdata =
	"Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)\n";
static void __init eql_setup(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	SET_MODULE_OWNER(dev);

	init_timer(&eql->timer);
	eql->timer.data		= (unsigned long) eql;
	eql->timer.expires	= jiffies + EQL_DEFAULT_RESCHED_IVAL;
	eql->timer.function	= eql_timer;

	spin_lock_init(&eql->queue.lock);
	INIT_LIST_HEAD(&eql->queue.all_slaves);
	eql->queue.master_dev	= dev;

	dev->open		= eql_open;
	dev->stop		= eql_close;
	dev->do_ioctl		= eql_ioctl;
	dev->hard_start_xmit	= eql_slave_xmit;
	dev->get_stats		= eql_get_stats;

	/*
	 * Now we undo some of the things that eth_setup does
	 * that we don't like.
	 */
	dev->mtu		= EQL_DEFAULT_MTU;	/* set to 576 in if_eql.h */
	dev->flags		= IFF_MASTER;

	dev->type		= ARPHRD_SLIP;
	dev->tx_queue_len	= 5;			/* Hands them off fast */
}
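/*
 * Why eql_open() below nags about Van Jacobson compression: VJ TCP header
 * compression delta-encodes each frame against the previous frame seen on
 * the same serial line.  Once frames are spread round-robin across several
 * slave links, that per-line state no longer matches, so VJ compression
 * has to be switched off on the slaves.
 */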
static int eql_open(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/* XXX We should force this off automatically for the user. */
	printk(KERN_INFO "%s: remember to turn off Van-Jacobson compression on "
	       "your slave devices.\n", dev->name);

	BUG_ON(!list_empty(&eql->queue.all_slaves));

	eql->min_slaves = 1;
	eql->max_slaves = EQL_DEFAULT_MAX_SLAVES;	/* 4 usually... */

	add_timer(&eql->timer);

	return 0;
}
static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
{
	list_del(&slave->list);
	queue->num_slaves--;
	slave->dev->flags &= ~IFF_SLAVE;
	dev_put(slave->dev);	/* drop the reference taken at enslave time */
	kfree(slave);
}
static void eql_kill_slave_queue(slave_queue_t *queue)
{
	struct list_head *head, *tmp, *this;

	spin_lock_bh(&queue->lock);

	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *s = list_entry(this, slave_t, list);

		eql_kill_one_slave(queue, s);
	}

	spin_unlock_bh(&queue->lock);
}
static int eql_close(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/*
	 * The timer has to be stopped first before we start hacking away
	 * at the data structure it scans every so often...
	 */
	del_timer_sync(&eql->timer);

	eql_kill_slave_queue(&eql->queue);

	return 0;
}
static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);
static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc);
static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc);
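/*
 * The dispatcher below is driven from userland through an ordinary socket
 * ioctl.  A minimal sketch (hypothetical userspace code, not part of this
 * driver; "sl0" is an assumed SLIP slave and priority is the line speed in
 * bits per second):
 *
 *	slaving_request_t srq;
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strcpy(srq.slave_name, "sl0");
 *	srq.priority = 28800;
 *	strcpy(ifr.ifr_name, "eql");
 *	ifr.ifr_data = (char *) &srq;
 *	if (ioctl(fd, EQL_ENSLAVE, &ifr) < 0)
 *		perror("EQL_ENSLAVE");
 *
 * EQL_EMANCIPATE takes the same slaving_request_t to detach a slave again.
 */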
static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG &&
	    !capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case EQL_ENSLAVE:
		return eql_enslave(dev, ifr->ifr_data);
	case EQL_EMANCIPATE:
		return eql_emancipate(dev, ifr->ifr_data);
	case EQL_GETSLAVECFG:
		return eql_g_slave_cfg(dev, ifr->ifr_data);
	case EQL_SETSLAVECFG:
		return eql_s_slave_cfg(dev, ifr->ifr_data);
	case EQL_GETMASTRCFG:
		return eql_g_master_cfg(dev, ifr->ifr_data);
	case EQL_SETMASTRCFG:
		return eql_s_master_cfg(dev, ifr->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
}
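/*
 * Scheduling note: the load metric below starts every live slave at the
 * same large base value (~0UL - ~0UL/2, i.e. ULONG_MAX/2 + 1), subtracts
 * the slave's configured bytes-per-second priority and adds
 * 8 * bytes_queued, so the link with the most spare capacity wins.  For
 * example, two slaves with priority_Bps = 3600 but 1200 vs. 200 queued
 * bytes differ in load by 8 * 1000 = 8000, and the emptier link is picked.
 */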
/* queue->lock must be held */
static slave_t *__eql_schedule_slaves(slave_queue_t *queue)
{
	unsigned long best_load = ~0UL;
	struct list_head *this, *tmp, *head;
	slave_t *best_slave;

	best_slave = NULL;

	/* Make a pass to set the best slave. */
	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);
		unsigned long slave_load, bytes_queued, priority_Bps;

		/* Go through the slave list once, updating best_slave
		 * whenever a new best_load is found.
		 */
		bytes_queued = slave->bytes_queued;
		priority_Bps = slave->priority_Bps;
		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave_load = (~0UL - (~0UL / 2)) -
				(priority_Bps) + bytes_queued * 8;

			if (slave_load < best_load) {
				best_load = slave_load;
				best_slave = slave;
			}
		} else {
			/* We found a dead slave, kill it. */
			eql_kill_one_slave(queue, slave);
		}
	}
	return best_slave;
}
static int eql_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;

	spin_lock(&eql->queue.lock);

	slave = __eql_schedule_slaves(&eql->queue);
	if (slave) {
		struct net_device *slave_dev = slave->dev;

		skb->dev = slave_dev;
		skb->priority = 1;
		slave->bytes_queued += skb->len;
		dev_queue_xmit(skb);
		eql->stats.tx_packets++;
	} else {
		eql->stats.tx_dropped++;
		dev_kfree_skb(skb);
	}

	spin_unlock(&eql->queue.lock);

	return 0;
}
static struct net_device_stats *eql_get_stats(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);
	return &eql->stats;
}
/*
 * Private ioctl functions
 */
/* queue->lock must be held */
static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev)
{
	struct list_head *this, *head;

	head = &queue->all_slaves;
	list_for_each(this, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if (slave->dev == dev)
			return slave;
	}

	return NULL;
}
static inline int eql_is_full(slave_queue_t *queue)
{
	equalizer_t *eql = netdev_priv(queue->master_dev);

	if (queue->num_slaves >= eql->max_slaves)
		return 1;
	return 0;
}
/* queue->lock must be held */
static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
{
	if (!eql_is_full(queue)) {
		slave_t *duplicate_slave = NULL;

		duplicate_slave = __eql_find_slave_dev(queue, slave->dev);
		if (duplicate_slave)
			eql_kill_one_slave(queue, duplicate_slave);

		list_add(&slave->list, &queue->all_slaves);
		queue->num_slaves++;
		slave->dev->flags |= IFF_SLAVE;

		return 0;
	}

	return -ENOSPC;
}
static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	struct net_device *slave_dev;
	slaving_request_t srq;

	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
		return -EFAULT;

	slave_dev = dev_get_by_name(srq.slave_name);
	if (!slave_dev)
		return -ENODEV;

	if ((master_dev->flags & IFF_UP) == IFF_UP) {
		/* slave is not a master & not already a slave: */
		if (!eql_is_master(slave_dev) &&
		    !eql_is_slave(slave_dev)) {
			slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
			equalizer_t *eql = netdev_priv(master_dev);
			int ret;

			if (!s) {
				dev_put(slave_dev);
				return -ENOMEM;
			}

			memset(s, 0, sizeof(*s));
			s->dev = slave_dev;
			s->priority = srq.priority;
			s->priority_bps = srq.priority;		/* bits per second */
			s->priority_Bps = srq.priority / 8;	/* bytes per second */

			spin_lock_bh(&eql->queue.lock);
			ret = __eql_insert_slave(&eql->queue, s);
			spin_unlock_bh(&eql->queue.lock);
			if (ret) {
				/* insertion failed, drop our reference and the slave */
				dev_put(slave_dev);
				kfree(s);
			}

			return ret;
		}
	}

	dev_put(slave_dev);
	return -EINVAL;
}
static int eql_emancipate(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	equalizer_t *eql = netdev_priv(master_dev);
	struct net_device *slave_dev;
	slaving_request_t srq;
	int ret;

	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
		return -EFAULT;

	slave_dev = dev_get_by_name(srq.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;
	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave_t *slave = __eql_find_slave_dev(&eql->queue,
						      slave_dev);
		if (slave) {
			eql_kill_one_slave(&eql->queue, slave);
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	dev_put(slave_dev);
	return ret;
}
static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
		return -EFAULT;

	slave_dev = dev_get_by_name(sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;
	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			sc.priority = slave->priority;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	dev_put(slave_dev);

	if (!ret && copy_to_user(scp, &sc, sizeof (slave_config_t)))
		ret = -EFAULT;

	return ret;
}
static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	slave_t *slave;
	equalizer_t *eql;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
		return -EFAULT;

	slave_dev = dev_get_by_name(sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;
	eql = netdev_priv(dev);
	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			slave->priority = sc.priority;
			slave->priority_bps = sc.priority;
			slave->priority_Bps = sc.priority / 8;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	dev_put(slave_dev);
	return ret;
}
static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	equalizer_t *eql;
	master_config_t mc;

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		mc.max_slaves = eql->max_slaves;
		mc.min_slaves = eql->min_slaves;
		if (copy_to_user(mcp, &mc, sizeof (master_config_t)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}
static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	equalizer_t *eql;
	master_config_t mc;

	if (copy_from_user(&mc, mcp, sizeof (master_config_t)))
		return -EFAULT;

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		eql->max_slaves = mc.max_slaves;
		eql->min_slaves = mc.min_slaves;
		return 0;
	}
	return -EINVAL;
}
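/*
 * A minimal setup sketch from the shell (assumes the historical eql_enslave
 * userspace tool and two hypothetical SLIP slaves; see
 * Documentation/networking/eql.txt for the full story):
 *
 *	# modprobe eql
 *	# ifconfig eql 192.168.1.1 up
 *	# eql_enslave eql sl0 28800
 *	# eql_enslave eql sl1 28800
 */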
static struct net_device *dev_eql;

static int __init eql_init_module(void)
{
	int err;

	printk(version);

	dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup);
	if (!dev_eql)
		return -ENOMEM;

	err = register_netdev(dev_eql);
	if (err)
		free_netdev(dev_eql);
	return err;
}

static void __exit eql_cleanup_module(void)
{
	unregister_netdev(dev_eql);
	free_netdev(dev_eql);
}

module_init(eql_init_module);
module_exit(eql_cleanup_module);
MODULE_LICENSE("GPL");