/*
 * Equalizer Load-balancer for serial network interfaces.
 *
 * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
 * NCM: Network and Communications Management, Inc.
 *
 * (c) Copyright 2002 David S. Miller (davem@redhat.com)
 *
 *	This software may be used and distributed according to the terms
 *	of the GNU General Public License, incorporated herein by reference.
 *
 * The author may be reached as simon@ncm.com, or C/O
 *    Phone: 1-703-847-0040 ext 103
 */
/*
 * Sources:
 *   skeleton.c by Donald Becker.
 * Inspirations:
 *   The Harried and Overworked Alan Cox
 * Conspiracies:
 *   The Alan Cox and Mike McLagan plot to get someone else to do the code,
 *   which turned out to be me.
 */
/*
 * Revision 1.2  1996/04/11 17:51:52  guru
 * Added one-line eql_remove_slave patch.
 *
 * Revision 1.1  1996/04/11 17:44:17  guru
 * Initial revision
 *
 * Revision 3.13  1996/01/21 15:17:18  alan
 * tx_queue_len changes.
 *
 * Revision 3.12  1995/03/22 21:07:51  anarchy
 * Added capable() checks on configuration.
 *
 * Revision 3.11  1995/01/19 23:14:31  guru
 *		      slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *			(priority_Bps) + bytes_queued * 8;
 *
 * Revision 3.10  1995/01/19 23:07:53  guru
 *		      slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *			(priority_Bps) + bytes_queued;
 *
 * Revision 3.9  1995/01/19 22:38:20  guru
 *		      slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *			(priority_Bps) + bytes_queued * 4;
 *
 * Revision 3.8  1995/01/19 22:30:55  guru
 *		      slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *			(priority_Bps) + bytes_queued * 2;
 *
 * Revision 3.7  1995/01/19 21:52:35  guru
 * printk's trimmed out.
 *
 * Revision 3.6  1995/01/19 21:49:56  guru
 * This is working pretty well. I gained 1 K/s in speed.. now it's just
 * robustness and printk's to be diked out.
 *
 * Revision 3.5  1995/01/18 22:29:59  guru
 * still crashes the kernel when the lock_wait thing is woken up.
 *
 * Revision 3.4  1995/01/18 21:59:47  guru
 * Broken set-bit locking snapshot
 *
 * Revision 3.3  1995/01/17 22:09:18  guru
 * infinite sleep in a lock somewhere..
 *
 * Revision 3.2  1995/01/15 16:46:06  guru
 * Log trimmed of non-pertinent 1.x branch messages
 *
 * Revision 3.1  1995/01/15 14:41:45  guru
 * New Scheduler and timer stuff...
 *
 * Revision 1.15  1995/01/15 14:29:02  guru
 * Will make 1.14 (now 1.15) the 3.0 branch, and the 1.12 the 2.0 branch, the one
 * with the dumber scheduler
 *
 * Revision 1.14  1995/01/15 02:37:08  guru
 * shock.. the kept-new-versions could have zonked working
 * stuff.. shudder
 *
 * Revision 1.13  1995/01/15 02:36:31  guru
 *	scheduler was torn out and replaced with something smarter
 *
 *	global names not prefixed with eql_ were renamed to protect
 *	against namespace collisions
 *
 *	a few more abstract interfaces were added to facilitate any
 *	potential change of datastructure.  the driver is still using
 *	a linked list of slaves.  going to a heap would be a bit of
 *	an overkill.
 *
 *	this compiles fine with no warnings.
 *
 *	the locking mechanism and timer stuff must be written however,
 *	this version will not work otherwise
 *
 * Sorry, I had to rewrite most of this for 2.5.x -DaveM
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_eql.h>
#include <linux/pkt_sched.h>

#include <linux/uaccess.h>
static int eql_open(struct net_device *dev);
static int eql_close(struct net_device *dev);
static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);

#define eql_is_slave(dev)	((dev->flags & IFF_SLAVE) == IFF_SLAVE)
#define eql_is_master(dev)	((dev->flags & IFF_MASTER) == IFF_MASTER)

static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
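/*
 * Periodic housekeeping: every EQL_DEFAULT_RESCHED_IVAL jiffies the timer
 * drains each live slave's bytes_queued estimate by its configured rate
 * (priority_Bps) and reaps slaves whose underlying device is no longer up.
 */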
static void eql_timer(struct timer_list *t)
{
	equalizer_t *eql = from_timer(eql, t, timer);
	struct list_head *this, *tmp, *head;

	spin_lock(&eql->queue.lock);
	head = &eql->queue.all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave->bytes_queued -= slave->priority_Bps;
			if (slave->bytes_queued < 0)
				slave->bytes_queued = 0;
		} else {
			eql_kill_one_slave(&eql->queue, slave);
		}
	}
	spin_unlock(&eql->queue.lock);

	eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
	add_timer(&eql->timer);
}
static const char version[] __initconst =
	"Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)";
static const struct net_device_ops eql_netdev_ops = {
	.ndo_open	= eql_open,
	.ndo_stop	= eql_close,
	.ndo_do_ioctl	= eql_ioctl,
	.ndo_start_xmit	= eql_slave_xmit,
};
static void __init eql_setup(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	timer_setup(&eql->timer, eql_timer, 0);
	eql->timer.expires	= jiffies + EQL_DEFAULT_RESCHED_IVAL;

	spin_lock_init(&eql->queue.lock);
	INIT_LIST_HEAD(&eql->queue.all_slaves);
	eql->queue.master_dev	= dev;

	dev->netdev_ops		= &eql_netdev_ops;

	/*
	 *	Now we undo some of the things that eth_setup does
	 */

	dev->mtu		= EQL_DEFAULT_MTU;	/* set to 576 in if_eql.h */
	dev->flags		= IFF_MASTER;

	dev->type		= ARPHRD_SLIP;
	dev->tx_queue_len	= 5;		/* Hands them off fast */
}
static int eql_open(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/* XXX We should force this off automatically for the user. */
	netdev_info(dev,
		    "remember to turn off Van-Jacobson compression on your slave devices\n");

	BUG_ON(!list_empty(&eql->queue.all_slaves));

	eql->min_slaves = 1;
	eql->max_slaves = EQL_DEFAULT_MAX_SLAVES;	/* 4 usually... */

	add_timer(&eql->timer);

	return 0;
}
static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
{
	list_del(&slave->list);
	queue->num_slaves--;
	slave->dev->flags &= ~IFF_SLAVE;
	dev_put(slave->dev);
	kfree(slave);
}
static void eql_kill_slave_queue(slave_queue_t *queue)
{
	struct list_head *head, *tmp, *this;

	spin_lock_bh(&queue->lock);

	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *s = list_entry(this, slave_t, list);

		eql_kill_one_slave(queue, s);
	}

	spin_unlock_bh(&queue->lock);
}
static int eql_close(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/*
	 *	The timer has to be stopped first before we start hacking away
	 *	at the data structure it scans every so often...
	 */

	del_timer_sync(&eql->timer);

	eql_kill_slave_queue(&eql->queue);

	return 0;
}
static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc);

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc);
static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc);
static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG &&
	    !capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case EQL_ENSLAVE:
		return eql_enslave(dev, ifr->ifr_data);
	case EQL_EMANCIPATE:
		return eql_emancipate(dev, ifr->ifr_data);
	case EQL_GETSLAVECFG:
		return eql_g_slave_cfg(dev, ifr->ifr_data);
	case EQL_SETSLAVECFG:
		return eql_s_slave_cfg(dev, ifr->ifr_data);
	case EQL_GETMASTRCFG:
		return eql_g_master_cfg(dev, ifr->ifr_data);
	case EQL_SETMASTRCFG:
		return eql_s_master_cfg(dev, ifr->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
}
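/*
 * Illustrative userspace sketch (not part of this file) of how the ioctls
 * above are expected to be driven: fill a slaving_request_t, point ifr_data
 * at it and issue the request against the "eql" master on any socket.  The
 * slave name and priority below are made-up example values.
 *
 *	slaving_request_t srq;
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(srq.slave_name, "ppp0", IFNAMSIZ);
 *	srq.priority = 28800;			// slave line rate, bits/s
 *	strncpy(ifr.ifr_name, "eql", IFNAMSIZ);
 *	ifr.ifr_data = (void *)&srq;
 *	ioctl(fd, EQL_ENSLAVE, &ifr);		// needs CAP_NET_ADMIN
 */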
/* queue->lock must be held */
static slave_t *__eql_schedule_slaves(slave_queue_t *queue)
{
	unsigned long best_load = ~0UL;
	struct list_head *this, *tmp, *head;
	slave_t *best_slave;

	best_slave = NULL;

	/* Make a pass to set the best slave. */
	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);
		unsigned long slave_load, bytes_queued, priority_Bps;

		/* Go through the slave list once, updating best_slave
		 * whenever a new best_load is found.
		 */
		bytes_queued = slave->bytes_queued;
		priority_Bps = slave->priority_Bps;
		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave_load = (~0UL - (~0UL / 2)) -
				(priority_Bps) + bytes_queued * 8;

			if (slave_load < best_load) {
				best_load = slave_load;
				best_slave = slave;
			}
		} else {
			/* We found a dead slave, kill it. */
			eql_kill_one_slave(queue, slave);
		}
	}
	return best_slave;
}
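/*
 * A note on the load metric above: every up slave starts from the same bias
 * (~0UL - ~0UL / 2, roughly half of ULONG_MAX), has its configured byte rate
 * subtracted and 8x its current backlog added, so the scheduler prefers the
 * fastest, least-backlogged link.  The bias keeps the unsigned arithmetic
 * from wrapping for sane priority/backlog values.
 */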
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;

	spin_lock(&eql->queue.lock);

	slave = __eql_schedule_slaves(&eql->queue);
	if (slave) {
		struct net_device *slave_dev = slave->dev;

		skb->dev = slave_dev;
		skb->priority = TC_PRIO_FILLER;
		slave->bytes_queued += skb->len;
		dev_queue_xmit(skb);
		dev->stats.tx_packets++;
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
	}

	spin_unlock(&eql->queue.lock);

	return NETDEV_TX_OK;
}
/*
 *	Private ioctl functions
 */

/* queue->lock must be held */
static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev)
{
	struct list_head *this, *head;

	head = &queue->all_slaves;
	list_for_each(this, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if (slave->dev == dev)
			return slave;
	}

	return NULL;
}
static inline int eql_is_full(slave_queue_t *queue)
{
	equalizer_t *eql = netdev_priv(queue->master_dev);

	if (queue->num_slaves >= eql->max_slaves)
		return 1;
	return 0;
}
/* queue->lock must be held */
static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
{
	if (!eql_is_full(queue)) {
		slave_t *duplicate_slave = NULL;

		duplicate_slave = __eql_find_slave_dev(queue, slave->dev);
		if (duplicate_slave)
			eql_kill_one_slave(queue, duplicate_slave);

		dev_hold(slave->dev);
		list_add(&slave->list, &queue->all_slaves);
		queue->num_slaves++;
		slave->dev->flags |= IFF_SLAVE;

		return 0;
	}

	return -ENOSPC;
}
static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	struct net_device *slave_dev;
	slaving_request_t srq;

	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
	if (!slave_dev)
		return -ENODEV;

	if ((master_dev->flags & IFF_UP) == IFF_UP) {
		/* slave is not a master & not already a slave: */
		if (!eql_is_master(slave_dev) && !eql_is_slave(slave_dev)) {
			slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
			equalizer_t *eql = netdev_priv(master_dev);
			int ret;

			if (!s)
				return -ENOMEM;

			memset(s, 0, sizeof(*s));
			s->dev = slave_dev;
			s->priority = srq.priority;
			s->priority_bps = srq.priority;
			s->priority_Bps = srq.priority / 8;

			spin_lock_bh(&eql->queue.lock);
			ret = __eql_insert_slave(&eql->queue, s);
			if (ret)
				kfree(s);

			spin_unlock_bh(&eql->queue.lock);

			return ret;
		}
	}

	return -EINVAL;
}
static int eql_emancipate(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	equalizer_t *eql = netdev_priv(master_dev);
	struct net_device *slave_dev;
	slaving_request_t srq;
	int ret;

	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;
	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave_t *slave = __eql_find_slave_dev(&eql->queue, slave_dev);

		if (slave) {
			eql_kill_one_slave(&eql->queue, slave);
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	return ret;
}
static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;

	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			sc.priority = slave->priority;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	if (!ret && copy_to_user(scp, &sc, sizeof (slave_config_t)))
		ret = -EFAULT;

	return ret;
}
static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	slave_t *slave;
	equalizer_t *eql;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;

	eql = netdev_priv(dev);
	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			slave->priority = sc.priority;
			slave->priority_bps = sc.priority;
			slave->priority_Bps = sc.priority / 8;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	return ret;
}
static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	equalizer_t *eql;
	master_config_t mc;

	memset(&mc, 0, sizeof(master_config_t));

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		mc.max_slaves = eql->max_slaves;
		mc.min_slaves = eql->min_slaves;
		if (copy_to_user(mcp, &mc, sizeof (master_config_t)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}
static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	equalizer_t *eql;
	master_config_t mc;

	if (copy_from_user(&mc, mcp, sizeof (master_config_t)))
		return -EFAULT;

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		eql->max_slaves = mc.max_slaves;
		eql->min_slaves = mc.min_slaves;
		return 0;
	}
	return -EINVAL;
}
static struct net_device *dev_eql;

static int __init eql_init_module(void)
{
	int err;

	pr_info("%s\n", version);

	dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", NET_NAME_UNKNOWN,
			       eql_setup);
	if (!dev_eql)
		return -ENOMEM;

	err = register_netdev(dev_eql);
	if (err)
		free_netdev(dev_eql);

	return err;
}

static void __exit eql_cleanup_module(void)
{
	unregister_netdev(dev_eql);
	free_netdev(dev_eql);
}

module_init(eql_init_module);
module_exit(eql_cleanup_module);
MODULE_LICENSE("GPL");