drivers/net/eql.c

/*
 * Equalizer Load-balancer for serial network interfaces.
 *
 * (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
 * NCM: Network and Communications Management, Inc.
 *
 * (c) Copyright 2002 David S. Miller (davem@redhat.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 * The author may be reached as simon@ncm.com, or C/O
 *    NCM
 *    Attn: Simon Janes
 *    6803 Whittier Ave
 *    McLean VA 22101
 *    Phone: 1-703-847-0040 ext 103
 */

/*
 * Sources:
 *   skeleton.c by Donald Becker.
 * Inspirations:
 *   The Harried and Overworked Alan Cox
 * Conspiracies:
 *   The Alan Cox and Mike McLagan plot to get someone else to do the code,
 *   which turned out to be me.
 */

/*
 * $Log: eql.c,v $
 * Revision 1.2  1996/04/11 17:51:52  guru
 * Added one-line eql_remove_slave patch.
 *
 * Revision 1.1  1996/04/11 17:44:17  guru
 * Initial revision
 *
 * Revision 3.13  1996/01/21 15:17:18  alan
 * tx_queue_len changes.
 * reformatted.
 *
 * Revision 3.12  1995/03/22 21:07:51  anarchy
 * Added capable() checks on configuration.
 * Moved header file.
 *
 * Revision 3.11  1995/01/19 23:14:31  guru
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *		(priority_Bps) + bytes_queued * 8;
 *
 * Revision 3.10  1995/01/19 23:07:53  guru
 * back to
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *		(priority_Bps) + bytes_queued;
 *
 * Revision 3.9  1995/01/19 22:38:20  guru
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *		(priority_Bps) + bytes_queued * 4;
 *
 * Revision 3.8  1995/01/19 22:30:55  guru
 *	slave_load = (ULONG_MAX - (ULONG_MAX / 2)) -
 *		(priority_Bps) + bytes_queued * 2;
 *
 * Revision 3.7  1995/01/19 21:52:35  guru
 * printk's trimmed out.
 *
 * Revision 3.6  1995/01/19 21:49:56  guru
 * This is working pretty well. I gained 1 K/s in speed.. now it's just
 * robustness and printk's to be diked out.
 *
 * Revision 3.5  1995/01/18 22:29:59  guru
 * still crashes the kernel when the lock_wait thing is woken up.
 *
 * Revision 3.4  1995/01/18 21:59:47  guru
 * Broken set-bit locking snapshot
 *
 * Revision 3.3  1995/01/17 22:09:18  guru
 * infinite sleep in a lock somewhere..
 *
 * Revision 3.2  1995/01/15 16:46:06  guru
 * Log trimmed of non-pertinent 1.x branch messages
 *
 * Revision 3.1  1995/01/15 14:41:45  guru
 * New Scheduler and timer stuff...
 *
 * Revision 1.15  1995/01/15 14:29:02  guru
 * Will make 1.14 (now 1.15) the 3.0 branch, and the 1.12 the 2.0 branch, the one
 * with the dumber scheduler
 *
 * Revision 1.14  1995/01/15 02:37:08  guru
 * shock.. the kept-new-versions could have zonked working
 * stuff.. shudder
 *
 * Revision 1.13  1995/01/15 02:36:31  guru
 * big changes
 *
 *	scheduler was torn out and replaced with something smarter
 *
 *	global names not prefixed with eql_ were renamed to protect
 *	against namespace collisions
 *
 *	a few more abstract interfaces were added to facilitate any
 *	potential change of datastructure.  the driver is still using
 *	a linked list of slaves.  going to a heap would be a bit of
 *	an overkill.
 *
 *	this compiles fine with no warnings.
 *
 *	the locking mechanism and timer stuff must be written however,
 *	this version will not work otherwise
 *
 * Sorry, I had to rewrite most of this for 2.5.x -DaveM
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>

#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_eql.h>
#include <linux/pkt_sched.h>

#include <asm/uaccess.h>

static int eql_open(struct net_device *dev);
static int eql_close(struct net_device *dev);
static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev);

#define eql_is_slave(dev)	((dev->flags & IFF_SLAVE) == IFF_SLAVE)
#define eql_is_master(dev)	((dev->flags & IFF_MASTER) == IFF_MASTER)

static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);

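/*
 * Periodic housekeeping: every EQL_DEFAULT_RESCHED_IVAL jiffies the timer
 * drains each live slave's bytes_queued estimate by its configured
 * priority_Bps, and removes any slave whose device is no longer up.
 */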
static void eql_timer(unsigned long param)
{
	equalizer_t *eql = (equalizer_t *) param;
	struct list_head *this, *tmp, *head;

	spin_lock(&eql->queue.lock);
	head = &eql->queue.all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave->bytes_queued -= slave->priority_Bps;
			if (slave->bytes_queued < 0)
				slave->bytes_queued = 0;
		} else {
			eql_kill_one_slave(&eql->queue, slave);
		}
	}
	spin_unlock(&eql->queue.lock);

	eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
	add_timer(&eql->timer);
}

static const char version[] __initconst =
	"Equalizer2002: Simon Janes (simon@ncm.com) and David S. Miller (davem@redhat.com)";

static const struct net_device_ops eql_netdev_ops = {
	.ndo_open	= eql_open,
	.ndo_stop	= eql_close,
	.ndo_do_ioctl	= eql_ioctl,
	.ndo_start_xmit	= eql_slave_xmit,
};

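/*
 * Setup callback for alloc_netdev(): prepare (but do not start) the
 * housekeeping timer, initialise the empty slave queue and replace the
 * Ethernet-style defaults with values suited to slow serial links
 * (576 byte MTU, ARPHRD_SLIP, a very short tx queue).
 */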
static void __init eql_setup(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	init_timer(&eql->timer);
	eql->timer.data = (unsigned long) eql;
	eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
	eql->timer.function = eql_timer;

	spin_lock_init(&eql->queue.lock);
	INIT_LIST_HEAD(&eql->queue.all_slaves);
	eql->queue.master_dev = dev;

	dev->netdev_ops = &eql_netdev_ops;

	/*
	 * Now we undo some of the things that eth_setup does
	 * that we don't like
	 */

	dev->mtu = EQL_DEFAULT_MTU;	/* set to 576 in if_eql.h */
	dev->flags = IFF_MASTER;

	dev->type = ARPHRD_SLIP;
	dev->tx_queue_len = 5;		/* Hands them off fast */
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
}

static int eql_open(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/* XXX We should force this off automatically for the user. */
	netdev_info(dev,
		    "remember to turn off Van-Jacobson compression on your slave devices\n");

	BUG_ON(!list_empty(&eql->queue.all_slaves));

	eql->min_slaves = 1;
	eql->max_slaves = EQL_DEFAULT_MAX_SLAVES; /* 4 usually... */

	add_timer(&eql->timer);

	return 0;
}

static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)
{
	list_del(&slave->list);
	queue->num_slaves--;
	slave->dev->flags &= ~IFF_SLAVE;
	dev_put(slave->dev);
	kfree(slave);
}

static void eql_kill_slave_queue(slave_queue_t *queue)
{
	struct list_head *head, *tmp, *this;

	spin_lock_bh(&queue->lock);

	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *s = list_entry(this, slave_t, list);

		eql_kill_one_slave(queue, s);
	}

	spin_unlock_bh(&queue->lock);
}

static int eql_close(struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);

	/*
	 * The timer has to be stopped first before we start hacking away
	 * at the data structure it scans every so often...
	 */

	del_timer_sync(&eql->timer);

	eql_kill_slave_queue(&eql->queue);

	return 0;
}

static int eql_enslave(struct net_device *dev, slaving_request_t __user *srq);
static int eql_emancipate(struct net_device *dev, slaving_request_t __user *srq);

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *sc);
static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *sc);

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mc);
static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mc);

static int eql_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (cmd != EQL_GETMASTRCFG && cmd != EQL_GETSLAVECFG &&
	    !capable(CAP_NET_ADMIN))
		return -EPERM;

	switch (cmd) {
	case EQL_ENSLAVE:
		return eql_enslave(dev, ifr->ifr_data);
	case EQL_EMANCIPATE:
		return eql_emancipate(dev, ifr->ifr_data);
	case EQL_GETSLAVECFG:
		return eql_g_slave_cfg(dev, ifr->ifr_data);
	case EQL_SETSLAVECFG:
		return eql_s_slave_cfg(dev, ifr->ifr_data);
	case EQL_GETMASTRCFG:
		return eql_g_master_cfg(dev, ifr->ifr_data);
	case EQL_SETMASTRCFG:
		return eql_s_master_cfg(dev, ifr->ifr_data);
	default:
		return -EOPNOTSUPP;
	}
}

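/*
 * Slave selection works on a simple load metric:
 *
 *	slave_load = (ULONG_MAX - ULONG_MAX / 2) - priority_Bps + bytes_queued * 8
 *
 * The constant term keeps the value well away from wrap-around; a larger
 * configured bandwidth (priority_Bps) lowers the load, while a larger
 * backlog of queued bytes raises it.  The slave with the smallest load
 * is chosen for the next packet.
 */
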
/* queue->lock must be held */
static slave_t *__eql_schedule_slaves(slave_queue_t *queue)
{
	unsigned long best_load = ~0UL;
	struct list_head *this, *tmp, *head;
	slave_t *best_slave;

	best_slave = NULL;

	/* Make a pass to set the best slave. */
	head = &queue->all_slaves;
	list_for_each_safe(this, tmp, head) {
		slave_t *slave = list_entry(this, slave_t, list);
		unsigned long slave_load, bytes_queued, priority_Bps;

		/* Go through the slave list once, updating best_slave
		 * whenever a new best_load is found.
		 */
		bytes_queued = slave->bytes_queued;
		priority_Bps = slave->priority_Bps;
		if ((slave->dev->flags & IFF_UP) == IFF_UP) {
			slave_load = (~0UL - (~0UL / 2)) -
				(priority_Bps) + bytes_queued * 8;

			if (slave_load < best_load) {
				best_load = slave_load;
				best_slave = slave;
			}
		} else {
			/* We found a dead slave, kill it. */
			eql_kill_one_slave(queue, slave);
		}
	}
	return best_slave;
}

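/*
 * Transmit path for the master device: pick the least loaded live slave,
 * charge the packet length to that slave's backlog and hand the skb to
 * the slave device's own queue.  If no usable slave exists the packet
 * is dropped.
 */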
static netdev_tx_t eql_slave_xmit(struct sk_buff *skb, struct net_device *dev)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;

	spin_lock(&eql->queue.lock);

	slave = __eql_schedule_slaves(&eql->queue);
	if (slave) {
		struct net_device *slave_dev = slave->dev;

		skb->dev = slave_dev;
		skb->priority = TC_PRIO_FILLER;
		slave->bytes_queued += skb->len;
		dev_queue_xmit(skb);
		dev->stats.tx_packets++;
	} else {
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
	}

	spin_unlock(&eql->queue.lock);

	return NETDEV_TX_OK;
}

/*
 * Private ioctl functions
 */

/* queue->lock must be held */
static slave_t *__eql_find_slave_dev(slave_queue_t *queue, struct net_device *dev)
{
	struct list_head *this, *head;

	head = &queue->all_slaves;
	list_for_each(this, head) {
		slave_t *slave = list_entry(this, slave_t, list);

		if (slave->dev == dev)
			return slave;
	}

	return NULL;
}

static inline int eql_is_full(slave_queue_t *queue)
{
	equalizer_t *eql = netdev_priv(queue->master_dev);

	if (queue->num_slaves >= eql->max_slaves)
		return 1;
	return 0;
}

/* queue->lock must be held */
static int __eql_insert_slave(slave_queue_t *queue, slave_t *slave)
{
	if (!eql_is_full(queue)) {
		slave_t *duplicate_slave = NULL;

		duplicate_slave = __eql_find_slave_dev(queue, slave->dev);
		if (duplicate_slave)
			eql_kill_one_slave(queue, duplicate_slave);

		dev_hold(slave->dev);
		list_add(&slave->list, &queue->all_slaves);
		queue->num_slaves++;
		slave->dev->flags |= IFF_SLAVE;

		return 0;
	}

	return -ENOSPC;
}

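/*
 * For reference, a minimal userspace sketch of how a slave is attached.
 * It assumes the slaving_request_t layout from <linux/if_eql.h> used by
 * this driver (slave_name and priority, in bits per second) and a master
 * device named "eql"; the interface names and numbers are illustrative
 * only.  A userspace management tool would follow this pattern:
 *
 *	struct ifreq ifr;
 *	slaving_request_t srq;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strcpy(ifr.ifr_name, "eql");		// master device
 *	strcpy(srq.slave_name, "ppp0");		// device to enslave
 *	srq.priority = 28800;			// link speed, bits per second
 *	ifr.ifr_data = (caddr_t) &srq;
 *	ioctl(fd, EQL_ENSLAVE, &ifr);		// handled by eql_enslave() below
 */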
static int eql_enslave(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	struct net_device *slave_dev;
	slaving_request_t srq;

	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
	if (!slave_dev)
		return -ENODEV;

	if ((master_dev->flags & IFF_UP) == IFF_UP) {
		/* slave is not a master & not already a slave: */
		if (!eql_is_master(slave_dev) && !eql_is_slave(slave_dev)) {
			slave_t *s = kmalloc(sizeof(*s), GFP_KERNEL);
			equalizer_t *eql = netdev_priv(master_dev);
			int ret;

			if (!s)
				return -ENOMEM;

			memset(s, 0, sizeof(*s));
			s->dev = slave_dev;
			s->priority = srq.priority;
			s->priority_bps = srq.priority;
			s->priority_Bps = srq.priority / 8;

			spin_lock_bh(&eql->queue.lock);
			ret = __eql_insert_slave(&eql->queue, s);
			if (ret)
				kfree(s);

			spin_unlock_bh(&eql->queue.lock);

			return ret;
		}
	}

	return -EINVAL;
}

static int eql_emancipate(struct net_device *master_dev, slaving_request_t __user *srqp)
{
	equalizer_t *eql = netdev_priv(master_dev);
	struct net_device *slave_dev;
	slaving_request_t srq;
	int ret;

	if (copy_from_user(&srq, srqp, sizeof (slaving_request_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, srq.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;
	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave_t *slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			eql_kill_one_slave(&eql->queue, slave);
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	return ret;
}

static int eql_g_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	equalizer_t *eql = netdev_priv(dev);
	slave_t *slave;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;

	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			sc.priority = slave->priority;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	if (!ret && copy_to_user(scp, &sc, sizeof (slave_config_t)))
		ret = -EFAULT;

	return ret;
}

static int eql_s_slave_cfg(struct net_device *dev, slave_config_t __user *scp)
{
	slave_t *slave;
	equalizer_t *eql;
	struct net_device *slave_dev;
	slave_config_t sc;
	int ret;

	if (copy_from_user(&sc, scp, sizeof (slave_config_t)))
		return -EFAULT;

	slave_dev = __dev_get_by_name(&init_net, sc.slave_name);
	if (!slave_dev)
		return -ENODEV;

	ret = -EINVAL;

	eql = netdev_priv(dev);
	spin_lock_bh(&eql->queue.lock);
	if (eql_is_slave(slave_dev)) {
		slave = __eql_find_slave_dev(&eql->queue, slave_dev);
		if (slave) {
			slave->priority = sc.priority;
			slave->priority_bps = sc.priority;
			slave->priority_Bps = sc.priority / 8;
			ret = 0;
		}
	}
	spin_unlock_bh(&eql->queue.lock);

	return ret;
}

static int eql_g_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	equalizer_t *eql;
	master_config_t mc;

	memset(&mc, 0, sizeof(master_config_t));

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		mc.max_slaves = eql->max_slaves;
		mc.min_slaves = eql->min_slaves;
		if (copy_to_user(mcp, &mc, sizeof (master_config_t)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}

static int eql_s_master_cfg(struct net_device *dev, master_config_t __user *mcp)
{
	equalizer_t *eql;
	master_config_t mc;

	if (copy_from_user(&mc, mcp, sizeof (master_config_t)))
		return -EFAULT;

	if (eql_is_master(dev)) {
		eql = netdev_priv(dev);
		eql->max_slaves = mc.max_slaves;
		eql->min_slaves = mc.min_slaves;
		return 0;
	}
	return -EINVAL;
}

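/*
 * The module registers a single master device named "eql" at load time;
 * slave devices are attached and detached at runtime through the ioctl
 * interface above.
 */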
static struct net_device *dev_eql;

static int __init eql_init_module(void)
{
	int err;

	pr_info("%s\n", version);

	dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup);
	if (!dev_eql)
		return -ENOMEM;

	err = register_netdev(dev_eql);
	if (err)
		free_netdev(dev_eql);
	return err;
}

static void __exit eql_cleanup_module(void)
{
	unregister_netdev(dev_eql);
	free_netdev(dev_eql);
}

module_init(eql_init_module);
module_exit(eql_cleanup_module);
MODULE_LICENSE("GPL");