Revert "[PATCH] paravirt: Add startup infrastructure for paravirtualization"
[pv_ops_mirror.git] / drivers / net / plip.c
blob8754cf3356b03610d092275d8a457f274aad3c15
1 /* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
2 /* PLIP: A parallel port "network" driver for Linux. */
3 /* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */
4 /*
5 * Authors: Donald Becker <becker@scyld.com>
6 * Tommy Thorn <thorn@daimi.aau.dk>
7 * Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
8 * Alan Cox <gw4pts@gw4pts.ampr.org>
9 * Peter Bauer <100136.3530@compuserve.com>
10 * Niibe Yutaka <gniibe@mri.co.jp>
11 * Nimrod Zimerman <zimerman@mailandnews.com>
13 * Enhancements:
14 * Modularization and ifreq/ifmap support by Alan Cox.
15 * Rewritten by Niibe Yutaka.
16 * parport-sharing awareness code by Philip Blundell.
17 * SMP locking by Niibe Yutaka.
18 * Support for parallel ports with no IRQ (poll mode),
19 * Modifications to use the parallel port API
20 * by Nimrod Zimerman.
22 * Fixes:
23 * Niibe Yutaka
24 * - Module initialization.
25 * - MTU fix.
26 * - Make sure other end is OK, before sending a packet.
27 * - Fix immediate timer problem.
29 * Al Viro
30 * - Changed {enable,disable}_irq handling to make it work
31 * with new ("stack") semantics.
33 * This program is free software; you can redistribute it and/or
34 * modify it under the terms of the GNU General Public License
35 * as published by the Free Software Foundation; either version
36 * 2 of the License, or (at your option) any later version.
40 * Original version and the name 'PLIP' from Donald Becker <becker@scyld.com>
41 * inspired by Russ Nelson's parallel port packet driver.
43 * NOTE:
44 * Tanabe Hiroyasu had changed the protocol, and it was in Linux v1.0.
45 * Because of the necessity to communicate to DOS machines with the
46 * Crynwr packet driver, Peter Bauer changed the protocol again
47 * back to original protocol.
49 * This version follows original PLIP protocol.
50 * So, this PLIP can't communicate the PLIP of Linux v1.0.
54 * To use with DOS box, please do (Turn on ARP switch):
55 * # ifconfig plip[0-2] arp
57 static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
60 Sources:
61 Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
62 "parallel.asm" parallel port packet driver.
64 The "Crynwr" parallel port standard specifies the following protocol:
65 Trigger by sending nibble '0x8' (this causes interrupt on other end)
66 count-low octet
67 count-high octet
68 ... data octets
69 checksum octet
70 Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
71 <wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
73 The packet is encapsulated as if it were ethernet.
75 The cable used is a de facto standard parallel null cable -- sold as
76 a "LapLink" cable by various places. You'll need a 12-conductor cable to
77 make one yourself. The wiring is:
78 SLCTIN 17 - 17
79 GROUND 25 - 25
80 D0->ERROR 2 - 15 15 - 2
81 D1->SLCT 3 - 13 13 - 3
82 D2->PAPOUT 4 - 12 12 - 4
83 D3->ACK 5 - 10 10 - 5
84 D4->BUSY 6 - 11 11 - 6
85 Do not connect the other pins. They are
86 D5,D6,D7 are 7,8,9
87 STROBE is 1, FEED is 14, INIT is 16
88 extra grounds are 18,19,20,21,22,23,24
91 #include <linux/module.h>
92 #include <linux/kernel.h>
93 #include <linux/types.h>
94 #include <linux/fcntl.h>
95 #include <linux/interrupt.h>
96 #include <linux/string.h>
97 #include <linux/if_ether.h>
98 #include <linux/in.h>
99 #include <linux/errno.h>
100 #include <linux/delay.h>
101 #include <linux/init.h>
102 #include <linux/netdevice.h>
103 #include <linux/etherdevice.h>
104 #include <linux/inetdevice.h>
105 #include <linux/skbuff.h>
106 #include <linux/if_plip.h>
107 #include <linux/workqueue.h>
108 #include <linux/spinlock.h>
109 #include <linux/parport.h>
110 #include <linux/bitops.h>
112 #include <net/neighbour.h>
114 #include <asm/system.h>
115 #include <asm/irq.h>
116 #include <asm/byteorder.h>
117 #include <asm/semaphore.h>
119 /* Maximum number of devices to support. */
120 #define PLIP_MAX 8
122 /* Use 0 for production, 1 for verification, >2 for debug */
123 #ifndef NET_DEBUG
124 #define NET_DEBUG 1
125 #endif
126 static const unsigned int net_debug = NET_DEBUG;
/* Enable/disable the machine IRQ; irq == -1 marks a polled (IRQ-less)
 * port.  Wrapped in do { } while (0) so each expands to a single
 * statement and cannot swallow an `else` of an enclosing unbraced if. */
#define ENABLE(irq)	do { if (irq != -1) enable_irq(irq); } while (0)
#define DISABLE(irq)	do { if (irq != -1) disable_irq(irq); } while (0)
131 /* In micro second */
132 #define PLIP_DELAY_UNIT 1
134 /* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
135 #define PLIP_TRIGGER_WAIT 500
137 /* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
138 #define PLIP_NIBBLE_WAIT 3000
140 /* Bottom halves */
141 static void plip_kick_bh(struct work_struct *work);
142 static void plip_bh(struct work_struct *work);
143 static void plip_timer_bh(struct work_struct *work);
145 /* Interrupt handler */
146 static void plip_interrupt(int irq, void *dev_id);
148 /* Functions for DEV methods */
149 static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
150 static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
151 unsigned short type, void *daddr,
152 void *saddr, unsigned len);
153 static int plip_hard_header_cache(struct neighbour *neigh,
154 struct hh_cache *hh);
155 static int plip_open(struct net_device *dev);
156 static int plip_close(struct net_device *dev);
157 static struct net_device_stats *plip_get_stats(struct net_device *dev);
158 static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
159 static int plip_preempt(void *handle);
160 static void plip_wakeup(void *handle);
/* Per-port link state machine (net_local.connection). */
enum plip_connection_state {
	PLIP_CN_NONE=0,		/* idle */
	PLIP_CN_RECEIVE,	/* a packet is being received */
	PLIP_CN_SEND,		/* a packet is being sent */
	PLIP_CN_CLOSING,	/* transfer done, closing the link */
	PLIP_CN_ERROR		/* waiting for the other end to settle */
};

/* Progress of the packet currently in flight (plip_local.state). */
enum plip_packet_state {
	PLIP_PK_DONE=0,
	PLIP_PK_TRIGGER,	/* handshake / trigger phase */
	PLIP_PK_LENGTH_LSB,
	PLIP_PK_LENGTH_MSB,
	PLIP_PK_DATA,
	PLIP_PK_CHECKSUM
};

/* Which half of the current byte (nibble) is in flight. */
enum plip_nibble_state {
	PLIP_NB_BEGIN,
	PLIP_NB_1,
	PLIP_NB_2,
};
/* Per-direction (send or receive) transfer bookkeeping. */
struct plip_local {
	enum plip_packet_state state;	/* where we are in the packet */
	enum plip_nibble_state nibble;	/* where we are in the byte */
	union {
		struct {
#if defined(__LITTLE_ENDIAN)
			unsigned char lsb;
			unsigned char msb;
#elif defined(__BIG_ENDIAN)
			unsigned char msb;
			unsigned char lsb;
#else
#error	"Please fix the endianness defines in <asm/byteorder.h>"
#endif
		} b;
		unsigned short h;
	} length;			/* packet length, byte-addressable */
	unsigned short byte;		/* index of byte being transferred */
	unsigned char  checksum;	/* running 8-bit sum of data bytes */
	unsigned char  data;		/* scratch byte (rx: received checksum) */
	struct sk_buff *skb;		/* packet being sent/received */
};
/* Device private data, one instance per plip interface. */
struct net_local {
	struct net_device_stats enet_stats;
	struct net_device *dev;		/* back-pointer to our net_device */
	struct work_struct immediate;	/* runs plip_bh */
	struct delayed_work deferred;	/* retry kick (plip_kick_bh) */
	struct delayed_work timer;	/* poll loop when dev->irq == -1 */
	struct plip_local snd_data;	/* transmit-side transfer state */
	struct plip_local rcv_data;	/* receive-side transfer state */
	struct pardevice *pardev;	/* our parport registration */
	unsigned long trigger;		/* handshake timeout, PLIP_DELAY_UNITs */
	unsigned long nibble;		/* per-nibble timeout, PLIP_DELAY_UNITs */
	enum plip_connection_state connection;
	unsigned short timeout_count;	/* consecutive timeouts seen so far */
	int is_deferred;		/* nonzero: a deferred retry is queued */
	int port_owner;			/* nonzero: we currently own the port */
	int should_relinquish;		/* release the port once link is idle */
	/* Saved ethernet hooks; we wrap them to rewrite addresses. */
	int (*orig_hard_header)(struct sk_buff *skb, struct net_device *dev,
	                        unsigned short type, void *daddr,
	                        void *saddr, unsigned len);
	int (*orig_hard_header_cache)(struct neighbour *neigh,
	                              struct hh_cache *hh);
	spinlock_t lock;		/* protects connection/transfer state */
	atomic_t kill_timer;		/* set to ask the poll timer to stop */
	struct semaphore killed_timer_sem; /* poll-timer exit handshake */
};
234 static inline void enable_parport_interrupts (struct net_device *dev)
236 if (dev->irq != -1)
238 struct parport *port =
239 ((struct net_local *)dev->priv)->pardev->port;
240 port->ops->enable_irq (port);
244 static inline void disable_parport_interrupts (struct net_device *dev)
246 if (dev->irq != -1)
248 struct parport *port =
249 ((struct net_local *)dev->priv)->pardev->port;
250 port->ops->disable_irq (port);
254 static inline void write_data (struct net_device *dev, unsigned char data)
256 struct parport *port =
257 ((struct net_local *)dev->priv)->pardev->port;
259 port->ops->write_data (port, data);
262 static inline unsigned char read_status (struct net_device *dev)
264 struct parport *port =
265 ((struct net_local *)dev->priv)->pardev->port;
267 return port->ops->read_status (port);
270 /* Entry point of PLIP driver.
271 Probe the hardware, and register/initialize the driver.
273 PLIP is rather weird, because of the way it interacts with the parport
274 system. It is _not_ initialised from Space.c. Instead, plip_init()
275 is called, and that function makes up a "struct net_device" for each port, and
276 then calls us here.
/* Prepare a freshly allocated (etherdev-based) net_device for PLIP:
   install our device methods, wrap the ethernet header hooks so we can
   rewrite addresses, and set up the work items and lock. */
static void
plip_init_netdev(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);

	/* Then, override parts of it */
	dev->hard_start_xmit	 = plip_tx_packet;
	dev->open		 = plip_open;
	dev->stop		 = plip_close;
	dev->get_stats 		 = plip_get_stats;
	dev->do_ioctl		 = plip_ioctl;
	dev->header_cache_update = NULL;
	dev->tx_queue_len 	 = 10;
	dev->flags	         = IFF_POINTOPOINT|IFF_NOARP;
	/* 0xfc:... is the fixed pseudo-MAC prefix; plip_open fills the rest. */
	memset(dev->dev_addr, 0xfc, ETH_ALEN);

	/* Set the private structure */
	nl->orig_hard_header    = dev->hard_header;
	dev->hard_header        = plip_hard_header;

	nl->orig_hard_header_cache = dev->hard_header_cache;
	dev->hard_header_cache     = plip_hard_header_cache;

	nl->port_owner = 0;

	/* Initialize constants */
	nl->trigger	= PLIP_TRIGGER_WAIT;
	nl->nibble	= PLIP_NIBBLE_WAIT;

	/* Initialize task queue structures */
	INIT_WORK(&nl->immediate, plip_bh);
	INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);

	/* IRQ-less ports are driven by a polling timer instead. */
	if (dev->irq == -1)
		INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);

	spin_lock_init(&nl->lock);
}
319 /* Bottom half handler for the delayed request.
320 This routine is kicked by do_timer().
321 Request `plip_bh' to be invoked. */
322 static void
323 plip_kick_bh(struct work_struct *work)
325 struct net_local *nl =
326 container_of(work, struct net_local, deferred.work);
328 if (nl->is_deferred)
329 schedule_work(&nl->immediate);
332 /* Forward declarations of internal routines */
333 static int plip_none(struct net_device *, struct net_local *,
334 struct plip_local *, struct plip_local *);
335 static int plip_receive_packet(struct net_device *, struct net_local *,
336 struct plip_local *, struct plip_local *);
337 static int plip_send_packet(struct net_device *, struct net_local *,
338 struct plip_local *, struct plip_local *);
339 static int plip_connection_close(struct net_device *, struct net_local *,
340 struct plip_local *, struct plip_local *);
341 static int plip_error(struct net_device *, struct net_local *,
342 struct plip_local *, struct plip_local *);
343 static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
344 struct plip_local *snd,
345 struct plip_local *rcv,
346 int error);
348 #define OK 0
349 #define TIMEOUT 1
350 #define ERROR 2
351 #define HS_TIMEOUT 3
353 typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
354 struct plip_local *snd, struct plip_local *rcv);
/* Dispatch table for plip_bh, indexed by enum plip_connection_state. */
static const plip_func connection_state_table[] =
{
	plip_none,		/* PLIP_CN_NONE */
	plip_receive_packet,	/* PLIP_CN_RECEIVE */
	plip_send_packet,	/* PLIP_CN_SEND */
	plip_connection_close,	/* PLIP_CN_CLOSING */
	plip_error		/* PLIP_CN_ERROR */
};
/* Bottom half handler of PLIP.  Dispatches to the handler for the
   current connection state; on failure lets plip_bh_timeout_error
   decide, and reschedules itself via the deferred work if a retry
   is wanted. */
static void
plip_bh(struct work_struct *work)
{
	struct net_local *nl = container_of(work, struct net_local, immediate);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;
	plip_func f;
	int r;

	nl->is_deferred = 0;
	f = connection_state_table[nl->connection];
	if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK
	    && (r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}
}
/* Polling substitute for the parport interrupt, used when dev->irq == -1.
   Re-arms itself every tick until kill_timer is set, then signals
   plip_close via killed_timer_sem. */
static void
plip_timer_bh(struct work_struct *work)
{
	struct net_local *nl =
		container_of(work, struct net_local, timer.work);

	if (!(atomic_read (&nl->kill_timer))) {
		/* irq == -1 tells plip_interrupt we are the poll path. */
		plip_interrupt (-1, nl->dev);

		schedule_delayed_work(&nl->timer, 1);
	}
	else {
		up (&nl->killed_timer_sem);
	}
}
/* Handle a non-OK return from a state handler: count retries, and after
   too many consecutive timeouts log the failure, drop both in-flight
   skbs and park the link in PLIP_CN_ERROR until the peer settles.
   Returns TIMEOUT to ask plip_bh to reschedule, or OK when the error
   turned out to be benign (e.g. a collision). */
static int
plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv,
		      int error)
{
	unsigned char c0;
	/*
	 * This is tricky. If we got here from the beginning of send (either
	 * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
	 * already disabled. With the old variant of {enable,disable}_irq()
	 * extra disable_irq() was a no-op. Now it became mortal - it's
	 * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
	 * that is). So we have to treat HS_TIMEOUT and ERROR from send
	 * in a special way.
	 */

	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_SEND) {

		if (error != ERROR) { /* Timeout */
			nl->timeout_count++;
			/* Handshake timeouts get more grace (10) than
			   mid-transfer ones (3). */
			if ((error == HS_TIMEOUT
			     && nl->timeout_count <= 10)
			    || nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
			       dev->name, snd->state, c0);
		} else
			/* ERROR during send: treat as handshake failure so
			   the IRQ bookkeeping below stays balanced. */
			error = HS_TIMEOUT;
		nl->enet_stats.tx_errors++;
		nl->enet_stats.tx_aborted_errors++;
	} else if (nl->connection == PLIP_CN_RECEIVE) {
		if (rcv->state == PLIP_PK_TRIGGER) {
			/* Transmission was interrupted. */
			spin_unlock_irq(&nl->lock);
			return OK;
		}
		if (error != ERROR) { /* Timeout */
			if (++nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
			       dev->name, rcv->state, c0);
		}
		nl->enet_stats.rx_dropped++;
	}
	/* Give up on both directions and free any half-built packets. */
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	spin_unlock_irq(&nl->lock);
	if (error == HS_TIMEOUT) {
		/* Only this path still has the IRQ enabled (see above). */
		DISABLE(dev->irq);
		synchronize_irq(dev->irq);
	}
	disable_parport_interrupts (dev);
	netif_stop_queue (dev);
	nl->connection = PLIP_CN_ERROR;
	write_data (dev, 0x00);

	return TIMEOUT;
}
/* PLIP_CN_NONE handler: link is idle, nothing to do. */
static int
plip_none(struct net_device *dev, struct net_local *nl,
	  struct plip_local *snd, struct plip_local *rcv)
{
	return OK;
}
/* PLIP_RECEIVE --- receive a byte(two nibbles)
   Returns OK on success, TIMEOUT on timeout.
   *ns_p records how far we got so a timed-out call can be resumed.
   The switch cases deliberately fall through: after finishing one
   nibble we immediately try the next. */
static inline int
plip_receive(unsigned short nibble_timeout, struct net_device *dev,
	     enum plip_nibble_state *ns_p, unsigned char *data_p)
{
	unsigned char c0, c1;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		/* Wait for the low nibble: BUSY (0x80) dropped and a
		   stable status reading (read twice, must match). */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if ((c0 & 0x80) == 0) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p = (c0 >> 3) & 0x0f;
		write_data (dev, 0x10); /* send ACK */
		*ns_p = PLIP_NB_1;
		/* fall through */

	case PLIP_NB_1:
		/* Wait for the high nibble: BUSY raised again. */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if (c0 & 0x80) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p |= (c0 << 1) & 0xf0;
		write_data (dev, 0x00); /* send ACK */
		*ns_p = PLIP_NB_BEGIN;
		/* fall through */

	case PLIP_NB_2:
		break;
	}
	return OK;
}
/*
 *	Determine the packet's protocol ID. The rule here is that we
 *	assume 802.3 if the type field is short enough to be a length.
 *	This is normal practice and works for any 'now in use' protocol.
 *
 *	PLIP is ethernet ish but the daddr might not be valid if unicast.
 *	PLIP fortunately has no bus architecture (its Point-to-point).
 *
 *	We can't fix the daddr thing as that quirk (more bug) is embedded
 *	in far too many old systems not all even running Linux.
 */
static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_reset_mac_header(skb);
	skb_pull(skb,dev->hard_header_len);
	eth = eth_hdr(skb);

	/* Multicast bit set: classify as broadcast or multicast. */
	if(*eth->h_dest&1)
	{
		if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}

	/*
	 *	This ALLMULTI check should be redundant by 1.4
	 *	so don't forget to remove it.
	 */

	/* >= 1536: the field is an ethertype, not an 802.3 length. */
	if (ntohs(eth->h_proto) >= 1536)
		return eth->h_proto;

	rawp = skb->data;

	/*
	 *	This is a magic hack to spot IPX packets. Older Novell breaks
	 *	the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 *	layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 *	won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/*
	 *	Real 802.2 LLC
	 */
	return htons(ETH_P_802_2);
}
/* PLIP_RECEIVE_PACKET --- receive a packet.
   State-machine handler for PLIP_CN_RECEIVE; resumable — each case
   records its progress in rcv and falls through to the next stage. */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
		    struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;

	switch (rcv->state) {
	case PLIP_PK_TRIGGER:
		DISABLE(dev->irq);
		/* Don't need to synchronize irq, as we can safely ignore it */
		disable_parport_interrupts (dev);
		write_data (dev, 0x01); /* send ACK */
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive start\n", dev->name);
		rcv->state = PLIP_PK_LENGTH_LSB;
		rcv->nibble = PLIP_NB_BEGIN;
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (snd->state != PLIP_PK_DONE) {
			/* We also have a send pending: use the shorter
			   trigger timeout to detect a collision quickly. */
			if (plip_receive(nl->trigger, dev,
					 &rcv->nibble, &rcv->length.b.lsb)) {
				/* collision, here dev->tbusy == 1 */
				rcv->state = PLIP_PK_DONE;
				nl->is_deferred = 1;
				nl->connection = PLIP_CN_SEND;
				schedule_delayed_work(&nl->deferred, 1);
				enable_parport_interrupts (dev);
				ENABLE(dev->irq);
				return OK;
			}
		} else {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &rcv->length.b.lsb))
				return TIMEOUT;
		}
		rcv->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->length.b.msb))
			return TIMEOUT;
		if (rcv->length.h > dev->mtu + dev->hard_header_len
		    || rcv->length.h < 8) {
			printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
			return ERROR;
		}
		/* Malloc up new buffer. */
		rcv->skb = dev_alloc_skb(rcv->length.h + 2);
		if (rcv->skb == NULL) {
			printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
			return ERROR;
		}
		skb_reserve(rcv->skb, 2);	/* Align IP on 16 byte boundaries */
		skb_put(rcv->skb,rcv->length.h);
		rcv->skb->dev = dev;
		rcv->state = PLIP_PK_DATA;
		rcv->byte = 0;
		rcv->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		lbuf = rcv->skb->data;
		do
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &lbuf[rcv->byte]))
				return TIMEOUT;
		while (++rcv->byte < rcv->length.h);
		/* Sum the payload backwards for the checksum compare. */
		do
			rcv->checksum += lbuf[--rcv->byte];
		while (rcv->byte);
		rcv->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->data))
			return TIMEOUT;
		if (rcv->data != rcv->checksum) {
			nl->enet_stats.rx_crc_errors++;
			if (net_debug)
				printk(KERN_DEBUG "%s: checksum error\n", dev->name);
			return ERROR;
		}
		rcv->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Inform the upper layer for the arrival of a packet. */
		rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
		netif_rx(rcv->skb);
		dev->last_rx = jiffies;
		nl->enet_stats.rx_bytes += rcv->length.h;
		nl->enet_stats.rx_packets++;
		rcv->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive end\n", dev->name);

		/* Close the connection. */
		write_data (dev, 0x00);
		spin_lock_irq(&nl->lock);
		if (snd->state != PLIP_PK_DONE) {
			/* A transmit is pending: switch to send. */
			nl->connection = PLIP_CN_SEND;
			spin_unlock_irq(&nl->lock);
			schedule_work(&nl->immediate);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		} else {
			nl->connection = PLIP_CN_NONE;
			spin_unlock_irq(&nl->lock);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		}
	}
	return OK;
}
/* PLIP_SEND --- send a byte (two nibbles)
   Returns OK on success, TIMEOUT when timeout.
   *ns_p records progress so a timed-out call can be resumed; the
   switch cases deliberately fall through from one nibble to the next. */
static inline int
plip_send(unsigned short nibble_timeout, struct net_device *dev,
	  enum plip_nibble_state *ns_p, unsigned char data)
{
	unsigned char c0;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		write_data (dev, data & 0x0f);
		*ns_p = PLIP_NB_1;
		/* fall through */

	case PLIP_NB_1:
		/* Raise the strobe bit, then wait for the peer's ACK
		   (BUSY dropped) before presenting the high nibble. */
		write_data (dev, 0x10 | (data & 0x0f));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if ((c0 & 0x80) == 0)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		write_data (dev, 0x10 | (data >> 4));
		*ns_p = PLIP_NB_2;
		/* fall through */

	case PLIP_NB_2:
		/* Drop the strobe bit and wait for the final ACK. */
		write_data (dev, (data >> 4));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if (c0 & 0x80)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		*ns_p = PLIP_NB_BEGIN;
		return OK;
	}
	return OK;
}
/* PLIP_SEND_PACKET --- send a packet.
   State-machine handler for PLIP_CN_SEND; resumable — each case records
   its progress in snd and falls through to the next stage.  Detects and
   yields to a simultaneous incoming transfer (collision). */
static int
plip_send_packet(struct net_device *dev, struct net_local *nl,
		 struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;
	unsigned char c0;
	unsigned int cx;

	if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
		printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
		snd->state = PLIP_PK_DONE;
		snd->skb = NULL;
		return ERROR;
	}

	switch (snd->state) {
	case PLIP_PK_TRIGGER:
		/* Peer must be idle (status lines 0x80) before we start. */
		if ((read_status(dev) & 0xf8) != 0x80)
			return HS_TIMEOUT;

		/* Trigger remote rx interrupt. */
		write_data (dev, 0x08);
		cx = nl->trigger;
		while (1) {
			udelay(PLIP_DELAY_UNIT);
			spin_lock_irq(&nl->lock);
			if (nl->connection == PLIP_CN_RECEIVE) {
				spin_unlock_irq(&nl->lock);
				/* Interrupted. */
				nl->enet_stats.collisions++;
				return OK;
			}
			c0 = read_status(dev);
			if (c0 & 0x08) {
				spin_unlock_irq(&nl->lock);
				DISABLE(dev->irq);
				synchronize_irq(dev->irq);
				if (nl->connection == PLIP_CN_RECEIVE) {
					/* Interrupted.
					   We don't need to enable irq,
					   as it is soon disabled. */
					/* Yes, we do. New variant of
					   {enable,disable}_irq *counts*
					   them.  -- AV  */
					ENABLE(dev->irq);
					nl->enet_stats.collisions++;
					return OK;
				}
				disable_parport_interrupts (dev);
				if (net_debug > 2)
					printk(KERN_DEBUG "%s: send start\n", dev->name);
				snd->state = PLIP_PK_LENGTH_LSB;
				snd->nibble = PLIP_NB_BEGIN;
				nl->timeout_count = 0;
				break;
			}
			spin_unlock_irq(&nl->lock);
			if (--cx == 0) {
				write_data (dev, 0x00);
				return HS_TIMEOUT;
			}
		}
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.lsb))
			return TIMEOUT;
		snd->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.msb))
			return TIMEOUT;
		snd->state = PLIP_PK_DATA;
		snd->byte = 0;
		snd->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		do
			if (plip_send(nibble_timeout, dev,
				      &snd->nibble, lbuf[snd->byte]))
				return TIMEOUT;
		while (++snd->byte < snd->length.h);
		/* Sum the payload backwards for the trailing checksum. */
		do
			snd->checksum += lbuf[--snd->byte];
		while (snd->byte);
		snd->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->checksum))
			return TIMEOUT;

		nl->enet_stats.tx_bytes += snd->skb->len;
		dev_kfree_skb(snd->skb);
		nl->enet_stats.tx_packets++;
		snd->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Close the connection */
		write_data (dev, 0x00);
		snd->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: send end\n", dev->name);
		nl->connection = PLIP_CN_CLOSING;
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		return OK;
	}
	return OK;
}
/* PLIP_CN_CLOSING handler: return the link to idle, restart the TX
   queue, and hand the parport back if another driver asked for it
   while we were busy (should_relinquish). */
static int
plip_connection_close(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv)
{
	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_CLOSING) {
		nl->connection = PLIP_CN_NONE;
		netif_wake_queue (dev);
	}
	spin_unlock_irq(&nl->lock);
	if (nl->should_relinquish) {
		nl->should_relinquish = nl->port_owner = 0;
		parport_release(nl->pardev);
	}
	return OK;
}
/* PLIP_ERROR --- wait till other end settled.
   Once the peer's status lines read idle (0x80) the interface is
   reset and re-enabled; otherwise we reschedule ourselves. */
static int
plip_error(struct net_device *dev, struct net_local *nl,
	   struct plip_local *snd, struct plip_local *rcv)
{
	unsigned char status;

	status = read_status(dev);
	if ((status & 0xf8) == 0x80) {
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
		nl->connection = PLIP_CN_NONE;
		nl->should_relinquish = 0;
		netif_start_queue (dev);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		netif_wake_queue (dev);
	} else {
		/* Peer still busy: poll again on the next tick. */
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}

	return OK;
}
/* Handle the parallel port interrupts.  Also called with irq == -1
   from plip_timer_bh when the port has no IRQ (poll mode). */
static void
plip_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *nl;
	struct plip_local *rcv;
	unsigned char c0;

	nl = netdev_priv(dev);
	rcv = &nl->rcv_data;

	spin_lock_irq (&nl->lock);

	/* 0xc0 on the status lines is the peer's trigger pattern;
	   anything else is noise (expected in poll mode). */
	c0 = read_status(dev);
	if ((c0 & 0xf8) != 0xc0) {
		if ((dev->irq != -1) && (net_debug > 1))
			printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
		spin_unlock_irq (&nl->lock);
		return;
	}

	if (net_debug > 3)
		printk(KERN_DEBUG "%s: interrupt.\n", dev->name);

	switch (nl->connection) {
	case PLIP_CN_CLOSING:
		netif_wake_queue (dev);
		/* fall through - the trigger also starts a new receive */
	case PLIP_CN_NONE:
	case PLIP_CN_SEND:
		rcv->state = PLIP_PK_TRIGGER;
		nl->connection = PLIP_CN_RECEIVE;
		nl->timeout_count = 0;
		schedule_work(&nl->immediate);
		break;

	case PLIP_CN_RECEIVE:
		/* May occur because there is race condition
		   around test and set of dev->interrupt.
		   Ignore this interrupt. */
		break;

	case PLIP_CN_ERROR:
		printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
		break;
	}

	spin_unlock_irq(&nl->lock);
}
/* hard_start_xmit method: claim the parport if needed, queue the skb
   on the send-side state machine and kick plip_bh.  Returns 0 on
   acceptance, 1 to ask the stack to retry later. */
static int
plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;

	if (netif_queue_stopped(dev))
		return 1;

	/* We may need to grab the bus */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev))
			return 1;
		nl->port_owner = 1;
	}

	/* One packet in flight at a time. */
	netif_stop_queue (dev);

	if (skb->len > dev->mtu + dev->hard_header_len) {
		printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
		netif_start_queue (dev);
		return 1;
	}

	if (net_debug > 2)
		printk(KERN_DEBUG "%s: send request\n", dev->name);

	spin_lock_irq(&nl->lock);
	dev->trans_start = jiffies;
	snd->skb = skb;
	snd->length.h = skb->len;
	snd->state = PLIP_PK_TRIGGER;
	if (nl->connection == PLIP_CN_NONE) {
		nl->connection = PLIP_CN_SEND;
		nl->timeout_count = 0;
	}
	schedule_work(&nl->immediate);
	spin_unlock_irq(&nl->lock);

	return 0;
}
998 static void
999 plip_rewrite_address(struct net_device *dev, struct ethhdr *eth)
1001 struct in_device *in_dev;
1003 if ((in_dev=dev->ip_ptr) != NULL) {
1004 /* Any address will do - we take the first */
1005 struct in_ifaddr *ifa=in_dev->ifa_list;
1006 if (ifa != NULL) {
1007 memcpy(eth->h_source, dev->dev_addr, 6);
1008 memset(eth->h_dest, 0xfc, 2);
1009 memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
/* hard_header wrapper: build the normal ethernet header, then rewrite
   the addresses into PLIP's pseudo-MAC form. */
static int
plip_hard_header(struct sk_buff *skb, struct net_device *dev,
		 unsigned short type, void *daddr,
		 void *saddr, unsigned len)
{
	struct net_local *nl = netdev_priv(dev);
	int ret;

	if ((ret = nl->orig_hard_header(skb, dev, type, daddr, saddr, len)) >= 0)
		plip_rewrite_address (dev, (struct ethhdr *)skb->data);

	return ret;
}
1028 int plip_hard_header_cache(struct neighbour *neigh,
1029 struct hh_cache *hh)
1031 struct net_local *nl = neigh->dev->priv;
1032 int ret;
1034 if ((ret = nl->orig_hard_header_cache(neigh, hh)) == 0)
1036 struct ethhdr *eth;
1038 eth = (struct ethhdr*)(((u8*)hh->hh_data) +
1039 HH_DATA_OFF(sizeof(*eth)));
1040 plip_rewrite_address (neigh->dev, eth);
1043 return ret;
/* Open/initialize the board. This is called (in the current kernel)
   sometime after booting when the 'ifconfig' program is run.

   This routine gets exclusive access to the parallel port by allocating
   its IRQ line.  Returns 0 on success, -EAGAIN if the port is busy. */
static int
plip_open(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct in_device *in_dev;

	/* Grab the port */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev)) return -EAGAIN;
		nl->port_owner = 1;
	}

	nl->should_relinquish = 0;

	/* Clear the data port. */
	write_data (dev, 0x00);

	/* Enable rx interrupt. */
	enable_parport_interrupts (dev);
	if (dev->irq == -1)
	{
		/* No IRQ: start the polling timer instead. */
		atomic_set (&nl->kill_timer, 0);
		schedule_delayed_work(&nl->timer, 1);
	}

	/* Initialize the state machine. */
	nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
	nl->rcv_data.skb = nl->snd_data.skb = NULL;
	nl->connection = PLIP_CN_NONE;
	nl->is_deferred = 0;

	/* Fill in the MAC-level header.
	   We used to abuse dev->broadcast to store the point-to-point
	   MAC address, but we no longer do it. Instead, we fetch the
	   interface address whenever it is needed, which is cheap enough
	   because we use the hh_cache. Actually, abusing dev->broadcast
	   didn't work, because when using plip_open the point-to-point
	   address isn't yet known.
	   PLIP doesn't have a real MAC address, but we need it to be
	   DOS compatible, and to properly support taps (otherwise,
	   when the device address isn't identical to the address of a
	   received frame, the kernel incorrectly drops it). */

	if ((in_dev=dev->ip_ptr) != NULL) {
		/* Any address will do - we take the first. We already
		   have the first two bytes filled with 0xfc, from
		   plip_init_dev(). */
		struct in_ifaddr *ifa=in_dev->ifa_list;
		if (ifa != NULL) {
			memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
		}
	}

	netif_start_queue (dev);

	return 0;
}
/* The inverse routine to plip_open ().  Stops the queue and the
   IRQ/poll machinery, releases the parport, and frees any half-done
   packets in either direction. */
static int
plip_close(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;

	netif_stop_queue (dev);
	DISABLE(dev->irq);
	synchronize_irq(dev->irq);

	if (dev->irq == -1)
	{
		/* Ask the polling timer to stop and wait until it has. */
		init_MUTEX_LOCKED (&nl->killed_timer_sem);
		atomic_set (&nl->kill_timer, 1);
		down (&nl->killed_timer_sem);
	}

#ifdef NOTDEF
	outb(0x00, PAR_DATA(dev));
#endif
	nl->is_deferred = 0;
	nl->connection = PLIP_CN_NONE;
	if (nl->port_owner) {
		parport_release(nl->pardev);
		nl->port_owner = 0;
	}

	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}

#ifdef NOTDEF
	/* Reset. */
	outb(0x00, PAR_CONTROL(dev));
#endif
	return 0;
}
1157 static int
1158 plip_preempt(void *handle)
1160 struct net_device *dev = (struct net_device *)handle;
1161 struct net_local *nl = netdev_priv(dev);
1163 /* Stand our ground if a datagram is on the wire */
1164 if (nl->connection != PLIP_CN_NONE) {
1165 nl->should_relinquish = 1;
1166 return 1;
1169 nl->port_owner = 0; /* Remember that we released the bus */
1170 return 0;
/* Parport wakeup callback: the port has become available again.
   Re-claims it if the interface is up and we lost it to preemption. */
static void
plip_wakeup(void *handle)
{
	struct net_device *dev = (struct net_device *)handle;
	struct net_local *nl = netdev_priv(dev);

	if (nl->port_owner) {
		/* Why are we being woken up? */
		printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
		if (!parport_claim(nl->pardev))
			/* bus_owner is already set (but why?) */
			printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
		else
			return;
	}

	if (!(dev->flags & IFF_UP))
		/* Don't need the port when the interface is down */
		return;

	if (!parport_claim(nl->pardev)) {
		nl->port_owner = 1;
		/* Clear the data port. */
		write_data (dev, 0x00);
	}

	return;
}
1202 static struct net_device_stats *
1203 plip_get_stats(struct net_device *dev)
1205 struct net_local *nl = netdev_priv(dev);
1206 struct net_device_stats *r = &nl->enet_stats;
1208 return r;
1211 static int
1212 plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1214 struct net_local *nl = netdev_priv(dev);
1215 struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
1217 if (cmd != SIOCDEVPLIP)
1218 return -EOPNOTSUPP;
1220 switch(pc->pcmd) {
1221 case PLIP_GET_TIMEOUT:
1222 pc->trigger = nl->trigger;
1223 pc->nibble = nl->nibble;
1224 break;
1225 case PLIP_SET_TIMEOUT:
1226 if(!capable(CAP_NET_ADMIN))
1227 return -EPERM;
1228 nl->trigger = pc->trigger;
1229 nl->nibble = pc->nibble;
1230 break;
1231 default:
1232 return -EOPNOTSUPP;
1234 return 0;
/* Module parameters: an explicit list of parport numbers to attach to
 * (-1 = unused slot; all -1 means "attach to any port"), and `timid'
 * mode (only grab ports that have no other device registered). */
static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
static int timid;

module_param_array(parport, int, NULL, 0);
module_param(timid, int, 0);
MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");

/* One registered net_device per attached port, indexed by unit number. */
static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
1246 static inline int
1247 plip_searchfor(int list[], int a)
1249 int i;
1250 for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1251 if (list[i] == a) return 1;
1253 return 0;
1256 /* plip_attach() is called (by the parport code) when a port is
1257 * available to use. */
1258 static void plip_attach (struct parport *port)
1260 static int unit;
1261 struct net_device *dev;
1262 struct net_local *nl;
1263 char name[IFNAMSIZ];
1265 if ((parport[0] == -1 && (!timid || !port->devices)) ||
1266 plip_searchfor(parport, port->number)) {
1267 if (unit == PLIP_MAX) {
1268 printk(KERN_ERR "plip: too many devices\n");
1269 return;
1272 sprintf(name, "plip%d", unit);
1273 dev = alloc_etherdev(sizeof(struct net_local));
1274 if (!dev) {
1275 printk(KERN_ERR "plip: memory squeeze\n");
1276 return;
1279 strcpy(dev->name, name);
1281 SET_MODULE_OWNER(dev);
1282 dev->irq = port->irq;
1283 dev->base_addr = port->base;
1284 if (port->irq == -1) {
1285 printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode,"
1286 "which is fairly inefficient!\n", port->name);
1289 nl = netdev_priv(dev);
1290 nl->dev = dev;
1291 nl->pardev = parport_register_device(port, name, plip_preempt,
1292 plip_wakeup, plip_interrupt,
1293 0, dev);
1295 if (!nl->pardev) {
1296 printk(KERN_ERR "%s: parport_register failed\n", name);
1297 goto err_free_dev;
1298 return;
1301 plip_init_netdev(dev);
1303 if (register_netdev(dev)) {
1304 printk(KERN_ERR "%s: network register failed\n", name);
1305 goto err_parport_unregister;
1308 printk(KERN_INFO "%s", version);
1309 if (dev->irq != -1)
1310 printk(KERN_INFO "%s: Parallel port at %#3lx, "
1311 "using IRQ %d.\n",
1312 dev->name, dev->base_addr, dev->irq);
1313 else
1314 printk(KERN_INFO "%s: Parallel port at %#3lx, "
1315 "not using IRQ.\n",
1316 dev->name, dev->base_addr);
1317 dev_plip[unit++] = dev;
1319 return;
1321 err_parport_unregister:
1322 parport_unregister_device(nl->pardev);
1323 err_free_dev:
1324 free_netdev(dev);
1325 return;
/* plip_detach() is called (by the parport code) when a port is
 * no longer available to use. */
static void plip_detach (struct parport *port)
{
	/* Nothing to do: devices are torn down in plip_cleanup_module(). */
}
/* Hooks through which the parport core tells us about ports
 * appearing and disappearing. */
static struct parport_driver plip_driver = {
	.name	= "plip",
	.attach	= plip_attach,
	.detach	= plip_detach
};
/* Module exit: unregister from parport, then unwind every device we
 * created in plip_attach(), in the reverse order of its setup. */
static void __exit plip_cleanup_module (void)
{
	struct net_device *dev;
	int i;

	/* Stop attach/detach callbacks first so dev_plip[] cannot
	 * change underneath us while we tear the devices down. */
	parport_unregister_driver (&plip_driver);

	for (i=0; i < PLIP_MAX; i++) {
		if ((dev = dev_plip[i])) {
			struct net_local *nl = netdev_priv(dev);
			unregister_netdev(dev);
			/* Give the port back if we still hold it, then
			 * drop our parport registration and the device. */
			if (nl->port_owner)
				parport_release(nl->pardev);
			parport_unregister_device(nl->pardev);
			free_netdev(dev);
			dev_plip[i] = NULL;
		}
	}
}
#ifndef MODULE

/* Next free slot in parport[] for "plip=parportN" boot arguments. */
static int parport_ptr;

/* Parse the "plip=" kernel command line option:
 *   plip=parportN  - attach to parallel port N (may be repeated),
 *   plip=timid     - only use ports with no other devices on them,
 *   plip= / plip=0 - disable the driver entirely (parport[0] = -2).
 * Always returns 1 (option consumed). */
static int __init plip_setup(char *str)
{
	int ints[4];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	/* Ugh. */
	if (!strncmp(str, "parport", 7)) {
		int n = simple_strtoul(str+7, NULL, 10);
		if (parport_ptr < PLIP_MAX)
			parport[parport_ptr++] = n;
		else
			printk(KERN_INFO "plip: too many ports, %s ignored.\n",
			       str);
	} else if (!strcmp(str, "timid")) {
		timid = 1;
	} else {
		if (ints[0] == 0 || ints[1] == 0) {
			/* disable driver on "plip=" or "plip=0" */
			parport[0] = -2;
		} else {
			printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
			       ints[1]);
		}
	}
	return 1;
}

__setup("plip=", plip_setup);

#endif /* !MODULE */
1397 static int __init plip_init (void)
1399 if (parport[0] == -2)
1400 return 0;
1402 if (parport[0] != -1 && timid) {
1403 printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
1404 timid = 0;
1407 if (parport_register_driver (&plip_driver)) {
1408 printk (KERN_WARNING "plip: couldn't register driver\n");
1409 return 1;
1412 return 0;
/* Module entry/exit points and license. */
module_init(plip_init);
module_exit(plip_cleanup_module);
MODULE_LICENSE("GPL");
1420 * Local variables:
1421 * compile-command: "gcc -DMODULE -DMODVERSIONS -D__KERNEL__ -Wall -Wstrict-prototypes -O2 -g -fomit-frame-pointer -pipe -c plip.c"
1422 * End: