[XFRM]: skb_cow_data() does not set proper owner for new skbs.
[linux-2.6/verdex.git] / drivers / net / plip.c
blobf4b62405d2e5997a2639e53424481d136e776961
1 /* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
2 /* PLIP: A parallel port "network" driver for Linux. */
3 /* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */
4 /*
5 * Authors: Donald Becker <becker@scyld.com>
6 * Tommy Thorn <thorn@daimi.aau.dk>
7 * Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
8 * Alan Cox <gw4pts@gw4pts.ampr.org>
9 * Peter Bauer <100136.3530@compuserve.com>
10 * Niibe Yutaka <gniibe@mri.co.jp>
11 * Nimrod Zimerman <zimerman@mailandnews.com>
13 * Enhancements:
14 * Modularization and ifreq/ifmap support by Alan Cox.
15 * Rewritten by Niibe Yutaka.
16 * parport-sharing awareness code by Philip Blundell.
17 * SMP locking by Niibe Yutaka.
18 * Support for parallel ports with no IRQ (poll mode),
19 * Modifications to use the parallel port API
20 * by Nimrod Zimerman.
22 * Fixes:
23 * Niibe Yutaka
24 * - Module initialization.
25 * - MTU fix.
26 * - Make sure other end is OK, before sending a packet.
27 * - Fix immediate timer problem.
29 * Al Viro
30 * - Changed {enable,disable}_irq handling to make it work
31 * with new ("stack") semantics.
33 * This program is free software; you can redistribute it and/or
34 * modify it under the terms of the GNU General Public License
35 * as published by the Free Software Foundation; either version
36 * 2 of the License, or (at your option) any later version.
40 * Original version and the name 'PLIP' from Donald Becker <becker@scyld.com>
41 * inspired by Russ Nelson's parallel port packet driver.
43 * NOTE:
44 * Tanabe Hiroyasu had changed the protocol, and it was in Linux v1.0.
45 * Because of the necessity to communicate to DOS machines with the
46 * Crynwr packet driver, Peter Bauer changed the protocol again
47 * back to original protocol.
49 * This version follows original PLIP protocol.
50 * So, this PLIP can't communicate the PLIP of Linux v1.0.
54 * To use with DOS box, please do (Turn on ARP switch):
55 * # ifconfig plip[0-2] arp
57 static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
60 Sources:
61 Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
62 "parallel.asm" parallel port packet driver.
64 The "Crynwr" parallel port standard specifies the following protocol:
65 Trigger by sending nibble '0x8' (this causes interrupt on other end)
66 count-low octet
67 count-high octet
68 ... data octets
69 checksum octet
70 Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
71 <wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
73 The packet is encapsulated as if it were ethernet.
75 The cable used is a de facto standard parallel null cable -- sold as
76 a "LapLink" cable by various places. You'll need a 12-conductor cable to
77 make one yourself. The wiring is:
78 SLCTIN 17 - 17
79 GROUND 25 - 25
80 D0->ERROR 2 - 15 15 - 2
81 D1->SLCT 3 - 13 13 - 3
82 D2->PAPOUT 4 - 12 12 - 4
83 D3->ACK 5 - 10 10 - 5
84 D4->BUSY 6 - 11 11 - 6
85 Do not connect the other pins. They are
86 D5,D6,D7 are 7,8,9
87 STROBE is 1, FEED is 14, INIT is 16
88 extra grounds are 18,19,20,21,22,23,24
91 #include <linux/module.h>
92 #include <linux/kernel.h>
93 #include <linux/types.h>
94 #include <linux/fcntl.h>
95 #include <linux/interrupt.h>
96 #include <linux/string.h>
97 #include <linux/if_ether.h>
98 #include <linux/in.h>
99 #include <linux/errno.h>
100 #include <linux/delay.h>
101 #include <linux/lp.h>
102 #include <linux/init.h>
103 #include <linux/netdevice.h>
104 #include <linux/etherdevice.h>
105 #include <linux/inetdevice.h>
106 #include <linux/skbuff.h>
107 #include <linux/if_plip.h>
108 #include <linux/workqueue.h>
109 #include <linux/ioport.h>
110 #include <linux/spinlock.h>
111 #include <linux/parport.h>
112 #include <linux/bitops.h>
114 #include <net/neighbour.h>
116 #include <asm/system.h>
117 #include <asm/irq.h>
118 #include <asm/byteorder.h>
119 #include <asm/semaphore.h>
/* Maximum number of devices to support. */
#define PLIP_MAX 8

/* Use 0 for production, 1 for verification, >2 for debug */
#ifndef NET_DEBUG
#define NET_DEBUG 1
#endif
static unsigned int net_debug = NET_DEBUG;

/*
 * Guarded IRQ enable/disable for ports that may run in poll mode
 * (dev->irq == -1).  Wrapped in do { } while (0) so each macro expands
 * to exactly one statement and is safe in unbraced if/else bodies
 * (the previous bare-if form could capture a following "else").
 */
#define ENABLE(irq)	do { if ((irq) != -1) enable_irq(irq); } while (0)
#define DISABLE(irq)	do { if ((irq) != -1) disable_irq(irq); } while (0)

/* In micro second */
#define PLIP_DELAY_UNIT 1

/* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
#define PLIP_TRIGGER_WAIT 500

/* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
#define PLIP_NIBBLE_WAIT 3000
142 /* Bottom halves */
143 static void plip_kick_bh(struct net_device *dev);
144 static void plip_bh(struct net_device *dev);
145 static void plip_timer_bh(struct net_device *dev);
147 /* Interrupt handler */
148 static void plip_interrupt(int irq, void *dev_id, struct pt_regs *regs);
150 /* Functions for DEV methods */
151 static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
152 static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
153 unsigned short type, void *daddr,
154 void *saddr, unsigned len);
155 static int plip_hard_header_cache(struct neighbour *neigh,
156 struct hh_cache *hh);
157 static int plip_open(struct net_device *dev);
158 static int plip_close(struct net_device *dev);
159 static struct net_device_stats *plip_get_stats(struct net_device *dev);
160 static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
161 static int plip_preempt(void *handle);
162 static void plip_wakeup(void *handle);
/* Logical link state of the point-to-point connection (one per device).
   Also used (cast to int) as the index into connection_state_table[]. */
164 enum plip_connection_state {
165 PLIP_CN_NONE=0,
166 PLIP_CN_RECEIVE,
167 PLIP_CN_SEND,
168 PLIP_CN_CLOSING,
169 PLIP_CN_ERROR
/* Progress of one packet transfer (shared by send and receive paths);
   the handlers fall through these states in order. */
172 enum plip_packet_state {
173 PLIP_PK_DONE=0,
174 PLIP_PK_TRIGGER,
175 PLIP_PK_LENGTH_LSB,
176 PLIP_PK_LENGTH_MSB,
177 PLIP_PK_DATA,
178 PLIP_PK_CHECKSUM
/* Which half of the current byte is being clocked over the wire. */
181 enum plip_nibble_state {
182 PLIP_NB_BEGIN,
183 PLIP_NB_1,
184 PLIP_NB_2,
/* Per-direction transfer context: resumable state for one packet. */
187 struct plip_local {
188 enum plip_packet_state state;
189 enum plip_nibble_state nibble;
190 union {
191 struct {
/* Byte order of the on-wire 16-bit length (LSB is sent first). */
192 #if defined(__LITTLE_ENDIAN)
193 unsigned char lsb;
194 unsigned char msb;
195 #elif defined(__BIG_ENDIAN)
196 unsigned char msb;
197 unsigned char lsb;
198 #else
199 #error "Please fix the endianness defines in <asm/byteorder.h>"
200 #endif
201 } b;
202 unsigned short h;
203 } length;
204 unsigned short byte;
205 unsigned char checksum;
206 unsigned char data;
207 struct sk_buff *skb;
/* Driver-private state, reached via netdev_priv(dev) / dev->priv. */
210 struct net_local {
211 struct net_device_stats enet_stats;
/* immediate: runs plip_bh; deferred: retry kick; timer: poll mode. */
212 struct work_struct immediate;
213 struct work_struct deferred;
214 struct work_struct timer;
215 struct plip_local snd_data;
216 struct plip_local rcv_data;
217 struct pardevice *pardev;
/* trigger/nibble: handshake timeouts in PLIP_DELAY_UNIT usec. */
218 unsigned long trigger;
219 unsigned long nibble;
220 enum plip_connection_state connection;
221 unsigned short timeout_count;
222 int is_deferred;
/* port_owner: we currently hold the parport; should_relinquish:
   another parport client asked for it while we were busy. */
223 int port_owner;
224 int should_relinquish;
/* Saved ethernet header builders, wrapped by plip_hard_header*(). */
225 int (*orig_hard_header)(struct sk_buff *skb, struct net_device *dev,
226 unsigned short type, void *daddr,
227 void *saddr, unsigned len);
228 int (*orig_hard_header_cache)(struct neighbour *neigh,
229 struct hh_cache *hh);
230 spinlock_t lock;
/* kill_timer / killed_timer_sem: poll-mode timer shutdown handshake. */
231 atomic_t kill_timer;
232 struct semaphore killed_timer_sem;
235 inline static void enable_parport_interrupts (struct net_device *dev)
237 if (dev->irq != -1)
239 struct parport *port =
240 ((struct net_local *)dev->priv)->pardev->port;
241 port->ops->enable_irq (port);
245 inline static void disable_parport_interrupts (struct net_device *dev)
247 if (dev->irq != -1)
249 struct parport *port =
250 ((struct net_local *)dev->priv)->pardev->port;
251 port->ops->disable_irq (port);
255 inline static void write_data (struct net_device *dev, unsigned char data)
257 struct parport *port =
258 ((struct net_local *)dev->priv)->pardev->port;
260 port->ops->write_data (port, data);
263 inline static unsigned char read_status (struct net_device *dev)
265 struct parport *port =
266 ((struct net_local *)dev->priv)->pardev->port;
268 return port->ops->read_status (port);
271 /* Entry point of PLIP driver.
272 Probe the hardware, and register/initialize the driver.
274 PLIP is rather weird, because of the way it interacts with the parport
275 system. It is _not_ initialised from Space.c. Instead, plip_init()
276 is called, and that function makes up a "struct net_device" for each port, and
277 then calls us here.
280 static void
281 plip_init_netdev(struct net_device *dev)
283 struct net_local *nl = netdev_priv(dev);
285 /* Then, override parts of it */
286 dev->hard_start_xmit = plip_tx_packet;
287 dev->open = plip_open;
288 dev->stop = plip_close;
289 dev->get_stats = plip_get_stats;
290 dev->do_ioctl = plip_ioctl;
291 dev->header_cache_update = NULL;
292 dev->tx_queue_len = 10;
293 dev->flags = IFF_POINTOPOINT|IFF_NOARP;
294 memset(dev->dev_addr, 0xfc, ETH_ALEN);
296 /* Set the private structure */
297 nl->orig_hard_header = dev->hard_header;
298 dev->hard_header = plip_hard_header;
300 nl->orig_hard_header_cache = dev->hard_header_cache;
301 dev->hard_header_cache = plip_hard_header_cache;
304 nl->port_owner = 0;
306 /* Initialize constants */
307 nl->trigger = PLIP_TRIGGER_WAIT;
308 nl->nibble = PLIP_NIBBLE_WAIT;
310 /* Initialize task queue structures */
311 INIT_WORK(&nl->immediate, (void (*)(void *))plip_bh, dev);
312 INIT_WORK(&nl->deferred, (void (*)(void *))plip_kick_bh, dev);
314 if (dev->irq == -1)
315 INIT_WORK(&nl->timer, (void (*)(void *))plip_timer_bh, dev);
317 spin_lock_init(&nl->lock);
320 /* Bottom half handler for the delayed request.
321 This routine is kicked by do_timer().
322 Request `plip_bh' to be invoked. */
323 static void
324 plip_kick_bh(struct net_device *dev)
326 struct net_local *nl = netdev_priv(dev);
328 if (nl->is_deferred)
329 schedule_work(&nl->immediate);
332 /* Forward declarations of internal routines */
333 static int plip_none(struct net_device *, struct net_local *,
334 struct plip_local *, struct plip_local *);
335 static int plip_receive_packet(struct net_device *, struct net_local *,
336 struct plip_local *, struct plip_local *);
337 static int plip_send_packet(struct net_device *, struct net_local *,
338 struct plip_local *, struct plip_local *);
339 static int plip_connection_close(struct net_device *, struct net_local *,
340 struct plip_local *, struct plip_local *);
341 static int plip_error(struct net_device *, struct net_local *,
342 struct plip_local *, struct plip_local *);
343 static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
344 struct plip_local *snd,
345 struct plip_local *rcv,
346 int error);
348 #define OK 0
349 #define TIMEOUT 1
350 #define ERROR 2
351 #define HS_TIMEOUT 3
353 typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
354 struct plip_local *snd, struct plip_local *rcv);
/* State dispatch table for plip_bh().  Indexed by nl->connection, so
   the entry order must match enum plip_connection_state exactly:
   NONE, RECEIVE, SEND, CLOSING, ERROR. */
356 static plip_func connection_state_table[] =
358 plip_none,
359 plip_receive_packet,
360 plip_send_packet,
361 plip_connection_close,
362 plip_error
365 /* Bottom half handler of PLIP. */
366 static void
367 plip_bh(struct net_device *dev)
369 struct net_local *nl = netdev_priv(dev);
370 struct plip_local *snd = &nl->snd_data;
371 struct plip_local *rcv = &nl->rcv_data;
372 plip_func f;
373 int r;
375 nl->is_deferred = 0;
376 f = connection_state_table[nl->connection];
377 if ((r = (*f)(dev, nl, snd, rcv)) != OK
378 && (r = plip_bh_timeout_error(dev, nl, snd, rcv, r)) != OK) {
379 nl->is_deferred = 1;
380 schedule_delayed_work(&nl->deferred, 1);
384 static void
385 plip_timer_bh(struct net_device *dev)
387 struct net_local *nl = netdev_priv(dev);
389 if (!(atomic_read (&nl->kill_timer))) {
390 plip_interrupt (-1, dev, NULL);
392 schedule_delayed_work(&nl->timer, 1);
394 else {
395 up (&nl->killed_timer_sem);
/*
 * Error path of the bottom half: decide between retrying the current
 * transfer later (return TIMEOUT without teardown) and aborting it,
 * freeing any half-transferred skbs and dropping into PLIP_CN_ERROR.
 */
399 static int
400 plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
401 struct plip_local *snd, struct plip_local *rcv,
402 int error)
404 unsigned char c0;
406 * This is tricky. If we got here from the beginning of send (either
407 * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
408 * already disabled. With the old variant of {enable,disable}_irq()
409 * extra disable_irq() was a no-op. Now it became mortal - it's
410 * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
411 * that is). So we have to treat HS_TIMEOUT and ERROR from send
412 * in a special way.
415 spin_lock_irq(&nl->lock);
416 if (nl->connection == PLIP_CN_SEND) {
418 if (error != ERROR) { /* Timeout */
419 nl->timeout_count++;
/* Handshake timeouts tolerate 10 retries, data timeouts only 3. */
420 if ((error == HS_TIMEOUT
421 && nl->timeout_count <= 10)
422 || nl->timeout_count <= 3) {
423 spin_unlock_irq(&nl->lock);
424 /* Try again later */
425 return TIMEOUT;
427 c0 = read_status(dev);
428 printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
429 dev->name, snd->state, c0);
430 } else
/* ERROR during send: promote so the IRQ is re-balanced below. */
431 error = HS_TIMEOUT;
432 nl->enet_stats.tx_errors++;
433 nl->enet_stats.tx_aborted_errors++;
434 } else if (nl->connection == PLIP_CN_RECEIVE) {
435 if (rcv->state == PLIP_PK_TRIGGER) {
436 /* Transmission was interrupted. */
437 spin_unlock_irq(&nl->lock);
438 return OK;
440 if (error != ERROR) { /* Timeout */
441 if (++nl->timeout_count <= 3) {
442 spin_unlock_irq(&nl->lock);
443 /* Try again later */
444 return TIMEOUT;
446 c0 = read_status(dev);
447 printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
448 dev->name, rcv->state, c0);
450 nl->enet_stats.rx_dropped++;
/* Teardown: abandon both directions and free pending skbs. */
452 rcv->state = PLIP_PK_DONE;
453 if (rcv->skb) {
454 kfree_skb(rcv->skb);
455 rcv->skb = NULL;
457 snd->state = PLIP_PK_DONE;
458 if (snd->skb) {
459 dev_kfree_skb(snd->skb);
460 snd->skb = NULL;
462 spin_unlock_irq(&nl->lock);
/* Only the send handshake path still has the IRQ enabled (see the
   comment above) -- disable it exactly once before going to ERROR. */
463 if (error == HS_TIMEOUT) {
464 DISABLE(dev->irq);
465 synchronize_irq(dev->irq);
467 disable_parport_interrupts (dev);
468 netif_stop_queue (dev);
469 nl->connection = PLIP_CN_ERROR;
470 write_data (dev, 0x00);
472 return TIMEOUT;
475 static int
476 plip_none(struct net_device *dev, struct net_local *nl,
477 struct plip_local *snd, struct plip_local *rcv)
479 return OK;
482 /* PLIP_RECEIVE --- receive a byte(two nibbles)
483 Returns OK on success, TIMEOUT on timeout */
/* Resumable: *ns_p records which nibble we are waiting for, so a
   TIMEOUT return can be retried from the same point.  The case labels
   below fall through intentionally -- one call normally clocks in a
   whole byte. */
484 inline static int
485 plip_receive(unsigned short nibble_timeout, struct net_device *dev,
486 enum plip_nibble_state *ns_p, unsigned char *data_p)
488 unsigned char c0, c1;
489 unsigned int cx;
491 switch (*ns_p) {
/* Low nibble: wait for the peer to drop BUSY (bit 7), reading the
   status twice to make sure the lines are stable. */
492 case PLIP_NB_BEGIN:
493 cx = nibble_timeout;
494 while (1) {
495 c0 = read_status(dev);
496 udelay(PLIP_DELAY_UNIT);
497 if ((c0 & 0x80) == 0) {
498 c1 = read_status(dev);
499 if (c0 == c1)
500 break;
502 if (--cx == 0)
503 return TIMEOUT;
/* Low nibble arrives on status bits 3..6. */
505 *data_p = (c0 >> 3) & 0x0f;
506 write_data (dev, 0x10); /* send ACK */
507 *ns_p = PLIP_NB_1;
/* fall through */
509 case PLIP_NB_1:
510 cx = nibble_timeout;
511 while (1) {
512 c0 = read_status(dev);
513 udelay(PLIP_DELAY_UNIT);
514 if (c0 & 0x80) {
515 c1 = read_status(dev);
516 if (c0 == c1)
517 break;
519 if (--cx == 0)
520 return TIMEOUT;
/* High nibble arrives on status bits 3..6 as well. */
522 *data_p |= (c0 << 1) & 0xf0;
523 write_data (dev, 0x00); /* send ACK */
524 *ns_p = PLIP_NB_BEGIN;
525 case PLIP_NB_2:
526 break;
528 return OK;
532 * Determine the packet's protocol ID. The rule here is that we
533 * assume 802.3 if the type field is short enough to be a length.
534 * This is normal practice and works for any 'now in use' protocol.
536 * PLIP is ethernet ish but the daddr might not be valid if unicast.
537 * PLIP fortunately has no bus architecture (its Point-to-point).
539 * We can't fix the daddr thing as that quirk (more bug) is embedded
540 * in far too many old systems not all even running Linux.
/* Like eth_type_trans(), but never classifies PACKET_OTHERHOST (see
   the daddr note above); unicast frames keep the default pkt_type. */
543 static unsigned short plip_type_trans(struct sk_buff *skb, struct net_device *dev)
545 struct ethhdr *eth;
546 unsigned char *rawp;
/* Strip the link-level header; mac.raw keeps pointing at it. */
548 skb->mac.raw=skb->data;
549 skb_pull(skb,dev->hard_header_len);
550 eth = eth_hdr(skb);
552 if(*eth->h_dest&1)
554 if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
555 skb->pkt_type=PACKET_BROADCAST;
556 else
557 skb->pkt_type=PACKET_MULTICAST;
561 * This ALLMULTI check should be redundant by 1.4
562 * so don't forget to remove it.
/* Values >= 1536 are EtherType protocol IDs; smaller means 802.3
   length field. */
565 if (ntohs(eth->h_proto) >= 1536)
566 return eth->h_proto;
568 rawp = skb->data;
571 * This is a magic hack to spot IPX packets. Older Novell breaks
572 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
573 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
574 * won't work for fault tolerant netware but does for the rest.
576 if (*(unsigned short *)rawp == 0xFFFF)
577 return htons(ETH_P_802_3);
580 * Real 802.2 LLC
582 return htons(ETH_P_802_2);
586 /* PLIP_RECEIVE_PACKET --- receive a packet */
/* PLIP_CN_RECEIVE handler.  Resumable state machine: each case falls
   through to the next on success; a TIMEOUT return resumes at the
   recorded rcv->state on the next invocation. */
587 static int
588 plip_receive_packet(struct net_device *dev, struct net_local *nl,
589 struct plip_local *snd, struct plip_local *rcv)
591 unsigned short nibble_timeout = nl->nibble;
592 unsigned char *lbuf;
594 switch (rcv->state) {
595 case PLIP_PK_TRIGGER:
596 DISABLE(dev->irq);
597 /* Don't need to synchronize irq, as we can safely ignore it */
598 disable_parport_interrupts (dev);
599 write_data (dev, 0x01); /* send ACK */
600 if (net_debug > 2)
601 printk(KERN_DEBUG "%s: receive start\n", dev->name);
602 rcv->state = PLIP_PK_LENGTH_LSB;
603 rcv->nibble = PLIP_NB_BEGIN;
/* fall through */
605 case PLIP_PK_LENGTH_LSB:
/* If we were about to send, this receive is a collision: back off,
   reschedule the send, and report OK so no error path runs. */
606 if (snd->state != PLIP_PK_DONE) {
607 if (plip_receive(nl->trigger, dev,
608 &rcv->nibble, &rcv->length.b.lsb)) {
609 /* collision, here dev->tbusy == 1 */
610 rcv->state = PLIP_PK_DONE;
611 nl->is_deferred = 1;
612 nl->connection = PLIP_CN_SEND;
613 schedule_delayed_work(&nl->deferred, 1);
614 enable_parport_interrupts (dev);
615 ENABLE(dev->irq);
616 return OK;
618 } else {
619 if (plip_receive(nibble_timeout, dev,
620 &rcv->nibble, &rcv->length.b.lsb))
621 return TIMEOUT;
623 rcv->state = PLIP_PK_LENGTH_MSB;
/* fall through */
625 case PLIP_PK_LENGTH_MSB:
626 if (plip_receive(nibble_timeout, dev,
627 &rcv->nibble, &rcv->length.b.msb))
628 return TIMEOUT;
/* Sanity-check the announced length before allocating. */
629 if (rcv->length.h > dev->mtu + dev->hard_header_len
630 || rcv->length.h < 8) {
631 printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
632 return ERROR;
634 /* Malloc up new buffer. */
635 rcv->skb = dev_alloc_skb(rcv->length.h + 2);
636 if (rcv->skb == NULL) {
637 printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
638 return ERROR;
640 skb_reserve(rcv->skb, 2); /* Align IP on 16 byte boundaries */
641 skb_put(rcv->skb,rcv->length.h);
642 rcv->skb->dev = dev;
643 rcv->state = PLIP_PK_DATA;
644 rcv->byte = 0;
645 rcv->checksum = 0;
/* fall through */
647 case PLIP_PK_DATA:
648 lbuf = rcv->skb->data;
/* First loop: clock in the remaining payload bytes (resumes at
   rcv->byte); second loop: sum the whole buffer for the checksum. */
650 if (plip_receive(nibble_timeout, dev,
651 &rcv->nibble, &lbuf[rcv->byte]))
652 return TIMEOUT;
653 while (++rcv->byte < rcv->length.h);
655 rcv->checksum += lbuf[--rcv->byte];
656 while (rcv->byte);
657 rcv->state = PLIP_PK_CHECKSUM;
/* fall through */
659 case PLIP_PK_CHECKSUM:
660 if (plip_receive(nibble_timeout, dev,
661 &rcv->nibble, &rcv->data))
662 return TIMEOUT;
663 if (rcv->data != rcv->checksum) {
664 nl->enet_stats.rx_crc_errors++;
665 if (net_debug)
666 printk(KERN_DEBUG "%s: checksum error\n", dev->name);
667 return ERROR;
669 rcv->state = PLIP_PK_DONE;
/* fall through */
671 case PLIP_PK_DONE:
672 /* Inform the upper layer for the arrival of a packet. */
673 rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
674 netif_rx(rcv->skb);
675 dev->last_rx = jiffies;
676 nl->enet_stats.rx_bytes += rcv->length.h;
677 nl->enet_stats.rx_packets++;
678 rcv->skb = NULL;
679 if (net_debug > 2)
680 printk(KERN_DEBUG "%s: receive end\n", dev->name);
682 /* Close the connection. */
683 write_data (dev, 0x00);
684 spin_lock_irq(&nl->lock);
/* If a send was queued during the receive, switch straight to it;
   otherwise the link goes back to idle. */
685 if (snd->state != PLIP_PK_DONE) {
686 nl->connection = PLIP_CN_SEND;
687 spin_unlock_irq(&nl->lock);
688 schedule_work(&nl->immediate);
689 enable_parport_interrupts (dev);
690 ENABLE(dev->irq);
691 return OK;
692 } else {
693 nl->connection = PLIP_CN_NONE;
694 spin_unlock_irq(&nl->lock);
695 enable_parport_interrupts (dev);
696 ENABLE(dev->irq);
697 return OK;
700 return OK;
703 /* PLIP_SEND --- send a byte (two nibbles)
704 Returns OK on success, TIMEOUT when timeout */
/* Mirror of plip_receive(): resumable via *ns_p, cases fall through.
   Each nibble is presented on the data lines, then the strobe bit
   (0x10) is toggled and we wait for the peer's ACK on BUSY (bit 7). */
705 inline static int
706 plip_send(unsigned short nibble_timeout, struct net_device *dev,
707 enum plip_nibble_state *ns_p, unsigned char data)
709 unsigned char c0;
710 unsigned int cx;
712 switch (*ns_p) {
713 case PLIP_NB_BEGIN:
714 write_data (dev, data & 0x0f);
715 *ns_p = PLIP_NB_1;
/* fall through */
717 case PLIP_NB_1:
718 write_data (dev, 0x10 | (data & 0x0f));
719 cx = nibble_timeout;
720 while (1) {
721 c0 = read_status(dev);
722 if ((c0 & 0x80) == 0)
723 break;
724 if (--cx == 0)
725 return TIMEOUT;
726 udelay(PLIP_DELAY_UNIT);
728 write_data (dev, 0x10 | (data >> 4));
729 *ns_p = PLIP_NB_2;
/* fall through */
731 case PLIP_NB_2:
732 write_data (dev, (data >> 4));
733 cx = nibble_timeout;
734 while (1) {
735 c0 = read_status(dev);
736 if (c0 & 0x80)
737 break;
738 if (--cx == 0)
739 return TIMEOUT;
740 udelay(PLIP_DELAY_UNIT);
742 *ns_p = PLIP_NB_BEGIN;
743 return OK;
745 return OK;
748 /* PLIP_SEND_PACKET --- send a packet */
/* PLIP_CN_SEND handler.  Resumable state machine like the receive
   side; snd->skb was queued by plip_tx_packet(). */
749 static int
750 plip_send_packet(struct net_device *dev, struct net_local *nl,
751 struct plip_local *snd, struct plip_local *rcv)
753 unsigned short nibble_timeout = nl->nibble;
754 unsigned char *lbuf;
755 unsigned char c0;
756 unsigned int cx;
758 if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
759 printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
760 snd->state = PLIP_PK_DONE;
761 snd->skb = NULL;
762 return ERROR;
765 switch (snd->state) {
766 case PLIP_PK_TRIGGER:
/* Peer must be idle (status == 0x80) before we may trigger it. */
767 if ((read_status(dev) & 0xf8) != 0x80)
768 return HS_TIMEOUT;
770 /* Trigger remote rx interrupt. */
771 write_data (dev, 0x08);
772 cx = nl->trigger;
773 while (1) {
774 udelay(PLIP_DELAY_UNIT);
775 spin_lock_irq(&nl->lock);
/* The peer triggered us first: concede the collision. */
776 if (nl->connection == PLIP_CN_RECEIVE) {
777 spin_unlock_irq(&nl->lock);
778 /* Interrupted. */
779 nl->enet_stats.collisions++;
780 return OK;
782 c0 = read_status(dev);
783 if (c0 & 0x08) {
784 spin_unlock_irq(&nl->lock);
785 DISABLE(dev->irq);
786 synchronize_irq(dev->irq);
/* Re-check after quiescing the IRQ: the handler may have
   switched us to RECEIVE in the meantime. */
787 if (nl->connection == PLIP_CN_RECEIVE) {
788 /* Interrupted.
789 We don't need to enable irq,
790 as it is soon disabled. */
791 /* Yes, we do. New variant of
792 {enable,disable}_irq *counts*
793 them. -- AV */
794 ENABLE(dev->irq);
795 nl->enet_stats.collisions++;
796 return OK;
798 disable_parport_interrupts (dev);
799 if (net_debug > 2)
800 printk(KERN_DEBUG "%s: send start\n", dev->name);
801 snd->state = PLIP_PK_LENGTH_LSB;
802 snd->nibble = PLIP_NB_BEGIN;
803 nl->timeout_count = 0;
804 break;
806 spin_unlock_irq(&nl->lock);
807 if (--cx == 0) {
808 write_data (dev, 0x00);
809 return HS_TIMEOUT;
/* fall through */
813 case PLIP_PK_LENGTH_LSB:
814 if (plip_send(nibble_timeout, dev,
815 &snd->nibble, snd->length.b.lsb))
816 return TIMEOUT;
817 snd->state = PLIP_PK_LENGTH_MSB;
/* fall through */
819 case PLIP_PK_LENGTH_MSB:
820 if (plip_send(nibble_timeout, dev,
821 &snd->nibble, snd->length.b.msb))
822 return TIMEOUT;
823 snd->state = PLIP_PK_DATA;
824 snd->byte = 0;
825 snd->checksum = 0;
/* fall through */
827 case PLIP_PK_DATA:
/* First loop: clock out the payload (resumes at snd->byte);
   second loop: sum the buffer for the trailing checksum octet. */
829 if (plip_send(nibble_timeout, dev,
830 &snd->nibble, lbuf[snd->byte]))
831 return TIMEOUT;
832 while (++snd->byte < snd->length.h);
834 snd->checksum += lbuf[--snd->byte];
835 while (snd->byte);
836 snd->state = PLIP_PK_CHECKSUM;
/* fall through */
838 case PLIP_PK_CHECKSUM:
839 if (plip_send(nibble_timeout, dev,
840 &snd->nibble, snd->checksum))
841 return TIMEOUT;
843 nl->enet_stats.tx_bytes += snd->skb->len;
844 dev_kfree_skb(snd->skb);
845 nl->enet_stats.tx_packets++;
846 snd->state = PLIP_PK_DONE;
/* fall through */
848 case PLIP_PK_DONE:
849 /* Close the connection */
850 write_data (dev, 0x00);
851 snd->skb = NULL;
852 if (net_debug > 2)
853 printk(KERN_DEBUG "%s: send end\n", dev->name);
854 nl->connection = PLIP_CN_CLOSING;
855 nl->is_deferred = 1;
856 schedule_delayed_work(&nl->deferred, 1);
857 enable_parport_interrupts (dev);
858 ENABLE(dev->irq);
859 return OK;
861 return OK;
864 static int
865 plip_connection_close(struct net_device *dev, struct net_local *nl,
866 struct plip_local *snd, struct plip_local *rcv)
868 spin_lock_irq(&nl->lock);
869 if (nl->connection == PLIP_CN_CLOSING) {
870 nl->connection = PLIP_CN_NONE;
871 netif_wake_queue (dev);
873 spin_unlock_irq(&nl->lock);
874 if (nl->should_relinquish) {
875 nl->should_relinquish = nl->port_owner = 0;
876 parport_release(nl->pardev);
878 return OK;
881 /* PLIP_ERROR --- wait till other end settled */
882 static int
883 plip_error(struct net_device *dev, struct net_local *nl,
884 struct plip_local *snd, struct plip_local *rcv)
886 unsigned char status;
888 status = read_status(dev);
889 if ((status & 0xf8) == 0x80) {
890 if (net_debug > 2)
891 printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
892 nl->connection = PLIP_CN_NONE;
893 nl->should_relinquish = 0;
894 netif_start_queue (dev);
895 enable_parport_interrupts (dev);
896 ENABLE(dev->irq);
897 netif_wake_queue (dev);
898 } else {
899 nl->is_deferred = 1;
900 schedule_delayed_work(&nl->deferred, 1);
903 return OK;
906 /* Handle the parallel port interrupts. */
/* Also called with irq == -1 from plip_timer_bh() in poll mode.
   A genuine trigger from the peer reads as 0xc0 in the top five
   status bits; anything else is spurious. */
907 static void
908 plip_interrupt(int irq, void *dev_id, struct pt_regs * regs)
910 struct net_device *dev = dev_id;
911 struct net_local *nl;
912 struct plip_local *rcv;
913 unsigned char c0;
915 if (dev == NULL) {
916 printk(KERN_DEBUG "plip_interrupt: irq %d for unknown device.\n", irq);
917 return;
920 nl = netdev_priv(dev);
921 rcv = &nl->rcv_data;
923 spin_lock_irq (&nl->lock);
925 c0 = read_status(dev);
926 if ((c0 & 0xf8) != 0xc0) {
927 if ((dev->irq != -1) && (net_debug > 1))
928 printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
929 spin_unlock_irq (&nl->lock);
930 return;
933 if (net_debug > 3)
934 printk(KERN_DEBUG "%s: interrupt.\n", dev->name);
936 switch (nl->connection) {
937 case PLIP_CN_CLOSING:
938 netif_wake_queue (dev);
/* fall through: treat the trigger as the start of a receive,
   same as in the NONE and SEND states. */
939 case PLIP_CN_NONE:
940 case PLIP_CN_SEND:
941 rcv->state = PLIP_PK_TRIGGER;
942 nl->connection = PLIP_CN_RECEIVE;
943 nl->timeout_count = 0;
944 schedule_work(&nl->immediate);
945 break;
947 case PLIP_CN_RECEIVE:
948 /* May occur because there is race condition
949 around test and set of dev->interrupt.
950 Ignore this interrupt. */
951 break;
953 case PLIP_CN_ERROR:
954 printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
955 break;
958 spin_unlock_irq(&nl->lock);
/* hard_start_xmit entry point.  Returns 0 on accept; returning 1 in
   this (pre-NETDEV_TX_*) API asks the core to requeue the skb. */
961 static int
962 plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
964 struct net_local *nl = netdev_priv(dev);
965 struct plip_local *snd = &nl->snd_data;
967 if (netif_queue_stopped(dev))
968 return 1;
970 /* We may need to grab the bus */
971 if (!nl->port_owner) {
972 if (parport_claim(nl->pardev))
973 return 1;
974 nl->port_owner = 1;
/* Stop the queue: PLIP handles one outstanding packet at a time;
   it is restarted from the close/error paths. */
977 netif_stop_queue (dev);
979 if (skb->len > dev->mtu + dev->hard_header_len) {
980 printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
981 netif_start_queue (dev);
982 return 1;
985 if (net_debug > 2)
986 printk(KERN_DEBUG "%s: send request\n", dev->name);
988 spin_lock_irq(&nl->lock);
989 dev->trans_start = jiffies;
990 snd->skb = skb;
991 snd->length.h = skb->len;
992 snd->state = PLIP_PK_TRIGGER;
/* Only claim the link when idle; if a receive is in progress the
   send side is picked up when that transfer completes. */
993 if (nl->connection == PLIP_CN_NONE) {
994 nl->connection = PLIP_CN_SEND;
995 nl->timeout_count = 0;
997 schedule_work(&nl->immediate);
998 spin_unlock_irq(&nl->lock);
1000 return 0;
1003 static void
1004 plip_rewrite_address(struct net_device *dev, struct ethhdr *eth)
1006 struct in_device *in_dev;
1008 if ((in_dev=dev->ip_ptr) != NULL) {
1009 /* Any address will do - we take the first */
1010 struct in_ifaddr *ifa=in_dev->ifa_list;
1011 if (ifa != NULL) {
1012 memcpy(eth->h_source, dev->dev_addr, 6);
1013 memset(eth->h_dest, 0xfc, 2);
1014 memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
1019 static int
1020 plip_hard_header(struct sk_buff *skb, struct net_device *dev,
1021 unsigned short type, void *daddr,
1022 void *saddr, unsigned len)
1024 struct net_local *nl = netdev_priv(dev);
1025 int ret;
1027 if ((ret = nl->orig_hard_header(skb, dev, type, daddr, saddr, len)) >= 0)
1028 plip_rewrite_address (dev, (struct ethhdr *)skb->data);
1030 return ret;
1033 int plip_hard_header_cache(struct neighbour *neigh,
1034 struct hh_cache *hh)
1036 struct net_local *nl = neigh->dev->priv;
1037 int ret;
1039 if ((ret = nl->orig_hard_header_cache(neigh, hh)) == 0)
1041 struct ethhdr *eth;
1043 eth = (struct ethhdr*)(((u8*)hh->hh_data) +
1044 HH_DATA_OFF(sizeof(*eth)));
1045 plip_rewrite_address (neigh->dev, eth);
1048 return ret;
1051 /* Open/initialize the board. This is called (in the current kernel)
1052 sometime after booting when the 'ifconfig' program is run.
1054 This routine gets exclusive access to the parallel port by allocating
1055 its IRQ line.
/* Returns 0 on success or -EAGAIN if the parport cannot be claimed. */
1057 static int
1058 plip_open(struct net_device *dev)
1060 struct net_local *nl = netdev_priv(dev);
1061 struct in_device *in_dev;
1063 /* Grab the port */
1064 if (!nl->port_owner) {
1065 if (parport_claim(nl->pardev)) return -EAGAIN;
1066 nl->port_owner = 1;
1069 nl->should_relinquish = 0;
1071 /* Clear the data port. */
1072 write_data (dev, 0x00);
1074 /* Enable rx interrupt. */
1075 enable_parport_interrupts (dev);
/* Poll mode: start the timer bottom half instead of a real IRQ. */
1076 if (dev->irq == -1)
1078 atomic_set (&nl->kill_timer, 0);
1079 schedule_delayed_work(&nl->timer, 1);
1082 /* Initialize the state machine. */
1083 nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
1084 nl->rcv_data.skb = nl->snd_data.skb = NULL;
1085 nl->connection = PLIP_CN_NONE;
1086 nl->is_deferred = 0;
1088 /* Fill in the MAC-level header.
1089 We used to abuse dev->broadcast to store the point-to-point
1090 MAC address, but we no longer do it. Instead, we fetch the
1091 interface address whenever it is needed, which is cheap enough
1092 because we use the hh_cache. Actually, abusing dev->broadcast
1093 didn't work, because when using plip_open the point-to-point
1094 address isn't yet known.
1095 PLIP doesn't have a real MAC address, but we need it to be
1096 DOS compatible, and to properly support taps (otherwise,
1097 when the device address isn't identical to the address of a
1098 received frame, the kernel incorrectly drops it). */
1100 if ((in_dev=dev->ip_ptr) != NULL) {
1101 /* Any address will do - we take the first. We already
1102 have the first two bytes filled with 0xfc, from
1103 plip_init_dev(). */
1104 struct in_ifaddr *ifa=in_dev->ifa_list;
1105 if (ifa != NULL) {
1106 memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
1110 netif_start_queue (dev);
1112 return 0;
1115 /* The inverse routine to plip_open (). */
/* Stops the queue and IRQ, kills the poll-mode timer (waiting for it
   to acknowledge via killed_timer_sem), releases the parport and
   frees any in-flight skbs. */
1116 static int
1117 plip_close(struct net_device *dev)
1119 struct net_local *nl = netdev_priv(dev);
1120 struct plip_local *snd = &nl->snd_data;
1121 struct plip_local *rcv = &nl->rcv_data;
1123 netif_stop_queue (dev);
1124 DISABLE(dev->irq);
1125 synchronize_irq(dev->irq);
/* Poll mode: ask plip_timer_bh to stop and wait until it has. */
1127 if (dev->irq == -1)
1129 init_MUTEX_LOCKED (&nl->killed_timer_sem);
1130 atomic_set (&nl->kill_timer, 1);
1131 down (&nl->killed_timer_sem);
1134 #ifdef NOTDEF
1135 outb(0x00, PAR_DATA(dev));
1136 #endif
1137 nl->is_deferred = 0;
1138 nl->connection = PLIP_CN_NONE;
1139 if (nl->port_owner) {
1140 parport_release(nl->pardev);
1141 nl->port_owner = 0;
/* Drop any packet caught mid-transfer in either direction. */
1144 snd->state = PLIP_PK_DONE;
1145 if (snd->skb) {
1146 dev_kfree_skb(snd->skb);
1147 snd->skb = NULL;
1149 rcv->state = PLIP_PK_DONE;
1150 if (rcv->skb) {
1151 kfree_skb(rcv->skb);
1152 rcv->skb = NULL;
1155 #ifdef NOTDEF
1156 /* Reset. */
1157 outb(0x00, PAR_CONTROL(dev));
1158 #endif
1159 return 0;
1162 static int
1163 plip_preempt(void *handle)
1165 struct net_device *dev = (struct net_device *)handle;
1166 struct net_local *nl = netdev_priv(dev);
1168 /* Stand our ground if a datagram is on the wire */
1169 if (nl->connection != PLIP_CN_NONE) {
1170 nl->should_relinquish = 1;
1171 return 1;
1174 nl->port_owner = 0; /* Remember that we released the bus */
1175 return 0;
1178 static void
1179 plip_wakeup(void *handle)
1181 struct net_device *dev = (struct net_device *)handle;
1182 struct net_local *nl = netdev_priv(dev);
1184 if (nl->port_owner) {
1185 /* Why are we being woken up? */
1186 printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
1187 if (!parport_claim(nl->pardev))
1188 /* bus_owner is already set (but why?) */
1189 printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
1190 else
1191 return;
1194 if (!(dev->flags & IFF_UP))
1195 /* Don't need the port when the interface is down */
1196 return;
1198 if (!parport_claim(nl->pardev)) {
1199 nl->port_owner = 1;
1200 /* Clear the data port. */
1201 write_data (dev, 0x00);
1204 return;
1207 static struct net_device_stats *
1208 plip_get_stats(struct net_device *dev)
1210 struct net_local *nl = netdev_priv(dev);
1211 struct net_device_stats *r = &nl->enet_stats;
1213 return r;
1216 static int
1217 plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1219 struct net_local *nl = netdev_priv(dev);
1220 struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
1222 if (cmd != SIOCDEVPLIP)
1223 return -EOPNOTSUPP;
1225 switch(pc->pcmd) {
1226 case PLIP_GET_TIMEOUT:
1227 pc->trigger = nl->trigger;
1228 pc->nibble = nl->nibble;
1229 break;
1230 case PLIP_SET_TIMEOUT:
1231 if(!capable(CAP_NET_ADMIN))
1232 return -EPERM;
1233 nl->trigger = pc->trigger;
1234 nl->nibble = pc->nibble;
1235 break;
1236 default:
1237 return -EOPNOTSUPP;
1239 return 0;
1242 static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
1243 static int timid;
1245 module_param_array(parport, int, NULL, 0);
1246 module_param(timid, int, 0);
1247 MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");
1249 static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
1251 static inline int
1252 plip_searchfor(int list[], int a)
1254 int i;
1255 for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
1256 if (list[i] == a) return 1;
1258 return 0;
1261 /* plip_attach() is called (by the parport code) when a port is
1262 * available to use. */
1263 static void plip_attach (struct parport *port)
1265 static int unit;
1266 struct net_device *dev;
1267 struct net_local *nl;
1268 char name[IFNAMSIZ];
1270 if ((parport[0] == -1 && (!timid || !port->devices)) ||
1271 plip_searchfor(parport, port->number)) {
1272 if (unit == PLIP_MAX) {
1273 printk(KERN_ERR "plip: too many devices\n");
1274 return;
1277 sprintf(name, "plip%d", unit);
1278 dev = alloc_etherdev(sizeof(struct net_local));
1279 if (!dev) {
1280 printk(KERN_ERR "plip: memory squeeze\n");
1281 return;
1284 strcpy(dev->name, name);
1286 SET_MODULE_OWNER(dev);
1287 dev->irq = port->irq;
1288 dev->base_addr = port->base;
1289 if (port->irq == -1) {
1290 printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode,"
1291 "which is fairly inefficient!\n", port->name);
1294 nl = netdev_priv(dev);
1295 nl->pardev = parport_register_device(port, name, plip_preempt,
1296 plip_wakeup, plip_interrupt,
1297 0, dev);
1299 if (!nl->pardev) {
1300 printk(KERN_ERR "%s: parport_register failed\n", name);
1301 goto err_free_dev;
1302 return;
1305 plip_init_netdev(dev);
1307 if (register_netdev(dev)) {
1308 printk(KERN_ERR "%s: network register failed\n", name);
1309 goto err_parport_unregister;
1312 printk(KERN_INFO "%s", version);
1313 if (dev->irq != -1)
1314 printk(KERN_INFO "%s: Parallel port at %#3lx, "
1315 "using IRQ %d.\n",
1316 dev->name, dev->base_addr, dev->irq);
1317 else
1318 printk(KERN_INFO "%s: Parallel port at %#3lx, "
1319 "not using IRQ.\n",
1320 dev->name, dev->base_addr);
1321 dev_plip[unit++] = dev;
1323 return;
1325 err_parport_unregister:
1326 parport_unregister_device(nl->pardev);
1327 err_free_dev:
1328 free_netdev(dev);
1329 return;
/* plip_detach() is called (by the parport code) when a port is
 * no longer available to use. */
static void plip_detach (struct parport *port)
{
	/* Nothing to do */
}
1339 static struct parport_driver plip_driver = {
1340 .name = "plip",
1341 .attach = plip_attach,
1342 .detach = plip_detach
1345 static void __exit plip_cleanup_module (void)
1347 struct net_device *dev;
1348 int i;
1350 parport_unregister_driver (&plip_driver);
1352 for (i=0; i < PLIP_MAX; i++) {
1353 if ((dev = dev_plip[i])) {
1354 struct net_local *nl = netdev_priv(dev);
1355 unregister_netdev(dev);
1356 if (nl->port_owner)
1357 parport_release(nl->pardev);
1358 parport_unregister_device(nl->pardev);
1359 free_netdev(dev);
1360 dev_plip[i] = NULL;
1365 #ifndef MODULE
1367 static int parport_ptr;
1369 static int __init plip_setup(char *str)
1371 int ints[4];
1373 str = get_options(str, ARRAY_SIZE(ints), ints);
1375 /* Ugh. */
1376 if (!strncmp(str, "parport", 7)) {
1377 int n = simple_strtoul(str+7, NULL, 10);
1378 if (parport_ptr < PLIP_MAX)
1379 parport[parport_ptr++] = n;
1380 else
1381 printk(KERN_INFO "plip: too many ports, %s ignored.\n",
1382 str);
1383 } else if (!strcmp(str, "timid")) {
1384 timid = 1;
1385 } else {
1386 if (ints[0] == 0 || ints[1] == 0) {
1387 /* disable driver on "plip=" or "plip=0" */
1388 parport[0] = -2;
1389 } else {
1390 printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
1391 ints[1]);
1394 return 1;
1397 __setup("plip=", plip_setup);
1399 #endif /* !MODULE */
1401 static int __init plip_init (void)
1403 if (parport[0] == -2)
1404 return 0;
1406 if (parport[0] != -1 && timid) {
1407 printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
1408 timid = 0;
1411 if (parport_register_driver (&plip_driver)) {
1412 printk (KERN_WARNING "plip: couldn't register driver\n");
1413 return 1;
1416 return 0;
1419 module_init(plip_init);
1420 module_exit(plip_cleanup_module);
1421 MODULE_LICENSE("GPL");
/*
 * Local variables:
 * compile-command: "gcc -DMODULE -DMODVERSIONS -D__KERNEL__ -Wall -Wstrict-prototypes -O2 -g -fomit-frame-pointer -pipe -c plip.c"
 * End:
 */