/*********************************************************************
 *
 * Filename:      w83977af_ir.c
 * Description:   FIR driver for the Winbond W83977AF Super I/O chip
 * Status:        Experimental.
 * Author:        Paul VanderSpek
 * Created at:    Wed Nov  4 11:46:16 1998
 * Modified at:   Fri Jan 28 12:10:59 2000
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 *
 *     Copyright (c) 1998-2000 Dag Brattli <dagb@cs.uit.no>
 *     Copyright (c) 1998-1999 Rebel.com
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     Neither Paul VanderSpek nor Rebel.com admit liability nor provide
 *     warranty for any of this software. This material is provided "AS-IS".
 *
 *     If you find bugs in this file, it is very likely that the same bug
 *     will also be in pc87108.c, since the implementations are quite
 *     similar.
 *
 *     Notice that every function that needs to access the chip in _any_
 *     way must save the BSR register on entry and restore it on exit.
 *     It is _very_ important to follow this policy!
 *
 *         bank = inb(iobase + BSR);
 *
 *         do_your_stuff_here();
 *
 *         outb(bank, iobase + BSR);
 *
 ********************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/rtnetlink.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#include <asm/byteorder.h>

#include <net/irda/irda.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include "w83977af_ir.h"

#define CONFIG_USE_W977_PNP     /* Currently needed */
#define PIO_MAX_SPEED           115200
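/*
 * Speeds above PIO_MAX_SPEED (MIR/FIR) are handled with DMA transfers;
 * at PIO_MAX_SPEED and below (SIR) the driver uses PIO through the UART
 * registers instead.
 */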
static char *driver_name = "w83977af_ir";
static int qos_mtt_bits = 0x07;   /* 1 ms or more */

#define CHIP_IO_EXTENT 8

static unsigned int io[] = { 0x180, ~0, ~0, ~0 };
#ifdef CONFIG_ARCH_NETWINDER      /* Adjust to NetWinder differences */
static unsigned int irq[] = { 6, 0, 0, 0 };
#else
static unsigned int irq[] = { 11, 0, 0, 0 };
#endif
static unsigned int dma[] = { 1, 0, 0, 0 };
static unsigned int efbase[] = { W977_EFIO_BASE, W977_EFIO2_BASE };
static unsigned int efio = W977_EFIO_BASE;

static struct w83977af_ir *dev_self[] = { NULL, NULL, NULL, NULL };
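/* One entry per supported chip instance; filled in by w83977af_open() */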
static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
                         unsigned int dma);
static int w83977af_close(struct w83977af_ir *self);
static int w83977af_probe(int iobase, int irq, int dma);
static int w83977af_dma_receive(struct w83977af_ir *self);
static int w83977af_dma_receive_complete(struct w83977af_ir *self);
static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
                                      struct net_device *dev);
static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size);
static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
static int w83977af_is_receiving(struct w83977af_ir *self);

static int w83977af_net_open(struct net_device *dev);
static int w83977af_net_close(struct net_device *dev);
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
/*
 * Function w83977af_init ()
 *
 *    Initialize chip. Just try to find out how many chips we are dealing
 *    with and where they are.
 */
static int __init w83977af_init(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(dev_self) && io[i] < 2000; i++) {
                if (w83977af_open(i, io[i], irq[i], dma[i]) == 0)
                        return 0;
        }
        return -ENODEV;
}
/*
 * Function w83977af_cleanup ()
 *
 *    Close all configured chips
 */
static void __exit w83977af_cleanup(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(dev_self); i++) {
                if (dev_self[i])
                        w83977af_close(dev_self[i]);
        }
}
static const struct net_device_ops w83977_netdev_ops = {
        .ndo_open       = w83977af_net_open,
        .ndo_stop       = w83977af_net_close,
        .ndo_start_xmit = w83977af_hard_xmit,
        .ndo_do_ioctl   = w83977af_net_ioctl,
};
/*
 * Function w83977af_open (i, iobase, irq, dma)
 *
 *    Open driver instance
 */
static int w83977af_open(int i, unsigned int iobase, unsigned int irq,
                         unsigned int dma)
{
        struct net_device *dev;
        struct w83977af_ir *self;
        int err;

        /* Lock the port that we need */
        if (!request_region(iobase, CHIP_IO_EXTENT, driver_name)) {
                pr_debug("%s: can't get iobase of 0x%03x\n",
                         __func__, iobase);
                return -ENODEV;
        }

        if (w83977af_probe(iobase, irq, dma) == -1) {
                err = -1;
                goto err_out;
        }

        /*
         * Allocate new instance of the driver
         */
        dev = alloc_irdadev(sizeof(struct w83977af_ir));
        if (!dev) {
                pr_err("IrDA: Can't allocate memory for IrDA control block!\n");
                err = -ENOMEM;
                goto err_out;
        }

        self = netdev_priv(dev);
        spin_lock_init(&self->lock);

        /* Initialize IO */
        self->io.fir_base  = iobase;
        self->io.irq       = irq;
        self->io.fir_ext   = CHIP_IO_EXTENT;
        self->io.dma       = dma;
        self->io.fifo_size = 32;

        /* Initialize QoS for this device */
        irda_init_max_qos_capabilies(&self->qos);

        /* The only value we must override is the baudrate */

        /* FIXME: The HP HDLS-1100 does not support 1152000! */
        self->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 | IR_57600 |
                IR_115200 | IR_576000 | IR_1152000 | (IR_4000000 << 8);

        /* The HP HDLS-1100 needs 1 ms according to the specs */
        self->qos.min_turn_time.bits = qos_mtt_bits;
        irda_qos_bits_to_value(&self->qos);

        /* Max DMA buffer size needed = (data_size + 6) * (window_size) + 6; */
        self->rx_buff.truesize = 14384;
        self->tx_buff.truesize = 4000;

        /* Allocate memory if needed */
        self->rx_buff.head =
                dma_zalloc_coherent(NULL, self->rx_buff.truesize,
                                    &self->rx_buff_dma, GFP_KERNEL);
        if (!self->rx_buff.head) {
                err = -ENOMEM;
                goto err_out1;
        }

        self->tx_buff.head =
                dma_zalloc_coherent(NULL, self->tx_buff.truesize,
                                    &self->tx_buff_dma, GFP_KERNEL);
        if (!self->tx_buff.head) {
                err = -ENOMEM;
                goto err_out2;
        }

        self->rx_buff.in_frame = FALSE;
        self->rx_buff.state = OUTSIDE_FRAME;
        self->tx_buff.data = self->tx_buff.head;
        self->rx_buff.data = self->rx_buff.head;
        self->netdev = dev;

        dev->netdev_ops = &w83977_netdev_ops;

        err = register_netdev(dev);
        if (err) {
                net_err_ratelimited("%s: register_netdev() failed!\n",
                                    __func__);
                goto err_out3;
        }
        net_info_ratelimited("IrDA: Registered device %s\n", dev->name);

        /* Need to store self somewhere */
        dev_self[i] = self;

        return 0;
err_out3:
        dma_free_coherent(NULL, self->tx_buff.truesize,
                          self->tx_buff.head, self->tx_buff_dma);
err_out2:
        dma_free_coherent(NULL, self->rx_buff.truesize,
                          self->rx_buff.head, self->rx_buff_dma);
err_out1:
        free_netdev(dev);
err_out:
        release_region(iobase, CHIP_IO_EXTENT);
        return err;
}
/*
 * Function w83977af_close (self)
 *
 *    Close driver instance
 */
static int w83977af_close(struct w83977af_ir *self)
{
        int iobase;

        iobase = self->io.fir_base;

#ifdef CONFIG_USE_W977_PNP
        /* enter PnP configuration mode */
        w977_efm_enter(efio);

        w977_select_device(W977_DEVICE_IR, efio);

        /* Deactivate device */
        w977_write_reg(0x30, 0x00, efio);

        w977_efm_exit(efio);
#endif /* CONFIG_USE_W977_PNP */

        /* Remove netdevice */
        unregister_netdev(self->netdev);

        /* Release the PORT that this driver is using */
        pr_debug("%s: Releasing Region %03x\n",
                 __func__, self->io.fir_base);
        release_region(self->io.fir_base, self->io.fir_ext);

        if (self->tx_buff.head)
                dma_free_coherent(NULL, self->tx_buff.truesize,
                                  self->tx_buff.head, self->tx_buff_dma);

        if (self->rx_buff.head)
                dma_free_coherent(NULL, self->rx_buff.truesize,
                                  self->rx_buff.head, self->rx_buff_dma);

        free_netdev(self->netdev);

        return 0;
}
static int w83977af_probe(int iobase, int irq, int dma)
{
        int version;
        int i;

        for (i = 0; i < 2; i++) {
#ifdef CONFIG_USE_W977_PNP
                /* Enter PnP configuration mode */
                w977_efm_enter(efbase[i]);

                w977_select_device(W977_DEVICE_IR, efbase[i]);

                /* Configure PnP port, IRQ, and DMA channel */
                w977_write_reg(0x60, (iobase >> 8) & 0xff, efbase[i]);
                w977_write_reg(0x61, (iobase) & 0xff, efbase[i]);

                w977_write_reg(0x70, irq, efbase[i]);
#ifdef CONFIG_ARCH_NETWINDER
                /* Netwinder uses 1 higher than Linux */
                w977_write_reg(0x74, dma + 1, efbase[i]);
#else
                w977_write_reg(0x74, dma, efbase[i]);
#endif /* CONFIG_ARCH_NETWINDER */
                w977_write_reg(0x75, 0x04, efbase[i]);  /* Disable Tx DMA */

                /* Set append hardware CRC, enable IR bank selection */
                w977_write_reg(0xf0, APEDCRC | ENBNKSEL, efbase[i]);

                /* Activate device */
                w977_write_reg(0x30, 0x01, efbase[i]);

                w977_efm_exit(efbase[i]);
#endif /* CONFIG_USE_W977_PNP */
                /* Disable Advanced mode */
                switch_bank(iobase, SET2);
                outb(0x00, iobase + 2);

                /* Turn on UART (global) interrupts */
                switch_bank(iobase, SET0);
                outb(HCR_EN_IRQ, iobase + HCR);

                /* Switch to advanced mode */
                switch_bank(iobase, SET2);
                outb(inb(iobase + ADCR1) | ADCR1_ADV_SL, iobase + ADCR1);

                /* Set default IR-mode */
                switch_bank(iobase, SET0);
                outb(HCR_SIR, iobase + HCR);

                /* Read the Advanced IR ID */
                switch_bank(iobase, SET3);
                version = inb(iobase + AUID);

                if (0x10 == (version & 0xf0)) {
                        efio = efbase[i];

                        /* Set FIFO size to 32 */
                        switch_bank(iobase, SET2);
                        outb(ADCR2_RXFS32 | ADCR2_TXFS32, iobase + ADCR2);

                        /* Set FIFO threshold to TX17, RX16 */
                        switch_bank(iobase, SET0);
                        outb(UFR_RXTL | UFR_TXTL | UFR_TXF_RST | UFR_RXF_RST |
                             UFR_EN_FIFO, iobase + UFR);

                        /* Receiver frame length */
                        switch_bank(iobase, SET4);
                        outb(2048 & 0xff, iobase + 6);
                        outb((2048 >> 8) & 0x1f, iobase + 7);

                        /*
                         * Init HP HSDL-1100 transceiver.
                         *
                         * Set IRX_MSL since we have 2 * receive paths IRRX,
                         * and IRRXH. Clear IRSL0D since we want IRSL0 * to
                         * be an input pin used for IRRXH
                         *
                         *   IRRX  pin 37 connected to receiver
                         *   IRTX  pin 38 connected to transmitter
                         *   FIRRX pin 39 connected to receiver      (IRSL0)
                         *   CIRRX pin 40 connected to pin 37
                         */
                        switch_bank(iobase, SET7);
                        outb(0x40, iobase + 7);

                        net_info_ratelimited("W83977AF (IR) driver loaded. Version: 0x%02x\n",
                                             version);

                        return 0;
                }
                /* Try next extended function register address */
                pr_debug("%s: Wrong chip version\n", __func__);
        }
        return -1;
}
static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed)
{
        int ir_mode = HCR_SIR;
        int iobase;
        __u8 set;

        iobase = self->io.fir_base;

        /* Update accounting for new speed */
        self->io.speed = speed;

        /* Save current bank */
        set = inb(iobase + SSR);

        /* Disable interrupts */
        switch_bank(iobase, SET0);
        outb(0, iobase + ICR);

        switch_bank(iobase, SET2);
        outb(0x00, iobase + ABHL);
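        /*
         * The ABLL value is the baud-rate divisor relative to 115200 baud:
         * 115200 / 9600 = 0x0c, 115200 / 19200 = 0x06, and so on down to
         * 0x01 for 115200 baud itself.
         */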
        switch (speed) {
        case 9600:   outb(0x0c, iobase + ABLL); break;
        case 19200:  outb(0x06, iobase + ABLL); break;
        case 38400:  outb(0x03, iobase + ABLL); break;
        case 57600:  outb(0x02, iobase + ABLL); break;
        case 115200: outb(0x01, iobase + ABLL); break;
        case 576000:
                ir_mode = HCR_MIR_576;
                pr_debug("%s: handling baud of 576000\n", __func__);
                break;
        case 1152000:
                ir_mode = HCR_MIR_1152;
                pr_debug("%s: handling baud of 1152000\n", __func__);
                break;
        case 4000000:
                ir_mode = HCR_FIR;
                pr_debug("%s: handling baud of 4000000\n", __func__);
                break;
        default:
                ir_mode = HCR_FIR;
                pr_debug("%s: unknown baud rate of %d\n", __func__, speed);
                break;
        }

        switch_bank(iobase, SET0);
        outb(ir_mode, iobase + HCR);

        /* set FIFO size to 32 */
        switch_bank(iobase, SET2);
        outb(ADCR2_RXFS32 | ADCR2_TXFS32, iobase + ADCR2);

        /* set FIFO threshold to TX17, RX16 */
        switch_bank(iobase, SET0);
        outb(0x00, iobase + UFR);        /* Reset */
        outb(UFR_EN_FIFO, iobase + UFR); /* First we must enable FIFO */
        outb(0xa7, iobase + UFR);

        netif_wake_queue(self->netdev);

        /* Enable some interrupts so we can receive frames */
        switch_bank(iobase, SET0);
        if (speed > PIO_MAX_SPEED) {
                outb(ICR_EFSFI, iobase + ICR);
                w83977af_dma_receive(self);
        } else {
                outb(ICR_ERBRI, iobase + ICR);
        }

        /* Restore bank register */
        outb(set, iobase + SSR);
}
/*
 * Function w83977af_hard_xmit (skb, dev)
 *
 *    Sets up a DMA transfer to send the current frame.
 */
static netdev_tx_t w83977af_hard_xmit(struct sk_buff *skb,
                                      struct net_device *dev)
{
        struct w83977af_ir *self;
        __s32 speed;
        int iobase;
        __u8 set;
        int mtt;

        self = netdev_priv(dev);

        iobase = self->io.fir_base;

        pr_debug("%s: %ld, skb->len=%d\n", __func__, jiffies, (int)skb->len);

        /* Lock transmit buffer */
        netif_stop_queue(dev);

        /* Check if we need to change the speed */
        speed = irda_get_next_speed(skb);
        if ((speed != self->io.speed) && (speed != -1)) {
                /* Check for empty frame */
                if (!skb->len) {
                        w83977af_change_speed(self, speed);
                        dev_kfree_skb(skb);
                        return NETDEV_TX_OK;
                }
                self->new_speed = speed;
        }

        /* Save current set */
        set = inb(iobase + SSR);

        /* Decide if we should use PIO or DMA transfer */
        if (self->io.speed > PIO_MAX_SPEED) {
                self->tx_buff.data = self->tx_buff.head;
                skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
                self->tx_buff.len = skb->len;

                mtt = irda_get_mtt(skb);
                pr_debug("%s: %ld, mtt=%d\n", __func__, jiffies, mtt);
                if (mtt > 1000)
                        mdelay(mtt / 1000);
                else if (mtt)
                        udelay(mtt);

                /* Enable DMA interrupt */
                switch_bank(iobase, SET0);
                outb(ICR_EDMAI, iobase + ICR);
                w83977af_dma_write(self, iobase);
        } else {
                self->tx_buff.data = self->tx_buff.head;
                self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
                                                   self->tx_buff.truesize);

                /* Add interrupt on tx low level (will fire immediately) */
                switch_bank(iobase, SET0);
                outb(ICR_ETXTHI, iobase + ICR);
        }
        dev_kfree_skb(skb);

        /* Restore set register */
        outb(set, iobase + SSR);

        return NETDEV_TX_OK;
}
/*
 * Function w83977af_dma_write (self, iobase)
 *
 *    Send frame using DMA
 */
static void w83977af_dma_write(struct w83977af_ir *self, int iobase)
{
        __u8 set;

        pr_debug("%s: len=%d\n", __func__, self->tx_buff.len);

        /* Save current set */
        set = inb(iobase + SSR);

        /* Disable DMA */
        switch_bank(iobase, SET0);
        outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);

        /* Choose transmit DMA channel */
        switch_bank(iobase, SET2);
        outb(ADCR1_D_CHSW | /*ADCR1_DMA_F|*/ ADCR1_ADV_SL, iobase + ADCR1);
        irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
                       DMA_MODE_WRITE);
        self->io.direction = IO_XMIT;

        /* Enable DMA */
        switch_bank(iobase, SET0);
        outb(inb(iobase + HCR) | HCR_EN_DMA | HCR_TX_WT, iobase + HCR);

        /* Restore set register */
        outb(set, iobase + SSR);
}
/*
 * Function w83977af_pio_write (iobase, buf, len, fifo_size)
 *
 *    Fill the transmit FIFO with frame data using PIO
 */
static int w83977af_pio_write(int iobase, __u8 *buf, int len, int fifo_size)
{
        int actual = 0;
        __u8 set;

        /* Save current bank */
        set = inb(iobase + SSR);

        switch_bank(iobase, SET0);
        if (!(inb_p(iobase + USR) & USR_TSRE)) {
                pr_debug("%s: warning, FIFO not empty yet!\n", __func__);

                /* FIFO may still be filled to the Tx interrupt threshold */
                fifo_size -= 17;
                pr_debug("%s: %d bytes left in tx fifo\n", __func__, fifo_size);
        }

        /* Fill FIFO with current frame */
        while ((fifo_size-- > 0) && (actual < len)) {
                /* Transmit next byte */
                outb(buf[actual++], iobase + TBR);
        }

        pr_debug("%s: fifo_size %d ; %d sent of %d\n",
                 __func__, fifo_size, actual, len);

        /* Restore bank */
        outb(set, iobase + SSR);

        return actual;
}
/*
 * Function w83977af_dma_xmit_complete (self)
 *
 *    The transfer of a frame is finished. So do the necessary things
 */
static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
{
        int iobase;
        __u8 set;

        pr_debug("%s: %ld\n", __func__, jiffies);

        IRDA_ASSERT(self, return;);

        iobase = self->io.fir_base;

        /* Save current set */
        set = inb(iobase + SSR);

        /* Disable DMA */
        switch_bank(iobase, SET0);
        outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);

        /* Check for underrun! */
        if (inb(iobase + AUDR) & AUDR_UNDR) {
                pr_debug("%s: Transmit underrun!\n", __func__);

                self->netdev->stats.tx_errors++;
                self->netdev->stats.tx_fifo_errors++;

                /* Clear bit, by writing 1 to it */
                outb(AUDR_UNDR, iobase + AUDR);
        } else {
                self->netdev->stats.tx_packets++;
        }

        if (self->new_speed) {
                w83977af_change_speed(self, self->new_speed);
                self->new_speed = 0;
        }

        /* Unlock tx_buff and request another frame */
        /* Tell the network layer, that we want more frames */
        netif_wake_queue(self->netdev);

        /* Restore set */
        outb(set, iobase + SSR);
}
/*
 * Function w83977af_dma_receive (self)
 *
 *    Get ready for receiving a frame. The device will initiate a DMA
 *    if it starts to receive a frame.
 */
static int w83977af_dma_receive(struct w83977af_ir *self)
{
        int iobase;
        __u8 set;
#ifdef CONFIG_ARCH_NETWINDER
        unsigned long flags;
        __u8 hcr;
#endif
        IRDA_ASSERT(self, return -1;);

        pr_debug("%s\n", __func__);

        iobase = self->io.fir_base;

        /* Save current set */
        set = inb(iobase + SSR);

        /* Disable DMA */
        switch_bank(iobase, SET0);
        outb(inb(iobase + HCR) & ~HCR_EN_DMA, iobase + HCR);

        /* Choose DMA Rx, DMA Fairness, and Advanced mode */
        switch_bank(iobase, SET2);
        outb((inb(iobase + ADCR1) & ~ADCR1_D_CHSW)/*|ADCR1_DMA_F*/ | ADCR1_ADV_SL,
             iobase + ADCR1);

        self->io.direction = IO_RECV;
        self->rx_buff.data = self->rx_buff.head;

#ifdef CONFIG_ARCH_NETWINDER
        spin_lock_irqsave(&self->lock, flags);

        disable_dma(self->io.dma);
        clear_dma_ff(self->io.dma);
        set_dma_mode(self->io.dma, DMA_MODE_READ);
        set_dma_addr(self->io.dma, self->rx_buff_dma);
        set_dma_count(self->io.dma, self->rx_buff.truesize);
#else
        irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize,
                       DMA_MODE_READ);
#endif
        /*
         * Reset Rx FIFO. This will also flush the ST_FIFO, it's very
         * important that we don't reset the Tx FIFO since it might not
         * be finished transmitting yet
         */
        switch_bank(iobase, SET0);
        outb(UFR_RXTL | UFR_TXTL | UFR_RXF_RST | UFR_EN_FIFO, iobase + UFR);
        self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0;

        /* Enable DMA */
        switch_bank(iobase, SET0);
#ifdef CONFIG_ARCH_NETWINDER
        hcr = inb(iobase + HCR);
        outb(hcr | HCR_EN_DMA, iobase + HCR);
        enable_dma(self->io.dma);
        spin_unlock_irqrestore(&self->lock, flags);
#else
        outb(inb(iobase + HCR) | HCR_EN_DMA, iobase + HCR);
#endif
        /* Restore set */
        outb(set, iobase + SSR);

        return 0;
}
/*
 * Function w83977af_dma_receive_complete (self)
 *
 *    Finished with receiving a frame
 */
static int w83977af_dma_receive_complete(struct w83977af_ir *self)
{
        struct sk_buff *skb;
        struct st_fifo *st_fifo;
        int len;
        int iobase;
        __u8 set;
        __u8 status;

        pr_debug("%s\n", __func__);

        st_fifo = &self->st_fifo;

        iobase = self->io.fir_base;

        /* Save current set */
        set = inb(iobase + SSR);

        /* Read status FIFO */
        switch_bank(iobase, SET5);
        while ((status = inb(iobase + FS_FO)) & FS_FO_FSFDR) {
                st_fifo->entries[st_fifo->tail].status = status;

                st_fifo->entries[st_fifo->tail].len  = inb(iobase + RFLFL);
                st_fifo->entries[st_fifo->tail].len |= inb(iobase + RFLFH) << 8;

                st_fifo->tail++;
                st_fifo->len++;
        }

        while (st_fifo->len) {
                /* Get first entry */
                status = st_fifo->entries[st_fifo->head].status;
                len    = st_fifo->entries[st_fifo->head].len;
                st_fifo->head++;
                st_fifo->len--;

                /* Check for errors */
                if (status & FS_FO_ERR_MSK) {
                        if (status & FS_FO_LST_FR) {
                                /* Add number of lost frames to stats */
                                self->netdev->stats.rx_errors += len;
                        } else {
                                /* Skip frame */
                                self->netdev->stats.rx_errors++;

                                self->rx_buff.data += len;

                                if (status & FS_FO_MX_LEX)
                                        self->netdev->stats.rx_length_errors++;

                                if (status & FS_FO_PHY_ERR)
                                        self->netdev->stats.rx_frame_errors++;

                                if (status & FS_FO_CRC_ERR)
                                        self->netdev->stats.rx_crc_errors++;
                        }
                        /* The errors below can be reported in both cases */
                        if (status & FS_FO_RX_OV)
                                self->netdev->stats.rx_fifo_errors++;

                        if (status & FS_FO_FSF_OV)
                                self->netdev->stats.rx_fifo_errors++;
                } else {
                        /* Check if we have transferred all data to memory */
                        switch_bank(iobase, SET0);
                        if (inb(iobase + USR) & USR_RDR)
                                udelay(80); /* Should be enough!? */

                        skb = dev_alloc_skb(len + 1);
                        if (!skb) {
                                pr_info("%s: memory squeeze, dropping frame\n",
                                        __func__);
                                /* Restore set register */
                                outb(set, iobase + SSR);

                                return FALSE;
                        }

                        /* Align to 20 bytes */
                        skb_reserve(skb, 1);

                        /* Copy frame without CRC */
                        if (self->io.speed < 4000000) {
                                skb_put(skb, len - 2);
                                skb_copy_to_linear_data(skb,
                                                        self->rx_buff.data,
                                                        len - 2);
                        } else {
                                skb_put(skb, len - 4);
                                skb_copy_to_linear_data(skb,
                                                        self->rx_buff.data,
                                                        len - 4);
                        }

                        /* Move to next frame */
                        self->rx_buff.data += len;
                        self->netdev->stats.rx_packets++;

                        skb->dev = self->netdev;
                        skb_reset_mac_header(skb);
                        skb->protocol = htons(ETH_P_IRDA);
                        netif_rx(skb);
                }
        }
        /* Restore set register */
        outb(set, iobase + SSR);

        return TRUE;
}
/*
 * Function w83977af_pio_receive (self)
 *
 *    Receive all data in receiver FIFO
 */
static void w83977af_pio_receive(struct w83977af_ir *self)
{
        __u8 byte = 0x00;
        int iobase;

        IRDA_ASSERT(self, return;);

        iobase = self->io.fir_base;

        /* Receive all characters in Rx FIFO */
        do {
                byte = inb(iobase + RBR);
                async_unwrap_char(self->netdev, &self->netdev->stats,
                                  &self->rx_buff, byte);
        } while (inb(iobase + USR) & USR_RDR); /* Data available */
}
/*
 * Function w83977af_sir_interrupt (self, isr)
 *
 *    Handle SIR interrupt
 */
static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr)
{
        __u8 new_icr = 0;
        __u8 set;
        int iobase;
        int actual;

        pr_debug("%s: isr=%#x\n", __func__, isr);

        iobase = self->io.fir_base;
        /* Transmit FIFO low on data */
        if (isr & ISR_TXTH_I) {
                /* Write data left in transmit buffer */
                actual = w83977af_pio_write(self->io.fir_base,
                                            self->tx_buff.data,
                                            self->tx_buff.len,
                                            self->io.fifo_size);

                self->tx_buff.data += actual;
                self->tx_buff.len  -= actual;

                self->io.direction = IO_XMIT;

                /* Check if finished */
                if (self->tx_buff.len > 0) {
                        new_icr |= ICR_ETXTHI;
                } else {
                        set = inb(iobase + SSR);
                        switch_bank(iobase, SET0);
                        outb(AUDR_SFEND, iobase + AUDR);
                        outb(set, iobase + SSR);

                        self->netdev->stats.tx_packets++;

                        /* Feed me more packets */
                        netif_wake_queue(self->netdev);
                        new_icr |= ICR_ETBREI;
                }
        }
        /* Check if transmission has completed */
        if (isr & ISR_TXEMP_I) {
                /* Check if we need to change the speed */
                if (self->new_speed) {
                        pr_debug("%s: Changing speed!\n", __func__);
                        w83977af_change_speed(self, self->new_speed);
                        self->new_speed = 0;
                }

                /* Turn around and get ready to receive some data */
                self->io.direction = IO_RECV;
                new_icr |= ICR_ERBRI;
        }

        /* Rx FIFO threshold or timeout */
        if (isr & ISR_RXTH_I) {
                w83977af_pio_receive(self);

                /* Keep receiving */
                new_icr |= ICR_ERBRI;
        }
        return new_icr;
}
/*
 * Function w83977af_fir_interrupt (self, isr)
 *
 *    Handle MIR/FIR interrupt
 */
static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr)
{
        __u8 new_icr = 0;
        __u8 set;
        int iobase;

        iobase = self->io.fir_base;
        set = inb(iobase + SSR);

        /* End of frame detected in FIFO */
        if (isr & (ISR_FEND_I | ISR_FSF_I)) {
                if (w83977af_dma_receive_complete(self)) {
                        /* Wait for next status FIFO interrupt */
                        new_icr |= ICR_EFSFI;
                } else {
                        /* DMA not finished yet */

                        /* Set timer value, resolution 1 ms */
                        switch_bank(iobase, SET4);
                        outb(0x01, iobase + TMRL); /* 1 ms */
                        outb(0x00, iobase + TMRH);

                        /* Start timer */
                        outb(IR_MSL_EN_TMR, iobase + IR_MSL);

                        new_icr |= ICR_ETMRI;
                }
        }
        /* Timer expired */
        if (isr & ISR_TMR_I) {
                /* Disable timer */
                switch_bank(iobase, SET4);
                outb(0, iobase + IR_MSL);

                /* Clear timer event */
                /* switch_bank(iobase, SET0); */
                /* outb(ASCR_CTE, iobase+ASCR); */

                /* Check if this is a TX timer interrupt */
                if (self->io.direction == IO_XMIT) {
                        w83977af_dma_write(self, iobase);

                        new_icr |= ICR_EDMAI;
                } else {
                        /* Check if DMA has now finished */
                        w83977af_dma_receive_complete(self);

                        new_icr |= ICR_EFSFI;
                }
        }
        /* Finished with DMA */
        if (isr & ISR_DMA_I) {
                w83977af_dma_xmit_complete(self);

                /* Check if there are more frames to be transmitted */
                /* if (irda_device_txqueue_empty(self)) { */

                /*
                 * Prepare for receive
                 *
                 * ** Netwinder Tx DMA likes that we do this anyway **
                 */
                w83977af_dma_receive(self);
                new_icr = ICR_EFSFI;
                /* } */
        }

        /* Restore set */
        outb(set, iobase + SSR);

        return new_icr;
}
/*
 * Function w83977af_interrupt (irq, dev_id)
 *
 *    An interrupt from the chip has arrived. Time to do some work
 */
static irqreturn_t w83977af_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct w83977af_ir *self;
        __u8 set, icr, isr;
        int iobase;

        self = netdev_priv(dev);

        iobase = self->io.fir_base;

        /* Save current bank */
        set = inb(iobase + SSR);
        switch_bank(iobase, SET0);

        icr = inb(iobase + ICR);
        isr = inb(iobase + ISR) & icr; /* Mask out the interesting ones */

        outb(0, iobase + ICR); /* Disable interrupts */

        if (isr) {
                /* Dispatch interrupt handler for the current speed */
                if (self->io.speed > PIO_MAX_SPEED)
                        icr = w83977af_fir_interrupt(self, isr);
                else
                        icr = w83977af_sir_interrupt(self, isr);
        }

        outb(icr, iobase + ICR); /* Restore (new) interrupts */
        outb(set, iobase + SSR); /* Restore bank register */
        return IRQ_RETVAL(isr);
}
/*
 * Function w83977af_is_receiving (self)
 *
 *    Return TRUE if we are currently receiving a frame
 */
static int w83977af_is_receiving(struct w83977af_ir *self)
{
        int status = FALSE;
        int iobase;
        __u8 set;

        IRDA_ASSERT(self, return FALSE;);

        if (self->io.speed > 115200) {
                iobase = self->io.fir_base;

                /* Check if rx FIFO is not empty */
                set = inb(iobase + SSR);
                switch_bank(iobase, SET2);
                if ((inb(iobase + RXFDTH) & 0x3f) != 0) {
                        /* We are receiving something */
                        status = TRUE;
                }
                outb(set, iobase + SSR);
        } else {
                status = (self->rx_buff.state != OUTSIDE_FRAME);
        }

        return status;
}
/*
 * Function w83977af_net_open (dev)
 *
 *    Start the device
 */
static int w83977af_net_open(struct net_device *dev)
{
        struct w83977af_ir *self;
        int iobase;
        char hwname[32];
        __u8 set;

        IRDA_ASSERT(dev, return -1;);
        self = netdev_priv(dev);

        IRDA_ASSERT(self, return 0;);

        iobase = self->io.fir_base;

        if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
                        (void *)dev))
                return -EAGAIN;
        /*
         * Always allocate the DMA channel after the IRQ,
         * and clean up on failure.
         */
        if (request_dma(self->io.dma, dev->name)) {
                free_irq(self->io.irq, dev);
                return -EAGAIN;
        }

        /* Save current set */
        set = inb(iobase + SSR);

        /* Enable some interrupts so we can receive frames again */
        switch_bank(iobase, SET0);
        if (self->io.speed > 115200) {
                outb(ICR_EFSFI, iobase + ICR);
                w83977af_dma_receive(self);
        } else {
                outb(ICR_ERBRI, iobase + ICR);
        }

        /* Restore bank register */
        outb(set, iobase + SSR);

        /* Ready to play! */
        netif_start_queue(dev);

        /* Give self a hardware name */
        sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base);

        /*
         * Open new IrLAP layer instance, now that everything should be
         * initialized properly
         */
        self->irlap = irlap_open(dev, &self->qos, hwname);

        return 0;
}
/*
 * Function w83977af_net_close (dev)
 *
 *    Stop the device
 */
static int w83977af_net_close(struct net_device *dev)
{
        struct w83977af_ir *self;
        int iobase;
        __u8 set;

        IRDA_ASSERT(dev, return -1;);

        self = netdev_priv(dev);

        IRDA_ASSERT(self, return 0;);

        iobase = self->io.fir_base;

        /* Stop device */
        netif_stop_queue(dev);

        /* Stop and remove instance of IrLAP */
        if (self->irlap)
                irlap_close(self->irlap);
        self->irlap = NULL;

        disable_dma(self->io.dma);

        /* Save current set */
        set = inb(iobase + SSR);

        /* Disable interrupts */
        switch_bank(iobase, SET0);
        outb(0, iobase + ICR);

        free_irq(self->io.irq, dev);
        free_dma(self->io.dma);

        /* Restore bank register */
        outb(set, iobase + SSR);

        return 0;
}
/*
 * Function w83977af_net_ioctl (dev, rq, cmd)
 *
 *    Process IOCTL commands for this device
 */
static int w83977af_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
        struct if_irda_req *irq = (struct if_irda_req *)rq;
        struct w83977af_ir *self;
        unsigned long flags;
        int ret = 0;

        IRDA_ASSERT(dev, return -1;);

        self = netdev_priv(dev);

        IRDA_ASSERT(self, return -1;);

        pr_debug("%s: %s, (cmd=0x%X)\n", __func__, dev->name, cmd);

        spin_lock_irqsave(&self->lock, flags);

        switch (cmd) {
        case SIOCSBANDWIDTH: /* Set bandwidth */
                if (!capable(CAP_NET_ADMIN)) {
                        ret = -EPERM;
                        goto out;
                }
                w83977af_change_speed(self, irq->ifr_baudrate);
                break;
        case SIOCSMEDIABUSY: /* Set media busy */
                if (!capable(CAP_NET_ADMIN)) {
                        ret = -EPERM;
                        goto out;
                }
                irda_device_set_media_busy(self->netdev, TRUE);
                break;
        case SIOCGRECEIVING: /* Check if we are receiving right now */
                irq->ifr_receiving = w83977af_is_receiving(self);
                break;
        default:
                ret = -EOPNOTSUPP;
        }
out:
        spin_unlock_irqrestore(&self->lock, flags);
        return ret;
}
MODULE_AUTHOR("Dag Brattli <dagb@cs.uit.no>");
MODULE_DESCRIPTION("Winbond W83977AF IrDA Device Driver");
MODULE_LICENSE("GPL");

module_param(qos_mtt_bits, int, 0);
MODULE_PARM_DESC(qos_mtt_bits, "Minimum Turn Time");
module_param_array(io, int, NULL, 0);
MODULE_PARM_DESC(io, "Base I/O addresses");
module_param_array(irq, int, NULL, 0);
MODULE_PARM_DESC(irq, "IRQ lines");
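/*
 * Example (illustrative only, assuming the module is built as w83977af_ir):
 *
 *     modprobe w83977af_ir io=0x180 irq=11
 *
 * overrides the probed base I/O address and IRQ line of the first chip.
 */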
/*
 * Function init_module (void)
 *
 *    Initialize the module, called when the module is loaded
 */
module_init(w83977af_init);

/*
 * Function cleanup_module (void)
 *
 *    Cleanup the module, called when the module is unloaded
 */
module_exit(w83977af_cleanup);