/*
 * linux/drivers/net/irda/pxaficp_ir.c
 *
 * Based on sa1100_ir.c by Russell King
 *
 * Changes copyright (C) 2003-2005 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Infra-red driver (SIR/FIR) for the PXA2xx embedded microprocessor
 *
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma/pxa-dma.h>
#include <linux/gpio.h>
#include <linux/slab.h>

#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include <linux/platform_data/irda-pxaficp.h>

#define __REG(x) ((x) & 0xffff)
#include <mach/regs-uart.h>

#define ICCR0		0x0000		/* ICP Control Register 0 */
#define ICCR1		0x0004		/* ICP Control Register 1 */
#define ICCR2		0x0008		/* ICP Control Register 2 */
#define ICDR		0x000c		/* ICP Data Register */
#define ICSR0		0x0014		/* ICP Status Register 0 */
#define ICSR1		0x0018		/* ICP Status Register 1 */

#define ICCR0_AME	(1 << 7)	/* Address match enable */
#define ICCR0_TIE	(1 << 6)	/* Transmit FIFO interrupt enable */
#define ICCR0_RIE	(1 << 5)	/* Receive FIFO interrupt enable */
#define ICCR0_RXE	(1 << 4)	/* Receive enable */
#define ICCR0_TXE	(1 << 3)	/* Transmit enable */
#define ICCR0_TUS	(1 << 2)	/* Transmit FIFO underrun select */
#define ICCR0_LBM	(1 << 1)	/* Loopback mode */
#define ICCR0_ITR	(1 << 0)	/* IrDA transmission */

#define ICCR2_RXP	(1 << 3)	/* Receive Pin Polarity select */
#define ICCR2_TXP	(1 << 2)	/* Transmit Pin Polarity select */
#define ICCR2_TRIG	(3 << 0)	/* Receive FIFO Trigger threshold */
#define ICCR2_TRIG_8	(0 << 0)	/*	>= 8 bytes */
#define ICCR2_TRIG_16	(1 << 0)	/*	>= 16 bytes */
#define ICCR2_TRIG_32	(2 << 0)	/*	>= 32 bytes */

#define ICSR0_EOC	(1 << 6)	/* DMA End of Descriptor Chain */
#define ICSR0_FRE	(1 << 5)	/* Framing error */
#define ICSR0_RFS	(1 << 4)	/* Receive FIFO service request */
#define ICSR0_TFS	(1 << 3)	/* Transmit FIFO service request */
#define ICSR0_RAB	(1 << 2)	/* Receiver abort */
#define ICSR0_TUR	(1 << 1)	/* Transmit FIFO underrun */
#define ICSR0_EIF	(1 << 0)	/* End/Error in FIFO */

#define ICSR1_ROR	(1 << 6)	/* Receive FIFO overrun */
#define ICSR1_CRE	(1 << 5)	/* CRC error */
#define ICSR1_EOF	(1 << 4)	/* End of frame */
#define ICSR1_TNF	(1 << 3)	/* Transmit FIFO not full */
#define ICSR1_RNE	(1 << 2)	/* Receive FIFO not empty */
#define ICSR1_TBY	(1 << 1)	/* Transmitter busy flag */
#define ICSR1_RSY	(1 << 0)	/* Receiver synchronized flag */

#define IrSR_RXPL_NEG_IS_ZERO	(1 << 4)
#define IrSR_RXPL_POS_IS_ZERO	0x0
#define IrSR_TXPL_NEG_IS_ZERO	(1 << 3)
#define IrSR_TXPL_POS_IS_ZERO	0x0
#define IrSR_XMODE_PULSE_1_6	(1 << 2)
#define IrSR_XMODE_PULSE_3_16	0x0
#define IrSR_RCVEIR_IR_MODE	(1 << 1)
#define IrSR_RCVEIR_UART_MODE	0x0
#define IrSR_XMITIR_IR_MODE	(1 << 0)
#define IrSR_XMITIR_UART_MODE	0x0

#define IrSR_IR_RECEIVE_ON (\
		IrSR_RXPL_NEG_IS_ZERO | \
		IrSR_TXPL_POS_IS_ZERO | \
		IrSR_XMODE_PULSE_3_16 | \
		IrSR_RCVEIR_IR_MODE   | \
		IrSR_XMITIR_UART_MODE)

#define IrSR_IR_TRANSMIT_ON (\
		IrSR_RXPL_NEG_IS_ZERO | \
		IrSR_TXPL_POS_IS_ZERO | \
		IrSR_XMODE_PULSE_3_16 | \
		IrSR_RCVEIR_UART_MODE | \
		IrSR_XMITIR_IR_MODE)
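
/*
 * Usage sketch (mirrors the STISR writes made later in this file, nothing
 * new is introduced here): the two composites above pick the half-duplex
 * direction of the STUART IR port, e.g.
 *
 *	stuart_writel(si, IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6, STISR);
 *
 * enables the IR receiver before listening, while IrSR_IR_TRANSMIT_ON is
 * programmed the same way just before a SIR transmit.
 */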

/* macros for registers read/write */
#define ficp_writel(irda, val, off)					\
	do {								\
		dev_vdbg(irda->dev,					\
			 "%s():%d ficp_writel(0x%x, %s)\n",		\
			 __func__, __LINE__, (val), #off);		\
		writel_relaxed((val), (irda)->irda_base + (off));	\
	} while (0)

#define ficp_readl(irda, off)						\
	({								\
		unsigned int _v;					\
		_v = readl_relaxed((irda)->irda_base + (off));		\
		dev_vdbg(irda->dev,					\
			 "%s():%d ficp_readl(%s): 0x%x\n",		\
			 __func__, __LINE__, #off, _v);			\
		_v;							\
	})

#define stuart_writel(irda, val, off)					\
	do {								\
		dev_vdbg(irda->dev,					\
			 "%s():%d stuart_writel(0x%x, %s)\n",		\
			 __func__, __LINE__, (val), #off);		\
		writel_relaxed((val), (irda)->stuart_base + (off));	\
	} while (0)

#define stuart_readl(irda, off)						\
	({								\
		unsigned int _v;					\
		_v = readl_relaxed((irda)->stuart_base + (off));	\
		dev_vdbg(irda->dev,					\
			 "%s():%d stuart_readl(%s): 0x%x\n",		\
			 __func__, __LINE__, #off, _v);			\
		_v;							\
	})
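
/*
 * A minimal usage sketch (taken from calls made further down this file,
 * nothing new): the accessors take the register offsets defined above, so
 *
 *	ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);	// enable FICP + RX
 *	while (ficp_readl(si, ICSR1) & ICSR1_TBY)	// wait for TX idle
 *		cpu_relax();
 *
 * The dev_vdbg() trace lines only appear when VERBOSE_DEBUG is defined for
 * this file.
 */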

struct pxa_irda {
	int			speed;
	int			newspeed;
	unsigned long long	last_clk;

	void __iomem		*stuart_base;
	void __iomem		*irda_base;
	unsigned char		*dma_rx_buff;
	unsigned char		*dma_tx_buff;
	dma_addr_t		dma_rx_buff_phy;
	dma_addr_t		dma_tx_buff_phy;
	unsigned int		dma_tx_buff_len;
	struct dma_chan		*txdma;
	struct dma_chan		*rxdma;
	dma_cookie_t		rx_cookie;
	dma_cookie_t		tx_cookie;
	int			drcmr_rx;
	int			drcmr_tx;

	int			uart_irq;
	int			icp_irq;

	struct irlap_cb		*irlap;
	struct qos_info		qos;

	iobuff_t		tx_buff;
	iobuff_t		rx_buff;

	struct device		*dev;
	struct pxaficp_platform_data *pdata;
	struct clk		*fir_clk;
	struct clk		*sir_clk;
	struct clk		*cur_clk;
};

static int pxa_irda_set_speed(struct pxa_irda *si, int speed);

static inline void pxa_irda_disable_clk(struct pxa_irda *si)
{
	clk_disable_unprepare(si->cur_clk);
}

static inline void pxa_irda_enable_firclk(struct pxa_irda *si)
{
	si->cur_clk = si->fir_clk;
	clk_prepare_enable(si->fir_clk);
}

static inline void pxa_irda_enable_sirclk(struct pxa_irda *si)
{
	si->cur_clk = si->sir_clk;
	clk_prepare_enable(si->sir_clk);
}

#define IS_FIR(si)		((si)->speed >= 4000000)
#define IRDA_FRAME_SIZE_LIMIT	2047

static void pxa_irda_fir_dma_rx_irq(void *data);
static void pxa_irda_fir_dma_tx_irq(void *data);

static inline void pxa_irda_fir_dma_rx_start(struct pxa_irda *si)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_slave_single(si->rxdma, si->dma_rx_buff_phy,
					 IRDA_FRAME_SIZE_LIMIT, DMA_FROM_DEVICE,
					 DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(si->dev, "prep_slave_single() failed\n");
		return;
	}
	tx->callback = pxa_irda_fir_dma_rx_irq;
	tx->callback_param = si;
	si->rx_cookie = dmaengine_submit(tx);
	dma_async_issue_pending(si->rxdma);
}

static inline void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_slave_single(si->txdma, si->dma_tx_buff_phy,
					 si->dma_tx_buff_len, DMA_TO_DEVICE,
					 DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(si->dev, "prep_slave_single() failed\n");
		return;
	}
	tx->callback = pxa_irda_fir_dma_tx_irq;
	tx->callback_param = si;
	si->tx_cookie = dmaengine_submit(tx);
	dma_async_issue_pending(si->txdma);
}

/*
 * Set the IrDA communications mode.
 */
static void pxa_irda_set_mode(struct pxa_irda *si, int mode)
{
	if (si->pdata->transceiver_mode)
		si->pdata->transceiver_mode(si->dev, mode);
	else {
		if (gpio_is_valid(si->pdata->gpio_pwdown))
			gpio_set_value(si->pdata->gpio_pwdown,
					!(mode & IR_OFF) ^
					!si->pdata->gpio_pwdown_inverted);
		pxa2xx_transceiver_mode(si->dev, mode);
	}
}

/*
 * Set the IrDA communications speed.
 */
static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
{
	unsigned long flags;
	unsigned int divisor;

	switch (speed) {
	case 9600:	case 19200:	case 38400:
	case 57600:	case 115200:

		/* refer to PXA250/210 Developer's Manual 10-7 */
		/* BaudRate = 14.7456 MHz / (16*Divisor) */
		divisor = 14745600 / (16 * speed);
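
		/*
		 * Worked example of the formula above, assuming the
		 * 14.7456 MHz STUART clock: at 115200 baud,
		 * divisor = 14745600 / (16 * 115200) = 8, and at
		 * 9600 baud divisor = 96; the low and high bytes are
		 * written to STDLL/STDLH below.
		 */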

		local_irq_save(flags);

		if (IS_FIR(si)) {
			/* stop RX DMA */
			dmaengine_terminate_all(si->rxdma);
			/* disable FICP */
			ficp_writel(si, 0, ICCR0);
			pxa_irda_disable_clk(si);

			/* set board transceiver to SIR mode */
			pxa_irda_set_mode(si, IR_SIRMODE);

			/* enable the STUART clock */
			pxa_irda_enable_sirclk(si);
		}

		/* disable STUART first */
		stuart_writel(si, 0, STIER);

		/* access DLL & DLH */
		stuart_writel(si, stuart_readl(si, STLCR) | LCR_DLAB, STLCR);
		stuart_writel(si, divisor & 0xff, STDLL);
		stuart_writel(si, divisor >> 8, STDLH);
		stuart_writel(si, stuart_readl(si, STLCR) & ~LCR_DLAB, STLCR);

		si->speed = speed;
		stuart_writel(si, IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6,
			      STISR);
		stuart_writel(si, IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE,
			      STIER);

		local_irq_restore(flags);
		break;

	case 4000000:
		local_irq_save(flags);

		/* disable STUART */
		stuart_writel(si, 0, STIER);
		stuart_writel(si, 0, STISR);
		pxa_irda_disable_clk(si);

		/* disable FICP first */
		ficp_writel(si, 0, ICCR0);

		/* set board transceiver to FIR mode */
		pxa_irda_set_mode(si, IR_FIRMODE);

		/* enable the FICP clock */
		pxa_irda_enable_firclk(si);

		si->speed = speed;
		pxa_irda_fir_dma_rx_start(si);
		ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);

		local_irq_restore(flags);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/* SIR interrupt service routine. */
static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pxa_irda *si = netdev_priv(dev);
	int iir, lsr, data;

	iir = stuart_readl(si, STIIR);

	switch (iir & 0x0F) {
	case 0x06: /* Receiver Line Status */
		lsr = stuart_readl(si, STLSR);
		while (lsr & LSR_FIFOE) {
			data = stuart_readl(si, STRBR);
			if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) {
				printk(KERN_DEBUG "pxa_ir: sir receiving error\n");
				dev->stats.rx_errors++;
				if (lsr & LSR_FE)
					dev->stats.rx_frame_errors++;
				if (lsr & LSR_OE)
					dev->stats.rx_fifo_errors++;
			} else {
				dev->stats.rx_bytes++;
				async_unwrap_char(dev, &dev->stats,
						  &si->rx_buff, data);
			}
			lsr = stuart_readl(si, STLSR);
		}
		si->last_clk = sched_clock();
		break;

	case 0x04: /* Received Data Available */
		/* fall through */

	case 0x0C: /* Character Timeout Indication */
		do {
			dev->stats.rx_bytes++;
			async_unwrap_char(dev, &dev->stats, &si->rx_buff,
					  stuart_readl(si, STRBR));
		} while (stuart_readl(si, STLSR) & LSR_DR);
		si->last_clk = sched_clock();
		break;

	case 0x02: /* Transmit FIFO Data Request */
		while ((si->tx_buff.len) &&
		       (stuart_readl(si, STLSR) & LSR_TDRQ)) {
			stuart_writel(si, *si->tx_buff.data++, STTHR);
			si->tx_buff.len -= 1;
		}

		if (si->tx_buff.len == 0) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head;

			/* We need to ensure that the transmitter has finished. */
			while ((stuart_readl(si, STLSR) & LSR_TEMT) == 0)
				cpu_relax();
			si->last_clk = sched_clock();

			/*
			 * Ok, we've finished transmitting.  Now enable
			 * the receiver.  Sometimes we get a receive IRQ
			 * immediately after a transmit...
			 */
			if (si->newspeed) {
				pxa_irda_set_speed(si, si->newspeed);
				si->newspeed = 0;
			} else {
				/* enable IR Receiver, disable IR Transmitter */
				stuart_writel(si, IrSR_IR_RECEIVE_ON |
					      IrSR_XMODE_PULSE_1_6, STISR);
				/* enable STUART and receive interrupts */
				stuart_writel(si, IER_UUE | IER_RLSE |
					      IER_RAVIE | IER_RTIOE, STIER);
			}
			netif_wake_queue(dev);
		}
		break;
	}

	return IRQ_HANDLED;
}

/* FIR Receive DMA interrupt handler */
static void pxa_irda_fir_dma_rx_irq(void *data)
{
	struct net_device *dev = data;
	struct pxa_irda *si = netdev_priv(dev);

	dmaengine_terminate_all(si->rxdma);
	netdev_dbg(dev, "pxa_ir: fir rx dma bus error\n");
}

/* FIR Transmit DMA interrupt handler */
static void pxa_irda_fir_dma_tx_irq(void *data)
{
	struct net_device *dev = data;
	struct pxa_irda *si = netdev_priv(dev);

	dmaengine_terminate_all(si->txdma);
	if (dmaengine_tx_status(si->txdma, si->tx_cookie, NULL) == DMA_ERROR) {
		dev->stats.tx_errors++;
	} else {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += si->dma_tx_buff_len;
	}

	while (ficp_readl(si, ICSR1) & ICSR1_TBY)
		cpu_relax();
	si->last_clk = sched_clock();

	/*
	 * HACK: It looks like the TBY bit is dropped too soon.
	 * Without this delay things break.
	 */
	udelay(120);

	if (si->newspeed) {
		pxa_irda_set_speed(si, si->newspeed);
		si->newspeed = 0;
	} else {
		int i = 64;

		ficp_writel(si, 0, ICCR0);
		pxa_irda_fir_dma_rx_start(si);
		while ((ficp_readl(si, ICSR1) & ICSR1_RNE) && i--)
			ficp_readl(si, ICDR);
		ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);

		if (i < 0)
			printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");
	}
	netif_wake_queue(dev);
}

/* EIF (Error in FIFO / End of Frame) handler for FIR */
static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0)
{
	unsigned int len, stat, data;
	struct dma_tx_state state;

	/* Get the current data position. */
	dmaengine_tx_status(si->rxdma, si->rx_cookie, &state);
	len = IRDA_FRAME_SIZE_LIMIT - state.residue;

	do {
		/* Read Status, and then Data. */
		stat = ficp_readl(si, ICSR1);
		rmb();
		data = ficp_readl(si, ICDR);

		if (stat & (ICSR1_CRE | ICSR1_ROR)) {
			dev->stats.rx_errors++;
			if (stat & ICSR1_CRE) {
				printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
				dev->stats.rx_crc_errors++;
			}
			if (stat & ICSR1_ROR) {
				printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
				dev->stats.rx_over_errors++;
			}
		} else {
			si->dma_rx_buff[len++] = data;
		}
		/* If we hit the end of frame, there's no point in continuing. */
		if (stat & ICSR1_EOF)
			break;
	} while (ficp_readl(si, ICSR0) & ICSR0_EIF);

	if (stat & ICSR1_EOF) {
		/* end of frame */
		struct sk_buff *skb;

		if (icsr0 & ICSR0_FRE) {
			printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
			dev->stats.rx_dropped++;
			return;
		}

		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
			dev->stats.rx_dropped++;
			return;
		}

		/* Align IP header to 20 bytes */
		skb_reserve(skb, 1);
		skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
		skb_put(skb, len);

		/* Feed it to IrLAP */
		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}
}

/* FIR interrupt handler */
static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pxa_irda *si = netdev_priv(dev);
	int icsr0;
	int i = 64;

	/* stop RX DMA */
	dmaengine_terminate_all(si->rxdma);
	si->last_clk = sched_clock();
	icsr0 = ficp_readl(si, ICSR0);

	if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) {
		if (icsr0 & ICSR0_FRE) {
			printk(KERN_DEBUG "pxa_ir: fir receive frame error\n");
			dev->stats.rx_frame_errors++;
		} else {
			printk(KERN_DEBUG "pxa_ir: fir receive abort\n");
			dev->stats.rx_errors++;
		}
		ficp_writel(si, icsr0 & (ICSR0_FRE | ICSR0_RAB), ICSR0);
	}

	if (icsr0 & ICSR0_EIF) {
		/* An error in FIFO occurred, or there is an end of frame */
		pxa_irda_fir_irq_eif(si, dev, icsr0);
	}

	ficp_writel(si, 0, ICCR0);
	pxa_irda_fir_dma_rx_start(si);
	while ((ficp_readl(si, ICSR1) & ICSR1_RNE) && i--)
		ficp_readl(si, ICDR);
	ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);

	if (i < 0)
		printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");

	return IRQ_HANDLED;
}

/* hard_xmit interface of irda device */
static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	int speed = irda_get_next_speed(skb);

	/*
	 * Does this packet contain a request to change the interface
	 * speed?  If so, remember it until we complete the transmission
	 * of this frame.
	 */
	if (speed != si->speed && speed != -1)
		si->newspeed = speed;

	/*
	 * If this is an empty frame, we can bypass a lot.
	 */
	if (skb->len == 0) {
		if (si->newspeed) {
			si->newspeed = 0;
			pxa_irda_set_speed(si, speed);
		}
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	netif_stop_queue(dev);

	if (!IS_FIR(si)) {
		si->tx_buff.data = si->tx_buff.head;
		si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);

		/* Disable STUART interrupts and switch to transmit mode. */
		stuart_writel(si, 0, STIER);
		stuart_writel(si, IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6,
			      STISR);

		/* enable STUART and transmit interrupts */
		stuart_writel(si, IER_UUE | IER_TIE, STIER);
	} else {
		unsigned long mtt = irda_get_mtt(skb);

		si->dma_tx_buff_len = skb->len;
		skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);

		if (mtt)
			while ((sched_clock() - si->last_clk) * 1000 < mtt)
				cpu_relax();

		/* stop RX DMA, disable FICP */
		dmaengine_terminate_all(si->rxdma);
		ficp_writel(si, 0, ICCR0);

		pxa_irda_fir_dma_tx_start(si);
		ficp_writel(si, ICCR0_ITR | ICCR0_TXE, ICCR0);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static int pxa_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
	struct if_irda_req *rq = (struct if_irda_req *)ifreq;
	struct pxa_irda *si = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCSBANDWIDTH:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			/*
			 * We are unable to set the speed if the
			 * device is not running.
			 */
			if (netif_running(dev)) {
				ret = pxa_irda_set_speed(si,
						rq->ifr_baudrate);
			} else {
				printk(KERN_INFO "pxa_ir: SIOCSBANDWIDTH: !netif_running\n");
				ret = 0;
			}
		}
		break;

	case SIOCSMEDIABUSY:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			irda_device_set_media_busy(dev, TRUE);
			ret = 0;
		}
		break;

	case SIOCGRECEIVING:
		ret = 0;
		rq->ifr_receiving = IS_FIR(si) ? 0
					: si->rx_buff.state != OUTSIDE_FRAME;
		break;

	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static void pxa_irda_startup(struct pxa_irda *si)
{
	/* Disable STUART interrupts */
	stuart_writel(si, 0, STIER);
	/* enable STUART interrupt to the processor */
	stuart_writel(si, MCR_OUT2, STMCR);
	/* configure SIR frame format: StartBit - Data 7 ... Data 0 - Stop Bit */
	stuart_writel(si, LCR_WLS0 | LCR_WLS1, STLCR);
	/* enable FIFO, we use FIFO to improve performance */
	stuart_writel(si, FCR_TRFIFOE | FCR_ITL_32, STFCR);

	/* disable FICP */
	ficp_writel(si, 0, ICCR0);
	/* configure FICP ICCR2 */
	ficp_writel(si, ICCR2_TXP | ICCR2_TRIG_32, ICCR2);

	/* force SIR reinitialization */
	si->speed = -1;
	pxa_irda_set_speed(si, 9600);

	printk(KERN_DEBUG "pxa_ir: irda startup\n");
}

static void pxa_irda_shutdown(struct pxa_irda *si)
{
	unsigned long flags;

	local_irq_save(flags);

	/* disable STUART and interrupt */
	stuart_writel(si, 0, STIER);
	/* disable STUART SIR mode */
	stuart_writel(si, 0, STISR);

	/* stop DMA */
	dmaengine_terminate_all(si->rxdma);
	dmaengine_terminate_all(si->txdma);
	/* disable FICP */
	ficp_writel(si, 0, ICCR0);

	/* disable the STUART or FICP clocks */
	pxa_irda_disable_clk(si);

	local_irq_restore(flags);

	/* power off board transceiver */
	pxa_irda_set_mode(si, IR_OFF);

	printk(KERN_DEBUG "pxa_ir: irda shutdown\n");
}

static int pxa_irda_start(struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	dma_cap_mask_t mask;
	struct dma_slave_config config;
	struct pxad_param param;
	int err;

	si->speed = 9600;

	err = request_irq(si->uart_irq, pxa_irda_sir_irq, 0, dev->name, dev);
	if (err)
		goto err_irq1;

	err = request_irq(si->icp_irq, pxa_irda_fir_irq, 0, dev->name, dev);
	if (err)
		goto err_irq2;

	/*
	 * The interrupt must remain disabled for now.
	 */
	disable_irq(si->uart_irq);
	disable_irq(si->icp_irq);

	err = -EBUSY;
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	param.prio = PXAD_PRIO_LOWEST;

	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	config.src_addr = (dma_addr_t)si->irda_base + ICDR;
	config.dst_addr = (dma_addr_t)si->irda_base + ICDR;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;

	param.drcmr = si->drcmr_rx;
	si->rxdma = dma_request_slave_channel_compat(mask, pxad_filter_fn,
						     &param, &dev->dev, "rx");
	if (!si->rxdma)
		goto err_rx_dma;

	param.drcmr = si->drcmr_tx;
	si->txdma = dma_request_slave_channel_compat(mask, pxad_filter_fn,
						     &param, &dev->dev, "tx");
	if (!si->txdma)
		goto err_tx_dma;

	err = dmaengine_slave_config(si->rxdma, &config);
	if (err)
		goto err_dma_rx_buff;
	err = dmaengine_slave_config(si->txdma, &config);
	if (err)
		goto err_dma_rx_buff;

	err = -ENOMEM;
	si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
					     &si->dma_rx_buff_phy, GFP_KERNEL);
	if (!si->dma_rx_buff)
		goto err_dma_rx_buff;

	si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
					     &si->dma_tx_buff_phy, GFP_KERNEL);
	if (!si->dma_tx_buff)
		goto err_dma_tx_buff;

	/* Setup the serial port for the initial speed. */
	pxa_irda_startup(si);

	/*
	 * Open a new IrLAP layer instance.
	 */
	si->irlap = irlap_open(dev, &si->qos, "pxa");
	err = -ENOMEM;
	if (!si->irlap)
		goto err_irlap;

	/*
	 * Now enable the interrupt and start the queue
	 */
	enable_irq(si->uart_irq);
	enable_irq(si->icp_irq);
	netif_start_queue(dev);

	printk(KERN_DEBUG "pxa_ir: irda driver opened\n");

	return 0;

err_irlap:
	pxa_irda_shutdown(si);
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
err_dma_tx_buff:
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
err_dma_rx_buff:
	dma_release_channel(si->txdma);
err_tx_dma:
	dma_release_channel(si->rxdma);
err_rx_dma:
	free_irq(si->icp_irq, dev);
err_irq2:
	free_irq(si->uart_irq, dev);
err_irq1:
	return err;
}

static int pxa_irda_stop(struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);

	netif_stop_queue(dev);

	pxa_irda_shutdown(si);

	/* Stop IrLAP */
	if (si->irlap) {
		irlap_close(si->irlap);
		si->irlap = NULL;
	}

	free_irq(si->uart_irq, dev);
	free_irq(si->icp_irq, dev);

	dmaengine_terminate_all(si->rxdma);
	dmaengine_terminate_all(si->txdma);
	dma_release_channel(si->rxdma);
	dma_release_channel(si->txdma);

	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);

	printk(KERN_DEBUG "pxa_ir: irda driver closed\n");
	return 0;
}

static int pxa_irda_suspend(struct platform_device *_dev, pm_message_t state)
{
	struct net_device *dev = platform_get_drvdata(_dev);
	struct pxa_irda *si;

	if (dev && netif_running(dev)) {
		si = netdev_priv(dev);
		netif_device_detach(dev);
		pxa_irda_shutdown(si);
	}

	return 0;
}

static int pxa_irda_resume(struct platform_device *_dev)
{
	struct net_device *dev = platform_get_drvdata(_dev);
	struct pxa_irda *si;

	if (dev && netif_running(dev)) {
		si = netdev_priv(dev);
		pxa_irda_startup(si);
		netif_device_attach(dev);
		netif_wake_queue(dev);
	}

	return 0;
}

static int pxa_irda_init_iobuf(iobuff_t *io, int size)
{
	io->head = kmalloc(size, GFP_KERNEL | GFP_DMA);
	if (io->head != NULL) {
		io->truesize = size;
		io->in_frame = FALSE;
		io->state = OUTSIDE_FRAME;
		io->data = io->head;
	}
	return io->head ? 0 : -ENOMEM;
}

static const struct net_device_ops pxa_irda_netdev_ops = {
	.ndo_open		= pxa_irda_start,
	.ndo_stop		= pxa_irda_stop,
	.ndo_start_xmit		= pxa_irda_hard_xmit,
	.ndo_do_ioctl		= pxa_irda_ioctl,
};

static int pxa_irda_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct resource *res;
	struct pxa_irda *si;
	void __iomem *ficp, *stuart;
	unsigned int baudrate_mask;
	int err;

	if (!pdev->dev.platform_data)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ficp = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ficp)) {
		dev_err(&pdev->dev, "resource ficp not defined\n");
		return PTR_ERR(ficp);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	stuart = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(stuart)) {
		dev_err(&pdev->dev, "resource stuart not defined\n");
		return PTR_ERR(stuart);
	}

	dev = alloc_irdadev(sizeof(struct pxa_irda));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	si = netdev_priv(dev);
	si->dev = &pdev->dev;
	si->pdata = pdev->dev.platform_data;

	si->irda_base = ficp;
	si->stuart_base = stuart;
	si->uart_irq = platform_get_irq(pdev, 0);
	si->icp_irq = platform_get_irq(pdev, 1);

	si->sir_clk = devm_clk_get(&pdev->dev, "UARTCLK");
	si->fir_clk = devm_clk_get(&pdev->dev, "FICPCLK");
	if (IS_ERR(si->sir_clk) || IS_ERR(si->fir_clk)) {
		err = PTR_ERR(IS_ERR(si->sir_clk) ? si->sir_clk : si->fir_clk);
		goto err_mem_4;
	}

	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (res)
		si->drcmr_rx = res->start;
	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (res)
		si->drcmr_tx = res->start;

	/*
	 * Initialise the SIR buffers
	 */
	err = pxa_irda_init_iobuf(&si->rx_buff, 14384);
	if (err)
		goto err_mem_4;
	err = pxa_irda_init_iobuf(&si->tx_buff, 4000);
	if (err)
		goto err_mem_5;

	if (gpio_is_valid(si->pdata->gpio_pwdown)) {
		err = gpio_request(si->pdata->gpio_pwdown, "IrDA switch");
		if (err)
			goto err_startup;
		err = gpio_direction_output(si->pdata->gpio_pwdown,
					!si->pdata->gpio_pwdown_inverted);
		if (err) {
			gpio_free(si->pdata->gpio_pwdown);
			goto err_startup;
		}
	}

	if (si->pdata->startup) {
		err = si->pdata->startup(si->dev);
		if (err)
			goto err_startup;
	}

	if (gpio_is_valid(si->pdata->gpio_pwdown) && si->pdata->startup)
		dev_warn(si->dev, "gpio_pwdown and startup() both defined!\n");

	dev->netdev_ops = &pxa_irda_netdev_ops;

	irda_init_max_qos_capabilies(&si->qos);

	baudrate_mask = 0;
	if (si->pdata->transceiver_cap & IR_SIRMODE)
		baudrate_mask |= IR_9600 | IR_19200 | IR_38400 | IR_57600 | IR_115200;
	if (si->pdata->transceiver_cap & IR_FIRMODE)
		baudrate_mask |= IR_4000000 << 8;
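
	/*
	 * Example (derived only from the two checks above, not from any
	 * particular board's platform data): a transceiver_cap of
	 * IR_SIRMODE | IR_FIRMODE gives
	 *
	 *	baudrate_mask = IR_9600 | IR_19200 | IR_38400 | IR_57600 |
	 *			IR_115200 | (IR_4000000 << 8);
	 *
	 * which is then ANDed into si->qos.baud_rate.bits below.
	 */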

	si->qos.baud_rate.bits &= baudrate_mask;
	si->qos.min_turn_time.bits = 7;  /* 1ms or more */

	irda_qos_bits_to_value(&si->qos);

	err = register_netdev(dev);

	if (err == 0)
		platform_set_drvdata(pdev, dev);

	if (err) {
		if (si->pdata->shutdown)
			si->pdata->shutdown(si->dev);
err_startup:
		kfree(si->tx_buff.head);
err_mem_5:
		kfree(si->rx_buff.head);
err_mem_4:
		free_netdev(dev);
	}

	return err;
}

static int pxa_irda_remove(struct platform_device *_dev)
{
	struct net_device *dev = platform_get_drvdata(_dev);

	if (dev) {
		struct pxa_irda *si = netdev_priv(dev);
		unregister_netdev(dev);
		if (gpio_is_valid(si->pdata->gpio_pwdown))
			gpio_free(si->pdata->gpio_pwdown);
		if (si->pdata->shutdown)
			si->pdata->shutdown(si->dev);
		kfree(si->tx_buff.head);
		kfree(si->rx_buff.head);
		free_netdev(dev);
	}

	return 0;
}

static struct platform_driver pxa_ir_driver = {
	.driver		= {
		.name	= "pxa2xx-ir",
	},
	.probe		= pxa_irda_probe,
	.remove		= pxa_irda_remove,
	.suspend	= pxa_irda_suspend,
	.resume		= pxa_irda_resume,
};

module_platform_driver(pxa_ir_driver);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-ir");