/*
 * linux/drivers/net/irda/pxaficp_ir.c
 *
 * Based on sa1100_ir.c by Russell King
 *
 * Changes copyright (C) 2003-2005 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Infra-red driver (SIR/FIR) for the PXA2xx embedded microprocessor
 *
 */
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma/pxa-dma.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include <linux/platform_data/irda-pxaficp.h>
#undef __REG
#define __REG(x) ((x) & 0xffff)
#include <mach/regs-uart.h>
#define ICCR0		0x0000	/* ICP Control Register 0 */
#define ICCR1		0x0004	/* ICP Control Register 1 */
#define ICCR2		0x0008	/* ICP Control Register 2 */
#define ICDR		0x000c	/* ICP Data Register */
#define ICSR0		0x0014	/* ICP Status Register 0 */
#define ICSR1		0x0018	/* ICP Status Register 1 */
#define ICCR0_AME	(1 << 7)	/* Address match enable */
#define ICCR0_TIE	(1 << 6)	/* Transmit FIFO interrupt enable */
#define ICCR0_RIE	(1 << 5)	/* Receive FIFO interrupt enable */
#define ICCR0_RXE	(1 << 4)	/* Receive enable */
#define ICCR0_TXE	(1 << 3)	/* Transmit enable */
#define ICCR0_TUS	(1 << 2)	/* Transmit FIFO underrun select */
#define ICCR0_LBM	(1 << 1)	/* Loopback mode */
#define ICCR0_ITR	(1 << 0)	/* IrDA transmission */
#define ICCR2_RXP	(1 << 3)	/* Receive Pin Polarity select */
#define ICCR2_TXP	(1 << 2)	/* Transmit Pin Polarity select */
#define ICCR2_TRIG	(3 << 0)	/* Receive FIFO Trigger threshold */
#define ICCR2_TRIG_8	(0 << 0)	/*	>= 8 bytes */
#define ICCR2_TRIG_16	(1 << 0)	/*	>= 16 bytes */
#define ICCR2_TRIG_32	(2 << 0)	/*	>= 32 bytes */
#define ICSR0_EOC	(1 << 6)	/* DMA End of Descriptor Chain */
#define ICSR0_FRE	(1 << 5)	/* Framing error */
#define ICSR0_RFS	(1 << 4)	/* Receive FIFO service request */
#define ICSR0_TFS	(1 << 3)	/* Transmit FIFO service request */
#define ICSR0_RAB	(1 << 2)	/* Receiver abort */
#define ICSR0_TUR	(1 << 1)	/* Transmit FIFO underrun */
#define ICSR0_EIF	(1 << 0)	/* End/Error in FIFO */
#define ICSR1_ROR	(1 << 6)	/* Receive FIFO overrun */
#define ICSR1_CRE	(1 << 5)	/* CRC error */
#define ICSR1_EOF	(1 << 4)	/* End of frame */
#define ICSR1_TNF	(1 << 3)	/* Transmit FIFO not full */
#define ICSR1_RNE	(1 << 2)	/* Receive FIFO not empty */
#define ICSR1_TBY	(1 << 1)	/* Transmitter busy flag */
#define ICSR1_RSY	(1 << 0)	/* Receiver synchronized flag */
#define IrSR_RXPL_NEG_IS_ZERO	(1<<4)
#define IrSR_RXPL_POS_IS_ZERO	0x0
#define IrSR_TXPL_NEG_IS_ZERO	(1<<3)
#define IrSR_TXPL_POS_IS_ZERO	0x0
#define IrSR_XMODE_PULSE_1_6	(1<<2)
#define IrSR_XMODE_PULSE_3_16	0x0
#define IrSR_RCVEIR_IR_MODE	(1<<1)
#define IrSR_RCVEIR_UART_MODE	0x0
#define IrSR_XMITIR_IR_MODE	(1<<0)
#define IrSR_XMITIR_UART_MODE	0x0
#define IrSR_IR_RECEIVE_ON (\
		IrSR_RXPL_NEG_IS_ZERO | \
		IrSR_TXPL_POS_IS_ZERO | \
		IrSR_XMODE_PULSE_3_16 | \
		IrSR_RCVEIR_IR_MODE   | \
		IrSR_XMITIR_UART_MODE)

#define IrSR_IR_TRANSMIT_ON (\
		IrSR_RXPL_NEG_IS_ZERO | \
		IrSR_TXPL_POS_IS_ZERO | \
		IrSR_XMODE_PULSE_3_16 | \
		IrSR_RCVEIR_UART_MODE | \
		IrSR_XMITIR_IR_MODE)
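
/*
 * The IrDA link is half-duplex: IrSR_IR_RECEIVE_ON leaves the transmitter
 * in UART mode while the receiver runs in IR mode, and IrSR_IR_TRANSMIT_ON
 * does the opposite.  The driver flips between the two settings around each
 * transmission (see pxa_irda_sir_irq() and pxa_irda_hard_xmit() below).
 */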
/* macros for registers read/write */
#define ficp_writel(irda, val, off)					\
	do {								\
		dev_vdbg(irda->dev,					\
			 "%s():%d ficp_writel(0x%x, %s)\n",		\
			 __func__, __LINE__, (val), #off);		\
		writel_relaxed((val), (irda)->irda_base + (off));	\
	} while (0)

#define ficp_readl(irda, off)						\
	({								\
		unsigned int _v;					\
		_v = readl_relaxed((irda)->irda_base + (off));		\
		dev_vdbg(irda->dev,					\
			 "%s():%d ficp_readl(%s): 0x%x\n",		\
			 __func__, __LINE__, #off, _v);			\
		_v;							\
	})
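
/*
 * Typical usage, as seen throughout this file: ficp_writel(si, 0, ICCR0)
 * disables the FICP block, and ficp_readl(si, ICSR1) polls its status bits.
 * The dev_vdbg() calls compile to nothing unless verbose debugging is
 * enabled, so these accessors cost no more than plain readl/writel.
 */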
#define stuart_writel(irda, val, off)					\
	do {								\
		dev_vdbg(irda->dev,					\
			 "%s():%d stuart_writel(0x%x, %s)\n",		\
			 __func__, __LINE__, (val), #off);		\
		writel_relaxed((val), (irda)->stuart_base + (off));	\
	} while (0)

#define stuart_readl(irda, off)						\
	({								\
		unsigned int _v;					\
		_v = readl_relaxed((irda)->stuart_base + (off));	\
		dev_vdbg(irda->dev,					\
			 "%s():%d stuart_readl(%s): 0x%x\n",		\
			 __func__, __LINE__, #off, _v);			\
		_v;							\
	})
struct pxa_irda {
	int			speed;
	int			newspeed;
	unsigned long long	last_clk;

	void __iomem		*stuart_base;
	void __iomem		*irda_base;
	unsigned char		*dma_rx_buff;
	unsigned char		*dma_tx_buff;
	dma_addr_t		dma_rx_buff_phy;
	dma_addr_t		dma_tx_buff_phy;
	unsigned int		dma_tx_buff_len;
	struct dma_chan		*txdma;
	struct dma_chan		*rxdma;
	dma_cookie_t		rx_cookie;
	dma_cookie_t		tx_cookie;
	int			drcmr_rx;
	int			drcmr_tx;

	int			uart_irq;
	int			icp_irq;

	struct irlap_cb		*irlap;
	struct qos_info		qos;

	iobuff_t		tx_buff;
	iobuff_t		rx_buff;

	struct device		*dev;
	struct pxaficp_platform_data *pdata;
	struct clk		*fir_clk;
	struct clk		*sir_clk;
	struct clk		*cur_clk;
};
static int pxa_irda_set_speed(struct pxa_irda *si, int speed);

static inline void pxa_irda_disable_clk(struct pxa_irda *si)
{
	if (si->cur_clk)
		clk_disable_unprepare(si->cur_clk);
	si->cur_clk = NULL;
}

static inline void pxa_irda_enable_firclk(struct pxa_irda *si)
{
	si->cur_clk = si->fir_clk;
	clk_prepare_enable(si->fir_clk);
}

static inline void pxa_irda_enable_sirclk(struct pxa_irda *si)
{
	si->cur_clk = si->sir_clk;
	clk_prepare_enable(si->sir_clk);
}
#define IS_FIR(si)		((si)->speed >= 4000000)
#define IRDA_FRAME_SIZE_LIMIT	2047
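
/*
 * Rates of 4 Mbps and above run through the FICP block with DMA (FIR);
 * anything slower uses the STUART in SIR mode.  IRDA_FRAME_SIZE_LIMIT is
 * the size of the coherent DMA buffer a single FIR receive can fill.
 */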
static void pxa_irda_fir_dma_rx_irq(void *data);
static void pxa_irda_fir_dma_tx_irq(void *data);

static inline void pxa_irda_fir_dma_rx_start(struct pxa_irda *si)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_slave_single(si->rxdma, si->dma_rx_buff_phy,
					 IRDA_FRAME_SIZE_LIMIT, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(si->dev, "dmaengine_prep_slave_single() failed\n");
		return;
	}
	tx->callback = pxa_irda_fir_dma_rx_irq;
	tx->callback_param = si;
	si->rx_cookie = dmaengine_submit(tx);
	dma_async_issue_pending(si->rxdma);
}
static inline void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_slave_single(si->txdma, si->dma_tx_buff_phy,
					 si->dma_tx_buff_len, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT);
	if (!tx) {
		dev_err(si->dev, "dmaengine_prep_slave_single() failed\n");
		return;
	}
	tx->callback = pxa_irda_fir_dma_tx_irq;
	tx->callback_param = si;
	si->tx_cookie = dmaengine_submit(tx);
	dma_async_issue_pending(si->txdma);
}
/*
 * Set the IrDA communications mode.
 */
static void pxa_irda_set_mode(struct pxa_irda *si, int mode)
{
	if (si->pdata->transceiver_mode)
		si->pdata->transceiver_mode(si->dev, mode);
	else {
		if (gpio_is_valid(si->pdata->gpio_pwdown))
			gpio_set_value(si->pdata->gpio_pwdown,
					!(mode & IR_OFF) ^
					!si->pdata->gpio_pwdown_inverted);
		pxa2xx_transceiver_mode(si->dev, mode);
	}
}
/*
 * Set the IrDA communications speed.
 */
static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
{
	unsigned long flags;
	unsigned int divisor;

	switch (speed) {
	case 9600:	case 19200:	case 38400:
	case 57600:	case 115200:

		/* refer to PXA250/210 Developer's Manual 10-7 */
		/* BaudRate = 14.7456 MHz / (16*Divisor) */
		divisor = 14745600 / (16 * speed);
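		/*
		 * Worked example: at 115200 baud the divisor is
		 * 14745600 / (16 * 115200) = 8, and at 9600 baud it is 96.
		 * Every supported SIR rate divides 921600 (14.7456 MHz / 16)
		 * exactly, so no rounding error is introduced here.
		 */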
		local_irq_save(flags);

		if (IS_FIR(si)) {
			/* stop RX DMA */
			dmaengine_terminate_all(si->rxdma);
			/* disable FICP */
			ficp_writel(si, 0, ICCR0);
			pxa_irda_disable_clk(si);

			/* set board transceiver to SIR mode */
			pxa_irda_set_mode(si, IR_SIRMODE);

			/* enable the STUART clock */
			pxa_irda_enable_sirclk(si);
		}

		/* disable STUART first */
		stuart_writel(si, 0, STIER);

		/* access DLL & DLH */
		stuart_writel(si, stuart_readl(si, STLCR) | LCR_DLAB, STLCR);
		stuart_writel(si, divisor & 0xff, STDLL);
		stuart_writel(si, divisor >> 8, STDLH);
		stuart_writel(si, stuart_readl(si, STLCR) & ~LCR_DLAB, STLCR);

		si->speed = speed;
		stuart_writel(si, IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6,
			      STISR);
		stuart_writel(si, IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE,
			      STIER);

		local_irq_restore(flags);
		break;
	case 4000000:
		local_irq_save(flags);

		/* disable STUART */
		stuart_writel(si, 0, STIER);
		stuart_writel(si, 0, STISR);
		pxa_irda_disable_clk(si);

		/* disable FICP first */
		ficp_writel(si, 0, ICCR0);

		/* set board transceiver to FIR mode */
		pxa_irda_set_mode(si, IR_FIRMODE);

		/* enable the FICP clock */
		pxa_irda_enable_firclk(si);

		si->speed = speed;
		pxa_irda_fir_dma_rx_start(si);
		ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);

		local_irq_restore(flags);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/* SIR interrupt service routine. */
static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pxa_irda *si = netdev_priv(dev);
	int iir, lsr, data;

	iir = stuart_readl(si, STIIR);

	switch (iir & 0x0F) {
	case 0x06: /* Receiver Line Status */
		lsr = stuart_readl(si, STLSR);
		while (lsr & LSR_FIFOE) {
			data = stuart_readl(si, STRBR);
			if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) {
				printk(KERN_DEBUG "pxa_ir: sir receiving error\n");
				dev->stats.rx_errors++;
				if (lsr & LSR_FE)
					dev->stats.rx_frame_errors++;
				if (lsr & LSR_OE)
					dev->stats.rx_fifo_errors++;
			} else {
				dev->stats.rx_bytes++;
				async_unwrap_char(dev, &dev->stats,
						  &si->rx_buff, data);
			}
			lsr = stuart_readl(si, STLSR);
		}
		si->last_clk = sched_clock();
		break;

	case 0x04: /* Received Data Available */
		/* fall through */

	case 0x0C: /* Character Timeout Indication */
		do {
			dev->stats.rx_bytes++;
			async_unwrap_char(dev, &dev->stats, &si->rx_buff,
					  stuart_readl(si, STRBR));
		} while (stuart_readl(si, STLSR) & LSR_DR);
		si->last_clk = sched_clock();
		break;

	case 0x02: /* Transmit FIFO Data Request */
		while ((si->tx_buff.len) &&
		       (stuart_readl(si, STLSR) & LSR_TDRQ)) {
			stuart_writel(si, *si->tx_buff.data++, STTHR);
			si->tx_buff.len -= 1;
		}

		if (si->tx_buff.len == 0) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head;

			/* We need to ensure that the transmitter has finished. */
			while ((stuart_readl(si, STLSR) & LSR_TEMT) == 0)
				cpu_relax();
			si->last_clk = sched_clock();

			/*
			 * Ok, we've finished transmitting.  Now enable
			 * the receiver.  Sometimes we get a receive IRQ
			 * immediately after a transmit...
			 */
			if (si->newspeed) {
				pxa_irda_set_speed(si, si->newspeed);
				si->newspeed = 0;
			} else {
				/* enable IR Receiver, disable IR Transmitter */
				stuart_writel(si, IrSR_IR_RECEIVE_ON |
					      IrSR_XMODE_PULSE_1_6, STISR);
				/* enable STUART and receive interrupts */
				stuart_writel(si, IER_UUE | IER_RLSE |
					      IER_RAVIE | IER_RTIOE, STIER);
			}
			/* I'm hungry! */
			netif_wake_queue(dev);
		}
		break;
	}

	return IRQ_HANDLED;
}
/* FIR Receive DMA interrupt handler */
static void pxa_irda_fir_dma_rx_irq(void *data)
{
	struct net_device *dev = data;
	struct pxa_irda *si = netdev_priv(dev);

	dmaengine_terminate_all(si->rxdma);
	netdev_dbg(dev, "pxa_ir: fir rx dma bus error\n");
}
/* FIR Transmit DMA interrupt handler */
static void pxa_irda_fir_dma_tx_irq(void *data)
{
	struct net_device *dev = data;
	struct pxa_irda *si = netdev_priv(dev);

	dmaengine_terminate_all(si->txdma);
	if (dmaengine_tx_status(si->txdma, si->tx_cookie, NULL) == DMA_ERROR) {
		dev->stats.tx_errors++;
	} else {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += si->dma_tx_buff_len;
	}

	while (ficp_readl(si, ICSR1) & ICSR1_TBY)
		cpu_relax();
	si->last_clk = sched_clock();

	/*
	 * HACK: It looks like the TBY bit is dropped too soon.
	 * Without this delay things break.
	 */
	udelay(120);

	if (si->newspeed) {
		pxa_irda_set_speed(si, si->newspeed);
		si->newspeed = 0;
	} else {
		int i = 64;

		ficp_writel(si, 0, ICCR0);
		pxa_irda_fir_dma_rx_start(si);
		while ((ficp_readl(si, ICSR1) & ICSR1_RNE) && i--)
			ficp_readl(si, ICDR);
		ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);

		if (i < 0)
			printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");
	}
	netif_wake_queue(dev);
}
/* EIF(Error in FIFO/End in Frame) handler for FIR */
static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0)
{
	unsigned int len, stat, data;
	struct dma_tx_state state;

	/* Get the current data position. */
	dmaengine_tx_status(si->rxdma, si->rx_cookie, &state);
	len = IRDA_FRAME_SIZE_LIMIT - state.residue;
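	/*
	 * The residue reported by the dmaengine is the number of bytes the
	 * descriptor still had left to transfer, so the data already landed
	 * in dma_rx_buff is the buffer size minus that residue.
	 */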
	do {
		/* Read Status, and then Data. */
		stat = ficp_readl(si, ICSR1);
		rmb();
		data = ficp_readl(si, ICDR);

		if (stat & (ICSR1_CRE | ICSR1_ROR)) {
			dev->stats.rx_errors++;
			if (stat & ICSR1_CRE) {
				printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
				dev->stats.rx_crc_errors++;
			}
			if (stat & ICSR1_ROR) {
				printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
				dev->stats.rx_over_errors++;
			}
		} else {
			si->dma_rx_buff[len++] = data;
		}
		/* If we hit the end of frame, there's no point in continuing. */
		if (stat & ICSR1_EOF)
			break;
	} while (ficp_readl(si, ICSR0) & ICSR0_EIF);

	if (stat & ICSR1_EOF) {
		/* end of frame. */
		struct sk_buff *skb;

		if (icsr0 & ICSR0_FRE) {
			printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
			dev->stats.rx_dropped++;
			return;
		}

		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
			dev->stats.rx_dropped++;
			return;
		}

		/* Align IP header to 20 bytes */
		skb_reserve(skb, 1);
		skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
		skb_put(skb, len);

		/* Feed it to IrLAP */
		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}
}
/* FIR interrupt handler */
static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pxa_irda *si = netdev_priv(dev);
	int icsr0, i = 64;

	/* stop RX DMA */
	dmaengine_terminate_all(si->rxdma);
	si->last_clk = sched_clock();
	icsr0 = ficp_readl(si, ICSR0);

	if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) {
		if (icsr0 & ICSR0_FRE) {
			printk(KERN_DEBUG "pxa_ir: fir receive frame error\n");
			dev->stats.rx_frame_errors++;
		} else {
			printk(KERN_DEBUG "pxa_ir: fir receive abort\n");
			dev->stats.rx_errors++;
		}
		ficp_writel(si, icsr0 & (ICSR0_FRE | ICSR0_RAB), ICSR0);
	}

	if (icsr0 & ICSR0_EIF) {
		/* An error in FIFO occurred, or there is an end of frame */
		pxa_irda_fir_irq_eif(si, dev, icsr0);
	}

	ficp_writel(si, 0, ICCR0);
	pxa_irda_fir_dma_rx_start(si);
	while ((ficp_readl(si, ICSR1) & ICSR1_RNE) && i--)
		ficp_readl(si, ICDR);
	ficp_writel(si, ICCR0_ITR | ICCR0_RXE, ICCR0);

	if (i < 0)
		printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");

	return IRQ_HANDLED;
}
/* hard_xmit interface of irda device */
static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	int speed = irda_get_next_speed(skb);

	/*
	 * Does this packet contain a request to change the interface
	 * speed?  If so, remember it until we complete the transmission
	 * of this frame.
	 */
	if (speed != si->speed && speed != -1)
		si->newspeed = speed;

	/*
	 * If this is an empty frame, we can bypass a lot.
	 */
	if (skb->len == 0) {
		if (si->newspeed) {
			si->newspeed = 0;
			pxa_irda_set_speed(si, speed);
		}
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	netif_stop_queue(dev);

	if (!IS_FIR(si)) {
		si->tx_buff.data = si->tx_buff.head;
		si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data,
						 si->tx_buff.truesize);

		/* Disable STUART interrupts and switch to transmit mode. */
		stuart_writel(si, 0, STIER);
		stuart_writel(si, IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6,
			      STISR);

		/* enable STUART and transmit interrupts */
		stuart_writel(si, IER_UUE | IER_TIE, STIER);
	} else {
		unsigned long mtt = irda_get_mtt(skb);

		si->dma_tx_buff_len = skb->len;
		skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);

		/*
		 * irda_get_mtt() returns microseconds while sched_clock()
		 * counts nanoseconds, so convert before comparing.
		 */
		if (mtt)
			while ((sched_clock() - si->last_clk) / 1000 < mtt)
				cpu_relax();

		/* stop RX DMA, disable FICP */
		dmaengine_terminate_all(si->rxdma);
		ficp_writel(si, 0, ICCR0);

		pxa_irda_fir_dma_tx_start(si);
		ficp_writel(si, ICCR0_ITR | ICCR0_TXE, ICCR0);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static int pxa_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
	struct if_irda_req *rq = (struct if_irda_req *)ifreq;
	struct pxa_irda *si = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCSBANDWIDTH:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			/*
			 * We are unable to set the speed if the
			 * device is not running.
			 */
			if (netif_running(dev)) {
				ret = pxa_irda_set_speed(si,
						rq->ifr_baudrate);
			} else {
				printk(KERN_INFO "pxa_ir: SIOCSBANDWIDTH: !netif_running\n");
				ret = 0;
			}
		}
		break;

	case SIOCSMEDIABUSY:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			irda_device_set_media_busy(dev, TRUE);
			ret = 0;
		}
		break;

	case SIOCGRECEIVING:
		ret = 0;
		rq->ifr_receiving = IS_FIR(si) ? 0
					: si->rx_buff.state != OUTSIDE_FRAME;
		break;

	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}
static void pxa_irda_startup(struct pxa_irda *si)
{
	/* Disable STUART interrupts */
	stuart_writel(si, 0, STIER);
	/* enable STUART interrupt to the processor */
	stuart_writel(si, MCR_OUT2, STMCR);
	/* configure SIR frame format: StartBit - Data 7 ... Data 0 - Stop Bit */
	stuart_writel(si, LCR_WLS0 | LCR_WLS1, STLCR);
	/* enable FIFO, we use FIFO to improve performance */
	stuart_writel(si, FCR_TRFIFOE | FCR_ITL_32, STFCR);

	/* disable FICP */
	ficp_writel(si, 0, ICCR0);
	/* configure FICP ICCR2 */
	ficp_writel(si, ICCR2_TXP | ICCR2_TRIG_32, ICCR2);
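
	/*
	 * Claim we were running at 4 Mbps so that the set_speed() call below
	 * takes its full IS_FIR() teardown path and programs the STUART for
	 * SIR from scratch, rather than only reloading the baud divisor.
	 */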
	/* force SIR reinitialization */
	si->speed = 4000000;
	pxa_irda_set_speed(si, 9600);

	printk(KERN_DEBUG "pxa_ir: irda startup\n");
}
static void pxa_irda_shutdown(struct pxa_irda *si)
{
	unsigned long flags;

	local_irq_save(flags);

	/* disable STUART and interrupt */
	stuart_writel(si, 0, STIER);
	/* disable STUART SIR mode */
	stuart_writel(si, 0, STISR);

	/* disable DMA */
	dmaengine_terminate_all(si->rxdma);
	dmaengine_terminate_all(si->txdma);
	/* disable FICP */
	ficp_writel(si, 0, ICCR0);

	/* disable the STUART or FICP clocks */
	pxa_irda_disable_clk(si);

	local_irq_restore(flags);

	/* power off board transceiver */
	pxa_irda_set_mode(si, IR_OFF);

	printk(KERN_DEBUG "pxa_ir: irda shutdown\n");
}
static int pxa_irda_start(struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	dma_cap_mask_t mask;
	struct dma_slave_config config;
	struct pxad_param param;
	int err;

	si->speed = 9600;

	err = request_irq(si->uart_irq, pxa_irda_sir_irq, 0, dev->name, dev);
	if (err)
		goto err_irq1;

	err = request_irq(si->icp_irq, pxa_irda_fir_irq, 0, dev->name, dev);
	if (err)
		goto err_irq2;

	/*
	 * The interrupt must remain disabled for now.
	 */
	disable_irq(si->uart_irq);
	disable_irq(si->icp_irq);

	err = -EBUSY;
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	param.prio = PXAD_PRIO_LOWEST;

	memset(&config, 0, sizeof(config));
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	config.src_addr = (dma_addr_t)si->irda_base + ICDR;
	config.dst_addr = (dma_addr_t)si->irda_base + ICDR;
	config.src_maxburst = 32;
	config.dst_maxburst = 32;
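
	/*
	 * A single slave config serves both channels: the FICP data FIFO
	 * (ICDR) is the device-side address for RX (src) and TX (dst) alike,
	 * accessed a byte at a time in 32-byte bursts to match the FIFO
	 * trigger level programmed in pxa_irda_startup().
	 */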
	param.drcmr = si->drcmr_rx;
	si->rxdma = dma_request_slave_channel_compat(mask, pxad_filter_fn,
						     &param, &dev->dev, "rx");
	if (!si->rxdma)
		goto err_rx_dma;

	param.drcmr = si->drcmr_tx;
	si->txdma = dma_request_slave_channel_compat(mask, pxad_filter_fn,
						     &param, &dev->dev, "tx");
	if (!si->txdma)
		goto err_tx_dma;

	err = dmaengine_slave_config(si->rxdma, &config);
	if (err)
		goto err_dma_rx_buff;
	err = dmaengine_slave_config(si->txdma, &config);
	if (err)
		goto err_dma_rx_buff;

	err = -ENOMEM;
	si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
					     &si->dma_rx_buff_phy, GFP_KERNEL);
	if (!si->dma_rx_buff)
		goto err_dma_rx_buff;

	si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
					     &si->dma_tx_buff_phy, GFP_KERNEL);
	if (!si->dma_tx_buff)
		goto err_dma_tx_buff;

	/* Setup the serial port for the initial speed. */
	pxa_irda_startup(si);

	/*
	 * Open a new IrLAP layer instance.
	 */
	si->irlap = irlap_open(dev, &si->qos, "pxa");
	err = -ENOMEM;
	if (!si->irlap)
		goto err_irlap;

	/*
	 * Now enable the interrupt and start the queue
	 */
	enable_irq(si->uart_irq);
	enable_irq(si->icp_irq);
	netif_start_queue(dev);

	printk(KERN_DEBUG "pxa_ir: irda driver opened\n");

	return 0;

err_irlap:
	pxa_irda_shutdown(si);
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
err_dma_tx_buff:
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
err_dma_rx_buff:
	dma_release_channel(si->txdma);
err_tx_dma:
	dma_release_channel(si->rxdma);
err_rx_dma:
	free_irq(si->icp_irq, dev);
err_irq2:
	free_irq(si->uart_irq, dev);
err_irq1:

	return err;
}
static int pxa_irda_stop(struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);

	netif_stop_queue(dev);

	pxa_irda_shutdown(si);

	/* Stop IrLAP */
	if (si->irlap) {
		irlap_close(si->irlap);
		si->irlap = NULL;
	}

	free_irq(si->uart_irq, dev);
	free_irq(si->icp_irq, dev);

	dmaengine_terminate_all(si->rxdma);
	dmaengine_terminate_all(si->txdma);
	dma_release_channel(si->rxdma);
	dma_release_channel(si->txdma);

	if (si->dma_rx_buff)
		dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
				  si->dma_rx_buff, si->dma_rx_buff_phy);
	if (si->dma_tx_buff)
		dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
				  si->dma_tx_buff, si->dma_tx_buff_phy);

	printk(KERN_DEBUG "pxa_ir: irda driver closed\n");
	return 0;
}
static int pxa_irda_suspend(struct platform_device *_dev, pm_message_t state)
{
	struct net_device *dev = platform_get_drvdata(_dev);
	struct pxa_irda *si;

	if (dev && netif_running(dev)) {
		si = netdev_priv(dev);
		netif_device_detach(dev);
		pxa_irda_shutdown(si);
	}

	return 0;
}

static int pxa_irda_resume(struct platform_device *_dev)
{
	struct net_device *dev = platform_get_drvdata(_dev);
	struct pxa_irda *si;

	if (dev && netif_running(dev)) {
		si = netdev_priv(dev);
		pxa_irda_startup(si);
		netif_device_attach(dev);
		netif_wake_queue(dev);
	}

	return 0;
}
static int pxa_irda_init_iobuf(iobuff_t *io, int size)
{
	io->head = kmalloc(size, GFP_KERNEL | GFP_DMA);
	if (io->head != NULL) {
		io->truesize = size;
		io->in_frame = FALSE;
		io->state = OUTSIDE_FRAME;
		io->data = io->head;
	}
	return io->head ? 0 : -ENOMEM;
}
static const struct net_device_ops pxa_irda_netdev_ops = {
	.ndo_open		= pxa_irda_start,
	.ndo_stop		= pxa_irda_stop,
	.ndo_start_xmit		= pxa_irda_hard_xmit,
	.ndo_do_ioctl		= pxa_irda_ioctl,
};
static int pxa_irda_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct resource *res;
	struct pxa_irda *si;
	void __iomem *ficp, *stuart;
	unsigned int baudrate_mask;
	int err;

	if (!pdev->dev.platform_data)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ficp = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ficp)) {
		dev_err(&pdev->dev, "resource ficp not defined\n");
		return PTR_ERR(ficp);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	stuart = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(stuart)) {
		dev_err(&pdev->dev, "resource stuart not defined\n");
		return PTR_ERR(stuart);
	}

	dev = alloc_irdadev(sizeof(struct pxa_irda));
	if (!dev) {
		err = -ENOMEM;
		goto err_mem_1;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);
	si = netdev_priv(dev);
	si->dev = &pdev->dev;
	si->pdata = pdev->dev.platform_data;

	si->irda_base = ficp;
	si->stuart_base = stuart;
	si->uart_irq = platform_get_irq(pdev, 0);
	si->icp_irq = platform_get_irq(pdev, 1);

	si->sir_clk = devm_clk_get(&pdev->dev, "UARTCLK");
	si->fir_clk = devm_clk_get(&pdev->dev, "FICPCLK");
	if (IS_ERR(si->sir_clk) || IS_ERR(si->fir_clk)) {
		err = PTR_ERR(IS_ERR(si->sir_clk) ? si->sir_clk : si->fir_clk);
		goto err_mem_4;
	}

	res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (res)
		si->drcmr_rx = res->start;
	res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (res)
		si->drcmr_tx = res->start;

	/*
	 * Initialise the SIR buffers
	 */
	err = pxa_irda_init_iobuf(&si->rx_buff, 14384);
	if (err)
		goto err_mem_4;
	err = pxa_irda_init_iobuf(&si->tx_buff, 4000);
	if (err)
		goto err_mem_5;

	if (gpio_is_valid(si->pdata->gpio_pwdown)) {
		err = gpio_request(si->pdata->gpio_pwdown, "IrDA switch");
		if (err)
			goto err_startup;
		err = gpio_direction_output(si->pdata->gpio_pwdown,
					    !si->pdata->gpio_pwdown_inverted);
		if (err) {
			gpio_free(si->pdata->gpio_pwdown);
			goto err_startup;
		}
	}

	if (si->pdata->startup) {
		err = si->pdata->startup(si->dev);
		if (err)
			goto err_startup;
	}

	if (gpio_is_valid(si->pdata->gpio_pwdown) && si->pdata->startup)
		dev_warn(si->dev, "gpio_pwdown and startup() both defined!\n");

	dev->netdev_ops = &pxa_irda_netdev_ops;

	irda_init_max_qos_capabilies(&si->qos);

	baudrate_mask = 0;
	if (si->pdata->transceiver_cap & IR_SIRMODE)
		baudrate_mask |= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
	if (si->pdata->transceiver_cap & IR_FIRMODE)
		baudrate_mask |= IR_4000000 << 8;
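	/*
	 * The IrLAP baud-rate QoS field is 16 bits wide; IR_4000000 is
	 * defined relative to its second byte, hence the shift by 8.
	 */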
	si->qos.baud_rate.bits &= baudrate_mask;
	si->qos.min_turn_time.bits = 7;  /* 1ms or more */

	irda_qos_bits_to_value(&si->qos);

	err = register_netdev(dev);

	if (err == 0)
		platform_set_drvdata(pdev, dev);

	if (err) {
		if (si->pdata->shutdown)
			si->pdata->shutdown(si->dev);
err_startup:
		kfree(si->tx_buff.head);
err_mem_5:
		kfree(si->rx_buff.head);
err_mem_4:
		free_netdev(dev);
	}
err_mem_1:
	return err;
}
static int pxa_irda_remove(struct platform_device *_dev)
{
	struct net_device *dev = platform_get_drvdata(_dev);

	if (dev) {
		struct pxa_irda *si = netdev_priv(dev);
		unregister_netdev(dev);
		if (gpio_is_valid(si->pdata->gpio_pwdown))
			gpio_free(si->pdata->gpio_pwdown);
		if (si->pdata->shutdown)
			si->pdata->shutdown(si->dev);
		kfree(si->tx_buff.head);
		kfree(si->rx_buff.head);
		free_netdev(dev);
	}

	return 0;
}
static struct platform_driver pxa_ir_driver = {
	.driver		= {
		.name	= "pxa2xx-ir",
	},
	.probe		= pxa_irda_probe,
	.remove		= pxa_irda_remove,
	.suspend	= pxa_irda_suspend,
	.resume		= pxa_irda_resume,
};

module_platform_driver(pxa_ir_driver);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-ir");