fs: use kmem_cache_zalloc instead
[pv_ops_mirror.git] / drivers / net / irda / pxaficp_ir.c
blob8c09344f58dc7a2440fbe968b4d76c32ee7300b1
1 /*
2 * linux/drivers/net/irda/pxaficp_ir.c
4 * Based on sa1100_ir.c by Russell King
6 * Changes copyright (C) 2003-2005 MontaVista Software, Inc.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
12 * Infra-red driver (SIR/FIR) for the PXA2xx embedded microprocessor
15 #include <linux/module.h>
16 #include <linux/types.h>
17 #include <linux/init.h>
18 #include <linux/errno.h>
19 #include <linux/netdevice.h>
20 #include <linux/slab.h>
21 #include <linux/rtnetlink.h>
22 #include <linux/interrupt.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/platform_device.h>
25 #include <linux/pm.h>
26 #include <linux/clk.h>
28 #include <net/irda/irda.h>
29 #include <net/irda/irmod.h>
30 #include <net/irda/wrapper.h>
31 #include <net/irda/irda_device.h>
33 #include <asm/irq.h>
34 #include <asm/dma.h>
35 #include <asm/delay.h>
36 #include <asm/hardware.h>
37 #include <asm/arch/irda.h>
38 #include <asm/arch/pxa-regs.h>
40 #ifdef CONFIG_MACH_MAINSTONE
41 #include <asm/arch/mainstone.h>
42 #endif
/*
 * STUART Infrared Selection Register (STISR) bit definitions:
 * receive/transmit polarity, pulse width mode, and IR/UART mode
 * selection for the receiver and transmitter paths.
 */
#define IrSR_RXPL_NEG_IS_ZERO	(1<<4)
#define IrSR_RXPL_POS_IS_ZERO	0x0
#define IrSR_TXPL_NEG_IS_ZERO	(1<<3)
#define IrSR_TXPL_POS_IS_ZERO	0x0
#define IrSR_XMODE_PULSE_1_6	(1<<2)
#define IrSR_XMODE_PULSE_3_16	0x0
#define IrSR_RCVEIR_IR_MODE	(1<<1)
#define IrSR_RCVEIR_UART_MODE	0x0
#define IrSR_XMITIR_IR_MODE	(1<<0)
#define IrSR_XMITIR_UART_MODE	0x0

/* Receiver in IR mode, transmitter parked in UART mode. */
#define IrSR_IR_RECEIVE_ON (\
		IrSR_RXPL_NEG_IS_ZERO | \
		IrSR_TXPL_POS_IS_ZERO | \
		IrSR_XMODE_PULSE_3_16 | \
		IrSR_RCVEIR_IR_MODE   | \
		IrSR_XMITIR_UART_MODE)

/* Transmitter in IR mode, receiver parked in UART mode. */
#define IrSR_IR_TRANSMIT_ON (\
		IrSR_RXPL_NEG_IS_ZERO | \
		IrSR_TXPL_POS_IS_ZERO | \
		IrSR_XMODE_PULSE_3_16 | \
		IrSR_RCVEIR_UART_MODE | \
		IrSR_XMITIR_IR_MODE)
69 struct pxa_irda {
70 int speed;
71 int newspeed;
72 unsigned long last_oscr;
74 unsigned char *dma_rx_buff;
75 unsigned char *dma_tx_buff;
76 dma_addr_t dma_rx_buff_phy;
77 dma_addr_t dma_tx_buff_phy;
78 unsigned int dma_tx_buff_len;
79 int txdma;
80 int rxdma;
82 struct net_device_stats stats;
83 struct irlap_cb *irlap;
84 struct qos_info qos;
86 iobuff_t tx_buff;
87 iobuff_t rx_buff;
89 struct device *dev;
90 struct pxaficp_platform_data *pdata;
91 struct clk *fir_clk;
92 struct clk *sir_clk;
93 struct clk *cur_clk;
96 static inline void pxa_irda_disable_clk(struct pxa_irda *si)
98 if (si->cur_clk)
99 clk_disable(si->cur_clk);
100 si->cur_clk = NULL;
103 static inline void pxa_irda_enable_firclk(struct pxa_irda *si)
105 si->cur_clk = si->fir_clk;
106 clk_enable(si->fir_clk);
109 static inline void pxa_irda_enable_sirclk(struct pxa_irda *si)
111 si->cur_clk = si->sir_clk;
112 clk_enable(si->sir_clk);
/* 4 Mbps and above runs on the FICP block; everything slower uses the STUART. */
#define IS_FIR(si)		((si)->speed >= 4000000)
/* Largest FIR frame the receive DMA buffer accommodates. */
#define IRDA_FRAME_SIZE_LIMIT	2047
119 inline static void pxa_irda_fir_dma_rx_start(struct pxa_irda *si)
121 DCSR(si->rxdma) = DCSR_NODESC;
122 DSADR(si->rxdma) = __PREG(ICDR);
123 DTADR(si->rxdma) = si->dma_rx_buff_phy;
124 DCMD(si->rxdma) = DCMD_INCTRGADDR | DCMD_FLOWSRC | DCMD_WIDTH1 | DCMD_BURST32 | IRDA_FRAME_SIZE_LIMIT;
125 DCSR(si->rxdma) |= DCSR_RUN;
128 inline static void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
130 DCSR(si->txdma) = DCSR_NODESC;
131 DSADR(si->txdma) = si->dma_tx_buff_phy;
132 DTADR(si->txdma) = __PREG(ICDR);
133 DCMD(si->txdma) = DCMD_INCSRCADDR | DCMD_FLOWTRG | DCMD_ENDIRQEN | DCMD_WIDTH1 | DCMD_BURST32 | si->dma_tx_buff_len;
134 DCSR(si->txdma) |= DCSR_RUN;
138 * Set the IrDA communications speed.
140 static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
142 unsigned long flags;
143 unsigned int divisor;
145 switch (speed) {
146 case 9600: case 19200: case 38400:
147 case 57600: case 115200:
149 /* refer to PXA250/210 Developer's Manual 10-7 */
150 /* BaudRate = 14.7456 MHz / (16*Divisor) */
151 divisor = 14745600 / (16 * speed);
153 local_irq_save(flags);
155 if (IS_FIR(si)) {
156 /* stop RX DMA */
157 DCSR(si->rxdma) &= ~DCSR_RUN;
158 /* disable FICP */
159 ICCR0 = 0;
160 pxa_irda_disable_clk(si);
162 /* set board transceiver to SIR mode */
163 si->pdata->transceiver_mode(si->dev, IR_SIRMODE);
165 /* configure GPIO46/47 */
166 pxa_gpio_mode(GPIO46_STRXD_MD);
167 pxa_gpio_mode(GPIO47_STTXD_MD);
169 /* enable the STUART clock */
170 pxa_irda_enable_sirclk(si);
173 /* disable STUART first */
174 STIER = 0;
176 /* access DLL & DLH */
177 STLCR |= LCR_DLAB;
178 STDLL = divisor & 0xff;
179 STDLH = divisor >> 8;
180 STLCR &= ~LCR_DLAB;
182 si->speed = speed;
183 STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
184 STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;
186 local_irq_restore(flags);
187 break;
189 case 4000000:
190 local_irq_save(flags);
192 /* disable STUART */
193 STIER = 0;
194 STISR = 0;
195 pxa_irda_disable_clk(si);
197 /* disable FICP first */
198 ICCR0 = 0;
200 /* set board transceiver to FIR mode */
201 si->pdata->transceiver_mode(si->dev, IR_FIRMODE);
203 /* configure GPIO46/47 */
204 pxa_gpio_mode(GPIO46_ICPRXD_MD);
205 pxa_gpio_mode(GPIO47_ICPTXD_MD);
207 /* enable the FICP clock */
208 pxa_irda_enable_firclk(si);
210 si->speed = speed;
211 pxa_irda_fir_dma_rx_start(si);
212 ICCR0 = ICCR0_ITR | ICCR0_RXE;
214 local_irq_restore(flags);
215 break;
217 default:
218 return -EINVAL;
221 return 0;
224 /* SIR interrupt service routine. */
225 static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
227 struct net_device *dev = dev_id;
228 struct pxa_irda *si = netdev_priv(dev);
229 int iir, lsr, data;
231 iir = STIIR;
233 switch (iir & 0x0F) {
234 case 0x06: /* Receiver Line Status */
235 lsr = STLSR;
236 while (lsr & LSR_FIFOE) {
237 data = STRBR;
238 if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) {
239 printk(KERN_DEBUG "pxa_ir: sir receiving error\n");
240 si->stats.rx_errors++;
241 if (lsr & LSR_FE)
242 si->stats.rx_frame_errors++;
243 if (lsr & LSR_OE)
244 si->stats.rx_fifo_errors++;
245 } else {
246 si->stats.rx_bytes++;
247 async_unwrap_char(dev, &si->stats, &si->rx_buff, data);
249 lsr = STLSR;
251 dev->last_rx = jiffies;
252 si->last_oscr = OSCR;
253 break;
255 case 0x04: /* Received Data Available */
256 /* forth through */
258 case 0x0C: /* Character Timeout Indication */
259 do {
260 si->stats.rx_bytes++;
261 async_unwrap_char(dev, &si->stats, &si->rx_buff, STRBR);
262 } while (STLSR & LSR_DR);
263 dev->last_rx = jiffies;
264 si->last_oscr = OSCR;
265 break;
267 case 0x02: /* Transmit FIFO Data Request */
268 while ((si->tx_buff.len) && (STLSR & LSR_TDRQ)) {
269 STTHR = *si->tx_buff.data++;
270 si->tx_buff.len -= 1;
273 if (si->tx_buff.len == 0) {
274 si->stats.tx_packets++;
275 si->stats.tx_bytes += si->tx_buff.data -
276 si->tx_buff.head;
278 /* We need to ensure that the transmitter has finished. */
279 while ((STLSR & LSR_TEMT) == 0)
280 cpu_relax();
281 si->last_oscr = OSCR;
284 * Ok, we've finished transmitting. Now enable
285 * the receiver. Sometimes we get a receive IRQ
286 * immediately after a transmit...
288 if (si->newspeed) {
289 pxa_irda_set_speed(si, si->newspeed);
290 si->newspeed = 0;
291 } else {
292 /* enable IR Receiver, disable IR Transmitter */
293 STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
294 /* enable STUART and receive interrupts */
295 STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;
297 /* I'm hungry! */
298 netif_wake_queue(dev);
300 break;
303 return IRQ_HANDLED;
306 /* FIR Receive DMA interrupt handler */
307 static void pxa_irda_fir_dma_rx_irq(int channel, void *data)
309 int dcsr = DCSR(channel);
311 DCSR(channel) = dcsr & ~DCSR_RUN;
313 printk(KERN_DEBUG "pxa_ir: fir rx dma bus error %#x\n", dcsr);
316 /* FIR Transmit DMA interrupt handler */
317 static void pxa_irda_fir_dma_tx_irq(int channel, void *data)
319 struct net_device *dev = data;
320 struct pxa_irda *si = netdev_priv(dev);
321 int dcsr;
323 dcsr = DCSR(channel);
324 DCSR(channel) = dcsr & ~DCSR_RUN;
326 if (dcsr & DCSR_ENDINTR) {
327 si->stats.tx_packets++;
328 si->stats.tx_bytes += si->dma_tx_buff_len;
329 } else {
330 si->stats.tx_errors++;
333 while (ICSR1 & ICSR1_TBY)
334 cpu_relax();
335 si->last_oscr = OSCR;
338 * HACK: It looks like the TBY bit is dropped too soon.
339 * Without this delay things break.
341 udelay(120);
343 if (si->newspeed) {
344 pxa_irda_set_speed(si, si->newspeed);
345 si->newspeed = 0;
346 } else {
347 int i = 64;
349 ICCR0 = 0;
350 pxa_irda_fir_dma_rx_start(si);
351 while ((ICSR1 & ICSR1_RNE) && i--)
352 (void)ICDR;
353 ICCR0 = ICCR0_ITR | ICCR0_RXE;
355 if (i < 0)
356 printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");
358 netif_wake_queue(dev);
361 /* EIF(Error in FIFO/End in Frame) handler for FIR */
362 static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0)
364 unsigned int len, stat, data;
366 /* Get the current data position. */
367 len = DTADR(si->rxdma) - si->dma_rx_buff_phy;
369 do {
370 /* Read Status, and then Data. */
371 stat = ICSR1;
372 rmb();
373 data = ICDR;
375 if (stat & (ICSR1_CRE | ICSR1_ROR)) {
376 si->stats.rx_errors++;
377 if (stat & ICSR1_CRE) {
378 printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
379 si->stats.rx_crc_errors++;
381 if (stat & ICSR1_ROR) {
382 printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
383 si->stats.rx_over_errors++;
385 } else {
386 si->dma_rx_buff[len++] = data;
388 /* If we hit the end of frame, there's no point in continuing. */
389 if (stat & ICSR1_EOF)
390 break;
391 } while (ICSR0 & ICSR0_EIF);
393 if (stat & ICSR1_EOF) {
394 /* end of frame. */
395 struct sk_buff *skb;
397 if (icsr0 & ICSR0_FRE) {
398 printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
399 si->stats.rx_dropped++;
400 return;
403 skb = alloc_skb(len+1,GFP_ATOMIC);
404 if (!skb) {
405 printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
406 si->stats.rx_dropped++;
407 return;
410 /* Align IP header to 20 bytes */
411 skb_reserve(skb, 1);
412 skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
413 skb_put(skb, len);
415 /* Feed it to IrLAP */
416 skb->dev = dev;
417 skb_reset_mac_header(skb);
418 skb->protocol = htons(ETH_P_IRDA);
419 netif_rx(skb);
421 si->stats.rx_packets++;
422 si->stats.rx_bytes += len;
424 dev->last_rx = jiffies;
428 /* FIR interrupt handler */
429 static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id)
431 struct net_device *dev = dev_id;
432 struct pxa_irda *si = netdev_priv(dev);
433 int icsr0, i = 64;
435 /* stop RX DMA */
436 DCSR(si->rxdma) &= ~DCSR_RUN;
437 si->last_oscr = OSCR;
438 icsr0 = ICSR0;
440 if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) {
441 if (icsr0 & ICSR0_FRE) {
442 printk(KERN_DEBUG "pxa_ir: fir receive frame error\n");
443 si->stats.rx_frame_errors++;
444 } else {
445 printk(KERN_DEBUG "pxa_ir: fir receive abort\n");
446 si->stats.rx_errors++;
448 ICSR0 = icsr0 & (ICSR0_FRE | ICSR0_RAB);
451 if (icsr0 & ICSR0_EIF) {
452 /* An error in FIFO occured, or there is a end of frame */
453 pxa_irda_fir_irq_eif(si, dev, icsr0);
456 ICCR0 = 0;
457 pxa_irda_fir_dma_rx_start(si);
458 while ((ICSR1 & ICSR1_RNE) && i--)
459 (void)ICDR;
460 ICCR0 = ICCR0_ITR | ICCR0_RXE;
462 if (i < 0)
463 printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");
465 return IRQ_HANDLED;
468 /* hard_xmit interface of irda device */
469 static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
471 struct pxa_irda *si = netdev_priv(dev);
472 int speed = irda_get_next_speed(skb);
475 * Does this packet contain a request to change the interface
476 * speed? If so, remember it until we complete the transmission
477 * of this frame.
479 if (speed != si->speed && speed != -1)
480 si->newspeed = speed;
483 * If this is an empty frame, we can bypass a lot.
485 if (skb->len == 0) {
486 if (si->newspeed) {
487 si->newspeed = 0;
488 pxa_irda_set_speed(si, speed);
490 dev_kfree_skb(skb);
491 return 0;
494 netif_stop_queue(dev);
496 if (!IS_FIR(si)) {
497 si->tx_buff.data = si->tx_buff.head;
498 si->tx_buff.len = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);
500 /* Disable STUART interrupts and switch to transmit mode. */
501 STIER = 0;
502 STISR = IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6;
504 /* enable STUART and transmit interrupts */
505 STIER = IER_UUE | IER_TIE;
506 } else {
507 unsigned long mtt = irda_get_mtt(skb);
509 si->dma_tx_buff_len = skb->len;
510 skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);
512 if (mtt)
513 while ((unsigned)(OSCR - si->last_oscr)/4 < mtt)
514 cpu_relax();
516 /* stop RX DMA, disable FICP */
517 DCSR(si->rxdma) &= ~DCSR_RUN;
518 ICCR0 = 0;
520 pxa_irda_fir_dma_tx_start(si);
521 ICCR0 = ICCR0_ITR | ICCR0_TXE;
524 dev_kfree_skb(skb);
525 dev->trans_start = jiffies;
526 return 0;
529 static int pxa_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
531 struct if_irda_req *rq = (struct if_irda_req *)ifreq;
532 struct pxa_irda *si = netdev_priv(dev);
533 int ret;
535 switch (cmd) {
536 case SIOCSBANDWIDTH:
537 ret = -EPERM;
538 if (capable(CAP_NET_ADMIN)) {
540 * We are unable to set the speed if the
541 * device is not running.
543 if (netif_running(dev)) {
544 ret = pxa_irda_set_speed(si,
545 rq->ifr_baudrate);
546 } else {
547 printk(KERN_INFO "pxa_ir: SIOCSBANDWIDTH: !netif_running\n");
548 ret = 0;
551 break;
553 case SIOCSMEDIABUSY:
554 ret = -EPERM;
555 if (capable(CAP_NET_ADMIN)) {
556 irda_device_set_media_busy(dev, TRUE);
557 ret = 0;
559 break;
561 case SIOCGRECEIVING:
562 ret = 0;
563 rq->ifr_receiving = IS_FIR(si) ? 0
564 : si->rx_buff.state != OUTSIDE_FRAME;
565 break;
567 default:
568 ret = -EOPNOTSUPP;
569 break;
572 return ret;
575 static struct net_device_stats *pxa_irda_stats(struct net_device *dev)
577 struct pxa_irda *si = netdev_priv(dev);
578 return &si->stats;
581 static void pxa_irda_startup(struct pxa_irda *si)
583 /* Disable STUART interrupts */
584 STIER = 0;
585 /* enable STUART interrupt to the processor */
586 STMCR = MCR_OUT2;
587 /* configure SIR frame format: StartBit - Data 7 ... Data 0 - Stop Bit */
588 STLCR = LCR_WLS0 | LCR_WLS1;
589 /* enable FIFO, we use FIFO to improve performance */
590 STFCR = FCR_TRFIFOE | FCR_ITL_32;
592 /* disable FICP */
593 ICCR0 = 0;
594 /* configure FICP ICCR2 */
595 ICCR2 = ICCR2_TXP | ICCR2_TRIG_32;
597 /* configure DMAC */
598 DRCMR17 = si->rxdma | DRCMR_MAPVLD;
599 DRCMR18 = si->txdma | DRCMR_MAPVLD;
601 /* force SIR reinitialization */
602 si->speed = 4000000;
603 pxa_irda_set_speed(si, 9600);
605 printk(KERN_DEBUG "pxa_ir: irda startup\n");
608 static void pxa_irda_shutdown(struct pxa_irda *si)
610 unsigned long flags;
612 local_irq_save(flags);
614 /* disable STUART and interrupt */
615 STIER = 0;
616 /* disable STUART SIR mode */
617 STISR = 0;
619 /* disable DMA */
620 DCSR(si->txdma) &= ~DCSR_RUN;
621 DCSR(si->rxdma) &= ~DCSR_RUN;
622 /* disable FICP */
623 ICCR0 = 0;
625 /* disable the STUART or FICP clocks */
626 pxa_irda_disable_clk(si);
628 DRCMR17 = 0;
629 DRCMR18 = 0;
631 local_irq_restore(flags);
633 /* power off board transceiver */
634 si->pdata->transceiver_mode(si->dev, IR_OFF);
636 printk(KERN_DEBUG "pxa_ir: irda shutdown\n");
639 static int pxa_irda_start(struct net_device *dev)
641 struct pxa_irda *si = netdev_priv(dev);
642 int err;
644 si->speed = 9600;
646 err = request_irq(IRQ_STUART, pxa_irda_sir_irq, 0, dev->name, dev);
647 if (err)
648 goto err_irq1;
650 err = request_irq(IRQ_ICP, pxa_irda_fir_irq, 0, dev->name, dev);
651 if (err)
652 goto err_irq2;
655 * The interrupt must remain disabled for now.
657 disable_irq(IRQ_STUART);
658 disable_irq(IRQ_ICP);
660 err = -EBUSY;
661 si->rxdma = pxa_request_dma("FICP_RX",DMA_PRIO_LOW, pxa_irda_fir_dma_rx_irq, dev);
662 if (si->rxdma < 0)
663 goto err_rx_dma;
665 si->txdma = pxa_request_dma("FICP_TX",DMA_PRIO_LOW, pxa_irda_fir_dma_tx_irq, dev);
666 if (si->txdma < 0)
667 goto err_tx_dma;
669 err = -ENOMEM;
670 si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
671 &si->dma_rx_buff_phy, GFP_KERNEL );
672 if (!si->dma_rx_buff)
673 goto err_dma_rx_buff;
675 si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
676 &si->dma_tx_buff_phy, GFP_KERNEL );
677 if (!si->dma_tx_buff)
678 goto err_dma_tx_buff;
680 /* Setup the serial port for the initial speed. */
681 pxa_irda_startup(si);
684 * Open a new IrLAP layer instance.
686 si->irlap = irlap_open(dev, &si->qos, "pxa");
687 err = -ENOMEM;
688 if (!si->irlap)
689 goto err_irlap;
692 * Now enable the interrupt and start the queue
694 enable_irq(IRQ_STUART);
695 enable_irq(IRQ_ICP);
696 netif_start_queue(dev);
698 printk(KERN_DEBUG "pxa_ir: irda driver opened\n");
700 return 0;
702 err_irlap:
703 pxa_irda_shutdown(si);
704 dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
705 err_dma_tx_buff:
706 dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
707 err_dma_rx_buff:
708 pxa_free_dma(si->txdma);
709 err_tx_dma:
710 pxa_free_dma(si->rxdma);
711 err_rx_dma:
712 free_irq(IRQ_ICP, dev);
713 err_irq2:
714 free_irq(IRQ_STUART, dev);
715 err_irq1:
717 return err;
720 static int pxa_irda_stop(struct net_device *dev)
722 struct pxa_irda *si = netdev_priv(dev);
724 netif_stop_queue(dev);
726 pxa_irda_shutdown(si);
728 /* Stop IrLAP */
729 if (si->irlap) {
730 irlap_close(si->irlap);
731 si->irlap = NULL;
734 free_irq(IRQ_STUART, dev);
735 free_irq(IRQ_ICP, dev);
737 pxa_free_dma(si->rxdma);
738 pxa_free_dma(si->txdma);
740 if (si->dma_rx_buff)
741 dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
742 if (si->dma_tx_buff)
743 dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
745 printk(KERN_DEBUG "pxa_ir: irda driver closed\n");
746 return 0;
749 static int pxa_irda_suspend(struct platform_device *_dev, pm_message_t state)
751 struct net_device *dev = platform_get_drvdata(_dev);
752 struct pxa_irda *si;
754 if (dev && netif_running(dev)) {
755 si = netdev_priv(dev);
756 netif_device_detach(dev);
757 pxa_irda_shutdown(si);
760 return 0;
/* Platform resume hook: reinitialise the hardware and reattach the device. */
static int pxa_irda_resume(struct platform_device *_dev)
{
	struct net_device *dev = platform_get_drvdata(_dev);
	struct pxa_irda *si;

	if (dev && netif_running(dev)) {
		si = netdev_priv(dev);
		pxa_irda_startup(si);
		netif_device_attach(dev);
		netif_wake_queue(dev);
	}

	return 0;
}
779 static int pxa_irda_init_iobuf(iobuff_t *io, int size)
781 io->head = kmalloc(size, GFP_KERNEL | GFP_DMA);
782 if (io->head != NULL) {
783 io->truesize = size;
784 io->in_frame = FALSE;
785 io->state = OUTSIDE_FRAME;
786 io->data = io->head;
788 return io->head ? 0 : -ENOMEM;
791 static int pxa_irda_probe(struct platform_device *pdev)
793 struct net_device *dev;
794 struct pxa_irda *si;
795 unsigned int baudrate_mask;
796 int err;
798 if (!pdev->dev.platform_data)
799 return -ENODEV;
801 err = request_mem_region(__PREG(STUART), 0x24, "IrDA") ? 0 : -EBUSY;
802 if (err)
803 goto err_mem_1;
805 err = request_mem_region(__PREG(FICP), 0x1c, "IrDA") ? 0 : -EBUSY;
806 if (err)
807 goto err_mem_2;
809 dev = alloc_irdadev(sizeof(struct pxa_irda));
810 if (!dev)
811 goto err_mem_3;
813 si = netdev_priv(dev);
814 si->dev = &pdev->dev;
815 si->pdata = pdev->dev.platform_data;
817 si->sir_clk = clk_get(&pdev->dev, "UARTCLK");
818 si->fir_clk = clk_get(&pdev->dev, "FICPCLK");
819 if (IS_ERR(si->sir_clk) || IS_ERR(si->fir_clk)) {
820 err = PTR_ERR(IS_ERR(si->sir_clk) ? si->sir_clk : si->fir_clk);
821 goto err_mem_4;
825 * Initialise the SIR buffers
827 err = pxa_irda_init_iobuf(&si->rx_buff, 14384);
828 if (err)
829 goto err_mem_4;
830 err = pxa_irda_init_iobuf(&si->tx_buff, 4000);
831 if (err)
832 goto err_mem_5;
834 dev->hard_start_xmit = pxa_irda_hard_xmit;
835 dev->open = pxa_irda_start;
836 dev->stop = pxa_irda_stop;
837 dev->do_ioctl = pxa_irda_ioctl;
838 dev->get_stats = pxa_irda_stats;
840 irda_init_max_qos_capabilies(&si->qos);
842 baudrate_mask = 0;
843 if (si->pdata->transceiver_cap & IR_SIRMODE)
844 baudrate_mask |= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
845 if (si->pdata->transceiver_cap & IR_FIRMODE)
846 baudrate_mask |= IR_4000000 << 8;
848 si->qos.baud_rate.bits &= baudrate_mask;
849 si->qos.min_turn_time.bits = 7; /* 1ms or more */
851 irda_qos_bits_to_value(&si->qos);
853 err = register_netdev(dev);
855 if (err == 0)
856 dev_set_drvdata(&pdev->dev, dev);
858 if (err) {
859 kfree(si->tx_buff.head);
860 err_mem_5:
861 kfree(si->rx_buff.head);
862 err_mem_4:
863 if (si->sir_clk && !IS_ERR(si->sir_clk))
864 clk_put(si->sir_clk);
865 if (si->fir_clk && !IS_ERR(si->fir_clk))
866 clk_put(si->fir_clk);
867 free_netdev(dev);
868 err_mem_3:
869 release_mem_region(__PREG(FICP), 0x1c);
870 err_mem_2:
871 release_mem_region(__PREG(STUART), 0x24);
873 err_mem_1:
874 return err;
877 static int pxa_irda_remove(struct platform_device *_dev)
879 struct net_device *dev = platform_get_drvdata(_dev);
881 if (dev) {
882 struct pxa_irda *si = netdev_priv(dev);
883 unregister_netdev(dev);
884 kfree(si->tx_buff.head);
885 kfree(si->rx_buff.head);
886 clk_put(si->fir_clk);
887 clk_put(si->sir_clk);
888 free_netdev(dev);
891 release_mem_region(__PREG(STUART), 0x24);
892 release_mem_region(__PREG(FICP), 0x1c);
894 return 0;
897 static struct platform_driver pxa_ir_driver = {
898 .driver = {
899 .name = "pxa2xx-ir",
901 .probe = pxa_irda_probe,
902 .remove = pxa_irda_remove,
903 .suspend = pxa_irda_suspend,
904 .resume = pxa_irda_resume,
907 static int __init pxa_irda_init(void)
909 return platform_driver_register(&pxa_ir_driver);
912 static void __exit pxa_irda_exit(void)
914 platform_driver_unregister(&pxa_ir_driver);
917 module_init(pxa_irda_init);
918 module_exit(pxa_irda_exit);
920 MODULE_LICENSE("GPL");