/*
 * Blackfin Infra-red Driver
 *
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Licensed under the GPL-2 or later.
 */
#ifdef CONFIG_SIR_BFIN_DMA
/* RX DMA runs as a 2D autobuffer: XCNT bytes per row, YCNT rows per page.
 * FLUSH_JIFS is the poll interval used to drain a partially-filled row.
 */
#define DMA_SIR_RX_XCNT		10
#define DMA_SIR_RX_YCNT		(PAGE_SIZE / DMA_SIR_RX_XCNT)
#define DMA_SIR_RX_FLUSH_JIFS	(HZ * 4 / 250)
#endif
/* Highest baud rate the hardware supports.  The two values were clearly
 * selected by a preprocessor conditional that the extraction dropped
 * (a silicon-anomaly workaround caps the rate at 57600 on affected parts).
 * NOTE(review): conditional reconstructed — verify the anomaly number
 * against the board support headers.
 */
#if ANOMALY_05000447
static int max_rate = 57600;
#else
static int max_rate = 115200;
#endif
25 static void turnaround_delay(unsigned long last_jif
, int mtt
)
29 mtt
= mtt
< 10000 ? 10000 : mtt
;
30 ticks
= 1 + mtt
/ (USEC_PER_SEC
/ HZ
);
31 schedule_timeout_uninterruptible(ticks
);
34 static void __devinit
bfin_sir_init_ports(struct bfin_sir_port
*sp
, struct platform_device
*pdev
)
39 for (i
= 0; i
< pdev
->num_resources
; i
++) {
40 res
= &pdev
->resource
[i
];
43 sp
->membase
= (void __iomem
*)res
->start
;
49 sp
->rx_dma_channel
= res
->start
;
50 sp
->tx_dma_channel
= res
->end
;
58 #ifdef CONFIG_SIR_BFIN_DMA
60 init_timer(&(sp
->rx_dma_timer
));
64 static void bfin_sir_stop_tx(struct bfin_sir_port
*port
)
66 #ifdef CONFIG_SIR_BFIN_DMA
67 disable_dma(port
->tx_dma_channel
);
70 while (!(UART_GET_LSR(port
) & THRE
)) {
75 UART_CLEAR_IER(port
, ETBEI
);
78 static void bfin_sir_enable_tx(struct bfin_sir_port
*port
)
80 UART_SET_IER(port
, ETBEI
);
83 static void bfin_sir_stop_rx(struct bfin_sir_port
*port
)
85 UART_CLEAR_IER(port
, ERBFI
);
88 static void bfin_sir_enable_rx(struct bfin_sir_port
*port
)
90 UART_SET_IER(port
, ERBFI
);
93 static int bfin_sir_set_speed(struct bfin_sir_port
*port
, int speed
)
97 unsigned short val
, lsr
, lcr
;
111 * IRDA is not affected by anomaly 05000230, so there is no
112 * need to tweak the divisor like he UART driver (which will
113 * slightly speed up the baud rate on us).
115 quot
= (port
->clk
+ (8 * speed
)) / (16 * speed
);
119 lsr
= UART_GET_LSR(port
);
120 } while (!(lsr
& TEMT
) && count
--);
122 /* The useconds for 1 bits to transmit */
123 utime
= 1000000 / speed
+ 1;
125 /* Clear UCEN bit to reset the UART state machine
126 * and control registers
128 val
= UART_GET_GCTL(port
);
130 UART_PUT_GCTL(port
, val
);
132 /* Set DLAB in LCR to Access THR RBR IER */
136 UART_PUT_DLL(port
, quot
& 0xFF);
137 UART_PUT_DLH(port
, (quot
>> 8) & 0xFF);
140 /* Clear DLAB in LCR */
141 UART_CLEAR_DLAB(port
);
144 UART_PUT_LCR(port
, lcr
);
146 val
= UART_GET_GCTL(port
);
148 UART_PUT_GCTL(port
, val
);
153 printk(KERN_WARNING
"bfin_sir: Invalid speed %d\n", speed
);
157 val
= UART_GET_GCTL(port
);
158 /* If not add the 'RPOLC', we can't catch the receive interrupt.
159 * It's related with the HW layout and the IR transiver.
162 UART_PUT_GCTL(port
, val
);
166 static int bfin_sir_is_receiving(struct net_device
*dev
)
168 struct bfin_sir_self
*self
= netdev_priv(dev
);
169 struct bfin_sir_port
*port
= self
->sir_port
;
171 if (!(UART_GET_IER(port
) & ERBFI
))
173 return self
->rx_buff
.state
!= OUTSIDE_FRAME
;
176 #ifdef CONFIG_SIR_BFIN_PIO
177 static void bfin_sir_tx_chars(struct net_device
*dev
)
180 struct bfin_sir_self
*self
= netdev_priv(dev
);
181 struct bfin_sir_port
*port
= self
->sir_port
;
183 if (self
->tx_buff
.len
!= 0) {
184 chr
= *(self
->tx_buff
.data
);
185 UART_PUT_CHAR(port
, chr
);
186 self
->tx_buff
.data
++;
189 self
->stats
.tx_packets
++;
190 self
->stats
.tx_bytes
+= self
->tx_buff
.data
- self
->tx_buff
.head
;
191 if (self
->newspeed
) {
192 bfin_sir_set_speed(port
, self
->newspeed
);
193 self
->speed
= self
->newspeed
;
196 bfin_sir_stop_tx(port
);
197 bfin_sir_enable_rx(port
);
199 netif_wake_queue(dev
);
203 static void bfin_sir_rx_chars(struct net_device
*dev
)
205 struct bfin_sir_self
*self
= netdev_priv(dev
);
206 struct bfin_sir_port
*port
= self
->sir_port
;
209 UART_CLEAR_LSR(port
);
210 ch
= UART_GET_CHAR(port
);
211 async_unwrap_char(dev
, &self
->stats
, &self
->rx_buff
, ch
);
212 dev
->last_rx
= jiffies
;
215 static irqreturn_t
bfin_sir_rx_int(int irq
, void *dev_id
)
217 struct net_device
*dev
= dev_id
;
218 struct bfin_sir_self
*self
= netdev_priv(dev
);
219 struct bfin_sir_port
*port
= self
->sir_port
;
221 spin_lock(&self
->lock
);
222 while ((UART_GET_LSR(port
) & DR
))
223 bfin_sir_rx_chars(dev
);
224 spin_unlock(&self
->lock
);
229 static irqreturn_t
bfin_sir_tx_int(int irq
, void *dev_id
)
231 struct net_device
*dev
= dev_id
;
232 struct bfin_sir_self
*self
= netdev_priv(dev
);
233 struct bfin_sir_port
*port
= self
->sir_port
;
235 spin_lock(&self
->lock
);
236 if (UART_GET_LSR(port
) & THRE
)
237 bfin_sir_tx_chars(dev
);
238 spin_unlock(&self
->lock
);
242 #endif /* CONFIG_SIR_BFIN_PIO */
244 #ifdef CONFIG_SIR_BFIN_DMA
245 static void bfin_sir_dma_tx_chars(struct net_device
*dev
)
247 struct bfin_sir_self
*self
= netdev_priv(dev
);
248 struct bfin_sir_port
*port
= self
->sir_port
;
254 if (self
->tx_buff
.len
== 0) {
255 self
->stats
.tx_packets
++;
256 if (self
->newspeed
) {
257 bfin_sir_set_speed(port
, self
->newspeed
);
258 self
->speed
= self
->newspeed
;
261 bfin_sir_enable_rx(port
);
263 netif_wake_queue(dev
);
267 blackfin_dcache_flush_range((unsigned long)(self
->tx_buff
.data
),
268 (unsigned long)(self
->tx_buff
.data
+self
->tx_buff
.len
));
269 set_dma_config(port
->tx_dma_channel
,
270 set_bfin_dma_config(DIR_READ
, DMA_FLOW_STOP
,
271 INTR_ON_BUF
, DIMENSION_LINEAR
, DATA_SIZE_8
,
273 set_dma_start_addr(port
->tx_dma_channel
,
274 (unsigned long)(self
->tx_buff
.data
));
275 set_dma_x_count(port
->tx_dma_channel
, self
->tx_buff
.len
);
276 set_dma_x_modify(port
->tx_dma_channel
, 1);
277 enable_dma(port
->tx_dma_channel
);
280 static irqreturn_t
bfin_sir_dma_tx_int(int irq
, void *dev_id
)
282 struct net_device
*dev
= dev_id
;
283 struct bfin_sir_self
*self
= netdev_priv(dev
);
284 struct bfin_sir_port
*port
= self
->sir_port
;
286 spin_lock(&self
->lock
);
287 if (!(get_dma_curr_irqstat(port
->tx_dma_channel
) & DMA_RUN
)) {
288 clear_dma_irqstat(port
->tx_dma_channel
);
289 bfin_sir_stop_tx(port
);
291 self
->stats
.tx_packets
++;
292 self
->stats
.tx_bytes
+= self
->tx_buff
.len
;
293 self
->tx_buff
.len
= 0;
294 if (self
->newspeed
) {
295 bfin_sir_set_speed(port
, self
->newspeed
);
296 self
->speed
= self
->newspeed
;
299 bfin_sir_enable_rx(port
);
301 netif_wake_queue(dev
);
304 spin_unlock(&self
->lock
);
309 static void bfin_sir_dma_rx_chars(struct net_device
*dev
)
311 struct bfin_sir_self
*self
= netdev_priv(dev
);
312 struct bfin_sir_port
*port
= self
->sir_port
;
315 UART_CLEAR_LSR(port
);
317 for (i
= port
->rx_dma_buf
.head
; i
< port
->rx_dma_buf
.tail
; i
++)
318 async_unwrap_char(dev
, &self
->stats
, &self
->rx_buff
, port
->rx_dma_buf
.buf
[i
]);
321 void bfin_sir_rx_dma_timeout(struct net_device
*dev
)
323 struct bfin_sir_self
*self
= netdev_priv(dev
);
324 struct bfin_sir_port
*port
= self
->sir_port
;
328 spin_lock_irqsave(&self
->lock
, flags
);
329 x_pos
= DMA_SIR_RX_XCNT
- get_dma_curr_xcount(port
->rx_dma_channel
);
330 if (x_pos
== DMA_SIR_RX_XCNT
)
333 pos
= port
->rx_dma_nrows
* DMA_SIR_RX_XCNT
+ x_pos
;
335 if (pos
> port
->rx_dma_buf
.tail
) {
336 port
->rx_dma_buf
.tail
= pos
;
337 bfin_sir_dma_rx_chars(dev
);
338 port
->rx_dma_buf
.head
= port
->rx_dma_buf
.tail
;
340 spin_unlock_irqrestore(&self
->lock
, flags
);
343 static irqreturn_t
bfin_sir_dma_rx_int(int irq
, void *dev_id
)
345 struct net_device
*dev
= dev_id
;
346 struct bfin_sir_self
*self
= netdev_priv(dev
);
347 struct bfin_sir_port
*port
= self
->sir_port
;
348 unsigned short irqstat
;
350 spin_lock(&self
->lock
);
352 port
->rx_dma_nrows
++;
353 port
->rx_dma_buf
.tail
= DMA_SIR_RX_XCNT
* port
->rx_dma_nrows
;
354 bfin_sir_dma_rx_chars(dev
);
355 if (port
->rx_dma_nrows
>= DMA_SIR_RX_YCNT
) {
356 port
->rx_dma_nrows
= 0;
357 port
->rx_dma_buf
.tail
= 0;
359 port
->rx_dma_buf
.head
= port
->rx_dma_buf
.tail
;
361 irqstat
= get_dma_curr_irqstat(port
->rx_dma_channel
);
362 clear_dma_irqstat(port
->rx_dma_channel
);
363 spin_unlock(&self
->lock
);
365 mod_timer(&port
->rx_dma_timer
, jiffies
+ DMA_SIR_RX_FLUSH_JIFS
);
368 #endif /* CONFIG_SIR_BFIN_DMA */
370 static int bfin_sir_startup(struct bfin_sir_port
*port
, struct net_device
*dev
)
372 #ifdef CONFIG_SIR_BFIN_DMA
373 dma_addr_t dma_handle
;
374 #endif /* CONFIG_SIR_BFIN_DMA */
376 if (request_dma(port
->rx_dma_channel
, "BFIN_UART_RX") < 0) {
377 dev_warn(&dev
->dev
, "Unable to attach SIR RX DMA channel\n");
381 if (request_dma(port
->tx_dma_channel
, "BFIN_UART_TX") < 0) {
382 dev_warn(&dev
->dev
, "Unable to attach SIR TX DMA channel\n");
383 free_dma(port
->rx_dma_channel
);
387 #ifdef CONFIG_SIR_BFIN_DMA
389 set_dma_callback(port
->rx_dma_channel
, bfin_sir_dma_rx_int
, dev
);
390 set_dma_callback(port
->tx_dma_channel
, bfin_sir_dma_tx_int
, dev
);
392 port
->rx_dma_buf
.buf
= (unsigned char *)dma_alloc_coherent(NULL
, PAGE_SIZE
, &dma_handle
, GFP_DMA
);
393 port
->rx_dma_buf
.head
= 0;
394 port
->rx_dma_buf
.tail
= 0;
395 port
->rx_dma_nrows
= 0;
397 set_dma_config(port
->rx_dma_channel
,
398 set_bfin_dma_config(DIR_WRITE
, DMA_FLOW_AUTO
,
399 INTR_ON_ROW
, DIMENSION_2D
,
400 DATA_SIZE_8
, DMA_SYNC_RESTART
));
401 set_dma_x_count(port
->rx_dma_channel
, DMA_SIR_RX_XCNT
);
402 set_dma_x_modify(port
->rx_dma_channel
, 1);
403 set_dma_y_count(port
->rx_dma_channel
, DMA_SIR_RX_YCNT
);
404 set_dma_y_modify(port
->rx_dma_channel
, 1);
405 set_dma_start_addr(port
->rx_dma_channel
, (unsigned long)port
->rx_dma_buf
.buf
);
406 enable_dma(port
->rx_dma_channel
);
408 port
->rx_dma_timer
.data
= (unsigned long)(dev
);
409 port
->rx_dma_timer
.function
= (void *)bfin_sir_rx_dma_timeout
;
413 if (request_irq(port
->irq
, bfin_sir_rx_int
, IRQF_DISABLED
, "BFIN_SIR_RX", dev
)) {
414 dev_warn(&dev
->dev
, "Unable to attach SIR RX interrupt\n");
418 if (request_irq(port
->irq
+1, bfin_sir_tx_int
, IRQF_DISABLED
, "BFIN_SIR_TX", dev
)) {
419 dev_warn(&dev
->dev
, "Unable to attach SIR TX interrupt\n");
420 free_irq(port
->irq
, dev
);
428 static void bfin_sir_shutdown(struct bfin_sir_port
*port
, struct net_device
*dev
)
432 bfin_sir_stop_rx(port
);
434 val
= UART_GET_GCTL(port
);
435 val
&= ~(UCEN
| IREN
| RPOLC
);
436 UART_PUT_GCTL(port
, val
);
438 #ifdef CONFIG_SIR_BFIN_DMA
439 disable_dma(port
->tx_dma_channel
);
440 disable_dma(port
->rx_dma_channel
);
441 del_timer(&(port
->rx_dma_timer
));
442 dma_free_coherent(NULL
, PAGE_SIZE
, port
->rx_dma_buf
.buf
, 0);
444 free_irq(port
->irq
+1, dev
);
445 free_irq(port
->irq
, dev
);
447 free_dma(port
->tx_dma_channel
);
448 free_dma(port
->rx_dma_channel
);
452 static int bfin_sir_suspend(struct platform_device
*pdev
, pm_message_t state
)
454 struct bfin_sir_port
*sir_port
;
455 struct net_device
*dev
;
456 struct bfin_sir_self
*self
;
458 sir_port
= platform_get_drvdata(pdev
);
463 self
= netdev_priv(dev
);
465 flush_work(&self
->work
);
466 bfin_sir_shutdown(self
->sir_port
, dev
);
467 netif_device_detach(dev
);
472 static int bfin_sir_resume(struct platform_device
*pdev
)
474 struct bfin_sir_port
*sir_port
;
475 struct net_device
*dev
;
476 struct bfin_sir_self
*self
;
477 struct bfin_sir_port
*port
;
479 sir_port
= platform_get_drvdata(pdev
);
484 self
= netdev_priv(dev
);
485 port
= self
->sir_port
;
487 if (self
->newspeed
) {
488 self
->speed
= self
->newspeed
;
491 bfin_sir_startup(port
, dev
);
492 bfin_sir_set_speed(port
, 9600);
493 bfin_sir_enable_rx(port
);
494 netif_device_attach(dev
);
/* !CONFIG_PM stubs — presumably the #else branch of a CONFIG_PM
 * conditional whose directives were lost in extraction; verify.
 */
#define bfin_sir_suspend   NULL
#define bfin_sir_resume    NULL
503 static void bfin_sir_send_work(struct work_struct
*work
)
505 struct bfin_sir_self
*self
= container_of(work
, struct bfin_sir_self
, work
);
506 struct net_device
*dev
= self
->sir_port
->dev
;
507 struct bfin_sir_port
*port
= self
->sir_port
;
511 while (bfin_sir_is_receiving(dev
) && --tx_cnt
)
512 turnaround_delay(dev
->last_rx
, self
->mtt
);
514 bfin_sir_stop_rx(port
);
516 /* To avoid losting RX interrupt, we reset IR function before
517 * sending data. We also can set the speed, which will
518 * reset all the UART.
520 val
= UART_GET_GCTL(port
);
521 val
&= ~(IREN
| RPOLC
);
522 UART_PUT_GCTL(port
, val
);
525 UART_PUT_GCTL(port
, val
);
527 /* bfin_sir_set_speed(port, self->speed); */
529 #ifdef CONFIG_SIR_BFIN_DMA
530 bfin_sir_dma_tx_chars(dev
);
532 bfin_sir_enable_tx(port
);
533 dev
->trans_start
= jiffies
;
536 static int bfin_sir_hard_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
538 struct bfin_sir_self
*self
= netdev_priv(dev
);
539 int speed
= irda_get_next_speed(skb
);
541 netif_stop_queue(dev
);
543 self
->mtt
= irda_get_mtt(skb
);
545 if (speed
!= self
->speed
&& speed
!= -1)
546 self
->newspeed
= speed
;
548 self
->tx_buff
.data
= self
->tx_buff
.head
;
550 self
->tx_buff
.len
= 0;
552 self
->tx_buff
.len
= async_wrap_skb(skb
, self
->tx_buff
.data
, self
->tx_buff
.truesize
);
554 schedule_work(&self
->work
);
560 static int bfin_sir_ioctl(struct net_device
*dev
, struct ifreq
*ifreq
, int cmd
)
562 struct if_irda_req
*rq
= (struct if_irda_req
*)ifreq
;
563 struct bfin_sir_self
*self
= netdev_priv(dev
);
564 struct bfin_sir_port
*port
= self
->sir_port
;
569 if (capable(CAP_NET_ADMIN
)) {
571 ret
= bfin_sir_set_speed(port
, rq
->ifr_baudrate
);
572 bfin_sir_enable_rx(port
);
574 dev_warn(&dev
->dev
, "SIOCSBANDWIDTH: !netif_running\n");
582 if (capable(CAP_NET_ADMIN
)) {
583 irda_device_set_media_busy(dev
, TRUE
);
589 rq
->ifr_receiving
= bfin_sir_is_receiving(dev
);
600 static struct net_device_stats
*bfin_sir_stats(struct net_device
*dev
)
602 struct bfin_sir_self
*self
= netdev_priv(dev
);
607 static int bfin_sir_open(struct net_device
*dev
)
609 struct bfin_sir_self
*self
= netdev_priv(dev
);
610 struct bfin_sir_port
*port
= self
->sir_port
;
616 spin_lock_init(&self
->lock
);
618 err
= bfin_sir_startup(port
, dev
);
622 bfin_sir_set_speed(port
, 9600);
624 self
->irlap
= irlap_open(dev
, &self
->qos
, DRIVER_NAME
);
628 INIT_WORK(&self
->work
, bfin_sir_send_work
);
631 * Now enable the interrupt then start the queue
634 bfin_sir_enable_rx(port
);
636 netif_start_queue(dev
);
642 bfin_sir_shutdown(port
, dev
);
647 static int bfin_sir_stop(struct net_device
*dev
)
649 struct bfin_sir_self
*self
= netdev_priv(dev
);
651 flush_work(&self
->work
);
652 bfin_sir_shutdown(self
->sir_port
, dev
);
655 dev_kfree_skb(self
->rxskb
);
661 irlap_close(self
->irlap
);
665 netif_stop_queue(dev
);
671 static int bfin_sir_init_iobuf(iobuff_t
*io
, int size
)
673 io
->head
= kmalloc(size
, GFP_KERNEL
);
677 io
->in_frame
= FALSE
;
678 io
->state
= OUTSIDE_FRAME
;
683 static const struct net_device_ops bfin_sir_ndo
= {
684 .ndo_open
= bfin_sir_open
,
685 .ndo_stop
= bfin_sir_stop
,
686 .ndo_start_xmit
= bfin_sir_hard_xmit
,
687 .ndo_do_ioctl
= bfin_sir_ioctl
,
688 .ndo_get_stats
= bfin_sir_stats
,
691 static int __devinit
bfin_sir_probe(struct platform_device
*pdev
)
693 struct net_device
*dev
;
694 struct bfin_sir_self
*self
;
695 unsigned int baudrate_mask
;
696 struct bfin_sir_port
*sir_port
;
699 if (pdev
->id
>= 0 && pdev
->id
< ARRAY_SIZE(per
) && \
700 per
[pdev
->id
][3] == pdev
->id
) {
701 err
= peripheral_request_list(per
[pdev
->id
], DRIVER_NAME
);
705 dev_err(&pdev
->dev
, "Invalid pdev id, please check board file\n");
710 sir_port
= kmalloc(sizeof(*sir_port
), GFP_KERNEL
);
714 bfin_sir_init_ports(sir_port
, pdev
);
716 dev
= alloc_irdadev(sizeof(*self
));
720 self
= netdev_priv(dev
);
721 self
->dev
= &pdev
->dev
;
722 self
->sir_port
= sir_port
;
725 err
= bfin_sir_init_iobuf(&self
->rx_buff
, IRDA_SKB_MAX_MTU
);
728 err
= bfin_sir_init_iobuf(&self
->tx_buff
, IRDA_SIR_MAX_FRAME
);
732 dev
->netdev_ops
= &bfin_sir_ndo
;
733 dev
->irq
= sir_port
->irq
;
735 irda_init_max_qos_capabilies(&self
->qos
);
737 baudrate_mask
= IR_9600
;
741 baudrate_mask
|= IR_115200
;
743 baudrate_mask
|= IR_57600
;
745 baudrate_mask
|= IR_38400
;
747 baudrate_mask
|= IR_19200
;
751 dev_warn(&pdev
->dev
, "Invalid maximum baud rate, using 9600\n");
754 self
->qos
.baud_rate
.bits
&= baudrate_mask
;
756 self
->qos
.min_turn_time
.bits
= 1; /* 10 ms or more */
758 irda_qos_bits_to_value(&self
->qos
);
760 err
= register_netdev(dev
);
763 kfree(self
->tx_buff
.head
);
765 kfree(self
->rx_buff
.head
);
771 peripheral_free_list(per
[pdev
->id
]);
773 platform_set_drvdata(pdev
, sir_port
);
778 static int __devexit
bfin_sir_remove(struct platform_device
*pdev
)
780 struct bfin_sir_port
*sir_port
;
781 struct net_device
*dev
= NULL
;
782 struct bfin_sir_self
*self
;
784 sir_port
= platform_get_drvdata(pdev
);
788 self
= netdev_priv(dev
);
789 unregister_netdev(dev
);
790 kfree(self
->tx_buff
.head
);
791 kfree(self
->rx_buff
.head
);
794 platform_set_drvdata(pdev
, NULL
);
799 static struct platform_driver bfin_ir_driver
= {
800 .probe
= bfin_sir_probe
,
801 .remove
= __devexit_p(bfin_sir_remove
),
802 .suspend
= bfin_sir_suspend
,
803 .resume
= bfin_sir_resume
,
809 static int __init
bfin_sir_init(void)
811 return platform_driver_register(&bfin_ir_driver
);
814 static void __exit
bfin_sir_exit(void)
816 platform_driver_unregister(&bfin_ir_driver
);
module_init(bfin_sir_init);
module_exit(bfin_sir_exit);
822 module_param(max_rate
, int, 0);
823 MODULE_PARM_DESC(max_rate
, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)");
825 MODULE_AUTHOR("Graf Yang <graf.yang@analog.com>");
826 MODULE_DESCRIPTION("Blackfin IrDA driver");
827 MODULE_LICENSE("GPL");