/*
 * Blackfin Infra-red Driver
 *
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Licensed under the GPL-2 or later.
 */
#ifdef CONFIG_SIR_BFIN_DMA
#define DMA_SIR_RX_XCNT		10				/* bytes per 2D DMA row */
#define DMA_SIR_RX_YCNT		(PAGE_SIZE / DMA_SIR_RX_XCNT)	/* rows in the RX DMA buffer */
#define DMA_SIR_RX_FLUSH_JIFS	(HZ * 4 / 250)			/* RX flush-timer period */
#endif

/*
 * Highest baud rate the driver advertises; PIO mode cannot keep up with
 * 115200, so it is capped at 57600.
 * NOTE(review): the mangled source showed two conflicting definitions of
 * max_rate; the CONFIG_SIR_BFIN_PIO conditional separating them has been
 * restored — confirm against the original file.
 */
#ifdef CONFIG_SIR_BFIN_PIO
static int max_rate = 57600;
#else
static int max_rate = 115200;
#endif
25 static void turnaround_delay(unsigned long last_jif
, int mtt
)
29 mtt
= mtt
< 10000 ? 10000 : mtt
;
30 ticks
= 1 + mtt
/ (USEC_PER_SEC
/ HZ
);
31 schedule_timeout_uninterruptible(ticks
);
34 static void bfin_sir_init_ports(struct bfin_sir_port
*sp
, struct platform_device
*pdev
)
39 for (i
= 0; i
< pdev
->num_resources
; i
++) {
40 res
= &pdev
->resource
[i
];
43 sp
->membase
= (void __iomem
*)res
->start
;
49 sp
->rx_dma_channel
= res
->start
;
50 sp
->tx_dma_channel
= res
->end
;
58 #ifdef CONFIG_SIR_BFIN_DMA
60 init_timer(&(sp
->rx_dma_timer
));
64 static void bfin_sir_stop_tx(struct bfin_sir_port
*port
)
66 #ifdef CONFIG_SIR_BFIN_DMA
67 disable_dma(port
->tx_dma_channel
);
70 while (!(UART_GET_LSR(port
) & THRE
)) {
75 UART_CLEAR_IER(port
, ETBEI
);
78 static void bfin_sir_enable_tx(struct bfin_sir_port
*port
)
80 UART_SET_IER(port
, ETBEI
);
83 static void bfin_sir_stop_rx(struct bfin_sir_port
*port
)
85 UART_CLEAR_IER(port
, ERBFI
);
88 static void bfin_sir_enable_rx(struct bfin_sir_port
*port
)
90 UART_SET_IER(port
, ERBFI
);
93 static int bfin_sir_set_speed(struct bfin_sir_port
*port
, int speed
)
97 unsigned short val
, lsr
, lcr
;
111 * IRDA is not affected by anomaly 05000230, so there is no
112 * need to tweak the divisor like he UART driver (which will
113 * slightly speed up the baud rate on us).
115 quot
= (port
->clk
+ (8 * speed
)) / (16 * speed
);
119 lsr
= UART_GET_LSR(port
);
120 } while (!(lsr
& TEMT
) && count
--);
122 /* The useconds for 1 bits to transmit */
123 utime
= 1000000 / speed
+ 1;
125 /* Clear UCEN bit to reset the UART state machine
126 * and control registers
128 val
= UART_GET_GCTL(port
);
130 UART_PUT_GCTL(port
, val
);
132 /* Set DLAB in LCR to Access THR RBR IER */
136 UART_PUT_DLL(port
, quot
& 0xFF);
137 UART_PUT_DLH(port
, (quot
>> 8) & 0xFF);
140 /* Clear DLAB in LCR */
141 UART_CLEAR_DLAB(port
);
144 UART_PUT_LCR(port
, lcr
);
146 val
= UART_GET_GCTL(port
);
148 UART_PUT_GCTL(port
, val
);
153 printk(KERN_WARNING
"bfin_sir: Invalid speed %d\n", speed
);
157 val
= UART_GET_GCTL(port
);
158 /* If not add the 'RPOLC', we can't catch the receive interrupt.
159 * It's related with the HW layout and the IR transiver.
161 val
|= UMOD_IRDA
| RPOLC
;
162 UART_PUT_GCTL(port
, val
);
166 static int bfin_sir_is_receiving(struct net_device
*dev
)
168 struct bfin_sir_self
*self
= netdev_priv(dev
);
169 struct bfin_sir_port
*port
= self
->sir_port
;
171 if (!(UART_GET_IER(port
) & ERBFI
))
173 return self
->rx_buff
.state
!= OUTSIDE_FRAME
;
176 #ifdef CONFIG_SIR_BFIN_PIO
177 static void bfin_sir_tx_chars(struct net_device
*dev
)
180 struct bfin_sir_self
*self
= netdev_priv(dev
);
181 struct bfin_sir_port
*port
= self
->sir_port
;
183 if (self
->tx_buff
.len
!= 0) {
184 chr
= *(self
->tx_buff
.data
);
185 UART_PUT_CHAR(port
, chr
);
186 self
->tx_buff
.data
++;
189 self
->stats
.tx_packets
++;
190 self
->stats
.tx_bytes
+= self
->tx_buff
.data
- self
->tx_buff
.head
;
191 if (self
->newspeed
) {
192 bfin_sir_set_speed(port
, self
->newspeed
);
193 self
->speed
= self
->newspeed
;
196 bfin_sir_stop_tx(port
);
197 bfin_sir_enable_rx(port
);
199 netif_wake_queue(dev
);
203 static void bfin_sir_rx_chars(struct net_device
*dev
)
205 struct bfin_sir_self
*self
= netdev_priv(dev
);
206 struct bfin_sir_port
*port
= self
->sir_port
;
209 UART_CLEAR_LSR(port
);
210 ch
= UART_GET_CHAR(port
);
211 async_unwrap_char(dev
, &self
->stats
, &self
->rx_buff
, ch
);
212 dev
->last_rx
= jiffies
;
215 static irqreturn_t
bfin_sir_rx_int(int irq
, void *dev_id
)
217 struct net_device
*dev
= dev_id
;
218 struct bfin_sir_self
*self
= netdev_priv(dev
);
219 struct bfin_sir_port
*port
= self
->sir_port
;
221 spin_lock(&self
->lock
);
222 while ((UART_GET_LSR(port
) & DR
))
223 bfin_sir_rx_chars(dev
);
224 spin_unlock(&self
->lock
);
229 static irqreturn_t
bfin_sir_tx_int(int irq
, void *dev_id
)
231 struct net_device
*dev
= dev_id
;
232 struct bfin_sir_self
*self
= netdev_priv(dev
);
233 struct bfin_sir_port
*port
= self
->sir_port
;
235 spin_lock(&self
->lock
);
236 if (UART_GET_LSR(port
) & THRE
)
237 bfin_sir_tx_chars(dev
);
238 spin_unlock(&self
->lock
);
242 #endif /* CONFIG_SIR_BFIN_PIO */
244 #ifdef CONFIG_SIR_BFIN_DMA
245 static void bfin_sir_dma_tx_chars(struct net_device
*dev
)
247 struct bfin_sir_self
*self
= netdev_priv(dev
);
248 struct bfin_sir_port
*port
= self
->sir_port
;
254 if (self
->tx_buff
.len
== 0) {
255 self
->stats
.tx_packets
++;
256 if (self
->newspeed
) {
257 bfin_sir_set_speed(port
, self
->newspeed
);
258 self
->speed
= self
->newspeed
;
261 bfin_sir_enable_rx(port
);
263 netif_wake_queue(dev
);
267 blackfin_dcache_flush_range((unsigned long)(self
->tx_buff
.data
),
268 (unsigned long)(self
->tx_buff
.data
+self
->tx_buff
.len
));
269 set_dma_config(port
->tx_dma_channel
,
270 set_bfin_dma_config(DIR_READ
, DMA_FLOW_STOP
,
271 INTR_ON_BUF
, DIMENSION_LINEAR
, DATA_SIZE_8
,
273 set_dma_start_addr(port
->tx_dma_channel
,
274 (unsigned long)(self
->tx_buff
.data
));
275 set_dma_x_count(port
->tx_dma_channel
, self
->tx_buff
.len
);
276 set_dma_x_modify(port
->tx_dma_channel
, 1);
277 enable_dma(port
->tx_dma_channel
);
280 static irqreturn_t
bfin_sir_dma_tx_int(int irq
, void *dev_id
)
282 struct net_device
*dev
= dev_id
;
283 struct bfin_sir_self
*self
= netdev_priv(dev
);
284 struct bfin_sir_port
*port
= self
->sir_port
;
286 spin_lock(&self
->lock
);
287 if (!(get_dma_curr_irqstat(port
->tx_dma_channel
) & DMA_RUN
)) {
288 clear_dma_irqstat(port
->tx_dma_channel
);
289 bfin_sir_stop_tx(port
);
291 self
->stats
.tx_packets
++;
292 self
->stats
.tx_bytes
+= self
->tx_buff
.len
;
293 self
->tx_buff
.len
= 0;
294 if (self
->newspeed
) {
295 bfin_sir_set_speed(port
, self
->newspeed
);
296 self
->speed
= self
->newspeed
;
299 bfin_sir_enable_rx(port
);
301 netif_wake_queue(dev
);
304 spin_unlock(&self
->lock
);
309 static void bfin_sir_dma_rx_chars(struct net_device
*dev
)
311 struct bfin_sir_self
*self
= netdev_priv(dev
);
312 struct bfin_sir_port
*port
= self
->sir_port
;
315 UART_CLEAR_LSR(port
);
317 for (i
= port
->rx_dma_buf
.head
; i
< port
->rx_dma_buf
.tail
; i
++)
318 async_unwrap_char(dev
, &self
->stats
, &self
->rx_buff
, port
->rx_dma_buf
.buf
[i
]);
321 void bfin_sir_rx_dma_timeout(struct net_device
*dev
)
323 struct bfin_sir_self
*self
= netdev_priv(dev
);
324 struct bfin_sir_port
*port
= self
->sir_port
;
328 spin_lock_irqsave(&self
->lock
, flags
);
329 x_pos
= DMA_SIR_RX_XCNT
- get_dma_curr_xcount(port
->rx_dma_channel
);
330 if (x_pos
== DMA_SIR_RX_XCNT
)
333 pos
= port
->rx_dma_nrows
* DMA_SIR_RX_XCNT
+ x_pos
;
335 if (pos
> port
->rx_dma_buf
.tail
) {
336 port
->rx_dma_buf
.tail
= pos
;
337 bfin_sir_dma_rx_chars(dev
);
338 port
->rx_dma_buf
.head
= port
->rx_dma_buf
.tail
;
340 spin_unlock_irqrestore(&self
->lock
, flags
);
343 static irqreturn_t
bfin_sir_dma_rx_int(int irq
, void *dev_id
)
345 struct net_device
*dev
= dev_id
;
346 struct bfin_sir_self
*self
= netdev_priv(dev
);
347 struct bfin_sir_port
*port
= self
->sir_port
;
348 unsigned short irqstat
;
350 spin_lock(&self
->lock
);
352 port
->rx_dma_nrows
++;
353 port
->rx_dma_buf
.tail
= DMA_SIR_RX_XCNT
* port
->rx_dma_nrows
;
354 bfin_sir_dma_rx_chars(dev
);
355 if (port
->rx_dma_nrows
>= DMA_SIR_RX_YCNT
) {
356 port
->rx_dma_nrows
= 0;
357 port
->rx_dma_buf
.tail
= 0;
359 port
->rx_dma_buf
.head
= port
->rx_dma_buf
.tail
;
361 irqstat
= get_dma_curr_irqstat(port
->rx_dma_channel
);
362 clear_dma_irqstat(port
->rx_dma_channel
);
363 spin_unlock(&self
->lock
);
365 mod_timer(&port
->rx_dma_timer
, jiffies
+ DMA_SIR_RX_FLUSH_JIFS
);
368 #endif /* CONFIG_SIR_BFIN_DMA */
370 static int bfin_sir_startup(struct bfin_sir_port
*port
, struct net_device
*dev
)
372 #ifdef CONFIG_SIR_BFIN_DMA
373 dma_addr_t dma_handle
;
374 #endif /* CONFIG_SIR_BFIN_DMA */
376 if (request_dma(port
->rx_dma_channel
, "BFIN_UART_RX") < 0) {
377 dev_warn(&dev
->dev
, "Unable to attach SIR RX DMA channel\n");
381 if (request_dma(port
->tx_dma_channel
, "BFIN_UART_TX") < 0) {
382 dev_warn(&dev
->dev
, "Unable to attach SIR TX DMA channel\n");
383 free_dma(port
->rx_dma_channel
);
387 #ifdef CONFIG_SIR_BFIN_DMA
389 set_dma_callback(port
->rx_dma_channel
, bfin_sir_dma_rx_int
, dev
);
390 set_dma_callback(port
->tx_dma_channel
, bfin_sir_dma_tx_int
, dev
);
392 port
->rx_dma_buf
.buf
= dma_alloc_coherent(NULL
, PAGE_SIZE
,
393 &dma_handle
, GFP_DMA
);
394 port
->rx_dma_buf
.head
= 0;
395 port
->rx_dma_buf
.tail
= 0;
396 port
->rx_dma_nrows
= 0;
398 set_dma_config(port
->rx_dma_channel
,
399 set_bfin_dma_config(DIR_WRITE
, DMA_FLOW_AUTO
,
400 INTR_ON_ROW
, DIMENSION_2D
,
401 DATA_SIZE_8
, DMA_SYNC_RESTART
));
402 set_dma_x_count(port
->rx_dma_channel
, DMA_SIR_RX_XCNT
);
403 set_dma_x_modify(port
->rx_dma_channel
, 1);
404 set_dma_y_count(port
->rx_dma_channel
, DMA_SIR_RX_YCNT
);
405 set_dma_y_modify(port
->rx_dma_channel
, 1);
406 set_dma_start_addr(port
->rx_dma_channel
, (unsigned long)port
->rx_dma_buf
.buf
);
407 enable_dma(port
->rx_dma_channel
);
409 port
->rx_dma_timer
.data
= (unsigned long)(dev
);
410 port
->rx_dma_timer
.function
= (void *)bfin_sir_rx_dma_timeout
;
414 if (request_irq(port
->irq
, bfin_sir_rx_int
, 0, "BFIN_SIR_RX", dev
)) {
415 dev_warn(&dev
->dev
, "Unable to attach SIR RX interrupt\n");
419 if (request_irq(port
->irq
+1, bfin_sir_tx_int
, 0, "BFIN_SIR_TX", dev
)) {
420 dev_warn(&dev
->dev
, "Unable to attach SIR TX interrupt\n");
421 free_irq(port
->irq
, dev
);
429 static void bfin_sir_shutdown(struct bfin_sir_port
*port
, struct net_device
*dev
)
433 bfin_sir_stop_rx(port
);
435 val
= UART_GET_GCTL(port
);
436 val
&= ~(UCEN
| UMOD_MASK
| RPOLC
);
437 UART_PUT_GCTL(port
, val
);
439 #ifdef CONFIG_SIR_BFIN_DMA
440 disable_dma(port
->tx_dma_channel
);
441 disable_dma(port
->rx_dma_channel
);
442 del_timer(&(port
->rx_dma_timer
));
443 dma_free_coherent(NULL
, PAGE_SIZE
, port
->rx_dma_buf
.buf
, 0);
445 free_irq(port
->irq
+1, dev
);
446 free_irq(port
->irq
, dev
);
448 free_dma(port
->tx_dma_channel
);
449 free_dma(port
->rx_dma_channel
);
#ifdef CONFIG_PM
/*
 * Suspend: if the device is open, flush the pending TX work, shut the port
 * down and detach the netdev.
 *
 * NOTE(review): the CONFIG_PM conditional, the open-state checks and the
 * return statements were missing from the mangled source and have been
 * reconstructed — verify against the original driver.
 */
static int bfin_sir_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct bfin_sir_port *sir_port;
	struct net_device *dev;
	struct bfin_sir_self *self;

	sir_port = platform_get_drvdata(pdev);
	if (!sir_port)
		return 0;

	dev = sir_port->dev;
	self = netdev_priv(dev);
	if (self->open) {
		flush_work(&self->work);
		bfin_sir_shutdown(self->sir_port, dev);
		netif_device_detach(dev);
	}

	return 0;
}

/*
 * Resume: if the device was open, apply any speed change that was pending
 * at suspend, restart the port at 9600 baud, re-enable RX and re-attach
 * the netdev.
 */
static int bfin_sir_resume(struct platform_device *pdev)
{
	struct bfin_sir_port *sir_port;
	struct net_device *dev;
	struct bfin_sir_self *self;
	struct bfin_sir_port *port;

	sir_port = platform_get_drvdata(pdev);
	if (!sir_port)
		return 0;

	dev = sir_port->dev;
	self = netdev_priv(dev);
	port = self->sir_port;
	if (self->open) {
		if (self->newspeed) {
			self->speed = self->newspeed;
			self->newspeed = 0;
		}
		bfin_sir_startup(port, dev);
		bfin_sir_set_speed(port, 9600);
		bfin_sir_enable_rx(port);
		netif_device_attach(dev);
	}

	return 0;
}
#else
#define bfin_sir_suspend	NULL
#define bfin_sir_resume		NULL
#endif
504 static void bfin_sir_send_work(struct work_struct
*work
)
506 struct bfin_sir_self
*self
= container_of(work
, struct bfin_sir_self
, work
);
507 struct net_device
*dev
= self
->sir_port
->dev
;
508 struct bfin_sir_port
*port
= self
->sir_port
;
512 while (bfin_sir_is_receiving(dev
) && --tx_cnt
)
513 turnaround_delay(dev
->last_rx
, self
->mtt
);
515 bfin_sir_stop_rx(port
);
517 /* To avoid losting RX interrupt, we reset IR function before
518 * sending data. We also can set the speed, which will
519 * reset all the UART.
521 val
= UART_GET_GCTL(port
);
522 val
&= ~(UMOD_MASK
| RPOLC
);
523 UART_PUT_GCTL(port
, val
);
525 val
|= UMOD_IRDA
| RPOLC
;
526 UART_PUT_GCTL(port
, val
);
528 /* bfin_sir_set_speed(port, self->speed); */
530 #ifdef CONFIG_SIR_BFIN_DMA
531 bfin_sir_dma_tx_chars(dev
);
533 bfin_sir_enable_tx(port
);
534 dev
->trans_start
= jiffies
;
537 static int bfin_sir_hard_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
539 struct bfin_sir_self
*self
= netdev_priv(dev
);
540 int speed
= irda_get_next_speed(skb
);
542 netif_stop_queue(dev
);
544 self
->mtt
= irda_get_mtt(skb
);
546 if (speed
!= self
->speed
&& speed
!= -1)
547 self
->newspeed
= speed
;
549 self
->tx_buff
.data
= self
->tx_buff
.head
;
551 self
->tx_buff
.len
= 0;
553 self
->tx_buff
.len
= async_wrap_skb(skb
, self
->tx_buff
.data
, self
->tx_buff
.truesize
);
555 schedule_work(&self
->work
);
561 static int bfin_sir_ioctl(struct net_device
*dev
, struct ifreq
*ifreq
, int cmd
)
563 struct if_irda_req
*rq
= (struct if_irda_req
*)ifreq
;
564 struct bfin_sir_self
*self
= netdev_priv(dev
);
565 struct bfin_sir_port
*port
= self
->sir_port
;
570 if (capable(CAP_NET_ADMIN
)) {
572 ret
= bfin_sir_set_speed(port
, rq
->ifr_baudrate
);
573 bfin_sir_enable_rx(port
);
575 dev_warn(&dev
->dev
, "SIOCSBANDWIDTH: !netif_running\n");
583 if (capable(CAP_NET_ADMIN
)) {
584 irda_device_set_media_busy(dev
, TRUE
);
590 rq
->ifr_receiving
= bfin_sir_is_receiving(dev
);
601 static struct net_device_stats
*bfin_sir_stats(struct net_device
*dev
)
603 struct bfin_sir_self
*self
= netdev_priv(dev
);
608 static int bfin_sir_open(struct net_device
*dev
)
610 struct bfin_sir_self
*self
= netdev_priv(dev
);
611 struct bfin_sir_port
*port
= self
->sir_port
;
617 spin_lock_init(&self
->lock
);
619 err
= bfin_sir_startup(port
, dev
);
623 bfin_sir_set_speed(port
, 9600);
625 self
->irlap
= irlap_open(dev
, &self
->qos
, DRIVER_NAME
);
631 INIT_WORK(&self
->work
, bfin_sir_send_work
);
634 * Now enable the interrupt then start the queue
637 bfin_sir_enable_rx(port
);
639 netif_start_queue(dev
);
645 bfin_sir_shutdown(port
, dev
);
650 static int bfin_sir_stop(struct net_device
*dev
)
652 struct bfin_sir_self
*self
= netdev_priv(dev
);
654 flush_work(&self
->work
);
655 bfin_sir_shutdown(self
->sir_port
, dev
);
658 dev_kfree_skb(self
->rxskb
);
664 irlap_close(self
->irlap
);
668 netif_stop_queue(dev
);
674 static int bfin_sir_init_iobuf(iobuff_t
*io
, int size
)
676 io
->head
= kmalloc(size
, GFP_KERNEL
);
680 io
->in_frame
= FALSE
;
681 io
->state
= OUTSIDE_FRAME
;
686 static const struct net_device_ops bfin_sir_ndo
= {
687 .ndo_open
= bfin_sir_open
,
688 .ndo_stop
= bfin_sir_stop
,
689 .ndo_start_xmit
= bfin_sir_hard_xmit
,
690 .ndo_do_ioctl
= bfin_sir_ioctl
,
691 .ndo_get_stats
= bfin_sir_stats
,
694 static int bfin_sir_probe(struct platform_device
*pdev
)
696 struct net_device
*dev
;
697 struct bfin_sir_self
*self
;
698 unsigned int baudrate_mask
;
699 struct bfin_sir_port
*sir_port
;
702 if (pdev
->id
>= 0 && pdev
->id
< ARRAY_SIZE(per
) && \
703 per
[pdev
->id
][3] == pdev
->id
) {
704 err
= peripheral_request_list(per
[pdev
->id
], DRIVER_NAME
);
708 dev_err(&pdev
->dev
, "Invalid pdev id, please check board file\n");
713 sir_port
= kmalloc(sizeof(*sir_port
), GFP_KERNEL
);
717 bfin_sir_init_ports(sir_port
, pdev
);
719 dev
= alloc_irdadev(sizeof(*self
));
723 self
= netdev_priv(dev
);
724 self
->dev
= &pdev
->dev
;
725 self
->sir_port
= sir_port
;
728 err
= bfin_sir_init_iobuf(&self
->rx_buff
, IRDA_SKB_MAX_MTU
);
731 err
= bfin_sir_init_iobuf(&self
->tx_buff
, IRDA_SIR_MAX_FRAME
);
735 dev
->netdev_ops
= &bfin_sir_ndo
;
736 dev
->irq
= sir_port
->irq
;
738 irda_init_max_qos_capabilies(&self
->qos
);
740 baudrate_mask
= IR_9600
;
744 baudrate_mask
|= IR_115200
;
746 baudrate_mask
|= IR_57600
;
748 baudrate_mask
|= IR_38400
;
750 baudrate_mask
|= IR_19200
;
754 dev_warn(&pdev
->dev
, "Invalid maximum baud rate, using 9600\n");
757 self
->qos
.baud_rate
.bits
&= baudrate_mask
;
759 self
->qos
.min_turn_time
.bits
= 1; /* 10 ms or more */
761 irda_qos_bits_to_value(&self
->qos
);
763 err
= register_netdev(dev
);
766 kfree(self
->tx_buff
.head
);
768 kfree(self
->rx_buff
.head
);
774 peripheral_free_list(per
[pdev
->id
]);
776 platform_set_drvdata(pdev
, sir_port
);
781 static int bfin_sir_remove(struct platform_device
*pdev
)
783 struct bfin_sir_port
*sir_port
;
784 struct net_device
*dev
= NULL
;
785 struct bfin_sir_self
*self
;
787 sir_port
= platform_get_drvdata(pdev
);
791 self
= netdev_priv(dev
);
792 unregister_netdev(dev
);
793 kfree(self
->tx_buff
.head
);
794 kfree(self
->rx_buff
.head
);
801 static struct platform_driver bfin_ir_driver
= {
802 .probe
= bfin_sir_probe
,
803 .remove
= bfin_sir_remove
,
804 .suspend
= bfin_sir_suspend
,
805 .resume
= bfin_sir_resume
,
811 module_platform_driver(bfin_ir_driver
);
813 module_param(max_rate
, int, 0);
814 MODULE_PARM_DESC(max_rate
, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)");
816 MODULE_AUTHOR("Graf Yang <graf.yang@analog.com>");
817 MODULE_DESCRIPTION("Blackfin IrDA driver");
818 MODULE_LICENSE("GPL");