/*
 * SuperH IrDA Driver
 *
 * Copyright (C) 2009 Renesas Solutions Corp.
 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
 *
 * Based on bfin_sir.c
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
#include <asm/clock.h>

#define DRIVER_NAME "sh_sir"
#define RX_PHASE	(1 << 0)
#define TX_PHASE	(1 << 1)
#define TX_COMP_PHASE	(1 << 2) /* tx complete */
#define NONE_PHASE	(1 << 31)
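/*
 * Phase flow (see sh_sir_set_phase()/sh_sir_tx() below): a transmit starts
 * in TX_PHASE and feeds one byte per "transmit buffer empty" interrupt;
 * once tx_buff is drained it switches to TX_COMP_PHASE to wait for the
 * shift register to empty, then falls back to RX_PHASE and wakes the queue.
 */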
#define IRIF_RINTCLR	0x0016 /* DMA rx interrupt source clear */
#define IRIF_TINTCLR	0x0018 /* DMA tx interrupt source clear */
#define IRIF_SIR0	0x0020 /* IrDA-SIR10 control */
#define IRIF_SIR1	0x0022 /* IrDA-SIR10 baudrate error correction */
#define IRIF_SIR2	0x0024 /* IrDA-SIR10 baudrate count */
#define IRIF_SIR3	0x0026 /* IrDA-SIR10 status */
#define IRIF_SIR_FRM	0x0028 /* Hardware frame processing set */
#define IRIF_SIR_EOF	0x002A /* EOF value */
#define IRIF_SIR_FLG	0x002C /* Flag clear */
#define IRIF_UART_STS2	0x002E /* UART status 2 */
#define IRIF_UART0	0x0030 /* UART control */
#define IRIF_UART1	0x0032 /* UART status */
#define IRIF_UART2	0x0034 /* UART mode */
#define IRIF_UART3	0x0036 /* UART transmit data */
#define IRIF_UART4	0x0038 /* UART receive data */
#define IRIF_UART5	0x003A /* UART interrupt mask */
#define IRIF_UART6	0x003C /* UART baud rate error correction */
#define IRIF_UART7	0x003E /* UART baud rate count set */
#define IRIF_CRC0	0x0040 /* CRC engine control */
#define IRIF_CRC1	0x0042 /* CRC engine input data */
#define IRIF_CRC2	0x0044 /* CRC engine calculation */
#define IRIF_CRC3	0x0046 /* CRC engine output data 1 */
#define IRIF_CRC4	0x0048 /* CRC engine output data 2 */
/* IRIF_SIR0 */
#define IRTPW		(1 << 1) /* transmit pulse width select */
#define IRERRC		(1 << 0) /* Clear receive pulse width error */

/* IRIF_SIR3 */
#define IRERR		(1 << 0) /* received pulse width Error */

/* IRIF_SIR_FRM */
#define EOFD		(1 << 9) /* EOF detection flag */
#define FRER		(1 << 8) /* Frame Error bit */
#define FRP		(1 << 0) /* Frame processing set */

/* IRIF_UART_STS2 */
#define IRSME		(1 << 6) /* Receive Sum Error flag */
#define IROVE		(1 << 5) /* Receive Overrun Error flag */
#define IRFRE		(1 << 4) /* Receive Framing Error flag */
#define IRPRE		(1 << 3) /* Receive Parity Error flag */

/* IRIF_UART0 */
#define TBEC		(1 << 2) /* Transmit Data Clear */
#define RIE		(1 << 1) /* Receive Enable */
#define TIE		(1 << 0) /* Transmit Enable */

/* IRIF_UART1 */
#define URSME		(1 << 6) /* Receive Sum Error Flag */
#define UROVE		(1 << 5) /* Receive Overrun Error Flag */
#define URFRE		(1 << 4) /* Receive Framing Error Flag */
#define URPRE		(1 << 3) /* Receive Parity Error Flag */
#define RBF		(1 << 2) /* Receive Buffer Full Flag */
#define TSBE		(1 << 1) /* Transmit Shift Buffer Empty Flag */
#define TBE		(1 << 0) /* Transmit Buffer Empty flag */
#define TBCOMP		(TSBE | TBE)

/* IRIF_UART5 */
#define RSEIM		(1 << 6) /* Receive Sum Error Flag IRQ Mask */
#define RBFIM		(1 << 2) /* Receive Buffer Full Flag IRQ Mask */
#define TSBEIM		(1 << 1) /* Transmit Shift Buffer Empty Flag IRQ Mask */
#define TBEIM		(1 << 0) /* Transmit Buffer Empty Flag IRQ Mask */
#define RX_MASK		(RSEIM | RBFIM)

/* IRIF_CRC0 */
#define CRC_RST		(1 << 15) /* CRC Engine Reset */
#define CRC_CT_MASK	0x0FFF
/************************************************************************


			structure


************************************************************************/
struct sh_sir_self {
	void __iomem		*membase;
	unsigned int		 irq;
	struct clk		*clk;

	struct net_device	*ndev;

	struct irlap_cb		*irlap;
	struct qos_info		qos;

	iobuff_t		tx_buff;
	iobuff_t		rx_buff;
};
/************************************************************************


			common function


************************************************************************/
static void sh_sir_write(struct sh_sir_self *self, u32 offset, u16 data)
{
	iowrite16(data, self->membase + offset);
}

static u16 sh_sir_read(struct sh_sir_self *self, u32 offset)
{
	return ioread16(self->membase + offset);
}

static void sh_sir_update_bits(struct sh_sir_self *self, u32 offset,
			       u16 mask, u16 data)
{
	u16 old, new;

	old = sh_sir_read(self, offset);
	new = (old & ~mask) | data;
	if (old != new)
		sh_sir_write(self, offset, new);
}
/************************************************************************


			CRC function


************************************************************************/
static void sh_sir_crc_reset(struct sh_sir_self *self)
{
	sh_sir_write(self, IRIF_CRC0, CRC_RST);
}

static void sh_sir_crc_add(struct sh_sir_self *self, u8 data)
{
	sh_sir_write(self, IRIF_CRC1, (u16)data);
}

static u16 sh_sir_crc_cnt(struct sh_sir_self *self)
{
	return CRC_CT_MASK & sh_sir_read(self, IRIF_CRC0);
}

static u16 sh_sir_crc_out(struct sh_sir_self *self)
{
	return sh_sir_read(self, IRIF_CRC4);
}
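/*
 * sh_sir_crc_init() below is a self-test of the hardware CRC engine, run
 * from sh_sir_open(): it feeds a fixed 4-byte pattern (0xCC 0xF5 0xF1 0xA7)
 * into the engine and checks the byte count and CRC output before the
 * device is used.
 */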
static int sh_sir_crc_init(struct sh_sir_self *self)
{
	struct device *dev = &self->ndev->dev;
	int ret = -EIO;
	u16 val;

	sh_sir_crc_reset(self);

	sh_sir_crc_add(self, 0xCC);
	sh_sir_crc_add(self, 0xF5);
	sh_sir_crc_add(self, 0xF1);
	sh_sir_crc_add(self, 0xA7);

	val = sh_sir_crc_cnt(self);
	if (4 != val) {
		dev_err(dev, "CRC count error %x\n", val);
		goto crc_init_out;
	}

	val = sh_sir_crc_out(self);
	if (0x51DF != val) { /* expected CRC for the 4-byte test pattern */
		dev_err(dev, "CRC result error %x\n", val);
		goto crc_init_out;
	}

	ret = 0;

crc_init_out:
	sh_sir_crc_reset(self);

	return ret;
}
/************************************************************************


			baud rate functions


************************************************************************/
#define SCLK_BASE 1843200 /* 1.8432MHz */
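/*
 * 1843200 Hz is the classic UART crystal rate: 16 x 115200, i.e. a 16x
 * oversampling clock for the fastest 115.2 kbps SIR rate.
 */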
static u32 sh_sir_find_sclk(struct clk *irda_clk)
{
	struct cpufreq_frequency_table *freq_table = irda_clk->freq_table;
	struct clk *pclk = clk_get(NULL, "peripheral_clk");
	u32 limit, min = 0xffffffff, tmp;
	int i, index = 0;

	limit = clk_get_rate(pclk);
	clk_put(pclk);

	/* IrDA can not set over peripheral_clk */
	for (i = 0;
	     freq_table[i].frequency != CPUFREQ_TABLE_END;
	     i++) {
		u32 freq = freq_table[i].frequency;

		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;

		/* IrDA should not over peripheral_clk */
		if (freq > limit)
			continue;

		/* pick the frequency closest to a multiple of SCLK_BASE */
		tmp = freq % SCLK_BASE;
		if (tmp < min) {
			min = tmp;
			index = i;
		}
	}

	return freq_table[index].frequency;
}

#define ERR_ROUNDING(a) ((a + 5000) / 10000)
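/*
 * rate_err_array[] below holds correction fractions scaled by 10000
 * (0.0000 ... 0.9375). ERR_ROUNDING() turns such a scaled fraction back
 * into the nearest whole unit, e.g. ERR_ROUNDING(4375) = 0 and
 * ERR_ROUNDING(5625) = 1, so the selected correction can be folded into
 * the integer divider when checking the resulting frequency error.
 */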
static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate)
{
	struct clk *clk;
	struct device *dev = &self->ndev->dev;
	u32 rate;
	u16 uabca, uabc;
	u16 irbca, irbc;
	u32 min, rerr, tmp;
	int i;

	/* Baud Rate Error Correction x 10000 */
	u32 rate_err_array[] = {
		   0,  625, 1250, 1875,
		2500, 3125, 3750, 4375,
		5000, 5625, 6250, 6875,
		7500, 8125, 8750, 9375,
	};

	/*
	 * FIXME
	 *
	 * it supports 9600 only now
	 */
	switch (baudrate) {
	case 9600:
		break;
	default:
		dev_err(dev, "unsupported baudrate %d\n", baudrate);
		return -EIO;
	}

	clk = clk_get(NULL, "irda_clk");
	if (!clk) {
		dev_err(dev, "can not get irda_clk\n");
		return -EIO;
	}

	clk_set_rate(clk, sh_sir_find_sclk(clk));
	rate = clk_get_rate(clk);
	clk_put(clk);

	dev_dbg(dev, "selected sclk = %d\n", rate);

	/*
	 * CALCULATION
	 *
	 * 1843200 = system rate / (irbca + (irbc + 1))
	 */

	irbc = rate / SCLK_BASE;

	tmp = rate - (SCLK_BASE * irbc);
	tmp *= 10000;

	rerr = tmp / SCLK_BASE;

	min = 0xffffffff;
	irbca = 0;
	for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
		tmp = abs(rate_err_array[i] - rerr);
		if (min > tmp) {
			min = tmp;
			irbca = i;
		}
	}

	tmp = rate / (irbc + ERR_ROUNDING(rate_err_array[irbca]));
	if ((SCLK_BASE / 100) < abs(tmp - SCLK_BASE))
		dev_warn(dev, "IrDA freq error margin over %d\n", tmp);

	dev_dbg(dev, "target = %d, result = %d, infrared = %d.%d\n",
		SCLK_BASE, tmp, irbc, rate_err_array[irbca]);

	irbca = (irbca & 0xF) << 4;
	irbc  = (irbc - 1) & 0xF;

	if (!irbc) {
		dev_err(dev, "sh_sir can not set 0 in IRIF_SIR2\n");
		return -EIO;
	}

	sh_sir_write(self, IRIF_SIR0, IRTPW | IRERRC);
	sh_sir_write(self, IRIF_SIR1, irbca);
	sh_sir_write(self, IRIF_SIR2, irbc);

	/*
	 * CALCULATION
	 *
	 * BaudRate[bps] = system rate / (uabca + (uabc + 1) x 16)
	 */

	uabc = rate / baudrate;
	uabc = (uabc / 16) - 1;
	uabc = (uabc + 1) * 16;

	tmp = rate - (uabc * baudrate);
	tmp *= 10000;

	rerr = tmp / baudrate;

	min = 0xffffffff;
	uabca = 0;
	for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
		tmp = abs(rate_err_array[i] - rerr);
		if (min > tmp) {
			min = tmp;
			uabca = i;
		}
	}

	tmp = rate / (uabc + ERR_ROUNDING(rate_err_array[uabca]));
	if ((baudrate / 100) < abs(tmp - baudrate))
		dev_warn(dev, "UART freq error margin over %d\n", tmp);

	dev_dbg(dev, "target = %d, result = %d, uart = %d.%d\n",
		baudrate, tmp,
		uabc, rate_err_array[uabca]);

	uabca = (uabca & 0xF) << 4;
	uabc  = (uabc / 16) - 1;

	sh_sir_write(self, IRIF_UART6, uabca);
	sh_sir_write(self, IRIF_UART7, uabc);

	return 0;
}
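/*
 * Worked example of the IRIF_SIR divider math above, for an assumed
 * (hypothetical) sclk of 24 MHz: irbc = 24000000 / 1843200 = 13, the
 * remainder is 38400 Hz, so rerr = 38400 * 10000 / 1843200 = 208, whose
 * closest entry in rate_err_array[] is 0. The achieved rate is then
 * 24000000 / 13 = 1846153 Hz, about 3 kHz (well under 1%) off SCLK_BASE,
 * and the registers get IRIF_SIR1 = 0x00, IRIF_SIR2 = (13 - 1) & 0xF = 0xC.
 */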
/************************************************************************


			iobuf function


************************************************************************/
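/*
 * rx_buff/tx_buff are plain irda iobuff_t buffers: tx frames are wrapped
 * into tx_buff with async_wrap_skb() in sh_sir_hard_xmit(), and received
 * bytes are pushed through async_unwrap_char() into rx_buff in sh_sir_rx().
 */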
static int __sh_sir_init_iobuf(iobuff_t *io, int size)
{
	io->head = kmalloc(size, GFP_KERNEL);
	if (!io->head)
		return -ENOMEM;

	io->truesize	= size;
	io->in_frame	= FALSE;
	io->state	= OUTSIDE_FRAME;
	io->data	= io->head;

	return 0;
}
static void sh_sir_remove_iobuf(struct sh_sir_self *self)
{
	kfree(self->rx_buff.head);
	kfree(self->tx_buff.head);

	self->rx_buff.head = NULL;
	self->tx_buff.head = NULL;
}
static int sh_sir_init_iobuf(struct sh_sir_self *self, int rxsize, int txsize)
{
	int err = -ENOMEM;

	if (self->rx_buff.head ||
	    self->tx_buff.head) {
		dev_err(&self->ndev->dev, "iobuff already exists\n");
		return err;
	}

	err = __sh_sir_init_iobuf(&self->rx_buff, rxsize);
	if (err)
		goto iobuf_err;

	err = __sh_sir_init_iobuf(&self->tx_buff, txsize);

iobuf_err:
	if (err)
		sh_sir_remove_iobuf(self);

	return err;
}
/************************************************************************


			status function


************************************************************************/
static void sh_sir_clear_all_err(struct sh_sir_self *self)
{
	/* Clear error flag for receive pulse width */
	sh_sir_update_bits(self, IRIF_SIR0, IRERRC, IRERRC);

	/* Clear frame / EOF error flag */
	sh_sir_write(self, IRIF_SIR_FLG, 0xffff);

	/* Clear all status error */
	sh_sir_write(self, IRIF_UART_STS2, 0);
}
static void sh_sir_set_phase(struct sh_sir_self *self, int phase)
{
	u16 uart5;
	u16 uart0;

	switch (phase) {
	case TX_PHASE:
		uart5 = TBEIM;
		uart0 = TBEC | TIE;
		break;
	case TX_COMP_PHASE:
		uart5 = TSBEIM;
		uart0 = TIE;
		break;
	case RX_PHASE:
		uart5 = RX_MASK;
		uart0 = RIE;
		break;
	default:
		uart5 = 0;
		uart0 = 0;
		break;
	}

	sh_sir_write(self, IRIF_UART5, uart5);
	sh_sir_write(self, IRIF_UART0, uart0);
}
static int sh_sir_is_which_phase(struct sh_sir_self *self)
{
	u16 val = sh_sir_read(self, IRIF_UART5);

	if (val & TBEIM)
		return TX_PHASE;

	if (val & TSBEIM)
		return TX_COMP_PHASE;

	if (val & RX_MASK)
		return RX_PHASE;

	return NONE_PHASE;
}
static void sh_sir_tx(struct sh_sir_self *self, int phase)
{
	switch (phase) {
	case TX_PHASE:
		if (0 >= self->tx_buff.len) {
			sh_sir_set_phase(self, TX_COMP_PHASE);
		} else {
			sh_sir_write(self, IRIF_UART3, self->tx_buff.data[0]);
			self->tx_buff.len--;
			self->tx_buff.data++;
		}
		break;
	case TX_COMP_PHASE:
		sh_sir_set_phase(self, RX_PHASE);
		netif_wake_queue(self->ndev);
		break;
	default:
		dev_err(&self->ndev->dev, "should not happen\n");
		break;
	}
}
static int sh_sir_read_data(struct sh_sir_self *self)
{
	u16 val = 0;
	int timeout = 1024;

	while (timeout--) {
		val = sh_sir_read(self, IRIF_UART1);

		/* data get */
		if (val & RBF) {
			if (val & (URSME | UROVE | URFRE | URPRE))
				break;

			return (int)sh_sir_read(self, IRIF_UART4);
		}

		udelay(1);
	}

	dev_err(&self->ndev->dev, "UART1 %04x : STATUS %04x\n",
		val, sh_sir_read(self, IRIF_UART_STS2));

	/* read data register for clear error */
	sh_sir_read(self, IRIF_UART4);
	sh_sir_clear_all_err(self);

	return -1;
}
static void sh_sir_rx(struct sh_sir_self *self)
{
	int timeout = 1024;
	int data;

	while (timeout--) {
		data = sh_sir_read_data(self);
		if (data < 0)
			break;

		async_unwrap_char(self->ndev, &self->ndev->stats,
				  &self->rx_buff, (u8)data);
		self->ndev->last_rx = jiffies;

		if (EOFD & sh_sir_read(self, IRIF_SIR_FRM))
			continue;

		break;
	}
}
static irqreturn_t sh_sir_irq(int irq, void *dev_id)
{
	struct sh_sir_self *self = dev_id;
	struct device *dev = &self->ndev->dev;
	int phase = sh_sir_is_which_phase(self);

	switch (phase) {
	case TX_COMP_PHASE:
	case TX_PHASE:
		sh_sir_tx(self, phase);
		break;
	case RX_PHASE:
		if (sh_sir_read(self, IRIF_SIR3))
			dev_err(dev, "rcv pulse width error occurred\n");

		sh_sir_rx(self);
		sh_sir_clear_all_err(self);
		break;
	default:
		dev_err(dev, "unknown interrupt\n");
	}

	return IRQ_HANDLED;
}
/************************************************************************


			net_device_ops function


************************************************************************/
static int sh_sir_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_sir_self *self = netdev_priv(ndev);
	int speed = irda_get_next_speed(skb);

	if ((0 < speed) &&
	    (9600 != speed)) {
		dev_err(&ndev->dev, "supports 9600 only (%d)\n", speed);
		return -EIO;
	}

	netif_stop_queue(ndev);

	self->tx_buff.data = self->tx_buff.head;
	self->tx_buff.len = 0;
	if (skb->len)
		self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
						   self->tx_buff.truesize);

	sh_sir_set_phase(self, TX_PHASE);
	dev_kfree_skb(skb);

	return 0;
}
static int sh_sir_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
{
	/*
	 * FIXME
	 *
	 * This function is needed by the irda framework,
	 * but there is nothing to do here yet.
	 */
	return 0;
}
static struct net_device_stats *sh_sir_stats(struct net_device *ndev)
{
	struct sh_sir_self *self = netdev_priv(ndev);

	return &self->ndev->stats;
}
static int sh_sir_open(struct net_device *ndev)
{
	struct sh_sir_self *self = netdev_priv(ndev);
	int err;

	clk_enable(self->clk);
	err = sh_sir_crc_init(self);
	if (err)
		goto open_err;

	sh_sir_set_baudrate(self, 9600);

	self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
	if (!self->irlap) {
		err = -ENODEV;
		goto open_err;
	}

	/*
	 * Now enable the interrupt then start the queue
	 */
	sh_sir_update_bits(self, IRIF_SIR_FRM, FRP, FRP);
	sh_sir_read(self, IRIF_UART1); /* flag clear */
	sh_sir_read(self, IRIF_UART4); /* flag clear */
	sh_sir_set_phase(self, RX_PHASE);

	netif_start_queue(ndev);

	dev_info(&self->ndev->dev, "opened\n");

	return 0;

open_err:
	clk_disable(self->clk);

	return err;
}
static int sh_sir_stop(struct net_device *ndev)
{
	struct sh_sir_self *self = netdev_priv(ndev);

	/* Stop IrLAP */
	if (self->irlap) {
		irlap_close(self->irlap);
		self->irlap = NULL;
	}

	netif_stop_queue(ndev);

	dev_info(&ndev->dev, "stopped\n");

	return 0;
}
static const struct net_device_ops sh_sir_ndo = {
	.ndo_open		= sh_sir_open,
	.ndo_stop		= sh_sir_stop,
	.ndo_start_xmit		= sh_sir_hard_xmit,
	.ndo_do_ioctl		= sh_sir_ioctl,
	.ndo_get_stats		= sh_sir_stats,
};
/************************************************************************


			platform_driver function


************************************************************************/
static int __devinit sh_sir_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct sh_sir_self *self;
	struct resource *res;
	char clk_name[8];
	unsigned int irq;
	int err = -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || irq < 0) {
		dev_err(&pdev->dev, "Not enough platform resources.\n");
		goto exit;
	}

	ndev = alloc_irdadev(sizeof(*self));
	if (!ndev)
		goto exit;

	self = netdev_priv(ndev);
	self->membase = ioremap_nocache(res->start, resource_size(res));
	if (!self->membase) {
		err = -ENXIO;
		dev_err(&pdev->dev, "Unable to ioremap.\n");
		goto err_mem_1;
	}

	err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
	if (err)
		goto err_mem_2;

	snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id);
	self->clk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(self->clk)) {
		dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
		goto err_mem_3;
	}

	irda_init_max_qos_capabilies(&self->qos);

	ndev->netdev_ops	= &sh_sir_ndo;
	ndev->irq		= irq;

	self->ndev			= ndev;
	self->qos.baud_rate.bits	&= IR_9600; /* FIXME */
	self->qos.min_turn_time.bits	= 1; /* 10 ms or more */

	irda_qos_bits_to_value(&self->qos);

	err = register_netdev(ndev);
	if (err)
		goto err_mem_4;

	platform_set_drvdata(pdev, ndev);

	if (request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self)) {
		dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
		err = -EBUSY;
		unregister_netdev(ndev);
		goto err_mem_4;
	}

	dev_info(&pdev->dev, "SuperH IrDA probed\n");

	goto exit;

err_mem_4:
	clk_put(self->clk);
err_mem_3:
	sh_sir_remove_iobuf(self);
err_mem_2:
	iounmap(self->membase);
err_mem_1:
	free_netdev(ndev);
exit:
	return err;
}
static int __devexit sh_sir_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_sir_self *self = netdev_priv(ndev);

	if (!self)
		return 0;

	unregister_netdev(ndev);
	clk_put(self->clk);
	sh_sir_remove_iobuf(self);
	iounmap(self->membase);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
static struct platform_driver sh_sir_driver = {
	.probe	= sh_sir_probe,
	.remove	= __devexit_p(sh_sir_remove),
	.driver	= {
		.name	= DRIVER_NAME,
	},
};
static int __init sh_sir_init(void)
{
	return platform_driver_register(&sh_sir_driver);
}

static void __exit sh_sir_exit(void)
{
	platform_driver_unregister(&sh_sir_driver);
}

module_init(sh_sir_init);
module_exit(sh_sir_exit);
MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
MODULE_DESCRIPTION("SuperH IrDA driver");
MODULE_LICENSE("GPL");