/*
 * CAN bus driver for Bosch M_CAN controller
 *
 * Copyright (C) 2014 Freescale Semiconductor, Inc.
 *	Dong Aisheng <b29396@freescale.com>
 *
 * Bosch M_CAN user manual can be obtained from:
 * http://www.bosch-semiconductors.de/media/pdf_1/ipmodules_1/m_can/
 * mcan_users_manual_v302.pdf
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include <linux/can/dev.h>

#define M_CAN_NAPI_WEIGHT	64

/* message ram configuration data length */
#define MRAM_CFG_LEN	8

/* register definitions */

/* m_can lec values */

/* Fast Bit Timing & Prescaler Register (FBTP) */
#define FBTR_FBRP_MASK		0x1f
#define FBTR_FBRP_SHIFT		16
#define FBTR_FTSEG1_SHIFT	8
#define FBTR_FTSEG1_MASK	(0xf << FBTR_FTSEG1_SHIFT)
#define FBTR_FTSEG2_SHIFT	4
#define FBTR_FTSEG2_MASK	(0x7 << FBTR_FTSEG2_SHIFT)
#define FBTR_FSJW_SHIFT		0
#define FBTR_FSJW_MASK		0x3

/* Test Register (TEST) */
#define TEST_LBCK	BIT(4)

/* CC Control Register (CCCR) */
#define CCCR_TEST		BIT(7)
#define CCCR_CMR_MASK		0x3
#define CCCR_CMR_SHIFT		10
#define CCCR_CMR_CANFD		0x1
#define CCCR_CMR_CANFD_BRS	0x2
#define CCCR_CMR_CAN		0x3
#define CCCR_CME_MASK		0x3
#define CCCR_CME_SHIFT		8
#define CCCR_CME_CAN		0
#define CCCR_CME_CANFD		0x1
#define CCCR_CME_CANFD_BRS	0x2
#define CCCR_MON		BIT(5)
#define CCCR_CCE		BIT(1)
#define CCCR_INIT		BIT(0)
#define CCCR_CANFD		0x10

/* Bit Timing & Prescaler Register (BTP) */
#define BTR_BRP_MASK		0x3ff
#define BTR_BRP_SHIFT		16
#define BTR_TSEG1_SHIFT		8
#define BTR_TSEG1_MASK		(0x3f << BTR_TSEG1_SHIFT)
#define BTR_TSEG2_SHIFT		4
#define BTR_TSEG2_MASK		(0xf << BTR_TSEG2_SHIFT)
#define BTR_SJW_SHIFT		0
#define BTR_SJW_MASK		0xf

/* Error Counter Register (ECR) */
#define ECR_RP			BIT(15)
#define ECR_REC_SHIFT		8
#define ECR_REC_MASK		(0x7f << ECR_REC_SHIFT)
#define ECR_TEC_SHIFT		0
#define ECR_TEC_MASK		0xff

/* Protocol Status Register (PSR) */
#define PSR_BO		BIT(7)
#define PSR_EW		BIT(6)
#define PSR_EP		BIT(5)
#define PSR_LEC_MASK	0x7

/* Interrupt Register (IR) */
#define IR_ALL_INT	0xffffffff
#define IR_STE		BIT(31)
#define IR_FOE		BIT(30)
#define IR_ACKE		BIT(29)
#define IR_BE		BIT(28)
#define IR_CRCE		BIT(27)
#define IR_WDI		BIT(26)
#define IR_BO		BIT(25)
#define IR_EW		BIT(24)
#define IR_EP		BIT(23)
#define IR_ELO		BIT(22)
#define IR_BEU		BIT(21)
#define IR_BEC		BIT(20)
#define IR_DRX		BIT(19)
#define IR_TOO		BIT(18)
#define IR_MRAF		BIT(17)
#define IR_TSW		BIT(16)
#define IR_TEFL		BIT(15)
#define IR_TEFF		BIT(14)
#define IR_TEFW		BIT(13)
#define IR_TEFN		BIT(12)
#define IR_TFE		BIT(11)
#define IR_TCF		BIT(10)
#define IR_HPM		BIT(8)
#define IR_RF1L		BIT(7)
#define IR_RF1F		BIT(6)
#define IR_RF1W		BIT(5)
#define IR_RF1N		BIT(4)
#define IR_RF0L		BIT(3)
#define IR_RF0F		BIT(2)
#define IR_RF0W		BIT(1)
#define IR_RF0N		BIT(0)
#define IR_ERR_STATE	(IR_BO | IR_EW | IR_EP)
#define IR_ERR_LEC	(IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
#define IR_ERR_BUS	(IR_ERR_LEC | IR_WDI | IR_ELO | IR_BEU | \
			 IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
			 IR_RF1L | IR_RF0L)
#define IR_ERR_ALL	(IR_ERR_STATE | IR_ERR_BUS)

/* Interrupt Line Select (ILS) */
#define ILS_ALL_INT0	0x0
#define ILS_ALL_INT1	0xFFFFFFFF

/* Interrupt Line Enable (ILE) */
#define ILE_EINT0	BIT(0)
#define ILE_EINT1	BIT(1)

/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
#define RXFC_FWM_OFF	24
#define RXFC_FWM_MASK	0x7f
#define RXFC_FWM_1	(1 << RXFC_FWM_OFF)
#define RXFC_FS_OFF	16
#define RXFC_FS_MASK	0x7f

/* Rx FIFO 0/1 Status (RXF0S/RXF1S) */
#define RXFS_RFL	BIT(25)
#define RXFS_FF		BIT(24)
#define RXFS_FPI_OFF	16
#define RXFS_FPI_MASK	0x3f0000
#define RXFS_FGI_OFF	8
#define RXFS_FGI_MASK	0x3f00
#define RXFS_FFL_MASK	0x7f

/* Rx Buffer / FIFO Element Size Configuration (RXESC) */
#define M_CAN_RXESC_8BYTES	0x0
#define M_CAN_RXESC_64BYTES	0x777

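/*
 * Note: RXESC carries three element-size fields (F0DS, F1DS, RBDS); the
 * value 0x777 programs 0x7 into each of them, i.e. a 64-byte data field
 * for Rx FIFO 0, Rx FIFO 1 and the dedicated Rx buffers, which is what the
 * CAN FD receive path relies on (field layout per the M_CAN user manual
 * referenced in the header).
 */
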
/* Tx Buffer Configuration (TXBC) */
#define TXBC_NDTB_OFF		16
#define TXBC_NDTB_MASK		0x3f

/* Tx Buffer Element Size Configuration (TXESC) */
#define TXESC_TBDS_8BYTES	0x0
#define TXESC_TBDS_64BYTES	0x7

/* Tx Event FIFO Configuration (TXEFC) */
#define TXEFC_EFS_OFF	16
#define TXEFC_EFS_MASK	0x3f

/* Message RAM Configuration (in bytes) */
#define SIDF_ELEMENT_SIZE	4
#define XIDF_ELEMENT_SIZE	8
#define RXF0_ELEMENT_SIZE	72
#define RXF1_ELEMENT_SIZE	72
#define RXB_ELEMENT_SIZE	16
#define TXE_ELEMENT_SIZE	8
#define TXB_ELEMENT_SIZE	72

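/*
 * The section offsets inside the Message RAM are derived in
 * m_can_of_parse_mram() from the "bosch,mram-cfg" devicetree property,
 * which holds the start offset followed by the number of elements of each
 * section. For example, a property such as (values purely illustrative)
 *
 *	bosch,mram-cfg = <0x0 0 0 32 0 0 0 1>;
 *
 * would lay out 32 Rx FIFO 0 elements (32 * 72 bytes) immediately followed
 * by one Tx buffer element (72 bytes), with all other sections left empty.
 */
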
/* Message RAM Elements */
#define M_CAN_FIFO_ID		0x0
#define M_CAN_FIFO_DLC		0x4
#define M_CAN_FIFO_DATA(n)	(0x8 + ((n) << 2))

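/*
 * Each Rx/Tx element starts with two 32-bit words (identifier and flags at
 * offset 0x0, DLC and control bits at 0x4) followed by the data section;
 * M_CAN_FIFO_DATA(n) addresses the n-th 32-bit data word, e.g.
 * M_CAN_FIFO_DATA(0) is at element offset 0x8 and M_CAN_FIFO_DATA(1) at 0xc.
 */
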
/* Rx Buffer Element */
#define RX_BUF_ESI	BIT(31)
#define RX_BUF_XTD	BIT(30)
#define RX_BUF_RTR	BIT(29)
#define RX_BUF_ANMF	BIT(31)
#define RX_BUF_EDL	BIT(21)
#define RX_BUF_BRS	BIT(20)

/* Tx Buffer Element */
#define TX_BUF_XTD	BIT(30)
#define TX_BUF_RTR	BIT(29)

/* address offset and element number for each FIFO/Buffer in the Message RAM */

/* m_can private data structure */

	struct can_priv can;	/* must be the first member */
	struct napi_struct napi;
	struct net_device *dev;
	struct device *device;

	/* message ram configuration */
	void __iomem *mram_base;
	struct mram_cfg mcfg[MRAM_CFG_NUM];

static inline u32 m_can_read(const struct m_can_priv *priv, enum m_can_reg reg)
{
	return readl(priv->base + reg);
}

static inline void m_can_write(const struct m_can_priv *priv,
			       enum m_can_reg reg, u32 val)
{
	writel(val, priv->base + reg);
}

static inline u32 m_can_fifo_read(const struct m_can_priv *priv,
				  u32 fgi, unsigned int offset)
{
	return readl(priv->mram_base + priv->mcfg[MRAM_RXF0].off +
		     fgi * RXF0_ELEMENT_SIZE + offset);
}

static inline void m_can_fifo_write(const struct m_can_priv *priv,
				    u32 fpi, unsigned int offset, u32 val)
{
	writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off +
	       fpi * TXB_ELEMENT_SIZE + offset);
}

static inline void m_can_config_endisable(const struct m_can_priv *priv,
					  bool enable)
	u32 cccr = m_can_read(priv, M_CAN_CCCR);

		/* enable m_can configuration */
		m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT);
		/* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
		m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);

		m_can_write(priv, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));

	/* there's a delay for module initialization */
		val = CCCR_INIT | CCCR_CCE;

	while ((m_can_read(priv, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
			netdev_warn(priv->dev, "Failed to init module\n");

static inline void m_can_enable_all_interrupts(const struct m_can_priv *priv)
{
	m_can_write(priv, M_CAN_ILE, ILE_EINT0 | ILE_EINT1);
}

static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv)
{
	m_can_write(priv, M_CAN_ILE, 0x0);
}

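/*
 * Note: clearing ILE only masks the two module interrupt lines; the enable
 * bits in IE and any flags already latched in IR are left untouched, so
 * events that occur while NAPI polling runs with the lines disabled are
 * still picked up once the lines are re-enabled.
 */
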
static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
	struct net_device_stats *stats = &dev->stats;
	struct m_can_priv *priv = netdev_priv(dev);
	struct canfd_frame *cf;

	/* calculate the fifo get index for where to read data */
	fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_OFF;
	dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC);
	if (dlc & RX_BUF_EDL)
		skb = alloc_canfd_skb(dev, &cf);
		skb = alloc_can_skb(dev, (struct can_frame **)&cf);

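	/*
	 * The DLC field sits in bits [19:16] of the element's second word;
	 * for CAN FD frames (EDL set) it is translated via can_dlc2len()
	 * into the real payload length of up to 64 bytes, otherwise it is
	 * clamped to the classic CAN maximum of 8 by get_can_dlc().
	 */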
	if (dlc & RX_BUF_EDL)
		cf->len = can_dlc2len((dlc >> 16) & 0x0F);
		cf->len = get_can_dlc((dlc >> 16) & 0x0F);

	id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID);

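	/*
	 * A standard identifier is stored left-aligned in ID[28:18] of the
	 * element, hence the shift by 18 before masking with CAN_SFF_MASK;
	 * extended identifiers occupy ID[28:0] directly.
	 */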
		cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
		cf->can_id = (id >> 18) & CAN_SFF_MASK;

	if (id & RX_BUF_ESI) {
		cf->flags |= CANFD_ESI;
		netdev_dbg(dev, "ESI Error\n");

	if (!(dlc & RX_BUF_EDL) && (id & RX_BUF_RTR)) {
		cf->can_id |= CAN_RTR_FLAG;

		if (dlc & RX_BUF_BRS)
			cf->flags |= CANFD_BRS;

		for (i = 0; i < cf->len; i += 4)
			*(u32 *)(cf->data + i) =
				m_can_fifo_read(priv, fgi,
						M_CAN_FIFO_DATA(i / 4));

	/* acknowledge rx fifo 0 */
	m_can_write(priv, M_CAN_RXF0A, fgi);

	stats->rx_bytes += cf->len;

	netif_receive_skb(skb);

static int m_can_do_rx_poll(struct net_device *dev, int quota)
	struct m_can_priv *priv = netdev_priv(dev);

	rxfs = m_can_read(priv, M_CAN_RXF0S);
	if (!(rxfs & RXFS_FFL_MASK)) {
		netdev_dbg(dev, "no messages in fifo0\n");

	while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
			netdev_warn(dev, "Rx FIFO 0 Message Lost\n");

		m_can_read_fifo(dev, rxfs);

		rxfs = m_can_read(priv, M_CAN_RXF0S);

	can_led_event(dev, CAN_LED_EVENT_RX);

static int m_can_handle_lost_msg(struct net_device *dev)
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *frame;

	netdev_err(dev, "msg lost in rxf0\n");

	stats->rx_over_errors++;

	skb = alloc_can_err_skb(dev, &frame);

	frame->can_id |= CAN_ERR_CRTL;
	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	netif_receive_skb(skb);

static int m_can_handle_lec_err(struct net_device *dev,
				enum m_can_lec_type lec_type)
	struct m_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;

	priv->can.can_stats.bus_error++;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);

	/* check for 'last error code' which tells us the
	 * type of the last error to occur on the CAN bus
	 */
	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
	cf->data[2] |= CAN_ERR_PROT_UNSPEC;

	case LEC_STUFF_ERROR:
		netdev_dbg(dev, "stuff error\n");
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		netdev_dbg(dev, "form error\n");
		cf->data[2] |= CAN_ERR_PROT_FORM;
		netdev_dbg(dev, "ack error\n");
		cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
				CAN_ERR_PROT_LOC_ACK_DEL);
		netdev_dbg(dev, "bit1 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		netdev_dbg(dev, "bit0 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		netdev_dbg(dev, "CRC error\n");
		cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
				CAN_ERR_PROT_LOC_CRC_DEL);

	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);

static int __m_can_get_berr_counter(const struct net_device *dev,
				    struct can_berr_counter *bec)
	struct m_can_priv *priv = netdev_priv(dev);

	ecr = m_can_read(priv, M_CAN_ECR);
	bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;
	bec->txerr = ecr & ECR_TEC_MASK;

static int m_can_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
	struct m_can_priv *priv = netdev_priv(dev);

	err = clk_prepare_enable(priv->hclk);

	err = clk_prepare_enable(priv->cclk);
		clk_disable_unprepare(priv->hclk);

	__m_can_get_berr_counter(dev, bec);

	clk_disable_unprepare(priv->cclk);
	clk_disable_unprepare(priv->hclk);

static int m_can_handle_state_change(struct net_device *dev,
				     enum can_state new_state)
	struct m_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct can_berr_counter bec;

	case CAN_STATE_ERROR_ACTIVE:
		/* error warning state */
		priv->can.can_stats.error_warning++;
		priv->can.state = CAN_STATE_ERROR_WARNING;
	case CAN_STATE_ERROR_PASSIVE:
		/* error passive state */
		priv->can.can_stats.error_passive++;
		priv->can.state = CAN_STATE_ERROR_PASSIVE;
	case CAN_STATE_BUS_OFF:
		priv->can.state = CAN_STATE_BUS_OFF;
		m_can_disable_all_interrupts(priv);

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);

	__m_can_get_berr_counter(dev, &bec);

	case CAN_STATE_ERROR_ACTIVE:
		/* error warning state */
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = (bec.txerr > bec.rxerr) ?
			CAN_ERR_CRTL_TX_WARNING :
			CAN_ERR_CRTL_RX_WARNING;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
	case CAN_STATE_ERROR_PASSIVE:
		/* error passive state */
		cf->can_id |= CAN_ERR_CRTL;
		ecr = m_can_read(priv, M_CAN_ECR);
			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
	case CAN_STATE_BUS_OFF:
		cf->can_id |= CAN_ERR_BUSOFF;

	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);

static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
	struct m_can_priv *priv = netdev_priv(dev);

	if ((psr & PSR_EW) &&
	    (priv->can.state != CAN_STATE_ERROR_WARNING)) {
		netdev_dbg(dev, "entered error warning state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_ERROR_WARNING);

	if ((psr & PSR_EP) &&
	    (priv->can.state != CAN_STATE_ERROR_PASSIVE)) {
		netdev_dbg(dev, "entered error passive state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_ERROR_PASSIVE);

	if ((psr & PSR_BO) &&
	    (priv->can.state != CAN_STATE_BUS_OFF)) {
		netdev_dbg(dev, "entered error bus off state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_BUS_OFF);

static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
	if (irqstatus & IR_WDI)
		netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
	if (irqstatus & IR_ELO)
		netdev_err(dev, "Error Logging Overflow\n");
	if (irqstatus & IR_BEU)
		netdev_err(dev, "Bit Error Uncorrected\n");
	if (irqstatus & IR_BEC)
		netdev_err(dev, "Bit Error Corrected\n");
	if (irqstatus & IR_TOO)
		netdev_err(dev, "Timeout reached\n");
	if (irqstatus & IR_MRAF)
		netdev_err(dev, "Message RAM access failure occurred\n");

static inline bool is_lec_err(u32 psr)
	return psr && (psr != LEC_UNUSED);

static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
				   u32 psr)
	struct m_can_priv *priv = netdev_priv(dev);

	if (irqstatus & IR_RF0L)
		work_done += m_can_handle_lost_msg(dev);

	/* handle lec errors on the bus */
	if ((priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
	    is_lec_err(psr))
		work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED);

	/* other unprocessed error interrupts */
	m_can_handle_other_err(dev, irqstatus);

static int m_can_poll(struct napi_struct *napi, int quota)
	struct net_device *dev = napi->dev;
	struct m_can_priv *priv = netdev_priv(dev);

	irqstatus = priv->irqstatus | m_can_read(priv, M_CAN_IR);

	psr = m_can_read(priv, M_CAN_PSR);
	if (irqstatus & IR_ERR_STATE)
		work_done += m_can_handle_state_errors(dev, psr);

	if (irqstatus & IR_ERR_BUS)
		work_done += m_can_handle_bus_errors(dev, irqstatus, psr);

	if (irqstatus & IR_RF0N)
		work_done += m_can_do_rx_poll(dev, (quota - work_done));

	if (work_done < quota) {
		m_can_enable_all_interrupts(priv);

static irqreturn_t m_can_isr(int irq, void *dev_id)
	struct net_device *dev = (struct net_device *)dev_id;
	struct m_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;

	ir = m_can_read(priv, M_CAN_IR);

	m_can_write(priv, M_CAN_IR, ir);

	/* schedule NAPI in case of
	 * - bus error IRQ and bus error reporting
	 */
	if ((ir & IR_RF0N) || (ir & IR_ERR_ALL)) {
		priv->irqstatus = ir;
		m_can_disable_all_interrupts(priv);
		napi_schedule(&priv->napi);

	/* transmission complete interrupt */
		stats->tx_bytes += can_get_echo_skb(dev, 0);
		can_led_event(dev, CAN_LED_EVENT_TX);
		netif_wake_queue(dev);

static const struct can_bittiming_const m_can_bittiming_const = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */

static const struct can_bittiming_const m_can_data_bittiming_const = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */

static int m_can_set_bittiming(struct net_device *dev)
	struct m_can_priv *priv = netdev_priv(dev);
	const struct can_bittiming *bt = &priv->can.bittiming;
	const struct can_bittiming *dbt = &priv->can.data_bittiming;
	u16 brp, sjw, tseg1, tseg2;

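	/*
	 * The BTP/FBTP fields expect the programmed value minus one, hence
	 * the "- 1" on the time segments below; the same convention applies
	 * to the prescaler and sync jump width. E.g. a nominal setting of
	 * brp=2, sjw=1, tseg1=13, tseg2=2 (illustrative values) ends up in
	 * the register as 1/0/12/1.
	 */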
	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
	tseg2 = bt->phase_seg2 - 1;
	reg_btp = (brp << BTR_BRP_SHIFT) | (sjw << BTR_SJW_SHIFT) |
		  (tseg1 << BTR_TSEG1_SHIFT) | (tseg2 << BTR_TSEG2_SHIFT);
	m_can_write(priv, M_CAN_BTP, reg_btp);

	if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
		tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
		tseg2 = dbt->phase_seg2 - 1;
		reg_btp = (brp << FBTR_FBRP_SHIFT) | (sjw << FBTR_FSJW_SHIFT) |
			  (tseg1 << FBTR_FTSEG1_SHIFT) |
			  (tseg2 << FBTR_FTSEG2_SHIFT);
		m_can_write(priv, M_CAN_FBTP, reg_btp);

/* Configure M_CAN chip:
 * - set rx buffer/fifo element size
 * - configure rx fifo
 * - accept non-matching frame into fifo 0
 * - configure tx buffer
 */
static void m_can_chip_config(struct net_device *dev)
	struct m_can_priv *priv = netdev_priv(dev);

	m_can_config_endisable(priv, true);

	/* RX Buffer/FIFO Element Size 64 bytes data field */
	m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_64BYTES);

	/* Accept Non-matching Frames Into FIFO 0 */
	m_can_write(priv, M_CAN_GFC, 0x0);

	/* only support one Tx Buffer currently */
	m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_OFF) |
		    priv->mcfg[MRAM_TXB].off);

	/* support 64 bytes payload */
	m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES);

	m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_OFF) |
		    priv->mcfg[MRAM_TXE].off);

	/* rx fifo configuration, blocking mode, fifo size 1 */
	m_can_write(priv, M_CAN_RXF0C,
		    (priv->mcfg[MRAM_RXF0].num << RXFC_FS_OFF) |
		    RXFC_FWM_1 | priv->mcfg[MRAM_RXF0].off);

	m_can_write(priv, M_CAN_RXF1C,
		    (priv->mcfg[MRAM_RXF1].num << RXFC_FS_OFF) |
		    RXFC_FWM_1 | priv->mcfg[MRAM_RXF1].off);

	cccr = m_can_read(priv, M_CAN_CCCR);
	cccr &= ~(CCCR_TEST | CCCR_MON | (CCCR_CMR_MASK << CCCR_CMR_SHIFT) |
		  (CCCR_CME_MASK << CCCR_CME_SHIFT));
	test = m_can_read(priv, M_CAN_TEST);

	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)

	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {

	if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
		cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT;

	m_can_write(priv, M_CAN_CCCR, cccr);
	m_can_write(priv, M_CAN_TEST, test);

	/* enable interrupts */
	m_can_write(priv, M_CAN_IR, IR_ALL_INT);
	if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
		m_can_write(priv, M_CAN_IE, IR_ALL_INT & ~IR_ERR_LEC);
		m_can_write(priv, M_CAN_IE, IR_ALL_INT);

	/* route all interrupts to INT0 */
	m_can_write(priv, M_CAN_ILS, ILS_ALL_INT0);
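	/*
	 * ILS_ALL_INT0 leaves every interrupt source on m_can interrupt
	 * line 0, which matches the single "int0" IRQ requested by the
	 * platform probe code; line 1 is enabled but ends up unused.
	 */
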
	/* set bittiming params */
	m_can_set_bittiming(dev);

	m_can_config_endisable(priv, false);

static void m_can_start(struct net_device *dev)
	struct m_can_priv *priv = netdev_priv(dev);

	/* basic m_can configuration */
	m_can_chip_config(dev);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	m_can_enable_all_interrupts(priv);

static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
		netif_wake_queue(dev);

static void free_m_can_dev(struct net_device *dev)

static struct net_device *alloc_m_can_dev(void)
	struct net_device *dev;
	struct m_can_priv *priv;

	dev = alloc_candev(sizeof(*priv), 1);

	priv = netdev_priv(dev);
	netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT);

	priv->can.bittiming_const = &m_can_bittiming_const;
	priv->can.data_bittiming_const = &m_can_data_bittiming_const;
	priv->can.do_set_mode = m_can_set_mode;
	priv->can.do_get_berr_counter = m_can_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
					CAN_CTRLMODE_LISTENONLY |
					CAN_CTRLMODE_BERR_REPORTING |
					CAN_CTRLMODE_FD;

static int m_can_open(struct net_device *dev)
	struct m_can_priv *priv = netdev_priv(dev);

	err = clk_prepare_enable(priv->hclk);

	err = clk_prepare_enable(priv->cclk);
		goto exit_disable_hclk;

	/* open the can device */
	err = open_candev(dev);
		netdev_err(dev, "failed to open can device\n");
		goto exit_disable_cclk;

	/* register interrupt handler */
	err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
			  dev);
		netdev_err(dev, "failed to request interrupt\n");

	/* start the m_can controller */

	can_led_event(dev, CAN_LED_EVENT_OPEN);
	napi_enable(&priv->napi);
	netif_start_queue(dev);

	clk_disable_unprepare(priv->cclk);
	clk_disable_unprepare(priv->hclk);

static void m_can_stop(struct net_device *dev)
	struct m_can_priv *priv = netdev_priv(dev);

	/* disable all interrupts */
	m_can_disable_all_interrupts(priv);

	clk_disable_unprepare(priv->hclk);
	clk_disable_unprepare(priv->cclk);

	/* set the state as STOPPED */
	priv->can.state = CAN_STATE_STOPPED;

static int m_can_close(struct net_device *dev)
	struct m_can_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	free_irq(dev->irq, dev);

	can_led_event(dev, CAN_LED_EVENT_STOP);

static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
	struct m_can_priv *priv = netdev_priv(dev);
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	netif_stop_queue(dev);

	if (cf->can_id & CAN_EFF_FLAG) {
		id = cf->can_id & CAN_EFF_MASK;
		id = ((cf->can_id & CAN_SFF_MASK) << 18);

	if (cf->can_id & CAN_RTR_FLAG)

	/* message ram configuration */
	m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id);
	m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, can_len2dlc(cf->len) << 16);

	for (i = 0; i < cf->len; i += 4)
		m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(i / 4),
				 *(u32 *)(cf->data + i));

	can_put_echo_skb(skb, dev, 0);

	if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
		cccr = m_can_read(priv, M_CAN_CCCR);
		cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT);
		if (can_is_canfd_skb(skb)) {
			if (cf->flags & CANFD_BRS)
				cccr |= CCCR_CMR_CANFD_BRS << CCCR_CMR_SHIFT;
				cccr |= CCCR_CMR_CANFD << CCCR_CMR_SHIFT;
			cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT;
		m_can_write(priv, M_CAN_CCCR, cccr);

	/* enable first TX buffer to start transfer */
	m_can_write(priv, M_CAN_TXBTIE, 0x1);
	m_can_write(priv, M_CAN_TXBAR, 0x1);
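	/*
	 * Writing bit 0 of TXBAR requests transmission from Tx buffer 0, the
	 * only buffer configured in TXBC; TXBTIE bit 0 enables the matching
	 * transmission-complete interrupt, which is where the echo skb
	 * queued with index 0 is released and the queue is woken again.
	 */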

	return NETDEV_TX_OK;

static const struct net_device_ops m_can_netdev_ops = {
	.ndo_open = m_can_open,
	.ndo_stop = m_can_close,
	.ndo_start_xmit = m_can_start_xmit,
	.ndo_change_mtu = can_change_mtu,

static int register_m_can_dev(struct net_device *dev)
	dev->flags |= IFF_ECHO;	/* we support local echo */
	dev->netdev_ops = &m_can_netdev_ops;

	return register_candev(dev);

static int m_can_of_parse_mram(struct platform_device *pdev,
			       struct m_can_priv *priv)
	struct device_node *np = pdev->dev.of_node;
	struct resource *res;
	u32 out_val[MRAM_CFG_LEN];
	int i, start, end, ret;

	/* message ram could be shared */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");

	addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));

	/* get message ram configuration */
	ret = of_property_read_u32_array(np, "bosch,mram-cfg",
					 out_val, sizeof(out_val) / 4);
		dev_err(&pdev->dev, "can not get message ram configuration\n");

	priv->mram_base = addr;
	priv->mcfg[MRAM_SIDF].off = out_val[0];
	priv->mcfg[MRAM_SIDF].num = out_val[1];
	priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off +
			priv->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
	priv->mcfg[MRAM_XIDF].num = out_val[2];
	priv->mcfg[MRAM_RXF0].off = priv->mcfg[MRAM_XIDF].off +
			priv->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
	priv->mcfg[MRAM_RXF0].num = out_val[3] & RXFC_FS_MASK;
	priv->mcfg[MRAM_RXF1].off = priv->mcfg[MRAM_RXF0].off +
			priv->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
	priv->mcfg[MRAM_RXF1].num = out_val[4] & RXFC_FS_MASK;
	priv->mcfg[MRAM_RXB].off = priv->mcfg[MRAM_RXF1].off +
			priv->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
	priv->mcfg[MRAM_RXB].num = out_val[5];
	priv->mcfg[MRAM_TXE].off = priv->mcfg[MRAM_RXB].off +
			priv->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
	priv->mcfg[MRAM_TXE].num = out_val[6];
	priv->mcfg[MRAM_TXB].off = priv->mcfg[MRAM_TXE].off +
			priv->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
	priv->mcfg[MRAM_TXB].num = out_val[7] & TXBC_NDTB_MASK;

	dev_dbg(&pdev->dev, "mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
		priv->mram_base,
		priv->mcfg[MRAM_SIDF].off, priv->mcfg[MRAM_SIDF].num,
		priv->mcfg[MRAM_XIDF].off, priv->mcfg[MRAM_XIDF].num,
		priv->mcfg[MRAM_RXF0].off, priv->mcfg[MRAM_RXF0].num,
		priv->mcfg[MRAM_RXF1].off, priv->mcfg[MRAM_RXF1].num,
		priv->mcfg[MRAM_RXB].off, priv->mcfg[MRAM_RXB].num,
		priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num,
		priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num);

	/* initialize the entire Message RAM in use to avoid possible
	 * ECC/parity checksum errors when reading an uninitialized buffer
	 */
	start = priv->mcfg[MRAM_SIDF].off;
	end = priv->mcfg[MRAM_TXB].off +
		priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
	for (i = start; i < end; i += 4)
		writel(0x0, priv->mram_base + i);

static int m_can_plat_probe(struct platform_device *pdev)
	struct net_device *dev;
	struct m_can_priv *priv;
	struct resource *res;
	struct clk *hclk, *cclk;

	hclk = devm_clk_get(&pdev->dev, "hclk");
	cclk = devm_clk_get(&pdev->dev, "cclk");
	if (IS_ERR(hclk) || IS_ERR(cclk)) {
		dev_err(&pdev->dev, "no clock found\n");

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
	addr = devm_ioremap_resource(&pdev->dev, res);
	irq = platform_get_irq_byname(pdev, "int0");
	if (IS_ERR(addr) || irq < 0)

	/* allocate the m_can device */
	dev = alloc_m_can_dev();

	priv = netdev_priv(dev);

	priv->device = &pdev->dev;

	priv->can.clock.freq = clk_get_rate(cclk);

	ret = m_can_of_parse_mram(pdev, priv);
		goto failed_free_dev;

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = register_m_can_dev(dev);
		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
			KBUILD_MODNAME, ret);
		goto failed_free_dev;

	devm_can_led_init(dev);

	dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
		 KBUILD_MODNAME, priv->base, dev->irq);

	free_m_can_dev(dev);

static __maybe_unused int m_can_suspend(struct device *dev)
	struct net_device *ndev = dev_get_drvdata(dev);
	struct m_can_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netif_stop_queue(ndev);
		netif_device_detach(ndev);

	/* TODO: enter low power */

	priv->can.state = CAN_STATE_SLEEPING;

static __maybe_unused int m_can_resume(struct device *dev)
	struct net_device *ndev = dev_get_drvdata(dev);
	struct m_can_priv *priv = netdev_priv(ndev);

	/* TODO: exit low power */

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	if (netif_running(ndev)) {
		netif_device_attach(ndev);
		netif_start_queue(ndev);

static void unregister_m_can_dev(struct net_device *dev)
{
	unregister_candev(dev);
}

static int m_can_plat_remove(struct platform_device *pdev)
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_m_can_dev(dev);
	platform_set_drvdata(pdev, NULL);

	free_m_can_dev(dev);

static const struct dev_pm_ops m_can_pmops = {
	SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume)

static const struct of_device_id m_can_of_table[] = {
	{ .compatible = "bosch,m_can", .data = NULL },

MODULE_DEVICE_TABLE(of, m_can_of_table);

static struct platform_driver m_can_plat_driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = m_can_of_table,
	.probe = m_can_plat_probe,
	.remove = m_can_plat_remove,

module_platform_driver(m_can_plat_driver);

MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");