/* CAN bus driver for Bosch M_CAN controller
 *
 * Copyright (C) 2014 Freescale Semiconductor, Inc.
 *	Dong Aisheng <b29396@freescale.com>
 *
 * Bosch M_CAN user manual can be obtained from:
 * http://www.bosch-semiconductors.de/media/pdf_1/ipmodules_1/m_can/
 * mcan_users_manual_v302.pdf
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/iopoll.h>
#include <linux/can/dev.h>
#include <linux/pinctrl/consumer.h>
#define M_CAN_NAPI_WEIGHT	64

/* message ram configuration data length */
#define MRAM_CFG_LEN	8

/* registers definition */

/* TDCR Register only available for version >= 3.1.x */

/* m_can lec values */
enum m_can_mram_cfg {
	MRAM_SIDF = 0,
	MRAM_XIDF,
	MRAM_RXF0,
	MRAM_RXF1,
	MRAM_RXB,
	MRAM_TXE,
	MRAM_TXB,
	MRAM_CFG_NUM,
};
/* Core Release Register (CREL) */
#define CREL_REL_SHIFT		28
#define CREL_REL_MASK		(0xF << CREL_REL_SHIFT)
#define CREL_STEP_SHIFT		24
#define CREL_STEP_MASK		(0xF << CREL_STEP_SHIFT)
#define CREL_SUBSTEP_SHIFT	20
#define CREL_SUBSTEP_MASK	(0xF << CREL_SUBSTEP_SHIFT)

/* Data Bit Timing & Prescaler Register (DBTP) */
#define DBTP_TDC		BIT(23)
#define DBTP_DBRP_SHIFT		16
#define DBTP_DBRP_MASK		(0x1f << DBTP_DBRP_SHIFT)
#define DBTP_DTSEG1_SHIFT	8
#define DBTP_DTSEG1_MASK	(0x1f << DBTP_DTSEG1_SHIFT)
#define DBTP_DTSEG2_SHIFT	4
#define DBTP_DTSEG2_MASK	(0xf << DBTP_DTSEG2_SHIFT)
#define DBTP_DSJW_SHIFT		0
#define DBTP_DSJW_MASK		(0xf << DBTP_DSJW_SHIFT)

/* Transmitter Delay Compensation Register (TDCR) */
#define TDCR_TDCO_SHIFT		8
#define TDCR_TDCO_MASK		(0x7F << TDCR_TDCO_SHIFT)
#define TDCR_TDCF_SHIFT		0
#define TDCR_TDCF_MASK		(0x7F << TDCR_TDCF_SHIFT)

/* Test Register (TEST) */
#define TEST_LBCK		BIT(4)

/* CC Control Register (CCCR) */
#define CCCR_CMR_MASK		0x3
#define CCCR_CMR_SHIFT		10
#define CCCR_CMR_CANFD		0x1
#define CCCR_CMR_CANFD_BRS	0x2
#define CCCR_CMR_CAN		0x3
#define CCCR_CME_MASK		0x3
#define CCCR_CME_SHIFT		8
#define CCCR_CME_CAN		0
#define CCCR_CME_CANFD		0x1
#define CCCR_CME_CANFD_BRS	0x2
#define CCCR_TXP		BIT(14)
#define CCCR_TEST		BIT(7)
#define CCCR_MON		BIT(5)
#define CCCR_CSR		BIT(4)
#define CCCR_CSA		BIT(3)
#define CCCR_ASM		BIT(2)
#define CCCR_CCE		BIT(1)
#define CCCR_INIT		BIT(0)
#define CCCR_CANFD		0x10
/* for version >= 3.1.x */
#define CCCR_EFBI		BIT(13)
#define CCCR_PXHD		BIT(12)
#define CCCR_BRSE		BIT(9)
#define CCCR_FDOE		BIT(8)
/* only for version >= 3.2.x */
#define CCCR_NISO		BIT(15)
/* Nominal Bit Timing & Prescaler Register (NBTP) */
#define NBTP_NSJW_SHIFT		25
#define NBTP_NSJW_MASK		(0x7f << NBTP_NSJW_SHIFT)
#define NBTP_NBRP_SHIFT		16
#define NBTP_NBRP_MASK		(0x1ff << NBTP_NBRP_SHIFT)
#define NBTP_NTSEG1_SHIFT	8
#define NBTP_NTSEG1_MASK	(0xff << NBTP_NTSEG1_SHIFT)
#define NBTP_NTSEG2_SHIFT	0
#define NBTP_NTSEG2_MASK	(0x7f << NBTP_NTSEG2_SHIFT)

/* Error Counter Register (ECR) */
#define ECR_RP			BIT(15)
#define ECR_REC_SHIFT		8
#define ECR_REC_MASK		(0x7f << ECR_REC_SHIFT)
#define ECR_TEC_SHIFT		0
#define ECR_TEC_MASK		0xff

/* Protocol Status Register (PSR) */
#define PSR_BO			BIT(7)
#define PSR_EW			BIT(6)
#define PSR_EP			BIT(5)
#define PSR_LEC_MASK		0x7

/* Interrupt Register (IR) */
#define IR_ALL_INT		0xffffffff
/* Renamed bits for versions > 3.1.x */
#define IR_ARA			BIT(29)
#define IR_PED			BIT(28)
#define IR_PEA			BIT(27)

/* Bits for version 3.0.x */
#define IR_STE			BIT(31)
#define IR_FOE			BIT(30)
#define IR_ACKE			BIT(29)
#define IR_BE			BIT(28)
#define IR_CRCE			BIT(27)
#define IR_WDI			BIT(26)
#define IR_BO			BIT(25)
#define IR_EW			BIT(24)
#define IR_EP			BIT(23)
#define IR_ELO			BIT(22)
#define IR_BEU			BIT(21)
#define IR_BEC			BIT(20)
#define IR_DRX			BIT(19)
#define IR_TOO			BIT(18)
#define IR_MRAF			BIT(17)
#define IR_TSW			BIT(16)
#define IR_TEFL			BIT(15)
#define IR_TEFF			BIT(14)
#define IR_TEFW			BIT(13)
#define IR_TEFN			BIT(12)
#define IR_TFE			BIT(11)
#define IR_TCF			BIT(10)
#define IR_TC			BIT(9)
#define IR_HPM			BIT(8)
#define IR_RF1L			BIT(7)
#define IR_RF1F			BIT(6)
#define IR_RF1W			BIT(5)
#define IR_RF1N			BIT(4)
#define IR_RF0L			BIT(3)
#define IR_RF0F			BIT(2)
#define IR_RF0W			BIT(1)
#define IR_RF0N			BIT(0)
#define IR_ERR_STATE		(IR_BO | IR_EW | IR_EP)

/* Interrupts for version 3.0.x */
#define IR_ERR_LEC_30X	(IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
#define IR_ERR_BUS_30X	(IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \
			 IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
			 IR_RF1L | IR_RF0L)
#define IR_ERR_ALL_30X	(IR_ERR_STATE | IR_ERR_BUS_30X)
/* Interrupts for version >= 3.1.x */
#define IR_ERR_LEC_31X	(IR_PED | IR_PEA)
#define IR_ERR_BUS_31X	(IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \
			 IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
			 IR_RF1L | IR_RF0L)
#define IR_ERR_ALL_31X	(IR_ERR_STATE | IR_ERR_BUS_31X)
/* Interrupt Line Select (ILS) */
#define ILS_ALL_INT0		0x0
#define ILS_ALL_INT1		0xFFFFFFFF

/* Interrupt Line Enable (ILE) */
#define ILE_EINT1		BIT(1)
#define ILE_EINT0		BIT(0)

/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
#define RXFC_FWM_SHIFT		24
#define RXFC_FWM_MASK		(0x7f << RXFC_FWM_SHIFT)
#define RXFC_FS_SHIFT		16
#define RXFC_FS_MASK		(0x7f << RXFC_FS_SHIFT)

/* Rx FIFO 0/1 Status (RXF0S/RXF1S) */
#define RXFS_RFL		BIT(25)
#define RXFS_FF			BIT(24)
#define RXFS_FPI_SHIFT		16
#define RXFS_FPI_MASK		0x3f0000
#define RXFS_FGI_SHIFT		8
#define RXFS_FGI_MASK		0x3f00
#define RXFS_FFL_MASK		0x7f

/* Rx Buffer / FIFO Element Size Configuration (RXESC) */
#define M_CAN_RXESC_8BYTES	0x0
#define M_CAN_RXESC_64BYTES	0x777

/* Tx Buffer Configuration (TXBC) */
#define TXBC_NDTB_SHIFT		16
#define TXBC_NDTB_MASK		(0x3f << TXBC_NDTB_SHIFT)
#define TXBC_TFQS_SHIFT		24
#define TXBC_TFQS_MASK		(0x3f << TXBC_TFQS_SHIFT)

/* Tx FIFO/Queue Status (TXFQS) */
#define TXFQS_TFQF		BIT(21)
#define TXFQS_TFQPI_SHIFT	16
#define TXFQS_TFQPI_MASK	(0x1f << TXFQS_TFQPI_SHIFT)
#define TXFQS_TFGI_SHIFT	8
#define TXFQS_TFGI_MASK		(0x1f << TXFQS_TFGI_SHIFT)
#define TXFQS_TFFL_SHIFT	0
#define TXFQS_TFFL_MASK		(0x3f << TXFQS_TFFL_SHIFT)

/* Tx Buffer Element Size Configuration (TXESC) */
#define TXESC_TBDS_8BYTES	0x0
#define TXESC_TBDS_64BYTES	0x7

/* Tx Event FIFO Configuration (TXEFC) */
#define TXEFC_EFS_SHIFT		16
#define TXEFC_EFS_MASK		(0x3f << TXEFC_EFS_SHIFT)

/* Tx Event FIFO Status (TXEFS) */
#define TXEFS_TEFL		BIT(25)
#define TXEFS_EFF		BIT(24)
#define TXEFS_EFGI_SHIFT	8
#define TXEFS_EFGI_MASK		(0x1f << TXEFS_EFGI_SHIFT)
#define TXEFS_EFFL_SHIFT	0
#define TXEFS_EFFL_MASK		(0x3f << TXEFS_EFFL_SHIFT)

/* Tx Event FIFO Acknowledge (TXEFA) */
#define TXEFA_EFAI_SHIFT	0
#define TXEFA_EFAI_MASK		(0x1f << TXEFA_EFAI_SHIFT)

/* Message RAM Configuration (in bytes) */
#define SIDF_ELEMENT_SIZE	4
#define XIDF_ELEMENT_SIZE	8
#define RXF0_ELEMENT_SIZE	72
#define RXF1_ELEMENT_SIZE	72
#define RXB_ELEMENT_SIZE	72
#define TXE_ELEMENT_SIZE	8
#define TXB_ELEMENT_SIZE	72
/* Message RAM Elements */
#define M_CAN_FIFO_ID		0x0
#define M_CAN_FIFO_DLC		0x4
#define M_CAN_FIFO_DATA(n)	(0x8 + ((n) << 2))

/* Rx Buffer Element */
#define RX_BUF_ESI		BIT(31)
#define RX_BUF_XTD		BIT(30)
#define RX_BUF_RTR		BIT(29)

#define RX_BUF_ANMF		BIT(31)
#define RX_BUF_FDF		BIT(21)
#define RX_BUF_BRS		BIT(20)

/* Tx Buffer Element */
#define TX_BUF_ESI		BIT(31)
#define TX_BUF_XTD		BIT(30)
#define TX_BUF_RTR		BIT(29)

#define TX_BUF_EFC		BIT(23)
#define TX_BUF_FDF		BIT(21)
#define TX_BUF_BRS		BIT(20)
#define TX_BUF_MM_SHIFT		24
#define TX_BUF_MM_MASK		(0xff << TX_BUF_MM_SHIFT)

/* Tx event FIFO Element */
#define TX_EVENT_MM_SHIFT	TX_BUF_MM_SHIFT
#define TX_EVENT_MM_MASK	(0xff << TX_EVENT_MM_SHIFT)
/* address offset and element number for each FIFO/Buffer in the Message RAM */
struct mram_cfg {
	u16 off;
	u8  num;
};

/* m_can private data structure */
struct m_can_priv {
	struct can_priv can;	/* must be the first member */
	struct napi_struct napi;
	struct net_device *dev;
	struct device *device;
	struct clk *hclk;
	struct clk *cclk;
	void __iomem *base;
	u32 irqstatus;
	int version;

	/* message ram configuration */
	void __iomem *mram_base;
	struct mram_cfg mcfg[MRAM_CFG_NUM];
};
static inline u32 m_can_read(const struct m_can_priv *priv, enum m_can_reg reg)
{
	return readl(priv->base + reg);
}

static inline void m_can_write(const struct m_can_priv *priv,
			       enum m_can_reg reg, u32 val)
{
	writel(val, priv->base + reg);
}
static inline u32 m_can_fifo_read(const struct m_can_priv *priv,
				  u32 fgi, unsigned int offset)
{
	return readl(priv->mram_base + priv->mcfg[MRAM_RXF0].off +
		     fgi * RXF0_ELEMENT_SIZE + offset);
}
static inline void m_can_fifo_write(const struct m_can_priv *priv,
				    u32 fpi, unsigned int offset, u32 val)
{
	writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off +
	       fpi * TXB_ELEMENT_SIZE + offset);
}
static inline u32 m_can_txe_fifo_read(const struct m_can_priv *priv,
				      u32 fgi, u32 offset)
{
	return readl(priv->mram_base + priv->mcfg[MRAM_TXE].off +
		     fgi * TXE_ELEMENT_SIZE + offset);
}
static inline bool m_can_tx_fifo_full(const struct m_can_priv *priv)
{
	return !!(m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQF);
}
static inline void m_can_config_endisable(const struct m_can_priv *priv,
					  bool enable)
{
	u32 cccr = m_can_read(priv, M_CAN_CCCR);
	u32 timeout = 10;
	u32 val = 0;

	if (enable) {
		/* enable m_can configuration */
		m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT);
		udelay(5);
		/* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
		m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
	} else {
		m_can_write(priv, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
	}

	/* there's a delay for module initialization */
	if (enable)
		val = CCCR_INIT | CCCR_CCE;

	while ((m_can_read(priv, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
		if (timeout == 0) {
			netdev_warn(priv->dev, "Failed to init module\n");
			return;
		}
		timeout--;
		udelay(1);
	}
}
static inline void m_can_enable_all_interrupts(const struct m_can_priv *priv)
{
	/* Only interrupt line 0 is used in this driver */
	m_can_write(priv, M_CAN_ILE, ILE_EINT0);
}

static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv)
{
	m_can_write(priv, M_CAN_ILE, 0x0);
}
static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
{
	struct net_device_stats *stats = &dev->stats;
	struct m_can_priv *priv = netdev_priv(dev);
	struct canfd_frame *cf;
	struct sk_buff *skb;
	u32 id, fgi, dlc;
	int i;

	/* calculate the fifo get index for where to read data */
	fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_SHIFT;
	dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC);
	if (dlc & RX_BUF_FDF)
		skb = alloc_canfd_skb(dev, &cf);
	else
		skb = alloc_can_skb(dev, (struct can_frame **)&cf);
	if (!skb) {
		stats->rx_dropped++;
		return;
	}

	if (dlc & RX_BUF_FDF)
		cf->len = can_dlc2len((dlc >> 16) & 0x0F);
	else
		cf->len = get_can_dlc((dlc >> 16) & 0x0F);

	id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID);
	if (id & RX_BUF_XTD)
		cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (id >> 18) & CAN_SFF_MASK;

	if (id & RX_BUF_ESI) {
		cf->flags |= CANFD_ESI;
		netdev_dbg(dev, "ESI Error\n");
	}

	if (!(dlc & RX_BUF_FDF) && (id & RX_BUF_RTR)) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		if (dlc & RX_BUF_BRS)
			cf->flags |= CANFD_BRS;

		for (i = 0; i < cf->len; i += 4)
			*(u32 *)(cf->data + i) =
				m_can_fifo_read(priv, fgi,
						M_CAN_FIFO_DATA(i / 4));
	}

	/* acknowledge rx fifo 0 */
	m_can_write(priv, M_CAN_RXF0A, fgi);

	stats->rx_packets++;
	stats->rx_bytes += cf->len;

	netif_receive_skb(skb);
}
static int m_can_do_rx_poll(struct net_device *dev, int quota)
{
	struct m_can_priv *priv = netdev_priv(dev);
	u32 pkts = 0;
	u32 rxfs;

	rxfs = m_can_read(priv, M_CAN_RXF0S);
	if (!(rxfs & RXFS_FFL_MASK)) {
		netdev_dbg(dev, "no messages in fifo0\n");
		return 0;
	}

	while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
		if (rxfs & RXFS_RFL)
			netdev_warn(dev, "Rx FIFO 0 Message Lost\n");

		m_can_read_fifo(dev, rxfs);

		quota--;
		pkts++;
		rxfs = m_can_read(priv, M_CAN_RXF0S);
	}

	if (pkts)
		can_led_event(dev, CAN_LED_EVENT_RX);

	return pkts;
}
static int m_can_handle_lost_msg(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	struct can_frame *frame;

	netdev_err(dev, "msg lost in rxf0\n");

	stats->rx_errors++;
	stats->rx_over_errors++;

	skb = alloc_can_err_skb(dev, &frame);
	if (unlikely(!skb))
		return 0;

	frame->can_id |= CAN_ERR_CRTL;
	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	netif_receive_skb(skb);

	return 1;
}
static int m_can_handle_lec_err(struct net_device *dev,
				enum m_can_lec_type lec_type)
{
	struct m_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	priv->can.can_stats.bus_error++;
	stats->rx_errors++;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	/* check for 'last error code' which tells us the
	 * type of the last error to occur on the CAN bus
	 */
	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	switch (lec_type) {
	case LEC_STUFF_ERROR:
		netdev_dbg(dev, "stuff error\n");
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		break;
	case LEC_FORM_ERROR:
		netdev_dbg(dev, "form error\n");
		cf->data[2] |= CAN_ERR_PROT_FORM;
		break;
	case LEC_ACK_ERROR:
		netdev_dbg(dev, "ack error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
		break;
	case LEC_BIT1_ERROR:
		netdev_dbg(dev, "bit1 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		break;
	case LEC_BIT0_ERROR:
		netdev_dbg(dev, "bit0 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		break;
	case LEC_CRC_ERROR:
		netdev_dbg(dev, "CRC error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
		break;
	default:
		break;
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);

	return 1;
}
static int __m_can_get_berr_counter(const struct net_device *dev,
				    struct can_berr_counter *bec)
{
	struct m_can_priv *priv = netdev_priv(dev);
	unsigned int ecr;

	ecr = m_can_read(priv, M_CAN_ECR);
	bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;
	bec->txerr = (ecr & ECR_TEC_MASK) >> ECR_TEC_SHIFT;

	return 0;
}
static int m_can_clk_start(struct m_can_priv *priv)
{
	int err;

	err = pm_runtime_get_sync(priv->device);
	if (err < 0) {
		pm_runtime_put_noidle(priv->device);
		return err;
	}

	return 0;
}

static void m_can_clk_stop(struct m_can_priv *priv)
{
	pm_runtime_put_sync(priv->device);
}
static int m_can_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
{
	struct m_can_priv *priv = netdev_priv(dev);
	int err;

	err = m_can_clk_start(priv);
	if (err)
		return err;

	__m_can_get_berr_counter(dev, bec);

	m_can_clk_stop(priv);

	return 0;
}
static int m_can_handle_state_change(struct net_device *dev,
				     enum can_state new_state)
{
	struct m_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	struct can_berr_counter bec;
	unsigned int ecr;

	switch (new_state) {
	case CAN_STATE_ERROR_ACTIVE:
		/* error warning state */
		priv->can.can_stats.error_warning++;
		priv->can.state = CAN_STATE_ERROR_WARNING;
		break;
	case CAN_STATE_ERROR_PASSIVE:
		/* error passive state */
		priv->can.can_stats.error_passive++;
		priv->can.state = CAN_STATE_ERROR_PASSIVE;
		break;
	case CAN_STATE_BUS_OFF:
		/* bus-off state */
		priv->can.state = CAN_STATE_BUS_OFF;
		m_can_disable_all_interrupts(priv);
		priv->can.can_stats.bus_off++;
		can_bus_off(dev);
		break;
	default:
		break;
	}

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	__m_can_get_berr_counter(dev, &bec);

	switch (new_state) {
	case CAN_STATE_ERROR_ACTIVE:
		/* error warning state */
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = (bec.txerr > bec.rxerr) ?
			CAN_ERR_CRTL_TX_WARNING :
			CAN_ERR_CRTL_RX_WARNING;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case CAN_STATE_ERROR_PASSIVE:
		/* error passive state */
		cf->can_id |= CAN_ERR_CRTL;
		ecr = m_can_read(priv, M_CAN_ECR);
		if (ecr & ECR_RP)
			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		if (bec.txerr > 127)
			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case CAN_STATE_BUS_OFF:
		/* bus-off state */
		cf->can_id |= CAN_ERR_BUSOFF;
		break;
	default:
		break;
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);

	return 1;
}
static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
{
	struct m_can_priv *priv = netdev_priv(dev);
	int work_done = 0;

	if ((psr & PSR_EW) &&
	    (priv->can.state != CAN_STATE_ERROR_WARNING)) {
		netdev_dbg(dev, "entered error warning state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_ERROR_WARNING);
	}

	if ((psr & PSR_EP) &&
	    (priv->can.state != CAN_STATE_ERROR_PASSIVE)) {
		netdev_dbg(dev, "entered error passive state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_ERROR_PASSIVE);
	}

	if ((psr & PSR_BO) &&
	    (priv->can.state != CAN_STATE_BUS_OFF)) {
		netdev_dbg(dev, "entered error bus off state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_BUS_OFF);
	}

	return work_done;
}
static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
{
	if (irqstatus & IR_WDI)
		netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
	if (irqstatus & IR_ELO)
		netdev_err(dev, "Error Logging Overflow\n");
	if (irqstatus & IR_BEU)
		netdev_err(dev, "Bit Error Uncorrected\n");
	if (irqstatus & IR_BEC)
		netdev_err(dev, "Bit Error Corrected\n");
	if (irqstatus & IR_TOO)
		netdev_err(dev, "Timeout reached\n");
	if (irqstatus & IR_MRAF)
		netdev_err(dev, "Message RAM access failure occurred\n");
}
static inline bool is_lec_err(u32 psr)
{
	psr &= LEC_UNUSED;

	return psr && (psr != LEC_UNUSED);
}
static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
				   u32 psr)
{
	struct m_can_priv *priv = netdev_priv(dev);
	int work_done = 0;

	if (irqstatus & IR_RF0L)
		work_done += m_can_handle_lost_msg(dev);

	/* handle lec errors on the bus */
	if ((priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
	    is_lec_err(psr))
		work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED);

	/* other unprocessed error interrupts */
	m_can_handle_other_err(dev, irqstatus);

	return work_done;
}
static int m_can_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	struct m_can_priv *priv = netdev_priv(dev);
	int work_done = 0;
	u32 irqstatus, psr;

	irqstatus = priv->irqstatus | m_can_read(priv, M_CAN_IR);
	if (!irqstatus)
		goto end;

	/* Errata workaround for issue "Needless activation of MRAF irq"
	 * During frame reception while the MCAN is in Error Passive state
	 * and the Receive Error Counter has the value MCAN_ECR.REC = 127,
	 * it may happen that MCAN_IR.MRAF is set although there was no
	 * Message RAM access failure.
	 * If MCAN_IR.MRAF is enabled, an interrupt to the Host CPU is generated.
	 * The Message RAM Access Failure interrupt routine needs to check
	 * whether MCAN_ECR.RP = '1' and MCAN_ECR.REC = 127.
	 * In this case, reset MCAN_IR.MRAF. No further action is required.
	 */
	if ((priv->version <= 31) && (irqstatus & IR_MRAF) &&
	    (m_can_read(priv, M_CAN_ECR) & ECR_RP)) {
		struct can_berr_counter bec;

		__m_can_get_berr_counter(dev, &bec);
		if (bec.rxerr == 127) {
			m_can_write(priv, M_CAN_IR, IR_MRAF);
			irqstatus &= ~IR_MRAF;
		}
	}

	psr = m_can_read(priv, M_CAN_PSR);
	if (irqstatus & IR_ERR_STATE)
		work_done += m_can_handle_state_errors(dev, psr);

	if (irqstatus & IR_ERR_BUS_30X)
		work_done += m_can_handle_bus_errors(dev, irqstatus, psr);

	if (irqstatus & IR_RF0N)
		work_done += m_can_do_rx_poll(dev, (quota - work_done));

	if (work_done < quota) {
		napi_complete_done(napi, work_done);
		m_can_enable_all_interrupts(priv);
	}

end:
	return work_done;
}
static void m_can_echo_tx_event(struct net_device *dev)
{
	u32 txe_count = 0;
	u32 m_can_txefs;
	u32 fgi = 0;
	int i = 0;
	unsigned int msg_mark;

	struct m_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;

	/* read tx event fifo status */
	m_can_txefs = m_can_read(priv, M_CAN_TXEFS);

	/* Get Tx Event fifo element count */
	txe_count = (m_can_txefs & TXEFS_EFFL_MASK)
			>> TXEFS_EFFL_SHIFT;

	/* Get and process all sent elements */
	for (i = 0; i < txe_count; i++) {
		/* retrieve get index */
		fgi = (m_can_read(priv, M_CAN_TXEFS) & TXEFS_EFGI_MASK)
			>> TXEFS_EFGI_SHIFT;

		/* get message marker */
		msg_mark = (m_can_txe_fifo_read(priv, fgi, 4) &
			    TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT;

		/* ack txe element */
		m_can_write(priv, M_CAN_TXEFA, (TXEFA_EFAI_MASK &
						(fgi << TXEFA_EFAI_SHIFT)));

		/* update stats */
		stats->tx_bytes += can_get_echo_skb(dev, msg_mark);
		stats->tx_packets++;
	}
}
static irqreturn_t m_can_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct m_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	u32 ir;

	ir = m_can_read(priv, M_CAN_IR);
	if (!ir)
		return IRQ_NONE;

	/* ACK all irqs */
	if (ir & IR_ALL_INT)
		m_can_write(priv, M_CAN_IR, ir);

	/* schedule NAPI in case of
	 * - rx IRQ
	 * - state change IRQ
	 * - bus error IRQ and bus error reporting
	 */
	if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) {
		priv->irqstatus = ir;
		m_can_disable_all_interrupts(priv);
		napi_schedule(&priv->napi);
	}

	if (priv->version == 30) {
		if (ir & IR_TC) {
			/* Transmission Complete Interrupt */
			stats->tx_bytes += can_get_echo_skb(dev, 0);
			stats->tx_packets++;
			can_led_event(dev, CAN_LED_EVENT_TX);
			netif_wake_queue(dev);
		}
	} else {
		if (ir & IR_TEFN) {
			/* New TX FIFO Element arrived */
			m_can_echo_tx_event(dev);
			can_led_event(dev, CAN_LED_EVENT_TX);
			if (netif_queue_stopped(dev) &&
			    !m_can_tx_fifo_full(priv))
				netif_wake_queue(dev);
		}
	}

	return IRQ_HANDLED;
}
static const struct can_bittiming_const m_can_bittiming_const_30X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 64,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 1024,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_data_bittiming_const_30X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 16,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 32,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_bittiming_const_31X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 256,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 512,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_data_bittiming_const_31X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 1,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 32,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 32,
	.brp_inc = 1,
};
static int m_can_set_bittiming(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);
	const struct can_bittiming *bt = &priv->can.bittiming;
	const struct can_bittiming *dbt = &priv->can.data_bittiming;
	u16 brp, sjw, tseg1, tseg2;
	u32 reg_btp;

	brp = bt->brp - 1;
	sjw = bt->sjw - 1;
	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
	tseg2 = bt->phase_seg2 - 1;
	reg_btp = (brp << NBTP_NBRP_SHIFT) | (sjw << NBTP_NSJW_SHIFT) |
		  (tseg1 << NBTP_NTSEG1_SHIFT) | (tseg2 << NBTP_NTSEG2_SHIFT);
	m_can_write(priv, M_CAN_NBTP, reg_btp);

	if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
		reg_btp = 0;
		brp = dbt->brp - 1;
		sjw = dbt->sjw - 1;
		tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
		tseg2 = dbt->phase_seg2 - 1;

		/* TDC is only needed for bitrates beyond 2.5 MBit/s.
		 * This is mentioned in the "Bit Time Requirements for CAN FD"
		 * paper presented at the International CAN Conference 2013
		 */
		if (dbt->bitrate > 2500000) {
			u32 tdco, ssp;

			/* Use the same value of secondary sampling point
			 * as the data sampling point
			 */
			ssp = dbt->sample_point;

			/* Equation based on Bosch's M_CAN User Manual's
			 * Transmitter Delay Compensation Section
			 */
			tdco = (priv->can.clock.freq / 1000) *
			       ssp / dbt->bitrate;

			/* Max valid TDCO value is 127 */
			if (tdco > 127) {
				netdev_warn(dev, "TDCO value of %u is beyond maximum. Using maximum possible value\n",
					    tdco);
				tdco = 127;
			}

			reg_btp |= DBTP_TDC;
			m_can_write(priv, M_CAN_TDCR,
				    tdco << TDCR_TDCO_SHIFT);
		}

		reg_btp |= (brp << DBTP_DBRP_SHIFT) |
			   (sjw << DBTP_DSJW_SHIFT) |
			   (tseg1 << DBTP_DTSEG1_SHIFT) |
			   (tseg2 << DBTP_DTSEG2_SHIFT);

		m_can_write(priv, M_CAN_DBTP, reg_btp);
	}

	return 0;
}
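/* Worked example for the transmitter delay compensation above (assumed
 * numbers for illustration, not taken from a particular board): with a
 * 40 MHz CAN core clock, a 4 Mbit/s data bitrate and a secondary sample
 * point of 80.0% (dbt->sample_point == 800, in tenths of a percent), the
 * code computes
 *
 *	tdco = (40000000 / 1000) * 800 / 4000000 = 8
 *
 * i.e. TDCO is programmed to 8 CAN clock periods, well below the maximum
 * of 127 that the code clamps to.
 */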
/* Configure M_CAN chip:
 * - set rx buffer/fifo element size
 * - configure rx fifo
 * - accept non-matching frame into fifo 0
 * - configure tx buffer
 *		- >= v3.1.x: TX FIFO is used
 * - configure tx event fifo
 * - configure bittiming
 */
static void m_can_chip_config(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);
	u32 cccr, test;

	m_can_config_endisable(priv, true);

	/* RX Buffer/FIFO Element Size 64 bytes data field */
	m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_64BYTES);

	/* Accept Non-matching Frames Into FIFO 0 */
	m_can_write(priv, M_CAN_GFC, 0x0);

	if (priv->version == 30) {
		/* only support one Tx Buffer currently */
		m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_SHIFT) |
			    priv->mcfg[MRAM_TXB].off);
	} else {
		/* TX FIFO is used for newer IP Core versions */
		m_can_write(priv, M_CAN_TXBC,
			    (priv->mcfg[MRAM_TXB].num << TXBC_TFQS_SHIFT) |
			    (priv->mcfg[MRAM_TXB].off));
	}

	/* support 64 bytes payload */
	m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES);

	/* TX Event FIFO */
	if (priv->version == 30) {
		m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_SHIFT) |
			    priv->mcfg[MRAM_TXE].off);
	} else {
		/* Full TX Event FIFO is used */
		m_can_write(priv, M_CAN_TXEFC,
			    ((priv->mcfg[MRAM_TXE].num << TXEFC_EFS_SHIFT)
			     & TXEFC_EFS_MASK) |
			    priv->mcfg[MRAM_TXE].off);
	}

	/* rx fifo configuration, blocking mode, fifo size 1 */
	m_can_write(priv, M_CAN_RXF0C,
		    (priv->mcfg[MRAM_RXF0].num << RXFC_FS_SHIFT) |
		    priv->mcfg[MRAM_RXF0].off);

	m_can_write(priv, M_CAN_RXF1C,
		    (priv->mcfg[MRAM_RXF1].num << RXFC_FS_SHIFT) |
		    priv->mcfg[MRAM_RXF1].off);

	cccr = m_can_read(priv, M_CAN_CCCR);
	test = m_can_read(priv, M_CAN_TEST);
	test &= ~TEST_LBCK;
	if (priv->version == 30) {
		/* Version 3.0.x */
		cccr &= ~(CCCR_TEST | CCCR_MON |
			  (CCCR_CMR_MASK << CCCR_CMR_SHIFT) |
			  (CCCR_CME_MASK << CCCR_CME_SHIFT));

		if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
			cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT;

	} else {
		/* Version 3.1.x or 3.2.x */
		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
			  CCCR_NISO);

		/* Only 3.2.x has NISO Bit implemented */
		if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			cccr |= CCCR_NISO;

		if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
			cccr |= (CCCR_BRSE | CCCR_FDOE);
	}

	/* Loopback Mode */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
		cccr |= CCCR_TEST | CCCR_MON;
		test |= TEST_LBCK;
	}

	/* Enable Monitoring (all versions) */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		cccr |= CCCR_MON;

	/* Write config */
	m_can_write(priv, M_CAN_CCCR, cccr);
	m_can_write(priv, M_CAN_TEST, test);

	/* Enable interrupts */
	m_can_write(priv, M_CAN_IR, IR_ALL_INT);
	if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) {
		if (priv->version == 30)
			m_can_write(priv, M_CAN_IE, IR_ALL_INT &
				    ~(IR_ERR_LEC_30X));
		else
			m_can_write(priv, M_CAN_IE, IR_ALL_INT &
				    ~(IR_ERR_LEC_31X));
	} else {
		m_can_write(priv, M_CAN_IE, IR_ALL_INT);
	}

	/* route all interrupts to INT0 */
	m_can_write(priv, M_CAN_ILS, ILS_ALL_INT0);

	/* set bittiming params */
	m_can_set_bittiming(dev);

	m_can_config_endisable(priv, false);
}
static void m_can_start(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);

	/* basic m_can configuration */
	m_can_chip_config(dev);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	m_can_enable_all_interrupts(priv);
}
static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
{
	switch (mode) {
	case CAN_MODE_START:
		m_can_start(dev);
		netif_wake_queue(dev);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
/* Checks core release number of M_CAN
 * returns 0 if an unsupported device is detected
 * else it returns the release and step coded as:
 * return value = 10 * <release> + 1 * <step>
 */
static int m_can_check_core_release(void __iomem *m_can_base)
{
	u32 crel_reg;
	u8 rel;
	u8 step;
	int res;
	struct m_can_priv temp_priv = {
		.base = m_can_base
	};

	/* Read Core Release Version and split into version number
	 * Example: Version 3.2.1 => rel = 3; step = 2; substep = 1;
	 */
	crel_reg = m_can_read(&temp_priv, M_CAN_CREL);
	rel = (u8)((crel_reg & CREL_REL_MASK) >> CREL_REL_SHIFT);
	step = (u8)((crel_reg & CREL_STEP_MASK) >> CREL_STEP_SHIFT);

	if (rel == 3) {
		/* M_CAN v3.x.y: create return value */
		res = 30 + step;
	} else {
		/* Unsupported M_CAN version */
		res = 0;
	}

	return res;
}
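/* For illustration only (a hypothetical register value, not read from real
 * hardware): a CREL value of 0x32100000 has bits [31:28] = 3 (release) and
 * bits [27:24] = 2 (step), so m_can_check_core_release() above returns
 * 10 * 3 + 2 = 32, i.e. an M_CAN core of version 3.2.x.
 */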
/* Selectable Non ISO support only in version 3.2.x
 * This function checks if the bit is writable.
 */
static bool m_can_niso_supported(const struct m_can_priv *priv)
{
	u32 cccr_reg, cccr_poll;
	int niso_timeout;

	m_can_config_endisable(priv, true);
	cccr_reg = m_can_read(priv, M_CAN_CCCR);
	cccr_reg |= CCCR_NISO;
	m_can_write(priv, M_CAN_CCCR, cccr_reg);

	niso_timeout = readl_poll_timeout((priv->base + M_CAN_CCCR), cccr_poll,
					  (cccr_poll == cccr_reg), 0, 10);

	/* Clear NISO */
	cccr_reg &= ~(CCCR_NISO);
	m_can_write(priv, M_CAN_CCCR, cccr_reg);

	m_can_config_endisable(priv, false);

	/* return false if time out (-ETIMEDOUT), else return true */
	return !niso_timeout;
}
static int m_can_dev_setup(struct platform_device *pdev, struct net_device *dev,
			   void __iomem *addr)
{
	struct m_can_priv *priv;
	int m_can_version;

	m_can_version = m_can_check_core_release(addr);
	/* return if unsupported version */
	if (!m_can_version) {
		dev_err(&pdev->dev, "Unsupported version number: %2d",
			m_can_version);
		return -EINVAL;
	}

	priv = netdev_priv(dev);
	netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT);

	/* Shared properties of all M_CAN versions */
	priv->version = m_can_version;
	priv->dev = dev;
	priv->base = addr;
	priv->can.do_set_mode = m_can_set_mode;
	priv->can.do_get_berr_counter = m_can_get_berr_counter;

	/* Set M_CAN supported operations */
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
					CAN_CTRLMODE_LISTENONLY |
					CAN_CTRLMODE_BERR_REPORTING |
					CAN_CTRLMODE_FD;

	/* Set properties depending on M_CAN version */
	switch (priv->version) {
	case 30:
		/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
		can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
		priv->can.bittiming_const = &m_can_bittiming_const_30X;
		priv->can.data_bittiming_const =
				&m_can_data_bittiming_const_30X;
		break;
	case 31:
		/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
		can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
		priv->can.bittiming_const = &m_can_bittiming_const_31X;
		priv->can.data_bittiming_const =
				&m_can_data_bittiming_const_31X;
		break;
	case 32:
		priv->can.bittiming_const = &m_can_bittiming_const_31X;
		priv->can.data_bittiming_const =
				&m_can_data_bittiming_const_31X;
		priv->can.ctrlmode_supported |= (m_can_niso_supported(priv)
						? CAN_CTRLMODE_FD_NON_ISO
						: 0);
		break;
	default:
		dev_err(&pdev->dev, "Unsupported version number: %2d",
			priv->version);
		return -EINVAL;
	}

	return 0;
}
static int m_can_open(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);
	int err;

	err = m_can_clk_start(priv);
	if (err)
		return err;

	/* open the can device */
	err = open_candev(dev);
	if (err) {
		netdev_err(dev, "failed to open can device\n");
		goto exit_disable_clks;
	}

	/* register interrupt handler */
	err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
			  dev);
	if (err < 0) {
		netdev_err(dev, "failed to request interrupt\n");
		goto exit_irq_fail;
	}

	/* start the m_can controller */
	m_can_start(dev);

	can_led_event(dev, CAN_LED_EVENT_OPEN);
	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

exit_irq_fail:
	close_candev(dev);
exit_disable_clks:
	m_can_clk_stop(priv);
	return err;
}
static void m_can_stop(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);

	/* disable all interrupts */
	m_can_disable_all_interrupts(priv);

	/* set the state as STOPPED */
	priv->can.state = CAN_STATE_STOPPED;
}

static int m_can_close(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	m_can_stop(dev);
	m_can_clk_stop(priv);
	free_irq(dev->irq, dev);
	close_candev(dev);
	can_led_event(dev, CAN_LED_EVENT_STOP);

	return 0;
}
static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx)
{
	struct m_can_priv *priv = netdev_priv(dev);
	/* get wrap around for loopback skb index */
	unsigned int wrap = priv->can.echo_skb_max;
	int next_idx;

	/* calculate next index */
	next_idx = (++putidx >= wrap ? 0 : putidx);

	/* check if occupied */
	return !!priv->can.echo_skb[next_idx];
}
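/* Illustrative numbers (not taken from the code above): with
 * echo_skb_max == 16 and putidx == 15, ++putidx reaches the wrap limit of 16,
 * so next_idx becomes 0; for putidx == 3 the next index is simply 4.
 */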
static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	u32 id, cccr, fdflags;
	int i;
	int putidx;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	/* Generate ID field for TX buffer Element */
	/* Common to all supported M_CAN versions */
	if (cf->can_id & CAN_EFF_FLAG) {
		id = cf->can_id & CAN_EFF_MASK;
		id |= TX_BUF_XTD;
	} else {
		id = ((cf->can_id & CAN_SFF_MASK) << 18);
	}

	if (cf->can_id & CAN_RTR_FLAG)
		id |= TX_BUF_RTR;

	if (priv->version == 30) {
		netif_stop_queue(dev);

		/* message ram configuration */
		m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id);
		m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC,
				 can_len2dlc(cf->len) << 16);

		for (i = 0; i < cf->len; i += 4)
			m_can_fifo_write(priv, 0,
					 M_CAN_FIFO_DATA(i / 4),
					 *(u32 *)(cf->data + i));

		can_put_echo_skb(skb, dev, 0);

		if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
			cccr = m_can_read(priv, M_CAN_CCCR);
			cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT);
			if (can_is_canfd_skb(skb)) {
				if (cf->flags & CANFD_BRS)
					cccr |= CCCR_CMR_CANFD_BRS <<
						CCCR_CMR_SHIFT;
				else
					cccr |= CCCR_CMR_CANFD <<
						CCCR_CMR_SHIFT;
			} else {
				cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT;
			}
			m_can_write(priv, M_CAN_CCCR, cccr);
		}
		m_can_write(priv, M_CAN_TXBTIE, 0x1);
		m_can_write(priv, M_CAN_TXBAR, 0x1);
		/* End of xmit function for version 3.0.x */
	} else {
		/* Transmit routine for version >= v3.1.x */

		/* Check if FIFO full */
		if (m_can_tx_fifo_full(priv)) {
			/* This shouldn't happen */
			netif_stop_queue(dev);
			netdev_warn(dev,
				    "TX queue active although FIFO is full.");
			return NETDEV_TX_BUSY;
		}

		/* get put index for frame */
		putidx = ((m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQPI_MASK)
			  >> TXFQS_TFQPI_SHIFT);
		/* Write ID Field to FIFO Element */
		m_can_fifo_write(priv, putidx, M_CAN_FIFO_ID, id);

		/* get CAN FD configuration of frame */
		fdflags = 0;
		if (can_is_canfd_skb(skb)) {
			fdflags |= TX_BUF_FDF;
			if (cf->flags & CANFD_BRS)
				fdflags |= TX_BUF_BRS;
		}

		/* Construct DLC Field. Also contains CAN-FD configuration
		 * use put index of fifo as message marker
		 * it is used in TX interrupt for
		 * sending the correct echo frame
		 */
		m_can_fifo_write(priv, putidx, M_CAN_FIFO_DLC,
				 ((putidx << TX_BUF_MM_SHIFT) &
				  TX_BUF_MM_MASK) |
				 (can_len2dlc(cf->len) << 16) |
				 fdflags | TX_BUF_EFC);

		for (i = 0; i < cf->len; i += 4)
			m_can_fifo_write(priv, putidx, M_CAN_FIFO_DATA(i / 4),
					 *(u32 *)(cf->data + i));

		/* Push loopback echo.
		 * Will be looped back on TX interrupt based on message marker
		 */
		can_put_echo_skb(skb, dev, putidx);

		/* Enable TX FIFO element to start transfer */
		m_can_write(priv, M_CAN_TXBAR, (1 << putidx));

		/* stop network queue if fifo full */
		if (m_can_tx_fifo_full(priv) ||
		    m_can_next_echo_skb_occupied(dev, putidx))
			netif_stop_queue(dev);
	}

	return NETDEV_TX_OK;
}
static const struct net_device_ops m_can_netdev_ops = {
	.ndo_open = m_can_open,
	.ndo_stop = m_can_close,
	.ndo_start_xmit = m_can_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

static int register_m_can_dev(struct net_device *dev)
{
	dev->flags |= IFF_ECHO;	/* we support local echo */
	dev->netdev_ops = &m_can_netdev_ops;

	return register_candev(dev);
}
static void m_can_init_ram(struct m_can_priv *priv)
{
	int end, i, start;

	/* initialize the entire Message RAM in use to avoid possible
	 * ECC/parity checksum errors when reading an uninitialized buffer
	 */
	start = priv->mcfg[MRAM_SIDF].off;
	end = priv->mcfg[MRAM_TXB].off +
		priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
	for (i = start; i < end; i += 4)
		writel(0x0, priv->mram_base + i);
}
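/* Layout of the "bosch,mram-cfg" device tree property as consumed by
 * m_can_of_parse_mram() below (a sketch derived from the parsing order;
 * see the device tree binding documentation for the authoritative form):
 *
 *	bosch,mram-cfg = <offset sidf_elems xidf_elems rxf0_elems rxf1_elems
 *			  rxb_elems txe_elems txb_elems>;
 *
 * e.g. a hypothetical configuration with 32 RX FIFO 0 elements, one TX event
 * element and one TX buffer, packed from the start of the Message RAM:
 *
 *	bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
 */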
static void m_can_of_parse_mram(struct m_can_priv *priv,
				const u32 *mram_config_vals)
{
	priv->mcfg[MRAM_SIDF].off = mram_config_vals[0];
	priv->mcfg[MRAM_SIDF].num = mram_config_vals[1];
	priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off +
			priv->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
	priv->mcfg[MRAM_XIDF].num = mram_config_vals[2];
	priv->mcfg[MRAM_RXF0].off = priv->mcfg[MRAM_XIDF].off +
			priv->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
	priv->mcfg[MRAM_RXF0].num = mram_config_vals[3] &
			(RXFC_FS_MASK >> RXFC_FS_SHIFT);
	priv->mcfg[MRAM_RXF1].off = priv->mcfg[MRAM_RXF0].off +
			priv->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
	priv->mcfg[MRAM_RXF1].num = mram_config_vals[4] &
			(RXFC_FS_MASK >> RXFC_FS_SHIFT);
	priv->mcfg[MRAM_RXB].off = priv->mcfg[MRAM_RXF1].off +
			priv->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
	priv->mcfg[MRAM_RXB].num = mram_config_vals[5];
	priv->mcfg[MRAM_TXE].off = priv->mcfg[MRAM_RXB].off +
			priv->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
	priv->mcfg[MRAM_TXE].num = mram_config_vals[6];
	priv->mcfg[MRAM_TXB].off = priv->mcfg[MRAM_TXE].off +
			priv->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
	priv->mcfg[MRAM_TXB].num = mram_config_vals[7] &
			(TXBC_NDTB_MASK >> TXBC_NDTB_SHIFT);

	dev_dbg(priv->device,
		"mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
		priv->mram_base,
		priv->mcfg[MRAM_SIDF].off, priv->mcfg[MRAM_SIDF].num,
		priv->mcfg[MRAM_XIDF].off, priv->mcfg[MRAM_XIDF].num,
		priv->mcfg[MRAM_RXF0].off, priv->mcfg[MRAM_RXF0].num,
		priv->mcfg[MRAM_RXF1].off, priv->mcfg[MRAM_RXF1].num,
		priv->mcfg[MRAM_RXB].off, priv->mcfg[MRAM_RXB].num,
		priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num,
		priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num);

	m_can_init_ram(priv);
}
static int m_can_plat_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct m_can_priv *priv;
	struct resource *res;
	void __iomem *addr;
	void __iomem *mram_addr;
	struct clk *hclk, *cclk;
	int irq, ret;
	struct device_node *np;
	u32 mram_config_vals[MRAM_CFG_LEN];
	int tx_fifo_size;

	np = pdev->dev.of_node;

	hclk = devm_clk_get(&pdev->dev, "hclk");
	cclk = devm_clk_get(&pdev->dev, "cclk");

	if (IS_ERR(hclk) || IS_ERR(cclk)) {
		dev_err(&pdev->dev, "no clock found\n");
		ret = -ENODEV;
		goto failed_ret;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
	addr = devm_ioremap_resource(&pdev->dev, res);
	irq = platform_get_irq_byname(pdev, "int0");

	if (IS_ERR(addr) || irq < 0) {
		ret = -EINVAL;
		goto failed_ret;
	}

	/* message ram could be shared */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
	if (!res) {
		ret = -ENODEV;
		goto failed_ret;
	}

	mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!mram_addr) {
		ret = -ENOMEM;
		goto failed_ret;
	}

	/* get message ram configuration */
	ret = of_property_read_u32_array(np, "bosch,mram-cfg",
					 mram_config_vals,
					 sizeof(mram_config_vals) / 4);
	if (ret) {
		dev_err(&pdev->dev, "Could not get Message RAM configuration.");
		goto failed_ret;
	}

	/* Get TX FIFO size
	 * Defines the total amount of echo buffers for loopback
	 */
	tx_fifo_size = mram_config_vals[7];

	/* allocate the m_can device */
	dev = alloc_candev(sizeof(*priv), tx_fifo_size);
	if (!dev) {
		ret = -ENOMEM;
		goto failed_ret;
	}

	priv = netdev_priv(dev);
	dev->irq = irq;
	priv->device = &pdev->dev;
	priv->hclk = hclk;
	priv->cclk = cclk;
	priv->can.clock.freq = clk_get_rate(cclk);
	priv->mram_base = mram_addr;

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Enable clocks. Necessary to read Core Release in order to determine
	 * M_CAN version
	 */
	pm_runtime_enable(&pdev->dev);
	ret = m_can_clk_start(priv);
	if (ret)
		goto pm_runtime_fail;

	ret = m_can_dev_setup(pdev, dev, addr);
	if (ret)
		goto clk_disable;

	ret = register_m_can_dev(dev);
	if (ret) {
		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
			KBUILD_MODNAME, ret);
		goto clk_disable;
	}

	m_can_of_parse_mram(priv, mram_config_vals);

	devm_can_led_init(dev);

	of_can_transceiver(dev);

	dev_info(&pdev->dev, "%s device registered (irq=%d, version=%d)\n",
		 KBUILD_MODNAME, dev->irq, priv->version);

	/* Probe finished
	 * Stop clocks. They will be reactivated once the M_CAN device is opened
	 */
clk_disable:
	m_can_clk_stop(priv);
pm_runtime_fail:
	if (ret) {
		pm_runtime_disable(&pdev->dev);
		free_candev(dev);
	}
failed_ret:
	return ret;
}
static __maybe_unused int m_can_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct m_can_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
		m_can_stop(ndev);
		m_can_clk_stop(priv);
	}

	pinctrl_pm_select_sleep_state(dev);

	priv->can.state = CAN_STATE_SLEEPING;

	return 0;
}
static __maybe_unused int m_can_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct m_can_priv *priv = netdev_priv(ndev);

	pinctrl_pm_select_default_state(dev);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	if (netif_running(ndev)) {
		int ret;

		ret = m_can_clk_start(priv);
		if (ret)
			return ret;

		m_can_init_ram(priv);
		m_can_start(ndev);
		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}
static void unregister_m_can_dev(struct net_device *dev)
{
	unregister_candev(dev);
}

static int m_can_plat_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_m_can_dev(dev);

	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);

	free_candev(dev);

	return 0;
}
static int __maybe_unused m_can_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct m_can_priv *priv = netdev_priv(ndev);

	clk_disable_unprepare(priv->cclk);
	clk_disable_unprepare(priv->hclk);

	return 0;
}
static int __maybe_unused m_can_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct m_can_priv *priv = netdev_priv(ndev);
	int err;

	err = clk_prepare_enable(priv->hclk);
	if (err)
		return err;

	err = clk_prepare_enable(priv->cclk);
	if (err)
		clk_disable_unprepare(priv->hclk);

	return err;
}
static const struct dev_pm_ops m_can_pmops = {
	SET_RUNTIME_PM_OPS(m_can_runtime_suspend,
			   m_can_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume)
};

static const struct of_device_id m_can_of_table[] = {
	{ .compatible = "bosch,m_can", .data = NULL },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, m_can_of_table);
static struct platform_driver m_can_plat_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = m_can_of_table,
		.pm = &m_can_pmops,
	},
	.probe = m_can_plat_probe,
	.remove = m_can_plat_remove,
};

module_platform_driver(m_can_plat_driver);
MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");