/* CAN bus driver for Bosch M_CAN controller
 *
 * Copyright (C) 2014 Freescale Semiconductor, Inc.
 *	Dong Aisheng <b29396@freescale.com>
 *
 * Bosch M_CAN user manual can be obtained from:
 * http://www.bosch-semiconductors.de/media/pdf_1/ipmodules_1/m_can/
 * mcan_users_manual_v302.pdf
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/iopoll.h>
#include <linux/can/dev.h>
#define M_CAN_NAPI_WEIGHT	64

/* message RAM configuration data length */
#define MRAM_CFG_LEN	8
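/* The "bosch,mram-cfg" devicetree property read in m_can_plat_probe() is
 * expected to carry MRAM_CFG_LEN (8) cells: the Message RAM offset followed
 * by the number of SIDF, XIDF, RXF0, RXF1, RXB, TXE and TXB elements, e.g.
 * (illustrative values only):
 *
 *	bosch,mram-cfg = <0x0 0 0 32 0 0 0 1>;
 */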
/* register definitions */

/* TDCR Register only available for version >=3.1.x */

/* m_can lec values */
enum m_can_lec_type {
	LEC_NO_ERROR = 0,
	LEC_STUFF_ERROR,
	LEC_FORM_ERROR,
	LEC_ACK_ERROR,
	LEC_BIT1_ERROR,
	LEC_BIT0_ERROR,
	LEC_CRC_ERROR,
	LEC_UNUSED,
};

enum m_can_mram_cfg {
	MRAM_SIDF = 0,
	MRAM_XIDF,
	MRAM_RXF0,
	MRAM_RXF1,
	MRAM_RXB,
	MRAM_TXE,
	MRAM_TXB,
	MRAM_CFG_NUM,
};
/* Core Release Register (CREL) */
#define CREL_REL_SHIFT		28
#define CREL_REL_MASK		(0xF << CREL_REL_SHIFT)
#define CREL_STEP_SHIFT		24
#define CREL_STEP_MASK		(0xF << CREL_STEP_SHIFT)
#define CREL_SUBSTEP_SHIFT	20
#define CREL_SUBSTEP_MASK	(0xF << CREL_SUBSTEP_SHIFT)
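/* Example (hypothetical readback value, for illustration only): a CREL value
 * of 0x32100000 decodes as release 3, step 2, substep 1, i.e. an M_CAN
 * version 3.2.1 core:
 *
 *	rel     = (0x32100000 & CREL_REL_MASK) >> CREL_REL_SHIFT;		// 3
 *	step    = (0x32100000 & CREL_STEP_MASK) >> CREL_STEP_SHIFT;		// 2
 *	substep = (0x32100000 & CREL_SUBSTEP_MASK) >> CREL_SUBSTEP_SHIFT;	// 1
 */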
/* Data Bit Timing & Prescaler Register (DBTP) */
#define DBTP_TDC		BIT(23)
#define DBTP_DBRP_SHIFT		16
#define DBTP_DBRP_MASK		(0x1f << DBTP_DBRP_SHIFT)
#define DBTP_DTSEG1_SHIFT	8
#define DBTP_DTSEG1_MASK	(0x1f << DBTP_DTSEG1_SHIFT)
#define DBTP_DTSEG2_SHIFT	4
#define DBTP_DTSEG2_MASK	(0xf << DBTP_DTSEG2_SHIFT)
#define DBTP_DSJW_SHIFT		0
#define DBTP_DSJW_MASK		(0xf << DBTP_DSJW_SHIFT)

/* Transmitter Delay Compensation Register (TDCR) */
#define TDCR_TDCO_SHIFT		8
#define TDCR_TDCO_MASK		(0x7F << TDCR_TDCO_SHIFT)
#define TDCR_TDCF_SHIFT		0
#define TDCR_TDCF_MASK		(0x7F << TDCR_TDCF_SHIFT)
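/* TDCR.TDCO is programmed from m_can_set_bittiming() below when the data
 * bitrate is above 2.5 MBit/s; see the worked example following that
 * function.
 */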
/* Test Register (TEST) */
#define TEST_LBCK		BIT(4)

/* CC Control Register (CCCR) */
#define CCCR_CMR_MASK		0x3
#define CCCR_CMR_SHIFT		10
#define CCCR_CMR_CANFD		0x1
#define CCCR_CMR_CANFD_BRS	0x2
#define CCCR_CMR_CAN		0x3
#define CCCR_CME_MASK		0x3
#define CCCR_CME_SHIFT		8
#define CCCR_CME_CAN		0
#define CCCR_CME_CANFD		0x1
#define CCCR_CME_CANFD_BRS	0x2
#define CCCR_TXP		BIT(14)
#define CCCR_TEST		BIT(7)
#define CCCR_MON		BIT(5)
#define CCCR_CSR		BIT(4)
#define CCCR_CSA		BIT(3)
#define CCCR_ASM		BIT(2)
#define CCCR_CCE		BIT(1)
#define CCCR_INIT		BIT(0)
#define CCCR_CANFD		0x10
/* for version >=3.1.x */
#define CCCR_EFBI		BIT(13)
#define CCCR_PXHD		BIT(12)
#define CCCR_BRSE		BIT(9)
#define CCCR_FDOE		BIT(8)
/* only for version >=3.2.x */
#define CCCR_NISO		BIT(15)

/* Nominal Bit Timing & Prescaler Register (NBTP) */
#define NBTP_NSJW_SHIFT		25
#define NBTP_NSJW_MASK		(0x7f << NBTP_NSJW_SHIFT)
#define NBTP_NBRP_SHIFT		16
#define NBTP_NBRP_MASK		(0x1ff << NBTP_NBRP_SHIFT)
#define NBTP_NTSEG1_SHIFT	8
#define NBTP_NTSEG1_MASK	(0xff << NBTP_NTSEG1_SHIFT)
#define NBTP_NTSEG2_SHIFT	0
#define NBTP_NTSEG2_MASK	(0x7f << NBTP_NTSEG2_SHIFT)

/* Error Counter Register (ECR) */
#define ECR_RP			BIT(15)
#define ECR_REC_SHIFT		8
#define ECR_REC_MASK		(0x7f << ECR_REC_SHIFT)
#define ECR_TEC_SHIFT		0
#define ECR_TEC_MASK		0xff

/* Protocol Status Register (PSR) */
#define PSR_BO			BIT(7)
#define PSR_EW			BIT(6)
#define PSR_EP			BIT(5)
#define PSR_LEC_MASK		0x7
/* Interrupt Register (IR) */
#define IR_ALL_INT	0xffffffff

/* Renamed bits for versions > 3.1.x */
#define IR_ARA		BIT(29)
#define IR_PED		BIT(28)
#define IR_PEA		BIT(27)

/* Bits for version 3.0.x */
#define IR_STE		BIT(31)
#define IR_FOE		BIT(30)
#define IR_ACKE		BIT(29)
#define IR_BE		BIT(28)
#define IR_CRCE		BIT(27)
#define IR_WDI		BIT(26)
#define IR_BO		BIT(25)
#define IR_EW		BIT(24)
#define IR_EP		BIT(23)
#define IR_ELO		BIT(22)
#define IR_BEU		BIT(21)
#define IR_BEC		BIT(20)
#define IR_DRX		BIT(19)
#define IR_TOO		BIT(18)
#define IR_MRAF		BIT(17)
#define IR_TSW		BIT(16)
#define IR_TEFL		BIT(15)
#define IR_TEFF		BIT(14)
#define IR_TEFW		BIT(13)
#define IR_TEFN		BIT(12)
#define IR_TFE		BIT(11)
#define IR_TCF		BIT(10)
#define IR_TC		BIT(9)
#define IR_HPM		BIT(8)
#define IR_RF1L		BIT(7)
#define IR_RF1F		BIT(6)
#define IR_RF1W		BIT(5)
#define IR_RF1N		BIT(4)
#define IR_RF0L		BIT(3)
#define IR_RF0F		BIT(2)
#define IR_RF0W		BIT(1)
#define IR_RF0N		BIT(0)
#define IR_ERR_STATE	(IR_BO | IR_EW | IR_EP)

/* Interrupts for version 3.0.x */
#define IR_ERR_LEC_30X	(IR_STE | IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
#define IR_ERR_BUS_30X	(IR_ERR_LEC_30X | IR_WDI | IR_ELO | IR_BEU | \
			 IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
			 IR_RF1L | IR_RF0L)
#define IR_ERR_ALL_30X	(IR_ERR_STATE | IR_ERR_BUS_30X)
/* Interrupts for version >= 3.1.x */
#define IR_ERR_LEC_31X	(IR_PED | IR_PEA)
#define IR_ERR_BUS_31X	(IR_ERR_LEC_31X | IR_WDI | IR_ELO | IR_BEU | \
			 IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
			 IR_RF1L | IR_RF0L)
#define IR_ERR_ALL_31X	(IR_ERR_STATE | IR_ERR_BUS_31X)
/* Interrupt Line Select (ILS) */
#define ILS_ALL_INT0	0x0
#define ILS_ALL_INT1	0xFFFFFFFF

/* Interrupt Line Enable (ILE) */
#define ILE_EINT1	BIT(1)
#define ILE_EINT0	BIT(0)
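/* This driver routes all interrupt sources to interrupt line 0 and only
 * enables EINT0; see m_can_chip_config() and m_can_enable_all_interrupts():
 *
 *	m_can_write(priv, M_CAN_ILS, ILS_ALL_INT0);
 *	m_can_write(priv, M_CAN_ILE, ILE_EINT0);
 */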
/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
#define RXFC_FWM_SHIFT	24
#define RXFC_FWM_MASK	(0x7f << RXFC_FWM_SHIFT)
#define RXFC_FS_SHIFT	16
#define RXFC_FS_MASK	(0x7f << RXFC_FS_SHIFT)
/* Rx FIFO 0/1 Status (RXF0S/RXF1S) */
#define RXFS_RFL	BIT(25)
#define RXFS_FF		BIT(24)
#define RXFS_FPI_SHIFT	16
#define RXFS_FPI_MASK	0x3f0000
#define RXFS_FGI_SHIFT	8
#define RXFS_FGI_MASK	0x3f00
#define RXFS_FFL_MASK	0x7f

/* Rx Buffer / FIFO Element Size Configuration (RXESC) */
#define M_CAN_RXESC_8BYTES	0x0
#define M_CAN_RXESC_64BYTES	0x777

/* Tx Buffer Configuration (TXBC) */
#define TXBC_NDTB_SHIFT		16
#define TXBC_NDTB_MASK		(0x3f << TXBC_NDTB_SHIFT)
#define TXBC_TFQS_SHIFT		24
#define TXBC_TFQS_MASK		(0x3f << TXBC_TFQS_SHIFT)

/* Tx FIFO/Queue Status (TXFQS) */
#define TXFQS_TFQF		BIT(21)
#define TXFQS_TFQPI_SHIFT	16
#define TXFQS_TFQPI_MASK	(0x1f << TXFQS_TFQPI_SHIFT)
#define TXFQS_TFGI_SHIFT	8
#define TXFQS_TFGI_MASK		(0x1f << TXFQS_TFGI_SHIFT)
#define TXFQS_TFFL_SHIFT	0
#define TXFQS_TFFL_MASK		(0x3f << TXFQS_TFFL_SHIFT)

/* Tx Buffer Element Size Configuration (TXESC) */
#define TXESC_TBDS_8BYTES	0x0
#define TXESC_TBDS_64BYTES	0x7

/* Tx Event FIFO Configuration (TXEFC) */
#define TXEFC_EFS_SHIFT		16
#define TXEFC_EFS_MASK		(0x3f << TXEFC_EFS_SHIFT)

/* Tx Event FIFO Status (TXEFS) */
#define TXEFS_TEFL		BIT(25)
#define TXEFS_EFF		BIT(24)
#define TXEFS_EFGI_SHIFT	8
#define TXEFS_EFGI_MASK		(0x1f << TXEFS_EFGI_SHIFT)
#define TXEFS_EFFL_SHIFT	0
#define TXEFS_EFFL_MASK		(0x3f << TXEFS_EFFL_SHIFT)

/* Tx Event FIFO Acknowledge (TXEFA) */
#define TXEFA_EFAI_SHIFT	0
#define TXEFA_EFAI_MASK		(0x1f << TXEFA_EFAI_SHIFT)
/* Message RAM Configuration (in bytes) */
#define SIDF_ELEMENT_SIZE	4
#define XIDF_ELEMENT_SIZE	8
#define RXF0_ELEMENT_SIZE	72
#define RXF1_ELEMENT_SIZE	72
#define RXB_ELEMENT_SIZE	72
#define TXE_ELEMENT_SIZE	8
#define TXB_ELEMENT_SIZE	72
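/* Worked example (hypothetical configuration, for illustration only): with
 * 0 SIDF, 0 XIDF, 32 RXF0, 0 RXF1, 0 RXB, 1 TXE and 1 TXB elements, the
 * Message RAM layout computed by m_can_of_parse_mram() occupies
 *
 *	32 * RXF0_ELEMENT_SIZE + 1 * TXE_ELEMENT_SIZE + 1 * TXB_ELEMENT_SIZE
 *	= 32 * 72 + 8 + 72 = 2384 bytes
 *
 * starting at the configured Message RAM offset.
 */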
/* Message RAM Elements */
#define M_CAN_FIFO_ID		0x0
#define M_CAN_FIFO_DLC		0x4
#define M_CAN_FIFO_DATA(n)	(0x8 + ((n) << 2))
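/* Each buffer/FIFO element therefore starts with a 32-bit ID word and a
 * 32-bit DLC/control word, followed by up to 64 data bytes addressed as
 * M_CAN_FIFO_DATA(0) .. M_CAN_FIFO_DATA(15).
 */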
/* Rx Buffer Element */
/* R0 */
#define RX_BUF_ESI	BIT(31)
#define RX_BUF_XTD	BIT(30)
#define RX_BUF_RTR	BIT(29)
/* R1 */
#define RX_BUF_ANMF	BIT(31)
#define RX_BUF_FDF	BIT(21)
#define RX_BUF_BRS	BIT(20)

/* Tx Buffer Element */
/* T0 */
#define TX_BUF_ESI	BIT(31)
#define TX_BUF_XTD	BIT(30)
#define TX_BUF_RTR	BIT(29)
/* T1 */
#define TX_BUF_EFC	BIT(23)
#define TX_BUF_FDF	BIT(21)
#define TX_BUF_BRS	BIT(20)
#define TX_BUF_MM_SHIFT	24
#define TX_BUF_MM_MASK	(0xff << TX_BUF_MM_SHIFT)

/* Tx event FIFO Element */
/* E1 */
#define TX_EVENT_MM_SHIFT	TX_BUF_MM_SHIFT
#define TX_EVENT_MM_MASK	(0xff << TX_EVENT_MM_SHIFT)
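/* The TX path (m_can_start_xmit() for >= v3.1.x cores) stores the FIFO put
 * index in the buffer element's message marker field (TX_BUF_MM_*);
 * m_can_echo_tx_event() reads the same field back from the TX Event FIFO
 * element (TX_EVENT_MM_*) to loop back the matching echo skb.
 */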
/* address offset and element number for each FIFO/Buffer in the Message RAM */
struct mram_cfg {
	u16 off;
	u8  num;
};

/* m_can private data structure */
struct m_can_priv {
	struct can_priv can;	/* must be the first member */
	struct napi_struct napi;
	struct net_device *dev;
	struct device *device;
	struct clk *hclk;
	struct clk *cclk;
	void __iomem *base;
	u32 irqstatus;
	int version;

	/* message ram configuration */
	void __iomem *mram_base;
	struct mram_cfg mcfg[MRAM_CFG_NUM];
};
static inline u32 m_can_read(const struct m_can_priv *priv, enum m_can_reg reg)
{
	return readl(priv->base + reg);
}
static inline void m_can_write(const struct m_can_priv *priv,
			       enum m_can_reg reg, u32 val)
{
	writel(val, priv->base + reg);
}
static inline u32 m_can_fifo_read(const struct m_can_priv *priv,
				  u32 fgi, unsigned int offset)
{
	return readl(priv->mram_base + priv->mcfg[MRAM_RXF0].off +
		     fgi * RXF0_ELEMENT_SIZE + offset);
}
static inline void m_can_fifo_write(const struct m_can_priv *priv,
				    u32 fpi, unsigned int offset, u32 val)
{
	writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off +
	       fpi * TXB_ELEMENT_SIZE + offset);
}
static inline u32 m_can_txe_fifo_read(const struct m_can_priv *priv,
				      u32 fgi, u32 offset)
{
	return readl(priv->mram_base + priv->mcfg[MRAM_TXE].off +
		     fgi * TXE_ELEMENT_SIZE + offset);
}
static inline bool m_can_tx_fifo_full(const struct m_can_priv *priv)
{
	return !!(m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQF);
}
static inline void m_can_config_endisable(const struct m_can_priv *priv,
					  bool enable)
{
	u32 cccr = m_can_read(priv, M_CAN_CCCR);
	u32 timeout = 10;
	u32 val = 0;

	if (enable) {
		/* enable m_can configuration */
		m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT);
		udelay(5);
		/* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
		m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
	} else {
		m_can_write(priv, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
	}

	/* there's a delay for module initialization */
	if (enable)
		val = CCCR_INIT | CCCR_CCE;

	while ((m_can_read(priv, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
		if (timeout == 0) {
			netdev_warn(priv->dev, "Failed to init module\n");
			return;
		}
		timeout--;
		udelay(1);
	}
}
static inline void m_can_enable_all_interrupts(const struct m_can_priv *priv)
{
	/* Only interrupt line 0 is used in this driver */
	m_can_write(priv, M_CAN_ILE, ILE_EINT0);
}
static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv)
{
	m_can_write(priv, M_CAN_ILE, 0x0);
}
static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
{
	struct net_device_stats *stats = &dev->stats;
	struct m_can_priv *priv = netdev_priv(dev);
	struct canfd_frame *cf;
	struct sk_buff *skb;
	u32 id, fgi, dlc;
	int i;

	/* calculate the fifo get index for where to read data */
	fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_SHIFT;
	dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC);
	if (dlc & RX_BUF_FDF)
		skb = alloc_canfd_skb(dev, &cf);
	else
		skb = alloc_can_skb(dev, (struct can_frame **)&cf);
	if (!skb) {
		stats->rx_dropped++;
		return;
	}

	if (dlc & RX_BUF_FDF)
		cf->len = can_dlc2len((dlc >> 16) & 0x0F);
	else
		cf->len = get_can_dlc((dlc >> 16) & 0x0F);

	id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID);
	if (id & RX_BUF_XTD)
		cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (id >> 18) & CAN_SFF_MASK;

	if (id & RX_BUF_ESI) {
		cf->flags |= CANFD_ESI;
		netdev_dbg(dev, "ESI Error\n");
	}

	if (!(dlc & RX_BUF_FDF) && (id & RX_BUF_RTR)) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		if (dlc & RX_BUF_BRS)
			cf->flags |= CANFD_BRS;

		for (i = 0; i < cf->len; i += 4)
			*(u32 *)(cf->data + i) =
				m_can_fifo_read(priv, fgi,
						M_CAN_FIFO_DATA(i / 4));
	}

	/* acknowledge rx fifo 0 */
	m_can_write(priv, M_CAN_RXF0A, fgi);

	stats->rx_packets++;
	stats->rx_bytes += cf->len;

	netif_receive_skb(skb);
}
static int m_can_do_rx_poll(struct net_device *dev, int quota)
{
	struct m_can_priv *priv = netdev_priv(dev);
	u32 pkts = 0;
	u32 rxfs;

	rxfs = m_can_read(priv, M_CAN_RXF0S);
	if (!(rxfs & RXFS_FFL_MASK)) {
		netdev_dbg(dev, "no messages in fifo0\n");
		return 0;
	}

	while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
		if (rxfs & RXFS_RFL)
			netdev_warn(dev, "Rx FIFO 0 Message Lost\n");

		m_can_read_fifo(dev, rxfs);

		quota--;
		pkts++;
		rxfs = m_can_read(priv, M_CAN_RXF0S);
	}

	if (pkts)
		can_led_event(dev, CAN_LED_EVENT_RX);

	return pkts;
}
static int m_can_handle_lost_msg(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	struct can_frame *frame;

	netdev_err(dev, "msg lost in rxf0\n");

	stats->rx_errors++;
	stats->rx_over_errors++;

	skb = alloc_can_err_skb(dev, &frame);
	if (unlikely(!skb))
		return 0;

	frame->can_id |= CAN_ERR_CRTL;
	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	netif_receive_skb(skb);

	return 1;
}
static int m_can_handle_lec_err(struct net_device *dev,
				enum m_can_lec_type lec_type)
{
	struct m_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	priv->can.can_stats.bus_error++;
	stats->rx_errors++;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	/* check for 'last error code' which tells us the
	 * type of the last error to occur on the CAN bus
	 */
	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

	switch (lec_type) {
	case LEC_STUFF_ERROR:
		netdev_dbg(dev, "stuff error\n");
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		break;
	case LEC_FORM_ERROR:
		netdev_dbg(dev, "form error\n");
		cf->data[2] |= CAN_ERR_PROT_FORM;
		break;
	case LEC_ACK_ERROR:
		netdev_dbg(dev, "ack error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
		break;
	case LEC_BIT1_ERROR:
		netdev_dbg(dev, "bit1 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		break;
	case LEC_BIT0_ERROR:
		netdev_dbg(dev, "bit0 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		break;
	case LEC_CRC_ERROR:
		netdev_dbg(dev, "CRC error\n");
		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
		break;
	default:
		break;
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);

	return 1;
}
static int __m_can_get_berr_counter(const struct net_device *dev,
				    struct can_berr_counter *bec)
{
	struct m_can_priv *priv = netdev_priv(dev);
	unsigned int ecr;

	ecr = m_can_read(priv, M_CAN_ECR);
	bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;
	bec->txerr = (ecr & ECR_TEC_MASK) >> ECR_TEC_SHIFT;

	return 0;
}
static int m_can_clk_start(struct m_can_priv *priv)
{
	int err;

	err = pm_runtime_get_sync(priv->device);
	if (err < 0) {
		pm_runtime_put_noidle(priv->device);
		return err;
	}

	return 0;
}
static void m_can_clk_stop(struct m_can_priv *priv)
{
	pm_runtime_put_sync(priv->device);
}
static int m_can_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
{
	struct m_can_priv *priv = netdev_priv(dev);
	int err;

	err = m_can_clk_start(priv);
	if (err)
		return err;

	__m_can_get_berr_counter(dev, bec);

	m_can_clk_stop(priv);

	return 0;
}
static int m_can_handle_state_change(struct net_device *dev,
				     enum can_state new_state)
{
	struct m_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	struct can_berr_counter bec;
	unsigned int ecr;

	switch (new_state) {
	case CAN_STATE_ERROR_ACTIVE:
		/* error warning state */
		priv->can.can_stats.error_warning++;
		priv->can.state = CAN_STATE_ERROR_WARNING;
		break;
	case CAN_STATE_ERROR_PASSIVE:
		/* error passive state */
		priv->can.can_stats.error_passive++;
		priv->can.state = CAN_STATE_ERROR_PASSIVE;
		break;
	case CAN_STATE_BUS_OFF:
		/* bus-off state */
		priv->can.state = CAN_STATE_BUS_OFF;
		m_can_disable_all_interrupts(priv);
		priv->can.can_stats.bus_off++;
		can_bus_off(dev);
		break;
	default:
		break;
	}

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	__m_can_get_berr_counter(dev, &bec);

	switch (new_state) {
	case CAN_STATE_ERROR_ACTIVE:
		/* error warning state */
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = (bec.txerr > bec.rxerr) ?
			CAN_ERR_CRTL_TX_WARNING :
			CAN_ERR_CRTL_RX_WARNING;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case CAN_STATE_ERROR_PASSIVE:
		/* error passive state */
		cf->can_id |= CAN_ERR_CRTL;
		ecr = m_can_read(priv, M_CAN_ECR);
		if (ecr & ECR_RP)
			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		if (bec.txerr > 127)
			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
		cf->data[6] = bec.txerr;
		cf->data[7] = bec.rxerr;
		break;
	case CAN_STATE_BUS_OFF:
		/* bus-off state */
		cf->can_id |= CAN_ERR_BUSOFF;
		break;
	default:
		break;
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);

	return 1;
}
static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
{
	struct m_can_priv *priv = netdev_priv(dev);
	int work_done = 0;

	if ((psr & PSR_EW) &&
	    (priv->can.state != CAN_STATE_ERROR_WARNING)) {
		netdev_dbg(dev, "entered error warning state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_ERROR_WARNING);
	}

	if ((psr & PSR_EP) &&
	    (priv->can.state != CAN_STATE_ERROR_PASSIVE)) {
		netdev_dbg(dev, "entered error passive state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_ERROR_PASSIVE);
	}

	if ((psr & PSR_BO) &&
	    (priv->can.state != CAN_STATE_BUS_OFF)) {
		netdev_dbg(dev, "entered error bus off state\n");
		work_done += m_can_handle_state_change(dev,
						       CAN_STATE_BUS_OFF);
	}

	return work_done;
}
static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
{
	if (irqstatus & IR_WDI)
		netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
	if (irqstatus & IR_ELO)
		netdev_err(dev, "Error Logging Overflow\n");
	if (irqstatus & IR_BEU)
		netdev_err(dev, "Bit Error Uncorrected\n");
	if (irqstatus & IR_BEC)
		netdev_err(dev, "Bit Error Corrected\n");
	if (irqstatus & IR_TOO)
		netdev_err(dev, "Timeout reached\n");
	if (irqstatus & IR_MRAF)
		netdev_err(dev, "Message RAM access failure occurred\n");
}
static inline bool is_lec_err(u32 psr)
{
	psr &= LEC_UNUSED;

	return psr && (psr != LEC_UNUSED);
}
static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
				   u32 psr)
{
	struct m_can_priv *priv = netdev_priv(dev);
	int work_done = 0;

	if (irqstatus & IR_RF0L)
		work_done += m_can_handle_lost_msg(dev);

	/* handle lec errors on the bus */
	if ((priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
	    is_lec_err(psr))
		work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED);

	/* other unprocessed error interrupts */
	m_can_handle_other_err(dev, irqstatus);

	return work_done;
}
static int m_can_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	struct m_can_priv *priv = netdev_priv(dev);
	int work_done = 0;
	u32 irqstatus, psr;

	irqstatus = priv->irqstatus | m_can_read(priv, M_CAN_IR);
	if (!irqstatus)
		goto end;

	psr = m_can_read(priv, M_CAN_PSR);
	if (irqstatus & IR_ERR_STATE)
		work_done += m_can_handle_state_errors(dev, psr);

	if (irqstatus & IR_ERR_BUS_30X)
		work_done += m_can_handle_bus_errors(dev, irqstatus, psr);

	if (irqstatus & IR_RF0N)
		work_done += m_can_do_rx_poll(dev, (quota - work_done));

	if (work_done < quota) {
		napi_complete_done(napi, work_done);
		m_can_enable_all_interrupts(priv);
	}

end:
	return work_done;
}
static void m_can_echo_tx_event(struct net_device *dev)
{
	u32 txe_count = 0;
	u32 m_can_txefs;
	u32 fgi = 0;
	int i = 0;
	unsigned int msg_mark;

	struct m_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;

	/* read tx event fifo status */
	m_can_txefs = m_can_read(priv, M_CAN_TXEFS);

	/* Get Tx Event fifo element count */
	txe_count = (m_can_txefs & TXEFS_EFFL_MASK)
			>> TXEFS_EFFL_SHIFT;

	/* Get and process all sent elements */
	for (i = 0; i < txe_count; i++) {
		/* retrieve get index */
		fgi = (m_can_read(priv, M_CAN_TXEFS) & TXEFS_EFGI_MASK)
			>> TXEFS_EFGI_SHIFT;

		/* get message marker */
		msg_mark = (m_can_txe_fifo_read(priv, fgi, 4) &
			    TX_EVENT_MM_MASK) >> TX_EVENT_MM_SHIFT;

		/* ack txe element */
		m_can_write(priv, M_CAN_TXEFA, (TXEFA_EFAI_MASK &
						(fgi << TXEFA_EFAI_SHIFT)));

		/* update stats */
		stats->tx_bytes += can_get_echo_skb(dev, msg_mark);
		stats->tx_packets++;
	}
}
static irqreturn_t m_can_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct m_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	u32 ir;

	ir = m_can_read(priv, M_CAN_IR);
	if (!ir)
		return IRQ_NONE;

	/* ACK all irqs */
	if (ir & IR_ALL_INT)
		m_can_write(priv, M_CAN_IR, ir);

	/* schedule NAPI in case of
	 * - rx IRQ
	 * - state change IRQ
	 * - bus error IRQ and bus error reporting
	 */
	if ((ir & IR_RF0N) || (ir & IR_ERR_ALL_30X)) {
		priv->irqstatus = ir;
		m_can_disable_all_interrupts(priv);
		napi_schedule(&priv->napi);
	}

	if (priv->version == 30) {
		if (ir & IR_TC) {
			/* Transmission Complete Interrupt */
			stats->tx_bytes += can_get_echo_skb(dev, 0);
			stats->tx_packets++;
			can_led_event(dev, CAN_LED_EVENT_TX);
			netif_wake_queue(dev);
		}
	} else {
		if (ir & IR_TEFN) {
			/* New TX FIFO Element arrived */
			m_can_echo_tx_event(dev);
			can_led_event(dev, CAN_LED_EVENT_TX);
			if (netif_queue_stopped(dev) &&
			    !m_can_tx_fifo_full(priv))
				netif_wake_queue(dev);
		}
	}

	return IRQ_HANDLED;
}
static const struct can_bittiming_const m_can_bittiming_const_30X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 64,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 1024,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_data_bittiming_const_30X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 16,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 32,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_bittiming_const_31X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 256,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 128,
	.sjw_max = 128,
	.brp_min = 1,
	.brp_max = 512,
	.brp_inc = 1,
};

static const struct can_bittiming_const m_can_data_bittiming_const_31X = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 1,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 32,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 32,
	.brp_inc = 1,
};
static int m_can_set_bittiming(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);
	const struct can_bittiming *bt = &priv->can.bittiming;
	const struct can_bittiming *dbt = &priv->can.data_bittiming;
	u16 brp, sjw, tseg1, tseg2;
	u32 reg_btp;

	brp = bt->brp - 1;
	sjw = bt->sjw - 1;
	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
	tseg2 = bt->phase_seg2 - 1;
	reg_btp = (brp << NBTP_NBRP_SHIFT) | (sjw << NBTP_NSJW_SHIFT) |
		  (tseg1 << NBTP_NTSEG1_SHIFT) | (tseg2 << NBTP_NTSEG2_SHIFT);
	m_can_write(priv, M_CAN_NBTP, reg_btp);

	if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
		reg_btp = 0;
		brp = dbt->brp - 1;
		sjw = dbt->sjw - 1;
		tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
		tseg2 = dbt->phase_seg2 - 1;

		/* TDC is only needed for bitrates beyond 2.5 MBit/s.
		 * This is mentioned in the "Bit Time Requirements for CAN FD"
		 * paper presented at the International CAN Conference 2013
		 */
		if (dbt->bitrate > 2500000) {
			u32 tdco, ssp;

			/* Use the same value of secondary sampling point
			 * as the data sampling point
			 */
			ssp = dbt->sample_point;

			/* Equation based on Bosch's M_CAN User Manual's
			 * Transmitter Delay Compensation Section
			 */
			tdco = (priv->can.clock.freq / 1000) *
			       ssp / dbt->bitrate;

			/* Max valid TDCO value is 127 */
			if (tdco > 127) {
				netdev_warn(dev, "TDCO value of %u is beyond maximum. Using maximum possible value\n",
					    tdco);
				tdco = 127;
			}

			reg_btp |= DBTP_TDC;
			m_can_write(priv, M_CAN_TDCR,
				    tdco << TDCR_TDCO_SHIFT);
		}

		reg_btp |= (brp << DBTP_DBRP_SHIFT) |
			   (sjw << DBTP_DSJW_SHIFT) |
			   (tseg1 << DBTP_DTSEG1_SHIFT) |
			   (tseg2 << DBTP_DTSEG2_SHIFT);

		m_can_write(priv, M_CAN_DBTP, reg_btp);
	}

	return 0;
}
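/* Worked example for the TDC setup above (illustrative numbers only): with a
 * 40 MHz CAN clock, a 4 Mbit/s data bitrate and a secondary sample point of
 * 75.0% (sample points are expressed in tenths of a percent, so ssp = 750):
 *
 *	tdco = (40000000 / 1000) * 750 / 4000000 = 7
 *
 * i.e. a transmitter delay compensation offset of 7 CAN clock periods is
 * written to TDCR.
 */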
/* Configure M_CAN chip:
 * - set rx buffer/fifo element size
 * - configure rx fifo
 * - accept non-matching frame into fifo 0
 * - configure tx buffer
 *		- >= v3.1.x: TX FIFO is used
 * - configure mode
 * - setup bittiming
 */
static void m_can_chip_config(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);
	u32 cccr, test;

	m_can_config_endisable(priv, true);

	/* RX Buffer/FIFO Element Size 64 bytes data field */
	m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_64BYTES);

	/* Accept Non-matching Frames Into FIFO 0 */
	m_can_write(priv, M_CAN_GFC, 0x0);

	if (priv->version == 30) {
		/* only support one Tx Buffer currently */
		m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_SHIFT) |
				priv->mcfg[MRAM_TXB].off);
	} else {
		/* TX FIFO is used for newer IP Core versions */
		m_can_write(priv, M_CAN_TXBC,
			    (priv->mcfg[MRAM_TXB].num << TXBC_TFQS_SHIFT) |
			    (priv->mcfg[MRAM_TXB].off));
	}

	/* support 64 bytes payload */
	m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES);

	/* TX Event FIFO */
	if (priv->version == 30) {
		m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_SHIFT) |
				priv->mcfg[MRAM_TXE].off);
	} else {
		/* Full TX Event FIFO is used */
		m_can_write(priv, M_CAN_TXEFC,
			    ((priv->mcfg[MRAM_TXE].num << TXEFC_EFS_SHIFT)
			     & TXEFC_EFS_MASK) |
			    priv->mcfg[MRAM_TXE].off);
	}

	/* rx fifo configuration, blocking mode, fifo size 1 */
	m_can_write(priv, M_CAN_RXF0C,
		    (priv->mcfg[MRAM_RXF0].num << RXFC_FS_SHIFT) |
		    priv->mcfg[MRAM_RXF0].off);

	m_can_write(priv, M_CAN_RXF1C,
		    (priv->mcfg[MRAM_RXF1].num << RXFC_FS_SHIFT) |
		    priv->mcfg[MRAM_RXF1].off);

	cccr = m_can_read(priv, M_CAN_CCCR);
	test = m_can_read(priv, M_CAN_TEST);
	test &= ~TEST_LBCK;
	if (priv->version == 30) {
		/* Version 3.0.x */

		cccr &= ~(CCCR_TEST | CCCR_MON |
			  (CCCR_CMR_MASK << CCCR_CMR_SHIFT) |
			  (CCCR_CME_MASK << CCCR_CME_SHIFT));

		if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
			cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT;

	} else {
		/* Version 3.1.x or 3.2.x */
		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE);

		/* Only 3.2.x has NISO Bit implemented */
		if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			cccr |= CCCR_NISO;

		if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
			cccr |= (CCCR_BRSE | CCCR_FDOE);
	}

	/* Loopback Mode */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
		cccr |= CCCR_TEST | CCCR_MON;
		test |= TEST_LBCK;
	}

	/* Enable Monitoring (all versions) */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		cccr |= CCCR_MON;

	/* Write config */
	m_can_write(priv, M_CAN_CCCR, cccr);
	m_can_write(priv, M_CAN_TEST, test);

	/* Enable interrupts */
	m_can_write(priv, M_CAN_IR, IR_ALL_INT);
	if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
		if (priv->version == 30)
			m_can_write(priv, M_CAN_IE, IR_ALL_INT &
				    ~(IR_ERR_LEC_30X));
		else
			m_can_write(priv, M_CAN_IE, IR_ALL_INT &
				    ~(IR_ERR_LEC_31X));
	else
		m_can_write(priv, M_CAN_IE, IR_ALL_INT);

	/* route all interrupts to INT0 */
	m_can_write(priv, M_CAN_ILS, ILS_ALL_INT0);

	/* set bittiming params */
	m_can_set_bittiming(dev);

	m_can_config_endisable(priv, false);
}
static void m_can_start(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);

	/* basic m_can configuration */
	m_can_chip_config(dev);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	m_can_enable_all_interrupts(priv);
}
static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
{
	switch (mode) {
	case CAN_MODE_START:
		m_can_start(dev);
		netif_wake_queue(dev);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
/* Checks core release number of M_CAN
 * returns 0 if an unsupported device is detected
 * else it returns the release and step coded as:
 * return value = 10 * <release> + 1 * <step>
 */
static int m_can_check_core_release(void __iomem *m_can_base)
{
	u32 crel_reg;
	u8 rel;
	u8 step;
	int res;
	struct m_can_priv temp_priv = {
		.base = m_can_base
	};

	/* Read Core Release Version and split into version number
	 * Example: Version 3.2.1 => rel = 3; step = 2; substep = 1;
	 */
	crel_reg = m_can_read(&temp_priv, M_CAN_CREL);
	rel = (u8)((crel_reg & CREL_REL_MASK) >> CREL_REL_SHIFT);
	step = (u8)((crel_reg & CREL_STEP_MASK) >> CREL_STEP_SHIFT);

	if (rel == 3) {
		/* M_CAN v3.x.y: create return value */
		res = 30 + step;
	} else {
		/* Unsupported M_CAN version */
		res = 0;
	}

	return res;
}
/* Selectable Non ISO support only in version 3.2.x
 * This function checks if the bit is writable.
 */
static bool m_can_niso_supported(const struct m_can_priv *priv)
{
	u32 cccr_reg, cccr_poll;
	int niso_timeout;

	m_can_config_endisable(priv, true);
	cccr_reg = m_can_read(priv, M_CAN_CCCR);
	cccr_reg |= CCCR_NISO;
	m_can_write(priv, M_CAN_CCCR, cccr_reg);

	niso_timeout = readl_poll_timeout((priv->base + M_CAN_CCCR), cccr_poll,
					  (cccr_poll == cccr_reg), 0, 10);

	/* Clear NISO */
	cccr_reg &= ~(CCCR_NISO);
	m_can_write(priv, M_CAN_CCCR, cccr_reg);

	m_can_config_endisable(priv, false);

	/* return false if time out (-ETIMEDOUT), else return true */
	return !niso_timeout;
}
static int m_can_dev_setup(struct platform_device *pdev, struct net_device *dev,
			   void __iomem *addr)
{
	struct m_can_priv *priv;
	int m_can_version;

	m_can_version = m_can_check_core_release(addr);
	/* return if unsupported version */
	if (!m_can_version) {
		dev_err(&pdev->dev, "Unsupported version number: %2d",
			m_can_version);
		return -EINVAL;
	}

	priv = netdev_priv(dev);
	netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT);

	/* Shared properties of all M_CAN versions */
	priv->version = m_can_version;
	priv->dev = dev;
	priv->base = addr;
	priv->can.do_set_mode = m_can_set_mode;
	priv->can.do_get_berr_counter = m_can_get_berr_counter;

	/* Set M_CAN supported operations */
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
					CAN_CTRLMODE_LISTENONLY |
					CAN_CTRLMODE_BERR_REPORTING |
					CAN_CTRLMODE_FD;

	/* Set properties depending on M_CAN version */
	switch (priv->version) {
	case 30:
		/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.x */
		can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
		priv->can.bittiming_const = &m_can_bittiming_const_30X;
		priv->can.data_bittiming_const =
				&m_can_data_bittiming_const_30X;
		break;
	case 31:
		/* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.1.x */
		can_set_static_ctrlmode(dev, CAN_CTRLMODE_FD_NON_ISO);
		priv->can.bittiming_const = &m_can_bittiming_const_31X;
		priv->can.data_bittiming_const =
				&m_can_data_bittiming_const_31X;
		break;
	case 32:
		priv->can.bittiming_const = &m_can_bittiming_const_31X;
		priv->can.data_bittiming_const =
				&m_can_data_bittiming_const_31X;
		priv->can.ctrlmode_supported |= (m_can_niso_supported(priv)
						? CAN_CTRLMODE_FD_NON_ISO
						: 0);
		break;
	default:
		dev_err(&pdev->dev, "Unsupported version number: %2d",
			priv->version);
		return -EINVAL;
	}

	return 0;
}
static int m_can_open(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);
	int err;

	err = m_can_clk_start(priv);
	if (err)
		return err;

	/* open the can device */
	err = open_candev(dev);
	if (err) {
		netdev_err(dev, "failed to open can device\n");
		goto exit_disable_clks;
	}

	/* register interrupt handler */
	err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
			  dev);
	if (err < 0) {
		netdev_err(dev, "failed to request interrupt\n");
		goto exit_irq_fail;
	}

	/* start the m_can controller */
	m_can_start(dev);

	can_led_event(dev, CAN_LED_EVENT_OPEN);
	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

exit_irq_fail:
	close_candev(dev);
exit_disable_clks:
	m_can_clk_stop(priv);
	return err;
}
static void m_can_stop(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);

	/* disable all interrupts */
	m_can_disable_all_interrupts(priv);

	/* set the state as STOPPED */
	priv->can.state = CAN_STATE_STOPPED;
}
static int m_can_close(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	m_can_stop(dev);
	m_can_clk_stop(priv);
	free_irq(dev->irq, dev);
	close_candev(dev);
	can_led_event(dev, CAN_LED_EVENT_STOP);

	return 0;
}
static int m_can_next_echo_skb_occupied(struct net_device *dev, int putidx)
{
	struct m_can_priv *priv = netdev_priv(dev);
	/* get wrap around for loopback skb index */
	unsigned int wrap = priv->can.echo_skb_max;
	int next_idx;

	/* calculate next index */
	next_idx = (++putidx >= wrap ? 0 : putidx);

	/* check if occupied */
	return !!priv->can.echo_skb[next_idx];
}
static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	u32 id, cccr, fdflags;
	int i;
	int putidx;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	/* Generate ID field for TX buffer Element */
	/* Common to all supported M_CAN versions */
	if (cf->can_id & CAN_EFF_FLAG) {
		id = cf->can_id & CAN_EFF_MASK;
		id |= TX_BUF_XTD;
	} else {
		id = ((cf->can_id & CAN_SFF_MASK) << 18);
	}

	if (cf->can_id & CAN_RTR_FLAG)
		id |= TX_BUF_RTR;

	if (priv->version == 30) {
		netif_stop_queue(dev);

		/* message ram configuration */
		m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id);
		m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC,
				 can_len2dlc(cf->len) << 16);

		for (i = 0; i < cf->len; i += 4)
			m_can_fifo_write(priv, 0,
					 M_CAN_FIFO_DATA(i / 4),
					 *(u32 *)(cf->data + i));

		can_put_echo_skb(skb, dev, 0);

		if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
			cccr = m_can_read(priv, M_CAN_CCCR);
			cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT);
			if (can_is_canfd_skb(skb)) {
				if (cf->flags & CANFD_BRS)
					cccr |= CCCR_CMR_CANFD_BRS <<
						CCCR_CMR_SHIFT;
				else
					cccr |= CCCR_CMR_CANFD <<
						CCCR_CMR_SHIFT;
			} else {
				cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT;
			}
			m_can_write(priv, M_CAN_CCCR, cccr);
		}
		m_can_write(priv, M_CAN_TXBTIE, 0x1);
		m_can_write(priv, M_CAN_TXBAR, 0x1);
		/* End of xmit function for version 3.0.x */
	} else {
		/* Transmit routine for version >= v3.1.x */

		/* Check if FIFO full */
		if (m_can_tx_fifo_full(priv)) {
			/* This shouldn't happen */
			netif_stop_queue(dev);
			netdev_warn(dev,
				    "TX queue active although FIFO is full.");
			return NETDEV_TX_BUSY;
		}

		/* get put index for frame */
		putidx = ((m_can_read(priv, M_CAN_TXFQS) & TXFQS_TFQPI_MASK)
				  >> TXFQS_TFQPI_SHIFT);
		/* Write ID Field to FIFO Element */
		m_can_fifo_write(priv, putidx, M_CAN_FIFO_ID, id);

		/* get CAN FD configuration of frame */
		fdflags = 0;
		if (can_is_canfd_skb(skb)) {
			fdflags |= TX_BUF_FDF;
			if (cf->flags & CANFD_BRS)
				fdflags |= TX_BUF_BRS;
		}

		/* Construct DLC Field. Also contains CAN-FD configuration
		 * use put index of fifo as message marker
		 * it is used in TX interrupt for
		 * sending the correct echo frame
		 */
		m_can_fifo_write(priv, putidx, M_CAN_FIFO_DLC,
				 ((putidx << TX_BUF_MM_SHIFT) &
				  TX_BUF_MM_MASK) |
				 (can_len2dlc(cf->len) << 16) |
				 fdflags | TX_BUF_EFC);

		for (i = 0; i < cf->len; i += 4)
			m_can_fifo_write(priv, putidx, M_CAN_FIFO_DATA(i / 4),
					 *(u32 *)(cf->data + i));

		/* Push loopback echo.
		 * Will be looped back on TX interrupt based on message marker
		 */
		can_put_echo_skb(skb, dev, putidx);

		/* Enable TX FIFO element to start transfer */
		m_can_write(priv, M_CAN_TXBAR, (1 << putidx));

		/* stop network queue if fifo full */
		if (m_can_tx_fifo_full(priv) ||
		    m_can_next_echo_skb_occupied(dev, putidx))
			netif_stop_queue(dev);
	}

	return NETDEV_TX_OK;
}
static const struct net_device_ops m_can_netdev_ops = {
	.ndo_open = m_can_open,
	.ndo_stop = m_can_close,
	.ndo_start_xmit = m_can_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
static int register_m_can_dev(struct net_device *dev)
{
	dev->flags |= IFF_ECHO;	/* we support local echo */
	dev->netdev_ops = &m_can_netdev_ops;

	return register_candev(dev);
}
static void m_can_init_ram(struct m_can_priv *priv)
{
	int end, i, start;

	/* initialize the entire Message RAM in use to avoid possible
	 * ECC/parity checksum errors when reading an uninitialized buffer
	 */
	start = priv->mcfg[MRAM_SIDF].off;
	end = priv->mcfg[MRAM_TXB].off +
		priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
	for (i = start; i < end; i += 4)
		writel(0x0, priv->mram_base + i);
}
static void m_can_of_parse_mram(struct m_can_priv *priv,
				const u32 *mram_config_vals)
{
	priv->mcfg[MRAM_SIDF].off = mram_config_vals[0];
	priv->mcfg[MRAM_SIDF].num = mram_config_vals[1];
	priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off +
			priv->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
	priv->mcfg[MRAM_XIDF].num = mram_config_vals[2];
	priv->mcfg[MRAM_RXF0].off = priv->mcfg[MRAM_XIDF].off +
			priv->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
	priv->mcfg[MRAM_RXF0].num = mram_config_vals[3] &
			(RXFC_FS_MASK >> RXFC_FS_SHIFT);
	priv->mcfg[MRAM_RXF1].off = priv->mcfg[MRAM_RXF0].off +
			priv->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
	priv->mcfg[MRAM_RXF1].num = mram_config_vals[4] &
			(RXFC_FS_MASK >> RXFC_FS_SHIFT);
	priv->mcfg[MRAM_RXB].off = priv->mcfg[MRAM_RXF1].off +
			priv->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
	priv->mcfg[MRAM_RXB].num = mram_config_vals[5];
	priv->mcfg[MRAM_TXE].off = priv->mcfg[MRAM_RXB].off +
			priv->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
	priv->mcfg[MRAM_TXE].num = mram_config_vals[6];
	priv->mcfg[MRAM_TXB].off = priv->mcfg[MRAM_TXE].off +
			priv->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
	priv->mcfg[MRAM_TXB].num = mram_config_vals[7] &
			(TXBC_NDTB_MASK >> TXBC_NDTB_SHIFT);

	dev_dbg(priv->device,
		"mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
		priv->mram_base,
		priv->mcfg[MRAM_SIDF].off, priv->mcfg[MRAM_SIDF].num,
		priv->mcfg[MRAM_XIDF].off, priv->mcfg[MRAM_XIDF].num,
		priv->mcfg[MRAM_RXF0].off, priv->mcfg[MRAM_RXF0].num,
		priv->mcfg[MRAM_RXF1].off, priv->mcfg[MRAM_RXF1].num,
		priv->mcfg[MRAM_RXB].off, priv->mcfg[MRAM_RXB].num,
		priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num,
		priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num);

	m_can_init_ram(priv);
}
static int m_can_plat_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct m_can_priv *priv;
	struct resource *res;
	void __iomem *addr;
	void __iomem *mram_addr;
	struct clk *hclk, *cclk;
	int irq, ret;
	struct device_node *np;
	u32 mram_config_vals[MRAM_CFG_LEN];
	u32 tx_fifo_size;

	np = pdev->dev.of_node;

	hclk = devm_clk_get(&pdev->dev, "hclk");
	cclk = devm_clk_get(&pdev->dev, "cclk");

	if (IS_ERR(hclk) || IS_ERR(cclk)) {
		dev_err(&pdev->dev, "no clock found\n");
		ret = -ENODEV;
		goto failed_ret;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
	addr = devm_ioremap_resource(&pdev->dev, res);
	irq = platform_get_irq_byname(pdev, "int0");

	if (IS_ERR(addr) || irq < 0) {
		ret = -EINVAL;
		goto failed_ret;
	}

	/* message ram could be shared */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
	if (!res) {
		ret = -ENODEV;
		goto failed_ret;
	}

	mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!mram_addr) {
		ret = -ENOMEM;
		goto failed_ret;
	}

	/* get message ram configuration */
	ret = of_property_read_u32_array(np, "bosch,mram-cfg",
					 mram_config_vals,
					 sizeof(mram_config_vals) / 4);
	if (ret) {
		dev_err(&pdev->dev, "Could not get Message RAM configuration.");
		goto failed_ret;
	}

	/* Get TX FIFO size
	 * Defines the total amount of echo buffers for loopback
	 */
	tx_fifo_size = mram_config_vals[7];

	/* allocate the m_can device */
	dev = alloc_candev(sizeof(*priv), tx_fifo_size);
	if (!dev) {
		ret = -ENOMEM;
		goto failed_ret;
	}

	priv = netdev_priv(dev);
	dev->irq = irq;
	priv->device = &pdev->dev;
	priv->hclk = hclk;
	priv->cclk = cclk;
	priv->can.clock.freq = clk_get_rate(cclk);
	priv->mram_base = mram_addr;

	m_can_of_parse_mram(priv, mram_config_vals);

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	/* Enable clocks. Necessary to read Core Release in order to determine
	 * M_CAN version
	 */
	pm_runtime_enable(&pdev->dev);
	ret = m_can_clk_start(priv);
	if (ret)
		goto pm_runtime_fail;

	ret = m_can_dev_setup(pdev, dev, addr);
	if (ret)
		goto clk_disable;

	ret = register_m_can_dev(dev);
	if (ret) {
		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
			KBUILD_MODNAME, ret);
		goto clk_disable;
	}

	devm_can_led_init(dev);

	of_can_transceiver(dev);

	dev_info(&pdev->dev, "%s device registered (irq=%d, version=%d)\n",
		 KBUILD_MODNAME, dev->irq, priv->version);

	/* Probe finished
	 * Stop clocks. They will be reactivated once the M_CAN device is opened
	 */
clk_disable:
	m_can_clk_stop(priv);
pm_runtime_fail:
	if (ret) {
		pm_runtime_disable(&pdev->dev);
		free_candev(dev);
	}
failed_ret:
	return ret;
}
/* TODO: runtime PM with power down or sleep mode */

static __maybe_unused int m_can_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct m_can_priv *priv = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netif_stop_queue(ndev);
		netif_device_detach(ndev);
		m_can_stop(ndev);
		m_can_clk_stop(priv);
	}

	priv->can.state = CAN_STATE_SLEEPING;

	return 0;
}
static __maybe_unused int m_can_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct m_can_priv *priv = netdev_priv(ndev);

	m_can_init_ram(priv);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	if (netif_running(ndev)) {
		int ret;

		ret = m_can_clk_start(priv);
		if (ret)
			return ret;

		m_can_start(ndev);
		netif_device_attach(ndev);
		netif_start_queue(ndev);
	}

	return 0;
}
static void unregister_m_can_dev(struct net_device *dev)
{
	unregister_candev(dev);
}
static int m_can_plat_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_m_can_dev(dev);

	pm_runtime_disable(&pdev->dev);

	platform_set_drvdata(pdev, NULL);

	free_candev(dev);

	return 0;
}
static int __maybe_unused m_can_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct m_can_priv *priv = netdev_priv(ndev);

	clk_disable_unprepare(priv->cclk);
	clk_disable_unprepare(priv->hclk);

	return 0;
}
static int __maybe_unused m_can_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct m_can_priv *priv = netdev_priv(ndev);
	int err;

	err = clk_prepare_enable(priv->hclk);
	if (err)
		return err;

	err = clk_prepare_enable(priv->cclk);
	if (err)
		clk_disable_unprepare(priv->hclk);

	return err;
}
static const struct dev_pm_ops m_can_pmops = {
	SET_RUNTIME_PM_OPS(m_can_runtime_suspend,
			   m_can_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume)
};
static const struct of_device_id m_can_of_table[] = {
	{ .compatible = "bosch,m_can", .data = NULL },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, m_can_of_table);
static struct platform_driver m_can_plat_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = m_can_of_table,
		.pm = &m_can_pmops,
	},
	.probe = m_can_plat_probe,
	.remove = m_can_plat_remove,
};

module_platform_driver(m_can_plat_driver);
MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");