// SPDX-License-Identifier: GPL-2.0-only
/*
 * CAN bus driver for the (as generic as possible) standalone MSCAN controller.
 *
 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
 * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/io.h>

#include "mscan.h"

static const struct can_bittiming_const mscan_bittiming_const = {
        .name = "mscan",
        .tseg1_min = 4,
        .tseg1_max = 16,
        .tseg2_min = 2,
        .tseg2_max = 8,
        .sjw_max = 4,
        .brp_min = 1,
        .brp_max = 64,
        .brp_inc = 1,
};

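/*
 * CANRFLG reports the receiver and transmitter status as two-bit fields
 * (0 = error active ... 3 = bus off); MSCAN_STATE_RX()/MSCAN_STATE_TX()
 * extract them and this table maps the result onto the socketcan states.
 */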
static enum can_state state_map[] = {
        CAN_STATE_ERROR_ACTIVE,
        CAN_STATE_ERROR_WARNING,
        CAN_STATE_ERROR_PASSIVE,
        CAN_STATE_BUS_OFF
};

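/*
 * Request a controller mode (sleep and/or init, optionally CSWAI): set the
 * request bits in CANCTL0 and busy-wait up to MSCAN_SET_MODE_RETRIES for
 * the matching acknowledge bits in CANCTL1, updating priv->can.state.
 */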
static int mscan_set_mode(struct net_device *dev, u8 mode)
{
        struct mscan_priv *priv = netdev_priv(dev);
        struct mscan_regs __iomem *regs = priv->reg_base;
        int ret = 0;
        int i;
        u8 canctl1;

        if (mode != MSCAN_NORMAL_MODE) {
                if (priv->tx_active) {
                        /* Abort transfers before going to sleep */
                        out_8(&regs->cantarq, priv->tx_active);
                        /* Suppress TX done interrupts */
                        out_8(&regs->cantier, 0);
                }

                canctl1 = in_8(&regs->canctl1);
                if ((mode & MSCAN_SLPRQ) && !(canctl1 & MSCAN_SLPAK)) {
                        setbits8(&regs->canctl0, MSCAN_SLPRQ);
                        for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
                                if (in_8(&regs->canctl1) & MSCAN_SLPAK)
                                        break;
                                udelay(100);
                        }
                        /*
                         * The mscan controller will fail to enter sleep mode
                         * while there is irregular activity on the bus, e.g.
                         * somebody keeps retransmitting. This behavior is
                         * undocumented and seems to differ between the mscan
                         * built into the mpc5200b and the mpc5200. We proceed
                         * in that case, since otherwise the slprq would stay
                         * set and the controller would get stuck. NOTE:
                         * INITRQ or CSWAI will abort any transmit actions
                         * still active.
                         */
                        if (i >= MSCAN_SET_MODE_RETRIES)
                                netdev_dbg(dev,
                                           "device failed to enter sleep mode. "
                                           "We proceed anyhow.\n");
                        else
                                priv->can.state = CAN_STATE_SLEEPING;
                }

                if ((mode & MSCAN_INITRQ) && !(canctl1 & MSCAN_INITAK)) {
                        setbits8(&regs->canctl0, MSCAN_INITRQ);
                        for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
                                if (in_8(&regs->canctl1) & MSCAN_INITAK)
                                        break;
                        }
                        if (i >= MSCAN_SET_MODE_RETRIES)
                                ret = -ENODEV;
                }
                if (!ret)
                        priv->can.state = CAN_STATE_STOPPED;

                if (mode & MSCAN_CSWAI)
                        setbits8(&regs->canctl0, MSCAN_CSWAI);
        } else {
                canctl1 = in_8(&regs->canctl1);
                if (canctl1 & (MSCAN_SLPAK | MSCAN_INITAK)) {
                        clrbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
                        for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
                                canctl1 = in_8(&regs->canctl1);
                                if (!(canctl1 & (MSCAN_INITAK | MSCAN_SLPAK)))
                                        break;
                        }
                        if (i >= MSCAN_SET_MODE_RETRIES)
                                ret = -ENODEV;
                        else
                                priv->can.state = CAN_STATE_ERROR_ACTIVE;
                }
        }
        return ret;
}

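/*
 * (Re)start the controller: reset the TX bookkeeping, clear a pending
 * bus-off hold on MPC5121, switch to normal mode and enable RX interrupts.
 */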
static int mscan_start(struct net_device *dev)
{
        struct mscan_priv *priv = netdev_priv(dev);
        struct mscan_regs __iomem *regs = priv->reg_base;
        u8 canrflg;
        int err;

        out_8(&regs->canrier, 0);

        INIT_LIST_HEAD(&priv->tx_head);
        priv->prev_buf_id = 0;
        priv->cur_pri = 0;
        priv->tx_active = 0;
        priv->shadow_canrier = 0;
        priv->flags = 0;

        if (priv->type == MSCAN_TYPE_MPC5121) {
                /* Clear pending bus-off condition */
                if (in_8(&regs->canmisc) & MSCAN_BOHOLD)
                        out_8(&regs->canmisc, MSCAN_BOHOLD);
        }

        err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
        if (err)
                return err;

        canrflg = in_8(&regs->canrflg);
        priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
        priv->can.state = state_map[max(MSCAN_STATE_RX(canrflg),
                                        MSCAN_STATE_TX(canrflg))];
        out_8(&regs->cantier, 0);

        /* Enable receive interrupts. */
        out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);

        return 0;
}

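/*
 * Restart after bus-off. The MPC5121 variant is released from its held
 * bus-off state via MSCAN_BOHOLD; other variants are re-initialized and
 * started from scratch.
 */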
static int mscan_restart(struct net_device *dev)
{
        struct mscan_priv *priv = netdev_priv(dev);

        if (priv->type == MSCAN_TYPE_MPC5121) {
                struct mscan_regs __iomem *regs = priv->reg_base;

                priv->can.state = CAN_STATE_ERROR_ACTIVE;
                WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
                     "bus-off state expected\n");
                out_8(&regs->canmisc, MSCAN_BOHOLD);
                /* Re-enable receive interrupts. */
                out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
        } else {
                if (priv->can.state <= CAN_STATE_BUS_OFF)
                        mscan_set_mode(dev, MSCAN_INIT_MODE);
                return mscan_start(dev);
        }

        return 0;
}

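/*
 * Queue a frame into one of the three hardware TX buffers. Buffers with a
 * lower id win internal arbitration first, so the local priority register
 * (TBPR, priv->cur_pri) is used to preserve the submission order.
 */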
static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct can_frame *frame = (struct can_frame *)skb->data;
        struct mscan_priv *priv = netdev_priv(dev);
        struct mscan_regs __iomem *regs = priv->reg_base;
        int i, rtr, buf_id;
        u32 can_id;

        if (can_dropped_invalid_skb(dev, skb))
                return NETDEV_TX_OK;

        out_8(&regs->cantier, 0);

        i = ~priv->tx_active & MSCAN_TXE;
        buf_id = ffs(i) - 1;
        switch (hweight8(i)) {
        case 0:
                netif_stop_queue(dev);
                netdev_err(dev, "Tx Ring full when queue awake!\n");
                return NETDEV_TX_BUSY;
        case 1:
                /*
                 * if buf_id < 3, then the current frame will be sent out of
                 * order, since buffers with a lower id have higher priority
                 * (hell..)
                 */
                netif_stop_queue(dev);
                fallthrough;
        case 2:
                if (buf_id < priv->prev_buf_id) {
                        priv->cur_pri++;
                        if (priv->cur_pri == 0xff) {
                                set_bit(F_TX_WAIT_ALL, &priv->flags);
                                netif_stop_queue(dev);
                        }
                        set_bit(F_TX_PROGRESS, &priv->flags);
                }
        }
        priv->prev_buf_id = buf_id;
        out_8(&regs->cantbsel, i);

        rtr = frame->can_id & CAN_RTR_FLAG;

        /* RTR is always the lowest bit of interest, then IDs follow */
        if (frame->can_id & CAN_EFF_FLAG) {
                can_id = (frame->can_id & CAN_EFF_MASK)
                         << (MSCAN_EFF_RTR_SHIFT + 1);
                if (rtr)
                        can_id |= 1 << MSCAN_EFF_RTR_SHIFT;
                out_be16(&regs->tx.idr3_2, can_id);

                can_id >>= 16;
                /* EFF_FLAGS are between the IDs :( */
                can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0)
                         | MSCAN_EFF_FLAGS;
        } else {
                can_id = (frame->can_id & CAN_SFF_MASK)
                         << (MSCAN_SFF_RTR_SHIFT + 1);
                if (rtr)
                        can_id |= 1 << MSCAN_SFF_RTR_SHIFT;
        }
        out_be16(&regs->tx.idr1_0, can_id);

        if (!rtr) {
                void __iomem *data = &regs->tx.dsr1_0;
                u16 *payload = (u16 *)frame->data;

                for (i = 0; i < frame->len / 2; i++) {
                        out_be16(data, *payload++);
                        data += 2 + _MSCAN_RESERVED_DSR_SIZE;
                }
                /* write remaining byte if necessary */
                if (frame->len & 1)
                        out_8(data, frame->data[frame->len - 1]);
        }

        out_8(&regs->tx.dlr, frame->len);
        out_8(&regs->tx.tbpr, priv->cur_pri);

        /* Start transmission. */
        out_8(&regs->cantflg, 1 << buf_id);

        if (!test_bit(F_TX_PROGRESS, &priv->flags))
                netif_trans_update(dev);

        list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);

        can_put_echo_skb(skb, dev, buf_id);

        /* Enable interrupt. */
        priv->tx_active |= 1 << buf_id;
        out_8(&regs->cantier, priv->tx_active);

        return NETDEV_TX_OK;
}

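/* Map CANRFLG to a new CAN state if a status change (CSCIF) is pending. */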
static enum can_state get_new_state(struct net_device *dev, u8 canrflg)
{
        struct mscan_priv *priv = netdev_priv(dev);

        if (unlikely(canrflg & MSCAN_CSCIF))
                return state_map[max(MSCAN_STATE_RX(canrflg),
                                     MSCAN_STATE_TX(canrflg))];

        return priv->can.state;
}

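/*
 * Read one frame from the RX foreground buffer: decode the IDR registers
 * (standard or extended layout plus RTR bit), the DLC, and the data segment
 * registers for data frames, then release the buffer by acking MSCAN_RXF.
 */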
static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
{
        struct mscan_priv *priv = netdev_priv(dev);
        struct mscan_regs __iomem *regs = priv->reg_base;
        u32 can_id;
        int i;

        can_id = in_be16(&regs->rx.idr1_0);
        if (can_id & (1 << 3)) {
                frame->can_id = CAN_EFF_FLAG;
                can_id = ((can_id << 16) | in_be16(&regs->rx.idr3_2));
                can_id = ((can_id & 0xffe00000) |
                          ((can_id & 0x7ffff) << 2)) >> 2;
        } else {
                can_id >>= 4;
                frame->can_id = 0;
        }

        frame->can_id |= can_id >> 1;
        if (can_id & 1)
                frame->can_id |= CAN_RTR_FLAG;

        frame->len = can_cc_dlc2len(in_8(&regs->rx.dlr) & 0xf);

        if (!(frame->can_id & CAN_RTR_FLAG)) {
                void __iomem *data = &regs->rx.dsr1_0;
                u16 *payload = (u16 *)frame->data;

                for (i = 0; i < frame->len / 2; i++) {
                        *payload++ = in_be16(data);
                        data += 2 + _MSCAN_RESERVED_DSR_SIZE;
                }
                /* read remaining byte if necessary */
                if (frame->len & 1)
                        frame->data[frame->len - 1] = in_8(data);
        }

        out_8(&regs->canrflg, MSCAN_RXF);
}

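/*
 * Translate CANRFLG error conditions (RX overrun, state changes up to
 * bus-off) into a CAN error frame and update the device state.
 */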
static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
                                u8 canrflg)
{
        struct mscan_priv *priv = netdev_priv(dev);
        struct mscan_regs __iomem *regs = priv->reg_base;
        struct net_device_stats *stats = &dev->stats;
        enum can_state new_state;

        netdev_dbg(dev, "error interrupt (canrflg=%#x)\n", canrflg);
        frame->can_id = CAN_ERR_FLAG;

        if (canrflg & MSCAN_OVRIF) {
                frame->can_id |= CAN_ERR_CRTL;
                frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
                stats->rx_over_errors++;
                stats->rx_errors++;
        } else {
                frame->data[1] = 0;
        }

        new_state = get_new_state(dev, canrflg);
        if (new_state != priv->can.state) {
                can_change_state(dev, frame,
                                 state_map[MSCAN_STATE_TX(canrflg)],
                                 state_map[MSCAN_STATE_RX(canrflg)]);

                if (priv->can.state == CAN_STATE_BUS_OFF) {
                        /*
                         * The MSCAN on the MPC5200 recovers from bus-off
                         * automatically. To avoid that we stop the chip with
                         * a light-weight stop (we are in irq-context).
                         */
                        if (priv->type != MSCAN_TYPE_MPC5121) {
                                out_8(&regs->cantier, 0);
                                out_8(&regs->canrier, 0);
                                setbits8(&regs->canctl0,
                                         MSCAN_SLPRQ | MSCAN_INITRQ);
                        }
                        can_bus_off(dev);
                }
        }
        priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
        frame->len = CAN_ERR_DLC;
        out_8(&regs->canrflg, MSCAN_ERR_IF);
}

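/*
 * NAPI poll handler: drain RX and error events up to the quota, then
 * restore the receive interrupt mask that was saved by the ISR.
 */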
static int mscan_rx_poll(struct napi_struct *napi, int quota)
{
        struct mscan_priv *priv = container_of(napi, struct mscan_priv, napi);
        struct net_device *dev = napi->dev;
        struct mscan_regs __iomem *regs = priv->reg_base;
        struct net_device_stats *stats = &dev->stats;
        int work_done = 0;
        struct sk_buff *skb;
        struct can_frame *frame;
        u8 canrflg;

        while (work_done < quota) {
                canrflg = in_8(&regs->canrflg);
                if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF)))
                        break;

                skb = alloc_can_skb(dev, &frame);
                if (!skb) {
                        if (printk_ratelimit())
                                netdev_notice(dev, "packet dropped\n");
                        stats->rx_dropped++;
                        out_8(&regs->canrflg, canrflg);
                        continue;
                }

                if (canrflg & MSCAN_RXF)
                        mscan_get_rx_frame(dev, frame);
                else if (canrflg & MSCAN_ERR_IF)
                        mscan_get_err_frame(dev, frame, canrflg);

                stats->rx_packets++;
                stats->rx_bytes += frame->len;
                work_done++;
                netif_receive_skb(skb);
        }

        if (work_done < quota) {
                if (likely(napi_complete_done(&priv->napi, work_done))) {
                        clear_bit(F_RX_PROGRESS, &priv->flags);
                        if (priv->can.state < CAN_STATE_BUS_OFF)
                                out_8(&regs->canrier, priv->shadow_canrier);
                }
        }
        return work_done;
}

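/*
 * Interrupt handler: complete finished TX buffers (echo skb, statistics,
 * queue wakeup) and defer RX/error handling to NAPI with the receive
 * interrupts masked until the poll routine has run.
 */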
static irqreturn_t mscan_isr(int irq, void *dev_id)
{
        struct net_device *dev = (struct net_device *)dev_id;
        struct mscan_priv *priv = netdev_priv(dev);
        struct mscan_regs __iomem *regs = priv->reg_base;
        struct net_device_stats *stats = &dev->stats;
        u8 cantier, cantflg, canrflg;
        irqreturn_t ret = IRQ_NONE;

        cantier = in_8(&regs->cantier) & MSCAN_TXE;
        cantflg = in_8(&regs->cantflg) & cantier;

        if (cantier && cantflg) {
                struct list_head *tmp, *pos;

                list_for_each_safe(pos, tmp, &priv->tx_head) {
                        struct tx_queue_entry *entry =
                            list_entry(pos, struct tx_queue_entry, list);
                        u8 mask = entry->mask;

                        if (!(cantflg & mask))
                                continue;

                        out_8(&regs->cantbsel, mask);
                        stats->tx_bytes += in_8(&regs->tx.dlr);
                        stats->tx_packets++;
                        can_get_echo_skb(dev, entry->id);
                        priv->tx_active &= ~mask;
                        list_del(pos);
                }

                if (list_empty(&priv->tx_head)) {
                        clear_bit(F_TX_WAIT_ALL, &priv->flags);
                        clear_bit(F_TX_PROGRESS, &priv->flags);
                        priv->cur_pri = 0;
                } else {
                        netif_trans_update(dev);
                }

                if (!test_bit(F_TX_WAIT_ALL, &priv->flags))
                        netif_wake_queue(dev);

                out_8(&regs->cantier, priv->tx_active);
                ret = IRQ_HANDLED;
        }

        canrflg = in_8(&regs->canrflg);
        if ((canrflg & ~MSCAN_STAT_MSK) &&
            !test_and_set_bit(F_RX_PROGRESS, &priv->flags)) {
                if (canrflg & ~MSCAN_STAT_MSK) {
                        priv->shadow_canrier = in_8(&regs->canrier);
                        out_8(&regs->canrier, 0);
                        napi_schedule(&priv->napi);
                        ret = IRQ_HANDLED;
                } else {
                        clear_bit(F_RX_PROGRESS, &priv->flags);
                }
        }
        return ret;
}

static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
{
        int ret = 0;

        switch (mode) {
        case CAN_MODE_START:
                ret = mscan_restart(dev);
                if (ret)
                        break;
                if (netif_queue_stopped(dev))
                        netif_wake_queue(dev);
                break;

        default:
                ret = -EOPNOTSUPP;
                break;
        }
        return ret;
}

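/*
 * Program the bit timing: BTR0 carries BRP and SJW, BTR1 carries TSEG1,
 * TSEG2 and the triple-sampling bit.
 */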
static int mscan_do_set_bittiming(struct net_device *dev)
{
        struct mscan_priv *priv = netdev_priv(dev);
        struct mscan_regs __iomem *regs = priv->reg_base;
        struct can_bittiming *bt = &priv->can.bittiming;
        u8 btr0, btr1;

        btr0 = BTR0_SET_BRP(bt->brp) | BTR0_SET_SJW(bt->sjw);
        btr1 = (BTR1_SET_TSEG1(bt->prop_seg + bt->phase_seg1) |
                BTR1_SET_TSEG2(bt->phase_seg2) |
                BTR1_SET_SAM(priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES));

        netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);

        out_8(&regs->canbtr0, btr0);
        out_8(&regs->canbtr1, btr1);

        return 0;
}

static int mscan_get_berr_counter(const struct net_device *dev,
                                  struct can_berr_counter *bec)
{
        struct mscan_priv *priv = netdev_priv(dev);
        struct mscan_regs __iomem *regs = priv->reg_base;

        bec->txerr = in_8(&regs->cantxerr);
        bec->rxerr = in_8(&regs->canrxerr);

        return 0;
}

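/*
 * ndo_open: enable the clocks, request the IRQ, apply the listen-only
 * setting and start the chip.
 */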
static int mscan_open(struct net_device *dev)
{
        int ret;
        struct mscan_priv *priv = netdev_priv(dev);
        struct mscan_regs __iomem *regs = priv->reg_base;

        ret = clk_prepare_enable(priv->clk_ipg);
        if (ret)
                goto exit_retcode;
        ret = clk_prepare_enable(priv->clk_can);
        if (ret)
                goto exit_dis_ipg_clock;

        /* common open */
        ret = open_candev(dev);
        if (ret)
                goto exit_dis_can_clock;

        napi_enable(&priv->napi);

        ret = request_irq(dev->irq, mscan_isr, 0, dev->name, dev);
        if (ret < 0) {
                netdev_err(dev, "failed to attach interrupt\n");
                goto exit_napi_disable;
        }

        if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
                setbits8(&regs->canctl1, MSCAN_LISTEN);
        else
                clrbits8(&regs->canctl1, MSCAN_LISTEN);

        ret = mscan_start(dev);
        if (ret)
                goto exit_free_irq;

        netif_start_queue(dev);

        return 0;

exit_free_irq:
        free_irq(dev->irq, dev);
exit_napi_disable:
        napi_disable(&priv->napi);
        close_candev(dev);
exit_dis_can_clock:
        clk_disable_unprepare(priv->clk_can);
exit_dis_ipg_clock:
        clk_disable_unprepare(priv->clk_ipg);
exit_retcode:
        return ret;
}

static int mscan_close(struct net_device *dev)
{
        struct mscan_priv *priv = netdev_priv(dev);
        struct mscan_regs __iomem *regs = priv->reg_base;

        netif_stop_queue(dev);
        napi_disable(&priv->napi);

        out_8(&regs->cantier, 0);
        out_8(&regs->canrier, 0);
        mscan_set_mode(dev, MSCAN_INIT_MODE);
        close_candev(dev);
        free_irq(dev->irq, dev);

        clk_disable_unprepare(priv->clk_can);
        clk_disable_unprepare(priv->clk_ipg);

        return 0;
}

static const struct net_device_ops mscan_netdev_ops = {
        .ndo_open       = mscan_open,
        .ndo_stop       = mscan_close,
        .ndo_start_xmit = mscan_start_xmit,
        .ndo_change_mtu = can_change_mtu,
};

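/*
 * One-time controller setup before registration: select the clock source,
 * enable the module, open up the acceptance filters and park the chip in
 * init mode until the interface is brought up.
 */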
int register_mscandev(struct net_device *dev, int mscan_clksrc)
{
        struct mscan_priv *priv = netdev_priv(dev);
        struct mscan_regs __iomem *regs = priv->reg_base;
        u8 ctl1;

        ctl1 = in_8(&regs->canctl1);
        if (mscan_clksrc)
                ctl1 |= MSCAN_CLKSRC;
        else
                ctl1 &= ~MSCAN_CLKSRC;

        if (priv->type == MSCAN_TYPE_MPC5121) {
                priv->can.do_get_berr_counter = mscan_get_berr_counter;
                ctl1 |= MSCAN_BORM; /* bus-off recovery upon request */
        }

        ctl1 |= MSCAN_CANE;
        out_8(&regs->canctl1, ctl1);
        udelay(100);

        /* acceptance mask/acceptance code (accept everything) */
        out_be16(&regs->canidar1_0, 0);
        out_be16(&regs->canidar3_2, 0);
        out_be16(&regs->canidar5_4, 0);
        out_be16(&regs->canidar7_6, 0);

        out_be16(&regs->canidmr1_0, 0xffff);
        out_be16(&regs->canidmr3_2, 0xffff);
        out_be16(&regs->canidmr5_4, 0xffff);
        out_be16(&regs->canidmr7_6, 0xffff);
        /* Two 32 bit Acceptance Filters */
        out_8(&regs->canidac, MSCAN_AF_32BIT);

        mscan_set_mode(dev, MSCAN_INIT_MODE);

        return register_candev(dev);
}

void unregister_mscandev(struct net_device *dev)
{
        struct mscan_priv *priv = netdev_priv(dev);
        struct mscan_regs __iomem *regs = priv->reg_base;

        mscan_set_mode(dev, MSCAN_INIT_MODE);
        clrbits8(&regs->canctl1, MSCAN_CANE);
        unregister_candev(dev);
}

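/*
 * Allocate the candev with MSCAN_ECHO_SKB_MAX echo slots and wire up the
 * driver callbacks.
 */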
struct net_device *alloc_mscandev(void)
{
        struct net_device *dev;
        struct mscan_priv *priv;
        int i;

        dev = alloc_candev(sizeof(struct mscan_priv), MSCAN_ECHO_SKB_MAX);
        if (!dev)
                return NULL;
        priv = netdev_priv(dev);

        dev->netdev_ops = &mscan_netdev_ops;

        dev->flags |= IFF_ECHO; /* we support local echo */

        netif_napi_add(dev, &priv->napi, mscan_rx_poll, 8);

        priv->can.bittiming_const = &mscan_bittiming_const;
        priv->can.do_set_bittiming = mscan_do_set_bittiming;
        priv->can.do_set_mode = mscan_do_set_mode;
        priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
                CAN_CTRLMODE_LISTENONLY;

        for (i = 0; i < TX_QUEUE_SIZE; i++) {
                priv->tx_queue[i].id = i;
                priv->tx_queue[i].mask = 1 << i;
        }

        return dev;
}

MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN port driver for MSCAN based chips");