// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh
 * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh
 */

#include "esdacc.h"

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/ktime.h>

/* esdACC ID register layout */
#define ACC_ID_ID_MASK		GENMASK(28, 0)
#define ACC_ID_EFF_FLAG		BIT(29)

/* esdACC DLC register layout */
#define ACC_DLC_DLC_MASK	GENMASK(3, 0)
#define ACC_DLC_RTR_FLAG	BIT(4)
#define ACC_DLC_SSTX_FLAG	BIT(24)		/* Single Shot TX */

/* esdACC DLC in struct acc_bmmsg_rxtxdone::acc_dlc.len only! */
#define ACC_DLC_TXD_FLAG	BIT(5)

/* ecc value of esdACC equals SJA1000's ECC register */
#define ACC_ECC_SEG		0x1f
#define ACC_ECC_DIR		0x20
#define ACC_ECC_BIT		0x00
#define ACC_ECC_FORM		0x40
#define ACC_ECC_STUFF		0x80
#define ACC_ECC_MASK		0xc0

/* esdACC Status Register bits. Unused bits not documented. */
#define ACC_REG_STATUS_MASK_STATUS_ES	BIT(17)
#define ACC_REG_STATUS_MASK_STATUS_EP	BIT(18)
#define ACC_REG_STATUS_MASK_STATUS_BS	BIT(19)

/* esdACC Overview Module BM_IRQ_Mask register related defines */
/* Two bit wide command masks to mask or unmask a single core IRQ */
#define ACC_BM_IRQ_UNMASK	BIT(0)
#define ACC_BM_IRQ_MASK		(ACC_BM_IRQ_UNMASK << 1)
/* Command to unmask all IRQ sources. Created by shifting
 * and OR-ing the two bit wide ACC_BM_IRQ_UNMASK 16 times.
 */
#define ACC_BM_IRQ_UNMASK_ALL	0x55555555U
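
/* Field placement as used in acc_card_interrupt() below: the overview
 * module occupies the lowest two bit command field, CAN core i the field
 * at bits 2 * (i + 1). Masking only core 0 would therefore write
 * ACC_BM_IRQ_MASK << 2 = 0x8 into the BM_IRQ_Mask register.
 */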

static void acc_resetmode_enter(struct acc_core *core)
{
	acc_set_bits(core, ACC_CORE_OF_CTRL,
		     ACC_REG_CTRL_MASK_RESETMODE);

	/* Read back reset mode bit to flush PCI write posting */
	acc_resetmode_entered(core);
}

static void acc_resetmode_leave(struct acc_core *core)
{
	acc_clear_bits(core, ACC_CORE_OF_CTRL,
		       ACC_REG_CTRL_MASK_RESETMODE);

	/* Read back reset mode bit to flush PCI write posting */
	acc_resetmode_entered(core);
}

static void acc_txq_put(struct acc_core *core, u32 acc_id, u32 acc_dlc,
			const void *data)
{
	acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_1,
			   *((const u32 *)(data + 4)));
	acc_write32_noswap(core, ACC_CORE_OF_TXFIFO_DATA_0,
			   *((const u32 *)data));
	acc_write32(core, ACC_CORE_OF_TXFIFO_DLC, acc_dlc);
	/* CAN id must be written at last. This write starts TX. */
	acc_write32(core, ACC_CORE_OF_TXFIFO_ID, acc_id);
}

static u8 acc_tx_fifo_next(struct acc_core *core, u8 tx_fifo_idx)
{
	++tx_fifo_idx;
	if (tx_fifo_idx >= core->tx_fifo_size)
		tx_fifo_idx = 0U;
	return tx_fifo_idx;
}

/* Convert timestamp from esdACC time stamp ticks to ns
 *
 * The conversion factor ts2ns from time stamp counts to ns is basically
 *	ts2ns = NSEC_PER_SEC / timestamp_frequency
 *
 * We handle here only a fixed timestamp frequency of 80MHz. The
 * resulting ts2ns factor would be 12.5.
 *
 * At the end we multiply by 12 and add half of the HW timestamp
 * to get a multiplication by 12.5. This way any overflow is
 * avoided until ktime_t itself overflows.
 */
#define ACC_TS_FACTOR		(NSEC_PER_SEC / ACC_TS_FREQ_80MHZ)
#define ACC_TS_80MHZ_SHIFT	1
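
/* Worked example: ts = 80000000 ticks (one second at 80 MHz) yields
 * 80000000 * 12 + (80000000 >> 1) = 960000000 + 40000000
 * = 1000000000 ns = 1 s.
 */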

static ktime_t acc_ts2ktime(struct acc_ov *ov, u64 ts)
{
	u64 ns;

	ns = (ts * ACC_TS_FACTOR) + (ts >> ACC_TS_80MHZ_SHIFT);

	return ns_to_ktime(ns);
}
#undef ACC_TS_FACTOR
#undef ACC_TS_80MHZ_SHIFT

void acc_init_ov(struct acc_ov *ov, struct device *dev)
{
	u32 temp;

	temp = acc_ov_read32(ov, ACC_OV_OF_VERSION);
	ov->version = temp;
	ov->features = (temp >> 16);

	temp = acc_ov_read32(ov, ACC_OV_OF_INFO);
	ov->total_cores = temp;
	ov->active_cores = (temp >> 8);
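
	/* Illustrative example: INFO = 0x0410 reads as 0x10 = 16 cores
	 * implemented in total, of which 4 are active; the struct fields
	 * are assumed to be byte wide so the assignments above truncate
	 * the 32 bit register value accordingly.
	 */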

	ov->core_frequency = acc_ov_read32(ov, ACC_OV_OF_CANCORE_FREQ);
	ov->timestamp_frequency = acc_ov_read32(ov, ACC_OV_OF_TS_FREQ_LO);

	/* Depending on esdACC feature NEW_PSC enable the new prescaler
	 * or adjust core_frequency according to the implicit division by 2.
	 */
	if (ov->features & ACC_OV_REG_FEAT_MASK_NEW_PSC) {
		acc_ov_set_bits(ov, ACC_OV_OF_MODE,
				ACC_OV_REG_MODE_MASK_NEW_PSC_ENABLE);
	} else {
		ov->core_frequency /= 2;
	}

	dev_dbg(dev,
		"esdACC v%u, freq: %u/%u, feat/strap: 0x%x/0x%x, cores: %u/%u\n",
		ov->version, ov->core_frequency, ov->timestamp_frequency,
		ov->features, acc_ov_read32(ov, ACC_OV_OF_INFO) >> 16,
		ov->active_cores, ov->total_cores);
}

void acc_init_bm_ptr(struct acc_ov *ov, struct acc_core *cores, const void *mem)
{
	unsigned int u;

	/* DMA buffer layout as follows where N is the number of CAN cores
	 * implemented in the FPGA, i.e. N = ov->total_cores
	 *
	 *  Section Layout		Section size
	 * -----------------------------------------------
	 *  FIFO Card/Overview		ACC_CORE_DMABUF_SIZE
	 *  FIFO Core0			ACC_CORE_DMABUF_SIZE
	 *  ...				...
	 *  FIFO CoreN			ACC_CORE_DMABUF_SIZE
	 *  irq_cnt Card/Overview	sizeof(u32)
	 *  irq_cnt Core0		sizeof(u32)
	 *  ...				...
	 *  irq_cnt CoreN		sizeof(u32)
	 */
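	/* Illustrative example: with total_cores = 4 the irq_cnt array
	 * therefore starts at mem + 5 * ACC_CORE_DMABUF_SIZE, right after
	 * the overview FIFO and the four core FIFOs.
	 */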
	ov->bmfifo.messages = mem;
	ov->bmfifo.irq_cnt = mem + (ov->total_cores + 1U) * ACC_CORE_DMABUF_SIZE;

	for (u = 0U; u < ov->active_cores; u++) {
		struct acc_core *core = &cores[u];

		core->bmfifo.messages = mem + (u + 1U) * ACC_CORE_DMABUF_SIZE;
		core->bmfifo.irq_cnt = ov->bmfifo.irq_cnt + (u + 1U);
	}
}

int acc_open(struct net_device *netdev)
{
	struct acc_net_priv *priv = netdev_priv(netdev);
	struct acc_core *core = priv->core;
	u32 tx_fifo_status;
	u32 ctrl;
	int err;

	/* Retry to enter RESET mode if out of sync. */
	if (priv->can.state != CAN_STATE_STOPPED) {
		netdev_warn(netdev, "Entered %s() with bad can.state: %s\n",
			    __func__, can_get_state_str(priv->can.state));
		acc_resetmode_enter(core);
		priv->can.state = CAN_STATE_STOPPED;
	}

	err = open_candev(netdev);
	if (err)
		return err;

	ctrl = ACC_REG_CTRL_MASK_IE_RXTX |
		ACC_REG_CTRL_MASK_IE_TXERROR |
		ACC_REG_CTRL_MASK_IE_ERRWARN |
		ACC_REG_CTRL_MASK_IE_OVERRUN |
		ACC_REG_CTRL_MASK_IE_ERRPASS;

	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
		ctrl |= ACC_REG_CTRL_MASK_IE_BUSERR;

	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		ctrl |= ACC_REG_CTRL_MASK_LOM;

	acc_set_bits(core, ACC_CORE_OF_CTRL, ctrl);

	acc_resetmode_leave(core);
	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	/* Resync TX FIFO indices to HW state after (re-)start. */
	tx_fifo_status = acc_read32(core, ACC_CORE_OF_TXFIFO_STATUS);
	core->tx_fifo_head = tx_fifo_status & 0xff;
	core->tx_fifo_tail = (tx_fifo_status >> 8) & 0xff;
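
	/* Illustrative example: TXFIFO_STATUS = 0x0302 resyncs to
	 * head = 2 (bits 7:0) and tail = 3 (bits 15:8).
	 */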

	netif_start_queue(netdev);
	return 0;
}

int acc_close(struct net_device *netdev)
{
	struct acc_net_priv *priv = netdev_priv(netdev);
	struct acc_core *core = priv->core;

	acc_clear_bits(core, ACC_CORE_OF_CTRL,
		       ACC_REG_CTRL_MASK_IE_RXTX |
		       ACC_REG_CTRL_MASK_IE_TXERROR |
		       ACC_REG_CTRL_MASK_IE_ERRWARN |
		       ACC_REG_CTRL_MASK_IE_OVERRUN |
		       ACC_REG_CTRL_MASK_IE_ERRPASS |
		       ACC_REG_CTRL_MASK_IE_BUSERR);

	netif_stop_queue(netdev);
	acc_resetmode_enter(core);
	priv->can.state = CAN_STATE_STOPPED;

	/* Mark pending TX requests to be aborted after controller restart. */
	acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff);

	/* ACC_REG_CTRL_MASK_LOM is only accessible in RESET mode */
	acc_clear_bits(core, ACC_CORE_OF_CTRL,
		       ACC_REG_CTRL_MASK_LOM);

	close_candev(netdev);
	return 0;
}

netdev_tx_t acc_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct acc_net_priv *priv = netdev_priv(netdev);
	struct acc_core *core = priv->core;
	struct can_frame *cf = (struct can_frame *)skb->data;
	u8 tx_fifo_head = core->tx_fifo_head;
	int fifo_usage;
	u32 acc_id;
	u32 acc_dlc;

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	/* Access core->tx_fifo_tail only once because it may be changed
	 * from the interrupt level.
	 */
	fifo_usage = tx_fifo_head - core->tx_fifo_tail;
	if (fifo_usage < 0)
		fifo_usage += core->tx_fifo_size;
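
	/* Illustrative example: with tx_fifo_size = 16, head = 2 and
	 * tail = 12 the difference is -10 and wraps to a usage of 6.
	 */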

	if (fifo_usage >= core->tx_fifo_size - 1) {
		netdev_err(core->netdev,
			   "BUG: TX ring full when queue awake!\n");
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	if (fifo_usage == core->tx_fifo_size - 2)
		netif_stop_queue(netdev);

	acc_dlc = can_get_cc_dlc(cf, priv->can.ctrlmode);
	if (cf->can_id & CAN_RTR_FLAG)
		acc_dlc |= ACC_DLC_RTR_FLAG;
	if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		acc_dlc |= ACC_DLC_SSTX_FLAG;

	if (cf->can_id & CAN_EFF_FLAG) {
		acc_id = cf->can_id & CAN_EFF_MASK;
		acc_id |= ACC_ID_EFF_FLAG;
	} else {
		acc_id = cf->can_id & CAN_SFF_MASK;
	}

	can_put_echo_skb(skb, netdev, core->tx_fifo_head, 0);

	core->tx_fifo_head = acc_tx_fifo_next(core, tx_fifo_head);

	acc_txq_put(core, acc_id, acc_dlc, cf->data);

	return NETDEV_TX_OK;
}

int acc_get_berr_counter(const struct net_device *netdev,
			 struct can_berr_counter *bec)
{
	struct acc_net_priv *priv = netdev_priv(netdev);
	u32 core_status = acc_read32(priv->core, ACC_CORE_OF_STATUS);

	bec->txerr = (core_status >> 8) & 0xff;
	bec->rxerr = core_status & 0xff;

	return 0;
}

int acc_set_mode(struct net_device *netdev, enum can_mode mode)
{
	struct acc_net_priv *priv = netdev_priv(netdev);

	switch (mode) {
	case CAN_MODE_START:
		/* Paranoid FIFO index check. */
		{
			const u32 tx_fifo_status =
				acc_read32(priv->core, ACC_CORE_OF_TXFIFO_STATUS);
			const u8 hw_fifo_head = tx_fifo_status;

			if (hw_fifo_head != priv->core->tx_fifo_head ||
			    hw_fifo_head != priv->core->tx_fifo_tail) {
				netdev_warn(netdev,
					    "TX FIFO mismatch: T %2u H %2u; TFHW %#08x\n",
					    priv->core->tx_fifo_tail,
					    priv->core->tx_fifo_head,
					    tx_fifo_status);
			}
		}
		acc_resetmode_leave(priv->core);
		/* To leave the bus-off state the esdACC controller begins
		 * here a grace period where it counts 128 "idle conditions"
		 * (each of 11 consecutive recessive bits) on the bus as
		 * required by the CAN spec.
		 *
		 * During this time the TX FIFO may still contain already
		 * aborted "zombie" frames that are only drained from the FIFO
		 * at the end of the grace period.
		 *
		 * To not interfere with this drain process we don't call
		 * netif_wake_queue() here. When the controller reaches the
		 * error-active state again, it informs us about that with an
		 * acc_bmmsg_errstatechange message. Then netif_wake_queue()
		 * is called from handle_core_msg_errstatechange() instead.
		 */
		break;

	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

int acc_set_bittiming(struct net_device *netdev)
{
	struct acc_net_priv *priv = netdev_priv(netdev);
	const struct can_bittiming *bt = &priv->can.bittiming;
	u32 brp;
	u32 btr;

	if (priv->ov->features & ACC_OV_REG_FEAT_MASK_CANFD) {
		u32 fbtr = 0;

		netdev_dbg(netdev, "bit timing: brp %u, prop %u, ph1 %u ph2 %u, sjw %u\n",
			   bt->brp, bt->prop_seg,
			   bt->phase_seg1, bt->phase_seg2, bt->sjw);

		brp = FIELD_PREP(ACC_REG_BRP_FD_MASK_BRP, bt->brp - 1);

		btr = FIELD_PREP(ACC_REG_BTR_FD_MASK_TSEG1, bt->phase_seg1 + bt->prop_seg - 1);
		btr |= FIELD_PREP(ACC_REG_BTR_FD_MASK_TSEG2, bt->phase_seg2 - 1);
		btr |= FIELD_PREP(ACC_REG_BTR_FD_MASK_SJW, bt->sjw - 1);

		/* Keep order of accesses to ACC_CORE_OF_BRP and ACC_CORE_OF_BTR. */
		acc_write32(priv->core, ACC_CORE_OF_BRP, brp);
		acc_write32(priv->core, ACC_CORE_OF_BTR, btr);

		netdev_dbg(netdev, "esdACC: BRP %u, NBTR 0x%08x, DBTR 0x%08x",
			   brp, btr, fbtr);
	} else {
		netdev_dbg(netdev, "bit timing: brp %u, prop %u, ph1 %u ph2 %u, sjw %u\n",
			   bt->brp, bt->prop_seg,
			   bt->phase_seg1, bt->phase_seg2, bt->sjw);

		brp = FIELD_PREP(ACC_REG_BRP_CL_MASK_BRP, bt->brp - 1);

		btr = FIELD_PREP(ACC_REG_BTR_CL_MASK_TSEG1, bt->phase_seg1 + bt->prop_seg - 1);
		btr |= FIELD_PREP(ACC_REG_BTR_CL_MASK_TSEG2, bt->phase_seg2 - 1);
		btr |= FIELD_PREP(ACC_REG_BTR_CL_MASK_SJW, bt->sjw - 1);
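
		/* Illustrative example: brp 8, prop_seg 5, phase_seg1 7,
		 * phase_seg2 4 and sjw 1 are programmed as BRP 7, TSEG1 11,
		 * TSEG2 3 and SJW 0; the registers hold the configured
		 * values minus one.
		 */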

		/* Keep order of accesses to ACC_CORE_OF_BRP and ACC_CORE_OF_BTR. */
		acc_write32(priv->core, ACC_CORE_OF_BRP, brp);
		acc_write32(priv->core, ACC_CORE_OF_BTR, btr);

		netdev_dbg(netdev, "esdACC: BRP %u, BTR 0x%08x", brp, btr);
	}

	return 0;
}

static void handle_core_msg_rxtxdone(struct acc_core *core,
				     const struct acc_bmmsg_rxtxdone *msg)
{
	struct acc_net_priv *priv = netdev_priv(core->netdev);
	struct net_device_stats *stats = &core->netdev->stats;
	struct sk_buff *skb;

	if (msg->acc_dlc.len & ACC_DLC_TXD_FLAG) {
		u8 tx_fifo_tail = core->tx_fifo_tail;

		if (core->tx_fifo_head == tx_fifo_tail) {
			netdev_warn(core->netdev,
				    "TX interrupt, but queue is empty!?\n");
			return;
		}

		/* Direct access echo skb to attach HW time stamp. */
		skb = priv->can.echo_skb[tx_fifo_tail];
		if (skb) {
			skb_hwtstamps(skb)->hwtstamp =
				acc_ts2ktime(priv->ov, msg->ts);
		}

		stats->tx_packets++;
		stats->tx_bytes += can_get_echo_skb(core->netdev, tx_fifo_tail,
						    NULL);

		core->tx_fifo_tail = acc_tx_fifo_next(core, tx_fifo_tail);

		netif_wake_queue(core->netdev);

	} else {
		struct can_frame *cf;

		skb = alloc_can_skb(core->netdev, &cf);
		if (!skb) {
			stats->rx_dropped++;
			return;
		}

		cf->can_id = msg->id & ACC_ID_ID_MASK;
		if (msg->id & ACC_ID_EFF_FLAG)
			cf->can_id |= CAN_EFF_FLAG;

		can_frame_set_cc_len(cf, msg->acc_dlc.len & ACC_DLC_DLC_MASK,
				     priv->can.ctrlmode);

		if (msg->acc_dlc.len & ACC_DLC_RTR_FLAG) {
			cf->can_id |= CAN_RTR_FLAG;
		} else {
			memcpy(cf->data, msg->data, cf->len);
			stats->rx_bytes += cf->len;
		}
		stats->rx_packets++;

		skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);

		netif_rx(skb);
	}
}

static void handle_core_msg_txabort(struct acc_core *core,
				    const struct acc_bmmsg_txabort *msg)
{
	struct net_device_stats *stats = &core->netdev->stats;
	u8 tx_fifo_tail = core->tx_fifo_tail;
	u32 abort_mask = msg->abort_mask;	/* u32 extend to avoid warnings later */

	/* The abort_mask shows which frames were aborted in esdACC's FIFO. */
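	/* Illustrative example: with tail = 3, head = 6 and bits 3 and 4
	 * set in abort_mask, the echo skbs of frames 3 and 4 are freed
	 * below and the tail advances to 5; frame 5 remains pending.
	 */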
	while (tx_fifo_tail != core->tx_fifo_head && (abort_mask)) {
		const u32 tail_mask = (1U << tx_fifo_tail);

		if (!(abort_mask & tail_mask))
			break;
		abort_mask &= ~tail_mask;

		can_free_echo_skb(core->netdev, tx_fifo_tail, NULL);
		stats->tx_dropped++;
		stats->tx_aborted_errors++;

		tx_fifo_tail = acc_tx_fifo_next(core, tx_fifo_tail);
	}
	core->tx_fifo_tail = tx_fifo_tail;
	if (abort_mask)
		netdev_warn(core->netdev, "Unhandled aborted messages\n");

	if (!acc_resetmode_entered(core))
		netif_wake_queue(core->netdev);
}

static void handle_core_msg_overrun(struct acc_core *core,
				    const struct acc_bmmsg_overrun *msg)
{
	struct acc_net_priv *priv = netdev_priv(core->netdev);
	struct net_device_stats *stats = &core->netdev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	/* lost_cnt may be 0 if not supported by esdACC version */
	if (msg->lost_cnt) {
		stats->rx_errors += msg->lost_cnt;
		stats->rx_over_errors += msg->lost_cnt;
	} else {
		stats->rx_errors++;
		stats->rx_over_errors++;
	}

	skb = alloc_can_err_skb(core->netdev, &cf);
	if (!skb)
		return;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);

	netif_rx(skb);
}

static void handle_core_msg_buserr(struct acc_core *core,
				   const struct acc_bmmsg_buserr *msg)
{
	struct acc_net_priv *priv = netdev_priv(core->netdev);
	struct net_device_stats *stats = &core->netdev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	const u32 reg_status = msg->reg_status;
	const u8 rxerr = reg_status;
	const u8 txerr = (reg_status >> 8);
	u8 can_err_prot_type = 0U;

	priv->can.can_stats.bus_error++;

	/* Error occurred during transmission? */
	if (msg->ecc & ACC_ECC_DIR) {
		stats->rx_errors++;
	} else {
		can_err_prot_type |= CAN_ERR_PROT_TX;
		stats->tx_errors++;
	}
	/* Determine error type */
	switch (msg->ecc & ACC_ECC_MASK) {
	case ACC_ECC_BIT:
		can_err_prot_type |= CAN_ERR_PROT_BIT;
		break;
	case ACC_ECC_FORM:
		can_err_prot_type |= CAN_ERR_PROT_FORM;
		break;
	case ACC_ECC_STUFF:
		can_err_prot_type |= CAN_ERR_PROT_STUFF;
		break;
	default:
		can_err_prot_type |= CAN_ERR_PROT_UNSPEC;
		break;
	}

	skb = alloc_can_err_skb(core->netdev, &cf);
	if (!skb)
		return;

	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT;

	/* Set protocol error type */
	cf->data[2] = can_err_prot_type;
	/* Set error location */
	cf->data[3] = msg->ecc & ACC_ECC_SEG;

	/* Insert CAN TX and RX error counters. */
	cf->data[6] = txerr;
	cf->data[7] = rxerr;

	skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);

	netif_rx(skb);
}

static void
handle_core_msg_errstatechange(struct acc_core *core,
			       const struct acc_bmmsg_errstatechange *msg)
{
	struct acc_net_priv *priv = netdev_priv(core->netdev);
	struct can_frame *cf = NULL;
	struct sk_buff *skb;
	const u32 reg_status = msg->reg_status;
	const u8 rxerr = reg_status;
	const u8 txerr = (reg_status >> 8);
	enum can_state new_state;

	if (reg_status & ACC_REG_STATUS_MASK_STATUS_BS) {
		new_state = CAN_STATE_BUS_OFF;
	} else if (reg_status & ACC_REG_STATUS_MASK_STATUS_EP) {
		new_state = CAN_STATE_ERROR_PASSIVE;
	} else if (reg_status & ACC_REG_STATUS_MASK_STATUS_ES) {
		new_state = CAN_STATE_ERROR_WARNING;
	} else {
		new_state = CAN_STATE_ERROR_ACTIVE;
		if (priv->can.state == CAN_STATE_BUS_OFF) {
			/* See comment in acc_set_mode() for CAN_MODE_START */
			netif_wake_queue(core->netdev);
		}
	}

	skb = alloc_can_err_skb(core->netdev, &cf);

	if (new_state != priv->can.state) {
		enum can_state tx_state, rx_state;

		tx_state = (txerr >= rxerr) ?
			new_state : CAN_STATE_ERROR_ACTIVE;
		rx_state = (rxerr >= txerr) ?
			new_state : CAN_STATE_ERROR_ACTIVE;

		/* Always call can_change_state() to update the state
		 * even if alloc_can_err_skb() may have failed.
		 * can_change_state() can cope with a NULL cf pointer.
		 */
		can_change_state(core->netdev, cf, tx_state, rx_state);
	}

	if (skb) {
		cf->can_id |= CAN_ERR_CNT;
		cf->data[6] = txerr;
		cf->data[7] = rxerr;

		skb_hwtstamps(skb)->hwtstamp = acc_ts2ktime(priv->ov, msg->ts);

		netif_rx(skb);
	}

	if (new_state == CAN_STATE_BUS_OFF) {
		acc_write32(core, ACC_CORE_OF_TX_ABORT_MASK, 0xffff);
		can_bus_off(core->netdev);
	}
}

static void handle_core_interrupt(struct acc_core *core)
{
	u32 msg_fifo_head = core->bmfifo.local_irq_cnt & 0xff;
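
	/* The low byte of the DMA'd IRQ counter is the producer index into
	 * the busmaster message FIFO; msg_fifo_tail below is the consumer
	 * index and advances modulo 256.
	 */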

	while (core->bmfifo.msg_fifo_tail != msg_fifo_head) {
		const union acc_bmmsg *msg =
			&core->bmfifo.messages[core->bmfifo.msg_fifo_tail];

		switch (msg->msg_id) {
		case BM_MSG_ID_RXTXDONE:
			handle_core_msg_rxtxdone(core, &msg->rxtxdone);
			break;

		case BM_MSG_ID_TXABORT:
			handle_core_msg_txabort(core, &msg->txabort);
			break;

		case BM_MSG_ID_OVERRUN:
			handle_core_msg_overrun(core, &msg->overrun);
			break;

		case BM_MSG_ID_BUSERR:
			handle_core_msg_buserr(core, &msg->buserr);
			break;

		case BM_MSG_ID_ERRPASSIVE:
		case BM_MSG_ID_ERRWARN:
			handle_core_msg_errstatechange(core,
						       &msg->errstatechange);
			break;

		default:
			/* Ignore all other BM messages (like the CAN-FD messages) */
			break;
		}

		core->bmfifo.msg_fifo_tail =
			(core->bmfifo.msg_fifo_tail + 1) & 0xff;
	}
}

/**
 * acc_card_interrupt() - handle the interrupts of an esdACC FPGA
 *
 * @ov: overview module structure
 * @cores: array of core structures
 *
 * This function handles all interrupts pending for the overview module and the
 * CAN cores of the esdACC FPGA.
 *
 * It examines for all cores (the overview module core and the CAN cores)
 * the bmfifo.irq_cnt and compares it with the previously saved
 * bmfifo.local_irq_cnt. An IRQ is pending if they differ. The esdACC FPGA
 * updates the bmfifo.irq_cnt values by DMA.
 *
 * The pending interrupts are masked by writing to the IRQ mask register at
 * ACC_OV_OF_BM_IRQ_MASK. This register has for each core a two bit command
 * field evaluated as follows:
 *
 * Define,		bit pattern:	meaning
 *			00:		no action
 * ACC_BM_IRQ_UNMASK,	01:		unmask interrupt
 * ACC_BM_IRQ_MASK,	10:		mask interrupt
 *			11:		no action
 *
 * For each CAN core with a pending IRQ handle_core_interrupt() handles all
 * busmaster messages from the message FIFO. The last handled message (FIFO
 * index) is written to the CAN core to acknowledge its handling.
 *
 * Last step is to unmask all interrupts in the FPGA using
 * ACC_BM_IRQ_UNMASK_ALL.
 *
 * Return:
 *	IRQ_HANDLED, if card generated an interrupt that was handled
 *	IRQ_NONE, if the interrupt is not ours
 */
irqreturn_t acc_card_interrupt(struct acc_ov *ov, struct acc_core *cores)
{
	u32 irqmask;
	int i;

	/* First we look for whom interrupts are pending, card/overview
	 * or any of the cores. Two bits in irqmask are used for each;
	 * each two bit field is set to ACC_BM_IRQ_MASK if an IRQ is
	 * pending.
	 */
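	/* Illustrative example: with the overview module and core 1
	 * pending, irqmask becomes ACC_BM_IRQ_MASK | (ACC_BM_IRQ_MASK << 4)
	 * = 0x22, masking exactly those two IRQ sources below.
	 */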
	irqmask = 0U;
	if (READ_ONCE(*ov->bmfifo.irq_cnt) != ov->bmfifo.local_irq_cnt) {
		irqmask |= ACC_BM_IRQ_MASK;
		ov->bmfifo.local_irq_cnt = READ_ONCE(*ov->bmfifo.irq_cnt);
	}

	for (i = 0; i < ov->active_cores; i++) {
		struct acc_core *core = &cores[i];

		if (READ_ONCE(*core->bmfifo.irq_cnt) != core->bmfifo.local_irq_cnt) {
			irqmask |= (ACC_BM_IRQ_MASK << (2 * (i + 1)));
			core->bmfifo.local_irq_cnt = READ_ONCE(*core->bmfifo.irq_cnt);
		}
	}

	if (!irqmask)
		return IRQ_NONE;

	/* Second, we tell the card we're working on them by writing irqmask,
	 * call handle_{ov|core}_interrupt and then acknowledge the
	 * interrupts by writing irq_cnt:
	 */
	acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_MASK, irqmask);

	if (irqmask & ACC_BM_IRQ_MASK) {
		/* handle_ov_interrupt(); - no use yet. */
		acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_COUNTER,
			       ov->bmfifo.local_irq_cnt);
	}

	for (i = 0; i < ov->active_cores; i++) {
		struct acc_core *core = &cores[i];

		if (irqmask & (ACC_BM_IRQ_MASK << (2 * (i + 1)))) {
			handle_core_interrupt(core);
			acc_write32(core, ACC_OV_OF_BM_IRQ_COUNTER,
				    core->bmfifo.local_irq_cnt);
		}
	}

	acc_ov_write32(ov, ACC_OV_OF_BM_IRQ_MASK, ACC_BM_IRQ_UNMASK_ALL);

	return IRQ_HANDLED;
}