treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / drivers / net / can / peak_canfd / peak_canfd.c
blob10aa3e457c33d48fe2055a422c6399b56798b9e3
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
3 * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
5 * Copyright (C) 2016 PEAK System-Technik GmbH
6 */
8 #include <linux/can.h>
9 #include <linux/can/dev.h>
11 #include "peak_canfd_user.h"
13 /* internal IP core cache size (used as default echo skbs max number) */
14 #define PCANFD_ECHO_SKB_MAX 24
16 /* bittiming ranges of the PEAK-System PC CAN-FD interfaces */
/* nominal (arbitration phase) bittiming limits of the IP core; the max
 * values are derived from the PUCAN_TSLOW_* register field widths declared
 * in peak_canfd_user.h
 */
static const struct can_bittiming_const peak_canfd_nominal_const = {
	.name = "peak_canfd",
	.tseg1_min = 1,
	.tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
	.tseg2_min = 1,
	.tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
	.sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
	.brp_min = 1,
	.brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
	.brp_inc = 1,
};
/* data phase (CAN FD) bittiming limits of the IP core; the max values are
 * derived from the PUCAN_TFAST_* register field widths declared in
 * peak_canfd_user.h
 */
static const struct can_bittiming_const peak_canfd_data_const = {
	.name = "peak_canfd",
	.tseg1_min = 1,
	.tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
	.tseg2_min = 1,
	.tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
	.sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
	.brp_min = 1,
	.brp_max = (1 << PUCAN_TFAST_BRP_BITS),
	.brp_inc = 1,
};
/* reset the command buffer write index; returns @priv so the call can be
 * chained directly into pucan_add_cmd()
 */
static struct peak_canfd_priv *pucan_init_cmd(struct peak_canfd_priv *priv)
{
	priv->cmd_len = 0;
	return priv;
}
/* append a zeroed uCAN command of opcode @cmd_op to the channel command
 * buffer.
 *
 * Returns the address of the command inside the buffer, so that the caller
 * can fill in the opcode-specific fields, or NULL if the buffer has no room
 * left for one more command.
 */
static void *pucan_add_cmd(struct peak_canfd_priv *priv, int cmd_op)
{
	struct pucan_command *cmd;

	if (priv->cmd_len + sizeof(*cmd) > priv->cmd_maxlen)
		return NULL;

	cmd = priv->cmd_buffer + priv->cmd_len;

	/* reset all unused bit to default */
	memset(cmd, 0, sizeof(*cmd));

	cmd->opcode_channel = pucan_cmd_opcode_channel(priv->index, cmd_op);
	priv->cmd_len += sizeof(*cmd);

	return cmd;
}
65 static int pucan_write_cmd(struct peak_canfd_priv *priv)
67 int err;
69 if (priv->pre_cmd) {
70 err = priv->pre_cmd(priv);
71 if (err)
72 return err;
75 err = priv->write_cmd(priv);
76 if (err)
77 return err;
79 if (priv->post_cmd)
80 err = priv->post_cmd(priv);
82 return err;
/* uCAN commands interface functions */

/* put the channel in RESET mode (CAN traffic handling stopped) */
static int pucan_set_reset_mode(struct peak_canfd_priv *priv)
{
	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RESET_MODE);
	return pucan_write_cmd(priv);
}
/* switch the channel to NORMAL (tx/rx) mode; on success, the socket-can
 * state is moved to ERROR_ACTIVE
 */
static int pucan_set_normal_mode(struct peak_canfd_priv *priv)
{
	int err;

	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_NORMAL_MODE);
	err = pucan_write_cmd(priv);
	if (!err)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;

	return err;
}
/* switch the channel to LISTEN-ONLY (rx, no ack, no tx) mode; on success,
 * the socket-can state is moved to ERROR_ACTIVE
 */
static int pucan_set_listen_only_mode(struct peak_canfd_priv *priv)
{
	int err;

	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_LISTEN_ONLY_MODE);
	err = pucan_write_cmd(priv);
	if (!err)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;

	return err;
}
116 static int pucan_set_timing_slow(struct peak_canfd_priv *priv,
117 const struct can_bittiming *pbt)
119 struct pucan_timing_slow *cmd;
121 cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_SLOW);
123 cmd->sjw_t = PUCAN_TSLOW_SJW_T(pbt->sjw - 1,
124 priv->can.ctrlmode &
125 CAN_CTRLMODE_3_SAMPLES);
126 cmd->tseg1 = PUCAN_TSLOW_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1);
127 cmd->tseg2 = PUCAN_TSLOW_TSEG2(pbt->phase_seg2 - 1);
128 cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(pbt->brp - 1));
130 cmd->ewl = 96; /* default */
132 netdev_dbg(priv->ndev,
133 "nominal: brp=%u tseg1=%u tseg2=%u sjw=%u\n",
134 le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw_t);
136 return pucan_write_cmd(priv);
139 static int pucan_set_timing_fast(struct peak_canfd_priv *priv,
140 const struct can_bittiming *pbt)
142 struct pucan_timing_fast *cmd;
144 cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_FAST);
146 cmd->sjw = PUCAN_TFAST_SJW(pbt->sjw - 1);
147 cmd->tseg1 = PUCAN_TFAST_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1);
148 cmd->tseg2 = PUCAN_TFAST_TSEG2(pbt->phase_seg2 - 1);
149 cmd->brp = cpu_to_le16(PUCAN_TFAST_BRP(pbt->brp - 1));
151 netdev_dbg(priv->ndev,
152 "data: brp=%u tseg1=%u tseg2=%u sjw=%u\n",
153 le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw);
155 return pucan_write_cmd(priv);
158 static int pucan_set_std_filter(struct peak_canfd_priv *priv, u8 row, u32 mask)
160 struct pucan_std_filter *cmd;
162 cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_STD_FILTER);
164 /* all the 11-bits CAN ID values are represented by one bit in a
165 * 64 rows array of 32 bits: the upper 6 bits of the CAN ID select the
166 * row while the lowest 5 bits select the bit in that row.
168 * bit filter
169 * 1 passed
170 * 0 discarded
173 /* select the row */
174 cmd->idx = row;
176 /* set/unset bits in the row */
177 cmd->mask = cpu_to_le32(mask);
179 return pucan_write_cmd(priv);
182 static int pucan_tx_abort(struct peak_canfd_priv *priv, u16 flags)
184 struct pucan_tx_abort *cmd;
186 cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TX_ABORT);
188 cmd->flags = cpu_to_le16(flags);
190 return pucan_write_cmd(priv);
193 static int pucan_clr_err_counters(struct peak_canfd_priv *priv)
195 struct pucan_wr_err_cnt *cmd;
197 cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_WR_ERR_CNT);
199 cmd->sel_mask = cpu_to_le16(PUCAN_WRERRCNT_TE | PUCAN_WRERRCNT_RE);
200 cmd->tx_counter = 0;
201 cmd->rx_counter = 0;
203 return pucan_write_cmd(priv);
206 static int pucan_set_options(struct peak_canfd_priv *priv, u16 opt_mask)
208 struct pucan_options *cmd;
210 cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_EN_OPTION);
212 cmd->options = cpu_to_le16(opt_mask);
214 return pucan_write_cmd(priv);
217 static int pucan_clr_options(struct peak_canfd_priv *priv, u16 opt_mask)
219 struct pucan_options *cmd;
221 cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_CLR_DIS_OPTION);
223 cmd->options = cpu_to_le16(opt_mask);
225 return pucan_write_cmd(priv);
/* uCAN RX_BARRIER command: ask the IP core for a STATUS confirmation; its
 * reception (see pucan_handle_status()) signals that the Tx path can be
 * enabled
 */
static int pucan_setup_rx_barrier(struct peak_canfd_priv *priv)
{
	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RX_BARRIER);

	return pucan_write_cmd(priv);
}
235 static int pucan_netif_rx(struct sk_buff *skb, __le32 ts_low, __le32 ts_high)
237 struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
238 u64 ts_us;
240 ts_us = (u64)le32_to_cpu(ts_high) << 32;
241 ts_us |= le32_to_cpu(ts_low);
243 /* IP core timestamps are µs. */
244 hwts->hwtstamp = ns_to_ktime(ts_us * NSEC_PER_USEC);
246 return netif_rx(skb);
/* handle the reception of one CAN frame.
 *
 * A frame flagged LOOPED_BACK (but not SELF_RECEIVE) is the echo of a frame
 * this driver transmitted: its saved skb is released from the echo ring and
 * the tx queue is woken. Any other frame is wrapped into a CAN 2.0 or CAN FD
 * skb and pushed up the stack with its hardware timestamp.
 *
 * Returns 0 on success, -ENOMEM if no skb could be allocated.
 */
static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
			       struct pucan_rx_msg *msg)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct canfd_frame *cf;
	struct sk_buff *skb;
	const u16 rx_msg_flags = le16_to_cpu(msg->flags);
	u8 cf_len;

	/* decode the data length: CAN FD frames use the extended DLC coding */
	if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN)
		cf_len = can_dlc2len(get_canfd_dlc(pucan_msg_get_dlc(msg)));
	else
		cf_len = get_can_dlc(pucan_msg_get_dlc(msg));

	/* if this frame is an echo, */
	if ((rx_msg_flags & PUCAN_MSG_LOOPED_BACK) &&
	    !(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) {
		unsigned long flags;

		spin_lock_irqsave(&priv->echo_lock, flags);
		/* msg->client is the echo ring index set at tx time */
		can_get_echo_skb(priv->ndev, msg->client);

		/* count bytes of the echo instead of skb */
		stats->tx_bytes += cf_len;
		stats->tx_packets++;

		/* restart tx queue (a slot is free) */
		netif_wake_queue(priv->ndev);

		spin_unlock_irqrestore(&priv->echo_lock, flags);
		return 0;
	}

	/* otherwise, it should be pushed into rx fifo */
	if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
		/* CANFD frame case */
		skb = alloc_canfd_skb(priv->ndev, &cf);
		if (!skb)
			return -ENOMEM;

		if (rx_msg_flags & PUCAN_MSG_BITRATE_SWITCH)
			cf->flags |= CANFD_BRS;

		if (rx_msg_flags & PUCAN_MSG_ERROR_STATE_IND)
			cf->flags |= CANFD_ESI;
	} else {
		/* CAN 2.0 frame case */
		skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf);
		if (!skb)
			return -ENOMEM;
	}

	cf->can_id = le32_to_cpu(msg->can_id);
	cf->len = cf_len;

	if (rx_msg_flags & PUCAN_MSG_EXT_ID)
		cf->can_id |= CAN_EFF_FLAG;

	/* RTR frames carry no data payload */
	if (rx_msg_flags & PUCAN_MSG_RTR)
		cf->can_id |= CAN_RTR_FLAG;
	else
		memcpy(cf->data, msg->d, cf->len);

	stats->rx_bytes += cf->len;
	stats->rx_packets++;

	pucan_netif_rx(skb, msg->ts_low, msg->ts_high);

	return 0;
}
/* handle rx/tx error counters notification: cache the hardware counters so
 * that they can be reported by peak_canfd_get_berr_counter() and copied into
 * error frames. Always returns 0.
 */
static int pucan_handle_error(struct peak_canfd_priv *priv,
			      struct pucan_error_msg *msg)
{
	priv->bec.txerr = msg->tx_err_cnt;
	priv->bec.rxerr = msg->rx_err_cnt;

	return 0;
}
/* handle status notification.
 *
 * Two kinds of STATUS messages arrive here: the confirmation of a previous
 * RX_BARRIER command (the channel's Tx path may now be enabled), and CAN
 * error-state changes (bus-off / passive / warning / back to active), which
 * are translated into error frames pushed up the stack.
 *
 * Returns 0 on success, a negative error code if enabling the Tx path
 * failed or no error skb could be allocated.
 */
static int pucan_handle_status(struct peak_canfd_priv *priv,
			       struct pucan_status_msg *msg)
{
	struct net_device *ndev = priv->ndev;
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	/* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */
	if (pucan_status_is_rx_barrier(msg)) {
		if (priv->enable_tx_path) {
			int err = priv->enable_tx_path(priv);

			if (err)
				return err;
		}

		/* start network queue (echo_skb array is empty) */
		netif_start_queue(ndev);

		return 0;
	}

	/* skb may be NULL here: each branch below copes with that, and the
	 * !skb case is accounted for after the state has been updated
	 */
	skb = alloc_can_err_skb(ndev, &cf);

	/* test state error bits according to their priority */
	if (pucan_status_is_busoff(msg)) {
		netdev_dbg(ndev, "Bus-off entry status\n");
		priv->can.state = CAN_STATE_BUS_OFF;
		priv->can.can_stats.bus_off++;
		can_bus_off(ndev);
		if (skb)
			cf->can_id |= CAN_ERR_BUSOFF;

	} else if (pucan_status_is_passive(msg)) {
		netdev_dbg(ndev, "Error passive status\n");
		priv->can.state = CAN_STATE_ERROR_PASSIVE;
		priv->can.can_stats.error_passive++;
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL;
			/* report the side (tx/rx) that reached the state */
			cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
					CAN_ERR_CRTL_TX_PASSIVE :
					CAN_ERR_CRTL_RX_PASSIVE;
			cf->data[6] = priv->bec.txerr;
			cf->data[7] = priv->bec.rxerr;
		}

	} else if (pucan_status_is_warning(msg)) {
		netdev_dbg(ndev, "Error warning status\n");
		priv->can.state = CAN_STATE_ERROR_WARNING;
		priv->can.can_stats.error_warning++;
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
					CAN_ERR_CRTL_TX_WARNING :
					CAN_ERR_CRTL_RX_WARNING;
			cf->data[6] = priv->bec.txerr;
			cf->data[7] = priv->bec.rxerr;
		}

	} else if (priv->can.state != CAN_STATE_ERROR_ACTIVE) {
		/* back to ERROR_ACTIVE */
		netdev_dbg(ndev, "Error active status\n");
		can_change_state(ndev, cf, CAN_STATE_ERROR_ACTIVE,
				 CAN_STATE_ERROR_ACTIVE);
	} else {
		/* no state change: nothing to report */
		dev_kfree_skb(skb);
		return 0;
	}

	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	pucan_netif_rx(skb, msg->ts_low, msg->ts_high);

	return 0;
}
/* handle uCAN Rx overflow notification: account the overflow and push an
 * error frame (with the cached error counters) up the stack.
 *
 * Returns 0 on success, -ENOMEM if no error skb could be allocated.
 */
static int pucan_handle_cache_critical(struct peak_canfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	stats->rx_over_errors++;
	stats->rx_errors++;

	skb = alloc_can_err_skb(priv->ndev, &cf);
	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	/* error counters cached by pucan_handle_error() */
	cf->data[6] = priv->bec.txerr;
	cf->data[7] = priv->bec.rxerr;

	stats->rx_bytes += cf->can_dlc;
	stats->rx_packets++;
	netif_rx(skb);

	return 0;
}
/* handle a single uCAN message: dispatch it to the matching handler
 * according to its type.
 *
 * Returns the size in bytes of the handled message (0 meaning "null packet:
 * end of list"), or a negative error code from the handler.
 */
int peak_canfd_handle_msg(struct peak_canfd_priv *priv,
			  struct pucan_rx_msg *msg)
{
	u16 msg_type = le16_to_cpu(msg->type);
	int msg_size = le16_to_cpu(msg->size);
	int err;

	if (!msg_size || !msg_type)
		/* null packet found: end of list */
		goto exit;

	switch (msg_type) {
	case PUCAN_MSG_CAN_RX:
		err = pucan_handle_can_rx(priv, (struct pucan_rx_msg *)msg);
		break;
	case PUCAN_MSG_ERROR:
		err = pucan_handle_error(priv, (struct pucan_error_msg *)msg);
		break;
	case PUCAN_MSG_STATUS:
		err = pucan_handle_status(priv, (struct pucan_status_msg *)msg);
		break;
	case PUCAN_MSG_CACHE_CRITICAL:
		err = pucan_handle_cache_critical(priv);
		break;
	default:
		/* unknown types are silently skipped (their size is known) */
		err = 0;
	}

	if (err < 0)
		return err;

exit:
	return msg_size;
}
/* handle a list of rx_count messages from rx_msg memory address.
 *
 * Returns the number of messages handled, or the negative error code that
 * stopped the walk.
 */
int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv,
				struct pucan_rx_msg *msg_list, int msg_count)
{
	void *ptr = msg_list;
	int handled = 0;
	int sz = 0;

	while (handled < msg_count) {
		sz = peak_canfd_handle_msg(priv, ptr);

		/* a null packet can be found at the end of a list */
		if (sz <= 0)
			break;

		/* messages are 32-bit aligned in the rx area */
		ptr += ALIGN(sz, 4);
		handled++;
	}

	return (sz < 0) ? sz : handled;
}
503 static int peak_canfd_start(struct peak_canfd_priv *priv)
505 int err;
507 err = pucan_clr_err_counters(priv);
508 if (err)
509 goto err_exit;
511 priv->echo_idx = 0;
513 priv->bec.txerr = 0;
514 priv->bec.rxerr = 0;
516 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
517 err = pucan_set_listen_only_mode(priv);
518 else
519 err = pucan_set_normal_mode(priv);
521 err_exit:
522 return err;
525 static void peak_canfd_stop(struct peak_canfd_priv *priv)
527 int err;
529 /* go back to RESET mode */
530 err = pucan_set_reset_mode(priv);
531 if (err) {
532 netdev_err(priv->ndev, "channel %u reset failed\n",
533 priv->index);
534 } else {
535 /* abort last Tx (MUST be done in RESET mode only!) */
536 pucan_tx_abort(priv, PUCAN_TX_ABORT_FLUSH);
540 static int peak_canfd_set_mode(struct net_device *ndev, enum can_mode mode)
542 struct peak_canfd_priv *priv = netdev_priv(ndev);
544 switch (mode) {
545 case CAN_MODE_START:
546 peak_canfd_start(priv);
547 netif_wake_queue(ndev);
548 break;
549 default:
550 return -EOPNOTSUPP;
553 return 0;
/* socket-can do_get_berr_counter callback: report the error counters cached
 * by pucan_handle_error(). Always returns 0.
 */
static int peak_canfd_get_berr_counter(const struct net_device *ndev,
				       struct can_berr_counter *bec)
{
	struct peak_canfd_priv *priv = netdev_priv(ndev);

	*bec = priv->bec;
	return 0;
}
/* ndo_open callback: open the candev, configure the IP core (FD ISO mode,
 * error-counter reporting option, accept-all standard filters), start the
 * channel and post an RX_BARRIER. The Tx queue itself is only started when
 * the RX_BARRIER confirmation arrives (see pucan_handle_status()).
 */
static int peak_canfd_open(struct net_device *ndev)
{
	struct peak_canfd_priv *priv = netdev_priv(ndev);
	int i, err = 0;

	err = open_candev(ndev);
	if (err) {
		netdev_err(ndev, "open_candev() failed, error %d\n", err);
		goto err_exit;
	}

	err = pucan_set_reset_mode(priv);
	if (err)
		goto err_close;

	if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
		/* ISO mode is the default: clear it only if non-ISO asked */
		if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			err = pucan_clr_options(priv, PUCAN_OPTION_CANDFDISO);
		else
			err = pucan_set_options(priv, PUCAN_OPTION_CANDFDISO);

		if (err)
			goto err_close;
	}

	/* set option: get rx/tx error counters */
	err = pucan_set_options(priv, PUCAN_OPTION_ERROR);
	if (err)
		goto err_close;

	/* accept all standard CAN ID */
	for (i = 0; i <= PUCAN_FLTSTD_ROW_IDX_MAX; i++)
		pucan_set_std_filter(priv, i, 0xffffffff);

	err = peak_canfd_start(priv);
	if (err)
		goto err_close;

	/* receiving the RB status says when Tx path is ready */
	err = pucan_setup_rx_barrier(priv);
	if (!err)
		/* success: skip close_candev() and return 0 */
		goto err_exit;

err_close:
	close_candev(ndev);
err_exit:
	return err;
}
/* socket-can do_set_bittiming callback: push the nominal bittiming to the
 * IP core
 */
static int peak_canfd_set_bittiming(struct net_device *ndev)
{
	struct peak_canfd_priv *priv = netdev_priv(ndev);

	return pucan_set_timing_slow(priv, &priv->can.bittiming);
}
/* socket-can do_set_data_bittiming callback: push the CAN FD data bittiming
 * to the IP core
 */
static int peak_canfd_set_data_bittiming(struct net_device *ndev)
{
	struct peak_canfd_priv *priv = netdev_priv(ndev);

	return pucan_set_timing_fast(priv, &priv->can.data_bittiming);
}
/* ndo_stop callback: stop the tx queue and the channel, then close the
 * candev. Always returns 0.
 */
static int peak_canfd_close(struct net_device *ndev)
{
	struct peak_canfd_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	peak_canfd_stop(priv);
	close_candev(ndev);

	return 0;
}
/* ndo_start_xmit callback: convert the skb into a pucan_tx_msg, save a copy
 * in the echo skb ring (released in pucan_handle_can_rx() when the
 * looped-back confirmation arrives), then hand the message to the
 * hardware-specific layer.
 */
static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct peak_canfd_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	struct pucan_tx_msg *msg;
	u16 msg_size, msg_flags;
	unsigned long flags;
	bool should_stop_tx_queue;
	int room_left;
	u8 can_dlc;

	if (can_dropped_invalid_skb(ndev, skb))
		return NETDEV_TX_OK;

	/* tx messages are 32-bit aligned in the tx area */
	msg_size = ALIGN(sizeof(*msg) + cf->len, 4);
	msg = priv->alloc_tx_msg(priv, msg_size, &room_left);

	/* should never happen except under bus-off condition and (auto-)restart
	 * mechanism
	 */
	if (!msg) {
		stats->tx_dropped++;
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	msg->size = cpu_to_le16(msg_size);
	msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX);
	msg_flags = 0;

	if (cf->can_id & CAN_EFF_FLAG) {
		msg_flags |= PUCAN_MSG_EXT_ID;
		msg->can_id = cpu_to_le32(cf->can_id & CAN_EFF_MASK);
	} else {
		msg->can_id = cpu_to_le32(cf->can_id & CAN_SFF_MASK);
	}

	if (can_is_canfd_skb(skb)) {
		/* CAN FD frame format */
		can_dlc = can_len2dlc(cf->len);

		msg_flags |= PUCAN_MSG_EXT_DATA_LEN;

		if (cf->flags & CANFD_BRS)
			msg_flags |= PUCAN_MSG_BITRATE_SWITCH;

		if (cf->flags & CANFD_ESI)
			msg_flags |= PUCAN_MSG_ERROR_STATE_IND;
	} else {
		/* CAN 2.0 frame format */
		can_dlc = cf->len;

		if (cf->can_id & CAN_RTR_FLAG)
			msg_flags |= PUCAN_MSG_RTR;
	}

	/* always ask loopback for echo management */
	msg_flags |= PUCAN_MSG_LOOPED_BACK;

	/* set driver specific bit to differentiate with application loopback */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
		msg_flags |= PUCAN_MSG_SELF_RECEIVE;

	msg->flags = cpu_to_le16(msg_flags);
	msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(priv->index, can_dlc);
	memcpy(msg->d, cf->data, cf->len);

	/* struct msg client field is used as an index in the echo skbs ring */
	msg->client = priv->echo_idx;

	spin_lock_irqsave(&priv->echo_lock, flags);

	/* prepare and save echo skb in internal slot */
	can_put_echo_skb(skb, ndev, priv->echo_idx);

	/* move echo index to the next slot */
	priv->echo_idx = (priv->echo_idx + 1) % priv->can.echo_skb_max;

	/* if next slot is not free, stop network queue (no slot free in echo
	 * skb ring means that the controller did not write these frames on
	 * the bus: no need to continue).
	 */
	should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]);

	/* stop network tx queue if not enough room to save one more msg too */
	if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
		should_stop_tx_queue |= (room_left <
					(sizeof(*msg) + CANFD_MAX_DLEN));
	else
		should_stop_tx_queue |= (room_left <
					(sizeof(*msg) + CAN_MAX_DLEN));

	if (should_stop_tx_queue)
		netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->echo_lock, flags);

	/* write the skb on the interface */
	priv->write_tx_msg(priv, msg);

	return NETDEV_TX_OK;
}
/* network device callbacks */
static const struct net_device_ops peak_canfd_netdev_ops = {
	.ndo_open = peak_canfd_open,
	.ndo_stop = peak_canfd_close,
	.ndo_start_xmit = peak_canfd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};
/* allocate and pre-initialize a CAN-FD network device for one channel.
 *
 * @sizeof_priv:  size of the private area (including the hardware-specific
 *		  part appended to struct peak_canfd_priv)
 * @index:	  channel index (also used as netdev dev_id)
 * @echo_skb_max: max number of echo skbs; a negative value selects the
 *		  default PCANFD_ECHO_SKB_MAX
 *
 * Returns the new net_device, or NULL if allocation failed. The caller
 * completes the hardware-specific callbacks and registers the device.
 */
struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
					int echo_skb_max)
{
	struct net_device *ndev;
	struct peak_canfd_priv *priv;

	/* we DO support local echo */
	if (echo_skb_max < 0)
		echo_skb_max = PCANFD_ECHO_SKB_MAX;

	/* allocate the candev object */
	ndev = alloc_candev(sizeof_priv, echo_skb_max);
	if (!ndev)
		return NULL;

	priv = netdev_priv(ndev);

	/* complete now socket-can initialization side */
	priv->can.state = CAN_STATE_STOPPED;
	priv->can.bittiming_const = &peak_canfd_nominal_const;
	priv->can.data_bittiming_const = &peak_canfd_data_const;

	priv->can.do_set_mode = peak_canfd_set_mode;
	priv->can.do_get_berr_counter = peak_canfd_get_berr_counter;
	priv->can.do_set_bittiming = peak_canfd_set_bittiming;
	priv->can.do_set_data_bittiming = peak_canfd_set_data_bittiming;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
				       CAN_CTRLMODE_LISTENONLY |
				       CAN_CTRLMODE_3_SAMPLES |
				       CAN_CTRLMODE_FD |
				       CAN_CTRLMODE_FD_NON_ISO |
				       CAN_CTRLMODE_BERR_REPORTING;

	priv->ndev = ndev;
	priv->index = index;
	priv->cmd_len = 0;
	spin_lock_init(&priv->echo_lock);

	/* IFF_ECHO: tx frames are looped back by the controller */
	ndev->flags |= IFF_ECHO;
	ndev->netdev_ops = &peak_canfd_netdev_ops;
	ndev->dev_id = index;

	return ndev;
}