drivers/net/can/peak_canfd/peak_canfd.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2007, 2011 Wolfgang Grandegger <wg@grandegger.com>
 * Copyright (C) 2012 Stephane Grosjean <s.grosjean@peak-system.com>
 *
 * Copyright (C) 2016 PEAK System-Technik GmbH
 */

#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/ethtool.h>

#include "peak_canfd_user.h"

/* internal IP core cache size (used as default echo skbs max number) */
#define PCANFD_ECHO_SKB_MAX	24

/* bittiming ranges of the PEAK-System PC CAN-FD interfaces */
static const struct can_bittiming_const peak_canfd_nominal_const = {
	.name = "peak_canfd",
	.tseg1_min = 1,
	.tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
	.tseg2_min = 1,
	.tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
	.sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
	.brp_min = 1,
	.brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
	.brp_inc = 1,
};

static const struct can_bittiming_const peak_canfd_data_const = {
	.name = "peak_canfd",
	.tseg1_min = 1,
	.tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS),
	.tseg2_min = 1,
	.tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS),
	.sjw_max = (1 << PUCAN_TFAST_SJW_BITS),
	.brp_min = 1,
	.brp_max = (1 << PUCAN_TFAST_BRP_BITS),
	.brp_inc = 1,
};

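/* reset the command buffer before building a new command sequence */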
static struct peak_canfd_priv *pucan_init_cmd(struct peak_canfd_priv *priv)
{
	priv->cmd_len = 0;
	return priv;
}

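/* append one command to the command buffer (returns NULL if it is full) */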
static void *pucan_add_cmd(struct peak_canfd_priv *priv, int cmd_op)
{
	struct pucan_command *cmd;

	if (priv->cmd_len + sizeof(*cmd) > priv->cmd_maxlen)
		return NULL;

	cmd = priv->cmd_buffer + priv->cmd_len;

	/* reset all unused bits to default */
	memset(cmd, 0, sizeof(*cmd));

	cmd->opcode_channel = pucan_cmd_opcode_channel(priv->index, cmd_op);
	priv->cmd_len += sizeof(*cmd);

	return cmd;
}

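/* flush the command buffer to the IP core, wrapped by the optional
 * device-specific pre_cmd()/post_cmd() callbacks
 */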
static int pucan_write_cmd(struct peak_canfd_priv *priv)
{
	int err;

	if (priv->pre_cmd) {
		err = priv->pre_cmd(priv);
		if (err)
			return err;
	}

	err = priv->write_cmd(priv);
	if (err)
		return err;

	if (priv->post_cmd)
		err = priv->post_cmd(priv);

	return err;
}

/* uCAN commands interface functions */
static int pucan_set_reset_mode(struct peak_canfd_priv *priv)
{
	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RESET_MODE);
	return pucan_write_cmd(priv);
}

static int pucan_set_normal_mode(struct peak_canfd_priv *priv)
{
	int err;

	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_NORMAL_MODE);
	err = pucan_write_cmd(priv);
	if (!err)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;

	return err;
}

static int pucan_set_listen_only_mode(struct peak_canfd_priv *priv)
{
	int err;

	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_LISTEN_ONLY_MODE);
	err = pucan_write_cmd(priv);
	if (!err)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;

	return err;
}

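/* program the nominal (arbitration phase) bittiming of the channel */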
static int pucan_set_timing_slow(struct peak_canfd_priv *priv,
				 const struct can_bittiming *pbt)
{
	struct pucan_timing_slow *cmd;

	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_SLOW);

	cmd->sjw_t = PUCAN_TSLOW_SJW_T(pbt->sjw - 1,
				       priv->can.ctrlmode &
				       CAN_CTRLMODE_3_SAMPLES);
	cmd->tseg1 = PUCAN_TSLOW_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1);
	cmd->tseg2 = PUCAN_TSLOW_TSEG2(pbt->phase_seg2 - 1);
	cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(pbt->brp - 1));

	cmd->ewl = 96;	/* default */

	netdev_dbg(priv->ndev,
		   "nominal: brp=%u tseg1=%u tseg2=%u sjw=%u\n",
		   le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw_t);

	return pucan_write_cmd(priv);
}

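/* program the CAN FD data phase bittiming of the channel */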
static int pucan_set_timing_fast(struct peak_canfd_priv *priv,
				 const struct can_bittiming *pbt)
{
	struct pucan_timing_fast *cmd;

	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_FAST);

	cmd->sjw = PUCAN_TFAST_SJW(pbt->sjw - 1);
	cmd->tseg1 = PUCAN_TFAST_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1);
	cmd->tseg2 = PUCAN_TFAST_TSEG2(pbt->phase_seg2 - 1);
	cmd->brp = cpu_to_le16(PUCAN_TFAST_BRP(pbt->brp - 1));

	netdev_dbg(priv->ndev,
		   "data: brp=%u tseg1=%u tseg2=%u sjw=%u\n",
		   le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw);

	return pucan_write_cmd(priv);
}

static int pucan_set_std_filter(struct peak_canfd_priv *priv, u8 row, u32 mask)
{
	struct pucan_std_filter *cmd;

	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_STD_FILTER);

	/* all the 11-bit CAN ID values are represented by one bit in a
	 * 64-row array of 32-bit words: the upper 6 bits of the CAN ID select
	 * the row while the lowest 5 bits select the bit in that row.
	 *
	 * bit	filter
	 *  1	passed
	 *  0	discarded
	 */

	/* select the row */
	cmd->idx = row;

	/* set/unset bits in the row */
	cmd->mask = cpu_to_le32(mask);

	return pucan_write_cmd(priv);
}

static int pucan_tx_abort(struct peak_canfd_priv *priv, u16 flags)
{
	struct pucan_tx_abort *cmd;

	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TX_ABORT);

	cmd->flags = cpu_to_le16(flags);

	return pucan_write_cmd(priv);
}

static int pucan_clr_err_counters(struct peak_canfd_priv *priv)
{
	struct pucan_wr_err_cnt *cmd;

	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_WR_ERR_CNT);

	cmd->sel_mask = cpu_to_le16(PUCAN_WRERRCNT_TE | PUCAN_WRERRCNT_RE);
	cmd->tx_counter = 0;
	cmd->rx_counter = 0;

	return pucan_write_cmd(priv);
}

static int pucan_set_options(struct peak_canfd_priv *priv, u16 opt_mask)
{
	struct pucan_options *cmd;

	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_EN_OPTION);

	cmd->options = cpu_to_le16(opt_mask);

	return pucan_write_cmd(priv);
}

static int pucan_clr_options(struct peak_canfd_priv *priv, u16 opt_mask)
{
	struct pucan_options *cmd;

	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_CLR_DIS_OPTION);

	cmd->options = cpu_to_le16(opt_mask);

	return pucan_write_cmd(priv);
}

static int pucan_setup_rx_barrier(struct peak_canfd_priv *priv)
{
	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RX_BARRIER);

	return pucan_write_cmd(priv);
}

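/* convert the 64-bit µs hardware timestamp of a message into a skb
 * hardware timestamp, then push the skb into the networking stack
 */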
static int pucan_netif_rx(struct sk_buff *skb, __le32 ts_low, __le32 ts_high)
{
	struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
	u64 ts_us;

	ts_us = (u64)le32_to_cpu(ts_high) << 32;
	ts_us |= le32_to_cpu(ts_low);

	/* IP core timestamps are µs. */
	hwts->hwtstamp = ns_to_ktime(ts_us * NSEC_PER_USEC);

	return netif_rx(skb);
}

/* handle the reception of one CAN frame */
static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
			       struct pucan_rx_msg *msg)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct canfd_frame *cf;
	struct sk_buff *skb;
	const u16 rx_msg_flags = le16_to_cpu(msg->flags);
	u8 cf_len;

	if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN)
		cf_len = can_fd_dlc2len(pucan_msg_get_dlc(msg));
	else
		cf_len = can_cc_dlc2len(pucan_msg_get_dlc(msg));

	/* if this frame is an echo, */
	if (rx_msg_flags & PUCAN_MSG_LOOPED_BACK) {
		unsigned long flags;

		spin_lock_irqsave(&priv->echo_lock, flags);

		/* count bytes of the echo instead of skb */
		stats->tx_bytes += can_get_echo_skb(priv->ndev, msg->client, NULL);
		stats->tx_packets++;

		/* restart tx queue (a slot is free) */
		netif_wake_queue(priv->ndev);

		spin_unlock_irqrestore(&priv->echo_lock, flags);

		/* if this frame is only an echo, stop here. Otherwise,
		 * continue to push this application self-received frame into
		 * its own rx queue.
		 */
		if (!(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE))
			return 0;
	}

	/* otherwise, it should be pushed into rx fifo */
	if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
		/* CANFD frame case */
		skb = alloc_canfd_skb(priv->ndev, &cf);
		if (!skb)
			return -ENOMEM;

		if (rx_msg_flags & PUCAN_MSG_BITRATE_SWITCH)
			cf->flags |= CANFD_BRS;

		if (rx_msg_flags & PUCAN_MSG_ERROR_STATE_IND)
			cf->flags |= CANFD_ESI;
	} else {
		/* CAN 2.0 frame case */
		skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf);
		if (!skb)
			return -ENOMEM;
	}

	cf->can_id = le32_to_cpu(msg->can_id);
	cf->len = cf_len;

	if (rx_msg_flags & PUCAN_MSG_EXT_ID)
		cf->can_id |= CAN_EFF_FLAG;

	if (rx_msg_flags & PUCAN_MSG_RTR) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		memcpy(cf->data, msg->d, cf->len);

		stats->rx_bytes += cf->len;
	}
	stats->rx_packets++;

	pucan_netif_rx(skb, msg->ts_low, msg->ts_high);

	return 0;
}

/* handle rx/tx error counters notification */
static int pucan_handle_error(struct peak_canfd_priv *priv,
			      struct pucan_error_msg *msg)
{
	priv->bec.txerr = msg->tx_err_cnt;
	priv->bec.rxerr = msg->rx_err_cnt;

	return 0;
}

/* handle status notification */
static int pucan_handle_status(struct peak_canfd_priv *priv,
			       struct pucan_status_msg *msg)
{
	struct net_device *ndev = priv->ndev;
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	/* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */
	if (pucan_status_is_rx_barrier(msg)) {
		if (priv->enable_tx_path) {
			int err = priv->enable_tx_path(priv);

			if (err)
				return err;
		}

		/* wake network queue up (echo_skb array is empty) */
		netif_wake_queue(ndev);

		return 0;
	}

	skb = alloc_can_err_skb(ndev, &cf);

	/* test state error bits according to their priority */
	if (pucan_status_is_busoff(msg)) {
		netdev_dbg(ndev, "Bus-off entry status\n");
		priv->can.state = CAN_STATE_BUS_OFF;
		priv->can.can_stats.bus_off++;
		can_bus_off(ndev);
		if (skb)
			cf->can_id |= CAN_ERR_BUSOFF;

	} else if (pucan_status_is_passive(msg)) {
		netdev_dbg(ndev, "Error passive status\n");
		priv->can.state = CAN_STATE_ERROR_PASSIVE;
		priv->can.can_stats.error_passive++;
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
			cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
					CAN_ERR_CRTL_TX_PASSIVE :
					CAN_ERR_CRTL_RX_PASSIVE;
			cf->data[6] = priv->bec.txerr;
			cf->data[7] = priv->bec.rxerr;
		}

	} else if (pucan_status_is_warning(msg)) {
		netdev_dbg(ndev, "Error warning status\n");
		priv->can.state = CAN_STATE_ERROR_WARNING;
		priv->can.can_stats.error_warning++;
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
			cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
					CAN_ERR_CRTL_TX_WARNING :
					CAN_ERR_CRTL_RX_WARNING;
			cf->data[6] = priv->bec.txerr;
			cf->data[7] = priv->bec.rxerr;
		}

	} else if (priv->can.state != CAN_STATE_ERROR_ACTIVE) {
		/* back to ERROR_ACTIVE */
		netdev_dbg(ndev, "Error active status\n");
		can_change_state(ndev, cf, CAN_STATE_ERROR_ACTIVE,
				 CAN_STATE_ERROR_ACTIVE);
	} else {
		dev_kfree_skb(skb);
		return 0;
	}

	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	pucan_netif_rx(skb, msg->ts_low, msg->ts_high);

	return 0;
}

/* handle uCAN Rx overflow notification */
static int pucan_handle_cache_critical(struct peak_canfd_priv *priv)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	stats->rx_over_errors++;
	stats->rx_errors++;

	skb = alloc_can_err_skb(priv->ndev, &cf);
	if (!skb) {
		stats->rx_dropped++;
		return -ENOMEM;
	}

	cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	cf->data[6] = priv->bec.txerr;
	cf->data[7] = priv->bec.rxerr;

	netif_rx(skb);

	return 0;
}

/* handle a single uCAN message */
int peak_canfd_handle_msg(struct peak_canfd_priv *priv,
			  struct pucan_rx_msg *msg)
{
	u16 msg_type = le16_to_cpu(msg->type);
	int msg_size = le16_to_cpu(msg->size);
	int err;

	if (!msg_size || !msg_type) {
		/* null packet found: end of list */
		goto exit;
	}

	switch (msg_type) {
	case PUCAN_MSG_CAN_RX:
		err = pucan_handle_can_rx(priv, (struct pucan_rx_msg *)msg);
		break;
	case PUCAN_MSG_ERROR:
		err = pucan_handle_error(priv, (struct pucan_error_msg *)msg);
		break;
	case PUCAN_MSG_STATUS:
		err = pucan_handle_status(priv, (struct pucan_status_msg *)msg);
		break;
	case PUCAN_MSG_CACHE_CRITICAL:
		err = pucan_handle_cache_critical(priv);
		break;
	default:
		err = 0;
	}

	if (err < 0)
		return err;

exit:
	return msg_size;
}

/* handle a list of rx_count messages from rx_msg memory address */
int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv,
				struct pucan_rx_msg *msg_list, int msg_count)
{
	void *msg_ptr = msg_list;
	int i, msg_size = 0;

	for (i = 0; i < msg_count; i++) {
		msg_size = peak_canfd_handle_msg(priv, msg_ptr);

		/* a null packet can be found at the end of a list */
		if (msg_size <= 0)
			break;

		msg_ptr += ALIGN(msg_size, 4);
	}

	if (msg_size < 0)
		return msg_size;

	return i;
}

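/* start the channel: clear the error counters, then switch to normal or
 * listen-only mode
 */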
static int peak_canfd_start(struct peak_canfd_priv *priv)
{
	int err;

	err = pucan_clr_err_counters(priv);
	if (err)
		goto err_exit;

	priv->echo_idx = 0;

	priv->bec.txerr = 0;
	priv->bec.rxerr = 0;

	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		err = pucan_set_listen_only_mode(priv);
	else
		err = pucan_set_normal_mode(priv);

err_exit:
	return err;
}

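/* stop the channel: switch back to RESET mode, then abort any pending Tx */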
static void peak_canfd_stop(struct peak_canfd_priv *priv)
{
	int err;

	/* go back to RESET mode */
	err = pucan_set_reset_mode(priv);
	if (err) {
		netdev_err(priv->ndev, "channel %u reset failed\n",
			   priv->index);
	} else {
		/* abort last Tx (MUST be done in RESET mode only!) */
		pucan_tx_abort(priv, PUCAN_TX_ABORT_FLUSH);
	}
}

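/* candev do_set_mode() callback: only CAN_MODE_START (restart) is supported */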
static int peak_canfd_set_mode(struct net_device *ndev, enum can_mode mode)
{
	struct peak_canfd_priv *priv = netdev_priv(ndev);

	switch (mode) {
	case CAN_MODE_START:
		peak_canfd_start(priv);
		netif_wake_queue(ndev);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int peak_canfd_get_berr_counter(const struct net_device *ndev,
				       struct can_berr_counter *bec)
{
	struct peak_canfd_priv *priv = netdev_priv(ndev);

	*bec = priv->bec;
	return 0;
}

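/* netdev open: reset the channel, configure options and filters, start it,
 * then post an RX_BARRIER command; the Tx path is enabled when the
 * corresponding STATUS message is received (see pucan_handle_status())
 */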
static int peak_canfd_open(struct net_device *ndev)
{
	struct peak_canfd_priv *priv = netdev_priv(ndev);
	int i, err = 0;

	err = open_candev(ndev);
	if (err) {
		netdev_err(ndev, "open_candev() failed, error %d\n", err);
		goto err_exit;
	}

	err = pucan_set_reset_mode(priv);
	if (err)
		goto err_close;

	if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
		if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
			err = pucan_clr_options(priv, PUCAN_OPTION_CANDFDISO);
		else
			err = pucan_set_options(priv, PUCAN_OPTION_CANDFDISO);

		if (err)
			goto err_close;
	}

	/* set option: get rx/tx error counters */
	err = pucan_set_options(priv, PUCAN_OPTION_ERROR);
	if (err)
		goto err_close;

	/* accept all standard CAN ID */
	for (i = 0; i <= PUCAN_FLTSTD_ROW_IDX_MAX; i++)
		pucan_set_std_filter(priv, i, 0xffffffff);

	err = peak_canfd_start(priv);
	if (err)
		goto err_close;

	/* receiving the RB status says when Tx path is ready */
	err = pucan_setup_rx_barrier(priv);
	if (!err)
		goto err_exit;

err_close:
	close_candev(ndev);
err_exit:
	return err;
}

static int peak_canfd_set_bittiming(struct net_device *ndev)
{
	struct peak_canfd_priv *priv = netdev_priv(ndev);

	return pucan_set_timing_slow(priv, &priv->can.bittiming);
}

static int peak_canfd_set_data_bittiming(struct net_device *ndev)
{
	struct peak_canfd_priv *priv = netdev_priv(ndev);

	return pucan_set_timing_fast(priv, &priv->can.data_bittiming);
}

static int peak_canfd_close(struct net_device *ndev)
{
	struct peak_canfd_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	peak_canfd_stop(priv);
	close_candev(ndev);

	return 0;
}

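/* netdev xmit: build a uCAN Tx message from the skb, save an echo skb in the
 * ring, and write the message to the interface
 */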
static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb,
					 struct net_device *ndev)
{
	struct peak_canfd_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	struct pucan_tx_msg *msg;
	u16 msg_size, msg_flags;
	unsigned long flags;
	bool should_stop_tx_queue;
	int room_left;
	u8 len;

	if (can_dev_dropped_skb(ndev, skb))
		return NETDEV_TX_OK;

	msg_size = ALIGN(sizeof(*msg) + cf->len, 4);
	msg = priv->alloc_tx_msg(priv, msg_size, &room_left);

	/* should never happen except under bus-off condition and (auto-)restart
	 * mechanism
	 */
	if (!msg) {
		stats->tx_dropped++;
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	msg->size = cpu_to_le16(msg_size);
	msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX);
	msg_flags = 0;

	if (cf->can_id & CAN_EFF_FLAG) {
		msg_flags |= PUCAN_MSG_EXT_ID;
		msg->can_id = cpu_to_le32(cf->can_id & CAN_EFF_MASK);
	} else {
		msg->can_id = cpu_to_le32(cf->can_id & CAN_SFF_MASK);
	}

	if (can_is_canfd_skb(skb)) {
		/* CAN FD frame format */
		len = can_fd_len2dlc(cf->len);

		msg_flags |= PUCAN_MSG_EXT_DATA_LEN;

		if (cf->flags & CANFD_BRS)
			msg_flags |= PUCAN_MSG_BITRATE_SWITCH;

		if (cf->flags & CANFD_ESI)
			msg_flags |= PUCAN_MSG_ERROR_STATE_IND;
	} else {
		/* CAN 2.0 frame format */
		len = cf->len;

		if (cf->can_id & CAN_RTR_FLAG)
			msg_flags |= PUCAN_MSG_RTR;
	}

	/* always ask loopback for echo management */
	msg_flags |= PUCAN_MSG_LOOPED_BACK;

	/* set driver-specific bit to differentiate from application loopback */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
		msg_flags |= PUCAN_MSG_SELF_RECEIVE;

	msg->flags = cpu_to_le16(msg_flags);
	msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(priv->index, len);
	memcpy(msg->d, cf->data, cf->len);

	/* struct msg client field is used as an index in the echo skbs ring */
	msg->client = priv->echo_idx;

	spin_lock_irqsave(&priv->echo_lock, flags);

	/* prepare and save echo skb in internal slot */
	can_put_echo_skb(skb, ndev, priv->echo_idx, 0);

	/* move echo index to the next slot */
	priv->echo_idx = (priv->echo_idx + 1) % priv->can.echo_skb_max;

	/* if next slot is not free, stop network queue (no slot free in echo
	 * skb ring means that the controller did not write these frames on
	 * the bus: no need to continue).
	 */
	should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]);

	/* stop network tx queue if not enough room to save one more msg too */
	if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
		should_stop_tx_queue |= (room_left <
					(sizeof(*msg) + CANFD_MAX_DLEN));
	else
		should_stop_tx_queue |= (room_left <
					(sizeof(*msg) + CAN_MAX_DLEN));

	if (should_stop_tx_queue)
		netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->echo_lock, flags);

	/* write the skb on the interface */
	priv->write_tx_msg(priv, msg);

	return NETDEV_TX_OK;
}

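/* hardware timestamping ioctl: Rx timestamping is always on, Tx timestamping
 * is not supported
 */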
static int peak_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct hwtstamp_config hwts_cfg = { 0 };

	switch (cmd) {
	case SIOCSHWTSTAMP: /* set */
		if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg)))
			return -EFAULT;
		if (hwts_cfg.tx_type == HWTSTAMP_TX_OFF &&
		    hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL)
			return 0;
		return -ERANGE;

	case SIOCGHWTSTAMP: /* get */
		hwts_cfg.tx_type = HWTSTAMP_TX_OFF;
		hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL;
		if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg)))
			return -EFAULT;
		return 0;

	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops peak_canfd_netdev_ops = {
	.ndo_open = peak_canfd_open,
	.ndo_stop = peak_canfd_close,
	.ndo_eth_ioctl = peak_eth_ioctl,
	.ndo_start_xmit = peak_canfd_start_xmit,
	.ndo_change_mtu = can_change_mtu,
};

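/* ethtool get_ts_info: advertise Rx hardware timestamping capabilities */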
static int peak_get_ts_info(struct net_device *dev,
			    struct kernel_ethtool_ts_info *info)
{
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->tx_types = BIT(HWTSTAMP_TX_OFF);
	info->rx_filters = BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}

static const struct ethtool_ops peak_canfd_ethtool_ops = {
	.get_ts_info = peak_get_ts_info,
};

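/* allocate a CAN-FD netdevice and initialize its socket-CAN side; the caller
 * still has to fill in the device-specific callbacks and register the device
 */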
struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index,
					int echo_skb_max)
{
	struct net_device *ndev;
	struct peak_canfd_priv *priv;

	/* we DO support local echo */
	if (echo_skb_max < 0)
		echo_skb_max = PCANFD_ECHO_SKB_MAX;

	/* allocate the candev object */
	ndev = alloc_candev(sizeof_priv, echo_skb_max);
	if (!ndev)
		return NULL;

	priv = netdev_priv(ndev);

	/* complete now socket-can initialization side */
	priv->can.state = CAN_STATE_STOPPED;
	priv->can.bittiming_const = &peak_canfd_nominal_const;
	priv->can.data_bittiming_const = &peak_canfd_data_const;

	priv->can.do_set_mode = peak_canfd_set_mode;
	priv->can.do_get_berr_counter = peak_canfd_get_berr_counter;
	priv->can.do_set_bittiming = peak_canfd_set_bittiming;
	priv->can.do_set_data_bittiming = peak_canfd_set_data_bittiming;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
				       CAN_CTRLMODE_LISTENONLY |
				       CAN_CTRLMODE_3_SAMPLES |
				       CAN_CTRLMODE_FD |
				       CAN_CTRLMODE_FD_NON_ISO |
				       CAN_CTRLMODE_BERR_REPORTING;

	priv->ndev = ndev;
	priv->index = index;
	priv->cmd_len = 0;
	spin_lock_init(&priv->echo_lock);

	ndev->flags |= IFF_ECHO;
	ndev->netdev_ops = &peak_canfd_netdev_ops;
	ndev->ethtool_ops = &peak_canfd_ethtool_ops;
	ndev->dev_id = index;

	return ndev;
}