1 // SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2021 in-tech smart charging GmbH
 *
 * driver is based on micrel/ks8851_spi.c
 */
7 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/cache.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/if_vlan.h>

#include <linux/spi/spi.h>
#include <linux/of_net.h>
22 #define MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
25 #define DRV_NAME "mse102x"
27 #define DET_CMD 0x0001
28 #define DET_SOF 0x0002
29 #define DET_DFT 0x55AA
32 #define CMD_RTS (0x1 << CMD_SHIFT)
33 #define CMD_CTR (0x2 << CMD_SHIFT)
35 #define CMD_MASK GENMASK(15, CMD_SHIFT)
36 #define LEN_MASK GENMASK(CMD_SHIFT - 1, 0)
42 #define MIN_FREQ_HZ 6000000
43 #define MAX_FREQ_HZ 7142857
45 struct mse102x_stats
{
56 static const char mse102x_gstrings_stats
[][ETH_GSTRING_LEN
] = {
57 "SPI transfer errors",
61 "Invalid frame length",
68 struct net_device
*ndev
;
73 u32 msg_enable ____cacheline_aligned
;
75 struct sk_buff_head txq
;
76 struct mse102x_stats stats
;
79 struct mse102x_net_spi
{
80 struct mse102x_net mse102x
;
81 struct mutex lock
; /* Protect SPI frame transfer */
82 struct work_struct tx_work
;
83 struct spi_device
*spidev
;
84 struct spi_message spi_msg
;
85 struct spi_transfer spi_xfer
;
87 #ifdef CONFIG_DEBUG_FS
88 struct dentry
*device_root
;
92 #define to_mse102x_spi(mse) container_of((mse), struct mse102x_net_spi, mse102x)
94 #ifdef CONFIG_DEBUG_FS
96 static int mse102x_info_show(struct seq_file
*s
, void *what
)
98 struct mse102x_net_spi
*mses
= s
->private;
100 seq_printf(s
, "TX ring size : %u\n",
101 skb_queue_len(&mses
->mse102x
.txq
));
103 seq_printf(s
, "IRQ : %d\n",
106 seq_printf(s
, "SPI effective speed : %lu\n",
107 (unsigned long)mses
->spi_xfer
.effective_speed_hz
);
108 seq_printf(s
, "SPI mode : %x\n",
113 DEFINE_SHOW_ATTRIBUTE(mse102x_info
);
115 static void mse102x_init_device_debugfs(struct mse102x_net_spi
*mses
)
117 mses
->device_root
= debugfs_create_dir(dev_name(&mses
->mse102x
.ndev
->dev
),
120 debugfs_create_file("info", S_IFREG
| 0444, mses
->device_root
, mses
,
124 static void mse102x_remove_device_debugfs(struct mse102x_net_spi
*mses
)
126 debugfs_remove_recursive(mses
->device_root
);
129 #else /* CONFIG_DEBUG_FS */
/* debugfs disabled: keep empty stubs so callers need no #ifdefs. */
static void mse102x_init_device_debugfs(struct mse102x_net_spi *mses)
{
}

static void mse102x_remove_device_debugfs(struct mse102x_net_spi *mses)
{
}
/* SPI register read/write calls.
 *
 * All these calls issue SPI transactions to access the chip's registers. They
 * all require that the necessary lock is held to prevent accesses when the
 * chip is busy transferring packet data.
 */
148 static void mse102x_tx_cmd_spi(struct mse102x_net
*mse
, u16 cmd
)
150 struct mse102x_net_spi
*mses
= to_mse102x_spi(mse
);
151 struct spi_transfer
*xfer
= &mses
->spi_xfer
;
152 struct spi_message
*msg
= &mses
->spi_msg
;
156 txb
[0] = cpu_to_be16(DET_CMD
);
157 txb
[1] = cpu_to_be16(cmd
);
161 xfer
->len
= DET_CMD_LEN
;
163 ret
= spi_sync(mses
->spidev
, msg
);
165 netdev_err(mse
->ndev
, "%s: spi_sync() failed: %d\n",
167 mse
->stats
.xfer_err
++;
171 static int mse102x_rx_cmd_spi(struct mse102x_net
*mse
, u8
*rxb
)
173 struct mse102x_net_spi
*mses
= to_mse102x_spi(mse
);
174 struct spi_transfer
*xfer
= &mses
->spi_xfer
;
175 struct spi_message
*msg
= &mses
->spi_msg
;
176 __be16
*txb
= (__be16
*)mse
->txd
;
177 __be16
*cmd
= (__be16
*)mse
->rxd
;
186 xfer
->len
= DET_CMD_LEN
;
188 ret
= spi_sync(mses
->spidev
, msg
);
190 netdev_err(mse
->ndev
, "%s: spi_sync() failed: %d\n",
192 mse
->stats
.xfer_err
++;
193 } else if (*cmd
!= cpu_to_be16(DET_CMD
)) {
194 net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
196 mse
->stats
.invalid_cmd
++;
199 memcpy(rxb
, trx
+ 2, 2);
205 static inline void mse102x_push_header(struct sk_buff
*skb
)
207 __be16
*header
= skb_push(skb
, DET_SOF_LEN
);
209 *header
= cpu_to_be16(DET_SOF
);
212 static inline void mse102x_put_footer(struct sk_buff
*skb
)
214 __be16
*footer
= skb_put(skb
, DET_DFT_LEN
);
216 *footer
= cpu_to_be16(DET_DFT
);
219 static int mse102x_tx_frame_spi(struct mse102x_net
*mse
, struct sk_buff
*txp
,
222 struct mse102x_net_spi
*mses
= to_mse102x_spi(mse
);
223 struct spi_transfer
*xfer
= &mses
->spi_xfer
;
224 struct spi_message
*msg
= &mses
->spi_msg
;
225 struct sk_buff
*tskb
= NULL
;
228 netif_dbg(mse
, tx_queued
, mse
->ndev
, "%s: skb %p, %d@%p\n",
229 __func__
, txp
, txp
->len
, txp
->data
);
231 if ((skb_headroom(txp
) < DET_SOF_LEN
) ||
232 (skb_tailroom(txp
) < DET_DFT_LEN
+ pad
)) {
233 tskb
= skb_copy_expand(txp
, DET_SOF_LEN
, DET_DFT_LEN
+ pad
,
241 mse102x_push_header(txp
);
244 skb_put_zero(txp
, pad
);
246 mse102x_put_footer(txp
);
248 xfer
->tx_buf
= txp
->data
;
250 xfer
->len
= txp
->len
;
252 ret
= spi_sync(mses
->spidev
, msg
);
254 netdev_err(mse
->ndev
, "%s: spi_sync() failed: %d\n",
256 mse
->stats
.xfer_err
++;
264 static int mse102x_rx_frame_spi(struct mse102x_net
*mse
, u8
*buff
,
265 unsigned int frame_len
)
267 struct mse102x_net_spi
*mses
= to_mse102x_spi(mse
);
268 struct spi_transfer
*xfer
= &mses
->spi_xfer
;
269 struct spi_message
*msg
= &mses
->spi_msg
;
270 __be16
*sof
= (__be16
*)buff
;
271 __be16
*dft
= (__be16
*)(buff
+ DET_SOF_LEN
+ frame_len
);
276 xfer
->len
= DET_SOF_LEN
+ frame_len
+ DET_DFT_LEN
;
278 ret
= spi_sync(mses
->spidev
, msg
);
280 netdev_err(mse
->ndev
, "%s: spi_sync() failed: %d\n",
282 mse
->stats
.xfer_err
++;
283 } else if (*sof
!= cpu_to_be16(DET_SOF
)) {
284 netdev_dbg(mse
->ndev
, "%s: SPI start of frame is invalid (0x%04x)\n",
286 mse
->stats
.invalid_sof
++;
288 } else if (*dft
!= cpu_to_be16(DET_DFT
)) {
289 netdev_dbg(mse
->ndev
, "%s: SPI frame tail is invalid (0x%04x)\n",
291 mse
->stats
.invalid_dft
++;
298 static void mse102x_dump_packet(const char *msg
, int len
, const char *data
)
300 printk(KERN_DEBUG
": %s - packet len:%d\n", msg
, len
);
301 print_hex_dump(KERN_DEBUG
, "pk data: ", DUMP_PREFIX_OFFSET
, 16, 1,
305 static void mse102x_rx_pkt_spi(struct mse102x_net
*mse
)
308 unsigned int rxalign
;
315 mse102x_tx_cmd_spi(mse
, CMD_CTR
);
316 ret
= mse102x_rx_cmd_spi(mse
, (u8
*)&rx
);
317 cmd_resp
= be16_to_cpu(rx
);
319 if (ret
|| ((cmd_resp
& CMD_MASK
) != CMD_RTS
)) {
320 usleep_range(50, 100);
322 mse102x_tx_cmd_spi(mse
, CMD_CTR
);
323 ret
= mse102x_rx_cmd_spi(mse
, (u8
*)&rx
);
327 cmd_resp
= be16_to_cpu(rx
);
328 if ((cmd_resp
& CMD_MASK
) != CMD_RTS
) {
329 net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
331 mse
->stats
.invalid_rts
++;
335 net_dbg_ratelimited("%s: Unexpected response to first CMD\n",
339 rxlen
= cmd_resp
& LEN_MASK
;
341 net_dbg_ratelimited("%s: No frame length defined\n", __func__
);
342 mse
->stats
.invalid_len
++;
346 rxalign
= ALIGN(rxlen
+ DET_SOF_LEN
+ DET_DFT_LEN
, 4);
347 skb
= netdev_alloc_skb_ip_align(mse
->ndev
, rxalign
);
351 /* 2 bytes Start of frame (before ethernet header)
352 * 2 bytes Data frame tail (after ethernet frame)
353 * They are copied, but ignored.
355 rxpkt
= skb_put(skb
, rxlen
) - DET_SOF_LEN
;
356 if (mse102x_rx_frame_spi(mse
, rxpkt
, rxlen
)) {
357 mse
->ndev
->stats
.rx_errors
++;
362 if (netif_msg_pktdata(mse
))
363 mse102x_dump_packet(__func__
, skb
->len
, skb
->data
);
365 skb
->protocol
= eth_type_trans(skb
, mse
->ndev
);
368 mse
->ndev
->stats
.rx_packets
++;
369 mse
->ndev
->stats
.rx_bytes
+= rxlen
;
372 static int mse102x_tx_pkt_spi(struct mse102x_net
*mse
, struct sk_buff
*txb
,
373 unsigned long work_timeout
)
375 unsigned int pad
= 0;
381 if (txb
->len
< ETH_ZLEN
)
382 pad
= ETH_ZLEN
- txb
->len
;
385 mse102x_tx_cmd_spi(mse
, CMD_RTS
| (txb
->len
+ pad
));
386 ret
= mse102x_rx_cmd_spi(mse
, (u8
*)&rx
);
387 cmd_resp
= be16_to_cpu(rx
);
390 /* ready to send frame ? */
391 if (cmd_resp
== CMD_CTR
)
394 net_dbg_ratelimited("%s: Unexpected response (0x%04x)\n",
396 mse
->stats
.invalid_ctr
++;
399 /* It's not predictable how long / many retries it takes to
400 * send at least one packet, so TX timeouts are possible.
401 * That's the reason why the netdev watchdog is not used here.
403 if (time_after(jiffies
, work_timeout
))
407 /* throttle at first issue */
408 netif_stop_queue(mse
->ndev
);
410 usleep_range(50, 100);
417 ret
= mse102x_tx_frame_spi(mse
, txb
, pad
);
419 net_dbg_ratelimited("%s: Failed to send (%d), drop frame\n",
425 #define TX_QUEUE_MAX 10
427 static void mse102x_tx_work(struct work_struct
*work
)
429 /* Make sure timeout is sufficient to transfer TX_QUEUE_MAX frames */
430 unsigned long work_timeout
= jiffies
+ msecs_to_jiffies(1000);
431 struct mse102x_net_spi
*mses
;
432 struct mse102x_net
*mse
;
436 mses
= container_of(work
, struct mse102x_net_spi
, tx_work
);
437 mse
= &mses
->mse102x
;
439 while ((txb
= skb_dequeue(&mse
->txq
))) {
440 unsigned int len
= max_t(unsigned int, txb
->len
, ETH_ZLEN
);
442 mutex_lock(&mses
->lock
);
443 ret
= mse102x_tx_pkt_spi(mse
, txb
, work_timeout
);
444 mutex_unlock(&mses
->lock
);
446 mse
->ndev
->stats
.tx_dropped
++;
448 mse
->ndev
->stats
.tx_bytes
+= len
;
449 mse
->ndev
->stats
.tx_packets
++;
455 if (ret
== -ETIMEDOUT
) {
456 if (netif_msg_timer(mse
))
457 netdev_err_once(mse
->ndev
, "tx work timeout\n");
459 mse
->stats
.tx_timeout
++;
462 netif_wake_queue(mse
->ndev
);
465 static netdev_tx_t
mse102x_start_xmit_spi(struct sk_buff
*skb
,
466 struct net_device
*ndev
)
468 struct mse102x_net
*mse
= netdev_priv(ndev
);
469 struct mse102x_net_spi
*mses
= to_mse102x_spi(mse
);
471 netif_dbg(mse
, tx_queued
, ndev
,
472 "%s: skb %p, %d@%p\n", __func__
, skb
, skb
->len
, skb
->data
);
474 skb_queue_tail(&mse
->txq
, skb
);
476 if (skb_queue_len(&mse
->txq
) >= TX_QUEUE_MAX
)
477 netif_stop_queue(ndev
);
479 schedule_work(&mses
->tx_work
);
484 static void mse102x_init_mac(struct mse102x_net
*mse
, struct device_node
*np
)
486 struct net_device
*ndev
= mse
->ndev
;
487 int ret
= of_get_ethdev_address(np
, ndev
);
490 eth_hw_addr_random(ndev
);
491 dev_warn(ndev
->dev
.parent
, "Using random MAC address: %pM\n",
496 /* Assumption: this is called for every incoming packet */
497 static irqreturn_t
mse102x_irq(int irq
, void *_mse
)
499 struct mse102x_net
*mse
= _mse
;
500 struct mse102x_net_spi
*mses
= to_mse102x_spi(mse
);
502 mutex_lock(&mses
->lock
);
503 mse102x_rx_pkt_spi(mse
);
504 mutex_unlock(&mses
->lock
);
509 static int mse102x_net_open(struct net_device
*ndev
)
511 struct mse102x_net
*mse
= netdev_priv(ndev
);
514 ret
= request_threaded_irq(ndev
->irq
, NULL
, mse102x_irq
, IRQF_ONESHOT
,
517 netdev_err(ndev
, "Failed to get irq: %d\n", ret
);
521 netif_dbg(mse
, ifup
, ndev
, "opening\n");
523 netif_start_queue(ndev
);
525 netif_carrier_on(ndev
);
527 netif_dbg(mse
, ifup
, ndev
, "network device up\n");
532 static int mse102x_net_stop(struct net_device
*ndev
)
534 struct mse102x_net
*mse
= netdev_priv(ndev
);
535 struct mse102x_net_spi
*mses
= to_mse102x_spi(mse
);
537 netif_info(mse
, ifdown
, ndev
, "shutting down\n");
539 netif_carrier_off(mse
->ndev
);
541 /* stop any outstanding work */
542 flush_work(&mses
->tx_work
);
544 netif_stop_queue(ndev
);
546 skb_queue_purge(&mse
->txq
);
548 free_irq(ndev
->irq
, mse
);
553 static const struct net_device_ops mse102x_netdev_ops
= {
554 .ndo_open
= mse102x_net_open
,
555 .ndo_stop
= mse102x_net_stop
,
556 .ndo_start_xmit
= mse102x_start_xmit_spi
,
557 .ndo_set_mac_address
= eth_mac_addr
,
558 .ndo_validate_addr
= eth_validate_addr
,
561 /* ethtool support */
563 static void mse102x_get_drvinfo(struct net_device
*ndev
,
564 struct ethtool_drvinfo
*di
)
566 strscpy(di
->driver
, DRV_NAME
, sizeof(di
->driver
));
567 strscpy(di
->bus_info
, dev_name(ndev
->dev
.parent
), sizeof(di
->bus_info
));
570 static u32
mse102x_get_msglevel(struct net_device
*ndev
)
572 struct mse102x_net
*mse
= netdev_priv(ndev
);
574 return mse
->msg_enable
;
577 static void mse102x_set_msglevel(struct net_device
*ndev
, u32 to
)
579 struct mse102x_net
*mse
= netdev_priv(ndev
);
581 mse
->msg_enable
= to
;
584 static void mse102x_get_ethtool_stats(struct net_device
*ndev
,
585 struct ethtool_stats
*estats
, u64
*data
)
587 struct mse102x_net
*mse
= netdev_priv(ndev
);
588 struct mse102x_stats
*st
= &mse
->stats
;
590 memcpy(data
, st
, ARRAY_SIZE(mse102x_gstrings_stats
) * sizeof(u64
));
593 static void mse102x_get_strings(struct net_device
*ndev
, u32 stringset
, u8
*buf
)
597 memcpy(buf
, &mse102x_gstrings_stats
,
598 sizeof(mse102x_gstrings_stats
));
606 static int mse102x_get_sset_count(struct net_device
*ndev
, int sset
)
610 return ARRAY_SIZE(mse102x_gstrings_stats
);
616 static const struct ethtool_ops mse102x_ethtool_ops
= {
617 .get_drvinfo
= mse102x_get_drvinfo
,
618 .get_link
= ethtool_op_get_link
,
619 .get_msglevel
= mse102x_get_msglevel
,
620 .set_msglevel
= mse102x_set_msglevel
,
621 .get_ethtool_stats
= mse102x_get_ethtool_stats
,
622 .get_strings
= mse102x_get_strings
,
623 .get_sset_count
= mse102x_get_sset_count
,
626 /* driver bus management functions */
628 static int mse102x_suspend(struct device
*dev
)
630 struct mse102x_net
*mse
= dev_get_drvdata(dev
);
631 struct net_device
*ndev
= mse
->ndev
;
633 if (netif_running(ndev
)) {
634 netif_device_detach(ndev
);
635 mse102x_net_stop(ndev
);
641 static int mse102x_resume(struct device
*dev
)
643 struct mse102x_net
*mse
= dev_get_drvdata(dev
);
644 struct net_device
*ndev
= mse
->ndev
;
646 if (netif_running(ndev
)) {
647 mse102x_net_open(ndev
);
648 netif_device_attach(ndev
);
654 static DEFINE_SIMPLE_DEV_PM_OPS(mse102x_pm_ops
, mse102x_suspend
, mse102x_resume
);
656 static int mse102x_probe_spi(struct spi_device
*spi
)
658 struct device
*dev
= &spi
->dev
;
659 struct mse102x_net_spi
*mses
;
660 struct net_device
*ndev
;
661 struct mse102x_net
*mse
;
664 spi
->bits_per_word
= 8;
665 spi
->mode
|= SPI_MODE_3
;
666 /* enforce minimum speed to ensure device functionality */
667 spi
->controller
->min_speed_hz
= MIN_FREQ_HZ
;
669 if (!spi
->max_speed_hz
)
670 spi
->max_speed_hz
= MAX_FREQ_HZ
;
672 if (spi
->max_speed_hz
< MIN_FREQ_HZ
||
673 spi
->max_speed_hz
> MAX_FREQ_HZ
) {
674 dev_err(&spi
->dev
, "SPI max frequency out of range (min: %u, max: %u)\n",
675 MIN_FREQ_HZ
, MAX_FREQ_HZ
);
679 ret
= spi_setup(spi
);
681 dev_err(&spi
->dev
, "Unable to setup SPI device: %d\n", ret
);
685 ndev
= devm_alloc_etherdev(dev
, sizeof(struct mse102x_net_spi
));
689 ndev
->needed_tailroom
+= ALIGN(DET_DFT_LEN
, 4);
690 ndev
->needed_headroom
+= ALIGN(DET_SOF_LEN
, 4);
691 ndev
->priv_flags
&= ~IFF_TX_SKB_SHARING
;
692 ndev
->tx_queue_len
= 100;
694 mse
= netdev_priv(ndev
);
695 mses
= to_mse102x_spi(mse
);
698 mutex_init(&mses
->lock
);
699 INIT_WORK(&mses
->tx_work
, mse102x_tx_work
);
701 /* initialise pre-made spi transfer messages */
702 spi_message_init(&mses
->spi_msg
);
703 spi_message_add_tail(&mses
->spi_xfer
, &mses
->spi_msg
);
705 ndev
->irq
= spi
->irq
;
708 /* set the default message enable */
709 mse
->msg_enable
= netif_msg_init(-1, MSG_DEFAULT
);
711 skb_queue_head_init(&mse
->txq
);
713 SET_NETDEV_DEV(ndev
, dev
);
715 dev_set_drvdata(dev
, mse
);
717 netif_carrier_off(mse
->ndev
);
718 ndev
->netdev_ops
= &mse102x_netdev_ops
;
719 ndev
->ethtool_ops
= &mse102x_ethtool_ops
;
721 mse102x_init_mac(mse
, dev
->of_node
);
723 ret
= register_netdev(ndev
);
725 dev_err(dev
, "failed to register network device: %d\n", ret
);
729 mse102x_init_device_debugfs(mses
);
734 static void mse102x_remove_spi(struct spi_device
*spi
)
736 struct mse102x_net
*mse
= dev_get_drvdata(&spi
->dev
);
737 struct mse102x_net_spi
*mses
= to_mse102x_spi(mse
);
739 mse102x_remove_device_debugfs(mses
);
740 unregister_netdev(mse
->ndev
);
743 static const struct of_device_id mse102x_match_table
[] = {
744 { .compatible
= "vertexcom,mse1021" },
745 { .compatible
= "vertexcom,mse1022" },
748 MODULE_DEVICE_TABLE(of
, mse102x_match_table
);
750 static const struct spi_device_id mse102x_ids
[] = {
755 MODULE_DEVICE_TABLE(spi
, mse102x_ids
);
757 static struct spi_driver mse102x_driver
= {
760 .of_match_table
= mse102x_match_table
,
761 .pm
= pm_sleep_ptr(&mse102x_pm_ops
),
763 .probe
= mse102x_probe_spi
,
764 .remove
= mse102x_remove_spi
,
765 .id_table
= mse102x_ids
,
767 module_spi_driver(mse102x_driver
);
769 MODULE_DESCRIPTION("MSE102x Network driver");
770 MODULE_AUTHOR("Stefan Wahren <stefan.wahren@chargebyte.com>");
771 MODULE_LICENSE("GPL");
772 MODULE_ALIAS("spi:" DRV_NAME
);