// SPDX-License-Identifier: GPL-2.0-or-later
/* MHI Network driver - Network over MHI bus
 *
 * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
 */

#include <linux/if_arp.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>

#define MHI_NET_MIN_MTU		ETH_MIN_MTU
#define MHI_NET_MAX_MTU		0xffff
#define MHI_NET_DEFAULT_MTU	0x4000

struct mhi_net_stats {
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	u64_stats_t rx_errors;
	u64_stats_t rx_dropped;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;
	u64_stats_t tx_errors;
	u64_stats_t tx_dropped;
	atomic_t rx_queued;
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
};

struct mhi_net_dev {
	struct mhi_device *mdev;
	struct net_device *ndev;
	struct delayed_work rx_refill;
	struct mhi_net_stats stats;
	u32 rx_queue_sz;
};

static int mhi_ndo_open(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	/* Feed the rx buffer pool */
	schedule_delayed_work(&mhi_netdev->rx_refill, 0);

	/* Carrier is established via out-of-band channel (e.g. qmi) */
	netif_carrier_on(ndev);

	netif_start_queue(ndev);

	return 0;
}

static int mhi_ndo_stop(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	cancel_delayed_work_sync(&mhi_netdev->rx_refill);

	return 0;
}
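
/*
 * TX path: queue the skb directly on the MHI uplink channel. If the TX
 * ring is full after queuing, stop the netdev queue; the UL completion
 * callback wakes it up again once ring space is available.
 */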
static int mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
	struct mhi_device *mdev = mhi_netdev->mdev;
	int err;

	err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (unlikely(err)) {
		net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
				    ndev->name, err);

		u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
		u64_stats_inc(&mhi_netdev->stats.tx_dropped);
		u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

		/* drop the packet */
		dev_kfree_skb_any(skb);
	}

	if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}
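
/*
 * Stats are written from the transfer callbacks under u64_stats_sync;
 * readers loop on the fetch/retry pair to get a consistent 64-bit
 * snapshot, which matters on 32-bit systems.
 */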
static void mhi_ndo_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.rx_syncp);
		stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
		stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
		stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
		stats->rx_dropped = u64_stats_read(&mhi_netdev->stats.rx_dropped);
	} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.rx_syncp, start));

	do {
		start = u64_stats_fetch_begin_irq(&mhi_netdev->stats.tx_syncp);
		stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
		stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
		stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
		stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
	} while (u64_stats_fetch_retry_irq(&mhi_netdev->stats.tx_syncp, start));
}

static const struct net_device_ops mhi_netdev_ops = {
	.ndo_open		= mhi_ndo_open,
	.ndo_stop		= mhi_ndo_stop,
	.ndo_start_xmit		= mhi_ndo_xmit,
	.ndo_get_stats64	= mhi_ndo_get_stats64,
};

static void mhi_net_setup(struct net_device *ndev)
{
	ndev->header_ops = NULL;  /* No header */
	ndev->type = ARPHRD_NONE; /* QMAP... */
	ndev->hard_header_len = 0;
	ndev->addr_len = 0;
	ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
	ndev->netdev_ops = &mhi_netdev_ops;
	ndev->mtu = MHI_NET_DEFAULT_MTU;
	ndev->min_mtu = MHI_NET_MIN_MTU;
	ndev->max_mtu = MHI_NET_MAX_MTU;
	ndev->tx_queue_len = 1000;
}
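
/*
 * Downlink (RX) completion: the skb was pre-queued by the refill work.
 * On success the payload is tagged as a QMAP (ETH_P_MAP) frame and handed
 * to the stack; the refill work is rescheduled once the RX pool drops
 * below half of rx_queue_sz.
 */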
static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct sk_buff *skb = mhi_res->buf_addr;
	int remaining;

	remaining = atomic_dec_return(&mhi_netdev->stats.rx_queued);

	if (unlikely(mhi_res->transaction_status)) {
		dev_kfree_skb_any(skb);

		/* MHI layer stopping/resetting the DL channel */
		if (mhi_res->transaction_status == -ENOTCONN)
			return;

		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_errors);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
	} else {
		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_packets);
		u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);

		skb->protocol = htons(ETH_P_MAP);
		skb_put(skb, mhi_res->bytes_xferd);
		netif_rx(skb);
	}

	/* Refill if RX buffers queue becomes low */
	if (remaining <= mhi_netdev->rx_queue_sz / 2)
		schedule_delayed_work(&mhi_netdev->rx_refill, 0);
}

static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb = mhi_res->buf_addr;

	/* Hardware has consumed the buffer, so free the skb (which is not
	 * freed by the MHI stack) and perform accounting.
	 */
	dev_consume_skb_any(skb);

	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	if (unlikely(mhi_res->transaction_status)) {
		/* MHI layer stopping/resetting the UL channel */
		if (mhi_res->transaction_status == -ENOTCONN) {
			u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
			return;
		}

		u64_stats_inc(&mhi_netdev->stats.tx_errors);
	} else {
		u64_stats_inc(&mhi_netdev->stats.tx_packets);
		u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
	}
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_wake_queue(ndev);
}
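
/*
 * Refill the downlink channel with MTU-sized skbs until rx_queue_sz
 * buffers are in flight; retry later if allocation or queuing fails.
 */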
static void mhi_net_rx_refill_work(struct work_struct *work)
{
	struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
						      rx_refill.work);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	int size = READ_ONCE(ndev->mtu);
	struct sk_buff *skb;
	int err;

	while (atomic_read(&mhi_netdev->stats.rx_queued) < mhi_netdev->rx_queue_sz) {
		skb = netdev_alloc_skb(ndev, size);
		if (unlikely(!skb))
			break;

		err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
		if (unlikely(err)) {
			net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
					    ndev->name, err);
			kfree_skb(skb);
			break;
		}

		atomic_inc(&mhi_netdev->stats.rx_queued);

		/* Do not hog the CPU if rx buffers are consumed faster than
		 * queued (unlikely).
		 */
		cond_resched();
	}

	/* If we're still starved of rx buffers, reschedule later */
	if (unlikely(!atomic_read(&mhi_netdev->stats.rx_queued)))
		schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}
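
/*
 * Probe: allocate the netdev with our private state, start the MHI
 * channels, then register the network interface.
 */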
static int mhi_net_probe(struct mhi_device *mhi_dev,
			 const struct mhi_device_id *id)
{
	const char *netname = (char *)id->driver_data;
	struct device *dev = &mhi_dev->dev;
	struct mhi_net_dev *mhi_netdev;
	struct net_device *ndev;
	int err;

	ndev = alloc_netdev(sizeof(*mhi_netdev), netname, NET_NAME_PREDICTABLE,
			    mhi_net_setup);
	if (!ndev)
		return -ENOMEM;

	mhi_netdev = netdev_priv(ndev);
	dev_set_drvdata(dev, mhi_netdev);
	mhi_netdev->ndev = ndev;
	mhi_netdev->mdev = mhi_dev;
	SET_NETDEV_DEV(ndev, &mhi_dev->dev);

	/* All MHI net channels have 128 ring elements (at least for now) */
	mhi_netdev->rx_queue_sz = 128;

	INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
	u64_stats_init(&mhi_netdev->stats.rx_syncp);
	u64_stats_init(&mhi_netdev->stats.tx_syncp);

	/* Start MHI channels */
	err = mhi_prepare_for_transfer(mhi_dev);
	if (err)
		goto err_free_netdev;

	err = register_netdev(ndev);
	if (err)
		goto err_unprepare;

	return 0;

err_unprepare:
	mhi_unprepare_from_transfer(mhi_dev);
err_free_netdev:
	free_netdev(ndev);
	return err;
}

static void mhi_net_remove(struct mhi_device *mhi_dev)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);

	unregister_netdev(mhi_netdev->ndev);

	mhi_unprepare_from_transfer(mhi_netdev->mdev);

	free_netdev(mhi_netdev->ndev);
}
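
/* driver_data carries the netdev name template passed to alloc_netdev():
 * IP_HW0 is the modem's hardware-accelerated IP data path, IP_SW0 the
 * software one.
 */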
static const struct mhi_device_id mhi_net_id_table[] = {
	{ .chan = "IP_HW0", .driver_data = (kernel_ulong_t)"mhi_hwip%d" },
	{ .chan = "IP_SW0", .driver_data = (kernel_ulong_t)"mhi_swip%d" },
	{}
};
MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);

static struct mhi_driver mhi_net_driver = {
	.probe = mhi_net_probe,
	.remove = mhi_net_remove,
	.dl_xfer_cb = mhi_net_dl_callback,
	.ul_xfer_cb = mhi_net_ul_callback,
	.id_table = mhi_net_id_table,
	.driver = {
		.name = "mhi_net",
		.owner = THIS_MODULE,
	},
};

module_mhi_driver(mhi_net_driver);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("Network over MHI");
MODULE_LICENSE("GPL v2");