// SPDX-License-Identifier: GPL-2.0-or-later
/* MHI Network driver - Network over MHI bus
 *
 * Copyright (C) 2020 Linaro Ltd <loic.poulain@linaro.org>
 */

#include <linux/if_arp.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/u64_stats_sync.h>

#define MHI_NET_MIN_MTU		ETH_MIN_MTU
#define MHI_NET_MAX_MTU		0xffff
#define MHI_NET_DEFAULT_MTU	0x4000

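/*
 * One network interface is registered per matched MHI channel pair (e.g.
 * mhi_hwip0 for the hardware-accelerated IP path). Link configuration is
 * expected to come from userspace through an out-of-band control channel
 * (e.g. QMI).
 */
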
struct mhi_net_stats {
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	u64_stats_t rx_errors;
	u64_stats_t tx_packets;
	u64_stats_t tx_bytes;
	u64_stats_t tx_errors;
	u64_stats_t tx_dropped;
	struct u64_stats_sync tx_syncp;
	struct u64_stats_sync rx_syncp;
};

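/* Per-netdev driver context, stored in netdev_priv() and also reachable from
 * the MHI device via dev_set_drvdata() in the transfer callbacks.
 */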
struct mhi_net_dev {
	struct mhi_device *mdev;
	struct net_device *ndev;
	struct sk_buff *skbagg_head;
	struct sk_buff *skbagg_tail;
	struct delayed_work rx_refill;
	struct mhi_net_stats stats;
	u32 rx_queue_sz;
	unsigned int mru;
};

struct mhi_device_info {
	const char *netname;
};

static int mhi_ndo_open(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	/* Feed the rx buffer pool */
	schedule_delayed_work(&mhi_netdev->rx_refill, 0);

	/* Carrier is established via out-of-band channel (e.g. qmi) */
	netif_carrier_on(ndev);

	netif_start_queue(ndev);

	return 0;
}

static int mhi_ndo_stop(struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	cancel_delayed_work_sync(&mhi_netdev->rx_refill);

	return 0;
}

static netdev_tx_t mhi_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
	struct mhi_device *mdev = mhi_netdev->mdev;
	int err;

	err = mhi_queue_skb(mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (unlikely(err)) {
		net_err_ratelimited("%s: Failed to queue TX buf (%d)\n",
				    ndev->name, err);
		dev_kfree_skb_any(skb);
		goto exit_drop;
	}

	if (mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;

exit_drop:
	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	u64_stats_inc(&mhi_netdev->stats.tx_dropped);
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	return NETDEV_TX_OK;
}

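/* Lockless stats snapshot: readers sample the counters under the u64_stats
 * seqcount and retry if a writer updated them concurrently.
 */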
static void mhi_ndo_get_stats64(struct net_device *ndev,
				struct rtnl_link_stats64 *stats)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&mhi_netdev->stats.rx_syncp);
		stats->rx_packets = u64_stats_read(&mhi_netdev->stats.rx_packets);
		stats->rx_bytes = u64_stats_read(&mhi_netdev->stats.rx_bytes);
		stats->rx_errors = u64_stats_read(&mhi_netdev->stats.rx_errors);
	} while (u64_stats_fetch_retry(&mhi_netdev->stats.rx_syncp, start));

	do {
		start = u64_stats_fetch_begin(&mhi_netdev->stats.tx_syncp);
		stats->tx_packets = u64_stats_read(&mhi_netdev->stats.tx_packets);
		stats->tx_bytes = u64_stats_read(&mhi_netdev->stats.tx_bytes);
		stats->tx_errors = u64_stats_read(&mhi_netdev->stats.tx_errors);
		stats->tx_dropped = u64_stats_read(&mhi_netdev->stats.tx_dropped);
	} while (u64_stats_fetch_retry(&mhi_netdev->stats.tx_syncp, start));
}

static const struct net_device_ops mhi_netdev_ops = {
	.ndo_open		= mhi_ndo_open,
	.ndo_stop		= mhi_ndo_stop,
	.ndo_start_xmit		= mhi_ndo_xmit,
	.ndo_get_stats64	= mhi_ndo_get_stats64,
};

static void mhi_net_setup(struct net_device *ndev)
{
	ndev->header_ops = NULL;  /* No header */
	ndev->type = ARPHRD_RAWIP;
	ndev->hard_header_len = 0;
	ndev->addr_len = 0;
	ndev->flags = IFF_POINTOPOINT | IFF_NOARP;
	ndev->netdev_ops = &mhi_netdev_ops;
	ndev->mtu = MHI_NET_DEFAULT_MTU;
	ndev->min_mtu = MHI_NET_MIN_MTU;
	ndev->max_mtu = MHI_NET_MAX_MTU;
	ndev->tx_queue_len = 1000;
}

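/* Chain an RX fragment onto the pending aggregation skb via frag_list, so a
 * packet split across several MHI transfers reaches the stack as one skb.
 */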
static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev,
				       struct sk_buff *skb)
{
	struct sk_buff *head = mhi_netdev->skbagg_head;
	struct sk_buff *tail = mhi_netdev->skbagg_tail;

	/* This is non-paged skb chaining using frag_list */
	if (!head) {
		mhi_netdev->skbagg_head = skb;
		return skb;
	}

	if (!skb_shinfo(head)->frag_list)
		skb_shinfo(head)->frag_list = skb;
	else
		tail->next = skb;

	head->len += skb->len;
	head->data_len += skb->len;
	head->truesize += skb->truesize;

	mhi_netdev->skbagg_tail = skb;

	return mhi_netdev->skbagg_head;
}

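/* DL (device to host) completion: the buffer is one of the skbs queued by the
 * rx_refill worker; ownership returns to the driver here.
 */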
static void mhi_net_dl_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct sk_buff *skb = mhi_res->buf_addr;
	int free_desc_count;

	free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	if (unlikely(mhi_res->transaction_status)) {
		switch (mhi_res->transaction_status) {
		case -EOVERFLOW:
			/* Packet can not fit in one MHI buffer and has been
			 * split over multiple MHI transfers, do re-aggregation.
			 * That usually means the device side MTU is larger than
			 * the host side MTU/MRU. Since this is not optimal,
			 * print a warning (once).
			 */
			netdev_warn_once(mhi_netdev->ndev,
					 "Fragmented packets received, fix MTU?\n");
			skb_put(skb, mhi_res->bytes_xferd);
			mhi_net_skb_agg(mhi_netdev, skb);
			break;
		case -ENOTCONN:
			/* MHI layer stopping/resetting the DL channel */
			dev_kfree_skb_any(skb);
			return;
		default:
			/* Unknown error, simply drop */
			dev_kfree_skb_any(skb);
			u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
			u64_stats_inc(&mhi_netdev->stats.rx_errors);
			u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
		}
	} else {
		skb_put(skb, mhi_res->bytes_xferd);

		if (mhi_netdev->skbagg_head) {
			/* Aggregate the final fragment */
			skb = mhi_net_skb_agg(mhi_netdev, skb);
			mhi_netdev->skbagg_head = NULL;
		}

		switch (skb->data[0] & 0xf0) {
		case 0x40:
			skb->protocol = htons(ETH_P_IP);
			break;
		case 0x60:
			skb->protocol = htons(ETH_P_IPV6);
			break;
		default:
			skb->protocol = htons(ETH_P_MAP);
			break;
		}

		u64_stats_update_begin(&mhi_netdev->stats.rx_syncp);
		u64_stats_inc(&mhi_netdev->stats.rx_packets);
		u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len);
		u64_stats_update_end(&mhi_netdev->stats.rx_syncp);
		netif_rx(skb);
	}

	/* Refill if RX buffers queue becomes low */
	if (free_desc_count >= mhi_netdev->rx_queue_sz / 2)
		schedule_delayed_work(&mhi_netdev->rx_refill, 0);
}

static void mhi_net_ul_callback(struct mhi_device *mhi_dev,
				struct mhi_result *mhi_res)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb = mhi_res->buf_addr;

	/* Hardware has consumed the buffer, so free the skb (which is not
	 * freed by the MHI stack) and perform accounting.
	 */
	dev_consume_skb_any(skb);

	u64_stats_update_begin(&mhi_netdev->stats.tx_syncp);
	if (unlikely(mhi_res->transaction_status)) {
		/* MHI layer stopping/resetting the UL channel */
		if (mhi_res->transaction_status == -ENOTCONN) {
			u64_stats_update_end(&mhi_netdev->stats.tx_syncp);
			return;
		}

		u64_stats_inc(&mhi_netdev->stats.tx_errors);
	} else {
		u64_stats_inc(&mhi_netdev->stats.tx_packets);
		u64_stats_add(&mhi_netdev->stats.tx_bytes, mhi_res->bytes_xferd);
	}
	u64_stats_update_end(&mhi_netdev->stats.tx_syncp);

	if (netif_queue_stopped(ndev) && !mhi_queue_is_full(mdev, DMA_TO_DEVICE))
		netif_wake_queue(ndev);
}

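/* Keep the DL channel supplied with receive buffers, sized to the controller
 * MRU when set, otherwise to the current MTU.
 */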
static void mhi_net_rx_refill_work(struct work_struct *work)
{
	struct mhi_net_dev *mhi_netdev = container_of(work, struct mhi_net_dev,
						      rx_refill.work);
	struct net_device *ndev = mhi_netdev->ndev;
	struct mhi_device *mdev = mhi_netdev->mdev;
	struct sk_buff *skb;
	unsigned int size;
	int err;

	size = mhi_netdev->mru ? mhi_netdev->mru : READ_ONCE(ndev->mtu);

	while (!mhi_queue_is_full(mdev, DMA_FROM_DEVICE)) {
		skb = netdev_alloc_skb(ndev, size);
		if (unlikely(!skb))
			break;

		err = mhi_queue_skb(mdev, DMA_FROM_DEVICE, skb, size, MHI_EOT);
		if (unlikely(err)) {
			net_err_ratelimited("%s: Failed to queue RX buf (%d)\n",
					    ndev->name, err);
			kfree_skb(skb);
			break;
		}

		/* Do not hog the CPU if rx buffers are consumed faster than
		 * queued (unlikely).
		 */
		cond_resched();
	}

	/* If we're still starved of rx buffers, reschedule later */
	if (mhi_get_free_desc_count(mdev, DMA_FROM_DEVICE) == mhi_netdev->rx_queue_sz)
		schedule_delayed_work(&mhi_netdev->rx_refill, HZ / 2);
}

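/* Bind the MHI device to the net_device: start the channels, size the RX
 * queue from the number of free DL descriptors, then register the netdev.
 */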
static int mhi_net_newlink(struct mhi_device *mhi_dev, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev;
	int err;

	mhi_netdev = netdev_priv(ndev);

	dev_set_drvdata(&mhi_dev->dev, mhi_netdev);
	mhi_netdev->ndev = ndev;
	mhi_netdev->mdev = mhi_dev;
	mhi_netdev->skbagg_head = NULL;
	mhi_netdev->mru = mhi_dev->mhi_cntrl->mru;

	INIT_DELAYED_WORK(&mhi_netdev->rx_refill, mhi_net_rx_refill_work);
	u64_stats_init(&mhi_netdev->stats.rx_syncp);
	u64_stats_init(&mhi_netdev->stats.tx_syncp);

	/* Start MHI channels */
	err = mhi_prepare_for_transfer(mhi_dev);
	if (err)
		return err;

	/* Number of transfer descriptors determines size of the queue */
	mhi_netdev->rx_queue_sz = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE);

	err = register_netdev(ndev);
	if (err)
		return err;

	return 0;
}

static void mhi_net_dellink(struct mhi_device *mhi_dev, struct net_device *ndev)
{
	struct mhi_net_dev *mhi_netdev = netdev_priv(ndev);

	unregister_netdev(ndev);

	mhi_unprepare_from_transfer(mhi_dev);

	kfree_skb(mhi_netdev->skbagg_head);

	free_netdev(ndev);

	dev_set_drvdata(&mhi_dev->dev, NULL);
}

static int mhi_net_probe(struct mhi_device *mhi_dev,
			 const struct mhi_device_id *id)
{
	const struct mhi_device_info *info = (struct mhi_device_info *)id->driver_data;
	struct net_device *ndev;
	int err;

	ndev = alloc_netdev(sizeof(struct mhi_net_dev), info->netname,
			    NET_NAME_PREDICTABLE, mhi_net_setup);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &mhi_dev->dev);

	err = mhi_net_newlink(mhi_dev, ndev);
	if (err) {
		free_netdev(ndev);
		return err;
	}

	return 0;
}

static void mhi_net_remove(struct mhi_device *mhi_dev)
{
	struct mhi_net_dev *mhi_netdev = dev_get_drvdata(&mhi_dev->dev);

	mhi_net_dellink(mhi_dev, mhi_netdev->ndev);
}

static const struct mhi_device_info mhi_hwip0 = {
	.netname = "mhi_hwip%d",
};

static const struct mhi_device_info mhi_swip0 = {
	.netname = "mhi_swip%d",
};

static const struct mhi_device_id mhi_net_id_table[] = {
	/* Hardware accelerated data PATH (to modem IPA), protocol agnostic */
	{ .chan = "IP_HW0", .driver_data = (kernel_ulong_t)&mhi_hwip0 },
	/* Software data PATH (to modem CPU) */
	{ .chan = "IP_SW0", .driver_data = (kernel_ulong_t)&mhi_swip0 },
	{}
};
MODULE_DEVICE_TABLE(mhi, mhi_net_id_table);

static struct mhi_driver mhi_net_driver = {
	.probe = mhi_net_probe,
	.remove = mhi_net_remove,
	.dl_xfer_cb = mhi_net_dl_callback,
	.ul_xfer_cb = mhi_net_ul_callback,
	.id_table = mhi_net_id_table,
	.driver = {
		.name = "mhi_net",
	},
};

module_mhi_driver(mhi_net_driver);

MODULE_AUTHOR("Loic Poulain <loic.poulain@linaro.org>");
MODULE_DESCRIPTION("Network over MHI");
MODULE_LICENSE("GPL v2");