/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data virtual network driver
 */

#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <net/pkt_sched.h>
#include "rmnet_config.h"
#include "rmnet_handlers.h"
#include "rmnet_private.h"
#include "rmnet_map.h"
#include "rmnet_vnd.h"
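
/* RX/TX Fixup
 *
 * Per-packet accounting for the virtual device. The counters live in
 * per-CPU storage (priv->pcpu_stats), so the hot path never takes a lock;
 * the u64_stats_update_begin()/end() pair only spins a seqcount where a
 * 64-bit counter update is not atomic (32-bit SMP kernels).
 */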

void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.rx_pkts++;
	pcpu_ptr->stats.rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_pcpu_stats *pcpu_ptr;

	pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);

	u64_stats_update_begin(&pcpu_ptr->syncp);
	pcpu_ptr->stats.tx_pkts++;
	pcpu_ptr->stats.tx_bytes += skb->len;
	u64_stats_update_end(&pcpu_ptr->syncp);
}

/* Network Device Operations */
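
/* Transmit path: hand the skb to the egress handler, which prepends the
 * MAP header and sends the packet out through the underlying real device.
 * Packets transmitted while no real device is attached are dropped and
 * counted in tx_drops.
 */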

static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	struct rmnet_priv *priv;

	priv = netdev_priv(dev);
	if (priv->real_dev) {
		rmnet_egress_handler(skb);
	} else {
		this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
		kfree_skb(skb);
	}
	return NETDEV_TX_OK;
}

static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE)
		return -EINVAL;

	rmnet_dev->mtu = new_mtu;
	return 0;
}

static int rmnet_vnd_get_iflink(const struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	return priv->real_dev->ifindex;
}
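
/* .ndo_init runs from register_netdevice(), before the device can carry
 * traffic, so the per-CPU stats and GRO cells allocated here are in place
 * before the fixup hooks above can run; .ndo_uninit releases them when
 * the device is unregistered.
 */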

static int rmnet_vnd_init(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	int err;

	priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
	if (!priv->pcpu_stats)
		return -ENOMEM;

	err = gro_cells_init(&priv->gro_cells, dev);
	if (err) {
		free_percpu(priv->pcpu_stats);
		return err;
	}

	return 0;
}

static void rmnet_vnd_uninit(struct net_device *dev)
{
	struct rmnet_priv *priv = netdev_priv(dev);

	gro_cells_destroy(&priv->gro_cells);
	free_percpu(priv->pcpu_stats);
}
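
/* Fold the per-CPU counters into the rtnl_link_stats64 reply. The
 * fetch_begin/fetch_retry pair rereads a CPU's counters if a writer
 * updated them concurrently; tx_drops is bumped with this_cpu_inc()
 * outside the syncp, so it is summed outside the retry loop.
 */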

static void rmnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *s)
{
	struct rmnet_priv *priv = netdev_priv(dev);
	struct rmnet_vnd_stats total_stats;
	struct rmnet_pcpu_stats *pcpu_ptr;
	unsigned int cpu, start;

	memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));

	for_each_possible_cpu(cpu) {
		pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);

		do {
			start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
			total_stats.rx_pkts += pcpu_ptr->stats.rx_pkts;
			total_stats.rx_bytes += pcpu_ptr->stats.rx_bytes;
			total_stats.tx_pkts += pcpu_ptr->stats.tx_pkts;
			total_stats.tx_bytes += pcpu_ptr->stats.tx_bytes;
		} while (u64_stats_fetch_retry_irq(&pcpu_ptr->syncp, start));

		total_stats.tx_drops += pcpu_ptr->stats.tx_drops;
	}

	s->rx_packets = total_stats.rx_pkts;
	s->rx_bytes = total_stats.rx_bytes;
	s->tx_packets = total_stats.tx_pkts;
	s->tx_bytes = total_stats.tx_bytes;
	s->tx_dropped = total_stats.tx_drops;
}

static const struct net_device_ops rmnet_vnd_ops = {
	.ndo_start_xmit = rmnet_vnd_start_xmit,
	.ndo_change_mtu = rmnet_vnd_change_mtu,
	.ndo_get_iflink = rmnet_vnd_get_iflink,
	.ndo_add_slave = rmnet_add_bridge,
	.ndo_del_slave = rmnet_del_bridge,
	.ndo_init = rmnet_vnd_init,
	.ndo_uninit = rmnet_vnd_uninit,
	.ndo_get_stats64 = rmnet_get_stats64,
};

/* Called by kernel whenever a new rmnet<n> device is created. Sets MTU,
 * flags, ARP type, needed headroom, etc...
 */
void rmnet_vnd_setup(struct net_device *rmnet_dev)
{
	rmnet_dev->netdev_ops = &rmnet_vnd_ops;
	rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
	rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
	random_ether_addr(rmnet_dev->dev_addr);
	rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;

	/* Raw IP mode */
	rmnet_dev->header_ops = NULL;  /* No header */
	rmnet_dev->type = ARPHRD_RAWIP;
	rmnet_dev->hard_header_len = 0;
	rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);

	rmnet_dev->needs_free_netdev = true;
}
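
/* Exposed API
 *
 * The newlink/dellink helpers below are driven from the rmnet
 * rtnl_link_ops in rmnet_config.c, so they run with the RTNL lock held.
 */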

int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
		      struct rmnet_port *port,
		      struct net_device *real_dev,
		      struct rmnet_endpoint *ep)
{
	struct rmnet_priv *priv;
	int rc;

	if (ep->egress_dev)
		return -EINVAL;

	if (rmnet_get_endpoint(port, id))
		return -EBUSY;

	rmnet_dev->hw_features = NETIF_F_RXCSUM;
	rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	rmnet_dev->hw_features |= NETIF_F_SG;

	rc = register_netdevice(rmnet_dev);
	if (!rc) {
		ep->egress_dev = rmnet_dev;
		ep->mux_id = id;
		port->nr_rmnet_devs++;

		rmnet_dev->rtnl_link_ops = &rmnet_link_ops;

		priv = netdev_priv(rmnet_dev);
		priv->mux_id = id;
		priv->real_dev = real_dev;

		netdev_dbg(rmnet_dev, "rmnet dev created\n");
	}

	return rc;
}

int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
		      struct rmnet_endpoint *ep)
{
	if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
		return -EINVAL;

	ep->egress_dev = NULL;
	port->nr_rmnet_devs--;

	return 0;
}

u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev)
{
	struct rmnet_priv *priv;

	priv = netdev_priv(rmnet_dev);

	return priv->mux_id;
}

int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
	netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
	/* Although we expect similar number of enable/disable
	 * commands, optimize for the disable. That is more
	 * latency sensitive than enable
	 */
	if (unlikely(enable))
		netif_wake_queue(rmnet_dev);
	else
		netif_stop_queue(rmnet_dev);

	return 0;
}