drivers/net/can/vxcan.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * vxcan.c - Virtual CAN Tunnel for cross namespace communication
 *
 * This code is derived from drivers/net/can/vcan.c for the virtual CAN
 * specific parts and from drivers/net/veth.c to implement the netlink API
 * for network interface pairs in a common and established way.
 *
 * Copyright (c) 2017 Oliver Hartkopp <socketcan@hartkopp.net>
 */
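
/* Example usage (a sketch, assuming an iproute2 build with vxcan support;
 * the interface names "vxcan0"/"vxcan1" and the namespace "ns1" are purely
 * illustrative):
 *
 *   ip netns add ns1
 *   ip link add vxcan0 type vxcan peer name vxcan1 netns ns1
 *   ip link set vxcan0 up
 *   ip netns exec ns1 ip link set vxcan1 up
 *
 * CAN (and CAN FD) frames sent on vxcan0 then show up on vxcan1 inside
 * ns1, and vice versa.
 */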

#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
#include <linux/can/vxcan.h>
#include <linux/slab.h>
#include <net/rtnetlink.h>

#define DRV_NAME "vxcan"

MODULE_DESCRIPTION("Virtual CAN Tunnel");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);

struct vxcan_priv {
	struct net_device __rcu *peer;
};
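
/* Hand a frame transmitted on one end of the tunnel directly to the RX
 * path of the peer device, which may live in a different network
 * namespace. Successful delivery counts as TX here and as RX on the peer.
 */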
static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct net_device_stats *peerstats, *srcstats = &dev->stats;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer)) {
		kfree_skb(skb);
		dev->stats.tx_dropped++;
		goto out_unlock;
	}

	skb = can_create_echo_skb(skb);
	if (!skb)
		goto out_unlock;

	/* reset CAN GW hop counter */
	skb->csum_start = 0;
	skb->pkt_type   = PACKET_BROADCAST;
	skb->dev        = peer;
	skb->ip_summed  = CHECKSUM_UNNECESSARY;

	if (netif_rx_ni(skb) == NET_RX_SUCCESS) {
		srcstats->tx_packets++;
		srcstats->tx_bytes += cfd->len;
		peerstats = &peer->stats;
		peerstats->rx_packets++;
		peerstats->rx_bytes += cfd->len;
	}

out_unlock:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
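
/* Carrier handling: a vxcan device only signals carrier once both ends of
 * the tunnel are up; taking either end down drops the carrier on both.
 */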
static int vxcan_open(struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	if (!peer)
		return -ENOTCONN;

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}
	return 0;
}

static int vxcan_close(struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	return 0;
}

static int vxcan_get_iflink(const struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? peer->ifindex : 0;
	rcu_read_unlock();

	return iflink;
}

static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
{
	/* Do not allow changing the MTU while running */
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops vxcan_netdev_ops = {
	.ndo_open	= vxcan_open,
	.ndo_stop	= vxcan_close,
	.ndo_start_xmit	= vxcan_xmit,
	.ndo_get_iflink	= vxcan_get_iflink,
	.ndo_change_mtu	= vxcan_change_mtu,
};

static void vxcan_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_CAN;
	dev->mtu		= CANFD_MTU;
	dev->hard_header_len	= 0;
	dev->addr_len		= 0;
	dev->tx_queue_len	= 0;
	dev->flags		= (IFF_NOARP|IFF_ECHO);
	dev->netdev_ops		= &vxcan_netdev_ops;
	dev->needs_free_netdev	= true;
}

/* forward declaration for rtnl_create_link() */
static struct rtnl_link_ops vxcan_link_ops;
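
/* Create both ends of the tunnel: the peer device is registered first,
 * possibly in another network namespace (selected via the IFLA_NET_NS_*
 * attributes), then the device itself, and finally the two are cross
 * linked through their RCU protected peer pointers.
 */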
static int vxcan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct vxcan_priv *priv;
	struct net_device *peer;
	struct net *peer_net;

	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb;
	char ifname[IFNAMSIZ];
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp = NULL;
	int err;

	/* register peer device */
	if (data && data[VXCAN_INFO_PEER]) {
		struct nlattr *nla_peer;

		nla_peer = data[VXCAN_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifla(peer_tb,
					  nla_data(nla_peer) +
					  sizeof(struct ifinfomsg),
					  nla_len(nla_peer) -
					  sizeof(struct ifinfomsg),
					  NULL);
		if (err < 0)
			return err;

		tbp = peer_tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	peer_net = rtnl_link_get_net(net, tbp);
	if (IS_ERR(peer_net))
		return PTR_ERR(peer_net);

	peer = rtnl_create_link(peer_net, ifname, name_assign_type,
				&vxcan_link_ops, tbp, extack);
	if (IS_ERR(peer)) {
		put_net(peer_net);
		return PTR_ERR(peer);
	}

	if (ifmp && dev->ifindex)
		peer->ifindex = ifmp->ifi_index;

	err = register_netdevice(peer);
	put_net(peer_net);
	peer_net = NULL;
	if (err < 0) {
		free_netdev(peer);
		return err;
	}

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto unregister_network_device;

	/* register first device */
	if (tb[IFLA_IFNAME])
		nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto unregister_network_device;

	netif_carrier_off(dev);

	/* cross link the device pair */
	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);

	return 0;

unregister_network_device:
	unregister_netdevice(peer);
	return err;
}
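
/* Tear down both ends of the tunnel: clear the peer pointers and queue
 * both devices for unregistration.
 */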
static void vxcan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxcan_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not being freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

static const struct nla_policy vxcan_policy[VXCAN_INFO_MAX + 1] = {
	[VXCAN_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
};

static struct net *vxcan_get_link_net(const struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

static struct rtnl_link_ops vxcan_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct vxcan_priv),
	.setup		= vxcan_setup,
	.newlink	= vxcan_newlink,
	.dellink	= vxcan_dellink,
	.policy		= vxcan_policy,
	.maxtype	= VXCAN_INFO_MAX,
	.get_link_net	= vxcan_get_link_net,
};

static __init int vxcan_init(void)
{
	pr_info("vxcan: Virtual CAN Tunnel driver\n");

	return rtnl_link_register(&vxcan_link_ops);
}

static __exit void vxcan_exit(void)
{
	rtnl_link_unregister(&vxcan_link_ops);
}

module_init(vxcan_init);
module_exit(vxcan_exit);