// SPDX-License-Identifier: GPL-2.0-only
/*
 * vxcan.c - Virtual CAN Tunnel for cross namespace communication
 *
 * This code is derived from drivers/net/can/vcan.c for the virtual CAN
 * specific parts and from drivers/net/veth.c to implement the netlink API
 * for network interface pairs in a common and established way.
 *
 * Copyright (c) 2017 Oliver Hartkopp <socketcan@hartkopp.net>
 */
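
/*
 * Usage sketch (assuming an iproute2 build with vxcan support; the exact
 * command syntax is not part of this driver):
 *
 *   ip link add vxcan0 type vxcan peer name vxcan1 netns <namespace>
 *
 * CAN frames written to one end of the pair are received on the peer end,
 * which may live in a different network namespace.
 */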

#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
#include <linux/can/vxcan.h>
#include <linux/can/can-ml.h>
#include <linux/slab.h>
#include <net/rtnetlink.h>

#define DRV_NAME "vxcan"

MODULE_DESCRIPTION("Virtual CAN Tunnel");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
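
/* Each end of a vxcan pair only needs to know its peer; the pointer is
 * RCU-protected so vxcan_xmit() can dereference it safely while the pair
 * is being torn down.
 */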

struct vxcan_priv {
	struct net_device __rcu *peer;
};

static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	struct net_device_stats *peerstats, *srcstats = &dev->stats;
	struct sk_buff *skb;
	unsigned int len;

	if (can_dropped_invalid_skb(dev, oskb))
		return NETDEV_TX_OK;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	if (unlikely(!peer)) {
		kfree_skb(oskb);
		dev->stats.tx_dropped++;
		goto out_unlock;
	}

	skb_tx_timestamp(oskb);

	skb = skb_clone(oskb, GFP_ATOMIC);
	if (skb) {
		consume_skb(oskb);
	} else {
		kfree_skb(oskb);
		goto out_unlock;
	}

	/* reset CAN GW hop counter */
	skb->csum_start = 0;
	skb->pkt_type   = PACKET_BROADCAST;
	skb->dev        = peer;
	skb->ip_summed  = CHECKSUM_UNNECESSARY;

	len = can_skb_get_data_len(skb);
	if (netif_rx(skb) == NET_RX_SUCCESS) {
		srcstats->tx_packets++;
		srcstats->tx_bytes += len;
		peerstats = &peer->stats;
		peerstats->rx_packets++;
		peerstats->rx_bytes += len;
	}

out_unlock:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
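
/* Carrier is reported only while both ends of the pair are up, similar to
 * the veth driver this code is derived from.
 */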

static int vxcan_open(struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	if (!peer)
		return -ENOTCONN;

	if (peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(peer);
	}
	return 0;
}

static int vxcan_close(struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	netif_carrier_off(dev);
	if (peer)
		netif_carrier_off(peer);

	return 0;
}

static int vxcan_get_iflink(const struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer;
	int iflink;

	rcu_read_lock();
	peer = rcu_dereference(priv->peer);
	iflink = peer ? READ_ONCE(peer->ifindex) : 0;
	rcu_read_unlock();

	return iflink;
}
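
/* There is no bittiming to configure on a virtual link; as on vcan, the MTU
 * is what determines whether Classical CAN, CAN FD or CAN XL frames may be
 * carried over the tunnel.
 */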

static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
{
	/* Do not allow changing the MTU while running */
	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU &&
	    !can_is_canxl_dev_mtu(new_mtu))
		return -EINVAL;

	WRITE_ONCE(dev->mtu, new_mtu);
	return 0;
}

static const struct net_device_ops vxcan_netdev_ops = {
	.ndo_open	= vxcan_open,
	.ndo_stop	= vxcan_close,
	.ndo_start_xmit	= vxcan_xmit,
	.ndo_get_iflink	= vxcan_get_iflink,
	.ndo_change_mtu	= vxcan_change_mtu,
};

static const struct ethtool_ops vxcan_ethtool_ops = {
	.get_ts_info = ethtool_op_get_ts_info,
};
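
/* The CAN mid-layer private data lives in the same netdev_priv() allocation,
 * right behind the NETDEV_ALIGNed vxcan_priv; this matches the .priv_size
 * set up in vxcan_link_ops below.
 */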

static void vxcan_setup(struct net_device *dev)
{
	struct can_ml_priv *can_ml;

	dev->type		= ARPHRD_CAN;
	dev->mtu		= CANFD_MTU;
	dev->hard_header_len	= 0;
	dev->addr_len		= 0;
	dev->tx_queue_len	= 0;
	dev->flags		= IFF_NOARP;
	dev->netdev_ops		= &vxcan_netdev_ops;
	dev->ethtool_ops	= &vxcan_ethtool_ops;
	dev->needs_free_netdev	= true;

	can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
	can_set_ml_priv(dev, can_ml);
}

/* forward declaration for rtnl_create_link() */
static struct rtnl_link_ops vxcan_link_ops;
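
/* newlink creates both ends of the tunnel: first the peer device (named and
 * placed in a namespace according to the optional VXCAN_INFO_PEER attribute),
 * then the device itself, and finally cross-links the two peer pointers.
 */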

static int vxcan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct vxcan_priv *priv;
	struct net_device *peer;
	struct net *peer_net;

	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb;
	char ifname[IFNAMSIZ];
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp = NULL;
	int err;

	/* register peer device */
	if (data && data[VXCAN_INFO_PEER]) {
		struct nlattr *nla_peer = data[VXCAN_INFO_PEER];

		ifmp = nla_data(nla_peer);
		rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
		tbp = peer_tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	peer_net = rtnl_link_get_net(net, tbp);
	peer = rtnl_create_link(peer_net, ifname, name_assign_type,
				&vxcan_link_ops, tbp, extack);
	if (IS_ERR(peer)) {
		put_net(peer_net);
		return PTR_ERR(peer);
	}

	if (ifmp && dev->ifindex)
		peer->ifindex = ifmp->ifi_index;

	err = register_netdevice(peer);
	put_net(peer_net);
	peer_net = NULL;
	if (err < 0) {
		free_netdev(peer);
		return err;
	}

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp, 0, NULL);
	if (err < 0)
		goto unregister_network_device;

	/* register first device */
	if (tb[IFLA_IFNAME])
		nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto unregister_network_device;

	netif_carrier_off(dev);

	/* cross link the device pair */
	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);

	return 0;

unregister_network_device:
	unregister_netdevice(peer);
	return err;
}

static void vxcan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxcan_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = rtnl_dereference(priv->peer);

	/* Note : dellink() is called from default_device_exit_batch(),
	 * before a rcu_synchronize() point. The devices are guaranteed
	 * not being freed before one RCU grace period.
	 */
	RCU_INIT_POINTER(priv->peer, NULL);
	unregister_netdevice_queue(dev, head);

	if (peer) {
		priv = netdev_priv(peer);
		RCU_INIT_POINTER(priv->peer, NULL);
		unregister_netdevice_queue(peer, head);
	}
}

static const struct nla_policy vxcan_policy[VXCAN_INFO_MAX + 1] = {
	[VXCAN_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
};

static struct net *vxcan_get_link_net(const struct net_device *dev)
{
	struct vxcan_priv *priv = netdev_priv(dev);
	struct net_device *peer = rtnl_dereference(priv->peer);

	return peer ? dev_net(peer) : dev_net(dev);
}

static struct rtnl_link_ops vxcan_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN) + sizeof(struct can_ml_priv),
	.setup		= vxcan_setup,
	.newlink	= vxcan_newlink,
	.dellink	= vxcan_dellink,
	.policy		= vxcan_policy,
	.peer_type	= VXCAN_INFO_PEER,
	.maxtype	= VXCAN_INFO_MAX,
	.get_link_net	= vxcan_get_link_net,
};

static __init int vxcan_init(void)
{
	pr_info("vxcan: Virtual CAN Tunnel driver\n");

	return rtnl_link_register(&vxcan_link_ops);
}

static __exit void vxcan_exit(void)
{
	rtnl_link_unregister(&vxcan_link_ops);
}

module_init(vxcan_init);
module_exit(vxcan_exit);