net/openvswitch/vport-netdev.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007-2012 Nicira, Inc.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/if_arp.h>
#include <linux/if_bridge.h>
#include <linux/if_vlan.h>
#include <linux/kernel.h>
#include <linux/llc.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/openvswitch.h>
#include <linux/export.h>

#include <net/ip_tunnels.h>
#include <net/rtnetlink.h>

#include "datapath.h"
#include "vport.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"

static struct vport_ops ovs_netdev_vport_ops;

/* Must be called with rcu_read_lock. */
static void netdev_port_receive(struct sk_buff *skb)
{
        struct vport *vport;

        vport = ovs_netdev_get_vport(skb->dev);
        if (unlikely(!vport))
                goto error;

        if (unlikely(skb_warn_if_lro(skb)))
                goto error;

        /* Make our own copy of the packet. Otherwise we will mangle the
         * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
         */
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return;

        if (skb->dev->type == ARPHRD_ETHER)
                skb_push_rcsum(skb, ETH_HLEN);

        ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
        return;
error:
        kfree_skb(skb);
}

/* Called with rcu_read_lock and bottom-halves disabled. */
static rx_handler_result_t netdev_frame_hook(struct sk_buff **pskb)
{
        struct sk_buff *skb = *pskb;

        if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
                return RX_HANDLER_PASS;

        netdev_port_receive(skb);
        return RX_HANDLER_CONSUMED;
}
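
/* Return the net_device backing the datapath's local port (OVSP_LOCAL).
 * Uses ovs_vport_ovsl(), so the caller is expected to hold ovs_mutex.
 */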
static struct net_device *get_dpdev(const struct datapath *dp)
{
        struct vport *local;

        local = ovs_vport_ovsl(dp, OVSP_LOCAL);
        return local->dev;
}
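
/* Attach the network device named @name to @vport: take a reference on it,
 * make it an upper-dev slave of the datapath's local device, register
 * netdev_frame_hook() as its rx_handler, disable LRO, enable promiscuous
 * mode and mark it with IFF_OVS_DATAPATH.  Returns the vport on success or
 * an ERR_PTR(); on failure the vport itself is freed.
 */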
struct vport *ovs_netdev_link(struct vport *vport, const char *name)
{
        int err;

        vport->dev = dev_get_by_name(ovs_dp_get_net(vport->dp), name);
        if (!vport->dev) {
                err = -ENODEV;
                goto error_free_vport;
        }
        /* Ensure that the device exists and that the provided
         * name is not one of its aliases.
         */
        if (strcmp(name, ovs_vport_name(vport))) {
                err = -ENODEV;
                goto error_put;
        }
        netdev_tracker_alloc(vport->dev, &vport->dev_tracker, GFP_KERNEL);
        if (vport->dev->flags & IFF_LOOPBACK ||
            (vport->dev->type != ARPHRD_ETHER &&
             vport->dev->type != ARPHRD_NONE) ||
            ovs_is_internal_dev(vport->dev)) {
                err = -EINVAL;
                goto error_put;
        }

        rtnl_lock();
        err = netdev_master_upper_dev_link(vport->dev,
                                           get_dpdev(vport->dp),
                                           NULL, NULL, NULL);
        if (err)
                goto error_unlock;

        err = netdev_rx_handler_register(vport->dev, netdev_frame_hook,
                                         vport);
        if (err)
                goto error_master_upper_dev_unlink;

        dev_disable_lro(vport->dev);
        dev_set_promiscuity(vport->dev, 1);
        vport->dev->priv_flags |= IFF_OVS_DATAPATH;
        rtnl_unlock();

        return vport;

error_master_upper_dev_unlink:
        netdev_upper_dev_unlink(vport->dev, get_dpdev(vport->dp));
error_unlock:
        rtnl_unlock();
error_put:
        netdev_put(vport->dev, &vport->dev_tracker);
error_free_vport:
        ovs_vport_free(vport);
        return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(ovs_netdev_link);
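
/* Create a netdev vport: allocate a vport with no private area and link it
 * to the device named in @parms.
 */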
static struct vport *netdev_create(const struct vport_parms *parms)
{
        struct vport *vport;

        vport = ovs_vport_alloc(0, &ovs_netdev_vport_ops, parms);
        if (IS_ERR(vport))
                return vport;

        return ovs_netdev_link(vport, parms->name);
}
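
/* RCU callback: drop the (tracked) reference on the underlying net_device
 * and free the vport itself.
 */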
static void vport_netdev_free(struct rcu_head *rcu)
{
        struct vport *vport = container_of(rcu, struct vport, rcu);

        netdev_put(vport->dev, &vport->dev_tracker);
        ovs_vport_free(vport);
}
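
/* Undo what ovs_netdev_link() set up: clear IFF_OVS_DATAPATH, unregister
 * the rx_handler, unlink the device from the datapath device and drop
 * promiscuous mode.  Must be called with RTNL held.
 */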
void ovs_netdev_detach_dev(struct vport *vport)
{
        ASSERT_RTNL();
        vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
        netdev_rx_handler_unregister(vport->dev);
        netdev_upper_dev_unlink(vport->dev,
                                netdev_master_upper_dev_get(vport->dev));
        dev_set_promiscuity(vport->dev, -1);
}
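
/* Detach the device (if it is still an OVS port) and free the vport after
 * an RCU grace period.
 */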
static void netdev_destroy(struct vport *vport)
{
        rtnl_lock();
        if (netif_is_ovs_port(vport->dev))
                ovs_netdev_detach_dev(vport);
        rtnl_unlock();

        call_rcu(&vport->rcu, vport_netdev_free);
}
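
/* Like netdev_destroy(), but also deletes the underlying tunnel device
 * unless it is already being unregistered.
 */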
void ovs_netdev_tunnel_destroy(struct vport *vport)
{
        rtnl_lock();
        if (netif_is_ovs_port(vport->dev))
                ovs_netdev_detach_dev(vport);

        /* We can be invoked by both explicit vport deletion and
         * underlying netdev deregistration; delete the link only
         * if it's not already shutting down.
         */
        if (vport->dev->reg_state == NETREG_REGISTERED)
                rtnl_delete_link(vport->dev, 0, NULL);
        netdev_put(vport->dev, &vport->dev_tracker);
        vport->dev = NULL;
        rtnl_unlock();

        call_rcu(&vport->rcu, vport_netdev_free);
}
EXPORT_SYMBOL_GPL(ovs_netdev_tunnel_destroy);

/* Returns null if this device is not attached to a datapath. */
struct vport *ovs_netdev_get_vport(struct net_device *dev)
{
        if (likely(netif_is_ovs_port(dev)))
                return (struct vport *)
                        rcu_dereference_rtnl(dev->rx_handler_data);
        else
                return NULL;
}
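
/* vport_ops for OVS_VPORT_TYPE_NETDEV ports; transmit goes straight to
 * dev_queue_xmit().
 */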
static struct vport_ops ovs_netdev_vport_ops = {
        .type           = OVS_VPORT_TYPE_NETDEV,
        .create         = netdev_create,
        .destroy        = netdev_destroy,
        .send           = dev_queue_xmit,
};
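
/* Register the netdev vport type with the vport layer at module init;
 * ovs_netdev_exit() undoes the registration.
 */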
int __init ovs_netdev_init(void)
{
        return ovs_vport_ops_register(&ovs_netdev_vport_ops);
}

void ovs_netdev_exit(void)
{
        ovs_vport_ops_unregister(&ovs_netdev_vport_ops);
}