// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/l3mdev/l3mdev.c - L3 master device implementation
 * Copyright (c) 2015 Cumulus Networks
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 */

#include <linux/netdevice.h>
#include <net/fib_rules.h>
#include <net/l3mdev.h>
static DEFINE_SPINLOCK(l3mdev_lock);

struct l3mdev_handler {
	lookup_by_table_id_t dev_lookup;
};

static struct l3mdev_handler l3mdev_handlers[L3MDEV_TYPE_MAX + 1];
static int l3mdev_check_type(enum l3mdev_type l3type)
{
	if (l3type <= L3MDEV_TYPE_UNSPEC || l3type > L3MDEV_TYPE_MAX)
		return -EINVAL;

	return 0;
}
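
/* l3mdev_table_lookup_register() installs @fn as the handler that maps
 * a FIB table id to the owning device's ifindex for the given l3mdev
 * type. Only one handler per type may be installed at a time; a second
 * registration returns -EBUSY until the first is unregistered.
 *
 * Illustrative caller sketch (hypothetical callback name, shown only to
 * clarify the API):
 *
 *	err = l3mdev_table_lookup_register(L3MDEV_TYPE_VRF,
 *					   my_ifindex_lookup_by_table_id);
 *	if (err)
 *		return err;
 */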
int l3mdev_table_lookup_register(enum l3mdev_type l3type,
				 lookup_by_table_id_t fn)
{
	struct l3mdev_handler *hdlr;
	int res;

	res = l3mdev_check_type(l3type);
	if (res)
		return res;

	hdlr = &l3mdev_handlers[l3type];

	spin_lock(&l3mdev_lock);

	if (hdlr->dev_lookup) {
		res = -EBUSY;
		goto unlock;
	}

	hdlr->dev_lookup = fn;
	res = 0;

unlock:
	spin_unlock(&l3mdev_lock);

	return res;
}
EXPORT_SYMBOL_GPL(l3mdev_table_lookup_register);
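
/* l3mdev_table_lookup_unregister() removes a handler installed by
 * l3mdev_table_lookup_register(). The slot is cleared only when the
 * registered handler matches @fn, so a stale unregister cannot knock
 * out another driver's handler.
 */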
void l3mdev_table_lookup_unregister(enum l3mdev_type l3type,
				    lookup_by_table_id_t fn)
{
	struct l3mdev_handler *hdlr;

	if (l3mdev_check_type(l3type))
		return;

	hdlr = &l3mdev_handlers[l3type];

	spin_lock(&l3mdev_lock);

	if (hdlr->dev_lookup == fn)
		hdlr->dev_lookup = NULL;

	spin_unlock(&l3mdev_lock);
}
EXPORT_SYMBOL_GPL(l3mdev_table_lookup_unregister);
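
/* l3mdev_ifindex_lookup_by_table_id() resolves @table_id to the ifindex
 * of the l3mdev device owning that FIB table, using the handler
 * registered for @l3type. Returns the ifindex on success and a negative
 * errno otherwise (-EINVAL when the type is out of range or no handler
 * is registered, or whatever the handler itself returns on failure).
 */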
int l3mdev_ifindex_lookup_by_table_id(enum l3mdev_type l3type,
				      struct net *net, u32 table_id)
{
	lookup_by_table_id_t lookup;
	struct l3mdev_handler *hdlr;
	int ifindex = -EINVAL;
	int res;

	res = l3mdev_check_type(l3type);
	if (res)
		return res;

	hdlr = &l3mdev_handlers[l3type];

	spin_lock(&l3mdev_lock);

	lookup = hdlr->dev_lookup;
	if (!lookup)
		goto unlock;

	ifindex = lookup(net, table_id);

unlock:
	spin_unlock(&l3mdev_lock);

	return ifindex;
}
EXPORT_SYMBOL_GPL(l3mdev_ifindex_lookup_by_table_id);
/**
 * l3mdev_master_ifindex_rcu - get index of L3 master device
 * @dev: targeted interface
 */
int l3mdev_master_ifindex_rcu(const struct net_device *dev)
{
	int ifindex = 0;

	if (!dev)
		return 0;

	if (netif_is_l3_master(dev)) {
		ifindex = dev->ifindex;
	} else if (netif_is_l3_slave(dev)) {
		struct net_device *master;
		struct net_device *_dev = (struct net_device *)dev;

		/* netdev_master_upper_dev_get_rcu calls
		 * list_first_or_null_rcu to walk the upper dev list.
		 * list_first_or_null_rcu does not handle a const arg. We aren't
		 * making changes, just want the master device from that list so
		 * typecast to remove the const
		 */
		master = netdev_master_upper_dev_get_rcu(_dev);
		if (master)
			ifindex = master->ifindex;
	}

	return ifindex;
}
EXPORT_SYMBOL_GPL(l3mdev_master_ifindex_rcu);
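
/* Callers of l3mdev_master_ifindex_rcu() that are not already inside an
 * RCU read-side critical section normally use the l3mdev_master_ifindex()
 * wrapper from <net/l3mdev.h>, which (roughly) brackets the call with
 * the RCU read lock:
 *
 *	rcu_read_lock();
 *	ifindex = l3mdev_master_ifindex_rcu(dev);
 *	rcu_read_unlock();
 */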
/**
 * l3mdev_master_upper_ifindex_by_index_rcu - get index of upper l3 master
 *					       device
 * @net: network namespace for device index lookup
 * @ifindex: targeted interface
 */
int l3mdev_master_upper_ifindex_by_index_rcu(struct net *net, int ifindex)
{
	struct net_device *dev;

	dev = dev_get_by_index_rcu(net, ifindex);
	while (dev && !netif_is_l3_master(dev))
		dev = netdev_master_upper_dev_get_rcu(dev);

	return dev ? dev->ifindex : 0;
}
EXPORT_SYMBOL_GPL(l3mdev_master_upper_ifindex_by_index_rcu);
/**
 * l3mdev_fib_table_rcu - get FIB table id associated with an L3
 *			  master interface
 * @dev: targeted interface
 */
u32 l3mdev_fib_table_rcu(const struct net_device *dev)
{
	u32 tb_id = 0;

	if (!dev)
		return 0;

	if (netif_is_l3_master(dev)) {
		if (dev->l3mdev_ops->l3mdev_fib_table)
			tb_id = dev->l3mdev_ops->l3mdev_fib_table(dev);
	} else if (netif_is_l3_slave(dev)) {
		/* Users of netdev_master_upper_dev_get_rcu need non-const,
		 * but current inet_*type functions take a const
		 */
		struct net_device *_dev = (struct net_device *)dev;
		const struct net_device *master;

		master = netdev_master_upper_dev_get_rcu(_dev);
		if (master &&
		    master->l3mdev_ops->l3mdev_fib_table)
			tb_id = master->l3mdev_ops->l3mdev_fib_table(master);
	}

	return tb_id;
}
EXPORT_SYMBOL_GPL(l3mdev_fib_table_rcu);
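
/* l3mdev_fib_table_by_index() is the convenience variant of
 * l3mdev_fib_table_rcu() for callers holding no RCU lock: it takes
 * rcu_read_lock() itself, maps @ifindex to a device, and returns the
 * associated FIB table id, or 0 when the index does not resolve to an
 * L3 master or slave device.
 */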
u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
{
	struct net_device *dev;
	u32 tb_id = 0;

	if (!ifindex)
		return 0;

	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, ifindex);
	if (dev)
		tb_id = l3mdev_fib_table_rcu(dev);

	rcu_read_unlock();

	return tb_id;
}
EXPORT_SYMBOL_GPL(l3mdev_fib_table_by_index);
/**
 * l3mdev_link_scope_lookup - IPv6 route lookup based on flow for link
 *			      local and multicast addresses
 * @net: network namespace for device index lookup
 * @fl6: IPv6 flow struct for lookup
 * This function does not hold refcnt on the returned dst.
 * Caller must hold rcu_read_lock().
 */
struct dst_entry *l3mdev_link_scope_lookup(struct net *net,
					   struct flowi6 *fl6)
{
	struct dst_entry *dst = NULL;
	struct net_device *dev;

	WARN_ON_ONCE(!rcu_read_lock_held());
	if (fl6->flowi6_oif) {
		dev = dev_get_by_index_rcu(net, fl6->flowi6_oif);
		if (dev && netif_is_l3_slave(dev))
			dev = netdev_master_upper_dev_get_rcu(dev);

		if (dev && netif_is_l3_master(dev) &&
		    dev->l3mdev_ops->l3mdev_link_scope_lookup)
			dst = dev->l3mdev_ops->l3mdev_link_scope_lookup(dev, fl6);
	}

	return dst;
}
EXPORT_SYMBOL_GPL(l3mdev_link_scope_lookup);
/**
 * l3mdev_fib_rule_match - Determine if flowi references an
 *			   L3 master device
 * @net: network namespace for device index lookup
 * @fl: flow struct
 * @arg: store the table the rule matched with here
 */
int l3mdev_fib_rule_match(struct net *net, struct flowi *fl,
			  struct fib_lookup_arg *arg)
{
	struct net_device *dev;
	int rc = 0;

	/* update flow ensures flowi_l3mdev is set when relevant */
	if (!fl->flowi_l3mdev)
		return 0;

	rcu_read_lock();

	dev = dev_get_by_index_rcu(net, fl->flowi_l3mdev);
	if (dev && netif_is_l3_master(dev) &&
	    dev->l3mdev_ops->l3mdev_fib_table) {
		arg->table = dev->l3mdev_ops->l3mdev_fib_table(dev);
		rc = 1;
	}

	rcu_read_unlock();

	return rc;
}
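
/* l3mdev_update_flow() stamps the flow with the ifindex of the L3
 * master device implied by flowi_oif or flowi_iif, so that FIB rules
 * can match on it later (see l3mdev_fib_rule_match() above). When the
 * output device is itself the l3mdev, flowi_oif is cleared to avoid a
 * spurious oif match during fib_lookup.
 */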
void l3mdev_update_flow(struct net *net, struct flowi *fl)
{
	struct net_device *dev;

	rcu_read_lock();

	if (fl->flowi_oif) {
		dev = dev_get_by_index_rcu(net, fl->flowi_oif);
		if (dev) {
			if (!fl->flowi_l3mdev)
				fl->flowi_l3mdev = l3mdev_master_ifindex_rcu(dev);

			/* oif set to L3mdev directs lookup to its table;
			 * reset to avoid oif match in fib_lookup
			 */
			if (netif_is_l3_master(dev))
				fl->flowi_oif = 0;
			goto out;
		}
	}

	if (fl->flowi_iif > LOOPBACK_IFINDEX && !fl->flowi_l3mdev) {
		dev = dev_get_by_index_rcu(net, fl->flowi_iif);
		if (dev)
			fl->flowi_l3mdev = l3mdev_master_ifindex_rcu(dev);
	}

out:
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(l3mdev_update_flow);