net/bridge/br_netlink.c

/*
 *	Bridge netlink control interface
 *
 *	Authors:
 *	Stephen Hemminger		<shemminger@osdl.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <uapi/linux/if_bridge.h>

#include "br_private.h"
#include "br_private_stp.h"
#include "br_private_tunnel.h"

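/* Count the bridge_vlan_info entries needed for a compressed VLAN dump,
 * folding runs of consecutive vids with identical flags into ranges.
 */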
static int __get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				u32 filter_mask)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int num_vlans = 0;

	if (!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	pvid = br_get_pvid(vg);
	/* Count number of vlan infos */
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		/* only a context, bridge vlan not activated */
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			if ((vid_range_end - vid_range_start) > 0)
				num_vlans += 2;
			else
				num_vlans += 1;
		}

initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		if ((vid_range_end - vid_range_start) > 0)
			num_vlans += 2;
		else
			num_vlans += 1;
	}

	return num_vlans;
}

static int br_get_num_vlan_infos(struct net_bridge_vlan_group *vg,
				 u32 filter_mask)
{
	int num_vlans;

	if (!vg)
		return 0;

	if (filter_mask & RTEXT_FILTER_BRVLAN)
		return vg->num_vlans;

	rcu_read_lock();
	num_vlans = __get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	return num_vlans;
}

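/* Size of the IFLA_AF_SPEC payload (VLAN and tunnel info) for @dev,
 * which may be a bridge device or a bridge port.
 */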
static size_t br_get_link_af_size_filtered(const struct net_device *dev,
					   u32 filter_mask)
{
	struct net_bridge_vlan_group *vg = NULL;
	struct net_bridge_port *p = NULL;
	struct net_bridge *br;
	int num_vlan_infos;
	size_t vinfo_sz = 0;

	rcu_read_lock();
	if (br_port_exists(dev)) {
		p = br_port_get_rcu(dev);
		vg = nbp_vlan_group_rcu(p);
	} else if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group_rcu(br);
	}
	num_vlan_infos = br_get_num_vlan_infos(vg, filter_mask);
	rcu_read_unlock();

	if (p && (p->flags & BR_VLAN_TUNNEL))
		vinfo_sz += br_get_vlan_tunnel_info_size(vg);

	/* Each VLAN is returned in bridge_vlan_info along with flags */
	vinfo_sz += num_vlan_infos * nla_total_size(sizeof(struct bridge_vlan_info));

	return vinfo_sz;
}

static inline size_t br_port_info_size(void)
{
	return nla_total_size(1)	/* IFLA_BRPORT_STATE */
		+ nla_total_size(2)	/* IFLA_BRPORT_PRIORITY */
		+ nla_total_size(4)	/* IFLA_BRPORT_COST */
		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */
		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */
		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_TO_UCAST */
		+ nla_total_size(1)	/* IFLA_BRPORT_LEARNING */
		+ nla_total_size(1)	/* IFLA_BRPORT_UNICAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_MCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_BCAST_FLOOD */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP */
		+ nla_total_size(1)	/* IFLA_BRPORT_PROXYARP_WIFI */
		+ nla_total_size(1)	/* IFLA_BRPORT_VLAN_TUNNEL */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_ROOT_ID */
		+ nla_total_size(sizeof(struct ifla_bridge_id))	/* IFLA_BRPORT_BRIDGE_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_PORT */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_DESIGNATED_COST */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_ID */
		+ nla_total_size(sizeof(u16))	/* IFLA_BRPORT_NO */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_TOPOLOGY_CHANGE_ACK */
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_CONFIG_PENDING */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_MESSAGE_AGE_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_FORWARD_DELAY_TIMER */
		+ nla_total_size_64bit(sizeof(u64)) /* IFLA_BRPORT_HOLD_TIMER */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
		+ nla_total_size(sizeof(u8))	/* IFLA_BRPORT_MULTICAST_ROUTER */
#endif
		+ 0;
}

static inline size_t br_nlmsg_size(struct net_device *dev, u32 filter_mask)
{
	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
		+ nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
		+ nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
		+ nla_total_size(4) /* IFLA_MASTER */
		+ nla_total_size(4) /* IFLA_MTU */
		+ nla_total_size(4) /* IFLA_LINK */
		+ nla_total_size(1) /* IFLA_OPERSTATE */
		+ nla_total_size(br_port_info_size()) /* IFLA_PROTINFO */
		+ nla_total_size(br_get_link_af_size_filtered(dev,
							      filter_mask)); /* IFLA_AF_SPEC */
}

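/* Fill the per-port IFLA_BRPORT_* attributes; used both inside the
 * IFLA_PROTINFO nest and for the port's slave info.
 */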
static int br_port_fill_attrs(struct sk_buff *skb,
			      const struct net_bridge_port *p)
{
	u8 mode = !!(p->flags & BR_HAIRPIN_MODE);
	u64 timerval;

	if (nla_put_u8(skb, IFLA_BRPORT_STATE, p->state) ||
	    nla_put_u16(skb, IFLA_BRPORT_PRIORITY, p->priority) ||
	    nla_put_u32(skb, IFLA_BRPORT_COST, p->path_cost) ||
	    nla_put_u8(skb, IFLA_BRPORT_MODE, mode) ||
	    nla_put_u8(skb, IFLA_BRPORT_GUARD, !!(p->flags & BR_BPDU_GUARD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROTECT,
		       !!(p->flags & BR_ROOT_BLOCK)) ||
	    nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
		       !!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
		       !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
	    nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
	    nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
		       !!(p->flags & BR_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_MCAST_FLOOD,
		       !!(p->flags & BR_MCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_BCAST_FLOOD,
		       !!(p->flags & BR_BCAST_FLOOD)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP, !!(p->flags & BR_PROXYARP)) ||
	    nla_put_u8(skb, IFLA_BRPORT_PROXYARP_WIFI,
		       !!(p->flags & BR_PROXYARP_WIFI)) ||
	    nla_put(skb, IFLA_BRPORT_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_root) ||
	    nla_put(skb, IFLA_BRPORT_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &p->designated_bridge) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_PORT, p->designated_port) ||
	    nla_put_u16(skb, IFLA_BRPORT_DESIGNATED_COST, p->designated_cost) ||
	    nla_put_u16(skb, IFLA_BRPORT_ID, p->port_id) ||
	    nla_put_u16(skb, IFLA_BRPORT_NO, p->port_no) ||
	    nla_put_u8(skb, IFLA_BRPORT_TOPOLOGY_CHANGE_ACK,
		       p->topology_change_ack) ||
	    nla_put_u8(skb, IFLA_BRPORT_CONFIG_PENDING, p->config_pending) ||
	    nla_put_u8(skb, IFLA_BRPORT_VLAN_TUNNEL, !!(p->flags &
							BR_VLAN_TUNNEL)))
		return -EMSGSIZE;

	timerval = br_timer_value(&p->message_age_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_MESSAGE_AGE_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->forward_delay_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_FORWARD_DELAY_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;
	timerval = br_timer_value(&p->hold_timer);
	if (nla_put_u64_64bit(skb, IFLA_BRPORT_HOLD_TIMER, timerval,
			      IFLA_BRPORT_PAD))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BRPORT_MULTICAST_ROUTER,
		       p->multicast_router))
		return -EMSGSIZE;
#endif

	return 0;
}

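/* Emit a single bridge_vlan_info, or a RANGE_BEGIN/RANGE_END pair when
 * vid_start and vid_end describe a range.
 */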
static int br_fill_ifvlaninfo_range(struct sk_buff *skb, u16 vid_start,
				    u16 vid_end, u16 flags)
{
	struct bridge_vlan_info vinfo;

	if ((vid_end - vid_start) > 0) {
		/* add range to skb */
		vinfo.vid = vid_start;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_BEGIN;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;

		vinfo.vid = vid_end;
		vinfo.flags = flags | BRIDGE_VLAN_INFO_RANGE_END;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	} else {
		vinfo.vid = vid_start;
		vinfo.flags = flags;
		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static int br_fill_ifvlaninfo_compressed(struct sk_buff *skb,
					 struct net_bridge_vlan_group *vg)
{
	struct net_bridge_vlan *v;
	u16 vid_range_start = 0, vid_range_end = 0, vid_range_flags = 0;
	u16 flags, pvid;
	int err = 0;

	/* Pack IFLA_BRIDGE_VLAN_INFO's for every vlan
	 * and mark vlan info with begin and end flags
	 * if vlaninfo represents a range
	 */
	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		flags = 0;
		if (!br_vlan_should_use(v))
			continue;
		if (v->vid == pvid)
			flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (vid_range_start == 0) {
			goto initvars;
		} else if ((v->vid - vid_range_end) == 1 &&
			   flags == vid_range_flags) {
			vid_range_end = v->vid;
			continue;
		} else {
			err = br_fill_ifvlaninfo_range(skb, vid_range_start,
						       vid_range_end,
						       vid_range_flags);
			if (err)
				return err;
		}

initvars:
		vid_range_start = v->vid;
		vid_range_end = v->vid;
		vid_range_flags = flags;
	}

	if (vid_range_start != 0) {
		/* Call it once more to send any left over vlans */
		err = br_fill_ifvlaninfo_range(skb, vid_range_start,
					       vid_range_end,
					       vid_range_flags);
		if (err)
			return err;
	}

	return 0;
}

static int br_fill_ifvlaninfo(struct sk_buff *skb,
			      struct net_bridge_vlan_group *vg)
{
	struct bridge_vlan_info vinfo;
	struct net_bridge_vlan *v;
	u16 pvid;

	pvid = br_get_pvid(vg);
	list_for_each_entry_rcu(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;

		vinfo.vid = v->vid;
		vinfo.flags = 0;
		if (v->vid == pvid)
			vinfo.flags |= BRIDGE_VLAN_INFO_PVID;

		if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
			vinfo.flags |= BRIDGE_VLAN_INFO_UNTAGGED;

		if (nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
			    sizeof(vinfo), &vinfo))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

/*
 * Create one netlink message for one interface
 * Contains port and master info as well as carrier and bridge state.
 */
static int br_fill_ifinfo(struct sk_buff *skb,
			  struct net_bridge_port *port,
			  u32 pid, u32 seq, int event, unsigned int flags,
			  u32 filter_mask, const struct net_device *dev)
{
	struct net_bridge *br;
	struct ifinfomsg *hdr;
	struct nlmsghdr *nlh;
	u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;

	if (port)
		br = port->br;
	else
		br = netdev_priv(dev);

	br_debug(br, "br_fill_info event %d port %s master %s\n",
		 event, dev->name, br->dev->name);

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	hdr = nlmsg_data(nlh);
	hdr->ifi_family = AF_BRIDGE;
	hdr->__ifi_pad = 0;
	hdr->ifi_type = dev->type;
	hdr->ifi_index = dev->ifindex;
	hdr->ifi_flags = dev_get_flags(dev);
	hdr->ifi_change = 0;

	if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
	    nla_put_u32(skb, IFLA_MASTER, br->dev->ifindex) ||
	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
	    nla_put_u8(skb, IFLA_OPERSTATE, operstate) ||
	    (dev->addr_len &&
	     nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
	    (dev->ifindex != dev_get_iflink(dev) &&
	     nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))))
		goto nla_put_failure;

	if (event == RTM_NEWLINK && port) {
		struct nlattr *nest
			= nla_nest_start(skb, IFLA_PROTINFO | NLA_F_NESTED);

		if (nest == NULL || br_port_fill_attrs(skb, port) < 0)
			goto nla_put_failure;
		nla_nest_end(skb, nest);
	}

	/* Check if the VID information is requested */
	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
		struct net_bridge_vlan_group *vg;
		struct nlattr *af;
		int err;

		/* RCU needed because of the VLAN locking rules (rcu || rtnl) */
		rcu_read_lock();
		if (port)
			vg = nbp_vlan_group_rcu(port);
		else
			vg = br_vlan_group_rcu(br);

		if (!vg || !vg->num_vlans) {
			rcu_read_unlock();
			goto done;
		}
		af = nla_nest_start(skb, IFLA_AF_SPEC);
		if (!af) {
			rcu_read_unlock();
			goto nla_put_failure;
		}
		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
			err = br_fill_ifvlaninfo_compressed(skb, vg);
		else
			err = br_fill_ifvlaninfo(skb, vg);

		if (port && (port->flags & BR_VLAN_TUNNEL))
			err = br_fill_vlan_tunnel_info(skb, vg);
		rcu_read_unlock();
		if (err)
			goto nla_put_failure;
		nla_nest_end(skb, af);
	}

done:
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/*
 * Notify listeners of a change in port information
 */
void br_ifinfo_notify(int event, struct net_bridge_port *port)
{
	struct net *net;
	struct sk_buff *skb;
	int err = -ENOBUFS;
	u32 filter = RTEXT_FILTER_BRVLAN_COMPRESSED;

	if (!port)
		return;

	net = dev_net(port->dev);
	br_debug(port->br, "port %u(%s) event %d\n",
		 (unsigned int)port->port_no, port->dev->name, event);

	skb = nlmsg_new(br_nlmsg_size(port->dev, filter), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = br_fill_ifinfo(skb, port, 0, 0, event, 0, filter, port->dev);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in br_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_LINK, err);
}

/*
 * Dump information about all ports, in response to GETLINK
 */
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
	       struct net_device *dev, u32 filter_mask, int nlflags)
{
	struct net_bridge_port *port = br_port_get_rtnl(dev);

	if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN) &&
	    !(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
		return 0;

	return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
			      filter_mask, dev);
}

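/* Apply one VLAN add (RTM_SETLINK) or delete (RTM_DELLINK) to a port,
 * or to the bridge itself when @p is NULL.
 */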
static int br_vlan_info(struct net_bridge *br, struct net_bridge_port *p,
			int cmd, struct bridge_vlan_info *vinfo)
{
	int err = 0;

	switch (cmd) {
	case RTM_SETLINK:
		if (p) {
			/* if the MASTER flag is set this will act on the global
			 * per-VLAN entry as well
			 */
			err = nbp_vlan_add(p, vinfo->vid, vinfo->flags);
			if (err)
				break;
		} else {
			vinfo->flags |= BRIDGE_VLAN_INFO_BRENTRY;
			err = br_vlan_add(br, vinfo->vid, vinfo->flags);
		}
		break;

	case RTM_DELLINK:
		if (p) {
			nbp_vlan_delete(p, vinfo->vid);
			if (vinfo->flags & BRIDGE_VLAN_INFO_MASTER)
				br_vlan_delete(p->br, vinfo->vid);
		} else {
			br_vlan_delete(br, vinfo->vid);
		}
		break;
	}

	return err;
}

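/* Handle one IFLA_BRIDGE_VLAN_INFO entry, expanding RANGE_BEGIN/RANGE_END
 * pairs into per-vid calls to br_vlan_info().
 */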
static int br_process_vlan_info(struct net_bridge *br,
				struct net_bridge_port *p, int cmd,
				struct bridge_vlan_info *vinfo_curr,
				struct bridge_vlan_info **vinfo_last)
{
	if (!vinfo_curr->vid || vinfo_curr->vid >= VLAN_VID_MASK)
		return -EINVAL;

	if (vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
		/* check if we are already processing a range */
		if (*vinfo_last)
			return -EINVAL;
		*vinfo_last = vinfo_curr;
		/* don't allow range of pvids */
		if ((*vinfo_last)->flags & BRIDGE_VLAN_INFO_PVID)
			return -EINVAL;
		return 0;
	}

	if (*vinfo_last) {
		struct bridge_vlan_info tmp_vinfo;
		int v, err;

		if (!(vinfo_curr->flags & BRIDGE_VLAN_INFO_RANGE_END))
			return -EINVAL;

		if (vinfo_curr->vid <= (*vinfo_last)->vid)
			return -EINVAL;

		memcpy(&tmp_vinfo, *vinfo_last,
		       sizeof(struct bridge_vlan_info));
		for (v = (*vinfo_last)->vid; v <= vinfo_curr->vid; v++) {
			tmp_vinfo.vid = v;
			err = br_vlan_info(br, p, cmd, &tmp_vinfo);
			if (err)
				break;
		}
		*vinfo_last = NULL;

		return 0;
	}

	return br_vlan_info(br, p, cmd, vinfo_curr);
}

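/* Walk the nested IFLA_AF_SPEC attributes (VLAN and VLAN tunnel info)
 * for an RTM_SETLINK or RTM_DELLINK request.
 */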
static int br_afspec(struct net_bridge *br,
		     struct net_bridge_port *p,
		     struct nlattr *af_spec,
		     int cmd)
{
	struct bridge_vlan_info *vinfo_curr = NULL;
	struct bridge_vlan_info *vinfo_last = NULL;
	struct nlattr *attr;
	struct vtunnel_info tinfo_last = {};
	struct vtunnel_info tinfo_curr = {};
	int err = 0, rem;

	nla_for_each_nested(attr, af_spec, rem) {
		err = 0;
		switch (nla_type(attr)) {
		case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
			if (!p || !(p->flags & BR_VLAN_TUNNEL))
				return -EINVAL;
			err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
			if (err)
				return err;
			err = br_process_vlan_tunnel_info(br, p, cmd,
							  &tinfo_curr,
							  &tinfo_last);
			if (err)
				return err;
			break;
		case IFLA_BRIDGE_VLAN_INFO:
			if (nla_len(attr) != sizeof(struct bridge_vlan_info))
				return -EINVAL;
			vinfo_curr = nla_data(attr);
			err = br_process_vlan_info(br, p, cmd, vinfo_curr,
						   &vinfo_last);
			if (err)
				return err;
			break;
		}
	}

	return err;
}

static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
	[IFLA_BRPORT_STATE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_COST]	= { .type = NLA_U32 },
	[IFLA_BRPORT_PRIORITY]	= { .type = NLA_U16 },
	[IFLA_BRPORT_MODE]	= { .type = NLA_U8 },
	[IFLA_BRPORT_GUARD]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROTECT]	= { .type = NLA_U8 },
	[IFLA_BRPORT_FAST_LEAVE] = { .type = NLA_U8 },
	[IFLA_BRPORT_LEARNING]	= { .type = NLA_U8 },
	[IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP]	= { .type = NLA_U8 },
	[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
	[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
	[IFLA_BRPORT_MCAST_FLOOD] = { .type = NLA_U8 },
	[IFLA_BRPORT_BCAST_FLOOD] = { .type = NLA_U8 },
};

/* Change the state of the port and notify spanning tree */
static int br_set_port_state(struct net_bridge_port *p, u8 state)
{
	if (state > BR_STATE_BLOCKING)
		return -EINVAL;

	/* if kernel STP is running, don't allow changes */
	if (p->br->stp_enabled == BR_KERNEL_STP)
		return -EBUSY;

	/* if device is not up, change is not allowed
	 * if link is not present, only allowable state is disabled
	 */
	if (!netif_running(p->dev) ||
	    (!netif_oper_up(p->dev) && state != BR_STATE_DISABLED))
		return -ENETDOWN;

	br_set_state(p, state);
	br_port_state_selection(p->br);
	return 0;
}

/* Set/clear port flags based on attribute */
static int br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
			    int attrtype, unsigned long mask)
{
	unsigned long flags;
	int err;

	if (!tb[attrtype])
		return 0;

	if (nla_get_u8(tb[attrtype]))
		flags = p->flags | mask;
	else
		flags = p->flags & ~mask;

	err = br_switchdev_set_port_flag(p, flags, mask);
	if (err)
		return err;

	p->flags = flags;
	return 0;
}

/* Process bridge protocol info on port */
static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
{
	unsigned long old_flags = p->flags;
	bool br_vlan_tunnel_old = false;
	int err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_GUARD, BR_BPDU_GUARD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_BCAST_FLOOD, BR_BCAST_FLOOD);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
	if (err)
		return err;

	err = br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
	if (err)
		return err;

	br_vlan_tunnel_old = (p->flags & BR_VLAN_TUNNEL) ? true : false;
	err = br_set_port_flag(p, tb, IFLA_BRPORT_VLAN_TUNNEL, BR_VLAN_TUNNEL);
	if (err)
		return err;

	if (br_vlan_tunnel_old && !(p->flags & BR_VLAN_TUNNEL))
		nbp_vlan_tunnel_info_flush(p);

	if (tb[IFLA_BRPORT_COST]) {
		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_PRIORITY]) {
		err = br_stp_set_port_priority(p, nla_get_u16(tb[IFLA_BRPORT_PRIORITY]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_STATE]) {
		err = br_set_port_state(p, nla_get_u8(tb[IFLA_BRPORT_STATE]));
		if (err)
			return err;
	}

	if (tb[IFLA_BRPORT_FLUSH])
		br_fdb_delete_by_port(p->br, p, 0, 0);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (tb[IFLA_BRPORT_MULTICAST_ROUTER]) {
		u8 mcast_router = nla_get_u8(tb[IFLA_BRPORT_MULTICAST_ROUTER]);

		err = br_multicast_set_port_router(p, mcast_router);
		if (err)
			return err;
	}
#endif
	br_port_flags_change(p, old_flags ^ p->flags);
	return 0;
}

/* Change state and parameters on port. */
int br_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *protinfo;
	struct nlattr *afspec;
	struct net_bridge_port *p;
	struct nlattr *tb[IFLA_BRPORT_MAX + 1];
	int err = 0;

	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_PROTINFO);
	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!protinfo && !afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself if the AF_SPEC
	 * is set to see if someone is setting vlan info on the bridge
	 */
	if (!p && !afspec)
		return -EINVAL;

	if (p && protinfo) {
		if (protinfo->nla_type & NLA_F_NESTED) {
			err = nla_parse_nested(tb, IFLA_BRPORT_MAX, protinfo,
					       br_port_policy, NULL);
			if (err)
				return err;

			spin_lock_bh(&p->br->lock);
			err = br_setport(p, tb);
			spin_unlock_bh(&p->br->lock);
		} else {
			/* Binary compatibility with old RSTP */
			if (nla_len(protinfo) < sizeof(u8))
				return -EINVAL;

			spin_lock_bh(&p->br->lock);
			err = br_set_port_state(p, nla_get_u8(protinfo));
			spin_unlock_bh(&p->br->lock);
		}
		if (err)
			goto out;
	}

	if (afspec) {
		err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
				afspec, RTM_SETLINK);
	}

	if (err == 0)
		br_ifinfo_notify(RTM_NEWLINK, p);
out:
	return err;
}

/* Delete port information */
int br_dellink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags)
{
	struct nlattr *afspec;
	struct net_bridge_port *p;
	int err = 0;

	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!afspec)
		return 0;

	p = br_port_get_rtnl(dev);
	/* We want to accept dev as bridge itself as well */
	if (!p && !(dev->priv_flags & IFF_EBRIDGE))
		return -EINVAL;

	err = br_afspec((struct net_bridge *)netdev_priv(dev), p,
			afspec, RTM_DELLINK);
	if (err == 0)
		/* Send RTM_NEWLINK because userspace
		 * expects RTM_NEWLINK for vlan dels
		 */
		br_ifinfo_notify(RTM_NEWLINK, p);

	return err;
}

static int br_validate(struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return 0;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		switch (nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL])) {
		case htons(ETH_P_8021Q):
		case htons(ETH_P_8021AD):
			break;
		default:
			return -EPROTONOSUPPORT;
		}
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		if (defpvid >= VLAN_VID_MASK)
			return -EINVAL;
	}
#endif

	return 0;
}

static int br_port_slave_changelink(struct net_device *brdev,
				    struct net_device *dev,
				    struct nlattr *tb[],
				    struct nlattr *data[],
				    struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int ret;

	if (!data)
		return 0;

	spin_lock_bh(&br->lock);
	ret = br_setport(br_port_get_rtnl(dev), data);
	spin_unlock_bh(&br->lock);

	return ret;
}

static int br_port_fill_slave_info(struct sk_buff *skb,
				   const struct net_device *brdev,
				   const struct net_device *dev)
{
	return br_port_fill_attrs(skb, br_port_get_rtnl(dev));
}

static size_t br_port_get_slave_size(const struct net_device *brdev,
				     const struct net_device *dev)
{
	return br_port_info_size();
}

static const struct nla_policy br_policy[IFLA_BR_MAX + 1] = {
	[IFLA_BR_FORWARD_DELAY]	= { .type = NLA_U32 },
	[IFLA_BR_HELLO_TIME]	= { .type = NLA_U32 },
	[IFLA_BR_MAX_AGE]	= { .type = NLA_U32 },
	[IFLA_BR_AGEING_TIME]	= { .type = NLA_U32 },
	[IFLA_BR_STP_STATE]	= { .type = NLA_U32 },
	[IFLA_BR_PRIORITY]	= { .type = NLA_U16 },
	[IFLA_BR_VLAN_FILTERING] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_PROTOCOL]	= { .type = NLA_U16 },
	[IFLA_BR_GROUP_FWD_MASK] = { .type = NLA_U16 },
	[IFLA_BR_GROUP_ADDR]	= { .type = NLA_BINARY,
				    .len  = ETH_ALEN },
	[IFLA_BR_MCAST_ROUTER]	= { .type = NLA_U8 },
	[IFLA_BR_MCAST_SNOOPING] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERY_USE_IFADDR] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_QUERIER]	= { .type = NLA_U8 },
	[IFLA_BR_MCAST_HASH_ELASTICITY] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_HASH_MAX] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_STARTUP_QUERY_CNT] = { .type = NLA_U32 },
	[IFLA_BR_MCAST_LAST_MEMBER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_MEMBERSHIP_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERIER_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_MCAST_STARTUP_QUERY_INTVL] = { .type = NLA_U64 },
	[IFLA_BR_NF_CALL_IPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_IP6TABLES] = { .type = NLA_U8 },
	[IFLA_BR_NF_CALL_ARPTABLES] = { .type = NLA_U8 },
	[IFLA_BR_VLAN_DEFAULT_PVID] = { .type = NLA_U16 },
	[IFLA_BR_VLAN_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_STATS_ENABLED] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_IGMP_VERSION] = { .type = NLA_U8 },
	[IFLA_BR_MCAST_MLD_VERSION] = { .type = NLA_U8 },
};

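/* Apply IFLA_BR_* attributes to the bridge device; also called from
 * br_dev_newlink() once the device has been registered.
 */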
static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
			 struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(brdev);
	int err;

	if (!data)
		return 0;

	if (data[IFLA_BR_FORWARD_DELAY]) {
		err = br_set_forward_delay(br, nla_get_u32(data[IFLA_BR_FORWARD_DELAY]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_HELLO_TIME]) {
		err = br_set_hello_time(br, nla_get_u32(data[IFLA_BR_HELLO_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_MAX_AGE]) {
		err = br_set_max_age(br, nla_get_u32(data[IFLA_BR_MAX_AGE]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_AGEING_TIME]) {
		err = br_set_ageing_time(br, nla_get_u32(data[IFLA_BR_AGEING_TIME]));
		if (err)
			return err;
	}

	if (data[IFLA_BR_STP_STATE]) {
		u32 stp_enabled = nla_get_u32(data[IFLA_BR_STP_STATE]);

		br_stp_set_enabled(br, stp_enabled);
	}

	if (data[IFLA_BR_PRIORITY]) {
		u32 priority = nla_get_u16(data[IFLA_BR_PRIORITY]);

		br_stp_set_bridge_priority(br, priority);
	}

	if (data[IFLA_BR_VLAN_FILTERING]) {
		u8 vlan_filter = nla_get_u8(data[IFLA_BR_VLAN_FILTERING]);

		err = __br_vlan_filter_toggle(br, vlan_filter);
		if (err)
			return err;
	}

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (data[IFLA_BR_VLAN_PROTOCOL]) {
		__be16 vlan_proto = nla_get_be16(data[IFLA_BR_VLAN_PROTOCOL]);

		err = __br_vlan_set_proto(br, vlan_proto);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
		__u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);

		err = __br_vlan_set_default_pvid(br, defpvid);
		if (err)
			return err;
	}

	if (data[IFLA_BR_VLAN_STATS_ENABLED]) {
		__u8 vlan_stats = nla_get_u8(data[IFLA_BR_VLAN_STATS_ENABLED]);

		err = br_vlan_set_stats(br, vlan_stats);
		if (err)
			return err;
	}
#endif

	if (data[IFLA_BR_GROUP_FWD_MASK]) {
		u16 fwd_mask = nla_get_u16(data[IFLA_BR_GROUP_FWD_MASK]);

		if (fwd_mask & BR_GROUPFWD_RESTRICTED)
			return -EINVAL;
		br->group_fwd_mask = fwd_mask;
	}

	if (data[IFLA_BR_GROUP_ADDR]) {
		u8 new_addr[ETH_ALEN];

		if (nla_len(data[IFLA_BR_GROUP_ADDR]) != ETH_ALEN)
			return -EINVAL;
		memcpy(new_addr, nla_data(data[IFLA_BR_GROUP_ADDR]), ETH_ALEN);
		if (!is_link_local_ether_addr(new_addr))
			return -EINVAL;
		if (new_addr[5] == 1 ||		/* 802.3x Pause address */
		    new_addr[5] == 2 ||		/* 802.3ad Slow protocols */
		    new_addr[5] == 3)		/* 802.1X PAE address */
			return -EINVAL;
		spin_lock_bh(&br->lock);
		memcpy(br->group_addr, new_addr, sizeof(br->group_addr));
		spin_unlock_bh(&br->lock);
		br->group_addr_set = true;
		br_recalculate_fwd_mask(br);
	}

	if (data[IFLA_BR_FDB_FLUSH])
		br_fdb_flush(br);

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (data[IFLA_BR_MCAST_ROUTER]) {
		u8 multicast_router = nla_get_u8(data[IFLA_BR_MCAST_ROUTER]);

		err = br_multicast_set_router(br, multicast_router);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_SNOOPING]) {
		u8 mcast_snooping = nla_get_u8(data[IFLA_BR_MCAST_SNOOPING]);

		err = br_multicast_toggle(br, mcast_snooping);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_QUERY_USE_IFADDR]) {
		u8 val;

		val = nla_get_u8(data[IFLA_BR_MCAST_QUERY_USE_IFADDR]);
		br->multicast_query_use_ifaddr = !!val;
	}

	if (data[IFLA_BR_MCAST_QUERIER]) {
		u8 mcast_querier = nla_get_u8(data[IFLA_BR_MCAST_QUERIER]);

		err = br_multicast_set_querier(br, mcast_querier);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_HASH_ELASTICITY]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_HASH_ELASTICITY]);

		br->hash_elasticity = val;
	}

	if (data[IFLA_BR_MCAST_HASH_MAX]) {
		u32 hash_max = nla_get_u32(data[IFLA_BR_MCAST_HASH_MAX]);

		err = br_multicast_set_hash_max(br, hash_max);
		if (err)
			return err;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_LAST_MEMBER_CNT]);

		br->multicast_last_member_count = val;
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]) {
		u32 val = nla_get_u32(data[IFLA_BR_MCAST_STARTUP_QUERY_CNT]);

		br->multicast_startup_query_count = val;
	}

	if (data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_LAST_MEMBER_INTVL]);

		br->multicast_last_member_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_MEMBERSHIP_INTVL]);

		br->multicast_membership_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERIER_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERIER_INTVL]);

		br->multicast_querier_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_INTVL]);

		br->multicast_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_QUERY_RESPONSE_INTVL]);

		br->multicast_query_response_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]) {
		u64 val = nla_get_u64(data[IFLA_BR_MCAST_STARTUP_QUERY_INTVL]);

		br->multicast_startup_query_interval = clock_t_to_jiffies(val);
	}

	if (data[IFLA_BR_MCAST_STATS_ENABLED]) {
		__u8 mcast_stats;

		mcast_stats = nla_get_u8(data[IFLA_BR_MCAST_STATS_ENABLED]);
		br->multicast_stats_enabled = !!mcast_stats;
	}

	if (data[IFLA_BR_MCAST_IGMP_VERSION]) {
		__u8 igmp_version;

		igmp_version = nla_get_u8(data[IFLA_BR_MCAST_IGMP_VERSION]);
		err = br_multicast_set_igmp_version(br, igmp_version);
		if (err)
			return err;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (data[IFLA_BR_MCAST_MLD_VERSION]) {
		__u8 mld_version;

		mld_version = nla_get_u8(data[IFLA_BR_MCAST_MLD_VERSION]);
		err = br_multicast_set_mld_version(br, mld_version);
		if (err)
			return err;
	}
#endif
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (data[IFLA_BR_NF_CALL_IPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IPTABLES]);

		br->nf_call_iptables = val ? true : false;
	}

	if (data[IFLA_BR_NF_CALL_IP6TABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_IP6TABLES]);

		br->nf_call_ip6tables = val ? true : false;
	}

	if (data[IFLA_BR_NF_CALL_ARPTABLES]) {
		u8 val = nla_get_u8(data[IFLA_BR_NF_CALL_ARPTABLES]);

		br->nf_call_arptables = val ? true : false;
	}
#endif

	return 0;
}

static int br_dev_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	int err;

	if (tb[IFLA_ADDRESS]) {
		spin_lock_bh(&br->lock);
		br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
		spin_unlock_bh(&br->lock);
	}

	err = register_netdevice(dev);
	if (err)
		return err;

	err = br_changelink(dev, tb, data, extack);
	if (err)
		unregister_netdevice(dev);
	return err;
}

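/* Upper bound on the IFLA_BR_* payload emitted by br_fill_info() */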
static size_t br_get_size(const struct net_device *brdev)
{
	return nla_total_size(sizeof(u32)) +	/* IFLA_BR_FORWARD_DELAY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_HELLO_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MAX_AGE */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_AGEING_TIME */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_STP_STATE */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_PRIORITY */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_FILTERING */
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	       nla_total_size(sizeof(__be16)) +	/* IFLA_BR_VLAN_PROTOCOL */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_VLAN_DEFAULT_PVID */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_VLAN_STATS_ENABLED */
#endif
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_GROUP_FWD_MASK */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_ROOT_ID */
	       nla_total_size(sizeof(struct ifla_bridge_id)) +	/* IFLA_BR_BRIDGE_ID */
	       nla_total_size(sizeof(u16)) +	/* IFLA_BR_ROOT_PORT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_ROOT_PATH_COST */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_TOPOLOGY_CHANGE_DETECTED */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_HELLO_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TCN_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_TOPOLOGY_CHANGE_TIMER */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_GC_TIMER */
	       nla_total_size(ETH_ALEN) +	/* IFLA_BR_GROUP_ADDR */
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_ROUTER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_SNOOPING */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERY_USE_IFADDR */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_QUERIER */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_STATS_ENABLED */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_ELASTICITY */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_HASH_MAX */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_LAST_MEMBER_CNT */
	       nla_total_size(sizeof(u32)) +	/* IFLA_BR_MCAST_STARTUP_QUERY_CNT */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_LAST_MEMBER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_MEMBERSHIP_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERIER_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_QUERY_RESPONSE_INTVL */
	       nla_total_size_64bit(sizeof(u64)) + /* IFLA_BR_MCAST_STARTUP_QUERY_INTVL */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_IGMP_VERSION */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_MCAST_MLD_VERSION */
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IPTABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_IP6TABLES */
	       nla_total_size(sizeof(u8)) +	/* IFLA_BR_NF_CALL_ARPTABLES */
#endif
	       0;
}

static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
{
	struct net_bridge *br = netdev_priv(brdev);
	u32 forward_delay = jiffies_to_clock_t(br->forward_delay);
	u32 hello_time = jiffies_to_clock_t(br->hello_time);
	u32 age_time = jiffies_to_clock_t(br->max_age);
	u32 ageing_time = jiffies_to_clock_t(br->ageing_time);
	u32 stp_enabled = br->stp_enabled;
	u16 priority = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];
	u8 vlan_enabled = br_vlan_enabled(br->dev);
	u64 clockval;

	clockval = br_timer_value(&br->hello_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_HELLO_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->tcn_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TCN_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->topology_change_timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_TOPOLOGY_CHANGE_TIMER, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = br_timer_value(&br->gc_work.timer);
	if (nla_put_u64_64bit(skb, IFLA_BR_GC_TIMER, clockval, IFLA_BR_PAD))
		return -EMSGSIZE;

	if (nla_put_u32(skb, IFLA_BR_FORWARD_DELAY, forward_delay) ||
	    nla_put_u32(skb, IFLA_BR_HELLO_TIME, hello_time) ||
	    nla_put_u32(skb, IFLA_BR_MAX_AGE, age_time) ||
	    nla_put_u32(skb, IFLA_BR_AGEING_TIME, ageing_time) ||
	    nla_put_u32(skb, IFLA_BR_STP_STATE, stp_enabled) ||
	    nla_put_u16(skb, IFLA_BR_PRIORITY, priority) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_FILTERING, vlan_enabled) ||
	    nla_put_u16(skb, IFLA_BR_GROUP_FWD_MASK, br->group_fwd_mask) ||
	    nla_put(skb, IFLA_BR_BRIDGE_ID, sizeof(struct ifla_bridge_id),
		    &br->bridge_id) ||
	    nla_put(skb, IFLA_BR_ROOT_ID, sizeof(struct ifla_bridge_id),
		    &br->designated_root) ||
	    nla_put_u16(skb, IFLA_BR_ROOT_PORT, br->root_port) ||
	    nla_put_u32(skb, IFLA_BR_ROOT_PATH_COST, br->root_path_cost) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE, br->topology_change) ||
	    nla_put_u8(skb, IFLA_BR_TOPOLOGY_CHANGE_DETECTED,
		       br->topology_change_detected) ||
	    nla_put(skb, IFLA_BR_GROUP_ADDR, ETH_ALEN, br->group_addr))
		return -EMSGSIZE;

#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	if (nla_put_be16(skb, IFLA_BR_VLAN_PROTOCOL, br->vlan_proto) ||
	    nla_put_u16(skb, IFLA_BR_VLAN_DEFAULT_PVID, br->default_pvid) ||
	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED, br->vlan_stats_enabled))
		return -EMSGSIZE;
#endif
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (nla_put_u8(skb, IFLA_BR_MCAST_ROUTER, br->multicast_router) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_SNOOPING, !br->multicast_disabled) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERY_USE_IFADDR,
		       br->multicast_query_use_ifaddr) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_QUERIER, br->multicast_querier) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_STATS_ENABLED,
		       br->multicast_stats_enabled) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_ELASTICITY,
			br->hash_elasticity) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_HASH_MAX, br->hash_max) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_LAST_MEMBER_CNT,
			br->multicast_last_member_count) ||
	    nla_put_u32(skb, IFLA_BR_MCAST_STARTUP_QUERY_CNT,
			br->multicast_startup_query_count) ||
	    nla_put_u8(skb, IFLA_BR_MCAST_IGMP_VERSION,
		       br->multicast_igmp_version))
		return -EMSGSIZE;
#if IS_ENABLED(CONFIG_IPV6)
	if (nla_put_u8(skb, IFLA_BR_MCAST_MLD_VERSION,
		       br->multicast_mld_version))
		return -EMSGSIZE;
#endif
	clockval = jiffies_to_clock_t(br->multicast_last_member_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_LAST_MEMBER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_membership_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_MEMBERSHIP_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_querier_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERIER_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_query_response_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_QUERY_RESPONSE_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
	clockval = jiffies_to_clock_t(br->multicast_startup_query_interval);
	if (nla_put_u64_64bit(skb, IFLA_BR_MCAST_STARTUP_QUERY_INTVL, clockval,
			      IFLA_BR_PAD))
		return -EMSGSIZE;
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	if (nla_put_u8(skb, IFLA_BR_NF_CALL_IPTABLES,
		       br->nf_call_iptables ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_IP6TABLES,
		       br->nf_call_ip6tables ? 1 : 0) ||
	    nla_put_u8(skb, IFLA_BR_NF_CALL_ARPTABLES,
		       br->nf_call_arptables ? 1 : 0))
		return -EMSGSIZE;
#endif

	return 0;
}

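/* Size needed for a LINK_XSTATS_TYPE_BRIDGE dump: one bridge_vlan_xstats
 * per VLAN plus the multicast stats.
 */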
static size_t br_get_linkxstats_size(const struct net_device *dev, int attr)
{
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int numvls = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return 0;
	}

	if (vg) {
		/* we need to count all, even placeholder entries */
		list_for_each_entry(v, &vg->vlan_list, vlist)
			numvls++;
	}

	return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) +
	       nla_total_size(sizeof(struct br_mcast_stats)) +
	       nla_total_size(0);
}

static int br_fill_linkxstats(struct sk_buff *skb,
			      const struct net_device *dev,
			      int *prividx, int attr)
{
	struct nlattr *nla __maybe_unused;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	struct nlattr *nest;
	int vl_idx = 0;

	switch (attr) {
	case IFLA_STATS_LINK_XSTATS:
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
		break;
	case IFLA_STATS_LINK_XSTATS_SLAVE:
		p = br_port_get_rtnl(dev);
		if (!p)
			return 0;
		br = p->br;
		vg = nbp_vlan_group(p);
		break;
	default:
		return -EINVAL;
	}

	nest = nla_nest_start(skb, LINK_XSTATS_TYPE_BRIDGE);
	if (!nest)
		return -EMSGSIZE;

	if (vg) {
		u16 pvid;

		pvid = br_get_pvid(vg);
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			struct bridge_vlan_xstats vxi;
			struct br_vlan_stats stats;

			if (++vl_idx < *prividx)
				continue;
			memset(&vxi, 0, sizeof(vxi));
			vxi.vid = v->vid;
			vxi.flags = v->flags;
			if (v->vid == pvid)
				vxi.flags |= BRIDGE_VLAN_INFO_PVID;
			br_vlan_get_stats(v, &stats);
			vxi.rx_bytes = stats.rx_bytes;
			vxi.rx_packets = stats.rx_packets;
			vxi.tx_bytes = stats.tx_bytes;
			vxi.tx_packets = stats.tx_packets;

			if (nla_put(skb, BRIDGE_XSTATS_VLAN, sizeof(vxi), &vxi))
				goto nla_put_failure;
		}
	}

#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	if (++vl_idx >= *prividx) {
		nla = nla_reserve_64bit(skb, BRIDGE_XSTATS_MCAST,
					sizeof(struct br_mcast_stats),
					BRIDGE_XSTATS_PAD);
		if (!nla)
			goto nla_put_failure;
		br_multicast_get_stats(br, p, nla_data(nla));
	}
#endif
	nla_nest_end(skb, nest);
	*prividx = 0;

	return 0;

nla_put_failure:
	nla_nest_end(skb, nest);
	*prividx = vl_idx;

	return -EMSGSIZE;
}

static struct rtnl_af_ops br_af_ops __read_mostly = {
	.family			= AF_BRIDGE,
	.get_link_af_size	= br_get_link_af_size_filtered,
};

struct rtnl_link_ops br_link_ops __read_mostly = {
	.kind			= "bridge",
	.priv_size		= sizeof(struct net_bridge),
	.setup			= br_dev_setup,
	.maxtype		= IFLA_BR_MAX,
	.policy			= br_policy,
	.validate		= br_validate,
	.newlink		= br_dev_newlink,
	.changelink		= br_changelink,
	.dellink		= br_dev_delete,
	.get_size		= br_get_size,
	.fill_info		= br_fill_info,
	.fill_linkxstats	= br_fill_linkxstats,
	.get_linkxstats_size	= br_get_linkxstats_size,

	.slave_maxtype		= IFLA_BRPORT_MAX,
	.slave_policy		= br_port_policy,
	.slave_changelink	= br_port_slave_changelink,
	.get_slave_size		= br_port_get_slave_size,
	.fill_slave_info	= br_port_fill_slave_info,
};

int __init br_netlink_init(void)
{
	int err;

	br_mdb_init();
	rtnl_af_register(&br_af_ops);

	err = rtnl_link_register(&br_link_ops);
	if (err)
		goto out_af;

	return 0;

out_af:
	rtnl_af_unregister(&br_af_ops);
	br_mdb_uninit();
	return err;
}

void br_netlink_fini(void)
{
	br_mdb_uninit();
	rtnl_af_unregister(&br_af_ops);
	rtnl_link_unregister(&br_link_ops);
}