// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>
static bool switchdev_obj_eq(const struct switchdev_obj *a,
			     const struct switchdev_obj *b)
{
	const struct switchdev_obj_port_vlan *va, *vb;
	const struct switchdev_obj_port_mdb *ma, *mb;

	if (a->id != b->id || a->orig_dev != b->orig_dev)
		return false;

	switch (a->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		va = SWITCHDEV_OBJ_PORT_VLAN(a);
		vb = SWITCHDEV_OBJ_PORT_VLAN(b);
		return va->flags == vb->flags &&
			va->vid == vb->vid &&
			va->changed == vb->changed;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		ma = SWITCHDEV_OBJ_PORT_MDB(a);
		mb = SWITCHDEV_OBJ_PORT_MDB(b);
		return ma->vid == mb->vid &&
			ether_addr_equal(ma->addr, mb->addr);
	default:
		break;
	}

	BUG();
}
static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	netdevice_tracker dev_tracker;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};
static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}
/**
 *	switchdev_deferred_process - Process ops in deferred queue
 *
 *	Called to flush the ops currently queued in the deferred ops queue.
 *	rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		netdev_put(dfitem->dev, &dfitem->dev_tracker);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);
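
/* Usage sketch (illustrative, not part of this file): a caller that needs
 * previously deferred operations to have reached the drivers can flush the
 * queue itself, holding the RTNL lock as required above:
 *
 *	rtnl_lock();
 *	switchdev_deferred_process();
 *	rtnl_unlock();
 */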
static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(struct_size(dfitem, data, data_len), GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	netdev_hold(dev, &dfitem->dev_tracker, GFP_ATOMIC);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}
static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}
static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr,
				       struct netlink_ext_ack *extack)
{
	return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					  extack);
}
static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}
static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}
/**
 *	switchdev_port_attr_set - Set port attribute
 *
 *	@dev: port device
 *	@attr: attribute to set
 *	@extack: netlink extended ack, for error message propagation
 *
 *	rtnl_lock must be held and the caller must not be in an atomic
 *	section, in case the SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr,
			    struct netlink_ext_ack *extack)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
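
/* Usage sketch (illustrative): this is how a caller such as the bridge
 * typically sets an attribute; SWITCHDEV_F_DEFER selects the deferred path
 * above. The attribute id and union member are real switchdev definitions:
 *
 *	struct switchdev_attr attr = {
 *		.orig_dev = brport_dev,
 *		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
 *		.flags = SWITCHDEV_F_DEFER,
 *		.u.stp_state = BR_STATE_FORWARDING,
 *	};
 *
 *	err = switchdev_port_attr_set(dev, &attr, extack);
 */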
static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}
static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}

	if (!obj_info.handled)
		return -EOPNOTSUPP;

	return 0;
}
static void switchdev_obj_id_to_helpful_msg(struct net_device *dev,
					    enum switchdev_obj_id obj_id,
					    int err, bool add)
{
	const char *action = add ? "add" : "del";
	const char *reason = "";
	const char *problem;
	const char *obj_str;

	switch (obj_id) {
	case SWITCHDEV_OBJ_ID_UNDEFINED:
		obj_str = "Undefined object";
		problem = "Attempted operation is undefined, indicating a possible programming\n"
			  "error.\n";
		break;
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		obj_str = "VLAN entry";
		problem = "Failure in VLAN settings on this port might disrupt network\n"
			  "segmentation or traffic isolation, affecting network partitioning.\n";
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		obj_str = "Port Multicast Database entry";
		problem = "Failure in updating the port's Multicast Database could lead to\n"
			  "multicast forwarding issues.\n";
		break;
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		obj_str = "Host Multicast Database entry";
		problem = "Failure in updating the host's Multicast Database may impact multicast\n"
			  "group memberships or traffic delivery, affecting multicast\n"
			  "communication.\n";
		break;
	case SWITCHDEV_OBJ_ID_MRP:
		obj_str = "Media Redundancy Protocol configuration for port";
		problem = "Failure to set MRP ring ID on this port prevents communication with\n"
			  "the specified redundancy ring, resulting in an inability to engage\n"
			  "in MRP-based network operations.\n";
		break;
	case SWITCHDEV_OBJ_ID_RING_TEST_MRP:
		obj_str = "MRP Test Frame Operations for port";
		problem = "Failure to generate/monitor MRP test frames may lead to inability to\n"
			  "assess the ring's operational integrity and fault response, hindering\n"
			  "proactive network management.\n";
		break;
	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
		obj_str = "MRP Ring Role Configuration";
		problem = "Improper MRP ring role configuration may create conflicts in the ring,\n"
			  "disrupting communication for all participants, or isolate the local\n"
			  "system from the ring, hindering its ability to communicate with other\n"
			  "participants.\n";
		break;
	case SWITCHDEV_OBJ_ID_RING_STATE_MRP:
		obj_str = "MRP Ring State Configuration";
		problem = "Failure to correctly set the MRP ring state can result in network\n"
			  "loops or leave segments without communication. In a Closed state,\n"
			  "it maintains loop prevention by blocking one MRM port, while an Open\n"
			  "state activates in response to failures, changing port states to\n"
			  "preserve network connectivity.\n";
		break;
	case SWITCHDEV_OBJ_ID_IN_TEST_MRP:
		obj_str = "MRP_InTest Frame Generation Configuration";
		problem = "Failure in managing MRP_InTest frame generation can misjudge the\n"
			  "interconnection ring's state, leading to incorrect blocking or\n"
			  "unblocking of the I/C port. This misconfiguration might result\n"
			  "in unintended network loops or isolate critical network segments,\n"
			  "compromising network integrity and reliability.\n";
		break;
	case SWITCHDEV_OBJ_ID_IN_ROLE_MRP:
		obj_str = "Interconnection Ring Role Configuration";
		problem = "Incorrect assignment of interconnection ring roles (MIM/MIC) can\n"
			  "impair the formation of the interconnection rings.\n";
		break;
	case SWITCHDEV_OBJ_ID_IN_STATE_MRP:
		obj_str = "Interconnection Ring State Configuration";
		problem = "Failure in updating the interconnection ring state can, in case of\n"
			  "an Open state, lead to incorrect blocking or unblocking of the\n"
			  "I/C port, resulting in unintended network loops or isolation\n"
			  "of critical network segments.\n";
		break;
	default:
		obj_str = "Unknown object";
		problem = "Indicating a possible programming error.\n";
	}

	switch (err) {
	case -ENOSPC:
		reason = "Current HW/SW setup lacks sufficient resources.\n";
		break;
	}

	netdev_err(dev, "Failed to %s %s (object id=%d) with error: %pe (%d).\n%s%s\n",
		   action, obj_str, obj_id, ERR_PTR(err), err, problem, reason);
}
static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	ASSERT_RTNL();
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		switchdev_obj_id_to_helpful_msg(dev, obj->id, err, true);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}
static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}
/**
 *	switchdev_port_obj_add - Add port object
 *
 *	@dev: port device
 *	@obj: object to add
 *	@extack: netlink extended ack
 *
 *	rtnl_lock must be held and the caller must not be in an atomic
 *	section, in case the SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					 dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
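
/* Usage sketch (illustrative): adding a VLAN object to a port. The wrapper
 * struct embeds the generic switchdev_obj, which is what gets passed down:
 *
 *	struct switchdev_obj_port_vlan vlan = {
 *		.obj.orig_dev = brport_dev,
 *		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 *		.obj.flags = SWITCHDEV_F_DEFER,
 *		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
 *		.vid = 10,
 *	};
 *
 *	err = switchdev_port_obj_add(dev, &vlan.obj, extack);
 */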
static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL);
}
static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		switchdev_obj_id_to_helpful_msg(dev, obj->id, err, false);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}
static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}
/**
 *	switchdev_port_obj_del - Delete port object
 *
 *	@dev: port device
 *	@obj: object to delete
 *
 *	rtnl_lock must be held and the caller must not be in an atomic
 *	section, in case the SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
/**
 *	switchdev_port_obj_act_is_deferred - Is object action pending?
 *
 *	@dev: port device
 *	@nt: type of action; add or delete
 *	@obj: object to test
 *
 *	Returns true if a deferred item is pending that is equivalent to
 *	performing action @nt on object @obj.
 *
 *	rtnl_lock must be held.
 */
bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
					enum switchdev_notifier_type nt,
					const struct switchdev_obj *obj)
{
	struct switchdev_deferred_item *dfitem;
	bool found = false;

	ASSERT_RTNL();

	spin_lock_bh(&deferred_lock);

	list_for_each_entry(dfitem, &deferred, list) {
		if (dfitem->dev != dev)
			continue;

		if ((dfitem->func == switchdev_port_obj_add_deferred &&
		     nt == SWITCHDEV_PORT_OBJ_ADD) ||
		    (dfitem->func == switchdev_port_obj_del_deferred &&
		     nt == SWITCHDEV_PORT_OBJ_DEL)) {
			if (switchdev_obj_eq((const void *)dfitem->data, obj)) {
				found = true;
				break;
			}
		}
	}

	spin_unlock_bh(&deferred_lock);

	return found;
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_act_is_deferred);
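
/* Usage sketch (illustrative, "vlan" is a hypothetical caller variable):
 * before queueing a deferred deletion, a caller may test whether the
 * matching addition is still pending and short-circuit the round trip:
 *
 *	if (switchdev_port_obj_act_is_deferred(dev, SWITCHDEV_PORT_OBJ_ADD,
 *					       &vlan->obj)) {
 *		// the equivalent add has not reached the driver yet
 *	}
 */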
static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);
/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
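
/* Usage sketch (illustrative, "foo_*" names are hypothetical): drivers
 * subscribe to the atomic chain with an ordinary notifier block:
 *
 *	static int foo_switchdev_event(struct notifier_block *nb,
 *				       unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
 *
 *		...
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block foo_switchdev_nb = {
 *		.notifier_call = foo_switchdev_event,
 *	};
 *
 *	err = register_switchdev_notifier(&foo_switchdev_nb);
 */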
/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *	@extack: netlink extended ack
 *	Call all switchdev notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);
int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);
int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
struct switchdev_nested_priv {
	bool (*check_cb)(const struct net_device *dev);
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	const struct net_device *dev;
	struct net_device *lower_dev;
};
static int switchdev_lower_dev_walk(struct net_device *lower_dev,
				    struct netdev_nested_priv *priv)
{
	struct switchdev_nested_priv *switchdev_priv = priv->data;
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	bool (*check_cb)(const struct net_device *dev);
	const struct net_device *dev;

	check_cb = switchdev_priv->check_cb;
	foreign_dev_check_cb = switchdev_priv->foreign_dev_check_cb;
	dev = switchdev_priv->dev;

	if (check_cb(lower_dev) && !foreign_dev_check_cb(lower_dev, dev)) {
		switchdev_priv->lower_dev = lower_dev;
		return 1;
	}

	return 0;
}
static struct net_device *
switchdev_lower_dev_find_rcu(struct net_device *dev,
			     bool (*check_cb)(const struct net_device *dev),
			     bool (*foreign_dev_check_cb)(const struct net_device *dev,
							  const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}
static struct net_device *
switchdev_lower_dev_find(struct net_device *dev,
			 bool (*check_cb)(const struct net_device *dev),
			 bool (*foreign_dev_check_cb)(const struct net_device *dev,
						      const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}
static int __switchdev_handle_fdb_event_to_device(struct net_device *dev,
		struct net_device *orig_dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info))
{
	const struct switchdev_notifier_info *info = &fdb_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev))
		return mod_cb(dev, orig_dev, event, info->ctx, fdb_info);

	/* Recurse through lower interfaces in case the FDB entry is pointing
	 * towards a bridge or a LAG device.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		/* Do not propagate FDB entries across bridges */
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* Bridge ports might be either us, or LAG interfaces
		 * that carry our netdevices beneath.
		 */
		if (!check_cb(lower_dev) &&
		    !switchdev_lower_dev_find_rcu(lower_dev, check_cb,
						  foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev,
							     event, fdb_info, check_cb,
							     foreign_dev_check_cb,
							     mod_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	br = netdev_master_upper_dev_get_rcu(dev);
	if (!br || !netif_is_bridge_master(br))
		return 0;

	switchdev = switchdev_lower_dev_find_rcu(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return 0;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info,
						      check_cb, foreign_dev_check_cb,
						      mod_cb);
}
int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info))
{
	int err;

	err = __switchdev_handle_fdb_event_to_device(dev, dev, event, fdb_info,
						     check_cb, foreign_dev_check_cb,
						     mod_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_event_to_device);
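
/* Usage sketch (illustrative, "foo_*" names are hypothetical): drivers
 * typically call this from their atomic notifier when handling
 * SWITCHDEV_FDB_ADD_TO_DEVICE / SWITCHDEV_FDB_DEL_TO_DEVICE:
 *
 *	err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
 *						   foo_dev_check,
 *						   foo_foreign_dev_check,
 *						   foo_fdb_event);
 *	return notifier_from_errno(err);
 */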
static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct netlink_ext_ack *extack;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, since another driver might be able to handle
	 * them, but propagate any hard errors to the callers.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again, it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return err;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_port_obj_add(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, add_cb);
}
/* Pass through a port object addition, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      NULL, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
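
/* Usage sketch (illustrative, "foo_*" names are hypothetical): a driver's
 * blocking notifier would dispatch object additions through this helper so
 * that objects notified on a LAG above the switch ports are replicated:
 *
 *	case SWITCHDEV_PORT_OBJ_ADD:
 *		err = switchdev_handle_port_obj_add(dev, ptr, foo_dev_check,
 *						    foo_port_obj_add);
 *		return notifier_from_errno(err);
 */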
/* Same as switchdev_handle_port_obj_add(), except if object is notified on a
 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 * that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add_foreign);
static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = del_cb(dev, info->ctx, port_obj_info->obj);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, since another driver might be able to handle
	 * them, but propagate any hard errors to the callers.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again, it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return err;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_port_obj_del(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, del_cb);
}
/* Pass through a port object deletion, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      NULL, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
/* Same as switchdev_handle_port_obj_del(), except if object is notified on a
 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 * that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_del_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del_foreign);
static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_attr_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
		if (err != -EOPNOTSUPP)
			port_attr_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, since another driver might be able to handle
	 * them, but propagate any hard errors to the callers.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}
int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
					       set_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
int switchdev_bridge_port_offload(struct net_device *brport_dev,
				  struct net_device *dev, const void *ctx,
				  struct notifier_block *atomic_nb,
				  struct notifier_block *blocking_nb,
				  bool tx_fwd_offload,
				  struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
			.tx_fwd_offload = tx_fwd_offload,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_OFFLOADED,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_offload);
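
/* Usage sketch (illustrative, "foo_*" names are hypothetical): typically
 * called when a switch port joins a bridge, reusing the notifier blocks the
 * driver registered at probe time:
 *
 *	err = switchdev_bridge_port_offload(brport_dev, dev, priv,
 *					    &foo_switchdev_nb,
 *					    &foo_switchdev_blocking_nb,
 *					    false, extack);
 */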
void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
				     const void *ctx,
				     struct notifier_block *atomic_nb,
				     struct notifier_block *blocking_nb)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};

	ASSERT_RTNL();

	call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_UNOFFLOADED,
					  brport_dev, &brport_info.info,
					  NULL);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_unoffload);
int switchdev_bridge_port_replay(struct net_device *brport_dev,
				 struct net_device *dev, const void *ctx,
				 struct notifier_block *atomic_nb,
				 struct notifier_block *blocking_nb,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_REPLAY,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_replay);