// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <generated/utsrelease.h>
#include <linux/if_team.h>

#define DRV_NAME "team"

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
	struct team_port *port = rtnl_dereference(dev->rx_handler_data);

	return netif_is_team_port(dev) ? port : NULL;
}

/*
 * Since the ability to change device address for open port device is tested in
 * team_port_add, this function can be called without control of return value.
 */
static int __set_port_dev_addr(struct net_device *port_dev,
			       const unsigned char *dev_addr)
{
	struct sockaddr_storage addr;

	memcpy(addr.__data, dev_addr, port_dev->addr_len);
	addr.ss_family = port_dev->type;
	return dev_set_mac_address(port_dev, (struct sockaddr *)&addr, NULL);
}

static int team_port_set_orig_dev_addr(struct team_port *port)
{
	return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}

static int team_port_set_team_dev_addr(struct team *team,
				       struct team_port *port)
{
	return __set_port_dev_addr(port->dev, team->dev->dev_addr);
}

int team_modeop_port_enter(struct team *team, struct team_port *port)
{
	return team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_enter);

void team_modeop_port_change_dev_addr(struct team *team,
				      struct team_port *port)
{
	team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_change_dev_addr);

static void team_lower_state_changed(struct team_port *port)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = port->linkup;
	info.tx_enabled = team_port_enabled(port);
	netdev_lower_state_changed(port->dev, &info);
}

static void team_refresh_port_linkup(struct team_port *port)
{
	/* A user-forced link state takes precedence over the state the
	 * port device itself reports.
	 */
	bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
						      port->state.linkup;

	if (port->linkup != new_linkup) {
		port->linkup = new_linkup;
		team_lower_state_changed(port);
	}
}

struct team_option_inst { /* One for each option instance */
	struct list_head list;
	struct list_head tmp_list;
	struct team_option *option;
	struct team_option_inst_info info;
	bool changed;
	bool removed;
};

static struct team_option *__team_find_option(struct team *team,
					      const char *opt_name)
{
	struct team_option *option;

	list_for_each_entry(option, &team->option_list, list) {
		if (strcmp(option->name, opt_name) == 0)
			return option;
	}
	return NULL;
}

static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
	list_del(&opt_inst->list);
	kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
					  struct team_option *option)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option == option)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add(struct team *team, struct team_option *option,
				  struct team_port *port)
{
	struct team_option_inst *opt_inst;
	unsigned int array_size;
	unsigned int i;
	int err;

	array_size = option->array_size;
	if (!array_size)
		array_size = 1; /* No array but still need one instance */

	for (i = 0; i < array_size; i++) {
		opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
		if (!opt_inst)
			return -ENOMEM;
		opt_inst->option = option;
		opt_inst->info.port = port;
		opt_inst->info.array_index = i;
		opt_inst->changed = true;
		opt_inst->removed = false;
		list_add_tail(&opt_inst->list, &team->option_inst_list);
		if (option->init) {
			err = option->init(team, &opt_inst->info);
			if (err)
				return err;
		}
	}
	return 0;
}

static int __team_option_inst_add_option(struct team *team,
					 struct team_option *option)
{
	int err;

	if (!option->per_port) {
		err = __team_option_inst_add(team, option, NULL);
		if (err)
			goto inst_del_option;
	}
	return 0;

inst_del_option:
	__team_option_inst_del_option(team, option);
	return err;
}

static void __team_option_inst_mark_removed_option(struct team *team,
						   struct team_option *option)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->option == option) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static void __team_option_inst_del_port(struct team *team,
					struct team_port *port)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option->per_port &&
		    opt_inst->info.port == port)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add_port(struct team *team,
				       struct team_port *port)
{
	struct team_option *option;
	int err;

	list_for_each_entry(option, &team->option_list, list) {
		if (!option->per_port)
			continue;
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_port;
	}
	return 0;

inst_del_port:
	__team_option_inst_del_port(team, port);
	return err;
}

static void __team_option_inst_mark_removed_port(struct team *team,
						 struct team_port *port)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->info.port == port) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static int __team_options_register(struct team *team,
				   const struct team_option *option,
				   size_t option_count)
{
	int i;
	struct team_option **dst_opts;
	int err;

	dst_opts = kcalloc(option_count, sizeof(struct team_option *),
			   GFP_KERNEL);
	if (!dst_opts)
		return -ENOMEM;
	for (i = 0; i < option_count; i++, option++) {
		if (__team_find_option(team, option->name)) {
			err = -EEXIST;
			goto alloc_rollback;
		}
		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
		if (!dst_opts[i]) {
			err = -ENOMEM;
			goto alloc_rollback;
		}
	}

	for (i = 0; i < option_count; i++) {
		err = __team_option_inst_add_option(team, dst_opts[i]);
		if (err)
			goto inst_rollback;
		list_add_tail(&dst_opts[i]->list, &team->option_list);
	}

	kfree(dst_opts);
	return 0;

inst_rollback:
	for (i--; i >= 0; i--)
		__team_option_inst_del_option(team, dst_opts[i]);

	i = option_count - 1;
alloc_rollback:
	for (i--; i >= 0; i--)
		kfree(dst_opts[i]);

	kfree(dst_opts);
	return err;
}

static void __team_options_mark_removed(struct team *team,
					const struct team_option *option,
					size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt)
			__team_option_inst_mark_removed_option(team, del_opt);
	}
}

static void __team_options_unregister(struct team *team,
				      const struct team_option *option,
				      size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt) {
			__team_option_inst_del_option(team, del_opt);
			list_del(&del_opt->list);
			kfree(del_opt);
		}
	}
}

static void __team_options_change_check(struct team *team);

int team_options_register(struct team *team,
			  const struct team_option *option,
			  size_t option_count)
{
	int err;

	err = __team_options_register(team, option, option_count);
	if (err)
		return err;
	__team_options_change_check(team);
	return 0;
}
EXPORT_SYMBOL(team_options_register);

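/*
 * Example: how a mode module might define and register a private option.
 * This is a minimal sketch only; the option name and the getter below are
 * hypothetical, not part of this driver:
 *
 *	static int foo_burst_get(struct team *team, struct team_gsetter_ctx *ctx)
 *	{
 *		ctx->data.u32_val = 42;
 *		return 0;
 *	}
 *
 *	static const struct team_option foo_options[] = {
 *		{
 *			.name	= "burst",
 *			.type	= TEAM_OPTION_TYPE_U32,
 *			.getter	= foo_burst_get,
 *		},
 *	};
 *
 *	err = team_options_register(team, foo_options, ARRAY_SIZE(foo_options));
 */
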
void team_options_unregister(struct team *team,
			     const struct team_option *option,
			     size_t option_count)
{
	__team_options_mark_removed(team, option, option_count);
	__team_options_change_check(team);
	__team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);

static int team_option_get(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->getter)
		return -EOPNOTSUPP;
	return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->setter)
		return -EOPNOTSUPP;
	return opt_inst->option->setter(team, ctx);
}

void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
	struct team_option_inst *opt_inst;

	opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
	opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
	__team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);

static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

struct team_mode_item {
	struct list_head list;
	const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
	struct team_mode_item *mitem;

	list_for_each_entry(mitem, &mode_list, list) {
		if (strcmp(mitem->mode->kind, kind) == 0)
			return mitem;
	}
	return NULL;
}

static bool is_good_mode_name(const char *name)
{
	while (*name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
			return false;
		name++;
	}
	return true;
}

int team_mode_register(const struct team_mode *mode)
{
	int err = 0;
	struct team_mode_item *mitem;

	if (!is_good_mode_name(mode->kind) ||
	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
		return -EINVAL;

	mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
	if (!mitem)
		return -ENOMEM;

	spin_lock(&mode_list_lock);
	if (__find_mode(mode->kind)) {
		err = -EEXIST;
		kfree(mitem);
		goto unlock;
	}
	mitem->mode = mode;
	list_add_tail(&mitem->list, &mode_list);
unlock:
	spin_unlock(&mode_list_lock);
	return err;
}
EXPORT_SYMBOL(team_mode_register);

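/*
 * Example: a mode module registers itself from its module init. This sketch
 * is modeled on the bundled round-robin mode; the names are illustrative:
 *
 *	static const struct team_mode rr_mode = {
 *		.kind		= "roundrobin",
 *		.owner		= THIS_MODULE,
 *		.priv_size	= sizeof(struct rr_priv),
 *		.ops		= &rr_mode_ops,
 *	};
 *
 *	static int __init rr_init_module(void)
 *	{
 *		return team_mode_register(&rr_mode);
 *	}
 */
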
void team_mode_unregister(const struct team_mode *mode)
{
	struct team_mode_item *mitem;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(mode->kind);
	if (mitem) {
		list_del_init(&mitem->list);
		kfree(mitem);
	}
	spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);

static const struct team_mode *team_mode_get(const char *kind)
{
	struct team_mode_item *mitem;
	const struct team_mode *mode = NULL;

	if (!try_module_get(THIS_MODULE))
		return NULL;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(kind);
	if (!mitem) {
		spin_unlock(&mode_list_lock);
		request_module("team-mode-%s", kind);
		spin_lock(&mode_list_lock);
		mitem = __find_mode(kind);
	}
	if (mitem) {
		mode = mitem->mode;
		if (!try_module_get(mode->owner))
			mode = NULL;
	}

	spin_unlock(&mode_list_lock);
	module_put(THIS_MODULE);
	return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
	module_put(mode->owner);
}

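/*
 * Note: the request_module("team-mode-%s", kind) call above relies on mode
 * modules declaring a matching "team-mode-<kind>" module alias (the
 * MODULE_ALIAS_TEAM_MODE() helper in <linux/if_team.h>), so an unknown
 * mode can be auto-loaded on first use.
 */
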
static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
					      struct team_port *port,
					      struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
	.kind		= "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
	return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
	team->user_carrier_enabled = false;
	team->mode = &__team_no_mode;
}

static void team_adjust_ops(struct team *team)
{
	/*
	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
	 * correct ops are always set.
	 */

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->transmit)
		team->ops.transmit = team_dummy_transmit;
	else
		team->ops.transmit = team->mode->ops->transmit;

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->receive)
		team->ops.receive = team_dummy_receive;
	else
		team->ops.receive = team->mode->ops->receive;
}

/*
 * We can benefit from the fact that it's ensured no port is present
 * at the time of mode change. Therefore no packets are in flight, so there's
 * no need to set mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
			      const struct team_mode *new_mode)
{
	/* Check if mode was previously set and do cleanup if so */
	if (team_is_mode_set(team)) {
		void (*exit_op)(struct team *team) = team->ops.exit;

		/* Clear ops area so no callback is called any longer */
		memset(&team->ops, 0, sizeof(struct team_mode_ops));
		team_adjust_ops(team);

		if (exit_op)
			exit_op(team);
		team_mode_put(team->mode);
		team_set_no_mode(team);
		/* zero private data area */
		memset(&team->mode_priv, 0,
		       sizeof(struct team) - offsetof(struct team, mode_priv));
	}

	if (!new_mode)
		return 0;

	if (new_mode->ops->init) {
		int err;

		err = new_mode->ops->init(team);
		if (err)
			return err;
	}

	team->mode = new_mode;
	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
	team_adjust_ops(team);

	return 0;
}

static int team_change_mode(struct team *team, const char *kind)
{
	const struct team_mode *new_mode;
	struct net_device *dev = team->dev;
	int err;

	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "No ports can be present during mode change\n");
		return -EBUSY;
	}

	if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
		netdev_err(dev, "Unable to change to the same mode the team is in\n");
		return -EINVAL;
	}

	new_mode = team_mode_get(kind);
	if (!new_mode) {
		netdev_err(dev, "Mode \"%s\" not found\n", kind);
		return -EINVAL;
	}

	err = __team_change_mode(team, new_mode);
	if (err) {
		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
		team_mode_put(new_mode);
		return err;
	}

	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
	return 0;
}

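/*
 * From userspace the mode is simply the "mode" string option, so with the
 * libteam tooling a change boils down to something like (sketch; the exact
 * CLI syntax depends on your libteam version):
 *
 *	ip link add name team0 type team
 *	teamnl team0 setoption mode activebackup
 */
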
/*********************
 * Peers notification
 *********************/

static void team_notify_peers_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, notify_peers.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->notify_peers.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->notify_peers.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->notify_peers.dw,
				      msecs_to_jiffies(team->notify_peers.interval));
}

static void team_notify_peers(struct team *team)
{
	if (!team->notify_peers.count || !netif_running(team->dev))
		return;
	atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
	schedule_delayed_work(&team->notify_peers.dw, 0);
}

static void team_notify_peers_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
}

static void team_notify_peers_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->notify_peers.dw);
}

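/*
 * Note: NETDEV_NOTIFY_PEERS is the same notifier used after e.g. live
 * migration; listeners typically react by sending gratuitous ARPs or
 * unsolicited NAs, which is what makes the notify_peers_count and
 * notify_peers_interval options useful after a failover.
 */
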
/*******************************
 * Send multicast group rejoins
 *******************************/

static void team_mcast_rejoin_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, mcast_rejoin.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->mcast_rejoin.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->mcast_rejoin.dw,
				      msecs_to_jiffies(team->mcast_rejoin.interval));
}

static void team_mcast_rejoin(struct team *team)
{
	if (!team->mcast_rejoin.count || !netif_running(team->dev))
		return;
	atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
	schedule_delayed_work(&team->mcast_rejoin.dw, 0);
}

static void team_mcast_rejoin_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
}

static void team_mcast_rejoin_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->mcast_rejoin.dw);
}

/************************
 * Rx path frame handler
 ************************/

/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct team_port *port;
	struct team *team;
	rx_handler_result_t res;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	port = team_port_get_rcu(skb->dev);
	team = port->team;
	if (!team_port_enabled(port)) {
		/* allow exact match delivery for disabled ports */
		res = RX_HANDLER_EXACT;
	} else {
		res = team->ops.receive(team, port, skb);
	}
	if (res == RX_HANDLER_ANOTHER) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->rx_packets++;
		pcpu_stats->rx_bytes += skb->len;
		if (skb->pkt_type == PACKET_MULTICAST)
			pcpu_stats->rx_multicast++;
		u64_stats_update_end(&pcpu_stats->syncp);

		skb->dev = team->dev;
	} else if (res == RX_HANDLER_EXACT) {
		this_cpu_inc(team->pcpu_stats->rx_nohandler);
	} else {
		this_cpu_inc(team->pcpu_stats->rx_dropped);
	}

	return res;
}

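/*
 * The rx_handler_result_t values used above follow the generic rx_handler
 * contract from <linux/netdevice.h>: RX_HANDLER_ANOTHER makes the stack
 * reprocess the skb as if it arrived on the team device, RX_HANDLER_EXACT
 * delivers it only to exact-match protocol handlers on the port itself,
 * and RX_HANDLER_CONSUMED means the mode already freed or queued the skb.
 */
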
/*************************************
 * Multiqueue Tx port select override
 *************************************/

static int team_queue_override_init(struct team *team)
{
	struct list_head *listarr;
	unsigned int queue_cnt = team->dev->num_tx_queues - 1;
	unsigned int i;

	if (!queue_cnt)
		return 0;
	listarr = kmalloc_array(queue_cnt, sizeof(struct list_head),
				GFP_KERNEL);
	if (!listarr)
		return -ENOMEM;
	team->qom_lists = listarr;
	for (i = 0; i < queue_cnt; i++)
		INIT_LIST_HEAD(listarr++);
	return 0;
}

static void team_queue_override_fini(struct team *team)
{
	kfree(team->qom_lists);
}

static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
{
	return &team->qom_lists[queue_id - 1];
}

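/*
 * Queue 0 never participates in the override (a zero queue_mapping means
 * "no override" in team_queue_override_transmit() below), which is why only
 * num_tx_queues - 1 lists are allocated and queue_id N maps to
 * qom_lists[N - 1].
 */
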
/*
 * note: already called with rcu_read_lock
 */
static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
{
	struct list_head *qom_list;
	struct team_port *port;

	if (!team->queue_override_enabled || !skb->queue_mapping)
		return false;
	qom_list = __team_get_qom_list(team, skb->queue_mapping);
	list_for_each_entry_rcu(port, qom_list, qom_list) {
		if (!team_dev_queue_xmit(team, port, skb))
			return true;
	}
	return false;
}

static void __team_queue_override_port_del(struct team *team,
					   struct team_port *port)
{
	if (!port->queue_id)
		return;
	list_del_rcu(&port->qom_list);
}

static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
						      struct team_port *cur)
{
	if (port->priority < cur->priority)
		return true;
	if (port->priority > cur->priority)
		return false;
	if (port->index < cur->index)
		return true;
	return false;
}

static void __team_queue_override_port_add(struct team *team,
					   struct team_port *port)
{
	struct team_port *cur;
	struct list_head *qom_list;
	struct list_head *node;

	if (!port->queue_id)
		return;
	qom_list = __team_get_qom_list(team, port->queue_id);
	node = qom_list;
	list_for_each_entry(cur, qom_list, qom_list) {
		if (team_queue_override_port_has_gt_prio_than(port, cur))
			break;
		node = &cur->qom_list;
	}
	list_add_tail_rcu(&port->qom_list, node);
}

static void __team_queue_override_enabled_check(struct team *team)
{
	struct team_port *port;
	bool enabled = false;

	list_for_each_entry(port, &team->port_list, list) {
		if (port->queue_id) {
			enabled = true;
			break;
		}
	}
	if (enabled == team->queue_override_enabled)
		return;
	netdev_dbg(team->dev, "%s queue override\n",
		   enabled ? "Enabling" : "Disabling");
	team->queue_override_enabled = enabled;
}

static void team_queue_override_port_prio_changed(struct team *team,
						  struct team_port *port)
{
	if (!port->queue_id || team_port_enabled(port))
		return;
	__team_queue_override_port_del(team, port);
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_change_queue_id(struct team *team,
						     struct team_port *port,
						     u16 new_queue_id)
{
	if (team_port_enabled(port)) {
		__team_queue_override_port_del(team, port);
		port->queue_id = new_queue_id;
		__team_queue_override_port_add(team, port);
		__team_queue_override_enabled_check(team);
	} else {
		port->queue_id = new_queue_id;
	}
}

static void team_queue_override_port_add(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_del(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_del(team, port);
	__team_queue_override_enabled_check(team);
}

static bool team_port_find(const struct team *team,
			   const struct team_port *port)
{
	struct team_port *cur;

	list_for_each_entry(cur, &team->port_list, list)
		if (cur == port)
			return true;
	return false;
}

/*
 * Enable/disable port by adding to enabled port hashlist and setting
 * port->index (Might be racy so reader could see incorrect ifindex when
 * processing a flying packet, but that is not a problem). Write guarded
 * by team->lock.
 */
static void team_port_enable(struct team *team,
			     struct team_port *port)
{
	if (team_port_enabled(port))
		return;
	port->index = team->en_port_count++;
	hlist_add_head_rcu(&port->hlist,
			   team_port_index_hash(team, port->index));
	team_adjust_ops(team);
	team_queue_override_port_add(team, port);
	if (team->ops.port_enabled)
		team->ops.port_enabled(team, port);
	team_notify_peers(team);
	team_mcast_rejoin(team);
	team_lower_state_changed(port);
}

static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
	int i;
	struct team_port *port;

	for (i = rm_index + 1; i < team->en_port_count; i++) {
		port = team_get_port_by_index(team, i);
		hlist_del_rcu(&port->hlist);
		port->index--;
		hlist_add_head_rcu(&port->hlist,
				   team_port_index_hash(team, port->index));
	}
}

static void team_port_disable(struct team *team,
			      struct team_port *port)
{
	if (!team_port_enabled(port))
		return;
	if (team->ops.port_disabled)
		team->ops.port_disabled(team, port);
	hlist_del_rcu(&port->hlist);
	__reconstruct_port_hlist(team, port->index);
	port->index = -1;
	team->en_port_count--;
	team_queue_override_port_del(team, port);
	team_adjust_ops(team);
	team_lower_state_changed(port);
}

#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
			    NETIF_F_HIGHDMA | NETIF_F_LRO)

#define TEAM_ENC_FEATURES	(NETIF_F_HW_CSUM | NETIF_F_SG | \
				 NETIF_F_RXCSUM | NETIF_F_ALL_TSO)

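/*
 * These two masks bound what __team_compute_features() below will advertise:
 * per-port feature sets are folded together with netdev_increment_features()
 * and never exceed TEAM_VLAN_FEATURES / TEAM_ENC_FEATURES respectively.
 */
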
static void __team_compute_features(struct team *team)
{
	struct team_port *port;
	netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
					  NETIF_F_ALL_FOR_ALL;
	netdev_features_t enc_features  = TEAM_ENC_FEATURES;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;

	list_for_each_entry(port, &team->port_list, list) {
		vlan_features = netdev_increment_features(vlan_features,
					port->dev->vlan_features,
					TEAM_VLAN_FEATURES);
		enc_features =
			netdev_increment_features(enc_features,
						  port->dev->hw_enc_features,
						  TEAM_ENC_FEATURES);

		dst_release_flag &= port->dev->priv_flags;
		if (port->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = port->dev->hard_header_len;
	}

	team->dev->vlan_features = vlan_features;
	team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
				     NETIF_F_HW_VLAN_CTAG_TX |
				     NETIF_F_HW_VLAN_STAG_TX;
	team->dev->hard_header_len = max_hard_header_len;

	team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
}

static void team_compute_features(struct team *team)
{
	mutex_lock(&team->lock);
	__team_compute_features(team);
	mutex_unlock(&team->lock);
	netdev_change_features(team->dev);
}

static int team_port_enter(struct team *team, struct team_port *port)
{
	int err = 0;

	dev_hold(team->dev);
	if (team->ops.port_enter) {
		err = team->ops.port_enter(team, port);
		if (err) {
			netdev_err(team->dev, "Device %s failed to enter team mode\n",
				   port->dev->name);
			goto err_port_enter;
		}
	}

	return 0;

err_port_enter:
	dev_put(team->dev);

	return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
	if (team->ops.port_leave)
		team->ops.port_leave(team, port);
	dev_put(team->dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int __team_port_enable_netpoll(struct team_port *port)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, port->dev);
	if (err) {
		kfree(np);
		return err;
	}
	port->np = np;
	return err;
}

static int team_port_enable_netpoll(struct team_port *port)
{
	if (!port->team->dev->npinfo)
		return 0;

	return __team_port_enable_netpoll(port);
}

static void team_port_disable_netpoll(struct team_port *port)
{
	struct netpoll *np = port->np;

	if (!np)
		return;
	port->np = NULL;

	__netpoll_free(np);
}
#else
static int team_port_enable_netpoll(struct team_port *port)
{
	return 0;
}
static void team_port_disable_netpoll(struct team_port *port)
{
}
#endif

static int team_upper_dev_link(struct team *team, struct team_port *port,
			       struct netlink_ext_ack *extack)
{
	struct netdev_lag_upper_info lag_upper_info;
	int err;

	lag_upper_info.tx_type = team->mode->lag_tx_type;
	lag_upper_info.hash_type = NETDEV_LAG_HASH_UNKNOWN;
	err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
					   &lag_upper_info, extack);
	if (err)
		return err;
	port->dev->priv_flags |= IFF_TEAM_PORT;
	return 0;
}

static void team_upper_dev_unlink(struct team *team, struct team_port *port)
{
	netdev_upper_dev_unlink(port->dev, team->dev);
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
}

static void __team_port_change_port_added(struct team_port *port, bool linkup);
static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev);

static int team_port_add(struct team *team, struct net_device *port_dev,
			 struct netlink_ext_ack *extack)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;
	int err;

	if (port_dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack, "Loopback device can't be added as a team port");
		netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
			   portname);
		return -EINVAL;
	}

	if (netif_is_team_port(port_dev)) {
		NL_SET_ERR_MSG(extack, "Device is already a port of a team device");
		netdev_err(dev, "Device %s is already a port "
				"of a team device\n", portname);
		return -EBUSY;
	}

	if (dev == port_dev) {
		NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
		netdev_err(dev, "Cannot enslave team device to itself\n");
		return -EINVAL;
	}

	if (netdev_has_upper_dev(dev, port_dev)) {
		NL_SET_ERR_MSG(extack, "Device is already an upper device of the team interface");
		netdev_err(dev, "Device %s is already an upper device of the team interface\n",
			   portname);
		return -EBUSY;
	}

	if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
	    vlan_uses_dev(dev)) {
		NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
		netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
			   portname);
		return -EPERM;
	}

	err = team_dev_type_check_change(dev, port_dev);
	if (err)
		return err;

	if (port_dev->flags & IFF_UP) {
		NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port");
		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
			   portname);
		return -EBUSY;
	}

	port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
		       GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = port_dev;
	port->team = team;
	INIT_LIST_HEAD(&port->qom_list);

	port->orig.mtu = port_dev->mtu;
	err = dev_set_mtu(port_dev, dev->mtu);
	if (err) {
		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
		goto err_set_mtu;
	}

	memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);

	err = team_port_enter(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to enter team mode\n",
			   portname);
		goto err_port_enter;
	}

	err = dev_open(port_dev, extack);
	if (err) {
		netdev_dbg(dev, "Device %s opening failed\n",
			   portname);
		goto err_dev_open;
	}

	err = vlan_vids_add_by_dev(port_dev, dev);
	if (err) {
		netdev_err(dev, "Failed to add vlan ids to device %s\n",
			   portname);
		goto err_vids_add;
	}

	err = team_port_enable_netpoll(port);
	if (err) {
		netdev_err(dev, "Failed to enable netpoll on device %s\n",
			   portname);
		goto err_enable_netpoll;
	}

	if (!(dev->features & NETIF_F_LRO))
		dev_disable_lro(port_dev);

	err = netdev_rx_handler_register(port_dev, team_handle_frame,
					 port);
	if (err) {
		netdev_err(dev, "Device %s failed to register rx_handler\n",
			   portname);
		goto err_handler_register;
	}

	err = team_upper_dev_link(team, port, extack);
	if (err) {
		netdev_err(dev, "Device %s failed to set upper link\n",
			   portname);
		goto err_set_upper_link;
	}

	err = __team_option_inst_add_port(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to add per-port options\n",
			   portname);
		goto err_option_port_add;
	}

	/* set promiscuity level to new slave */
	if (dev->flags & IFF_PROMISC) {
		err = dev_set_promiscuity(port_dev, 1);
		if (err)
			goto err_set_slave_promisc;
	}

	/* set allmulti level to new slave */
	if (dev->flags & IFF_ALLMULTI) {
		err = dev_set_allmulti(port_dev, 1);
		if (err) {
			if (dev->flags & IFF_PROMISC)
				dev_set_promiscuity(port_dev, -1);
			goto err_set_slave_promisc;
		}
	}

	netif_addr_lock_bh(dev);
	dev_uc_sync_multiple(port_dev, dev);
	dev_mc_sync_multiple(port_dev, dev);
	netif_addr_unlock_bh(dev);

	port->index = -1;
	list_add_tail_rcu(&port->list, &team->port_list);
	team_port_enable(team, port);
	__team_compute_features(team);
	__team_port_change_port_added(port, !!netif_oper_up(port_dev));
	__team_options_change_check(team);

	netdev_info(dev, "Port device %s added\n", portname);

	return 0;

err_set_slave_promisc:
	__team_option_inst_del_port(team, port);

err_option_port_add:
	team_upper_dev_unlink(team, port);

err_set_upper_link:
	netdev_rx_handler_unregister(port_dev);

err_handler_register:
	team_port_disable_netpoll(port);

err_enable_netpoll:
	vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
	dev_close(port_dev);

err_dev_open:
	team_port_leave(team, port);
	team_port_set_orig_dev_addr(port);

err_port_enter:
	dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
	kfree(port);

	return err;
}

static void __team_port_change_port_removed(struct team_port *port);

static int team_port_del(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;

	port = team_port_get_rtnl(port_dev);
	if (!port || !team_port_find(team, port)) {
		netdev_err(dev, "Device %s does not act as a port of this team\n",
			   portname);
		return -ENOENT;
	}

	team_port_disable(team, port);
	list_del_rcu(&port->list);

	if (dev->flags & IFF_PROMISC)
		dev_set_promiscuity(port_dev, -1);
	if (dev->flags & IFF_ALLMULTI)
		dev_set_allmulti(port_dev, -1);

	team_upper_dev_unlink(team, port);
	netdev_rx_handler_unregister(port_dev);
	team_port_disable_netpoll(port);
	vlan_vids_del_by_dev(port_dev, dev);
	dev_uc_unsync(port_dev, dev);
	dev_mc_unsync(port_dev, dev);
	dev_close(port_dev);
	team_port_leave(team, port);

	__team_option_inst_mark_removed_port(team, port);
	__team_options_change_check(team);
	__team_option_inst_del_port(team, port);
	__team_port_change_port_removed(port);

	team_port_set_orig_dev_addr(port);
	dev_set_mtu(port_dev, port->orig.mtu);
	kfree_rcu(port, rcu);
	netdev_info(dev, "Port device %s removed\n", portname);
	__team_compute_features(team);

	return 0;
}

static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	ctx->data.str_val = team->mode->kind;
	return 0;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	return team_change_mode(team, ctx->data.str_val);
}

static int team_notify_peers_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.count;
	return 0;
}

static int team_notify_peers_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->notify_peers.count = ctx->data.u32_val;
	return 0;
}

static int team_notify_peers_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.interval;
	return 0;
}

static int team_notify_peers_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->notify_peers.interval = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.count;
	return 0;
}

static int team_mcast_rejoin_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.count = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.interval;
	return 0;
}

static int team_mcast_rejoin_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.interval = ctx->data.u32_val;
	return 0;
}

static int team_port_en_option_get(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = team_port_enabled(port);
	return 0;
}

static int team_port_en_option_set(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	if (ctx->data.bool_val)
		team_port_enable(team, port);
	else
		team_port_disable(team, port);
	return 0;
}

static int team_user_linkup_option_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup;
	return 0;
}

static void __team_carrier_check(struct team *team);

static int team_user_linkup_option_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup_enabled;
	return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup_enabled = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static int team_priority_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.s32_val = port->priority;
	return 0;
}

static int team_priority_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	s32 priority = ctx->data.s32_val;

	if (port->priority == priority)
		return 0;
	port->priority = priority;
	team_queue_override_port_prio_changed(team, port);
	return 0;
}

static int team_queue_id_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.u32_val = port->queue_id;
	return 0;
}

static int team_queue_id_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	u16 new_queue_id = ctx->data.u32_val;

	if (port->queue_id == new_queue_id)
		return 0;
	if (new_queue_id >= team->dev->real_num_tx_queues)
		return -EINVAL;
	team_queue_override_port_change_queue_id(team, port, new_queue_id);
	return 0;
}

static const struct team_option team_options[] = {
	{
		.name = "mode",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = team_mode_option_get,
		.setter = team_mode_option_set,
	},
	{
		.name = "notify_peers_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_count_get,
		.setter = team_notify_peers_count_set,
	},
	{
		.name = "notify_peers_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_interval_get,
		.setter = team_notify_peers_interval_set,
	},
	{
		.name = "mcast_rejoin_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_count_get,
		.setter = team_mcast_rejoin_count_set,
	},
	{
		.name = "mcast_rejoin_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_interval_get,
		.setter = team_mcast_rejoin_interval_set,
	},
	{
		.name = "enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_port_en_option_get,
		.setter = team_port_en_option_set,
	},
	{
		.name = "user_linkup",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_option_get,
		.setter = team_user_linkup_option_set,
	},
	{
		.name = "user_linkup_enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_en_option_get,
		.setter = team_user_linkup_en_option_set,
	},
	{
		.name = "priority",
		.type = TEAM_OPTION_TYPE_S32,
		.per_port = true,
		.getter = team_priority_option_get,
		.setter = team_priority_option_set,
	},
	{
		.name = "queue_id",
		.type = TEAM_OPTION_TYPE_U32,
		.per_port = true,
		.getter = team_queue_id_option_get,
		.setter = team_queue_id_option_set,
	},
};

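/*
 * The per-port entries above ("enabled", "user_linkup", "user_linkup_enabled",
 * "priority", "queue_id") get one option instance per port, created by
 * __team_option_inst_add_port() when a port is added; the remaining options
 * exist once per team device.
 */
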
static int team_init(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	int i;
	int err;

	team->dev = dev;
	team_set_no_mode(team);

	team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
	if (!team->pcpu_stats)
		return -ENOMEM;

	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
	INIT_LIST_HEAD(&team->port_list);
	err = team_queue_override_init(team);
	if (err)
		goto err_team_queue_override_init;

	team_adjust_ops(team);

	INIT_LIST_HEAD(&team->option_list);
	INIT_LIST_HEAD(&team->option_inst_list);

	team_notify_peers_init(team);
	team_mcast_rejoin_init(team);

	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
	if (err)
		goto err_options_register;
	netif_carrier_off(dev);

	lockdep_register_key(&team->team_lock_key);
	__mutex_init(&team->lock, "team->team_lock_key", &team->team_lock_key);

	return 0;

err_options_register:
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
err_team_queue_override_init:
	free_percpu(team->pcpu_stats);

	return err;
}

static void team_uninit(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct team_port *tmp;

	mutex_lock(&team->lock);
	list_for_each_entry_safe(port, tmp, &team->port_list, list)
		team_port_del(team, port->dev);

	__team_change_mode(team, NULL); /* cleanup */
	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
	mutex_unlock(&team->lock);
	netdev_change_features(dev);
	lockdep_unregister_key(&team->team_lock_key);
}

static void team_destructor(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	free_percpu(team->pcpu_stats);
}

static int team_open(struct net_device *dev)
{
	return 0;
}

static int team_close(struct net_device *dev)
{
	return 0;
}

/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success = false;
	unsigned int len = skb->len;

	tx_success = team_queue_override_transmit(team, skb);
	if (!tx_success)
		tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}

static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev)
{
	/*
	 * This helper function exists to help dev_pick_tx get the correct
	 * destination queue. Using a helper function skips a call to
	 * skb_tx_hash and will put the skbs in the queue we expect on their
	 * way down to the team driver.
	 */
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

	/*
	 * Save the original txq to restore before passing to the driver
	 */
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;

	/* Cheap modulo: reduce txq into range by repeated subtraction */
	if (unlikely(txq >= dev->real_num_tx_queues)) {
		do {
			txq -= dev->real_num_tx_queues;
		} while (txq >= dev->real_num_tx_queues);
	}
	return txq;
}

static void team_change_rx_flags(struct net_device *dev, int change)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int inc;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (change & IFF_PROMISC) {
			inc = dev->flags & IFF_PROMISC ? 1 : -1;
			dev_set_promiscuity(port->dev, inc);
		}
		if (change & IFF_ALLMULTI) {
			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
			dev_set_allmulti(port->dev, inc);
		}
	}
	rcu_read_unlock();
}

static void team_set_rx_mode(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		dev_uc_sync_multiple(port->dev, dev);
		dev_mc_sync_multiple(port->dev, dev);
	}
	rcu_read_unlock();
}

static int team_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		if (team->ops.port_change_dev_addr)
			team->ops.port_change_dev_addr(team, port);
	mutex_unlock(&team->lock);
	return 0;
}

static int team_change_mtu(struct net_device *dev, int new_mtu)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is reader, it's guarded by team lock. It's not possible
	 * to traverse list in reverse under rcu_read_lock
	 */
	mutex_lock(&team->lock);
	team->port_mtu_change_allowed = true;
	list_for_each_entry(port, &team->port_list, list) {
		err = dev_set_mtu(port->dev, new_mtu);
		if (err) {
			netdev_err(dev, "Device %s failed to change mtu",
				   port->dev->name);
			goto unwind;
		}
	}
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	dev->mtu = new_mtu;

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		dev_set_mtu(port->dev, dev->mtu);
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	return err;
}

static void
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct team *team = netdev_priv(dev);
	struct team_pcpu_stats *p;
	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
	u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(team->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			rx_multicast	= p->rx_multicast;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->multicast	+= rx_multicast;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/*
		 * rx_dropped, tx_dropped & rx_nohandler are u32,
		 * updated without syncp protection.
		 */
		rx_dropped	+= p->rx_dropped;
		tx_dropped	+= p->tx_dropped;
		rx_nohandler	+= p->rx_nohandler;
	}
	stats->rx_dropped	= rx_dropped;
	stats->tx_dropped	= tx_dropped;
	stats->rx_nohandler	= rx_nohandler;
}

static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is reader, it's guarded by team lock. It's not possible
	 * to traverse list in reverse under rcu_read_lock
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = vlan_vid_add(port->dev, proto, vid);
		if (err)
			goto unwind;
	}
	mutex_unlock(&team->lock);

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return err;
}

static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void team_poll_controller(struct net_device *dev)
{
}

static void __team_netpoll_cleanup(struct team *team)
{
	struct team_port *port;

	list_for_each_entry(port, &team->port_list, list)
		team_port_disable_netpoll(port);
}

static void team_netpoll_cleanup(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	mutex_lock(&team->lock);
	__team_netpoll_cleanup(team);
	mutex_unlock(&team->lock);
}

static int team_netpoll_setup(struct net_device *dev,
			      struct netpoll_info *npifo)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err = 0;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = __team_port_enable_netpoll(port);
		if (err) {
			__team_netpoll_cleanup(team);
			break;
		}
	}
	mutex_unlock(&team->lock);
	return err;
}
#endif

static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
			  struct netlink_ext_ack *extack)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_add(team, port_dev, extack);
	mutex_unlock(&team->lock);

	if (!err)
		netdev_change_features(dev);

	return err;
}

static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_del(team, port_dev);
	mutex_unlock(&team->lock);

	if (err)
		return err;

	if (netif_is_team_master(port_dev)) {
		lockdep_unregister_key(&team->team_lock_key);
		lockdep_register_key(&team->team_lock_key);
		lockdep_set_class(&team->lock, &team->team_lock_key);
	}
	netdev_change_features(dev);

	return err;
}

static netdev_features_t team_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct team_port *port;
	struct team *team = netdev_priv(dev);
	netdev_features_t mask;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);
	}
	rcu_read_unlock();

	features = netdev_add_tso_features(features, mask);

	return features;
}

static int team_change_carrier(struct net_device *dev, bool new_carrier)
{
	struct team *team = netdev_priv(dev);

	team->user_carrier_enabled = true;

	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}

static const struct net_device_ops team_netdev_ops = {
	.ndo_init		= team_init,
	.ndo_uninit		= team_uninit,
	.ndo_open		= team_open,
	.ndo_stop		= team_close,
	.ndo_start_xmit		= team_xmit,
	.ndo_select_queue	= team_select_queue,
	.ndo_change_rx_flags	= team_change_rx_flags,
	.ndo_set_rx_mode	= team_set_rx_mode,
	.ndo_set_mac_address	= team_set_mac_address,
	.ndo_change_mtu		= team_change_mtu,
	.ndo_get_stats64	= team_get_stats64,
	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= team_poll_controller,
	.ndo_netpoll_setup	= team_netpoll_setup,
	.ndo_netpoll_cleanup	= team_netpoll_cleanup,
#endif
	.ndo_add_slave		= team_add_slave,
	.ndo_del_slave		= team_del_slave,
	.ndo_fix_features	= team_fix_features,
	.ndo_change_carrier	= team_change_carrier,
	.ndo_features_check	= passthru_features_check,
};

/***********************
 * ethtool interface
 ***********************/

static void team_ethtool_get_drvinfo(struct net_device *dev,
				     struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

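/*
 * Link settings are synthesized from the ports: speed is reported as the
 * sum of the speeds of all txable ports, and duplex is taken from the first
 * txable port that reports a known duplex (see below).
 */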
static int team_ethtool_get_link_ksettings(struct net_device *dev,
					   struct ethtool_link_ksettings *cmd)
{
	struct team *team = netdev_priv(dev);
	unsigned long speed = 0;
	struct team_port *port;

	cmd->base.duplex = DUPLEX_UNKNOWN;
	cmd->base.port = PORT_OTHER;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (team_port_txable(port)) {
			if (port->state.speed != SPEED_UNKNOWN)
				speed += port->state.speed;
			if (cmd->base.duplex == DUPLEX_UNKNOWN &&
			    port->state.duplex != DUPLEX_UNKNOWN)
				cmd->base.duplex = port->state.duplex;
		}
	}
	rcu_read_unlock();

	cmd->base.speed = speed ? : SPEED_UNKNOWN;

	return 0;
}

static const struct ethtool_ops team_ethtool_ops = {
	.get_drvinfo		= team_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= team_ethtool_get_link_ksettings,
};

/***********************
 * rt netlink interface
 ***********************/

static void team_setup_by_port(struct net_device *dev,
			       struct net_device *port_dev)
{
	dev->header_ops	= port_dev->header_ops;
	dev->type = port_dev->type;
	dev->hard_header_len = port_dev->hard_header_len;
	dev->addr_len = port_dev->addr_len;
	dev->mtu = port_dev->mtu;
	memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
	eth_hw_addr_inherit(dev, port_dev);
}

static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	char *portname = port_dev->name;
	int err;

	if (dev->type == port_dev->type)
		return 0;
	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "Device %s is of different type\n", portname);
		return -EBUSY;
	}
	err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
	err = notifier_to_errno(err);
	if (err) {
		netdev_err(dev, "Refused to change device type\n");
		return err;
	}
	dev_uc_flush(dev);
	dev_mc_flush(dev);
	team_setup_by_port(dev, port_dev);
	call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
	return 0;
}

static void team_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->max_mtu = ETH_MAX_MTU;

	dev->netdev_ops = &team_netdev_ops;
	dev->ethtool_ops = &team_ethtool_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = team_destructor;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_TEAM;

	/*
	 * Indicate we support unicast address filtering. That way core won't
	 * bring us to promisc mode in case a unicast addr is added.
	 * Let this up to underlay drivers.
	 */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GRO;

	/* Don't allow team devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	dev->hw_features = TEAM_VLAN_FEATURES |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
	dev->features |= dev->hw_features;
	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
}

static int team_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	return register_netdevice(dev);
}

static int team_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static unsigned int team_get_num_tx_queues(void)
{
	return TEAM_DEFAULT_NUM_TX_QUEUES;
}

static unsigned int team_get_num_rx_queues(void)
{
	return TEAM_DEFAULT_NUM_RX_QUEUES;
}

static struct rtnl_link_ops team_link_ops __read_mostly = {
	.kind			= DRV_NAME,
	.priv_size		= sizeof(struct team),
	.setup			= team_setup,
	.newlink		= team_newlink,
	.validate		= team_validate,
	.get_num_tx_queues	= team_get_num_tx_queues,
	.get_num_rx_queues	= team_get_num_rx_queues,
};

/***********************************
 * Generic netlink custom interface
 ***********************************/

static struct genl_family team_nl_family;

static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
	[TEAM_ATTR_UNSPEC]		= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_TEAM_IFINDEX]	= { .type = NLA_U32 },
	[TEAM_ATTR_LIST_OPTION]		= { .type = NLA_NESTED },
	[TEAM_ATTR_LIST_PORT]		= { .type = NLA_NESTED },
};

static const struct nla_policy
team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
	[TEAM_ATTR_OPTION_UNSPEC]		= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_OPTION_NAME] = {
		.type = NLA_STRING,
		.len = TEAM_STRING_MAX_LEN,
	},
	[TEAM_ATTR_OPTION_CHANGED]		= { .type = NLA_FLAG },
	[TEAM_ATTR_OPTION_TYPE]			= { .type = NLA_U8 },
	[TEAM_ATTR_OPTION_DATA]			= { .type = NLA_BINARY },
	[TEAM_ATTR_OPTION_PORT_IFINDEX]		= { .type = NLA_U32 },
	[TEAM_ATTR_OPTION_ARRAY_INDEX]		= { .type = NLA_U32 },
};

static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &team_nl_family, 0, TEAM_CMD_NOOP);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_msg_put;
	}

	genlmsg_end(msg, hdr);

	return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);

err_msg_put:
	nlmsg_free(msg);

	return err;
}

/*
 * Netlink cmd functions should be locked by following two functions.
 * Since dev gets held here, that ensures dev won't disappear in between.
 */
static struct team *team_nl_team_get(struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	int ifindex;
	struct net_device *dev;
	struct team *team;

	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
		return NULL;

	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
	dev = dev_get_by_index(net, ifindex);
	if (!dev || dev->netdev_ops != &team_netdev_ops) {
		if (dev)
			dev_put(dev);
		return NULL;
	}

	team = netdev_priv(dev);
	mutex_lock(&team->lock);
	return team;
}

static void team_nl_team_put(struct team *team)
{
	mutex_unlock(&team->lock);
	dev_put(team->dev);
}

typedef int team_nl_send_func_t(struct sk_buff *skb,
				struct team *team, u32 portid);

static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
{
	return genlmsg_unicast(dev_net(team->dev), skb, portid);
}

static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
				       struct team_option_inst *opt_inst)
{
	struct nlattr *option_item;
	struct team_option *option = opt_inst->option;
	struct team_option_inst_info *opt_inst_info = &opt_inst->info;
	struct team_gsetter_ctx ctx;
	int err;

	ctx.info = opt_inst_info;
	err = team_option_get(team, opt_inst, &ctx);
	if (err)
		return err;

	option_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_OPTION);
	if (!option_item)
		return -EMSGSIZE;

	if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
		goto nest_cancel;
	if (opt_inst_info->port &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
			opt_inst_info->port->dev->ifindex))
		goto nest_cancel;
	if (opt_inst->option->array_size &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
			opt_inst_info->array_index))
		goto nest_cancel;

	switch (option->type) {
	case TEAM_OPTION_TYPE_U32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
			goto nest_cancel;
		if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_STRING:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
			goto nest_cancel;
		if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
				   ctx.data.str_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BINARY:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
			goto nest_cancel;
		if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
			    ctx.data.bin_val.ptr))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BOOL:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
			goto nest_cancel;
		if (ctx.data.bool_val &&
		    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_S32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
			goto nest_cancel;
		if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
			goto nest_cancel;
		break;
	default:
		BUG();
	}
	if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
		goto nest_cancel;
	if (opt_inst->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
			goto nest_cancel;
		opt_inst->changed = false;
	}
	nla_nest_end(skb, option_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, option_item);
	return -EMSGSIZE;
}

static int __send_and_alloc_skb(struct sk_buff **pskb,
				struct team *team, u32 portid,
				team_nl_send_func_t *send_func)
{
	int err;

	if (*pskb) {
		err = send_func(*pskb, team, portid);
		if (err)
			return err;
	}
	*pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!*pskb)
		return -ENOMEM;
	return 0;
}

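/*
 * The helper above implements the multi-part dump pattern: if a partially
 * filled skb is pending, it is flushed via send_func() first, and a fresh
 * one is then allocated for the caller to continue filling. This is what
 * lets team_nl_send_options_get() below split an oversized option dump
 * across several NLM_F_MULTI messages.
 */
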
static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
				    int flags, team_nl_send_func_t *send_func,
				    struct list_head *sel_opt_inst_list)
{
	struct nlattr *option_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_option_inst *opt_inst;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	opt_inst = list_first_entry(sel_opt_inst_list,
				    struct team_option_inst, tmp_list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_OPTIONS_GET);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	option_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_OPTION);
	if (!option_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;
	list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
		err = team_nl_fill_one_option_get(skb, team, opt_inst);
		if (err) {
			if (err == -EMSGSIZE) {
				if (!i)
					goto errout;
				incomplete = true;
				break;
			}
			goto errout;
		}
		i++;
	}

	nla_nest_end(skb, option_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			return err;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	nlmsg_free(skb);
	return err;
}

static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	struct team_option_inst *opt_inst;
	int err;
	LIST_HEAD(sel_opt_inst_list);

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	list_for_each_entry(opt_inst, &team->option_inst_list, list)
		list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
				       NLM_F_ACK, team_nl_send_unicast,
				       &sel_opt_inst_list);

	team_nl_team_put(team);

	return err;
}

static int team_nl_send_event_options_get(struct team *team,
					  struct list_head *sel_opt_inst_list);

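/* Set one or more option instances from a nested TEAM_ATTR_LIST_OPTION
 * attribute. Each TEAM_ATTR_ITEM_OPTION is parsed and matched against
 * the existing option instance list by name, type, port ifindex and
 * array index; matching instances are updated via team_option_set()
 * and the changes are broadcast back as an options-get event.
 */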
static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err = 0;
	int i;
	struct nlattr *nl_option;

	rtnl_lock();

	team = team_nl_team_get(info);
	if (!team) {
		err = -EINVAL;
		goto rtnl_unlock;
	}

	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
		err = -EINVAL;
		goto team_put;
	}

	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
		struct nlattr *attr;
		struct nlattr *attr_data;
		LIST_HEAD(opt_inst_list);
		enum team_option_type opt_type;
		int opt_port_ifindex = 0; /* != 0 for per-port options */
		u32 opt_array_index = 0;
		bool opt_is_array = false;
		struct team_option_inst *opt_inst;
		char *opt_name;
		bool opt_found = false;

		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
			err = -EINVAL;
			goto team_put;
		}
		err = nla_parse_nested_deprecated(opt_attrs,
						  TEAM_ATTR_OPTION_MAX,
						  nl_option,
						  team_nl_option_policy,
						  info->extack);
		if (err)
			goto team_put;
		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
			err = -EINVAL;
			goto team_put;
		}
		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
		case NLA_U32:
			opt_type = TEAM_OPTION_TYPE_U32;
			break;
		case NLA_STRING:
			opt_type = TEAM_OPTION_TYPE_STRING;
			break;
		case NLA_BINARY:
			opt_type = TEAM_OPTION_TYPE_BINARY;
			break;
		case NLA_FLAG:
			opt_type = TEAM_OPTION_TYPE_BOOL;
			break;
		case NLA_S32:
			opt_type = TEAM_OPTION_TYPE_S32;
			break;
		default:
			goto team_put;
		}

		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
			err = -EINVAL;
			goto team_put;
		}

		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
		attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
		if (attr)
			opt_port_ifindex = nla_get_u32(attr);

		attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
		if (attr) {
			opt_is_array = true;
			opt_array_index = nla_get_u32(attr);
		}

		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
			struct team_option *option = opt_inst->option;
			struct team_gsetter_ctx ctx;
			struct team_option_inst_info *opt_inst_info;
			int tmp_ifindex;

			opt_inst_info = &opt_inst->info;
			tmp_ifindex = opt_inst_info->port ?
				      opt_inst_info->port->dev->ifindex : 0;
			if (option->type != opt_type ||
			    strcmp(option->name, opt_name) ||
			    tmp_ifindex != opt_port_ifindex ||
			    (option->array_size && !opt_is_array) ||
			    opt_inst_info->array_index != opt_array_index)
				continue;
			opt_found = true;
			ctx.info = opt_inst_info;
			switch (opt_type) {
			case TEAM_OPTION_TYPE_U32:
				ctx.data.u32_val = nla_get_u32(attr_data);
				break;
			case TEAM_OPTION_TYPE_STRING:
				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
					err = -EINVAL;
					goto team_put;
				}
				ctx.data.str_val = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BINARY:
				ctx.data.bin_val.len = nla_len(attr_data);
				ctx.data.bin_val.ptr = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BOOL:
				ctx.data.bool_val = attr_data ? true : false;
				break;
			case TEAM_OPTION_TYPE_S32:
				ctx.data.s32_val = nla_get_s32(attr_data);
				break;
			default:
				BUG();
			}
			err = team_option_set(team, opt_inst, &ctx);
			if (err)
				goto team_put;
			opt_inst->changed = true;
			list_add(&opt_inst->tmp_list, &opt_inst_list);
		}
		if (!opt_found) {
			err = -ENOENT;
			goto team_put;
		}

		err = team_nl_send_event_options_get(team, &opt_inst_list);
		if (err)
			break;
	}

team_put:
	team_nl_team_put(team);
rtnl_unlock:
	rtnl_unlock();
	return err;
}

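/* Emit one port as a nested TEAM_ATTR_ITEM_PORT attribute carrying
 * ifindex, changed/removed/linkup flags, speed and duplex.
 */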
static int team_nl_fill_one_port_get(struct sk_buff *skb,
				     struct team_port *port)
{
	struct nlattr *port_item;

	port_item = nla_nest_start_noflag(skb, TEAM_ATTR_ITEM_PORT);
	if (!port_item)
		goto nest_cancel;
	if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
		goto nest_cancel;
	if (port->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
			goto nest_cancel;
		port->changed = false;
	}
	if ((port->removed &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
	    (port->state.linkup &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
	    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
	    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
		goto nest_cancel;
	nla_nest_end(skb, port_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, port_item);
	return -EMSGSIZE;
}

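/* Dump port state, either for a single selected port or for every
 * port on the team, using the same flush-and-restart scheme as the
 * options dump above; the sequence is again terminated by NLMSG_DONE.
 */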
static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
				      int flags, team_nl_send_func_t *send_func,
				      struct team_port *one_port)
{
	struct nlattr *port_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_port *port;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	port = list_first_entry_or_null(&team->port_list,
					struct team_port, list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_PORT_LIST_GET);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	port_list = nla_nest_start_noflag(skb, TEAM_ATTR_LIST_PORT);
	if (!port_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;

	/* If one port is selected, the caller wants to send a port list
	 * containing only this port. Otherwise go through all listed ports
	 * and send them all.
	 */
	if (one_port) {
		err = team_nl_fill_one_port_get(skb, one_port);
		if (err)
			goto errout;
	} else if (port) {
		list_for_each_entry_from(port, &team->port_list, list) {
			err = team_nl_fill_one_port_get(skb, port);
			if (err) {
				if (err == -EMSGSIZE) {
					if (!i)
						goto errout;
					incomplete = true;
					break;
				}
				goto errout;
			}
			i++;
		}
	}

	nla_nest_end(skb, port_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			goto errout;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	nlmsg_free(skb);
	return err;
}

static int team_nl_cmd_port_list_get(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
					 NLM_F_ACK, team_nl_send_unicast, NULL);

	team_nl_team_put(team);

	return err;
}

static const struct genl_ops team_nl_ops[] = {
	{
		.cmd = TEAM_CMD_NOOP,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = team_nl_cmd_noop,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_SET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = team_nl_cmd_options_set,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = team_nl_cmd_options_get,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_PORT_LIST_GET,
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit = team_nl_cmd_port_list_get,
		.flags = GENL_ADMIN_PERM,
	},
};

static const struct genl_multicast_group team_nl_mcgrps[] = {
	{ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
};

static struct genl_family team_nl_family __ro_after_init = {
	.name		= TEAM_GENL_NAME,
	.version	= TEAM_GENL_VERSION,
	.maxattr	= TEAM_ATTR_MAX,
	.policy		= team_nl_policy,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= team_nl_ops,
	.n_ops		= ARRAY_SIZE(team_nl_ops),
	.mcgrps		= team_nl_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(team_nl_mcgrps),
};

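/* Events are multicast to the change-event group so that userspace
 * listeners (such as the teamd daemon) see option and port state
 * transitions without polling.
 */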
static int team_nl_send_multicast(struct sk_buff *skb,
				  struct team *team, u32 portid)
{
	return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
				       skb, 0, 0, GFP_KERNEL);
}

static int team_nl_send_event_options_get(struct team *team,
					  struct list_head *sel_opt_inst_list)
{
	return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
					sel_opt_inst_list);
}

static int team_nl_send_event_port_get(struct team *team,
				       struct team_port *port)
{
	return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
					  port);
}

static int __init team_nl_init(void)
{
	return genl_register_family(&team_nl_family);
}

static void team_nl_fini(void)
{
	genl_unregister_family(&team_nl_family);
}


/******************
 * Change checkers
 ******************/

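/* Collect all option instances flagged as changed and broadcast them
 * in one options-get event; -ESRCH (no multicast listeners) is not
 * treated as an error worth warning about.
 */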
static void __team_options_change_check(struct team *team)
{
	int err;
	struct team_option_inst *opt_inst;
	LIST_HEAD(sel_opt_inst_list);

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->changed)
			list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	}
	err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
	if (err && err != -ESRCH)
		netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
			    err);
}

/* rtnl lock is held */
static void __team_port_change_send(struct team_port *port, bool linkup)
{
	int err;

	port->changed = true;
	port->state.linkup = linkup;
	team_refresh_port_linkup(port);
	if (linkup) {
		struct ethtool_link_ksettings ecmd;

		err = __ethtool_get_link_ksettings(port->dev, &ecmd);
		if (!err) {
			port->state.speed = ecmd.base.speed;
			port->state.duplex = ecmd.base.duplex;
			goto send_event;
		}
	}
	port->state.speed = 0;
	port->state.duplex = 0;

send_event:
	err = team_nl_send_event_port_get(port->team, port);
	if (err && err != -ESRCH)
		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
			    port->dev->name, err);
}

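/* Reflect the aggregate port link state on the team device's carrier,
 * unless the user has taken over carrier control: the team is up if
 * at least one port reports linkup.
 */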
static void __team_carrier_check(struct team *team)
{
	struct team_port *port;
	bool team_linkup;

	if (team->user_carrier_enabled)
		return;

	team_linkup = false;
	list_for_each_entry(port, &team->port_list, list) {
		if (port->linkup) {
			team_linkup = true;
			break;
		}
	}

	if (team_linkup)
		netif_carrier_on(team->dev);
	else
		netif_carrier_off(team->dev);
}

static void __team_port_change_check(struct team_port *port, bool linkup)
{
	if (port->state.linkup != linkup)
		__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}

static void __team_port_change_port_added(struct team_port *port, bool linkup)
{
	__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}

static void __team_port_change_port_removed(struct team_port *port)
{
	port->removed = true;
	__team_port_change_send(port, false);
	__team_carrier_check(port->team);
}

static void team_port_change_check(struct team_port *port, bool linkup)
{
	struct team *team = port->team;

	mutex_lock(&team->lock);
	__team_port_change_check(port, linkup);
	mutex_unlock(&team->lock);
}


/************************************
 * Net device notifier event handler
 ************************************/

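/* React to state changes of enslaved port devices: propagate link
 * transitions, veto MTU/type changes on ports where that is not
 * allowed, and detach ports whose underlying device unregisters.
 */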
static int team_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct team_port *port;

	port = team_port_get_rtnl(dev);
	if (!port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (netif_oper_up(dev))
			team_port_change_check(port, true);
		break;
	case NETDEV_DOWN:
		team_port_change_check(port, false);
		break;
	case NETDEV_CHANGE:
		if (netif_running(port->dev))
			team_port_change_check(port,
					       !!netif_oper_up(port->dev));
		break;
	case NETDEV_UNREGISTER:
		team_del_slave(port->team->dev, dev);
		break;
	case NETDEV_FEAT_CHANGE:
		team_compute_features(port->team);
		break;
	case NETDEV_PRECHANGEMTU:
		/* Forbid changing the MTU of an underlying device */
		if (!port->team->port_mtu_change_allowed)
			return NOTIFY_BAD;
		break;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid changing the type of an underlying device */
		return NOTIFY_BAD;
	case NETDEV_RESEND_IGMP:
		/* Propagate to master device */
		call_netdevice_notifiers(event, port->team->dev);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block team_notifier_block __read_mostly = {
	.notifier_call = team_device_event,
};


/***********************
 * Module init and exit
 ***********************/

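/* Register in dependency order (netdevice notifier, rtnl link ops,
 * genetlink family) and unwind in reverse on failure, so a partially
 * initialized module never stays registered.
 */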
static int __init team_module_init(void)
{
	int err;

	register_netdevice_notifier(&team_notifier_block);

	err = rtnl_link_register(&team_link_ops);
	if (err)
		goto err_rtnl_reg;

	err = team_nl_init();
	if (err)
		goto err_nl_init;

	return 0;

err_nl_init:
	rtnl_link_unregister(&team_link_ops);

err_rtnl_reg:
	unregister_netdevice_notifier(&team_notifier_block);

	return err;
}

static void __exit team_module_exit(void)
{
	team_nl_fini();
	rtnl_link_unregister(&team_link_ops);
	unregister_netdevice_notifier(&team_notifier_block);
}

module_init(team_module_init);
module_exit(team_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Ethernet team device driver");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);