/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
#include <linux/if_team.h>
#define DRV_NAME "team"

#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
	struct team_port *port = rtnl_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}

/*
 * Since the ability to change device address for open port device is tested in
 * team_port_add, this function can be called without control of return value.
 */
static int __set_port_dev_addr(struct net_device *port_dev,
			       const unsigned char *dev_addr)
{
	struct sockaddr_storage addr;

	memcpy(addr.__data, dev_addr, port_dev->addr_len);
	addr.ss_family = port_dev->type;
	return dev_set_mac_address(port_dev, (struct sockaddr *)&addr);
}
static int team_port_set_orig_dev_addr(struct team_port *port)
{
	return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}

static int team_port_set_team_dev_addr(struct team *team,
				       struct team_port *port)
{
	return __set_port_dev_addr(port->dev, team->dev->dev_addr);
}

int team_modeop_port_enter(struct team *team, struct team_port *port)
{
	return team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_enter);

void team_modeop_port_change_dev_addr(struct team *team,
				      struct team_port *port)
{
	team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_change_dev_addr);
static void team_lower_state_changed(struct team_port *port)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = port->linkup;
	info.tx_enabled = team_port_enabled(port);
	netdev_lower_state_changed(port->dev, &info);
}

static void team_refresh_port_linkup(struct team_port *port)
{
	bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
						      port->state.linkup;

	if (port->linkup != new_linkup) {
		port->linkup = new_linkup;
		team_lower_state_changed(port);
	}
}
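/*
 * Options handling: every registered team_option is instantiated as one or
 * more team_option_inst entries (one per port for per-port options, one per
 * array index for array options), kept on team->option_inst_list.
 */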
struct team_option_inst { /* One for each option instance */
	struct list_head list;
	struct list_head tmp_list;
	struct team_option *option;
	struct team_option_inst_info info;
	bool changed;
	bool removed;
};

static struct team_option *__team_find_option(struct team *team,
					      const char *opt_name)
{
	struct team_option *option;

	list_for_each_entry(option, &team->option_list, list) {
		if (strcmp(option->name, opt_name) == 0)
			return option;
	}
	return NULL;
}

static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
	list_del(&opt_inst->list);
	kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
					  struct team_option *option)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option == option)
			__team_option_inst_del(opt_inst);
	}
}
static int __team_option_inst_add(struct team *team, struct team_option *option,
				  struct team_port *port)
{
	struct team_option_inst *opt_inst;
	unsigned int array_size;
	unsigned int i;
	int err;

	array_size = option->array_size;
	if (!array_size)
		array_size = 1; /* No array but still need one instance */

	for (i = 0; i < array_size; i++) {
		opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
		if (!opt_inst)
			return -ENOMEM;
		opt_inst->option = option;
		opt_inst->info.port = port;
		opt_inst->info.array_index = i;
		opt_inst->changed = true;
		opt_inst->removed = false;
		list_add_tail(&opt_inst->list, &team->option_inst_list);
		if (option->init) {
			err = option->init(team, &opt_inst->info);
			if (err)
				return err;
		}
	}
	return 0;
}
static int __team_option_inst_add_option(struct team *team,
					 struct team_option *option)
{
	int err;

	if (!option->per_port) {
		err = __team_option_inst_add(team, option, NULL);
		if (err)
			goto inst_del_option;
	}
	return 0;

inst_del_option:
	__team_option_inst_del_option(team, option);
	return err;
}

static void __team_option_inst_mark_removed_option(struct team *team,
						    struct team_option *option)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->option == option) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static void __team_option_inst_del_port(struct team *team,
					struct team_port *port)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option->per_port &&
		    opt_inst->info.port == port)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add_port(struct team *team,
				       struct team_port *port)
{
	struct team_option *option;
	int err;

	list_for_each_entry(option, &team->option_list, list) {
		if (!option->per_port)
			continue;
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_port;
	}
	return 0;

inst_del_port:
	__team_option_inst_del_port(team, port);
	return err;
}
static void __team_option_inst_mark_removed_port(struct team *team,
						 struct team_port *port)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->info.port == port) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static bool __team_option_inst_tmp_find(const struct list_head *opts,
					const struct team_option_inst *needle)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, opts, tmp_list)
		if (opt_inst == needle)
			return true;
	return false;
}
static int __team_options_register(struct team *team,
				   const struct team_option *option,
				   size_t option_count)
{
	int i;
	struct team_option **dst_opts;
	int err;

	dst_opts = kcalloc(option_count, sizeof(struct team_option *),
			   GFP_KERNEL);
	if (!dst_opts)
		return -ENOMEM;
	for (i = 0; i < option_count; i++, option++) {
		if (__team_find_option(team, option->name)) {
			err = -EEXIST;
			goto alloc_rollback;
		}
		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
		if (!dst_opts[i]) {
			err = -ENOMEM;
			goto alloc_rollback;
		}
	}

	for (i = 0; i < option_count; i++) {
		err = __team_option_inst_add_option(team, dst_opts[i]);
		if (err)
			goto inst_rollback;
		list_add_tail(&dst_opts[i]->list, &team->option_list);
	}

	kfree(dst_opts);
	return 0;

inst_rollback:
	for (i--; i >= 0; i--)
		__team_option_inst_del_option(team, dst_opts[i]);

	i = option_count - 1;
alloc_rollback:
	for (i--; i >= 0; i--)
		kfree(dst_opts[i]);

	kfree(dst_opts);
	return err;
}
static void __team_options_mark_removed(struct team *team,
					const struct team_option *option,
					size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt)
			__team_option_inst_mark_removed_option(team, del_opt);
	}
}

static void __team_options_unregister(struct team *team,
				      const struct team_option *option,
				      size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt) {
			__team_option_inst_del_option(team, del_opt);
			list_del(&del_opt->list);
			kfree(del_opt);
		}
	}
}

static void __team_options_change_check(struct team *team);

int team_options_register(struct team *team,
			  const struct team_option *option,
			  size_t option_count)
{
	int err;

	err = __team_options_register(team, option, option_count);
	if (err)
		return err;
	__team_options_change_check(team);
	return 0;
}
EXPORT_SYMBOL(team_options_register);

void team_options_unregister(struct team *team,
			     const struct team_option *option,
			     size_t option_count)
{
	__team_options_mark_removed(team, option, option_count);
	__team_options_change_check(team);
	__team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);
static int team_option_get(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->getter)
		return -EOPNOTSUPP;
	return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->setter)
		return -EOPNOTSUPP;
	return opt_inst->option->setter(team, ctx);
}

void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
	struct team_option_inst *opt_inst;

	opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
	opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
	__team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);
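/*
 * Mode handling: each mode is provided by a separate team-mode-<kind> module
 * that registers itself on the global mode_list; lookups take mode_list_lock
 * and may trigger request_module() to load the module on demand.
 */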
static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

struct team_mode_item {
	struct list_head list;
	const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
	struct team_mode_item *mitem;

	list_for_each_entry(mitem, &mode_list, list) {
		if (strcmp(mitem->mode->kind, kind) == 0)
			return mitem;
	}
	return NULL;
}

static bool is_good_mode_name(const char *name)
{
	while (*name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
			return false;
		name++;
	}
	return true;
}

int team_mode_register(const struct team_mode *mode)
{
	int err = 0;
	struct team_mode_item *mitem;

	if (!is_good_mode_name(mode->kind) ||
	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
		return -EINVAL;

	mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
	if (!mitem)
		return -ENOMEM;

	spin_lock(&mode_list_lock);
	if (__find_mode(mode->kind)) {
		err = -EEXIST;
		kfree(mitem);
		goto unlock;
	}
	mitem->mode = mode;
	list_add_tail(&mitem->list, &mode_list);
unlock:
	spin_unlock(&mode_list_lock);
	return err;
}
EXPORT_SYMBOL(team_mode_register);

void team_mode_unregister(const struct team_mode *mode)
{
	struct team_mode_item *mitem;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(mode->kind);
	if (mitem) {
		list_del_init(&mitem->list);
		kfree(mitem);
	}
	spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);
static const struct team_mode *team_mode_get(const char *kind)
{
	struct team_mode_item *mitem;
	const struct team_mode *mode = NULL;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(kind);
	if (!mitem) {
		spin_unlock(&mode_list_lock);
		request_module("team-mode-%s", kind);
		spin_lock(&mode_list_lock);
		mitem = __find_mode(kind);
	}
	if (mitem) {
		mode = mitem->mode;
		if (!try_module_get(mode->owner))
			mode = NULL;
	}

	spin_unlock(&mode_list_lock);
	return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
	module_put(mode->owner);
}

static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
					      struct team_port *port,
					      struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
	.kind		= "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
	return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
	team->user_carrier_enabled = false;
	team->mode = &__team_no_mode;
}
static void team_adjust_ops(struct team *team)
{
	/*
	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
	 * correct ops are always set.
	 */

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->transmit)
		team->ops.transmit = team_dummy_transmit;
	else
		team->ops.transmit = team->mode->ops->transmit;

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->receive)
		team->ops.receive = team_dummy_receive;
	else
		team->ops.receive = team->mode->ops->receive;
}

/*
 * We can benefit from the fact that it's ensured no port is present
 * at the time of mode change. Therefore no packets are in flight so there's no
 * need to set mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
			      const struct team_mode *new_mode)
{
	/* Check if mode was previously set and do cleanup if so */
	if (team_is_mode_set(team)) {
		void (*exit_op)(struct team *team) = team->ops.exit;

		/* Clear ops area so no callback is called any longer */
		memset(&team->ops, 0, sizeof(struct team_mode_ops));
		team_adjust_ops(team);

		if (exit_op)
			exit_op(team);
		team_mode_put(team->mode);
		team_set_no_mode(team);
		/* zero private data area */
		memset(&team->mode_priv, 0,
		       sizeof(struct team) - offsetof(struct team, mode_priv));
	}

	if (!new_mode)
		return 0;

	if (new_mode->ops->init) {
		int err;

		err = new_mode->ops->init(team);
		if (err)
			return err;
	}

	team->mode = new_mode;
	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
	team_adjust_ops(team);

	return 0;
}
static int team_change_mode(struct team *team, const char *kind)
{
	const struct team_mode *new_mode;
	struct net_device *dev = team->dev;
	int err;

	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "No ports can be present during mode change\n");
		return -EBUSY;
	}

	if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
		netdev_err(dev, "Unable to change to the same mode the team is in\n");
		return -EINVAL;
	}

	new_mode = team_mode_get(kind);
	if (!new_mode) {
		netdev_err(dev, "Mode \"%s\" not found\n", kind);
		return -EINVAL;
	}

	err = __team_change_mode(team, new_mode);
	if (err) {
		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
		team_mode_put(new_mode);
		return err;
	}

	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
	return 0;
}
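/*
 * The "notify peers" and "mcast rejoin" machinery below re-sends
 * NETDEV_NOTIFY_PEERS / NETDEV_RESEND_IGMP events count_pending times,
 * spaced by the configured interval, using delayed work that reschedules
 * itself when it cannot take the rtnl lock.
 */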
/*********************
 * Notify peers work
 *********************/

static void team_notify_peers_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, notify_peers.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->notify_peers.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->notify_peers.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->notify_peers.dw,
				      msecs_to_jiffies(team->notify_peers.interval));
}

static void team_notify_peers(struct team *team)
{
	if (!team->notify_peers.count || !netif_running(team->dev))
		return;
	atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
	schedule_delayed_work(&team->notify_peers.dw, 0);
}

static void team_notify_peers_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
}

static void team_notify_peers_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->notify_peers.dw);
}
/*******************************
 * Send multicast group rejoins
 *******************************/

static void team_mcast_rejoin_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, mcast_rejoin.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->mcast_rejoin.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->mcast_rejoin.dw,
				      msecs_to_jiffies(team->mcast_rejoin.interval));
}

static void team_mcast_rejoin(struct team *team)
{
	if (!team->mcast_rejoin.count || !netif_running(team->dev))
		return;
	atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
	schedule_delayed_work(&team->mcast_rejoin.dw, 0);
}

static void team_mcast_rejoin_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
}

static void team_mcast_rejoin_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->mcast_rejoin.dw);
}
/************************
 * Rx path frame handler
 ************************/

/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct team_port *port;
	struct team *team;
	rx_handler_result_t res;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	port = team_port_get_rcu(skb->dev);
	team = port->team;
	if (!team_port_enabled(port)) {
		/* allow exact match delivery for disabled ports */
		res = RX_HANDLER_EXACT;
	} else {
		res = team->ops.receive(team, port, skb);
	}
	if (res == RX_HANDLER_ANOTHER) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->rx_packets++;
		pcpu_stats->rx_bytes += skb->len;
		if (skb->pkt_type == PACKET_MULTICAST)
			pcpu_stats->rx_multicast++;
		u64_stats_update_end(&pcpu_stats->syncp);

		skb->dev = team->dev;
	} else if (res == RX_HANDLER_EXACT) {
		this_cpu_inc(team->pcpu_stats->rx_nohandler);
	} else {
		this_cpu_inc(team->pcpu_stats->rx_dropped);
	}

	return res;
}
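/*
 * Queue override: ports may be pinned to a specific tx queue id.  For each
 * queue id > 0 a list of such ports, ordered by priority, is kept in
 * team->qom_lists, and team_queue_override_transmit() tries these ports
 * before falling back to the mode's transmit op.
 */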
/*************************************
 * Multiqueue Tx port select override
 *************************************/

static int team_queue_override_init(struct team *team)
{
	struct list_head *listarr;
	unsigned int queue_cnt = team->dev->num_tx_queues - 1;
	unsigned int i;

	if (!queue_cnt)
		return 0;
	listarr = kmalloc_array(queue_cnt, sizeof(struct list_head),
				GFP_KERNEL);
	if (!listarr)
		return -ENOMEM;
	team->qom_lists = listarr;
	for (i = 0; i < queue_cnt; i++)
		INIT_LIST_HEAD(listarr++);
	return 0;
}

static void team_queue_override_fini(struct team *team)
{
	kfree(team->qom_lists);
}

static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
{
	return &team->qom_lists[queue_id - 1];
}
/*
 * note: already called with rcu_read_lock
 */
static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
{
	struct list_head *qom_list;
	struct team_port *port;

	if (!team->queue_override_enabled || !skb->queue_mapping)
		return false;
	qom_list = __team_get_qom_list(team, skb->queue_mapping);
	list_for_each_entry_rcu(port, qom_list, qom_list) {
		if (!team_dev_queue_xmit(team, port, skb))
			return true;
	}
	return false;
}

static void __team_queue_override_port_del(struct team *team,
					   struct team_port *port)
{
	if (!port->queue_id)
		return;
	list_del_rcu(&port->qom_list);
}

static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
						      struct team_port *cur)
{
	if (port->priority < cur->priority)
		return true;
	if (port->priority > cur->priority)
		return false;
	if (port->index < cur->index)
		return true;
	return false;
}
static void __team_queue_override_port_add(struct team *team,
					   struct team_port *port)
{
	struct team_port *cur;
	struct list_head *qom_list;
	struct list_head *node;

	if (!port->queue_id)
		return;
	qom_list = __team_get_qom_list(team, port->queue_id);
	node = qom_list;
	list_for_each_entry(cur, qom_list, qom_list) {
		if (team_queue_override_port_has_gt_prio_than(port, cur))
			break;
		node = &cur->qom_list;
	}
	list_add_tail_rcu(&port->qom_list, node);
}

static void __team_queue_override_enabled_check(struct team *team)
{
	struct team_port *port;
	bool enabled = false;

	list_for_each_entry(port, &team->port_list, list) {
		if (port->queue_id) {
			enabled = true;
			break;
		}
	}
	if (enabled == team->queue_override_enabled)
		return;
	netdev_dbg(team->dev, "%s queue override\n",
		   enabled ? "Enabling" : "Disabling");
	team->queue_override_enabled = enabled;
}
static void team_queue_override_port_prio_changed(struct team *team,
						  struct team_port *port)
{
	if (!port->queue_id || team_port_enabled(port))
		return;
	__team_queue_override_port_del(team, port);
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_change_queue_id(struct team *team,
						      struct team_port *port,
						      u16 new_queue_id)
{
	if (team_port_enabled(port)) {
		__team_queue_override_port_del(team, port);
		port->queue_id = new_queue_id;
		__team_queue_override_port_add(team, port);
		__team_queue_override_enabled_check(team);
	} else {
		port->queue_id = new_queue_id;
	}
}

static void team_queue_override_port_add(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_del(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_del(team, port);
	__team_queue_override_enabled_check(team);
}
static bool team_port_find(const struct team *team,
			   const struct team_port *port)
{
	struct team_port *cur;

	list_for_each_entry(cur, &team->port_list, list)
		if (cur == port)
			return true;
	return false;
}

/*
 * Enable/disable port by adding to enabled port hashlist and setting
 * port->index (Might be racy so reader could see incorrect ifindex when
 * processing a flying packet, but that is not a problem). Write guarded
 * by team->lock.
 */
static void team_port_enable(struct team *team,
			     struct team_port *port)
{
	if (team_port_enabled(port))
		return;
	port->index = team->en_port_count++;
	hlist_add_head_rcu(&port->hlist,
			   team_port_index_hash(team, port->index));
	team_adjust_ops(team);
	team_queue_override_port_add(team, port);
	if (team->ops.port_enabled)
		team->ops.port_enabled(team, port);
	team_notify_peers(team);
	team_mcast_rejoin(team);
	team_lower_state_changed(port);
}
static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
	int i;
	struct team_port *port;

	for (i = rm_index + 1; i < team->en_port_count; i++) {
		port = team_get_port_by_index(team, i);
		hlist_del_rcu(&port->hlist);
		port->index--;
		hlist_add_head_rcu(&port->hlist,
				   team_port_index_hash(team, port->index));
	}
}

static void team_port_disable(struct team *team,
			      struct team_port *port)
{
	if (!team_port_enabled(port))
		return;
	if (team->ops.port_disabled)
		team->ops.port_disabled(team, port);
	hlist_del_rcu(&port->hlist);
	__reconstruct_port_hlist(team, port->index);
	port->index = -1;
	team->en_port_count--;
	team_queue_override_port_del(team, port);
	team_adjust_ops(team);
	team_notify_peers(team);
	team_mcast_rejoin(team);
	team_lower_state_changed(port);
}
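/*
 * Device features of the team device are recomputed from the intersection
 * of all port device features whenever ports are added or removed.
 */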
#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
			    NETIF_F_HIGHDMA | NETIF_F_LRO)

#define TEAM_ENC_FEATURES  (NETIF_F_HW_CSUM | NETIF_F_SG | \
			    NETIF_F_RXCSUM | NETIF_F_ALL_TSO)

static void __team_compute_features(struct team *team)
{
	struct team_port *port;
	netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
					  NETIF_F_ALL_FOR_ALL;
	netdev_features_t enc_features  = TEAM_ENC_FEATURES;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;

	list_for_each_entry(port, &team->port_list, list) {
		vlan_features = netdev_increment_features(vlan_features,
					port->dev->vlan_features,
					TEAM_VLAN_FEATURES);
		enc_features =
			netdev_increment_features(enc_features,
						  port->dev->hw_enc_features,
						  TEAM_ENC_FEATURES);

		dst_release_flag &= port->dev->priv_flags;
		if (port->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = port->dev->hard_header_len;
	}

	team->dev->vlan_features = vlan_features;
	team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
				     NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	team->dev->hard_header_len = max_hard_header_len;

	team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
}
static void team_compute_features(struct team *team)
{
	mutex_lock(&team->lock);
	__team_compute_features(team);
	mutex_unlock(&team->lock);
	netdev_change_features(team->dev);
}

static int team_port_enter(struct team *team, struct team_port *port)
{
	int err = 0;

	dev_hold(team->dev);
	if (team->ops.port_enter) {
		err = team->ops.port_enter(team, port);
		if (err) {
			netdev_err(team->dev, "Device %s failed to enter team mode\n",
				   port->dev->name);
			goto err_port_enter;
		}
	}

	return 0;

err_port_enter:
	dev_put(team->dev);

	return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
	if (team->ops.port_leave)
		team->ops.port_leave(team, port);
	dev_put(team->dev);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static int __team_port_enable_netpoll(struct team_port *port)
{
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, port->dev);
	if (err) {
		kfree(np);
		return err;
	}
	port->np = np;
	return err;
}

static int team_port_enable_netpoll(struct team_port *port)
{
	if (!port->team->dev->npinfo)
		return 0;

	return __team_port_enable_netpoll(port);
}

static void team_port_disable_netpoll(struct team_port *port)
{
	struct netpoll *np = port->np;

	if (!np)
		return;
	port->np = NULL;

	__netpoll_free(np);
}
#else
static int team_port_enable_netpoll(struct team_port *port)
{
	return 0;
}
static void team_port_disable_netpoll(struct team_port *port)
{
}
#endif
static int team_upper_dev_link(struct team *team, struct team_port *port,
			       struct netlink_ext_ack *extack)
{
	struct netdev_lag_upper_info lag_upper_info;
	int err;

	lag_upper_info.tx_type = team->mode->lag_tx_type;
	lag_upper_info.hash_type = NETDEV_LAG_HASH_UNKNOWN;
	err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
					   &lag_upper_info, extack);
	if (err)
		return err;
	port->dev->priv_flags |= IFF_TEAM_PORT;
	return 0;
}

static void team_upper_dev_unlink(struct team *team, struct team_port *port)
{
	netdev_upper_dev_unlink(port->dev, team->dev);
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
}

static void __team_port_change_port_added(struct team_port *port, bool linkup);

static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev);
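/*
 * team_port_add() enslaves a port device: it adjusts MTU and MAC, enters the
 * current mode, opens the device, syncs VLANs and addresses, registers the
 * rx handler and links the port as a lower device; every step is unwound on
 * failure.
 */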
static int team_port_add(struct team *team, struct net_device *port_dev,
			 struct netlink_ext_ack *extack)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;
	int err;

	if (port_dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack, "Loopback device can't be added as a team port");
		netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
			   portname);
		return -EINVAL;
	}

	if (team_port_exists(port_dev)) {
		NL_SET_ERR_MSG(extack, "Device is already a port of a team device");
		netdev_err(dev, "Device %s is already a port "
				"of a team device\n", portname);
		return -EBUSY;
	}

	if (dev == port_dev) {
		NL_SET_ERR_MSG(extack, "Cannot enslave team device to itself");
		netdev_err(dev, "Cannot enslave team device to itself\n");
		return -EINVAL;
	}

	if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
	    vlan_uses_dev(dev)) {
		NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
		netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
			   portname);
		return -EPERM;
	}

	err = team_dev_type_check_change(dev, port_dev);
	if (err)
		return err;

	if (port_dev->flags & IFF_UP) {
		NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port");
		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
			   portname);
		return -EBUSY;
	}

	port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
		       GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = port_dev;
	port->team = team;
	INIT_LIST_HEAD(&port->qom_list);

	port->orig.mtu = port_dev->mtu;
	err = dev_set_mtu(port_dev, dev->mtu);
	if (err) {
		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
		goto err_set_mtu;
	}

	memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);

	err = team_port_enter(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to enter team mode\n",
			   portname);
		goto err_port_enter;
	}

	err = dev_open(port_dev);
	if (err) {
		netdev_dbg(dev, "Device %s opening failed\n",
			   portname);
		goto err_dev_open;
	}

	err = vlan_vids_add_by_dev(port_dev, dev);
	if (err) {
		netdev_err(dev, "Failed to add vlan ids to device %s\n",
			   portname);
		goto err_vids_add;
	}

	err = team_port_enable_netpoll(port);
	if (err) {
		netdev_err(dev, "Failed to enable netpoll on device %s\n",
			   portname);
		goto err_enable_netpoll;
	}

	if (!(dev->features & NETIF_F_LRO))
		dev_disable_lro(port_dev);

	err = netdev_rx_handler_register(port_dev, team_handle_frame,
					 port);
	if (err) {
		netdev_err(dev, "Device %s failed to register rx_handler\n",
			   portname);
		goto err_handler_register;
	}

	err = team_upper_dev_link(team, port, extack);
	if (err) {
		netdev_err(dev, "Device %s failed to set upper link\n",
			   portname);
		goto err_set_upper_link;
	}

	err = __team_option_inst_add_port(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to add per-port options\n",
			   portname);
		goto err_option_port_add;
	}

	netif_addr_lock_bh(dev);
	dev_uc_sync_multiple(port_dev, dev);
	dev_mc_sync_multiple(port_dev, dev);
	netif_addr_unlock_bh(dev);

	port->index = -1;
	list_add_tail_rcu(&port->list, &team->port_list);
	team_port_enable(team, port);
	__team_compute_features(team);
	__team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
	__team_options_change_check(team);

	netdev_info(dev, "Port device %s added\n", portname);

	return 0;

err_option_port_add:
	team_upper_dev_unlink(team, port);

err_set_upper_link:
	netdev_rx_handler_unregister(port_dev);

err_handler_register:
	team_port_disable_netpoll(port);

err_enable_netpoll:
	vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
	dev_close(port_dev);

err_dev_open:
	team_port_leave(team, port);
	team_port_set_orig_dev_addr(port);

err_port_enter:
	dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
	kfree(port);

	return err;
}
static void __team_port_change_port_removed(struct team_port *port);

static int team_port_del(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;

	port = team_port_get_rtnl(port_dev);
	if (!port || !team_port_find(team, port)) {
		netdev_err(dev, "Device %s does not act as a port of this team\n",
			   portname);
		return -ENOENT;
	}

	team_port_disable(team, port);
	list_del_rcu(&port->list);
	team_upper_dev_unlink(team, port);
	netdev_rx_handler_unregister(port_dev);
	team_port_disable_netpoll(port);
	vlan_vids_del_by_dev(port_dev, dev);
	dev_uc_unsync(port_dev, dev);
	dev_mc_unsync(port_dev, dev);
	dev_close(port_dev);
	team_port_leave(team, port);

	__team_option_inst_mark_removed_port(team, port);
	__team_options_change_check(team);
	__team_option_inst_del_port(team, port);
	__team_port_change_port_removed(port);

	team_port_set_orig_dev_addr(port);
	dev_set_mtu(port_dev, port->orig.mtu);
	kfree_rcu(port, rcu);
	netdev_info(dev, "Port device %s removed\n", portname);
	__team_compute_features(team);

	return 0;
}
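/*
 * Getters/setters below back the options exposed over the team generic
 * netlink interface; for per-port options, ctx->info->port identifies the
 * port instance being accessed.
 */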
static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	ctx->data.str_val = team->mode->kind;
	return 0;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	return team_change_mode(team, ctx->data.str_val);
}

static int team_notify_peers_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.count;
	return 0;
}

static int team_notify_peers_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->notify_peers.count = ctx->data.u32_val;
	return 0;
}

static int team_notify_peers_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.interval;
	return 0;
}

static int team_notify_peers_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->notify_peers.interval = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.count;
	return 0;
}

static int team_mcast_rejoin_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.count = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.interval;
	return 0;
}

static int team_mcast_rejoin_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.interval = ctx->data.u32_val;
	return 0;
}

static int team_port_en_option_get(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = team_port_enabled(port);
	return 0;
}

static int team_port_en_option_set(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	if (ctx->data.bool_val)
		team_port_enable(team, port);
	else
		team_port_disable(team, port);
	return 0;
}

static int team_user_linkup_option_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup;
	return 0;
}

static void __team_carrier_check(struct team *team);

static int team_user_linkup_option_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup_enabled;
	return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup_enabled = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static int team_priority_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.s32_val = port->priority;
	return 0;
}

static int team_priority_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	s32 priority = ctx->data.s32_val;

	if (port->priority == priority)
		return 0;
	port->priority = priority;
	team_queue_override_port_prio_changed(team, port);
	return 0;
}

static int team_queue_id_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.u32_val = port->queue_id;
	return 0;
}

static int team_queue_id_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	u16 new_queue_id = ctx->data.u32_val;

	if (port->queue_id == new_queue_id)
		return 0;
	if (new_queue_id >= team->dev->real_num_tx_queues)
		return -EINVAL;
	team_queue_override_port_change_queue_id(team, port, new_queue_id);
	return 0;
}

static const struct team_option team_options[] = {
	{
		.name = "mode",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = team_mode_option_get,
		.setter = team_mode_option_set,
	},
	{
		.name = "notify_peers_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_count_get,
		.setter = team_notify_peers_count_set,
	},
	{
		.name = "notify_peers_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_interval_get,
		.setter = team_notify_peers_interval_set,
	},
	{
		.name = "mcast_rejoin_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_count_get,
		.setter = team_mcast_rejoin_count_set,
	},
	{
		.name = "mcast_rejoin_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_interval_get,
		.setter = team_mcast_rejoin_interval_set,
	},
	{
		.name = "enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_port_en_option_get,
		.setter = team_port_en_option_set,
	},
	{
		.name = "user_linkup",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_option_get,
		.setter = team_user_linkup_option_set,
	},
	{
		.name = "user_linkup_enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_en_option_get,
		.setter = team_user_linkup_en_option_set,
	},
	{
		.name = "priority",
		.type = TEAM_OPTION_TYPE_S32,
		.per_port = true,
		.getter = team_priority_option_get,
		.setter = team_priority_option_set,
	},
	{
		.name = "queue_id",
		.type = TEAM_OPTION_TYPE_U32,
		.per_port = true,
		.getter = team_queue_id_option_get,
		.setter = team_queue_id_option_set,
	},
};
static int team_init(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	int i;
	int err;

	team->dev = dev;
	mutex_init(&team->lock);
	team_set_no_mode(team);

	team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
	if (!team->pcpu_stats)
		return -ENOMEM;

	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
	INIT_LIST_HEAD(&team->port_list);
	err = team_queue_override_init(team);
	if (err)
		goto err_team_queue_override_init;

	team_adjust_ops(team);

	INIT_LIST_HEAD(&team->option_list);
	INIT_LIST_HEAD(&team->option_inst_list);

	team_notify_peers_init(team);
	team_mcast_rejoin_init(team);

	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
	if (err)
		goto err_options_register;
	netif_carrier_off(dev);

	netdev_lockdep_set_classes(dev);

	return 0;

err_options_register:
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
err_team_queue_override_init:
	free_percpu(team->pcpu_stats);

	return err;
}
static void team_uninit(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct team_port *tmp;

	mutex_lock(&team->lock);
	list_for_each_entry_safe(port, tmp, &team->port_list, list)
		team_port_del(team, port->dev);

	__team_change_mode(team, NULL); /* cleanup */
	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
	mutex_unlock(&team->lock);
	netdev_change_features(dev);
}

static void team_destructor(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	free_percpu(team->pcpu_stats);
}

static int team_open(struct net_device *dev)
{
	return 0;
}

static int team_close(struct net_device *dev)
{
	return 0;
}
/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success;
	unsigned int len = skb->len;

	tx_success = team_queue_override_transmit(team, skb);
	if (!tx_success)
		tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}
static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
			     struct net_device *sb_dev,
			     select_queue_fallback_t fallback)
{
	/*
	 * This helper function exists to help dev_pick_tx get the correct
	 * destination queue. Using a helper function skips a call to
	 * skb_tx_hash and will put the skbs in the queue we expect on their
	 * way down to the team driver.
	 */
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

	/*
	 * Save the original txq to restore before passing to the driver
	 */
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;

	if (unlikely(txq >= dev->real_num_tx_queues)) {
		do {
			txq -= dev->real_num_tx_queues;
		} while (txq >= dev->real_num_tx_queues);
	}
	return txq;
}
static void team_change_rx_flags(struct net_device *dev, int change)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int inc;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (change & IFF_PROMISC) {
			inc = dev->flags & IFF_PROMISC ? 1 : -1;
			dev_set_promiscuity(port->dev, inc);
		}
		if (change & IFF_ALLMULTI) {
			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
			dev_set_allmulti(port->dev, inc);
		}
	}
	rcu_read_unlock();
}

static void team_set_rx_mode(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		dev_uc_sync_multiple(port->dev, dev);
		dev_mc_sync_multiple(port->dev, dev);
	}
	rcu_read_unlock();
}
static int team_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		if (team->ops.port_change_dev_addr)
			team->ops.port_change_dev_addr(team, port);
	mutex_unlock(&team->lock);
	return 0;
}
static int team_change_mtu(struct net_device *dev, int new_mtu)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is reader, it's guarded by team lock. It's not possible
	 * to traverse list in reverse under rcu_read_lock
	 */
	mutex_lock(&team->lock);
	team->port_mtu_change_allowed = true;
	list_for_each_entry(port, &team->port_list, list) {
		err = dev_set_mtu(port->dev, new_mtu);
		if (err) {
			netdev_err(dev, "Device %s failed to change mtu",
				   port->dev->name);
			goto unwind;
		}
	}
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	dev->mtu = new_mtu;

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		dev_set_mtu(port->dev, dev->mtu);
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	return err;
}
static void
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct team *team = netdev_priv(dev);
	struct team_pcpu_stats *p;
	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
	u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(team->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets = p->rx_packets;
			rx_bytes = p->rx_bytes;
			rx_multicast = p->rx_multicast;
			tx_packets = p->tx_packets;
			tx_bytes = p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
		stats->multicast += rx_multicast;
		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
		/*
		 * rx_dropped, tx_dropped & rx_nohandler are u32,
		 * updated without syncp protection.
		 */
		rx_dropped += p->rx_dropped;
		tx_dropped += p->tx_dropped;
		rx_nohandler += p->rx_nohandler;
	}
	stats->rx_dropped = rx_dropped;
	stats->tx_dropped = tx_dropped;
	stats->rx_nohandler = rx_nohandler;
}
static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is reader, it's guarded by team lock. It's not possible
	 * to traverse list in reverse under rcu_read_lock
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = vlan_vid_add(port->dev, proto, vid);
		if (err)
			goto unwind;
	}
	mutex_unlock(&team->lock);

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return err;
}

static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void team_poll_controller(struct net_device *dev)
{
}

static void __team_netpoll_cleanup(struct team *team)
{
	struct team_port *port;

	list_for_each_entry(port, &team->port_list, list)
		team_port_disable_netpoll(port);
}

static void team_netpoll_cleanup(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	mutex_lock(&team->lock);
	__team_netpoll_cleanup(team);
	mutex_unlock(&team->lock);
}

static int team_netpoll_setup(struct net_device *dev,
			      struct netpoll_info *npifo)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err = 0;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = __team_port_enable_netpoll(port);
		if (err) {
			__team_netpoll_cleanup(team);
			break;
		}
	}
	mutex_unlock(&team->lock);
	return err;
}
#endif
static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
			  struct netlink_ext_ack *extack)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_add(team, port_dev, extack);
	mutex_unlock(&team->lock);

	if (!err)
		netdev_change_features(dev);

	return err;
}

static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_del(team, port_dev);
	mutex_unlock(&team->lock);

	if (!err)
		netdev_change_features(dev);

	return err;
}
static netdev_features_t team_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct team_port *port;
	struct team *team = netdev_priv(dev);
	netdev_features_t mask;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);
	}
	rcu_read_unlock();

	features = netdev_add_tso_features(features, mask);

	return features;
}

static int team_change_carrier(struct net_device *dev, bool new_carrier)
{
	struct team *team = netdev_priv(dev);

	team->user_carrier_enabled = true;

	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}
static const struct net_device_ops team_netdev_ops = {
	.ndo_init		= team_init,
	.ndo_uninit		= team_uninit,
	.ndo_open		= team_open,
	.ndo_stop		= team_close,
	.ndo_start_xmit		= team_xmit,
	.ndo_select_queue	= team_select_queue,
	.ndo_change_rx_flags	= team_change_rx_flags,
	.ndo_set_rx_mode	= team_set_rx_mode,
	.ndo_set_mac_address	= team_set_mac_address,
	.ndo_change_mtu		= team_change_mtu,
	.ndo_get_stats64	= team_get_stats64,
	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= team_poll_controller,
	.ndo_netpoll_setup	= team_netpoll_setup,
	.ndo_netpoll_cleanup	= team_netpoll_cleanup,
#endif
	.ndo_add_slave		= team_add_slave,
	.ndo_del_slave		= team_del_slave,
	.ndo_fix_features	= team_fix_features,
	.ndo_change_carrier	= team_change_carrier,
	.ndo_features_check	= passthru_features_check,
};
/***********************
 * ethtool interface
 ***********************/

static void team_ethtool_get_drvinfo(struct net_device *dev,
				     struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static const struct ethtool_ops team_ethtool_ops = {
	.get_drvinfo		= team_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
};

/***********************
 * rt netlink interface
 ***********************/

static void team_setup_by_port(struct net_device *dev,
			       struct net_device *port_dev)
{
	dev->header_ops = port_dev->header_ops;
	dev->type = port_dev->type;
	dev->hard_header_len = port_dev->hard_header_len;
	dev->addr_len = port_dev->addr_len;
	dev->mtu = port_dev->mtu;
	memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
	eth_hw_addr_inherit(dev, port_dev);
}
static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	char *portname = port_dev->name;
	int err;

	if (dev->type == port_dev->type)
		return 0;
	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "Device %s is of different type\n", portname);
		return -EBUSY;
	}
	err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
	err = notifier_to_errno(err);
	if (err) {
		netdev_err(dev, "Refused to change device type\n");
		return err;
	}
	dev_uc_flush(dev);
	dev_mc_flush(dev);
	team_setup_by_port(dev, port_dev);
	call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
	return 0;
}
static void team_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->max_mtu = ETH_MAX_MTU;

	dev->netdev_ops = &team_netdev_ops;
	dev->ethtool_ops = &team_ethtool_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = team_destructor;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_TEAM;

	/*
	 * Indicate we support unicast address filtering. That way core won't
	 * bring us to promisc mode in case a unicast addr is added.
	 * Let this up to underlay drivers.
	 */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GRO;

	/* Don't allow team devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	dev->hw_features = TEAM_VLAN_FEATURES |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
	dev->features |= dev->hw_features;
}
static int team_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	return register_netdevice(dev);
}

static int team_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static unsigned int team_get_num_tx_queues(void)
{
	return TEAM_DEFAULT_NUM_TX_QUEUES;
}

static unsigned int team_get_num_rx_queues(void)
{
	return TEAM_DEFAULT_NUM_RX_QUEUES;
}

static struct rtnl_link_ops team_link_ops __read_mostly = {
	.kind			= DRV_NAME,
	.priv_size		= sizeof(struct team),
	.setup			= team_setup,
	.newlink		= team_newlink,
	.validate		= team_validate,
	.get_num_tx_queues	= team_get_num_tx_queues,
	.get_num_rx_queues	= team_get_num_rx_queues,
};
/***********************************
 * Generic netlink custom interface
 ***********************************/

static struct genl_family team_nl_family;

static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
	[TEAM_ATTR_UNSPEC]		= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_TEAM_IFINDEX]	= { .type = NLA_U32 },
	[TEAM_ATTR_LIST_OPTION]		= { .type = NLA_NESTED },
	[TEAM_ATTR_LIST_PORT]		= { .type = NLA_NESTED },
};

static const struct nla_policy
team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
	[TEAM_ATTR_OPTION_UNSPEC]	= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_OPTION_NAME] = {
		.type = NLA_STRING,
		.len = TEAM_STRING_MAX_LEN,
	},
	[TEAM_ATTR_OPTION_CHANGED]	= { .type = NLA_FLAG },
	[TEAM_ATTR_OPTION_TYPE]		= { .type = NLA_U8 },
	[TEAM_ATTR_OPTION_DATA]		= { .type = NLA_BINARY },
};
static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &team_nl_family, 0, TEAM_CMD_NOOP);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_msg_put;
	}

	genlmsg_end(msg, hdr);

	return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);

err_msg_put:
	nlmsg_free(msg);

	return err;
}

/*
 * Netlink cmd functions should be locked by following two functions.
 * Since dev gets held here, that ensures dev won't disappear in between.
 */
static struct team *team_nl_team_get(struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	int ifindex;
	struct net_device *dev;
	struct team *team;

	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
		return NULL;

	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
	dev = dev_get_by_index(net, ifindex);
	if (!dev || dev->netdev_ops != &team_netdev_ops) {
		if (dev)
			dev_put(dev);
		return NULL;
	}

	team = netdev_priv(dev);
	mutex_lock(&team->lock);
	return team;
}

static void team_nl_team_put(struct team *team)
{
	mutex_unlock(&team->lock);
	dev_put(team->dev);
}
typedef int team_nl_send_func_t(struct sk_buff *skb,
				struct team *team, u32 portid);

static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
{
	return genlmsg_unicast(dev_net(team->dev), skb, portid);
}
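/*
 * Each option instance is emitted as a nested TEAM_ATTR_ITEM_OPTION
 * attribute; on -EMSGSIZE the caller allocates a fresh skb and restarts
 * from the instance that did not fit.
 */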
static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
				       struct team_option_inst *opt_inst)
{
	struct nlattr *option_item;
	struct team_option *option = opt_inst->option;
	struct team_option_inst_info *opt_inst_info = &opt_inst->info;
	struct team_gsetter_ctx ctx;
	int err;

	ctx.info = opt_inst_info;
	err = team_option_get(team, opt_inst, &ctx);
	if (err)
		return err;

	option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
	if (!option_item)
		return -EMSGSIZE;

	if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
		goto nest_cancel;
	if (opt_inst_info->port &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
			opt_inst_info->port->dev->ifindex))
		goto nest_cancel;
	if (opt_inst->option->array_size &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
			opt_inst_info->array_index))
		goto nest_cancel;

	switch (option->type) {
	case TEAM_OPTION_TYPE_U32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
			goto nest_cancel;
		if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_STRING:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
			goto nest_cancel;
		if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
				   ctx.data.str_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BINARY:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
			goto nest_cancel;
		if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
			    ctx.data.bin_val.ptr))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BOOL:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
			goto nest_cancel;
		if (ctx.data.bool_val &&
		    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_S32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
			goto nest_cancel;
		if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
			goto nest_cancel;
		break;
	default:
		BUG();
	}
	if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
		goto nest_cancel;
	if (opt_inst->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
			goto nest_cancel;
		opt_inst->changed = false;
	}
	nla_nest_end(skb, option_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, option_item);
	return -EMSGSIZE;
}
static int __send_and_alloc_skb(struct sk_buff **pskb,
				struct team *team, u32 portid,
				team_nl_send_func_t *send_func)
{
	int err;

	if (*pskb) {
		err = send_func(*pskb, team, portid);
		if (err)
			return err;
	}
	*pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!*pskb)
		return -ENOMEM;
	return 0;
}
static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
				    int flags, team_nl_send_func_t *send_func,
				    struct list_head *sel_opt_inst_list)
{
	struct nlattr *option_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_option_inst *opt_inst;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	opt_inst = list_first_entry(sel_opt_inst_list,
				    struct team_option_inst, tmp_list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_OPTIONS_GET);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
	if (!option_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;
	list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
		err = team_nl_fill_one_option_get(skb, team, opt_inst);
		if (err) {
			if (err == -EMSGSIZE) {
				if (!i)
					goto errout;
				incomplete = true;
				break;
			}
			goto errout;
		}
		i++;
	}

	nla_nest_end(skb, option_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			return err;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	nlmsg_free(skb);
	return err;
}
static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	struct team_option_inst *opt_inst;
	int err;
	LIST_HEAD(sel_opt_inst_list);

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	list_for_each_entry(opt_inst, &team->option_inst_list, list)
		list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
				       NLM_F_ACK, team_nl_send_unicast,
				       &sel_opt_inst_list);

	team_nl_team_put(team);

	return err;
}

static int team_nl_send_event_options_get(struct team *team,
					  struct list_head *sel_opt_inst_list);
static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err = 0;
	int i;
	struct nlattr *nl_option;
	LIST_HEAD(opt_inst_list);

	rtnl_lock();

	team = team_nl_team_get(info);
	if (!team) {
		err = -EINVAL;
		goto rtnl_unlock;
	}

	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
		err = -EINVAL;
		goto team_put;
	}

	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
		struct nlattr *attr;
		struct nlattr *attr_data;
		enum team_option_type opt_type;
		int opt_port_ifindex = 0; /* != 0 for per-port options */
		u32 opt_array_index = 0;
		bool opt_is_array = false;
		struct team_option_inst *opt_inst;
		char *opt_name;
		bool opt_found = false;

		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
			err = -EINVAL;
			goto team_put;
		}
		err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
				       nl_option, team_nl_option_policy,
				       info->extack);
		if (err)
			goto team_put;
		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
			err = -EINVAL;
			goto team_put;
		}
		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
		case NLA_U32:
			opt_type = TEAM_OPTION_TYPE_U32;
			break;
		case NLA_STRING:
			opt_type = TEAM_OPTION_TYPE_STRING;
			break;
		case NLA_BINARY:
			opt_type = TEAM_OPTION_TYPE_BINARY;
			break;
		case NLA_FLAG:
			opt_type = TEAM_OPTION_TYPE_BOOL;
			break;
		case NLA_S32:
			opt_type = TEAM_OPTION_TYPE_S32;
			break;
		default:
			err = -EINVAL;
			goto team_put;
		}

		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
			err = -EINVAL;
			goto team_put;
		}

		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
		attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
		if (attr)
			opt_port_ifindex = nla_get_u32(attr);

		attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
		if (attr) {
			opt_is_array = true;
			opt_array_index = nla_get_u32(attr);
		}

		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
			struct team_option *option = opt_inst->option;
			struct team_gsetter_ctx ctx;
			struct team_option_inst_info *opt_inst_info;
			int tmp_ifindex;

			opt_inst_info = &opt_inst->info;
			tmp_ifindex = opt_inst_info->port ?
				      opt_inst_info->port->dev->ifindex : 0;
			if (option->type != opt_type ||
			    strcmp(option->name, opt_name) ||
			    tmp_ifindex != opt_port_ifindex ||
			    (option->array_size && !opt_is_array) ||
			    opt_inst_info->array_index != opt_array_index)
				continue;
			opt_found = true;
			ctx.info = opt_inst_info;
			switch (opt_type) {
			case TEAM_OPTION_TYPE_U32:
				ctx.data.u32_val = nla_get_u32(attr_data);
				break;
			case TEAM_OPTION_TYPE_STRING:
				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
					err = -EINVAL;
					goto team_put;
				}
				ctx.data.str_val = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BINARY:
				ctx.data.bin_val.len = nla_len(attr_data);
				ctx.data.bin_val.ptr = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BOOL:
				ctx.data.bool_val = attr_data ? true : false;
				break;
			case TEAM_OPTION_TYPE_S32:
				ctx.data.s32_val = nla_get_s32(attr_data);
				break;
			default:
				BUG();
			}
			err = team_option_set(team, opt_inst, &ctx);
			if (err)
				goto team_put;
			opt_inst->changed = true;

			/* dumb/evil user-space can send us duplicate opt,
			 * keep only the last one
			 */
			if (__team_option_inst_tmp_find(&opt_inst_list,
							opt_inst))
				continue;

			list_add(&opt_inst->tmp_list, &opt_inst_list);
		}
		if (!opt_found) {
			err = -ENOENT;
			goto team_put;
		}
	}

	err = team_nl_send_event_options_get(team, &opt_inst_list);

team_put:
	team_nl_team_put(team);
rtnl_unlock:
	rtnl_unlock();
	return err;
}

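/* Emit one TEAM_ATTR_ITEM_PORT nest describing a single port: ifindex,
 * changed/removed flags, link state, speed and duplex.
 */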
static int team_nl_fill_one_port_get(struct sk_buff *skb,
				     struct team_port *port)
{
	struct nlattr *port_item;

	port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
	if (!port_item)
		return -EMSGSIZE;
	if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
		goto nest_cancel;
	if (port->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
			goto nest_cancel;
		port->changed = false;
	}
	if ((port->removed &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
	    (port->state.linkup &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
	    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
	    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
		goto nest_cancel;
	nla_nest_end(skb, port_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, port_item);
	return -EMSGSIZE;
}

static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
				      int flags, team_nl_send_func_t *send_func,
				      struct team_port *one_port)
{
	struct nlattr *port_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_port *port;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	port = list_first_entry_or_null(&team->port_list,
					struct team_port, list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_PORT_LIST_GET);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
	if (!port_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;

	/* If one port is selected, the caller wants to send a port list
	 * containing only this port. Otherwise go through all listed ports
	 * and send them all.
	 */
	if (one_port) {
		err = team_nl_fill_one_port_get(skb, one_port);
		if (err)
			goto errout;
	} else if (port) {
		list_for_each_entry_from(port, &team->port_list, list) {
			err = team_nl_fill_one_port_get(skb, port);
			if (err) {
				if (err == -EMSGSIZE) {
					if (!i)
						goto errout;
					incomplete = true;
					break;
				}
				goto errout;
			}
			i++;
		}
	}

	nla_nest_end(skb, port_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			return err;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	nlmsg_free(skb);
	return err;
}

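/* doit handler for TEAM_CMD_PORT_LIST_GET: send the state of all ports of the
 * team back to the requester as a unicast reply.
 */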
static int team_nl_cmd_port_list_get(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
					 NLM_F_ACK, team_nl_send_unicast, NULL);

	team_nl_team_put(team);

	return err;
}

static const struct genl_ops team_nl_ops[] = {
	{
		.cmd = TEAM_CMD_NOOP,
		.doit = team_nl_cmd_noop,
		.policy = team_nl_policy,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_SET,
		.doit = team_nl_cmd_options_set,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_GET,
		.doit = team_nl_cmd_options_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_PORT_LIST_GET,
		.doit = team_nl_cmd_port_list_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static const struct genl_multicast_group team_nl_mcgrps[] = {
	{ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
};

static struct genl_family team_nl_family __ro_after_init = {
	.name		= TEAM_GENL_NAME,
	.version	= TEAM_GENL_VERSION,
	.maxattr	= TEAM_ATTR_MAX,
	.netnsok	= true,
	.module		= THIS_MODULE,
	.ops		= team_nl_ops,
	.n_ops		= ARRAY_SIZE(team_nl_ops),
	.mcgrps		= team_nl_mcgrps,
	.n_mcgrps	= ARRAY_SIZE(team_nl_mcgrps),
};

static int team_nl_send_multicast(struct sk_buff *skb,
				  struct team *team, u32 portid)
{
	return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
				       skb, 0, 0, GFP_KERNEL);
}

static int team_nl_send_event_options_get(struct team *team,
					   struct list_head *sel_opt_inst_list)
{
	return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
					sel_opt_inst_list);
}

static int team_nl_send_event_port_get(struct team *team,
					struct team_port *port)
{
	return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
					  port);
}

static int __init team_nl_init(void)
{
	return genl_register_family(&team_nl_family);
}

static void team_nl_fini(void)
{
	genl_unregister_family(&team_nl_family);
}

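/* Gather the option instances whose changed flag is set and broadcast them to
 * the change-event multicast group; -ESRCH (no listeners) is not an error.
 */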
static void __team_options_change_check(struct team *team)
{
	int err;
	struct team_option_inst *opt_inst;
	LIST_HEAD(sel_opt_inst_list);

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->changed)
			list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	}
	err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
	if (err && err != -ESRCH)
		netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
			    err);
}

/* rtnl lock is held */
static void __team_port_change_send(struct team_port *port, bool linkup)
{
	int err;

	port->changed = true;
	port->state.linkup = linkup;
	team_refresh_port_linkup(port);
	if (linkup) {
		struct ethtool_link_ksettings ecmd;

		err = __ethtool_get_link_ksettings(port->dev, &ecmd);
		if (!err) {
			port->state.speed = ecmd.base.speed;
			port->state.duplex = ecmd.base.duplex;
			goto send_event;
		}
	}
	port->state.speed = 0;
	port->state.duplex = 0;

send_event:
	err = team_nl_send_event_port_get(port->team, port);
	if (err && err != -ESRCH)
		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
			    port->dev->name, err);
}

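/* Reflect the aggregated port link state in the team device carrier, unless
 * user-space has taken over carrier control (user_carrier_enabled).
 */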
static void __team_carrier_check(struct team *team)
{
	struct team_port *port;
	bool team_linkup;

	if (team->user_carrier_enabled)
		return;

	team_linkup = false;
	list_for_each_entry(port, &team->port_list, list) {
		if (port->linkup) {
			team_linkup = true;
			break;
		}
	}

	if (team_linkup)
		netif_carrier_on(team->dev);
	else
		netif_carrier_off(team->dev);
}

static void __team_port_change_check(struct team_port *port, bool linkup)
{
	if (port->state.linkup != linkup)
		__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}

static void __team_port_change_port_added(struct team_port *port, bool linkup)
{
	__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}

static void __team_port_change_port_removed(struct team_port *port)
{
	port->removed = true;
	__team_port_change_send(port, false);
	__team_carrier_check(port->team);
}

static void team_port_change_check(struct team_port *port, bool linkup)
{
	struct team *team = port->team;

	mutex_lock(&team->lock);
	__team_port_change_check(port, linkup);
	mutex_unlock(&team->lock);
}

/************************************
 * Net device notifier event handler
 ************************************/

static int team_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct team_port *port;

	port = team_port_get_rtnl(dev);
	if (!port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (netif_carrier_ok(dev))
			team_port_change_check(port, true);
		break;
	case NETDEV_DOWN:
		team_port_change_check(port, false);
		break;
	case NETDEV_CHANGE:
		if (netif_running(port->dev))
			team_port_change_check(port,
					       !!netif_oper_up(port->dev));
		break;
	case NETDEV_UNREGISTER:
		team_del_slave(port->team->dev, dev);
		break;
	case NETDEV_FEAT_CHANGE:
		team_compute_features(port->team);
		break;
	case NETDEV_PRECHANGEMTU:
		/* Forbid changing the MTU of an underlying port device */
		if (!port->team->port_mtu_change_allowed)
			return NOTIFY_BAD;
		break;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid changing the type of an underlying port device */
		return NOTIFY_BAD;
	case NETDEV_RESEND_IGMP:
		/* Propagate to master device */
		call_netdevice_notifiers(event, port->team->dev);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block team_notifier_block __read_mostly = {
	.notifier_call = team_device_event,
};

/***********************
 * Module init and exit
 ***********************/

static int __init team_module_init(void)
{
	int err;

	register_netdevice_notifier(&team_notifier_block);

	err = rtnl_link_register(&team_link_ops);
	if (err)
		goto err_rtnl_reg;

	err = team_nl_init();
	if (err)
		goto err_nl_init;

	return 0;

err_nl_init:
	rtnl_link_unregister(&team_link_ops);

err_rtnl_reg:
	unregister_netdevice_notifier(&team_notifier_block);

	return err;
}

static void __exit team_module_exit(void)
{
	team_nl_fini();
	rtnl_link_unregister(&team_link_ops);
	unregister_netdevice_notifier(&team_notifier_block);
}

module_init(team_module_init);
module_exit(team_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Ethernet team device driver");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);