/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <generated/utsrelease.h>
#include <linux/if_team.h>

#define DRV_NAME "team"

#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
static struct team_port *team_port_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
	struct team_port *port = rtnl_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}
/*
 * Since the ability to change the device address of an open port device is
 * tested in team_port_add, this function can be called without checking the
 * return value.
 */
static int __set_port_dev_addr(struct net_device *port_dev,
			       const unsigned char *dev_addr)
{
	struct sockaddr addr;

	memcpy(addr.sa_data, dev_addr, port_dev->addr_len);
	addr.sa_family = port_dev->type;
	return dev_set_mac_address(port_dev, &addr);
}
static int team_port_set_orig_dev_addr(struct team_port *port)
{
	return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}

static int team_port_set_team_dev_addr(struct team *team,
				       struct team_port *port)
{
	return __set_port_dev_addr(port->dev, team->dev->dev_addr);
}

int team_modeop_port_enter(struct team *team, struct team_port *port)
{
	return team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_enter);

void team_modeop_port_change_dev_addr(struct team *team,
				      struct team_port *port)
{
	team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_change_dev_addr);
static void team_refresh_port_linkup(struct team_port *port)
{
	port->linkup = port->user.linkup_enabled ? port->user.linkup :
						   port->state.linkup;
}
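/*
 * Note: the effective port->linkup above prefers the user-provided link
 * state (driven by the "user_linkup"/"user_linkup_enabled" options later
 * in this file) and falls back to the state reported by the underlying
 * port device.
 */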
struct team_option_inst { /* One for each option instance */
	struct list_head list;
	struct list_head tmp_list;
	struct team_option *option;
	struct team_option_inst_info info;
	bool changed;
	bool removed;
};
static struct team_option *__team_find_option(struct team *team,
					      const char *opt_name)
{
	struct team_option *option;

	list_for_each_entry(option, &team->option_list, list) {
		if (strcmp(option->name, opt_name) == 0)
			return option;
	}
	return NULL;
}

static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
	list_del(&opt_inst->list);
	kfree(opt_inst);
}
static void __team_option_inst_del_option(struct team *team,
					  struct team_option *option)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option == option)
			__team_option_inst_del(opt_inst);
	}
}
static int __team_option_inst_add(struct team *team, struct team_option *option,
				  struct team_port *port)
{
	struct team_option_inst *opt_inst;
	unsigned int array_size;
	unsigned int i;
	int err;

	array_size = option->array_size;
	if (!array_size)
		array_size = 1; /* No array but still need one instance */

	for (i = 0; i < array_size; i++) {
		opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
		if (!opt_inst)
			return -ENOMEM;
		opt_inst->option = option;
		opt_inst->info.port = port;
		opt_inst->info.array_index = i;
		opt_inst->changed = true;
		opt_inst->removed = false;
		list_add_tail(&opt_inst->list, &team->option_inst_list);
		if (option->init) {
			err = option->init(team, &opt_inst->info);
			if (err)
				return err;
		}
	}
	return 0;
}
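/*
 * Note: for array options (option->array_size != 0) one instance is
 * created per array index, so a single struct team_option can expand into
 * several team_option_inst entries on option_inst_list.
 */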
static int __team_option_inst_add_option(struct team *team,
					 struct team_option *option)
{
	struct team_port *port;
	int err;

	if (!option->per_port) {
		err = __team_option_inst_add(team, option, NULL);
		if (err)
			goto inst_del_option;
	}

	list_for_each_entry(port, &team->port_list, list) {
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_option;
	}
	return 0;

inst_del_option:
	__team_option_inst_del_option(team, option);
	return err;
}
static void __team_option_inst_mark_removed_option(struct team *team,
						   struct team_option *option)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->option == option) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static void __team_option_inst_del_port(struct team *team,
					struct team_port *port)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option->per_port &&
		    opt_inst->info.port == port)
			__team_option_inst_del(opt_inst);
	}
}
static int __team_option_inst_add_port(struct team *team,
				       struct team_port *port)
{
	struct team_option *option;
	int err;

	list_for_each_entry(option, &team->option_list, list) {
		if (!option->per_port)
			continue;
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_port;
	}
	return 0;

inst_del_port:
	__team_option_inst_del_port(team, port);
	return err;
}

static void __team_option_inst_mark_removed_port(struct team *team,
						 struct team_port *port)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->info.port == port) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}
static int __team_options_register(struct team *team,
				   const struct team_option *option,
				   size_t option_count)
{
	int i;
	struct team_option **dst_opts;
	int err;

	dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
			   GFP_KERNEL);
	if (!dst_opts)
		return -ENOMEM;
	for (i = 0; i < option_count; i++, option++) {
		if (__team_find_option(team, option->name)) {
			err = -EEXIST;
			goto alloc_rollback;
		}
		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
		if (!dst_opts[i]) {
			err = -ENOMEM;
			goto alloc_rollback;
		}
	}

	for (i = 0; i < option_count; i++) {
		err = __team_option_inst_add_option(team, dst_opts[i]);
		if (err)
			goto inst_rollback;
		list_add_tail(&dst_opts[i]->list, &team->option_list);
	}

	kfree(dst_opts);
	return 0;

inst_rollback:
	for (i--; i >= 0; i--)
		__team_option_inst_del_option(team, dst_opts[i]);

	i = option_count - 1;
alloc_rollback:
	for (i--; i >= 0; i--)
		kfree(dst_opts[i]);

	kfree(dst_opts);
	return err;
}
static void __team_options_mark_removed(struct team *team,
					const struct team_option *option,
					size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt)
			__team_option_inst_mark_removed_option(team, del_opt);
	}
}

static void __team_options_unregister(struct team *team,
				      const struct team_option *option,
				      size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt) {
			__team_option_inst_del_option(team, del_opt);
			list_del(&del_opt->list);
			kfree(del_opt);
		}
	}
}

static void __team_options_change_check(struct team *team);
int team_options_register(struct team *team,
			  const struct team_option *option,
			  size_t option_count)
{
	int err;

	err = __team_options_register(team, option, option_count);
	if (err)
		return err;
	__team_options_change_check(team);
	return 0;
}
EXPORT_SYMBOL(team_options_register);
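/*
 * For illustration (not part of this file): a mode module would typically
 * register its private options from its init op, roughly along these
 * lines (all names below are made up for the example):
 *
 *	static const struct team_option foo_options[] = {
 *		{
 *			.name	= "foo_threshold",
 *			.type	= TEAM_OPTION_TYPE_U32,
 *			.getter	= foo_threshold_get,
 *			.setter	= foo_threshold_set,
 *		},
 *	};
 *
 *	err = team_options_register(team, foo_options,
 *				    ARRAY_SIZE(foo_options));
 *
 * The in-tree loadbalance mode does something of this shape.
 */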
void team_options_unregister(struct team *team,
			     const struct team_option *option,
			     size_t option_count)
{
	__team_options_mark_removed(team, option, option_count);
	__team_options_change_check(team);
	__team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);
static int team_option_get(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->getter)
		return -EOPNOTSUPP;
	return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->setter)
		return -EOPNOTSUPP;
	return opt_inst->option->setter(team, ctx);
}

void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
	struct team_option_inst *opt_inst;

	opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
	opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
	__team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);
static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

struct team_mode_item {
	struct list_head list;
	const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
	struct team_mode_item *mitem;

	list_for_each_entry(mitem, &mode_list, list) {
		if (strcmp(mitem->mode->kind, kind) == 0)
			return mitem;
	}
	return NULL;
}

static bool is_good_mode_name(const char *name)
{
	while (*name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
			return false;
		name++;
	}
	return true;
}
int team_mode_register(const struct team_mode *mode)
{
	int err = 0;
	struct team_mode_item *mitem;

	if (!is_good_mode_name(mode->kind) ||
	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
		return -EINVAL;

	mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
	if (!mitem)
		return -ENOMEM;

	spin_lock(&mode_list_lock);
	if (__find_mode(mode->kind)) {
		err = -EEXIST;
		kfree(mitem);
		goto unlock;
	}
	mitem->mode = mode;
	list_add_tail(&mitem->list, &mode_list);
unlock:
	spin_unlock(&mode_list_lock);
	return err;
}
EXPORT_SYMBOL(team_mode_register);
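/*
 * For illustration (not part of this file): a mode module registers
 * itself on load, e.g. for a hypothetical mode "foo":
 *
 *	static const struct team_mode foo_mode = {
 *		.kind	= "foo",
 *		.owner	= THIS_MODULE,
 *		.ops	= &foo_mode_ops,
 *	};
 *
 *	static int __init foo_module_init(void)
 *	{
 *		return team_mode_register(&foo_mode);
 *	}
 *
 * team_mode_get() below can then pull in such a module on demand via
 * request_module("team-mode-foo").
 */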
void team_mode_unregister(const struct team_mode *mode)
{
	struct team_mode_item *mitem;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(mode->kind);
	if (mitem) {
		list_del_init(&mitem->list);
		kfree(mitem);
	}
	spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);
static const struct team_mode *team_mode_get(const char *kind)
{
	struct team_mode_item *mitem;
	const struct team_mode *mode = NULL;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(kind);
	if (!mitem) {
		spin_unlock(&mode_list_lock);
		request_module("team-mode-%s", kind);
		spin_lock(&mode_list_lock);
		mitem = __find_mode(kind);
	}
	if (mitem) {
		mode = mitem->mode;
		if (!try_module_get(mode->owner))
			mode = NULL;
	}

	spin_unlock(&mode_list_lock);
	return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
	module_put(mode->owner);
}
static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
					      struct team_port *port,
					      struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
	.kind		= "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
	return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
	team->user_carrier_enabled = false;
	team->mode = &__team_no_mode;
}
static void team_adjust_ops(struct team *team)
{
	/*
	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
	 * correct ops are always set.
	 */

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->transmit)
		team->ops.transmit = team_dummy_transmit;
	else
		team->ops.transmit = team->mode->ops->transmit;

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->receive)
		team->ops.receive = team_dummy_receive;
	else
		team->ops.receive = team->mode->ops->receive;
}
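/*
 * Note: thanks to team_adjust_ops(), team->ops.transmit and
 * team->ops.receive are never NULL, so the hot paths (team_xmit() and
 * team_handle_frame()) can call them without any checks.
 */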
/*
 * We can benefit from the fact that it's ensured no port is present
 * at the time of mode change. Therefore no packets are in flight, so there's
 * no need to set mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
			      const struct team_mode *new_mode)
{
	/* Check if mode was previously set and do cleanup if so */
	if (team_is_mode_set(team)) {
		void (*exit_op)(struct team *team) = team->ops.exit;

		/* Clear ops area so no callback is called any longer */
		memset(&team->ops, 0, sizeof(struct team_mode_ops));
		team_adjust_ops(team);

		if (exit_op)
			exit_op(team);
		team_mode_put(team->mode);
		team_set_no_mode(team);
		/* zero private data area */
		memset(&team->mode_priv, 0,
		       sizeof(struct team) - offsetof(struct team, mode_priv));
	}

	if (!new_mode)
		return 0;

	if (new_mode->ops->init) {
		int err;

		err = new_mode->ops->init(team);
		if (err)
			return err;
	}

	team->mode = new_mode;
	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
	team_adjust_ops(team);

	return 0;
}
static int team_change_mode(struct team *team, const char *kind)
{
	const struct team_mode *new_mode;
	struct net_device *dev = team->dev;
	int err;

	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "No ports can be present during mode change\n");
		return -EBUSY;
	}

	if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
		netdev_err(dev, "Unable to change to the same mode the team is in\n");
		return -EINVAL;
	}

	new_mode = team_mode_get(kind);
	if (!new_mode) {
		netdev_err(dev, "Mode \"%s\" not found\n", kind);
		return -EINVAL;
	}

	err = __team_change_mode(team, new_mode);
	if (err) {
		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
		team_mode_put(new_mode);
		return err;
	}

	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
	return 0;
}
/*********************
 * Peers notification
 *********************/
static void team_notify_peers_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, notify_peers.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->notify_peers.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->notify_peers.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->notify_peers.dw,
				      msecs_to_jiffies(team->notify_peers.interval));
}

static void team_notify_peers(struct team *team)
{
	if (!team->notify_peers.count || !netif_running(team->dev))
		return;
	atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
	schedule_delayed_work(&team->notify_peers.dw, 0);
}
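/*
 * Note: NETDEV_NOTIFY_PEERS makes the core announce the device to its
 * peers (e.g. by gratuitous ARP), which is useful after a fail-over;
 * count_pending plus the delayed work spread "count" notifications
 * "interval" milliseconds apart.
 */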
/*******************************
 * Send multicast group rejoins
 *******************************/

static void team_mcast_rejoin_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, mcast_rejoin.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->mcast_rejoin.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->mcast_rejoin.dw,
				      msecs_to_jiffies(team->mcast_rejoin.interval));
}

static void team_mcast_rejoin(struct team *team)
{
	if (!team->mcast_rejoin.count || !netif_running(team->dev))
		return;
	atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
	schedule_delayed_work(&team->mcast_rejoin.dw, 0);
}

static void team_mcast_rejoin_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
}

static void team_mcast_rejoin_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->mcast_rejoin.dw);
}
/************************
 * Rx path frame handler
 ************************/

/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct team_port *port;
	struct team *team;
	rx_handler_result_t res;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	port = team_port_get_rcu(skb->dev);
	team = port->team;
	if (!team_port_enabled(port)) {
		/* allow exact match delivery for disabled ports */
		res = RX_HANDLER_EXACT;
	} else {
		res = team->ops.receive(team, port, skb);
	}
	if (res == RX_HANDLER_ANOTHER) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->rx_packets++;
		pcpu_stats->rx_bytes += skb->len;
		if (skb->pkt_type == PACKET_MULTICAST)
			pcpu_stats->rx_multicast++;
		u64_stats_update_end(&pcpu_stats->syncp);

		skb->dev = team->dev;
	} else {
		this_cpu_inc(team->pcpu_stats->rx_dropped);
	}

	return res;
}
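/*
 * Note: RX_HANDLER_ANOTHER returned by a mode means the skb was accepted
 * for the team device; skb->dev is switched to the team device above and
 * the core re-runs protocol delivery against it.
 */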
/*************************************
 * Multiqueue Tx port select override
 *************************************/

static int team_queue_override_init(struct team *team)
{
	struct list_head *listarr;
	unsigned int queue_cnt = team->dev->num_tx_queues - 1;
	unsigned int i;

	if (!queue_cnt)
		return 0;
	listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL);
	if (!listarr)
		return -ENOMEM;
	team->qom_lists = listarr;
	for (i = 0; i < queue_cnt; i++)
		INIT_LIST_HEAD(listarr++);
	return 0;
}

static void team_queue_override_fini(struct team *team)
{
	kfree(team->qom_lists);
}

static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
{
	return &team->qom_lists[queue_id - 1];
}
/*
 * note: already called with rcu_read_lock
 */
static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
{
	struct list_head *qom_list;
	struct team_port *port;

	if (!team->queue_override_enabled || !skb->queue_mapping)
		return false;
	qom_list = __team_get_qom_list(team, skb->queue_mapping);
	list_for_each_entry_rcu(port, qom_list, qom_list) {
		if (!team_dev_queue_xmit(team, port, skb))
			return true;
	}
	return false;
}
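/*
 * Note: qom_lists is an array of per-queue-id port lists (queue id N is
 * stored at index N - 1, see __team_get_qom_list()), each kept sorted by
 * port priority, so the first port that accepts the skb wins.
 */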
static void __team_queue_override_port_del(struct team *team,
					   struct team_port *port)
{
	if (!port->queue_id)
		return;
	list_del_rcu(&port->qom_list);
}

static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
						      struct team_port *cur)
{
	if (port->priority < cur->priority)
		return true;
	if (port->priority > cur->priority)
		return false;
	if (port->index < cur->index)
		return true;
	return false;
}

static void __team_queue_override_port_add(struct team *team,
					   struct team_port *port)
{
	struct team_port *cur;
	struct list_head *qom_list;
	struct list_head *node;

	if (!port->queue_id)
		return;
	qom_list = __team_get_qom_list(team, port->queue_id);
	node = qom_list;
	list_for_each_entry(cur, qom_list, qom_list) {
		if (team_queue_override_port_has_gt_prio_than(port, cur))
			break;
		node = &cur->qom_list;
	}
	list_add_tail_rcu(&port->qom_list, node);
}
static void __team_queue_override_enabled_check(struct team *team)
{
	struct team_port *port;
	bool enabled = false;

	list_for_each_entry(port, &team->port_list, list) {
		if (port->queue_id) {
			enabled = true;
			break;
		}
	}
	if (enabled == team->queue_override_enabled)
		return;
	netdev_dbg(team->dev, "%s queue override\n",
		   enabled ? "Enabling" : "Disabling");
	team->queue_override_enabled = enabled;
}

static void team_queue_override_port_prio_changed(struct team *team,
						  struct team_port *port)
{
	if (!port->queue_id || team_port_enabled(port))
		return;
	__team_queue_override_port_del(team, port);
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_change_queue_id(struct team *team,
						     struct team_port *port,
						     u16 new_queue_id)
{
	if (team_port_enabled(port)) {
		__team_queue_override_port_del(team, port);
		port->queue_id = new_queue_id;
		__team_queue_override_port_add(team, port);
		__team_queue_override_enabled_check(team);
	} else {
		port->queue_id = new_queue_id;
	}
}

static void team_queue_override_port_add(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_del(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_del(team, port);
	__team_queue_override_enabled_check(team);
}
static bool team_port_find(const struct team *team,
			   const struct team_port *port)
{
	struct team_port *cur;

	list_for_each_entry(cur, &team->port_list, list)
		if (cur == port)
			return true;
	return false;
}
/*
 * Enable/disable port by adding to enabled port hashlist and setting
 * port->index (Might be racy so reader could see incorrect ifindex when
 * processing a flying packet, but that is not a problem). Write guarded
 * by team->lock.
 */
static void team_port_enable(struct team *team,
			     struct team_port *port)
{
	if (team_port_enabled(port))
		return;
	port->index = team->en_port_count++;
	hlist_add_head_rcu(&port->hlist,
			   team_port_index_hash(team, port->index));
	team_adjust_ops(team);
	team_queue_override_port_add(team, port);
	if (team->ops.port_enabled)
		team->ops.port_enabled(team, port);
	team_notify_peers(team);
	team_mcast_rejoin(team);
}
static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
	int i;
	struct team_port *port;

	for (i = rm_index + 1; i < team->en_port_count; i++) {
		port = team_get_port_by_index(team, i);
		hlist_del_rcu(&port->hlist);
		port->index--;
		hlist_add_head_rcu(&port->hlist,
				   team_port_index_hash(team, port->index));
	}
}

static void team_port_disable(struct team *team,
			      struct team_port *port)
{
	if (!team_port_enabled(port))
		return;
	if (team->ops.port_disabled)
		team->ops.port_disabled(team, port);
	hlist_del_rcu(&port->hlist);
	__reconstruct_port_hlist(team, port->index);
	port->index = -1;
	team->en_port_count--;
	team_queue_override_port_del(team, port);
	team_adjust_ops(team);
	team_notify_peers(team);
	team_mcast_rejoin(team);
}
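/*
 * Note: enabled ports occupy indexes 0..en_port_count-1; on disable the
 * hole left by the removed index is compacted by
 * __reconstruct_port_hlist() so modes can keep round-robining over a
 * dense index range.
 */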
#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
			    NETIF_F_HIGHDMA | NETIF_F_LRO)

static void __team_compute_features(struct team *team)
{
	struct team_port *port;
	u32 vlan_features = TEAM_VLAN_FEATURES;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;

	list_for_each_entry(port, &team->port_list, list) {
		vlan_features = netdev_increment_features(vlan_features,
					port->dev->vlan_features,
					TEAM_VLAN_FEATURES);

		dst_release_flag &= port->dev->priv_flags;
		if (port->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = port->dev->hard_header_len;
	}

	team->dev->vlan_features = vlan_features;
	team->dev->hard_header_len = max_hard_header_len;

	flags = team->dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
	team->dev->priv_flags = flags | dst_release_flag;

	netdev_change_features(team->dev);
}
static void team_compute_features(struct team *team)
{
	mutex_lock(&team->lock);
	__team_compute_features(team);
	mutex_unlock(&team->lock);
}

static int team_port_enter(struct team *team, struct team_port *port)
{
	int err = 0;

	dev_hold(team->dev);
	port->dev->priv_flags |= IFF_TEAM_PORT;
	if (team->ops.port_enter) {
		err = team->ops.port_enter(team, port);
		if (err) {
			netdev_err(team->dev, "Device %s failed to enter team mode\n",
				   port->dev->name);
			goto err_port_enter;
		}
	}

	return 0;

err_port_enter:
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
	dev_put(team->dev);

	return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
	if (team->ops.port_leave)
		team->ops.port_leave(team, port);
	port->dev->priv_flags &= ~IFF_TEAM_PORT;
	dev_put(team->dev);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static int team_port_enable_netpoll(struct team *team, struct team_port *port,
				    gfp_t gfp)
{
	struct netpoll *np;
	int err;

	if (!team->dev->npinfo)
		return 0;

	np = kzalloc(sizeof(*np), gfp);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, port->dev, gfp);
	if (err) {
		kfree(np);
		return err;
	}
	port->np = np;
	return err;
}

static void team_port_disable_netpoll(struct team_port *port)
{
	struct netpoll *np = port->np;

	if (!np)
		return;
	port->np = NULL;

	/* Wait for transmitting packets to finish before freeing. */
	synchronize_rcu_bh();
	__netpoll_cleanup(np);
	kfree(np);
}
#else
static int team_port_enable_netpoll(struct team *team, struct team_port *port,
				    gfp_t gfp)
{
	return 0;
}
static void team_port_disable_netpoll(struct team_port *port)
{
}
#endif

static void __team_port_change_port_added(struct team_port *port, bool linkup);
static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev);
static int team_port_add(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;
	int err;

	if (port_dev->flags & IFF_LOOPBACK) {
		netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
			   portname);
		return -EINVAL;
	}

	if (team_port_exists(port_dev)) {
		netdev_err(dev, "Device %s is already a port "
				"of a team device\n", portname);
		return -EBUSY;
	}

	if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
	    vlan_uses_dev(dev)) {
		netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
			   portname);
		return -EPERM;
	}

	err = team_dev_type_check_change(dev, port_dev);
	if (err)
		return err;

	if (port_dev->flags & IFF_UP) {
		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
			   portname);
		return -EBUSY;
	}

	port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
		       GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = port_dev;
	port->team = team;
	INIT_LIST_HEAD(&port->qom_list);

	port->orig.mtu = port_dev->mtu;
	err = dev_set_mtu(port_dev, dev->mtu);
	if (err) {
		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
		goto err_set_mtu;
	}

	memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);

	err = team_port_enter(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to enter team mode\n",
			   portname);
		goto err_port_enter;
	}

	err = dev_open(port_dev);
	if (err) {
		netdev_dbg(dev, "Device %s opening failed\n",
			   portname);
		goto err_dev_open;
	}

	err = vlan_vids_add_by_dev(port_dev, dev);
	if (err) {
		netdev_err(dev, "Failed to add vlan ids to device %s\n",
			   portname);
		goto err_vids_add;
	}

	err = team_port_enable_netpoll(team, port, GFP_KERNEL);
	if (err) {
		netdev_err(dev, "Failed to enable netpoll on device %s\n",
			   portname);
		goto err_enable_netpoll;
	}

	err = netdev_master_upper_dev_link(port_dev, dev);
	if (err) {
		netdev_err(dev, "Device %s failed to set upper link\n",
			   portname);
		goto err_set_upper_link;
	}

	err = netdev_rx_handler_register(port_dev, team_handle_frame,
					 port);
	if (err) {
		netdev_err(dev, "Device %s failed to register rx_handler\n",
			   portname);
		goto err_handler_register;
	}

	err = __team_option_inst_add_port(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to add per-port options\n",
			   portname);
		goto err_option_port_add;
	}

	port->index = -1;
	list_add_tail_rcu(&port->list, &team->port_list);
	team_port_enable(team, port);
	__team_compute_features(team);
	__team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
	__team_options_change_check(team);

	netdev_info(dev, "Port device %s added\n", portname);

	return 0;

err_option_port_add:
	netdev_rx_handler_unregister(port_dev);

err_handler_register:
	netdev_upper_dev_unlink(port_dev, dev);

err_set_upper_link:
	team_port_disable_netpoll(port);

err_enable_netpoll:
	vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
	dev_close(port_dev);

err_dev_open:
	team_port_leave(team, port);
	team_port_set_orig_dev_addr(port);

err_port_enter:
	dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
	kfree(port);

	return err;
}
static void __team_port_change_port_removed(struct team_port *port);

static int team_port_del(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;

	port = team_port_get_rtnl(port_dev);
	if (!port || !team_port_find(team, port)) {
		netdev_err(dev, "Device %s does not act as a port of this team\n",
			   portname);
		return -ENOENT;
	}

	team_port_disable(team, port);
	list_del_rcu(&port->list);
	netdev_rx_handler_unregister(port_dev);
	netdev_upper_dev_unlink(port_dev, dev);
	team_port_disable_netpoll(port);
	vlan_vids_del_by_dev(port_dev, dev);
	dev_uc_unsync(port_dev, dev);
	dev_mc_unsync(port_dev, dev);
	dev_close(port_dev);
	team_port_leave(team, port);

	__team_option_inst_mark_removed_port(team, port);
	__team_options_change_check(team);
	__team_option_inst_del_port(team, port);
	__team_port_change_port_removed(port);

	team_port_set_orig_dev_addr(port);
	dev_set_mtu(port_dev, port->orig.mtu);
	kfree_rcu(port, rcu);
	netdev_info(dev, "Port device %s removed\n", portname);
	__team_compute_features(team);

	return 0;
}
static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	ctx->data.str_val = team->mode->kind;
	return 0;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	return team_change_mode(team, ctx->data.str_val);
}

static int team_notify_peers_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.count;
	return 0;
}

static int team_notify_peers_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->notify_peers.count = ctx->data.u32_val;
	return 0;
}

static int team_notify_peers_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.interval;
	return 0;
}

static int team_notify_peers_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->notify_peers.interval = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.count;
	return 0;
}

static int team_mcast_rejoin_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.count = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.interval;
	return 0;
}

static int team_mcast_rejoin_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.interval = ctx->data.u32_val;
	return 0;
}
static int team_port_en_option_get(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = team_port_enabled(port);
	return 0;
}

static int team_port_en_option_set(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	if (ctx->data.bool_val)
		team_port_enable(team, port);
	else
		team_port_disable(team, port);
	return 0;
}

static int team_user_linkup_option_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup;
	return 0;
}

static void __team_carrier_check(struct team *team);

static int team_user_linkup_option_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup_enabled;
	return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup_enabled = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}
static int team_priority_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.s32_val = port->priority;
	return 0;
}

static int team_priority_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	s32 priority = ctx->data.s32_val;

	if (port->priority == priority)
		return 0;
	port->priority = priority;
	team_queue_override_port_prio_changed(team, port);
	return 0;
}

static int team_queue_id_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.u32_val = port->queue_id;
	return 0;
}

static int team_queue_id_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	u16 new_queue_id = ctx->data.u32_val;

	if (port->queue_id == new_queue_id)
		return 0;
	if (new_queue_id >= team->dev->real_num_tx_queues)
		return -EINVAL;
	team_queue_override_port_change_queue_id(team, port, new_queue_id);
	return 0;
}
static const struct team_option team_options[] = {
	{
		.name = "mode",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = team_mode_option_get,
		.setter = team_mode_option_set,
	},
	{
		.name = "notify_peers_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_count_get,
		.setter = team_notify_peers_count_set,
	},
	{
		.name = "notify_peers_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_interval_get,
		.setter = team_notify_peers_interval_set,
	},
	{
		.name = "mcast_rejoin_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_count_get,
		.setter = team_mcast_rejoin_count_set,
	},
	{
		.name = "mcast_rejoin_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_interval_get,
		.setter = team_mcast_rejoin_interval_set,
	},
	{
		.name = "enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_port_en_option_get,
		.setter = team_port_en_option_set,
	},
	{
		.name = "user_linkup",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_option_get,
		.setter = team_user_linkup_option_set,
	},
	{
		.name = "user_linkup_enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_en_option_get,
		.setter = team_user_linkup_en_option_set,
	},
	{
		.name = "priority",
		.type = TEAM_OPTION_TYPE_S32,
		.per_port = true,
		.getter = team_priority_option_get,
		.setter = team_priority_option_set,
	},
	{
		.name = "queue_id",
		.type = TEAM_OPTION_TYPE_U32,
		.per_port = true,
		.getter = team_queue_id_option_get,
		.setter = team_queue_id_option_set,
	},
};
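/*
 * For illustration (not part of this file): these options are driven over
 * generic netlink, normally through libteam/teamd rather than by hand;
 * e.g. something like "teamnl team0 setoption mode activebackup" (exact
 * tool syntax may differ) ends up in team_nl_cmd_options_set() below.
 */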
static struct lock_class_key team_netdev_xmit_lock_key;
static struct lock_class_key team_netdev_addr_lock_key;
static struct lock_class_key team_tx_busylock_key;

static void team_set_lockdep_class_one(struct net_device *dev,
				       struct netdev_queue *txq,
				       void *unused)
{
	lockdep_set_class(&txq->_xmit_lock, &team_netdev_xmit_lock_key);
}

static void team_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL);
	dev->qdisc_tx_busylock = &team_tx_busylock_key;
}
static int team_init(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	int i;
	int err;

	team->dev = dev;
	mutex_init(&team->lock);
	team_set_no_mode(team);

	team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
	if (!team->pcpu_stats)
		return -ENOMEM;

	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
	INIT_LIST_HEAD(&team->port_list);
	err = team_queue_override_init(team);
	if (err)
		goto err_team_queue_override_init;

	team_adjust_ops(team);

	INIT_LIST_HEAD(&team->option_list);
	INIT_LIST_HEAD(&team->option_inst_list);

	team_notify_peers_init(team);
	team_mcast_rejoin_init(team);

	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
	if (err)
		goto err_options_register;
	netif_carrier_off(dev);

	team_set_lockdep_class(dev);

	return 0;

err_options_register:
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
err_team_queue_override_init:
	free_percpu(team->pcpu_stats);

	return err;
}
static void team_uninit(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct team_port *tmp;

	mutex_lock(&team->lock);
	list_for_each_entry_safe(port, tmp, &team->port_list, list)
		team_port_del(team, port->dev);

	__team_change_mode(team, NULL); /* cleanup */
	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
	mutex_unlock(&team->lock);
}

static void team_destructor(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	free_percpu(team->pcpu_stats);
	free_netdev(dev);
}

static int team_open(struct net_device *dev)
{
	return 0;
}

static int team_close(struct net_device *dev)
{
	return 0;
}
/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success;
	unsigned int len = skb->len;

	tx_success = team_queue_override_transmit(team, skb);
	if (!tx_success)
		tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}
static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	/*
	 * This helper function exists to help dev_pick_tx get the correct
	 * destination queue. Using a helper function skips a call to
	 * skb_tx_hash and will put the skbs in the queue we expect on their
	 * way down to the team driver.
	 */
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

	/*
	 * Save the original txq to restore before passing to the driver
	 */
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;

	if (unlikely(txq >= dev->real_num_tx_queues)) {
		do {
			txq -= dev->real_num_tx_queues;
		} while (txq >= dev->real_num_tx_queues);
	}
	return txq;
}
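/*
 * Note: the queue_mapping saved above is restored from
 * slave_dev_queue_mapping when the skb is handed to a port device (see
 * team_dev_queue_xmit()), so the port driver sees its own queue numbers.
 */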
static void team_change_rx_flags(struct net_device *dev, int change)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int inc;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (change & IFF_PROMISC) {
			inc = dev->flags & IFF_PROMISC ? 1 : -1;
			dev_set_promiscuity(port->dev, inc);
		}
		if (change & IFF_ALLMULTI) {
			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
			dev_set_allmulti(port->dev, inc);
		}
	}
	rcu_read_unlock();
}

static void team_set_rx_mode(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		dev_uc_sync_multiple(port->dev, dev);
		dev_mc_sync_multiple(port->dev, dev);
	}
	rcu_read_unlock();
}
static int team_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		if (team->ops.port_change_dev_addr)
			team->ops.port_change_dev_addr(team, port);
	mutex_unlock(&team->lock);
	return 0;
}
static int team_change_mtu(struct net_device *dev, int new_mtu)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	team->port_mtu_change_allowed = true;
	list_for_each_entry(port, &team->port_list, list) {
		err = dev_set_mtu(port->dev, new_mtu);
		if (err) {
			netdev_err(dev, "Device %s failed to change mtu",
				   port->dev->name);
			goto unwind;
		}
	}
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	dev->mtu = new_mtu;

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		dev_set_mtu(port->dev, dev->mtu);
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	return err;
}
static struct rtnl_link_stats64 *
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct team *team = netdev_priv(dev);
	struct team_pcpu_stats *p;
	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
	u32 rx_dropped = 0, tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(team->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			rx_multicast	= p->rx_multicast;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->multicast	+= rx_multicast;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/*
		 * rx_dropped & tx_dropped are u32, updated
		 * without syncp protection.
		 */
		rx_dropped	+= p->rx_dropped;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped	= rx_dropped;
	stats->tx_dropped	= tx_dropped;
	return stats;
}
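/*
 * Note: the u64_stats_fetch_begin/retry pair above re-reads a CPU's
 * counters until it observes a consistent snapshot on 32-bit hosts;
 * rx_dropped and tx_dropped are plain u32 counters and tolerate the race.
 */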
static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = vlan_vid_add(port->dev, proto, vid);
		if (err)
			goto unwind;
	}
	mutex_unlock(&team->lock);

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return err;
}

static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void team_poll_controller(struct net_device *dev)
{
}

static void __team_netpoll_cleanup(struct team *team)
{
	struct team_port *port;

	list_for_each_entry(port, &team->port_list, list)
		team_port_disable_netpoll(port);
}

static void team_netpoll_cleanup(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	mutex_lock(&team->lock);
	__team_netpoll_cleanup(team);
	mutex_unlock(&team->lock);
}

static int team_netpoll_setup(struct net_device *dev,
			      struct netpoll_info *npifo, gfp_t gfp)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err = 0;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = team_port_enable_netpoll(team, port, gfp);
		if (err) {
			__team_netpoll_cleanup(team);
			break;
		}
	}
	mutex_unlock(&team->lock);
	return err;
}
#endif
static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_add(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}

static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_del(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}
static netdev_features_t team_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct team_port *port;
	struct team *team = netdev_priv(dev);
	netdev_features_t mask;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);
	}
	rcu_read_unlock();
	return features;
}

static int team_change_carrier(struct net_device *dev, bool new_carrier)
{
	struct team *team = netdev_priv(dev);

	team->user_carrier_enabled = true;

	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}
static const struct net_device_ops team_netdev_ops = {
	.ndo_init		= team_init,
	.ndo_uninit		= team_uninit,
	.ndo_open		= team_open,
	.ndo_stop		= team_close,
	.ndo_start_xmit		= team_xmit,
	.ndo_select_queue	= team_select_queue,
	.ndo_change_rx_flags	= team_change_rx_flags,
	.ndo_set_rx_mode	= team_set_rx_mode,
	.ndo_set_mac_address	= team_set_mac_address,
	.ndo_change_mtu		= team_change_mtu,
	.ndo_get_stats64	= team_get_stats64,
	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= team_poll_controller,
	.ndo_netpoll_setup	= team_netpoll_setup,
	.ndo_netpoll_cleanup	= team_netpoll_cleanup,
#endif
	.ndo_add_slave		= team_add_slave,
	.ndo_del_slave		= team_del_slave,
	.ndo_fix_features	= team_fix_features,
	.ndo_change_carrier	= team_change_carrier,
};
/***********************
 * ethtool interface
 ***********************/

static void team_ethtool_get_drvinfo(struct net_device *dev,
				     struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static const struct ethtool_ops team_ethtool_ops = {
	.get_drvinfo		= team_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
};
/***********************
 * rt netlink interface
 ***********************/

static void team_setup_by_port(struct net_device *dev,
			       struct net_device *port_dev)
{
	dev->header_ops	= port_dev->header_ops;
	dev->type = port_dev->type;
	dev->hard_header_len = port_dev->hard_header_len;
	dev->addr_len = port_dev->addr_len;
	dev->mtu = port_dev->mtu;
	memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
	eth_hw_addr_inherit(dev, port_dev);
}
static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	char *portname = port_dev->name;
	int err;

	if (dev->type == port_dev->type)
		return 0;
	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "Device %s is of different type\n", portname);
		return -EBUSY;
	}
	err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
	err = notifier_to_errno(err);
	if (err) {
		netdev_err(dev, "Refused to change device type\n");
		return err;
	}
	dev_uc_flush(dev);
	dev_mc_flush(dev);
	team_setup_by_port(dev, port_dev);
	call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
	return 0;
}
static void team_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &team_netdev_ops;
	dev->ethtool_ops = &team_ethtool_ops;
	dev->destructor	= team_destructor;
	dev->tx_queue_len = 0;
	dev->flags |= IFF_MULTICAST;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);

	/*
	 * Indicate we support unicast address filtering. That way core won't
	 * bring us to promisc mode in case a unicast addr is added.
	 * Let this up to underlay drivers.
	 */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GRO;
	dev->hw_features = TEAM_VLAN_FEATURES |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
	dev->features |= dev->hw_features;
}
static int team_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	int err;

	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	err = register_netdevice(dev);
	if (err)
		return err;

	return 0;
}

static int team_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static unsigned int team_get_num_tx_queues(void)
{
	return TEAM_DEFAULT_NUM_TX_QUEUES;
}

static unsigned int team_get_num_rx_queues(void)
{
	return TEAM_DEFAULT_NUM_RX_QUEUES;
}
static struct rtnl_link_ops team_link_ops __read_mostly = {
	.kind			= DRV_NAME,
	.priv_size		= sizeof(struct team),
	.setup			= team_setup,
	.newlink		= team_newlink,
	.validate		= team_validate,
	.get_num_tx_queues	= team_get_num_tx_queues,
	.get_num_rx_queues	= team_get_num_rx_queues,
};
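/*
 * For illustration (not part of this file): registering team_link_ops is
 * what makes e.g. "ip link add team0 type team" create a team device;
 * ports are normally attached by teamd, although anything that ends up in
 * ndo_add_slave (team_add_slave() above) works.
 */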
/***********************************
 * Generic netlink custom interface
 ***********************************/

static struct genl_family team_nl_family = {
	.id		= GENL_ID_GENERATE,
	.name		= TEAM_GENL_NAME,
	.version	= TEAM_GENL_VERSION,
	.maxattr	= TEAM_ATTR_MAX,
	.netnsok	= true,
};

static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
	[TEAM_ATTR_UNSPEC]		= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_TEAM_IFINDEX]	= { .type = NLA_U32 },
	[TEAM_ATTR_LIST_OPTION]		= { .type = NLA_NESTED },
	[TEAM_ATTR_LIST_PORT]		= { .type = NLA_NESTED },
};
static const struct nla_policy
team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
	[TEAM_ATTR_OPTION_UNSPEC]	= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_OPTION_NAME] = {
		.type = NLA_STRING,
		.len = TEAM_STRING_MAX_LEN,
	},
	[TEAM_ATTR_OPTION_CHANGED]	= { .type = NLA_FLAG },
	[TEAM_ATTR_OPTION_TYPE]		= { .type = NLA_U8 },
	[TEAM_ATTR_OPTION_DATA]		= { .type = NLA_BINARY },
};
static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &team_nl_family, 0, TEAM_CMD_NOOP);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_msg_put;
	}

	genlmsg_end(msg, hdr);

	return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);

err_msg_put:
	nlmsg_free(msg);

	return err;
}
/*
 * Netlink cmd functions should be locked by following two functions.
 * Since dev gets held here, that ensures dev won't disappear in between.
 */
static struct team *team_nl_team_get(struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	int ifindex;
	struct net_device *dev;
	struct team *team;

	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
		return NULL;

	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
	dev = dev_get_by_index(net, ifindex);
	if (!dev || dev->netdev_ops != &team_netdev_ops) {
		if (dev)
			dev_put(dev);
		return NULL;
	}

	team = netdev_priv(dev);
	mutex_lock(&team->lock);
	return team;
}

static void team_nl_team_put(struct team *team)
{
	mutex_unlock(&team->lock);
	dev_put(team->dev);
}
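/*
 * Note: every netlink doit handler below brackets its work with
 * team_nl_team_get()/team_nl_team_put(), holding both a reference to the
 * underlying net_device and team->lock for the duration of the command.
 */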
typedef int team_nl_send_func_t(struct sk_buff *skb,
				struct team *team, u32 portid);

static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
{
	return genlmsg_unicast(dev_net(team->dev), skb, portid);
}
static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
				       struct team_option_inst *opt_inst)
{
	struct nlattr *option_item;
	struct team_option *option = opt_inst->option;
	struct team_option_inst_info *opt_inst_info = &opt_inst->info;
	struct team_gsetter_ctx ctx;
	int err;

	ctx.info = opt_inst_info;
	err = team_option_get(team, opt_inst, &ctx);
	if (err)
		return err;

	option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
	if (!option_item)
		return -EMSGSIZE;

	if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
		goto nest_cancel;
	if (opt_inst_info->port &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
			opt_inst_info->port->dev->ifindex))
		goto nest_cancel;
	if (opt_inst->option->array_size &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
			opt_inst_info->array_index))
		goto nest_cancel;

	switch (option->type) {
	case TEAM_OPTION_TYPE_U32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
			goto nest_cancel;
		if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_STRING:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
			goto nest_cancel;
		if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
				   ctx.data.str_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BINARY:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
			goto nest_cancel;
		if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
			    ctx.data.bin_val.ptr))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BOOL:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
			goto nest_cancel;
		if (ctx.data.bool_val &&
		    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_S32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
			goto nest_cancel;
		if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
			goto nest_cancel;
		break;
	default:
		BUG();
	}
	if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
		goto nest_cancel;
	if (opt_inst->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
			goto nest_cancel;
		opt_inst->changed = false;
	}
	nla_nest_end(skb, option_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, option_item);
	return -EMSGSIZE;
}
static int __send_and_alloc_skb(struct sk_buff **pskb,
				struct team *team, u32 portid,
				team_nl_send_func_t *send_func)
{
	int err;

	if (*pskb) {
		err = send_func(*pskb, team, portid);
		if (err)
			return err;
	}
	*pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!*pskb)
		return -ENOMEM;
	return 0;
}
static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
				    int flags, team_nl_send_func_t *send_func,
				    struct list_head *sel_opt_inst_list)
{
	struct nlattr *option_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_option_inst *opt_inst;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	opt_inst = list_first_entry(sel_opt_inst_list,
				    struct team_option_inst, tmp_list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_OPTIONS_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
	if (!option_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;
	list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
		err = team_nl_fill_one_option_get(skb, team, opt_inst);
		if (err) {
			if (err == -EMSGSIZE) {
				if (!i)
					goto errout;
				incomplete = true;
				break;
			}
			goto errout;
		}
		i++;
	}

	nla_nest_end(skb, option_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			goto errout;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	genlmsg_cancel(skb, hdr);
	nlmsg_free(skb);
	return err;
}
static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	struct team_option_inst *opt_inst;
	int err;
	LIST_HEAD(sel_opt_inst_list);

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	list_for_each_entry(opt_inst, &team->option_inst_list, list)
		list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
				       NLM_F_ACK, team_nl_send_unicast,
				       &sel_opt_inst_list);

	team_nl_team_put(team);

	return err;
}

static int team_nl_send_event_options_get(struct team *team,
					  struct list_head *sel_opt_inst_list);
static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err = 0;
	int i;
	struct nlattr *nl_option;
	LIST_HEAD(opt_inst_list);

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
		err = -EINVAL;
		goto team_put;
	}

	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
		struct nlattr *attr;
		struct nlattr *attr_data;
		enum team_option_type opt_type;
		int opt_port_ifindex = 0; /* != 0 for per-port options */
		u32 opt_array_index = 0;
		bool opt_is_array = false;
		struct team_option_inst *opt_inst;
		char *opt_name;
		bool opt_found = false;

		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
			err = -EINVAL;
			goto team_put;
		}
		err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
				       nl_option, team_nl_option_policy);
		if (err)
			goto team_put;
		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
			err = -EINVAL;
			goto team_put;
		}
		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
		case NLA_U32:
			opt_type = TEAM_OPTION_TYPE_U32;
			break;
		case NLA_STRING:
			opt_type = TEAM_OPTION_TYPE_STRING;
			break;
		case NLA_BINARY:
			opt_type = TEAM_OPTION_TYPE_BINARY;
			break;
		case NLA_FLAG:
			opt_type = TEAM_OPTION_TYPE_BOOL;
			break;
		case NLA_S32:
			opt_type = TEAM_OPTION_TYPE_S32;
			break;
		default:
			goto team_put;
		}

		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
			err = -EINVAL;
			goto team_put;
		}

		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
		attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
		if (attr)
			opt_port_ifindex = nla_get_u32(attr);

		attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
		if (attr) {
			opt_is_array = true;
			opt_array_index = nla_get_u32(attr);
		}

		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
			struct team_option *option = opt_inst->option;
			struct team_gsetter_ctx ctx;
			struct team_option_inst_info *opt_inst_info;
			int tmp_ifindex;

			opt_inst_info = &opt_inst->info;
			tmp_ifindex = opt_inst_info->port ?
				      opt_inst_info->port->dev->ifindex : 0;
			if (option->type != opt_type ||
			    strcmp(option->name, opt_name) ||
			    tmp_ifindex != opt_port_ifindex ||
			    (option->array_size && !opt_is_array) ||
			    opt_inst_info->array_index != opt_array_index)
				continue;
			opt_found = true;
			ctx.info = opt_inst_info;
			switch (opt_type) {
			case TEAM_OPTION_TYPE_U32:
				ctx.data.u32_val = nla_get_u32(attr_data);
				break;
			case TEAM_OPTION_TYPE_STRING:
				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
					err = -EINVAL;
					goto team_put;
				}
				ctx.data.str_val = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BINARY:
				ctx.data.bin_val.len = nla_len(attr_data);
				ctx.data.bin_val.ptr = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BOOL:
				ctx.data.bool_val = attr_data ? true : false;
				break;
			case TEAM_OPTION_TYPE_S32:
				ctx.data.s32_val = nla_get_s32(attr_data);
				break;
			default:
				BUG();
			}
			err = team_option_set(team, opt_inst, &ctx);
			if (err)
				goto team_put;
			opt_inst->changed = true;
			list_add(&opt_inst->tmp_list, &opt_inst_list);
		}
		if (!opt_found) {
			err = -ENOENT;
			goto team_put;
		}
	}

	err = team_nl_send_event_options_get(team, &opt_inst_list);

team_put:
	team_nl_team_put(team);

	return err;
}
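
/* For illustration only (not part of the driver): the nesting the handler
 * above expects, sketched with libnl-3 on top of the socket setup from the
 * earlier sketch. Setting the real "mode" option to "activebackup" would
 * look roughly like this (error handling omitted):
 *
 *	struct nlattr *list, *item;
 *
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0,
 *		    NLM_F_REQUEST, TEAM_CMD_OPTIONS_SET, TEAM_GENL_VERSION);
 *	nla_put_u32(msg, TEAM_ATTR_TEAM_IFINDEX, ifindex);
 *	list = nla_nest_start(msg, TEAM_ATTR_LIST_OPTION);
 *	item = nla_nest_start(msg, TEAM_ATTR_ITEM_OPTION);
 *	nla_put_string(msg, TEAM_ATTR_OPTION_NAME, "mode");
 *	nla_put_u8(msg, TEAM_ATTR_OPTION_TYPE, NLA_STRING);
 *	nla_put_string(msg, TEAM_ATTR_OPTION_DATA, "activebackup");
 *	nla_nest_end(msg, item);
 *	nla_nest_end(msg, list);
 *	nl_send_auto(sk, msg);
 */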
static int team_nl_fill_one_port_get(struct sk_buff *skb,
				     struct team_port *port)
{
	struct nlattr *port_item;

	port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
	if (!port_item)
		goto nest_cancel;
	if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
		goto nest_cancel;
	if (port->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
			goto nest_cancel;
		port->changed = false;
	}
	if ((port->removed &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
	    (port->state.linkup &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
	    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
	    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
		goto nest_cancel;
	nla_nest_end(skb, port_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, port_item);
	return -EMSGSIZE;
}
static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
				      int flags, team_nl_send_func_t *send_func,
				      struct team_port *one_port)
{
	struct nlattr *port_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_port *port;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	port = list_first_entry_or_null(&team->port_list,
					struct team_port, list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_PORT_LIST_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
	if (!port_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;

	/* If one port is selected, caller wants to send port list containing
	 * only this port. Otherwise go through all listed ports and send all
	 * of them.
	 */
	if (one_port) {
		err = team_nl_fill_one_port_get(skb, one_port);
		if (err)
			goto errout;
	} else if (port) {
		list_for_each_entry_from(port, &team->port_list, list) {
			err = team_nl_fill_one_port_get(skb, port);
			if (err) {
				if (err == -EMSGSIZE) {
					if (!i)
						goto errout;
					incomplete = true;
					break;
				}
				goto errout;
			}
			i++;
		}
	}

	nla_nest_end(skb, port_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			goto errout;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	genlmsg_cancel(skb, hdr);
	nlmsg_free(skb);
	return err;
}
static int team_nl_cmd_port_list_get(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
					 NLM_F_ACK, team_nl_send_unicast, NULL);

	team_nl_team_put(team);

	return err;
}
static struct genl_ops team_nl_ops[] = {
	{
		.cmd = TEAM_CMD_NOOP,
		.doit = team_nl_cmd_noop,
		.policy = team_nl_policy,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_SET,
		.doit = team_nl_cmd_options_set,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_GET,
		.doit = team_nl_cmd_options_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_PORT_LIST_GET,
		.doit = team_nl_cmd_port_list_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static struct genl_multicast_group team_change_event_mcgrp = {
	.name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
};
static int team_nl_send_multicast(struct sk_buff *skb,
				  struct team *team, u32 portid)
{
	return genlmsg_multicast_netns(dev_net(team->dev), skb, 0,
				       team_change_event_mcgrp.id, GFP_KERNEL);
}
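
/* genlmsg_multicast_netns() returns -ESRCH when nobody is listening on the
 * event multicast group; the change checkers below treat that case as
 * harmless and only warn for other errors.
 */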
static int team_nl_send_event_options_get(struct team *team,
					  struct list_head *sel_opt_inst_list)
{
	return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
					sel_opt_inst_list);
}

static int team_nl_send_event_port_get(struct team *team,
				       struct team_port *port)
{
	return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
					  port);
}
static int team_nl_init(void)
{
	int err;

	err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
					    ARRAY_SIZE(team_nl_ops));
	if (err)
		return err;

	err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
	if (err)
		goto err_change_event_grp_reg;

	return 0;

err_change_event_grp_reg:
	genl_unregister_family(&team_nl_family);

	return err;
}

static void team_nl_fini(void)
{
	genl_unregister_family(&team_nl_family);
}
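
/* On this kernel generation the multicast group must be registered
 * separately after the family itself; if that fails, the whole family is
 * unregistered again so that team_nl_init() is all-or-nothing.
 */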
/******************
 * Change checkers
 ******************/

static void __team_options_change_check(struct team *team)
{
	int err;
	struct team_option_inst *opt_inst;
	LIST_HEAD(sel_opt_inst_list);

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->changed)
			list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	}
	err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
	if (err && err != -ESRCH)
		netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
			    err);
}
/* rtnl lock is held */
static void __team_port_change_send(struct team_port *port, bool linkup)
{
	int err;

	port->changed = true;
	port->state.linkup = linkup;
	team_refresh_port_linkup(port);
	if (linkup) {
		struct ethtool_cmd ecmd;

		err = __ethtool_get_settings(port->dev, &ecmd);
		if (!err) {
			port->state.speed = ethtool_cmd_speed(&ecmd);
			port->state.duplex = ecmd.duplex;
			goto send_event;
		}
	}
	port->state.speed = 0;
	port->state.duplex = 0;

send_event:
	err = team_nl_send_event_port_get(port->team, port);
	if (err && err != -ESRCH)
		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
			    port->dev->name, err);
}
static void __team_carrier_check(struct team *team)
{
	struct team_port *port;
	bool team_linkup;

	if (team->user_carrier_enabled)
		return;

	team_linkup = false;
	list_for_each_entry(port, &team->port_list, list) {
		if (port->linkup) {
			team_linkup = true;
			break;
		}
	}

	if (team_linkup)
		netif_carrier_on(team->dev);
	else
		netif_carrier_off(team->dev);
}
static void __team_port_change_check(struct team_port *port, bool linkup)
{
	if (port->state.linkup != linkup)
		__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}

static void __team_port_change_port_added(struct team_port *port, bool linkup)
{
	__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}

static void __team_port_change_port_removed(struct team_port *port)
{
	port->removed = true;
	__team_port_change_send(port, false);
	__team_carrier_check(port->team);
}

static void team_port_change_check(struct team_port *port, bool linkup)
{
	struct team *team = port->team;

	mutex_lock(&team->lock);
	__team_port_change_check(port, linkup);
	mutex_unlock(&team->lock);
}
/************************************
 * Net device notifier event handler
 ************************************/

static int team_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct team_port *port;

	port = team_port_get_rtnl(dev);
	if (!port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (netif_carrier_ok(dev))
			team_port_change_check(port, true);
		break;
	case NETDEV_DOWN:
		team_port_change_check(port, false);
		break;
	case NETDEV_CHANGE:
		if (netif_running(port->dev))
			team_port_change_check(port,
					       !!netif_carrier_ok(port->dev));
		break;
	case NETDEV_UNREGISTER:
		team_del_slave(port->team->dev, dev);
		break;
	case NETDEV_FEAT_CHANGE:
		team_compute_features(port->team);
		break;
	case NETDEV_CHANGEMTU:
		/* Forbid changing mtu of underlying device */
		if (!port->team->port_mtu_change_allowed)
			return NOTIFY_BAD;
		break;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid changing type of underlying device */
		return NOTIFY_BAD;
	case NETDEV_RESEND_IGMP:
		/* Propagate to master device */
		call_netdevice_notifiers(event, port->team->dev);
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block team_notifier_block __read_mostly = {
	.notifier_call = team_device_event,
};
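
/* The notifier fires for every net device in the system; team_port_get_rtnl()
 * returns non-NULL only for devices that are team ports, so events on
 * unrelated devices fall through untouched.
 */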
/***********************
 * Module init and exit
 ***********************/

static int __init team_module_init(void)
{
	int err;

	register_netdevice_notifier(&team_notifier_block);

	err = rtnl_link_register(&team_link_ops);
	if (err)
		goto err_rtnl_reg;

	err = team_nl_init();
	if (err)
		goto err_nl_init;

	return 0;

err_nl_init:
	rtnl_link_unregister(&team_link_ops);

err_rtnl_reg:
	unregister_netdevice_notifier(&team_notifier_block);

	return err;
}

static void __exit team_module_exit(void)
{
	team_nl_fini();
	rtnl_link_unregister(&team_link_ops);
	unregister_netdevice_notifier(&team_notifier_block);
}

module_init(team_module_init);
module_exit(team_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Ethernet team device driver");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);