/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
#include <linux/if_team.h>

#define DRV_NAME "team"


/**********
 * Helpers
 **********/

#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
static struct team_port *team_port_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
	struct team_port *port = rtnl_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}
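/*
 * Note: team_port_get_rcu() is meant for fast-path callers that run under
 * rcu_read_lock() (such as the rx handler below), while team_port_get_rtnl()
 * is for control-path callers that hold RTNL. An illustrative caller:
 *
 *	rcu_read_lock();
 *	port = team_port_get_rcu(skb->dev);
 *	if (port)
 *		...
 *	rcu_read_unlock();
 */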
/*
 * Since the ability to change the device address of an open port device is
 * tested in team_port_add, this function can be called without checking the
 * return value.
 */
static int __set_port_dev_addr(struct net_device *port_dev,
			       const unsigned char *dev_addr)
{
	struct sockaddr addr;

	memcpy(addr.sa_data, dev_addr, port_dev->addr_len);
	addr.sa_family = port_dev->type;
	return dev_set_mac_address(port_dev, &addr);
}

static int team_port_set_orig_dev_addr(struct team_port *port)
{
	return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}

static int team_port_set_team_dev_addr(struct team *team,
				       struct team_port *port)
{
	return __set_port_dev_addr(port->dev, team->dev->dev_addr);
}

int team_modeop_port_enter(struct team *team, struct team_port *port)
{
	return team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_enter);

void team_modeop_port_change_dev_addr(struct team *team,
				      struct team_port *port)
{
	team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_change_dev_addr);
static void team_lower_state_changed(struct team_port *port)
{
	struct netdev_lag_lower_state_info info;

	info.link_up = port->linkup;
	info.tx_enabled = team_port_enabled(port);
	netdev_lower_state_changed(port->dev, &info);
}

static void team_refresh_port_linkup(struct team_port *port)
{
	bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
						      port->state.linkup;

	if (port->linkup != new_linkup) {
		port->linkup = new_linkup;
		team_lower_state_changed(port);
	}
}
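/*
 * Note: the effective per-port link state prefers what userspace configured:
 * when user.linkup_enabled is set, user.linkup wins; otherwise the state
 * reported by the port device (state.linkup) is used. For example, a port
 * whose carrier is down can still be treated as up if userspace forces
 * user.linkup_enabled together with user.linkup.
 */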
/*******************
 * Options handling
 *******************/

struct team_option_inst { /* One for each option instance */
	struct list_head list;
	struct list_head tmp_list;
	struct team_option *option;
	struct team_option_inst_info info;
	bool changed;
	bool removed;
};

static struct team_option *__team_find_option(struct team *team,
					      const char *opt_name)
{
	struct team_option *option;

	list_for_each_entry(option, &team->option_list, list) {
		if (strcmp(option->name, opt_name) == 0)
			return option;
	}
	return NULL;
}
static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
	list_del(&opt_inst->list);
	kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
					  struct team_option *option)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option == option)
			__team_option_inst_del(opt_inst);
	}
}
static int __team_option_inst_add(struct team *team, struct team_option *option,
				  struct team_port *port)
{
	struct team_option_inst *opt_inst;
	unsigned int array_size;
	unsigned int i;
	int err;

	array_size = option->array_size;
	if (!array_size)
		array_size = 1; /* No array but still need one instance */

	for (i = 0; i < array_size; i++) {
		opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
		if (!opt_inst)
			return -ENOMEM;
		opt_inst->option = option;
		opt_inst->info.port = port;
		opt_inst->info.array_index = i;
		opt_inst->changed = true;
		opt_inst->removed = false;
		list_add_tail(&opt_inst->list, &team->option_inst_list);
		if (option->init) {
			err = option->init(team, &opt_inst->info);
			if (err)
				return err;
		}
	}
	return 0;
}
static int __team_option_inst_add_option(struct team *team,
					 struct team_option *option)
{
	int err;

	if (!option->per_port) {
		err = __team_option_inst_add(team, option, NULL);
		if (err)
			goto inst_del_option;
	}
	return 0;

inst_del_option:
	__team_option_inst_del_option(team, option);
	return err;
}
static void __team_option_inst_mark_removed_option(struct team *team,
						   struct team_option *option)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->option == option) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}
static void __team_option_inst_del_port(struct team *team,
					struct team_port *port)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option->per_port &&
		    opt_inst->info.port == port)
			__team_option_inst_del(opt_inst);
	}
}
static int __team_option_inst_add_port(struct team *team,
				       struct team_port *port)
{
	struct team_option *option;
	int err;

	list_for_each_entry(option, &team->option_list, list) {
		if (!option->per_port)
			continue;
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_port;
	}
	return 0;

inst_del_port:
	__team_option_inst_del_port(team, port);
	return err;
}
static void __team_option_inst_mark_removed_port(struct team *team,
						 struct team_port *port)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->info.port == port) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}
static int __team_options_register(struct team *team,
				   const struct team_option *option,
				   size_t option_count)
{
	int i;
	struct team_option **dst_opts;
	int err;

	dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
			   GFP_KERNEL);
	if (!dst_opts)
		return -ENOMEM;
	for (i = 0; i < option_count; i++, option++) {
		if (__team_find_option(team, option->name)) {
			err = -EEXIST;
			goto alloc_rollback;
		}
		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
		if (!dst_opts[i]) {
			err = -ENOMEM;
			goto alloc_rollback;
		}
	}

	for (i = 0; i < option_count; i++) {
		err = __team_option_inst_add_option(team, dst_opts[i]);
		if (err)
			goto inst_rollback;
		list_add_tail(&dst_opts[i]->list, &team->option_list);
	}

	kfree(dst_opts);
	return 0;

inst_rollback:
	for (i--; i >= 0; i--)
		__team_option_inst_del_option(team, dst_opts[i]);

	i = option_count - 1;
alloc_rollback:
	for (i--; i >= 0; i--)
		kfree(dst_opts[i]);

	kfree(dst_opts);
	return err;
}
static void __team_options_mark_removed(struct team *team,
					const struct team_option *option,
					size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt)
			__team_option_inst_mark_removed_option(team, del_opt);
	}
}

static void __team_options_unregister(struct team *team,
				      const struct team_option *option,
				      size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt) {
			__team_option_inst_del_option(team, del_opt);
			list_del(&del_opt->list);
			kfree(del_opt);
		}
	}
}
static void __team_options_change_check(struct team *team);

int team_options_register(struct team *team,
			  const struct team_option *option,
			  size_t option_count)
{
	int err;

	err = __team_options_register(team, option, option_count);
	if (err)
		return err;
	__team_options_change_check(team);
	return 0;
}
EXPORT_SYMBOL(team_options_register);

void team_options_unregister(struct team *team,
			     const struct team_option *option,
			     size_t option_count)
{
	__team_options_mark_removed(team, option, option_count);
	__team_options_change_check(team);
	__team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);
static int team_option_get(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->getter)
		return -EOPNOTSUPP;
	return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->setter)
		return -EOPNOTSUPP;
	return opt_inst->option->setter(team, ctx);
}

void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
	struct team_option_inst *opt_inst;

	opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
	opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
	__team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);
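/*
 * Illustrative only (hypothetical identifiers): a team mode module typically
 * describes its options in a static array and registers them from its mode
 * init op, roughly like this:
 *
 *	static const struct team_option ab_options[] = {
 *		{
 *			.name	= "activeport",
 *			.type	= TEAM_OPTION_TYPE_U32,
 *			.getter	= ab_active_port_get,
 *			.setter	= ab_active_port_set,
 *		},
 *	};
 *
 *	err = team_options_register(team, ab_options, ARRAY_SIZE(ab_options));
 *
 * team_options_unregister() is then called with the same array on exit.
 */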
/*******************
 * Mode handling
 *******************/

static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

struct team_mode_item {
	struct list_head list;
	const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
	struct team_mode_item *mitem;

	list_for_each_entry(mitem, &mode_list, list) {
		if (strcmp(mitem->mode->kind, kind) == 0)
			return mitem;
	}
	return NULL;
}

static bool is_good_mode_name(const char *name)
{
	while (*name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
			return false;
		name++;
	}
	return true;
}

int team_mode_register(const struct team_mode *mode)
{
	int err = 0;
	struct team_mode_item *mitem;

	if (!is_good_mode_name(mode->kind) ||
	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
		return -EINVAL;

	mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
	if (!mitem)
		return -ENOMEM;

	spin_lock(&mode_list_lock);
	if (__find_mode(mode->kind)) {
		err = -EEXIST;
		kfree(mitem);
		goto unlock;
	}
	mitem->mode = mode;
	list_add_tail(&mitem->list, &mode_list);
unlock:
	spin_unlock(&mode_list_lock);
	return err;
}
EXPORT_SYMBOL(team_mode_register);

void team_mode_unregister(const struct team_mode *mode)
{
	struct team_mode_item *mitem;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(mode->kind);
	if (mitem) {
		list_del_init(&mitem->list);
		kfree(mitem);
	}
	spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);
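/*
 * Illustrative only (hypothetical identifiers): a separate mode module such
 * as team_mode_roundrobin.ko would pair these calls in its module init/exit
 * handlers, roughly:
 *
 *	static const struct team_mode rr_mode = {
 *		.kind	= "roundrobin",
 *		.owner	= THIS_MODULE,
 *		.ops	= &rr_mode_ops,
 *	};
 *
 *	static int __init rr_init_module(void)
 *	{
 *		return team_mode_register(&rr_mode);
 *	}
 *
 *	static void __exit rr_cleanup_module(void)
 *	{
 *		team_mode_unregister(&rr_mode);
 *	}
 *
 * The request_module("team-mode-%s", ...) call below relies on such modules
 * providing a matching MODULE_ALIAS.
 */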
static const struct team_mode *team_mode_get(const char *kind)
{
	struct team_mode_item *mitem;
	const struct team_mode *mode = NULL;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(kind);
	if (!mitem) {
		spin_unlock(&mode_list_lock);
		request_module("team-mode-%s", kind);
		spin_lock(&mode_list_lock);
		mitem = __find_mode(kind);
	}
	if (mitem) {
		mode = mitem->mode;
		if (!try_module_get(mode->owner))
			mode = NULL;
	}

	spin_unlock(&mode_list_lock);
	return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
	module_put(mode->owner);
}
static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
					      struct team_port *port,
					      struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
	.kind		= "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
	return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
	team->user_carrier_enabled = false;
	team->mode = &__team_no_mode;
}
static void team_adjust_ops(struct team *team)
{
	/*
	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
	 * correct ops are always set.
	 */

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->transmit)
		team->ops.transmit = team_dummy_transmit;
	else
		team->ops.transmit = team->mode->ops->transmit;

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->receive)
		team->ops.receive = team_dummy_receive;
	else
		team->ops.receive = team->mode->ops->receive;
}
/*
 * We can benefit from the fact that no port is present at the time of mode
 * change. Therefore no packets are in flight, so there is no need to set the
 * mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
			      const struct team_mode *new_mode)
{
	/* Check if mode was previously set and do cleanup if so */
	if (team_is_mode_set(team)) {
		void (*exit_op)(struct team *team) = team->ops.exit;

		/* Clear ops area so no callback is called any longer */
		memset(&team->ops, 0, sizeof(struct team_mode_ops));
		team_adjust_ops(team);

		if (exit_op)
			exit_op(team);
		team_mode_put(team->mode);
		team_set_no_mode(team);
		/* zero private data area */
		memset(&team->mode_priv, 0,
		       sizeof(struct team) - offsetof(struct team, mode_priv));
	}

	if (!new_mode)
		return 0;

	if (new_mode->ops->init) {
		int err;

		err = new_mode->ops->init(team);
		if (err)
			return err;
	}

	team->mode = new_mode;
	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
	team_adjust_ops(team);

	return 0;
}

static int team_change_mode(struct team *team, const char *kind)
{
	const struct team_mode *new_mode;
	struct net_device *dev = team->dev;
	int err;

	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "No ports can be present during mode change\n");
		return -EBUSY;
	}

	if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
		netdev_err(dev, "Unable to change to the same mode the team is in\n");
		return -EINVAL;
	}

	new_mode = team_mode_get(kind);
	if (!new_mode) {
		netdev_err(dev, "Mode \"%s\" not found\n", kind);
		return -EINVAL;
	}

	err = __team_change_mode(team, new_mode);
	if (err) {
		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
		team_mode_put(new_mode);
		return err;
	}

	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
	return 0;
}
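/*
 * Note: team_change_mode() is normally reached through the "mode" string
 * option defined in team_options[] below (via team_mode_option_set()), which
 * userspace such as teamd/libteam sets over the team generic netlink
 * interface before adding any ports.
 */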
/*********************
 * Peers notification
 *********************/

static void team_notify_peers_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, notify_peers.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->notify_peers.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->notify_peers.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->notify_peers.dw,
				      msecs_to_jiffies(team->notify_peers.interval));
}

static void team_notify_peers(struct team *team)
{
	if (!team->notify_peers.count || !netif_running(team->dev))
		return;
	atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
	schedule_delayed_work(&team->notify_peers.dw, 0);
}

static void team_notify_peers_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
}

static void team_notify_peers_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->notify_peers.dw);
}
/*******************************
 * Send multicast group rejoins
 *******************************/

static void team_mcast_rejoin_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, mcast_rejoin.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->mcast_rejoin.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->mcast_rejoin.dw,
				      msecs_to_jiffies(team->mcast_rejoin.interval));
}

static void team_mcast_rejoin(struct team *team)
{
	if (!team->mcast_rejoin.count || !netif_running(team->dev))
		return;
	atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
	schedule_delayed_work(&team->mcast_rejoin.dw, 0);
}

static void team_mcast_rejoin_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
}

static void team_mcast_rejoin_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->mcast_rejoin.dw);
}
/************************
 * Rx path frame handler
 ************************/

/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct team_port *port;
	struct team *team;
	rx_handler_result_t res;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	port = team_port_get_rcu(skb->dev);
	team = port->team;
	if (!team_port_enabled(port)) {
		/* allow exact match delivery for disabled ports */
		res = RX_HANDLER_EXACT;
	} else {
		res = team->ops.receive(team, port, skb);
	}
	if (res == RX_HANDLER_ANOTHER) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->rx_packets++;
		pcpu_stats->rx_bytes += skb->len;
		if (skb->pkt_type == PACKET_MULTICAST)
			pcpu_stats->rx_multicast++;
		u64_stats_update_end(&pcpu_stats->syncp);

		skb->dev = team->dev;
	} else if (res == RX_HANDLER_EXACT) {
		this_cpu_inc(team->pcpu_stats->rx_nohandler);
	} else {
		this_cpu_inc(team->pcpu_stats->rx_dropped);
	}

	return res;
}
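/*
 * Note: the mode's receive op steers the frame through its return value:
 * RX_HANDLER_ANOTHER re-injects the skb as if it arrived on the team device
 * (skb->dev is switched above), RX_HANDLER_EXACT limits delivery to exact
 * matches on the original port, and RX_HANDLER_CONSUMED means the mode took
 * ownership of the skb.
 */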
/*************************************
 * Multiqueue Tx port select override
 *************************************/

static int team_queue_override_init(struct team *team)
{
	struct list_head *listarr;
	unsigned int queue_cnt = team->dev->num_tx_queues - 1;
	unsigned int i;

	if (!queue_cnt)
		return 0;
	listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL);
	if (!listarr)
		return -ENOMEM;
	team->qom_lists = listarr;
	for (i = 0; i < queue_cnt; i++)
		INIT_LIST_HEAD(listarr++);
	return 0;
}

static void team_queue_override_fini(struct team *team)
{
	kfree(team->qom_lists);
}

static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
{
	return &team->qom_lists[queue_id - 1];
}

/*
 * note: already called with rcu_read_lock
 */
static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
{
	struct list_head *qom_list;
	struct team_port *port;

	if (!team->queue_override_enabled || !skb->queue_mapping)
		return false;
	qom_list = __team_get_qom_list(team, skb->queue_mapping);
	list_for_each_entry_rcu(port, qom_list, qom_list) {
		if (!team_dev_queue_xmit(team, port, skb))
			return true;
	}
	return false;
}

static void __team_queue_override_port_del(struct team *team,
					   struct team_port *port)
{
	if (!port->queue_id)
		return;
	list_del_rcu(&port->qom_list);
}

static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
						      struct team_port *cur)
{
	if (port->priority < cur->priority)
		return true;
	if (port->priority > cur->priority)
		return false;
	if (port->index < cur->index)
		return true;
	return false;
}

static void __team_queue_override_port_add(struct team *team,
					   struct team_port *port)
{
	struct team_port *cur;
	struct list_head *qom_list;
	struct list_head *node;

	if (!port->queue_id)
		return;
	qom_list = __team_get_qom_list(team, port->queue_id);
	node = qom_list;
	list_for_each_entry(cur, qom_list, qom_list) {
		if (team_queue_override_port_has_gt_prio_than(port, cur))
			break;
		node = &cur->qom_list;
	}
	list_add_tail_rcu(&port->qom_list, node);
}
static void __team_queue_override_enabled_check(struct team *team)
{
	struct team_port *port;
	bool enabled = false;

	list_for_each_entry(port, &team->port_list, list) {
		if (port->queue_id) {
			enabled = true;
			break;
		}
	}
	if (enabled == team->queue_override_enabled)
		return;
	netdev_dbg(team->dev, "%s queue override\n",
		   enabled ? "Enabling" : "Disabling");
	team->queue_override_enabled = enabled;
}

static void team_queue_override_port_prio_changed(struct team *team,
						  struct team_port *port)
{
	if (!port->queue_id || team_port_enabled(port))
		return;
	__team_queue_override_port_del(team, port);
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_change_queue_id(struct team *team,
						      struct team_port *port,
						      u16 new_queue_id)
{
	if (team_port_enabled(port)) {
		__team_queue_override_port_del(team, port);
		port->queue_id = new_queue_id;
		__team_queue_override_port_add(team, port);
		__team_queue_override_enabled_check(team);
	} else {
		port->queue_id = new_queue_id;
	}
}

static void team_queue_override_port_add(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_del(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_del(team, port);
	__team_queue_override_enabled_check(team);
}
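/*
 * Note: queue_id 0 means "no override" for a port, which is why
 * __team_get_qom_list() indexes qom_lists[queue_id - 1] and why
 * team_queue_override_init() allocates num_tx_queues - 1 list heads.
 */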
static bool team_port_find(const struct team *team,
			   const struct team_port *port)
{
	struct team_port *cur;

	list_for_each_entry(cur, &team->port_list, list)
		if (cur == port)
			return true;
	return false;
}
/*
 * Enable/disable port by adding to enabled port hashlist and setting
 * port->index (Might be racy so reader could see incorrect ifindex when
 * processing an in-flight packet, but that is not a problem). Write guarded
 * by team->lock.
 */
static void team_port_enable(struct team *team,
			     struct team_port *port)
{
	if (team_port_enabled(port))
		return;
	port->index = team->en_port_count++;
	hlist_add_head_rcu(&port->hlist,
			   team_port_index_hash(team, port->index));
	team_adjust_ops(team);
	team_queue_override_port_add(team, port);
	if (team->ops.port_enabled)
		team->ops.port_enabled(team, port);
	team_notify_peers(team);
	team_mcast_rejoin(team);
	team_lower_state_changed(port);
}
static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
	int i;
	struct team_port *port;

	for (i = rm_index + 1; i < team->en_port_count; i++) {
		port = team_get_port_by_index(team, i);
		hlist_del_rcu(&port->hlist);
		port->index--;
		hlist_add_head_rcu(&port->hlist,
				   team_port_index_hash(team, port->index));
	}
}

static void team_port_disable(struct team *team,
			      struct team_port *port)
{
	if (!team_port_enabled(port))
		return;
	if (team->ops.port_disabled)
		team->ops.port_disabled(team, port);
	hlist_del_rcu(&port->hlist);
	__reconstruct_port_hlist(team, port->index);
	port->index = -1;
	team->en_port_count--;
	team_queue_override_port_del(team, port);
	team_adjust_ops(team);
	team_notify_peers(team);
	team_mcast_rejoin(team);
	team_lower_state_changed(port);
}
#define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
			    NETIF_F_HIGHDMA | NETIF_F_LRO)

#define TEAM_ENC_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
			   NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
993 static void __team_compute_features(struct team
*team
)
995 struct team_port
*port
;
996 u32 vlan_features
= TEAM_VLAN_FEATURES
& NETIF_F_ALL_FOR_ALL
;
997 netdev_features_t enc_features
= TEAM_ENC_FEATURES
;
998 unsigned short max_hard_header_len
= ETH_HLEN
;
999 unsigned int dst_release_flag
= IFF_XMIT_DST_RELEASE
|
1000 IFF_XMIT_DST_RELEASE_PERM
;
1002 list_for_each_entry(port
, &team
->port_list
, list
) {
1003 vlan_features
= netdev_increment_features(vlan_features
,
1004 port
->dev
->vlan_features
,
1005 TEAM_VLAN_FEATURES
);
1007 netdev_increment_features(enc_features
,
1008 port
->dev
->hw_enc_features
,
1012 dst_release_flag
&= port
->dev
->priv_flags
;
1013 if (port
->dev
->hard_header_len
> max_hard_header_len
)
1014 max_hard_header_len
= port
->dev
->hard_header_len
;
1017 team
->dev
->vlan_features
= vlan_features
;
1018 team
->dev
->hw_enc_features
= enc_features
| NETIF_F_GSO_ENCAP_ALL
;
1019 team
->dev
->hard_header_len
= max_hard_header_len
;
1021 team
->dev
->priv_flags
&= ~IFF_XMIT_DST_RELEASE
;
1022 if (dst_release_flag
== (IFF_XMIT_DST_RELEASE
| IFF_XMIT_DST_RELEASE_PERM
))
1023 team
->dev
->priv_flags
|= IFF_XMIT_DST_RELEASE
;
1025 netdev_change_features(team
->dev
);
1028 static void team_compute_features(struct team
*team
)
1030 mutex_lock(&team
->lock
);
1031 __team_compute_features(team
);
1032 mutex_unlock(&team
->lock
);
1035 static int team_port_enter(struct team
*team
, struct team_port
*port
)
1039 dev_hold(team
->dev
);
1040 if (team
->ops
.port_enter
) {
1041 err
= team
->ops
.port_enter(team
, port
);
1043 netdev_err(team
->dev
, "Device %s failed to enter team mode\n",
1045 goto err_port_enter
;
1057 static void team_port_leave(struct team
*team
, struct team_port
*port
)
1059 if (team
->ops
.port_leave
)
1060 team
->ops
.port_leave(team
, port
);
1064 #ifdef CONFIG_NET_POLL_CONTROLLER
1065 static int team_port_enable_netpoll(struct team
*team
, struct team_port
*port
)
1070 if (!team
->dev
->npinfo
)
1073 np
= kzalloc(sizeof(*np
), GFP_KERNEL
);
1077 err
= __netpoll_setup(np
, port
->dev
);
1086 static void team_port_disable_netpoll(struct team_port
*port
)
1088 struct netpoll
*np
= port
->np
;
1094 /* Wait for transmitting packets to finish before freeing. */
1095 synchronize_rcu_bh();
1096 __netpoll_cleanup(np
);
1100 static int team_port_enable_netpoll(struct team
*team
, struct team_port
*port
)
1104 static void team_port_disable_netpoll(struct team_port
*port
)
1109 static int team_upper_dev_link(struct team
*team
, struct team_port
*port
)
1111 struct netdev_lag_upper_info lag_upper_info
;
1114 lag_upper_info
.tx_type
= team
->mode
->lag_tx_type
;
1115 err
= netdev_master_upper_dev_link(port
->dev
, team
->dev
, NULL
,
1119 port
->dev
->priv_flags
|= IFF_TEAM_PORT
;
1123 static void team_upper_dev_unlink(struct team
*team
, struct team_port
*port
)
1125 netdev_upper_dev_unlink(port
->dev
, team
->dev
);
1126 port
->dev
->priv_flags
&= ~IFF_TEAM_PORT
;
1129 static void __team_port_change_port_added(struct team_port
*port
, bool linkup
);
1130 static int team_dev_type_check_change(struct net_device
*dev
,
1131 struct net_device
*port_dev
);
1133 static int team_port_add(struct team
*team
, struct net_device
*port_dev
)
1135 struct net_device
*dev
= team
->dev
;
1136 struct team_port
*port
;
1137 char *portname
= port_dev
->name
;
1140 if (port_dev
->flags
& IFF_LOOPBACK
) {
1141 netdev_err(dev
, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
1146 if (team_port_exists(port_dev
)) {
1147 netdev_err(dev
, "Device %s is already a port "
1148 "of a team device\n", portname
);
1152 if (port_dev
->features
& NETIF_F_VLAN_CHALLENGED
&&
1153 vlan_uses_dev(dev
)) {
1154 netdev_err(dev
, "Device %s is VLAN challenged and team device has VLAN set up\n",
1159 err
= team_dev_type_check_change(dev
, port_dev
);
1163 if (port_dev
->flags
& IFF_UP
) {
1164 netdev_err(dev
, "Device %s is up. Set it down before adding it as a team port\n",
1169 port
= kzalloc(sizeof(struct team_port
) + team
->mode
->port_priv_size
,
1174 port
->dev
= port_dev
;
1176 INIT_LIST_HEAD(&port
->qom_list
);
1178 port
->orig
.mtu
= port_dev
->mtu
;
1179 err
= dev_set_mtu(port_dev
, dev
->mtu
);
1181 netdev_dbg(dev
, "Error %d calling dev_set_mtu\n", err
);
1185 memcpy(port
->orig
.dev_addr
, port_dev
->dev_addr
, port_dev
->addr_len
);
1187 err
= team_port_enter(team
, port
);
1189 netdev_err(dev
, "Device %s failed to enter team mode\n",
1191 goto err_port_enter
;
1194 err
= dev_open(port_dev
);
1196 netdev_dbg(dev
, "Device %s opening failed\n",
1201 dev_uc_sync_multiple(port_dev
, dev
);
1202 dev_mc_sync_multiple(port_dev
, dev
);
1204 err
= vlan_vids_add_by_dev(port_dev
, dev
);
1206 netdev_err(dev
, "Failed to add vlan ids to device %s\n",
1211 err
= team_port_enable_netpoll(team
, port
);
1213 netdev_err(dev
, "Failed to enable netpoll on device %s\n",
1215 goto err_enable_netpoll
;
1218 if (!(dev
->features
& NETIF_F_LRO
))
1219 dev_disable_lro(port_dev
);
1221 err
= netdev_rx_handler_register(port_dev
, team_handle_frame
,
1224 netdev_err(dev
, "Device %s failed to register rx_handler\n",
1226 goto err_handler_register
;
1229 err
= team_upper_dev_link(team
, port
);
1231 netdev_err(dev
, "Device %s failed to set upper link\n",
1233 goto err_set_upper_link
;
1236 err
= __team_option_inst_add_port(team
, port
);
1238 netdev_err(dev
, "Device %s failed to add per-port options\n",
1240 goto err_option_port_add
;
1244 list_add_tail_rcu(&port
->list
, &team
->port_list
);
1245 team_port_enable(team
, port
);
1246 __team_compute_features(team
);
1247 __team_port_change_port_added(port
, !!netif_carrier_ok(port_dev
));
1248 __team_options_change_check(team
);
1250 netdev_info(dev
, "Port device %s added\n", portname
);
1254 err_option_port_add
:
1255 team_upper_dev_unlink(team
, port
);
1258 netdev_rx_handler_unregister(port_dev
);
1260 err_handler_register
:
1261 team_port_disable_netpoll(port
);
1264 vlan_vids_del_by_dev(port_dev
, dev
);
1267 dev_uc_unsync(port_dev
, dev
);
1268 dev_mc_unsync(port_dev
, dev
);
1269 dev_close(port_dev
);
1272 team_port_leave(team
, port
);
1273 team_port_set_orig_dev_addr(port
);
1276 dev_set_mtu(port_dev
, port
->orig
.mtu
);
1284 static void __team_port_change_port_removed(struct team_port
*port
);
1286 static int team_port_del(struct team
*team
, struct net_device
*port_dev
)
1288 struct net_device
*dev
= team
->dev
;
1289 struct team_port
*port
;
1290 char *portname
= port_dev
->name
;
1292 port
= team_port_get_rtnl(port_dev
);
1293 if (!port
|| !team_port_find(team
, port
)) {
1294 netdev_err(dev
, "Device %s does not act as a port of this team\n",
1299 team_port_disable(team
, port
);
1300 list_del_rcu(&port
->list
);
1301 team_upper_dev_unlink(team
, port
);
1302 netdev_rx_handler_unregister(port_dev
);
1303 team_port_disable_netpoll(port
);
1304 vlan_vids_del_by_dev(port_dev
, dev
);
1305 dev_uc_unsync(port_dev
, dev
);
1306 dev_mc_unsync(port_dev
, dev
);
1307 dev_close(port_dev
);
1308 team_port_leave(team
, port
);
1310 __team_option_inst_mark_removed_port(team
, port
);
1311 __team_options_change_check(team
);
1312 __team_option_inst_del_port(team
, port
);
1313 __team_port_change_port_removed(port
);
1315 team_port_set_orig_dev_addr(port
);
1316 dev_set_mtu(port_dev
, port
->orig
.mtu
);
1317 kfree_rcu(port
, rcu
);
1318 netdev_info(dev
, "Port device %s removed\n", portname
);
1319 __team_compute_features(team
);
1329 static int team_mode_option_get(struct team
*team
, struct team_gsetter_ctx
*ctx
)
1331 ctx
->data
.str_val
= team
->mode
->kind
;
1335 static int team_mode_option_set(struct team
*team
, struct team_gsetter_ctx
*ctx
)
1337 return team_change_mode(team
, ctx
->data
.str_val
);
1340 static int team_notify_peers_count_get(struct team
*team
,
1341 struct team_gsetter_ctx
*ctx
)
1343 ctx
->data
.u32_val
= team
->notify_peers
.count
;
1347 static int team_notify_peers_count_set(struct team
*team
,
1348 struct team_gsetter_ctx
*ctx
)
1350 team
->notify_peers
.count
= ctx
->data
.u32_val
;
1354 static int team_notify_peers_interval_get(struct team
*team
,
1355 struct team_gsetter_ctx
*ctx
)
1357 ctx
->data
.u32_val
= team
->notify_peers
.interval
;
1361 static int team_notify_peers_interval_set(struct team
*team
,
1362 struct team_gsetter_ctx
*ctx
)
1364 team
->notify_peers
.interval
= ctx
->data
.u32_val
;
1368 static int team_mcast_rejoin_count_get(struct team
*team
,
1369 struct team_gsetter_ctx
*ctx
)
1371 ctx
->data
.u32_val
= team
->mcast_rejoin
.count
;
1375 static int team_mcast_rejoin_count_set(struct team
*team
,
1376 struct team_gsetter_ctx
*ctx
)
1378 team
->mcast_rejoin
.count
= ctx
->data
.u32_val
;
1382 static int team_mcast_rejoin_interval_get(struct team
*team
,
1383 struct team_gsetter_ctx
*ctx
)
1385 ctx
->data
.u32_val
= team
->mcast_rejoin
.interval
;
1389 static int team_mcast_rejoin_interval_set(struct team
*team
,
1390 struct team_gsetter_ctx
*ctx
)
1392 team
->mcast_rejoin
.interval
= ctx
->data
.u32_val
;
1396 static int team_port_en_option_get(struct team
*team
,
1397 struct team_gsetter_ctx
*ctx
)
1399 struct team_port
*port
= ctx
->info
->port
;
1401 ctx
->data
.bool_val
= team_port_enabled(port
);
1405 static int team_port_en_option_set(struct team
*team
,
1406 struct team_gsetter_ctx
*ctx
)
1408 struct team_port
*port
= ctx
->info
->port
;
1410 if (ctx
->data
.bool_val
)
1411 team_port_enable(team
, port
);
1413 team_port_disable(team
, port
);
1417 static int team_user_linkup_option_get(struct team
*team
,
1418 struct team_gsetter_ctx
*ctx
)
1420 struct team_port
*port
= ctx
->info
->port
;
1422 ctx
->data
.bool_val
= port
->user
.linkup
;
1426 static void __team_carrier_check(struct team
*team
);
1428 static int team_user_linkup_option_set(struct team
*team
,
1429 struct team_gsetter_ctx
*ctx
)
1431 struct team_port
*port
= ctx
->info
->port
;
1433 port
->user
.linkup
= ctx
->data
.bool_val
;
1434 team_refresh_port_linkup(port
);
1435 __team_carrier_check(port
->team
);
1439 static int team_user_linkup_en_option_get(struct team
*team
,
1440 struct team_gsetter_ctx
*ctx
)
1442 struct team_port
*port
= ctx
->info
->port
;
1444 ctx
->data
.bool_val
= port
->user
.linkup_enabled
;
1448 static int team_user_linkup_en_option_set(struct team
*team
,
1449 struct team_gsetter_ctx
*ctx
)
1451 struct team_port
*port
= ctx
->info
->port
;
1453 port
->user
.linkup_enabled
= ctx
->data
.bool_val
;
1454 team_refresh_port_linkup(port
);
1455 __team_carrier_check(port
->team
);
1459 static int team_priority_option_get(struct team
*team
,
1460 struct team_gsetter_ctx
*ctx
)
1462 struct team_port
*port
= ctx
->info
->port
;
1464 ctx
->data
.s32_val
= port
->priority
;
1468 static int team_priority_option_set(struct team
*team
,
1469 struct team_gsetter_ctx
*ctx
)
1471 struct team_port
*port
= ctx
->info
->port
;
1472 s32 priority
= ctx
->data
.s32_val
;
1474 if (port
->priority
== priority
)
1476 port
->priority
= priority
;
1477 team_queue_override_port_prio_changed(team
, port
);
1481 static int team_queue_id_option_get(struct team
*team
,
1482 struct team_gsetter_ctx
*ctx
)
1484 struct team_port
*port
= ctx
->info
->port
;
1486 ctx
->data
.u32_val
= port
->queue_id
;
1490 static int team_queue_id_option_set(struct team
*team
,
1491 struct team_gsetter_ctx
*ctx
)
1493 struct team_port
*port
= ctx
->info
->port
;
1494 u16 new_queue_id
= ctx
->data
.u32_val
;
1496 if (port
->queue_id
== new_queue_id
)
1498 if (new_queue_id
>= team
->dev
->real_num_tx_queues
)
1500 team_queue_override_port_change_queue_id(team
, port
, new_queue_id
);
1504 static const struct team_option team_options
[] = {
1507 .type
= TEAM_OPTION_TYPE_STRING
,
1508 .getter
= team_mode_option_get
,
1509 .setter
= team_mode_option_set
,
1512 .name
= "notify_peers_count",
1513 .type
= TEAM_OPTION_TYPE_U32
,
1514 .getter
= team_notify_peers_count_get
,
1515 .setter
= team_notify_peers_count_set
,
1518 .name
= "notify_peers_interval",
1519 .type
= TEAM_OPTION_TYPE_U32
,
1520 .getter
= team_notify_peers_interval_get
,
1521 .setter
= team_notify_peers_interval_set
,
1524 .name
= "mcast_rejoin_count",
1525 .type
= TEAM_OPTION_TYPE_U32
,
1526 .getter
= team_mcast_rejoin_count_get
,
1527 .setter
= team_mcast_rejoin_count_set
,
1530 .name
= "mcast_rejoin_interval",
1531 .type
= TEAM_OPTION_TYPE_U32
,
1532 .getter
= team_mcast_rejoin_interval_get
,
1533 .setter
= team_mcast_rejoin_interval_set
,
1537 .type
= TEAM_OPTION_TYPE_BOOL
,
1539 .getter
= team_port_en_option_get
,
1540 .setter
= team_port_en_option_set
,
1543 .name
= "user_linkup",
1544 .type
= TEAM_OPTION_TYPE_BOOL
,
1546 .getter
= team_user_linkup_option_get
,
1547 .setter
= team_user_linkup_option_set
,
1550 .name
= "user_linkup_enabled",
1551 .type
= TEAM_OPTION_TYPE_BOOL
,
1553 .getter
= team_user_linkup_en_option_get
,
1554 .setter
= team_user_linkup_en_option_set
,
1558 .type
= TEAM_OPTION_TYPE_S32
,
1560 .getter
= team_priority_option_get
,
1561 .setter
= team_priority_option_set
,
1565 .type
= TEAM_OPTION_TYPE_U32
,
1567 .getter
= team_queue_id_option_get
,
1568 .setter
= team_queue_id_option_set
,
1572 static struct lock_class_key team_netdev_xmit_lock_key
;
1573 static struct lock_class_key team_netdev_addr_lock_key
;
1574 static struct lock_class_key team_tx_busylock_key
;
1576 static void team_set_lockdep_class_one(struct net_device
*dev
,
1577 struct netdev_queue
*txq
,
1580 lockdep_set_class(&txq
->_xmit_lock
, &team_netdev_xmit_lock_key
);
1583 static void team_set_lockdep_class(struct net_device
*dev
)
1585 lockdep_set_class(&dev
->addr_list_lock
, &team_netdev_addr_lock_key
);
1586 netdev_for_each_tx_queue(dev
, team_set_lockdep_class_one
, NULL
);
1587 dev
->qdisc_tx_busylock
= &team_tx_busylock_key
;
1590 static int team_init(struct net_device
*dev
)
1592 struct team
*team
= netdev_priv(dev
);
1597 mutex_init(&team
->lock
);
1598 team_set_no_mode(team
);
1600 team
->pcpu_stats
= netdev_alloc_pcpu_stats(struct team_pcpu_stats
);
1601 if (!team
->pcpu_stats
)
1604 for (i
= 0; i
< TEAM_PORT_HASHENTRIES
; i
++)
1605 INIT_HLIST_HEAD(&team
->en_port_hlist
[i
]);
1606 INIT_LIST_HEAD(&team
->port_list
);
1607 err
= team_queue_override_init(team
);
1609 goto err_team_queue_override_init
;
1611 team_adjust_ops(team
);
1613 INIT_LIST_HEAD(&team
->option_list
);
1614 INIT_LIST_HEAD(&team
->option_inst_list
);
1616 team_notify_peers_init(team
);
1617 team_mcast_rejoin_init(team
);
1619 err
= team_options_register(team
, team_options
, ARRAY_SIZE(team_options
));
1621 goto err_options_register
;
1622 netif_carrier_off(dev
);
1624 team_set_lockdep_class(dev
);
1628 err_options_register
:
1629 team_mcast_rejoin_fini(team
);
1630 team_notify_peers_fini(team
);
1631 team_queue_override_fini(team
);
1632 err_team_queue_override_init
:
1633 free_percpu(team
->pcpu_stats
);
1638 static void team_uninit(struct net_device
*dev
)
1640 struct team
*team
= netdev_priv(dev
);
1641 struct team_port
*port
;
1642 struct team_port
*tmp
;
1644 mutex_lock(&team
->lock
);
1645 list_for_each_entry_safe(port
, tmp
, &team
->port_list
, list
)
1646 team_port_del(team
, port
->dev
);
1648 __team_change_mode(team
, NULL
); /* cleanup */
1649 __team_options_unregister(team
, team_options
, ARRAY_SIZE(team_options
));
1650 team_mcast_rejoin_fini(team
);
1651 team_notify_peers_fini(team
);
1652 team_queue_override_fini(team
);
1653 mutex_unlock(&team
->lock
);
1656 static void team_destructor(struct net_device
*dev
)
1658 struct team
*team
= netdev_priv(dev
);
1660 free_percpu(team
->pcpu_stats
);
1664 static int team_open(struct net_device
*dev
)
1669 static int team_close(struct net_device
*dev
)
1675 * note: already called with rcu_read_lock
1677 static netdev_tx_t
team_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
1679 struct team
*team
= netdev_priv(dev
);
1681 unsigned int len
= skb
->len
;
1683 tx_success
= team_queue_override_transmit(team
, skb
);
1685 tx_success
= team
->ops
.transmit(team
, skb
);
1687 struct team_pcpu_stats
*pcpu_stats
;
1689 pcpu_stats
= this_cpu_ptr(team
->pcpu_stats
);
1690 u64_stats_update_begin(&pcpu_stats
->syncp
);
1691 pcpu_stats
->tx_packets
++;
1692 pcpu_stats
->tx_bytes
+= len
;
1693 u64_stats_update_end(&pcpu_stats
->syncp
);
1695 this_cpu_inc(team
->pcpu_stats
->tx_dropped
);
1698 return NETDEV_TX_OK
;
1701 static u16
team_select_queue(struct net_device
*dev
, struct sk_buff
*skb
,
1702 void *accel_priv
, select_queue_fallback_t fallback
)
1705 * This helper function exists to help dev_pick_tx get the correct
1706 * destination queue. Using a helper function skips a call to
1707 * skb_tx_hash and will put the skbs in the queue we expect on their
1708 * way down to the team driver.
1710 u16 txq
= skb_rx_queue_recorded(skb
) ? skb_get_rx_queue(skb
) : 0;
1713 * Save the original txq to restore before passing to the driver
1715 qdisc_skb_cb(skb
)->slave_dev_queue_mapping
= skb
->queue_mapping
;
1717 if (unlikely(txq
>= dev
->real_num_tx_queues
)) {
1719 txq
-= dev
->real_num_tx_queues
;
1720 } while (txq
>= dev
->real_num_tx_queues
);
1725 static void team_change_rx_flags(struct net_device
*dev
, int change
)
1727 struct team
*team
= netdev_priv(dev
);
1728 struct team_port
*port
;
1732 list_for_each_entry_rcu(port
, &team
->port_list
, list
) {
1733 if (change
& IFF_PROMISC
) {
1734 inc
= dev
->flags
& IFF_PROMISC
? 1 : -1;
1735 dev_set_promiscuity(port
->dev
, inc
);
1737 if (change
& IFF_ALLMULTI
) {
1738 inc
= dev
->flags
& IFF_ALLMULTI
? 1 : -1;
1739 dev_set_allmulti(port
->dev
, inc
);
1745 static void team_set_rx_mode(struct net_device
*dev
)
1747 struct team
*team
= netdev_priv(dev
);
1748 struct team_port
*port
;
1751 list_for_each_entry_rcu(port
, &team
->port_list
, list
) {
1752 dev_uc_sync_multiple(port
->dev
, dev
);
1753 dev_mc_sync_multiple(port
->dev
, dev
);
1758 static int team_set_mac_address(struct net_device
*dev
, void *p
)
1760 struct sockaddr
*addr
= p
;
1761 struct team
*team
= netdev_priv(dev
);
1762 struct team_port
*port
;
1764 if (dev
->type
== ARPHRD_ETHER
&& !is_valid_ether_addr(addr
->sa_data
))
1765 return -EADDRNOTAVAIL
;
1766 memcpy(dev
->dev_addr
, addr
->sa_data
, dev
->addr_len
);
1767 mutex_lock(&team
->lock
);
1768 list_for_each_entry(port
, &team
->port_list
, list
)
1769 if (team
->ops
.port_change_dev_addr
)
1770 team
->ops
.port_change_dev_addr(team
, port
);
1771 mutex_unlock(&team
->lock
);
1775 static int team_change_mtu(struct net_device
*dev
, int new_mtu
)
1777 struct team
*team
= netdev_priv(dev
);
1778 struct team_port
*port
;
1782 * Alhough this is reader, it's guarded by team lock. It's not possible
1783 * to traverse list in reverse under rcu_read_lock
1785 mutex_lock(&team
->lock
);
1786 team
->port_mtu_change_allowed
= true;
1787 list_for_each_entry(port
, &team
->port_list
, list
) {
1788 err
= dev_set_mtu(port
->dev
, new_mtu
);
1790 netdev_err(dev
, "Device %s failed to change mtu",
1795 team
->port_mtu_change_allowed
= false;
1796 mutex_unlock(&team
->lock
);
1803 list_for_each_entry_continue_reverse(port
, &team
->port_list
, list
)
1804 dev_set_mtu(port
->dev
, dev
->mtu
);
1805 team
->port_mtu_change_allowed
= false;
1806 mutex_unlock(&team
->lock
);
1811 static struct rtnl_link_stats64
*
1812 team_get_stats64(struct net_device
*dev
, struct rtnl_link_stats64
*stats
)
1814 struct team
*team
= netdev_priv(dev
);
1815 struct team_pcpu_stats
*p
;
1816 u64 rx_packets
, rx_bytes
, rx_multicast
, tx_packets
, tx_bytes
;
1817 u32 rx_dropped
= 0, tx_dropped
= 0, rx_nohandler
= 0;
1821 for_each_possible_cpu(i
) {
1822 p
= per_cpu_ptr(team
->pcpu_stats
, i
);
1824 start
= u64_stats_fetch_begin_irq(&p
->syncp
);
1825 rx_packets
= p
->rx_packets
;
1826 rx_bytes
= p
->rx_bytes
;
1827 rx_multicast
= p
->rx_multicast
;
1828 tx_packets
= p
->tx_packets
;
1829 tx_bytes
= p
->tx_bytes
;
1830 } while (u64_stats_fetch_retry_irq(&p
->syncp
, start
));
1832 stats
->rx_packets
+= rx_packets
;
1833 stats
->rx_bytes
+= rx_bytes
;
1834 stats
->multicast
+= rx_multicast
;
1835 stats
->tx_packets
+= tx_packets
;
1836 stats
->tx_bytes
+= tx_bytes
;
1838 * rx_dropped, tx_dropped & rx_nohandler are u32,
1839 * updated without syncp protection.
1841 rx_dropped
+= p
->rx_dropped
;
1842 tx_dropped
+= p
->tx_dropped
;
1843 rx_nohandler
+= p
->rx_nohandler
;
1845 stats
->rx_dropped
= rx_dropped
;
1846 stats
->tx_dropped
= tx_dropped
;
1847 stats
->rx_nohandler
= rx_nohandler
;
1851 static int team_vlan_rx_add_vid(struct net_device
*dev
, __be16 proto
, u16 vid
)
1853 struct team
*team
= netdev_priv(dev
);
1854 struct team_port
*port
;
1858 * Alhough this is reader, it's guarded by team lock. It's not possible
1859 * to traverse list in reverse under rcu_read_lock
1861 mutex_lock(&team
->lock
);
1862 list_for_each_entry(port
, &team
->port_list
, list
) {
1863 err
= vlan_vid_add(port
->dev
, proto
, vid
);
1867 mutex_unlock(&team
->lock
);
1872 list_for_each_entry_continue_reverse(port
, &team
->port_list
, list
)
1873 vlan_vid_del(port
->dev
, proto
, vid
);
1874 mutex_unlock(&team
->lock
);
1879 static int team_vlan_rx_kill_vid(struct net_device
*dev
, __be16 proto
, u16 vid
)
1881 struct team
*team
= netdev_priv(dev
);
1882 struct team_port
*port
;
1884 mutex_lock(&team
->lock
);
1885 list_for_each_entry(port
, &team
->port_list
, list
)
1886 vlan_vid_del(port
->dev
, proto
, vid
);
1887 mutex_unlock(&team
->lock
);
1892 #ifdef CONFIG_NET_POLL_CONTROLLER
1893 static void team_poll_controller(struct net_device
*dev
)
1897 static void __team_netpoll_cleanup(struct team
*team
)
1899 struct team_port
*port
;
1901 list_for_each_entry(port
, &team
->port_list
, list
)
1902 team_port_disable_netpoll(port
);
1905 static void team_netpoll_cleanup(struct net_device
*dev
)
1907 struct team
*team
= netdev_priv(dev
);
1909 mutex_lock(&team
->lock
);
1910 __team_netpoll_cleanup(team
);
1911 mutex_unlock(&team
->lock
);
1914 static int team_netpoll_setup(struct net_device
*dev
,
1915 struct netpoll_info
*npifo
)
1917 struct team
*team
= netdev_priv(dev
);
1918 struct team_port
*port
;
1921 mutex_lock(&team
->lock
);
1922 list_for_each_entry(port
, &team
->port_list
, list
) {
1923 err
= team_port_enable_netpoll(team
, port
);
1925 __team_netpoll_cleanup(team
);
1929 mutex_unlock(&team
->lock
);
1934 static int team_add_slave(struct net_device
*dev
, struct net_device
*port_dev
)
1936 struct team
*team
= netdev_priv(dev
);
1939 mutex_lock(&team
->lock
);
1940 err
= team_port_add(team
, port_dev
);
1941 mutex_unlock(&team
->lock
);
1945 static int team_del_slave(struct net_device
*dev
, struct net_device
*port_dev
)
1947 struct team
*team
= netdev_priv(dev
);
1950 mutex_lock(&team
->lock
);
1951 err
= team_port_del(team
, port_dev
);
1952 mutex_unlock(&team
->lock
);
1956 static netdev_features_t
team_fix_features(struct net_device
*dev
,
1957 netdev_features_t features
)
1959 struct team_port
*port
;
1960 struct team
*team
= netdev_priv(dev
);
1961 netdev_features_t mask
;
1964 features
&= ~NETIF_F_ONE_FOR_ALL
;
1965 features
|= NETIF_F_ALL_FOR_ALL
;
1968 list_for_each_entry_rcu(port
, &team
->port_list
, list
) {
1969 features
= netdev_increment_features(features
,
1970 port
->dev
->features
,
1975 features
= netdev_add_tso_features(features
, mask
);
1980 static int team_change_carrier(struct net_device
*dev
, bool new_carrier
)
1982 struct team
*team
= netdev_priv(dev
);
1984 team
->user_carrier_enabled
= true;
1987 netif_carrier_on(dev
);
1989 netif_carrier_off(dev
);
1993 static const struct net_device_ops team_netdev_ops
= {
1994 .ndo_init
= team_init
,
1995 .ndo_uninit
= team_uninit
,
1996 .ndo_open
= team_open
,
1997 .ndo_stop
= team_close
,
1998 .ndo_start_xmit
= team_xmit
,
1999 .ndo_select_queue
= team_select_queue
,
2000 .ndo_change_rx_flags
= team_change_rx_flags
,
2001 .ndo_set_rx_mode
= team_set_rx_mode
,
2002 .ndo_set_mac_address
= team_set_mac_address
,
2003 .ndo_change_mtu
= team_change_mtu
,
2004 .ndo_get_stats64
= team_get_stats64
,
2005 .ndo_vlan_rx_add_vid
= team_vlan_rx_add_vid
,
2006 .ndo_vlan_rx_kill_vid
= team_vlan_rx_kill_vid
,
2007 #ifdef CONFIG_NET_POLL_CONTROLLER
2008 .ndo_poll_controller
= team_poll_controller
,
2009 .ndo_netpoll_setup
= team_netpoll_setup
,
2010 .ndo_netpoll_cleanup
= team_netpoll_cleanup
,
2012 .ndo_add_slave
= team_add_slave
,
2013 .ndo_del_slave
= team_del_slave
,
2014 .ndo_fix_features
= team_fix_features
,
2015 .ndo_change_carrier
= team_change_carrier
,
2016 .ndo_bridge_setlink
= switchdev_port_bridge_setlink
,
2017 .ndo_bridge_getlink
= switchdev_port_bridge_getlink
,
2018 .ndo_bridge_dellink
= switchdev_port_bridge_dellink
,
2019 .ndo_fdb_add
= switchdev_port_fdb_add
,
2020 .ndo_fdb_del
= switchdev_port_fdb_del
,
2021 .ndo_fdb_dump
= switchdev_port_fdb_dump
,
2022 .ndo_features_check
= passthru_features_check
,
2025 /***********************
2027 ***********************/
2029 static void team_ethtool_get_drvinfo(struct net_device
*dev
,
2030 struct ethtool_drvinfo
*drvinfo
)
2032 strlcpy(drvinfo
->driver
, DRV_NAME
, sizeof(drvinfo
->driver
));
2033 strlcpy(drvinfo
->version
, UTS_RELEASE
, sizeof(drvinfo
->version
));
2036 static const struct ethtool_ops team_ethtool_ops
= {
2037 .get_drvinfo
= team_ethtool_get_drvinfo
,
2038 .get_link
= ethtool_op_get_link
,
2041 /***********************
2042 * rt netlink interface
2043 ***********************/
2045 static void team_setup_by_port(struct net_device
*dev
,
2046 struct net_device
*port_dev
)
2048 dev
->header_ops
= port_dev
->header_ops
;
2049 dev
->type
= port_dev
->type
;
2050 dev
->hard_header_len
= port_dev
->hard_header_len
;
2051 dev
->addr_len
= port_dev
->addr_len
;
2052 dev
->mtu
= port_dev
->mtu
;
2053 memcpy(dev
->broadcast
, port_dev
->broadcast
, port_dev
->addr_len
);
2054 eth_hw_addr_inherit(dev
, port_dev
);
2057 static int team_dev_type_check_change(struct net_device
*dev
,
2058 struct net_device
*port_dev
)
2060 struct team
*team
= netdev_priv(dev
);
2061 char *portname
= port_dev
->name
;
2064 if (dev
->type
== port_dev
->type
)
2066 if (!list_empty(&team
->port_list
)) {
2067 netdev_err(dev
, "Device %s is of different type\n", portname
);
2070 err
= call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE
, dev
);
2071 err
= notifier_to_errno(err
);
2073 netdev_err(dev
, "Refused to change device type\n");
2078 team_setup_by_port(dev
, port_dev
);
2079 call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE
, dev
);
2083 static void team_setup(struct net_device
*dev
)
2087 dev
->netdev_ops
= &team_netdev_ops
;
2088 dev
->ethtool_ops
= &team_ethtool_ops
;
2089 dev
->destructor
= team_destructor
;
2090 dev
->priv_flags
&= ~(IFF_XMIT_DST_RELEASE
| IFF_TX_SKB_SHARING
);
2091 dev
->priv_flags
|= IFF_NO_QUEUE
;
2092 dev
->priv_flags
|= IFF_TEAM
;
2095 * Indicate we support unicast address filtering. That way core won't
2096 * bring us to promisc mode in case a unicast addr is added.
2097 * Let this up to underlay drivers.
2099 dev
->priv_flags
|= IFF_UNICAST_FLT
| IFF_LIVE_ADDR_CHANGE
;
2101 dev
->features
|= NETIF_F_LLTX
;
2102 dev
->features
|= NETIF_F_GRO
;
2104 /* Don't allow team devices to change network namespaces. */
2105 dev
->features
|= NETIF_F_NETNS_LOCAL
;
2107 dev
->hw_features
= TEAM_VLAN_FEATURES
|
2108 NETIF_F_HW_VLAN_CTAG_TX
|
2109 NETIF_F_HW_VLAN_CTAG_RX
|
2110 NETIF_F_HW_VLAN_CTAG_FILTER
;
2112 dev
->hw_features
|= NETIF_F_GSO_ENCAP_ALL
;
2113 dev
->features
|= dev
->hw_features
;
2116 static int team_newlink(struct net
*src_net
, struct net_device
*dev
,
2117 struct nlattr
*tb
[], struct nlattr
*data
[])
2119 if (tb
[IFLA_ADDRESS
] == NULL
)
2120 eth_hw_addr_random(dev
);
2122 return register_netdevice(dev
);
2125 static int team_validate(struct nlattr
*tb
[], struct nlattr
*data
[])
2127 if (tb
[IFLA_ADDRESS
]) {
2128 if (nla_len(tb
[IFLA_ADDRESS
]) != ETH_ALEN
)
2130 if (!is_valid_ether_addr(nla_data(tb
[IFLA_ADDRESS
])))
2131 return -EADDRNOTAVAIL
;
2136 static unsigned int team_get_num_tx_queues(void)
2138 return TEAM_DEFAULT_NUM_TX_QUEUES
;
2141 static unsigned int team_get_num_rx_queues(void)
2143 return TEAM_DEFAULT_NUM_RX_QUEUES
;
2146 static struct rtnl_link_ops team_link_ops __read_mostly
= {
2148 .priv_size
= sizeof(struct team
),
2149 .setup
= team_setup
,
2150 .newlink
= team_newlink
,
2151 .validate
= team_validate
,
2152 .get_num_tx_queues
= team_get_num_tx_queues
,
2153 .get_num_rx_queues
= team_get_num_rx_queues
,
2157 /***********************************
2158 * Generic netlink custom interface
2159 ***********************************/
2161 static struct genl_family team_nl_family
= {
2162 .id
= GENL_ID_GENERATE
,
2163 .name
= TEAM_GENL_NAME
,
2164 .version
= TEAM_GENL_VERSION
,
2165 .maxattr
= TEAM_ATTR_MAX
,
2169 static const struct nla_policy team_nl_policy
[TEAM_ATTR_MAX
+ 1] = {
2170 [TEAM_ATTR_UNSPEC
] = { .type
= NLA_UNSPEC
, },
2171 [TEAM_ATTR_TEAM_IFINDEX
] = { .type
= NLA_U32
},
2172 [TEAM_ATTR_LIST_OPTION
] = { .type
= NLA_NESTED
},
2173 [TEAM_ATTR_LIST_PORT
] = { .type
= NLA_NESTED
},
2176 static const struct nla_policy
2177 team_nl_option_policy
[TEAM_ATTR_OPTION_MAX
+ 1] = {
2178 [TEAM_ATTR_OPTION_UNSPEC
] = { .type
= NLA_UNSPEC
, },
2179 [TEAM_ATTR_OPTION_NAME
] = {
2181 .len
= TEAM_STRING_MAX_LEN
,
2183 [TEAM_ATTR_OPTION_CHANGED
] = { .type
= NLA_FLAG
},
2184 [TEAM_ATTR_OPTION_TYPE
] = { .type
= NLA_U8
},
2185 [TEAM_ATTR_OPTION_DATA
] = { .type
= NLA_BINARY
},
2188 static int team_nl_cmd_noop(struct sk_buff
*skb
, struct genl_info
*info
)
2190 struct sk_buff
*msg
;
2194 msg
= nlmsg_new(NLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
2198 hdr
= genlmsg_put(msg
, info
->snd_portid
, info
->snd_seq
,
2199 &team_nl_family
, 0, TEAM_CMD_NOOP
);
2205 genlmsg_end(msg
, hdr
);
2207 return genlmsg_unicast(genl_info_net(info
), msg
, info
->snd_portid
);
2216 * Netlink cmd functions should be locked by following two functions.
2217 * Since dev gets held here, that ensures dev won't disappear in between.
2219 static struct team
*team_nl_team_get(struct genl_info
*info
)
2221 struct net
*net
= genl_info_net(info
);
2223 struct net_device
*dev
;
2226 if (!info
->attrs
[TEAM_ATTR_TEAM_IFINDEX
])
2229 ifindex
= nla_get_u32(info
->attrs
[TEAM_ATTR_TEAM_IFINDEX
]);
2230 dev
= dev_get_by_index(net
, ifindex
);
2231 if (!dev
|| dev
->netdev_ops
!= &team_netdev_ops
) {
2237 team
= netdev_priv(dev
);
2238 mutex_lock(&team
->lock
);
2242 static void team_nl_team_put(struct team
*team
)
2244 mutex_unlock(&team
->lock
);
2248 typedef int team_nl_send_func_t(struct sk_buff
*skb
,
2249 struct team
*team
, u32 portid
);
2251 static int team_nl_send_unicast(struct sk_buff
*skb
, struct team
*team
, u32 portid
)
2253 return genlmsg_unicast(dev_net(team
->dev
), skb
, portid
);
2256 static int team_nl_fill_one_option_get(struct sk_buff
*skb
, struct team
*team
,
2257 struct team_option_inst
*opt_inst
)
2259 struct nlattr
*option_item
;
2260 struct team_option
*option
= opt_inst
->option
;
2261 struct team_option_inst_info
*opt_inst_info
= &opt_inst
->info
;
2262 struct team_gsetter_ctx ctx
;
2265 ctx
.info
= opt_inst_info
;
2266 err
= team_option_get(team
, opt_inst
, &ctx
);
2270 option_item
= nla_nest_start(skb
, TEAM_ATTR_ITEM_OPTION
);
2274 if (nla_put_string(skb
, TEAM_ATTR_OPTION_NAME
, option
->name
))
2276 if (opt_inst_info
->port
&&
2277 nla_put_u32(skb
, TEAM_ATTR_OPTION_PORT_IFINDEX
,
2278 opt_inst_info
->port
->dev
->ifindex
))
2280 if (opt_inst
->option
->array_size
&&
2281 nla_put_u32(skb
, TEAM_ATTR_OPTION_ARRAY_INDEX
,
2282 opt_inst_info
->array_index
))
2285 switch (option
->type
) {
2286 case TEAM_OPTION_TYPE_U32
:
2287 if (nla_put_u8(skb
, TEAM_ATTR_OPTION_TYPE
, NLA_U32
))
2289 if (nla_put_u32(skb
, TEAM_ATTR_OPTION_DATA
, ctx
.data
.u32_val
))
2292 case TEAM_OPTION_TYPE_STRING
:
2293 if (nla_put_u8(skb
, TEAM_ATTR_OPTION_TYPE
, NLA_STRING
))
2295 if (nla_put_string(skb
, TEAM_ATTR_OPTION_DATA
,
2299 case TEAM_OPTION_TYPE_BINARY
:
2300 if (nla_put_u8(skb
, TEAM_ATTR_OPTION_TYPE
, NLA_BINARY
))
2302 if (nla_put(skb
, TEAM_ATTR_OPTION_DATA
, ctx
.data
.bin_val
.len
,
2303 ctx
.data
.bin_val
.ptr
))
2306 case TEAM_OPTION_TYPE_BOOL
:
2307 if (nla_put_u8(skb
, TEAM_ATTR_OPTION_TYPE
, NLA_FLAG
))
2309 if (ctx
.data
.bool_val
&&
2310 nla_put_flag(skb
, TEAM_ATTR_OPTION_DATA
))
2313 case TEAM_OPTION_TYPE_S32
:
2314 if (nla_put_u8(skb
, TEAM_ATTR_OPTION_TYPE
, NLA_S32
))
2316 if (nla_put_s32(skb
, TEAM_ATTR_OPTION_DATA
, ctx
.data
.s32_val
))
2322 if (opt_inst
->removed
&& nla_put_flag(skb
, TEAM_ATTR_OPTION_REMOVED
))
2324 if (opt_inst
->changed
) {
2325 if (nla_put_flag(skb
, TEAM_ATTR_OPTION_CHANGED
))
2327 opt_inst
->changed
= false;
2329 nla_nest_end(skb
, option_item
);
2333 nla_nest_cancel(skb
, option_item
);
2337 static int __send_and_alloc_skb(struct sk_buff
**pskb
,
2338 struct team
*team
, u32 portid
,
2339 team_nl_send_func_t
*send_func
)
2344 err
= send_func(*pskb
, team
, portid
);
2348 *pskb
= genlmsg_new(GENLMSG_DEFAULT_SIZE
, GFP_KERNEL
);
2354 static int team_nl_send_options_get(struct team
*team
, u32 portid
, u32 seq
,
2355 int flags
, team_nl_send_func_t
*send_func
,
2356 struct list_head
*sel_opt_inst_list
)
2358 struct nlattr
*option_list
;
2359 struct nlmsghdr
*nlh
;
2361 struct team_option_inst
*opt_inst
;
2363 struct sk_buff
*skb
= NULL
;
2367 opt_inst
= list_first_entry(sel_opt_inst_list
,
2368 struct team_option_inst
, tmp_list
);
2371 err
= __send_and_alloc_skb(&skb
, team
, portid
, send_func
);
2375 hdr
= genlmsg_put(skb
, portid
, seq
, &team_nl_family
, flags
| NLM_F_MULTI
,
2376 TEAM_CMD_OPTIONS_GET
);
2380 if (nla_put_u32(skb
, TEAM_ATTR_TEAM_IFINDEX
, team
->dev
->ifindex
))
2381 goto nla_put_failure
;
2382 option_list
= nla_nest_start(skb
, TEAM_ATTR_LIST_OPTION
);
2384 goto nla_put_failure
;
2388 list_for_each_entry_from(opt_inst
, sel_opt_inst_list
, tmp_list
) {
2389 err
= team_nl_fill_one_option_get(skb
, team
, opt_inst
);
2391 if (err
== -EMSGSIZE
) {
2402 nla_nest_end(skb
, option_list
);
2403 genlmsg_end(skb
, hdr
);
2408 nlh
= nlmsg_put(skb
, portid
, seq
, NLMSG_DONE
, 0, flags
| NLM_F_MULTI
);
2410 err
= __send_and_alloc_skb(&skb
, team
, portid
, send_func
);
2416 return send_func(skb
, team
, portid
);
2421 genlmsg_cancel(skb
, hdr
);
2426 static int team_nl_cmd_options_get(struct sk_buff
*skb
, struct genl_info
*info
)
2429 struct team_option_inst
*opt_inst
;
2431 LIST_HEAD(sel_opt_inst_list
);
2433 team
= team_nl_team_get(info
);
2437 list_for_each_entry(opt_inst
, &team
->option_inst_list
, list
)
2438 list_add_tail(&opt_inst
->tmp_list
, &sel_opt_inst_list
);
2439 err
= team_nl_send_options_get(team
, info
->snd_portid
, info
->snd_seq
,
2440 NLM_F_ACK
, team_nl_send_unicast
,
2441 &sel_opt_inst_list
);
2443 team_nl_team_put(team
);
2448 static int team_nl_send_event_options_get(struct team
*team
,
2449 struct list_head
*sel_opt_inst_list
);
static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err = 0;
	int i;
	struct nlattr *nl_option;
	LIST_HEAD(opt_inst_list);

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
		err = -EINVAL;
		goto team_put;
	}

	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
		struct nlattr *attr;
		struct nlattr *attr_data;
		enum team_option_type opt_type;
		int opt_port_ifindex = 0; /* != 0 for per-port options */
		u32 opt_array_index = 0;
		bool opt_is_array = false;
		struct team_option_inst *opt_inst;
		char *opt_name;
		bool opt_found = false;

		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
			err = -EINVAL;
			goto team_put;
		}
		err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
				       nl_option, team_nl_option_policy);
		if (err)
			goto team_put;
		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
			err = -EINVAL;
			goto team_put;
		}
		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
		case NLA_U32:
			opt_type = TEAM_OPTION_TYPE_U32;
			break;
		case NLA_STRING:
			opt_type = TEAM_OPTION_TYPE_STRING;
			break;
		case NLA_BINARY:
			opt_type = TEAM_OPTION_TYPE_BINARY;
			break;
		case NLA_FLAG:
			opt_type = TEAM_OPTION_TYPE_BOOL;
			break;
		case NLA_S32:
			opt_type = TEAM_OPTION_TYPE_S32;
			break;
		default:
			err = -EINVAL;
			goto team_put;
		}

		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
			err = -EINVAL;
			goto team_put;
		}

		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
		attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
		if (attr)
			opt_port_ifindex = nla_get_u32(attr);

		attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
		if (attr) {
			opt_is_array = true;
			opt_array_index = nla_get_u32(attr);
		}

		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
			struct team_option *option = opt_inst->option;
			struct team_gsetter_ctx ctx;
			struct team_option_inst_info *opt_inst_info;
			int tmp_ifindex;

			opt_inst_info = &opt_inst->info;
			tmp_ifindex = opt_inst_info->port ?
				      opt_inst_info->port->dev->ifindex : 0;
			if (option->type != opt_type ||
			    strcmp(option->name, opt_name) ||
			    tmp_ifindex != opt_port_ifindex ||
			    (option->array_size && !opt_is_array) ||
			    opt_inst_info->array_index != opt_array_index)
				continue;
			opt_found = true;
			ctx.info = opt_inst_info;
			switch (opt_type) {
			case TEAM_OPTION_TYPE_U32:
				ctx.data.u32_val = nla_get_u32(attr_data);
				break;
			case TEAM_OPTION_TYPE_STRING:
				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
					err = -EINVAL;
					goto team_put;
				}
				ctx.data.str_val = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BINARY:
				ctx.data.bin_val.len = nla_len(attr_data);
				ctx.data.bin_val.ptr = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BOOL:
				ctx.data.bool_val = attr_data ? true : false;
				break;
			case TEAM_OPTION_TYPE_S32:
				ctx.data.s32_val = nla_get_s32(attr_data);
				break;
			default:
				BUG();
			}
			err = team_option_set(team, opt_inst, &ctx);
			if (err)
				goto team_put;
			opt_inst->changed = true;
			list_add(&opt_inst->tmp_list, &opt_inst_list);
		}
		if (!opt_found) {
			err = -ENOENT;
			goto team_put;
		}
	}

	err = team_nl_send_event_options_get(team, &opt_inst_list);

team_put:
	team_nl_team_put(team);

	return err;
}
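/* Encode the state of a single port as a nested TEAM_ATTR_ITEM_PORT
 * attribute: ifindex, changed/removed/linkup flags, speed and duplex.
 */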
static int team_nl_fill_one_port_get(struct sk_buff *skb,
				     struct team_port *port)
{
	struct nlattr *port_item;

	port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
	if (!port_item)
		return -EMSGSIZE;
	if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
		goto nest_cancel;
	if (port->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
			goto nest_cancel;
		port->changed = false;
	}
	if ((port->removed &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
	    (port->state.linkup &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
	    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
	    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
		goto nest_cancel;
	nla_nest_end(skb, port_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, port_item);
	return -EMSGSIZE;
}
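/* Send the port list (or a single selected port) in one or more
 * TEAM_CMD_PORT_LIST_GET messages terminated by NLMSG_DONE, mirroring the
 * option dump above.
 */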
static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
				      int flags, team_nl_send_func_t *send_func,
				      struct team_port *one_port)
{
	struct nlattr *port_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_port *port;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	port = list_first_entry_or_null(&team->port_list,
					struct team_port, list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_PORT_LIST_GET);
	if (!hdr) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
	if (!port_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;

	/* If one port is selected, the caller wants to send a port list
	 * containing only this port. Otherwise go through all listed ports
	 * and send them all.
	 */
	if (one_port) {
		err = team_nl_fill_one_port_get(skb, one_port);
		if (err)
			goto errout;
	} else if (port) {
		list_for_each_entry_from(port, &team->port_list, list) {
			err = team_nl_fill_one_port_get(skb, port);
			if (err) {
				if (err == -EMSGSIZE) {
					if (!i)
						goto errout;
					incomplete = true;
					break;
				}
				goto errout;
			}
			i++;
		}
	}

	nla_nest_end(skb, port_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			goto errout;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	genlmsg_cancel(skb, hdr);
	nlmsg_free(skb);
	return err;
}
static int team_nl_cmd_port_list_get(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
					 NLM_F_ACK, team_nl_send_unicast, NULL);

	team_nl_team_put(team);

	return err;
}
static const struct genl_ops team_nl_ops[] = {
	{
		.cmd = TEAM_CMD_NOOP,
		.doit = team_nl_cmd_noop,
		.policy = team_nl_policy,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_SET,
		.doit = team_nl_cmd_options_set,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_GET,
		.doit = team_nl_cmd_options_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_PORT_LIST_GET,
		.doit = team_nl_cmd_port_list_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};
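/* The option and port commands above require GENL_ADMIN_PERM (CAP_NET_ADMIN).
 * Purely as an illustration (assuming the teamnl utility from libteam is
 * installed): "teamnl team0 options" issues TEAM_CMD_OPTIONS_GET and
 * "teamnl team0 setoption mode activebackup" issues TEAM_CMD_OPTIONS_SET.
 */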
static const struct genl_multicast_group team_nl_mcgrps[] = {
	{ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
};
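/* Asynchronous change events are delivered to the change-event multicast
 * group; genlmsg_multicast_netns() returns -ESRCH when nobody listens, which
 * the callers below treat as harmless.
 */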
static int team_nl_send_multicast(struct sk_buff *skb,
				  struct team *team, u32 portid)
{
	return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
				       skb, 0, 0, GFP_KERNEL);
}
static int team_nl_send_event_options_get(struct team *team,
					  struct list_head *sel_opt_inst_list)
{
	return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
					sel_opt_inst_list);
}
static int team_nl_send_event_port_get(struct team *team,
				       struct team_port *port)
{
	return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
					  port);
}
static int team_nl_init(void)
{
	return genl_register_family_with_ops_groups(&team_nl_family, team_nl_ops,
						    team_nl_mcgrps);
}
static void team_nl_fini(void)
{
	genl_unregister_family(&team_nl_family);
}
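/* Change checkers: collect option instances marked as changed and push the
 * corresponding event messages out via the netlink multicast group.
 */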
static void __team_options_change_check(struct team *team)
{
	int err;
	struct team_option_inst *opt_inst;
	LIST_HEAD(sel_opt_inst_list);

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->changed)
			list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	}
	err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
	if (err && err != -ESRCH)
		netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
			    err);
}
/* rtnl lock is held */

static void __team_port_change_send(struct team_port *port, bool linkup)
{
	int err;

	port->changed = true;
	port->state.linkup = linkup;
	team_refresh_port_linkup(port);
	if (linkup) {
		struct ethtool_link_ksettings ecmd;

		err = __ethtool_get_link_ksettings(port->dev, &ecmd);
		if (!err) {
			port->state.speed = ecmd.base.speed;
			port->state.duplex = ecmd.base.duplex;
			goto send_event;
		}
	}
	port->state.speed = 0;
	port->state.duplex = 0;

send_event:
	err = team_nl_send_event_port_get(port->team, port);
	if (err && err != -ESRCH)
		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
			    port->dev->name, err);
}
static void __team_carrier_check(struct team *team)
{
	struct team_port *port;
	bool team_linkup;

	if (team->user_carrier_enabled)
		return;

	team_linkup = false;
	list_for_each_entry(port, &team->port_list, list) {
		if (port->linkup) {
			team_linkup = true;
			break;
		}
	}

	if (team_linkup)
		netif_carrier_on(team->dev);
	else
		netif_carrier_off(team->dev);
}
static void __team_port_change_check(struct team_port *port, bool linkup)
{
	if (port->state.linkup != linkup)
		__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}
static void __team_port_change_port_added(struct team_port *port, bool linkup)
{
	__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}
static void __team_port_change_port_removed(struct team_port *port)
{
	port->removed = true;
	__team_port_change_send(port, false);
	__team_carrier_check(port->team);
}
static void team_port_change_check(struct team_port *port, bool linkup)
{
	struct team *team = port->team;

	mutex_lock(&team->lock);
	__team_port_change_check(port, linkup);
	mutex_unlock(&team->lock);
}
/************************************
 * Net device notifier event handler
 ************************************/
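/* Events arrive here for every netdevice in the system; only devices that are
 * currently enslaved as team ports (team_port_get_rtnl() != NULL) are acted
 * upon.
 */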
static int team_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct team_port *port;

	port = team_port_get_rtnl(dev);
	if (!port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (netif_carrier_ok(dev))
			team_port_change_check(port, true);
		break;
	case NETDEV_DOWN:
		team_port_change_check(port, false);
		break;
	case NETDEV_CHANGE:
		if (netif_running(port->dev))
			team_port_change_check(port,
					       !!netif_carrier_ok(port->dev));
		break;
	case NETDEV_UNREGISTER:
		team_del_slave(port->team->dev, dev);
		break;
	case NETDEV_FEAT_CHANGE:
		team_compute_features(port->team);
		break;
	case NETDEV_PRECHANGEMTU:
		/* Forbid changing the MTU of an underlying port device */
		if (!port->team->port_mtu_change_allowed)
			return NOTIFY_BAD;
		break;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid changing the type of an underlying port device */
		return NOTIFY_BAD;
	case NETDEV_RESEND_IGMP:
		/* Propagate to master device */
		call_netdevice_notifiers(event, port->team->dev);
		break;
	}
	return NOTIFY_DONE;
}
static struct notifier_block team_notifier_block __read_mostly = {
	.notifier_call = team_device_event,
};
/***********************
 * Module init and exit
 ***********************/
static int __init team_module_init(void)
{
	int err;

	register_netdevice_notifier(&team_notifier_block);

	err = rtnl_link_register(&team_link_ops);
	if (err)
		goto err_rtnl_reg;

	err = team_nl_init();
	if (err)
		goto err_nl_init;

	return 0;

err_nl_init:
	rtnl_link_unregister(&team_link_ops);

err_rtnl_reg:
	unregister_netdevice_notifier(&team_notifier_block);

	return err;
}
static void __exit team_module_exit(void)
{
	team_nl_fini();
	rtnl_link_unregister(&team_link_ops);
	unregister_netdevice_notifier(&team_notifier_block);
}
module_init(team_module_init);
module_exit(team_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Ethernet team device driver");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
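/* Usage note (illustrative, not part of the driver): since the driver
 * registers rtnl link ops and MODULE_ALIAS_RTNL_LINK("team"), a team device
 * is typically created via rtnetlink, e.g. "ip link add name team0 type team",
 * and then configured through the generic netlink interface above, usually by
 * teamd/libteam rather than by hand.
 */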