/*
 * drivers/net/team/team.c - Network team device driver
 * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/ctype.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/if_vlan.h>
#include <linux/if_arp.h>
#include <linux/socket.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <net/genetlink.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
#include <linux/if_team.h>

#define DRV_NAME "team"

#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)

static struct team_port *team_port_get_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

static struct team_port *team_port_get_rtnl(const struct net_device *dev)
{
	struct team_port *port = rtnl_dereference(dev->rx_handler_data);

	return team_port_exists(dev) ? port : NULL;
}
/*
 * Since the ability to change device address for open port device is tested in
 * team_port_add, this function can be called without control of return value.
 */
static int __set_port_dev_addr(struct net_device *port_dev,
			       const unsigned char *dev_addr)
{
	struct sockaddr addr;

	memcpy(addr.sa_data, dev_addr, port_dev->addr_len);
	addr.sa_family = port_dev->type;
	return dev_set_mac_address(port_dev, &addr);
}

static int team_port_set_orig_dev_addr(struct team_port *port)
{
	return __set_port_dev_addr(port->dev, port->orig.dev_addr);
}

static int team_port_set_team_dev_addr(struct team *team,
					struct team_port *port)
{
	return __set_port_dev_addr(port->dev, team->dev->dev_addr);
}

int team_modeop_port_enter(struct team *team, struct team_port *port)
{
	return team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_enter);

void team_modeop_port_change_dev_addr(struct team *team,
				      struct team_port *port)
{
	team_port_set_team_dev_addr(team, port);
}
EXPORT_SYMBOL(team_modeop_port_change_dev_addr);

static void team_refresh_port_linkup(struct team_port *port)
{
	port->linkup = port->user.linkup_enabled ? port->user.linkup :
						   port->state.linkup;
}

struct team_option_inst { /* One for each option instance */
	struct list_head list;
	struct list_head tmp_list;
	struct team_option *option;
	struct team_option_inst_info info;
	bool changed;
	bool removed;
};
static struct team_option *__team_find_option(struct team *team,
					      const char *opt_name)
{
	struct team_option *option;

	list_for_each_entry(option, &team->option_list, list) {
		if (strcmp(option->name, opt_name) == 0)
			return option;
	}
	return NULL;
}

static void __team_option_inst_del(struct team_option_inst *opt_inst)
{
	list_del(&opt_inst->list);
	kfree(opt_inst);
}

static void __team_option_inst_del_option(struct team *team,
					  struct team_option *option)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option == option)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add(struct team *team, struct team_option *option,
				  struct team_port *port)
{
	struct team_option_inst *opt_inst;
	unsigned int array_size;
	unsigned int i;
	int err;

	array_size = option->array_size;
	if (!array_size)
		array_size = 1; /* No array but still need one instance */

	for (i = 0; i < array_size; i++) {
		opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
		if (!opt_inst)
			return -ENOMEM;
		opt_inst->option = option;
		opt_inst->info.port = port;
		opt_inst->info.array_index = i;
		opt_inst->changed = true;
		opt_inst->removed = false;
		list_add_tail(&opt_inst->list, &team->option_inst_list);
		if (option->init) {
			err = option->init(team, &opt_inst->info);
			if (err)
				return err;
		}
	}
	return 0;
}
static int __team_option_inst_add_option(struct team *team,
					 struct team_option *option)
{
	int err;

	if (!option->per_port) {
		err = __team_option_inst_add(team, option, NULL);
		if (err)
			goto inst_del_option;
	}
	return 0;

inst_del_option:
	__team_option_inst_del_option(team, option);
	return err;
}

static void __team_option_inst_mark_removed_option(struct team *team,
						    struct team_option *option)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->option == option) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}

static void __team_option_inst_del_port(struct team *team,
					struct team_port *port)
{
	struct team_option_inst *opt_inst, *tmp;

	list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
		if (opt_inst->option->per_port &&
		    opt_inst->info.port == port)
			__team_option_inst_del(opt_inst);
	}
}

static int __team_option_inst_add_port(struct team *team,
				       struct team_port *port)
{
	struct team_option *option;
	int err;

	list_for_each_entry(option, &team->option_list, list) {
		if (!option->per_port)
			continue;
		err = __team_option_inst_add(team, option, port);
		if (err)
			goto inst_del_port;
	}
	return 0;

inst_del_port:
	__team_option_inst_del_port(team, port);
	return err;
}

static void __team_option_inst_mark_removed_port(struct team *team,
						  struct team_port *port)
{
	struct team_option_inst *opt_inst;

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->info.port == port) {
			opt_inst->changed = true;
			opt_inst->removed = true;
		}
	}
}
static int __team_options_register(struct team *team,
				   const struct team_option *option,
				   size_t option_count)
{
	int i;
	struct team_option **dst_opts;
	int err;

	dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
			   GFP_KERNEL);
	if (!dst_opts)
		return -ENOMEM;
	for (i = 0; i < option_count; i++, option++) {
		if (__team_find_option(team, option->name)) {
			err = -EEXIST;
			goto alloc_rollback;
		}
		dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
		if (!dst_opts[i]) {
			err = -ENOMEM;
			goto alloc_rollback;
		}
	}

	for (i = 0; i < option_count; i++) {
		err = __team_option_inst_add_option(team, dst_opts[i]);
		if (err)
			goto inst_rollback;
		list_add_tail(&dst_opts[i]->list, &team->option_list);
	}

	kfree(dst_opts);
	return 0;

inst_rollback:
	for (i--; i >= 0; i--)
		__team_option_inst_del_option(team, dst_opts[i]);

	i = option_count - 1;
alloc_rollback:
	for (i--; i >= 0; i--)
		kfree(dst_opts[i]);

	kfree(dst_opts);
	return err;
}

static void __team_options_mark_removed(struct team *team,
					const struct team_option *option,
					size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt)
			__team_option_inst_mark_removed_option(team, del_opt);
	}
}

static void __team_options_unregister(struct team *team,
				      const struct team_option *option,
				      size_t option_count)
{
	int i;

	for (i = 0; i < option_count; i++, option++) {
		struct team_option *del_opt;

		del_opt = __team_find_option(team, option->name);
		if (del_opt) {
			__team_option_inst_del_option(team, del_opt);
			list_del(&del_opt->list);
			kfree(del_opt);
		}
	}
}

static void __team_options_change_check(struct team *team);

int team_options_register(struct team *team,
			  const struct team_option *option,
			  size_t option_count)
{
	int err;

	err = __team_options_register(team, option, option_count);
	if (err)
		return err;
	__team_options_change_check(team);
	return 0;
}
EXPORT_SYMBOL(team_options_register);

void team_options_unregister(struct team *team,
			     const struct team_option *option,
			     size_t option_count)
{
	__team_options_mark_removed(team, option, option_count);
	__team_options_change_check(team);
	__team_options_unregister(team, option, option_count);
}
EXPORT_SYMBOL(team_options_unregister);
static int team_option_get(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->getter)
		return -EOPNOTSUPP;
	return opt_inst->option->getter(team, ctx);
}

static int team_option_set(struct team *team,
			   struct team_option_inst *opt_inst,
			   struct team_gsetter_ctx *ctx)
{
	if (!opt_inst->option->setter)
		return -EOPNOTSUPP;
	return opt_inst->option->setter(team, ctx);
}

void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
{
	struct team_option_inst *opt_inst;

	opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
	opt_inst->changed = true;
}
EXPORT_SYMBOL(team_option_inst_set_change);

void team_options_change_check(struct team *team)
{
	__team_options_change_check(team);
}
EXPORT_SYMBOL(team_options_change_check);
static LIST_HEAD(mode_list);
static DEFINE_SPINLOCK(mode_list_lock);

struct team_mode_item {
	struct list_head list;
	const struct team_mode *mode;
};

static struct team_mode_item *__find_mode(const char *kind)
{
	struct team_mode_item *mitem;

	list_for_each_entry(mitem, &mode_list, list) {
		if (strcmp(mitem->mode->kind, kind) == 0)
			return mitem;
	}
	return NULL;
}

static bool is_good_mode_name(const char *name)
{
	while (*name != '\0') {
		if (!isalpha(*name) && !isdigit(*name) && *name != '_')
			return false;
		name++;
	}
	return true;
}

int team_mode_register(const struct team_mode *mode)
{
	int err = 0;
	struct team_mode_item *mitem;

	if (!is_good_mode_name(mode->kind) ||
	    mode->priv_size > TEAM_MODE_PRIV_SIZE)
		return -EINVAL;

	mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
	if (!mitem)
		return -ENOMEM;

	spin_lock(&mode_list_lock);
	if (__find_mode(mode->kind)) {
		err = -EEXIST;
		kfree(mitem);
		goto unlock;
	}
	mitem->mode = mode;
	list_add_tail(&mitem->list, &mode_list);
unlock:
	spin_unlock(&mode_list_lock);
	return err;
}
EXPORT_SYMBOL(team_mode_register);

void team_mode_unregister(const struct team_mode *mode)
{
	struct team_mode_item *mitem;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(mode->kind);
	if (mitem) {
		list_del_init(&mitem->list);
		kfree(mitem);
	}
	spin_unlock(&mode_list_lock);
}
EXPORT_SYMBOL(team_mode_unregister);
static const struct team_mode *team_mode_get(const char *kind)
{
	struct team_mode_item *mitem;
	const struct team_mode *mode = NULL;

	spin_lock(&mode_list_lock);
	mitem = __find_mode(kind);
	if (!mitem) {
		spin_unlock(&mode_list_lock);
		request_module("team-mode-%s", kind);
		spin_lock(&mode_list_lock);
		mitem = __find_mode(kind);
	}
	if (mitem) {
		mode = mitem->mode;
		if (!try_module_get(mode->owner))
			mode = NULL;
	}

	spin_unlock(&mode_list_lock);
	return mode;
}

static void team_mode_put(const struct team_mode *mode)
{
	module_put(mode->owner);
}

static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return false;
}

static rx_handler_result_t team_dummy_receive(struct team *team,
					      struct team_port *port,
					      struct sk_buff *skb)
{
	return RX_HANDLER_ANOTHER;
}

static const struct team_mode __team_no_mode = {
	.kind		= "*NOMODE*",
};

static bool team_is_mode_set(struct team *team)
{
	return team->mode != &__team_no_mode;
}

static void team_set_no_mode(struct team *team)
{
	team->user_carrier_enabled = false;
	team->mode = &__team_no_mode;
}

static void team_adjust_ops(struct team *team)
{
	/*
	 * To avoid checks in rx/tx skb paths, ensure here that non-null and
	 * correct ops are always set.
	 */

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->transmit)
		team->ops.transmit = team_dummy_transmit;
	else
		team->ops.transmit = team->mode->ops->transmit;

	if (!team->en_port_count || !team_is_mode_set(team) ||
	    !team->mode->ops->receive)
		team->ops.receive = team_dummy_receive;
	else
		team->ops.receive = team->mode->ops->receive;
}
/*
 * We can benefit from the fact that it's ensured no port is present
 * at the time of mode change. Therefore no packets are in flight, so there's
 * no need to set mode operations in any special way.
 */
static int __team_change_mode(struct team *team,
			      const struct team_mode *new_mode)
{
	/* Check if mode was previously set and do cleanup if so */
	if (team_is_mode_set(team)) {
		void (*exit_op)(struct team *team) = team->ops.exit;

		/* Clear ops area so no callback is called any longer */
		memset(&team->ops, 0, sizeof(struct team_mode_ops));
		team_adjust_ops(team);

		if (exit_op)
			exit_op(team);
		team_mode_put(team->mode);
		team_set_no_mode(team);
		/* zero private data area */
		memset(&team->mode_priv, 0,
		       sizeof(struct team) - offsetof(struct team, mode_priv));
	}

	if (!new_mode)
		return 0;

	if (new_mode->ops->init) {
		int err;

		err = new_mode->ops->init(team);
		if (err)
			return err;
	}

	team->mode = new_mode;
	memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
	team_adjust_ops(team);

	return 0;
}

static int team_change_mode(struct team *team, const char *kind)
{
	const struct team_mode *new_mode;
	struct net_device *dev = team->dev;
	int err;

	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "No ports can be present during mode change\n");
		return -EBUSY;
	}

	if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
		netdev_err(dev, "Unable to change to the same mode the team is in\n");
		return -EINVAL;
	}

	new_mode = team_mode_get(kind);
	if (!new_mode) {
		netdev_err(dev, "Mode \"%s\" not found\n", kind);
		return -EINVAL;
	}

	err = __team_change_mode(team, new_mode);
	if (err) {
		netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
		team_mode_put(new_mode);
		return err;
	}

	netdev_info(dev, "Mode changed to \"%s\"\n", kind);
	return 0;
}
/*********************
 * Peers notification
 *********************/

static void team_notify_peers_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, notify_peers.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->notify_peers.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->notify_peers.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->notify_peers.dw,
				      msecs_to_jiffies(team->notify_peers.interval));
}

static void team_notify_peers(struct team *team)
{
	if (!team->notify_peers.count || !netif_running(team->dev))
		return;
	atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
	schedule_delayed_work(&team->notify_peers.dw, 0);
}

static void team_notify_peers_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
}

static void team_notify_peers_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->notify_peers.dw);
}


/*******************************
 * Send multicast group rejoins
 *******************************/

static void team_mcast_rejoin_work(struct work_struct *work)
{
	struct team *team;
	int val;

	team = container_of(work, struct team, mcast_rejoin.dw.work);

	if (!rtnl_trylock()) {
		schedule_delayed_work(&team->mcast_rejoin.dw, 0);
		return;
	}
	val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
	if (val < 0) {
		rtnl_unlock();
		return;
	}
	call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
	rtnl_unlock();
	if (val)
		schedule_delayed_work(&team->mcast_rejoin.dw,
				      msecs_to_jiffies(team->mcast_rejoin.interval));
}

static void team_mcast_rejoin(struct team *team)
{
	if (!team->mcast_rejoin.count || !netif_running(team->dev))
		return;
	atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
	schedule_delayed_work(&team->mcast_rejoin.dw, 0);
}

static void team_mcast_rejoin_init(struct team *team)
{
	INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
}

static void team_mcast_rejoin_fini(struct team *team)
{
	cancel_delayed_work_sync(&team->mcast_rejoin.dw);
}
/************************
 * Rx path frame handler
 ************************/

/* note: already called with rcu_read_lock */
static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct team_port *port;
	struct team *team;
	rx_handler_result_t res;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	*pskb = skb;

	port = team_port_get_rcu(skb->dev);
	team = port->team;
	if (!team_port_enabled(port)) {
		/* allow exact match delivery for disabled ports */
		res = RX_HANDLER_EXACT;
	} else {
		res = team->ops.receive(team, port, skb);
	}
	if (res == RX_HANDLER_ANOTHER) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->rx_packets++;
		pcpu_stats->rx_bytes += skb->len;
		if (skb->pkt_type == PACKET_MULTICAST)
			pcpu_stats->rx_multicast++;
		u64_stats_update_end(&pcpu_stats->syncp);

		skb->dev = team->dev;
	} else {
		this_cpu_inc(team->pcpu_stats->rx_dropped);
	}

	return res;
}
/*************************************
 * Multiqueue Tx port select override
 *************************************/

static int team_queue_override_init(struct team *team)
{
	struct list_head *listarr;
	unsigned int queue_cnt = team->dev->num_tx_queues - 1;
	unsigned int i;

	if (!queue_cnt)
		return 0;
	listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL);
	if (!listarr)
		return -ENOMEM;
	team->qom_lists = listarr;
	for (i = 0; i < queue_cnt; i++)
		INIT_LIST_HEAD(listarr++);
	return 0;
}

static void team_queue_override_fini(struct team *team)
{
	kfree(team->qom_lists);
}

static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
{
	return &team->qom_lists[queue_id - 1];
}
/*
 * note: already called with rcu_read_lock
 */
static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
{
	struct list_head *qom_list;
	struct team_port *port;

	if (!team->queue_override_enabled || !skb->queue_mapping)
		return false;
	qom_list = __team_get_qom_list(team, skb->queue_mapping);
	list_for_each_entry_rcu(port, qom_list, qom_list) {
		if (!team_dev_queue_xmit(team, port, skb))
			return true;
	}
	return false;
}

static void __team_queue_override_port_del(struct team *team,
					   struct team_port *port)
{
	if (!port->queue_id)
		return;
	list_del_rcu(&port->qom_list);
}

static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
						       struct team_port *cur)
{
	if (port->priority < cur->priority)
		return true;
	if (port->priority > cur->priority)
		return false;
	if (port->index < cur->index)
		return true;
	return false;
}

static void __team_queue_override_port_add(struct team *team,
					   struct team_port *port)
{
	struct team_port *cur;
	struct list_head *qom_list;
	struct list_head *node;

	if (!port->queue_id)
		return;
	qom_list = __team_get_qom_list(team, port->queue_id);
	node = qom_list;
	list_for_each_entry(cur, qom_list, qom_list) {
		if (team_queue_override_port_has_gt_prio_than(port, cur))
			break;
		node = &cur->qom_list;
	}
	list_add_tail_rcu(&port->qom_list, node);
}

static void __team_queue_override_enabled_check(struct team *team)
{
	struct team_port *port;
	bool enabled = false;

	list_for_each_entry(port, &team->port_list, list) {
		if (port->queue_id) {
			enabled = true;
			break;
		}
	}
	if (enabled == team->queue_override_enabled)
		return;
	netdev_dbg(team->dev, "%s queue override\n",
		   enabled ? "Enabling" : "Disabling");
	team->queue_override_enabled = enabled;
}

static void team_queue_override_port_prio_changed(struct team *team,
						  struct team_port *port)
{
	if (!port->queue_id || team_port_enabled(port))
		return;
	__team_queue_override_port_del(team, port);
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_change_queue_id(struct team *team,
						      struct team_port *port,
						      u16 new_queue_id)
{
	if (team_port_enabled(port)) {
		__team_queue_override_port_del(team, port);
		port->queue_id = new_queue_id;
		__team_queue_override_port_add(team, port);
		__team_queue_override_enabled_check(team);
	} else {
		port->queue_id = new_queue_id;
	}
}

static void team_queue_override_port_add(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_add(team, port);
	__team_queue_override_enabled_check(team);
}

static void team_queue_override_port_del(struct team *team,
					 struct team_port *port)
{
	__team_queue_override_port_del(team, port);
	__team_queue_override_enabled_check(team);
}
static bool team_port_find(const struct team *team,
			   const struct team_port *port)
{
	struct team_port *cur;

	list_for_each_entry(cur, &team->port_list, list)
		if (cur == port)
			return true;
	return false;
}

/*
 * Enable/disable port by adding to enabled port hashlist and setting
 * port->index (Might be racy so reader could see incorrect ifindex when
 * processing a flying packet, but that is not a problem). Write guarded
 * by team->lock.
 */
static void team_port_enable(struct team *team,
			     struct team_port *port)
{
	if (team_port_enabled(port))
		return;
	port->index = team->en_port_count++;
	hlist_add_head_rcu(&port->hlist,
			   team_port_index_hash(team, port->index));
	team_adjust_ops(team);
	team_queue_override_port_add(team, port);
	if (team->ops.port_enabled)
		team->ops.port_enabled(team, port);
	team_notify_peers(team);
	team_mcast_rejoin(team);
}

static void __reconstruct_port_hlist(struct team *team, int rm_index)
{
	int i;
	struct team_port *port;

	for (i = rm_index + 1; i < team->en_port_count; i++) {
		port = team_get_port_by_index(team, i);
		hlist_del_rcu(&port->hlist);
		port->index--;
		hlist_add_head_rcu(&port->hlist,
				   team_port_index_hash(team, port->index));
	}
}

static void team_port_disable(struct team *team,
			      struct team_port *port)
{
	if (!team_port_enabled(port))
		return;
	if (team->ops.port_disabled)
		team->ops.port_disabled(team, port);
	hlist_del_rcu(&port->hlist);
	__reconstruct_port_hlist(team, port->index);
	port->index = -1;
	team->en_port_count--;
	team_queue_override_port_del(team, port);
	team_adjust_ops(team);
	team_notify_peers(team);
	team_mcast_rejoin(team);
}
#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
			    NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
			    NETIF_F_HIGHDMA | NETIF_F_LRO)

static void __team_compute_features(struct team *team)
{
	struct team_port *port;
	u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
	unsigned short max_hard_header_len = ETH_HLEN;
	unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
					IFF_XMIT_DST_RELEASE_PERM;

	list_for_each_entry(port, &team->port_list, list) {
		vlan_features = netdev_increment_features(vlan_features,
					port->dev->vlan_features,
					TEAM_VLAN_FEATURES);

		dst_release_flag &= port->dev->priv_flags;
		if (port->dev->hard_header_len > max_hard_header_len)
			max_hard_header_len = port->dev->hard_header_len;
	}

	team->dev->vlan_features = vlan_features;
	team->dev->hard_header_len = max_hard_header_len;

	team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
		team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;

	netdev_change_features(team->dev);
}

static void team_compute_features(struct team *team)
{
	mutex_lock(&team->lock);
	__team_compute_features(team);
	mutex_unlock(&team->lock);
}
static int team_port_enter(struct team *team, struct team_port *port)
{
	int err = 0;

	dev_hold(team->dev);
	if (team->ops.port_enter) {
		err = team->ops.port_enter(team, port);
		if (err) {
			netdev_err(team->dev, "Device %s failed to enter team mode\n",
				   port->dev->name);
			goto err_port_enter;
		}
	}

	return 0;

err_port_enter:
	dev_put(team->dev);

	return err;
}

static void team_port_leave(struct team *team, struct team_port *port)
{
	if (team->ops.port_leave)
		team->ops.port_leave(team, port);
	dev_put(team->dev);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static int team_port_enable_netpoll(struct team *team, struct team_port *port)
{
	struct netpoll *np;
	int err;

	if (!team->dev->npinfo)
		return 0;

	np = kzalloc(sizeof(*np), GFP_KERNEL);
	if (!np)
		return -ENOMEM;

	err = __netpoll_setup(np, port->dev);
	if (err) {
		kfree(np);
		return err;
	}
	port->np = np;
	return err;
}

static void team_port_disable_netpoll(struct team_port *port)
{
	struct netpoll *np = port->np;

	if (!np)
		return;
	port->np = NULL;

	/* Wait for transmitting packets to finish before freeing. */
	synchronize_rcu_bh();
	__netpoll_cleanup(np);
	kfree(np);
}
#else
static int team_port_enable_netpoll(struct team *team, struct team_port *port)
{
	return 0;
}
static void team_port_disable_netpoll(struct team_port *port)
{
}
#endif

static int team_upper_dev_link(struct net_device *dev,
			       struct net_device *port_dev)
{
	int err;

	err = netdev_master_upper_dev_link(port_dev, dev);
	if (err)
		return err;
	port_dev->priv_flags |= IFF_TEAM_PORT;
	return 0;
}

static void team_upper_dev_unlink(struct net_device *dev,
				  struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_TEAM_PORT;
}

static void __team_port_change_port_added(struct team_port *port, bool linkup);
static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev);
static int team_port_add(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;
	int err;

	if (port_dev->flags & IFF_LOOPBACK) {
		netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
			   portname);
		return -EINVAL;
	}

	if (team_port_exists(port_dev)) {
		netdev_err(dev, "Device %s is already a port "
				"of a team device\n", portname);
		return -EBUSY;
	}

	if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
	    vlan_uses_dev(dev)) {
		netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
			   portname);
		return -EPERM;
	}

	err = team_dev_type_check_change(dev, port_dev);
	if (err)
		return err;

	if (port_dev->flags & IFF_UP) {
		netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
			   portname);
		return -EBUSY;
	}

	port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
		       GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = port_dev;
	port->team = team;
	INIT_LIST_HEAD(&port->qom_list);

	port->orig.mtu = port_dev->mtu;
	err = dev_set_mtu(port_dev, dev->mtu);
	if (err) {
		netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
		goto err_set_mtu;
	}

	memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);

	err = team_port_enter(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to enter team mode\n",
			   portname);
		goto err_port_enter;
	}

	err = dev_open(port_dev);
	if (err) {
		netdev_dbg(dev, "Device %s opening failed\n",
			   portname);
		goto err_dev_open;
	}

	err = vlan_vids_add_by_dev(port_dev, dev);
	if (err) {
		netdev_err(dev, "Failed to add vlan ids to device %s\n",
			   portname);
		goto err_vids_add;
	}

	err = team_port_enable_netpoll(team, port);
	if (err) {
		netdev_err(dev, "Failed to enable netpoll on device %s\n",
			   portname);
		goto err_enable_netpoll;
	}

	if (!(dev->features & NETIF_F_LRO))
		dev_disable_lro(port_dev);

	err = netdev_rx_handler_register(port_dev, team_handle_frame,
					 port);
	if (err) {
		netdev_err(dev, "Device %s failed to register rx_handler\n",
			   portname);
		goto err_handler_register;
	}

	err = team_upper_dev_link(dev, port_dev);
	if (err) {
		netdev_err(dev, "Device %s failed to set upper link\n",
			   portname);
		goto err_set_upper_link;
	}

	err = __team_option_inst_add_port(team, port);
	if (err) {
		netdev_err(dev, "Device %s failed to add per-port options\n",
			   portname);
		goto err_option_port_add;
	}

	list_add_tail_rcu(&port->list, &team->port_list);
	team_port_enable(team, port);
	__team_compute_features(team);
	__team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
	__team_options_change_check(team);

	netdev_info(dev, "Port device %s added\n", portname);

	return 0;

err_option_port_add:
	team_upper_dev_unlink(dev, port_dev);

err_set_upper_link:
	netdev_rx_handler_unregister(port_dev);

err_handler_register:
	team_port_disable_netpoll(port);

err_enable_netpoll:
	vlan_vids_del_by_dev(port_dev, dev);

err_vids_add:
	dev_close(port_dev);

err_dev_open:
	team_port_leave(team, port);
	team_port_set_orig_dev_addr(port);

err_port_enter:
	dev_set_mtu(port_dev, port->orig.mtu);

err_set_mtu:
	kfree(port);

	return err;
}
static void __team_port_change_port_removed(struct team_port *port);

static int team_port_del(struct team *team, struct net_device *port_dev)
{
	struct net_device *dev = team->dev;
	struct team_port *port;
	char *portname = port_dev->name;

	port = team_port_get_rtnl(port_dev);
	if (!port || !team_port_find(team, port)) {
		netdev_err(dev, "Device %s does not act as a port of this team\n",
			   portname);
		return -ENOENT;
	}

	team_port_disable(team, port);
	list_del_rcu(&port->list);
	team_upper_dev_unlink(dev, port_dev);
	netdev_rx_handler_unregister(port_dev);
	team_port_disable_netpoll(port);
	vlan_vids_del_by_dev(port_dev, dev);
	dev_uc_unsync(port_dev, dev);
	dev_mc_unsync(port_dev, dev);
	dev_close(port_dev);
	team_port_leave(team, port);

	__team_option_inst_mark_removed_port(team, port);
	__team_options_change_check(team);
	__team_option_inst_del_port(team, port);
	__team_port_change_port_removed(port);

	team_port_set_orig_dev_addr(port);
	dev_set_mtu(port_dev, port->orig.mtu);
	kfree_rcu(port, rcu);
	netdev_info(dev, "Port device %s removed\n", portname);
	__team_compute_features(team);

	return 0;
}
static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	ctx->data.str_val = team->mode->kind;
	return 0;
}

static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	return team_change_mode(team, ctx->data.str_val);
}

static int team_notify_peers_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.count;
	return 0;
}

static int team_notify_peers_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->notify_peers.count = ctx->data.u32_val;
	return 0;
}

static int team_notify_peers_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->notify_peers.interval;
	return 0;
}

static int team_notify_peers_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->notify_peers.interval = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_count_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.count;
	return 0;
}

static int team_mcast_rejoin_count_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.count = ctx->data.u32_val;
	return 0;
}

static int team_mcast_rejoin_interval_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	ctx->data.u32_val = team->mcast_rejoin.interval;
	return 0;
}

static int team_mcast_rejoin_interval_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	team->mcast_rejoin.interval = ctx->data.u32_val;
	return 0;
}

static int team_port_en_option_get(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = team_port_enabled(port);
	return 0;
}

static int team_port_en_option_set(struct team *team,
				   struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	if (ctx->data.bool_val)
		team_port_enable(team, port);
	else
		team_port_disable(team, port);
	return 0;
}

static int team_user_linkup_option_get(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup;
	return 0;
}

static void __team_carrier_check(struct team *team);

static int team_user_linkup_option_set(struct team *team,
				       struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static int team_user_linkup_en_option_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.bool_val = port->user.linkup_enabled;
	return 0;
}

static int team_user_linkup_en_option_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	port->user.linkup_enabled = ctx->data.bool_val;
	team_refresh_port_linkup(port);
	__team_carrier_check(port->team);
	return 0;
}

static int team_priority_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.s32_val = port->priority;
	return 0;
}

static int team_priority_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	s32 priority = ctx->data.s32_val;

	if (port->priority == priority)
		return 0;
	port->priority = priority;
	team_queue_override_port_prio_changed(team, port);
	return 0;
}

static int team_queue_id_option_get(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;

	ctx->data.u32_val = port->queue_id;
	return 0;
}

static int team_queue_id_option_set(struct team *team,
				    struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	u16 new_queue_id = ctx->data.u32_val;

	if (port->queue_id == new_queue_id)
		return 0;
	if (new_queue_id >= team->dev->real_num_tx_queues)
		return -EINVAL;
	team_queue_override_port_change_queue_id(team, port, new_queue_id);
	return 0;
}
static const struct team_option team_options[] = {
	{
		.name = "mode",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = team_mode_option_get,
		.setter = team_mode_option_set,
	},
	{
		.name = "notify_peers_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_count_get,
		.setter = team_notify_peers_count_set,
	},
	{
		.name = "notify_peers_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_notify_peers_interval_get,
		.setter = team_notify_peers_interval_set,
	},
	{
		.name = "mcast_rejoin_count",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_count_get,
		.setter = team_mcast_rejoin_count_set,
	},
	{
		.name = "mcast_rejoin_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = team_mcast_rejoin_interval_get,
		.setter = team_mcast_rejoin_interval_set,
	},
	{
		.name = "enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_port_en_option_get,
		.setter = team_port_en_option_set,
	},
	{
		.name = "user_linkup",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_option_get,
		.setter = team_user_linkup_option_set,
	},
	{
		.name = "user_linkup_enabled",
		.type = TEAM_OPTION_TYPE_BOOL,
		.per_port = true,
		.getter = team_user_linkup_en_option_get,
		.setter = team_user_linkup_en_option_set,
	},
	{
		.name = "priority",
		.type = TEAM_OPTION_TYPE_S32,
		.per_port = true,
		.getter = team_priority_option_get,
		.setter = team_priority_option_set,
	},
	{
		.name = "queue_id",
		.type = TEAM_OPTION_TYPE_U32,
		.per_port = true,
		.getter = team_queue_id_option_get,
		.setter = team_queue_id_option_set,
	},
};
static struct lock_class_key team_netdev_xmit_lock_key;
static struct lock_class_key team_netdev_addr_lock_key;
static struct lock_class_key team_tx_busylock_key;

static void team_set_lockdep_class_one(struct net_device *dev,
				       struct netdev_queue *txq,
				       void *unused)
{
	lockdep_set_class(&txq->_xmit_lock, &team_netdev_xmit_lock_key);
}

static void team_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL);
	dev->qdisc_tx_busylock = &team_tx_busylock_key;
}

static int team_init(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	int i;
	int err;

	team->dev = dev;
	mutex_init(&team->lock);
	team_set_no_mode(team);

	team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
	if (!team->pcpu_stats)
		return -ENOMEM;

	for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
		INIT_HLIST_HEAD(&team->en_port_hlist[i]);
	INIT_LIST_HEAD(&team->port_list);
	err = team_queue_override_init(team);
	if (err)
		goto err_team_queue_override_init;

	team_adjust_ops(team);

	INIT_LIST_HEAD(&team->option_list);
	INIT_LIST_HEAD(&team->option_inst_list);

	team_notify_peers_init(team);
	team_mcast_rejoin_init(team);

	err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
	if (err)
		goto err_options_register;
	netif_carrier_off(dev);

	team_set_lockdep_class(dev);

	return 0;

err_options_register:
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
err_team_queue_override_init:
	free_percpu(team->pcpu_stats);

	return err;
}
static void team_uninit(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	struct team_port *tmp;

	mutex_lock(&team->lock);
	list_for_each_entry_safe(port, tmp, &team->port_list, list)
		team_port_del(team, port->dev);

	__team_change_mode(team, NULL); /* cleanup */
	__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
	team_mcast_rejoin_fini(team);
	team_notify_peers_fini(team);
	team_queue_override_fini(team);
	mutex_unlock(&team->lock);
}

static void team_destructor(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	free_percpu(team->pcpu_stats);
	free_netdev(dev);
}

static int team_open(struct net_device *dev)
{
	return 0;
}

static int team_close(struct net_device *dev)
{
	return 0;
}
/*
 * note: already called with rcu_read_lock
 */
static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	bool tx_success;
	unsigned int len = skb->len;

	tx_success = team_queue_override_transmit(team, skb);
	if (!tx_success)
		tx_success = team->ops.transmit(team, skb);
	if (tx_success) {
		struct team_pcpu_stats *pcpu_stats;

		pcpu_stats = this_cpu_ptr(team->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(team->pcpu_stats->tx_dropped);
	}

	return NETDEV_TX_OK;
}

static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	/*
	 * This helper function exists to help dev_pick_tx get the correct
	 * destination queue. Using a helper function skips a call to
	 * skb_tx_hash and will put the skbs in the queue we expect on their
	 * way down to the team driver.
	 */
	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;

	/*
	 * Save the original txq to restore before passing to the driver
	 */
	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;

	if (unlikely(txq >= dev->real_num_tx_queues)) {
		do {
			txq -= dev->real_num_tx_queues;
		} while (txq >= dev->real_num_tx_queues);
	}
	return txq;
}
static void team_change_rx_flags(struct net_device *dev, int change)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int inc;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		if (change & IFF_PROMISC) {
			inc = dev->flags & IFF_PROMISC ? 1 : -1;
			dev_set_promiscuity(port->dev, inc);
		}
		if (change & IFF_ALLMULTI) {
			inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
			dev_set_allmulti(port->dev, inc);
		}
	}
	rcu_read_unlock();
}

static void team_set_rx_mode(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		dev_uc_sync_multiple(port->dev, dev);
		dev_mc_sync_multiple(port->dev, dev);
	}
	rcu_read_unlock();
}

static int team_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list)
		if (team->ops.port_change_dev_addr)
			team->ops.port_change_dev_addr(team, port);
	mutex_unlock(&team->lock);
	return 0;
}
static int team_change_mtu(struct net_device *dev, int new_mtu)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	team->port_mtu_change_allowed = true;
	list_for_each_entry(port, &team->port_list, list) {
		err = dev_set_mtu(port->dev, new_mtu);
		if (err) {
			netdev_err(dev, "Device %s failed to change mtu",
				   port->dev->name);
			goto unwind;
		}
	}
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	dev->mtu = new_mtu;

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		dev_set_mtu(port->dev, dev->mtu);
	team->port_mtu_change_allowed = false;
	mutex_unlock(&team->lock);

	return err;
}
static struct rtnl_link_stats64 *
team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct team *team = netdev_priv(dev);
	struct team_pcpu_stats *p;
	u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
	u32 rx_dropped = 0, tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(team->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			rx_multicast	= p->rx_multicast;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->multicast	+= rx_multicast;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/*
		 * rx_dropped & tx_dropped are u32, updated
		 * without syncp protection.
		 */
		rx_dropped	+= p->rx_dropped;
		tx_dropped	+= p->tx_dropped;
	}
	stats->rx_dropped	= rx_dropped;
	stats->tx_dropped	= tx_dropped;
	return stats;
}
static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err;

	/*
	 * Although this is a reader, it's guarded by team lock. It's not
	 * possible to traverse the list in reverse under rcu_read_lock.
	 */
	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = vlan_vid_add(port->dev, proto, vid);
		if (err)
			goto unwind;
	}
	mutex_unlock(&team->lock);

	return 0;

unwind:
	list_for_each_entry_continue_reverse(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	mutex_unlock(&team->lock);

	return err;
}

static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list)
		vlan_vid_del(port->dev, proto, vid);
	rcu_read_unlock();

	return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void team_poll_controller(struct net_device *dev)
{
}

static void __team_netpoll_cleanup(struct team *team)
{
	struct team_port *port;

	list_for_each_entry(port, &team->port_list, list)
		team_port_disable_netpoll(port);
}

static void team_netpoll_cleanup(struct net_device *dev)
{
	struct team *team = netdev_priv(dev);

	mutex_lock(&team->lock);
	__team_netpoll_cleanup(team);
	mutex_unlock(&team->lock);
}

static int team_netpoll_setup(struct net_device *dev,
			      struct netpoll_info *npifo)
{
	struct team *team = netdev_priv(dev);
	struct team_port *port;
	int err = 0;

	mutex_lock(&team->lock);
	list_for_each_entry(port, &team->port_list, list) {
		err = team_port_enable_netpoll(team, port);
		if (err) {
			__team_netpoll_cleanup(team);
			break;
		}
	}
	mutex_unlock(&team->lock);
	return err;
}
#endif

static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_add(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}

static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	int err;

	mutex_lock(&team->lock);
	err = team_port_del(team, port_dev);
	mutex_unlock(&team->lock);
	return err;
}
static netdev_features_t team_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	struct team_port *port;
	struct team *team = netdev_priv(dev);
	netdev_features_t mask;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;
	features |= NETIF_F_ALL_FOR_ALL;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &team->port_list, list) {
		features = netdev_increment_features(features,
						     port->dev->features,
						     mask);
	}
	rcu_read_unlock();

	features = netdev_add_tso_features(features, mask);

	return features;
}

static int team_change_carrier(struct net_device *dev, bool new_carrier)
{
	struct team *team = netdev_priv(dev);

	team->user_carrier_enabled = true;

	if (new_carrier)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);
	return 0;
}
static const struct net_device_ops team_netdev_ops = {
	.ndo_init		= team_init,
	.ndo_uninit		= team_uninit,
	.ndo_open		= team_open,
	.ndo_stop		= team_close,
	.ndo_start_xmit		= team_xmit,
	.ndo_select_queue	= team_select_queue,
	.ndo_change_rx_flags	= team_change_rx_flags,
	.ndo_set_rx_mode	= team_set_rx_mode,
	.ndo_set_mac_address	= team_set_mac_address,
	.ndo_change_mtu		= team_change_mtu,
	.ndo_get_stats64	= team_get_stats64,
	.ndo_vlan_rx_add_vid	= team_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= team_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= team_poll_controller,
	.ndo_netpoll_setup	= team_netpoll_setup,
	.ndo_netpoll_cleanup	= team_netpoll_cleanup,
#endif
	.ndo_add_slave		= team_add_slave,
	.ndo_del_slave		= team_del_slave,
	.ndo_fix_features	= team_fix_features,
	.ndo_change_carrier	= team_change_carrier,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_features_check	= passthru_features_check,
};
/***********************
 * ethtool interface
 ***********************/

static void team_ethtool_get_drvinfo(struct net_device *dev,
				     struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static const struct ethtool_ops team_ethtool_ops = {
	.get_drvinfo		= team_ethtool_get_drvinfo,
	.get_link		= ethtool_op_get_link,
};

/***********************
 * rt netlink interface
 ***********************/

static void team_setup_by_port(struct net_device *dev,
			       struct net_device *port_dev)
{
	dev->header_ops = port_dev->header_ops;
	dev->type = port_dev->type;
	dev->hard_header_len = port_dev->hard_header_len;
	dev->addr_len = port_dev->addr_len;
	dev->mtu = port_dev->mtu;
	memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
	eth_hw_addr_inherit(dev, port_dev);
}
static int team_dev_type_check_change(struct net_device *dev,
				      struct net_device *port_dev)
{
	struct team *team = netdev_priv(dev);
	char *portname = port_dev->name;
	int err;

	if (dev->type == port_dev->type)
		return 0;
	if (!list_empty(&team->port_list)) {
		netdev_err(dev, "Device %s is of different type\n", portname);
		return -EBUSY;
	}
	err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
	err = notifier_to_errno(err);
	if (err) {
		netdev_err(dev, "Refused to change device type\n");
		return err;
	}
	dev_uc_flush(dev);
	dev_mc_flush(dev);
	team_setup_by_port(dev, port_dev);
	call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
	return 0;
}

static void team_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->netdev_ops = &team_netdev_ops;
	dev->ethtool_ops = &team_ethtool_ops;
	dev->destructor = team_destructor;
	dev->flags |= IFF_MULTICAST;
	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
	dev->priv_flags |= IFF_NO_QUEUE;

	/*
	 * Indicate we support unicast address filtering. That way core won't
	 * bring us to promisc mode in case a unicast addr is added.
	 * Let this up to underlay drivers.
	 */
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;

	dev->features |= NETIF_F_LLTX;
	dev->features |= NETIF_F_GRO;

	/* Don't allow team devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	dev->hw_features = TEAM_VLAN_FEATURES |
			   NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_CTAG_RX |
			   NETIF_F_HW_VLAN_CTAG_FILTER;

	dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
	dev->features |= dev->hw_features;
}
static int team_newlink(struct net *src_net, struct net_device *dev,
			struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS] == NULL)
		eth_hw_addr_random(dev);

	return register_netdevice(dev);
}

static int team_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static unsigned int team_get_num_tx_queues(void)
{
	return TEAM_DEFAULT_NUM_TX_QUEUES;
}

static unsigned int team_get_num_rx_queues(void)
{
	return TEAM_DEFAULT_NUM_RX_QUEUES;
}

static struct rtnl_link_ops team_link_ops __read_mostly = {
	.kind			= DRV_NAME,
	.priv_size		= sizeof(struct team),
	.setup			= team_setup,
	.newlink		= team_newlink,
	.validate		= team_validate,
	.get_num_tx_queues	= team_get_num_tx_queues,
	.get_num_rx_queues	= team_get_num_rx_queues,
};
/***********************************
 * Generic netlink custom interface
 ***********************************/

static struct genl_family team_nl_family = {
	.id		= GENL_ID_GENERATE,
	.name		= TEAM_GENL_NAME,
	.version	= TEAM_GENL_VERSION,
	.maxattr	= TEAM_ATTR_MAX,
	.netnsok	= true,
};

static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
	[TEAM_ATTR_UNSPEC]		= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_TEAM_IFINDEX]	= { .type = NLA_U32 },
	[TEAM_ATTR_LIST_OPTION]		= { .type = NLA_NESTED },
	[TEAM_ATTR_LIST_PORT]		= { .type = NLA_NESTED },
};

static const struct nla_policy
team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
	[TEAM_ATTR_OPTION_UNSPEC]	= { .type = NLA_UNSPEC, },
	[TEAM_ATTR_OPTION_NAME] = {
		.type = NLA_STRING,
		.len = TEAM_STRING_MAX_LEN,
	},
	[TEAM_ATTR_OPTION_CHANGED]	= { .type = NLA_FLAG },
	[TEAM_ATTR_OPTION_TYPE]		= { .type = NLA_U8 },
	[TEAM_ATTR_OPTION_DATA]		= { .type = NLA_BINARY },
};
static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr;
	int err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
			  &team_nl_family, 0, TEAM_CMD_NOOP);
	if (!hdr) {
		err = -EMSGSIZE;
		goto err_msg_put;
	}

	genlmsg_end(msg, hdr);

	return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);

err_msg_put:
	nlmsg_free(msg);

	return err;
}

/*
 * Netlink cmd functions should be locked by following two functions.
 * Since dev gets held here, that ensures dev won't disappear in between.
 */
static struct team *team_nl_team_get(struct genl_info *info)
{
	struct net *net = genl_info_net(info);
	int ifindex;
	struct net_device *dev;
	struct team *team;

	if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
		return NULL;

	ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
	dev = dev_get_by_index(net, ifindex);
	if (!dev || dev->netdev_ops != &team_netdev_ops) {
		if (dev)
			dev_put(dev);
		return NULL;
	}

	team = netdev_priv(dev);
	mutex_lock(&team->lock);
	return team;
}

static void team_nl_team_put(struct team *team)
{
	mutex_unlock(&team->lock);
	dev_put(team->dev);
}
typedef int team_nl_send_func_t(struct sk_buff *skb,
				struct team *team, u32 portid);

static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
{
	return genlmsg_unicast(dev_net(team->dev), skb, portid);
}

static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
				       struct team_option_inst *opt_inst)
{
	struct nlattr *option_item;
	struct team_option *option = opt_inst->option;
	struct team_option_inst_info *opt_inst_info = &opt_inst->info;
	struct team_gsetter_ctx ctx;
	int err;

	ctx.info = opt_inst_info;
	err = team_option_get(team, opt_inst, &ctx);
	if (err)
		return err;

	option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
	if (!option_item)
		return -EMSGSIZE;

	if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
		goto nest_cancel;
	if (opt_inst_info->port &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
			opt_inst_info->port->dev->ifindex))
		goto nest_cancel;
	if (opt_inst->option->array_size &&
	    nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
			opt_inst_info->array_index))
		goto nest_cancel;

	switch (option->type) {
	case TEAM_OPTION_TYPE_U32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
			goto nest_cancel;
		if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_STRING:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
			goto nest_cancel;
		if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
				   ctx.data.str_val))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BINARY:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
			goto nest_cancel;
		if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
			    ctx.data.bin_val.ptr))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_BOOL:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
			goto nest_cancel;
		if (ctx.data.bool_val &&
		    nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
			goto nest_cancel;
		break;
	case TEAM_OPTION_TYPE_S32:
		if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
			goto nest_cancel;
		if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
			goto nest_cancel;
		break;
	default:
		BUG();
	}
	if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
		goto nest_cancel;
	if (opt_inst->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
			goto nest_cancel;
		opt_inst->changed = false;
	}
	nla_nest_end(skb, option_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, option_item);
	return -EMSGSIZE;
}
static int __send_and_alloc_skb(struct sk_buff **pskb,
				struct team *team, u32 portid,
				team_nl_send_func_t *send_func)
{
	int err;

	if (*pskb) {
		err = send_func(*pskb, team, portid);
		if (err)
			return err;
	}
	*pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!*pskb)
		return -ENOMEM;
	return 0;
}

static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
				    int flags, team_nl_send_func_t *send_func,
				    struct list_head *sel_opt_inst_list)
{
	struct nlattr *option_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_option_inst *opt_inst;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	opt_inst = list_first_entry(sel_opt_inst_list,
				    struct team_option_inst, tmp_list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_OPTIONS_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
	if (!option_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;
	list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
		err = team_nl_fill_one_option_get(skb, team, opt_inst);
		if (err) {
			if (err == -EMSGSIZE) {
				if (!i)
					goto errout;
				incomplete = true;
				break;
			}
			goto errout;
		}
		i++;
	}

	nla_nest_end(skb, option_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			goto errout;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	genlmsg_cancel(skb, hdr);
	nlmsg_free(skb);
	return err;
}
static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	struct team_option_inst *opt_inst;
	int err;
	LIST_HEAD(sel_opt_inst_list);

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	list_for_each_entry(opt_inst, &team->option_inst_list, list)
		list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
				       NLM_F_ACK, team_nl_send_unicast,
				       &sel_opt_inst_list);

	team_nl_team_put(team);

	return err;
}

static int team_nl_send_event_options_get(struct team *team,
					   struct list_head *sel_opt_inst_list);

static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
{
	struct team *team;
	int err = 0;
	int i;
	struct nlattr *nl_option;
	LIST_HEAD(opt_inst_list);

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
		err = -EINVAL;
		goto team_put;
	}

	nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
		struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
		struct nlattr *attr;
		struct nlattr *attr_data;
		enum team_option_type opt_type;
		int opt_port_ifindex = 0; /* != 0 for per-port options */
		u32 opt_array_index = 0;
		bool opt_is_array = false;
		struct team_option_inst *opt_inst;
		char *opt_name;
		bool opt_found = false;

		if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
			err = -EINVAL;
			goto team_put;
		}
		err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
				       nl_option, team_nl_option_policy);
		if (err)
			goto team_put;
		if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
		    !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
			err = -EINVAL;
			goto team_put;
		}
		switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
		case NLA_U32:
			opt_type = TEAM_OPTION_TYPE_U32;
			break;
		case NLA_STRING:
			opt_type = TEAM_OPTION_TYPE_STRING;
			break;
		case NLA_BINARY:
			opt_type = TEAM_OPTION_TYPE_BINARY;
			break;
		case NLA_FLAG:
			opt_type = TEAM_OPTION_TYPE_BOOL;
			break;
		case NLA_S32:
			opt_type = TEAM_OPTION_TYPE_S32;
			break;
		default:
			err = -EINVAL;
			goto team_put;
		}

		attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
		if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
			err = -EINVAL;
			goto team_put;
		}

		opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
		attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
		if (attr)
			opt_port_ifindex = nla_get_u32(attr);

		attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
		if (attr) {
			opt_is_array = true;
			opt_array_index = nla_get_u32(attr);
		}

		list_for_each_entry(opt_inst, &team->option_inst_list, list) {
			struct team_option *option = opt_inst->option;
			struct team_gsetter_ctx ctx;
			struct team_option_inst_info *opt_inst_info;
			int tmp_ifindex;

			opt_inst_info = &opt_inst->info;
			tmp_ifindex = opt_inst_info->port ?
				      opt_inst_info->port->dev->ifindex : 0;
			if (option->type != opt_type ||
			    strcmp(option->name, opt_name) ||
			    tmp_ifindex != opt_port_ifindex ||
			    (option->array_size && !opt_is_array) ||
			    opt_inst_info->array_index != opt_array_index)
				continue;
			opt_found = true;
			ctx.info = opt_inst_info;
			switch (opt_type) {
			case TEAM_OPTION_TYPE_U32:
				ctx.data.u32_val = nla_get_u32(attr_data);
				break;
			case TEAM_OPTION_TYPE_STRING:
				if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
					err = -EINVAL;
					goto team_put;
				}
				ctx.data.str_val = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BINARY:
				ctx.data.bin_val.len = nla_len(attr_data);
				ctx.data.bin_val.ptr = nla_data(attr_data);
				break;
			case TEAM_OPTION_TYPE_BOOL:
				ctx.data.bool_val = attr_data ? true : false;
				break;
			case TEAM_OPTION_TYPE_S32:
				ctx.data.s32_val = nla_get_s32(attr_data);
				break;
			default:
				BUG();
			}
			err = team_option_set(team, opt_inst, &ctx);
			if (err)
				goto team_put;
			opt_inst->changed = true;
			list_add(&opt_inst->tmp_list, &opt_inst_list);
		}
		if (!opt_found) {
			err = -ENOENT;
			goto team_put;
		}
	}

	err = team_nl_send_event_options_get(team, &opt_inst_list);

team_put:
	team_nl_team_put(team);

	return err;
}
static int team_nl_fill_one_port_get(struct sk_buff *skb,
				     struct team_port *port)
{
	struct nlattr *port_item;

	port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
	if (!port_item)
		goto nest_cancel;
	if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
		goto nest_cancel;
	if (port->changed) {
		if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
			goto nest_cancel;
		port->changed = false;
	}
	if ((port->removed &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
	    (port->state.linkup &&
	     nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
	    nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
	    nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
		goto nest_cancel;
	nla_nest_end(skb, port_item);
	return 0;

nest_cancel:
	nla_nest_cancel(skb, port_item);
	return -EMSGSIZE;
}
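/* Port list transfer uses the same multi-part scheme as the options
 * transfer above: TEAM_ATTR_ITEM_PORT nests are packed until the skb is
 * full, the partial message is flushed and packing continues from the
 * next port, finishing with NLMSG_DONE.
 */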
static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
				      int flags, team_nl_send_func_t *send_func,
				      struct team_port *one_port)
{
	struct nlattr *port_list;
	struct nlmsghdr *nlh;
	void *hdr;
	struct team_port *port;
	int err;
	struct sk_buff *skb = NULL;
	bool incomplete;
	int i;

	port = list_first_entry_or_null(&team->port_list,
					struct team_port, list);

start_again:
	err = __send_and_alloc_skb(&skb, team, portid, send_func);
	if (err)
		return err;

	hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
			  TEAM_CMD_PORT_LIST_GET);
	if (!hdr)
		return -EMSGSIZE;

	if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
		goto nla_put_failure;
	port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
	if (!port_list)
		goto nla_put_failure;

	i = 0;
	incomplete = false;

	/* If one port is selected, the caller wants to send a port list
	 * containing only this port. Otherwise go through all listed ports
	 * and send them all.
	 */
	if (one_port) {
		err = team_nl_fill_one_port_get(skb, one_port);
		if (err)
			goto errout;
	} else if (port) {
		list_for_each_entry_from(port, &team->port_list, list) {
			err = team_nl_fill_one_port_get(skb, port);
			if (err) {
				if (err == -EMSGSIZE) {
					if (!i)
						goto errout;
					incomplete = true;
					break;
				}
				goto errout;
			}
			i++;
		}
	}

	nla_nest_end(skb, port_list);
	genlmsg_end(skb, hdr);
	if (incomplete)
		goto start_again;

send_done:
	nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
	if (!nlh) {
		err = __send_and_alloc_skb(&skb, team, portid, send_func);
		if (err)
			goto errout;
		goto send_done;
	}

	return send_func(skb, team, portid);

nla_put_failure:
	err = -EMSGSIZE;
errout:
	genlmsg_cancel(skb, hdr);
	nlmsg_free(skb);
	return err;
}
static int team_nl_cmd_port_list_get(struct sk_buff *skb,
				     struct genl_info *info)
{
	struct team *team;
	int err;

	team = team_nl_team_get(info);
	if (!team)
		return -EINVAL;

	err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
					 NLM_F_ACK, team_nl_send_unicast, NULL);

	team_nl_team_put(team);

	return err;
}
static const struct genl_ops team_nl_ops[] = {
	{
		.cmd = TEAM_CMD_NOOP,
		.doit = team_nl_cmd_noop,
		.policy = team_nl_policy,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_SET,
		.doit = team_nl_cmd_options_set,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_OPTIONS_GET,
		.doit = team_nl_cmd_options_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
	{
		.cmd = TEAM_CMD_PORT_LIST_GET,
		.doit = team_nl_cmd_port_list_get,
		.policy = team_nl_policy,
		.flags = GENL_ADMIN_PERM,
	},
};

static const struct genl_multicast_group team_nl_mcgrps[] = {
	{ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
};
static int team_nl_send_multicast(struct sk_buff *skb,
				  struct team *team, u32 portid)
{
	return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
				       skb, 0, 0, GFP_KERNEL);
}
static int team_nl_send_event_options_get(struct team *team,
					   struct list_head *sel_opt_inst_list)
{
	return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
					sel_opt_inst_list);
}
static int team_nl_send_event_port_get(struct team *team,
				       struct team_port *port)
{
	return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
					  port);
}
static int team_nl_init(void)
{
	return genl_register_family_with_ops_groups(&team_nl_family, team_nl_ops,
						    team_nl_mcgrps);
}
static void team_nl_fini(void)
{
	genl_unregister_family(&team_nl_family);
}
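/******************
 * Change checkers
 ******************/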
static void __team_options_change_check(struct team *team)
{
	int err;
	struct team_option_inst *opt_inst;
	LIST_HEAD(sel_opt_inst_list);

	list_for_each_entry(opt_inst, &team->option_inst_list, list) {
		if (opt_inst->changed)
			list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
	}
	err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
	if (err && err != -ESRCH)
		netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
			    err);
}
/* rtnl lock is held */

static void __team_port_change_send(struct team_port *port, bool linkup)
{
	int err;

	port->changed = true;
	port->state.linkup = linkup;
	team_refresh_port_linkup(port);
	if (linkup) {
		struct ethtool_cmd ecmd;

		err = __ethtool_get_settings(port->dev, &ecmd);
		if (!err) {
			port->state.speed = ethtool_cmd_speed(&ecmd);
			port->state.duplex = ecmd.duplex;
			goto send_event;
		}
	}
	port->state.speed = 0;
	port->state.duplex = 0;

send_event:
	err = team_nl_send_event_port_get(port->team, port);
	if (err && err != -ESRCH)
		netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
			    port->dev->name, err);
}
static void __team_carrier_check(struct team *team)
{
	struct team_port *port;
	bool team_linkup;

	if (team->user_carrier_enabled)
		return;

	team_linkup = false;
	list_for_each_entry(port, &team->port_list, list) {
		if (port->linkup) {
			team_linkup = true;
			break;
		}
	}

	if (team_linkup)
		netif_carrier_on(team->dev);
	else
		netif_carrier_off(team->dev);
}
static void __team_port_change_check(struct team_port *port, bool linkup)
{
	if (port->state.linkup != linkup)
		__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}
static void __team_port_change_port_added(struct team_port *port, bool linkup)
{
	__team_port_change_send(port, linkup);
	__team_carrier_check(port->team);
}
static void __team_port_change_port_removed(struct team_port *port)
{
	port->removed = true;
	__team_port_change_send(port, false);
	__team_carrier_check(port->team);
}
static void team_port_change_check(struct team_port *port, bool linkup)
{
	struct team *team = port->team;

	mutex_lock(&team->lock);
	__team_port_change_check(port, linkup);
	mutex_unlock(&team->lock);
}
/************************************
 * Net device notifier event handler
 ************************************/
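/* Events on a port device are translated into team state updates:
 * carrier and link changes feed team_port_change_check(), feature changes
 * trigger team_compute_features(), and MTU or type changes of the
 * underlying device are refused unless explicitly allowed.
 */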
static int team_device_event(struct notifier_block *unused,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct team_port *port;

	port = team_port_get_rtnl(dev);
	if (!port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (netif_carrier_ok(dev))
			team_port_change_check(port, true);
		break;
	case NETDEV_DOWN:
		team_port_change_check(port, false);
		break;
	case NETDEV_CHANGE:
		if (netif_running(port->dev))
			team_port_change_check(port,
					       !!netif_carrier_ok(port->dev));
		break;
	case NETDEV_UNREGISTER:
		team_del_slave(port->team->dev, dev);
		break;
	case NETDEV_FEAT_CHANGE:
		team_compute_features(port->team);
		break;
	case NETDEV_PRECHANGEMTU:
		/* Forbid changing the MTU of an underlying device */
		if (!port->team->port_mtu_change_allowed)
			return NOTIFY_BAD;
		break;
	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid changing the type of an underlying device */
		return NOTIFY_BAD;
	case NETDEV_RESEND_IGMP:
		/* Propagate to master device */
		call_netdevice_notifiers(event, port->team->dev);
		break;
	}
	return NOTIFY_DONE;
}
static struct notifier_block team_notifier_block __read_mostly = {
	.notifier_call = team_device_event,
};
/***********************
 * Module init and exit
 ***********************/
static int __init team_module_init(void)
{
	int err;

	register_netdevice_notifier(&team_notifier_block);

	err = rtnl_link_register(&team_link_ops);
	if (err)
		goto err_rtnl_reg;

	err = team_nl_init();
	if (err)
		goto err_nl_init;

	return 0;

err_nl_init:
	rtnl_link_unregister(&team_link_ops);

err_rtnl_reg:
	unregister_netdevice_notifier(&team_notifier_block);

	return err;
}
static void __exit team_module_exit(void)
{
	team_nl_fini();
	rtnl_link_unregister(&team_link_ops);
	unregister_netdevice_notifier(&team_notifier_block);
}
module_init(team_module_init);
module_exit(team_module_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Ethernet team device driver");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);