// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/team/team_mode_loadbalance.c - Load-balancing mode for team
 * Copyright (c) 2012 Jiri Pirko <jpirko@redhat.com>
 */
7 #include <linux/kernel.h>
8 #include <linux/types.h>
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/errno.h>
12 #include <linux/netdevice.h>
13 #include <linux/etherdevice.h>
14 #include <linux/filter.h>
15 #include <linux/if_team.h>
17 static rx_handler_result_t
lb_receive(struct team
*team
, struct team_port
*port
,
20 if (unlikely(skb
->protocol
== htons(ETH_P_SLOW
))) {
21 /* LACPDU packets should go to exact delivery */
22 const unsigned char *dest
= eth_hdr(skb
)->h_dest
;
24 if (is_link_local_ether_addr(dest
) && dest
[5] == 0x02)
25 return RX_HANDLER_EXACT
;
27 return RX_HANDLER_ANOTHER
;
/* Signature of a tx-port selector: maps (team, priv, skb, hash) to the
 * port that should transmit the skb.  Called under RCU-bh from lb_transmit().
 */
typedef struct team_port *lb_select_tx_port_func_t(struct team *,
						   struct lb_priv *,
						   struct sk_buff *,
						   unsigned char);

#define LB_TX_HASHTABLE_SIZE 256 /* hash is a char */
43 struct lb_pcpu_stats
{
44 struct lb_stats hash_stats
[LB_TX_HASHTABLE_SIZE
];
45 struct u64_stats_sync syncp
;
48 struct lb_stats_info
{
49 struct lb_stats stats
;
50 struct lb_stats last_stats
;
51 struct team_option_inst_info
*opt_inst_info
;
54 struct lb_port_mapping
{
55 struct team_port __rcu
*port
;
56 struct team_option_inst_info
*opt_inst_info
;
61 struct lb_port_mapping tx_hash_to_port_mapping
[LB_TX_HASHTABLE_SIZE
];
62 struct sock_fprog_kern
*orig_fprog
;
64 unsigned int refresh_interval
; /* in tenths of second */
65 struct delayed_work refresh_dw
;
66 struct lb_stats_info info
[LB_TX_HASHTABLE_SIZE
];
71 struct bpf_prog __rcu
*fp
;
72 lb_select_tx_port_func_t __rcu
*select_tx_port_func
;
73 struct lb_pcpu_stats __percpu
*pcpu_stats
;
74 struct lb_priv_ex
*ex
; /* priv extension */
77 static struct lb_priv
*get_lb_priv(struct team
*team
)
79 return (struct lb_priv
*) &team
->mode_priv
;
83 struct lb_stats __percpu
*pcpu_stats
;
84 struct lb_stats_info stats_info
;
87 static struct lb_port_priv
*get_lb_port_priv(struct team_port
*port
)
89 return (struct lb_port_priv
*) &port
->mode_priv
;
/* Accessors for the hash -> port mapping table.  Note: the parameter must
 * be named lb_priv to match the expansion; the previous parameter name
 * (lp_priv) only worked because every caller's variable happened to be
 * called lb_priv.
 */
#define LB_HTPM_PORT_BY_HASH(lb_priv, hash) \
	(lb_priv)->ex->tx_hash_to_port_mapping[hash].port

#define LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) \
	(lb_priv)->ex->tx_hash_to_port_mapping[hash].opt_inst_info
98 static void lb_tx_hash_to_port_mapping_null_port(struct team
*team
,
99 struct team_port
*port
)
101 struct lb_priv
*lb_priv
= get_lb_priv(team
);
102 bool changed
= false;
105 for (i
= 0; i
< LB_TX_HASHTABLE_SIZE
; i
++) {
106 struct lb_port_mapping
*pm
;
108 pm
= &lb_priv
->ex
->tx_hash_to_port_mapping
[i
];
109 if (rcu_access_pointer(pm
->port
) == port
) {
110 RCU_INIT_POINTER(pm
->port
, NULL
);
111 team_option_inst_set_change(pm
->opt_inst_info
);
116 team_options_change_check(team
);
/* Basic tx selection based solely by hash: map the hash onto the set of
 * enabled ports (modulo, done by team_num_to_port_index()).
 */
static struct team_port *lb_hash_select_tx_port(struct team *team,
						struct lb_priv *lb_priv,
						struct sk_buff *skb,
						unsigned char hash)
{
	int port_index = team_num_to_port_index(team, hash);

	return team_get_port_by_index_rcu(team, port_index);
}
/* Hash to port mapping select tx port: use the userspace-managed table,
 * falling back to the plain hash selector for unmapped hashes.
 */
static struct team_port *lb_htpm_select_tx_port(struct team *team,
						struct lb_priv *lb_priv,
						struct sk_buff *skb,
						unsigned char hash)
{
	struct team_port *port;

	port = rcu_dereference_bh(LB_HTPM_PORT_BY_HASH(lb_priv, hash));
	if (likely(port))
		return port;
	/* If no valid port in the table, fall back to simple hash */
	return lb_hash_select_tx_port(team, lb_priv, skb, hash);
}
145 struct lb_select_tx_port
{
147 lb_select_tx_port_func_t
*func
;
150 static const struct lb_select_tx_port lb_select_tx_port_list
[] = {
153 .func
= lb_hash_select_tx_port
,
156 .name
= "hash_to_port_mapping",
157 .func
= lb_htpm_select_tx_port
,
160 #define LB_SELECT_TX_PORT_LIST_COUNT ARRAY_SIZE(lb_select_tx_port_list)
162 static char *lb_select_tx_port_get_name(lb_select_tx_port_func_t
*func
)
166 for (i
= 0; i
< LB_SELECT_TX_PORT_LIST_COUNT
; i
++) {
167 const struct lb_select_tx_port
*item
;
169 item
= &lb_select_tx_port_list
[i
];
170 if (item
->func
== func
)
176 static lb_select_tx_port_func_t
*lb_select_tx_port_get_func(const char *name
)
180 for (i
= 0; i
< LB_SELECT_TX_PORT_LIST_COUNT
; i
++) {
181 const struct lb_select_tx_port
*item
;
183 item
= &lb_select_tx_port_list
[i
];
184 if (!strcmp(item
->name
, name
))
190 static unsigned int lb_get_skb_hash(struct lb_priv
*lb_priv
,
197 fp
= rcu_dereference_bh(lb_priv
->fp
);
200 lhash
= BPF_PROG_RUN(fp
, skb
);
202 return c
[0] ^ c
[1] ^ c
[2] ^ c
[3];
205 static void lb_update_tx_stats(unsigned int tx_bytes
, struct lb_priv
*lb_priv
,
206 struct lb_port_priv
*lb_port_priv
,
209 struct lb_pcpu_stats
*pcpu_stats
;
210 struct lb_stats
*port_stats
;
211 struct lb_stats
*hash_stats
;
213 pcpu_stats
= this_cpu_ptr(lb_priv
->pcpu_stats
);
214 port_stats
= this_cpu_ptr(lb_port_priv
->pcpu_stats
);
215 hash_stats
= &pcpu_stats
->hash_stats
[hash
];
216 u64_stats_update_begin(&pcpu_stats
->syncp
);
217 port_stats
->tx_bytes
+= tx_bytes
;
218 hash_stats
->tx_bytes
+= tx_bytes
;
219 u64_stats_update_end(&pcpu_stats
->syncp
);
222 static bool lb_transmit(struct team
*team
, struct sk_buff
*skb
)
224 struct lb_priv
*lb_priv
= get_lb_priv(team
);
225 lb_select_tx_port_func_t
*select_tx_port_func
;
226 struct team_port
*port
;
228 unsigned int tx_bytes
= skb
->len
;
230 hash
= lb_get_skb_hash(lb_priv
, skb
);
231 select_tx_port_func
= rcu_dereference_bh(lb_priv
->select_tx_port_func
);
232 port
= select_tx_port_func(team
, lb_priv
, skb
, hash
);
235 if (team_dev_queue_xmit(team
, port
, skb
))
237 lb_update_tx_stats(tx_bytes
, lb_priv
, get_lb_port_priv(port
), hash
);
241 dev_kfree_skb_any(skb
);
245 static int lb_bpf_func_get(struct team
*team
, struct team_gsetter_ctx
*ctx
)
247 struct lb_priv
*lb_priv
= get_lb_priv(team
);
249 if (!lb_priv
->ex
->orig_fprog
) {
250 ctx
->data
.bin_val
.len
= 0;
251 ctx
->data
.bin_val
.ptr
= NULL
;
254 ctx
->data
.bin_val
.len
= lb_priv
->ex
->orig_fprog
->len
*
255 sizeof(struct sock_filter
);
256 ctx
->data
.bin_val
.ptr
= lb_priv
->ex
->orig_fprog
->filter
;
260 static int __fprog_create(struct sock_fprog_kern
**pfprog
, u32 data_len
,
263 struct sock_fprog_kern
*fprog
;
264 struct sock_filter
*filter
= (struct sock_filter
*) data
;
266 if (data_len
% sizeof(struct sock_filter
))
268 fprog
= kmalloc(sizeof(*fprog
), GFP_KERNEL
);
271 fprog
->filter
= kmemdup(filter
, data_len
, GFP_KERNEL
);
272 if (!fprog
->filter
) {
276 fprog
->len
= data_len
/ sizeof(struct sock_filter
);
281 static void __fprog_destroy(struct sock_fprog_kern
*fprog
)
283 kfree(fprog
->filter
);
287 static int lb_bpf_func_set(struct team
*team
, struct team_gsetter_ctx
*ctx
)
289 struct lb_priv
*lb_priv
= get_lb_priv(team
);
290 struct bpf_prog
*fp
= NULL
;
291 struct bpf_prog
*orig_fp
= NULL
;
292 struct sock_fprog_kern
*fprog
= NULL
;
295 if (ctx
->data
.bin_val
.len
) {
296 err
= __fprog_create(&fprog
, ctx
->data
.bin_val
.len
,
297 ctx
->data
.bin_val
.ptr
);
300 err
= bpf_prog_create(&fp
, fprog
);
302 __fprog_destroy(fprog
);
307 if (lb_priv
->ex
->orig_fprog
) {
308 /* Clear old filter data */
309 __fprog_destroy(lb_priv
->ex
->orig_fprog
);
310 orig_fp
= rcu_dereference_protected(lb_priv
->fp
,
311 lockdep_is_held(&team
->lock
));
314 rcu_assign_pointer(lb_priv
->fp
, fp
);
315 lb_priv
->ex
->orig_fprog
= fprog
;
319 bpf_prog_destroy(orig_fp
);
324 static void lb_bpf_func_free(struct team
*team
)
326 struct lb_priv
*lb_priv
= get_lb_priv(team
);
329 if (!lb_priv
->ex
->orig_fprog
)
332 __fprog_destroy(lb_priv
->ex
->orig_fprog
);
333 fp
= rcu_dereference_protected(lb_priv
->fp
,
334 lockdep_is_held(&team
->lock
));
335 bpf_prog_destroy(fp
);
338 static int lb_tx_method_get(struct team
*team
, struct team_gsetter_ctx
*ctx
)
340 struct lb_priv
*lb_priv
= get_lb_priv(team
);
341 lb_select_tx_port_func_t
*func
;
344 func
= rcu_dereference_protected(lb_priv
->select_tx_port_func
,
345 lockdep_is_held(&team
->lock
));
346 name
= lb_select_tx_port_get_name(func
);
348 ctx
->data
.str_val
= name
;
352 static int lb_tx_method_set(struct team
*team
, struct team_gsetter_ctx
*ctx
)
354 struct lb_priv
*lb_priv
= get_lb_priv(team
);
355 lb_select_tx_port_func_t
*func
;
357 func
= lb_select_tx_port_get_func(ctx
->data
.str_val
);
360 rcu_assign_pointer(lb_priv
->select_tx_port_func
, func
);
364 static int lb_tx_hash_to_port_mapping_init(struct team
*team
,
365 struct team_option_inst_info
*info
)
367 struct lb_priv
*lb_priv
= get_lb_priv(team
);
368 unsigned char hash
= info
->array_index
;
370 LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv
, hash
) = info
;
374 static int lb_tx_hash_to_port_mapping_get(struct team
*team
,
375 struct team_gsetter_ctx
*ctx
)
377 struct lb_priv
*lb_priv
= get_lb_priv(team
);
378 struct team_port
*port
;
379 unsigned char hash
= ctx
->info
->array_index
;
381 port
= LB_HTPM_PORT_BY_HASH(lb_priv
, hash
);
382 ctx
->data
.u32_val
= port
? port
->dev
->ifindex
: 0;
386 static int lb_tx_hash_to_port_mapping_set(struct team
*team
,
387 struct team_gsetter_ctx
*ctx
)
389 struct lb_priv
*lb_priv
= get_lb_priv(team
);
390 struct team_port
*port
;
391 unsigned char hash
= ctx
->info
->array_index
;
393 list_for_each_entry(port
, &team
->port_list
, list
) {
394 if (ctx
->data
.u32_val
== port
->dev
->ifindex
&&
395 team_port_enabled(port
)) {
396 rcu_assign_pointer(LB_HTPM_PORT_BY_HASH(lb_priv
, hash
),
404 static int lb_hash_stats_init(struct team
*team
,
405 struct team_option_inst_info
*info
)
407 struct lb_priv
*lb_priv
= get_lb_priv(team
);
408 unsigned char hash
= info
->array_index
;
410 lb_priv
->ex
->stats
.info
[hash
].opt_inst_info
= info
;
414 static int lb_hash_stats_get(struct team
*team
, struct team_gsetter_ctx
*ctx
)
416 struct lb_priv
*lb_priv
= get_lb_priv(team
);
417 unsigned char hash
= ctx
->info
->array_index
;
419 ctx
->data
.bin_val
.ptr
= &lb_priv
->ex
->stats
.info
[hash
].stats
;
420 ctx
->data
.bin_val
.len
= sizeof(struct lb_stats
);
424 static int lb_port_stats_init(struct team
*team
,
425 struct team_option_inst_info
*info
)
427 struct team_port
*port
= info
->port
;
428 struct lb_port_priv
*lb_port_priv
= get_lb_port_priv(port
);
430 lb_port_priv
->stats_info
.opt_inst_info
= info
;
434 static int lb_port_stats_get(struct team
*team
, struct team_gsetter_ctx
*ctx
)
436 struct team_port
*port
= ctx
->info
->port
;
437 struct lb_port_priv
*lb_port_priv
= get_lb_port_priv(port
);
439 ctx
->data
.bin_val
.ptr
= &lb_port_priv
->stats_info
.stats
;
440 ctx
->data
.bin_val
.len
= sizeof(struct lb_stats
);
444 static void __lb_stats_info_refresh_prepare(struct lb_stats_info
*s_info
)
446 memcpy(&s_info
->last_stats
, &s_info
->stats
, sizeof(struct lb_stats
));
447 memset(&s_info
->stats
, 0, sizeof(struct lb_stats
));
450 static bool __lb_stats_info_refresh_check(struct lb_stats_info
*s_info
,
453 if (memcmp(&s_info
->last_stats
, &s_info
->stats
,
454 sizeof(struct lb_stats
))) {
455 team_option_inst_set_change(s_info
->opt_inst_info
);
461 static void __lb_one_cpu_stats_add(struct lb_stats
*acc_stats
,
462 struct lb_stats
*cpu_stats
,
463 struct u64_stats_sync
*syncp
)
469 start
= u64_stats_fetch_begin_irq(syncp
);
470 tmp
.tx_bytes
= cpu_stats
->tx_bytes
;
471 } while (u64_stats_fetch_retry_irq(syncp
, start
));
472 acc_stats
->tx_bytes
+= tmp
.tx_bytes
;
475 static void lb_stats_refresh(struct work_struct
*work
)
478 struct lb_priv
*lb_priv
;
479 struct lb_priv_ex
*lb_priv_ex
;
480 struct lb_pcpu_stats
*pcpu_stats
;
481 struct lb_stats
*stats
;
482 struct lb_stats_info
*s_info
;
483 struct team_port
*port
;
484 bool changed
= false;
488 lb_priv_ex
= container_of(work
, struct lb_priv_ex
,
489 stats
.refresh_dw
.work
);
491 team
= lb_priv_ex
->team
;
492 lb_priv
= get_lb_priv(team
);
494 if (!mutex_trylock(&team
->lock
)) {
495 schedule_delayed_work(&lb_priv_ex
->stats
.refresh_dw
, 0);
499 for (j
= 0; j
< LB_TX_HASHTABLE_SIZE
; j
++) {
500 s_info
= &lb_priv
->ex
->stats
.info
[j
];
501 __lb_stats_info_refresh_prepare(s_info
);
502 for_each_possible_cpu(i
) {
503 pcpu_stats
= per_cpu_ptr(lb_priv
->pcpu_stats
, i
);
504 stats
= &pcpu_stats
->hash_stats
[j
];
505 __lb_one_cpu_stats_add(&s_info
->stats
, stats
,
508 changed
|= __lb_stats_info_refresh_check(s_info
, team
);
511 list_for_each_entry(port
, &team
->port_list
, list
) {
512 struct lb_port_priv
*lb_port_priv
= get_lb_port_priv(port
);
514 s_info
= &lb_port_priv
->stats_info
;
515 __lb_stats_info_refresh_prepare(s_info
);
516 for_each_possible_cpu(i
) {
517 pcpu_stats
= per_cpu_ptr(lb_priv
->pcpu_stats
, i
);
518 stats
= per_cpu_ptr(lb_port_priv
->pcpu_stats
, i
);
519 __lb_one_cpu_stats_add(&s_info
->stats
, stats
,
522 changed
|= __lb_stats_info_refresh_check(s_info
, team
);
526 team_options_change_check(team
);
528 schedule_delayed_work(&lb_priv_ex
->stats
.refresh_dw
,
529 (lb_priv_ex
->stats
.refresh_interval
* HZ
) / 10);
531 mutex_unlock(&team
->lock
);
534 static int lb_stats_refresh_interval_get(struct team
*team
,
535 struct team_gsetter_ctx
*ctx
)
537 struct lb_priv
*lb_priv
= get_lb_priv(team
);
539 ctx
->data
.u32_val
= lb_priv
->ex
->stats
.refresh_interval
;
543 static int lb_stats_refresh_interval_set(struct team
*team
,
544 struct team_gsetter_ctx
*ctx
)
546 struct lb_priv
*lb_priv
= get_lb_priv(team
);
547 unsigned int interval
;
549 interval
= ctx
->data
.u32_val
;
550 if (lb_priv
->ex
->stats
.refresh_interval
== interval
)
552 lb_priv
->ex
->stats
.refresh_interval
= interval
;
554 schedule_delayed_work(&lb_priv
->ex
->stats
.refresh_dw
, 0);
556 cancel_delayed_work(&lb_priv
->ex
->stats
.refresh_dw
);
560 static const struct team_option lb_options
[] = {
562 .name
= "bpf_hash_func",
563 .type
= TEAM_OPTION_TYPE_BINARY
,
564 .getter
= lb_bpf_func_get
,
565 .setter
= lb_bpf_func_set
,
568 .name
= "lb_tx_method",
569 .type
= TEAM_OPTION_TYPE_STRING
,
570 .getter
= lb_tx_method_get
,
571 .setter
= lb_tx_method_set
,
574 .name
= "lb_tx_hash_to_port_mapping",
575 .array_size
= LB_TX_HASHTABLE_SIZE
,
576 .type
= TEAM_OPTION_TYPE_U32
,
577 .init
= lb_tx_hash_to_port_mapping_init
,
578 .getter
= lb_tx_hash_to_port_mapping_get
,
579 .setter
= lb_tx_hash_to_port_mapping_set
,
582 .name
= "lb_hash_stats",
583 .array_size
= LB_TX_HASHTABLE_SIZE
,
584 .type
= TEAM_OPTION_TYPE_BINARY
,
585 .init
= lb_hash_stats_init
,
586 .getter
= lb_hash_stats_get
,
589 .name
= "lb_port_stats",
591 .type
= TEAM_OPTION_TYPE_BINARY
,
592 .init
= lb_port_stats_init
,
593 .getter
= lb_port_stats_get
,
596 .name
= "lb_stats_refresh_interval",
597 .type
= TEAM_OPTION_TYPE_U32
,
598 .getter
= lb_stats_refresh_interval_get
,
599 .setter
= lb_stats_refresh_interval_set
,
603 static int lb_init(struct team
*team
)
605 struct lb_priv
*lb_priv
= get_lb_priv(team
);
606 lb_select_tx_port_func_t
*func
;
609 /* set default tx port selector */
610 func
= lb_select_tx_port_get_func("hash");
612 rcu_assign_pointer(lb_priv
->select_tx_port_func
, func
);
614 lb_priv
->ex
= kzalloc(sizeof(*lb_priv
->ex
), GFP_KERNEL
);
617 lb_priv
->ex
->team
= team
;
619 lb_priv
->pcpu_stats
= alloc_percpu(struct lb_pcpu_stats
);
620 if (!lb_priv
->pcpu_stats
) {
622 goto err_alloc_pcpu_stats
;
625 for_each_possible_cpu(i
) {
626 struct lb_pcpu_stats
*team_lb_stats
;
627 team_lb_stats
= per_cpu_ptr(lb_priv
->pcpu_stats
, i
);
628 u64_stats_init(&team_lb_stats
->syncp
);
632 INIT_DELAYED_WORK(&lb_priv
->ex
->stats
.refresh_dw
, lb_stats_refresh
);
634 err
= team_options_register(team
, lb_options
, ARRAY_SIZE(lb_options
));
636 goto err_options_register
;
639 err_options_register
:
640 free_percpu(lb_priv
->pcpu_stats
);
641 err_alloc_pcpu_stats
:
646 static void lb_exit(struct team
*team
)
648 struct lb_priv
*lb_priv
= get_lb_priv(team
);
650 team_options_unregister(team
, lb_options
,
651 ARRAY_SIZE(lb_options
));
652 lb_bpf_func_free(team
);
653 cancel_delayed_work_sync(&lb_priv
->ex
->stats
.refresh_dw
);
654 free_percpu(lb_priv
->pcpu_stats
);
658 static int lb_port_enter(struct team
*team
, struct team_port
*port
)
660 struct lb_port_priv
*lb_port_priv
= get_lb_port_priv(port
);
662 lb_port_priv
->pcpu_stats
= alloc_percpu(struct lb_stats
);
663 if (!lb_port_priv
->pcpu_stats
)
668 static void lb_port_leave(struct team
*team
, struct team_port
*port
)
670 struct lb_port_priv
*lb_port_priv
= get_lb_port_priv(port
);
672 free_percpu(lb_port_priv
->pcpu_stats
);
/* Port disabled: purge it from the hash -> port mapping table so no new
 * traffic is steered to it.
 */
static void lb_port_disabled(struct team *team, struct team_port *port)
{
	lb_tx_hash_to_port_mapping_null_port(team, port);
}
680 static const struct team_mode_ops lb_mode_ops
= {
683 .port_enter
= lb_port_enter
,
684 .port_leave
= lb_port_leave
,
685 .port_disabled
= lb_port_disabled
,
686 .receive
= lb_receive
,
687 .transmit
= lb_transmit
,
690 static const struct team_mode lb_mode
= {
691 .kind
= "loadbalance",
692 .owner
= THIS_MODULE
,
693 .priv_size
= sizeof(struct lb_priv
),
694 .port_priv_size
= sizeof(struct lb_port_priv
),
696 .lag_tx_type
= NETDEV_LAG_TX_TYPE_HASH
,
699 static int __init
lb_init_module(void)
701 return team_mode_register(&lb_mode
);
704 static void __exit
lb_cleanup_module(void)
706 team_mode_unregister(&lb_mode
);
709 module_init(lb_init_module
);
710 module_exit(lb_cleanup_module
);
712 MODULE_LICENSE("GPL v2");
713 MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
714 MODULE_DESCRIPTION("Load-balancing mode for team");
715 MODULE_ALIAS_TEAM_MODE("loadbalance");