/*
 * drivers/net/team/team_mode_loadbalance.c - Load-balancing mode for team
 * Copyright (c) 2012 Jiri Pirko <jpirko@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/if_team.h>
static rx_handler_result_t lb_receive(struct team *team, struct team_port *port,
				      struct sk_buff *skb)
{
	if (unlikely(skb->protocol == htons(ETH_P_SLOW))) {
		/* LACPDU packets should go to exact delivery */
		const unsigned char *dest = eth_hdr(skb)->h_dest;

		if (is_link_local_ether_addr(dest) && dest[5] == 0x02)
			return RX_HANDLER_EXACT;
	}
	return RX_HANDLER_ANOTHER;
}
typedef struct team_port *lb_select_tx_port_func_t(struct team *,
						   struct lb_priv *,
						   struct sk_buff *,
						   unsigned char);

#define LB_TX_HASHTABLE_SIZE 256 /* hash is a char */

struct lb_stats {
	u64 tx_bytes;
};

struct lb_pcpu_stats {
	struct lb_stats hash_stats[LB_TX_HASHTABLE_SIZE];
	struct u64_stats_sync syncp;
};
struct lb_stats_info {
	struct lb_stats stats;
	struct lb_stats last_stats;
	struct team_option_inst_info *opt_inst_info;
};

struct lb_port_mapping {
	struct team_port __rcu *port;
	struct team_option_inst_info *opt_inst_info;
};

struct lb_priv_ex {
	struct team *team;
	struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
	struct sock_fprog_kern *orig_fprog;
	struct {
		unsigned int refresh_interval; /* in tenths of second */
		struct delayed_work refresh_dw;
		struct lb_stats_info info[LB_TX_HASHTABLE_SIZE];
	} stats;
};

struct lb_priv {
	struct bpf_prog __rcu *fp;
	lb_select_tx_port_func_t __rcu *select_tx_port_func;
	struct lb_pcpu_stats __percpu *pcpu_stats;
	struct lb_priv_ex *ex; /* priv extension */
};
static struct lb_priv *get_lb_priv(struct team *team)
{
	return (struct lb_priv *) &team->mode_priv;
}

struct lb_port_priv {
	struct lb_stats __percpu *pcpu_stats;
	struct lb_stats_info stats_info;
};

static struct lb_port_priv *get_lb_port_priv(struct team_port *port)
{
	return (struct lb_port_priv *) &port->mode_priv;
}

#define LB_HTPM_PORT_BY_HASH(lb_priv, hash) \
	(lb_priv)->ex->tx_hash_to_port_mapping[hash].port

#define LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) \
	(lb_priv)->ex->tx_hash_to_port_mapping[hash].opt_inst_info
static void lb_tx_hash_to_port_mapping_null_port(struct team *team,
						 struct team_port *port)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	bool changed = false;
	int i;

	for (i = 0; i < LB_TX_HASHTABLE_SIZE; i++) {
		struct lb_port_mapping *pm;

		pm = &lb_priv->ex->tx_hash_to_port_mapping[i];
		if (rcu_access_pointer(pm->port) == port) {
			RCU_INIT_POINTER(pm->port, NULL);
			team_option_inst_set_change(pm->opt_inst_info);
			changed = true;
		}
	}
	if (changed)
		team_options_change_check(team);
}
/* Basic tx selection based solely by hash */
static struct team_port *lb_hash_select_tx_port(struct team *team,
						struct lb_priv *lb_priv,
						struct sk_buff *skb,
						unsigned char hash)
{
	int port_index = team_num_to_port_index(team, hash);

	return team_get_port_by_index_rcu(team, port_index);
}

/* Hash to port mapping select tx port */
static struct team_port *lb_htpm_select_tx_port(struct team *team,
						struct lb_priv *lb_priv,
						struct sk_buff *skb,
						unsigned char hash)
{
	return rcu_dereference_bh(LB_HTPM_PORT_BY_HASH(lb_priv, hash));
}

struct lb_select_tx_port {
	char *name;
	lb_select_tx_port_func_t *func;
};

static const struct lb_select_tx_port lb_select_tx_port_list[] = {
	{
		.name = "hash",
		.func = lb_hash_select_tx_port,
	},
	{
		.name = "hash_to_port_mapping",
		.func = lb_htpm_select_tx_port,
	},
};

#define LB_SELECT_TX_PORT_LIST_COUNT ARRAY_SIZE(lb_select_tx_port_list)
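
/*
 * The two selectors above back the "lb_tx_method" option: "hash" maps the
 * 8-bit packet hash straight to a port index, while "hash_to_port_mapping"
 * looks the hash up in the user-maintained tx_hash_to_port_mapping table,
 * so userspace (e.g. teamd's load balancer) can steer individual hash
 * buckets to specific ports.
 */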
static char *lb_select_tx_port_get_name(lb_select_tx_port_func_t *func)
{
	int i;

	for (i = 0; i < LB_SELECT_TX_PORT_LIST_COUNT; i++) {
		const struct lb_select_tx_port *item;

		item = &lb_select_tx_port_list[i];
		if (item->func == func)
			return item->name;
	}
	return NULL;
}

static lb_select_tx_port_func_t *lb_select_tx_port_get_func(const char *name)
{
	int i;

	for (i = 0; i < LB_SELECT_TX_PORT_LIST_COUNT; i++) {
		const struct lb_select_tx_port *item;

		item = &lb_select_tx_port_list[i];
		if (!strcmp(item->name, name))
			return item->func;
	}
	return NULL;
}
static unsigned int lb_get_skb_hash(struct lb_priv *lb_priv,
				    struct sk_buff *skb)
{
	struct bpf_prog *fp;
	uint32_t lhash;
	unsigned char *c;

	fp = rcu_dereference_bh(lb_priv->fp);
	if (unlikely(!fp))
		return 0;
	lhash = BPF_PROG_RUN(fp, skb);
	c = (char *) &lhash;
	return c[0] ^ c[1] ^ c[2] ^ c[3];
}
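
/*
 * The BPF program attached via the "bpf_hash_func" option returns a 32-bit
 * value; lb_get_skb_hash() folds it into a single byte by XOR-ing its four
 * bytes, which is what indexes the LB_TX_HASHTABLE_SIZE (256) entry tables.
 */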
static void lb_update_tx_stats(unsigned int tx_bytes, struct lb_priv *lb_priv,
			       struct lb_port_priv *lb_port_priv,
			       unsigned char hash)
{
	struct lb_pcpu_stats *pcpu_stats;
	struct lb_stats *port_stats;
	struct lb_stats *hash_stats;

	pcpu_stats = this_cpu_ptr(lb_priv->pcpu_stats);
	port_stats = this_cpu_ptr(lb_port_priv->pcpu_stats);
	hash_stats = &pcpu_stats->hash_stats[hash];
	u64_stats_update_begin(&pcpu_stats->syncp);
	port_stats->tx_bytes += tx_bytes;
	hash_stats->tx_bytes += tx_bytes;
	u64_stats_update_end(&pcpu_stats->syncp);
}
static bool lb_transmit(struct team *team, struct sk_buff *skb)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	lb_select_tx_port_func_t *select_tx_port_func;
	struct team_port *port;
	unsigned char hash;
	unsigned int tx_bytes = skb->len;

	hash = lb_get_skb_hash(lb_priv, skb);
	select_tx_port_func = rcu_dereference_bh(lb_priv->select_tx_port_func);
	port = select_tx_port_func(team, lb_priv, skb, hash);
	if (unlikely(!port))
		goto drop;
	if (team_dev_queue_xmit(team, port, skb))
		return false;
	lb_update_tx_stats(tx_bytes, lb_priv, get_lb_port_priv(port), hash);
	return true;

drop:
	dev_kfree_skb_any(skb);
	return false;
}
static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);

	if (!lb_priv->ex->orig_fprog) {
		ctx->data.bin_val.len = 0;
		ctx->data.bin_val.ptr = NULL;
		return 0;
	}
	ctx->data.bin_val.len = lb_priv->ex->orig_fprog->len *
				sizeof(struct sock_filter);
	ctx->data.bin_val.ptr = lb_priv->ex->orig_fprog->filter;
	return 0;
}
static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len,
			  const void *data)
{
	struct sock_fprog_kern *fprog;
	struct sock_filter *filter = (struct sock_filter *) data;

	if (data_len % sizeof(struct sock_filter))
		return -EINVAL;
	fprog = kmalloc(sizeof(*fprog), GFP_KERNEL);
	if (!fprog)
		return -ENOMEM;
	fprog->filter = kmemdup(filter, data_len, GFP_KERNEL);
	if (!fprog->filter) {
		kfree(fprog);
		return -ENOMEM;
	}
	fprog->len = data_len / sizeof(struct sock_filter);
	*pfprog = fprog;
	return 0;
}

static void __fprog_destroy(struct sock_fprog_kern *fprog)
{
	kfree(fprog->filter);
	kfree(fprog);
}
static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	struct bpf_prog *fp = NULL;
	struct bpf_prog *orig_fp = NULL;
	struct sock_fprog_kern *fprog = NULL;
	int err;

	if (ctx->data.bin_val.len) {
		err = __fprog_create(&fprog, ctx->data.bin_val.len,
				     ctx->data.bin_val.ptr);
		if (err)
			return err;
		err = bpf_prog_create(&fp, fprog);
		if (err) {
			__fprog_destroy(fprog);
			return err;
		}
	}

	if (lb_priv->ex->orig_fprog) {
		/* Clear old filter data */
		__fprog_destroy(lb_priv->ex->orig_fprog);
		orig_fp = rcu_dereference_protected(lb_priv->fp,
						lockdep_is_held(&team->lock));
	}

	rcu_assign_pointer(lb_priv->fp, fp);
	lb_priv->ex->orig_fprog = fprog;

	if (orig_fp) {
		synchronize_rcu();
		bpf_prog_destroy(orig_fp);
	}
	return 0;
}
static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	lb_select_tx_port_func_t *func;
	char *name;

	func = rcu_dereference_protected(lb_priv->select_tx_port_func,
					 lockdep_is_held(&team->lock));
	name = lb_select_tx_port_get_name(func);
	BUG_ON(!name);
	ctx->data.str_val = name;
	return 0;
}

static int lb_tx_method_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	lb_select_tx_port_func_t *func;

	func = lb_select_tx_port_get_func(ctx->data.str_val);
	if (!func)
		return -EINVAL;
	rcu_assign_pointer(lb_priv->select_tx_port_func, func);
	return 0;
}
static int lb_tx_hash_to_port_mapping_init(struct team *team,
					   struct team_option_inst_info *info)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	unsigned char hash = info->array_index;

	LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) = info;
	return 0;
}

static int lb_tx_hash_to_port_mapping_get(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	struct team_port *port;
	unsigned char hash = ctx->info->array_index;

	port = LB_HTPM_PORT_BY_HASH(lb_priv, hash);
	ctx->data.u32_val = port ? port->dev->ifindex : 0;
	return 0;
}

static int lb_tx_hash_to_port_mapping_set(struct team *team,
					  struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	struct team_port *port;
	unsigned char hash = ctx->info->array_index;

	list_for_each_entry(port, &team->port_list, list) {
		if (ctx->data.u32_val == port->dev->ifindex &&
		    team_port_enabled(port)) {
			rcu_assign_pointer(LB_HTPM_PORT_BY_HASH(lb_priv, hash),
					   port);
			return 0;
		}
	}
	return -ENODEV;
}
static int lb_hash_stats_init(struct team *team,
			      struct team_option_inst_info *info)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	unsigned char hash = info->array_index;

	lb_priv->ex->stats.info[hash].opt_inst_info = info;
	return 0;
}

static int lb_hash_stats_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	unsigned char hash = ctx->info->array_index;

	ctx->data.bin_val.ptr = &lb_priv->ex->stats.info[hash].stats;
	ctx->data.bin_val.len = sizeof(struct lb_stats);
	return 0;
}

static int lb_port_stats_init(struct team *team,
			      struct team_option_inst_info *info)
{
	struct team_port *port = info->port;
	struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);

	lb_port_priv->stats_info.opt_inst_info = info;
	return 0;
}

static int lb_port_stats_get(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct team_port *port = ctx->info->port;
	struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);

	ctx->data.bin_val.ptr = &lb_port_priv->stats_info.stats;
	ctx->data.bin_val.len = sizeof(struct lb_stats);
	return 0;
}
static void __lb_stats_info_refresh_prepare(struct lb_stats_info *s_info)
{
	memcpy(&s_info->last_stats, &s_info->stats, sizeof(struct lb_stats));
	memset(&s_info->stats, 0, sizeof(struct lb_stats));
}

static bool __lb_stats_info_refresh_check(struct lb_stats_info *s_info,
					  struct team *team)
{
	if (memcmp(&s_info->last_stats, &s_info->stats,
		   sizeof(struct lb_stats))) {
		team_option_inst_set_change(s_info->opt_inst_info);
		return true;
	}
	return false;
}

static void __lb_one_cpu_stats_add(struct lb_stats *acc_stats,
				   struct lb_stats *cpu_stats,
				   struct u64_stats_sync *syncp)
{
	unsigned int start;
	struct lb_stats tmp;

	do {
		start = u64_stats_fetch_begin_irq(syncp);
		tmp.tx_bytes = cpu_stats->tx_bytes;
	} while (u64_stats_fetch_retry_irq(syncp, start));
	acc_stats->tx_bytes += tmp.tx_bytes;
}
static void lb_stats_refresh(struct work_struct *work)
{
	struct team *team;
	struct lb_priv *lb_priv;
	struct lb_priv_ex *lb_priv_ex;
	struct lb_pcpu_stats *pcpu_stats;
	struct lb_stats *stats;
	struct lb_stats_info *s_info;
	struct team_port *port;
	bool changed = false;
	int i;
	int j;

	lb_priv_ex = container_of(work, struct lb_priv_ex,
				  stats.refresh_dw.work);

	team = lb_priv_ex->team;
	lb_priv = get_lb_priv(team);

	if (!mutex_trylock(&team->lock)) {
		schedule_delayed_work(&lb_priv_ex->stats.refresh_dw, 0);
		return;
	}

	for (j = 0; j < LB_TX_HASHTABLE_SIZE; j++) {
		s_info = &lb_priv->ex->stats.info[j];
		__lb_stats_info_refresh_prepare(s_info);
		for_each_possible_cpu(i) {
			pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
			stats = &pcpu_stats->hash_stats[j];
			__lb_one_cpu_stats_add(&s_info->stats, stats,
					       &pcpu_stats->syncp);
		}
		changed |= __lb_stats_info_refresh_check(s_info, team);
	}

	list_for_each_entry(port, &team->port_list, list) {
		struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);

		s_info = &lb_port_priv->stats_info;
		__lb_stats_info_refresh_prepare(s_info);
		for_each_possible_cpu(i) {
			pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
			stats = per_cpu_ptr(lb_port_priv->pcpu_stats, i);
			__lb_one_cpu_stats_add(&s_info->stats, stats,
					       &pcpu_stats->syncp);
		}
		changed |= __lb_stats_info_refresh_check(s_info, team);
	}

	if (changed)
		team_options_change_check(team);

	schedule_delayed_work(&lb_priv_ex->stats.refresh_dw,
			      (lb_priv_ex->stats.refresh_interval * HZ) / 10);

	mutex_unlock(&team->lock);
}
static int lb_stats_refresh_interval_get(struct team *team,
					 struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);

	ctx->data.u32_val = lb_priv->ex->stats.refresh_interval;
	return 0;
}

static int lb_stats_refresh_interval_set(struct team *team,
					 struct team_gsetter_ctx *ctx)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	unsigned int interval;

	interval = ctx->data.u32_val;
	if (lb_priv->ex->stats.refresh_interval == interval)
		return 0;
	lb_priv->ex->stats.refresh_interval = interval;
	if (interval)
		schedule_delayed_work(&lb_priv->ex->stats.refresh_dw, 0);
	else
		cancel_delayed_work(&lb_priv->ex->stats.refresh_dw);
	return 0;
}
static const struct team_option lb_options[] = {
	{
		.name = "bpf_hash_func",
		.type = TEAM_OPTION_TYPE_BINARY,
		.getter = lb_bpf_func_get,
		.setter = lb_bpf_func_set,
	},
	{
		.name = "lb_tx_method",
		.type = TEAM_OPTION_TYPE_STRING,
		.getter = lb_tx_method_get,
		.setter = lb_tx_method_set,
	},
	{
		.name = "lb_tx_hash_to_port_mapping",
		.array_size = LB_TX_HASHTABLE_SIZE,
		.type = TEAM_OPTION_TYPE_U32,
		.init = lb_tx_hash_to_port_mapping_init,
		.getter = lb_tx_hash_to_port_mapping_get,
		.setter = lb_tx_hash_to_port_mapping_set,
	},
	{
		.name = "lb_hash_stats",
		.array_size = LB_TX_HASHTABLE_SIZE,
		.type = TEAM_OPTION_TYPE_BINARY,
		.init = lb_hash_stats_init,
		.getter = lb_hash_stats_get,
	},
	{
		.name = "lb_port_stats",
		.per_port = true,
		.type = TEAM_OPTION_TYPE_BINARY,
		.init = lb_port_stats_init,
		.getter = lb_port_stats_get,
	},
	{
		.name = "lb_stats_refresh_interval",
		.type = TEAM_OPTION_TYPE_U32,
		.getter = lb_stats_refresh_interval_get,
		.setter = lb_stats_refresh_interval_set,
	},
};
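
/*
 * These options are normally driven from userspace. A minimal illustrative
 * teamd configuration (assuming teamd's loadbalance runner; not part of this
 * file) that ends up programming "bpf_hash_func" and reading the hash/port
 * statistics could look like:
 *
 *	{
 *		"device": "team0",
 *		"runner": {
 *			"name": "loadbalance",
 *			"tx_hash": ["eth", "ipv4", "ipv6"],
 *			"tx_balancer": { "name": "basic", "balancing_interval": 50 }
 *		}
 *	}
 */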
static int lb_init(struct team *team)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	lb_select_tx_port_func_t *func;
	int i, err;

	/* set default tx port selector */
	func = lb_select_tx_port_get_func("hash");
	BUG_ON(!func);
	rcu_assign_pointer(lb_priv->select_tx_port_func, func);

	lb_priv->ex = kzalloc(sizeof(*lb_priv->ex), GFP_KERNEL);
	if (!lb_priv->ex)
		return -ENOMEM;
	lb_priv->ex->team = team;

	lb_priv->pcpu_stats = alloc_percpu(struct lb_pcpu_stats);
	if (!lb_priv->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_pcpu_stats;
	}

	for_each_possible_cpu(i) {
		struct lb_pcpu_stats *team_lb_stats;

		team_lb_stats = per_cpu_ptr(lb_priv->pcpu_stats, i);
		u64_stats_init(&team_lb_stats->syncp);
	}

	INIT_DELAYED_WORK(&lb_priv->ex->stats.refresh_dw, lb_stats_refresh);

	err = team_options_register(team, lb_options, ARRAY_SIZE(lb_options));
	if (err)
		goto err_options_register;
	return 0;

err_options_register:
	free_percpu(lb_priv->pcpu_stats);
err_alloc_pcpu_stats:
	kfree(lb_priv->ex);
	return err;
}
static void lb_exit(struct team *team)
{
	struct lb_priv *lb_priv = get_lb_priv(team);

	team_options_unregister(team, lb_options,
				ARRAY_SIZE(lb_options));
	cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw);
	free_percpu(lb_priv->pcpu_stats);
	kfree(lb_priv->ex);
}
static int lb_port_enter(struct team *team, struct team_port *port)
{
	struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);

	lb_port_priv->pcpu_stats = alloc_percpu(struct lb_stats);
	if (!lb_port_priv->pcpu_stats)
		return -ENOMEM;
	return 0;
}

static void lb_port_leave(struct team *team, struct team_port *port)
{
	struct lb_port_priv *lb_port_priv = get_lb_port_priv(port);

	free_percpu(lb_port_priv->pcpu_stats);
}

static void lb_port_disabled(struct team *team, struct team_port *port)
{
	lb_tx_hash_to_port_mapping_null_port(team, port);
}
static const struct team_mode_ops lb_mode_ops = {
	.init			= lb_init,
	.exit			= lb_exit,
	.port_enter		= lb_port_enter,
	.port_leave		= lb_port_leave,
	.port_disabled		= lb_port_disabled,
	.receive		= lb_receive,
	.transmit		= lb_transmit,
};

static const struct team_mode lb_mode = {
	.kind		= "loadbalance",
	.owner		= THIS_MODULE,
	.priv_size	= sizeof(struct lb_priv),
	.port_priv_size	= sizeof(struct lb_port_priv),
	.ops		= &lb_mode_ops,
	.lag_tx_type	= NETDEV_LAG_TX_TYPE_HASH,
};
static int __init lb_init_module(void)
{
	return team_mode_register(&lb_mode);
}

static void __exit lb_cleanup_module(void)
{
	team_mode_unregister(&lb_mode);
}

module_init(lb_init_module);
module_exit(lb_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
MODULE_DESCRIPTION("Load-balancing mode for team");
MODULE_ALIAS_TEAM_MODE("loadbalance");