/*
 * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/pkt_sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_bonding.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <net/ipx.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <asm/byteorder.h>
#include "bonding.h"
#include "bond_alb.h"
#ifndef __long_aligned
#define __long_aligned __attribute__((aligned((sizeof(long)))))
#endif

static const u8 mac_bcast[ETH_ALEN] __long_aligned = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 mac_v6_allmcast[ETH_ALEN] __long_aligned = {
	0x33, 0x33, 0x00, 0x00, 0x00, 0x01
};
static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
#pragma pack(1)
struct learning_pkt {
	u8 mac_dst[ETH_ALEN];
	u8 mac_src[ETH_ALEN];
	__be16 type;
	u8 padding[ETH_ZLEN - ETH_HLEN];
};

struct arp_pkt {
	__be16	hw_addr_space;
	__be16	prot_addr_space;
	u8	hw_addr_len;
	u8	prot_addr_len;
	__be16	op_code;
	u8	mac_src[ETH_ALEN];	/* sender hardware address */
	__be32	ip_src;			/* sender IP address */
	u8	mac_dst[ETH_ALEN];	/* target hardware address */
	__be32	ip_dst;			/* target IP address */
};
#pragma pack()
static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
{
	return (struct arp_pkt *)skb_network_header(skb);
}

/* Forward declaration */
static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
static void rlb_src_unlink(struct bonding *bond, u32 index);
static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
			 u32 ip_dst_hash);
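
/* XOR-fold the given bytes down to a single u8. The same hash is used to
 * index the TLB tx hash table (by destination IP/MAC) and the RLB rx hash
 * table (by client IP address).
 */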
static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
{
	int i;
	u8 hash = 0;

	for (i = 0; i < hash_size; i++) {
		hash ^= hash_start[i];
	}

	return hash;
}
/*********************** tlb specific functions ***************************/

static inline void _lock_tx_hashtbl_bh(struct bonding *bond)
{
	spin_lock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}

static inline void _unlock_tx_hashtbl_bh(struct bonding *bond)
{
	spin_unlock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}

static inline void _lock_tx_hashtbl(struct bonding *bond)
{
	spin_lock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}

static inline void _unlock_tx_hashtbl(struct bonding *bond)
{
	spin_unlock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}
/* Caller must hold tx_hashtbl lock */
static inline void tlb_init_table_entry(struct tlb_client_info *entry, int save_load)
{
	if (save_load) {
		entry->load_history = 1 + entry->tx_bytes /
				      BOND_TLB_REBALANCE_INTERVAL;
		entry->tx_bytes = 0;
	}

	entry->tx_slave = NULL;
	entry->next = TLB_NULL_INDEX;
	entry->prev = TLB_NULL_INDEX;
}

static inline void tlb_init_slave(struct slave *slave)
{
	SLAVE_TLB_INFO(slave).load = 0;
	SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX;
}
/* Caller must hold bond lock for read, BH disabled */
static void __tlb_clear_slave(struct bonding *bond, struct slave *slave,
			      int save_load)
{
	struct tlb_client_info *tx_hash_table;
	u32 index;

	/* clear slave from tx_hashtbl */
	tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl;

	/* skip this if we've already freed the tx hash table */
	if (tx_hash_table) {
		index = SLAVE_TLB_INFO(slave).head;
		while (index != TLB_NULL_INDEX) {
			u32 next_index = tx_hash_table[index].next;
			tlb_init_table_entry(&tx_hash_table[index], save_load);
			index = next_index;
		}
	}

	tlb_init_slave(slave);
}

/* Caller must hold bond lock for read */
static void tlb_clear_slave(struct bonding *bond, struct slave *slave,
			    int save_load)
{
	_lock_tx_hashtbl_bh(bond);
	__tlb_clear_slave(bond, slave, save_load);
	_unlock_tx_hashtbl_bh(bond);
}
/* Must be called before starting the monitor timer */
static int tlb_initialize(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	int size = TLB_HASH_TABLE_SIZE * sizeof(struct tlb_client_info);
	struct tlb_client_info *new_hashtbl;
	int i;

	new_hashtbl = kzalloc(size, GFP_KERNEL);
	if (!new_hashtbl)
		return -1;

	_lock_tx_hashtbl_bh(bond);

	bond_info->tx_hashtbl = new_hashtbl;

	for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
		tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
	}

	_unlock_tx_hashtbl_bh(bond);

	return 0;
}

/* Must be called only after all slaves have been released */
static void tlb_deinitialize(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));

	_lock_tx_hashtbl_bh(bond);

	kfree(bond_info->tx_hashtbl);
	bond_info->tx_hashtbl = NULL;

	_unlock_tx_hashtbl_bh(bond);
}
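
/* A slave's "gap" is its nominal capacity (link speed) minus the traffic
 * already assigned to it; the slave with the largest gap is the least
 * loaded one.
 */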
static long long compute_gap(struct slave *slave)
{
	return (s64) (slave->speed << 20) - /* Convert to Megabit per sec */
	       (s64) (SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
}

/* Caller must hold bond lock for read */
static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
{
	struct slave *slave, *least_loaded;
	struct list_head *iter;
	long long max_gap;

	least_loaded = NULL;
	max_gap = LLONG_MIN;

	/* Find the slave with the largest gap */
	bond_for_each_slave_rcu(bond, slave, iter) {
		if (SLAVE_IS_OK(slave)) {
			long long gap = compute_gap(slave);

			if (max_gap < gap) {
				least_loaded = slave;
				max_gap = gap;
			}
		}
	}

	return least_loaded;
}
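
/* Each tx hash bucket caches the slave it was assigned to, and all buckets
 * owned by a slave are chained on a per-slave list (head in tlb_slave_info)
 * so they can be torn down quickly when the slave is cleared or rebalanced.
 */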
static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
					  u32 skb_len)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct tlb_client_info *hash_table;
	struct slave *assigned_slave;

	hash_table = bond_info->tx_hashtbl;
	assigned_slave = hash_table[hash_index].tx_slave;
	if (!assigned_slave) {
		assigned_slave = tlb_get_least_loaded_slave(bond);

		if (assigned_slave) {
			struct tlb_slave_info *slave_info =
				&(SLAVE_TLB_INFO(assigned_slave));
			u32 next_index = slave_info->head;

			hash_table[hash_index].tx_slave = assigned_slave;
			hash_table[hash_index].next = next_index;
			hash_table[hash_index].prev = TLB_NULL_INDEX;

			if (next_index != TLB_NULL_INDEX) {
				hash_table[next_index].prev = hash_index;
			}

			slave_info->head = hash_index;
			slave_info->load +=
				hash_table[hash_index].load_history;
		}
	}

	if (assigned_slave) {
		hash_table[hash_index].tx_bytes += skb_len;
	}

	return assigned_slave;
}
/* Caller must hold bond lock for read */
static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index,
					u32 skb_len)
{
	struct slave *tx_slave;
	/*
	 * We don't need to disable softirq here, because
	 * tlb_choose_channel() is only called by bond_alb_xmit()
	 * which already has softirq disabled.
	 */
	_lock_tx_hashtbl(bond);
	tx_slave = __tlb_choose_channel(bond, hash_index, skb_len);
	_unlock_tx_hashtbl(bond);
	return tx_slave;
}
/*********************** rlb specific functions ***************************/

static inline void _lock_rx_hashtbl_bh(struct bonding *bond)
{
	spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}

static inline void _unlock_rx_hashtbl_bh(struct bonding *bond)
{
	spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}

static inline void _lock_rx_hashtbl(struct bonding *bond)
{
	spin_lock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}

static inline void _unlock_rx_hashtbl(struct bonding *bond)
{
	spin_unlock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}
/* when an ARP REPLY is received from a client update its info
 * in the rx_hashtbl
 */
static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *client_info;
	u32 hash_index;

	_lock_rx_hashtbl_bh(bond);

	hash_index = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
	client_info = &(bond_info->rx_hashtbl[hash_index]);

	if ((client_info->assigned) &&
	    (client_info->ip_src == arp->ip_dst) &&
	    (client_info->ip_dst == arp->ip_src) &&
	    (!ether_addr_equal_64bits(client_info->mac_dst, arp->mac_src))) {
		/* update the client's MAC address */
		memcpy(client_info->mac_dst, arp->mac_src, ETH_ALEN);
		client_info->ntt = 1;
		bond_info->rx_ntt = 1;
	}

	_unlock_rx_hashtbl_bh(bond);
}
static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
			struct slave *slave)
{
	struct arp_pkt *arp, _arp;

	if (skb->protocol != cpu_to_be16(ETH_P_ARP))
		goto out;

	arp = skb_header_pointer(skb, 0, sizeof(_arp), &_arp);
	if (!arp)
		goto out;

	/* We received an ARP from arp->ip_src.
	 * We might have used this IP address previously (on the bonding host
	 * itself or on a system that is bridged together with the bond).
	 * However, if arp->mac_src is different than what is stored in
	 * rx_hashtbl, some other host is now using the IP and we must prevent
	 * sending out client updates with this IP address and the old MAC
	 * address.
	 * Clean up all hash table entries that have this address as ip_src but
	 * have a different mac_src.
	 */
	rlb_purge_src_ip(bond, arp);

	if (arp->op_code == htons(ARPOP_REPLY)) {
		/* update rx hash table for this ARP */
		rlb_update_entry_from_arp(bond, arp);
		pr_debug("Server received an ARP Reply from client\n");
	}
out:
	return RX_HANDLER_ANOTHER;
}
/* Caller must hold bond lock for read */
static struct slave *rlb_next_rx_slave(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct slave *before = NULL, *rx_slave = NULL, *slave;
	struct list_head *iter;
	bool found = false;

	bond_for_each_slave(bond, slave, iter) {
		if (!SLAVE_IS_OK(slave))
			continue;
		if (!found) {
			if (!before || before->speed < slave->speed)
				before = slave;
		} else {
			if (!rx_slave || rx_slave->speed < slave->speed)
				rx_slave = slave;
		}
		if (slave == bond_info->rx_slave)
			found = true;
	}
	/* we didn't find anything after the current or we have something
	 * better before and up to the current slave
	 */
	if (!rx_slave || (before && rx_slave->speed < before->speed))
		rx_slave = before;

	if (rx_slave)
		bond_info->rx_slave = rx_slave;

	return rx_slave;
}

/* Caller must hold rcu_read_lock() for read */
static struct slave *__rlb_next_rx_slave(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct slave *before = NULL, *rx_slave = NULL, *slave;
	struct list_head *iter;
	bool found = false;

	bond_for_each_slave_rcu(bond, slave, iter) {
		if (!SLAVE_IS_OK(slave))
			continue;
		if (!found) {
			if (!before || before->speed < slave->speed)
				before = slave;
		} else {
			if (!rx_slave || rx_slave->speed < slave->speed)
				rx_slave = slave;
		}
		if (slave == bond_info->rx_slave)
			found = true;
	}
	/* we didn't find anything after the current or we have something
	 * better before and up to the current slave
	 */
	if (!rx_slave || (before && rx_slave->speed < before->speed))
		rx_slave = before;

	if (rx_slave)
		bond_info->rx_slave = rx_slave;

	return rx_slave;
}
/* teach the switch the mac of a disabled slave
 * on the primary for fault tolerance
 *
 * Caller must hold bond->curr_slave_lock for write or bond lock for write
 */
static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
{
	if (!bond->curr_active_slave) {
		return;
	}

	if (!bond->alb_info.primary_is_promisc) {
		if (!dev_set_promiscuity(bond->curr_active_slave->dev, 1))
			bond->alb_info.primary_is_promisc = 1;
		else
			bond->alb_info.primary_is_promisc = 0;
	}

	bond->alb_info.rlb_promisc_timeout_counter = 0;

	alb_send_learning_packets(bond->curr_active_slave, addr);
}
/* slave being removed should not be active at this point
 *
 * Caller must hold rtnl.
 */
static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *rx_hash_table;
	u32 index, next_index;

	/* clear slave from rx_hashtbl */
	_lock_rx_hashtbl_bh(bond);

	rx_hash_table = bond_info->rx_hashtbl;
	index = bond_info->rx_hashtbl_used_head;
	for (; index != RLB_NULL_INDEX; index = next_index) {
		next_index = rx_hash_table[index].used_next;
		if (rx_hash_table[index].slave == slave) {
			struct slave *assigned_slave = rlb_next_rx_slave(bond);

			if (assigned_slave) {
				rx_hash_table[index].slave = assigned_slave;
				if (!ether_addr_equal_64bits(rx_hash_table[index].mac_dst,
							     mac_bcast)) {
					bond_info->rx_hashtbl[index].ntt = 1;
					bond_info->rx_ntt = 1;
					/* A slave has been removed from the
					 * table because it is either disabled
					 * or being released. We must retry the
					 * update to avoid clients from not
					 * being updated & disconnecting when
					 * there is stress
					 */
					bond_info->rlb_update_retry_counter =
						RLB_UPDATE_RETRY;
				}
			} else {  /* there is no active slave */
				rx_hash_table[index].slave = NULL;
			}
		}
	}

	_unlock_rx_hashtbl_bh(bond);

	write_lock_bh(&bond->curr_slave_lock);

	if (slave != bond->curr_active_slave) {
		rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
	}

	write_unlock_bh(&bond->curr_slave_lock);
}
static void rlb_update_client(struct rlb_client_info *client_info)
{
	int i;

	if (!client_info->slave) {
		return;
	}

	for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
		struct sk_buff *skb;

		skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
				 client_info->ip_dst,
				 client_info->slave->dev,
				 client_info->ip_src,
				 client_info->mac_dst,
				 client_info->slave->dev->dev_addr,
				 client_info->mac_dst);
		if (!skb) {
			pr_err("%s: Error: failed to create an ARP packet\n",
			       client_info->slave->bond->dev->name);
			continue;
		}

		skb->dev = client_info->slave->dev;

		if (client_info->vlan_id) {
			skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id);
			if (!skb) {
				pr_err("%s: Error: failed to insert VLAN tag\n",
				       client_info->slave->bond->dev->name);
				continue;
			}
		}

		arp_xmit(skb);
	}
}
/* sends ARP REPLIES that update the clients that need updating */
static void rlb_update_rx_clients(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *client_info;
	u32 hash_index;

	_lock_rx_hashtbl_bh(bond);

	hash_index = bond_info->rx_hashtbl_used_head;
	for (; hash_index != RLB_NULL_INDEX;
	     hash_index = client_info->used_next) {
		client_info = &(bond_info->rx_hashtbl[hash_index]);
		if (client_info->ntt) {
			rlb_update_client(client_info);
			if (bond_info->rlb_update_retry_counter == 0) {
				client_info->ntt = 0;
			}
		}
	}

	/* do not update the entries again until this counter is zero so as
	 * not to confuse the clients.
	 */
	bond_info->rlb_update_delay_counter = RLB_UPDATE_DELAY;

	_unlock_rx_hashtbl_bh(bond);
}
/* The slave was assigned a new mac address - update the clients */
static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *slave)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *client_info;
	int ntt = 0;
	u32 hash_index = 0;

	_lock_rx_hashtbl_bh(bond);

	hash_index = bond_info->rx_hashtbl_used_head;
	for (; hash_index != RLB_NULL_INDEX;
	     hash_index = client_info->used_next) {
		client_info = &(bond_info->rx_hashtbl[hash_index]);

		if ((client_info->slave == slave) &&
		    !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
			client_info->ntt = 1;
			ntt = 1;
		}
	}

	/* update the team's flag only after the whole iteration */
	if (ntt) {
		bond_info->rx_ntt = 1;
		/* fasten the change */
		bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
	}

	_unlock_rx_hashtbl_bh(bond);
}
/* mark all clients using src_ip to be updated */
static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *client_info;
	u32 hash_index;

	_lock_rx_hashtbl(bond);

	hash_index = bond_info->rx_hashtbl_used_head;
	for (; hash_index != RLB_NULL_INDEX;
	     hash_index = client_info->used_next) {
		client_info = &(bond_info->rx_hashtbl[hash_index]);

		if (!client_info->slave) {
			pr_err("%s: Error: found a client with no channel in the client's hash table\n",
			       bond->dev->name);
			continue;
		}
		/* update all clients using this src_ip, that are not assigned
		 * to the team's address (curr_active_slave) and have a known
		 * unicast mac address.
		 */
		if ((client_info->ip_src == src_ip) &&
		    !ether_addr_equal_64bits(client_info->slave->dev->dev_addr,
					     bond->dev->dev_addr) &&
		    !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
			client_info->ntt = 1;
			bond_info->rx_ntt = 1;
		}
	}

	_unlock_rx_hashtbl(bond);
}
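
/* Pick (and remember) which slave should answer for arp->ip_dst. RLB
 * balances receive traffic by answering each client's ARP with a different
 * slave's MAC address, so incoming traffic is spread across the slaves.
 */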
/* Caller must hold both bond and ptr locks for read */
static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct arp_pkt *arp = arp_pkt(skb);
	struct slave *assigned_slave, *curr_active_slave;
	struct rlb_client_info *client_info;
	u32 hash_index = 0;

	_lock_rx_hashtbl(bond);

	curr_active_slave = rcu_dereference(bond->curr_active_slave);

	hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
	client_info = &(bond_info->rx_hashtbl[hash_index]);

	if (client_info->assigned) {
		if ((client_info->ip_src == arp->ip_src) &&
		    (client_info->ip_dst == arp->ip_dst)) {
			/* the entry is already assigned to this client */
			if (!ether_addr_equal_64bits(arp->mac_dst, mac_bcast)) {
				/* update mac address from arp */
				memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
			}
			memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN);

			assigned_slave = client_info->slave;
			if (assigned_slave) {
				_unlock_rx_hashtbl(bond);
				return assigned_slave;
			}
		} else {
			/* the entry is already assigned to some other client,
			 * move the old client to primary (curr_active_slave) so
			 * that the new client can be assigned to this entry.
			 */
			if (bond->curr_active_slave &&
			    client_info->slave != curr_active_slave) {
				client_info->slave = curr_active_slave;
				rlb_update_client(client_info);
			}
		}
	}
	/* assign a new slave */
	assigned_slave = __rlb_next_rx_slave(bond);

	if (assigned_slave) {
		if (!(client_info->assigned &&
		      client_info->ip_src == arp->ip_src)) {
			/* ip_src is going to be updated,
			 * fix the src hash list
			 */
			u32 hash_src = _simple_hash((u8 *)&arp->ip_src,
						    sizeof(arp->ip_src));
			rlb_src_unlink(bond, hash_index);
			rlb_src_link(bond, hash_src, hash_index);
		}

		client_info->ip_src = arp->ip_src;
		client_info->ip_dst = arp->ip_dst;
		/* arp->mac_dst is broadcast for arp requests.
		 * will be updated with the client's actual unicast mac address
		 * upon receiving an arp reply.
		 */
		memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
		memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN);
		client_info->slave = assigned_slave;

		if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
			client_info->ntt = 1;
			bond->alb_info.rx_ntt = 1;
		} else {
			client_info->ntt = 0;
		}

		if (vlan_get_tag(skb, &client_info->vlan_id))
			client_info->vlan_id = 0;

		if (!client_info->assigned) {
			u32 prev_tbl_head = bond_info->rx_hashtbl_used_head;
			bond_info->rx_hashtbl_used_head = hash_index;
			client_info->used_next = prev_tbl_head;
			if (prev_tbl_head != RLB_NULL_INDEX) {
				bond_info->rx_hashtbl[prev_tbl_head].used_prev =
					hash_index;
			}
			client_info->assigned = 1;
		}
	}

	_unlock_rx_hashtbl(bond);

	return assigned_slave;
}
/* chooses (and returns) transmit channel for arp reply
 * does not choose channel for other arp types since they are
 * sent on the curr_active_slave
 */
static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
{
	struct arp_pkt *arp = arp_pkt(skb);
	struct slave *tx_slave = NULL;

	/* Don't modify or load balance ARPs that do not originate locally
	 * (e.g., arrive via a bridge).
	 */
	if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
		return NULL;

	if (arp->op_code == htons(ARPOP_REPLY)) {
		/* the arp must be sent on the selected
		 * rx channel
		 */
		tx_slave = rlb_choose_channel(skb, bond);
		if (tx_slave)
			memcpy(arp->mac_src, tx_slave->dev->dev_addr, ETH_ALEN);
		pr_debug("Server sent ARP Reply packet\n");
	} else if (arp->op_code == htons(ARPOP_REQUEST)) {
		/* Create an entry in the rx_hashtbl for this client as a
		 * place holder.
		 * When the arp reply is received the entry will be updated
		 * with the correct unicast address of the client.
		 */
		rlb_choose_channel(skb, bond);

		/* The ARP reply packets must be delayed so that
		 * they can cancel out the influence of the ARP request.
		 */
		bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY;

		/* arp requests are broadcast and are sent on the primary
		 * the arp request will collapse all clients on the subnet to
		 * the primary slave. We must register these clients to be
		 * updated with their assigned mac.
		 */
		rlb_req_update_subnet_clients(bond, arp->ip_src);
		pr_debug("Server sent ARP Request packet\n");
	}

	return tx_slave;
}
/* Caller must hold bond lock for read */
static void rlb_rebalance(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct slave *assigned_slave;
	struct rlb_client_info *client_info;
	int ntt;
	u32 hash_index;

	_lock_rx_hashtbl_bh(bond);

	ntt = 0;
	hash_index = bond_info->rx_hashtbl_used_head;
	for (; hash_index != RLB_NULL_INDEX;
	     hash_index = client_info->used_next) {
		client_info = &(bond_info->rx_hashtbl[hash_index]);
		assigned_slave = __rlb_next_rx_slave(bond);
		if (assigned_slave && (client_info->slave != assigned_slave)) {
			client_info->slave = assigned_slave;
			client_info->ntt = 1;
			ntt = 1;
		}
	}

	/* update the team's flag only after the whole iteration */
	if (ntt)
		bond_info->rx_ntt = 1;
	_unlock_rx_hashtbl_bh(bond);
}
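
/* Every rx_hashtbl entry is linked into two lists: the "used" list of
 * entries indexed by destination IP hash, and a per-source-IP list that
 * lets rlb_purge_src_ip() find every entry sharing one ip_src.
 */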
/* Caller must hold rx_hashtbl lock */
static void rlb_init_table_entry_dst(struct rlb_client_info *entry)
{
	entry->used_next = RLB_NULL_INDEX;
	entry->used_prev = RLB_NULL_INDEX;
	entry->assigned = 0;
	entry->slave = NULL;
	entry->vlan_id = 0;
}

static void rlb_init_table_entry_src(struct rlb_client_info *entry)
{
	entry->src_first = RLB_NULL_INDEX;
	entry->src_prev = RLB_NULL_INDEX;
	entry->src_next = RLB_NULL_INDEX;
}

static void rlb_init_table_entry(struct rlb_client_info *entry)
{
	memset(entry, 0, sizeof(struct rlb_client_info));
	rlb_init_table_entry_dst(entry);
	rlb_init_table_entry_src(entry);
}

static void rlb_delete_table_entry_dst(struct bonding *bond, u32 index)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	u32 next_index = bond_info->rx_hashtbl[index].used_next;
	u32 prev_index = bond_info->rx_hashtbl[index].used_prev;

	if (index == bond_info->rx_hashtbl_used_head)
		bond_info->rx_hashtbl_used_head = next_index;
	if (prev_index != RLB_NULL_INDEX)
		bond_info->rx_hashtbl[prev_index].used_next = next_index;
	if (next_index != RLB_NULL_INDEX)
		bond_info->rx_hashtbl[next_index].used_prev = prev_index;
}
/* unlink a rlb hash table entry from the src list */
static void rlb_src_unlink(struct bonding *bond, u32 index)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	u32 next_index = bond_info->rx_hashtbl[index].src_next;
	u32 prev_index = bond_info->rx_hashtbl[index].src_prev;

	bond_info->rx_hashtbl[index].src_next = RLB_NULL_INDEX;
	bond_info->rx_hashtbl[index].src_prev = RLB_NULL_INDEX;

	if (next_index != RLB_NULL_INDEX)
		bond_info->rx_hashtbl[next_index].src_prev = prev_index;

	if (prev_index == RLB_NULL_INDEX)
		return;

	/* is prev_index pointing to the head of this list? */
	if (bond_info->rx_hashtbl[prev_index].src_first == index)
		bond_info->rx_hashtbl[prev_index].src_first = next_index;
	else
		bond_info->rx_hashtbl[prev_index].src_next = next_index;
}

static void rlb_delete_table_entry(struct bonding *bond, u32 index)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]);

	rlb_delete_table_entry_dst(bond, index);
	rlb_init_table_entry_dst(entry);

	rlb_src_unlink(bond, index);
}

/* add the rx_hashtbl[ip_dst_hash] entry to the list
 * of entries with identical ip_src_hash
 */
static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, u32 ip_dst_hash)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	u32 next;

	bond_info->rx_hashtbl[ip_dst_hash].src_prev = ip_src_hash;
	next = bond_info->rx_hashtbl[ip_src_hash].src_first;
	bond_info->rx_hashtbl[ip_dst_hash].src_next = next;
	if (next != RLB_NULL_INDEX)
		bond_info->rx_hashtbl[next].src_prev = ip_dst_hash;
	bond_info->rx_hashtbl[ip_src_hash].src_first = ip_dst_hash;
}
/* deletes all rx_hashtbl entries with arp->ip_src if their mac_src does
 * not match arp->mac_src */
static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	u32 ip_src_hash = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
	u32 index;

	_lock_rx_hashtbl_bh(bond);

	index = bond_info->rx_hashtbl[ip_src_hash].src_first;
	while (index != RLB_NULL_INDEX) {
		struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]);
		u32 next_index = entry->src_next;

		if (entry->ip_src == arp->ip_src &&
		    !ether_addr_equal_64bits(arp->mac_src, entry->mac_src))
			rlb_delete_table_entry(bond, index);
		index = next_index;
	}
	_unlock_rx_hashtbl_bh(bond);
}
static int rlb_initialize(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *new_hashtbl;
	int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info);
	int i;

	new_hashtbl = kmalloc(size, GFP_KERNEL);
	if (!new_hashtbl)
		return -1;

	_lock_rx_hashtbl_bh(bond);

	bond_info->rx_hashtbl = new_hashtbl;

	bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;

	for (i = 0; i < RLB_HASH_TABLE_SIZE; i++) {
		rlb_init_table_entry(bond_info->rx_hashtbl + i);
	}

	_unlock_rx_hashtbl_bh(bond);

	/* register to receive ARPs */
	bond->recv_probe = rlb_arp_recv;

	return 0;
}

static void rlb_deinitialize(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));

	_lock_rx_hashtbl_bh(bond);

	kfree(bond_info->rx_hashtbl);
	bond_info->rx_hashtbl = NULL;
	bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;

	_unlock_rx_hashtbl_bh(bond);
}

static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	u32 curr_index;

	_lock_rx_hashtbl_bh(bond);

	curr_index = bond_info->rx_hashtbl_used_head;
	while (curr_index != RLB_NULL_INDEX) {
		struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]);
		u32 next_index = bond_info->rx_hashtbl[curr_index].used_next;

		if (curr->vlan_id == vlan_id)
			rlb_delete_table_entry(bond, curr_index);

		curr_index = next_index;
	}

	_unlock_rx_hashtbl_bh(bond);
}
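
/* Learning packets are self-addressed dummy frames (ETH_P_LOOP); sending
 * one out of a slave makes the switch (re)learn which port currently owns
 * that MAC address.
 */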
/*********************** tlb/rlb shared functions *********************/

static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
			    u16 vid)
{
	struct learning_pkt pkt;
	struct sk_buff *skb;
	int size = sizeof(struct learning_pkt);
	char *data;

	memset(&pkt, 0, size);
	memcpy(pkt.mac_dst, mac_addr, ETH_ALEN);
	memcpy(pkt.mac_src, mac_addr, ETH_ALEN);
	pkt.type = cpu_to_be16(ETH_P_LOOP);

	skb = dev_alloc_skb(size);
	if (!skb)
		return;

	data = skb_put(skb, size);
	memcpy(data, &pkt, size);

	skb_reset_mac_header(skb);
	skb->network_header = skb->mac_header + ETH_HLEN;
	skb->protocol = pkt.type;
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = slave->dev;

	if (vid) {
		skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
		if (!skb) {
			pr_err("%s: Error: failed to insert VLAN tag\n",
			       slave->bond->dev->name);
			return;
		}
	}

	dev_queue_xmit(skb);
}
static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
{
	struct bonding *bond = bond_get_bond_by_slave(slave);
	struct net_device *upper;
	struct list_head *iter;

	/* send untagged */
	alb_send_lp_vid(slave, mac_addr, 0);

	/* loop through vlans and send one packet for each */
	rcu_read_lock();
	netdev_for_each_all_upper_dev_rcu(bond->dev, upper, iter) {
		if (upper->priv_flags & IFF_802_1Q_VLAN)
			alb_send_lp_vid(slave, mac_addr,
					vlan_dev_vlan_id(upper));
	}
	rcu_read_unlock();
}
static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
{
	struct net_device *dev = slave->dev;
	struct sockaddr s_addr;

	if (slave->bond->params.mode == BOND_MODE_TLB) {
		memcpy(dev->dev_addr, addr, dev->addr_len);
		return 0;
	}

	/* for rlb each slave must have a unique hw mac addresses so that */
	/* each slave will receive packets destined to a different mac */
	memcpy(s_addr.sa_data, addr, dev->addr_len);
	s_addr.sa_family = dev->type;
	if (dev_set_mac_address(dev, &s_addr)) {
		pr_err("%s: Error: dev_set_mac_address of dev %s failed!\n"
		       "ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n",
		       slave->bond->dev->name, dev->name);
		return -EOPNOTSUPP;
	}
	return 0;
}
/*
 * Swap MAC addresses between two slaves.
 *
 * Called with RTNL held, and no other locks.
 */
static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
{
	u8 tmp_mac_addr[ETH_ALEN];

	memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
	alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr);
	alb_set_slave_mac_addr(slave2, tmp_mac_addr);
}

/*
 * Send learning packets after MAC address swap.
 *
 * Called with RTNL and no other locks
 */
static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
				struct slave *slave2)
{
	int slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2));
	struct slave *disabled_slave = NULL;

	ASSERT_RTNL();

	/* fasten the change in the switch */
	if (SLAVE_IS_OK(slave1)) {
		alb_send_learning_packets(slave1, slave1->dev->dev_addr);
		if (bond->alb_info.rlb_enabled) {
			/* inform the clients that the mac address
			 * has changed
			 */
			rlb_req_update_slave_clients(bond, slave1);
		}
	} else {
		disabled_slave = slave1;
	}

	if (SLAVE_IS_OK(slave2)) {
		alb_send_learning_packets(slave2, slave2->dev->dev_addr);
		if (bond->alb_info.rlb_enabled) {
			/* inform the clients that the mac address
			 * has changed
			 */
			rlb_req_update_slave_clients(bond, slave2);
		}
	} else {
		disabled_slave = slave2;
	}

	if (bond->alb_info.rlb_enabled && slaves_state_differ) {
		/* A disabled slave was assigned an active mac addr */
		rlb_teach_disabled_mac_on_primary(bond,
						  disabled_slave->dev->dev_addr);
	}
}
/**
 * alb_change_hw_addr_on_detach
 * @bond: bonding we're working on
 * @slave: the slave that was just detached
 *
 * We assume that @slave was already detached from the slave list.
 *
 * If @slave's permanent hw address is different both from its current
 * address and from @bond's address, then somewhere in the bond there's
 * a slave that has @slave's permanent address as its current address.
 * We'll make sure that that slave no longer uses @slave's permanent address.
 *
 * Caller must hold RTNL and no other locks
 */
static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *slave)
{
	int perm_curr_diff;
	int perm_bond_diff;
	struct slave *found_slave;

	perm_curr_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
						  slave->dev->dev_addr);
	perm_bond_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
						  bond->dev->dev_addr);

	if (perm_curr_diff && perm_bond_diff) {
		found_slave = bond_slave_has_mac(bond, slave->perm_hwaddr);

		if (found_slave) {
			/* locking: needs RTNL and nothing else */
			alb_swap_mac_addr(slave, found_slave);
			alb_fasten_mac_swap(bond, slave, found_slave);
		}
	}
}
/**
 * alb_handle_addr_collision_on_attach
 * @bond: bonding we're working on
 * @slave: the slave that was just attached
 *
 * checks uniqueness of slave's mac address and handles the case where the
 * new slave uses the bond's mac address.
 *
 * If the permanent hw address of @slave is @bond's hw address, we need to
 * find a different hw address to give @slave, that isn't in use by any other
 * slave in the bond. This address must be, of course, one of the permanent
 * addresses of the other slaves.
 *
 * We go over the slave list, and for each slave there we compare its
 * permanent hw address with the current address of all the other slaves.
 * If no match was found, then we've found a slave with a permanent address
 * that isn't used by any other slave in the bond, so we can assign it to
 * this new slave.
 *
 * assumption: this function is called before @slave is attached to the
 * slave list.
 */
static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
{
	struct slave *has_bond_addr = bond->curr_active_slave;
	struct slave *tmp_slave1, *free_mac_slave = NULL;
	struct list_head *iter;

	if (!bond_has_slaves(bond)) {
		/* this is the first slave */
		return 0;
	}

	/* if slave's mac address differs from bond's mac address
	 * check uniqueness of slave's mac address against the other
	 * slaves in the bond.
	 */
	if (!ether_addr_equal_64bits(slave->perm_hwaddr, bond->dev->dev_addr)) {
		if (!bond_slave_has_mac(bond, slave->dev->dev_addr))
			return 0;

		/* Try setting slave mac to bond address and fall-through
		 * to code handling that situation below...
		 */
		alb_set_slave_mac_addr(slave, bond->dev->dev_addr);
	}

	/* The slave's address is equal to the address of the bond.
	 * Search for a spare address in the bond for this slave.
	 */
	bond_for_each_slave(bond, tmp_slave1, iter) {
		if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
			/* no slave has tmp_slave1's perm addr
			 * as its curr addr
			 */
			free_mac_slave = tmp_slave1;
			break;
		}

		if (!has_bond_addr) {
			if (ether_addr_equal_64bits(tmp_slave1->dev->dev_addr,
						    bond->dev->dev_addr)) {
				has_bond_addr = tmp_slave1;
			}
		}
	}

	if (free_mac_slave) {
		alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);

		pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
			   bond->dev->name, slave->dev->name,
			   free_mac_slave->dev->name);

	} else if (has_bond_addr) {
		pr_err("%s: Error: the hw address of slave %s is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n",
		       bond->dev->name, slave->dev->name);
		return -EFAULT;
	}

	return 0;
}
/*
 * alb_set_mac_address
 *
 * In TLB mode all slaves are configured to the bond's hw address, but set
 * their dev_addr field to different addresses (based on their permanent hw
 * addresses).
 *
 * For each slave, this function sets the interface to the new address and then
 * changes its dev_addr field to its previous value.
 *
 * Unwinding assumes bond's mac address has not yet changed.
 */
static int alb_set_mac_address(struct bonding *bond, void *addr)
{
	struct slave *slave, *rollback_slave;
	struct list_head *iter;
	struct sockaddr sa;
	char tmp_addr[ETH_ALEN];
	int res;

	if (bond->alb_info.rlb_enabled)
		return 0;

	bond_for_each_slave(bond, slave, iter) {
		/* save net_device's current hw address */
		memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);

		res = dev_set_mac_address(slave->dev, addr);

		/* restore net_device's hw address */
		memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);

		if (res)
			goto unwind;
	}

	return 0;

unwind:
	memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
	sa.sa_family = bond->dev->type;

	/* unwind from head to the slave that failed */
	bond_for_each_slave(bond, rollback_slave, iter) {
		if (rollback_slave == slave)
			break;

		memcpy(tmp_addr, rollback_slave->dev->dev_addr, ETH_ALEN);
		dev_set_mac_address(rollback_slave->dev, &sa);
		memcpy(rollback_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
	}

	return res;
}
1328 int bond_alb_initialize(struct bonding
*bond
, int rlb_enabled
)
1332 res
= tlb_initialize(bond
);
1338 bond
->alb_info
.rlb_enabled
= 1;
1339 /* initialize rlb */
1340 res
= rlb_initialize(bond
);
1342 tlb_deinitialize(bond
);
1346 bond
->alb_info
.rlb_enabled
= 0;
1352 void bond_alb_deinitialize(struct bonding
*bond
)
1354 struct alb_bond_info
*bond_info
= &(BOND_ALB_INFO(bond
));
1356 tlb_deinitialize(bond
);
1358 if (bond_info
->rlb_enabled
) {
1359 rlb_deinitialize(bond
);
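
/* Transmit path: hash balanceable traffic (IPv4, IPv6 and IPX data, plus
 * ARP via RLB when enabled) onto a slave; broadcast, multicast and all
 * other protocols go out on the current active slave unmodified.
 */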
int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct ethhdr *eth_data;
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct slave *tx_slave = NULL;
	static const __be32 ip_bcast = htonl(0xffffffff);
	int hash_size = 0;
	int do_tx_balance = 1;
	u32 hash_index = 0;
	const u8 *hash_start = NULL;
	struct ipv6hdr *ip6hdr;

	skb_reset_mac_header(skb);
	eth_data = eth_hdr(skb);

	switch (ntohs(skb->protocol)) {
	case ETH_P_IP: {
		const struct iphdr *iph = ip_hdr(skb);

		if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) ||
		    (iph->daddr == ip_bcast) ||
		    (iph->protocol == IPPROTO_IGMP)) {
			do_tx_balance = 0;
			break;
		}
		hash_start = (char *)&(iph->daddr);
		hash_size = sizeof(iph->daddr);
		break;
	}
	case ETH_P_IPV6:
		/* IPv6 doesn't really use broadcast mac address, but leave
		 * that here just in case.
		 */
		if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) {
			do_tx_balance = 0;
			break;
		}

		/* IPv6 uses all-nodes multicast as an equivalent to
		 * broadcasts in IPv4.
		 */
		if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) {
			do_tx_balance = 0;
			break;
		}

		/* Additionally, DAD probes should not be tx-balanced as that
		 * will lead to false positives for duplicate addresses and
		 * prevent address configuration from working.
		 */
		ip6hdr = ipv6_hdr(skb);
		if (ipv6_addr_any(&ip6hdr->saddr)) {
			do_tx_balance = 0;
			break;
		}

		hash_start = (char *)&(ipv6_hdr(skb)->daddr);
		hash_size = sizeof(ipv6_hdr(skb)->daddr);
		break;
	case ETH_P_IPX:
		if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
			/* something is wrong with this packet */
			do_tx_balance = 0;
			break;
		}

		if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) {
			/* The only protocol worth balancing in
			 * this family since it has an "ARP" like
			 * mechanism
			 */
			do_tx_balance = 0;
			break;
		}

		hash_start = (char *)eth_data->h_dest;
		hash_size = ETH_ALEN;
		break;
	case ETH_P_ARP:
		do_tx_balance = 0;
		if (bond_info->rlb_enabled) {
			tx_slave = rlb_arp_xmit(skb, bond);
		}
		break;
	default:
		do_tx_balance = 0;
		break;
	}

	if (do_tx_balance) {
		hash_index = _simple_hash(hash_start, hash_size);
		tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
	}

	if (!tx_slave) {
		/* unbalanced or unassigned, send through primary */
		tx_slave = rcu_dereference(bond->curr_active_slave);
		bond_info->unbalanced_load += skb->len;
	}

	if (tx_slave && SLAVE_IS_OK(tx_slave)) {
		if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
			memcpy(eth_data->h_source,
			       tx_slave->dev->dev_addr,
			       ETH_ALEN);
		}

		bond_dev_queue_xmit(bond, skb, tx_slave->dev);
		goto out;
	}

	if (tx_slave) {
		_lock_tx_hashtbl(bond);
		__tlb_clear_slave(bond, tx_slave, 0);
		_unlock_tx_hashtbl(bond);
	}

	/* no suitable interface, frame not sent */
	kfree_skb(skb);
out:
	return NETDEV_TX_OK;
}
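
/* Periodic worker: emits learning packets, rebalances TLB (and RLB when
 * enabled), handles the primary's promiscuous-mode timeout and flushes
 * pending RLB client updates, then re-arms itself.
 */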
void bond_alb_monitor(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    alb_work.work);
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct list_head *iter;
	struct slave *slave;

	if (!bond_has_slaves(bond)) {
		bond_info->tx_rebalance_counter = 0;
		bond_info->lp_counter = 0;
		goto re_arm;
	}

	rcu_read_lock();

	bond_info->tx_rebalance_counter++;
	bond_info->lp_counter++;

	/* send learning packets */
	if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
		/* change of curr_active_slave involves swapping of mac addresses.
		 * in order to avoid this swapping from happening while
		 * sending the learning packets, the curr_slave_lock must be held for
		 * read.
		 */
		read_lock(&bond->curr_slave_lock);

		bond_for_each_slave_rcu(bond, slave, iter)
			alb_send_learning_packets(slave, slave->dev->dev_addr);

		read_unlock(&bond->curr_slave_lock);

		bond_info->lp_counter = 0;
	}

	/* rebalance tx traffic */
	if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) {

		read_lock(&bond->curr_slave_lock);

		bond_for_each_slave_rcu(bond, slave, iter) {
			tlb_clear_slave(bond, slave, 1);
			if (slave == bond->curr_active_slave) {
				SLAVE_TLB_INFO(slave).load =
					bond_info->unbalanced_load /
					BOND_TLB_REBALANCE_INTERVAL;
				bond_info->unbalanced_load = 0;
			}
		}

		read_unlock(&bond->curr_slave_lock);

		bond_info->tx_rebalance_counter = 0;
	}

	/* handle rlb stuff */
	if (bond_info->rlb_enabled) {
		if (bond_info->primary_is_promisc &&
		    (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) {

			/*
			 * dev_set_promiscuity requires rtnl and
			 * nothing else. Avoid race with bond_close.
			 */
			rcu_read_unlock();
			if (!rtnl_trylock())
				goto re_arm;

			bond_info->rlb_promisc_timeout_counter = 0;

			/* If the primary was set to promiscuous mode
			 * because a slave was disabled then
			 * it can now leave promiscuous mode.
			 */
			dev_set_promiscuity(bond->curr_active_slave->dev, -1);
			bond_info->primary_is_promisc = 0;

			rtnl_unlock();
			rcu_read_lock();
		}

		if (bond_info->rlb_rebalance) {
			bond_info->rlb_rebalance = 0;
			rlb_rebalance(bond);
		}

		/* check if clients need updating */
		if (bond_info->rx_ntt) {
			if (bond_info->rlb_update_delay_counter) {
				--bond_info->rlb_update_delay_counter;
			} else {
				rlb_update_rx_clients(bond);
				if (bond_info->rlb_update_retry_counter) {
					--bond_info->rlb_update_retry_counter;
				} else {
					bond_info->rx_ntt = 0;
				}
			}
		}
	}

	rcu_read_unlock();
re_arm:
	queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);
}
/* assumption: called before the slave is attached to the bond
 * and not locked by the bond lock
 */
int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
{
	int res;

	res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr);
	if (res)
		return res;

	res = alb_handle_addr_collision_on_attach(bond, slave);
	if (res)
		return res;

	tlb_init_slave(slave);

	/* order a rebalance ASAP */
	bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;

	if (bond->alb_info.rlb_enabled) {
		bond->alb_info.rlb_rebalance = 1;
	}

	return 0;
}
/*
 * Remove slave from tlb and rlb hash tables, and fix up MAC addresses
 * if necessary.
 *
 * Caller must hold RTNL and no other locks
 */
void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
{
	if (bond_has_slaves(bond))
		alb_change_hw_addr_on_detach(bond, slave);

	tlb_clear_slave(bond, slave, 0);

	if (bond->alb_info.rlb_enabled) {
		bond->alb_info.rx_slave = NULL;
		rlb_clear_slave(bond, slave);
	}
}
/* Caller must hold bond lock for read */
void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));

	if (link == BOND_LINK_DOWN) {
		tlb_clear_slave(bond, slave, 0);
		if (bond->alb_info.rlb_enabled) {
			rlb_clear_slave(bond, slave);
		}
	} else if (link == BOND_LINK_UP) {
		/* order a rebalance ASAP */
		bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
		if (bond->alb_info.rlb_enabled) {
			bond->alb_info.rlb_rebalance = 1;
			/* If the updelay module parameter is smaller than the
			 * forwarding delay of the switch the rebalance will
			 * not work because the rebalance arp replies will
			 * not be forwarded to the clients.
			 */
		}
	}
}
/**
 * bond_alb_handle_active_change - assign new curr_active_slave
 * @bond: our bonding struct
 * @new_slave: new slave to assign
 *
 * Set the bond->curr_active_slave to @new_slave and handle
 * mac address swapping and promiscuity changes as needed.
 *
 * If new_slave is NULL, caller must hold curr_slave_lock or
 * bond->lock for write.
 *
 * If new_slave is not NULL, caller must hold RTNL, curr_slave_lock
 * for write. Processing here may sleep, so no other locks may be held.
 */
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave)
	__releases(&bond->curr_slave_lock)
	__acquires(&bond->curr_slave_lock)
{
	struct slave *swap_slave;

	if (bond->curr_active_slave == new_slave)
		return;

	if (bond->curr_active_slave && bond->alb_info.primary_is_promisc) {
		dev_set_promiscuity(bond->curr_active_slave->dev, -1);
		bond->alb_info.primary_is_promisc = 0;
		bond->alb_info.rlb_promisc_timeout_counter = 0;
	}

	swap_slave = bond->curr_active_slave;
	rcu_assign_pointer(bond->curr_active_slave, new_slave);

	if (!new_slave || !bond_has_slaves(bond))
		return;

	/* set the new curr_active_slave to the bond's mac address
	 * i.e. swap mac addresses of old curr_active_slave and new curr_active_slave
	 */
	if (!swap_slave)
		swap_slave = bond_slave_has_mac(bond, bond->dev->dev_addr);

	/*
	 * Arrange for swap_slave and new_slave to temporarily be
	 * ignored so we can mess with their MAC addresses without
	 * fear of interference from transmit activity.
	 */
	if (swap_slave)
		tlb_clear_slave(bond, swap_slave, 1);
	tlb_clear_slave(bond, new_slave, 1);

	write_unlock_bh(&bond->curr_slave_lock);

	ASSERT_RTNL();

	/* in TLB mode, the slave might flip down/up with the old dev_addr,
	 * and thus filter bond->dev_addr's packets, so force bond's mac
	 */
	if (bond->params.mode == BOND_MODE_TLB) {
		struct sockaddr sa;
		u8 tmp_addr[ETH_ALEN];

		memcpy(tmp_addr, new_slave->dev->dev_addr, ETH_ALEN);

		memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
		sa.sa_family = bond->dev->type;
		/* we don't care if it can't change its mac, best effort */
		dev_set_mac_address(new_slave->dev, &sa);

		memcpy(new_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
	}

	/* curr_active_slave must be set before calling alb_swap_mac_addr */
	if (swap_slave) {
		/* swap mac address */
		alb_swap_mac_addr(swap_slave, new_slave);
		alb_fasten_mac_swap(bond, swap_slave, new_slave);
	} else {
		/* set the new_slave to the bond mac address */
		alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
		alb_send_learning_packets(new_slave, bond->dev->dev_addr);
	}

	write_lock_bh(&bond->curr_slave_lock);
}
int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
	__acquires(&bond->lock)
	__releases(&bond->lock)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct sockaddr *sa = addr;
	struct slave *swap_slave;
	int res;

	if (!is_valid_ether_addr(sa->sa_data)) {
		return -EADDRNOTAVAIL;
	}

	res = alb_set_mac_address(bond, addr);
	if (res) {
		return res;
	}

	memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len);

	/* If there is no curr_active_slave there is nothing else to do.
	 * Otherwise we'll need to pass the new address to it and handle
	 * duplications.
	 */
	if (!bond->curr_active_slave) {
		return 0;
	}

	swap_slave = bond_slave_has_mac(bond, bond_dev->dev_addr);

	if (swap_slave) {
		alb_swap_mac_addr(swap_slave, bond->curr_active_slave);
		alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
	} else {
		alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);

		read_lock(&bond->lock);
		alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
		if (bond->alb_info.rlb_enabled) {
			/* inform clients mac address has changed */
			rlb_req_update_slave_clients(bond, bond->curr_active_slave);
		}
		read_unlock(&bond->lock);
	}

	return 0;
}
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
{
	if (bond->alb_info.rlb_enabled) {
		rlb_clear_vlan(bond, vlan_id);
	}
}