/*
 * Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/pkt_sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_bonding.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <net/ipx.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <asm/byteorder.h>
#include "bonding.h"
#include "bond_alb.h"

#ifndef __long_aligned
#define __long_aligned __attribute__((aligned((sizeof(long)))))
#endif
static const u8 mac_bcast[ETH_ALEN] __long_aligned = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
static const u8 mac_v6_allmcast[ETH_ALEN] __long_aligned = {
	0x33, 0x33, 0x00, 0x00, 0x00, 0x01
};
static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC;
#pragma pack(1)
struct learning_pkt {
	u8 mac_dst[ETH_ALEN];
	u8 mac_src[ETH_ALEN];
	__be16 type;
	u8 padding[ETH_ZLEN - ETH_HLEN];
};

struct arp_pkt {
	__be16 hw_addr_space;
	__be16 prot_addr_space;
	u8 hw_addr_len;
	u8 prot_addr_len;
	__be16 op_code;
	u8 mac_src[ETH_ALEN];	/* sender hardware address */
	__be32 ip_src;		/* sender IP address */
	u8 mac_dst[ETH_ALEN];	/* target hardware address */
	__be32 ip_dst;		/* target IP address */
};
#pragma pack()
static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
{
	return (struct arp_pkt *)skb_network_header(skb);
}
/* Forward declaration */
static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[]);
static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
static void rlb_src_unlink(struct bonding *bond, u32 index);
static void rlb_src_link(struct bonding *bond, u32 ip_src_hash,
			 u32 ip_dst_hash);
static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
{
	int i;
	u8 hash = 0;

	for (i = 0; i < hash_size; i++) {
		hash ^= hash_start[i];
	}

	return hash;
}
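/* Note: _simple_hash() folds the key down to a single byte by XOR, so the
 * tx and rx hash tables are indexed by at most 256 distinct values; the
 * IPv4/IPv6/IPX keys used by bond_alb_xmit() and the ARP addresses used by
 * the rlb code all collapse through this same function.
 */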
/*********************** tlb specific functions ***************************/

static inline void _lock_tx_hashtbl_bh(struct bonding *bond)
{
	spin_lock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}

static inline void _unlock_tx_hashtbl_bh(struct bonding *bond)
{
	spin_unlock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}

static inline void _lock_tx_hashtbl(struct bonding *bond)
{
	spin_lock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}

static inline void _unlock_tx_hashtbl(struct bonding *bond)
{
	spin_unlock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}
/* Caller must hold tx_hashtbl lock */
static inline void tlb_init_table_entry(struct tlb_client_info *entry, int save_load)
{
	if (save_load) {
		entry->load_history = 1 + entry->tx_bytes /
				      BOND_TLB_REBALANCE_INTERVAL;
		entry->tx_bytes = 0;
	}

	entry->tx_slave = NULL;
	entry->next = TLB_NULL_INDEX;
	entry->prev = TLB_NULL_INDEX;
}
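/* A note on load_history: when save_load is set, the bucket keeps
 * 1 + tx_bytes / BOND_TLB_REBALANCE_INTERVAL as its load estimate, so the
 * byte count of the last interval survives the rebalance (the +1 keeps an
 * active but light flow from rounding down to nothing).  This value is
 * what __tlb_choose_channel() later adds to the chosen slave's load.
 */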
static inline void tlb_init_slave(struct slave *slave)
{
	SLAVE_TLB_INFO(slave).load = 0;
	SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX;
}
/* Caller must hold bond lock for read, BH disabled */
static void __tlb_clear_slave(struct bonding *bond, struct slave *slave,
			      int save_load)
{
	struct tlb_client_info *tx_hash_table;
	u32 index;

	/* clear slave from tx_hashtbl */
	tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl;

	/* skip this if we've already freed the tx hash table */
	if (tx_hash_table) {
		index = SLAVE_TLB_INFO(slave).head;
		while (index != TLB_NULL_INDEX) {
			u32 next_index = tx_hash_table[index].next;
			tlb_init_table_entry(&tx_hash_table[index], save_load);
			index = next_index;
		}
	}

	tlb_init_slave(slave);
}
/* Caller must hold bond lock for read */
static void tlb_clear_slave(struct bonding *bond, struct slave *slave,
			    int save_load)
{
	_lock_tx_hashtbl_bh(bond);
	__tlb_clear_slave(bond, slave, save_load);
	_unlock_tx_hashtbl_bh(bond);
}
/* Must be called before starting the monitor timer */
static int tlb_initialize(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	int size = TLB_HASH_TABLE_SIZE * sizeof(struct tlb_client_info);
	struct tlb_client_info *new_hashtbl;
	int i;

	new_hashtbl = kzalloc(size, GFP_KERNEL);
	if (!new_hashtbl)
		return -1;

	_lock_tx_hashtbl_bh(bond);

	bond_info->tx_hashtbl = new_hashtbl;

	for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
		tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
	}

	_unlock_tx_hashtbl_bh(bond);

	return 0;
}
/* Must be called only after all slaves have been released */
static void tlb_deinitialize(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));

	_lock_tx_hashtbl_bh(bond);

	kfree(bond_info->tx_hashtbl);
	bond_info->tx_hashtbl = NULL;

	_unlock_tx_hashtbl_bh(bond);
}
static long long compute_gap(struct slave *slave)
{
	return (s64) (slave->speed << 20) - /* Convert to Megabit per sec */
	       (s64) (SLAVE_TLB_INFO(slave).load << 3); /* Bytes to bits */
}
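/* The gap is expressed in bits: slave->speed is in Mbit/s and is scaled by
 * 2^20, while the accumulated TLB load is in bytes and is multiplied by 8.
 * tlb_get_least_loaded_slave() below treats the slave with the largest
 * remaining gap as the least loaded one.
 */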
/* Caller must hold bond lock for read */
static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
{
	struct slave *slave, *least_loaded;
	long long max_gap;

	least_loaded = NULL;
	max_gap = LLONG_MIN;

	/* Find the slave with the largest gap */
	bond_for_each_slave(bond, slave) {
		if (SLAVE_IS_OK(slave)) {
			long long gap = compute_gap(slave);

			if (max_gap < gap) {
				least_loaded = slave;
				max_gap = gap;
			}
		}
	}

	return least_loaded;
}
static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
					  u32 skb_len)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct tlb_client_info *hash_table;
	struct slave *assigned_slave;

	hash_table = bond_info->tx_hashtbl;
	assigned_slave = hash_table[hash_index].tx_slave;
	if (!assigned_slave) {
		assigned_slave = tlb_get_least_loaded_slave(bond);

		if (assigned_slave) {
			struct tlb_slave_info *slave_info =
				&(SLAVE_TLB_INFO(assigned_slave));
			u32 next_index = slave_info->head;

			hash_table[hash_index].tx_slave = assigned_slave;
			hash_table[hash_index].next = next_index;
			hash_table[hash_index].prev = TLB_NULL_INDEX;

			if (next_index != TLB_NULL_INDEX) {
				hash_table[next_index].prev = hash_index;
			}

			slave_info->head = hash_index;
			slave_info->load +=
				hash_table[hash_index].load_history;
		}
	}

	if (assigned_slave) {
		hash_table[hash_index].tx_bytes += skb_len;
	}

	return assigned_slave;
}
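/* Besides the hash bucket itself, each bucket that gets bound to a slave is
 * linked into that slave's own chain (slave_info->head plus the next/prev
 * indices set above).  __tlb_clear_slave() walks this chain to detach all
 * of a slave's buckets without scanning the whole table.
 */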
/* Caller must hold bond lock for read */
static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index,
					u32 skb_len)
{
	struct slave *tx_slave;
	/*
	 * We don't need to disable softirq here, because
	 * tlb_choose_channel() is only called by bond_alb_xmit()
	 * which already has softirq disabled.
	 */
	_lock_tx_hashtbl(bond);
	tx_slave = __tlb_choose_channel(bond, hash_index, skb_len);
	_unlock_tx_hashtbl(bond);

	return tx_slave;
}
/*********************** rlb specific functions ***************************/

static inline void _lock_rx_hashtbl_bh(struct bonding *bond)
{
	spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}

static inline void _unlock_rx_hashtbl_bh(struct bonding *bond)
{
	spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}

static inline void _lock_rx_hashtbl(struct bonding *bond)
{
	spin_lock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}

static inline void _unlock_rx_hashtbl(struct bonding *bond)
{
	spin_unlock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}
/* when an ARP REPLY is received from a client update its info
 * in the rx_hashtbl
 */
static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *client_info;
	u32 hash_index;

	_lock_rx_hashtbl_bh(bond);

	hash_index = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
	client_info = &(bond_info->rx_hashtbl[hash_index]);

	if ((client_info->assigned) &&
	    (client_info->ip_src == arp->ip_dst) &&
	    (client_info->ip_dst == arp->ip_src) &&
	    (!ether_addr_equal_64bits(client_info->mac_dst, arp->mac_src))) {
		/* update the client's MAC address */
		memcpy(client_info->mac_dst, arp->mac_src, ETH_ALEN);
		client_info->ntt = 1;
		bond_info->rx_ntt = 1;
	}

	_unlock_rx_hashtbl_bh(bond);
}
static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
			struct slave *slave)
{
	struct arp_pkt *arp, _arp;

	if (skb->protocol != cpu_to_be16(ETH_P_ARP))
		goto out;

	arp = skb_header_pointer(skb, 0, sizeof(_arp), &_arp);
	if (!arp)
		goto out;

	/* We received an ARP from arp->ip_src.
	 * We might have used this IP address previously (on the bonding host
	 * itself or on a system that is bridged together with the bond).
	 * However, if arp->mac_src is different than what is stored in
	 * rx_hashtbl, some other host is now using the IP and we must prevent
	 * sending out client updates with this IP address and the old MAC
	 * address.
	 * Clean up all hash table entries that have this address as ip_src but
	 * have a different mac_src.
	 */
	rlb_purge_src_ip(bond, arp);

	if (arp->op_code == htons(ARPOP_REPLY)) {
		/* update rx hash table for this ARP */
		rlb_update_entry_from_arp(bond, arp);
		pr_debug("Server received an ARP Reply from client\n");
	}
out:
	return RX_HANDLER_ANOTHER;
}
/* Caller must hold bond lock for read */
static struct slave *rlb_next_rx_slave(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct slave *rx_slave, *slave, *start_at;
	int i = 0;

	if (bond_info->next_rx_slave)
		start_at = bond_info->next_rx_slave;
	else
		start_at = bond_first_slave(bond);

	rx_slave = NULL;

	bond_for_each_slave_from(bond, slave, i, start_at) {
		if (SLAVE_IS_OK(slave)) {
			if (!rx_slave) {
				rx_slave = slave;
			} else if (slave->speed > rx_slave->speed) {
				rx_slave = slave;
			}
		}
	}

	if (rx_slave) {
		slave = bond_next_slave(bond, rx_slave);
		bond_info->next_rx_slave = slave;
	}

	return rx_slave;
}
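/* The rx slave chosen above is the fastest usable slave found while walking
 * the list from next_rx_slave; next_rx_slave is then advanced past the
 * chosen slave so that successive calls rotate new clients across slaves of
 * equal speed.
 */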
/* teach the switch the mac of a disabled slave
 * on the primary for fault tolerance
 *
 * Caller must hold bond->curr_slave_lock for write or bond lock for write
 */
static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
{
	if (!bond->curr_active_slave) {
		return;
	}

	if (!bond->alb_info.primary_is_promisc) {
		if (!dev_set_promiscuity(bond->curr_active_slave->dev, 1))
			bond->alb_info.primary_is_promisc = 1;
		else
			bond->alb_info.primary_is_promisc = 0;
	}

	bond->alb_info.rlb_promisc_timeout_counter = 0;

	alb_send_learning_packets(bond->curr_active_slave, addr);
}
/* slave being removed should not be active at this point
 *
 * Caller must hold bond lock for read
 */
static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *rx_hash_table;
	u32 index, next_index;

	/* clear slave from rx_hashtbl */
	_lock_rx_hashtbl_bh(bond);

	rx_hash_table = bond_info->rx_hashtbl;
	index = bond_info->rx_hashtbl_used_head;
	for (; index != RLB_NULL_INDEX; index = next_index) {
		next_index = rx_hash_table[index].used_next;
		if (rx_hash_table[index].slave == slave) {
			struct slave *assigned_slave = rlb_next_rx_slave(bond);

			if (assigned_slave) {
				rx_hash_table[index].slave = assigned_slave;
				if (!ether_addr_equal_64bits(rx_hash_table[index].mac_dst,
							     mac_bcast)) {
					bond_info->rx_hashtbl[index].ntt = 1;
					bond_info->rx_ntt = 1;
					/* A slave has been removed from the
					 * table because it is either disabled
					 * or being released. We must retry the
					 * update to avoid clients from not
					 * being updated & disconnecting when
					 * there is stress
					 */
					bond_info->rlb_update_retry_counter =
						RLB_UPDATE_RETRY;
				}
			} else {  /* there is no active slave */
				rx_hash_table[index].slave = NULL;
			}
		}
	}

	_unlock_rx_hashtbl_bh(bond);

	write_lock_bh(&bond->curr_slave_lock);

	if (slave != bond->curr_active_slave) {
		rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
	}

	write_unlock_bh(&bond->curr_slave_lock);
}
static void rlb_update_client(struct rlb_client_info *client_info)
{
	int i;

	if (!client_info->slave) {
		return;
	}

	for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
		struct sk_buff *skb;

		skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
				 client_info->ip_dst,
				 client_info->slave->dev,
				 client_info->ip_src,
				 client_info->mac_dst,
				 client_info->slave->dev->dev_addr,
				 client_info->mac_dst);
		if (!skb) {
			pr_err("%s: Error: failed to create an ARP packet\n",
			       client_info->slave->bond->dev->name);
			continue;
		}

		skb->dev = client_info->slave->dev;

		if (client_info->vlan_id) {
			skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id);
			if (!skb) {
				pr_err("%s: Error: failed to insert VLAN tag\n",
				       client_info->slave->bond->dev->name);
				continue;
			}
		}

		arp_xmit(skb);
	}
}
529 static void rlb_update_rx_clients(struct bonding
*bond
)
531 struct alb_bond_info
*bond_info
= &(BOND_ALB_INFO(bond
));
532 struct rlb_client_info
*client_info
;
535 _lock_rx_hashtbl_bh(bond
);
537 hash_index
= bond_info
->rx_hashtbl_used_head
;
538 for (; hash_index
!= RLB_NULL_INDEX
;
539 hash_index
= client_info
->used_next
) {
540 client_info
= &(bond_info
->rx_hashtbl
[hash_index
]);
541 if (client_info
->ntt
) {
542 rlb_update_client(client_info
);
543 if (bond_info
->rlb_update_retry_counter
== 0) {
544 client_info
->ntt
= 0;
549 /* do not update the entries again until this counter is zero so that
550 * not to confuse the clients.
552 bond_info
->rlb_update_delay_counter
= RLB_UPDATE_DELAY
;
554 _unlock_rx_hashtbl_bh(bond
);
/* The slave was assigned a new mac address - update the clients */
static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *slave)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *client_info;
	int ntt = 0;
	u32 hash_index = 0;

	_lock_rx_hashtbl_bh(bond);

	hash_index = bond_info->rx_hashtbl_used_head;
	for (; hash_index != RLB_NULL_INDEX;
	     hash_index = client_info->used_next) {
		client_info = &(bond_info->rx_hashtbl[hash_index]);

		if ((client_info->slave == slave) &&
		    !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
			client_info->ntt = 1;
			ntt = 1;
		}
	}

	/* update the team's flag only after the whole iteration */
	if (ntt) {
		bond_info->rx_ntt = 1;
		bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
	}

	_unlock_rx_hashtbl_bh(bond);
}
/* mark all clients using src_ip to be updated */
static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *client_info;
	u32 hash_index;

	_lock_rx_hashtbl(bond);

	hash_index = bond_info->rx_hashtbl_used_head;
	for (; hash_index != RLB_NULL_INDEX;
	     hash_index = client_info->used_next) {
		client_info = &(bond_info->rx_hashtbl[hash_index]);

		if (!client_info->slave) {
			pr_err("%s: Error: found a client with no channel in the client's hash table\n",
			       bond->dev->name);
			continue;
		}
		/* update all clients using this src_ip, that are not assigned
		 * to the team's address (curr_active_slave) and have a known
		 * unicast mac address.
		 */
		if ((client_info->ip_src == src_ip) &&
		    !ether_addr_equal_64bits(client_info->slave->dev->dev_addr,
					     bond->dev->dev_addr) &&
		    !ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
			client_info->ntt = 1;
			bond_info->rx_ntt = 1;
		}
	}

	_unlock_rx_hashtbl(bond);
}
/* Caller must hold both bond and ptr locks for read */
static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct arp_pkt *arp = arp_pkt(skb);
	struct slave *assigned_slave;
	struct rlb_client_info *client_info;
	u32 hash_index = 0;

	_lock_rx_hashtbl(bond);

	hash_index = _simple_hash((u8 *)&arp->ip_dst, sizeof(arp->ip_dst));
	client_info = &(bond_info->rx_hashtbl[hash_index]);

	if (client_info->assigned) {
		if ((client_info->ip_src == arp->ip_src) &&
		    (client_info->ip_dst == arp->ip_dst)) {
			/* the entry is already assigned to this client */
			if (!ether_addr_equal_64bits(arp->mac_dst, mac_bcast)) {
				/* update mac address from arp */
				memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
			}
			memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN);

			assigned_slave = client_info->slave;
			if (assigned_slave) {
				_unlock_rx_hashtbl(bond);
				return assigned_slave;
			}
		} else {
			/* the entry is already assigned to some other client,
			 * move the old client to primary (curr_active_slave) so
			 * that the new client can be assigned to this entry.
			 */
			if (bond->curr_active_slave &&
			    client_info->slave != bond->curr_active_slave) {
				client_info->slave = bond->curr_active_slave;
				rlb_update_client(client_info);
			}
		}
	}
	/* assign a new slave */
	assigned_slave = rlb_next_rx_slave(bond);

	if (assigned_slave) {
		if (!(client_info->assigned &&
		      client_info->ip_src == arp->ip_src)) {
			/* ip_src is going to be updated,
			 * fix the src hash list
			 */
			u32 hash_src = _simple_hash((u8 *)&arp->ip_src,
						    sizeof(arp->ip_src));
			rlb_src_unlink(bond, hash_index);
			rlb_src_link(bond, hash_src, hash_index);
		}

		client_info->ip_src = arp->ip_src;
		client_info->ip_dst = arp->ip_dst;
		/* arp->mac_dst is broadcast for arp requests.
		 * It will be updated with the client's actual unicast mac
		 * address upon receiving an arp reply.
		 */
		memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
		memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN);
		client_info->slave = assigned_slave;

		if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
			client_info->ntt = 1;
			bond->alb_info.rx_ntt = 1;
		} else {
			client_info->ntt = 0;
		}

		if (vlan_get_tag(skb, &client_info->vlan_id))
			client_info->vlan_id = 0;

		if (!client_info->assigned) {
			u32 prev_tbl_head = bond_info->rx_hashtbl_used_head;
			bond_info->rx_hashtbl_used_head = hash_index;
			client_info->used_next = prev_tbl_head;
			if (prev_tbl_head != RLB_NULL_INDEX) {
				bond_info->rx_hashtbl[prev_tbl_head].used_prev =
					hash_index;
			}
			client_info->assigned = 1;
		}
	}

	_unlock_rx_hashtbl(bond);

	return assigned_slave;
}
/* chooses (and returns) transmit channel for arp reply
 * does not choose channel for other arp types since they are
 * sent on the curr_active_slave
 */
static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
{
	struct arp_pkt *arp = arp_pkt(skb);
	struct slave *tx_slave = NULL;

	/* Don't modify or load balance ARPs that do not originate locally
	 * (e.g., arrive via a bridge).
	 */
	if (!bond_slave_has_mac(bond, arp->mac_src))
		return NULL;

	if (arp->op_code == htons(ARPOP_REPLY)) {
		/* the arp must be sent on the selected
		 * rx channel
		 */
		tx_slave = rlb_choose_channel(skb, bond);
		if (tx_slave) {
			memcpy(arp->mac_src, tx_slave->dev->dev_addr, ETH_ALEN);
		}
		pr_debug("Server sent ARP Reply packet\n");
	} else if (arp->op_code == htons(ARPOP_REQUEST)) {
		/* Create an entry in the rx_hashtbl for this client as a
		 * placeholder.
		 * When the arp reply is received the entry will be updated
		 * with the correct unicast address of the client.
		 */
		rlb_choose_channel(skb, bond);

		/* The ARP reply packets must be delayed so that
		 * they can cancel out the influence of the ARP request.
		 */
		bond->alb_info.rlb_update_delay_counter = RLB_UPDATE_DELAY;

		/* arp requests are broadcast and are sent on the primary
		 * the arp request will collapse all clients on the subnet to
		 * the primary slave. We must register these clients to be
		 * updated with their assigned mac.
		 */
		rlb_req_update_subnet_clients(bond, arp->ip_src);
		pr_debug("Server sent ARP Request packet\n");
	}

	return tx_slave;
}
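/* Summary of the receive load balancing scheme: locally generated ARP
 * replies get their source MAC rewritten to that of the slave picked by
 * rlb_choose_channel(), so different clients learn different slave MACs and
 * spread their traffic towards the bond.  ARP requests stay on the primary,
 * and the clients on that subnet are marked for a delayed update.
 */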
/* Caller must hold bond lock for read */
static void rlb_rebalance(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct slave *assigned_slave;
	struct rlb_client_info *client_info;
	int ntt;
	u32 hash_index;

	_lock_rx_hashtbl_bh(bond);

	ntt = 0;
	hash_index = bond_info->rx_hashtbl_used_head;
	for (; hash_index != RLB_NULL_INDEX;
	     hash_index = client_info->used_next) {
		client_info = &(bond_info->rx_hashtbl[hash_index]);
		assigned_slave = rlb_next_rx_slave(bond);
		if (assigned_slave && (client_info->slave != assigned_slave)) {
			client_info->slave = assigned_slave;
			client_info->ntt = 1;
			ntt = 1;
		}
	}

	/* update the team's flag only after the whole iteration */
	if (ntt) {
		bond_info->rx_ntt = 1;
	}

	_unlock_rx_hashtbl_bh(bond);
}
/* Caller must hold rx_hashtbl lock */
static void rlb_init_table_entry_dst(struct rlb_client_info *entry)
{
	entry->used_next = RLB_NULL_INDEX;
	entry->used_prev = RLB_NULL_INDEX;
	entry->assigned = 0;
	entry->slave = NULL;
	entry->vlan_id = 0;
}

static void rlb_init_table_entry_src(struct rlb_client_info *entry)
{
	entry->src_first = RLB_NULL_INDEX;
	entry->src_prev = RLB_NULL_INDEX;
	entry->src_next = RLB_NULL_INDEX;
}

static void rlb_init_table_entry(struct rlb_client_info *entry)
{
	memset(entry, 0, sizeof(struct rlb_client_info));
	rlb_init_table_entry_dst(entry);
	rlb_init_table_entry_src(entry);
}
static void rlb_delete_table_entry_dst(struct bonding *bond, u32 index)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	u32 next_index = bond_info->rx_hashtbl[index].used_next;
	u32 prev_index = bond_info->rx_hashtbl[index].used_prev;

	if (index == bond_info->rx_hashtbl_used_head)
		bond_info->rx_hashtbl_used_head = next_index;
	if (prev_index != RLB_NULL_INDEX)
		bond_info->rx_hashtbl[prev_index].used_next = next_index;
	if (next_index != RLB_NULL_INDEX)
		bond_info->rx_hashtbl[next_index].used_prev = prev_index;
}
/* unlink a rlb hash table entry from the src list */
static void rlb_src_unlink(struct bonding *bond, u32 index)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	u32 next_index = bond_info->rx_hashtbl[index].src_next;
	u32 prev_index = bond_info->rx_hashtbl[index].src_prev;

	bond_info->rx_hashtbl[index].src_next = RLB_NULL_INDEX;
	bond_info->rx_hashtbl[index].src_prev = RLB_NULL_INDEX;

	if (next_index != RLB_NULL_INDEX)
		bond_info->rx_hashtbl[next_index].src_prev = prev_index;

	if (prev_index == RLB_NULL_INDEX)
		return;

	/* is prev_index pointing to the head of this list? */
	if (bond_info->rx_hashtbl[prev_index].src_first == index)
		bond_info->rx_hashtbl[prev_index].src_first = next_index;
	else
		bond_info->rx_hashtbl[prev_index].src_next = next_index;
}
static void rlb_delete_table_entry(struct bonding *bond, u32 index)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]);

	rlb_delete_table_entry_dst(bond, index);
	rlb_init_table_entry_dst(entry);

	rlb_src_unlink(bond, index);
}
/* add the rx_hashtbl[ip_dst_hash] entry to the list
 * of entries with identical ip_src_hash
 */
static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, u32 ip_dst_hash)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	u32 next;

	bond_info->rx_hashtbl[ip_dst_hash].src_prev = ip_src_hash;
	next = bond_info->rx_hashtbl[ip_src_hash].src_first;
	bond_info->rx_hashtbl[ip_dst_hash].src_next = next;
	if (next != RLB_NULL_INDEX)
		bond_info->rx_hashtbl[next].src_prev = ip_dst_hash;
	bond_info->rx_hashtbl[ip_src_hash].src_first = ip_dst_hash;
}
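/* rx_hashtbl entries are indexed by the hash of ip_dst, but entries sharing
 * the same ip_src hash are additionally chained through src_first/src_next/
 * src_prev.  rlb_purge_src_ip() follows this chain to find every entry that
 * claims a given source IP without scanning the whole table.
 */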
/* deletes all rx_hashtbl entries with arp->ip_src if their mac_src does
 * not match arp->mac_src */
static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	u32 ip_src_hash = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
	u32 index;

	_lock_rx_hashtbl_bh(bond);

	index = bond_info->rx_hashtbl[ip_src_hash].src_first;
	while (index != RLB_NULL_INDEX) {
		struct rlb_client_info *entry = &(bond_info->rx_hashtbl[index]);
		u32 next_index = entry->src_next;

		if (entry->ip_src == arp->ip_src &&
		    !ether_addr_equal_64bits(arp->mac_src, entry->mac_src))
			rlb_delete_table_entry(bond, index);
		index = next_index;
	}
	_unlock_rx_hashtbl_bh(bond);
}
static int rlb_initialize(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct rlb_client_info *new_hashtbl;
	int size = RLB_HASH_TABLE_SIZE * sizeof(struct rlb_client_info);
	int i;

	new_hashtbl = kmalloc(size, GFP_KERNEL);
	if (!new_hashtbl)
		return -1;

	_lock_rx_hashtbl_bh(bond);

	bond_info->rx_hashtbl = new_hashtbl;

	bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;

	for (i = 0; i < RLB_HASH_TABLE_SIZE; i++) {
		rlb_init_table_entry(bond_info->rx_hashtbl + i);
	}

	_unlock_rx_hashtbl_bh(bond);

	/* register to receive ARPs */
	bond->recv_probe = rlb_arp_recv;

	return 0;
}
static void rlb_deinitialize(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));

	_lock_rx_hashtbl_bh(bond);

	kfree(bond_info->rx_hashtbl);
	bond_info->rx_hashtbl = NULL;
	bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;

	_unlock_rx_hashtbl_bh(bond);
}
static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	u32 curr_index;

	_lock_rx_hashtbl_bh(bond);

	curr_index = bond_info->rx_hashtbl_used_head;
	while (curr_index != RLB_NULL_INDEX) {
		struct rlb_client_info *curr = &(bond_info->rx_hashtbl[curr_index]);
		u32 next_index = bond_info->rx_hashtbl[curr_index].used_next;

		if (curr->vlan_id == vlan_id)
			rlb_delete_table_entry(bond, curr_index);

		curr_index = next_index;
	}

	_unlock_rx_hashtbl_bh(bond);
}
/*********************** tlb/rlb shared functions *********************/

static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
			    u16 vid)
{
	struct learning_pkt pkt;
	struct sk_buff *skb;
	int size = sizeof(struct learning_pkt);
	char *data;

	memset(&pkt, 0, size);
	memcpy(pkt.mac_dst, mac_addr, ETH_ALEN);
	memcpy(pkt.mac_src, mac_addr, ETH_ALEN);
	pkt.type = cpu_to_be16(ETH_P_LOOP);

	skb = dev_alloc_skb(size);
	if (!skb)
		return;

	data = skb_put(skb, size);
	memcpy(data, &pkt, size);

	skb_reset_mac_header(skb);
	skb->network_header = skb->mac_header + ETH_HLEN;
	skb->protocol = pkt.type;
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = slave->dev;

	if (vid) {
		skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
		if (!skb) {
			pr_err("%s: Error: failed to insert VLAN tag\n",
			       slave->bond->dev->name);
			return;
		}
	}

	dev_queue_xmit(skb);
}
static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
{
	struct bonding *bond = bond_get_bond_by_slave(slave);
	struct net_device *upper;
	struct list_head *iter;

	/* send untagged */
	alb_send_lp_vid(slave, mac_addr, 0);

	/* loop through vlans and send one packet for each */
	rcu_read_lock();
	netdev_for_each_upper_dev_rcu(bond->dev, upper, iter) {
		if (upper->priv_flags & IFF_802_1Q_VLAN)
			alb_send_lp_vid(slave, mac_addr,
					vlan_dev_vlan_id(upper));
	}
	rcu_read_unlock();
}
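/* Learning packets are minimal ETH_P_LOOP frames carrying the given MAC as
 * both source and destination; their only purpose is to make the attached
 * switch re-learn which port that MAC currently lives behind.  One is sent
 * untagged and one per VLAN device stacked on top of the bond.
 */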
static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
{
	struct net_device *dev = slave->dev;
	struct sockaddr s_addr;

	if (slave->bond->params.mode == BOND_MODE_TLB) {
		memcpy(dev->dev_addr, addr, dev->addr_len);
		return 0;
	}

	/* for rlb each slave must have a unique hw mac address so that
	 * each slave will receive packets destined to a different mac
	 */
	memcpy(s_addr.sa_data, addr, dev->addr_len);
	s_addr.sa_family = dev->type;
	if (dev_set_mac_address(dev, &s_addr)) {
		pr_err("%s: Error: dev_set_mac_address of dev %s failed!\n"
		       "ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n",
		       slave->bond->dev->name, dev->name);
		return -EOPNOTSUPP;
	}
	return 0;
}
/*
 * Swap MAC addresses between two slaves.
 *
 * Called with RTNL held, and no other locks.
 */
static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
{
	u8 tmp_mac_addr[ETH_ALEN];

	memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
	alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr);
	alb_set_slave_mac_addr(slave2, tmp_mac_addr);
}
/*
 * Send learning packets after MAC address swap.
 *
 * Called with RTNL and no other locks
 */
static void alb_fasten_mac_swap(struct bonding *bond, struct slave *slave1,
				struct slave *slave2)
{
	int slaves_state_differ = (SLAVE_IS_OK(slave1) != SLAVE_IS_OK(slave2));
	struct slave *disabled_slave = NULL;

	ASSERT_RTNL();

	/* fasten the change in the switch */
	if (SLAVE_IS_OK(slave1)) {
		alb_send_learning_packets(slave1, slave1->dev->dev_addr);
		if (bond->alb_info.rlb_enabled) {
			/* inform the clients that the mac address
			 * has changed
			 */
			rlb_req_update_slave_clients(bond, slave1);
		}
	} else {
		disabled_slave = slave1;
	}

	if (SLAVE_IS_OK(slave2)) {
		alb_send_learning_packets(slave2, slave2->dev->dev_addr);
		if (bond->alb_info.rlb_enabled) {
			/* inform the clients that the mac address
			 * has changed
			 */
			rlb_req_update_slave_clients(bond, slave2);
		}
	} else {
		disabled_slave = slave2;
	}

	if (bond->alb_info.rlb_enabled && slaves_state_differ) {
		/* A disabled slave was assigned an active mac addr */
		rlb_teach_disabled_mac_on_primary(bond,
						  disabled_slave->dev->dev_addr);
	}
}
/**
 * alb_change_hw_addr_on_detach
 * @bond: bonding we're working on
 * @slave: the slave that was just detached
 *
 * We assume that @slave was already detached from the slave list.
 *
 * If @slave's permanent hw address is different both from its current
 * address and from @bond's address, then somewhere in the bond there's
 * a slave that has @slave's permanent address as its current address.
 * We'll make sure that that slave no longer uses @slave's permanent address.
 *
 * Caller must hold RTNL and no other locks
 */
static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *slave)
{
	int perm_curr_diff;
	int perm_bond_diff;
	struct slave *found_slave;

	perm_curr_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
						  slave->dev->dev_addr);
	perm_bond_diff = !ether_addr_equal_64bits(slave->perm_hwaddr,
						  bond->dev->dev_addr);

	if (perm_curr_diff && perm_bond_diff) {
		found_slave = bond_slave_has_mac(bond, slave->perm_hwaddr);

		if (found_slave) {
			/* locking: needs RTNL and nothing else */
			alb_swap_mac_addr(slave, found_slave);
			alb_fasten_mac_swap(bond, slave, found_slave);
		}
	}
}
/**
 * alb_handle_addr_collision_on_attach
 * @bond: bonding we're working on
 * @slave: the slave that was just attached
 *
 * checks uniqueness of slave's mac address and handles the case where the
 * new slave uses the bond's mac address.
 *
 * If the permanent hw address of @slave is @bond's hw address, we need to
 * find a different hw address to give @slave, that isn't in use by any other
 * slave in the bond. This address must be, of course, one of the permanent
 * addresses of the other slaves.
 *
 * We go over the slave list, and for each slave we compare its
 * permanent hw address with the current address of all the other slaves.
 * If no match was found, then we've found a slave with a permanent address
 * that isn't used by any other slave in the bond, so we can assign it to
 * @slave.
 *
 * assumption: this function is called before @slave is attached to the
 * slave list.
 */
static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
{
	struct slave *tmp_slave1, *free_mac_slave = NULL;
	struct slave *has_bond_addr = bond->curr_active_slave;

	if (list_empty(&bond->slave_list)) {
		/* this is the first slave */
		return 0;
	}

	/* if slave's mac address differs from bond's mac address
	 * check uniqueness of slave's mac address against the other
	 * slaves in the bond.
	 */
	if (!ether_addr_equal_64bits(slave->perm_hwaddr, bond->dev->dev_addr)) {
		if (!bond_slave_has_mac(bond, slave->dev->dev_addr))
			return 0;

		/* Try setting slave mac to bond address and fall-through
		 * to code handling that situation below...
		 */
		alb_set_slave_mac_addr(slave, bond->dev->dev_addr);
	}

	/* The slave's address is equal to the address of the bond.
	 * Search for a spare address in the bond for this slave.
	 */
	bond_for_each_slave(bond, tmp_slave1) {
		if (!bond_slave_has_mac(bond, tmp_slave1->perm_hwaddr)) {
			/* no slave has tmp_slave1's perm addr
			 * as its curr addr
			 */
			free_mac_slave = tmp_slave1;
			break;
		}

		if (!has_bond_addr) {
			if (ether_addr_equal_64bits(tmp_slave1->dev->dev_addr,
						    bond->dev->dev_addr)) {
				has_bond_addr = tmp_slave1;
			}
		}
	}

	if (free_mac_slave) {
		alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);

		pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
			   bond->dev->name, slave->dev->name,
			   free_mac_slave->dev->name);

	} else if (has_bond_addr) {
		pr_err("%s: Error: the hw address of slave %s is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n",
		       bond->dev->name, slave->dev->name);
		return -EFAULT;
	}

	return 0;
}
/*
 * alb_set_mac_address
 *
 * In TLB mode all slaves are configured to the bond's hw address, but set
 * their dev_addr field to different addresses (based on their permanent hw
 * addresses).
 *
 * For each slave, this function sets the interface to the new address and then
 * changes its dev_addr field to its previous value.
 *
 * Unwinding assumes bond's mac address has not yet changed.
 */
static int alb_set_mac_address(struct bonding *bond, void *addr)
{
	char tmp_addr[ETH_ALEN];
	struct slave *slave;
	struct sockaddr sa;
	int res;

	if (bond->alb_info.rlb_enabled)
		return 0;

	bond_for_each_slave(bond, slave) {
		/* save net_device's current hw address */
		memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);

		res = dev_set_mac_address(slave->dev, addr);

		/* restore net_device's hw address */
		memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);

		if (res)
			goto unwind;
	}

	return 0;

unwind:
	memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
	sa.sa_family = bond->dev->type;

	/* unwind from head to the slave that failed */
	bond_for_each_slave_continue_reverse(bond, slave) {
		memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
		dev_set_mac_address(slave->dev, &sa);
		memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
	}

	return res;
}
/************************ exported alb functions ************************/

int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
{
	int res;

	res = tlb_initialize(bond);
	if (res)
		return res;

	if (rlb_enabled) {
		bond->alb_info.rlb_enabled = 1;
		/* initialize rlb */
		res = rlb_initialize(bond);
		if (res) {
			tlb_deinitialize(bond);
			return res;
		}
	} else {
		bond->alb_info.rlb_enabled = 0;
	}

	return 0;
}
void bond_alb_deinitialize(struct bonding *bond)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));

	tlb_deinitialize(bond);

	if (bond_info->rlb_enabled) {
		rlb_deinitialize(bond);
	}
}
int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct ethhdr *eth_data;
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct slave *tx_slave = NULL;
	static const __be32 ip_bcast = htonl(0xffffffff);
	int hash_size = 0;
	int do_tx_balance = 1;
	u32 hash_index = 0;
	const u8 *hash_start = NULL;
	int res = 1;
	struct ipv6hdr *ip6hdr;

	skb_reset_mac_header(skb);
	eth_data = eth_hdr(skb);

	/* make sure that the curr_active_slave does not change during tx
	 */
	read_lock(&bond->lock);
	read_lock(&bond->curr_slave_lock);

	switch (ntohs(skb->protocol)) {
	case ETH_P_IP: {
		const struct iphdr *iph = ip_hdr(skb);

		if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast) ||
		    (iph->daddr == ip_bcast) ||
		    (iph->protocol == IPPROTO_IGMP)) {
			do_tx_balance = 0;
			break;
		}
		hash_start = (char *)&(iph->daddr);
		hash_size = sizeof(iph->daddr);
	}
		break;
	case ETH_P_IPV6:
		/* IPv6 doesn't really use broadcast mac address, but leave
		 * that here just in case.
		 */
		if (ether_addr_equal_64bits(eth_data->h_dest, mac_bcast)) {
			do_tx_balance = 0;
			break;
		}

		/* IPv6 uses all-nodes multicast as an equivalent to
		 * broadcasts in IPv4.
		 */
		if (ether_addr_equal_64bits(eth_data->h_dest, mac_v6_allmcast)) {
			do_tx_balance = 0;
			break;
		}

		/* Additionally, DAD probes should not be tx-balanced as that
		 * will lead to false positives for duplicate addresses and
		 * prevent address configuration from working.
		 */
		ip6hdr = ipv6_hdr(skb);
		if (ipv6_addr_any(&ip6hdr->saddr)) {
			do_tx_balance = 0;
			break;
		}

		hash_start = (char *)&(ipv6_hdr(skb)->daddr);
		hash_size = sizeof(ipv6_hdr(skb)->daddr);
		break;
	case ETH_P_IPX:
		if (ipx_hdr(skb)->ipx_checksum != IPX_NO_CHECKSUM) {
			/* something is wrong with this packet */
			do_tx_balance = 0;
			break;
		}

		if (ipx_hdr(skb)->ipx_type != IPX_TYPE_NCP) {
			/* The only protocol worth balancing in
			 * this family since it has an "ARP" like
			 * mechanism
			 */
			do_tx_balance = 0;
			break;
		}

		hash_start = (char *)eth_data->h_dest;
		hash_size = ETH_ALEN;
		break;
	case ETH_P_ARP:
		do_tx_balance = 0;
		if (bond_info->rlb_enabled) {
			tx_slave = rlb_arp_xmit(skb, bond);
		}
		break;
	default:
		do_tx_balance = 0;
		break;
	}

	if (do_tx_balance) {
		hash_index = _simple_hash(hash_start, hash_size);
		tx_slave = tlb_choose_channel(bond, hash_index, skb->len);
	}

	if (!tx_slave) {
		/* unbalanced or unassigned, send through primary */
		tx_slave = bond->curr_active_slave;
		bond_info->unbalanced_load += skb->len;
	}

	if (tx_slave && SLAVE_IS_OK(tx_slave)) {
		if (tx_slave != bond->curr_active_slave) {
			memcpy(eth_data->h_source,
			       tx_slave->dev->dev_addr,
			       ETH_ALEN);
		}

		res = bond_dev_queue_xmit(bond, skb, tx_slave->dev);
	} else {
		if (tx_slave) {
			_lock_tx_hashtbl(bond);
			__tlb_clear_slave(bond, tx_slave, 0);
			_unlock_tx_hashtbl(bond);
		}
	}

	read_unlock(&bond->curr_slave_lock);
	read_unlock(&bond->lock);

	if (res) {
		/* no suitable interface, frame not sent */
		dev_kfree_skb_any(skb);
	}

	return NETDEV_TX_OK;
}
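/* Only IPv4, IPv6 and IPX frames are tx-balanced above: the destination
 * address (the destination MAC for IPX) is hashed into the TLB table.
 * Broadcasts, IGMP, DAD probes and unknown protocols go out through
 * curr_active_slave and are accounted as unbalanced load, while ARP is
 * handed to rlb_arp_xmit() when RLB is enabled.
 */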
void bond_alb_monitor(struct work_struct *work)
{
	struct bonding *bond = container_of(work, struct bonding,
					    alb_work.work);
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
	struct slave *slave;

	read_lock(&bond->lock);

	if (list_empty(&bond->slave_list)) {
		bond_info->tx_rebalance_counter = 0;
		bond_info->lp_counter = 0;
		goto re_arm;
	}

	bond_info->tx_rebalance_counter++;
	bond_info->lp_counter++;

	/* send learning packets */
	if (bond_info->lp_counter >= BOND_ALB_LP_TICKS(bond)) {
		/* change of curr_active_slave involves swapping of mac addresses.
		 * In order to avoid this swapping from happening while
		 * sending the learning packets, the curr_slave_lock must be held for
		 * read.
		 */
		read_lock(&bond->curr_slave_lock);

		bond_for_each_slave(bond, slave)
			alb_send_learning_packets(slave, slave->dev->dev_addr);

		read_unlock(&bond->curr_slave_lock);

		bond_info->lp_counter = 0;
	}

	/* rebalance tx traffic */
	if (bond_info->tx_rebalance_counter >= BOND_TLB_REBALANCE_TICKS) {

		read_lock(&bond->curr_slave_lock);

		bond_for_each_slave(bond, slave) {
			tlb_clear_slave(bond, slave, 1);
			if (slave == bond->curr_active_slave) {
				SLAVE_TLB_INFO(slave).load =
					bond_info->unbalanced_load /
					BOND_TLB_REBALANCE_INTERVAL;
				bond_info->unbalanced_load = 0;
			}
		}

		read_unlock(&bond->curr_slave_lock);

		bond_info->tx_rebalance_counter = 0;
	}

	/* handle rlb stuff */
	if (bond_info->rlb_enabled) {
		if (bond_info->primary_is_promisc &&
		    (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) {

			/*
			 * dev_set_promiscuity requires rtnl and
			 * nothing else. Avoid race with bond_close.
			 */
			read_unlock(&bond->lock);
			if (!rtnl_trylock()) {
				read_lock(&bond->lock);
				goto re_arm;
			}

			bond_info->rlb_promisc_timeout_counter = 0;

			/* If the primary was set to promiscuous mode
			 * because a slave was disabled then
			 * it can now leave promiscuous mode.
			 */
			dev_set_promiscuity(bond->curr_active_slave->dev, -1);
			bond_info->primary_is_promisc = 0;

			rtnl_unlock();
			read_lock(&bond->lock);
		}

		if (bond_info->rlb_rebalance) {
			bond_info->rlb_rebalance = 0;
			rlb_rebalance(bond);
		}

		/* check if clients need updating */
		if (bond_info->rx_ntt) {
			if (bond_info->rlb_update_delay_counter) {
				--bond_info->rlb_update_delay_counter;
			} else {
				rlb_update_rx_clients(bond);
				if (bond_info->rlb_update_retry_counter) {
					--bond_info->rlb_update_retry_counter;
				} else {
					bond_info->rx_ntt = 0;
				}
			}
		}
	}

re_arm:
	queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks);

	read_unlock(&bond->lock);
}
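/* bond_alb_monitor() re-arms itself every alb_delta_in_ticks jiffies and
 * drives all periodic work off that tick: learning packets, the TLB
 * rebalance, dropping the primary out of promiscuous mode, RLB rebalances
 * and the delayed/retried client updates.
 */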
/* assumption: called before the slave is attached to the bond
 * and not locked by the bond lock
 */
int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
{
	int res;

	res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr);
	if (res)
		return res;

	res = alb_handle_addr_collision_on_attach(bond, slave);
	if (res)
		return res;

	tlb_init_slave(slave);

	/* order a rebalance ASAP */
	bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;

	if (bond->alb_info.rlb_enabled) {
		bond->alb_info.rlb_rebalance = 1;
	}

	return 0;
}
/*
 * Remove slave from tlb and rlb hash tables, and fix up MAC addresses
 * if necessary.
 *
 * Caller must hold RTNL and no other locks
 */
void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
{
	if (!list_empty(&bond->slave_list))
		alb_change_hw_addr_on_detach(bond, slave);

	tlb_clear_slave(bond, slave, 0);

	if (bond->alb_info.rlb_enabled) {
		bond->alb_info.next_rx_slave = NULL;
		rlb_clear_slave(bond, slave);
	}
}
/* Caller must hold bond lock for read */
void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char link)
{
	struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));

	if (link == BOND_LINK_DOWN) {
		tlb_clear_slave(bond, slave, 0);
		if (bond->alb_info.rlb_enabled) {
			rlb_clear_slave(bond, slave);
		}
	} else if (link == BOND_LINK_UP) {
		/* order a rebalance ASAP */
		bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
		if (bond->alb_info.rlb_enabled) {
			bond->alb_info.rlb_rebalance = 1;
			/* If the updelay module parameter is smaller than the
			 * forwarding delay of the switch the rebalance will
			 * not work because the rebalance arp replies will
			 * not be forwarded to the clients.
			 */
		}
	}
}
/**
 * bond_alb_handle_active_change - assign new curr_active_slave
 * @bond: our bonding struct
 * @new_slave: new slave to assign
 *
 * Set the bond->curr_active_slave to @new_slave and handle
 * mac address swapping and promiscuity changes as needed.
 *
 * If new_slave is NULL, caller must hold curr_slave_lock or
 * bond->lock for write.
 *
 * If new_slave is not NULL, caller must hold RTNL, bond->lock for
 * read and curr_slave_lock for write.  Processing here may sleep, so
 * no other locks may be held.
 */
void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave)
	__releases(&bond->curr_slave_lock)
	__releases(&bond->lock)
	__acquires(&bond->lock)
	__acquires(&bond->curr_slave_lock)
{
	struct slave *swap_slave;

	if (bond->curr_active_slave == new_slave)
		return;

	if (bond->curr_active_slave && bond->alb_info.primary_is_promisc) {
		dev_set_promiscuity(bond->curr_active_slave->dev, -1);
		bond->alb_info.primary_is_promisc = 0;
		bond->alb_info.rlb_promisc_timeout_counter = 0;
	}

	swap_slave = bond->curr_active_slave;
	rcu_assign_pointer(bond->curr_active_slave, new_slave);

	if (!new_slave || list_empty(&bond->slave_list))
		return;

	/* set the new curr_active_slave to the bonds mac address
	 * i.e. swap mac addresses of old curr_active_slave and new curr_active_slave
	 */
	if (!swap_slave)
		swap_slave = bond_slave_has_mac(bond, bond->dev->dev_addr);

	/*
	 * Arrange for swap_slave and new_slave to temporarily be
	 * ignored so we can mess with their MAC addresses without
	 * fear of interference from transmit activity.
	 */
	if (swap_slave)
		tlb_clear_slave(bond, swap_slave, 1);
	tlb_clear_slave(bond, new_slave, 1);

	write_unlock_bh(&bond->curr_slave_lock);
	read_unlock(&bond->lock);

	ASSERT_RTNL();

	/* curr_active_slave must be set before calling alb_swap_mac_addr */
	if (swap_slave) {
		/* swap mac address */
		alb_swap_mac_addr(swap_slave, new_slave);
		alb_fasten_mac_swap(bond, swap_slave, new_slave);
		read_lock(&bond->lock);
	} else {
		/* set the new_slave to the bond mac address */
		alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
		read_lock(&bond->lock);
		alb_send_learning_packets(new_slave, bond->dev->dev_addr);
	}

	write_lock_bh(&bond->curr_slave_lock);
}
int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
	__acquires(&bond->lock)
	__releases(&bond->lock)
{
	struct bonding *bond = netdev_priv(bond_dev);
	struct sockaddr *sa = addr;
	struct slave *swap_slave;
	int res;

	if (!is_valid_ether_addr(sa->sa_data)) {
		return -EADDRNOTAVAIL;
	}

	res = alb_set_mac_address(bond, addr);
	if (res) {
		return res;
	}

	memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len);

	/* If there is no curr_active_slave there is nothing else to do.
	 * Otherwise we'll need to pass the new address to it and handle
	 * duplications.
	 */
	if (!bond->curr_active_slave) {
		return 0;
	}

	swap_slave = bond_slave_has_mac(bond, bond_dev->dev_addr);

	if (swap_slave) {
		alb_swap_mac_addr(swap_slave, bond->curr_active_slave);
		alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
	} else {
		alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);

		read_lock(&bond->lock);
		alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
		if (bond->alb_info.rlb_enabled) {
			/* inform clients mac address has changed */
			rlb_req_update_slave_clients(bond, bond->curr_active_slave);
		}
		read_unlock(&bond->lock);
	}

	return 0;
}
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
{
	if (bond->alb_info.rlb_enabled) {
		rlb_clear_vlan(bond, vlan_id);
	}
}