/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author: Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"
/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2
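
/*
 * Rough sizing sketch (my reading of mesh_table_grow() and the add paths
 * below): with the initial 1 << INIT_PATHS_SIZE_ORDER = 4 buckets, a grow
 * of the table is requested once it holds MEAN_CHAIN_LEN * 4 = 8 entries;
 * after doubling to 8 buckets the next grow triggers at 16 entries, and so
 * on, keeping the average chain per bucket at or below MEAN_CHAIN_LEN.
 */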
static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}
struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};
static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;
int mpp_paths_generation;
/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups). Adding or removing nodes requires we take the read
 * lock or we risk operating on an old table. The write lock is only needed
 * when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);
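
/*
 * Sketch of the resulting locking pattern (mirrors mesh_path_add() and
 * table_path_del() below); adding or removing an entry still serializes on
 * the per-bucket spinlock, only table growth takes the write lock:
 *
 *	read_lock_bh(&pathtbl_resize_lock);
 *	tbl = resize_dereference_mesh_paths();
 *	spin_lock(&tbl->hashwlock[hash_idx]);
 *	... add or remove a node in bucket hash_idx ...
 *	spin_unlock(&tbl->hashwlock[hash_idx]);
 *	read_unlock_bh(&pathtbl_resize_lock);
 */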
static inline struct mesh_table *resize_dereference_paths(
	struct mesh_table __rcu *table)
{
	return rcu_dereference_protected(table,
			lockdep_is_held(&pathtbl_resize_lock));
}
static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return resize_dereference_paths(mesh_paths);
}
static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return resize_dereference_paths(mpp_paths);
}
/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)
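
/*
 * Typical usage (a sketch modelled on mesh_path_lookup_by_idx() below):
 * dereference the table into a local variable once, then walk it inside an
 * RCU read-side section.
 *
 *	struct mesh_table *tbl = rcu_dereference(mesh_paths);
 *	struct mpath_node *node;
 *	int i;
 *
 *	for_each_mesh_entry(tbl, node, i)
 *		handle(node->mpath);	// handle() is illustrative only
 */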
static struct mesh_table *mesh_table_alloc(int size_order)
{
	struct mesh_table *newtbl;
	int i;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries,  0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}
static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}
static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, q,
					  tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}
static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< MEAN_CHAIN_LEN * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}
static u32 mesh_table_hash(const u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex,
			    tbl->hash_rnd) & tbl->hash_mask;
}
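
/*
 * Worked example (illustrative): for dst 02:00:11:22:33:44 the u32 read at
 * addr + 2 covers the last four bytes 11:22:33:44; that word, the interface
 * index and the per-table random key hash_rnd are mixed by jhash_2words(),
 * and masking with hash_mask (2^size_order - 1) yields the bucket index.
 */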
/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}
/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
				  MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}
static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst)) {
			if (mpath_expired(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}
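
/*
 * Callers are expected to wrap the lookup and any use of the returned path
 * in an RCU read-side section, e.g. (a sketch):
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(sdata, dst);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		... use mpath->next_hop etc. ...
 *	rcu_read_unlock();
 */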
struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}
/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (mpath_expired(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}
/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	struct mesh_table *tbl = rcu_dereference(mpp_paths);
	struct mpath_node *node;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx)
			return node->mpath;
	}

	return NULL;
}
/**
 * mesh_path_add_gate - add the given mpath to a mesh gate to our path table
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}
/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *q;

	hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) {
		if (gate->mpath != mpath)
			continue;
		spin_lock_bh(&tbl->gates_lock);
		hlist_del_rcu(&gate->list);
		kfree_rcu(gate, rcu);
		spin_unlock_bh(&tbl->gates_lock);
		mpath->sdata->u.mesh.num_gates--;
		mpath->is_gate = false;
		mpath_dbg(mpath->sdata,
			  "Mesh path: Deleted gate: %pM. %d known gates\n",
			  mpath->dst, mpath->sdata->u.mesh.num_gates);
		break;
	}
}
/**
 * mesh_gate_num - number of gates known to this interface
 *
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}
/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: the new (or preexisting) mesh path on success, or an ERR_PTR()
 * on failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	int grow = 0;
	int err;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto found;
	}

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    MEAN_CHAIN_LEN * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	mpath = new_mpath;
found:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return mpath;

err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return ERR_PTR(err);
}
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}
void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}
void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}
int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    MEAN_CHAIN_LEN * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);

	mpp_paths_generation++;

	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}
/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
					   sdata->u.mesh.mshcfg.element_ttl,
					   mpath->dst, mpath->sn,
					   WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
	rcu_read_unlock();
}
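
/*
 * Hypothetical caller sketch (the names below are illustrative, not
 * mac80211 API): a rate control algorithm tracking delivery failures to a
 * mesh peer might do
 *
 *	if (consecutive_failures > FAIL_THRESHOLD)
 *		mesh_plink_broken(sta);
 *
 * after which every active, non-fixed path using that peer as next hop is
 * deactivated and a PERR is sent to the broadcast address.
 */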
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);

	del_timer_sync(&node->mpath->timer);
	kfree(node->mpath);
	kfree(node);
}
/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath = node->mpath;
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&sdata->u.mesh.mpaths);
	atomic_dec(&tbl->entries);
}
/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (rcu_access_pointer(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}
static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
			       const u8 *proxy)
{
	struct mesh_table *tbl;
	struct mesh_path *mpp;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mpp_paths();
	for_each_mesh_entry(tbl, node, i) {
		mpp = node->mpath;
		if (ether_addr_equal(mpp->mpp, proxy)) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}
static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}
/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths as well as mesh portal paths.
 *
 * @sdata: interface data to match
 *
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}
/**
 * table_path_del - delete a path from the mesh or mpp table
 *
 * @tbl: mesh or mpp path table
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
static int table_path_del(struct mesh_table __rcu *rcu_tbl,
			  struct ieee80211_sub_if_data *sdata,
			  const u8 *addr)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	u32 hash_idx;
	int err = 0;

	tbl = resize_dereference_paths(rcu_tbl);
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(addr, mpath->dst)) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	return err;
}
/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	int err;

	/* flush relevant mpp entries first */
	mpp_flush_by_proxy(sdata, addr);

	read_lock_bh(&pathtbl_resize_lock);
	err = table_path_del(mesh_paths, sdata, addr);
	mesh_paths_generation++;
	read_unlock_bh(&pathtbl_resize_lock);

	return err;
}
/**
 * mpp_path_del - delete a mesh proxy path from the table
 *
 * @addr: proxied destination address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
static int mpp_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	int err;

	read_lock_bh(&pathtbl_resize_lock);
	err = table_path_del(mpp_paths, sdata, addr);
	mpp_paths_generation++;
	read_unlock_bh(&pathtbl_resize_lock);

	return err;
}
/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}
/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames
 * are copied from each gate to the next. After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding to %pM (flags %#x)\n",
				  gate->mpath->dst, gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
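
/*
 * Example of the move/copy sequence above, assuming three active gates G1,
 * G2 and G3 for this interface: frames are moved from the failed mpath to
 * G1's queue (copy is still false on the first hit), then copied from G1 to
 * G2 and from G2 to G3; finally every matching gate's queue is flushed with
 * mesh_path_tx_pending().
 */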
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}
/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}
/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself, so it must not be
 * called with that lock already held
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}
static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}
int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);


	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->sdata, mpath->dst);
	}

	tbl = rcu_dereference(mpp_paths);
	for_each_mesh_entry(tbl, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mpp_path_del(mpath->sdata, mpath->dst);
	}

	rcu_read_unlock();
}
void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}