/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups).  Adding or removing nodes requires we take the read
 * lock or we risk operating on an old table.  The write lock is only needed
 * when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}
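
/*
 * Locking sketch (illustrative only, not part of the original code):
 * path add/delete take pathtbl_resize_lock as *readers* before touching
 * the current table, while the grow functions take it as the writer
 * before swapping in a bigger table:
 *
 *	read_lock_bh(&pathtbl_resize_lock);
 *	tbl = resize_dereference_mesh_paths();
 *	... add or remove a node in tbl ...
 *	read_unlock_bh(&pathtbl_resize_lock);
 */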

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
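
/*
 * Iteration sketch (illustrative only): walking every path entry under RCU,
 * with the table dereferenced once into a plain variable as required above:
 *
 *	rcu_read_lock();
 *	tbl = rcu_dereference(mesh_paths);
 *	for_each_mesh_entry(tbl, p, node, i)
 *		... inspect node->mpath ...
 *	rcu_read_unlock();
 */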

static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, p, q,
					  tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
		& tbl->hash_mask;
}
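
/*
 * Worked example (illustrative only): with the initial size_order of 2 the
 * table has 4 buckets and hash_mask is 3, so the 32-bit jhash_2words()
 * result is masked down to a bucket index in 0..3.  Growing the table by
 * one order doubles the buckets and widens the mask to 0..7.
 */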

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
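
/*
 * Usage sketch (illustrative only): callers update the next hop with the
 * path state lock held, e.g. when a path reply confirms a route:
 *
 *	spin_lock_bh(&mpath->state_lock);
 *	mesh_path_assign_nexthop(mpath, sta);
 *	spin_unlock_bh(&mpath->state_lock);
 */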

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *cp_skb = NULL;
	struct sk_buff_head gateq, failq;
	unsigned long flags;
	int num_skbs;

	BUG_ON(gate_mpath == from_mpath);
	BUG_ON(!gate_mpath->next_hop);

	__skb_queue_head_init(&gateq);
	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	num_skbs = skb_queue_len(&failq);

	while (num_skbs--) {
		skb = __skb_dequeue(&failq);
		if (copy) {
			cp_skb = skb_copy(skb, GFP_ATOMIC);
			if (cp_skb)
				__skb_queue_tail(&failq, cp_skb);
		}

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		__skb_queue_tail(&gateq, skb);
	}

	spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
	skb_queue_splice(&gateq, &gate_mpath->frame_queue);
	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));
	spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst)) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}
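
/*
 * Usage sketch (illustrative only): both lookups must run inside an RCU
 * read section, and the returned mpath may only be used within it:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		... forward using rcu_dereference(mpath->next_hop) ...
 *	rcu_read_unlock();
 */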

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mesh_path_add_gate - mark the given mpath as a mesh gate in our path table
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	struct hlist_node *n;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	rcu_read_unlock();
	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	return 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0 on success
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *p, *q;

	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			spin_lock_bh(&tbl->gates_lock);
			hlist_del_rcu(&gate->list);
			kfree_rcu(gate, rcu);
			spin_unlock_bh(&tbl->gates_lock);
			mpath->sdata->u.mesh.num_gates--;
			mpath->is_gate = false;
			mpath_dbg(mpath->sdata,
				  "Mesh path: Deleted gate: %pM. %d known gates\n",
				  mpath->dst, mpath->sdata->u.mesh.num_gates);
			break;
		}

	return 0;
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memset(new_mpath->rann_snd_addr, 0xff, ETH_ALEN);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}
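
/*
 * Usage sketch (illustrative only): callers typically look a path up under
 * RCU and create it on demand, e.g. when a frame needs a path that does not
 * exist yet:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (!mpath) {
 *		mesh_path_add(dst, sdata);
 *		mpath = mesh_path_lookup(dst, sdata);
 *	}
 *	rcu_read_unlock();
 */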

static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					   mpath->dst, cpu_to_le32(mpath->sn),
					   reason, bcast, sdata);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;

	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 *
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(addr, mpath->dst)) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct hlist_node *n;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding %p (flags %#x)\n",
				  gate->mpath, gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, n, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
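
/*
 * Usage sketch (illustrative only): when path discovery finally fails, the
 * pending frames can be handed to the known gates instead of being dropped:
 *
 *	if (!mpath->is_gate && mesh_gate_num(sdata) > 0) {
 *		if (mesh_path_send_to_gates(mpath))
 *			mesh_path_flush_pending(mpath);
 *	} else {
 *		mesh_path_flush_pending(mpath);
 *	}
 */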

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(skb, mpath->sdata);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself, so it must be
 * called without that lock held
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}
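
/*
 * Usage sketch (illustrative only): pinning a path to a known peer, e.g.
 * from the cfg80211 mesh path management handlers:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (mpath)
 *		mesh_path_fix_nexthop(mpath, sta);
 *	rcu_read_unlock();
 */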

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
		       &newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}