/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"
/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2
static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}
struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};
static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;
/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups). Adding or removing nodes requires we take the read
 * lock or we risk operating on an old table. The write lock is only needed
 * when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);
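
/*
 * Illustrative sketch (editorial, not from the original source) of how the
 * two lock levels above are meant to nest:
 *
 *	read_lock_bh(&pathtbl_resize_lock);	// add or delete an entry
 *	tbl = resize_dereference_mesh_paths();
 *	spin_lock(&tbl->hashwlock[idx]);	// then lock one bucket
 *	...
 *	spin_unlock(&tbl->hashwlock[idx]);
 *	read_unlock_bh(&pathtbl_resize_lock);
 *
 * Growing the table takes write_lock_bh(&pathtbl_resize_lock) instead,
 * while plain lookups rely on rcu_read_lock() alone.
 */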
static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}
/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)
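
/*
 * Example usage (illustrative only): walk every entry of the active table
 * under RCU, with "tbl" held in a plain local variable per the warning
 * above. inspect() is a hypothetical helper.
 *
 *	rcu_read_lock();
 *	tbl = rcu_dereference(mesh_paths);
 *	for_each_mesh_entry(tbl, node, i)
 *		inspect(node->mpath);
 *	rcu_read_unlock();
 */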
static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}
static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}
static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, q,
					  tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}
static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}
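
/*
 * Worked example (editorial, illustrative): with INIT_PATHS_SIZE_ORDER 2
 * the initial table has 4 buckets, and with MEAN_CHAIN_LEN 2 a grow is
 * requested once 2 * 4 = 8 entries have been added; the next grow then
 * triggers at 2 * 8 = 16 entries, and so on. The -EAGAIN check above
 * bails out if the table has meanwhile dropped back below the threshold.
 */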
static u32 mesh_table_hash(const u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr + 2), sdata->dev->ifindex,
			    tbl->hash_rnd) & tbl->hash_mask;
}
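
/*
 * Illustrative note (editorial): for a destination like 00:11:22:33:44:55
 * the 32-bit word at addr + 2 covers the last four address bytes, which
 * are mixed with the interface index and the per-table hash_rnd. Because
 * hash_rnd is drawn fresh in mesh_table_alloc(), bucket placement is
 * expected to change on every resize.
 */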
/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}
/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
				  MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}
static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst)) {
			if (mpath_expired(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}
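
/*
 * Illustrative caller pattern (a sketch, not taken from this file): the
 * returned mpath is only valid inside the RCU read section, so any use of
 * it must happen before rcu_read_unlock().
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(sdata, dst);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		next_hop = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */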
/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (mpath_expired(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}
/**
 * mesh_path_add_gate - add the given mpath as a mesh gate to our path table
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}
/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *q;

	hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) {
		if (gate->mpath != mpath)
			continue;
		spin_lock_bh(&tbl->gates_lock);
		hlist_del_rcu(&gate->list);
		kfree_rcu(gate, rcu);
		spin_unlock_bh(&tbl->gates_lock);
		mpath->sdata->u.mesh.num_gates--;
		mpath->is_gate = false;
		mpath_dbg(mpath->sdata,
			  "Mesh path: Deleted gate: %pM. %d known gates\n",
			  mpath->dst, mpath->sdata->u.mesh.num_gates);
		break;
	}
}
/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}
/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: the new mesh path on success, or an ERR_PTR() on failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	int grow = 0;
	int err;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto found;
	}

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	mpath = new_mpath;
found:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return mpath;

err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return ERR_PTR(err);
}
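
/*
 * Illustrative caller pattern (a sketch): since mesh_path_add() reports
 * failure via ERR_PTR() rather than NULL, callers are expected to test
 * the result with IS_ERR():
 *
 *	mpath = mesh_path_add(sdata, dst);
 *	if (IS_ERR(mpath))
 *		return PTR_ERR(mpath);
 */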
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}
void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}
void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}
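
/*
 * Note on the grow pattern above (editorial): the new table is fully
 * populated before rcu_assign_pointer() publishes it, so RCU readers see
 * either the old or the new table, never a half-built one. The old table
 * is reclaimed via call_rcu() only once all readers that could still hold
 * it have finished; mesh_table_free_rcu() passes free_leafs == false
 * because the mesh_path leaves now belong to the new table.
 */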
int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}
/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
				sdata->u.mesh.mshcfg.element_ttl,
				mpath->dst, mpath->sn,
				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
	rcu_read_unlock();
}
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}
/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;

	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}
/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}
static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}
/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 *
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}
/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful, -ENXIO if the path was not found
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(addr, mpath->dst)) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}
/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}
/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames
 * are copied from each gate to the next. After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding %p (flags %#x)\n",
				  gate->mpath, gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
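
/*
 * Editorial note on the flow above: with three usable gates g1, g2, g3,
 * the frames are first moved from the failed mpath to g1 (copy == false),
 * then copied from g1 to g2 and from g2 to g3, so each gate ends up with
 * its own copy before mesh_path_tx_pending() flushes every gate queue.
 */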
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}
/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}
/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself, so it must not be
 * held by the caller
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}
static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}
int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->sdata, mpath->dst);
	}
	rcu_read_unlock();
}
void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}