/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"
/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))
struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;
/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. When reading the table (i.e. doing lookups) we are well protected
 * by RCU.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);


static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}
/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
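
/*
 * Editorial usage sketch (not part of the original file): because "tbl"
 * is expanded twice, dereference into a local variable first and stay
 * inside an RCU read section for the whole walk; "use()" is hypothetical:
 *
 *	struct mesh_table *tbl;
 *	struct mpath_node *node;
 *	struct hlist_node *p;
 *	int i;
 *
 *	rcu_read_lock();
 *	tbl = rcu_dereference(mesh_paths);
 *	for_each_mesh_entry(tbl, p, node, i)
 *		use(node->mpath);
 *	rcu_read_unlock();
 */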
static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);

	return newtbl;
}
static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}
static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	__mesh_table_free(tbl);
}
static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}
static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
		& tbl->hash_mask;
}
/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mesh_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
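
/*
 * Editorial usage sketch (not from the original file): the returned
 * mpath is only valid inside the RCU read section, so look up and use
 * it under the same rcu_read_lock():
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(hdr->addr3, sdata);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		mesh_path_tx_pending(mpath);
 *	rcu_read_unlock();
 */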
struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mpp_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				if (MPATH_EXPIRED(node->mpath))
					node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}
/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}
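
/*
 * Editorial usage sketch (not from the original file): callers check the
 * return value and re-lookup under RCU on success; "target" and
 * "start_discovery" are hypothetical (cf. the PREQ machinery in
 * mesh_hwmp.c):
 *
 *	if (mesh_path_add(target, sdata) == 0) {
 *		rcu_read_lock();
 *		mpath = mesh_path_lookup(target, sdata);
 *		if (mpath)
 *			start_discovery(mpath);
 *		rcu_read_unlock();
 *	}
 */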
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}
void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}
void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}
/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					mpath->dst, cpu_to_le32(mpath->sn),
					cpu_to_le16(PERR_RCODE_DEST_UNREACH),
					bcast, sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}
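
/*
 * Editorial sketch (not from the original file): a rate control
 * algorithm might trigger this once tx status reports show a mesh peer
 * is gone; the counter and threshold below are hypothetical:
 *
 *	if (failed_frames > FAIL_THRESHOLD)
 *		mesh_plink_broken(sta);
 */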
/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}
void mesh_path_flush(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata == sdata)
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}
/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			spin_lock(&mpath->state_lock);
			mpath->flags |= MESH_PATH_RESOLVING;
			hlist_del_rcu(&node->list);
			call_rcu(&node->rcu, mesh_path_node_reclaim);
			atomic_dec(&tbl->entries);
			spin_unlock(&mpath->state_lock);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}
/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor. The precursor's address (i.e. the previous hop) was saved
 * in addr1 of the frame-to-be-forwarded, and would only be overwritten once
 * the destination is successfully resolved.
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mesh_path *mpath;
	u32 sn = 0;

	if (memcmp(hdr->addr4, sdata->vif.addr, ETH_ALEN) != 0) {
		u8 *ra, *da;

		da = hdr->addr3;
		ra = hdr->addr1;
		mpath = mesh_path_lookup(da, sdata);
		if (mpath)
			sn = ++mpath->sn;
		mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl, skb->data,
				   cpu_to_le32(sn),
				   cpu_to_le16(PERR_RCODE_NO_ROUTE), ra, sdata);
	}

	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}
/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) &&
	       (mpath->flags & MESH_PATH_ACTIVE))
		mesh_path_discard_frame(skb, mpath->sdata);
}
/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself, so it must be
 * called without that lock held.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}
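
/*
 * Editorial usage sketch (not from the original file): pinning a static
 * route to a known peer; "next_hop_addr" is hypothetical:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	sta = sta_info_get(sdata, next_hop_addr);
 *	if (mpath && sta)
 *		mesh_path_fix_nexthop(mpath, sta);
 *	rcu_read_unlock();
 */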
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}
static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}
int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		mesh_table_free(tbl_path, true);
		return -ENOMEM;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;
}
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_del(mpath->dst, mpath->sdata);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}
void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}