// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Copyright (C) 2023 Intel Corporation
 * Author: Luis Carlos Cobo <luisca@cozybit.com>
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"
#include <linux/rhashtable.h>

static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);

static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
{
	/* Use last four bytes of hw addr as hash index */
	return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
}

static const struct rhashtable_params mesh_rht_params = {
	.nelem_hint = 2,
	.automatic_shrinking = true,
	.key_len = ETH_ALEN,
	.key_offset = offsetof(struct mesh_path, dst),
	.head_offset = offsetof(struct mesh_path, rhash),
	.hashfn = mesh_table_hash,
};

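/*
 * Note: mesh_table_hash() above only mixes the last four bytes of the
 * address into the hash; rhashtable still compares the full key_len on
 * lookup, so shared OUI prefixes cost at most longer bucket walks,
 * never false matches.
 */
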
static const struct rhashtable_params fast_tx_rht_params = {
	.nelem_hint = 10,
	.automatic_shrinking = true,
	.key_len = sizeof_field(struct ieee80211_mesh_fast_tx, key),
	.key_offset = offsetof(struct ieee80211_mesh_fast_tx, key),
	.head_offset = offsetof(struct ieee80211_mesh_fast_tx, rhash),
	.hashfn = mesh_table_hash,
};

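/*
 * Unlike the path tables, the fast-tx cache is keyed by the whole
 * struct ieee80211_mesh_fast_tx_key (destination address plus entry
 * type), so one destination may have separate LOCAL, PROXIED and
 * FORWARDED entries; mesh_fast_tx_flush_addr() therefore has to probe
 * every type when flushing by address.
 */
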
static void __mesh_fast_tx_entry_free(void *ptr, void *tblptr)
{
	struct ieee80211_mesh_fast_tx *entry = ptr;

	kfree_rcu(entry, fast_tx.rcu_head);
}

static void mesh_fast_tx_deinit(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_tx_cache *cache;

	cache = &sdata->u.mesh.tx_cache;
	rhashtable_free_and_destroy(&cache->rht,
				    __mesh_fast_tx_entry_free, NULL);
}

static void mesh_fast_tx_init(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_tx_cache *cache;

	cache = &sdata->u.mesh.tx_cache;
	rhashtable_init(&cache->rht, &fast_tx_rht_params);
	INIT_HLIST_HEAD(&cache->walk_head);
	spin_lock_init(&cache->walk_lock);
}

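/*
 * Entries are kept on cache->walk_head (under cache->walk_lock) in
 * parallel to the rhashtable, so the flush and GC paths below can
 * iterate all entries with a plain hlist walk instead of an rhashtable
 * walker.
 */
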
static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

static void mesh_path_rht_free(void *ptr, void *tblptr)
{
	struct mesh_path *mpath = ptr;
	struct mesh_table *tbl = tblptr;

	mesh_path_free_rcu(tbl, mpath);
}

static void mesh_table_init(struct mesh_table *tbl)
{
	INIT_HLIST_HEAD(&tbl->known_gates);
	INIT_HLIST_HEAD(&tbl->walk_head);
	atomic_set(&tbl->entries, 0);
	spin_lock_init(&tbl->gates_lock);
	spin_lock_init(&tbl->walk_lock);

	/* rhashtable_init() may fail only in case of wrong
	 * mesh_rht_params
	 */
	WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params));
}

static void mesh_table_free(struct mesh_table *tbl)
{
	rhashtable_free_and_destroy(&tbl->rhead,
				    mesh_path_rht_free, tbl);
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

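/*
 * Resulting address layout after prepare_for_gate(), assuming the frame
 * did not already carry an Address Extension:
 *
 *	addr1: next hop towards the gate	addr2: our vif address
 *	addr3: dst_addr (the gate itself)	eaddr1/eaddr2: the original
 *	addr3/addr4, i.e. the mesh DA/SA the gate must deliver to
 */
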
/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
		    MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

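/*
 * Note the move/copy split above: with copy == false the original
 * frames are unlinked from the failed queue and freed, i.e. moved;
 * mesh_path_send_to_gates() chains the gates so that only the first
 * active gate takes the move and every later gate receives copies.
 */
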
static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;

	mpath = rhashtable_lookup(&tbl->rhead, dst, mesh_rht_params);

	if (mpath && mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

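/*
 * Expired paths are only marked !MESH_PATH_ACTIVE here; the entry
 * itself stays in the table until mesh_path_expire() removes it.
 */
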
/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata);
}

static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
	int i = 0;
	struct mesh_path *mpath;

	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (i++ == idx)
			break;
	}

	if (!mpath)
		return NULL;

	if (mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}

	return mpath;
}

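/*
 * The index-based lookup is a linear O(n) walk and is presumably only
 * meant for infrequent dump operations (e.g. path table dumps via
 * nl80211), not for the data path.
 */
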
/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx);
}

/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx);
}

/**
 * mesh_path_add_gate - add the given mpath as a mesh gate to our path table
 * @mpath: gate path to add to table
 *
 * Returns: 0 on success, -EEXIST if the gate was already recorded
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	int err;

	rcu_read_lock();
	tbl = &mpath->sdata->u.mesh.mesh_paths;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->is_gate) {
		err = -EEXIST;
		spin_unlock_bh(&mpath->state_lock);
		goto err_rcu;
	}
	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;

	spin_lock(&tbl->gates_lock);
	hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
	spin_unlock(&tbl->gates_lock);

	spin_unlock_bh(&mpath->state_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	lockdep_assert_held(&mpath->state_lock);
	if (!mpath->is_gate)
		return;

	mpath->is_gate = false;
	spin_lock_bh(&tbl->gates_lock);
	hlist_del_rcu(&mpath->gate_list);
	mpath->sdata->u.mesh.num_gates--;
	spin_unlock_bh(&tbl->gates_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Deleted gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 *
 * Returns: The number of gates
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

static
struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
				const u8 *dst, gfp_t gfp_flags)
{
	struct mesh_path *new_mpath;

	new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
	if (!new_mpath)
		return NULL;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	timer_setup(&new_mpath->timer, mesh_path_timer, 0);

	return new_mpath;
}

static void mesh_fast_tx_entry_free(struct mesh_tx_cache *cache,
				    struct ieee80211_mesh_fast_tx *entry)
{
	hlist_del_rcu(&entry->walk_list);
	rhashtable_remove_fast(&cache->rht, &entry->rhash, fast_tx_rht_params);
	kfree_rcu(entry, fast_tx.rcu_head);
}

struct ieee80211_mesh_fast_tx *
mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata,
		 struct ieee80211_mesh_fast_tx_key *key)
{
	struct ieee80211_mesh_fast_tx *entry;
	struct mesh_tx_cache *cache;

	cache = &sdata->u.mesh.tx_cache;
	entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params);
	if (!entry)
		return NULL;

	if (!(entry->mpath->flags & MESH_PATH_ACTIVE) ||
	    mpath_expired(entry->mpath)) {
		spin_lock_bh(&cache->walk_lock);
		entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params);
		if (entry)
			mesh_fast_tx_entry_free(cache, entry);
		spin_unlock_bh(&cache->walk_lock);
		return NULL;
	}

	mesh_path_refresh(sdata, entry->mpath, NULL);
	if (entry->mppath)
		entry->mppath->exp_time = jiffies;
	entry->timestamp = jiffies;

	return entry;
}

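/*
 * The stale-entry branch above repeats the lookup under
 * cache->walk_lock before freeing: the first lookup ran under RCU only,
 * so the entry may already have been removed by a concurrent flush and
 * must be re-validated before mesh_fast_tx_entry_free().
 */
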
void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
			struct sk_buff *skb, struct mesh_path *mpath)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_mesh_fast_tx *entry, *prev;
	struct ieee80211_mesh_fast_tx build = {};
	struct ieee80211s_hdr *meshhdr;
	struct mesh_tx_cache *cache;
	struct ieee80211_key *key;
	struct mesh_path *mppath;
	struct sta_info *sta;
	u8 *qc;

	if (sdata->noack_map ||
	    !ieee80211_is_data_qos(hdr->frame_control))
		return;

	build.fast_tx.hdr_len = ieee80211_hdrlen(hdr->frame_control);
	meshhdr = (struct ieee80211s_hdr *)(skb->data + build.fast_tx.hdr_len);
	build.hdrlen = ieee80211_get_mesh_hdrlen(meshhdr);

	cache = &sdata->u.mesh.tx_cache;
	if (atomic_read(&cache->rht.nelems) >= MESH_FAST_TX_CACHE_MAX_SIZE)
		return;

	sta = rcu_dereference(mpath->next_hop);
	if (!sta)
		return;

	build.key.type = MESH_FAST_TX_TYPE_LOCAL;
	if ((meshhdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) {
		/* This is required to keep the mppath alive */
		mppath = mpp_path_lookup(sdata, meshhdr->eaddr1);
		if (!mppath)
			return;
		build.mppath = mppath;
		if (!ether_addr_equal(meshhdr->eaddr2, sdata->vif.addr))
			build.key.type = MESH_FAST_TX_TYPE_PROXIED;
	} else if (ieee80211_has_a4(hdr->frame_control)) {
		mppath = mpath;
		if (!ether_addr_equal(hdr->addr4, sdata->vif.addr))
			build.key.type = MESH_FAST_TX_TYPE_FORWARDED;
	} else {
		return;
	}

	/* rate limit, in case fast xmit can't be enabled */
	if (mppath->fast_tx_check == jiffies)
		return;

	mppath->fast_tx_check = jiffies;

	/*
	 * Same use of the sta lock as in ieee80211_check_fast_xmit, in order
	 * to protect against concurrent sta key updates.
	 */
	spin_lock_bh(&sta->lock);
	key = rcu_access_pointer(sta->ptk[sta->ptk_idx]);
	if (!key)
		key = rcu_access_pointer(sdata->default_unicast_key);
	build.fast_tx.key = key;

	if (key) {
		bool gen_iv, iv_spc;

		gen_iv = key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV;
		iv_spc = key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE;

		if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
		    (key->flags & KEY_FLAG_TAINTED))
			goto unlock_sta;

		switch (key->conf.cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
		case WLAN_CIPHER_SUITE_CCMP_256:
			if (gen_iv)
				build.fast_tx.pn_offs = build.fast_tx.hdr_len;
			if (gen_iv || iv_spc)
				build.fast_tx.hdr_len += IEEE80211_CCMP_HDR_LEN;
			break;
		case WLAN_CIPHER_SUITE_GCMP:
		case WLAN_CIPHER_SUITE_GCMP_256:
			if (gen_iv)
				build.fast_tx.pn_offs = build.fast_tx.hdr_len;
			if (gen_iv || iv_spc)
				build.fast_tx.hdr_len += IEEE80211_GCMP_HDR_LEN;
			break;
		default:
			goto unlock_sta;
		}
	}

	memcpy(build.key.addr, mppath->dst, ETH_ALEN);
	build.timestamp = jiffies;
	build.fast_tx.band = info->band;
	build.fast_tx.da_offs = offsetof(struct ieee80211_hdr, addr3);
	build.fast_tx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
	memcpy(build.hdr, meshhdr, build.hdrlen);
	memcpy(build.hdr + build.hdrlen, rfc1042_header, sizeof(rfc1042_header));
	build.hdrlen += sizeof(rfc1042_header);
	memcpy(build.fast_tx.hdr, hdr, build.fast_tx.hdr_len);

	hdr = (struct ieee80211_hdr *)build.fast_tx.hdr;
	if (build.fast_tx.key)
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);

	qc = ieee80211_get_qos_ctl(hdr);
	qc[1] |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8;

	entry = kmemdup(&build, sizeof(build), GFP_ATOMIC);
	if (!entry)
		goto unlock_sta;

	spin_lock(&cache->walk_lock);
	prev = rhashtable_lookup_get_insert_fast(&cache->rht,
						 &entry->rhash,
						 fast_tx_rht_params);
	if (IS_ERR(prev)) {
		kfree(entry);
		goto unlock_cache;
	}

	/*
	 * replace any previous entry in the hash table, in case we're
	 * replacing it with a different type (e.g. mpath -> mpp)
	 */
	if (unlikely(prev)) {
		rhashtable_replace_fast(&cache->rht, &prev->rhash,
					&entry->rhash, fast_tx_rht_params);
		hlist_del_rcu(&prev->walk_list);
		kfree_rcu(prev, fast_tx.rcu_head);
	}

	hlist_add_head(&entry->walk_list, &cache->walk_head);

unlock_cache:
	spin_unlock(&cache->walk_lock);
unlock_sta:
	spin_unlock_bh(&sta->lock);
}

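/*
 * The cached entry carries a fully prebuilt 802.11 + mesh + RFC 1042
 * header template (plus key and PN offset when encryption applies), so
 * the fast transmit path can copy one template per frame instead of
 * rebuilding and re-validating the headers for every packet.
 */
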
void mesh_fast_tx_gc(struct ieee80211_sub_if_data *sdata)
{
	unsigned long timeout = msecs_to_jiffies(MESH_FAST_TX_CACHE_TIMEOUT);
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx *entry;
	struct hlist_node *n;

	if (atomic_read(&cache->rht.nelems) < MESH_FAST_TX_CACHE_THRESHOLD_SIZE)
		return;

	spin_lock_bh(&cache->walk_lock);
	hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
		if (!time_is_after_jiffies(entry->timestamp + timeout))
			mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}

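/*
 * GC only kicks in once the cache has grown past
 * MESH_FAST_TX_CACHE_THRESHOLD_SIZE; below that, idle entries are
 * tolerated until a path, sta or address flush removes them.
 */
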
void mesh_fast_tx_flush_mpath(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx *entry;
	struct hlist_node *n;

	spin_lock_bh(&cache->walk_lock);
	hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
		if (entry->mpath == mpath)
			mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}

void mesh_fast_tx_flush_sta(struct ieee80211_sub_if_data *sdata,
			    struct sta_info *sta)
{
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx *entry;
	struct hlist_node *n;

	spin_lock_bh(&cache->walk_lock);
	hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
		if (rcu_access_pointer(entry->mpath->next_hop) == sta)
			mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}

void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata,
			     const u8 *addr)
{
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx_key key = {};
	struct ieee80211_mesh_fast_tx *entry;
	int i;

	ether_addr_copy(key.addr, addr);
	spin_lock_bh(&cache->walk_lock);
	for (i = 0; i < NUM_MESH_FAST_TX_TYPE; i++) {
		key.type = i;
		entry = rhashtable_lookup_fast(&cache->rht, &key, fast_tx_rht_params);
		if (entry)
			mesh_fast_tx_entry_free(cache, entry);
	}
	spin_unlock_bh(&cache->walk_lock);
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: the new (or existing) mesh path on success, or an ERR_PTR() on
 * failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-EOPNOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-EOPNOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return ERR_PTR(-ENOMEM);

	tbl = &sdata->u.mesh.mesh_paths;
	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
						  &new_mpath->rhash,
						  mesh_rht_params);
	if (!mpath)
		hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (mpath) {
		kfree(new_mpath);

		if (IS_ERR(mpath))
			return mpath;

		new_mpath = mpath;
	}

	sdata->u.mesh.mesh_paths_generation++;
	return new_mpath;
}

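/*
 * rhashtable_lookup_get_insert_fast() makes the add idempotent under
 * concurrency: if another CPU won the race to insert this destination,
 * the existing mpath is returned and the fresh allocation is freed.
 */
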
int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct mesh_table *tbl;
	struct mesh_path *new_mpath;
	int ret;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -EOPNOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -EOPNOTSUPP;

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);

	if (!new_mpath)
		return -ENOMEM;

	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	tbl = &sdata->u.mesh.mpp_paths;

	spin_lock_bh(&tbl->walk_lock);
	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
					    &new_mpath->rhash,
					    mesh_rht_params);
	if (!ret)
		hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (ret)
		kfree(new_mpath);
	else
		mesh_fast_tx_flush_addr(sdata, dst);

	sdata->u.mesh.mpp_paths_generation++;
	return ret;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
				sdata->u.mesh.mshcfg.element_ttl,
				mpath->dst, mpath->sn,
				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_free_rcu(struct mesh_table *tbl,
			       struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;

	spin_lock_bh(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
	mesh_gate_del(tbl, mpath);
	spin_unlock_bh(&mpath->state_lock);
	timer_shutdown_sync(&mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	atomic_dec(&tbl->entries);
	mesh_path_flush_pending(mpath);
	kfree_rcu(mpath, rcu);
}

static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	hlist_del_rcu(&mpath->walk_list);
	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
	if (tbl == &mpath->sdata->u.mesh.mpp_paths)
		mesh_fast_tx_flush_addr(mpath->sdata, mpath->dst);
	else
		mesh_fast_tx_flush_mpath(mpath);
	mesh_path_free_rcu(tbl, mpath);
}

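/*
 * Callers must hold tbl->walk_lock. Freeing is deferred via kfree_rcu()
 * inside mesh_path_free_rcu(), so concurrent RCU readers may still
 * dereference the mpath until a grace period has elapsed.
 */
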
/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta)
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
			       const u8 *proxy)
{
	struct mesh_table *tbl = &sdata->u.mesh.mpp_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (ether_addr_equal(mpath->mpp, proxy))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void table_flush_by_iface(struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * @sdata: interface data to match
 *
 * This function deletes both mesh paths and mesh portal paths.
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	table_flush_by_iface(&sdata->u.mesh.mesh_paths);
	table_flush_by_iface(&sdata->u.mesh.mpp_paths);
}

/**
 * table_path_del - delete a path from the mesh or mpp table
 *
 * @tbl: mesh or mpp path table
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
static int table_path_del(struct mesh_table *tbl,
			  struct ieee80211_sub_if_data *sdata,
			  const u8 *addr)
{
	struct mesh_path *mpath;

	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
	if (!mpath) {
		spin_unlock_bh(&tbl->walk_lock);
		return -ENXIO;
	}

	__mesh_path_del(tbl, mpath);
	spin_unlock_bh(&tbl->walk_lock);
	return 0;
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	int err;

	/* flush relevant mpp entries first */
	mpp_flush_by_proxy(sdata, addr);

	err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr);
	sdata->u.mesh.mesh_paths_generation++;
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 *
 * Returns: 0 on success, -EHOSTUNREACH if no active gate was found
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mesh_path *gate;
	bool copy = false;

	tbl = &sdata->u.mesh.mesh_paths;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		if (gate->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
			mesh_path_move_to_queue(gate, from_mpath, copy);
			from_mpath = gate;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding to %pM (flags %#x)\n",
				  gate->dst, gate->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
		mesh_path_tx_pending(gate);
	}
	rcu_read_unlock();

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	ieee80211_free_txskb(&sdata->local->hw, skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq, *tmp;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);

	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
	list_for_each_entry_safe(preq, tmp, &ifmsh->preq_queue.list, list) {
		if (ether_addr_equal(mpath->dst, preq->dst)) {
			list_del(&preq->list);
			kfree(preq);
			--ifmsh->preq_queue_len;
		}
	}
	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself, so it must NOT be
 * called with mpath->state_lock already held.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
	mesh_path_activate(mpath);
	mesh_fast_tx_flush_mpath(mpath);
	spin_unlock_bh(&mpath->state_lock);
	ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg);
	/* init it at a low value - 0 start is tricky */
	ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1);
	mesh_path_tx_pending(mpath);
}

void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
	mesh_table_init(&sdata->u.mesh.mesh_paths);
	mesh_table_init(&sdata->u.mesh.mpp_paths);
	mesh_fast_tx_init(sdata);
}

static void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
				 struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths);
	mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths);
}

void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
	mesh_fast_tx_deinit(sdata);
	mesh_table_free(&sdata->u.mesh.mesh_paths);
	mesh_table_free(&sdata->u.mesh.mpp_paths);
}