net/mac80211/mesh_pathtbl.c
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/etherdevice.h>
#include <linux/jhash.h>	/* for jhash_2words(), used by mesh_table_hash() */
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"
#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
#define mpath_dbg(fmt, args...)	printk(KERN_DEBUG fmt, ##args)
#else
#define mpath_dbg(fmt, args...)	do { (void)(0); } while (0)
#endif
/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))
struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};
static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;
/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups). Adding or removing nodes requires we take the read
 * lock or we risk operating on an old table. The write lock is only needed
 * when modifying the number of buckets of a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);
static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}
/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
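/*
 * Illustrative usage sketch (not part of the original file): dereference
 * the table into a local variable first, then hand that variable to the
 * macro, since "tbl" is evaluated twice:
 *
 *	struct mesh_table *tbl;
 *	struct mpath_node *node;
 *	struct hlist_node *p;
 *	int i;
 *
 *	rcu_read_lock();
 *	tbl = rcu_dereference(mesh_paths);
 *	for_each_mesh_entry(tbl, p, node, i)
 *		mpath_dbg("entry for %pM\n", node->mpath->dst);
 *	rcu_read_unlock();
 */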
static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}
static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}
static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, p, q,
					  tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}
static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}
static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex, tbl->hash_rnd)
		& tbl->hash_mask;
}
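/*
 * Worked example (illustrative): for addr = 00:11:22:33:44:55, the u32
 * loaded from addr+2 covers bytes 22:33:44:55, i.e. the last four bytes
 * of the MAC. jhash_2words() mixes that word with the interface index,
 * seeded by the per-table hash_rnd, and the result is masked down to the
 * bucket count (always a power of two).
 */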
/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}
/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *cp_skb = NULL;
	struct sk_buff_head gateq, failq;
	unsigned long flags;
	int num_skbs;

	BUG_ON(gate_mpath == from_mpath);
	BUG_ON(!gate_mpath->next_hop);

	__skb_queue_head_init(&gateq);
	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	num_skbs = skb_queue_len(&failq);

	while (num_skbs--) {
		skb = __skb_dequeue(&failq);
		if (copy) {
			cp_skb = skb_copy(skb, GFP_ATOMIC);
			if (cp_skb)
				__skb_queue_tail(&failq, cp_skb);
		}

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		__skb_queue_tail(&gateq, skb);
	}

	spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
	skb_queue_splice(&gateq, &gate_mpath->frame_queue);
	mpath_dbg("Mpath queue for gate %pM has %d frames\n",
			gate_mpath->dst,
			skb_queue_len(&gate_mpath->frame_queue));
	spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}
static struct mesh_path *path_lookup(struct mesh_table *tbl, u8 *dst,
				     struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return path_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return path_lookup(rcu_dereference(mpp_paths), dst, sdata);
}
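/*
 * Caller sketch (hypothetical, for illustration): lookups return an
 * RCU-protected pointer, so the mpath may only be used inside the same
 * read-side critical section:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, sdata);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		sta = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */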
/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}
static void mesh_gate_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	kfree(node);
}
/**
 * mesh_path_add_gate - mark the given mpath as a gate and add it to our
 * table of known gates
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	struct hlist_node *n;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	rcu_read_unlock();
	mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
		  mpath->sdata->name, mpath->dst,
		  mpath->sdata->u.mesh.num_gates);
	return 0;
err_rcu:
	rcu_read_unlock();
	return err;
}
/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0 on success
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *p, *q;

	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			spin_lock_bh(&tbl->gates_lock);
			hlist_del_rcu(&gate->list);
			call_rcu(&gate->rcu, mesh_gate_node_reclaim);
			spin_unlock_bh(&tbl->gates_lock);
			mpath->sdata->u.mesh.num_gates--;
			mpath->is_gate = false;
			mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
				  "%d known gates\n", mpath->sdata->name,
				  mpath->dst, mpath->sdata->u.mesh.num_gates);
			break;
		}

	return 0;
}
/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}
/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}
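/*
 * Caller sketch (hypothetical): a path-discovery caller such as the HWMP
 * code would typically create the path on demand and then resolve it,
 * roughly:
 *
 *	err = mesh_path_add(dst, sdata);
 *	if (!err) {
 *		rcu_read_lock();
 *		mpath = mesh_path_lookup(dst, sdata);
 *		... queue the frame on mpath->frame_queue and start
 *		    path discovery ...
 *		rcu_read_unlock();
 *	}
 */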
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}
void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}
void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, sdata->vif.addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata && memcmp(dst, mpath->dst, ETH_ALEN) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}
/**
 * mesh_plink_broken - deactivates paths and sends PERR when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					mpath->dst, cpu_to_le32(mpath->sn),
					reason, bcast, sdata);
		}
	}
	rcu_read_unlock();
}
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}
/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;
	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}
/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta) {
			spin_lock_bh(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock_bh(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}
static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}
/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}
/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock_bh(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock_bh(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}
/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}
/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames are
 * copied from one gate queue to the next. After the frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct hlist_node *n;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg("Not forwarding %p\n", gate->mpath);
			mpath_dbg("flags %x\n", gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, n, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg("Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}
/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(skb, mpath->sdata);
}
/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must not be called with mpath->state_lock held;
 * it takes the lock itself.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}
static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}
int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}
void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}