net/batman-adv/originator.c
/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "originator.h"
#include "main.h"

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "hash.h"
#include "multicast.h"
#include "network-coding.h"
#include "routing.h"
#include "translation-table.h"
/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);

/* returns 1 if they are the same originator */
int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_orig_node,
					 hash_entry);

	return batadv_compare_eth(data1, data2);
}

/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns the vlan object identified by vid and belonging to orig_node or NULL
 * if it does not exist.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan = NULL, *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
		if (tmp->vid != vid)
			continue;

		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		vlan = tmp;

		break;
	}
	rcu_read_unlock();

	return vlan;
}
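/* The *_get() helpers in this file share the lookup pattern used above: walk
 * an RCU-protected list under rcu_read_lock() and take a reference with
 * atomic_inc_not_zero(), so an entry whose refcount has already dropped to
 * zero (and is about to be freed) is skipped rather than resurrected.
 */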
/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 *  object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	spin_lock_bh(&orig_node->vlan_list_lock);

	/* first look if an object for this vid already exists */
	vlan = batadv_orig_node_vlan_get(orig_node, vid);
	if (vlan)
		goto out;

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan)
		goto out;

	atomic_set(&vlan->refcount, 2);
	vlan->vid = vid;

	hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);

out:
	spin_unlock_bh(&orig_node->vlan_list_lock);

	return vlan;
}
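/* The refcount of a freshly created object is set to 2 in all of the *_new()
 * helpers in this file: one reference is owned by the list the object was
 * just added to, the other belongs to the caller, which is expected to drop
 * it with the matching *_free_ref() once done. Illustrative caller sketch
 * (not part of this file):
 *
 *	vlan = batadv_orig_node_vlan_new(orig_node, vid);
 *	if (!vlan)
 *		return -ENOMEM;
 *	... use vlan ...
 *	batadv_orig_node_vlan_free_ref(vlan);
 */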
/**
 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
 *  the originator-vlan object
 * @orig_vlan: the originator-vlan object to release
 */
void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
{
	if (atomic_dec_and_test(&orig_vlan->refcount))
		kfree_rcu(orig_vlan, rcu);
}

int batadv_originator_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 0;

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	batadv_hash_set_lock_class(bat_priv->orig_hash,
				   &batadv_orig_hash_lock_class_key);

	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

	return 0;

err:
	return -ENOMEM;
}

/**
 * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
 *  free after rcu grace period
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
static void
batadv_neigh_ifinfo_release(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref(neigh_ifinfo->if_outgoing);

	kfree_rcu(neigh_ifinfo, rcu);
}

/**
 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly release
 *  the neigh_ifinfo
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
		batadv_neigh_ifinfo_release(neigh_ifinfo);
}
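/* The _release()/_free_ref() split used above recurs for every refcounted
 * object in this file: _free_ref() only drops one reference, and when the
 * counter hits zero the corresponding _release() drops the references the
 * object holds on other objects, unlinks it from its lists where needed and
 * hands the memory to kfree_rcu(), so readers still inside an RCU read-side
 * section keep a valid view until the grace period ends.
 */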
/**
 * batadv_hardif_neigh_release - release hardif neigh node from lists and
 *  queue for free after rcu grace period
 * @hardif_neigh: hardif neigh neighbor to free
 */
static void
batadv_hardif_neigh_release(struct batadv_hardif_neigh_node *hardif_neigh)
{
	spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
	hlist_del_init_rcu(&hardif_neigh->list);
	spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);

	batadv_hardif_free_ref(hardif_neigh->if_incoming);
	kfree_rcu(hardif_neigh, rcu);
}

/**
 * batadv_hardif_neigh_free_ref - decrement the hardif neighbors refcounter
 *  and possibly release it
 * @hardif_neigh: hardif neigh neighbor to free
 */
void batadv_hardif_neigh_free_ref(struct batadv_hardif_neigh_node *hardif_neigh)
{
	if (atomic_dec_and_test(&hardif_neigh->refcount))
		batadv_hardif_neigh_release(hardif_neigh);
}

/**
 * batadv_neigh_node_release - release neigh_node from lists and queue for
 *  free after rcu grace period
 * @neigh_node: neigh neighbor to free
 */
static void batadv_neigh_node_release(struct batadv_neigh_node *neigh_node)
{
	struct hlist_node *node_tmp;
	struct batadv_hardif_neigh_node *hardif_neigh;
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_algo_ops *bao;

	bao = neigh_node->orig_node->bat_priv->bat_algo_ops;

	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh_node->ifinfo_list, list) {
		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
	}

	hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming,
					       neigh_node->addr);
	if (hardif_neigh) {
		/* batadv_hardif_neigh_get() increases refcount too */
		batadv_hardif_neigh_free_ref(hardif_neigh);
		batadv_hardif_neigh_free_ref(hardif_neigh);
	}

	if (bao->bat_neigh_free)
		bao->bat_neigh_free(neigh_node);

	batadv_hardif_free_ref(neigh_node->if_incoming);

	kfree_rcu(neigh_node, rcu);
}

/**
 * batadv_neigh_node_free_ref - decrement the neighbors refcounter
 *  and possibly release it
 * @neigh_node: neigh neighbor to free
 */
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		batadv_neigh_node_release(neigh_node);
}

/**
 * batadv_orig_router_get - router to the originator depending on iface
 * @orig_node: the orig node for the router
 * @if_outgoing: the interface where the payload packet has been received or
 *  the OGM should be sent to
 *
 * Returns the neighbor which should be router for this orig_node/iface.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_node *
batadv_orig_router_get(struct batadv_orig_node *orig_node,
		       const struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
		if (orig_ifinfo->if_outgoing != if_outgoing)
			continue;

		router = rcu_dereference(orig_ifinfo->router);
		break;
	}

	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}
/**
 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns the requested orig_ifinfo or NULL if not found.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
				 list) {
		if (tmp->if_outgoing != if_outgoing)
			continue;

		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		orig_ifinfo = tmp;
		break;
	}
	rcu_read_unlock();

	return orig_ifinfo;
}

/**
 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the orig_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo = NULL;
	unsigned long reset_time;

	spin_lock_bh(&orig_node->neigh_list_lock);

	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
	if (orig_ifinfo)
		goto out;

	orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
	if (!orig_ifinfo)
		goto out;

	if (if_outgoing != BATADV_IF_DEFAULT &&
	    !atomic_inc_not_zero(&if_outgoing->refcount)) {
		kfree(orig_ifinfo);
		orig_ifinfo = NULL;
		goto out;
	}

	reset_time = jiffies - 1;
	reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_ifinfo->batman_seqno_reset = reset_time;
	orig_ifinfo->if_outgoing = if_outgoing;
	INIT_HLIST_NODE(&orig_ifinfo->list);
	atomic_set(&orig_ifinfo->refcount, 2);
	hlist_add_head_rcu(&orig_ifinfo->list,
			   &orig_node->ifinfo_list);
out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return orig_ifinfo;
}
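/* batman_seqno_reset above (and bcast_seqno_reset for new originators further
 * down) is seeded roughly BATADV_RESET_PROTECTION_MS in the past so that the
 * sequence-number reset protection is already considered expired for a brand
 * new entry: a genuine seqno restart can then be accepted immediately instead
 * of being blocked for the whole protection window.
 */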
/**
 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * The object is returned with refcounter increased by 1.
 *
 * Returns the requested neigh_ifinfo or NULL if not found
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
				   *tmp_neigh_ifinfo;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
				 list) {
		if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))
			continue;

		neigh_ifinfo = tmp_neigh_ifinfo;
		break;
	}
	rcu_read_unlock();

	return neigh_ifinfo;
}

/**
 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the neigh_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the list
 * if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	spin_lock_bh(&neigh->ifinfo_lock);

	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
	if (neigh_ifinfo)
		goto out;

	neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
	if (!neigh_ifinfo)
		goto out;

	if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
		kfree(neigh_ifinfo);
		neigh_ifinfo = NULL;
		goto out;
	}

	INIT_HLIST_NODE(&neigh_ifinfo->list);
	atomic_set(&neigh_ifinfo->refcount, 2);
	neigh_ifinfo->if_outgoing = if_outgoing;

	hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);

out:
	spin_unlock_bh(&neigh->ifinfo_lock);

	return neigh_ifinfo;
}
/**
 * batadv_neigh_node_get - retrieve a neighbour from the list
 * @orig_node: originator which the neighbour belongs to
 * @hard_iface: the interface where this neighbour is connected to
 * @addr: the address of the neighbour
 *
 * Looks for and possibly returns a neighbour belonging to this originator list
 * which is connected through the provided hard interface.
 * Returns NULL if the neighbour is not found.
 */
static struct batadv_neigh_node *
batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
		      const struct batadv_hard_iface *hard_iface,
		      const u8 *addr)
{
	struct batadv_neigh_node *tmp_neigh_node, *res = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
		if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
			continue;

		if (tmp_neigh_node->if_incoming != hard_iface)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		res = tmp_neigh_node;
		break;
	}
	rcu_read_unlock();

	return res;
}

/**
 * batadv_hardif_neigh_create - create a hardif neighbour node
 * @hard_iface: the interface this neighbour is connected to
 * @neigh_addr: the interface address of the neighbour to retrieve
 *
 * Returns the hardif neighbour node if found or created or NULL otherwise.
 */
static struct batadv_hardif_neigh_node *
batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
			   const u8 *neigh_addr)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hardif_neigh_node *hardif_neigh = NULL;

	spin_lock_bh(&hard_iface->neigh_list_lock);

	/* check if neighbor hasn't been added in the meantime */
	hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
	if (hardif_neigh)
		goto out;

	if (!atomic_inc_not_zero(&hard_iface->refcount))
		goto out;

	hardif_neigh = kzalloc(sizeof(*hardif_neigh), GFP_ATOMIC);
	if (!hardif_neigh) {
		batadv_hardif_free_ref(hard_iface);
		goto out;
	}

	INIT_HLIST_NODE(&hardif_neigh->list);
	ether_addr_copy(hardif_neigh->addr, neigh_addr);
	hardif_neigh->if_incoming = hard_iface;
	hardif_neigh->last_seen = jiffies;

	atomic_set(&hardif_neigh->refcount, 1);

	if (bat_priv->bat_algo_ops->bat_hardif_neigh_init)
		bat_priv->bat_algo_ops->bat_hardif_neigh_init(hardif_neigh);

	hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);

out:
	spin_unlock_bh(&hard_iface->neigh_list_lock);
	return hardif_neigh;
}

/**
 * batadv_hardif_neigh_get_or_create - retrieve or create a hardif neighbour
 *  node
 * @hard_iface: the interface this neighbour is connected to
 * @neigh_addr: the interface address of the neighbour to retrieve
 *
 * Returns the hardif neighbour node if found or created or NULL otherwise.
 */
static struct batadv_hardif_neigh_node *
batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
				  const u8 *neigh_addr)
{
	struct batadv_hardif_neigh_node *hardif_neigh = NULL;

	/* first check without locking to avoid the overhead */
	hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
	if (hardif_neigh)
		return hardif_neigh;

	return batadv_hardif_neigh_create(hard_iface, neigh_addr);
}
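/* Classic two-step lookup above: the unlocked batadv_hardif_neigh_get()
 * handles the common "already known" case without taking neigh_list_lock,
 * while batadv_hardif_neigh_create() re-checks the list under the lock before
 * inserting, so two concurrent callers cannot add the same neighbour twice.
 */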
/**
 * batadv_hardif_neigh_get - retrieve a hardif neighbour from the list
 * @hard_iface: the interface where this neighbour is connected to
 * @neigh_addr: the address of the neighbour
 *
 * Looks for and possibly returns a neighbour belonging to this hard interface.
 * Returns NULL if the neighbour is not found.
 */
struct batadv_hardif_neigh_node *
batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
			const u8 *neigh_addr)
{
	struct batadv_hardif_neigh_node *tmp_hardif_neigh, *hardif_neigh = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_hardif_neigh,
				 &hard_iface->neigh_list, list) {
		if (!batadv_compare_eth(tmp_hardif_neigh->addr, neigh_addr))
			continue;

		if (!atomic_inc_not_zero(&tmp_hardif_neigh->refcount))
			continue;

		hardif_neigh = tmp_hardif_neigh;
		break;
	}
	rcu_read_unlock();

	return hardif_neigh;
}

/**
 * batadv_neigh_node_new - create and init a new neigh_node object
 * @orig_node: originator object representing the neighbour
 * @hard_iface: the interface where the neighbour is connected to
 * @neigh_addr: the mac address of the neighbour interface
 *
 * Allocates a new neigh_node object and initialises all the generic fields.
 * Returns the new object or NULL on failure.
 */
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_orig_node *orig_node,
		      struct batadv_hard_iface *hard_iface,
		      const u8 *neigh_addr)
{
	struct batadv_neigh_node *neigh_node;
	struct batadv_hardif_neigh_node *hardif_neigh = NULL;

	neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
	if (neigh_node)
		goto out;

	hardif_neigh = batadv_hardif_neigh_get_or_create(hard_iface,
							 neigh_addr);
	if (!hardif_neigh)
		goto out;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

	if (!atomic_inc_not_zero(&hard_iface->refcount)) {
		kfree(neigh_node);
		neigh_node = NULL;
		goto out;
	}

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
	spin_lock_init(&neigh_node->ifinfo_lock);

	ether_addr_copy(neigh_node->addr, neigh_addr);
	neigh_node->if_incoming = hard_iface;
	neigh_node->orig_node = orig_node;

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

	spin_lock_bh(&orig_node->neigh_list_lock);
	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* increment unique neighbor refcount */
	atomic_inc(&hardif_neigh->refcount);

	batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
		   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
		   neigh_addr, orig_node->orig, hard_iface->net_dev->name);

out:
	if (hardif_neigh)
		batadv_hardif_neigh_free_ref(hardif_neigh);
	return neigh_node;
}
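/* A successfully created neigh_node leaves batadv_neigh_node_new() holding
 * three kinds of references: its own refcount of 2 (neigh_list + caller), one
 * reference on hard_iface and one extra reference on the per-interface
 * hardif_neigh entry. All of them are dropped again in
 * batadv_neigh_node_release() when the neigh_node goes away.
 */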
/**
 * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list
 * @seq: neighbour table seq_file struct
 * @offset: not used
 *
 * Always returns 0.
 */
int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	batadv_hardif_free_ref(primary_if);

	if (!bat_priv->bat_algo_ops->bat_neigh_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_neigh_print(bat_priv, seq);
	return 0;
}

/**
 * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
 *  free after rcu grace period
 * @orig_ifinfo: the orig_ifinfo object to release
 */
static void batadv_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_ifinfo)
{
	struct batadv_neigh_node *router;

	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref(orig_ifinfo->if_outgoing);

	/* this is the last reference to this object */
	router = rcu_dereference_protected(orig_ifinfo->router, true);
	if (router)
		batadv_neigh_node_free_ref(router);

	kfree_rcu(orig_ifinfo, rcu);
}

/**
 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
 *  the orig_ifinfo
 * @orig_ifinfo: the orig_ifinfo object to release
 */
void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
{
	if (atomic_dec_and_test(&orig_ifinfo->refcount))
		batadv_orig_ifinfo_release(orig_ifinfo);
}

/**
 * batadv_orig_node_free_rcu - free the orig_node
 * @rcu: rcu pointer of the orig_node
 */
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct batadv_orig_node *orig_node;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	batadv_mcast_purge_orig(orig_node);

	batadv_frag_purge_orig(orig_node, NULL);

	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);

	kfree(orig_node->tt_buff);
	kfree(orig_node);
}

/**
 * batadv_orig_node_release - release orig_node from lists and queue for
 *  free after rcu grace period
 * @orig_node: the orig node to free
 */
static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_orig_ifinfo *orig_ifinfo;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref(orig_ifinfo);
	}
	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* Free nc_nodes */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

	call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

/**
 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
 *  release it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		batadv_orig_node_release(orig_node);
}
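/* Teardown of an originator thus happens in three stages: the last
 * batadv_orig_node_free_ref() triggers batadv_orig_node_release(), which
 * unlinks the neighbour and ifinfo lists under neigh_list_lock and schedules
 * batadv_orig_node_free_rcu() via call_rcu(); only after the RCU grace period
 * are the remaining substructures (multicast state, fragment buffers, routing
 * algorithm private data, tt_buff) and the orig_node itself freed.
 */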
void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	u32 i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(&orig_node->hash_entry);
			batadv_orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}

/**
 * batadv_orig_node_new - creates a new orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the mac address of the originator
 *
 * Creates a new originator object and initialises all the generic fields.
 * The new object is not added to the originator list.
 * Returns the newly created object or NULL on failure.
 */
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
					      const u8 *addr)
{
	struct batadv_orig_node *orig_node;
	struct batadv_orig_node_vlan *vlan;
	unsigned long reset_time;
	int i;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_HLIST_HEAD(&orig_node->vlan_list);
	INIT_HLIST_HEAD(&orig_node->ifinfo_list);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);
	spin_lock_init(&orig_node->tt_lock);
	spin_lock_init(&orig_node->vlan_list_lock);

	batadv_nc_init_orig(orig_node);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->bat_priv = bat_priv;
	ether_addr_copy(orig_node->orig, addr);
	batadv_dat_init_orig_node_addr(orig_node);
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	orig_node->last_seen = jiffies;
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;

#ifdef CONFIG_BATMAN_ADV_MCAST
	orig_node->mcast_flags = BATADV_NO_FLAGS;
	INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
	spin_lock_init(&orig_node->mcast_handler_lock);
#endif

	/* create a vlan object for the "untagged" LAN */
	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
	if (!vlan)
		goto free_orig_node;
	/* batadv_orig_node_vlan_new() increases the refcounter.
	 * Immediately release vlan since it is not needed anymore in this
	 * context
	 */
	batadv_orig_node_vlan_free_ref(vlan);

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
		spin_lock_init(&orig_node->fragments[i].lock);
		orig_node->fragments[i].size = 0;
	}

	return orig_node;
free_orig_node:
	kfree(orig_node);
	return NULL;
}
/**
 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
 * @bat_priv: the bat priv with all the soft interface information
 * @neigh: neigh node which is to be checked
 */
static void
batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
			  struct batadv_neigh_node *neigh)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;

	spin_lock_bh(&neigh->ifinfo_lock);

	/* for all ifinfo objects for this neighbor */
	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh->ifinfo_list, list) {
		if_outgoing = neigh_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
			   neigh->addr, if_outgoing->net_dev->name);

		hlist_del_rcu(&neigh_ifinfo->list);
		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
	}

	spin_unlock_bh(&neigh->ifinfo_lock);
}

/**
 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any ifinfo entry was purged, false otherwise.
 */
static bool
batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
			 struct batadv_orig_node *orig_node)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;
	bool ifinfo_purged = false;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all ifinfo objects for this originator */
	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		if_outgoing = orig_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "router/ifinfo purge: originator %pM, iface: %s\n",
			   orig_node->orig, if_outgoing->net_dev->name);

		ifinfo_purged = true;

		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref(orig_ifinfo);
		if (orig_node->last_bonding_candidate == orig_ifinfo) {
			orig_node->last_bonding_candidate = NULL;
			batadv_orig_ifinfo_free_ref(orig_ifinfo);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	return ifinfo_purged;
}
/**
 * batadv_purge_orig_neighbors - purges neighbors from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any neighbor was purged, false otherwise
 */
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
			else
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			batadv_neigh_node_free_ref(neigh_node);
		} else {
			/* only necessary if not the whole neighbor is to be
			 * deleted, but some interface has been removed.
			 */
			batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

/**
 * batadv_find_best_neighbor - finds the best neighbor after purging
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 * @if_outgoing: the interface for which the metric should be compared
 *
 * Returns the current best neighbor, with refcount increased.
 */
static struct batadv_neigh_node *
batadv_find_best_neighbor(struct batadv_priv *bat_priv,
			  struct batadv_orig_node *orig_node,
			  struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_node *best = NULL, *neigh;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;

	rcu_read_lock();
	hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
		if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
						best, if_outgoing) <= 0))
			continue;

		if (!atomic_inc_not_zero(&neigh->refcount))
			continue;

		if (best)
			batadv_neigh_node_free_ref(best);

		best = neigh;
	}
	rcu_read_unlock();

	return best;
}

/**
 * batadv_purge_orig_node - purges obsolete information from an orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * This function checks if the orig_node or substructures of it have become
 * obsolete, and purges this information if that's the case.
 *
 * Returns true if the orig_node is to be removed, false otherwise.
 */
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *best_neigh_node;
	struct batadv_hard_iface *hard_iface;
	bool changed_ifinfo, changed_neigh;

	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   orig_node->orig,
			   jiffies_to_msecs(orig_node->last_seen));
		return true;
	}
	changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
	changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);

	if (!changed_ifinfo && !changed_neigh)
		return false;

	/* first for NULL ... */
	best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
						    BATADV_IF_DEFAULT);
	batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
			    best_neigh_node);
	if (best_neigh_node)
		batadv_neigh_node_free_ref(best_neigh_node);

	/* ... then for all other interfaces. */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		best_neigh_node = batadv_find_best_neighbor(bat_priv,
							    orig_node,
							    hard_iface);
		batadv_update_route(bat_priv, orig_node, hard_iface,
				    best_neigh_node);
		if (best_neigh_node)
			batadv_neigh_node_free_ref(best_neigh_node);
	}
	rcu_read_unlock();

	return false;
}
static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	u32 i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				batadv_gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(&orig_node->hash_entry);
				batadv_tt_global_del_orig(orig_node->bat_priv,
							  orig_node, -1,
							  "originator timed out");
				batadv_orig_node_free_ref(orig_node);
				continue;
			}

			batadv_frag_purge_orig(orig_node,
					       batadv_frag_check_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_gw_election(bat_priv);
}

static void batadv_purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}
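/* batadv_purge_orig() re-arms itself every BATADV_ORIG_WORK_PERIOD
 * milliseconds after batadv_originator_init() queued it the first time.
 * batadv_originator_free() stops this cycle with cancel_delayed_work_sync()
 * and clears bat_priv->orig_hash before destroying the hash, so later calls
 * into _batadv_purge_orig() (e.g. via batadv_purge_orig_ref()) just see a
 * NULL hash and return early.
 */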
void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}

int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	batadv_hardif_free_ref(primary_if);

	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
					       BATADV_IF_DEFAULT);

	return 0;
}

/**
 * batadv_orig_hardif_seq_print_text - writes originator infos for a specific
 *  outgoing interface
 * @seq: debugfs table seq_file struct
 * @offset: not used
 *
 * Returns 0
 */
int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_hard_iface *hard_iface;
	struct batadv_priv *bat_priv;

	hard_iface = batadv_hardif_get_by_netdev(net_dev);

	if (!hard_iface || !hard_iface->soft_iface) {
		seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
		goto out;
	}

	bat_priv = netdev_priv(hard_iface->soft_iface);
	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		goto out;
	}

	if (hard_iface->if_status != BATADV_IF_ACTIVE) {
		seq_puts(seq, "Interface not active\n");
		goto out;
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
		   hard_iface->net_dev->dev_addr,
		   hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);

out:
	if (hard_iface)
		batadv_hardif_free_ref(hard_iface);
	return 0;
}

int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	u32 i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_add_if)
				ret = bao->bat_orig_add_if(orig_node,
							   max_if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	u32 i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_del_if)
				ret = bao->bat_orig_del_if(orig_node,
							   max_if_num,
							   hard_iface->if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}