net/batman-adv/originator.c

/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "originator.h"
#include "main.h"

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "hash.h"
#include "multicast.h"
#include "network-coding.h"
#include "routing.h"
#include "translation-table.h"

/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);

/* returns 1 if they are the same originator */
int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_orig_node,
					 hash_entry);

	return batadv_compare_eth(data1, data2);
}

/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns the vlan object identified by vid and belonging to orig_node or NULL
 * if it does not exist.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan = NULL, *tmp;

	rcu_read_lock();
	list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
		if (tmp->vid != vid)
			continue;

		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		vlan = tmp;

		break;
	}
	rcu_read_unlock();

	return vlan;
}

/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 *  object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	spin_lock_bh(&orig_node->vlan_list_lock);

	/* first look if an object for this vid already exists */
	vlan = batadv_orig_node_vlan_get(orig_node, vid);
	if (vlan)
		goto out;

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan)
		goto out;

	atomic_set(&vlan->refcount, 2);
	vlan->vid = vid;

	list_add_rcu(&vlan->list, &orig_node->vlan_list);

out:
	spin_unlock_bh(&orig_node->vlan_list_lock);

	return vlan;
}

/**
 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
 *  the originator-vlan object
 * @orig_vlan: the originator-vlan object to release
 */
void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
{
	if (atomic_dec_and_test(&orig_vlan->refcount))
		kfree_rcu(orig_vlan, rcu);
}

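/**
 * batadv_originator_init - allocate the originator hash and schedule the
 *  periodic originator purge work
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Returns 0 on success (or if the hash already exists), -ENOMEM on allocation
 * failure.
 */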
int batadv_originator_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 0;

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	batadv_hash_set_lock_class(bat_priv->orig_hash,
				   &batadv_orig_hash_lock_class_key);

	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

	return 0;

err:
	return -ENOMEM;
}

/**
 * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
 * @rcu: rcu pointer of the neigh_ifinfo object
 */
static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);

	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);

	kfree(neigh_ifinfo);
}

/**
 * batadv_neigh_ifinfo_free_ref_now - decrement the refcounter and possibly
 *  free the neigh_ifinfo (without rcu callback)
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
static void
batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
		batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
}

/**
 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
 *  the neigh_ifinfo
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
		call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
}

/**
 * batadv_neigh_node_free_rcu - free the neigh_node
 * @rcu: rcu pointer of the neigh_node
 */
static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_algo_ops *bao;

	neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
	bao = neigh_node->orig_node->bat_priv->bat_algo_ops;

	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh_node->ifinfo_list, list) {
		batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
	}

	if (bao->bat_neigh_free)
		bao->bat_neigh_free(neigh_node);

	batadv_hardif_free_ref_now(neigh_node->if_incoming);

	kfree(neigh_node);
}

/**
 * batadv_neigh_node_free_ref_now - decrement the neighbor's refcounter and
 *  possibly free it (without rcu callback)
 * @neigh_node: the neighbor to free
 */
static void
batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		batadv_neigh_node_free_rcu(&neigh_node->rcu);
}

/**
 * batadv_neigh_node_free_ref - decrement the neighbor's refcounter and
 *  possibly free it
 * @neigh_node: the neighbor to free
 */
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
}

/**
 * batadv_orig_router_get - router to the originator depending on iface
 * @orig_node: the orig node for the router
 * @if_outgoing: the interface where the payload packet has been received or
 *  the OGM should be sent to
 *
 * Returns the neighbor which should be router for this orig_node/iface.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_node *
batadv_orig_router_get(struct batadv_orig_node *orig_node,
		       const struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
		if (orig_ifinfo->if_outgoing != if_outgoing)
			continue;

		router = rcu_dereference(orig_ifinfo->router);
		break;
	}

	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}

/**
 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns the requested orig_ifinfo or NULL if not found.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
				 list) {
		if (tmp->if_outgoing != if_outgoing)
			continue;

		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		orig_ifinfo = tmp;
		break;
	}
	rcu_read_unlock();

	return orig_ifinfo;
}

/**
 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the orig_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo = NULL;
	unsigned long reset_time;

	spin_lock_bh(&orig_node->neigh_list_lock);

	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
	if (orig_ifinfo)
		goto out;

	orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
	if (!orig_ifinfo)
		goto out;

	if (if_outgoing != BATADV_IF_DEFAULT &&
	    !atomic_inc_not_zero(&if_outgoing->refcount)) {
		kfree(orig_ifinfo);
		orig_ifinfo = NULL;
		goto out;
	}

	reset_time = jiffies - 1;
	reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_ifinfo->batman_seqno_reset = reset_time;
	orig_ifinfo->if_outgoing = if_outgoing;
	INIT_HLIST_NODE(&orig_ifinfo->list);
	atomic_set(&orig_ifinfo->refcount, 2);
	hlist_add_head_rcu(&orig_ifinfo->list,
			   &orig_node->ifinfo_list);
out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return orig_ifinfo;
}

/**
 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * The object is returned with refcounter increased by 1.
 *
 * Returns the requested neigh_ifinfo or NULL if not found.
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
				   *tmp_neigh_ifinfo;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
				 list) {
		if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))
			continue;

		neigh_ifinfo = tmp_neigh_ifinfo;
		break;
	}
	rcu_read_unlock();

	return neigh_ifinfo;
}

/**
 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the neigh_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	spin_lock_bh(&neigh->ifinfo_lock);

	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
	if (neigh_ifinfo)
		goto out;

	neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
	if (!neigh_ifinfo)
		goto out;

	if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
		kfree(neigh_ifinfo);
		neigh_ifinfo = NULL;
		goto out;
	}

	INIT_HLIST_NODE(&neigh_ifinfo->list);
	atomic_set(&neigh_ifinfo->refcount, 2);
	neigh_ifinfo->if_outgoing = if_outgoing;

	hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);

out:
	spin_unlock_bh(&neigh->ifinfo_lock);

	return neigh_ifinfo;
}

/**
 * batadv_neigh_node_new - create and init a new neigh_node object
 * @hard_iface: the interface where the neighbour is connected to
 * @neigh_addr: the mac address of the neighbour interface
 * @orig_node: originator object representing the neighbour
 *
 * Allocates a new neigh_node object and initialises all the generic fields.
 * Returns the new object or NULL on failure.
 */
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
		      const uint8_t *neigh_addr,
		      struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *neigh_node;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
	spin_lock_init(&neigh_node->ifinfo_lock);

	ether_addr_copy(neigh_node->addr, neigh_addr);
	neigh_node->if_incoming = hard_iface;
	neigh_node->orig_node = orig_node;

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

out:
	return neigh_node;
}

/**
 * batadv_neigh_node_get - retrieve a neighbour from the list
 * @orig_node: originator which the neighbour belongs to
 * @hard_iface: the interface where this neighbour is connected to
 * @addr: the address of the neighbour
 *
 * Looks for and possibly returns a neighbour belonging to this originator list
 * which is connected through the provided hard interface.
 * Returns NULL if the neighbour is not found.
 */
struct batadv_neigh_node *
batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
		      const struct batadv_hard_iface *hard_iface,
		      const uint8_t *addr)
{
	struct batadv_neigh_node *tmp_neigh_node, *res = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
		if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
			continue;

		if (tmp_neigh_node->if_incoming != hard_iface)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		res = tmp_neigh_node;
		break;
	}
	rcu_read_unlock();

	return res;
}

/**
 * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
 * @rcu: rcu pointer of the orig_ifinfo object
 */
static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router;

	orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);

	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);

	/* this is the last reference to this object */
	router = rcu_dereference_protected(orig_ifinfo->router, true);
	if (router)
		batadv_neigh_node_free_ref_now(router);
	kfree(orig_ifinfo);
}

/**
 * batadv_orig_ifinfo_free_ref_now - decrement the refcounter and possibly
 *  free the orig_ifinfo (without rcu callback)
 * @orig_ifinfo: the orig_ifinfo object to release
 */
static void
batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
{
	if (atomic_dec_and_test(&orig_ifinfo->refcount))
		batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
}

/**
 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
 *  the orig_ifinfo
 * @orig_ifinfo: the orig_ifinfo object to release
 */
void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
{
	if (atomic_dec_and_test(&orig_ifinfo->refcount))
		call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
}

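/**
 * batadv_orig_node_free_rcu - free an orig_node and the neighbor, ifinfo,
 *  fragment and translation-table state attached to it
 * @rcu: rcu pointer of the orig_node
 */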
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_orig_node *orig_node;
	struct batadv_orig_ifinfo *orig_ifinfo;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_free_ref_now(neigh_node);
	}

	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
	}
	spin_unlock_bh(&orig_node->neigh_list_lock);

	batadv_mcast_purge_orig(orig_node);

	/* Free nc_nodes */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

	batadv_frag_purge_orig(orig_node, NULL);

	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);

	kfree(orig_node->tt_buff);
	kfree(orig_node);
}

/**
 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
 *  schedule an rcu callback for freeing it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

/**
 * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
 *  possibly free it (without rcu callback)
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		batadv_orig_node_free_rcu(&orig_node->rcu);
}

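/**
 * batadv_originator_free - cancel the purge work and release all originators
 *  together with the originator hash itself
 * @bat_priv: the bat priv with all the soft interface information
 */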
void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(&orig_node->hash_entry);
			batadv_orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}

/**
 * batadv_orig_node_new - creates a new orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the mac address of the originator
 *
 * Creates a new originator object and initialises all the generic fields.
 * The new object is not added to the originator list.
 * Returns the newly created object or NULL on failure.
 */
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
					      const uint8_t *addr)
{
	struct batadv_orig_node *orig_node;
	struct batadv_orig_node_vlan *vlan;
	unsigned long reset_time;
	int i;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_LIST_HEAD(&orig_node->vlan_list);
	INIT_HLIST_HEAD(&orig_node->ifinfo_list);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);
	spin_lock_init(&orig_node->tt_lock);
	spin_lock_init(&orig_node->vlan_list_lock);

	batadv_nc_init_orig(orig_node);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->bat_priv = bat_priv;
	ether_addr_copy(orig_node->orig, addr);
	batadv_dat_init_orig_node_addr(orig_node);
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	orig_node->last_seen = jiffies;
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;

#ifdef CONFIG_BATMAN_ADV_MCAST
	orig_node->mcast_flags = BATADV_NO_FLAGS;
#endif

	/* create a vlan object for the "untagged" LAN */
	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
	if (!vlan)
		goto free_orig_node;
	/* batadv_orig_node_vlan_new() increases the refcounter.
	 * Immediately release vlan since it is not needed anymore in this
	 * context
	 */
	batadv_orig_node_vlan_free_ref(vlan);

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
		spin_lock_init(&orig_node->fragments[i].lock);
		orig_node->fragments[i].size = 0;
	}

	return orig_node;
free_orig_node:
	kfree(orig_node);
	return NULL;
}

/**
 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
 * @bat_priv: the bat priv with all the soft interface information
 * @neigh: neigh node which is to be checked
 */
static void
batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
			  struct batadv_neigh_node *neigh)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;

	spin_lock_bh(&neigh->ifinfo_lock);

	/* for all ifinfo objects for this neighbor */
	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh->ifinfo_list, list) {
		if_outgoing = neigh_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
			   neigh->addr, if_outgoing->net_dev->name);

		hlist_del_rcu(&neigh_ifinfo->list);
		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
	}

	spin_unlock_bh(&neigh->ifinfo_lock);
}

/**
 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any ifinfo entry was purged, false otherwise.
 */
static bool
batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
			 struct batadv_orig_node *orig_node)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;
	bool ifinfo_purged = false;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all ifinfo objects for this originator */
	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		if_outgoing = orig_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "router/ifinfo purge: originator %pM, iface: %s\n",
			   orig_node->orig, if_outgoing->net_dev->name);

		ifinfo_purged = true;

		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref(orig_ifinfo);
		if (orig_node->last_bonding_candidate == orig_ifinfo) {
			orig_node->last_bonding_candidate = NULL;
			batadv_orig_ifinfo_free_ref(orig_ifinfo);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	return ifinfo_purged;
}

/**
 * batadv_purge_orig_neighbors - purges neighbors from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any neighbor was purged, false otherwise.
 */
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
			else
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			batadv_neigh_node_free_ref(neigh_node);
		} else {
			/* only necessary if not the whole neighbor is to be
			 * deleted, but some interface has been removed.
			 */
			batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

/**
 * batadv_find_best_neighbor - finds the best neighbor after purging
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 * @if_outgoing: the interface for which the metric should be compared
 *
 * Returns the current best neighbor, with refcount increased.
 */
static struct batadv_neigh_node *
batadv_find_best_neighbor(struct batadv_priv *bat_priv,
			  struct batadv_orig_node *orig_node,
			  struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_node *best = NULL, *neigh;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;

	rcu_read_lock();
	hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
		if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
						best, if_outgoing) <= 0))
			continue;

		if (!atomic_inc_not_zero(&neigh->refcount))
			continue;

		if (best)
			batadv_neigh_node_free_ref(best);

		best = neigh;
	}
	rcu_read_unlock();

	return best;
}

/**
 * batadv_purge_orig_node - purges obsolete information from an orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * This function checks if the orig_node or substructures of it have become
 * obsolete, and purges this information if that's the case.
 *
 * Returns true if the orig_node is to be removed, false otherwise.
 */
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *best_neigh_node;
	struct batadv_hard_iface *hard_iface;
	bool changed_ifinfo, changed_neigh;

	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   orig_node->orig,
			   jiffies_to_msecs(orig_node->last_seen));
		return true;
	}

	changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
	changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);

	if (!changed_ifinfo && !changed_neigh)
		return false;

	/* first for NULL ... */
	best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
						    BATADV_IF_DEFAULT);
	batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
			    best_neigh_node);
	if (best_neigh_node)
		batadv_neigh_node_free_ref(best_neigh_node);

	/* ... then for all other interfaces. */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		best_neigh_node = batadv_find_best_neighbor(bat_priv,
							    orig_node,
							    hard_iface);
		batadv_update_route(bat_priv, orig_node, hard_iface,
				    best_neigh_node);
		if (best_neigh_node)
			batadv_neigh_node_free_ref(best_neigh_node);
	}
	rcu_read_unlock();

	return false;
}

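/**
 * _batadv_purge_orig - remove timed out originators from the originator hash,
 *  purge stale fragments and update the gateway selection
 * @bat_priv: the bat priv with all the soft interface information
 */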
static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	uint32_t i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				batadv_gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(&orig_node->hash_entry);
				batadv_tt_global_del_orig(orig_node->bat_priv,
							  orig_node, -1,
							  "originator timed out");
				batadv_orig_node_free_ref(orig_node);
				continue;
			}

			batadv_frag_purge_orig(orig_node,
					       batadv_frag_check_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_gw_node_purge(bat_priv);
	batadv_gw_election(bat_priv);
}

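/**
 * batadv_purge_orig - periodic originator purge worker
 * @work: work item of the delayed originator purge work
 *
 * Purges the originator table and re-queues itself after
 * BATADV_ORIG_WORK_PERIOD milliseconds.
 */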
static void batadv_purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}

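/**
 * batadv_purge_orig_ref - trigger an immediate originator purge run
 * @bat_priv: the bat priv with all the soft interface information
 */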
void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}

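/**
 * batadv_orig_seq_print_text - print the originator table of the default
 *  interface to a debugfs seq_file
 * @seq: debugfs table seq_file struct
 * @offset: not used
 *
 * Returns 0
 */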
int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	batadv_hardif_free_ref(primary_if);

	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
					       BATADV_IF_DEFAULT);

	return 0;
}

/**
 * batadv_orig_hardif_seq_print_text - writes originator infos for a specific
 *  outgoing interface
 * @seq: debugfs table seq_file struct
 * @offset: not used
 *
 * Returns 0
 */
int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_hard_iface *hard_iface;
	struct batadv_priv *bat_priv;

	hard_iface = batadv_hardif_get_by_netdev(net_dev);

	if (!hard_iface || !hard_iface->soft_iface) {
		seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
		goto out;
	}

	bat_priv = netdev_priv(hard_iface->soft_iface);
	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		goto out;
	}

	if (hard_iface->if_status != BATADV_IF_ACTIVE) {
		seq_puts(seq, "Interface not active\n");
		goto out;
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
		   hard_iface->net_dev->dev_addr,
		   hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);

out:
	if (hard_iface)
		batadv_hardif_free_ref(hard_iface);
	return 0;
}

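/**
 * batadv_orig_hash_add_if - resize the per-interface data of all originators
 *  after a hard interface has been added
 * @hard_iface: the hard interface that was added
 * @max_if_num: the new number of interfaces
 *
 * Returns 0 on success, -ENOMEM if an originator could not be resized.
 */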
int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_add_if)
				ret = bao->bat_orig_add_if(orig_node,
							   max_if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

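/**
 * batadv_orig_hash_del_if - resize the per-interface data of all originators
 *  and renumber the remaining interfaces after a hard interface was removed
 * @hard_iface: the hard interface being removed
 * @max_if_num: the new number of interfaces
 *
 * Returns 0 on success, -ENOMEM if an originator could not be resized.
 */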
int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	uint32_t i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_del_if)
				ret = bao->bat_orig_del_if(orig_node,
							   max_if_num,
							   hard_iface->if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}