/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "send.h"
#include "hash.h"
#include "originator.h"
#include "routing.h"

#include <linux/crc16.h>

static void _tt_global_del(struct bat_priv *bat_priv,
			   struct tt_global_entry *tt_global_entry,
			   const char *message);
static void tt_purge(struct work_struct *work);

/* returns 1 if they are the same mac addr */
static int compare_tt(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct tt_common_entry,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

static void tt_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
	queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work,
			   msecs_to_jiffies(5000));
}

static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
					    const void *data)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
	uint32_t index;

	if (!hash)
		return NULL;

	index = choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
		if (!compare_eth(tt_common_entry, data))
			continue;

		if (!atomic_inc_not_zero(&tt_common_entry->refcount))
			continue;

		tt_common_entry_tmp = tt_common_entry;
		break;
	}
	rcu_read_unlock();

	return tt_common_entry_tmp;
}
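
/* Note on the lookup pattern above: the bucket is walked under
 * rcu_read_lock() and an entry is only handed out after its refcount has
 * been raised with atomic_inc_not_zero(), so a concurrent delete cannot
 * free it while the caller still uses it.  A caller is expected to drop
 * the reference again, roughly like this (sketch, using the wrappers and
 * free helpers defined below):
 *
 *	entry = tt_local_hash_find(bat_priv, mac);
 *	if (entry) {
 *		... use entry ...
 *		tt_local_entry_free_ref(entry);
 *	}
 */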

static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
						 const void *data)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_local_entry *tt_local_entry = NULL;

	tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data);
	if (tt_common_entry)
		tt_local_entry = container_of(tt_common_entry,
					      struct tt_local_entry, common);
	return tt_local_entry;
}

static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
						   const void *data)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry = NULL;

	tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data);
	if (tt_common_entry)
		tt_global_entry = container_of(tt_common_entry,
					       struct tt_global_entry, common);
	return tt_global_entry;
}

static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
{
	unsigned long deadline;
	deadline = starting_time + msecs_to_jiffies(timeout);

	return time_after(jiffies, deadline);
}
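
/* Example: is_out_of_time(entry->last_seen, TT_LOCAL_TIMEOUT * 1000)
 * converts the timeout (passed in milliseconds throughout this file) to
 * jiffies and returns true once "now" is past last_seen + timeout.
 * time_after() handles jiffies wrap-around correctly. */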

static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
{
	if (atomic_dec_and_test(&tt_local_entry->common.refcount))
		kfree_rcu(tt_local_entry, common.rcu);
}

static void tt_global_entry_free_rcu(struct rcu_head *rcu)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;

	tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
	tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
				       common);

	if (tt_global_entry->orig_node)
		orig_node_free_ref(tt_global_entry->orig_node);

	kfree(tt_global_entry);
}

static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
{
	if (atomic_dec_and_test(&tt_global_entry->common.refcount))
		call_rcu(&tt_global_entry->common.rcu,
			 tt_global_entry_free_rcu);
}

static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
			   uint8_t flags)
{
	struct tt_change_node *tt_change_node;

	tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);

	if (!tt_change_node)
		return;

	tt_change_node->change.flags = flags;
	memcpy(tt_change_node->change.addr, addr, ETH_ALEN);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	/* track the change in the OGMinterval list */
	list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
	atomic_inc(&bat_priv->tt_local_changes);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
}

int tt_len(int changes_num)
{
	return changes_num * sizeof(struct tt_change);
}
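
/* tt_len() converts a number of table changes into the byte length of the
 * corresponding buffer, e.g. a diff of 3 changes occupies
 * 3 * sizeof(struct tt_change) bytes appended after the OGM or TT_QUERY
 * header. */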

static int tt_local_init(struct bat_priv *bat_priv)
{
	if (bat_priv->tt_local_hash)
		return 1;

	bat_priv->tt_local_hash = hash_new(1024);

	if (!bat_priv->tt_local_hash)
		return 0;

	return 1;
}

void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
		  int ifindex)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	int hash_added;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);

	if (tt_local_entry) {
		tt_local_entry->last_seen = jiffies;
		goto out;
	}

	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
	if (!tt_local_entry)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Creating new local tt entry: %pM (ttvn: %d)\n", addr,
		(uint8_t)atomic_read(&bat_priv->ttvn));

	memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
	tt_local_entry->common.flags = NO_FLAGS;
	if (is_wifi_iface(ifindex))
		tt_local_entry->common.flags |= TT_CLIENT_WIFI;
	atomic_set(&tt_local_entry->common.refcount, 2);
	tt_local_entry->last_seen = jiffies;

	/* the batman interface mac address should never be purged */
	if (compare_eth(addr, soft_iface->dev_addr))
		tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;

	hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig,
			      &tt_local_entry->common,
			      &tt_local_entry->common.hash_entry);

	if (unlikely(hash_added != 0)) {
		/* remove the reference for the hash */
		tt_local_entry_free_ref(tt_local_entry);
		goto out;
	}

	tt_local_event(bat_priv, addr, tt_local_entry->common.flags);

	/* The local entry has to be marked as NEW to avoid sending it in
	 * a full table response going out before the next ttvn increment
	 * (consistency check) */
	tt_local_entry->common.flags |= TT_CLIENT_NEW;

	/* remove address from global hash if present */
	tt_global_entry = tt_global_hash_find(bat_priv, addr);

	/* Check whether it is a roaming event */
	if (tt_global_entry) {
		/* This node is probably going to update its tt table */
		tt_global_entry->orig_node->tt_poss_change = true;
		/* The global entry has to be marked as PENDING and has to be
		 * kept for consistency purposes */
		tt_global_entry->common.flags |= TT_CLIENT_PENDING;
		send_roam_adv(bat_priv, tt_global_entry->common.addr,
			      tt_global_entry->orig_node);
	}
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}
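
/* Roaming detection (see above): if a MAC address that just appeared
 * locally is still listed in the global table, the client presumably
 * roamed here from another originator.  That originator is flagged via
 * tt_poss_change and informed with a ROAMING_ADV, so it can mark the
 * client for removal without waiting for its own timeout. */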

int tt_changes_fill_buffer(struct bat_priv *bat_priv,
			   unsigned char *buff, int buff_len)
{
	int count = 0, tot_changes = 0;
	struct tt_change_node *entry, *safe;

	if (buff_len > 0)
		tot_changes = buff_len / tt_len(1);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	atomic_set(&bat_priv->tt_local_changes, 0);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		if (count < tot_changes) {
			memcpy(buff + tt_len(count),
			       &entry->change, sizeof(struct tt_change));
			count++;
		}
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	/* Keep the buffer for possible tt_request */
	spin_lock_bh(&bat_priv->tt_buff_lock);
	kfree(bat_priv->tt_buff);
	bat_priv->tt_buff_len = 0;
	bat_priv->tt_buff = NULL;
	/* We check whether this new OGM has no changes due to size
	 * problems */
	if (buff_len > 0) {
		/*
		 * if kmalloc() fails we will reply with the full table
		 * instead of providing the diff
		 */
		bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC);
		if (bat_priv->tt_buff) {
			memcpy(bat_priv->tt_buff, buff, buff_len);
			bat_priv->tt_buff_len = buff_len;
		}
	}
	spin_unlock_bh(&bat_priv->tt_buff_lock);

	return tot_changes;
}
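
/* The copy kept in bat_priv->tt_buff above is the "last diff" that was
 * advertised with the local OGM.  It allows a later TT_REQUEST for the
 * same ttvn to be answered without rebuilding the change list; if the
 * kmalloc() failed, send_my_tt_response() simply falls back to a full
 * table reply. */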

int tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_common_entry *tt_common_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq, "Locally retrieved addresses (from %s) "
		   "announced via TT (TTVN: %u):\n",
		   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
				   tt_common_entry->addr,
				   (tt_common_entry->flags &
				    TT_CLIENT_ROAM ? 'R' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_NOPURGE ? 'P' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_NEW ? 'N' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_PENDING ? 'X' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_WIFI ? 'W' : '.'));
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}

static void tt_local_set_pending(struct bat_priv *bat_priv,
				 struct tt_local_entry *tt_local_entry,
				 uint16_t flags)
{
	tt_local_event(bat_priv, tt_local_entry->common.addr,
		       tt_local_entry->common.flags | flags);

	/* The local client has to be marked as "pending to be removed" but has
	 * to be kept in the table in order to send it in a full table
	 * response issued before the next ttvn increment (consistency check) */
	tt_local_entry->common.flags |= TT_CLIENT_PENDING;
}

void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
		     const char *message, bool roaming)
{
	struct tt_local_entry *tt_local_entry = NULL;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);
	if (!tt_local_entry)
		goto out;

	tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
			     (roaming ? TT_CLIENT_ROAM : NO_FLAGS));

	bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: "
		"%s\n", tt_local_entry->common.addr, message);
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
}

static void tt_local_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct tt_common_entry *tt_common_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
				continue;

			/* entry already marked for deletion */
			if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
				continue;

			if (!is_out_of_time(tt_local_entry->last_seen,
					    TT_LOCAL_TIMEOUT * 1000))
				continue;

			tt_local_set_pending(bat_priv, tt_local_entry,
					     TT_CLIENT_DEL);
			bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) "
				"pending to be removed: timed out\n",
				tt_local_entry->common.addr);
		}
		spin_unlock_bh(list_lock);
	}
}

static void tt_local_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_common_entry *tt_common_entry;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	if (!bat_priv->tt_local_hash)
		return;

	hash = bat_priv->tt_local_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);

	bat_priv->tt_local_hash = NULL;
}

static int tt_global_init(struct bat_priv *bat_priv)
{
	if (bat_priv->tt_global_hash)
		return 1;

	bat_priv->tt_global_hash = hash_new(1024);

	if (!bat_priv->tt_global_hash)
		return 0;

	return 1;
}

static void tt_changes_list_free(struct bat_priv *bat_priv)
{
	struct tt_change_node *entry, *safe;

	spin_lock_bh(&bat_priv->tt_changes_list_lock);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		list_del(&entry->list);
		kfree(entry);
	}

	atomic_set(&bat_priv->tt_local_changes, 0);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
}

/* caller must hold orig_node refcount */
int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
		  const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
		  bool wifi)
{
	struct tt_global_entry *tt_global_entry;
	struct orig_node *orig_node_tmp;
	int ret = 0;
	int hash_added;

	tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);

	if (!tt_global_entry) {
		tt_global_entry = kmalloc(sizeof(*tt_global_entry),
					  GFP_ATOMIC);
		if (!tt_global_entry)
			goto out;

		memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
		tt_global_entry->common.flags = NO_FLAGS;
		atomic_set(&tt_global_entry->common.refcount, 2);
		/* Assign the new orig_node */
		atomic_inc(&orig_node->refcount);
		tt_global_entry->orig_node = orig_node;
		tt_global_entry->ttvn = ttvn;
		tt_global_entry->roam_at = 0;

		hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
				      choose_orig, &tt_global_entry->common,
				      &tt_global_entry->common.hash_entry);

		if (unlikely(hash_added != 0)) {
			/* remove the reference for the hash */
			tt_global_entry_free_ref(tt_global_entry);
			goto out_remove;
		}
		atomic_inc(&orig_node->tt_size);
	} else {
		if (tt_global_entry->orig_node != orig_node) {
			atomic_dec(&tt_global_entry->orig_node->tt_size);
			orig_node_tmp = tt_global_entry->orig_node;
			atomic_inc(&orig_node->refcount);
			tt_global_entry->orig_node = orig_node;
			orig_node_free_ref(orig_node_tmp);
			atomic_inc(&orig_node->tt_size);
		}
		tt_global_entry->common.flags = NO_FLAGS;
		tt_global_entry->ttvn = ttvn;
		tt_global_entry->roam_at = 0;
	}

	if (wifi)
		tt_global_entry->common.flags |= TT_CLIENT_WIFI;

	bat_dbg(DBG_TT, bat_priv,
		"Creating new global tt entry: %pM (via %pM)\n",
		tt_global_entry->common.addr, orig_node->orig);

out_remove:
	/* remove address from local hash if present */
	tt_local_remove(bat_priv, tt_global_entry->common.addr,
			"global tt received", roaming);
	ret = 1;
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	return ret;
}
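
/* tt_global_add() is also where a client "moves": when the entry already
 * exists but points to a different originator, the old originator's
 * tt_size is decremented, the reference is switched over and the
 * flags/ttvn are reset.  In either branch the address is then removed
 * from the local table, since a client cannot be local and global at the
 * same time. */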

int tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
				 "specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Globally announced TT entries received via the mesh %s\n",
		   net_dev->name);
	seq_printf(seq, " %-13s %s %-15s %s %s\n",
		   "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			seq_printf(seq, " * %pM (%3u) via %pM (%3u) "
				   "[%c%c%c]\n",
				   tt_global_entry->common.addr,
				   tt_global_entry->ttvn,
				   tt_global_entry->orig_node->orig,
				   (uint8_t) atomic_read(
						&tt_global_entry->orig_node->
						last_ttvn),
				   (tt_global_entry->common.flags &
				    TT_CLIENT_ROAM ? 'R' : '.'),
				   (tt_global_entry->common.flags &
				    TT_CLIENT_PENDING ? 'X' : '.'),
				   (tt_global_entry->common.flags &
				    TT_CLIENT_WIFI ? 'W' : '.'));
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}

static void _tt_global_del(struct bat_priv *bat_priv,
			   struct tt_global_entry *tt_global_entry,
			   const char *message)
{
	if (!tt_global_entry)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Deleting global tt entry %pM (via %pM): %s\n",
		tt_global_entry->common.addr, tt_global_entry->orig_node->orig,
		message);

	atomic_dec(&tt_global_entry->orig_node->tt_size);

	hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
		    tt_global_entry->common.addr);
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}

void tt_global_del(struct bat_priv *bat_priv,
		   struct orig_node *orig_node, const unsigned char *addr,
		   const char *message, bool roaming)
{
	struct tt_global_entry *tt_global_entry = NULL;

	tt_global_entry = tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	if (tt_global_entry->orig_node == orig_node) {
		if (roaming) {
			tt_global_entry->common.flags |= TT_CLIENT_ROAM;
			tt_global_entry->roam_at = jiffies;
			goto out;
		}
		_tt_global_del(bat_priv, tt_global_entry, message);
	}
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}

void tt_global_del_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node, const char *message)
{
	struct tt_global_entry *tt_global_entry;
	struct tt_common_entry *tt_common_entry;
	uint32_t i;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct hlist_node *node, *safe;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, safe,
					  head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			if (tt_global_entry->orig_node == orig_node) {
				bat_dbg(DBG_TT, bat_priv,
					"Deleting global tt entry %pM "
					"(via %pM): %s\n",
					tt_global_entry->common.addr,
					tt_global_entry->orig_node->orig,
					message);
				hlist_del_rcu(node);
				tt_global_entry_free_ref(tt_global_entry);
			}
		}
		spin_unlock_bh(list_lock);
	}
	atomic_set(&orig_node->tt_size, 0);
}

static void tt_global_roam_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
				continue;
			if (!is_out_of_time(tt_global_entry->roam_at,
					    TT_CLIENT_ROAM_TIMEOUT * 1000))
				continue;

			bat_dbg(DBG_TT, bat_priv, "Deleting global "
				"tt entry (%pM): Roaming timeout\n",
				tt_global_entry->common.addr);
			atomic_dec(&tt_global_entry->orig_node->tt_size);
			hlist_del_rcu(node);
			tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}
}

static void tt_global_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	if (!bat_priv->tt_global_hash)
		return;

	hash = bat_priv->tt_global_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);

	bat_priv->tt_global_hash = NULL;
}

static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
			    struct tt_global_entry *tt_global_entry)
{
	bool ret = false;

	if (tt_local_entry->common.flags & TT_CLIENT_WIFI &&
	    tt_global_entry->common.flags & TT_CLIENT_WIFI)
		ret = true;

	return ret;
}

struct orig_node *transtable_search(struct bat_priv *bat_priv,
				    const uint8_t *src, const uint8_t *addr)
{
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	struct orig_node *orig_node = NULL;

	if (src && atomic_read(&bat_priv->ap_isolation)) {
		tt_local_entry = tt_local_hash_find(bat_priv, src);
		if (!tt_local_entry)
			goto out;
	}

	tt_global_entry = tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	/* check whether the clients should not communicate due to AP
	 * isolation */
	if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
		goto out;

	/* A global client marked as PENDING has already moved from that
	 * originator */
	if (tt_global_entry->common.flags & TT_CLIENT_PENDING)
		goto out;

	if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
		goto out;

	orig_node = tt_global_entry->orig_node;

out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);

	return orig_node;
}
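
/* AP isolation (see _is_ap_isolated() above): when the ap_isolation
 * switch is enabled, two clients that are both marked TT_CLIENT_WIFI are
 * not allowed to talk to each other through the mesh, so the lookup
 * deliberately returns no originator for such a src/addr pair. */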

/* Calculates the checksum of the local table of a given orig_node */
uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
	uint16_t total = 0, total_one;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int j;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			if (compare_eth(tt_global_entry->orig_node,
					orig_node)) {
				/* Roaming clients are in the global table for
				 * consistency only. They must not be taken
				 * into account while computing the
				 * global crc */
				if (tt_common_entry->flags & TT_CLIENT_ROAM)
					continue;
				total_one = 0;
				for (j = 0; j < ETH_ALEN; j++)
					total_one = crc16_byte(total_one,
						tt_common_entry->addr[j]);
				total ^= total_one;
			}
		}
		rcu_read_unlock();
	}

	return total;
}
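
/* The table checksum is intentionally order independent: a CRC16 is
 * computed over each announced MAC address on its own and the per-entry
 * values are XORed together, so two nodes holding the same set of entries
 * compute the same value no matter how their hash tables are laid out.
 * Rough sketch of the per-entry step:
 *
 *	total_one = 0;
 *	for (j = 0; j < ETH_ALEN; j++)
 *		total_one = crc16_byte(total_one, addr[j]);
 *	total ^= total_one;
 */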

/* Calculates the checksum of the local table */
uint16_t tt_local_crc(struct bat_priv *bat_priv)
{
	uint16_t total = 0, total_one;
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_common_entry *tt_common_entry;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int j;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			/* clients that have not yet been committed must not
			 * be taken into account while computing the CRC */
			if (tt_common_entry->flags & TT_CLIENT_NEW)
				continue;
			total_one = 0;
			for (j = 0; j < ETH_ALEN; j++)
				total_one = crc16_byte(total_one,
						tt_common_entry->addr[j]);
			total ^= total_one;
		}
		rcu_read_unlock();
	}

	return total;
}

static void tt_req_list_free(struct bat_priv *bat_priv)
{
	struct tt_req_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_req_list_lock);

	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		list_del(&node->list);
		kfree(node);
	}

	spin_unlock_bh(&bat_priv->tt_req_list_lock);
}

void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
			 const unsigned char *tt_buff, uint8_t tt_num_changes)
{
	uint16_t tt_buff_len = tt_len(tt_num_changes);

	/* Replace the old buffer only if I received something in the
	 * last OGM (the OGM could carry no changes) */
	spin_lock_bh(&orig_node->tt_buff_lock);
	if (tt_buff_len > 0) {
		kfree(orig_node->tt_buff);
		orig_node->tt_buff_len = 0;
		orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
		if (orig_node->tt_buff) {
			memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
			orig_node->tt_buff_len = tt_buff_len;
		}
	}
	spin_unlock_bh(&orig_node->tt_buff_lock);
}

static void tt_req_purge(struct bat_priv *bat_priv)
{
	struct tt_req_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (is_out_of_time(node->issued_at,
				   TT_REQUEST_TIMEOUT * 1000)) {
			list_del(&node->list);
			kfree(node);
		}
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);
}

/* returns the pointer to the new tt_req_node struct if no request
 * has already been issued for this orig_node, NULL otherwise */
static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
					   struct orig_node *orig_node)
{
	struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;

	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
		if (compare_eth(tt_req_node_tmp, orig_node) &&
		    !is_out_of_time(tt_req_node_tmp->issued_at,
				    TT_REQUEST_TIMEOUT * 1000))
			goto unlock;
	}

	tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
	if (!tt_req_node)
		goto unlock;

	memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
	tt_req_node->issued_at = jiffies;

	list_add(&tt_req_node->list, &bat_priv->tt_req_list);
unlock:
	spin_unlock_bh(&bat_priv->tt_req_list_lock);
	return tt_req_node;
}

/* data_ptr is useless here, but has to be kept to respect the prototype */
static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
{
	const struct tt_common_entry *tt_common_entry = entry_ptr;

	if (tt_common_entry->flags & TT_CLIENT_NEW)
		return 0;
	return 1;
}

static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
{
	const struct tt_common_entry *tt_common_entry = entry_ptr;
	const struct tt_global_entry *tt_global_entry;
	const struct orig_node *orig_node = data_ptr;

	if (tt_common_entry->flags & TT_CLIENT_ROAM)
		return 0;

	tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
				       common);

	return (tt_global_entry->orig_node == orig_node);
}

static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
					      struct hashtable_t *hash,
					      struct hard_iface *primary_if,
					      int (*valid_cb)(const void *,
							      const void *),
					      void *cb_data)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_query_packet *tt_response;
	struct tt_change *tt_change;
	struct hlist_node *node;
	struct hlist_head *head;
	struct sk_buff *skb = NULL;
	uint16_t tt_tot, tt_count;
	ssize_t tt_query_size = sizeof(struct tt_query_packet);
	uint32_t i;

	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
		tt_len = primary_if->soft_iface->mtu - tt_query_size;
		tt_len -= tt_len % sizeof(struct tt_change);
	}
	tt_tot = tt_len / sizeof(struct tt_change);

	skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);
	tt_response = (struct tt_query_packet *)skb_put(skb,
						     tt_query_size + tt_len);
	tt_response->ttvn = ttvn;

	tt_change = (struct tt_change *)(skb->data + tt_query_size);
	tt_count = 0;

	rcu_read_lock();
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			if (tt_count == tt_tot)
				break;

			if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
				continue;

			memcpy(tt_change->addr, tt_common_entry->addr,
			       ETH_ALEN);
			tt_change->flags = NO_FLAGS;

			tt_count++;
			tt_change++;
		}
	}
	rcu_read_unlock();

	/* store in the message the number of entries we have successfully
	 * copied */
	tt_response->tt_data = htons(tt_count);

out:
	return skb;
}
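
/* Because fragmentation is not implemented for TT_QUERY packets, the
 * response built above is clamped to the soft-interface MTU: tt_len is
 * cut down to a multiple of sizeof(struct tt_change) that still fits, and
 * only entries accepted by valid_cb() (e.g. skipping NEW local clients or
 * ROAM global clients) are copied into the packet. */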

static int send_tt_request(struct bat_priv *bat_priv,
			   struct orig_node *dst_orig_node,
			   uint8_t ttvn, uint16_t tt_crc, bool full_table)
{
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_request;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if;
	struct tt_req_node *tt_req_node = NULL;
	int ret = 1;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet */
	tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
	if (!tt_req_node)
		goto out;

	skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	tt_request = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet));

	tt_request->packet_type = BAT_TT_QUERY;
	tt_request->version = COMPAT_VERSION;
	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
	tt_request->ttl = TTL;
	tt_request->ttvn = ttvn;
	tt_request->tt_data = tt_crc;
	tt_request->flags = TT_REQUEST;

	if (full_table)
		tt_request->flags |= TT_FULL_TABLE;

	neigh_node = orig_node_get_router(dst_orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv, "Sending TT_REQUEST to %pM via %pM "
		"[%c]\n", dst_orig_node->orig, neigh_node->addr,
		(full_table ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (ret)
		kfree_skb(skb);
	if (ret && tt_req_node) {
		spin_lock_bh(&bat_priv->tt_req_list_lock);
		list_del(&tt_req_node->list);
		spin_unlock_bh(&bat_priv->tt_req_list_lock);
		kfree(tt_req_node);
	}
	return ret;
}
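
/* On any failure path above ret stays non-zero, the skb (if any) is freed
 * and the tt_req_node registered via new_tt_req_node() is removed again,
 * so a new request towards the same originator is not blocked until
 * TT_REQUEST_TIMEOUT expires. */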

static bool send_other_tt_response(struct bat_priv *bat_priv,
				   struct tt_query_packet *tt_request)
{
	struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t orig_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for "
		"ttvn: %u (%pM) [%c]\n", tt_request->src,
		tt_request->ttvn, tt_request->dst,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	/* Let's get the orig node of the REAL destination */
	req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst);
	if (!req_dst_orig_node)
		goto out;

	res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src);
	if (!res_dst_orig_node)
		goto out;

	neigh_node = orig_node_get_router(res_dst_orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
	req_ttvn = tt_request->ttvn;

	/* I don't have the requested data */
	if (orig_ttvn != req_ttvn ||
	    tt_request->tt_data != req_dst_orig_node->tt_crc)
		goto out;

	/* If the full table has been explicitly requested */
	if (tt_request->flags & TT_FULL_TABLE ||
	    !req_dst_orig_node->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* Since fragmentation is not implemented in this version, only one
	 * packet is sent, carrying as many TT entries as fit */
	if (!full_table) {
		spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
		tt_len = req_dst_orig_node->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		/* Copy the last orig_node's OGM buffer */
		memcpy(tt_buff, req_dst_orig_node->tt_buff,
		       req_dst_orig_node->tt_buff_len);

		spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
	} else {
		tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
						sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_global_hash,
					     primary_if, tt_global_valid_entry,
					     req_dst_orig_node);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->packet_type = BAT_TT_QUERY;
	tt_response->version = COMPAT_VERSION;
	tt_response->ttl = TTL;
	memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
		res_dst_orig_node->orig, neigh_node->addr,
		req_dst_orig_node->orig, req_ttvn);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);

out:
	if (res_dst_orig_node)
		orig_node_free_ref(res_dst_orig_node);
	if (req_dst_orig_node)
		orig_node_free_ref(req_dst_orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	return ret;
}

static bool send_my_tt_response(struct bat_priv *bat_priv,
				struct tt_query_packet *tt_request)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t my_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for "
		"ttvn: %u (me) [%c]\n", tt_request->src,
		tt_request->ttvn,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	req_ttvn = tt_request->ttvn;

	orig_node = orig_hash_find(bat_priv, tt_request->src);
	if (!orig_node)
		goto out;

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* If the full table has been explicitly requested or the gap
	 * is too big send the whole local translation table */
	if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
	    !bat_priv->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* Since fragmentation is not implemented in this version, only one
	 * packet is sent, carrying as many TT entries as fit */
	if (!full_table) {
		spin_lock_bh(&bat_priv->tt_buff_lock);
		tt_len = bat_priv->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		memcpy(tt_buff, bat_priv->tt_buff,
		       bat_priv->tt_buff_len);
		spin_unlock_bh(&bat_priv->tt_buff_lock);
	} else {
		tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
						sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_local_hash,
					     primary_if, tt_local_valid_entry,
					     NULL);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->packet_type = BAT_TT_QUERY;
	tt_response->version = COMPAT_VERSION;
	tt_response->ttl = TTL;
	memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE to %pM via %pM [%c]\n",
		orig_node->orig, neigh_node->addr,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&bat_priv->tt_buff_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	/* This packet was for me, so it doesn't need to be re-routed */
	return true;
}

bool send_tt_response(struct bat_priv *bat_priv,
		      struct tt_query_packet *tt_request)
{
	if (is_my_mac(tt_request->dst))
		return send_my_tt_response(bat_priv, tt_request);
	else
		return send_other_tt_response(bat_priv, tt_request);
}
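
/* Dispatch helper: a TT_REQUEST addressed to one of our own MAC addresses
 * is answered from the local table (send_my_tt_response), while a request
 * for another originator is answered on its behalf from our global table
 * (send_other_tt_response), provided the ttvn/crc we hold still match. */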

static void _tt_update_changes(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct tt_change *tt_change,
			       uint16_t tt_num_changes, uint8_t ttvn)
{
	int i;

	for (i = 0; i < tt_num_changes; i++) {
		if ((tt_change + i)->flags & TT_CLIENT_DEL)
			tt_global_del(bat_priv, orig_node,
				      (tt_change + i)->addr,
				      "tt removed by changes",
				      (tt_change + i)->flags & TT_CLIENT_ROAM);
		else
			if (!tt_global_add(bat_priv, orig_node,
					   (tt_change + i)->addr, ttvn, false,
					   (tt_change + i)->flags &
							TT_CLIENT_WIFI))
				/* In case of problem while storing a
				 * global_entry, we stop the updating
				 * procedure without committing the
				 * ttvn change. This avoids sending
				 * corrupted data in reply to a tt_request
				 */
				return;
	}
}

static void tt_fill_gtable(struct bat_priv *bat_priv,
			   struct tt_query_packet *tt_response)
{
	struct orig_node *orig_node = NULL;

	orig_node = orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	/* Purge the old table first.. */
	tt_global_del_orig(bat_priv, orig_node, "Received full table");

	_tt_update_changes(bat_priv, orig_node,
			   (struct tt_change *)(tt_response + 1),
			   tt_response->tt_data, tt_response->ttvn);

	spin_lock_bh(&orig_node->tt_buff_lock);
	kfree(orig_node->tt_buff);
	orig_node->tt_buff_len = 0;
	orig_node->tt_buff = NULL;
	spin_unlock_bh(&orig_node->tt_buff_lock);

	atomic_set(&orig_node->last_ttvn, tt_response->ttvn);

out:
	if (orig_node)
		orig_node_free_ref(orig_node);
}

static void tt_update_changes(struct bat_priv *bat_priv,
			      struct orig_node *orig_node,
			      uint16_t tt_num_changes, uint8_t ttvn,
			      struct tt_change *tt_change)
{
	_tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
			   ttvn);

	tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
			    tt_num_changes);
	atomic_set(&orig_node->last_ttvn, ttvn);
}

bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
{
	struct tt_local_entry *tt_local_entry = NULL;
	bool ret = false;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);
	if (!tt_local_entry)
		goto out;
	/* Check if the client has been logically deleted (but is kept for
	 * consistency purpose) */
	if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
		goto out;
	ret = true;
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	return ret;
}

void handle_tt_response(struct bat_priv *bat_priv,
			struct tt_query_packet *tt_response)
{
	struct tt_req_node *node, *safe;
	struct orig_node *orig_node = NULL;

	bat_dbg(DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for "
		"ttvn %d t_size: %d [%c]\n",
		tt_response->src, tt_response->ttvn,
		tt_response->tt_data,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	orig_node = orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	if (tt_response->flags & TT_FULL_TABLE)
		tt_fill_gtable(bat_priv, tt_response);
	else
		tt_update_changes(bat_priv, orig_node, tt_response->tt_data,
				  tt_response->ttvn,
				  (struct tt_change *)(tt_response + 1));

	/* Delete the tt_req_node from pending tt_requests list */
	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (!compare_eth(node->addr, tt_response->src))
			continue;
		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);

	/* Recalculate the CRC for this orig_node and store it */
	orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
	/* Roaming phase is over: tables are in sync again. I can
	 * unset the flag */
	orig_node->tt_poss_change = false;
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
}

int tt_init(struct bat_priv *bat_priv)
{
	if (!tt_local_init(bat_priv))
		return 0;

	if (!tt_global_init(bat_priv))
		return 0;

	tt_start_timer(bat_priv);

	return 1;
}

static void tt_roam_list_free(struct bat_priv *bat_priv)
{
	struct tt_roam_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);

	list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
		list_del(&node->list);
		kfree(node);
	}

	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
}

static void tt_roam_purge(struct bat_priv *bat_priv)
{
	struct tt_roam_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
		if (!is_out_of_time(node->first_time,
				    ROAMING_MAX_TIME * 1000))
			continue;

		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
}

/* This function checks whether the client already reached the
 * maximum number of possible roaming phases. In this case the ROAMING_ADV
 * will not be sent.
 *
 * returns true if the ROAMING_ADV can be sent, false otherwise */
static bool tt_check_roam_count(struct bat_priv *bat_priv,
				uint8_t *client)
{
	struct tt_roam_node *tt_roam_node;
	bool ret = false;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	/* Check whether the client is already known and, if so, whether it
	 * is still allowed to roam */
	list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
		if (!compare_eth(tt_roam_node->addr, client))
			continue;

		if (is_out_of_time(tt_roam_node->first_time,
				   ROAMING_MAX_TIME * 1000))
			continue;

		if (!atomic_dec_not_zero(&tt_roam_node->counter))
			/* Sorry, you roamed too many times! */
			goto unlock;
		ret = true;
		break;
	}

	if (!ret) {
		tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
		if (!tt_roam_node)
			goto unlock;

		tt_roam_node->first_time = jiffies;
		atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
		memcpy(tt_roam_node->addr, client, ETH_ALEN);

		list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
		ret = true;
	}

unlock:
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
	return ret;
}
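
/* Rate limiting example: the first roaming event for a client allocates a
 * tt_roam_node with counter = ROAMING_MAX_COUNT - 1; every further event
 * within ROAMING_MAX_TIME decrements it via atomic_dec_not_zero(), and
 * once the counter hits zero no more ROAMING_ADV packets are sent for
 * that client until the node is purged by tt_roam_purge(). */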

void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
		   struct orig_node *orig_node)
{
	struct neigh_node *neigh_node = NULL;
	struct sk_buff *skb = NULL;
	struct roam_adv_packet *roam_adv_packet;
	int ret = 1;
	struct hard_iface *primary_if;

	/* before going on we have to check whether the client has
	 * already roamed to us too many times */
	if (!tt_check_roam_count(bat_priv, client))
		goto out;

	skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
					sizeof(struct roam_adv_packet));

	roam_adv_packet->packet_type = BAT_ROAM_ADV;
	roam_adv_packet->version = COMPAT_VERSION;
	roam_adv_packet->ttl = TTL;
	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;
	memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	hardif_free_ref(primary_if);
	memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
	memcpy(roam_adv_packet->client, client, ETH_ALEN);

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
		orig_node->orig, client, neigh_node->addr);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (ret)
		kfree_skb(skb);
	return;
}

static void tt_purge(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, tt_work);

	tt_local_purge(bat_priv);
	tt_global_roam_purge(bat_priv);
	tt_req_purge(bat_priv);
	tt_roam_purge(bat_priv);

	tt_start_timer(bat_priv);
}

void tt_free(struct bat_priv *bat_priv)
{
	cancel_delayed_work_sync(&bat_priv->tt_work);

	tt_local_table_free(bat_priv);
	tt_global_table_free(bat_priv);
	tt_req_list_free(bat_priv);
	tt_changes_list_free(bat_priv);
	tt_roam_list_free(bat_priv);

	kfree(bat_priv->tt_buff);
}

/* This function will enable or disable the specified flags for all the entries
 * in the given hash table and returns the number of modified entries */
static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags,
			     bool enable)
{
	uint32_t i;
	uint16_t changed_num = 0;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_common_entry *tt_common_entry;

	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			if (enable) {
				if ((tt_common_entry->flags & flags) == flags)
					continue;
				tt_common_entry->flags |= flags;
			} else {
				if (!(tt_common_entry->flags & flags))
					continue;
				tt_common_entry->flags &= ~flags;
			}
			changed_num++;
		}
		rcu_read_unlock();
	}
out:
	return changed_num;
}

/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
				continue;

			bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry "
				"(%pM): pending\n", tt_common_entry->addr);

			atomic_dec(&bat_priv->num_local_tt);
			hlist_del_rcu(node);
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}
}

void tt_commit_changes(struct bat_priv *bat_priv)
{
	uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash,
					    TT_CLIENT_NEW, false);
	/* all the reset entries have now to be effectively counted as local
	 * entries */
	atomic_add(changed_num, &bat_priv->num_local_tt);
	tt_local_purge_pending_clients(bat_priv);

	/* Increment the TTVN only once per OGM interval */
	atomic_inc(&bat_priv->ttvn);
	bat_priv->tt_poss_change = false;
}
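
/* Commit sequence (once per OGM interval): clear TT_CLIENT_NEW so freshly
 * learned clients become part of the announced table, really delete
 * everything marked TT_CLIENT_PENDING, and only then increment the ttvn
 * so that receivers can associate the attached diff with exactly one
 * table version. */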

bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
{
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	bool ret = true;

	if (!atomic_read(&bat_priv->ap_isolation))
		return false;

	tt_local_entry = tt_local_hash_find(bat_priv, dst);
	if (!tt_local_entry)
		goto out;

	tt_global_entry = tt_global_hash_find(bat_priv, src);
	if (!tt_global_entry)
		goto out;

	if (_is_ap_isolated(tt_local_entry, tt_global_entry))
		goto out;

	ret = false;

out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	return ret;
}

void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
		    const unsigned char *tt_buff, uint8_t tt_num_changes,
		    uint8_t ttvn, uint16_t tt_crc)
{
	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
	bool full_table = true;

	/* the ttvn increased by one -> we can apply the attached changes */
	if (ttvn - orig_ttvn == 1) {
		/* the OGM could not contain the changes due to their size or
		 * because they have already been sent TT_OGM_APPEND_MAX times.
		 * In this case send a tt request */
		if (!tt_num_changes) {
			full_table = false;
			goto request_table;
		}

		tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
				  (struct tt_change *)tt_buff);

		/* Even if we received the precomputed crc with the OGM, we
		 * prefer to recompute it to spot any possible inconsistency
		 * in the global table */
		orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);

		/* The ttvn alone is not enough to guarantee consistency
		 * because a single value could represent different states
		 * (due to the wrap around). Thus a node has to check whether
		 * the resulting table (after applying the changes) is still
		 * consistent or not. E.g. a node could disconnect while its
		 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
		 * checking the CRC value is mandatory to detect the
		 * inconsistency */
		if (orig_node->tt_crc != tt_crc)
			goto request_table;

		/* Roaming phase is over: tables are in sync again. I can
		 * unset the flag */
		orig_node->tt_poss_change = false;
	} else {
		/* if we missed more than one change or our tables are not
		 * in sync anymore -> request fresh tt data */
		if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
request_table:
			bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
				"Need to retrieve the correct information "
				"(ttvn: %u last_ttvn: %u crc: %u last_crc: "
				"%u num_changes: %u)\n", orig_node->orig, ttvn,
				orig_ttvn, tt_crc, orig_node->tt_crc,
				tt_num_changes);
			send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
					full_table);
			return;
		}
	}
}
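
/* Consistency check example: the ttvn is an 8 bit counter, so a node that
 * went away at ttvn X and comes back at X + 256 would appear to announce
 * the "same" version again.  Comparing the announced CRC against the
 * locally recomputed tt_global_crc() catches this case and triggers a
 * full TT_REQUEST instead of silently keeping a stale table. */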