/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "send.h"
#include "hash.h"
#include "originator.h"
#include "routing.h"

#include <linux/crc16.h>

static void _tt_global_del(struct bat_priv *bat_priv,
			   struct tt_global_entry *tt_global_entry,
			   const char *message);
static void tt_purge(struct work_struct *work);

/* returns 1 if they are the same mac addr */
static int compare_tt(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct tt_common_entry,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

static void tt_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
	queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work,
			   msecs_to_jiffies(5000));
}

static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
					    const void *data)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;
	uint32_t index;

	if (!hash)
		return NULL;

	index = choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
		if (!compare_eth(tt_common_entry, data))
			continue;

		if (!atomic_inc_not_zero(&tt_common_entry->refcount))
			continue;

		tt_common_entry_tmp = tt_common_entry;
		break;
	}
	rcu_read_unlock();

	return tt_common_entry_tmp;
}

static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
						 const void *data)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_local_entry *tt_local_entry = NULL;

	tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data);
	if (tt_common_entry)
		tt_local_entry = container_of(tt_common_entry,
					      struct tt_local_entry, common);
	return tt_local_entry;
}

static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
						   const void *data)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry = NULL;

	tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data);
	if (tt_common_entry)
		tt_global_entry = container_of(tt_common_entry,
					       struct tt_global_entry, common);
	return tt_global_entry;
}

static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
{
	unsigned long deadline;
	deadline = starting_time + msecs_to_jiffies(timeout);

	return time_after(jiffies, deadline);
}

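/* Note on the timeout constants used with is_out_of_time() throughout this
 * file (TT_LOCAL_TIMEOUT, TT_REQUEST_TIMEOUT, TT_CLIENT_ROAM_TIMEOUT,
 * ROAMING_MAX_TIME): they are given in seconds and multiplied by 1000 at the
 * call sites so that msecs_to_jiffies() receives milliseconds. */
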
static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
{
	if (atomic_dec_and_test(&tt_local_entry->common.refcount))
		kfree_rcu(tt_local_entry, common.rcu);
}

static void tt_global_entry_free_rcu(struct rcu_head *rcu)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;

	tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
	tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
				       common);

	if (tt_global_entry->orig_node)
		orig_node_free_ref(tt_global_entry->orig_node);

	kfree(tt_global_entry);
}

static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
{
	if (atomic_dec_and_test(&tt_global_entry->common.refcount))
		call_rcu(&tt_global_entry->common.rcu,
			 tt_global_entry_free_rcu);
}

static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
			   uint8_t flags)
{
	struct tt_change_node *tt_change_node;

	tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
	if (!tt_change_node)
		return;

	tt_change_node->change.flags = flags;
	memcpy(tt_change_node->change.addr, addr, ETH_ALEN);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	/* track the change in the OGM interval list */
	list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
	atomic_inc(&bat_priv->tt_local_changes);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
}

int tt_len(int changes_num)
{
	return changes_num * sizeof(struct tt_change);
}

static int tt_local_init(struct bat_priv *bat_priv)
{
	if (bat_priv->tt_local_hash)
		return 1;

	bat_priv->tt_local_hash = hash_new(1024);

	if (!bat_priv->tt_local_hash)
		return 0;

	return 1;
}

void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
		  int ifindex)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	int hash_added;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);

	if (tt_local_entry) {
		tt_local_entry->last_seen = jiffies;
		goto out;
	}

	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
	if (!tt_local_entry)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Creating new local tt entry: %pM (ttvn: %d)\n", addr,
		(uint8_t)atomic_read(&bat_priv->ttvn));

	memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
	tt_local_entry->common.flags = NO_FLAGS;
	if (is_wifi_iface(ifindex))
		tt_local_entry->common.flags |= TT_CLIENT_WIFI;
	atomic_set(&tt_local_entry->common.refcount, 2);
	tt_local_entry->last_seen = jiffies;

	/* the batman interface mac address should never be purged */
	if (compare_eth(addr, soft_iface->dev_addr))
		tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;

	hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig,
			      &tt_local_entry->common,
			      &tt_local_entry->common.hash_entry);

	if (unlikely(hash_added != 0)) {
		/* remove the reference for the hash */
		tt_local_entry_free_ref(tt_local_entry);
		goto out;
	}

	tt_local_event(bat_priv, addr, tt_local_entry->common.flags);

	/* The local entry has to be marked as NEW to avoid to send it in
	 * a full table response going out before the next ttvn increment
	 * (consistency check) */
	tt_local_entry->common.flags |= TT_CLIENT_NEW;

	/* remove address from global hash if present */
	tt_global_entry = tt_global_hash_find(bat_priv, addr);

	/* Check whether it is a roaming! */
	if (tt_global_entry) {
		/* This node is probably going to update its tt table */
		tt_global_entry->orig_node->tt_poss_change = true;
		/* The global entry has to be marked as PENDING and has to be
		 * kept for consistency purpose */
		tt_global_entry->common.flags |= TT_CLIENT_PENDING;
		send_roam_adv(bat_priv, tt_global_entry->common.addr,
			      tt_global_entry->orig_node);
	}
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}

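/* Roaming handling in tt_local_add(), in short: if the newly learned local
 * address is already known as a global client, the old originator is
 * informed via send_roam_adv() and its global entry is only flagged
 * TT_CLIENT_PENDING rather than deleted, so full table responses stay
 * consistent until the next TTVN increment. */
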
int tt_changes_fill_buffer(struct bat_priv *bat_priv,
			   unsigned char *buff, int buff_len)
{
	int count = 0, tot_changes = 0;
	struct tt_change_node *entry, *safe;

	if (buff_len > 0)
		tot_changes = buff_len / tt_len(1);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	atomic_set(&bat_priv->tt_local_changes, 0);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		if (count < tot_changes) {
			memcpy(buff + tt_len(count),
			       &entry->change, sizeof(struct tt_change));
			count++;
		}
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	/* Keep the buffer for possible tt_request */
	spin_lock_bh(&bat_priv->tt_buff_lock);
	kfree(bat_priv->tt_buff);
	bat_priv->tt_buff_len = 0;
	bat_priv->tt_buff = NULL;
	/* We check whether this new OGM has no changes due to size
	 * problems */
	if (buff_len > 0) {
		/* if kmalloc() fails we will reply with the full table
		 * instead of providing the diff */
		bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC);
		if (bat_priv->tt_buff) {
			memcpy(bat_priv->tt_buff, buff, buff_len);
			bat_priv->tt_buff_len = buff_len;
		}
	}
	spin_unlock_bh(&bat_priv->tt_buff_lock);

	return tot_changes;
}

int tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_common_entry *tt_common_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq, "Locally retrieved addresses (from %s) "
		   "announced via TT (TTVN: %u):\n",
		   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
				   tt_common_entry->addr,
				   (tt_common_entry->flags &
				    TT_CLIENT_ROAM ? 'R' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_NOPURGE ? 'P' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_NEW ? 'N' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_PENDING ? 'X' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_WIFI ? 'W' : '.'));
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}

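/* Flag characters used by the table dumps in this file: 'R' roaming client,
 * 'P' never purged (e.g. the soft-interface address itself), 'N' new and not
 * yet announced (committed with the next TTVN increment), 'X' pending
 * deletion but kept for consistency, 'W' client reached via a wifi
 * interface. */
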
static void tt_local_set_pending(struct bat_priv *bat_priv,
				 struct tt_local_entry *tt_local_entry,
				 uint16_t flags)
{
	tt_local_event(bat_priv, tt_local_entry->common.addr,
		       tt_local_entry->common.flags | flags);

	/* The local client has to be marked as "pending to be removed" but
	 * has to be kept in the table in order to send it in a full table
	 * response issued before the next ttvn increment (consistency
	 * check) */
	tt_local_entry->common.flags |= TT_CLIENT_PENDING;
}

void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
		     const char *message, bool roaming)
{
	struct tt_local_entry *tt_local_entry = NULL;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);
	if (!tt_local_entry)
		goto out;

	tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
			     (roaming ? TT_CLIENT_ROAM : NO_FLAGS));

	bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: "
		"%s\n", tt_local_entry->common.addr, message);
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
}

static void tt_local_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct tt_common_entry *tt_common_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
				continue;

			/* entry already marked for deletion */
			if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
				continue;

			if (!is_out_of_time(tt_local_entry->last_seen,
					    TT_LOCAL_TIMEOUT * 1000))
				continue;

			tt_local_set_pending(bat_priv, tt_local_entry,
					     TT_CLIENT_DEL);
			bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) "
				"pending to be removed: timed out\n",
				tt_local_entry->common.addr);
		}
		spin_unlock_bh(list_lock);
	}
}

static void tt_local_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_common_entry *tt_common_entry;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	if (!bat_priv->tt_local_hash)
		return;

	hash = bat_priv->tt_local_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);

	bat_priv->tt_local_hash = NULL;
}

static int tt_global_init(struct bat_priv *bat_priv)
{
	if (bat_priv->tt_global_hash)
		return 1;

	bat_priv->tt_global_hash = hash_new(1024);

	if (!bat_priv->tt_global_hash)
		return 0;

	return 1;
}

static void tt_changes_list_free(struct bat_priv *bat_priv)
{
	struct tt_change_node *entry, *safe;

	spin_lock_bh(&bat_priv->tt_changes_list_lock);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		list_del(&entry->list);
		kfree(entry);
	}

	atomic_set(&bat_priv->tt_local_changes, 0);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
}

/* caller must hold orig_node refcount */
int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
		  const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
		  bool wifi)
{
	struct tt_global_entry *tt_global_entry;
	struct orig_node *orig_node_tmp;
	int ret = 0;
	int hash_added;

	tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);

	if (!tt_global_entry) {
		tt_global_entry =
			kmalloc(sizeof(*tt_global_entry),
				GFP_ATOMIC);
		if (!tt_global_entry)
			goto out;

		memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
		tt_global_entry->common.flags = NO_FLAGS;
		atomic_set(&tt_global_entry->common.refcount, 2);
		/* Assign the new orig_node */
		atomic_inc(&orig_node->refcount);
		tt_global_entry->orig_node = orig_node;
		tt_global_entry->ttvn = ttvn;
		tt_global_entry->roam_at = 0;

		hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
				      choose_orig, &tt_global_entry->common,
				      &tt_global_entry->common.hash_entry);

		if (unlikely(hash_added != 0)) {
			/* remove the reference for the hash */
			tt_global_entry_free_ref(tt_global_entry);
			goto out_remove;
		}
		atomic_inc(&orig_node->tt_size);
	} else {
		if (tt_global_entry->orig_node != orig_node) {
			atomic_dec(&tt_global_entry->orig_node->tt_size);
			orig_node_tmp = tt_global_entry->orig_node;
			atomic_inc(&orig_node->refcount);
			tt_global_entry->orig_node = orig_node;
			orig_node_free_ref(orig_node_tmp);
			atomic_inc(&orig_node->tt_size);
		}
		tt_global_entry->common.flags = NO_FLAGS;
		tt_global_entry->ttvn = ttvn;
		tt_global_entry->roam_at = 0;
	}

	if (wifi)
		tt_global_entry->common.flags |= TT_CLIENT_WIFI;

	bat_dbg(DBG_TT, bat_priv,
		"Creating new global tt entry: %pM (via %pM)\n",
		tt_global_entry->common.addr, orig_node->orig);

out_remove:
	/* remove address from local hash if present */
	tt_local_remove(bat_priv, tt_global_entry->common.addr,
			"global tt received", roaming);
	ret = 1;
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	return ret;
}

int tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
				 "specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Globally announced TT entries received via the mesh %s\n",
		   net_dev->name);
	seq_printf(seq, "       %-13s %s       %-15s %s %s\n",
		   "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			seq_printf(seq, " * %pM  (%3u) via %pM     (%3u)   "
				   "[%c%c%c]\n",
				   tt_global_entry->common.addr,
				   tt_global_entry->ttvn,
				   tt_global_entry->orig_node->orig,
				   (uint8_t) atomic_read(
						&tt_global_entry->orig_node->
						last_ttvn),
				   (tt_global_entry->common.flags &
				    TT_CLIENT_ROAM ? 'R' : '.'),
				   (tt_global_entry->common.flags &
				    TT_CLIENT_PENDING ? 'X' : '.'),
				   (tt_global_entry->common.flags &
				    TT_CLIENT_WIFI ? 'W' : '.'));
		}
		rcu_read_unlock();
	}
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}

static void _tt_global_del(struct bat_priv *bat_priv,
			   struct tt_global_entry *tt_global_entry,
			   const char *message)
{
	if (!tt_global_entry)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Deleting global tt entry %pM (via %pM): %s\n",
		tt_global_entry->common.addr, tt_global_entry->orig_node->orig,
		message);

	atomic_dec(&tt_global_entry->orig_node->tt_size);

	hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
		    tt_global_entry->common.addr);
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}

void tt_global_del(struct bat_priv *bat_priv,
		   struct orig_node *orig_node, const unsigned char *addr,
		   const char *message, bool roaming)
{
	struct tt_global_entry *tt_global_entry = NULL;

	tt_global_entry = tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	if (tt_global_entry->orig_node == orig_node) {
		if (roaming) {
			tt_global_entry->common.flags |= TT_CLIENT_ROAM;
			tt_global_entry->roam_at = jiffies;
			goto out;
		}
		_tt_global_del(bat_priv, tt_global_entry, message);
	}
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}

void tt_global_del_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node, const char *message)
{
	struct tt_global_entry *tt_global_entry;
	struct tt_common_entry *tt_common_entry;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct hlist_node *node, *safe;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, safe,
					  head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			if (tt_global_entry->orig_node == orig_node) {
				bat_dbg(DBG_TT, bat_priv,
					"Deleting global tt entry %pM "
					"(via %pM): %s\n",
					tt_global_entry->common.addr,
					tt_global_entry->orig_node->orig,
					message);
				hlist_del_rcu(node);
				tt_global_entry_free_ref(tt_global_entry);
			}
		}
		spin_unlock_bh(list_lock);
	}
	atomic_set(&orig_node->tt_size, 0);
}

static void tt_global_roam_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
				continue;
			if (!is_out_of_time(tt_global_entry->roam_at,
					    TT_CLIENT_ROAM_TIMEOUT * 1000))
				continue;

			bat_dbg(DBG_TT, bat_priv, "Deleting global "
				"tt entry (%pM): Roaming timeout\n",
				tt_global_entry->common.addr);
			atomic_dec(&tt_global_entry->orig_node->tt_size);
			hlist_del_rcu(node);
			tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}
}

static void tt_global_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	if (!bat_priv->tt_global_hash)
		return;

	hash = bat_priv->tt_global_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);

	bat_priv->tt_global_hash = NULL;
}

static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
			    struct tt_global_entry *tt_global_entry)
{
	bool ret = false;

	if (tt_local_entry->common.flags & TT_CLIENT_WIFI &&
	    tt_global_entry->common.flags & TT_CLIENT_WIFI)
		ret = true;

	return ret;
}

struct orig_node *transtable_search(struct bat_priv *bat_priv,
				    const uint8_t *src, const uint8_t *addr)
{
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	struct orig_node *orig_node = NULL;

	if (src && atomic_read(&bat_priv->ap_isolation)) {
		tt_local_entry = tt_local_hash_find(bat_priv, src);
		if (!tt_local_entry)
			goto out;
	}

	tt_global_entry = tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	/* check whether the clients should not communicate due to AP
	 * isolation */
	if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
		goto out;

	if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
		goto out;

	/* A global client marked as PENDING has already moved from that
	 * originator */
	if (tt_global_entry->common.flags & TT_CLIENT_PENDING)
		goto out;

	orig_node = tt_global_entry->orig_node;

out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);

	return orig_node;
}

/* Calculates the checksum of the local table of a given orig_node */
uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
	uint16_t total = 0, total_one;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int j;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			if (compare_eth(tt_global_entry->orig_node,
					orig_node)) {
				/* Roaming clients are in the global table for
				 * consistency only. They don't have to be
				 * taken into account while computing the
				 * global crc */
				if (tt_common_entry->flags & TT_CLIENT_ROAM)
					continue;
				total_one = 0;
				for (j = 0; j < ETH_ALEN; j++)
					total_one = crc16_byte(total_one,
						tt_common_entry->addr[j]);
				total ^= total_one;
			}
		}
		rcu_read_unlock();
	}

	return total;
}

/* Calculates the checksum of the local table */
uint16_t tt_local_crc(struct bat_priv *bat_priv)
{
	uint16_t total = 0, total_one;
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_common_entry *tt_common_entry;
	struct hlist_node *node;
	struct hlist_head *head;
	uint32_t i;
	int j;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			/* not yet committed clients have not to be taken into
			 * account while computing the CRC */
			if (tt_common_entry->flags & TT_CLIENT_NEW)
				continue;
			total_one = 0;
			for (j = 0; j < ETH_ALEN; j++)
				total_one = crc16_byte(total_one,
						   tt_common_entry->addr[j]);
			total ^= total_one;
		}
		rcu_read_unlock();
	}

	return total;
}

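/* Both CRC helpers above reduce a table to 16 bits by running crc16_byte()
 * over each eligible client MAC address and XOR-ing the per-client results
 * into the total, so the outcome does not depend on the hash iteration
 * order. Roaming (global) and not yet committed (local) clients are skipped
 * so that both ends of a comparison agree on the set of clients. */
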
static void tt_req_list_free(struct bat_priv *bat_priv)
{
	struct tt_req_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_req_list_lock);

	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		list_del(&node->list);
		kfree(node);
	}

	spin_unlock_bh(&bat_priv->tt_req_list_lock);
}

void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
			 const unsigned char *tt_buff, uint8_t tt_num_changes)
{
	uint16_t tt_buff_len = tt_len(tt_num_changes);

	/* Replace the old buffer only if I received something in the
	 * last OGM (the OGM could carry no changes) */
	spin_lock_bh(&orig_node->tt_buff_lock);
	if (tt_buff_len > 0) {
		kfree(orig_node->tt_buff);
		orig_node->tt_buff_len = 0;
		orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
		if (orig_node->tt_buff) {
			memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
			orig_node->tt_buff_len = tt_buff_len;
		}
	}
	spin_unlock_bh(&orig_node->tt_buff_lock);
}

static void tt_req_purge(struct bat_priv *bat_priv)
{
	struct tt_req_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (is_out_of_time(node->issued_at,
				   TT_REQUEST_TIMEOUT * 1000)) {
			list_del(&node->list);
			kfree(node);
		}
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);
}

/* returns the pointer to the new tt_req_node struct if no request
 * has already been issued for this orig_node, NULL otherwise */
static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
					   struct orig_node *orig_node)
{
	struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;

	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
		if (compare_eth(tt_req_node_tmp, orig_node) &&
		    !is_out_of_time(tt_req_node_tmp->issued_at,
				    TT_REQUEST_TIMEOUT * 1000))
			goto unlock;
	}

	tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
	if (!tt_req_node)
		goto unlock;

	memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
	tt_req_node->issued_at = jiffies;

	list_add(&tt_req_node->list, &bat_priv->tt_req_list);
unlock:
	spin_unlock_bh(&bat_priv->tt_req_list_lock);
	return tt_req_node;
}

/* data_ptr is useless here, but has to be kept to respect the prototype */
static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
{
	const struct tt_common_entry *tt_common_entry = entry_ptr;

	if (tt_common_entry->flags & TT_CLIENT_NEW)
		return 0;
	return 1;
}

static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
{
	const struct tt_common_entry *tt_common_entry = entry_ptr;
	const struct tt_global_entry *tt_global_entry;
	const struct orig_node *orig_node = data_ptr;

	if (tt_common_entry->flags & TT_CLIENT_ROAM)
		return 0;

	tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
				       common);

	return (tt_global_entry->orig_node == orig_node);
}

static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
					      struct hashtable_t *hash,
					      struct hard_iface *primary_if,
					      int (*valid_cb)(const void *,
							      const void *),
					      void *cb_data)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_query_packet *tt_response;
	struct tt_change *tt_change;
	struct hlist_node *node;
	struct hlist_head *head;
	struct sk_buff *skb = NULL;
	uint16_t tt_tot, tt_count;
	ssize_t tt_query_size = sizeof(struct tt_query_packet);
	uint32_t i;

	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
		tt_len = primary_if->soft_iface->mtu - tt_query_size;
		tt_len -= tt_len % sizeof(struct tt_change);
	}
	tt_tot = tt_len / sizeof(struct tt_change);

	skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);
	tt_response = (struct tt_query_packet *)skb_put(skb,
						     tt_query_size + tt_len);
	tt_response->ttvn = ttvn;

	tt_change = (struct tt_change *)(skb->data + tt_query_size);
	tt_count = 0;

	rcu_read_lock();
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			if (tt_count == tt_tot)
				break;

			if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
				continue;

			memcpy(tt_change->addr, tt_common_entry->addr,
			       ETH_ALEN);
			tt_change->flags = NO_FLAGS;

			tt_count++;
			tt_change++;
		}
	}
	rcu_read_unlock();

	/* store in the message the number of entries we have successfully
	 * copied */
	tt_response->tt_data = htons(tt_count);

out:
	return skb;
}

static int send_tt_request(struct bat_priv *bat_priv,
			   struct orig_node *dst_orig_node,
			   uint8_t ttvn, uint16_t tt_crc, bool full_table)
{
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_request;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if;
	struct tt_req_node *tt_req_node = NULL;
	int ret = 1;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet */
	tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
	if (!tt_req_node)
		goto out;

	skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	tt_request = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet));

	tt_request->packet_type = BAT_TT_QUERY;
	tt_request->version = COMPAT_VERSION;
	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
	tt_request->ttl = TTL;
	tt_request->ttvn = ttvn;
	tt_request->tt_data = tt_crc;
	tt_request->flags = TT_REQUEST;

	if (full_table)
		tt_request->flags |= TT_FULL_TABLE;

	neigh_node = orig_node_get_router(dst_orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv, "Sending TT_REQUEST to %pM via %pM "
		"[%c]\n", dst_orig_node->orig, neigh_node->addr,
		(full_table ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (ret)
		kfree_skb(skb);
	if (ret && tt_req_node) {
		spin_lock_bh(&bat_priv->tt_req_list_lock);
		list_del(&tt_req_node->list);
		spin_unlock_bh(&bat_priv->tt_req_list_lock);
		kfree(tt_req_node);
	}
	return ret;
}

static bool send_other_tt_response(struct bat_priv *bat_priv,
				   struct tt_query_packet *tt_request)
{
	struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t orig_ttvn, req_ttvn, ttvn;
	bool ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for "
		"ttvn: %u (%pM) [%c]\n", tt_request->src,
		tt_request->ttvn, tt_request->dst,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	/* Let's get the orig node of the REAL destination */
	req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst);
	if (!req_dst_orig_node)
		goto out;

	res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src);
	if (!res_dst_orig_node)
		goto out;

	neigh_node = orig_node_get_router(res_dst_orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
	req_ttvn = tt_request->ttvn;

	/* I don't have the requested data */
	if (orig_ttvn != req_ttvn ||
	    tt_request->tt_data != req_dst_orig_node->tt_crc)
		goto out;

	/* If the full table has been explicitly requested */
	if (tt_request->flags & TT_FULL_TABLE ||
	    !req_dst_orig_node->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, then
	 * I'll send only one packet with as much TT entries as I can */
	if (!full_table) {
		spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
		tt_len = req_dst_orig_node->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		/* Copy the last orig_node's OGM buffer */
		memcpy(tt_buff, req_dst_orig_node->tt_buff,
		       req_dst_orig_node->tt_buff_len);

		spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
	} else {
		tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
						sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_global_hash,
					     primary_if, tt_global_valid_entry,
					     req_dst_orig_node);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->packet_type = BAT_TT_QUERY;
	tt_response->version = COMPAT_VERSION;
	tt_response->ttl = TTL;
	memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
		res_dst_orig_node->orig, neigh_node->addr,
		req_dst_orig_node->orig, req_ttvn);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);

out:
	if (res_dst_orig_node)
		orig_node_free_ref(res_dst_orig_node);
	if (req_dst_orig_node)
		orig_node_free_ref(req_dst_orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	return ret;
}

static bool send_my_tt_response(struct bat_priv *bat_priv,
				struct tt_query_packet *tt_request)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t my_ttvn, req_ttvn, ttvn;
	bool ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for "
		"ttvn: %u (me) [%c]\n", tt_request->src,
		tt_request->ttvn,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	req_ttvn = tt_request->ttvn;

	orig_node = orig_hash_find(bat_priv, tt_request->src);
	if (!orig_node)
		goto out;

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* If the full table has been explicitly requested or the gap
	 * is too big send the whole local translation table */
	if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
	    !bat_priv->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, then
	 * I'll send only one packet with as much TT entries as I can */
	if (!full_table) {
		spin_lock_bh(&bat_priv->tt_buff_lock);
		tt_len = bat_priv->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		memcpy(tt_buff, bat_priv->tt_buff,
		       bat_priv->tt_buff_len);
		spin_unlock_bh(&bat_priv->tt_buff_lock);
	} else {
		tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
						sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_local_hash,
					     primary_if, tt_local_valid_entry,
					     NULL);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->packet_type = BAT_TT_QUERY;
	tt_response->version = COMPAT_VERSION;
	tt_response->ttl = TTL;
	memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE to %pM via %pM [%c]\n",
		orig_node->orig, neigh_node->addr,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&bat_priv->tt_buff_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	/* This packet was for me, so it doesn't need to be re-routed */
	return true;
}

bool send_tt_response(struct bat_priv *bat_priv,
		      struct tt_query_packet *tt_request)
{
	if (is_my_mac(tt_request->dst))
		return send_my_tt_response(bat_priv, tt_request);
	else
		return send_other_tt_response(bat_priv, tt_request);
}

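/* Dispatch note: a TT_QUERY whose destination is one of this node's own MAC
 * addresses is answered from the local translation table
 * (send_my_tt_response); a query addressed to another originator is answered
 * on its behalf from the global entries learned for it
 * (send_other_tt_response). */
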
static void _tt_update_changes(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct tt_change *tt_change,
			       uint16_t tt_num_changes, uint8_t ttvn)
{
	int i;

	for (i = 0; i < tt_num_changes; i++) {
		if ((tt_change + i)->flags & TT_CLIENT_DEL)
			tt_global_del(bat_priv, orig_node,
				      (tt_change + i)->addr,
				      "tt removed by changes",
				      (tt_change + i)->flags & TT_CLIENT_ROAM);
		else
			if (!tt_global_add(bat_priv, orig_node,
					   (tt_change + i)->addr, ttvn, false,
					   (tt_change + i)->flags &
							TT_CLIENT_WIFI))
				/* In case of problem while storing a
				 * global_entry, we stop the updating
				 * procedure without committing the
				 * ttvn change. This will avoid to send
				 * corrupted data on tt_request
				 */
				return;
	}
}

static void tt_fill_gtable(struct bat_priv *bat_priv,
			   struct tt_query_packet *tt_response)
{
	struct orig_node *orig_node = NULL;

	orig_node = orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	/* Purge the old table first.. */
	tt_global_del_orig(bat_priv, orig_node, "Received full table");

	_tt_update_changes(bat_priv, orig_node,
			   (struct tt_change *)(tt_response + 1),
			   tt_response->tt_data, tt_response->ttvn);

	spin_lock_bh(&orig_node->tt_buff_lock);
	kfree(orig_node->tt_buff);
	orig_node->tt_buff_len = 0;
	orig_node->tt_buff = NULL;
	spin_unlock_bh(&orig_node->tt_buff_lock);

	atomic_set(&orig_node->last_ttvn, tt_response->ttvn);

out:
	if (orig_node)
		orig_node_free_ref(orig_node);
}

static void tt_update_changes(struct bat_priv *bat_priv,
			      struct orig_node *orig_node,
			      uint16_t tt_num_changes, uint8_t ttvn,
			      struct tt_change *tt_change)
{
	_tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
			   ttvn);

	tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
			    tt_num_changes);
	atomic_set(&orig_node->last_ttvn, ttvn);
}

bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
{
	struct tt_local_entry *tt_local_entry = NULL;
	bool ret = false;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);
	if (!tt_local_entry)
		goto out;
	/* Check if the client has been logically deleted (but is kept for
	 * consistency purpose) */
	if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
		goto out;
	ret = true;
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	return ret;
}

void handle_tt_response(struct bat_priv *bat_priv,
			struct tt_query_packet *tt_response)
{
	struct tt_req_node *node, *safe;
	struct orig_node *orig_node = NULL;

	bat_dbg(DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for "
		"ttvn %d t_size: %d [%c]\n",
		tt_response->src, tt_response->ttvn,
		tt_response->tt_data,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	orig_node = orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	if (tt_response->flags & TT_FULL_TABLE)
		tt_fill_gtable(bat_priv, tt_response);
	else
		tt_update_changes(bat_priv, orig_node, tt_response->tt_data,
				  tt_response->ttvn,
				  (struct tt_change *)(tt_response + 1));

	/* Delete the tt_req_node from pending tt_requests list */
	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (!compare_eth(node->addr, tt_response->src))
			continue;
		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);

	/* Recalculate the CRC for this orig_node and store it */
	orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
	/* Roaming phase is over: tables are in sync again. I can
	 * unset the flag */
	orig_node->tt_poss_change = false;
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
}

int tt_init(struct bat_priv *bat_priv)
{
	if (!tt_local_init(bat_priv))
		return 0;

	if (!tt_global_init(bat_priv))
		return 0;

	tt_start_timer(bat_priv);

	return 1;
}

*bat_priv
)
1558 struct tt_roam_node
*node
, *safe
;
1560 spin_lock_bh(&bat_priv
->tt_roam_list_lock
);
1562 list_for_each_entry_safe(node
, safe
, &bat_priv
->tt_roam_list
, list
) {
1563 list_del(&node
->list
);
1567 spin_unlock_bh(&bat_priv
->tt_roam_list_lock
);
static void tt_roam_purge(struct bat_priv *bat_priv)
{
	struct tt_roam_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
		if (!is_out_of_time(node->first_time,
				    ROAMING_MAX_TIME * 1000))
			continue;

		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
}

/* This function checks whether the client already reached the
 * maximum number of possible roaming phases. In this case the ROAMING_ADV
 * will not be sent.
 *
 * returns true if the ROAMING_ADV can be sent, false otherwise */
static bool tt_check_roam_count(struct bat_priv *bat_priv,
				uint8_t *client)
{
	struct tt_roam_node *tt_roam_node;
	bool ret = false;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet */
	list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
		if (!compare_eth(tt_roam_node->addr, client))
			continue;

		if (is_out_of_time(tt_roam_node->first_time,
				   ROAMING_MAX_TIME * 1000))
			continue;

		if (!atomic_dec_not_zero(&tt_roam_node->counter))
			/* Sorry, you roamed too many times! */
			goto unlock;
		ret = true;
		goto unlock;
	}

	if (!ret) {
		tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
		if (!tt_roam_node)
			goto unlock;

		tt_roam_node->first_time = jiffies;
		atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
		memcpy(tt_roam_node->addr, client, ETH_ALEN);

		list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
		ret = true;
	}

unlock:
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
	return ret;
}

void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
		   struct orig_node *orig_node)
{
	struct neigh_node *neigh_node = NULL;
	struct sk_buff *skb = NULL;
	struct roam_adv_packet *roam_adv_packet;
	int ret = 1;
	struct hard_iface *primary_if;

	/* before going on we have to check whether the client has
	 * already roamed to us too many times */
	if (!tt_check_roam_count(bat_priv, client))
		goto out;

	skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
					sizeof(struct roam_adv_packet));

	roam_adv_packet->packet_type = BAT_ROAM_ADV;
	roam_adv_packet->version = COMPAT_VERSION;
	roam_adv_packet->ttl = TTL;
	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;
	memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	hardif_free_ref(primary_if);
	memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
	memcpy(roam_adv_packet->client, client, ETH_ALEN);

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
		orig_node->orig, client, neigh_node->addr);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (ret)
		kfree_skb(skb);
	return;
}

static void tt_purge(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, tt_work);

	tt_local_purge(bat_priv);
	tt_global_roam_purge(bat_priv);
	tt_req_purge(bat_priv);
	tt_roam_purge(bat_priv);

	tt_start_timer(bat_priv);
}

void tt_free(struct bat_priv *bat_priv)
{
	cancel_delayed_work_sync(&bat_priv->tt_work);

	tt_local_table_free(bat_priv);
	tt_global_table_free(bat_priv);
	tt_req_list_free(bat_priv);
	tt_changes_list_free(bat_priv);
	tt_roam_list_free(bat_priv);

	kfree(bat_priv->tt_buff);
}

/* This function will enable or disable the specified flags for all the entries
 * in the given hash table and returns the number of modified entries */
static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags,
			     bool enable)
{
	uint32_t i;
	uint16_t changed_num = 0;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_common_entry *tt_common_entry;

	if (!hash)
		goto out;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			if (enable) {
				if ((tt_common_entry->flags & flags) == flags)
					continue;
				tt_common_entry->flags |= flags;
			} else {
				if (!(tt_common_entry->flags & flags))
					continue;
				tt_common_entry->flags &= ~flags;
			}
			changed_num++;
		}
		rcu_read_unlock();
	}
out:
	return changed_num;
}

/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	uint32_t i;

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
				continue;

			bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry "
				"(%pM): pending\n", tt_common_entry->addr);

			atomic_dec(&bat_priv->num_local_tt);
			hlist_del_rcu(node);
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}
}

void tt_commit_changes(struct bat_priv *bat_priv)
{
	uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash,
					    TT_CLIENT_NEW, false);
	/* all the reset entries have now to be effectively counted as local
	 * entries */
	atomic_add(changed_num, &bat_priv->num_local_tt);
	tt_local_purge_pending_clients(bat_priv);

	/* Increment the TTVN only once per OGM interval */
	atomic_inc(&bat_priv->ttvn);
	bat_priv->tt_poss_change = false;
}

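/* Commit sequence above, per OGM interval: clients flagged TT_CLIENT_NEW lose
 * the flag and start being counted and announced, clients flagged
 * TT_CLIENT_PENDING are finally purged, and only then is the TTVN
 * incremented, so receivers can apply the advertised diff against a
 * consistent table. */
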
bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
{
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	bool ret = false;

	if (!atomic_read(&bat_priv->ap_isolation))
		goto out;

	tt_local_entry = tt_local_hash_find(bat_priv, dst);
	if (!tt_local_entry)
		goto out;

	tt_global_entry = tt_global_hash_find(bat_priv, src);
	if (!tt_global_entry)
		goto out;

	if (_is_ap_isolated(tt_local_entry, tt_global_entry))
		ret = true;

out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	return ret;
}

void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
		    const unsigned char *tt_buff, uint8_t tt_num_changes,
		    uint8_t ttvn, uint16_t tt_crc)
{
	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
	bool full_table = true;

	/* the ttvn increased by one -> we can apply the attached changes */
	if (ttvn - orig_ttvn == 1) {
		/* the OGM could not contain the changes due to their size or
		 * because they have already been sent TT_OGM_APPEND_MAX times.
		 * In this case send a tt request */
		if (!tt_num_changes) {
			full_table = false;
			goto request_table;
		}

		tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
				  (struct tt_change *)tt_buff);

		/* Even if we received the precomputed crc with the OGM, we
		 * prefer to recompute it to spot any possible inconsistency
		 * in the global table */
		orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);

		/* The ttvn alone is not enough to guarantee consistency
		 * because a single value could represent different states
		 * (due to the wrap around). Thus a node has to check whether
		 * the resulting table (after applying the changes) is still
		 * consistent or not. E.g. a node could disconnect while its
		 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
		 * checking the CRC value is mandatory to detect the
		 * inconsistency */
		if (orig_node->tt_crc != tt_crc)
			goto request_table;

		/* Roaming phase is over: tables are in sync again. I can
		 * unset the flag */
		orig_node->tt_poss_change = false;
	} else {
		/* if we missed more than one change or our tables are not
		 * in sync anymore -> request fresh tt data */
		if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
request_table:
			bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
				"Need to retrieve the correct information "
				"(ttvn: %u last_ttvn: %u crc: %u last_crc: "
				"%u num_changes: %u)\n", orig_node->orig, ttvn,
				orig_ttvn, tt_crc, orig_node->tt_crc,
				tt_num_changes);
			send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
					full_table);
		}
	}
}