/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 */

#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "originator.h"

#include <linux/crc16.h>

static void _tt_global_del(struct bat_priv *bat_priv,
			   struct tt_global_entry *tt_global_entry,
			   const char *message);
static void tt_purge(struct work_struct *work);

/* returns 1 if they are the same mac addr */
static int compare_tt(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct tt_common_entry,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

static void tt_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
	queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work,
			   msecs_to_jiffies(5000));
}

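
/* Hash lookup helpers: tt_hash_find() walks the bucket selected by
 * choose_orig() and returns the matching tt_common_entry with its refcount
 * already taken (atomic_inc_not_zero() skips entries that are about to be
 * freed). The caller is expected to drop that reference via the matching
 * *_free_ref() helper. */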
static struct tt_common_entry *tt_hash_find(struct hashtable_t *hash,
					    const void *data)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_common_entry *tt_common_entry, *tt_common_entry_tmp = NULL;

	index = choose_orig(data, hash->size);
	head = &hash->table[index];

	hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
		if (!compare_eth(tt_common_entry, data))
			continue;

		if (!atomic_inc_not_zero(&tt_common_entry->refcount))
			continue;

		tt_common_entry_tmp = tt_common_entry;
		break;
	}

	return tt_common_entry_tmp;
}

static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
						 const void *data)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_local_entry *tt_local_entry = NULL;

	tt_common_entry = tt_hash_find(bat_priv->tt_local_hash, data);
	if (tt_common_entry)
		tt_local_entry = container_of(tt_common_entry,
					      struct tt_local_entry, common);
	return tt_local_entry;
}

static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
						   const void *data)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry = NULL;

	tt_common_entry = tt_hash_find(bat_priv->tt_global_hash, data);
	if (tt_common_entry)
		tt_global_entry = container_of(tt_common_entry,
					       struct tt_global_entry, common);
	return tt_global_entry;
}

static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
{
	unsigned long deadline;

	deadline = starting_time + msecs_to_jiffies(timeout);
	return time_after(jiffies, deadline);
}

static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
{
	if (atomic_dec_and_test(&tt_local_entry->common.refcount))
		kfree_rcu(tt_local_entry, common.rcu);
}

static void tt_global_entry_free_rcu(struct rcu_head *rcu)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;

	tt_common_entry = container_of(rcu, struct tt_common_entry, rcu);
	tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
				       common);

	if (tt_global_entry->orig_node)
		orig_node_free_ref(tt_global_entry->orig_node);

	kfree(tt_global_entry);
}

static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
{
	if (atomic_dec_and_test(&tt_global_entry->common.refcount))
		call_rcu(&tt_global_entry->common.rcu,
			 tt_global_entry_free_rcu);
}

static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
			   uint8_t flags)
{
	struct tt_change_node *tt_change_node;

	tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
	if (!tt_change_node)
		return;

	tt_change_node->change.flags = flags;
	memcpy(tt_change_node->change.addr, addr, ETH_ALEN);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	/* track the change in the OGM interval list */
	list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
	atomic_inc(&bat_priv->tt_local_changes);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
}

int tt_len(int changes_num)
{
	return changes_num * sizeof(struct tt_change);
}

static int tt_local_init(struct bat_priv *bat_priv)
{
	if (bat_priv->tt_local_hash)
		return 1;

	bat_priv->tt_local_hash = hash_new(1024);

	if (!bat_priv->tt_local_hash)
		return 0;

	return 1;
}

void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
		  int ifindex)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);

	if (tt_local_entry) {
		tt_local_entry->last_seen = jiffies;
		goto out;
	}

	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
	if (!tt_local_entry)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Creating new local tt entry: %pM (ttvn: %d)\n", addr,
		(uint8_t)atomic_read(&bat_priv->ttvn));

	memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
	tt_local_entry->common.flags = NO_FLAGS;
	if (is_wifi_iface(ifindex))
		tt_local_entry->common.flags |= TT_CLIENT_WIFI;
	atomic_set(&tt_local_entry->common.refcount, 2);
	tt_local_entry->last_seen = jiffies;

	/* the batman interface mac address should never be purged */
	if (compare_eth(addr, soft_iface->dev_addr))
		tt_local_entry->common.flags |= TT_CLIENT_NOPURGE;

	hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig,
			      &tt_local_entry->common,
			      &tt_local_entry->common.hash_entry);

	if (unlikely(hash_added != 0)) {
		/* remove the reference for the hash */
		tt_local_entry_free_ref(tt_local_entry);
		goto out;
	}

	tt_local_event(bat_priv, addr, tt_local_entry->common.flags);

	/* The local entry has to be marked as NEW to avoid sending it in
	 * a full table response going out before the next ttvn increment
	 * (consistency check) */
	tt_local_entry->common.flags |= TT_CLIENT_NEW;

	/* remove address from global hash if present */
	tt_global_entry = tt_global_hash_find(bat_priv, addr);

	/* Check whether this is a roaming event */
	if (tt_global_entry) {
		/* This node is probably going to update its tt table */
		tt_global_entry->orig_node->tt_poss_change = true;
		/* The global entry has to be marked as ROAMING and has to be
		 * kept for consistency purposes */
		tt_global_entry->common.flags |= TT_CLIENT_ROAM;
		tt_global_entry->roam_at = jiffies;

		send_roam_adv(bat_priv, tt_global_entry->common.addr,
			      tt_global_entry->orig_node);
	}
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}

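
/* Per-OGM change handling: every local add/delete queued by tt_local_event()
 * is drained here into the OGM buffer (at most buff_len / tt_len(1) entries),
 * and a copy of the diff is kept in bat_priv->tt_buff so that later
 * TT_REQUESTs can be answered with the same data. */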
int tt_changes_fill_buffer(struct bat_priv *bat_priv,
			   unsigned char *buff, int buff_len)
{
	int count = 0, tot_changes = 0;
	struct tt_change_node *entry, *safe;

	if (buff_len > 0)
		tot_changes = buff_len / tt_len(1);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	atomic_set(&bat_priv->tt_local_changes, 0);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		if (count < tot_changes) {
			memcpy(buff + tt_len(count),
			       &entry->change, sizeof(struct tt_change));
			count++;
		}
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	/* Keep the buffer for possible tt_request */
	spin_lock_bh(&bat_priv->tt_buff_lock);
	kfree(bat_priv->tt_buff);
	bat_priv->tt_buff_len = 0;
	bat_priv->tt_buff = NULL;
	/* We check whether this new OGM has no changes due to size problems */
	if (buff_len > 0) {
		/* if kmalloc() fails we will reply with the full table
		 * instead of providing the diff */
		bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC);
		if (bat_priv->tt_buff) {
			memcpy(bat_priv->tt_buff, buff, buff_len);
			bat_priv->tt_buff_len = buff_len;
		}
	}
	spin_unlock_bh(&bat_priv->tt_buff_lock);

	return tot_changes;
}

int tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_common_entry *tt_common_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq, "Locally retrieved addresses (from %s) "
		   "announced via TT (TTVN: %u):\n",
		   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
				   tt_common_entry->addr,
				   (tt_common_entry->flags &
				    TT_CLIENT_ROAM ? 'R' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_NOPURGE ? 'P' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_NEW ? 'N' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_PENDING ? 'X' : '.'),
				   (tt_common_entry->flags &
				    TT_CLIENT_WIFI ? 'W' : '.'));
		}
	}
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}

static void tt_local_set_pending(struct bat_priv *bat_priv,
				 struct tt_local_entry *tt_local_entry,
				 uint16_t flags)
{
	tt_local_event(bat_priv, tt_local_entry->common.addr,
		       tt_local_entry->common.flags | flags);

	/* The local client has to be marked as "pending to be removed" but
	 * has to be kept in the table in order to send it in a full table
	 * response issued before the next ttvn increment (consistency
	 * check) */
	tt_local_entry->common.flags |= TT_CLIENT_PENDING;
}

void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
		     const char *message, bool roaming)
{
	struct tt_local_entry *tt_local_entry = NULL;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);
	if (!tt_local_entry)
		goto out;

	tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
			     (roaming ? TT_CLIENT_ROAM : NO_FLAGS));

	bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: "
		"%s\n", tt_local_entry->common.addr, message);
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
}

static void tt_local_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct tt_common_entry *tt_common_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			if (tt_local_entry->common.flags & TT_CLIENT_NOPURGE)
				continue;

			/* entry already marked for deletion */
			if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
				continue;

			if (!is_out_of_time(tt_local_entry->last_seen,
					    TT_LOCAL_TIMEOUT * 1000))
				continue;

			tt_local_set_pending(bat_priv, tt_local_entry,
					     TT_CLIENT_DEL);
			bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) "
				"pending to be removed: timed out\n",
				tt_local_entry->common.addr);
		}
		spin_unlock_bh(list_lock);
	}
}

static void tt_local_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_common_entry *tt_common_entry;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;

	if (!bat_priv->tt_local_hash)
		return;

	hash = bat_priv->tt_local_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}

	bat_priv->tt_local_hash = NULL;
}

static int tt_global_init(struct bat_priv *bat_priv)
{
	if (bat_priv->tt_global_hash)
		return 1;

	bat_priv->tt_global_hash = hash_new(1024);

	if (!bat_priv->tt_global_hash)
		return 0;

	return 1;
}

static void tt_changes_list_free(struct bat_priv *bat_priv)
{
	struct tt_change_node *entry, *safe;

	spin_lock_bh(&bat_priv->tt_changes_list_lock);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		list_del(&entry->list);
		kfree(entry);
	}

	atomic_set(&bat_priv->tt_local_changes, 0);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
}

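
/* Global table updates: tt_global_add() either creates a new entry for the
 * announcing originator or, when the client is already known, moves the entry
 * over to the new orig_node while keeping the per-originator tt_size counters
 * balanced. Any matching local entry is removed afterwards. */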
/* caller must hold orig_node refcount */
int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
		  const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
		  bool wifi)
{
	struct tt_global_entry *tt_global_entry;
	struct orig_node *orig_node_tmp;

	tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);

	if (!tt_global_entry) {
		tt_global_entry =
			kmalloc(sizeof(*tt_global_entry),
				GFP_ATOMIC);
		if (!tt_global_entry)
			goto out;

		memcpy(tt_global_entry->common.addr, tt_addr, ETH_ALEN);
		tt_global_entry->common.flags = NO_FLAGS;
		atomic_set(&tt_global_entry->common.refcount, 2);
		/* Assign the new orig_node */
		atomic_inc(&orig_node->refcount);
		tt_global_entry->orig_node = orig_node;
		tt_global_entry->ttvn = ttvn;
		tt_global_entry->roam_at = 0;

		hash_added = hash_add(bat_priv->tt_global_hash, compare_tt,
				      choose_orig, &tt_global_entry->common,
				      &tt_global_entry->common.hash_entry);

		if (unlikely(hash_added != 0)) {
			/* remove the reference for the hash */
			tt_global_entry_free_ref(tt_global_entry);
			goto out_remove;
		}
		atomic_inc(&orig_node->tt_size);
	} else {
		if (tt_global_entry->orig_node != orig_node) {
			atomic_dec(&tt_global_entry->orig_node->tt_size);
			orig_node_tmp = tt_global_entry->orig_node;
			atomic_inc(&orig_node->refcount);
			tt_global_entry->orig_node = orig_node;
			orig_node_free_ref(orig_node_tmp);
			atomic_inc(&orig_node->tt_size);
		}
		tt_global_entry->common.flags = NO_FLAGS;
		tt_global_entry->ttvn = ttvn;
		tt_global_entry->roam_at = 0;
	}

	if (wifi)
		tt_global_entry->common.flags |= TT_CLIENT_WIFI;

	bat_dbg(DBG_TT, bat_priv,
		"Creating new global tt entry: %pM (via %pM)\n",
		tt_global_entry->common.addr, orig_node->orig);

out_remove:
	/* remove address from local hash if present */
	tt_local_remove(bat_priv, tt_global_entry->common.addr,
			"global tt received", roaming);
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	return ret;
}

int tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
				 "specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Globally announced TT entries received via the mesh %s\n",
		   net_dev->name);
	seq_printf(seq, " %-13s %s %-15s %s %s\n",
		   "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			seq_printf(seq, " * %pM (%3u) via %pM (%3u) "
				   "[%c%c%c]\n",
				   tt_global_entry->common.addr,
				   tt_global_entry->ttvn,
				   tt_global_entry->orig_node->orig,
				   (uint8_t) atomic_read(
					&tt_global_entry->orig_node->
					last_ttvn),
				   (tt_global_entry->common.flags &
				    TT_CLIENT_ROAM ? 'R' : '.'),
				   (tt_global_entry->common.flags &
				    TT_CLIENT_PENDING ? 'X' : '.'),
				   (tt_global_entry->common.flags &
				    TT_CLIENT_WIFI ? 'W' : '.'));
		}
	}
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}

static void _tt_global_del(struct bat_priv *bat_priv,
			   struct tt_global_entry *tt_global_entry,
			   const char *message)
{
	if (!tt_global_entry)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Deleting global tt entry %pM (via %pM): %s\n",
		tt_global_entry->common.addr, tt_global_entry->orig_node->orig,
		message);

	atomic_dec(&tt_global_entry->orig_node->tt_size);

	hash_remove(bat_priv->tt_global_hash, compare_tt, choose_orig,
		    tt_global_entry->common.addr);
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}

void tt_global_del(struct bat_priv *bat_priv,
		   struct orig_node *orig_node, const unsigned char *addr,
		   const char *message, bool roaming)
{
	struct tt_global_entry *tt_global_entry = NULL;
	struct tt_local_entry *tt_local_entry = NULL;

	tt_global_entry = tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	if (tt_global_entry->orig_node == orig_node) {
		if (roaming) {
			/* if we are deleting a global entry due to a roam
			 * event, there are two possibilities:
			 * 1) the client roamed from node A to node B => we mark
			 *    it with TT_CLIENT_ROAM, we start a timer and we
			 *    wait for node B to claim it. In case of timeout
			 *    the entry is purged.
			 * 2) the client roamed to us => we can directly delete
			 *    the global entry, since it is useless now. */
			tt_local_entry = tt_local_hash_find(bat_priv,
						tt_global_entry->common.addr);
			if (!tt_local_entry) {
				tt_global_entry->common.flags |=
							TT_CLIENT_ROAM;
				tt_global_entry->roam_at = jiffies;
				goto out;
			}
		}
		_tt_global_del(bat_priv, tt_global_entry, message);
	}
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
}

void tt_global_del_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node, const char *message)
{
	struct tt_global_entry *tt_global_entry;
	struct tt_common_entry *tt_common_entry;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct hlist_node *node, *safe;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, safe,
					  head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			if (tt_global_entry->orig_node == orig_node) {
				bat_dbg(DBG_TT, bat_priv,
					"Deleting global tt entry %pM "
					"(via %pM): %s\n",
					tt_global_entry->common.addr,
					tt_global_entry->orig_node->orig,
					message);
				hlist_del_rcu(node);
				tt_global_entry_free_ref(tt_global_entry);
			}
		}
		spin_unlock_bh(list_lock);
	}

	atomic_set(&orig_node->tt_size, 0);
}

static void tt_global_roam_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			if (!(tt_global_entry->common.flags & TT_CLIENT_ROAM))
				continue;

			if (!is_out_of_time(tt_global_entry->roam_at,
					    TT_CLIENT_ROAM_TIMEOUT * 1000))
				continue;

			bat_dbg(DBG_TT, bat_priv, "Deleting global "
				"tt entry (%pM): Roaming timeout\n",
				tt_global_entry->common.addr);
			atomic_dec(&tt_global_entry->orig_node->tt_size);
			hlist_del_rcu(node);
			tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}
}

static void tt_global_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;

	if (!bat_priv->tt_global_hash)
		return;

	hash = bat_priv->tt_global_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}

	bat_priv->tt_global_hash = NULL;
}

static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
			    struct tt_global_entry *tt_global_entry)
{
	if (tt_local_entry->common.flags & TT_CLIENT_WIFI &&
	    tt_global_entry->common.flags & TT_CLIENT_WIFI)
		return true;

	return false;
}

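
/* transtable_search() resolves the originator currently serving a destination
 * client. When AP isolation is enabled a lookup of the source client is done
 * as well, so that traffic between two wifi clients can be refused via
 * _is_ap_isolated(). A PENDING global client is ignored because it has
 * already moved away from that originator. */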
struct orig_node *transtable_search(struct bat_priv *bat_priv,
				    const uint8_t *src, const uint8_t *addr)
{
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	struct orig_node *orig_node = NULL;

	if (src && atomic_read(&bat_priv->ap_isolation)) {
		tt_local_entry = tt_local_hash_find(bat_priv, src);
		if (!tt_local_entry)
			goto out;
	}

	tt_global_entry = tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	/* check whether the clients should not communicate due to AP
	 * isolation */
	if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
		goto out;

	if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
		goto out;

	/* A global client marked as PENDING has already moved from that
	 * originator */
	if (tt_global_entry->common.flags & TT_CLIENT_PENDING)
		goto out;

	orig_node = tt_global_entry->orig_node;
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);

	return orig_node;
}

/* Calculates the checksum of the local table of a given orig_node */
uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
	uint16_t total = 0, total_one;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node;
	struct hlist_head *head;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			tt_global_entry = container_of(tt_common_entry,
						       struct tt_global_entry,
						       common);
			if (compare_eth(tt_global_entry->orig_node,
					orig_node)) {
				/* Roaming clients are in the global table for
				 * consistency only. They don't have to be
				 * taken into account while computing the
				 * global crc */
				if (tt_common_entry->flags & TT_CLIENT_ROAM)
					continue;
				total_one = 0;
				for (j = 0; j < ETH_ALEN; j++)
					total_one = crc16_byte(total_one,
						tt_common_entry->addr[j]);
				total ^= total_one;
			}
		}
	}

	return total;
}

/* Calculates the checksum of the local table */
uint16_t tt_local_crc(struct bat_priv *bat_priv)
{
	uint16_t total = 0, total_one;
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_common_entry *tt_common_entry;
	struct hlist_node *node;
	struct hlist_head *head;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			/* not yet committed clients must not be taken into
			 * account while computing the CRC */
			if (tt_common_entry->flags & TT_CLIENT_NEW)
				continue;
			total_one = 0;
			for (j = 0; j < ETH_ALEN; j++)
				total_one = crc16_byte(total_one,
						tt_common_entry->addr[j]);
			total ^= total_one;
		}
	}

	return total;
}

static void tt_req_list_free(struct bat_priv *bat_priv)
{
	struct tt_req_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_req_list_lock);

	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		list_del(&node->list);
		kfree(node);
	}

	spin_unlock_bh(&bat_priv->tt_req_list_lock);
}

void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
			 const unsigned char *tt_buff, uint8_t tt_num_changes)
{
	uint16_t tt_buff_len = tt_len(tt_num_changes);

	/* Replace the old buffer only if I received something in the
	 * last OGM (the OGM could carry no changes) */
	spin_lock_bh(&orig_node->tt_buff_lock);
	if (tt_buff_len > 0) {
		kfree(orig_node->tt_buff);
		orig_node->tt_buff_len = 0;
		orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
		if (orig_node->tt_buff) {
			memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
			orig_node->tt_buff_len = tt_buff_len;
		}
	}
	spin_unlock_bh(&orig_node->tt_buff_lock);
}

static void tt_req_purge(struct bat_priv *bat_priv)
{
	struct tt_req_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (is_out_of_time(node->issued_at,
				   TT_REQUEST_TIMEOUT * 1000)) {
			list_del(&node->list);
			kfree(node);
		}
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);
}

/* returns the pointer to the new tt_req_node struct if no request
 * has already been issued for this orig_node, NULL otherwise */
static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
					   struct orig_node *orig_node)
{
	struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;

	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
		if (compare_eth(tt_req_node_tmp, orig_node) &&
		    !is_out_of_time(tt_req_node_tmp->issued_at,
				    TT_REQUEST_TIMEOUT * 1000))
			goto unlock;
	}

	tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
	if (!tt_req_node)
		goto unlock;

	memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
	tt_req_node->issued_at = jiffies;

	list_add(&tt_req_node->list, &bat_priv->tt_req_list);
unlock:
	spin_unlock_bh(&bat_priv->tt_req_list_lock);
	return tt_req_node;
}

/* data_ptr is useless here, but has to be kept to respect the prototype */
static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
{
	const struct tt_common_entry *tt_common_entry = entry_ptr;

	if (tt_common_entry->flags & TT_CLIENT_NEW)
		return 0;
	return 1;
}

static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
{
	const struct tt_common_entry *tt_common_entry = entry_ptr;
	const struct tt_global_entry *tt_global_entry;
	const struct orig_node *orig_node = data_ptr;

	if (tt_common_entry->flags & TT_CLIENT_ROAM)
		return 0;

	tt_global_entry = container_of(tt_common_entry, struct tt_global_entry,
				       common);

	return (tt_global_entry->orig_node == orig_node);
}

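
/* tt_response_fill_table() serializes one table into a single TT_RESPONSE
 * skb: the requested length is clamped to the interface MTU (fragmentation is
 * not implemented in this version), entries are filtered through the valid_cb
 * callback (tt_local_valid_entry or tt_global_valid_entry) and the number of
 * copied entries ends up in tt_data. */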
static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
					      struct hashtable_t *hash,
					      struct hard_iface *primary_if,
					      int (*valid_cb)(const void *,
							      const void *),
					      void *cb_data)
{
	struct tt_common_entry *tt_common_entry;
	struct tt_query_packet *tt_response;
	struct tt_change *tt_change;
	struct hlist_node *node;
	struct hlist_head *head;
	struct sk_buff *skb = NULL;
	uint16_t tt_tot, tt_count;
	ssize_t tt_query_size = sizeof(struct tt_query_packet);

	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
		tt_len = primary_if->soft_iface->mtu - tt_query_size;
		tt_len -= tt_len % sizeof(struct tt_change);
	}
	tt_tot = tt_len / sizeof(struct tt_change);

	skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);
	tt_response = (struct tt_query_packet *)skb_put(skb,
						tt_query_size + tt_len);
	tt_response->ttvn = ttvn;

	tt_change = (struct tt_change *)(skb->data + tt_query_size);
	tt_count = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			if (tt_count == tt_tot)
				break;

			if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
				continue;

			memcpy(tt_change->addr, tt_common_entry->addr,
			       ETH_ALEN);
			tt_change->flags = NO_FLAGS;

			tt_count++;
			tt_change++;
		}
	}

	/* store in the message the number of entries we have successfully
	 * copied */
	tt_response->tt_data = htons(tt_count);

out:
	return skb;
}

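
/* send_tt_request() asks an originator for its translation table after an
 * inconsistency has been detected: the request carries our view of the ttvn
 * and CRC, optionally demands the full table, and is tracked in tt_req_list
 * so that no duplicate request is issued while a reply is still pending. */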
static int send_tt_request(struct bat_priv *bat_priv,
			   struct orig_node *dst_orig_node,
			   uint8_t ttvn, uint16_t tt_crc, bool full_table)
{
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_request;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if;
	struct tt_req_node *tt_req_node = NULL;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet */
	tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
	if (!tt_req_node)
		goto out;

	skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	tt_request = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet));

	tt_request->packet_type = BAT_TT_QUERY;
	tt_request->version = COMPAT_VERSION;
	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
	tt_request->ttl = TTL;
	tt_request->ttvn = ttvn;
	tt_request->tt_data = tt_crc;
	tt_request->flags = TT_REQUEST;

	if (full_table)
		tt_request->flags |= TT_FULL_TABLE;

	neigh_node = orig_node_get_router(dst_orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv, "Sending TT_REQUEST to %pM via %pM "
		"[%c]\n", dst_orig_node->orig, neigh_node->addr,
		(full_table ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (ret && tt_req_node) {
		spin_lock_bh(&bat_priv->tt_req_list_lock);
		list_del(&tt_req_node->list);
		spin_unlock_bh(&bat_priv->tt_req_list_lock);
		kfree(tt_req_node);
	}
	return ret;
}

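
/* TT_REQUESTs may target another node's table: send_other_tt_response()
 * answers on behalf of the real destination, either by copying that
 * originator's cached OGM diff buffer or by serializing its global entries
 * through tt_response_fill_table(), while send_my_tt_response() serves
 * requests addressed to this node's own local table. */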
static bool send_other_tt_response(struct bat_priv *bat_priv,
				   struct tt_query_packet *tt_request)
{
	struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t orig_ttvn, req_ttvn, ttvn;
	bool ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for "
		"ttvn: %u (%pM) [%c]\n", tt_request->src,
		tt_request->ttvn, tt_request->dst,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	/* Let's get the orig node of the REAL destination */
	req_dst_orig_node = orig_hash_find(bat_priv, tt_request->dst);
	if (!req_dst_orig_node)
		goto out;

	res_dst_orig_node = orig_hash_find(bat_priv, tt_request->src);
	if (!res_dst_orig_node)
		goto out;

	neigh_node = orig_node_get_router(res_dst_orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
	req_ttvn = tt_request->ttvn;

	/* I don't have the requested data */
	if (orig_ttvn != req_ttvn ||
	    tt_request->tt_data != req_dst_orig_node->tt_crc)
		goto out;

	/* If the full table has been explicitly requested */
	if (tt_request->flags & TT_FULL_TABLE ||
	    !req_dst_orig_node->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, so I'll send
	 * only one packet with as many TT entries as I can */
	if (!full_table) {
		spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
		tt_len = req_dst_orig_node->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		/* Copy the last orig_node's OGM buffer */
		memcpy(tt_buff, req_dst_orig_node->tt_buff,
		       req_dst_orig_node->tt_buff_len);

		spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
	} else {
		tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
			 sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_global_hash,
					     primary_if, tt_global_valid_entry,
					     req_dst_orig_node);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->packet_type = BAT_TT_QUERY;
	tt_response->version = COMPAT_VERSION;
	tt_response->ttl = TTL;
	memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
		res_dst_orig_node->orig, neigh_node->addr,
		req_dst_orig_node->orig, req_ttvn);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
out:
	if (res_dst_orig_node)
		orig_node_free_ref(res_dst_orig_node);
	if (req_dst_orig_node)
		orig_node_free_ref(req_dst_orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}

static bool send_my_tt_response(struct bat_priv *bat_priv,
				struct tt_query_packet *tt_request)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t my_ttvn, req_ttvn, ttvn;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for "
		"ttvn: %u (me) [%c]\n", tt_request->src,
		tt_request->ttvn,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	req_ttvn = tt_request->ttvn;

	orig_node = orig_hash_find(bat_priv, tt_request->src);
	if (!orig_node)
		goto out;

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* If the full table has been explicitly requested or the gap
	 * is too big send the whole local translation table */
	if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
	    !bat_priv->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, so I'll send
	 * only one packet with as many TT entries as I can */
	if (!full_table) {
		spin_lock_bh(&bat_priv->tt_buff_lock);
		tt_len = bat_priv->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		memcpy(tt_buff, bat_priv->tt_buff,
		       bat_priv->tt_buff_len);
		spin_unlock_bh(&bat_priv->tt_buff_lock);
	} else {
		tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
			 sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_local_hash,
					     primary_if, tt_local_valid_entry,
					     NULL);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->packet_type = BAT_TT_QUERY;
	tt_response->version = COMPAT_VERSION;
	tt_response->ttl = TTL;
	memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE to %pM via %pM [%c]\n",
		orig_node->orig, neigh_node->addr,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	goto out;

unlock:
	spin_unlock_bh(&bat_priv->tt_buff_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	/* This packet was for me, so it doesn't need to be re-routed */
	return true;
}

bool send_tt_response(struct bat_priv *bat_priv,
		      struct tt_query_packet *tt_request)
{
	if (is_my_mac(tt_request->dst))
		return send_my_tt_response(bat_priv, tt_request);
	else
		return send_other_tt_response(bat_priv, tt_request);
}

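
/* Applying a received diff: each tt_change either deletes the client from the
 * global table (TT_CLIENT_DEL) or (re)adds it for the announcing originator;
 * a failed add aborts the loop so the ttvn is not bumped over a partially
 * applied table. */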
static void _tt_update_changes(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct tt_change *tt_change,
			       uint16_t tt_num_changes, uint8_t ttvn)
{
	for (i = 0; i < tt_num_changes; i++) {
		if ((tt_change + i)->flags & TT_CLIENT_DEL)
			tt_global_del(bat_priv, orig_node,
				      (tt_change + i)->addr,
				      "tt removed by changes",
				      (tt_change + i)->flags & TT_CLIENT_ROAM);
		else
			if (!tt_global_add(bat_priv, orig_node,
					   (tt_change + i)->addr, ttvn, false,
					   (tt_change + i)->flags &
							TT_CLIENT_WIFI))
				/* In case of problem while storing a
				 * global_entry, we stop the updating
				 * procedure without committing the
				 * ttvn change. This avoids sending
				 * corrupted data in tt_request messages */
				return;
	}
}

static void tt_fill_gtable(struct bat_priv *bat_priv,
			   struct tt_query_packet *tt_response)
{
	struct orig_node *orig_node = NULL;

	orig_node = orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	/* Purge the old table first */
	tt_global_del_orig(bat_priv, orig_node, "Received full table");

	_tt_update_changes(bat_priv, orig_node,
			   (struct tt_change *)(tt_response + 1),
			   tt_response->tt_data, tt_response->ttvn);

	spin_lock_bh(&orig_node->tt_buff_lock);
	kfree(orig_node->tt_buff);
	orig_node->tt_buff_len = 0;
	orig_node->tt_buff = NULL;
	spin_unlock_bh(&orig_node->tt_buff_lock);

	atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
}

static void tt_update_changes(struct bat_priv *bat_priv,
			      struct orig_node *orig_node,
			      uint16_t tt_num_changes, uint8_t ttvn,
			      struct tt_change *tt_change)
{
	_tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
			   ttvn);

	tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
			    tt_num_changes);
	atomic_set(&orig_node->last_ttvn, ttvn);
}

bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
{
	struct tt_local_entry *tt_local_entry = NULL;
	bool ret = false;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);
	if (!tt_local_entry)
		goto out;
	/* Check if the client has been logically deleted (but is kept for
	 * consistency purposes) */
	if (tt_local_entry->common.flags & TT_CLIENT_PENDING)
		goto out;
	ret = true;
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	return ret;
}

void handle_tt_response(struct bat_priv *bat_priv,
			struct tt_query_packet *tt_response)
{
	struct tt_req_node *node, *safe;
	struct orig_node *orig_node = NULL;

	bat_dbg(DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for "
		"ttvn %d t_size: %d [%c]\n",
		tt_response->src, tt_response->ttvn,
		tt_response->tt_data,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	orig_node = orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	if (tt_response->flags & TT_FULL_TABLE)
		tt_fill_gtable(bat_priv, tt_response);
	else
		tt_update_changes(bat_priv, orig_node, tt_response->tt_data,
				  tt_response->ttvn,
				  (struct tt_change *)(tt_response + 1));

	/* Delete the tt_req_node from pending tt_requests list */
	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (!compare_eth(node->addr, tt_response->src))
			continue;
		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);

	/* Recalculate the CRC for this orig_node and store it */
	orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
	/* Roaming phase is over: tables are in sync again. I can
	 * unset the flag */
	orig_node->tt_poss_change = false;
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
}

int tt_init(struct bat_priv *bat_priv)
{
	if (!tt_local_init(bat_priv))
		return 0;

	if (!tt_global_init(bat_priv))
		return 0;

	tt_start_timer(bat_priv);

	return 1;
}

static void tt_roam_list_free(struct bat_priv *bat_priv)
{
	struct tt_roam_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);

	list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
		list_del(&node->list);
		kfree(node);
	}

	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
}

static void tt_roam_purge(struct bat_priv *bat_priv)
{
	struct tt_roam_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
		if (!is_out_of_time(node->first_time,
				    ROAMING_MAX_TIME * 1000))
			continue;

		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
}

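
/* Roaming advertisements are rate limited per client: a tt_roam_node keeps a
 * counter and a first_time timestamp, and entries older than ROAMING_MAX_TIME
 * are recycled by tt_roam_purge() above, so a client can trigger at most
 * ROAMING_MAX_COUNT advertisements per window. */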
/* This function checks whether the client already reached the
 * maximum number of possible roaming phases. In this case the ROAMING_ADV
 * will not be sent.
 *
 * returns true if the ROAMING_ADV can be sent, false otherwise */
static bool tt_check_roam_count(struct bat_priv *bat_priv,
				uint8_t *client)
{
	struct tt_roam_node *tt_roam_node;
	bool ret = false;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet */
	list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
		if (!compare_eth(tt_roam_node->addr, client))
			continue;

		if (is_out_of_time(tt_roam_node->first_time,
				   ROAMING_MAX_TIME * 1000))
			continue;

		if (!atomic_dec_not_zero(&tt_roam_node->counter))
			/* Sorry, you roamed too many times! */
			goto unlock;
		ret = true;
		goto unlock;
	}

	if (!ret) {
		tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
		if (!tt_roam_node)
			goto unlock;

		tt_roam_node->first_time = jiffies;
		atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
		memcpy(tt_roam_node->addr, client, ETH_ALEN);

		list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
		ret = true;
	}

unlock:
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
	return ret;
}

void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
		   struct orig_node *orig_node)
{
	struct neigh_node *neigh_node = NULL;
	struct sk_buff *skb = NULL;
	struct roam_adv_packet *roam_adv_packet;
	struct hard_iface *primary_if;

	/* before going on we have to check whether the client has
	 * already roamed to us too many times */
	if (!tt_check_roam_count(bat_priv, client))
		goto out;

	skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
					sizeof(struct roam_adv_packet));

	roam_adv_packet->packet_type = BAT_ROAM_ADV;
	roam_adv_packet->version = COMPAT_VERSION;
	roam_adv_packet->ttl = TTL;
	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;
	memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	hardif_free_ref(primary_if);
	memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
	memcpy(roam_adv_packet->client, client, ETH_ALEN);

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
		orig_node->orig, client, neigh_node->addr);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
}

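
/* tt_purge() is the periodic worker armed by tt_start_timer(): it expires
 * stale local clients, roaming global entries, pending TT_REQUESTs and
 * roaming counters, then re-arms itself. */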
static void tt_purge(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, tt_work);

	tt_local_purge(bat_priv);
	tt_global_roam_purge(bat_priv);
	tt_req_purge(bat_priv);
	tt_roam_purge(bat_priv);

	tt_start_timer(bat_priv);
}

void tt_free(struct bat_priv *bat_priv)
{
	cancel_delayed_work_sync(&bat_priv->tt_work);

	tt_local_table_free(bat_priv);
	tt_global_table_free(bat_priv);
	tt_req_list_free(bat_priv);
	tt_changes_list_free(bat_priv);
	tt_roam_list_free(bat_priv);

	kfree(bat_priv->tt_buff);
}

/* This function will enable or disable the specified flags for all the entries
 * in the given hash table and returns the number of modified entries */
static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags,
			     bool enable)
{
	uint16_t changed_num = 0;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_common_entry *tt_common_entry;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_common_entry, node,
					 head, hash_entry) {
			if (enable) {
				if ((tt_common_entry->flags & flags) == flags)
					continue;
				tt_common_entry->flags |= flags;
			} else {
				if (!(tt_common_entry->flags & flags))
					continue;
				tt_common_entry->flags &= ~flags;
			}
			changed_num++;
		}
	}

	return changed_num;
}

/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_common_entry *tt_common_entry;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
					  head, hash_entry) {
			if (!(tt_common_entry->flags & TT_CLIENT_PENDING))
				continue;

			bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry "
				"(%pM): pending\n", tt_common_entry->addr);

			atomic_dec(&bat_priv->num_local_tt);
			hlist_del_rcu(node);
			tt_local_entry = container_of(tt_common_entry,
						      struct tt_local_entry,
						      common);
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}
}

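
/* tt_commit_changes() is run once per OGM interval: freshly learned clients
 * lose their TT_CLIENT_NEW flag and start being counted as local entries,
 * clients pending deletion are purged, and only then is the local ttvn
 * incremented. */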
void tt_commit_changes(struct bat_priv *bat_priv)
{
	uint16_t changed_num = tt_set_flags(bat_priv->tt_local_hash,
					    TT_CLIENT_NEW, false);
	/* all the reset entries now have to be counted as local entries */
	atomic_add(changed_num, &bat_priv->num_local_tt);
	tt_local_purge_pending_clients(bat_priv);

	/* Increment the TTVN only once per OGM interval */
	atomic_inc(&bat_priv->ttvn);
	bat_priv->tt_poss_change = false;
}

bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
{
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	bool ret = false;

	if (!atomic_read(&bat_priv->ap_isolation))
		goto out;

	tt_local_entry = tt_local_hash_find(bat_priv, dst);
	if (!tt_local_entry)
		goto out;

	tt_global_entry = tt_global_hash_find(bat_priv, src);
	if (!tt_global_entry)
		goto out;

	if (_is_ap_isolated(tt_local_entry, tt_global_entry))
		ret = true;

out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	return ret;
}

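
/* tt_update_orig() is the entry point for TT metadata carried by an OGM. A
 * ttvn exactly one ahead lets us apply the attached diff and verify the
 * recomputed CRC; any other mismatch (skipped ttvn, CRC disagreement, or a
 * diff too big to be attached) falls back to send_tt_request(), asking for a
 * diff or the full table as appropriate. */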
void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
		    const unsigned char *tt_buff, uint8_t tt_num_changes,
		    uint8_t ttvn, uint16_t tt_crc)
{
	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
	bool full_table = true;

	/* the ttvn increased by one -> we can apply the attached changes */
	if (ttvn - orig_ttvn == 1) {
		/* the OGM could not contain the changes due to their size or
		 * because they have already been sent TT_OGM_APPEND_MAX times.
		 * In this case send a tt request */
		if (!tt_num_changes) {
			full_table = false;
			goto request_table;
		}

		tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
				  (struct tt_change *)tt_buff);

		/* Even if we received the precomputed crc with the OGM, we
		 * prefer to recompute it to spot any possible inconsistency
		 * in the global table */
		orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);

		/* The ttvn alone is not enough to guarantee consistency
		 * because a single value could represent different states
		 * (due to the wrap around). Thus a node has to check whether
		 * the resulting table (after applying the changes) is still
		 * consistent or not. E.g. a node could disconnect while its
		 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
		 * checking the CRC value is mandatory to detect the
		 * inconsistency */
		if (orig_node->tt_crc != tt_crc)
			goto request_table;

		/* Roaming phase is over: tables are in sync again. I can
		 * unset the flag */
		orig_node->tt_poss_change = false;
	} else {
		/* if we missed more than one change or our tables are not
		 * in sync anymore -> request fresh tt data */
		if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
request_table:
			bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
				"Need to retrieve the correct information "
				"(ttvn: %u last_ttvn: %u crc: %u last_crc: "
				"%u num_changes: %u)\n", orig_node->orig, ttvn,
				orig_ttvn, tt_crc, orig_node->tt_crc,
				tt_num_changes);
			send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
					full_table);
		}
	}
}