/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 */

#include "main.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "send.h"
#include "hash.h"
#include "originator.h"
#include "routing.h"

#include <linux/crc16.h>

static void _tt_global_del(struct bat_priv *bat_priv,
			   struct tt_global_entry *tt_global_entry,
			   const char *message);
static void tt_purge(struct work_struct *work);

/* returns 1 if they are the same mac addr */
static int compare_ltt(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct tt_local_entry,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

/* returns 1 if they are the same mac addr */
static int compare_gtt(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct tt_global_entry,
					 hash_entry);

	return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
}

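/* (re)arm the delayed work that drives the periodic table maintenance;
 * tt_purge() runs every 5 seconds */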
static void tt_start_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge);
	queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work,
			   msecs_to_jiffies(5000));
}

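/* look up a client MAC address in the local translation table; on success the
 * returned entry has its refcount increased and must be released with
 * tt_local_entry_free_ref() */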
static struct tt_local_entry *tt_local_hash_find(struct bat_priv *bat_priv,
						 const void *data)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_local_entry *tt_local_entry, *tt_local_entry_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tt_local_entry, node, head, hash_entry) {
		if (!compare_eth(tt_local_entry, data))
			continue;

		if (!atomic_inc_not_zero(&tt_local_entry->refcount))
			continue;

		tt_local_entry_tmp = tt_local_entry;
		break;
	}
	rcu_read_unlock();

	return tt_local_entry_tmp;
}

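/* same as tt_local_hash_find() but for the global translation table */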
static struct tt_global_entry *tt_global_hash_find(struct bat_priv *bat_priv,
						   const void *data)
{
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_global_entry *tt_global_entry;
	struct tt_global_entry *tt_global_entry_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = choose_orig(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tt_global_entry, node, head, hash_entry) {
		if (!compare_eth(tt_global_entry, data))
			continue;

		if (!atomic_inc_not_zero(&tt_global_entry->refcount))
			continue;

		tt_global_entry_tmp = tt_global_entry;
		break;
	}
	rcu_read_unlock();

	return tt_global_entry_tmp;
}

static bool is_out_of_time(unsigned long starting_time, unsigned long timeout)
{
	unsigned long deadline;

	deadline = starting_time + msecs_to_jiffies(timeout);
	return time_after(jiffies, deadline);
}

static void tt_local_entry_free_ref(struct tt_local_entry *tt_local_entry)
{
	if (atomic_dec_and_test(&tt_local_entry->refcount))
		kfree_rcu(tt_local_entry, rcu);
}

static void tt_global_entry_free_ref(struct tt_global_entry *tt_global_entry)
{
	if (atomic_dec_and_test(&tt_global_entry->refcount))
		kfree_rcu(tt_global_entry, rcu);
}

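/* queue a local table change (client added/deleted) so that it gets appended
 * to the next OGMs, and restart the OGM append counter */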
static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr,
			   uint8_t flags)
{
	struct tt_change_node *tt_change_node;

	tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
	if (!tt_change_node)
		return;

	tt_change_node->change.flags = flags;
	memcpy(tt_change_node->change.addr, addr, ETH_ALEN);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	/* track the change in the OGM interval list */
	list_add_tail(&tt_change_node->list, &bat_priv->tt_changes_list);
	atomic_inc(&bat_priv->tt_local_changes);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	atomic_set(&bat_priv->tt_ogm_append_cnt, 0);
}

int tt_len(int changes_num)
{
	return changes_num * sizeof(struct tt_change);
}

static int tt_local_init(struct bat_priv *bat_priv)
{
	if (bat_priv->tt_local_hash)
		return 1;

	bat_priv->tt_local_hash = hash_new(1024);

	if (!bat_priv->tt_local_hash)
		return 0;

	return 1;
}

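/* register a client seen on the soft interface in the local table; if the
 * address is currently announced by another originator the client is treated
 * as roaming towards us and a ROAMING_ADV is sent back */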
void tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
		  int ifindex)
{
	struct bat_priv *bat_priv = netdev_priv(soft_iface);
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);

	if (tt_local_entry) {
		tt_local_entry->last_seen = jiffies;
		goto out;
	}

	tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
	if (!tt_local_entry)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Creating new local tt entry: %pM (ttvn: %d)\n", addr,
		(uint8_t)atomic_read(&bat_priv->ttvn));

	memcpy(tt_local_entry->addr, addr, ETH_ALEN);
	tt_local_entry->last_seen = jiffies;
	tt_local_entry->flags = NO_FLAGS;
	if (is_wifi_iface(ifindex))
		tt_local_entry->flags |= TT_CLIENT_WIFI;
	atomic_set(&tt_local_entry->refcount, 2);

	/* the batman interface mac address should never be purged */
	if (compare_eth(addr, soft_iface->dev_addr))
		tt_local_entry->flags |= TT_CLIENT_NOPURGE;

	tt_local_event(bat_priv, addr, tt_local_entry->flags);

	/* The local entry has to be marked as NEW to avoid sending it in
	 * a full table response going out before the next ttvn increment
	 * (consistency check) */
	tt_local_entry->flags |= TT_CLIENT_NEW;

	hash_add(bat_priv->tt_local_hash, compare_ltt, choose_orig,
		 tt_local_entry, &tt_local_entry->hash_entry);

	/* remove address from global hash if present */
	tt_global_entry = tt_global_hash_find(bat_priv, addr);

	/* Check whether this is a roaming event */
	if (tt_global_entry) {
		/* This node is probably going to update its tt table */
		tt_global_entry->orig_node->tt_poss_change = true;
		/* The global entry has to be marked as PENDING and has to be
		 * kept for consistency purposes */
		tt_global_entry->flags |= TT_CLIENT_PENDING;
		send_roam_adv(bat_priv, tt_global_entry->addr,
			      tt_global_entry->orig_node);
	}
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}

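/* copy as many queued local table changes as fit into the OGM buffer and keep
 * a private copy in bat_priv->tt_buff to serve later TT_REQUESTs */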
int tt_changes_fill_buffer(struct bat_priv *bat_priv,
			   unsigned char *buff, int buff_len)
{
	int count = 0, tot_changes = 0;
	struct tt_change_node *entry, *safe;

	if (buff_len > 0)
		tot_changes = buff_len / tt_len(1);

	spin_lock_bh(&bat_priv->tt_changes_list_lock);
	atomic_set(&bat_priv->tt_local_changes, 0);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		if (count < tot_changes) {
			memcpy(buff + tt_len(count),
			       &entry->change, sizeof(struct tt_change));
			count++;
		}
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);

	/* Keep the buffer for possible tt_request */
	spin_lock_bh(&bat_priv->tt_buff_lock);
	kfree(bat_priv->tt_buff);
	bat_priv->tt_buff_len = 0;
	bat_priv->tt_buff = NULL;
	/* We check whether this new OGM has no changes due to size problems */
	if (buff_len > 0) {
		/* if kmalloc() fails we will reply with the full table
		 * instead of providing the diff */
		bat_priv->tt_buff = kmalloc(buff_len, GFP_ATOMIC);
		if (bat_priv->tt_buff) {
			memcpy(bat_priv->tt_buff, buff, buff_len);
			bat_priv->tt_buff_len = buff_len;
		}
	}
	spin_unlock_bh(&bat_priv->tt_buff_lock);

	return tot_changes;
}

int tt_local_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	size_t buf_size, pos;
	char *buff;
	int i, ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "please specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq, "Locally retrieved addresses (from %s) "
		   "announced via TT (TTVN: %u):\n",
		   net_dev->name, (uint8_t)atomic_read(&bat_priv->ttvn));

	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx\n" */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		__hlist_for_each_rcu(node, head)
			buf_size += 29;
		rcu_read_unlock();
	}

	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		ret = -ENOMEM;
		goto out;
	}

	buff[0] = '\0';
	pos = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			pos += snprintf(buff + pos, 30, " * %pM "
					"[%c%c%c%c%c]\n",
					tt_local_entry->addr,
					(tt_local_entry->flags &
					 TT_CLIENT_ROAM ? 'R' : '.'),
					(tt_local_entry->flags &
					 TT_CLIENT_NOPURGE ? 'P' : '.'),
					(tt_local_entry->flags &
					 TT_CLIENT_NEW ? 'N' : '.'),
					(tt_local_entry->flags &
					 TT_CLIENT_PENDING ? 'X' : '.'),
					(tt_local_entry->flags &
					 TT_CLIENT_WIFI ? 'W' : '.'));
		}
		rcu_read_unlock();
	}

	seq_printf(seq, "%s", buff);
	kfree(buff);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}

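/* flag a local entry as "pending to be removed"; the entry stays in the table
 * until tt_commit_changes() so that full table responses remain consistent */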
static void tt_local_set_pending(struct bat_priv *bat_priv,
				 struct tt_local_entry *tt_local_entry,
				 uint16_t flags)
{
	tt_local_event(bat_priv, tt_local_entry->addr,
		       tt_local_entry->flags | flags);

	/* The local client has to be marked as "pending to be removed" but has
	 * to be kept in the table in order to send it in a full table
	 * response issued before the next ttvn increment (consistency check) */
	tt_local_entry->flags |= TT_CLIENT_PENDING;
}

void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr,
		     const char *message, bool roaming)
{
	struct tt_local_entry *tt_local_entry = NULL;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);
	if (!tt_local_entry)
		goto out;

	tt_local_set_pending(bat_priv, tt_local_entry, TT_CLIENT_DEL |
			     (roaming ? TT_CLIENT_ROAM : NO_FLAGS));

	bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) pending to be removed: "
		"%s\n", tt_local_entry->addr, message);
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
}

static void tt_local_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	int i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
					  head, hash_entry) {
			if (tt_local_entry->flags & TT_CLIENT_NOPURGE)
				continue;

			/* entry already marked for deletion */
			if (tt_local_entry->flags & TT_CLIENT_PENDING)
				continue;

			if (!is_out_of_time(tt_local_entry->last_seen,
					    TT_LOCAL_TIMEOUT * 1000))
				continue;

			tt_local_set_pending(bat_priv, tt_local_entry,
					     TT_CLIENT_DEL);
			bat_dbg(DBG_TT, bat_priv, "Local tt entry (%pM) "
				"pending to be removed: timed out\n",
				tt_local_entry->addr);
		}
		spin_unlock_bh(list_lock);
	}
}

static void tt_local_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	int i;

	if (!bat_priv->tt_local_hash)
		return;

	hash = bat_priv->tt_local_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);

	bat_priv->tt_local_hash = NULL;
}

static int tt_global_init(struct bat_priv *bat_priv)
{
	if (bat_priv->tt_global_hash)
		return 1;

	bat_priv->tt_global_hash = hash_new(1024);

	if (!bat_priv->tt_global_hash)
		return 0;

	return 1;
}

static void tt_changes_list_free(struct bat_priv *bat_priv)
{
	struct tt_change_node *entry, *safe;

	spin_lock_bh(&bat_priv->tt_changes_list_lock);

	list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list,
				 list) {
		list_del(&entry->list);
		kfree(entry);
	}

	atomic_set(&bat_priv->tt_local_changes, 0);
	spin_unlock_bh(&bat_priv->tt_changes_list_lock);
}

/* caller must hold orig_node refcount */
int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node,
		  const unsigned char *tt_addr, uint8_t ttvn, bool roaming,
		  bool wifi)
{
	struct tt_global_entry *tt_global_entry;
	struct orig_node *orig_node_tmp;
	int ret = 0;

	tt_global_entry = tt_global_hash_find(bat_priv, tt_addr);

	if (!tt_global_entry) {
		tt_global_entry =
			kmalloc(sizeof(*tt_global_entry),
				GFP_ATOMIC);
		if (!tt_global_entry)
			goto out;

		memcpy(tt_global_entry->addr, tt_addr, ETH_ALEN);
		/* Assign the new orig_node */
		atomic_inc(&orig_node->refcount);
		tt_global_entry->orig_node = orig_node;
		tt_global_entry->ttvn = ttvn;
		tt_global_entry->flags = NO_FLAGS;
		tt_global_entry->roam_at = 0;
		atomic_set(&tt_global_entry->refcount, 2);

		hash_add(bat_priv->tt_global_hash, compare_gtt,
			 choose_orig, tt_global_entry,
			 &tt_global_entry->hash_entry);
		atomic_inc(&orig_node->tt_size);
	} else {
		if (tt_global_entry->orig_node != orig_node) {
			atomic_dec(&tt_global_entry->orig_node->tt_size);
			orig_node_tmp = tt_global_entry->orig_node;
			atomic_inc(&orig_node->refcount);
			tt_global_entry->orig_node = orig_node;
			orig_node_free_ref(orig_node_tmp);
			atomic_inc(&orig_node->tt_size);
		}
		tt_global_entry->ttvn = ttvn;
		tt_global_entry->flags = NO_FLAGS;
		tt_global_entry->roam_at = 0;
	}

	if (wifi)
		tt_global_entry->flags |= TT_CLIENT_WIFI;

	bat_dbg(DBG_TT, bat_priv,
		"Creating new global tt entry: %pM (via %pM)\n",
		tt_global_entry->addr, orig_node->orig);

	/* remove address from local hash if present */
	tt_local_remove(bat_priv, tt_global_entry->addr,
			"global tt received", roaming);
	ret = 1;
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	return ret;
}

int tt_global_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_global_entry *tt_global_entry;
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	size_t buf_size, pos;
	char *buff;
	int i, ret = 0;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - please "
				 "specify interfaces to enable it\n",
				 net_dev->name);
		goto out;
	}

	if (primary_if->if_status != IF_ACTIVE) {
		ret = seq_printf(seq, "BATMAN mesh %s disabled - "
				 "primary interface not active\n",
				 net_dev->name);
		goto out;
	}

	seq_printf(seq,
		   "Globally announced TT entries received via the mesh %s\n",
		   net_dev->name);
	seq_printf(seq, " %-13s %s %-15s %s %s\n",
		   "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");

	buf_size = 1;
	/* Estimate length for: " * xx:xx:xx:xx:xx:xx (ttvn) via
	 * xx:xx:xx:xx:xx:xx (cur_ttvn)\n" */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		__hlist_for_each_rcu(node, head)
			buf_size += 68;
		rcu_read_unlock();
	}

	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		ret = -ENOMEM;
		goto out;
	}

	buff[0] = '\0';
	pos = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_global_entry, node,
					 head, hash_entry) {
			pos += snprintf(buff + pos, 69,
					" * %pM (%3u) via %pM (%3u) "
					"[%c%c%c]\n", tt_global_entry->addr,
					tt_global_entry->ttvn,
					tt_global_entry->orig_node->orig,
					(uint8_t) atomic_read(
						&tt_global_entry->orig_node->
						last_ttvn),
					(tt_global_entry->flags &
					 TT_CLIENT_ROAM ? 'R' : '.'),
					(tt_global_entry->flags &
					 TT_CLIENT_PENDING ? 'X' : '.'),
					(tt_global_entry->flags &
					 TT_CLIENT_WIFI ? 'W' : '.'));
		}
		rcu_read_unlock();
	}

	seq_printf(seq, "%s", buff);
	kfree(buff);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}

static void _tt_global_del(struct bat_priv *bat_priv,
			   struct tt_global_entry *tt_global_entry,
			   const char *message)
{
	if (!tt_global_entry)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Deleting global tt entry %pM (via %pM): %s\n",
		tt_global_entry->addr, tt_global_entry->orig_node->orig,
		message);

	atomic_dec(&tt_global_entry->orig_node->tt_size);

	hash_remove(bat_priv->tt_global_hash, compare_gtt, choose_orig,
		    tt_global_entry->addr);
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}

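/* remove a global client announced by orig_node; roaming clients are only
 * flagged with TT_CLIENT_ROAM and purged later by tt_global_roam_purge() */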
void tt_global_del(struct bat_priv *bat_priv,
		   struct orig_node *orig_node, const unsigned char *addr,
		   const char *message, bool roaming)
{
	struct tt_global_entry *tt_global_entry = NULL;

	tt_global_entry = tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	if (tt_global_entry->orig_node == orig_node) {
		if (roaming) {
			tt_global_entry->flags |= TT_CLIENT_ROAM;
			tt_global_entry->roam_at = jiffies;
			goto out;
		}
		_tt_global_del(bat_priv, tt_global_entry, message);
	}
out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
}

void tt_global_del_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node, const char *message)
{
	struct tt_global_entry *tt_global_entry;
	int i;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct hlist_node *node, *safe;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_global_entry, node, safe,
					  head, hash_entry) {
			if (tt_global_entry->orig_node == orig_node) {
				bat_dbg(DBG_TT, bat_priv,
					"Deleting global tt entry %pM "
					"(via %pM): originator time out\n",
					tt_global_entry->addr,
					tt_global_entry->orig_node->orig);
				hlist_del_rcu(node);
				tt_global_entry_free_ref(tt_global_entry);
			}
		}
		spin_unlock_bh(list_lock);
	}
	atomic_set(&orig_node->tt_size, 0);
}

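/* drop roaming global entries whose grace period has expired */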
static void tt_global_roam_purge(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	int i;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
					  head, hash_entry) {
			if (!(tt_global_entry->flags & TT_CLIENT_ROAM))
				continue;
			if (!is_out_of_time(tt_global_entry->roam_at,
					    TT_CLIENT_ROAM_TIMEOUT * 1000))
				continue;

			bat_dbg(DBG_TT, bat_priv, "Deleting global "
				"tt entry (%pM): Roaming timeout\n",
				tt_global_entry->addr);
			atomic_dec(&tt_global_entry->orig_node->tt_size);
			hlist_del_rcu(node);
			tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}
}

static void tt_global_table_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	int i;

	if (!bat_priv->tt_global_hash)
		return;

	hash = bat_priv->tt_global_hash;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_global_entry, node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(node);
			tt_global_entry_free_ref(tt_global_entry);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);

	bat_priv->tt_global_hash = NULL;
}

static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry,
			    struct tt_global_entry *tt_global_entry)
{
	bool ret = false;

	if (tt_local_entry->flags & TT_CLIENT_WIFI &&
	    tt_global_entry->flags & TT_CLIENT_WIFI)
		ret = true;

	return ret;
}

struct orig_node *transtable_search(struct bat_priv *bat_priv,
				    const uint8_t *src, const uint8_t *addr)
{
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	struct orig_node *orig_node = NULL;

	if (src && atomic_read(&bat_priv->ap_isolation)) {
		tt_local_entry = tt_local_hash_find(bat_priv, src);
		if (!tt_local_entry)
			goto out;
	}

	tt_global_entry = tt_global_hash_find(bat_priv, addr);
	if (!tt_global_entry)
		goto out;

	/* check whether the clients should not communicate due to AP
	 * isolation */
	if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry))
		goto out;

	if (!atomic_inc_not_zero(&tt_global_entry->orig_node->refcount))
		goto out;

	/* A global client marked as PENDING has already moved from that
	 * originator */
	if (tt_global_entry->flags & TT_CLIENT_PENDING)
		goto out;

	orig_node = tt_global_entry->orig_node;

out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);

	return orig_node;
}

/* Calculates the checksum of the global table of a given orig_node */
uint16_t tt_global_crc(struct bat_priv *bat_priv, struct orig_node *orig_node)
{
	uint16_t total = 0, total_one;
	struct hashtable_t *hash = bat_priv->tt_global_hash;
	struct tt_global_entry *tt_global_entry;
	struct hlist_node *node;
	struct hlist_head *head;
	int i, j;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_global_entry, node,
					 head, hash_entry) {
			if (compare_eth(tt_global_entry->orig_node,
					orig_node)) {
				/* Roaming clients are in the global table for
				 * consistency only. They don't have to be
				 * taken into account while computing the
				 * global crc */
				if (tt_global_entry->flags & TT_CLIENT_ROAM)
					continue;
				total_one = 0;
				for (j = 0; j < ETH_ALEN; j++)
					total_one = crc16_byte(total_one,
						tt_global_entry->addr[j]);
				total ^= total_one;
			}
		}
		rcu_read_unlock();
	}

	return total;
}

/* Calculates the checksum of the local table */
uint16_t tt_local_crc(struct bat_priv *bat_priv)
{
	uint16_t total = 0, total_one;
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node;
	struct hlist_head *head;
	int i, j;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			/* not yet committed clients must not be taken into
			 * account while computing the CRC */
			if (tt_local_entry->flags & TT_CLIENT_NEW)
				continue;
			total_one = 0;
			for (j = 0; j < ETH_ALEN; j++)
				total_one = crc16_byte(total_one,
						       tt_local_entry->addr[j]);
			total ^= total_one;
		}
		rcu_read_unlock();
	}

	return total;
}

static void tt_req_list_free(struct bat_priv *bat_priv)
{
	struct tt_req_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_req_list_lock);

	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		list_del(&node->list);
		kfree(node);
	}

	spin_unlock_bh(&bat_priv->tt_req_list_lock);
}

void tt_save_orig_buffer(struct bat_priv *bat_priv, struct orig_node *orig_node,
			 const unsigned char *tt_buff, uint8_t tt_num_changes)
{
	uint16_t tt_buff_len = tt_len(tt_num_changes);

	/* Replace the old buffer only if I received something in the
	 * last OGM (the OGM could carry no changes) */
	spin_lock_bh(&orig_node->tt_buff_lock);
	if (tt_buff_len > 0) {
		kfree(orig_node->tt_buff);
		orig_node->tt_buff_len = 0;
		orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
		if (orig_node->tt_buff) {
			memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
			orig_node->tt_buff_len = tt_buff_len;
		}
	}
	spin_unlock_bh(&orig_node->tt_buff_lock);
}

static void tt_req_purge(struct bat_priv *bat_priv)
{
	struct tt_req_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (is_out_of_time(node->issued_at,
				   TT_REQUEST_TIMEOUT * 1000)) {
			list_del(&node->list);
			kfree(node);
		}
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);
}

/* returns the pointer to the new tt_req_node struct if no request
 * has already been issued for this orig_node, NULL otherwise */
static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv,
					   struct orig_node *orig_node)
{
	struct tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;

	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry(tt_req_node_tmp, &bat_priv->tt_req_list, list) {
		if (compare_eth(tt_req_node_tmp, orig_node) &&
		    !is_out_of_time(tt_req_node_tmp->issued_at,
				    TT_REQUEST_TIMEOUT * 1000))
			goto unlock;
	}

	tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
	if (!tt_req_node)
		goto unlock;

	memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
	tt_req_node->issued_at = jiffies;

	list_add(&tt_req_node->list, &bat_priv->tt_req_list);
unlock:
	spin_unlock_bh(&bat_priv->tt_req_list_lock);
	return tt_req_node;
}

/* data_ptr is useless here, but has to be kept to respect the prototype */
static int tt_local_valid_entry(const void *entry_ptr, const void *data_ptr)
{
	const struct tt_local_entry *tt_local_entry = entry_ptr;

	if (tt_local_entry->flags & TT_CLIENT_NEW)
		return 0;
	return 1;
}

static int tt_global_valid_entry(const void *entry_ptr, const void *data_ptr)
{
	const struct tt_global_entry *tt_global_entry = entry_ptr;
	const struct orig_node *orig_node = data_ptr;

	if (tt_global_entry->flags & TT_CLIENT_ROAM)
		return 0;

	return (tt_global_entry->orig_node == orig_node);
}

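/* build a TT_RESPONSE skb: the requested table is trimmed to the interface
 * MTU and only entries accepted by valid_cb are copied */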
static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
					      struct hashtable_t *hash,
					      struct hard_iface *primary_if,
					      int (*valid_cb)(const void *,
							      const void *),
					      void *cb_data)
{
	struct tt_local_entry *tt_local_entry;
	struct tt_query_packet *tt_response;
	struct tt_change *tt_change;
	struct hlist_node *node;
	struct hlist_head *head;
	struct sk_buff *skb = NULL;
	uint16_t tt_tot, tt_count;
	ssize_t tt_query_size = sizeof(struct tt_query_packet);
	int i;

	if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
		tt_len = primary_if->soft_iface->mtu - tt_query_size;
		tt_len -= tt_len % sizeof(struct tt_change);
	}
	tt_tot = tt_len / sizeof(struct tt_change);

	skb = dev_alloc_skb(tt_query_size + tt_len + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);
	tt_response = (struct tt_query_packet *)skb_put(skb,
						     tt_query_size + tt_len);
	tt_response->ttvn = ttvn;

	tt_change = (struct tt_change *)(skb->data + tt_query_size);
	tt_count = 0;

	rcu_read_lock();
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			if (tt_count == tt_tot)
				break;

			if ((valid_cb) && (!valid_cb(tt_local_entry, cb_data)))
				continue;

			memcpy(tt_change->addr, tt_local_entry->addr, ETH_ALEN);
			tt_change->flags = NO_FLAGS;

			tt_count++;
			tt_change++;
		}
	}
	rcu_read_unlock();

	/* store in the message the number of entries we have successfully
	 * copied */
	tt_response->tt_data = htons(tt_count);

out:
	return skb;
}

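/* unicast a TT_REQUEST (optionally asking for the full table) towards
 * dst_orig_node via its best next hop; returns 0 on success */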
static int send_tt_request(struct bat_priv *bat_priv,
			   struct orig_node *dst_orig_node,
			   uint8_t ttvn, uint16_t tt_crc, bool full_table)
{
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_request;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if;
	struct tt_req_node *tt_req_node = NULL;
	int ret = 1;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet */
	tt_req_node = new_tt_req_node(bat_priv, dst_orig_node);
	if (!tt_req_node)
		goto out;

	skb = dev_alloc_skb(sizeof(struct tt_query_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	tt_request = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet));

	tt_request->packet_type = BAT_TT_QUERY;
	tt_request->version = COMPAT_VERSION;
	memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
	tt_request->ttl = TTL;
	tt_request->ttvn = ttvn;
	tt_request->tt_data = tt_crc;
	tt_request->flags = TT_REQUEST;

	if (full_table)
		tt_request->flags |= TT_FULL_TABLE;

	neigh_node = orig_node_get_router(dst_orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv, "Sending TT_REQUEST to %pM via %pM "
		"[%c]\n", dst_orig_node->orig, neigh_node->addr,
		(full_table ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (ret)
		kfree_skb(skb);
	if (ret && tt_req_node) {
		spin_lock_bh(&bat_priv->tt_req_list_lock);
		list_del(&tt_req_node->list);
		spin_unlock_bh(&bat_priv->tt_req_list_lock);
		kfree(tt_req_node);
	}
	return ret;
}

static bool send_other_tt_response(struct bat_priv *bat_priv,
				   struct tt_query_packet *tt_request)
{
	struct orig_node *req_dst_orig_node = NULL, *res_dst_orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t orig_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for "
		"ttvn: %u (%pM) [%c]\n", tt_request->src,
		tt_request->ttvn, tt_request->dst,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	/* Let's get the orig node of the REAL destination */
	req_dst_orig_node = get_orig_node(bat_priv, tt_request->dst);
	if (!req_dst_orig_node)
		goto out;

	res_dst_orig_node = get_orig_node(bat_priv, tt_request->src);
	if (!res_dst_orig_node)
		goto out;

	neigh_node = orig_node_get_router(res_dst_orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
	req_ttvn = tt_request->ttvn;

	/* I don't have the requested data */
	if (orig_ttvn != req_ttvn ||
	    tt_request->tt_data != req_dst_orig_node->tt_crc)
		goto out;

	/* If the full table has been explicitly requested */
	if (tt_request->flags & TT_FULL_TABLE ||
	    !req_dst_orig_node->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, so
	 * I'll send only one packet with as many TT entries as I can */
	if (!full_table) {
		spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
		tt_len = req_dst_orig_node->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		/* Copy the last orig_node's OGM buffer */
		memcpy(tt_buff, req_dst_orig_node->tt_buff,
		       req_dst_orig_node->tt_buff_len);

		spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
	} else {
		tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size) *
						sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_global_hash,
					     primary_if, tt_global_valid_entry,
					     req_dst_orig_node);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->packet_type = BAT_TT_QUERY;
	tt_response->version = COMPAT_VERSION;
	tt_response->ttl = TTL;
	memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
		res_dst_orig_node->orig, neigh_node->addr,
		req_dst_orig_node->orig, req_ttvn);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);

out:
	if (res_dst_orig_node)
		orig_node_free_ref(res_dst_orig_node);
	if (req_dst_orig_node)
		orig_node_free_ref(req_dst_orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	return ret;
}

static bool send_my_tt_response(struct bat_priv *bat_priv,
				struct tt_query_packet *tt_request)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct hard_iface *primary_if = NULL;
	uint8_t my_ttvn, req_ttvn, ttvn;
	int ret = false;
	unsigned char *tt_buff;
	bool full_table;
	uint16_t tt_len, tt_tot;
	struct sk_buff *skb = NULL;
	struct tt_query_packet *tt_response;

	bat_dbg(DBG_TT, bat_priv,
		"Received TT_REQUEST from %pM for "
		"ttvn: %u (me) [%c]\n", tt_request->src,
		tt_request->ttvn,
		(tt_request->flags & TT_FULL_TABLE ? 'F' : '.'));

	my_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);
	req_ttvn = tt_request->ttvn;

	orig_node = get_orig_node(bat_priv, tt_request->src);
	if (!orig_node)
		goto out;

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* If the full table has been explicitly requested or the gap
	 * is too big send the whole local translation table */
	if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn ||
	    !bat_priv->tt_buff)
		full_table = true;
	else
		full_table = false;

	/* In this version, fragmentation is not implemented, so
	 * I'll send only one packet with as many TT entries as I can */
	if (!full_table) {
		spin_lock_bh(&bat_priv->tt_buff_lock);
		tt_len = bat_priv->tt_buff_len;
		tt_tot = tt_len / sizeof(struct tt_change);

		skb = dev_alloc_skb(sizeof(struct tt_query_packet) +
				    tt_len + ETH_HLEN);
		if (!skb)
			goto unlock;

		skb_reserve(skb, ETH_HLEN);
		tt_response = (struct tt_query_packet *)skb_put(skb,
				sizeof(struct tt_query_packet) + tt_len);
		tt_response->ttvn = req_ttvn;
		tt_response->tt_data = htons(tt_tot);

		tt_buff = skb->data + sizeof(struct tt_query_packet);
		memcpy(tt_buff, bat_priv->tt_buff,
		       bat_priv->tt_buff_len);
		spin_unlock_bh(&bat_priv->tt_buff_lock);
	} else {
		tt_len = (uint16_t)atomic_read(&bat_priv->num_local_tt) *
						sizeof(struct tt_change);
		ttvn = (uint8_t)atomic_read(&bat_priv->ttvn);

		skb = tt_response_fill_table(tt_len, ttvn,
					     bat_priv->tt_local_hash,
					     primary_if, tt_local_valid_entry,
					     NULL);
		if (!skb)
			goto out;

		tt_response = (struct tt_query_packet *)skb->data;
	}

	tt_response->packet_type = BAT_TT_QUERY;
	tt_response->version = COMPAT_VERSION;
	tt_response->ttl = TTL;
	memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
	tt_response->flags = TT_RESPONSE;

	if (full_table)
		tt_response->flags |= TT_FULL_TABLE;

	bat_dbg(DBG_TT, bat_priv,
		"Sending TT_RESPONSE to %pM via %pM [%c]\n",
		orig_node->orig, neigh_node->addr,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = true;
	goto out;

unlock:
	spin_unlock_bh(&bat_priv->tt_buff_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (primary_if)
		hardif_free_ref(primary_if);
	if (!ret)
		kfree_skb(skb);
	/* This packet was for me, so it doesn't need to be re-routed */
	return true;
}

bool send_tt_response(struct bat_priv *bat_priv,
		      struct tt_query_packet *tt_request)
{
	if (is_my_mac(tt_request->dst))
		return send_my_tt_response(bat_priv, tt_request);
	else
		return send_other_tt_response(bat_priv, tt_request);
}

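/* apply a list of tt_change entries announced by orig_node: deletions are
 * forwarded to tt_global_del(), additions to tt_global_add() */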
static void _tt_update_changes(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct tt_change *tt_change,
			       uint16_t tt_num_changes, uint8_t ttvn)
{
	int i;

	for (i = 0; i < tt_num_changes; i++) {
		if ((tt_change + i)->flags & TT_CLIENT_DEL)
			tt_global_del(bat_priv, orig_node,
				      (tt_change + i)->addr,
				      "tt removed by changes",
				      (tt_change + i)->flags & TT_CLIENT_ROAM);
		else
			if (!tt_global_add(bat_priv, orig_node,
					   (tt_change + i)->addr, ttvn, false,
					   (tt_change + i)->flags &
							TT_CLIENT_WIFI))
				/* In case of problem while storing a
				 * global_entry, we stop the updating
				 * procedure without committing the
				 * ttvn change. This will avoid sending
				 * corrupted data on tt_request */
				return;
	}
}

static void tt_fill_gtable(struct bat_priv *bat_priv,
			   struct tt_query_packet *tt_response)
{
	struct orig_node *orig_node = NULL;

	orig_node = orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	/* Purge the old table first.. */
	tt_global_del_orig(bat_priv, orig_node, "Received full table");

	_tt_update_changes(bat_priv, orig_node,
			   (struct tt_change *)(tt_response + 1),
			   tt_response->tt_data, tt_response->ttvn);

	spin_lock_bh(&orig_node->tt_buff_lock);
	kfree(orig_node->tt_buff);
	orig_node->tt_buff_len = 0;
	orig_node->tt_buff = NULL;
	spin_unlock_bh(&orig_node->tt_buff_lock);

	atomic_set(&orig_node->last_ttvn, tt_response->ttvn);

out:
	if (orig_node)
		orig_node_free_ref(orig_node);
}

static void tt_update_changes(struct bat_priv *bat_priv,
			      struct orig_node *orig_node,
			      uint16_t tt_num_changes, uint8_t ttvn,
			      struct tt_change *tt_change)
{
	_tt_update_changes(bat_priv, orig_node, tt_change, tt_num_changes,
			   ttvn);

	tt_save_orig_buffer(bat_priv, orig_node, (unsigned char *)tt_change,
			    tt_num_changes);
	atomic_set(&orig_node->last_ttvn, ttvn);
}

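/* returns true if addr is announced (and not pending deletion) in the local
 * translation table */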
bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr)
{
	struct tt_local_entry *tt_local_entry = NULL;
	bool ret = false;

	tt_local_entry = tt_local_hash_find(bat_priv, addr);
	if (!tt_local_entry)
		goto out;
	/* Check if the client has been logically deleted (but is kept for
	 * consistency purposes) */
	if (tt_local_entry->flags & TT_CLIENT_PENDING)
		goto out;
	ret = true;
out:
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	return ret;
}

void handle_tt_response(struct bat_priv *bat_priv,
			struct tt_query_packet *tt_response)
{
	struct tt_req_node *node, *safe;
	struct orig_node *orig_node = NULL;

	bat_dbg(DBG_TT, bat_priv, "Received TT_RESPONSE from %pM for "
		"ttvn %d t_size: %d [%c]\n",
		tt_response->src, tt_response->ttvn,
		tt_response->tt_data,
		(tt_response->flags & TT_FULL_TABLE ? 'F' : '.'));

	orig_node = orig_hash_find(bat_priv, tt_response->src);
	if (!orig_node)
		goto out;

	if (tt_response->flags & TT_FULL_TABLE)
		tt_fill_gtable(bat_priv, tt_response);
	else
		tt_update_changes(bat_priv, orig_node, tt_response->tt_data,
				  tt_response->ttvn,
				  (struct tt_change *)(tt_response + 1));

	/* Delete the tt_req_node from pending tt_requests list */
	spin_lock_bh(&bat_priv->tt_req_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_req_list, list) {
		if (!compare_eth(node->addr, tt_response->src))
			continue;
		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt_req_list_lock);

	/* Recalculate the CRC for this orig_node and store it */
	orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);
	/* Roaming phase is over: tables are in sync again. I can
	 * unset the flag */
	orig_node->tt_poss_change = false;
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
}

int tt_init(struct bat_priv *bat_priv)
{
	if (!tt_local_init(bat_priv))
		return 0;

	if (!tt_global_init(bat_priv))
		return 0;

	tt_start_timer(bat_priv);

	return 1;
}

static void tt_roam_list_free(struct bat_priv *bat_priv)
{
	struct tt_roam_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);

	list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
		list_del(&node->list);
		kfree(node);
	}

	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
}

static void tt_roam_purge(struct bat_priv *bat_priv)
{
	struct tt_roam_node *node, *safe;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	list_for_each_entry_safe(node, safe, &bat_priv->tt_roam_list, list) {
		if (!is_out_of_time(node->first_time,
				    ROAMING_MAX_TIME * 1000))
			continue;

		list_del(&node->list);
		kfree(node);
	}
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
}

/* This function checks whether the client already reached the
 * maximum number of possible roaming phases. In this case the ROAMING_ADV
 * will not be sent.
 *
 * returns true if the ROAMING_ADV can be sent, false otherwise */
static bool tt_check_roam_count(struct bat_priv *bat_priv,
				uint8_t *client)
{
	struct tt_roam_node *tt_roam_node;
	bool ret = false;

	spin_lock_bh(&bat_priv->tt_roam_list_lock);
	/* The new tt_req will be issued only if I'm not waiting for a
	 * reply from the same orig_node yet */
	list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) {
		if (!compare_eth(tt_roam_node->addr, client))
			continue;

		if (is_out_of_time(tt_roam_node->first_time,
				   ROAMING_MAX_TIME * 1000))
			continue;

		if (!atomic_dec_not_zero(&tt_roam_node->counter))
			/* Sorry, you roamed too many times! */
			goto unlock;
		ret = true;
		break;
	}

	if (!ret) {
		tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
		if (!tt_roam_node)
			goto unlock;

		tt_roam_node->first_time = jiffies;
		atomic_set(&tt_roam_node->counter, ROAMING_MAX_COUNT - 1);
		memcpy(tt_roam_node->addr, client, ETH_ALEN);

		list_add(&tt_roam_node->list, &bat_priv->tt_roam_list);
		ret = true;
	}

unlock:
	spin_unlock_bh(&bat_priv->tt_roam_list_lock);
	return ret;
}

void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client,
		   struct orig_node *orig_node)
{
	struct neigh_node *neigh_node = NULL;
	struct sk_buff *skb = NULL;
	struct roam_adv_packet *roam_adv_packet;
	int ret = 1;
	struct hard_iface *primary_if;

	/* before going on we have to check whether the client has
	 * already roamed to us too many times */
	if (!tt_check_roam_count(bat_priv, client))
		goto out;

	skb = dev_alloc_skb(sizeof(struct roam_adv_packet) + ETH_HLEN);
	if (!skb)
		goto out;

	skb_reserve(skb, ETH_HLEN);

	roam_adv_packet = (struct roam_adv_packet *)skb_put(skb,
					sizeof(struct roam_adv_packet));

	roam_adv_packet->packet_type = BAT_ROAM_ADV;
	roam_adv_packet->version = COMPAT_VERSION;
	roam_adv_packet->ttl = TTL;
	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;
	memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
	hardif_free_ref(primary_if);
	memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
	memcpy(roam_adv_packet->client, client, ETH_ALEN);

	neigh_node = orig_node_get_router(orig_node);
	if (!neigh_node)
		goto out;

	bat_dbg(DBG_TT, bat_priv,
		"Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
		orig_node->orig, client, neigh_node->addr);

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = 0;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (ret)
		kfree_skb(skb);
	return;
}

static void tt_purge(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, tt_work);

	tt_local_purge(bat_priv);
	tt_global_roam_purge(bat_priv);
	tt_req_purge(bat_priv);
	tt_roam_purge(bat_priv);

	tt_start_timer(bat_priv);
}

void tt_free(struct bat_priv *bat_priv)
{
	cancel_delayed_work_sync(&bat_priv->tt_work);

	tt_local_table_free(bat_priv);
	tt_global_table_free(bat_priv);
	tt_req_list_free(bat_priv);
	tt_changes_list_free(bat_priv);
	tt_roam_list_free(bat_priv);

	kfree(bat_priv->tt_buff);
}

/* This function will reset the specified flags from all the entries in
 * the given hash table and will increment num_local_tt for each involved
 * entry */
static void tt_local_reset_flags(struct bat_priv *bat_priv, uint16_t flags)
{
	int i;
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct tt_local_entry *tt_local_entry;

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(tt_local_entry, node,
					 head, hash_entry) {
			if (!(tt_local_entry->flags & flags))
				continue;
			tt_local_entry->flags &= ~flags;
			atomic_inc(&bat_priv->num_local_tt);
		}
		rcu_read_unlock();
	}
}

/* Purge out all the tt local entries marked with TT_CLIENT_PENDING */
static void tt_local_purge_pending_clients(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->tt_local_hash;
	struct tt_local_entry *tt_local_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* protects write access to the hash lists */
	int i;

	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(tt_local_entry, node, node_tmp,
					  head, hash_entry) {
			if (!(tt_local_entry->flags & TT_CLIENT_PENDING))
				continue;

			bat_dbg(DBG_TT, bat_priv, "Deleting local tt entry "
				"(%pM): pending\n", tt_local_entry->addr);

			atomic_dec(&bat_priv->num_local_tt);
			hlist_del_rcu(node);
			tt_local_entry_free_ref(tt_local_entry);
		}
		spin_unlock_bh(list_lock);
	}
}

void tt_commit_changes(struct bat_priv *bat_priv)
{
	tt_local_reset_flags(bat_priv, TT_CLIENT_NEW);
	tt_local_purge_pending_clients(bat_priv);

	/* Increment the TTVN only once per OGM interval */
	atomic_inc(&bat_priv->ttvn);
	bat_priv->tt_poss_change = false;
}

bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst)
{
	struct tt_local_entry *tt_local_entry = NULL;
	struct tt_global_entry *tt_global_entry = NULL;
	bool ret = true;

	if (!atomic_read(&bat_priv->ap_isolation))
		return false;

	tt_local_entry = tt_local_hash_find(bat_priv, dst);
	if (!tt_local_entry)
		goto out;

	tt_global_entry = tt_global_hash_find(bat_priv, src);
	if (!tt_global_entry)
		goto out;

	if (_is_ap_isolated(tt_local_entry, tt_global_entry))
		goto out;

	ret = false;

out:
	if (tt_global_entry)
		tt_global_entry_free_ref(tt_global_entry);
	if (tt_local_entry)
		tt_local_entry_free_ref(tt_local_entry);
	return ret;
}

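/* process the tt metadata carried by an OGM: apply the attached changes when
 * the ttvn advanced by exactly one and the CRC matches, otherwise fall back
 * to a TT_REQUEST (diff or full table) */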
void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node,
		    const unsigned char *tt_buff, uint8_t tt_num_changes,
		    uint8_t ttvn, uint16_t tt_crc)
{
	uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
	bool full_table = true;

	/* the ttvn increased by one -> we can apply the attached changes */
	if (ttvn - orig_ttvn == 1) {
		/* the OGM could not contain the changes due to their size or
		 * because they have already been sent TT_OGM_APPEND_MAX times.
		 * In this case send a tt request */
		if (!tt_num_changes) {
			full_table = false;
			goto request_table;
		}

		tt_update_changes(bat_priv, orig_node, tt_num_changes, ttvn,
				  (struct tt_change *)tt_buff);

		/* Even if we received the precomputed crc with the OGM, we
		 * prefer to recompute it to spot any possible inconsistency
		 * in the global table */
		orig_node->tt_crc = tt_global_crc(bat_priv, orig_node);

		/* The ttvn alone is not enough to guarantee consistency
		 * because a single value could represent different states
		 * (due to the wrap around). Thus a node has to check whether
		 * the resulting table (after applying the changes) is still
		 * consistent or not. E.g. a node could disconnect while its
		 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
		 * checking the CRC value is mandatory to detect the
		 * inconsistency */
		if (orig_node->tt_crc != tt_crc)
			goto request_table;

		/* Roaming phase is over: tables are in sync again. I can
		 * unset the flag */
		orig_node->tt_poss_change = false;
	} else {
		/* if we missed more than one change or our tables are not
		 * in sync anymore -> request fresh tt data */
		if (ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) {
request_table:
			bat_dbg(DBG_TT, bat_priv, "TT inconsistency for %pM. "
				"Need to retrieve the correct information "
				"(ttvn: %u last_ttvn: %u crc: %u last_crc: "
				"%u num_changes: %u)\n", orig_node->orig, ttvn,
				orig_ttvn, tt_crc, orig_node->tt_crc,
				tt_num_changes);
			send_tt_request(bat_priv, orig_node, ttvn, tt_crc,
					full_table);
			return;
		}
	}
}