net/batman-adv/routing.c

/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "routing.h"
#include "send.h"
#include "hash.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "translation-table.h"
#include "originator.h"
#include "ring_buffer.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "unicast.h"
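
/* Slide the own-broadcast window of every originator by one position for
 * the given interface and recompute bcast_own_sum, so that the counter only
 * covers the most recent TQ_LOCAL_WINDOW_SIZE of our own OGMs. */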
void slide_own_bcast_window(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	unsigned long *word;
	int i;
	size_t word_index;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			spin_lock_bh(&orig_node->ogm_cnt_lock);
			word_index = hard_iface->if_num * NUM_WORDS;
			word = &(orig_node->bcast_own[word_index]);

			bit_get_packet(bat_priv, word, 1, 0);
			orig_node->bcast_own_sum[hard_iface->if_num] =
				bit_packet_count(word);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);
		}
		rcu_read_unlock();
	}
}

static void update_HNA(struct bat_priv *bat_priv, struct orig_node *orig_node,
		       unsigned char *hna_buff, int hna_buff_len)
{
	if ((hna_buff_len != orig_node->hna_buff_len) ||
	    ((hna_buff_len > 0) &&
	     (orig_node->hna_buff_len > 0) &&
	     (memcmp(orig_node->hna_buff, hna_buff, hna_buff_len) != 0))) {

		if (orig_node->hna_buff_len > 0)
			hna_global_del_orig(bat_priv, orig_node,
					    "originator changed hna");

		if ((hna_buff_len > 0) && (hna_buff))
			hna_global_add_orig(bat_priv, orig_node,
					    hna_buff, hna_buff_len);
	}
}

static void update_route(struct bat_priv *bat_priv,
			 struct orig_node *orig_node,
			 struct neigh_node *neigh_node,
			 unsigned char *hna_buff, int hna_buff_len)
{
	struct neigh_node *neigh_node_tmp;

	/* route deleted */
	if ((orig_node->router) && (!neigh_node)) {

		bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n",
			orig_node->orig);
		hna_global_del_orig(bat_priv, orig_node,
				    "originator timed out");

	/* route added */
	} else if ((!orig_node->router) && (neigh_node)) {

		bat_dbg(DBG_ROUTES, bat_priv,
			"Adding route towards: %pM (via %pM)\n",
			orig_node->orig, neigh_node->addr);
		hna_global_add_orig(bat_priv, orig_node,
				    hna_buff, hna_buff_len);

	/* route changed */
	} else {
		bat_dbg(DBG_ROUTES, bat_priv,
			"Changing route towards: %pM "
			"(now via %pM - was via %pM)\n",
			orig_node->orig, neigh_node->addr,
			orig_node->router->addr);
	}

	if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount))
		neigh_node = NULL;
	neigh_node_tmp = orig_node->router;
	orig_node->router = neigh_node;
	if (neigh_node_tmp)
		neigh_node_free_ref(neigh_node_tmp);
}

void update_routes(struct bat_priv *bat_priv, struct orig_node *orig_node,
		   struct neigh_node *neigh_node, unsigned char *hna_buff,
		   int hna_buff_len)
{

	if (!orig_node)
		return;

	if (orig_node->router != neigh_node)
		update_route(bat_priv, orig_node, neigh_node,
			     hna_buff, hna_buff_len);
	/* may be just HNA changed */
	else
		update_HNA(bat_priv, orig_node, hna_buff, hna_buff_len);
}
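
/* Check whether the link towards orig_neigh_node can be considered
 * bidirectional: combine the echo count of our own OGMs (orig_eq_count)
 * with the number of OGMs received from the neighbor (neigh_rq_count) into
 * a local TQ, apply the asymmetry penalty and fold the result into
 * batman_packet->tq. Returns 1 if the resulting TQ reaches
 * TQ_TOTAL_BIDRECT_LIMIT, 0 otherwise. */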
static int is_bidirectional_neigh(struct orig_node *orig_node,
				  struct orig_node *orig_neigh_node,
				  struct batman_packet *batman_packet,
				  struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
	struct hlist_node *node;
	unsigned char total_count;
	uint8_t orig_eq_count, neigh_rq_count, tq_own;
	int tq_asym_penalty, ret = 0;

	if (orig_node == orig_neigh_node) {
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_node->neigh_list, list) {

			if (!compare_eth(tmp_neigh_node->addr,
					 orig_neigh_node->orig))
				continue;

			if (tmp_neigh_node->if_incoming != if_incoming)
				continue;

			if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
				continue;

			neigh_node = tmp_neigh_node;
		}
		rcu_read_unlock();

		if (!neigh_node)
			neigh_node = create_neighbor(orig_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		if (!neigh_node)
			goto out;

		neigh_node->last_valid = jiffies;
	} else {
		/* find packet count of corresponding one hop neighbor */
		rcu_read_lock();
		hlist_for_each_entry_rcu(tmp_neigh_node, node,
					 &orig_neigh_node->neigh_list, list) {

			if (!compare_eth(tmp_neigh_node->addr,
					 orig_neigh_node->orig))
				continue;

			if (tmp_neigh_node->if_incoming != if_incoming)
				continue;

			if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
				continue;

			neigh_node = tmp_neigh_node;
		}
		rcu_read_unlock();

		if (!neigh_node)
			neigh_node = create_neighbor(orig_neigh_node,
						     orig_neigh_node,
						     orig_neigh_node->orig,
						     if_incoming);
		if (!neigh_node)
			goto out;
	}

	orig_node->last_valid = jiffies;

	spin_lock_bh(&orig_node->ogm_cnt_lock);
	orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
	neigh_rq_count = neigh_node->real_packet_count;
	spin_unlock_bh(&orig_node->ogm_cnt_lock);

	/* pay attention to not get a value bigger than 100 % */
	total_count = (orig_eq_count > neigh_rq_count ?
		       neigh_rq_count : orig_eq_count);

	/* if too few of our own OGMs were echoed or too few OGMs were
	 * received from the neighbor, the link cannot be considered
	 * bidirectional and tq_own is set to zero */
	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
	    (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
		tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information */
		tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;

	/*
	 * 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
	 * affect the nearly-symmetric links only a little, but
	 * punishes asymmetric links more. This will give a value
	 * between 0 and TQ_MAX_VALUE
	 */
	tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
					(TQ_LOCAL_WINDOW_SIZE *
					 TQ_LOCAL_WINDOW_SIZE *
					 TQ_LOCAL_WINDOW_SIZE);
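
	/* Example (assuming the default TQ_LOCAL_WINDOW_SIZE of 64 and
	 * TQ_MAX_VALUE of 255): a neighbor whose OGMs were received in 32 of
	 * the last 64 window positions keeps a penalty factor of
	 * 255 - (255 * 32^3) / 64^3 = 224 (~88%), while one heard in only
	 * 16 positions keeps 148 (~58%). */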

	batman_packet->tq = ((batman_packet->tq * tq_own * tq_asym_penalty) /
						(TQ_MAX_VALUE * TQ_MAX_VALUE));

	bat_dbg(DBG_BATMAN, bat_priv,
		"bidirectional: "
		"orig = %-15pM neigh = %-15pM => own_bcast = %2i, "
		"real recv = %2i, local tq: %3i, asym_penalty: %3i, "
		"total tq: %3i\n",
		orig_node->orig, orig_neigh_node->orig, total_count,
		neigh_rq_count, tq_own, tq_asym_penalty, batman_packet->tq);

	/* if link has the minimum required transmission quality
	 * consider it bidirectional */
	if (batman_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
		ret = 1;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	return ret;
}

/* caller must hold the neigh_list_lock */
void bonding_candidate_del(struct orig_node *orig_node,
			   struct neigh_node *neigh_node)
{
	/* this neighbor is not part of our candidate list */
	if (list_empty(&neigh_node->bonding_list))
		goto out;

	list_del_rcu(&neigh_node->bonding_list);
	INIT_LIST_HEAD(&neigh_node->bonding_list);
	neigh_node_free_ref(neigh_node);
	atomic_dec(&orig_node->bond_candidates);

out:
	return;
}
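
/* Decide whether neigh_node qualifies as a bonding candidate for orig_node:
 * it must share the originator's primary address, its TQ must be within
 * BONDING_TQ_THRESHOLD of the currently selected router, and it must not
 * share an interface or MAC address with an existing candidate. Anything
 * else removes it from the candidate list again. */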
static void bonding_candidate_add(struct orig_node *orig_node,
				  struct neigh_node *neigh_node)
{
	struct hlist_node *node;
	struct neigh_node *tmp_neigh_node;
	uint8_t best_tq, interference_candidate = 0;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* only consider if it has the same primary address ... */
	if (!compare_eth(orig_node->orig,
			 neigh_node->orig_node->primary_addr))
		goto candidate_del;

	if (!orig_node->router)
		goto candidate_del;

	best_tq = orig_node->router->tq_avg;

	/* ... and is good enough to be considered */
	if (neigh_node->tq_avg < best_tq - BONDING_TQ_THRESHOLD)
		goto candidate_del;

	/*
	 * check if we have another candidate with the same mac address or
	 * interface. If we do, we won't select this candidate because of
	 * possible interference.
	 */
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		if (tmp_neigh_node == neigh_node)
			continue;

		/* we only care if the other candidate is even
		 * considered as candidate. */
		if (list_empty(&tmp_neigh_node->bonding_list))
			continue;

		if ((neigh_node->if_incoming == tmp_neigh_node->if_incoming) ||
		    (compare_eth(neigh_node->addr, tmp_neigh_node->addr))) {
			interference_candidate = 1;
			break;
		}
	}

	/* don't care further if it is an interference candidate */
	if (interference_candidate)
		goto candidate_del;

	/* this neighbor already is part of our candidate list */
	if (!list_empty(&neigh_node->bonding_list))
		goto out;

	if (!atomic_inc_not_zero(&neigh_node->refcount))
		goto out;

	list_add_rcu(&neigh_node->bonding_list, &orig_node->bond_list);
	atomic_inc(&orig_node->bond_candidates);
	goto out;

candidate_del:
	bonding_candidate_del(orig_node, neigh_node);

out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return;
}

/* copy primary address for bonding */
static void bonding_save_primary(struct orig_node *orig_node,
				 struct orig_node *orig_neigh_node,
				 struct batman_packet *batman_packet)
{
	if (!(batman_packet->flags & PRIMARIES_FIRST_HOP))
		return;

	memcpy(orig_neigh_node->primary_addr, orig_node->orig, ETH_ALEN);
}
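
/* Update the originator entry for a received OGM: refresh the TQ ring
 * buffer of the transmitting neighbor, switch the selected router if the
 * neighbor offers a better TQ or, on a TQ tie, a higher count of our own
 * rebroadcast OGMs (bcast_own_sum), and finally bring the HNA buffer and
 * gateway flags up to date. */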
static void update_orig(struct bat_priv *bat_priv,
			struct orig_node *orig_node,
			struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			struct hard_iface *if_incoming,
			unsigned char *hna_buff, int hna_buff_len,
			char is_duplicate)
{
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct orig_node *orig_node_tmp;
	struct hlist_node *node;
	int tmp_hna_buff_len;
	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;

	bat_dbg(DBG_BATMAN, bat_priv, "update_originator(): "
		"Searching and updating originator entry of received packet\n");

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming) &&
		     atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
			if (neigh_node)
				neigh_node_free_ref(neigh_node);
			neigh_node = tmp_neigh_node;
			continue;
		}

		if (is_duplicate)
			continue;

		ring_buffer_set(tmp_neigh_node->tq_recv,
				&tmp_neigh_node->tq_index, 0);
		tmp_neigh_node->tq_avg =
			ring_buffer_avg(tmp_neigh_node->tq_recv);
	}

	if (!neigh_node) {
		struct orig_node *orig_tmp;

		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_tmp)
			goto unlock;

		neigh_node = create_neighbor(orig_node, orig_tmp,
					     ethhdr->h_source, if_incoming);

		orig_node_free_ref(orig_tmp);
		if (!neigh_node)
			goto unlock;
	} else
		bat_dbg(DBG_BATMAN, bat_priv,
			"Updating existing last-hop neighbor of originator\n");

	rcu_read_unlock();

	orig_node->flags = batman_packet->flags;
	neigh_node->last_valid = jiffies;

	ring_buffer_set(neigh_node->tq_recv,
			&neigh_node->tq_index,
			batman_packet->tq);
	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);

	if (!is_duplicate) {
		orig_node->last_ttl = batman_packet->ttl;
		neigh_node->last_ttl = batman_packet->ttl;
	}

	bonding_candidate_add(orig_node, neigh_node);

	tmp_hna_buff_len = (hna_buff_len > batman_packet->num_hna * ETH_ALEN ?
			    batman_packet->num_hna * ETH_ALEN : hna_buff_len);

	/* if this neighbor already is our next hop there is nothing
	 * to change */
	if (orig_node->router == neigh_node)
		goto update_hna;

	/* if this neighbor does not offer a better TQ we won't consider it */
	if ((orig_node->router) &&
	    (orig_node->router->tq_avg > neigh_node->tq_avg))
		goto update_hna;

	/* if the TQ is the same and the link not more symmetric we
	 * won't consider it either */
	if ((orig_node->router) &&
	    (neigh_node->tq_avg == orig_node->router->tq_avg)) {
		orig_node_tmp = orig_node->router->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_orig =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		orig_node_tmp = neigh_node->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_neigh =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		if (bcast_own_sum_orig >= bcast_own_sum_neigh)
			goto update_hna;
	}

	update_routes(bat_priv, orig_node, neigh_node,
		      hna_buff, tmp_hna_buff_len);
	goto update_gw;

update_hna:
	update_routes(bat_priv, orig_node, orig_node->router,
		      hna_buff, tmp_hna_buff_len);

update_gw:
	if (orig_node->gw_flags != batman_packet->gw_flags)
		gw_node_update(bat_priv, orig_node, batman_packet->gw_flags);

	orig_node->gw_flags = batman_packet->gw_flags;

	/* restart gateway selection if fast or late switching was enabled */
	if ((orig_node->gw_flags) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
	    (atomic_read(&bat_priv->gw_sel_class) > 2))
		gw_check_election(bat_priv, orig_node);

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
}

/* checks whether the host restarted and is in the protection time.
 * returns:
 *  0 if the packet is to be accepted
 *  1 if the packet is to be ignored.
 */
static int window_protected(struct bat_priv *bat_priv,
			    int32_t seq_num_diff,
			    unsigned long *last_reset)
{
	if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE)
		|| (seq_num_diff >= EXPECTED_SEQNO_RANGE)) {
		if (time_after(jiffies, *last_reset +
			msecs_to_jiffies(RESET_PROTECTION_MS))) {

			*last_reset = jiffies;
			bat_dbg(DBG_BATMAN, bat_priv,
				"old packet received, start protection\n");

			return 0;
		} else
			return 1;
	}
	return 0;
}

/* processes a batman packet for all interfaces, adjusts the sequence number
 * and finds out whether it is a duplicate.
 * returns:
 *   1 the packet is a duplicate
 *   0 the packet has not yet been received
 *  -1 the packet is old and has been received while the seqno window
 *     was protected. Caller should drop it.
 */
static char count_real_packets(struct ethhdr *ethhdr,
			       struct batman_packet *batman_packet,
			       struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *tmp_neigh_node;
	struct hlist_node *node;
	char is_duplicate = 0;
	int32_t seq_diff;
	int need_update = 0;
	int set_mark, ret = -1;

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return 0;

	spin_lock_bh(&orig_node->ogm_cnt_lock);
	seq_diff = batman_packet->seqno - orig_node->last_real_seqno;

	/* signal to the caller that the packet is to be dropped. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->batman_seqno_reset))
		goto out;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		is_duplicate |= get_bit_status(tmp_neigh_node->real_bits,
					       orig_node->last_real_seqno,
					       batman_packet->seqno);

		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming))
			set_mark = 1;
		else
			set_mark = 0;

		/* if the window moved, set the update flag. */
		need_update |= bit_get_packet(bat_priv,
					      tmp_neigh_node->real_bits,
					      seq_diff, set_mark);

		tmp_neigh_node->real_packet_count =
			bit_packet_count(tmp_neigh_node->real_bits);
	}
	rcu_read_unlock();

	if (need_update) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"updating last_seqno: old %d, new %d\n",
			orig_node->last_real_seqno, batman_packet->seqno);
		orig_node->last_real_seqno = batman_packet->seqno;
	}

	ret = is_duplicate;

out:
	spin_unlock_bh(&orig_node->ogm_cnt_lock);
	orig_node_free_ref(orig_node);
	return ret;
}
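
/* Main OGM handler. The rough sequence of checks is: sanity and version
 * checks, dropping of packets that originate from ourselves (while updating
 * the own-broadcast window used for the echo count), duplicate and loop
 * detection via count_real_packets(), the bidirectional link check, an
 * update of the routing table through update_orig(), and finally the
 * rebroadcast via schedule_forward_packet(). */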
void receive_bat_packet(struct ethhdr *ethhdr,
			struct batman_packet *batman_packet,
			unsigned char *hna_buff, int hna_buff_len,
			struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct hard_iface *hard_iface;
	struct orig_node *orig_neigh_node, *orig_node;
	char has_directlink_flag;
	char is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
	char is_broadcast = 0, is_bidirectional, is_single_hop_neigh;
	char is_duplicate;
	uint32_t if_incoming_seqno;

	/* Silently drop when the batman packet is actually not a
	 * correct packet.
	 *
	 * This might happen if a packet is padded (e.g. Ethernet has a
	 * minimum frame length of 64 bytes) and the aggregation interprets
	 * it as an additional length.
	 *
	 * TODO: A more sane solution would be to have a bit in the
	 * batman_packet to detect whether the packet is the last
	 * packet in an aggregation. Here we expect that the padding
	 * is always zero (or not 0x01)
	 */
	if (batman_packet->packet_type != BAT_PACKET)
		return;

	/* could be changed by schedule_own_packet() */
	if_incoming_seqno = atomic_read(&if_incoming->seqno);

	has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	is_single_hop_neigh = (compare_eth(ethhdr->h_source,
					   batman_packet->orig) ? 1 : 0);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Received BATMAN packet via NB: %pM, IF: %s [%pM] "
		"(from OG: %pM, via prev OG: %pM, seqno %d, tq %d, "
		"TTL %d, V %d, IDF %d)\n",
		ethhdr->h_source, if_incoming->net_dev->name,
		if_incoming->net_dev->dev_addr, batman_packet->orig,
		batman_packet->prev_sender, batman_packet->seqno,
		batman_packet->tq, batman_packet->ttl, batman_packet->version,
		has_directlink_flag);

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->if_status != IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != if_incoming->soft_iface)
			continue;

		if (compare_eth(ethhdr->h_source,
				hard_iface->net_dev->dev_addr))
			is_my_addr = 1;

		if (compare_eth(batman_packet->orig,
				hard_iface->net_dev->dev_addr))
			is_my_orig = 1;

		if (compare_eth(batman_packet->prev_sender,
				hard_iface->net_dev->dev_addr))
			is_my_oldorig = 1;

		if (compare_eth(ethhdr->h_source, broadcast_addr))
			is_broadcast = 1;
	}
	rcu_read_unlock();

	if (batman_packet->version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_packet->version);
		return;
	}

	if (is_my_addr) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: received my own broadcast (sender: %pM"
			")\n",
			ethhdr->h_source);
		return;
	}

	if (is_broadcast) {
		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
			"ignoring all packets with broadcast source addr (sender: %pM"
			")\n", ethhdr->h_source);
		return;
	}

	if (is_my_orig) {
		unsigned long *word;
		int offset;

		orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_neigh_node)
			return;

		/* neighbor has to indicate direct link and it has to
		 * come via the corresponding interface */
		/* if received seqno equals last send seqno save new
		 * seqno for bidirectional check */
		if (has_directlink_flag &&
		    compare_eth(if_incoming->net_dev->dev_addr,
				batman_packet->orig) &&
		    (batman_packet->seqno - if_incoming_seqno + 2 == 0)) {
			offset = if_incoming->if_num * NUM_WORDS;

			spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
			word = &(orig_neigh_node->bcast_own[offset]);
			bit_mark(word, 0);
			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
				bit_packet_count(word);
			spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
		}

		bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: "
			"originator packet from myself (via neighbor)\n");
		orig_node_free_ref(orig_neigh_node);
		return;
	}

	if (is_my_oldorig) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast echos (sender: "
			"%pM)\n", ethhdr->h_source);
		return;
	}

	orig_node = get_orig_node(bat_priv, batman_packet->orig);
	if (!orig_node)
		return;

	is_duplicate = count_real_packets(ethhdr, batman_packet, if_incoming);

	if (is_duplicate == -1) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: packet within seqno protection time "
			"(sender: %pM)\n", ethhdr->h_source);
		goto out;
	}

	if (batman_packet->tq == 0) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: originator packet with tq equal 0\n");
		goto out;
	}

	/* avoid temporary routing loops */
	if ((orig_node->router) &&
	    (orig_node->router->orig_node->router) &&
	    (compare_eth(orig_node->router->addr,
			 batman_packet->prev_sender)) &&
	    !(compare_eth(batman_packet->orig, batman_packet->prev_sender)) &&
	    (compare_eth(orig_node->router->addr,
			 orig_node->router->orig_node->router->addr))) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast packets that "
			"may make me loop (sender: %pM)\n", ethhdr->h_source);
		goto out;
	}

	/* if sender is a direct neighbor the sender mac equals
	 * originator mac */
	orig_neigh_node = (is_single_hop_neigh ?
			   orig_node :
			   get_orig_node(bat_priv, ethhdr->h_source));
	if (!orig_neigh_node)
		goto out;

	/* drop packet if sender is not a direct neighbor and if we
	 * don't route towards it */
	if (!is_single_hop_neigh && (!orig_neigh_node->router)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: OGM via unknown neighbor!\n");
		goto out_neigh;
	}

	is_bidirectional = is_bidirectional_neigh(orig_node, orig_neigh_node,
						  batman_packet, if_incoming);

	bonding_save_primary(orig_node, orig_neigh_node, batman_packet);

	/* update ranking if it is not a duplicate or has the same
	 * seqno and similar ttl as the non-duplicate */
	if (is_bidirectional &&
	    (!is_duplicate ||
	     ((orig_node->last_real_seqno == batman_packet->seqno) &&
	      (orig_node->last_ttl - 3 <= batman_packet->ttl))))
		update_orig(bat_priv, orig_node, ethhdr, batman_packet,
			    if_incoming, hna_buff, hna_buff_len, is_duplicate);

	/* is single hop (direct) neighbor */
	if (is_single_hop_neigh) {

		/* mark direct link on incoming interface */
		schedule_forward_packet(orig_node, ethhdr, batman_packet,
					1, hna_buff_len, if_incoming);

		bat_dbg(DBG_BATMAN, bat_priv, "Forwarding packet: "
			"rebroadcast neighbor packet with direct link flag\n");
		goto out_neigh;
	}

	/* multihop originator */
	if (!is_bidirectional) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: not received via bidirectional link\n");
		goto out_neigh;
	}

	if (is_duplicate) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: duplicate packet received\n");
		goto out_neigh;
	}

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: rebroadcast originator packet\n");
	schedule_forward_packet(orig_node, ethhdr, batman_packet,
				0, hna_buff_len, if_incoming);

out_neigh:
	if ((orig_neigh_node) && (!is_single_hop_neigh))
		orig_node_free_ref(orig_neigh_node);
out:
	orig_node_free_ref(orig_node);
}

int recv_bat_packet(struct sk_buff *skb, struct hard_iface *hard_iface)
{
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, sizeof(struct batman_packet))))
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		return NET_RX_DROP;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return NET_RX_DROP;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, 0) < 0)
		return NET_RX_DROP;

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	receive_aggr_bat_packet(ethhdr,
				skb->data,
				skb_headlen(skb),
				hard_iface);

	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static int recv_my_icmp_packet(struct bat_priv *bat_priv,
			       struct sk_buff *skb, size_t icmp_len)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct icmp_packet_rr *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add data to device queue */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		bat_socket_receive_packet(icmp_packet, icmp_len);
		goto out;
	}

	if (!bat_priv->primary_if)
		goto out;

	/* answer echo request (ping) */
	/* get routing information */
	rcu_read_lock();
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);

	if (!orig_node)
		goto unlock;

	neigh_node = orig_node->router;

	if (!neigh_node)
		goto unlock;

	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
		neigh_node = NULL;
		goto unlock;
	}

	rcu_read_unlock();

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig,
	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = ECHO_REPLY;
	icmp_packet->ttl = TTL;

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = NET_RX_SUCCESS;
	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv,
				  struct sk_buff *skb)
{
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct icmp_packet *icmp_packet;
	int ret = NET_RX_DROP;

	icmp_packet = (struct icmp_packet *)skb->data;

	/* send TTL exceeded if packet is an echo request (traceroute) */
	if (icmp_packet->msg_type != ECHO_REQUEST) {
		pr_debug("Warning - can't forward icmp packet from %pM to "
			 "%pM: ttl exceeded\n", icmp_packet->orig,
			 icmp_packet->dst);
		goto out;
	}

	if (!bat_priv->primary_if)
		goto out;

	/* get routing information */
	rcu_read_lock();
	orig_node = orig_hash_find(bat_priv, icmp_packet->orig);

	if (!orig_node)
		goto unlock;

	neigh_node = orig_node->router;

	if (!neigh_node)
		goto unlock;

	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
		neigh_node = NULL;
		goto unlock;
	}

	rcu_read_unlock();

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet *)skb->data;

	memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
	memcpy(icmp_packet->orig,
	       bat_priv->primary_if->net_dev->dev_addr, ETH_ALEN);
	icmp_packet->msg_type = TTL_EXCEEDED;
	icmp_packet->ttl = TTL;

	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = NET_RX_SUCCESS;
	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct icmp_packet_rr *icmp_packet;
	struct ethhdr *ethhdr;
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	int hdr_size = sizeof(struct icmp_packet);
	int ret = NET_RX_DROP;

	/*
	 * we truncate all incoming icmp packets if they don't match our size
	 */
	if (skb->len >= sizeof(struct icmp_packet_rr))
		hdr_size = sizeof(struct icmp_packet_rr);

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* add record route information if not full */
	if ((hdr_size == sizeof(struct icmp_packet_rr)) &&
	    (icmp_packet->rr_cur < BAT_RR_LEN)) {
		memcpy(&(icmp_packet->rr[icmp_packet->rr_cur]),
		       ethhdr->h_dest, ETH_ALEN);
		icmp_packet->rr_cur++;
	}

	/* packet for me */
	if (is_my_mac(icmp_packet->dst))
		return recv_my_icmp_packet(bat_priv, skb, hdr_size);

	/* TTL exceeded */
	if (icmp_packet->ttl < 2)
		return recv_icmp_ttl_exceeded(bat_priv, skb);

	/* get routing information */
	rcu_read_lock();
	orig_node = orig_hash_find(bat_priv, icmp_packet->dst);

	if (!orig_node)
		goto unlock;

	neigh_node = orig_node->router;

	if (!neigh_node)
		goto unlock;

	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
		neigh_node = NULL;
		goto unlock;
	}

	rcu_read_unlock();

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	icmp_packet = (struct icmp_packet_rr *)skb->data;

	/* decrement ttl */
	icmp_packet->ttl--;

	/* route it */
	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = NET_RX_SUCCESS;
	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

/* find a suitable router for this originator, and use
 * bonding if possible. increases the found neighbor's
 * refcount. */
struct neigh_node *find_router(struct bat_priv *bat_priv,
			       struct orig_node *orig_node,
			       struct hard_iface *recv_if)
{
	struct orig_node *primary_orig_node;
	struct orig_node *router_orig;
	struct neigh_node *router, *first_candidate, *tmp_neigh_node;
	static uint8_t zero_mac[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
	int bonding_enabled;

	if (!orig_node)
		return NULL;

	if (!orig_node->router)
		return NULL;

	/* without bonding, the first node should
	 * always choose the default router. */
	bonding_enabled = atomic_read(&bat_priv->bonding);

	rcu_read_lock();
	/* select default router to output */
	router = orig_node->router;
	router_orig = orig_node->router->orig_node;
	if (!router_orig || !atomic_inc_not_zero(&router->refcount)) {
		rcu_read_unlock();
		return NULL;
	}

	if ((!recv_if) && (!bonding_enabled))
		goto return_router;

	/* if we have something in the primary_addr, we can search
	 * for a potential bonding candidate. */
	if (compare_eth(router_orig->primary_addr, zero_mac))
		goto return_router;

	/* find the orig_node which has the primary interface. might
	 * even be the same as our router_orig in many cases */

	if (compare_eth(router_orig->primary_addr, router_orig->orig)) {
		primary_orig_node = router_orig;
	} else {
		primary_orig_node = orig_hash_find(bat_priv,
						   router_orig->primary_addr);
		if (!primary_orig_node)
			goto return_router;

		orig_node_free_ref(primary_orig_node);
	}

	/* with less than 2 candidates, we can't do any
	 * bonding and prefer the original router. */
	if (atomic_read(&primary_orig_node->bond_candidates) < 2)
		goto return_router;

	/* all nodes between should choose a candidate which
	 * is not on the interface where the packet came
	 * in. */

	neigh_node_free_ref(router);
	first_candidate = NULL;
	router = NULL;

	if (bonding_enabled) {
		/* in the bonding case, send the packets in a round
		 * robin fashion over the remaining interfaces. */

		list_for_each_entry_rcu(tmp_neigh_node,
				&primary_orig_node->bond_list, bonding_list) {
			if (!first_candidate)
				first_candidate = tmp_neigh_node;
			/* recv_if == NULL on the first node. */
			if (tmp_neigh_node->if_incoming != recv_if &&
			    atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
				router = tmp_neigh_node;
				break;
			}
		}

		/* use the first candidate if nothing was found. */
		if (!router && first_candidate &&
		    atomic_inc_not_zero(&first_candidate->refcount))
			router = first_candidate;

		if (!router) {
			rcu_read_unlock();
			return NULL;
		}

		/* selected should point to the next element
		 * after the current router */
		spin_lock_bh(&primary_orig_node->neigh_list_lock);
		/* this is a list_move(), which unfortunately
		 * does not exist as rcu version */
		list_del_rcu(&primary_orig_node->bond_list);
		list_add_rcu(&primary_orig_node->bond_list,
			     &router->bonding_list);
		spin_unlock_bh(&primary_orig_node->neigh_list_lock);

	} else {
		/* if bonding is disabled, use the best of the
		 * remaining candidates which are not using
		 * this interface. */
		list_for_each_entry_rcu(tmp_neigh_node,
			&primary_orig_node->bond_list, bonding_list) {
			if (!first_candidate)
				first_candidate = tmp_neigh_node;

			/* recv_if == NULL on the first node. */
			if (tmp_neigh_node->if_incoming == recv_if)
				continue;

			if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
				continue;

			/* if we don't have a router yet
			 * or this one is better, choose it. */
			if ((!router) ||
			    (tmp_neigh_node->tq_avg > router->tq_avg)) {
				/* decrement refcount of
				 * previously selected router */
				if (router)
					neigh_node_free_ref(router);

				router = tmp_neigh_node;
				atomic_inc_not_zero(&router->refcount);
			}

			neigh_node_free_ref(tmp_neigh_node);
		}

		/* use the first candidate if nothing was found. */
		if (!router && first_candidate &&
		    atomic_inc_not_zero(&first_candidate->refcount))
			router = first_candidate;
	}
return_router:
	rcu_read_unlock();
	return router;
}

static int check_unicast_packet(struct sk_buff *skb, int hdr_size)
{
	struct ethhdr *ethhdr;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return -1;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with unicast indication but broadcast recipient */
	if (is_broadcast_ether_addr(ethhdr->h_dest))
		return -1;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		return -1;

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return -1;

	return 0;
}

int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct neigh_node *neigh_node = NULL;
	struct unicast_packet *unicast_packet;
	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
	int ret = NET_RX_DROP;
	struct sk_buff *new_skb;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* TTL exceeded */
	if (unicast_packet->ttl < 2) {
		pr_debug("Warning - can't forward unicast packet from %pM to "
			 "%pM: ttl exceeded\n", ethhdr->h_source,
			 unicast_packet->dest);
		goto out;
	}

	/* get routing information */
	rcu_read_lock();
	orig_node = orig_hash_find(bat_priv, unicast_packet->dest);

	if (!orig_node)
		goto unlock;

	rcu_read_unlock();

	/* find_router() increases neigh_node's refcount if found. */
	neigh_node = find_router(bat_priv, orig_node, recv_if);

	if (!neigh_node)
		goto out;

	/* create a copy of the skb, if needed, to modify it. */
	if (skb_cow(skb, sizeof(struct ethhdr)) < 0)
		goto out;

	unicast_packet = (struct unicast_packet *)skb->data;

	if (unicast_packet->packet_type == BAT_UNICAST &&
	    atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		ret = frag_send_skb(skb, bat_priv,
				    neigh_node->if_incoming, neigh_node->addr);
		goto out;
	}

	if (unicast_packet->packet_type == BAT_UNICAST_FRAG &&
	    frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			goto out;

		/* packet was buffered for late merge */
		if (!new_skb) {
			ret = NET_RX_SUCCESS;
			goto out;
		}

		skb = new_skb;
		unicast_packet = (struct unicast_packet *)skb->data;
	}

	/* decrement ttl */
	unicast_packet->ttl--;

	/* route it */
	send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
	ret = NET_RX_SUCCESS;
	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct unicast_packet *unicast_packet;
	int hdr_size = sizeof(struct unicast_packet);

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	unicast_packet = (struct unicast_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {
		interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if);
}

int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct unicast_frag_packet *unicast_packet;
	int hdr_size = sizeof(struct unicast_frag_packet);
	struct sk_buff *new_skb = NULL;
	int ret;

	if (check_unicast_packet(skb, hdr_size) < 0)
		return NET_RX_DROP;

	unicast_packet = (struct unicast_frag_packet *)skb->data;

	/* packet for me */
	if (is_my_mac(unicast_packet->dest)) {

		ret = frag_reassemble_skb(skb, bat_priv, &new_skb);

		if (ret == NET_RX_DROP)
			return NET_RX_DROP;

		/* packet was buffered for late merge */
		if (!new_skb)
			return NET_RX_SUCCESS;

		interface_rx(recv_if->soft_iface, new_skb, recv_if,
			     sizeof(struct unicast_packet));
		return NET_RX_SUCCESS;
	}

	return route_unicast_packet(skb, recv_if);
}
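
/* Handler for batman-adv broadcast payload packets: after the usual sanity
 * checks, duplicates and pre-restart packets are filtered through the
 * per-originator broadcast sequence window, then the packet is queued for
 * rebroadcast and also handed up to the soft-interface locally. */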
int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	struct orig_node *orig_node = NULL;
	struct bcast_packet *bcast_packet;
	struct ethhdr *ethhdr;
	int hdr_size = sizeof(struct bcast_packet);
	int ret = NET_RX_DROP;
	int32_t seq_diff;

	/* drop packet if it does not have the necessary minimum size */
	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		goto out;

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* packet with broadcast indication but unicast recipient */
	if (!is_broadcast_ether_addr(ethhdr->h_dest))
		goto out;

	/* packet with broadcast sender address */
	if (is_broadcast_ether_addr(ethhdr->h_source))
		goto out;

	/* ignore broadcasts sent by myself */
	if (is_my_mac(ethhdr->h_source))
		goto out;

	bcast_packet = (struct bcast_packet *)skb->data;

	/* ignore broadcasts originated by myself */
	if (is_my_mac(bcast_packet->orig))
		goto out;

	if (bcast_packet->ttl < 2)
		goto out;

	rcu_read_lock();
	orig_node = orig_hash_find(bat_priv, bcast_packet->orig);

	if (!orig_node)
		goto rcu_unlock;

	rcu_read_unlock();

	spin_lock_bh(&orig_node->bcast_seqno_lock);

	/* check whether the packet is a duplicate */
	if (get_bit_status(orig_node->bcast_bits, orig_node->last_bcast_seqno,
			   ntohl(bcast_packet->seqno)))
		goto spin_unlock;

	seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno;

	/* check whether the packet is old and the host just restarted. */
	if (window_protected(bat_priv, seq_diff,
			     &orig_node->bcast_seqno_reset))
		goto spin_unlock;

	/* mark broadcast in flood history, update window position
	 * if required. */
	if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1))
		orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno);

	spin_unlock_bh(&orig_node->bcast_seqno_lock);

	/* rebroadcast packet */
	add_bcast_packet_to_list(bat_priv, skb);

	/* broadcast for me */
	interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size);
	ret = NET_RX_SUCCESS;
	goto out;

rcu_unlock:
	rcu_read_unlock();
	goto out;
spin_unlock:
	spin_unlock_bh(&orig_node->bcast_seqno_lock);
out:
	if (orig_node)
		orig_node_free_ref(orig_node);
	return ret;
}

int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if)
{
	struct vis_packet *vis_packet;
	struct ethhdr *ethhdr;
	struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface);
	int hdr_size = sizeof(struct vis_packet);

	/* keep skb linear */
	if (skb_linearize(skb) < 0)
		return NET_RX_DROP;

	if (unlikely(!pskb_may_pull(skb, hdr_size)))
		return NET_RX_DROP;

	vis_packet = (struct vis_packet *)skb->data;
	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	/* not for me */
	if (!is_my_mac(ethhdr->h_dest))
		return NET_RX_DROP;

	/* ignore own packets */
	if (is_my_mac(vis_packet->vis_orig))
		return NET_RX_DROP;

	if (is_my_mac(vis_packet->sender_orig))
		return NET_RX_DROP;

	switch (vis_packet->vis_type) {
	case VIS_TYPE_SERVER_SYNC:
		receive_server_sync_packet(bat_priv, vis_packet,
					   skb_headlen(skb));
		break;

	case VIS_TYPE_CLIENT_UPDATE:
		receive_client_update_packet(bat_priv, vis_packet,
					     skb_headlen(skb));
		break;

	default:	/* ignore unknown packet */
		break;
	}

	/* We take a copy of the data in the packet, so we should
	 * always free the skb. */
	return NET_RX_DROP;
}