1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* Copyright (c) 2021 Taehee Yoo <ap420073@gmail.com> */
4 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/module.h>
7 #include <linux/skbuff.h>
9 #include <linux/jhash.h>
10 #include <linux/if_tunnel.h>
11 #include <linux/net.h>
12 #include <linux/igmp.h>
13 #include <linux/workqueue.h>
14 #include <net/pkt_sched.h>
15 #include <net/net_namespace.h>
18 #include <net/udp_tunnel.h>
22 #include <uapi/linux/amt.h>
23 #include <linux/security.h>
24 #include <net/gro_cells.h>
26 #include <net/if_inet6.h>
27 #include <net/ndisc.h>
28 #include <net/addrconf.h>
29 #include <net/ip6_route.h>
30 #include <net/inet_common.h>
31 #include <net/ip6_checksum.h>
33 static struct workqueue_struct
*amt_wq
;
35 static HLIST_HEAD(source_gc_list
);
36 /* Lock for source_gc_list */
37 static spinlock_t source_gc_lock
;
38 static struct delayed_work source_gc_wq
;
39 static char *status_str
[] = {
41 "AMT_STATUS_SENT_DISCOVERY",
42 "AMT_STATUS_RECEIVED_DISCOVERY",
43 "AMT_STATUS_SENT_ADVERTISEMENT",
44 "AMT_STATUS_RECEIVED_ADVERTISEMENT",
45 "AMT_STATUS_SENT_REQUEST",
46 "AMT_STATUS_RECEIVED_REQUEST",
47 "AMT_STATUS_SENT_QUERY",
48 "AMT_STATUS_RECEIVED_QUERY",
49 "AMT_STATUS_SENT_UPDATE",
50 "AMT_STATUS_RECEIVED_UPDATE",
53 static char *type_str
[] = {
54 "", /* Type 0 is not defined */
56 "AMT_MSG_ADVERTISEMENT",
58 "AMT_MSG_MEMBERSHIP_QUERY",
59 "AMT_MSG_MEMBERSHIP_UPDATE",
60 "AMT_MSG_MULTICAST_DATA",
64 static char *action_str
[] = {
68 "AMT_ACT_STATUS_FWD_NEW",
69 "AMT_ACT_STATUS_D_FWD_NEW",
70 "AMT_ACT_STATUS_NONE_NEW",
73 static struct igmpv3_grec igmpv3_zero_grec
;
75 #if IS_ENABLED(CONFIG_IPV6)
76 #define MLD2_ALL_NODE_INIT { { { 0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01 } } }
77 static struct in6_addr mld2_all_node
= MLD2_ALL_NODE_INIT
;
78 static struct mld2_grec mldv2_zero_grec
;
81 static struct amt_skb_cb
*amt_skb_cb(struct sk_buff
*skb
)
83 BUILD_BUG_ON(sizeof(struct amt_skb_cb
) + sizeof(struct tc_skb_cb
) >
84 sizeof_field(struct sk_buff
, cb
));
86 return (struct amt_skb_cb
*)((void *)skb
->cb
+
87 sizeof(struct tc_skb_cb
));
90 static void __amt_source_gc_work(void)
92 struct amt_source_node
*snode
;
93 struct hlist_head gc_list
;
96 spin_lock_bh(&source_gc_lock
);
97 hlist_move_list(&source_gc_list
, &gc_list
);
98 spin_unlock_bh(&source_gc_lock
);
100 hlist_for_each_entry_safe(snode
, t
, &gc_list
, node
) {
101 hlist_del_rcu(&snode
->node
);
102 kfree_rcu(snode
, rcu
);
106 static void amt_source_gc_work(struct work_struct
*work
)
108 __amt_source_gc_work();
110 spin_lock_bh(&source_gc_lock
);
111 mod_delayed_work(amt_wq
, &source_gc_wq
,
112 msecs_to_jiffies(AMT_GC_INTERVAL
));
113 spin_unlock_bh(&source_gc_lock
);
116 static bool amt_addr_equal(union amt_addr
*a
, union amt_addr
*b
)
118 return !memcmp(a
, b
, sizeof(union amt_addr
));
121 static u32
amt_source_hash(struct amt_tunnel_list
*tunnel
, union amt_addr
*src
)
123 u32 hash
= jhash(src
, sizeof(*src
), tunnel
->amt
->hash_seed
);
125 return reciprocal_scale(hash
, tunnel
->amt
->hash_buckets
);
128 static bool amt_status_filter(struct amt_source_node
*snode
,
129 enum amt_filter filter
)
135 if (snode
->status
== AMT_SOURCE_STATUS_FWD
&&
136 snode
->flags
== AMT_SOURCE_OLD
)
139 case AMT_FILTER_D_FWD
:
140 if (snode
->status
== AMT_SOURCE_STATUS_D_FWD
&&
141 snode
->flags
== AMT_SOURCE_OLD
)
144 case AMT_FILTER_FWD_NEW
:
145 if (snode
->status
== AMT_SOURCE_STATUS_FWD
&&
146 snode
->flags
== AMT_SOURCE_NEW
)
149 case AMT_FILTER_D_FWD_NEW
:
150 if (snode
->status
== AMT_SOURCE_STATUS_D_FWD
&&
151 snode
->flags
== AMT_SOURCE_NEW
)
157 case AMT_FILTER_NONE_NEW
:
158 if (snode
->status
== AMT_SOURCE_STATUS_NONE
&&
159 snode
->flags
== AMT_SOURCE_NEW
)
162 case AMT_FILTER_BOTH
:
163 if ((snode
->status
== AMT_SOURCE_STATUS_D_FWD
||
164 snode
->status
== AMT_SOURCE_STATUS_FWD
) &&
165 snode
->flags
== AMT_SOURCE_OLD
)
168 case AMT_FILTER_BOTH_NEW
:
169 if ((snode
->status
== AMT_SOURCE_STATUS_D_FWD
||
170 snode
->status
== AMT_SOURCE_STATUS_FWD
) &&
171 snode
->flags
== AMT_SOURCE_NEW
)
182 static struct amt_source_node
*amt_lookup_src(struct amt_tunnel_list
*tunnel
,
183 struct amt_group_node
*gnode
,
184 enum amt_filter filter
,
187 u32 hash
= amt_source_hash(tunnel
, src
);
188 struct amt_source_node
*snode
;
190 hlist_for_each_entry_rcu(snode
, &gnode
->sources
[hash
], node
)
191 if (amt_status_filter(snode
, filter
) &&
192 amt_addr_equal(&snode
->source_addr
, src
))
198 static u32
amt_group_hash(struct amt_tunnel_list
*tunnel
, union amt_addr
*group
)
200 u32 hash
= jhash(group
, sizeof(*group
), tunnel
->amt
->hash_seed
);
202 return reciprocal_scale(hash
, tunnel
->amt
->hash_buckets
);
205 static struct amt_group_node
*amt_lookup_group(struct amt_tunnel_list
*tunnel
,
206 union amt_addr
*group
,
207 union amt_addr
*host
,
210 u32 hash
= amt_group_hash(tunnel
, group
);
211 struct amt_group_node
*gnode
;
213 hlist_for_each_entry_rcu(gnode
, &tunnel
->groups
[hash
], node
) {
214 if (amt_addr_equal(&gnode
->group_addr
, group
) &&
215 amt_addr_equal(&gnode
->host_addr
, host
) &&
223 static void amt_destroy_source(struct amt_source_node
*snode
)
225 struct amt_group_node
*gnode
= snode
->gnode
;
226 struct amt_tunnel_list
*tunnel
;
228 tunnel
= gnode
->tunnel_list
;
231 netdev_dbg(snode
->gnode
->amt
->dev
,
232 "Delete source %pI4 from %pI4\n",
233 &snode
->source_addr
.ip4
,
234 &gnode
->group_addr
.ip4
);
235 #if IS_ENABLED(CONFIG_IPV6)
237 netdev_dbg(snode
->gnode
->amt
->dev
,
238 "Delete source %pI6 from %pI6\n",
239 &snode
->source_addr
.ip6
,
240 &gnode
->group_addr
.ip6
);
244 cancel_delayed_work(&snode
->source_timer
);
245 hlist_del_init_rcu(&snode
->node
);
246 tunnel
->nr_sources
--;
248 spin_lock_bh(&source_gc_lock
);
249 hlist_add_head_rcu(&snode
->node
, &source_gc_list
);
250 spin_unlock_bh(&source_gc_lock
);
253 static void amt_del_group(struct amt_dev
*amt
, struct amt_group_node
*gnode
)
255 struct amt_source_node
*snode
;
256 struct hlist_node
*t
;
259 if (cancel_delayed_work(&gnode
->group_timer
))
261 hlist_del_rcu(&gnode
->node
);
262 gnode
->tunnel_list
->nr_groups
--;
265 netdev_dbg(amt
->dev
, "Leave group %pI4\n",
266 &gnode
->group_addr
.ip4
);
267 #if IS_ENABLED(CONFIG_IPV6)
269 netdev_dbg(amt
->dev
, "Leave group %pI6\n",
270 &gnode
->group_addr
.ip6
);
272 for (i
= 0; i
< amt
->hash_buckets
; i
++)
273 hlist_for_each_entry_safe(snode
, t
, &gnode
->sources
[i
], node
)
274 amt_destroy_source(snode
);
276 /* tunnel->lock was acquired outside of amt_del_group()
277 * But rcu_read_lock() was acquired too so It's safe.
279 kfree_rcu(gnode
, rcu
);
282 /* If a source timer expires with a router filter-mode for the group of
283 * INCLUDE, the router concludes that traffic from this particular
284 * source is no longer desired on the attached network, and deletes the
285 * associated source record.
287 static void amt_source_work(struct work_struct
*work
)
289 struct amt_source_node
*snode
= container_of(to_delayed_work(work
),
290 struct amt_source_node
,
292 struct amt_group_node
*gnode
= snode
->gnode
;
293 struct amt_dev
*amt
= gnode
->amt
;
294 struct amt_tunnel_list
*tunnel
;
296 tunnel
= gnode
->tunnel_list
;
297 spin_lock_bh(&tunnel
->lock
);
299 if (gnode
->filter_mode
== MCAST_INCLUDE
) {
300 amt_destroy_source(snode
);
301 if (!gnode
->nr_sources
)
302 amt_del_group(amt
, gnode
);
304 /* When a router filter-mode for a group is EXCLUDE,
305 * source records are only deleted when the group timer expires
307 snode
->status
= AMT_SOURCE_STATUS_D_FWD
;
310 spin_unlock_bh(&tunnel
->lock
);
313 static void amt_act_src(struct amt_tunnel_list
*tunnel
,
314 struct amt_group_node
*gnode
,
315 struct amt_source_node
*snode
,
318 struct amt_dev
*amt
= tunnel
->amt
;
322 mod_delayed_work(amt_wq
, &snode
->source_timer
,
323 msecs_to_jiffies(amt_gmi(amt
)));
325 case AMT_ACT_GMI_ZERO
:
326 cancel_delayed_work(&snode
->source_timer
);
329 mod_delayed_work(amt_wq
, &snode
->source_timer
,
330 gnode
->group_timer
.timer
.expires
);
332 case AMT_ACT_STATUS_FWD_NEW
:
333 snode
->status
= AMT_SOURCE_STATUS_FWD
;
334 snode
->flags
= AMT_SOURCE_NEW
;
336 case AMT_ACT_STATUS_D_FWD_NEW
:
337 snode
->status
= AMT_SOURCE_STATUS_D_FWD
;
338 snode
->flags
= AMT_SOURCE_NEW
;
340 case AMT_ACT_STATUS_NONE_NEW
:
341 cancel_delayed_work(&snode
->source_timer
);
342 snode
->status
= AMT_SOURCE_STATUS_NONE
;
343 snode
->flags
= AMT_SOURCE_NEW
;
351 netdev_dbg(amt
->dev
, "Source %pI4 from %pI4 Acted %s\n",
352 &snode
->source_addr
.ip4
,
353 &gnode
->group_addr
.ip4
,
355 #if IS_ENABLED(CONFIG_IPV6)
357 netdev_dbg(amt
->dev
, "Source %pI6 from %pI6 Acted %s\n",
358 &snode
->source_addr
.ip6
,
359 &gnode
->group_addr
.ip6
,
364 static struct amt_source_node
*amt_alloc_snode(struct amt_group_node
*gnode
,
367 struct amt_source_node
*snode
;
369 snode
= kzalloc(sizeof(*snode
), GFP_ATOMIC
);
373 memcpy(&snode
->source_addr
, src
, sizeof(union amt_addr
));
374 snode
->gnode
= gnode
;
375 snode
->status
= AMT_SOURCE_STATUS_NONE
;
376 snode
->flags
= AMT_SOURCE_NEW
;
377 INIT_HLIST_NODE(&snode
->node
);
378 INIT_DELAYED_WORK(&snode
->source_timer
, amt_source_work
);
383 /* RFC 3810 - 7.2.2. Definition of Filter Timers
385 * Router Mode Filter Timer Actions/Comments
386 * ----------- ----------------- ----------------
388 * INCLUDE Not Used All listeners in
391 * EXCLUDE Timer > 0 At least one listener
394 * EXCLUDE Timer == 0 No more listeners in
395 * EXCLUDE mode for the
397 * If the Requested List
400 * Record. If not, switch
401 * to INCLUDE filter mode;
404 * moved to the Include
405 * List, and the Exclude
408 static void amt_group_work(struct work_struct
*work
)
410 struct amt_group_node
*gnode
= container_of(to_delayed_work(work
),
411 struct amt_group_node
,
413 struct amt_tunnel_list
*tunnel
= gnode
->tunnel_list
;
414 struct amt_dev
*amt
= gnode
->amt
;
415 struct amt_source_node
*snode
;
416 bool delete_group
= true;
417 struct hlist_node
*t
;
420 buckets
= amt
->hash_buckets
;
422 spin_lock_bh(&tunnel
->lock
);
423 if (gnode
->filter_mode
== MCAST_INCLUDE
) {
425 spin_unlock_bh(&tunnel
->lock
);
430 for (i
= 0; i
< buckets
; i
++) {
431 hlist_for_each_entry_safe(snode
, t
,
432 &gnode
->sources
[i
], node
) {
433 if (!delayed_work_pending(&snode
->source_timer
) ||
434 snode
->status
== AMT_SOURCE_STATUS_D_FWD
) {
435 amt_destroy_source(snode
);
437 delete_group
= false;
438 snode
->status
= AMT_SOURCE_STATUS_FWD
;
443 amt_del_group(amt
, gnode
);
445 gnode
->filter_mode
= MCAST_INCLUDE
;
447 spin_unlock_bh(&tunnel
->lock
);
452 /* Non-existent group is created as INCLUDE {empty}:
454 * RFC 3376 - 5.1. Action on Change of Interface State
456 * If no interface state existed for that multicast address before
457 * the change (i.e., the change consisted of creating a new
458 * per-interface record), or if no state exists after the change
459 * (i.e., the change consisted of deleting a per-interface record),
460 * then the "non-existent" state is considered to have a filter mode
461 * of INCLUDE and an empty source list.
463 static struct amt_group_node
*amt_add_group(struct amt_dev
*amt
,
464 struct amt_tunnel_list
*tunnel
,
465 union amt_addr
*group
,
466 union amt_addr
*host
,
469 struct amt_group_node
*gnode
;
473 if (tunnel
->nr_groups
>= amt
->max_groups
)
474 return ERR_PTR(-ENOSPC
);
476 gnode
= kzalloc(sizeof(*gnode
) +
477 (sizeof(struct hlist_head
) * amt
->hash_buckets
),
479 if (unlikely(!gnode
))
480 return ERR_PTR(-ENOMEM
);
483 gnode
->group_addr
= *group
;
484 gnode
->host_addr
= *host
;
486 gnode
->tunnel_list
= tunnel
;
487 gnode
->filter_mode
= MCAST_INCLUDE
;
488 INIT_HLIST_NODE(&gnode
->node
);
489 INIT_DELAYED_WORK(&gnode
->group_timer
, amt_group_work
);
490 for (i
= 0; i
< amt
->hash_buckets
; i
++)
491 INIT_HLIST_HEAD(&gnode
->sources
[i
]);
493 hash
= amt_group_hash(tunnel
, group
);
494 hlist_add_head_rcu(&gnode
->node
, &tunnel
->groups
[hash
]);
498 netdev_dbg(amt
->dev
, "Join group %pI4\n",
499 &gnode
->group_addr
.ip4
);
500 #if IS_ENABLED(CONFIG_IPV6)
502 netdev_dbg(amt
->dev
, "Join group %pI6\n",
503 &gnode
->group_addr
.ip6
);
509 static struct sk_buff
*amt_build_igmp_gq(struct amt_dev
*amt
)
511 u8 ra
[AMT_IPHDR_OPTS
] = { IPOPT_RA
, 4, 0, 0 };
512 int hlen
= LL_RESERVED_SPACE(amt
->dev
);
513 int tlen
= amt
->dev
->needed_tailroom
;
514 struct igmpv3_query
*ihv3
;
515 void *csum_start
= NULL
;
516 __sum16
*csum
= NULL
;
523 len
= hlen
+ tlen
+ sizeof(*iph
) + AMT_IPHDR_OPTS
+ sizeof(*ihv3
);
524 skb
= netdev_alloc_skb_ip_align(amt
->dev
, len
);
528 skb_reserve(skb
, hlen
);
529 skb_push(skb
, sizeof(*eth
));
530 skb
->protocol
= htons(ETH_P_IP
);
531 skb_reset_mac_header(skb
);
532 skb
->priority
= TC_PRIO_CONTROL
;
533 skb_put(skb
, sizeof(*iph
));
534 skb_put_data(skb
, ra
, sizeof(ra
));
535 skb_put(skb
, sizeof(*ihv3
));
536 skb_pull(skb
, sizeof(*eth
));
537 skb_reset_network_header(skb
);
541 iph
->ihl
= (sizeof(struct iphdr
) + AMT_IPHDR_OPTS
) >> 2;
543 iph
->tot_len
= htons(sizeof(*iph
) + AMT_IPHDR_OPTS
+ sizeof(*ihv3
));
544 iph
->frag_off
= htons(IP_DF
);
547 iph
->protocol
= IPPROTO_IGMP
;
548 iph
->daddr
= htonl(INADDR_ALLHOSTS_GROUP
);
549 iph
->saddr
= htonl(INADDR_ANY
);
553 ether_addr_copy(eth
->h_source
, amt
->dev
->dev_addr
);
554 ip_eth_mc_map(htonl(INADDR_ALLHOSTS_GROUP
), eth
->h_dest
);
555 eth
->h_proto
= htons(ETH_P_IP
);
557 ihv3
= skb_pull(skb
, sizeof(*iph
) + AMT_IPHDR_OPTS
);
558 skb_reset_transport_header(skb
);
559 ihv3
->type
= IGMP_HOST_MEMBERSHIP_QUERY
;
562 ihv3
->qqic
= amt
->qi
;
565 ihv3
->suppress
= false;
566 ihv3
->qrv
= READ_ONCE(amt
->net
->ipv4
.sysctl_igmp_qrv
);
569 csum_start
= (void *)ihv3
;
570 *csum
= ip_compute_csum(csum_start
, sizeof(*ihv3
));
571 offset
= skb_transport_offset(skb
);
572 skb
->csum
= skb_checksum(skb
, offset
, skb
->len
- offset
, 0);
573 skb
->ip_summed
= CHECKSUM_NONE
;
575 skb_push(skb
, sizeof(*eth
) + sizeof(*iph
) + AMT_IPHDR_OPTS
);
580 static void amt_update_gw_status(struct amt_dev
*amt
, enum amt_status status
,
583 if (validate
&& amt
->status
>= status
)
585 netdev_dbg(amt
->dev
, "Update GW status %s -> %s",
586 status_str
[amt
->status
], status_str
[status
]);
587 WRITE_ONCE(amt
->status
, status
);
590 static void __amt_update_relay_status(struct amt_tunnel_list
*tunnel
,
591 enum amt_status status
,
594 if (validate
&& tunnel
->status
>= status
)
596 netdev_dbg(tunnel
->amt
->dev
,
597 "Update Tunnel(IP = %pI4, PORT = %u) status %s -> %s",
598 &tunnel
->ip4
, ntohs(tunnel
->source_port
),
599 status_str
[tunnel
->status
], status_str
[status
]);
600 tunnel
->status
= status
;
603 static void amt_update_relay_status(struct amt_tunnel_list
*tunnel
,
604 enum amt_status status
, bool validate
)
606 spin_lock_bh(&tunnel
->lock
);
607 __amt_update_relay_status(tunnel
, status
, validate
);
608 spin_unlock_bh(&tunnel
->lock
);
611 static void amt_send_discovery(struct amt_dev
*amt
)
613 struct amt_header_discovery
*amtd
;
614 int hlen
, tlen
, offset
;
625 sock
= rcu_dereference(amt
->sock
);
629 if (!netif_running(amt
->stream_dev
) || !netif_running(amt
->dev
))
632 rt
= ip_route_output_ports(amt
->net
, &fl4
, sock
->sk
,
633 amt
->discovery_ip
, amt
->local_ip
,
634 amt
->gw_port
, amt
->relay_port
,
636 amt
->stream_dev
->ifindex
);
638 amt
->dev
->stats
.tx_errors
++;
642 hlen
= LL_RESERVED_SPACE(amt
->dev
);
643 tlen
= amt
->dev
->needed_tailroom
;
644 len
= hlen
+ tlen
+ sizeof(*iph
) + sizeof(*udph
) + sizeof(*amtd
);
645 skb
= netdev_alloc_skb_ip_align(amt
->dev
, len
);
648 amt
->dev
->stats
.tx_errors
++;
652 skb
->priority
= TC_PRIO_CONTROL
;
653 skb_dst_set(skb
, &rt
->dst
);
655 len
= sizeof(*iph
) + sizeof(*udph
) + sizeof(*amtd
);
656 skb_reset_network_header(skb
);
658 amtd
= skb_pull(skb
, sizeof(*iph
) + sizeof(*udph
));
660 amtd
->type
= AMT_MSG_DISCOVERY
;
662 amtd
->nonce
= amt
->nonce
;
663 skb_push(skb
, sizeof(*udph
));
664 skb_reset_transport_header(skb
);
666 udph
->source
= amt
->gw_port
;
667 udph
->dest
= amt
->relay_port
;
668 udph
->len
= htons(sizeof(*udph
) + sizeof(*amtd
));
670 offset
= skb_transport_offset(skb
);
671 skb
->csum
= skb_checksum(skb
, offset
, skb
->len
- offset
, 0);
672 udph
->check
= csum_tcpudp_magic(amt
->local_ip
, amt
->discovery_ip
,
673 sizeof(*udph
) + sizeof(*amtd
),
674 IPPROTO_UDP
, skb
->csum
);
676 skb_push(skb
, sizeof(*iph
));
679 iph
->ihl
= (sizeof(struct iphdr
)) >> 2;
682 iph
->ttl
= ip4_dst_hoplimit(&rt
->dst
);
683 iph
->daddr
= amt
->discovery_ip
;
684 iph
->saddr
= amt
->local_ip
;
685 iph
->protocol
= IPPROTO_UDP
;
686 iph
->tot_len
= htons(len
);
688 skb
->ip_summed
= CHECKSUM_NONE
;
689 ip_select_ident(amt
->net
, skb
, NULL
);
691 err
= ip_local_out(amt
->net
, sock
->sk
, skb
);
692 if (unlikely(net_xmit_eval(err
)))
693 amt
->dev
->stats
.tx_errors
++;
695 amt_update_gw_status(amt
, AMT_STATUS_SENT_DISCOVERY
, true);
700 static void amt_send_request(struct amt_dev
*amt
, bool v6
)
702 struct amt_header_request
*amtrh
;
703 int hlen
, tlen
, offset
;
714 sock
= rcu_dereference(amt
->sock
);
718 if (!netif_running(amt
->stream_dev
) || !netif_running(amt
->dev
))
721 rt
= ip_route_output_ports(amt
->net
, &fl4
, sock
->sk
,
722 amt
->remote_ip
, amt
->local_ip
,
723 amt
->gw_port
, amt
->relay_port
,
725 amt
->stream_dev
->ifindex
);
727 amt
->dev
->stats
.tx_errors
++;
731 hlen
= LL_RESERVED_SPACE(amt
->dev
);
732 tlen
= amt
->dev
->needed_tailroom
;
733 len
= hlen
+ tlen
+ sizeof(*iph
) + sizeof(*udph
) + sizeof(*amtrh
);
734 skb
= netdev_alloc_skb_ip_align(amt
->dev
, len
);
737 amt
->dev
->stats
.tx_errors
++;
741 skb
->priority
= TC_PRIO_CONTROL
;
742 skb_dst_set(skb
, &rt
->dst
);
744 len
= sizeof(*iph
) + sizeof(*udph
) + sizeof(*amtrh
);
745 skb_reset_network_header(skb
);
747 amtrh
= skb_pull(skb
, sizeof(*iph
) + sizeof(*udph
));
749 amtrh
->type
= AMT_MSG_REQUEST
;
750 amtrh
->reserved1
= 0;
752 amtrh
->reserved2
= 0;
753 amtrh
->nonce
= amt
->nonce
;
754 skb_push(skb
, sizeof(*udph
));
755 skb_reset_transport_header(skb
);
757 udph
->source
= amt
->gw_port
;
758 udph
->dest
= amt
->relay_port
;
759 udph
->len
= htons(sizeof(*amtrh
) + sizeof(*udph
));
761 offset
= skb_transport_offset(skb
);
762 skb
->csum
= skb_checksum(skb
, offset
, skb
->len
- offset
, 0);
763 udph
->check
= csum_tcpudp_magic(amt
->local_ip
, amt
->remote_ip
,
764 sizeof(*udph
) + sizeof(*amtrh
),
765 IPPROTO_UDP
, skb
->csum
);
767 skb_push(skb
, sizeof(*iph
));
770 iph
->ihl
= (sizeof(struct iphdr
)) >> 2;
773 iph
->ttl
= ip4_dst_hoplimit(&rt
->dst
);
774 iph
->daddr
= amt
->remote_ip
;
775 iph
->saddr
= amt
->local_ip
;
776 iph
->protocol
= IPPROTO_UDP
;
777 iph
->tot_len
= htons(len
);
779 skb
->ip_summed
= CHECKSUM_NONE
;
780 ip_select_ident(amt
->net
, skb
, NULL
);
782 err
= ip_local_out(amt
->net
, sock
->sk
, skb
);
783 if (unlikely(net_xmit_eval(err
)))
784 amt
->dev
->stats
.tx_errors
++;
790 static void amt_send_igmp_gq(struct amt_dev
*amt
,
791 struct amt_tunnel_list
*tunnel
)
795 skb
= amt_build_igmp_gq(amt
);
799 amt_skb_cb(skb
)->tunnel
= tunnel
;
803 #if IS_ENABLED(CONFIG_IPV6)
804 static struct sk_buff
*amt_build_mld_gq(struct amt_dev
*amt
)
806 u8 ra
[AMT_IP6HDR_OPTS
] = { IPPROTO_ICMPV6
, 0, IPV6_TLV_ROUTERALERT
,
807 2, 0, 0, IPV6_TLV_PAD1
, IPV6_TLV_PAD1
};
808 int hlen
= LL_RESERVED_SPACE(amt
->dev
);
809 int tlen
= amt
->dev
->needed_tailroom
;
810 struct mld2_query
*mld2q
;
811 void *csum_start
= NULL
;
812 struct ipv6hdr
*ip6h
;
817 len
= hlen
+ tlen
+ sizeof(*ip6h
) + sizeof(ra
) + sizeof(*mld2q
);
818 skb
= netdev_alloc_skb_ip_align(amt
->dev
, len
);
822 skb_reserve(skb
, hlen
);
823 skb_push(skb
, sizeof(*eth
));
824 skb_reset_mac_header(skb
);
826 skb
->priority
= TC_PRIO_CONTROL
;
827 skb
->protocol
= htons(ETH_P_IPV6
);
828 skb_put_zero(skb
, sizeof(*ip6h
));
829 skb_put_data(skb
, ra
, sizeof(ra
));
830 skb_put_zero(skb
, sizeof(*mld2q
));
831 skb_pull(skb
, sizeof(*eth
));
832 skb_reset_network_header(skb
);
833 ip6h
= ipv6_hdr(skb
);
834 ip6h
->payload_len
= htons(sizeof(ra
) + sizeof(*mld2q
));
835 ip6h
->nexthdr
= NEXTHDR_HOP
;
837 ip6h
->daddr
= mld2_all_node
;
838 ip6_flow_hdr(ip6h
, 0, 0);
840 if (ipv6_dev_get_saddr(amt
->net
, amt
->dev
, &ip6h
->daddr
, 0,
842 amt
->dev
->stats
.tx_errors
++;
847 eth
->h_proto
= htons(ETH_P_IPV6
);
848 ether_addr_copy(eth
->h_source
, amt
->dev
->dev_addr
);
849 ipv6_eth_mc_map(&mld2_all_node
, eth
->h_dest
);
851 skb_pull(skb
, sizeof(*ip6h
) + sizeof(ra
));
852 skb_reset_transport_header(skb
);
853 mld2q
= (struct mld2_query
*)icmp6_hdr(skb
);
854 mld2q
->mld2q_mrc
= htons(1);
855 mld2q
->mld2q_type
= ICMPV6_MGM_QUERY
;
856 mld2q
->mld2q_code
= 0;
857 mld2q
->mld2q_cksum
= 0;
858 mld2q
->mld2q_resv1
= 0;
859 mld2q
->mld2q_resv2
= 0;
860 mld2q
->mld2q_suppress
= 0;
861 mld2q
->mld2q_qrv
= amt
->qrv
;
862 mld2q
->mld2q_nsrcs
= 0;
863 mld2q
->mld2q_qqic
= amt
->qi
;
864 csum_start
= (void *)mld2q
;
865 mld2q
->mld2q_cksum
= csum_ipv6_magic(&ip6h
->saddr
, &ip6h
->daddr
,
868 csum_partial(csum_start
,
871 skb
->ip_summed
= CHECKSUM_NONE
;
872 skb_push(skb
, sizeof(*eth
) + sizeof(*ip6h
) + sizeof(ra
));
876 static void amt_send_mld_gq(struct amt_dev
*amt
, struct amt_tunnel_list
*tunnel
)
880 skb
= amt_build_mld_gq(amt
);
884 amt_skb_cb(skb
)->tunnel
= tunnel
;
888 static void amt_send_mld_gq(struct amt_dev
*amt
, struct amt_tunnel_list
*tunnel
)
893 static bool amt_queue_event(struct amt_dev
*amt
, enum amt_event event
,
898 spin_lock_bh(&amt
->lock
);
899 if (amt
->nr_events
>= AMT_MAX_EVENTS
) {
900 spin_unlock_bh(&amt
->lock
);
904 index
= (amt
->event_idx
+ amt
->nr_events
) % AMT_MAX_EVENTS
;
905 amt
->events
[index
].event
= event
;
906 amt
->events
[index
].skb
= skb
;
908 amt
->event_idx
%= AMT_MAX_EVENTS
;
909 queue_work(amt_wq
, &amt
->event_wq
);
910 spin_unlock_bh(&amt
->lock
);
915 static void amt_secret_work(struct work_struct
*work
)
917 struct amt_dev
*amt
= container_of(to_delayed_work(work
),
921 spin_lock_bh(&amt
->lock
);
922 get_random_bytes(&amt
->key
, sizeof(siphash_key_t
));
923 spin_unlock_bh(&amt
->lock
);
924 mod_delayed_work(amt_wq
, &amt
->secret_wq
,
925 msecs_to_jiffies(AMT_SECRET_TIMEOUT
));
928 static void amt_event_send_discovery(struct amt_dev
*amt
)
930 if (amt
->status
> AMT_STATUS_SENT_DISCOVERY
)
932 get_random_bytes(&amt
->nonce
, sizeof(__be32
));
934 amt_send_discovery(amt
);
936 mod_delayed_work(amt_wq
, &amt
->discovery_wq
,
937 msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT
));
940 static void amt_discovery_work(struct work_struct
*work
)
942 struct amt_dev
*amt
= container_of(to_delayed_work(work
),
946 if (amt_queue_event(amt
, AMT_EVENT_SEND_DISCOVERY
, NULL
))
947 mod_delayed_work(amt_wq
, &amt
->discovery_wq
,
948 msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT
));
951 static void amt_event_send_request(struct amt_dev
*amt
)
955 if (amt
->status
< AMT_STATUS_RECEIVED_ADVERTISEMENT
)
958 if (amt
->req_cnt
> AMT_MAX_REQ_COUNT
) {
959 netdev_dbg(amt
->dev
, "Gateway is not ready");
960 amt
->qi
= AMT_INIT_REQ_TIMEOUT
;
961 WRITE_ONCE(amt
->ready4
, false);
962 WRITE_ONCE(amt
->ready6
, false);
964 amt_update_gw_status(amt
, AMT_STATUS_INIT
, false);
971 WRITE_ONCE(amt
->ready4
, false);
972 WRITE_ONCE(amt
->ready6
, false);
973 get_random_bytes(&amt
->nonce
, sizeof(__be32
));
976 amt_send_request(amt
, false);
977 amt_send_request(amt
, true);
978 amt_update_gw_status(amt
, AMT_STATUS_SENT_REQUEST
, true);
981 exp
= min_t(u32
, (1 * (1 << amt
->req_cnt
)), AMT_MAX_REQ_TIMEOUT
);
982 mod_delayed_work(amt_wq
, &amt
->req_wq
, msecs_to_jiffies(exp
* 1000));
985 static void amt_req_work(struct work_struct
*work
)
987 struct amt_dev
*amt
= container_of(to_delayed_work(work
),
991 if (amt_queue_event(amt
, AMT_EVENT_SEND_REQUEST
, NULL
))
992 mod_delayed_work(amt_wq
, &amt
->req_wq
,
993 msecs_to_jiffies(100));
996 static bool amt_send_membership_update(struct amt_dev
*amt
,
1000 struct amt_header_membership_update
*amtmu
;
1001 struct socket
*sock
;
1007 sock
= rcu_dereference_bh(amt
->sock
);
1011 err
= skb_cow_head(skb
, LL_RESERVED_SPACE(amt
->dev
) + sizeof(*amtmu
) +
1012 sizeof(*iph
) + sizeof(struct udphdr
));
1016 skb_reset_inner_headers(skb
);
1017 memset(&fl4
, 0, sizeof(struct flowi4
));
1018 fl4
.flowi4_oif
= amt
->stream_dev
->ifindex
;
1019 fl4
.daddr
= amt
->remote_ip
;
1020 fl4
.saddr
= amt
->local_ip
;
1021 fl4
.flowi4_tos
= AMT_TOS
;
1022 fl4
.flowi4_proto
= IPPROTO_UDP
;
1023 rt
= ip_route_output_key(amt
->net
, &fl4
);
1025 netdev_dbg(amt
->dev
, "no route to %pI4\n", &amt
->remote_ip
);
1029 amtmu
= skb_push(skb
, sizeof(*amtmu
));
1031 amtmu
->type
= AMT_MSG_MEMBERSHIP_UPDATE
;
1032 amtmu
->reserved
= 0;
1033 amtmu
->nonce
= amt
->nonce
;
1034 amtmu
->response_mac
= amt
->mac
;
1037 skb_set_inner_protocol(skb
, htons(ETH_P_IP
));
1039 skb_set_inner_protocol(skb
, htons(ETH_P_IPV6
));
1040 udp_tunnel_xmit_skb(rt
, sock
->sk
, skb
,
1044 ip4_dst_hoplimit(&rt
->dst
),
1050 amt_update_gw_status(amt
, AMT_STATUS_SENT_UPDATE
, true);
1054 static void amt_send_multicast_data(struct amt_dev
*amt
,
1055 const struct sk_buff
*oskb
,
1056 struct amt_tunnel_list
*tunnel
,
1059 struct amt_header_mcast_data
*amtmd
;
1060 struct socket
*sock
;
1061 struct sk_buff
*skb
;
1066 sock
= rcu_dereference_bh(amt
->sock
);
1070 skb
= skb_copy_expand(oskb
, sizeof(*amtmd
) + sizeof(*iph
) +
1071 sizeof(struct udphdr
), 0, GFP_ATOMIC
);
1075 skb_reset_inner_headers(skb
);
1076 memset(&fl4
, 0, sizeof(struct flowi4
));
1077 fl4
.flowi4_oif
= amt
->stream_dev
->ifindex
;
1078 fl4
.daddr
= tunnel
->ip4
;
1079 fl4
.saddr
= amt
->local_ip
;
1080 fl4
.flowi4_proto
= IPPROTO_UDP
;
1081 rt
= ip_route_output_key(amt
->net
, &fl4
);
1083 netdev_dbg(amt
->dev
, "no route to %pI4\n", &tunnel
->ip4
);
1088 amtmd
= skb_push(skb
, sizeof(*amtmd
));
1090 amtmd
->reserved
= 0;
1091 amtmd
->type
= AMT_MSG_MULTICAST_DATA
;
1094 skb_set_inner_protocol(skb
, htons(ETH_P_IP
));
1096 skb_set_inner_protocol(skb
, htons(ETH_P_IPV6
));
1097 udp_tunnel_xmit_skb(rt
, sock
->sk
, skb
,
1101 ip4_dst_hoplimit(&rt
->dst
),
1104 tunnel
->source_port
,
1109 static bool amt_send_membership_query(struct amt_dev
*amt
,
1110 struct sk_buff
*skb
,
1111 struct amt_tunnel_list
*tunnel
,
1114 struct amt_header_membership_query
*amtmq
;
1115 struct socket
*sock
;
1120 sock
= rcu_dereference_bh(amt
->sock
);
1124 err
= skb_cow_head(skb
, LL_RESERVED_SPACE(amt
->dev
) + sizeof(*amtmq
) +
1125 sizeof(struct iphdr
) + sizeof(struct udphdr
));
1129 skb_reset_inner_headers(skb
);
1130 memset(&fl4
, 0, sizeof(struct flowi4
));
1131 fl4
.flowi4_oif
= amt
->stream_dev
->ifindex
;
1132 fl4
.daddr
= tunnel
->ip4
;
1133 fl4
.saddr
= amt
->local_ip
;
1134 fl4
.flowi4_tos
= AMT_TOS
;
1135 fl4
.flowi4_proto
= IPPROTO_UDP
;
1136 rt
= ip_route_output_key(amt
->net
, &fl4
);
1138 netdev_dbg(amt
->dev
, "no route to %pI4\n", &tunnel
->ip4
);
1142 amtmq
= skb_push(skb
, sizeof(*amtmq
));
1144 amtmq
->type
= AMT_MSG_MEMBERSHIP_QUERY
;
1145 amtmq
->reserved
= 0;
1148 amtmq
->nonce
= tunnel
->nonce
;
1149 amtmq
->response_mac
= tunnel
->mac
;
1152 skb_set_inner_protocol(skb
, htons(ETH_P_IP
));
1154 skb_set_inner_protocol(skb
, htons(ETH_P_IPV6
));
1155 udp_tunnel_xmit_skb(rt
, sock
->sk
, skb
,
1159 ip4_dst_hoplimit(&rt
->dst
),
1162 tunnel
->source_port
,
1165 amt_update_relay_status(tunnel
, AMT_STATUS_SENT_QUERY
, true);
1169 static netdev_tx_t
amt_dev_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
1171 struct amt_dev
*amt
= netdev_priv(dev
);
1172 struct amt_tunnel_list
*tunnel
;
1173 struct amt_group_node
*gnode
;
1174 union amt_addr group
= {0,};
1175 #if IS_ENABLED(CONFIG_IPV6)
1176 struct ipv6hdr
*ip6h
;
1177 struct mld_msg
*mld
;
1179 bool report
= false;
1188 if (iph
->version
== 4) {
1189 if (!ipv4_is_multicast(iph
->daddr
))
1192 if (!ip_mc_check_igmp(skb
)) {
1195 case IGMPV3_HOST_MEMBERSHIP_REPORT
:
1196 case IGMP_HOST_MEMBERSHIP_REPORT
:
1199 case IGMP_HOST_MEMBERSHIP_QUERY
:
1209 group
.ip4
= iph
->daddr
;
1210 #if IS_ENABLED(CONFIG_IPV6)
1211 } else if (iph
->version
== 6) {
1212 ip6h
= ipv6_hdr(skb
);
1213 if (!ipv6_addr_is_multicast(&ip6h
->daddr
))
1216 if (!ipv6_mc_check_mld(skb
)) {
1217 mld
= (struct mld_msg
*)skb_transport_header(skb
);
1218 switch (mld
->mld_type
) {
1219 case ICMPV6_MGM_REPORT
:
1220 case ICMPV6_MLD2_REPORT
:
1223 case ICMPV6_MGM_QUERY
:
1233 group
.ip6
= ip6h
->daddr
;
1236 dev
->stats
.tx_errors
++;
1240 if (!pskb_may_pull(skb
, sizeof(struct ethhdr
)))
1243 skb_pull(skb
, sizeof(struct ethhdr
));
1245 if (amt
->mode
== AMT_MODE_GATEWAY
) {
1246 /* Gateway only passes IGMP/MLD packets */
1249 if ((!v6
&& !READ_ONCE(amt
->ready4
)) ||
1250 (v6
&& !READ_ONCE(amt
->ready6
)))
1252 if (amt_send_membership_update(amt
, skb
, v6
))
1255 } else if (amt
->mode
== AMT_MODE_RELAY
) {
1257 tunnel
= amt_skb_cb(skb
)->tunnel
;
1263 /* Do not forward unexpected query */
1264 if (amt_send_membership_query(amt
, skb
, tunnel
, v6
))
1271 list_for_each_entry_rcu(tunnel
, &amt
->tunnel_list
, list
) {
1272 hash
= amt_group_hash(tunnel
, &group
);
1273 hlist_for_each_entry_rcu(gnode
, &tunnel
->groups
[hash
],
1276 if (gnode
->group_addr
.ip4
== iph
->daddr
)
1278 #if IS_ENABLED(CONFIG_IPV6)
1280 if (ipv6_addr_equal(&gnode
->group_addr
.ip6
,
1288 amt_send_multicast_data(amt
, skb
, tunnel
, v6
);
1293 return NETDEV_TX_OK
;
1297 dev
->stats
.tx_dropped
++;
1298 return NETDEV_TX_OK
;
1301 static int amt_parse_type(struct sk_buff
*skb
)
1303 struct amt_header
*amth
;
1305 if (!pskb_may_pull(skb
, sizeof(struct udphdr
) +
1306 sizeof(struct amt_header
)))
1309 amth
= (struct amt_header
*)(udp_hdr(skb
) + 1);
1311 if (amth
->version
!= 0)
1314 if (amth
->type
>= __AMT_MSG_MAX
|| !amth
->type
)
1319 static void amt_clear_groups(struct amt_tunnel_list
*tunnel
)
1321 struct amt_dev
*amt
= tunnel
->amt
;
1322 struct amt_group_node
*gnode
;
1323 struct hlist_node
*t
;
1326 spin_lock_bh(&tunnel
->lock
);
1328 for (i
= 0; i
< amt
->hash_buckets
; i
++)
1329 hlist_for_each_entry_safe(gnode
, t
, &tunnel
->groups
[i
], node
)
1330 amt_del_group(amt
, gnode
);
1332 spin_unlock_bh(&tunnel
->lock
);
1335 static void amt_tunnel_expire(struct work_struct
*work
)
1337 struct amt_tunnel_list
*tunnel
= container_of(to_delayed_work(work
),
1338 struct amt_tunnel_list
,
1340 struct amt_dev
*amt
= tunnel
->amt
;
1342 spin_lock_bh(&amt
->lock
);
1344 list_del_rcu(&tunnel
->list
);
1346 amt_clear_groups(tunnel
);
1348 spin_unlock_bh(&amt
->lock
);
1349 kfree_rcu(tunnel
, rcu
);
1352 static void amt_cleanup_srcs(struct amt_dev
*amt
,
1353 struct amt_tunnel_list
*tunnel
,
1354 struct amt_group_node
*gnode
)
1356 struct amt_source_node
*snode
;
1357 struct hlist_node
*t
;
1360 /* Delete old sources */
1361 for (i
= 0; i
< amt
->hash_buckets
; i
++) {
1362 hlist_for_each_entry_safe(snode
, t
, &gnode
->sources
[i
], node
) {
1363 if (snode
->flags
== AMT_SOURCE_OLD
)
1364 amt_destroy_source(snode
);
1368 /* switch from new to old */
1369 for (i
= 0; i
< amt
->hash_buckets
; i
++) {
1370 hlist_for_each_entry_rcu(snode
, &gnode
->sources
[i
], node
) {
1371 snode
->flags
= AMT_SOURCE_OLD
;
1373 netdev_dbg(snode
->gnode
->amt
->dev
,
1374 "Add source as OLD %pI4 from %pI4\n",
1375 &snode
->source_addr
.ip4
,
1376 &gnode
->group_addr
.ip4
);
1377 #if IS_ENABLED(CONFIG_IPV6)
1379 netdev_dbg(snode
->gnode
->amt
->dev
,
1380 "Add source as OLD %pI6 from %pI6\n",
1381 &snode
->source_addr
.ip6
,
1382 &gnode
->group_addr
.ip6
);
1388 static void amt_add_srcs(struct amt_dev
*amt
, struct amt_tunnel_list
*tunnel
,
1389 struct amt_group_node
*gnode
, void *grec
,
1392 struct igmpv3_grec
*igmp_grec
;
1393 struct amt_source_node
*snode
;
1394 #if IS_ENABLED(CONFIG_IPV6)
1395 struct mld2_grec
*mld_grec
;
1397 union amt_addr src
= {0,};
1404 nsrcs
= ntohs(igmp_grec
->grec_nsrcs
);
1406 #if IS_ENABLED(CONFIG_IPV6)
1408 nsrcs
= ntohs(mld_grec
->grec_nsrcs
);
1413 for (i
= 0; i
< nsrcs
; i
++) {
1414 if (tunnel
->nr_sources
>= amt
->max_sources
)
1417 src
.ip4
= igmp_grec
->grec_src
[i
];
1418 #if IS_ENABLED(CONFIG_IPV6)
1420 memcpy(&src
.ip6
, &mld_grec
->grec_src
[i
],
1421 sizeof(struct in6_addr
));
1423 if (amt_lookup_src(tunnel
, gnode
, AMT_FILTER_ALL
, &src
))
1426 snode
= amt_alloc_snode(gnode
, &src
);
1428 hash
= amt_source_hash(tunnel
, &snode
->source_addr
);
1429 hlist_add_head_rcu(&snode
->node
, &gnode
->sources
[hash
]);
1430 tunnel
->nr_sources
++;
1431 gnode
->nr_sources
++;
1434 netdev_dbg(snode
->gnode
->amt
->dev
,
1435 "Add source as NEW %pI4 from %pI4\n",
1436 &snode
->source_addr
.ip4
,
1437 &gnode
->group_addr
.ip4
);
1438 #if IS_ENABLED(CONFIG_IPV6)
1440 netdev_dbg(snode
->gnode
->amt
->dev
,
1441 "Add source as NEW %pI6 from %pI6\n",
1442 &snode
->source_addr
.ip6
,
1443 &gnode
->group_addr
.ip6
);
1449 /* Router State Report Rec'd New Router State
1450 * ------------ ------------ ----------------
1451 * EXCLUDE (X,Y) IS_IN (A) EXCLUDE (X+A,Y-A)
1453 * -----------+-----------+-----------+
1455 * -----------+-----------+-----------+
1457 * -----------+-----------+-----------+
1459 * -----------+-----------+-----------+
1461 * -----------+-----------+-----------+
1463 * a) Received sources are NONE/NEW
1464 * b) All NONE will be deleted by amt_cleanup_srcs().
1465 * c) All OLD will be deleted by amt_cleanup_srcs().
1466 * d) After delete, NEW source will be switched to OLD.
1468 static void amt_lookup_act_srcs(struct amt_tunnel_list
*tunnel
,
1469 struct amt_group_node
*gnode
,
1472 enum amt_filter filter
,
1476 struct amt_dev
*amt
= tunnel
->amt
;
1477 struct amt_source_node
*snode
;
1478 struct igmpv3_grec
*igmp_grec
;
1479 #if IS_ENABLED(CONFIG_IPV6)
1480 struct mld2_grec
*mld_grec
;
1482 union amt_addr src
= {0,};
1483 struct hlist_node
*t
;
1489 nsrcs
= ntohs(igmp_grec
->grec_nsrcs
);
1491 #if IS_ENABLED(CONFIG_IPV6)
1493 nsrcs
= ntohs(mld_grec
->grec_nsrcs
);
1499 memset(&src
, 0, sizeof(union amt_addr
));
1503 for (i
= 0; i
< nsrcs
; i
++) {
1505 src
.ip4
= igmp_grec
->grec_src
[i
];
1506 #if IS_ENABLED(CONFIG_IPV6)
1508 memcpy(&src
.ip6
, &mld_grec
->grec_src
[i
],
1509 sizeof(struct in6_addr
));
1511 snode
= amt_lookup_src(tunnel
, gnode
, filter
, &src
);
1514 amt_act_src(tunnel
, gnode
, snode
, act
);
1519 for (i
= 0; i
< amt
->hash_buckets
; i
++) {
1520 hlist_for_each_entry_safe(snode
, t
, &gnode
->sources
[i
],
1522 if (amt_status_filter(snode
, filter
))
1523 amt_act_src(tunnel
, gnode
, snode
, act
);
1526 for (i
= 0; i
< nsrcs
; i
++) {
1528 src
.ip4
= igmp_grec
->grec_src
[i
];
1529 #if IS_ENABLED(CONFIG_IPV6)
1531 memcpy(&src
.ip6
, &mld_grec
->grec_src
[i
],
1532 sizeof(struct in6_addr
));
1534 snode
= amt_lookup_src(tunnel
, gnode
, filter
, &src
);
1537 amt_act_src(tunnel
, gnode
, snode
, act
);
1542 for (i
= 0; i
< amt
->hash_buckets
; i
++) {
1543 hlist_for_each_entry_safe(snode
, t
, &gnode
->sources
[i
],
1545 if (!amt_status_filter(snode
, filter
))
1547 for (j
= 0; j
< nsrcs
; j
++) {
1549 src
.ip4
= igmp_grec
->grec_src
[j
];
1550 #if IS_ENABLED(CONFIG_IPV6)
1553 &mld_grec
->grec_src
[j
],
1554 sizeof(struct in6_addr
));
1556 if (amt_addr_equal(&snode
->source_addr
,
1560 amt_act_src(tunnel
, gnode
, snode
, act
);
1566 case AMT_OPS_SUB_REV
:
1568 for (i
= 0; i
< nsrcs
; i
++) {
1570 src
.ip4
= igmp_grec
->grec_src
[i
];
1571 #if IS_ENABLED(CONFIG_IPV6)
1573 memcpy(&src
.ip6
, &mld_grec
->grec_src
[i
],
1574 sizeof(struct in6_addr
));
1576 snode
= amt_lookup_src(tunnel
, gnode
, AMT_FILTER_ALL
,
1579 snode
= amt_lookup_src(tunnel
, gnode
,
1582 amt_act_src(tunnel
, gnode
, snode
, act
);
1587 netdev_dbg(amt
->dev
, "Invalid type\n");
1592 static void amt_mcast_is_in_handler(struct amt_dev
*amt
,
1593 struct amt_tunnel_list
*tunnel
,
1594 struct amt_group_node
*gnode
,
1595 void *grec
, void *zero_grec
, bool v6
)
1597 if (gnode
->filter_mode
== MCAST_INCLUDE
) {
1598 /* Router State Report Rec'd New Router State Actions
1599 * ------------ ------------ ---------------- -------
1600 * INCLUDE (A) IS_IN (B) INCLUDE (A+B) (B)=GMI
1602 /* Update IS_IN (B) as FWD/NEW */
1603 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_UNI
,
1604 AMT_FILTER_NONE_NEW
,
1605 AMT_ACT_STATUS_FWD_NEW
,
1607 /* Update INCLUDE (A) as NEW */
1608 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_UNI
,
1610 AMT_ACT_STATUS_FWD_NEW
,
1613 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_INT
,
1619 * ------------ ------------ ---------------- -------
1620 * EXCLUDE (X,Y) IS_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
1622 /* Update (A) in (X, Y) as NONE/NEW */
1623 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_INT
,
1625 AMT_ACT_STATUS_NONE_NEW
,
1627 /* Update FWD/OLD as FWD/NEW */
1628 amt_lookup_act_srcs(tunnel
, gnode
, zero_grec
, AMT_OPS_UNI
,
1630 AMT_ACT_STATUS_FWD_NEW
,
1632 /* Update IS_IN (A) as FWD/NEW */
1633 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_INT
,
1634 AMT_FILTER_NONE_NEW
,
1635 AMT_ACT_STATUS_FWD_NEW
,
1637 /* Update EXCLUDE (, Y-A) as D_FWD_NEW */
1638 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_SUB
,
1640 AMT_ACT_STATUS_D_FWD_NEW
,
1645 static void amt_mcast_is_ex_handler(struct amt_dev
*amt
,
1646 struct amt_tunnel_list
*tunnel
,
1647 struct amt_group_node
*gnode
,
1648 void *grec
, void *zero_grec
, bool v6
)
1650 if (gnode
->filter_mode
== MCAST_INCLUDE
) {
1651 /* Router State Report Rec'd New Router State Actions
1652 * ------------ ------------ ---------------- -------
1653 * INCLUDE (A) IS_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
1657 /* EXCLUDE(A*B, ) */
1658 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_INT
,
1660 AMT_ACT_STATUS_FWD_NEW
,
1662 /* EXCLUDE(, B-A) */
1663 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_SUB_REV
,
1665 AMT_ACT_STATUS_D_FWD_NEW
,
1668 amt_lookup_act_srcs(tunnel
, gnode
, zero_grec
, AMT_OPS_UNI
,
1669 AMT_FILTER_D_FWD_NEW
,
1672 /* Group Timer=GMI */
1673 if (!mod_delayed_work(amt_wq
, &gnode
->group_timer
,
1674 msecs_to_jiffies(amt_gmi(amt
))))
1676 gnode
->filter_mode
= MCAST_EXCLUDE
;
1677 /* Delete (A-B) will be worked by amt_cleanup_srcs(). */
1679 /* Router State Report Rec'd New Router State Actions
1680 * ------------ ------------ ---------------- -------
1681 * EXCLUDE (X,Y) IS_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
1686 /* EXCLUDE (A-Y, ) */
1687 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_SUB_REV
,
1689 AMT_ACT_STATUS_FWD_NEW
,
1691 /* EXCLUDE (, Y*A ) */
1692 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_INT
,
1694 AMT_ACT_STATUS_D_FWD_NEW
,
1697 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_SUB_REV
,
1698 AMT_FILTER_BOTH_NEW
,
1701 /* Group Timer=GMI */
1702 if (!mod_delayed_work(amt_wq
, &gnode
->group_timer
,
1703 msecs_to_jiffies(amt_gmi(amt
))))
1705 /* Delete (X-A), (Y-A) will be worked by amt_cleanup_srcs(). */
1709 static void amt_mcast_to_in_handler(struct amt_dev
*amt
,
1710 struct amt_tunnel_list
*tunnel
,
1711 struct amt_group_node
*gnode
,
1712 void *grec
, void *zero_grec
, bool v6
)
1714 if (gnode
->filter_mode
== MCAST_INCLUDE
) {
1715 /* Router State Report Rec'd New Router State Actions
1716 * ------------ ------------ ---------------- -------
1717 * INCLUDE (A) TO_IN (B) INCLUDE (A+B) (B)=GMI
1720 /* Update TO_IN (B) sources as FWD/NEW */
1721 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_UNI
,
1722 AMT_FILTER_NONE_NEW
,
1723 AMT_ACT_STATUS_FWD_NEW
,
1725 /* Update INCLUDE (A) sources as NEW */
1726 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_UNI
,
1728 AMT_ACT_STATUS_FWD_NEW
,
1731 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_INT
,
1736 /* Router State Report Rec'd New Router State Actions
1737 * ------------ ------------ ---------------- -------
1738 * EXCLUDE (X,Y) TO_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
1742 /* Update TO_IN (A) sources as FWD/NEW */
1743 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_UNI
,
1744 AMT_FILTER_NONE_NEW
,
1745 AMT_ACT_STATUS_FWD_NEW
,
1747 /* Update EXCLUDE(X,) sources as FWD/NEW */
1748 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_UNI
,
1750 AMT_ACT_STATUS_FWD_NEW
,
1753 * (A) are already switched to FWD_NEW.
1754 * So, D_FWD/OLD -> D_FWD/NEW is okay.
1756 amt_lookup_act_srcs(tunnel
, gnode
, zero_grec
, AMT_OPS_UNI
,
1758 AMT_ACT_STATUS_D_FWD_NEW
,
1761 * Only FWD_NEW will have (A) sources.
1763 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_INT
,
1770 static void amt_mcast_to_ex_handler(struct amt_dev
*amt
,
1771 struct amt_tunnel_list
*tunnel
,
1772 struct amt_group_node
*gnode
,
1773 void *grec
, void *zero_grec
, bool v6
)
1775 if (gnode
->filter_mode
== MCAST_INCLUDE
) {
1776 /* Router State Report Rec'd New Router State Actions
1777 * ------------ ------------ ---------------- -------
1778 * INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
1783 /* EXCLUDE (A*B, ) */
1784 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_INT
,
1786 AMT_ACT_STATUS_FWD_NEW
,
1788 /* EXCLUDE (, B-A) */
1789 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_SUB_REV
,
1791 AMT_ACT_STATUS_D_FWD_NEW
,
1794 amt_lookup_act_srcs(tunnel
, gnode
, zero_grec
, AMT_OPS_UNI
,
1795 AMT_FILTER_D_FWD_NEW
,
1798 /* Group Timer=GMI */
1799 if (!mod_delayed_work(amt_wq
, &gnode
->group_timer
,
1800 msecs_to_jiffies(amt_gmi(amt
))))
1802 gnode
->filter_mode
= MCAST_EXCLUDE
;
1803 /* Delete (A-B) will be worked by amt_cleanup_srcs(). */
1805 /* Router State Report Rec'd New Router State Actions
1806 * ------------ ------------ ---------------- -------
1807 * EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
1813 /* Update (A-X-Y) as NONE/OLD */
1814 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_SUB_REV
,
1818 /* EXCLUDE (A-Y, ) */
1819 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_SUB_REV
,
1821 AMT_ACT_STATUS_FWD_NEW
,
1823 /* EXCLUDE (, Y*A) */
1824 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_INT
,
1826 AMT_ACT_STATUS_D_FWD_NEW
,
1828 /* Group Timer=GMI */
1829 if (!mod_delayed_work(amt_wq
, &gnode
->group_timer
,
1830 msecs_to_jiffies(amt_gmi(amt
))))
1832 /* Delete (X-A), (Y-A) will be worked by amt_cleanup_srcs(). */
1836 static void amt_mcast_allow_handler(struct amt_dev
*amt
,
1837 struct amt_tunnel_list
*tunnel
,
1838 struct amt_group_node
*gnode
,
1839 void *grec
, void *zero_grec
, bool v6
)
1841 if (gnode
->filter_mode
== MCAST_INCLUDE
) {
1842 /* Router State Report Rec'd New Router State Actions
1843 * ------------ ------------ ---------------- -------
1844 * INCLUDE (A) ALLOW (B) INCLUDE (A+B) (B)=GMI
1847 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_UNI
,
1849 AMT_ACT_STATUS_FWD_NEW
,
1852 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_INT
,
1857 /* Router State Report Rec'd New Router State Actions
1858 * ------------ ------------ ---------------- -------
1859 * EXCLUDE (X,Y) ALLOW (A) EXCLUDE (X+A,Y-A) (A)=GMI
1861 /* EXCLUDE (X+A, ) */
1862 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_UNI
,
1864 AMT_ACT_STATUS_FWD_NEW
,
1866 /* EXCLUDE (, Y-A) */
1867 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_SUB
,
1869 AMT_ACT_STATUS_D_FWD_NEW
,
1872 * All (A) source are now FWD/NEW status.
1874 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_INT
,
1881 static void amt_mcast_block_handler(struct amt_dev
*amt
,
1882 struct amt_tunnel_list
*tunnel
,
1883 struct amt_group_node
*gnode
,
1884 void *grec
, void *zero_grec
, bool v6
)
1886 if (gnode
->filter_mode
== MCAST_INCLUDE
) {
1887 /* Router State Report Rec'd New Router State Actions
1888 * ------------ ------------ ---------------- -------
1889 * INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B)
1892 amt_lookup_act_srcs(tunnel
, gnode
, zero_grec
, AMT_OPS_UNI
,
1894 AMT_ACT_STATUS_FWD_NEW
,
1897 /* Router State Report Rec'd New Router State Actions
1898 * ------------ ------------ ---------------- -------
1899 * EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
1902 /* (A-X-Y)=Group Timer */
1903 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_SUB_REV
,
1908 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_UNI
,
1910 AMT_ACT_STATUS_FWD_NEW
,
1912 /* EXCLUDE (X+(A-Y) */
1913 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_SUB_REV
,
1915 AMT_ACT_STATUS_FWD_NEW
,
1918 amt_lookup_act_srcs(tunnel
, gnode
, grec
, AMT_OPS_UNI
,
1920 AMT_ACT_STATUS_D_FWD_NEW
,
1926 * 7.3.2. In the Presence of Older Version Group Members
1928 * When Group Compatibility Mode is IGMPv2, a router internally
1929 * translates the following IGMPv2 messages for that group to their
1930 * IGMPv3 equivalents:
1932 * IGMPv2 Message IGMPv3 Equivalent
1933 * -------------- -----------------
1934 * Report IS_EX( {} )
1937 static void amt_igmpv2_report_handler(struct amt_dev
*amt
, struct sk_buff
*skb
,
1938 struct amt_tunnel_list
*tunnel
)
1940 struct igmphdr
*ih
= igmp_hdr(skb
);
1941 struct iphdr
*iph
= ip_hdr(skb
);
1942 struct amt_group_node
*gnode
;
1943 union amt_addr group
, host
;
1945 memset(&group
, 0, sizeof(union amt_addr
));
1946 group
.ip4
= ih
->group
;
1947 memset(&host
, 0, sizeof(union amt_addr
));
1948 host
.ip4
= iph
->saddr
;
1950 gnode
= amt_lookup_group(tunnel
, &group
, &host
, false);
1952 gnode
= amt_add_group(amt
, tunnel
, &group
, &host
, false);
1953 if (!IS_ERR(gnode
)) {
1954 gnode
->filter_mode
= MCAST_EXCLUDE
;
1955 if (!mod_delayed_work(amt_wq
, &gnode
->group_timer
,
1956 msecs_to_jiffies(amt_gmi(amt
))))
1963 * 7.3.2. In the Presence of Older Version Group Members
1965 * When Group Compatibility Mode is IGMPv2, a router internally
1966 * translates the following IGMPv2 messages for that group to their
1967 * IGMPv3 equivalents:
1969 * IGMPv2 Message IGMPv3 Equivalent
1970 * -------------- -----------------
1971 * Report IS_EX( {} )
1974 static void amt_igmpv2_leave_handler(struct amt_dev
*amt
, struct sk_buff
*skb
,
1975 struct amt_tunnel_list
*tunnel
)
1977 struct igmphdr
*ih
= igmp_hdr(skb
);
1978 struct iphdr
*iph
= ip_hdr(skb
);
1979 struct amt_group_node
*gnode
;
1980 union amt_addr group
, host
;
1982 memset(&group
, 0, sizeof(union amt_addr
));
1983 group
.ip4
= ih
->group
;
1984 memset(&host
, 0, sizeof(union amt_addr
));
1985 host
.ip4
= iph
->saddr
;
1987 gnode
= amt_lookup_group(tunnel
, &group
, &host
, false);
1989 amt_del_group(amt
, gnode
);
1992 static void amt_igmpv3_report_handler(struct amt_dev
*amt
, struct sk_buff
*skb
,
1993 struct amt_tunnel_list
*tunnel
)
1995 struct igmpv3_report
*ihrv3
= igmpv3_report_hdr(skb
);
1996 int len
= skb_transport_offset(skb
) + sizeof(*ihrv3
);
1997 void *zero_grec
= (void *)&igmpv3_zero_grec
;
1998 struct iphdr
*iph
= ip_hdr(skb
);
1999 struct amt_group_node
*gnode
;
2000 union amt_addr group
, host
;
2001 struct igmpv3_grec
*grec
;
2005 for (i
= 0; i
< ntohs(ihrv3
->ngrec
); i
++) {
2006 len
+= sizeof(*grec
);
2007 if (!ip_mc_may_pull(skb
, len
))
2010 grec
= (void *)(skb
->data
+ len
- sizeof(*grec
));
2011 nsrcs
= ntohs(grec
->grec_nsrcs
);
2013 len
+= nsrcs
* sizeof(__be32
);
2014 if (!ip_mc_may_pull(skb
, len
))
2017 memset(&group
, 0, sizeof(union amt_addr
));
2018 group
.ip4
= grec
->grec_mca
;
2019 memset(&host
, 0, sizeof(union amt_addr
));
2020 host
.ip4
= iph
->saddr
;
2021 gnode
= amt_lookup_group(tunnel
, &group
, &host
, false);
2023 gnode
= amt_add_group(amt
, tunnel
, &group
, &host
,
2029 amt_add_srcs(amt
, tunnel
, gnode
, grec
, false);
2030 switch (grec
->grec_type
) {
2031 case IGMPV3_MODE_IS_INCLUDE
:
2032 amt_mcast_is_in_handler(amt
, tunnel
, gnode
, grec
,
2035 case IGMPV3_MODE_IS_EXCLUDE
:
2036 amt_mcast_is_ex_handler(amt
, tunnel
, gnode
, grec
,
2039 case IGMPV3_CHANGE_TO_INCLUDE
:
2040 amt_mcast_to_in_handler(amt
, tunnel
, gnode
, grec
,
2043 case IGMPV3_CHANGE_TO_EXCLUDE
:
2044 amt_mcast_to_ex_handler(amt
, tunnel
, gnode
, grec
,
2047 case IGMPV3_ALLOW_NEW_SOURCES
:
2048 amt_mcast_allow_handler(amt
, tunnel
, gnode
, grec
,
2051 case IGMPV3_BLOCK_OLD_SOURCES
:
2052 amt_mcast_block_handler(amt
, tunnel
, gnode
, grec
,
2058 amt_cleanup_srcs(amt
, tunnel
, gnode
);
2062 /* caller held tunnel->lock */
2063 static void amt_igmp_report_handler(struct amt_dev
*amt
, struct sk_buff
*skb
,
2064 struct amt_tunnel_list
*tunnel
)
2066 struct igmphdr
*ih
= igmp_hdr(skb
);
2069 case IGMPV3_HOST_MEMBERSHIP_REPORT
:
2070 amt_igmpv3_report_handler(amt
, skb
, tunnel
);
2072 case IGMPV2_HOST_MEMBERSHIP_REPORT
:
2073 amt_igmpv2_report_handler(amt
, skb
, tunnel
);
2075 case IGMP_HOST_LEAVE_MESSAGE
:
2076 amt_igmpv2_leave_handler(amt
, skb
, tunnel
);
2083 #if IS_ENABLED(CONFIG_IPV6)
2085 * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners
2087 * When Multicast Address Compatibility Mode is MLDv2, a router acts
2088 * using the MLDv2 protocol for that multicast address. When Multicast
2089 * Address Compatibility Mode is MLDv1, a router internally translates
2090 * the following MLDv1 messages for that multicast address to their
2091 * MLDv2 equivalents:
2093 * MLDv1 Message MLDv2 Equivalent
2094 * -------------- -----------------
2095 * Report IS_EX( {} )
2098 static void amt_mldv1_report_handler(struct amt_dev
*amt
, struct sk_buff
*skb
,
2099 struct amt_tunnel_list
*tunnel
)
2101 struct mld_msg
*mld
= (struct mld_msg
*)icmp6_hdr(skb
);
2102 struct ipv6hdr
*ip6h
= ipv6_hdr(skb
);
2103 struct amt_group_node
*gnode
;
2104 union amt_addr group
, host
;
2106 memcpy(&group
.ip6
, &mld
->mld_mca
, sizeof(struct in6_addr
));
2107 memcpy(&host
.ip6
, &ip6h
->saddr
, sizeof(struct in6_addr
));
2109 gnode
= amt_lookup_group(tunnel
, &group
, &host
, true);
2111 gnode
= amt_add_group(amt
, tunnel
, &group
, &host
, true);
2112 if (!IS_ERR(gnode
)) {
2113 gnode
->filter_mode
= MCAST_EXCLUDE
;
2114 if (!mod_delayed_work(amt_wq
, &gnode
->group_timer
,
2115 msecs_to_jiffies(amt_gmi(amt
))))
2122 * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners
2124 * When Multicast Address Compatibility Mode is MLDv2, a router acts
2125 * using the MLDv2 protocol for that multicast address. When Multicast
2126 * Address Compatibility Mode is MLDv1, a router internally translates
2127 * the following MLDv1 messages for that multicast address to their
2128 * MLDv2 equivalents:
2130 * MLDv1 Message MLDv2 Equivalent
2131 * -------------- -----------------
2132 * Report IS_EX( {} )
2135 static void amt_mldv1_leave_handler(struct amt_dev
*amt
, struct sk_buff
*skb
,
2136 struct amt_tunnel_list
*tunnel
)
2138 struct mld_msg
*mld
= (struct mld_msg
*)icmp6_hdr(skb
);
2139 struct iphdr
*iph
= ip_hdr(skb
);
2140 struct amt_group_node
*gnode
;
2141 union amt_addr group
, host
;
2143 memcpy(&group
.ip6
, &mld
->mld_mca
, sizeof(struct in6_addr
));
2144 memset(&host
, 0, sizeof(union amt_addr
));
2145 host
.ip4
= iph
->saddr
;
2147 gnode
= amt_lookup_group(tunnel
, &group
, &host
, true);
2149 amt_del_group(amt
, gnode
);
2154 static void amt_mldv2_report_handler(struct amt_dev
*amt
, struct sk_buff
*skb
,
2155 struct amt_tunnel_list
*tunnel
)
2157 struct mld2_report
*mld2r
= (struct mld2_report
*)icmp6_hdr(skb
);
2158 int len
= skb_transport_offset(skb
) + sizeof(*mld2r
);
2159 void *zero_grec
= (void *)&mldv2_zero_grec
;
2160 struct ipv6hdr
*ip6h
= ipv6_hdr(skb
);
2161 struct amt_group_node
*gnode
;
2162 union amt_addr group
, host
;
2163 struct mld2_grec
*grec
;
2167 for (i
= 0; i
< ntohs(mld2r
->mld2r_ngrec
); i
++) {
2168 len
+= sizeof(*grec
);
2169 if (!ipv6_mc_may_pull(skb
, len
))
2172 grec
= (void *)(skb
->data
+ len
- sizeof(*grec
));
2173 nsrcs
= ntohs(grec
->grec_nsrcs
);
2175 len
+= nsrcs
* sizeof(struct in6_addr
);
2176 if (!ipv6_mc_may_pull(skb
, len
))
2179 memset(&group
, 0, sizeof(union amt_addr
));
2180 group
.ip6
= grec
->grec_mca
;
2181 memset(&host
, 0, sizeof(union amt_addr
));
2182 host
.ip6
= ip6h
->saddr
;
2183 gnode
= amt_lookup_group(tunnel
, &group
, &host
, true);
2185 gnode
= amt_add_group(amt
, tunnel
, &group
, &host
,
2191 amt_add_srcs(amt
, tunnel
, gnode
, grec
, true);
2192 switch (grec
->grec_type
) {
2193 case MLD2_MODE_IS_INCLUDE
:
2194 amt_mcast_is_in_handler(amt
, tunnel
, gnode
, grec
,
2197 case MLD2_MODE_IS_EXCLUDE
:
2198 amt_mcast_is_ex_handler(amt
, tunnel
, gnode
, grec
,
2201 case MLD2_CHANGE_TO_INCLUDE
:
2202 amt_mcast_to_in_handler(amt
, tunnel
, gnode
, grec
,
2205 case MLD2_CHANGE_TO_EXCLUDE
:
2206 amt_mcast_to_ex_handler(amt
, tunnel
, gnode
, grec
,
2209 case MLD2_ALLOW_NEW_SOURCES
:
2210 amt_mcast_allow_handler(amt
, tunnel
, gnode
, grec
,
2213 case MLD2_BLOCK_OLD_SOURCES
:
2214 amt_mcast_block_handler(amt
, tunnel
, gnode
, grec
,
2220 amt_cleanup_srcs(amt
, tunnel
, gnode
);
2224 /* caller held tunnel->lock */
2225 static void amt_mld_report_handler(struct amt_dev
*amt
, struct sk_buff
*skb
,
2226 struct amt_tunnel_list
*tunnel
)
2228 struct mld_msg
*mld
= (struct mld_msg
*)icmp6_hdr(skb
);
2230 switch (mld
->mld_type
) {
2231 case ICMPV6_MGM_REPORT
:
2232 amt_mldv1_report_handler(amt
, skb
, tunnel
);
2234 case ICMPV6_MLD2_REPORT
:
2235 amt_mldv2_report_handler(amt
, skb
, tunnel
);
2237 case ICMPV6_MGM_REDUCTION
:
2238 amt_mldv1_leave_handler(amt
, skb
, tunnel
);
2246 static bool amt_advertisement_handler(struct amt_dev
*amt
, struct sk_buff
*skb
)
2248 struct amt_header_advertisement
*amta
;
2251 hdr_size
= sizeof(*amta
) + sizeof(struct udphdr
);
2252 if (!pskb_may_pull(skb
, hdr_size
))
2255 amta
= (struct amt_header_advertisement
*)(udp_hdr(skb
) + 1);
2259 if (amta
->reserved
|| amta
->version
)
2262 if (ipv4_is_loopback(amta
->ip4
) || ipv4_is_multicast(amta
->ip4
) ||
2263 ipv4_is_zeronet(amta
->ip4
))
2266 if (amt
->status
!= AMT_STATUS_SENT_DISCOVERY
||
2267 amt
->nonce
!= amta
->nonce
)
2270 amt
->remote_ip
= amta
->ip4
;
2271 netdev_dbg(amt
->dev
, "advertised remote ip = %pI4\n", &amt
->remote_ip
);
2272 mod_delayed_work(amt_wq
, &amt
->req_wq
, 0);
2274 amt_update_gw_status(amt
, AMT_STATUS_RECEIVED_ADVERTISEMENT
, true);
2278 static bool amt_multicast_data_handler(struct amt_dev
*amt
, struct sk_buff
*skb
)
2280 struct amt_header_mcast_data
*amtmd
;
2281 int hdr_size
, len
, err
;
2285 if (READ_ONCE(amt
->status
) != AMT_STATUS_SENT_UPDATE
)
2288 hdr_size
= sizeof(*amtmd
) + sizeof(struct udphdr
);
2289 if (!pskb_may_pull(skb
, hdr_size
))
2292 amtmd
= (struct amt_header_mcast_data
*)(udp_hdr(skb
) + 1);
2293 if (amtmd
->reserved
|| amtmd
->version
)
2296 if (iptunnel_pull_header(skb
, hdr_size
, htons(ETH_P_IP
), false))
2299 skb_reset_network_header(skb
);
2300 skb_push(skb
, sizeof(*eth
));
2301 skb_reset_mac_header(skb
);
2302 skb_pull(skb
, sizeof(*eth
));
2305 if (!pskb_may_pull(skb
, sizeof(*iph
)))
2309 if (iph
->version
== 4) {
2310 if (!ipv4_is_multicast(iph
->daddr
))
2312 skb
->protocol
= htons(ETH_P_IP
);
2313 eth
->h_proto
= htons(ETH_P_IP
);
2314 ip_eth_mc_map(iph
->daddr
, eth
->h_dest
);
2315 #if IS_ENABLED(CONFIG_IPV6)
2316 } else if (iph
->version
== 6) {
2317 struct ipv6hdr
*ip6h
;
2319 if (!pskb_may_pull(skb
, sizeof(*ip6h
)))
2322 ip6h
= ipv6_hdr(skb
);
2323 if (!ipv6_addr_is_multicast(&ip6h
->daddr
))
2325 skb
->protocol
= htons(ETH_P_IPV6
);
2326 eth
->h_proto
= htons(ETH_P_IPV6
);
2327 ipv6_eth_mc_map(&ip6h
->daddr
, eth
->h_dest
);
2333 skb
->pkt_type
= PACKET_MULTICAST
;
2334 skb
->ip_summed
= CHECKSUM_NONE
;
2336 err
= gro_cells_receive(&amt
->gro_cells
, skb
);
2337 if (likely(err
== NET_RX_SUCCESS
))
2338 dev_sw_netstats_rx_add(amt
->dev
, len
);
2340 amt
->dev
->stats
.rx_dropped
++;
static bool amt_membership_query_handler(struct amt_dev *amt,
					 struct sk_buff *skb)
{
	struct amt_header_membership_query *amtmq;
	struct igmpv3_query *ihv3;
	struct ethhdr *eth, *oeth;
	struct iphdr *iph;
	int hdr_size, len;

	hdr_size = sizeof(*amtmq) + sizeof(struct udphdr);
	if (!pskb_may_pull(skb, hdr_size))
		return true;

	amtmq = (struct amt_header_membership_query *)(udp_hdr(skb) + 1);
	if (amtmq->reserved || amtmq->version)
		return true;

	if (amtmq->nonce != amt->nonce)
		return true;

	hdr_size -= sizeof(*eth);
	if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false))
		return true;

	oeth = eth_hdr(skb);
	skb_reset_mac_header(skb);
	skb_pull(skb, sizeof(*eth));
	skb_reset_network_header(skb);
	eth = eth_hdr(skb);

	if (!pskb_may_pull(skb, sizeof(*iph)))
		return true;

	iph = ip_hdr(skb);
	if (iph->version == 4) {
		if (READ_ONCE(amt->ready4))
			return true;

		if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS +
				   sizeof(*ihv3)))
			return true;

		if (!ipv4_is_multicast(iph->daddr))
			return true;

		ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
		skb_reset_transport_header(skb);
		skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
		WRITE_ONCE(amt->ready4, true);
		amt->mac = amtmq->response_mac;
		amt->req_cnt = 0;
		amt->qi = ihv3->qqic;
		skb->protocol = htons(ETH_P_IP);
		eth->h_proto = htons(ETH_P_IP);
		ip_eth_mc_map(iph->daddr, eth->h_dest);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (iph->version == 6) {
		struct mld2_query *mld2q;
		struct ipv6hdr *ip6h;

		if (READ_ONCE(amt->ready6))
			return true;

		if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS +
				   sizeof(*mld2q)))
			return true;

		ip6h = ipv6_hdr(skb);
		if (!ipv6_addr_is_multicast(&ip6h->daddr))
			return true;

		mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
		skb_reset_transport_header(skb);
		skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
		WRITE_ONCE(amt->ready6, true);
		amt->mac = amtmq->response_mac;
		amt->req_cnt = 0;
		amt->qi = mld2q->mld2q_qqic;
		skb->protocol = htons(ETH_P_IPV6);
		eth->h_proto = htons(ETH_P_IPV6);
		ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
#endif
	} else {
		return true;
	}

	ether_addr_copy(eth->h_source, oeth->h_source);
	skb->pkt_type = PACKET_MULTICAST;
	skb->ip_summed = CHECKSUM_NONE;
	len = skb->len;
	local_bh_disable();
	if (__netif_rx(skb) == NET_RX_SUCCESS) {
		amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
		dev_sw_netstats_rx_add(amt->dev, len);
	} else {
		amt->dev->stats.rx_dropped++;
	}
	local_bh_enable();

	return false;
}

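/* Relay side: handle an AMT Membership Update. The gateway is
 * authenticated by matching its nonce and response MAC against the
 * tunnel entry before the inner IGMP/MLD report updates the tunnel's
 * group/source tables.
 */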
static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
{
	struct amt_header_membership_update *amtmu;
	struct amt_tunnel_list *tunnel;
	struct ethhdr *eth;
	struct iphdr *iph;
	int len, hdr_size;

	iph = ip_hdr(skb);

	hdr_size = sizeof(*amtmu) + sizeof(struct udphdr);
	if (!pskb_may_pull(skb, hdr_size))
		return true;

	amtmu = (struct amt_header_membership_update *)(udp_hdr(skb) + 1);
	if (amtmu->reserved || amtmu->version)
		return true;

	if (iptunnel_pull_header(skb, hdr_size, skb->protocol, false))
		return true;

	skb_reset_network_header(skb);

	list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
		if (tunnel->ip4 == iph->saddr) {
			if ((amtmu->nonce == tunnel->nonce &&
			     amtmu->response_mac == tunnel->mac)) {
				mod_delayed_work(amt_wq, &tunnel->gc_wq,
						 msecs_to_jiffies(amt_gmi(amt))
								  * 3);
				goto report;
			}
			netdev_dbg(amt->dev, "Invalid MAC\n");
			return true;
		}
	}

	return true;

report:
	if (!pskb_may_pull(skb, sizeof(*iph)))
		return true;

	iph = ip_hdr(skb);
	if (iph->version == 4) {
		if (ip_mc_check_igmp(skb)) {
			netdev_dbg(amt->dev, "Invalid IGMP\n");
			return true;
		}

		spin_lock_bh(&tunnel->lock);
		amt_igmp_report_handler(amt, skb, tunnel);
		spin_unlock_bh(&tunnel->lock);

		skb_push(skb, sizeof(struct ethhdr));
		skb_reset_mac_header(skb);
		eth = eth_hdr(skb);
		skb->protocol = htons(ETH_P_IP);
		eth->h_proto = htons(ETH_P_IP);
		ip_eth_mc_map(iph->daddr, eth->h_dest);
#if IS_ENABLED(CONFIG_IPV6)
	} else if (iph->version == 6) {
		struct ipv6hdr *ip6h = ipv6_hdr(skb);

		if (ipv6_mc_check_mld(skb)) {
			netdev_dbg(amt->dev, "Invalid MLD\n");
			return true;
		}

		spin_lock_bh(&tunnel->lock);
		amt_mld_report_handler(amt, skb, tunnel);
		spin_unlock_bh(&tunnel->lock);

		skb_push(skb, sizeof(struct ethhdr));
		skb_reset_mac_header(skb);
		eth = eth_hdr(skb);
		skb->protocol = htons(ETH_P_IPV6);
		eth->h_proto = htons(ETH_P_IPV6);
		ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
#endif
	} else {
		netdev_dbg(amt->dev, "Unsupported Protocol\n");
		return true;
	}

	skb_pull(skb, sizeof(struct ethhdr));
	skb->pkt_type = PACKET_MULTICAST;
	skb->ip_summed = CHECKSUM_NONE;
	len = skb->len;
	local_bh_disable();
	if (__netif_rx(skb) == NET_RX_SUCCESS) {
		amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_UPDATE,
					true);
		dev_sw_netstats_rx_add(amt->dev, len);
	} else {
		amt->dev->stats.rx_dropped++;
	}
	local_bh_enable();

	return false;
}

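/* Relay side: build and send an AMT Relay Advertisement carrying our
 * unicast relay address, echoing the nonce of the triggering Relay
 * Discovery message.
 */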
static void amt_send_advertisement(struct amt_dev *amt, __be32 nonce,
				   __be32 daddr, __be16 dport)
{
	struct amt_header_advertisement *amta;
	int hlen, tlen, offset;
	struct socket *sock;
	struct udphdr *udph;
	struct sk_buff *skb;
	struct iphdr *iph;
	struct rtable *rt;
	struct flowi4 fl4;
	u32 len;
	int err;

	rcu_read_lock();
	sock = rcu_dereference(amt->sock);
	if (!sock)
		goto out;

	if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
		goto out;

	rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
				   daddr, amt->local_ip,
				   dport, amt->relay_port,
				   IPPROTO_UDP, IPTOS_PREC_INTERNETCONTROL,
				   amt->stream_dev->ifindex);
	if (IS_ERR(rt)) {
		amt->dev->stats.tx_errors++;
		goto out;
	}

	hlen = LL_RESERVED_SPACE(amt->dev);
	tlen = amt->dev->needed_tailroom;
	len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amta);
	skb = netdev_alloc_skb_ip_align(amt->dev, len);
	if (!skb) {
		ip_rt_put(rt);
		amt->dev->stats.tx_errors++;
		goto out;
	}

	skb->priority = TC_PRIO_CONTROL;
	skb_dst_set(skb, &rt->dst);

	len = sizeof(*iph) + sizeof(*udph) + sizeof(*amta);
	skb_reset_network_header(skb);
	skb_put(skb, len);
	amta = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
	amta->version = 0;
	amta->type = AMT_MSG_ADVERTISEMENT;
	amta->reserved = 0;
	amta->nonce = nonce;
	amta->ip4 = amt->local_ip;
	skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);
	udph->source = amt->relay_port;
	udph->dest = dport;
	udph->len = htons(sizeof(*amta) + sizeof(*udph));
	udph->check = 0;
	offset = skb_transport_offset(skb);
	skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
	udph->check = csum_tcpudp_magic(amt->local_ip, daddr,
					sizeof(*udph) + sizeof(*amta),
					IPPROTO_UDP, skb->csum);

	skb_push(skb, sizeof(*iph));
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = (sizeof(struct iphdr)) >> 2;
	iph->tos = AMT_TOS;
	iph->frag_off = 0;
	iph->ttl = ip4_dst_hoplimit(&rt->dst);
	iph->daddr = daddr;
	iph->saddr = amt->local_ip;
	iph->protocol = IPPROTO_UDP;
	iph->tot_len = htons(len);

	skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(amt->net, skb, NULL);
	ip_send_check(iph);
	err = ip_local_out(amt->net, sock->sk, skb);
	if (unlikely(net_xmit_eval(err)))
		amt->dev->stats.tx_errors++;

out:
	rcu_read_unlock();
}

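/* Relay side: answer an AMT Relay Discovery with an Advertisement sent
 * back to the discovering source address and UDP port.
 */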
static bool amt_discovery_handler(struct amt_dev *amt, struct sk_buff *skb)
{
	struct amt_header_discovery *amtd;
	struct udphdr *udph;
	struct iphdr *iph;

	if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtd)))
		return true;

	iph = ip_hdr(skb);
	udph = udp_hdr(skb);
	amtd = (struct amt_header_discovery *)(udp_hdr(skb) + 1);

	if (amtd->reserved || amtd->version)
		return true;

	amt_send_advertisement(amt, amtd->nonce, iph->saddr, udph->source);

	return false;
}

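/* Relay side: handle an AMT Request. The tunnel entry for the source is
 * reused if it exists; otherwise a new one is allocated, bounded by
 * max_tunnels. A response MAC derived with siphash from the source
 * address, source port, and nonce is returned inside a general query.
 */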
static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
{
	struct amt_header_request *amtrh;
	struct amt_tunnel_list *tunnel;
	unsigned long long key;
	struct udphdr *udph;
	struct iphdr *iph;
	u64 mac;
	int i;

	if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtrh)))
		return true;

	iph = ip_hdr(skb);
	udph = udp_hdr(skb);
	amtrh = (struct amt_header_request *)(udp_hdr(skb) + 1);

	if (amtrh->reserved1 || amtrh->reserved2 || amtrh->version)
		return true;

	list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list)
		if (tunnel->ip4 == iph->saddr)
			goto send;

	spin_lock_bh(&amt->lock);
	if (amt->nr_tunnels >= amt->max_tunnels) {
		spin_unlock_bh(&amt->lock);
		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
		return true;
	}

	tunnel = kzalloc(sizeof(*tunnel) +
			 (sizeof(struct hlist_head) * amt->hash_buckets),
			 GFP_ATOMIC);
	if (!tunnel) {
		spin_unlock_bh(&amt->lock);
		return true;
	}

	tunnel->source_port = udph->source;
	tunnel->ip4 = iph->saddr;

	memcpy(&key, &tunnel->key, sizeof(unsigned long long));
	tunnel->amt = amt;
	spin_lock_init(&tunnel->lock);
	for (i = 0; i < amt->hash_buckets; i++)
		INIT_HLIST_HEAD(&tunnel->groups[i]);

	INIT_DELAYED_WORK(&tunnel->gc_wq, amt_tunnel_expire);

	list_add_tail_rcu(&tunnel->list, &amt->tunnel_list);
	tunnel->key = amt->key;
	__amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
	amt->nr_tunnels++;
	mod_delayed_work(amt_wq, &tunnel->gc_wq,
			 msecs_to_jiffies(amt_gmi(amt)));
	spin_unlock_bh(&amt->lock);

send:
	tunnel->nonce = amtrh->nonce;
	mac = siphash_3u32((__force u32)tunnel->ip4,
			   (__force u32)tunnel->source_port,
			   (__force u32)tunnel->nonce,
			   &tunnel->key);
	tunnel->mac = mac >> 16;

	if (!netif_running(amt->dev) || !netif_running(amt->stream_dev))
		return true;

	if (!amtrh->p)
		amt_send_igmp_gq(amt, tunnel);
	else
		amt_send_mld_gq(amt, tunnel);

	return false;
}

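/* Deferred receive path for gateway mode: dispatch a packet taken off
 * the event queue to the advertisement or membership query handler.
 */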
static void amt_gw_rcv(struct amt_dev *amt, struct sk_buff *skb)
{
	int type = amt_parse_type(skb);
	int err = 1;

	if (type == -1)
		goto drop;

	if (amt->mode == AMT_MODE_GATEWAY) {
		switch (type) {
		case AMT_MSG_ADVERTISEMENT:
			err = amt_advertisement_handler(amt, skb);
			break;
		case AMT_MSG_MEMBERSHIP_QUERY:
			err = amt_membership_query_handler(amt, skb);
			if (!err)
				return;
			break;
		default:
			netdev_dbg(amt->dev, "Invalid type of Gateway\n");
			break;
		}
	}
drop:
	if (err) {
		amt->dev->stats.rx_dropped++;
		kfree_skb(skb);
	} else {
		consume_skb(skb);
	}
}

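/* encap_rcv() handler of the UDP tunnel socket. Control messages are
 * queued to the event workqueue; multicast data and relay-side messages
 * are handled in place.
 */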
static int amt_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct amt_dev *amt;
	struct iphdr *iph;
	int type;
	bool err = false;

	rcu_read_lock_bh();
	amt = rcu_dereference_sk_user_data(sk);
	if (!amt) {
		err = true;
		kfree_skb(skb);
		goto out;
	}

	skb->dev = amt->dev;
	iph = ip_hdr(skb);
	type = amt_parse_type(skb);
	if (type == -1) {
		err = true;
		goto drop;
	}

	if (amt->mode == AMT_MODE_GATEWAY) {
		switch (type) {
		case AMT_MSG_ADVERTISEMENT:
			if (iph->saddr != amt->discovery_ip) {
				netdev_dbg(amt->dev, "Invalid Relay IP\n");
				err = true;
				goto drop;
			}
			if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
				netdev_dbg(amt->dev, "AMT Event queue full\n");
				err = true;
				goto drop;
			}
			goto out;
		case AMT_MSG_MULTICAST_DATA:
			if (iph->saddr != amt->remote_ip) {
				netdev_dbg(amt->dev, "Invalid Relay IP\n");
				err = true;
				goto drop;
			}
			err = amt_multicast_data_handler(amt, skb);
			if (err)
				goto drop;
			else
				goto out;
		case AMT_MSG_MEMBERSHIP_QUERY:
			if (iph->saddr != amt->remote_ip) {
				netdev_dbg(amt->dev, "Invalid Relay IP\n");
				err = true;
				goto drop;
			}
			if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) {
				netdev_dbg(amt->dev, "AMT Event queue full\n");
				err = true;
				goto drop;
			}
			goto out;
		default:
			err = true;
			netdev_dbg(amt->dev, "Invalid type of Gateway\n");
			break;
		}
	} else {
		switch (type) {
		case AMT_MSG_DISCOVERY:
			err = amt_discovery_handler(amt, skb);
			break;
		case AMT_MSG_REQUEST:
			err = amt_request_handler(amt, skb);
			break;
		case AMT_MSG_MEMBERSHIP_UPDATE:
			err = amt_update_handler(amt, skb);
			if (err)
				goto drop;
			else
				goto out;
		default:
			err = true;
			netdev_dbg(amt->dev, "Invalid type of relay\n");
			break;
		}
	}
drop:
	if (err) {
		amt->dev->stats.rx_dropped++;
		kfree_skb(skb);
	} else {
		consume_skb(skb);
	}
out:
	rcu_read_unlock_bh();
	return 0;
}

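/* Drain up to AMT_MAX_EVENTS entries from the event ring under amt->lock,
 * running each handler with the lock dropped.
 */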
static void amt_event_work(struct work_struct *work)
{
	struct amt_dev *amt = container_of(work, struct amt_dev, event_wq);
	struct sk_buff *skb;
	u8 event;
	int i;

	for (i = 0; i < AMT_MAX_EVENTS; i++) {
		spin_lock_bh(&amt->lock);
		if (amt->nr_events == 0) {
			spin_unlock_bh(&amt->lock);
			return;
		}
		event = amt->events[amt->event_idx].event;
		skb = amt->events[amt->event_idx].skb;
		amt->events[amt->event_idx].event = AMT_EVENT_NONE;
		amt->events[amt->event_idx].skb = NULL;
		amt->nr_events--;
		amt->event_idx++;
		amt->event_idx %= AMT_MAX_EVENTS;
		spin_unlock_bh(&amt->lock);

		switch (event) {
		case AMT_EVENT_RECEIVE:
			amt_gw_rcv(amt, skb);
			break;
		case AMT_EVENT_SEND_DISCOVERY:
			amt_event_send_discovery(amt);
			break;
		case AMT_EVENT_SEND_REQUEST:
			amt_event_send_request(amt);
			break;
		default:
			if (skb)
				kfree_skb(skb);
			break;
		}
	}
}

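/* encap_err_lookup() handler: on an ICMP unreachable error for a sent
 * control message, re-arm the request work so discovery/request is
 * retried.
 */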
static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
{
	struct amt_dev *amt;
	int type;

	rcu_read_lock_bh();
	amt = rcu_dereference_sk_user_data(sk);
	if (!amt)
		goto out;

	if (amt->mode != AMT_MODE_GATEWAY)
		goto drop;

	type = amt_parse_type(skb);
	if (type == -1)
		goto drop;

	netdev_dbg(amt->dev, "Received IGMP Unreachable of %s\n",
		   type_str[type]);
	switch (type) {
	case AMT_MSG_DISCOVERY:
		break;
	case AMT_MSG_REQUEST:
	case AMT_MSG_MEMBERSHIP_UPDATE:
		if (READ_ONCE(amt->status) >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
			mod_delayed_work(amt_wq, &amt->req_wq, 0);
		break;
	default:
		goto drop;
	}
out:
	rcu_read_unlock_bh();
	return 0;
drop:
	rcu_read_unlock_bh();
	amt->dev->stats.rx_dropped++;
	return 0;
}

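/* Create the kernel UDP socket that carries all AMT traffic for this
 * device; amt_socket_create() below registers it as an encapsulation
 * socket.
 */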
static struct socket *amt_create_sock(struct net *net, __be16 port)
{
	struct udp_port_cfg udp_conf;
	struct socket *sock;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));
	udp_conf.family = AF_INET;
	udp_conf.local_ip.s_addr = htonl(INADDR_ANY);

	udp_conf.local_udp_port = port;

	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}

static int amt_socket_create(struct amt_dev *amt)
{
	struct udp_tunnel_sock_cfg tunnel_cfg;
	struct socket *sock;

	sock = amt_create_sock(amt->net, amt->relay_port);
	if (IS_ERR(sock))
		return PTR_ERR(sock);

	/* Mark socket as an encapsulation socket */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = amt;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = amt_rcv;
	tunnel_cfg.encap_err_lookup = amt_err_lookup;
	tunnel_cfg.encap_destroy = NULL;
	setup_udp_tunnel_sock(amt->net, sock, &tunnel_cfg);

	rcu_assign_pointer(amt->sock, sock);
	return 0;
}

static int amt_dev_open(struct net_device *dev)
{
	struct amt_dev *amt = netdev_priv(dev);
	int err;

	amt->ready4 = false;
	amt->ready6 = false;
	amt->event_idx = 0;
	amt->nr_events = 0;

	err = amt_socket_create(amt);
	if (err)
		return err;

	amt->req_cnt = 0;
	amt->remote_ip = 0;
	amt->nonce = 0;
	get_random_bytes(&amt->key, sizeof(siphash_key_t));

	amt->status = AMT_STATUS_INIT;
	if (amt->mode == AMT_MODE_GATEWAY) {
		mod_delayed_work(amt_wq, &amt->discovery_wq, 0);
		mod_delayed_work(amt_wq, &amt->req_wq, 0);
	} else if (amt->mode == AMT_MODE_RELAY) {
		mod_delayed_work(amt_wq, &amt->secret_wq,
				 msecs_to_jiffies(AMT_SECRET_TIMEOUT));
	}
	return err;
}

static int amt_dev_stop(struct net_device *dev)
{
	struct amt_dev *amt = netdev_priv(dev);
	struct amt_tunnel_list *tunnel, *tmp;
	struct socket *sock;
	struct sk_buff *skb;
	int i;

	cancel_delayed_work_sync(&amt->req_wq);
	cancel_delayed_work_sync(&amt->discovery_wq);
	cancel_delayed_work_sync(&amt->secret_wq);

	/* shutdown */
	sock = rtnl_dereference(amt->sock);
	RCU_INIT_POINTER(amt->sock, NULL);
	synchronize_net();
	if (sock)
		udp_tunnel_sock_release(sock);

	cancel_work_sync(&amt->event_wq);
	for (i = 0; i < AMT_MAX_EVENTS; i++) {
		skb = amt->events[i].skb;
		kfree_skb(skb);
		amt->events[i].event = AMT_EVENT_NONE;
		amt->events[i].skb = NULL;
	}

	amt->ready4 = false;
	amt->ready6 = false;
	amt->req_cnt = 0;
	amt->remote_ip = 0;

	list_for_each_entry_safe(tunnel, tmp, &amt->tunnel_list, list) {
		list_del_rcu(&tunnel->list);
		amt->nr_tunnels--;
		cancel_delayed_work_sync(&tunnel->gc_wq);
		amt_clear_groups(tunnel);
		kfree_rcu(tunnel, rcu);
	}

	return 0;
}

static const struct device_type amt_type = {
	.name = "amt",
};

static int amt_dev_init(struct net_device *dev)
{
	struct amt_dev *amt = netdev_priv(dev);
	int err;

	amt->dev = dev;

	err = gro_cells_init(&amt->gro_cells, dev);
	if (err)
		return err;

	return 0;
}

static void amt_dev_uninit(struct net_device *dev)
{
	struct amt_dev *amt = netdev_priv(dev);

	gro_cells_destroy(&amt->gro_cells);
}

static const struct net_device_ops amt_netdev_ops = {
	.ndo_init		= amt_dev_init,
	.ndo_uninit		= amt_dev_uninit,
	.ndo_open		= amt_dev_open,
	.ndo_stop		= amt_dev_stop,
	.ndo_start_xmit		= amt_dev_xmit,
};

static void amt_link_setup(struct net_device *dev)
{
	dev->netdev_ops = &amt_netdev_ops;
	dev->needs_free_netdev = true;
	SET_NETDEV_DEVTYPE(dev, &amt_type);
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = ETH_MAX_MTU;
	dev->type = ARPHRD_NONE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->lltx = true;
	dev->netns_local = true;
	dev->features |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
	eth_hw_addr_random(dev);
	eth_zero_addr(dev->broadcast);
	ether_setup(dev);
}

static const struct nla_policy amt_policy[IFLA_AMT_MAX + 1] = {
	[IFLA_AMT_MODE]		= { .type = NLA_U32 },
	[IFLA_AMT_RELAY_PORT]	= { .type = NLA_U16 },
	[IFLA_AMT_GATEWAY_PORT]	= { .type = NLA_U16 },
	[IFLA_AMT_LINK]		= { .type = NLA_U32 },
	[IFLA_AMT_LOCAL_IP]	= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_AMT_REMOTE_IP]	= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_AMT_DISCOVERY_IP]	= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_AMT_MAX_TUNNELS]	= { .type = NLA_U32 },
};

static int amt_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (!data)
		return -EINVAL;

	if (!data[IFLA_AMT_LINK]) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LINK],
				    "Link attribute is required");
		return -EINVAL;
	}

	if (!data[IFLA_AMT_MODE]) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE],
				    "Mode attribute is required");
		return -EINVAL;
	}

	if (nla_get_u32(data[IFLA_AMT_MODE]) > AMT_MODE_MAX) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE],
				    "Mode attribute is not valid");
		return -EINVAL;
	}

	if (!data[IFLA_AMT_LOCAL_IP]) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LOCAL_IP],
				    "Local attribute is required");
		return -EINVAL;
	}

	if (!data[IFLA_AMT_DISCOVERY_IP] &&
	    nla_get_u32(data[IFLA_AMT_MODE]) == AMT_MODE_GATEWAY) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_DISCOVERY_IP],
				    "Discovery attribute is required");
		return -EINVAL;
	}

	return 0;
}

static int amt_newlink(struct net *net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct amt_dev *amt = netdev_priv(dev);
	int err = -EINVAL;

	amt->net = net;
	amt->mode = nla_get_u32(data[IFLA_AMT_MODE]);

	if (data[IFLA_AMT_MAX_TUNNELS] &&
	    nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]))
		amt->max_tunnels = nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]);
	else
		amt->max_tunnels = AMT_MAX_TUNNELS;

	spin_lock_init(&amt->lock);
	amt->max_groups = AMT_MAX_GROUP;
	amt->max_sources = AMT_MAX_SOURCE;
	amt->hash_buckets = AMT_HSIZE;
	amt->nr_tunnels = 0;
	get_random_bytes(&amt->hash_seed, sizeof(amt->hash_seed));
	amt->stream_dev = dev_get_by_index(net,
					   nla_get_u32(data[IFLA_AMT_LINK]));
	if (!amt->stream_dev) {
		NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
				    "Can't find stream device");
		return -ENODEV;
	}

	if (amt->stream_dev->type != ARPHRD_ETHER) {
		NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
				    "Invalid stream device type");
		goto err;
	}

	amt->local_ip = nla_get_in_addr(data[IFLA_AMT_LOCAL_IP]);
	if (ipv4_is_loopback(amt->local_ip) ||
	    ipv4_is_zeronet(amt->local_ip) ||
	    ipv4_is_multicast(amt->local_ip)) {
		NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LOCAL_IP],
				    "Invalid Local address");
		goto err;
	}

	amt->relay_port = nla_get_be16_default(data[IFLA_AMT_RELAY_PORT],
					       htons(IANA_AMT_UDP_PORT));

	amt->gw_port = nla_get_be16_default(data[IFLA_AMT_GATEWAY_PORT],
					    htons(IANA_AMT_UDP_PORT));

	if (!amt->relay_port) {
		NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_RELAY_PORT],
				    "relay port must not be 0");
		goto err;
	}

	if (amt->mode == AMT_MODE_RELAY) {
		amt->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv);
		amt->qri = 10;
		dev->needed_headroom = amt->stream_dev->needed_headroom +
				       AMT_RELAY_HLEN;
		dev->mtu = amt->stream_dev->mtu - AMT_RELAY_HLEN;
		dev->max_mtu = dev->mtu;
		dev->min_mtu = ETH_MIN_MTU + AMT_RELAY_HLEN;
	} else {
		if (!data[IFLA_AMT_DISCOVERY_IP]) {
			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
					    "discovery must be set in gateway mode");
			goto err;
		}
		if (!amt->gw_port) {
			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_GATEWAY_PORT],
					    "gateway port must not be 0");
			goto err;
		}
		amt->remote_ip = 0;
		amt->discovery_ip = nla_get_in_addr(data[IFLA_AMT_DISCOVERY_IP]);
		if (ipv4_is_loopback(amt->discovery_ip) ||
		    ipv4_is_zeronet(amt->discovery_ip) ||
		    ipv4_is_multicast(amt->discovery_ip)) {
			NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
					    "discovery must be unicast");
			goto err;
		}

		dev->needed_headroom = amt->stream_dev->needed_headroom +
				       AMT_GW_HLEN;
		dev->mtu = amt->stream_dev->mtu - AMT_GW_HLEN;
		dev->max_mtu = dev->mtu;
		dev->min_mtu = ETH_MIN_MTU + AMT_GW_HLEN;
	}
	amt->qi = AMT_INIT_QUERY_INTERVAL;

	err = register_netdevice(dev);
	if (err < 0) {
		netdev_dbg(dev, "failed to register new netdev %d\n", err);
		goto err;
	}

	err = netdev_upper_dev_link(amt->stream_dev, dev, extack);
	if (err < 0) {
		unregister_netdevice(dev);
		goto err;
	}

	INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work);
	INIT_DELAYED_WORK(&amt->req_wq, amt_req_work);
	INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work);
	INIT_WORK(&amt->event_wq, amt_event_work);
	INIT_LIST_HEAD(&amt->tunnel_list);
	return 0;
err:
	dev_put(amt->stream_dev);
	return err;
}

static void amt_dellink(struct net_device *dev, struct list_head *head)
{
	struct amt_dev *amt = netdev_priv(dev);

	unregister_netdevice_queue(dev, head);
	netdev_upper_dev_unlink(amt->stream_dev, dev);
	dev_put(amt->stream_dev);
}

static size_t amt_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MODE */
	       nla_total_size(sizeof(__u16)) + /* IFLA_AMT_RELAY_PORT */
	       nla_total_size(sizeof(__u16)) + /* IFLA_AMT_GATEWAY_PORT */
	       nla_total_size(sizeof(__u32)) + /* IFLA_AMT_LINK */
	       nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MAX_TUNNELS */
	       nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_DISCOVERY_IP */
	       nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_REMOTE_IP */
	       nla_total_size(sizeof(struct iphdr)); /* IFLA_AMT_LOCAL_IP */
}

static int amt_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct amt_dev *amt = netdev_priv(dev);

	if (nla_put_u32(skb, IFLA_AMT_MODE, amt->mode))
		goto nla_put_failure;
	if (nla_put_be16(skb, IFLA_AMT_RELAY_PORT, amt->relay_port))
		goto nla_put_failure;
	if (nla_put_be16(skb, IFLA_AMT_GATEWAY_PORT, amt->gw_port))
		goto nla_put_failure;
	if (nla_put_u32(skb, IFLA_AMT_LINK, amt->stream_dev->ifindex))
		goto nla_put_failure;
	if (nla_put_in_addr(skb, IFLA_AMT_LOCAL_IP, amt->local_ip))
		goto nla_put_failure;
	if (nla_put_in_addr(skb, IFLA_AMT_DISCOVERY_IP, amt->discovery_ip))
		goto nla_put_failure;
	if (amt->remote_ip)
		if (nla_put_in_addr(skb, IFLA_AMT_REMOTE_IP, amt->remote_ip))
			goto nla_put_failure;
	if (nla_put_u32(skb, IFLA_AMT_MAX_TUNNELS, amt->max_tunnels))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops amt_link_ops __read_mostly = {
	.kind		= "amt",
	.maxtype	= IFLA_AMT_MAX,
	.policy		= amt_policy,
	.priv_size	= sizeof(struct amt_dev),
	.setup		= amt_link_setup,
	.validate	= amt_validate,
	.newlink	= amt_newlink,
	.dellink	= amt_dellink,
	.get_size	= amt_get_size,
	.fill_info	= amt_fill_info,
};

static struct net_device *amt_lookup_upper_dev(struct net_device *dev)
{
	struct net_device *upper_dev;
	struct amt_dev *amt;

	for_each_netdev(dev_net(dev), upper_dev) {
		if (netif_is_amt(upper_dev)) {
			amt = netdev_priv(upper_dev);
			if (amt->stream_dev == dev)
				return upper_dev;
		}
	}

	return NULL;
}

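/* Track the lower (stream) device: tear the AMT device down when the
 * lower device is unregistered and follow its MTU changes.
 */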
static int amt_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net_device *upper_dev;
	struct amt_dev *amt;
	LIST_HEAD(list);
	int new_mtu;

	upper_dev = amt_lookup_upper_dev(dev);
	if (!upper_dev)
		return NOTIFY_DONE;
	amt = netdev_priv(upper_dev);

	switch (event) {
	case NETDEV_UNREGISTER:
		amt_dellink(amt->dev, &list);
		unregister_netdevice_many(&list);
		break;
	case NETDEV_CHANGEMTU:
		if (amt->mode == AMT_MODE_RELAY)
			new_mtu = dev->mtu - AMT_RELAY_HLEN;
		else
			new_mtu = dev->mtu - AMT_GW_HLEN;

		dev_set_mtu(amt->dev, new_mtu);
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block amt_notifier_block __read_mostly = {
	.notifier_call = amt_device_event,
};

static int __init amt_init(void)
{
	int err;

	err = register_netdevice_notifier(&amt_notifier_block);
	if (err < 0)
		goto err;

	err = rtnl_link_register(&amt_link_ops);
	if (err < 0)
		goto unregister_notifier;

	amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 0);
	if (!amt_wq) {
		err = -ENOMEM;
		goto rtnl_unregister;
	}

	spin_lock_init(&source_gc_lock);
	spin_lock_bh(&source_gc_lock);
	INIT_DELAYED_WORK(&source_gc_wq, amt_source_gc_work);
	mod_delayed_work(amt_wq, &source_gc_wq,
			 msecs_to_jiffies(AMT_GC_INTERVAL));
	spin_unlock_bh(&source_gc_lock);

	return 0;

rtnl_unregister:
	rtnl_link_unregister(&amt_link_ops);
unregister_notifier:
	unregister_netdevice_notifier(&amt_notifier_block);
err:
	pr_err("error loading AMT module\n");
	return err;
}
late_initcall(amt_init);

static void __exit amt_fini(void)
{
	rtnl_link_unregister(&amt_link_ops);
	unregister_netdevice_notifier(&amt_notifier_block);
	cancel_delayed_work_sync(&source_gc_wq);
	__amt_source_gc_work();
	destroy_workqueue(amt_wq);
}
module_exit(amt_fini);

3441 MODULE_LICENSE("GPL");
3442 MODULE_DESCRIPTION("Driver for Automatic Multicast Tunneling (AMT)");
3443 MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>");
3444 MODULE_ALIAS_RTNL_LINK("amt");