// SPDX-License-Identifier: GPL-2.0
/* Bareudp: UDP tunnel encapsulation for different payload types like
 * MPLS, NSH, IP, etc.
 * Copyright (c) 2019 Nokia, Inc.
 * Authors:  Martin Varghese, <martin.varghese@nokia.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/hash.h>
#include <net/dst_metadata.h>
#include <net/gro_cells.h>
#include <net/rtnetlink.h>
#include <net/protocol.h>
#include <net/ip6_tunnel.h>
#include <net/ip_tunnels.h>
#include <net/udp_tunnel.h>
#include <net/bareudp.h>

#define BAREUDP_BASE_HLEN sizeof(struct udphdr)
#define BAREUDP_IPV4_HLEN (sizeof(struct iphdr) + \
			   sizeof(struct udphdr))
#define BAREUDP_IPV6_HLEN (sizeof(struct ipv6hdr) + \
			   sizeof(struct udphdr))
static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
/* per-network namespace private data for this module */

static unsigned int bareudp_net_id;

struct bareudp_net {
	struct list_head	bareudp_list;
};
/* Pseudo network device */
struct bareudp_dev {
	struct net		*net;		/* netns for packet i/o */
	struct net_device	*dev;		/* netdev for bareudp tunnel */
	__be16			ethertype;
	__be16			port;
	u16			sport_min;
	bool			multi_proto_mode;
	struct socket __rcu	*sock;
	struct list_head	next;		/* bareudp node on namespace list */
	struct gro_cells	gro_cells;
};
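
/* RX path.  bareudp_udp_encap_recv() is attached to the tunnel socket as its
 * encap_rcv handler.  It derives the inner protocol from the configured
 * ethertype (and, in multi-proto mode, from the inner IP version or an MPLS
 * multicast destination), pulls the UDP header, attaches tunnel metadata and
 * hands the packet to the device's GRO cell.
 */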
static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct metadata_dst *tun_dst = NULL;
	struct pcpu_sw_netstats *stats;
	struct bareudp_dev *bareudp;
	unsigned short family;
	unsigned int len;
	__be16 proto;
	void *oiph;
	int err;

	bareudp = rcu_dereference_sk_user_data(sk);
	if (!bareudp)
		goto drop;

	if (skb->protocol == htons(ETH_P_IP))
		family = AF_INET;
	else
		family = AF_INET6;

	if (bareudp->ethertype == htons(ETH_P_IP)) {
		struct iphdr *iphdr;

		iphdr = (struct iphdr *)(skb->data + BAREUDP_BASE_HLEN);
		if (iphdr->version == 4) {
			proto = bareudp->ethertype;
		} else if (bareudp->multi_proto_mode && (iphdr->version == 6)) {
			proto = htons(ETH_P_IPV6);
		} else {
			bareudp->dev->stats.rx_dropped++;
			goto drop;
		}
	} else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
		struct iphdr *tunnel_hdr;

		tunnel_hdr = (struct iphdr *)skb_network_header(skb);
		if (tunnel_hdr->version == 4) {
			if (!ipv4_is_multicast(tunnel_hdr->daddr)) {
				proto = bareudp->ethertype;
			} else if (bareudp->multi_proto_mode &&
				   ipv4_is_multicast(tunnel_hdr->daddr)) {
				proto = htons(ETH_P_MPLS_MC);
			} else {
				bareudp->dev->stats.rx_dropped++;
				goto drop;
			}
		} else {
			int addr_type;
			struct ipv6hdr *tunnel_hdr_v6;

			tunnel_hdr_v6 = (struct ipv6hdr *)skb_network_header(skb);
			addr_type =
			    ipv6_addr_type((struct in6_addr *)&tunnel_hdr_v6->daddr);
			if (!(addr_type & IPV6_ADDR_MULTICAST)) {
				proto = bareudp->ethertype;
			} else if (bareudp->multi_proto_mode &&
				   (addr_type & IPV6_ADDR_MULTICAST)) {
				proto = htons(ETH_P_MPLS_MC);
			} else {
				bareudp->dev->stats.rx_dropped++;
				goto drop;
			}
		}
	} else {
		proto = bareudp->ethertype;
	}

	if (iptunnel_pull_header(skb, BAREUDP_BASE_HLEN,
				 proto,
				 !net_eq(bareudp->net,
					 dev_net(bareudp->dev)))) {
		bareudp->dev->stats.rx_dropped++;
		goto drop;
	}

	tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
	if (!tun_dst) {
		bareudp->dev->stats.rx_dropped++;
		goto drop;
	}
	skb_dst_set(skb, &tun_dst->dst);
	skb->dev = bareudp->dev;
	oiph = skb_network_header(skb);
	skb_reset_network_header(skb);

	if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
	else
		err = IP6_ECN_decapsulate(oiph, skb);

	if (unlikely(err)) {
		if (log_ecn_error) {
			if (!IS_ENABLED(CONFIG_IPV6) || family == AF_INET)
				net_info_ratelimited("non-ECT from %pI4 "
						     "with TOS=%#x\n",
						     &((struct iphdr *)oiph)->saddr,
						     ((struct iphdr *)oiph)->tos);
			else
				net_info_ratelimited("non-ECT from %pI6\n",
						     &((struct ipv6hdr *)oiph)->saddr);
		}
		if (err > 1) {
			++bareudp->dev->stats.rx_frame_errors;
			++bareudp->dev->stats.rx_errors;
			goto drop;
		}
	}

	len = skb->len;
	err = gro_cells_receive(&bareudp->gro_cells, skb);
	if (likely(err == NET_RX_SUCCESS)) {
		stats = this_cpu_ptr(bareudp->dev->tstats);
		u64_stats_update_begin(&stats->syncp);
		stats->rx_packets++;
		stats->rx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}
	return 0;
drop:
	/* Consume bad packet */
	kfree_skb(skb);

	return 0;
}
static int bareudp_err_lookup(struct sock *sk, struct sk_buff *skb)
{
	return 0;
}
static int bareudp_init(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&bareudp->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}
	return 0;
}
static void bareudp_uninit(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	gro_cells_destroy(&bareudp->gro_cells);
	free_percpu(dev->tstats);
}
static struct socket *bareudp_create_sock(struct net *net, __be16 port)
{
	struct udp_port_cfg udp_conf;
	struct socket *sock;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));
#if IS_ENABLED(CONFIG_IPV6)
	udp_conf.family = AF_INET6;
#else
	udp_conf.family = AF_INET;
#endif
	udp_conf.local_udp_port = port;
	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}
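
/* Note: with IPv6 enabled the tunnel socket is opened as AF_INET6 and, since
 * ipv6_v6only is left unset above, the same socket also receives
 * IPv4-encapsulated traffic on the configured port.
 */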
/* Create new listen socket if needed */
static int bareudp_socket_create(struct bareudp_dev *bareudp, __be16 port)
{
	struct udp_tunnel_sock_cfg tunnel_cfg;
	struct socket *sock;

	sock = bareudp_create_sock(bareudp->net, port);
	if (IS_ERR(sock))
		return PTR_ERR(sock);

	/* Mark socket as an encapsulation socket */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = bareudp;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = bareudp_udp_encap_recv;
	tunnel_cfg.encap_err_lookup = bareudp_err_lookup;
	tunnel_cfg.encap_destroy = NULL;
	setup_udp_tunnel_sock(bareudp->net, sock, &tunnel_cfg);

	/* setup_udp_tunnel_sock() does not call udp_encap_enable() when the
	 * socket is AF_INET6, so enable encapsulation explicitly here.
	 */
	if (sock->sk->sk_family == AF_INET6)
		udp_encap_enable();

	rcu_assign_pointer(bareudp->sock, sock);
	return 0;
}
static int bareudp_open(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);
	int ret;

	ret = bareudp_socket_create(bareudp, bareudp->port);
	return ret;
}
static void bareudp_sock_release(struct bareudp_dev *bareudp)
{
	struct socket *sock;

	sock = bareudp->sock;
	rcu_assign_pointer(bareudp->sock, NULL);
	synchronize_net();
	udp_tunnel_sock_release(sock);
}
static int bareudp_stop(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	bareudp_sock_release(bareudp);
	return 0;
}
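
/* IPv4 TX path: route the packet according to the tunnel metadata carried by
 * the skb, verify the path MTU against the BAREUDP_IPV4_HLEN overhead, then
 * push the UDP header and hand the packet to udp_tunnel_xmit_skb().
 */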
static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
			    struct bareudp_dev *bareudp,
			    const struct ip_tunnel_info *info)
{
	bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct socket *sock = rcu_dereference(bareudp->sock);
	bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
	const struct ip_tunnel_key *key = &info->key;
	struct rtable *rt;
	__be16 sport, df;
	int min_headroom;
	__u8 tos, ttl;
	__be32 saddr;
	int err;

	if (!sock)
		return -ESHUTDOWN;

	rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr, info,
				    IPPROTO_UDP, use_cache);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	skb_tunnel_check_pmtu(skb, &rt->dst,
			      BAREUDP_IPV4_HLEN + info->options_len);

	sport = udp_flow_src_port(bareudp->net, skb,
				  bareudp->sport_min, USHRT_MAX,
				  true);
	tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
	ttl = key->ttl;
	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
	skb_scrub_packet(skb, xnet);

	err = -ENOSPC;
	if (!skb_pull(skb, skb_network_offset(skb)))
		goto free_dst;

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len +
		BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);

	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_dst;

	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_dst;

	skb_set_inner_protocol(skb, bareudp->ethertype);
	udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
			    tos, ttl, df, sport, bareudp->port,
			    !net_eq(bareudp->net, dev_net(bareudp->dev)),
			    !(info->key.tun_flags & TUNNEL_CSUM));
	return 0;

free_dst:
	dst_release(&rt->dst);
	return err;
}
static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
			     struct bareudp_dev *bareudp,
			     const struct ip_tunnel_info *info)
{
	bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct socket *sock = rcu_dereference(bareudp->sock);
	bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
	const struct ip_tunnel_key *key = &info->key;
	struct dst_entry *dst = NULL;
	struct in6_addr saddr, daddr;
	int min_headroom;
	__u8 prio, ttl;
	__be16 sport;
	int err;

	if (!sock)
		return -ESHUTDOWN;

	dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock, &saddr, info,
				    IPPROTO_UDP, use_cache);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len);

	sport = udp_flow_src_port(bareudp->net, skb,
				  bareudp->sport_min, USHRT_MAX,
				  true);
	prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
	ttl = key->ttl;

	skb_scrub_packet(skb, xnet);

	err = -ENOSPC;
	if (!skb_pull(skb, skb_network_offset(skb)))
		goto free_dst;

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
		BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);

	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_dst;

	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_dst;

	daddr = info->key.u.ipv6.dst;
	udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev,
			     &saddr, &daddr, prio, ttl,
			     info->key.label, sport, bareudp->port,
			     !(info->key.tun_flags & TUNNEL_CSUM));
	return 0;

free_dst:
	dst_release(dst);
	return err;
}
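
/* ndo_start_xmit handler: drop packets whose protocol does not match the
 * configured ethertype (unless multi-proto mode permits the companion type),
 * require TX tunnel metadata on the skb, then dispatch to the IPv4 or IPv6
 * transmit path.
 */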
static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);
	struct ip_tunnel_info *info = NULL;
	int err;

	if (skb->protocol != bareudp->ethertype) {
		if (!bareudp->multi_proto_mode ||
		    (skb->protocol != htons(ETH_P_MPLS_MC) &&
		     skb->protocol != htons(ETH_P_IPV6))) {
			err = -EINVAL;
			goto tx_error;
		}
	}

	info = skb_tunnel_info(skb);
	if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
		err = -EINVAL;
		goto tx_error;
	}

	rcu_read_lock();
	if (IS_ENABLED(CONFIG_IPV6) && info->mode & IP_TUNNEL_INFO_IPV6)
		err = bareudp6_xmit_skb(skb, dev, bareudp, info);
	else
		err = bareudp_xmit_skb(skb, dev, bareudp, info);

	rcu_read_unlock();

	if (likely(!err))
		return NETDEV_TX_OK;

tx_error:
	dev_kfree_skb(skb);

	if (err == -ELOOP)
		dev->stats.collisions++;
	else if (err == -ENETUNREACH)
		dev->stats.tx_carrier_errors++;

	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}
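
/* ndo_fill_metadata_dst handler: pre-populate the tunnel metadata (source
 * address and UDP ports) that a subsequent transmit of this skb would use,
 * without actually sending it.
 */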
static int bareudp_fill_metadata_dst(struct net_device *dev,
				     struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct bareudp_dev *bareudp = netdev_priv(dev);
	bool use_cache;

	use_cache = ip_tunnel_dst_cache_usable(skb, info);

	if (!IS_ENABLED(CONFIG_IPV6) || ip_tunnel_info_af(info) == AF_INET) {
		struct rtable *rt;
		__be32 saddr;

		rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr,
					    info, IPPROTO_UDP, use_cache);
		if (IS_ERR(rt))
			return PTR_ERR(rt);

		ip_rt_put(rt);
		info->key.u.ipv4.src = saddr;
	} else if (ip_tunnel_info_af(info) == AF_INET6) {
		struct dst_entry *dst;
		struct in6_addr saddr;
		struct socket *sock = rcu_dereference(bareudp->sock);

		dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock,
					    &saddr, info, IPPROTO_UDP,
					    use_cache);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		dst_release(dst);
		info->key.u.ipv6.src = saddr;
	} else {
		return -EINVAL;
	}

	info->key.tp_src = udp_flow_src_port(bareudp->net, skb,
					     bareudp->sport_min,
					     USHRT_MAX, true);
	info->key.tp_dst = bareudp->port;
	return 0;
}
static const struct net_device_ops bareudp_netdev_ops = {
	.ndo_init		= bareudp_init,
	.ndo_uninit		= bareudp_uninit,
	.ndo_open		= bareudp_open,
	.ndo_stop		= bareudp_stop,
	.ndo_start_xmit		= bareudp_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_fill_metadata_dst	= bareudp_fill_metadata_dst,
};
static const struct nla_policy bareudp_policy[IFLA_BAREUDP_MAX + 1] = {
	[IFLA_BAREUDP_PORT]		= { .type = NLA_U16 },
	[IFLA_BAREUDP_ETHERTYPE]	= { .type = NLA_U16 },
	[IFLA_BAREUDP_SRCPORT_MIN]	= { .type = NLA_U16 },
	[IFLA_BAREUDP_MULTIPROTO_MODE]	= { .type = NLA_FLAG },
};
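
/* These attributes are set from user space when the link is created.  With a
 * reasonably recent iproute2 the device is typically configured with
 * something like (illustrative only; option names may differ by version):
 *
 *   ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc
 */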
/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type bareudp_type = {
	.name = "bareudp",
};
/* Initialize the device structure. */
static void bareudp_setup(struct net_device *dev)
{
	dev->netdev_ops = &bareudp_netdev_ops;
	dev->needs_free_netdev = true;
	SET_NETDEV_DEVTYPE(dev, &bareudp_type);
	dev->features    |= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features    |= NETIF_F_RXCSUM;
	dev->features    |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->mtu = ETH_DATA_LEN;
	dev->min_mtu = IPV4_MIN_MTU;
	dev->max_mtu = IP_MAX_MTU - BAREUDP_BASE_HLEN;
	dev->type = ARPHRD_NONE;
	netif_keep_dst(dev);
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
}
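
/* The device is a raw, point-to-point tunnel endpoint: ARPHRD_NONE with no
 * link-layer header, so the MTU bounds above only account for the UDP
 * encapsulation overhead.
 */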
static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	if (!data) {
		NL_SET_ERR_MSG(extack,
			       "Not enough attributes provided to perform the operation");
		return -EINVAL;
	}
	return 0;
}
*data
[], struct bareudp_conf
*conf
,
553 struct netlink_ext_ack
*extack
)
555 memset(conf
, 0, sizeof(*conf
));
557 if (!data
[IFLA_BAREUDP_PORT
]) {
558 NL_SET_ERR_MSG(extack
, "port not specified");
561 if (!data
[IFLA_BAREUDP_ETHERTYPE
]) {
562 NL_SET_ERR_MSG(extack
, "ethertype not specified");
566 if (data
[IFLA_BAREUDP_PORT
])
567 conf
->port
= nla_get_u16(data
[IFLA_BAREUDP_PORT
]);
569 if (data
[IFLA_BAREUDP_ETHERTYPE
])
570 conf
->ethertype
= nla_get_u16(data
[IFLA_BAREUDP_ETHERTYPE
]);
572 if (data
[IFLA_BAREUDP_SRCPORT_MIN
])
573 conf
->sport_min
= nla_get_u16(data
[IFLA_BAREUDP_SRCPORT_MIN
]);
575 if (data
[IFLA_BAREUDP_MULTIPROTO_MODE
])
576 conf
->multi_proto_mode
= true;
static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn,
					    const struct bareudp_conf *conf)
{
	struct bareudp_dev *bareudp, *t = NULL;

	list_for_each_entry(bareudp, &bn->bareudp_list, next) {
		if (conf->port == bareudp->port)
			t = bareudp;
	}
	return t;
}
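
/* Apply a parsed configuration to a new device and register it.  Only one
 * bareudp device may use a given UDP port per namespace, and multi-proto
 * mode is accepted only for the MPLS unicast and IPv4 ethertypes.
 */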
static int bareudp_configure(struct net *net, struct net_device *dev,
			     struct bareudp_conf *conf)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
	struct bareudp_dev *t, *bareudp = netdev_priv(dev);
	int err;

	bareudp->net = net;
	bareudp->dev = dev;
	t = bareudp_find_dev(bn, conf);
	if (t)
		return -EBUSY;

	if (conf->multi_proto_mode &&
	    (conf->ethertype != htons(ETH_P_MPLS_UC) &&
	     conf->ethertype != htons(ETH_P_IP)))
		return -EINVAL;

	bareudp->port = conf->port;
	bareudp->ethertype = conf->ethertype;
	bareudp->sport_min = conf->sport_min;
	bareudp->multi_proto_mode = conf->multi_proto_mode;
	err = register_netdevice(dev);
	if (err)
		return err;

	list_add(&bareudp->next, &bn->bareudp_list);
	return 0;
}
static int bareudp_link_config(struct net_device *dev,
			       struct nlattr *tb[])
{
	int err;

	if (tb[IFLA_MTU]) {
		err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
		if (err)
			return err;
	}
	return 0;
}
static int bareudp_newlink(struct net *net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct bareudp_conf conf;
	int err;

	err = bareudp2info(data, &conf, extack);
	if (err)
		return err;

	err = bareudp_configure(net, dev, &conf);
	if (err)
		return err;

	err = bareudp_link_config(dev, tb);
	if (err)
		return err;

	return 0;
}
static void bareudp_dellink(struct net_device *dev, struct list_head *head)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	list_del(&bareudp->next);
	unregister_netdevice_queue(dev, head);
}
static size_t bareudp_get_size(const struct net_device *dev)
{
	return  nla_total_size(sizeof(__be16)) +  /* IFLA_BAREUDP_PORT */
		nla_total_size(sizeof(__be16)) +  /* IFLA_BAREUDP_ETHERTYPE */
		nla_total_size(sizeof(__u16))  +  /* IFLA_BAREUDP_SRCPORT_MIN */
		nla_total_size(0)              +  /* IFLA_BAREUDP_MULTIPROTO_MODE */
		0;
}
static int bareudp_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	if (nla_put_be16(skb, IFLA_BAREUDP_PORT, bareudp->port))
		goto nla_put_failure;
	if (nla_put_be16(skb, IFLA_BAREUDP_ETHERTYPE, bareudp->ethertype))
		goto nla_put_failure;
	if (nla_put_u16(skb, IFLA_BAREUDP_SRCPORT_MIN, bareudp->sport_min))
		goto nla_put_failure;
	if (bareudp->multi_proto_mode &&
	    nla_put_flag(skb, IFLA_BAREUDP_MULTIPROTO_MODE))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static struct rtnl_link_ops bareudp_link_ops __read_mostly = {
	.kind		= "bareudp",
	.maxtype	= IFLA_BAREUDP_MAX,
	.policy		= bareudp_policy,
	.priv_size	= sizeof(struct bareudp_dev),
	.setup		= bareudp_setup,
	.validate	= bareudp_validate,
	.newlink	= bareudp_newlink,
	.dellink	= bareudp_dellink,
	.get_size	= bareudp_get_size,
	.fill_info	= bareudp_fill_info,
};
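
/* bareudp_dev_create() allows other kernel code to create and register a
 * bareudp device directly, bypassing the rtnetlink newlink path; hence the
 * EXPORT_SYMBOL_GPL below.
 */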
struct net_device *bareudp_dev_create(struct net *net, const char *name,
				      u8 name_assign_type,
				      struct bareudp_conf *conf)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	int err;

	memset(tb, 0, sizeof(tb));
	dev = rtnl_create_link(net, name, name_assign_type,
			       &bareudp_link_ops, tb, NULL);
	if (IS_ERR(dev))
		return dev;

	err = bareudp_configure(net, dev, conf);
	if (err) {
		free_netdev(dev);
		return ERR_PTR(err);
	}
	err = dev_set_mtu(dev, IP_MAX_MTU - BAREUDP_BASE_HLEN);
	if (err)
		goto err;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto err;

	return dev;

err:
	bareudp_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bareudp_dev_create);
static __net_init int bareudp_init_net(struct net *net)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);

	INIT_LIST_HEAD(&bn->bareudp_list);
	return 0;
}
static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
	struct bareudp_dev *bareudp, *next;

	list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next)
		unregister_netdevice_queue(bareudp->dev, head);
}
static void __net_exit bareudp_exit_batch_net(struct list_head *net_list)
{
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		bareudp_destroy_tunnels(net, &list);

	/* unregister the devices gathered above */
	unregister_netdevice_many(&list);
	rtnl_unlock();
}
static struct pernet_operations bareudp_net_ops = {
	.init = bareudp_init_net,
	.exit_batch = bareudp_exit_batch_net,
	.id   = &bareudp_net_id,
	.size = sizeof(struct bareudp_net),
};
static int __init bareudp_init_module(void)
{
	int rc;

	rc = register_pernet_subsys(&bareudp_net_ops);
	if (rc)
		goto out1;

	rc = rtnl_link_register(&bareudp_link_ops);
	if (rc)
		goto out2;

	return 0;
out2:
	unregister_pernet_subsys(&bareudp_net_ops);
out1:
	return rc;
}
late_initcall(bareudp_init_module);
static void __exit bareudp_cleanup_module(void)
{
	rtnl_link_unregister(&bareudp_link_ops);
	unregister_pernet_subsys(&bareudp_net_ops);
}
module_exit(bareudp_cleanup_module);
MODULE_ALIAS_RTNL_LINK("bareudp");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Varghese <martin.varghese@nokia.com>");
MODULE_DESCRIPTION("Interface driver for UDP encapsulated traffic");