// SPDX-License-Identifier: GPL-2.0
/*
 *	XFRM virtual interface
 *
 *	Copyright (C) 2018 secunet Security Networks AG
 *
 *	Steffen Klassert <steffen.klassert@secunet.com>
 */
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_link.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/etherdevice.h>
static int xfrmi_dev_init(struct net_device *dev);
static void xfrmi_dev_setup(struct net_device *dev);
static struct rtnl_link_ops xfrmi_link_ops __read_mostly;
static unsigned int xfrmi_net_id __read_mostly;
struct xfrmi_net {
	/* lists for storing interfaces in use */
	struct xfrm_if __rcu *xfrmi[1];
};
#define for_each_xfrmi_rcu(start, xi) \
	for (xi = rcu_dereference(start); xi; xi = rcu_dereference(xi->next))
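
/*
 * xfrmi_lookup - find the xfrm interface that owns an xfrm_state.
 * Walks the per-netns interface list under RCU and matches on the
 * state's if_id, considering only devices that are up.
 */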
static struct xfrm_if *xfrmi_lookup(struct net *net, struct xfrm_state *x)
{
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);

	for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
		if (x->if_id == xi->p.if_id &&
		    (xi->dev->flags & IFF_UP))
static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb,
					    unsigned short family)
{
	struct xfrmi_net *xfrmn;

	if (!secpath_exists(skb) || !skb->dev)

		ifindex = inet6_sdif(skb);

		ifindex = inet_sdif(skb);

		ifindex = skb->dev->ifindex;

	xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id);

	for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) {
		if (ifindex == xi->dev->ifindex &&
		    (xi->dev->flags & IFF_UP))
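
/*
 * xfrmi_link/xfrmi_unlink - add and remove an interface on the per-netns
 * list. Writers run under RTNL (rtnl_dereference); rcu_assign_pointer
 * publishes the updated list for RCU readers.
 */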
static void xfrmi_link(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
{
	struct xfrm_if __rcu **xip = &xfrmn->xfrmi[0];

	rcu_assign_pointer(xi->next, rtnl_dereference(*xip));
	rcu_assign_pointer(*xip, xi);
}

static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi)
{
	struct xfrm_if __rcu **xip;
	struct xfrm_if *iter;

	for (xip = &xfrmn->xfrmi[0];
	     (iter = rtnl_dereference(*xip)) != NULL;

			rcu_assign_pointer(*xip, xi->next);
static void xfrmi_dev_free(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);

	gro_cells_destroy(&xi->gro_cells);
	free_percpu(dev->tstats);
}

static int xfrmi_create(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);

	dev->rtnl_link_ops = &xfrmi_link_ops;
	err = register_netdevice(dev);

	xfrmi_link(xfrmn, xi);
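
/*
 * xfrmi_locate - find an existing interface by if_id under RTNL. Used by
 * the newlink/changelink handlers to detect a conflicting if_id before
 * creating or updating a device.
 */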
static struct xfrm_if *xfrmi_locate(struct net *net, struct xfrm_if_parms *p)
{
	struct xfrm_if __rcu **xip;
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);

	for (xip = &xfrmn->xfrmi[0];
	     (xi = rtnl_dereference(*xip)) != NULL;

		if (xi->p.if_id == p->if_id)

static void xfrmi_dev_uninit(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id);

	xfrmi_unlink(xfrmn, xi);
static void xfrmi_scrub_packet(struct sk_buff *skb, bool xnet)
{
	skb->pkt_type = PACKET_HOST;
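
/*
 * xfrmi_rcv_cb - receive-side callback invoked after the xfrm input path
 * has decapsulated a packet. It maps the xfrm_state back to its interface
 * via xfrmi_lookup(), runs the inbound policy check, scrubs the skb when
 * it crosses namespaces and accounts the packet in the per-CPU rx stats.
 */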
static int xfrmi_rcv_cb(struct sk_buff *skb, int err)
{
	const struct xfrm_mode *inner_mode;
	struct pcpu_sw_netstats *tstats;
	struct net_device *dev;
	struct xfrm_state *x;

	if (err && !secpath_exists(skb))

	x = xfrm_input_state(skb);

	xi = xfrmi_lookup(xs_net(x), x);

		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;

	xnet = !net_eq(xi->net, dev_net(skb->dev));

	inner_mode = &x->inner_mode;

	if (x->sel.family == AF_UNSPEC) {
		inner_mode = xfrm_ip2inner_mode(x, XFRM_MODE_SKB_CB(skb)->protocol);
		if (inner_mode == NULL) {
			XFRM_INC_STATS(dev_net(skb->dev),
				       LINUX_MIB_XFRMINSTATEMODEERROR);

	if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb,

	xfrmi_scrub_packet(skb, xnet);

	tstats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);
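
/*
 * xfrmi_xmit2 - transmit helper. The flow is resolved with
 * xfrm_lookup_with_ifid() using the interface's if_id, so only states and
 * policies bound to this interface match. Oversized packets trigger PMTU
 * signalling (ICMP/ICMPv6) before the skb is handed to dst_output();
 * successful transmissions update the per-CPU tx stats.
 */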
static int
xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net_device_stats *stats = &xi->dev->stats;
	struct dst_entry *dst = skb_dst(skb);
	unsigned int length = skb->len;
	struct net_device *tdev;
	struct xfrm_state *x;

	dst = xfrm_lookup_with_ifid(xi->net, dst, fl, NULL, 0, xi->p.if_id);

		goto tx_err_link_failure;

		goto tx_err_link_failure;

	if (x->if_id != xi->p.if_id)
		goto tx_err_link_failure;

		net_warn_ratelimited("%s: Local routing loop detected!\n",
		goto tx_err_dst_release;

	if (!skb->ignore_df && skb->len > mtu) {
		skb_dst_update_pmtu_no_confirm(skb, mtu);

		if (skb->protocol == htons(ETH_P_IPV6)) {
			if (mtu < IPV6_MIN_MTU)

			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);

			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,

	xfrmi_scrub_packet(skb, !net_eq(xi->net, dev_net(dev)));
	skb_dst_set(skb, dst);

	err = dst_output(xi->net, skb->sk, skb);
	if (net_xmit_eval(err) == 0) {
		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&tstats->syncp);
		tstats->tx_bytes += length;
		tstats->tx_packets++;
		u64_stats_update_end(&tstats->syncp);

		stats->tx_aborted_errors++;

	stats->tx_carrier_errors++;
	dst_link_failure(skb);
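
/*
 * xfrmi_xmit - ndo_start_xmit handler. The flow is decoded from the
 * outgoing skb per address family; if the skb does not already carry a
 * route, one is looked up in the underlying namespace before the packet
 * is passed to xfrmi_xmit2() with flowi_oif set to the lower link.
 */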
static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net_device_stats *stats = &xi->dev->stats;
	struct dst_entry *dst = skb_dst(skb);

	memset(&fl, 0, sizeof(fl));

	switch (skb->protocol) {
	case htons(ETH_P_IPV6):
		xfrm_decode_session(skb, &fl, AF_INET6);
		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));

			fl.u.ip6.flowi6_oif = dev->ifindex;
			fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
			dst = ip6_route_output(dev_net(dev), NULL, &fl.u.ip6);

				stats->tx_carrier_errors++;

			skb_dst_set(skb, dst);

	case htons(ETH_P_IP):
		xfrm_decode_session(skb, &fl, AF_INET);
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

			fl.u.ip4.flowi4_oif = dev->ifindex;
			fl.u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
			rt = __ip_route_output_key(dev_net(dev), &fl.u.ip4);

				stats->tx_carrier_errors++;

			skb_dst_set(skb, &rt->dst);

	fl.flowi_oif = xi->p.link;

	ret = xfrmi_xmit2(skb, dev, &fl);
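
/*
 * ICMP/ICMPv6 error handlers: errors received for ESP, AH and IPComp
 * packets are mapped back to the xfrm_state by SPI and, when that state
 * belongs to an xfrm interface, translated into a PMTU update or a
 * redirect on the underlying route.
 */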
static int xfrmi4_err(struct sk_buff *skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct net *net = dev_net(skb->dev);
	int protocol = iph->protocol;
	struct ip_comp_hdr *ipch;
	struct ip_esp_hdr *esph;
	struct ip_auth_hdr *ah;
	struct xfrm_state *x;

		esph = (struct ip_esp_hdr *)(skb->data + (iph->ihl << 2));

		ah = (struct ip_auth_hdr *)(skb->data + (iph->ihl << 2));

		ipch = (struct ip_comp_hdr *)(skb->data + (iph->ihl << 2));
		spi = htonl(ntohs(ipch->cpi));

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      spi, protocol, AF_INET);

	xi = xfrmi_lookup(net, x);

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, protocol);
	else
		ipv4_redirect(skb, net, 0, protocol);
static int xfrmi6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		      u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct net *net = dev_net(skb->dev);
	int protocol = iph->nexthdr;
	struct ip_comp_hdr *ipch;
	struct ip_esp_hdr *esph;
	struct ip_auth_hdr *ah;
	struct xfrm_state *x;

		esph = (struct ip_esp_hdr *)(skb->data + offset);

		ah = (struct ip_auth_hdr *)(skb->data + offset);

		ipch = (struct ip_comp_hdr *)(skb->data + offset);
		spi = htonl(ntohs(ipch->cpi));

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      spi, protocol, AF_INET6);

	xi = xfrmi_lookup(net, x);

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
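
/*
 * changelink support: the underlying link cannot be changed after
 * creation, only the if_id may be updated. The interface is unlinked,
 * its parameters rewritten and the device relinked under RTNL.
 */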
static int xfrmi_change(struct xfrm_if *xi, const struct xfrm_if_parms *p)
{
	if (xi->p.link != p->link)

	xi->p.if_id = p->if_id;

static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p)
{
	struct net *net = xi->net;
	struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);

	xfrmi_unlink(xfrmn, xi);
	err = xfrmi_change(xi, p);
	xfrmi_link(xfrmn, xi);
	netdev_state_change(xi->dev);
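
/*
 * xfrmi_get_stats64 - aggregate the per-CPU sw_netstats into the
 * rtnl_link_stats64 reply, using the u64_stats syncp retry loop so that
 * the 64-bit counters read consistently on 32-bit hosts.
 */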
static void xfrmi_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *s)
{
	for_each_possible_cpu(cpu) {
		struct pcpu_sw_netstats *stats;
		struct pcpu_sw_netstats tmp;

		stats = per_cpu_ptr(dev->tstats, cpu);

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			tmp.rx_packets = stats->rx_packets;
			tmp.rx_bytes = stats->rx_bytes;
			tmp.tx_packets = stats->tx_packets;
			tmp.tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		s->rx_packets += tmp.rx_packets;
		s->rx_bytes += tmp.rx_bytes;
		s->tx_packets += tmp.tx_packets;
		s->tx_bytes += tmp.tx_bytes;
	}

	s->rx_dropped = dev->stats.rx_dropped;
	s->tx_dropped = dev->stats.tx_dropped;
static int xfrmi_get_iflink(const struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);

static const struct net_device_ops xfrmi_netdev_ops = {
	.ndo_init	= xfrmi_dev_init,
	.ndo_uninit	= xfrmi_dev_uninit,
	.ndo_start_xmit	= xfrmi_xmit,
	.ndo_get_stats64 = xfrmi_get_stats64,
	.ndo_get_iflink	= xfrmi_get_iflink,
};

static void xfrmi_dev_setup(struct net_device *dev)
{
	dev->netdev_ops		= &xfrmi_netdev_ops;
	dev->type		= ARPHRD_NONE;
	dev->mtu		= ETH_DATA_LEN;
	dev->min_mtu		= ETH_MIN_MTU;
	dev->max_mtu		= IP_MAX_MTU;
	dev->flags		= IFF_NOARP;
	dev->needs_free_netdev	= true;
	dev->priv_destructor	= xfrmi_dev_free;

	eth_broadcast_addr(dev->broadcast);
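
/*
 * xfrmi_dev_init - allocate the per-CPU stats and GRO cells and, when an
 * underlying device is configured via the link parameter, inherit its
 * headroom, tailroom and addresses; otherwise fall back to a random MAC
 * address and the all-ones broadcast address.
 */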
static int xfrmi_dev_init(struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net_device *phydev = __dev_get_by_index(xi->net, xi->p.link);

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);

	err = gro_cells_init(&xi->gro_cells, dev);
		free_percpu(dev->tstats);

	dev->features |= NETIF_F_LLTX;

		dev->needed_headroom = phydev->needed_headroom;
		dev->needed_tailroom = phydev->needed_tailroom;

		if (is_zero_ether_addr(dev->dev_addr))
			eth_hw_addr_inherit(dev, phydev);
		if (is_zero_ether_addr(dev->broadcast))
			memcpy(dev->broadcast, phydev->broadcast,

		eth_hw_addr_random(dev);
		eth_broadcast_addr(dev->broadcast);
static int xfrmi_validate(struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)

static void xfrmi_netlink_parms(struct nlattr *data[],
				struct xfrm_if_parms *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (data[IFLA_XFRM_LINK])
		parms->link = nla_get_u32(data[IFLA_XFRM_LINK]);

	if (data[IFLA_XFRM_IF_ID])
		parms->if_id = nla_get_u32(data[IFLA_XFRM_IF_ID]);
static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct net *net = dev_net(dev);
	struct xfrm_if_parms p;

	xfrmi_netlink_parms(data, &p);
	xi = xfrmi_locate(net, &p);

	xi = netdev_priv(dev);

	err = xfrmi_create(dev);

static void xfrmi_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}

static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct net *net = xi->net;
	struct xfrm_if_parms p;

	xfrmi_netlink_parms(data, &p);
	xi = xfrmi_locate(net, &p);

	xi = netdev_priv(dev);

	return xfrmi_update(xi, &p);

static size_t xfrmi_get_size(const struct net_device *dev)

		/* IFLA_XFRM_IF_ID */
static int xfrmi_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);
	struct xfrm_if_parms *parm = &xi->p;

	if (nla_put_u32(skb, IFLA_XFRM_LINK, parm->link) ||
	    nla_put_u32(skb, IFLA_XFRM_IF_ID, parm->if_id))
		goto nla_put_failure;

static struct net *xfrmi_get_link_net(const struct net_device *dev)
{
	struct xfrm_if *xi = netdev_priv(dev);

static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = {
	[IFLA_XFRM_LINK]	= { .type = NLA_U32 },
	[IFLA_XFRM_IF_ID]	= { .type = NLA_U32 },
};
static struct rtnl_link_ops xfrmi_link_ops __read_mostly = {
	.maxtype	= IFLA_XFRM_MAX,
	.policy		= xfrmi_policy,
	.priv_size	= sizeof(struct xfrm_if),
	.setup		= xfrmi_dev_setup,
	.validate	= xfrmi_validate,
	.newlink	= xfrmi_newlink,
	.dellink	= xfrmi_dellink,
	.changelink	= xfrmi_changelink,
	.get_size	= xfrmi_get_size,
	.fill_info	= xfrmi_fill_info,
	.get_link_net	= xfrmi_get_link_net,
};
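
/*
 * Illustrative usage (not part of this file): with the "xfrm" link type
 * registered above, an interface can typically be created from userspace
 * with iproute2, e.g.
 *
 *	ip link add xfrm0 type xfrm dev eth0 if_id 42
 *	ip link set xfrm0 up
 *
 * States and policies carrying the matching if_id are then steered over
 * xfrm0. The interface name, underlying device and if_id value here are
 * examples only.
 */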
static struct pernet_operations xfrmi_net_ops = {
	.size = sizeof(struct xfrmi_net),
};

static struct xfrm6_protocol xfrmi_esp6_protocol __read_mostly = {
	.handler	=	xfrm6_rcv,
	.cb_handler	=	xfrmi_rcv_cb,
	.err_handler	=	xfrmi6_err,
};

static struct xfrm6_protocol xfrmi_ah6_protocol __read_mostly = {
	.handler	=	xfrm6_rcv,
	.cb_handler	=	xfrmi_rcv_cb,
	.err_handler	=	xfrmi6_err,
};

static struct xfrm6_protocol xfrmi_ipcomp6_protocol __read_mostly = {
	.handler	=	xfrm6_rcv,
	.cb_handler	=	xfrmi_rcv_cb,
	.err_handler	=	xfrmi6_err,
};

static struct xfrm4_protocol xfrmi_esp4_protocol __read_mostly = {
	.handler	=	xfrm4_rcv,
	.input_handler	=	xfrm_input,
	.cb_handler	=	xfrmi_rcv_cb,
	.err_handler	=	xfrmi4_err,
};

static struct xfrm4_protocol xfrmi_ah4_protocol __read_mostly = {
	.handler	=	xfrm4_rcv,
	.input_handler	=	xfrm_input,
	.cb_handler	=	xfrmi_rcv_cb,
	.err_handler	=	xfrmi4_err,
};

static struct xfrm4_protocol xfrmi_ipcomp4_protocol __read_mostly = {
	.handler	=	xfrm4_rcv,
	.input_handler	=	xfrm_input,
	.cb_handler	=	xfrmi_rcv_cb,
	.err_handler	=	xfrmi4_err,
};
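
/*
 * Module init registers the per-netns state, hooks the ESP/AH/IPComp
 * protocol handlers for IPv4 and IPv6, registers the "xfrm" rtnl link
 * type and finally installs xfrm_if_cb so the xfrm core can decode
 * sessions towards these interfaces. Each registration step unwinds the
 * previous ones on failure, as the error labels below show.
 */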
static int __init xfrmi4_init(void)
{
	err = xfrm4_protocol_register(&xfrmi_esp4_protocol, IPPROTO_ESP);
		goto xfrm_proto_esp_failed;
	err = xfrm4_protocol_register(&xfrmi_ah4_protocol, IPPROTO_AH);
		goto xfrm_proto_ah_failed;
	err = xfrm4_protocol_register(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
		goto xfrm_proto_comp_failed;

xfrm_proto_comp_failed:
	xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
	xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:

static void xfrmi4_fini(void)
{
	xfrm4_protocol_deregister(&xfrmi_ipcomp4_protocol, IPPROTO_COMP);
	xfrm4_protocol_deregister(&xfrmi_ah4_protocol, IPPROTO_AH);
	xfrm4_protocol_deregister(&xfrmi_esp4_protocol, IPPROTO_ESP);
}
static int __init xfrmi6_init(void)
{
	err = xfrm6_protocol_register(&xfrmi_esp6_protocol, IPPROTO_ESP);
		goto xfrm_proto_esp_failed;
	err = xfrm6_protocol_register(&xfrmi_ah6_protocol, IPPROTO_AH);
		goto xfrm_proto_ah_failed;
	err = xfrm6_protocol_register(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
		goto xfrm_proto_comp_failed;

xfrm_proto_comp_failed:
	xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
	xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
xfrm_proto_esp_failed:

static void xfrmi6_fini(void)
{
	xfrm6_protocol_deregister(&xfrmi_ipcomp6_protocol, IPPROTO_COMP);
	xfrm6_protocol_deregister(&xfrmi_ah6_protocol, IPPROTO_AH);
	xfrm6_protocol_deregister(&xfrmi_esp6_protocol, IPPROTO_ESP);
}

static const struct xfrm_if_cb xfrm_if_cb = {
	.decode_session =	xfrmi_decode_session,
};
static int __init xfrmi_init(void)
{
	pr_info("IPsec XFRM device driver\n");

	msg = "tunnel device";
	err = register_pernet_device(&xfrmi_net_ops);
		goto pernet_dev_failed;

	msg = "xfrm4 protocols";

	msg = "xfrm6 protocols";

	msg = "netlink interface";
	err = rtnl_link_register(&xfrmi_link_ops);
		goto rtnl_link_failed;

	xfrm_if_register_cb(&xfrm_if_cb);

	unregister_pernet_device(&xfrmi_net_ops);
	pr_err("xfrmi init: failed to register %s\n", msg);

static void __exit xfrmi_fini(void)
{
	xfrm_if_unregister_cb();
	rtnl_link_unregister(&xfrmi_link_ops);
	unregister_pernet_device(&xfrmi_net_ops);
}
module_init(xfrmi_init);
module_exit(xfrmi_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("xfrm");
MODULE_ALIAS_NETDEV("xfrm0");
MODULE_AUTHOR("Steffen Klassert");
MODULE_DESCRIPTION("XFRM virtual interface");