// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2002 USAGI/WIDE Project
 *
 * Authors:
 *	Mitsuru KANDA @USAGI	: IPv6 Support
 *	Kazunori MIYAZAWA @USAGI :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 * This file is derived from net/ipv4/ah.c.
 */
#define pr_fmt(fmt) "IPv6: " fmt
#include <crypto/hash.h>
#include <crypto/utils.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#define IPV6HDR_BASELEN 8
struct tmp_ext {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	struct in6_addr saddr;
#endif
	struct in6_addr daddr;
};

struct ah_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
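/*
 * Note: ah_skb_cb overlays skb->cb (48 bytes), and it must keep struct
 * xfrm_skb_cb as its first member so the xfrm core can use the same
 * control block.  The tmp pointer tracks the per-packet scratch buffer so
 * that the async completion handlers (ah6_output_done()/ah6_input_done())
 * can find and free it.
 */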
static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
			  unsigned int size)
{
	unsigned int len;

	len = size + crypto_ahash_digestsize(ahash);

	len = ALIGN(len, crypto_tfm_ctx_alignment());

	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}
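/*
 * Layout of the scratch buffer returned by ah_alloc_tmp(), a sketch
 * inferred from the accessor helpers below:
 *
 *	+------------------+ offset 0
 *	| caller data      |  "size" bytes (saved IP header, exts, seqhi)
 *	+------------------+
 *	| ICV              |  crypto_ahash_digestsize(ahash) bytes
 *	+------------------+ aligned to crypto_tfm_ctx_alignment()
 *	| ahash_request    |  sizeof(req) + crypto_ahash_reqsize(ahash)
 *	+------------------+ aligned to __alignof__(struct scatterlist)
 *	| scatterlist[]    |  nfrags entries
 *	+------------------+
 */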
static inline struct tmp_ext *ah_tmp_ext(void *base)
{
	return base + IPV6HDR_BASELEN;
}
static inline u8 *ah_tmp_auth(u8 *tmp, unsigned int offset)
{
	return tmp + offset;
}

static inline u8 *ah_tmp_icv(void *tmp, unsigned int offset)
{
	return tmp + offset;
}
static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
					       u8 *icv)
{
	struct ahash_request *req;

	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
				crypto_tfm_ctx_alignment());

	ahash_request_set_tfm(req, ahash);

	return req;
}
static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
					    struct ahash_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_ahash_reqsize(ahash),
			     __alignof__(struct scatterlist));
}
static bool zero_out_mutable_opts(struct ipv6_opt_hdr *opthdr)
{
	u8 *opt = (u8 *)opthdr;
	int len = ipv6_optlen(opthdr);
	int off = 0;
	int optlen;

	off += 2;
	len -= 2;

	while (len > 0) {

		switch (opt[off]) {

		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		default:
			if (len < 2)
				goto bad;
			optlen = opt[off+1]+2;
			if (len < optlen)
				goto bad;
			if (opt[off] & 0x20)
				memset(&opt[off+2], 0, opt[off+1]);
			break;
		}

		off += optlen;
		len -= optlen;
	}
	if (len == 0)
		return true;

bad:
	return false;
}
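/*
 * The (opt[off] & 0x20) test above implements the "mutable" flag from
 * RFC 8200 section 4.2: the third-highest-order bit of an option type
 * says whether the option data may change en route.  RFC 4302 requires
 * such options to be treated as zero when computing or verifying the
 * AH ICV, which is exactly what the memset() does.
 */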
#if IS_ENABLED(CONFIG_IPV6_MIP6)
/**
 *	ipv6_rearrange_destopt - rearrange IPv6 destination options header
 *	@iph: IPv6 header
 *	@destopt: destination options header
 */
static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt)
{
	u8 *opt = (u8 *)destopt;
	int len = ipv6_optlen(destopt);
	int off = 0;
	int optlen;

	off += 2;
	len -= 2;

	while (len > 0) {

		switch (opt[off]) {

		case IPV6_TLV_PAD1:
			optlen = 1;
			break;
		default:
			if (len < 2)
				goto bad;
			optlen = opt[off+1]+2;
			if (len < optlen)
				goto bad;

			/* Rearrange the source address in @iph and the
			 * addresses in home address option for final source.
			 * See 11.3.2 of RFC 3775 for details.
			 */
			if (opt[off] == IPV6_TLV_HAO) {
				struct ipv6_destopt_hao *hao;

				hao = (struct ipv6_destopt_hao *)&opt[off];
				if (hao->length != sizeof(hao->addr)) {
					net_warn_ratelimited("destopt hao: invalid header length: %u\n",
							     hao->length);
					goto bad;
				}
				swap(hao->addr, iph->saddr);
			}
			break;
		}

		off += optlen;
		len -= optlen;
	}
	/* Note: ok if len == 0 */

bad:
	return;
}
#else
static void ipv6_rearrange_destopt(struct ipv6hdr *iph, struct ipv6_opt_hdr *destopt) {}
#endif
/**
 *	ipv6_rearrange_rthdr - rearrange IPv6 routing header
 *	@iph: IPv6 header
 *	@rthdr: routing header
 *
 *	Rearrange the destination address in @iph and the addresses in @rthdr
 *	so that they appear in the order they will at the final destination.
 *	See Appendix A2 of RFC 2402 for details.
 */
static void ipv6_rearrange_rthdr(struct ipv6hdr *iph, struct ipv6_rt_hdr *rthdr)
{
	int segments, segments_left;
	struct in6_addr *addrs;
	struct in6_addr final_addr;

	segments_left = rthdr->segments_left;
	if (segments_left == 0)
		return;
	rthdr->segments_left = 0;

	/* The value of rthdr->hdrlen has been verified either by the system
	 * call if it is locally generated, or by ipv6_rthdr_rcv() for incoming
	 * packets.  So we can assume that it is even and that segments is
	 * greater than or equal to segments_left.
	 *
	 * For the same reason we can assume that this option is of type 0.
	 */
	segments = rthdr->hdrlen >> 1;

	addrs = ((struct rt0_hdr *)rthdr)->addr;
	final_addr = addrs[segments - 1];

	addrs += segments - segments_left;
	memmove(addrs + 1, addrs, (segments_left - 1) * sizeof(*addrs));

	addrs[0] = iph->daddr;
	iph->daddr = final_addr;
}
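/*
 * Worked example (tracing the code above): with a type 0 routing header
 * holding addrs = [A1, A2, A3], segments_left = 2 and iph->daddr = D,
 * segments is 3 and final_addr is A3; the memmove shifts A2 one slot to
 * the right, giving addrs = [A1, D, A2] and iph->daddr = A3, which is
 * exactly the layout the packet will have at its final destination.
 */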
static int ipv6_clear_mutable_options(struct ipv6hdr *iph, int len, int dir)
{
	union {
		struct ipv6hdr *iph;
		struct ipv6_opt_hdr *opth;
		struct ipv6_rt_hdr *rth;
		char *raw;
	} exthdr = { .iph = iph };
	char *end = exthdr.raw + len;
	int nexthdr = iph->nexthdr;

	exthdr.iph++;

	while (exthdr.raw < end) {
		switch (nexthdr) {
		case NEXTHDR_DEST:
			if (dir == XFRM_POLICY_OUT)
				ipv6_rearrange_destopt(iph, exthdr.opth);
			fallthrough;
		case NEXTHDR_HOP:
			if (!zero_out_mutable_opts(exthdr.opth)) {
				net_dbg_ratelimited("overrun %sopts\n",
						    nexthdr == NEXTHDR_HOP ?
						    "hop" : "dest");
				return -EINVAL;
			}
			break;

		case NEXTHDR_ROUTING:
			ipv6_rearrange_rthdr(iph, exthdr.rth);
			break;

		default:
			return 0;
		}

		nexthdr = exthdr.opth->nexthdr;
		exthdr.raw += ipv6_optlen(exthdr.opth);
	}

	return 0;
}
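/*
 * In practice the extension headers that can sit between the IPv6 header
 * and AH are hop-by-hop options, destination options and routing headers
 * (see the recommended header order in RFC 8200 section 4.1), so the walk
 * above simply stops at the first header of any other type.
 */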
static void ah6_output_done(void *data, int err)
{
	int extlen;
	u8 *iph_base;
	u8 *icv;
	struct sk_buff *skb = data;
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	struct ah_data *ahp = x->data;
	struct ipv6hdr *top_iph = ipv6_hdr(skb);
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	struct tmp_ext *iph_ext;

	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
	if (extlen)
		extlen += sizeof(*iph_ext);

	iph_base = AH_SKB_CB(skb)->tmp;
	iph_ext = ah_tmp_ext(iph_base);
	icv = ah_tmp_icv(iph_ext, extlen);

	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);

	if (extlen) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		memcpy(&top_iph->saddr, iph_ext, extlen);
#else
		memcpy(&top_iph->daddr, iph_ext, extlen);
#endif
	}

	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb->sk, skb, err);
}
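/*
 * This is the async completion path: when crypto_ahash_digest() returns
 * -EINPROGRESS, ah6_output() leaves the saved header bytes in
 * AH_SKB_CB(skb)->tmp, and this callback performs the same ICV copy and
 * header restore as the synchronous tail of ah6_output() before handing
 * the skb back to the xfrm layer via xfrm_output_resume().
 */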
static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int nfrags;
	int extlen;
	u8 *iph_base;
	u8 *icv;
	u8 nexthdr;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct ipv6hdr *top_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	struct tmp_ext *iph_ext;
	int seqhi_len = 0;
	__be32 *seqhi;
	int sglists = 0;
	struct scatterlist *seqhisg;

	ahp = x->data;
	ahash = ahp->ahash;

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;
	nfrags = err;

	skb_push(skb, -skb_network_offset(skb));
	extlen = skb_network_header_len(skb) - sizeof(struct ipv6hdr);
	if (extlen)
		extlen += sizeof(*iph_ext);

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists = 1;
		seqhi_len = sizeof(*seqhi);
	}
	err = -ENOMEM;
	iph_base = ah_alloc_tmp(ahash, nfrags + sglists, IPV6HDR_BASELEN +
				extlen + seqhi_len);
	if (!iph_base)
		goto out;

	iph_ext = ah_tmp_ext(iph_base);
	seqhi = (__be32 *)((char *)iph_ext + extlen);
	icv = ah_tmp_icv(seqhi, seqhi_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);
	seqhisg = sg + nfrags;

	ah = ip_auth_hdr(skb);
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	top_iph = ipv6_hdr(skb);
	top_iph->payload_len = htons(skb->len - sizeof(*top_iph));

	nexthdr = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_AH;

	/* When there are no extension headers, we only need to save the first
	 * 8 bytes of the base IP header.
	 */
	memcpy(iph_base, top_iph, IPV6HDR_BASELEN);

	if (extlen) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		memcpy(iph_ext, &top_iph->saddr, extlen);
#else
		memcpy(iph_ext, &top_iph->daddr, extlen);
#endif
		err = ipv6_clear_mutable_options(top_iph,
						 extlen - sizeof(*iph_ext) +
						 sizeof(*top_iph),
						 XFRM_POLICY_OUT);
		if (err)
			goto out_free;
	}

	ah->nexthdr = nexthdr;

	top_iph->priority    = 0;
	top_iph->flow_lbl[0] = 0;
	top_iph->flow_lbl[1] = 0;
	top_iph->flow_lbl[2] = 0;
	top_iph->hop_limit   = 0;

	ah->hdrlen  = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;

	ah->reserved = 0;
	ah->spi = x->id.spi;
	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	sg_init_table(sg, nfrags + sglists);
	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
	if (unlikely(err < 0))
		goto out_free;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* Attach seqhi sg right after packet payload */
		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		sg_set_buf(seqhisg, seqhi, seqhi_len);
	}
	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
	ahash_request_set_callback(req, 0, ah6_output_done, skb);

	AH_SKB_CB(skb)->tmp = iph_base;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		if (err == -ENOSPC)
			err = NET_XMIT_DROP;
		goto out_free;
	}

	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);
	memcpy(top_iph, iph_base, IPV6HDR_BASELEN);

	if (extlen) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
		memcpy(&top_iph->saddr, iph_ext, extlen);
#else
		memcpy(&top_iph->daddr, iph_ext, extlen);
#endif
	}

out_free:
	kfree(iph_base);
out:
	return err;
}
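/*
 * Summary of the output transform above, per RFC 4302: the ICV is computed
 * over the packet with the mutable fields (priority/traffic class, flow
 * label, hop limit, and any mutable options) zeroed.  The original header
 * bytes are saved in the scratch buffer beforehand and restored once the
 * digest completes, either here synchronously or in ah6_output_done().
 */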
static void ah6_input_done(void *data, int err)
{
	u8 *auth_data;
	u8 *icv;
	u8 *work_iph;
	struct sk_buff *skb = data;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ah_data *ahp = x->data;
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int hdr_len = skb_network_header_len(skb);
	int ah_hlen = ipv6_authlen(ah);

	if (err)
		goto out;

	work_iph = AH_SKB_CB(skb)->tmp;
	auth_data = ah_tmp_auth(work_iph, hdr_len);
	icv = ah_tmp_icv(auth_data, ahp->icv_trunc_len);

	err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
	if (err)
		goto out;

	err = ah->nexthdr;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, hdr_len);
	__skb_pull(skb, ah_hlen + hdr_len);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);
out:
	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_input_resume(skb, err);
}
static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	/*
	 * Before AH processing:
	 * [IPv6][Ext1][Ext2][AH][Dest][Payload]
	 * |<-------------->| hdr_len
	 *
	 * To erase AH: keep a copy of the cleared headers.  After AH
	 * processing, advance skb->network_header by the AH header length,
	 * then copy the saved hdr_len bytes back.  If a destination options
	 * header follows AH, it ends up after [Ext2].
	 *
	 * |<>|[IPv6][Ext1][Ext2][Dest][Payload]
	 * Afterwards an AH-sized gap sits before the IPv6 header.
	 */
	u8 *auth_data;
	u8 *icv;
	u8 *work_iph;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct ip_auth_hdr *ah;
	struct ipv6hdr *ip6h;
	struct ah_data *ahp;
	u16 hdr_len;
	u16 ah_hlen;
	int nexthdr;
	int nfrags;
	int err = -ENOMEM;
	int seqhi_len = 0;
	__be32 *seqhi;
	int sglists = 0;
	struct scatterlist *seqhisg;

	if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
		goto out;

	/* We are going to _remove_ AH header to keep sockets happy,
	 * so... Later this can change. */
	if (skb_unclone(skb, GFP_ATOMIC))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	hdr_len = skb_network_header_len(skb);
	ah = (struct ip_auth_hdr *)skb->data;
	ahp = x->data;
	ahash = ahp->ahash;

	nexthdr = ah->nexthdr;
	ah_hlen = ipv6_authlen(ah);

	if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
	    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
		goto out;

	if (!pskb_may_pull(skb, ah_hlen))
		goto out;

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;
	nfrags = err;

	ah = (struct ip_auth_hdr *)skb->data;
	ip6h = ipv6_hdr(skb);

	skb_push(skb, hdr_len);

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists = 1;
		seqhi_len = sizeof(*seqhi);
	}

	work_iph = ah_alloc_tmp(ahash, nfrags + sglists, hdr_len +
				ahp->icv_trunc_len + seqhi_len);
	if (!work_iph) {
		err = -ENOMEM;
		goto out;
	}

	auth_data = ah_tmp_auth((u8 *)work_iph, hdr_len);
	seqhi = (__be32 *)(auth_data + ahp->icv_trunc_len);
	icv = ah_tmp_icv(seqhi, seqhi_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);
	seqhisg = sg + nfrags;

	memcpy(work_iph, ip6h, hdr_len);
	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	err = ipv6_clear_mutable_options(ip6h, hdr_len, XFRM_POLICY_IN);
	if (err)
		goto out_free;

	ip6h->priority    = 0;
	ip6h->flow_lbl[0] = 0;
	ip6h->flow_lbl[1] = 0;
	ip6h->flow_lbl[2] = 0;
	ip6h->hop_limit   = 0;

	sg_init_table(sg, nfrags + sglists);
	err = skb_to_sgvec_nomark(skb, sg, 0, skb->len);
	if (unlikely(err < 0))
		goto out_free;

	if (x->props.flags & XFRM_STATE_ESN) {
		/* Attach seqhi sg right after packet payload */
		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
		sg_set_buf(seqhisg, seqhi, seqhi_len);
	}

	ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
	ahash_request_set_callback(req, 0, ah6_input_done, skb);

	AH_SKB_CB(skb)->tmp = work_iph;

	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		goto out_free;
	}

	err = crypto_memneq(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
	if (err)
		goto out_free;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, hdr_len);
	__skb_pull(skb, ah_hlen + hdr_len);

	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	err = nexthdr;

out_free:
	kfree(work_iph);
out:
	return err;
}
static int ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		   u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data+offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr,
			      ah->spi, IPPROTO_AH, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}
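/*
 * Like the other xfrm protocol error handlers, this only reacts to path
 * signals: PMTU updates for ICMPV6_PKT_TOOBIG and route updates for
 * NDISC_REDIRECT, and only when the SPI in the offending packet resolves
 * to a known xfrm state.
 */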
static int ah6_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack)
{
	struct ah_data *ahp = NULL;
	struct xfrm_algo_desc *aalg_desc;
	struct crypto_ahash *ahash;

	if (!x->aalg) {
		NL_SET_ERR_MSG(extack, "AH requires a state with an AUTH algorithm");
		goto error;
	}

	if (x->encap) {
		NL_SET_ERR_MSG(extack, "AH is not compatible with encapsulation");
		goto error;
	}

	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
	if (!ahp)
		return -ENOMEM;

	ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
	if (IS_ERR(ahash)) {
		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
		goto error;
	}

	ahp->ahash = ahash;
	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
				(x->aalg->alg_key_len + 7) / 8)) {
		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
		goto error;
	}

	/*
	 * Lookup the algorithm description maintained by xfrm_algo,
	 * verify crypto transform properties, and store information
	 * we need for AH processing.  This lookup cannot fail here
	 * after a successful crypto_alloc_hash().
	 */
	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
	BUG_ON(!aalg_desc);

	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
	    crypto_ahash_digestsize(ahash)) {
		NL_SET_ERR_MSG(extack, "Kernel was unable to initialize cryptographic operations");
		goto error;
	}

	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
	ahp->icv_trunc_len = x->aalg->alg_trunc_len/8;

	x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
					  ahp->icv_trunc_len);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	default:
		NL_SET_ERR_MSG(extack, "Invalid mode requested for AH, must be one of TRANSPORT, TUNNEL, BEET");
		goto error;
	}
	x->data = ahp;

	return 0;

error:
	if (ahp) {
		crypto_free_ahash(ahp->ahash);
		kfree(ahp);
	}
	return -EINVAL;
}
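/*
 * Example of full vs truncated ICV lengths (a well-known case, not taken
 * from this file): hmac(sha1) has a 20-byte digest (icv_full_len) but
 * RFC 2404 transmits only the first 12 bytes (icv_trunc_len, 96 bits),
 * which is why ah6_input() accepts an AH header sized for either length.
 */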
static void ah6_destroy(struct xfrm_state *x)
{
	struct ah_data *ahp = x->data;

	if (!ahp)
		return;

	crypto_free_ahash(ahp->ahash);
	kfree(ahp);
}
static int ah6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}
static const struct xfrm_type ah6_type = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_AH,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= ah6_init_state,
	.destructor	= ah6_destroy,
	.input		= ah6_input,
	.output		= ah6_output,
};
static struct xfrm6_protocol ah6_protocol = {
	.handler	= xfrm6_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= ah6_rcv_cb,
	.err_handler	= ah6_err,
	.priority	= 0,
};
static int __init ah6_init(void)
{
	if (xfrm_register_type(&ah6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}

	if (xfrm6_protocol_register(&ah6_protocol, IPPROTO_AH) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&ah6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}
static void __exit ah6_fini(void)
{
	if (xfrm6_protocol_deregister(&ah6_protocol, IPPROTO_AH) < 0)
		pr_info("%s: can't remove protocol\n", __func__);

	xfrm_unregister_type(&ah6_type, AF_INET6);
}
module_init(ah6_init);
module_exit(ah6_fini);

MODULE_DESCRIPTION("IPv6 AH transformation helpers");
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_AH);