/*
 * net/ipv4/ah4.c - IPsec IPv4 Authentication Header (AH)
 * From Linux 3.8-rc7.
 */
#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/ah.h>
#include <linux/crypto.h>
#include <linux/pfkeyv2.h>
#include <linux/scatterlist.h>
#include <net/icmp.h>
#include <net/protocol.h>
struct ah_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define AH_SKB_CB(__skb) ((struct ah_skb_cb *)&((__skb)->cb[0]))
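/*
 * Allocate one contiguous scratch buffer holding, in order: the caller's
 * area of @size bytes (saved IP header / authentication data), the ICV
 * aligned for the hash, the ahash request, and @nfrags scatterlist
 * entries.  The ah_tmp_*() helpers below recover pointers into it.
 */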
static void *ah_alloc_tmp(struct crypto_ahash *ahash, int nfrags,
			  unsigned int size)
{
	unsigned int len;

	len = size + crypto_ahash_digestsize(ahash) +
	      (crypto_ahash_alignmask(ahash) &
	       ~(crypto_tfm_ctx_alignment() - 1));

	len = ALIGN(len, crypto_tfm_ctx_alignment());

	len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}
static inline u8 *ah_tmp_auth(void *tmp, unsigned int offset)
{
	return tmp + offset;
}
static inline u8 *ah_tmp_icv(struct crypto_ahash *ahash, void *tmp,
			     unsigned int offset)
{
	return PTR_ALIGN((u8 *)tmp + offset, crypto_ahash_alignmask(ahash) + 1);
}
static inline struct ahash_request *ah_tmp_req(struct crypto_ahash *ahash,
					       u8 *icv)
{
	struct ahash_request *req;

	req = (void *)PTR_ALIGN(icv + crypto_ahash_digestsize(ahash),
				crypto_tfm_ctx_alignment());

	ahash_request_set_tfm(req, ahash);

	return req;
}
static inline struct scatterlist *ah_req_sg(struct crypto_ahash *ahash,
					    struct ahash_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_ahash_reqsize(ahash),
			     __alignof__(struct scatterlist));
}
/* Clear mutable options and find final destination to substitute
 * into IP header for icv calculation. Options are already checked
 * for validity, so paranoia is not required. */
static int ip_clear_mutable_options(const struct iphdr *iph, __be32 *daddr)
{
	unsigned char *optptr = (unsigned char *)(iph + 1);
	int l = iph->ihl * 4 - sizeof(struct iphdr);
	int optlen;

	while (l > 0) {
		switch (*optptr) {
		case IPOPT_END:
			return 0;
		case IPOPT_NOOP:
			l--;
			optptr++;
			continue;
		}
		optlen = optptr[1];
		if (optlen < 2 || optlen > l)
			return -EINVAL;
		switch (*optptr) {
		case IPOPT_SEC:
		case 0x85:	/* Some "Extended Security" crap. */
		case IPOPT_CIPSO:
		case IPOPT_RA:
		case 0x80|21:	/* RFC1770 */
			break;
		case IPOPT_LSRR:
		case IPOPT_SSRR:
			if (optlen < 6)
				return -EINVAL;
			memcpy(daddr, optptr + optlen - 4, 4);
			/* Fall through */
		default:
			memset(optptr, 0, optlen);
		}
		l -= optlen;
		optptr += optlen;
	}
	return 0;
}
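/*
 * Completion callback for asynchronous ICV computation on output: copy
 * the computed ICV into the AH header, restore the mutable IP header
 * fields that were zeroed for the calculation, and resume xfrm output.
 */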
static void ah_output_done(struct crypto_async_request *base, int err)
{
	u8 *icv;
	struct iphdr *iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	struct ah_data *ahp = x->data;
	struct iphdr *top_iph = ip_hdr(skb);
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int ihl = ip_hdrlen(skb);

	iph = AH_SKB_CB(skb)->tmp;
	icv = ah_tmp_icv(ahp->ahash, iph, ihl);
	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph + 1, iph + 1, top_iph->ihl * 4 - sizeof(struct iphdr));
	}

	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}
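/*
 * Build and insert the AH header on transmit: fill in SPI, sequence
 * number and next-header, compute the ICV over the packet with mutable
 * fields zeroed, and write the truncated ICV into the header.
 */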
static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	int nfrags;
	int ihl;
	u8 *icv;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct iphdr *iph, *top_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;

	ahp = x->data;
	ahash = ahp->ahash;

	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;
	nfrags = err;

	skb_push(skb, -skb_network_offset(skb));
	ah = ip_auth_hdr(skb);
	ihl = ip_hdrlen(skb);

	err = -ENOMEM;
	iph = ah_alloc_tmp(ahash, nfrags, ihl);
	if (!iph)
		goto out;

	icv = ah_tmp_icv(ahash, iph, ihl);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);
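	/*
	 * RFC 4302: mutable IP header fields (TOS, TTL, frag_off, the
	 * checksum, certain options and the ICV itself) must be zeroed
	 * for the ICV calculation.  Save the originals into the tmp
	 * buffer so they can be restored once the digest is done.
	 */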
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	top_iph = ip_hdr(skb);

	iph->tos = top_iph->tos;
	iph->ttl = top_iph->ttl;
	iph->frag_off = top_iph->frag_off;

	if (top_iph->ihl != 5) {
		iph->daddr = top_iph->daddr;
		memcpy(iph + 1, top_iph + 1, top_iph->ihl * 4 - sizeof(struct iphdr));
		err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
		if (err)
			goto out_free;
	}

	ah->nexthdr = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_AH;

	top_iph->tos = 0;
	top_iph->tot_len = htons(skb->len);
	top_iph->frag_off = 0;
	top_iph->ttl = 0;
	top_iph->check = 0;

	if (x->props.flags & XFRM_STATE_ALIGN4)
		ah->hdrlen = (XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;
	else
		ah->hdrlen = (XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len) >> 2) - 2;

	ah->reserved = 0;
	ah->spi = x->id.spi;
	ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	ahash_request_set_crypt(req, sg, icv, skb->len);
	ahash_request_set_callback(req, 0, ah_output_done, skb);

	AH_SKB_CB(skb)->tmp = iph;
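	/*
	 * -EINPROGRESS means the hash is being computed asynchronously
	 * and ah_output_done() will finish the packet; -EBUSY means the
	 * crypto queue is full, so the packet is dropped.
	 */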
	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		if (err == -EBUSY)
			err = NET_XMIT_DROP;
		goto out_free;
	}

	memcpy(ah->auth_data, icv, ahp->icv_trunc_len);

	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph + 1, iph + 1, top_iph->ihl * 4 - sizeof(struct iphdr));
	}

out_free:
	kfree(iph);
out:
	return err;
}
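/*
 * Completion callback for asynchronous ICV verification on input:
 * compare the computed ICV against the one received in the AH header,
 * restore the saved IP header, strip the AH header and resume xfrm
 * input processing.
 */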
static void ah_input_done(struct crypto_async_request *base, int err)
{
	u8 *auth_data;
	u8 *icv;
	struct iphdr *work_iph;
	struct sk_buff *skb = base->data;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ah_data *ahp = x->data;
	struct ip_auth_hdr *ah = ip_auth_hdr(skb);
	int ihl = ip_hdrlen(skb);
	int ah_hlen = (ah->hdrlen + 2) << 2;

	work_iph = AH_SKB_CB(skb)->tmp;
	auth_data = ah_tmp_auth(work_iph, ihl);
	icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);

	err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
	if (err)
		goto out;

	err = ah->nexthdr;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);

	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);
out:
	kfree(AH_SKB_CB(skb)->tmp);
	xfrm_input_resume(skb, err);
}
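/*
 * Verify and strip the AH header on receive: save the IP header, zero
 * its mutable fields, recompute the ICV over the packet, check it
 * against the received value, and on success remove the AH header.
 */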
static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
{
	int ah_hlen;
	int ihl;
	int nexthdr;
	int nfrags;
	u8 *auth_data;
	u8 *icv;
	struct sk_buff *trailer;
	struct crypto_ahash *ahash;
	struct ahash_request *req;
	struct scatterlist *sg;
	struct iphdr *iph, *work_iph;
	struct ip_auth_hdr *ah;
	struct ah_data *ahp;
	int err = -ENOMEM;

	if (!pskb_may_pull(skb, sizeof(*ah)))
		goto out;

	ah = (struct ip_auth_hdr *)skb->data;
	ahp = x->data;
	ahash = ahp->ahash;

	nexthdr = ah->nexthdr;
	ah_hlen = (ah->hdrlen + 2) << 2;
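	/*
	 * ah->hdrlen counts 32-bit words minus 2 (RFC 4302), so the
	 * on-the-wire header length must match the aligned size of the
	 * AH header plus either the full or the truncated ICV length.
	 */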
	if (x->props.flags & XFRM_STATE_ALIGN4) {
		if (ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_full_len) &&
		    ah_hlen != XFRM_ALIGN4(sizeof(*ah) + ahp->icv_trunc_len))
			goto out;
	} else {
		if (ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_full_len) &&
		    ah_hlen != XFRM_ALIGN8(sizeof(*ah) + ahp->icv_trunc_len))
			goto out;
	}

	if (!pskb_may_pull(skb, ah_hlen))
		goto out;

	/* We are going to _remove_ AH header to keep sockets happy,
	 * so... Later this can change. */
	if (skb_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		goto out;

	skb->ip_summed = CHECKSUM_NONE;

	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
		goto out;
	nfrags = err;

	ah = (struct ip_auth_hdr *)skb->data;
	iph = ip_hdr(skb);
	ihl = ip_hdrlen(skb);

	work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len);
	if (!work_iph)
		goto out;

	auth_data = ah_tmp_auth(work_iph, ihl);
	icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
	req = ah_tmp_req(ahash, icv);
	sg = ah_req_sg(ahash, req);
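	/*
	 * Save the original IP header and the received ICV into the tmp
	 * buffer, then zero every mutable field (ICV, TTL, TOS, frag_off,
	 * checksum and mutable options) so the digest is computed over
	 * the packet exactly as the sender saw it.
	 */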
	memcpy(work_iph, iph, ihl);
	memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
	memset(ah->auth_data, 0, ahp->icv_trunc_len);

	iph->ttl = 0;
	iph->tos = 0;
	iph->frag_off = 0;
	iph->check = 0;
	if (ihl > sizeof(*iph)) {
		__be32 dummy;
		err = ip_clear_mutable_options(iph, &dummy);
		if (err)
			goto out_free;
	}

	skb_push(skb, ihl);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	ahash_request_set_crypt(req, sg, icv, skb->len);
	ahash_request_set_callback(req, 0, ah_input_done, skb);

	AH_SKB_CB(skb)->tmp = work_iph;
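	/*
	 * -EINPROGRESS means the hash is being computed asynchronously;
	 * ah_input_done() will finish verification and resume input.
	 */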
	err = crypto_ahash_digest(req);
	if (err) {
		if (err == -EINPROGRESS)
			goto out;

		goto out_free;
	}

	err = memcmp(icv, auth_data, ahp->icv_trunc_len) ? -EBADMSG : 0;
	if (err)
		goto out_free;

	skb->network_header += ah_hlen;
	memcpy(skb_network_header(skb), work_iph, ihl);
	__skb_pull(skb, ah_hlen + ihl);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	err = nexthdr;

out_free:
	kfree(work_iph);
out:
	return err;
}
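/*
 * ICMP error handler for AH: on a "fragmentation needed" destination
 * unreachable, update the path MTU for the matching SA; on a redirect,
 * update the route.  All other ICMP errors are ignored.
 */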
static void ah4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_auth_hdr *ah = (struct ip_auth_hdr *)(skb->data + (iph->ihl << 2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return;
		/* fall through */
	case ICMP_REDIRECT:
		break;
	default:
		return;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      ah->spi, IPPROTO_AH, AF_INET);
	if (!x)
		return;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
		atomic_inc(&flow_cache_genid);
		rt_genid_bump(net);

		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
	} else
		ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
	xfrm_state_put(x);
}
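/*
 * Initialize AH state for an SA: allocate the hash transform named by
 * the SA's authentication algorithm, set its key, and record the full
 * and truncated ICV lengths plus the resulting AH header length.
 */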
static int ah_init_state(struct xfrm_state *x)
{
	struct ah_data *ahp = NULL;
	struct xfrm_algo_desc *aalg_desc;
	struct crypto_ahash *ahash;

	if (!x->aalg)
		goto error;

	if (x->encap)
		goto error;

	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
	if (!ahp)
		return -ENOMEM;

	ahash = crypto_alloc_ahash(x->aalg->alg_name, 0, 0);
	if (IS_ERR(ahash))
		goto error;

	ahp->ahash = ahash;
	if (crypto_ahash_setkey(ahash, x->aalg->alg_key,
				(x->aalg->alg_key_len + 7) / 8))
		goto error;

	/*
	 * Lookup the algorithm description maintained by xfrm_algo,
	 * verify crypto transform properties, and store information
	 * we need for AH processing. This lookup cannot fail here
	 * after a successful crypto_alloc_ahash().
	 */
	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
	BUG_ON(!aalg_desc);

	if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
	    crypto_ahash_digestsize(ahash)) {
		pr_info("%s: %s digestsize %u != %hu\n",
			__func__, x->aalg->alg_name,
			crypto_ahash_digestsize(ahash),
			aalg_desc->uinfo.auth.icv_fullbits / 8);
		goto error;
	}

	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits / 8;
	ahp->icv_trunc_len = x->aalg->alg_trunc_len / 8;

	BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);

	if (x->props.flags & XFRM_STATE_ALIGN4)
		x->props.header_len = XFRM_ALIGN4(sizeof(struct ip_auth_hdr) +
						  ahp->icv_trunc_len);
	else
		x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
						  ahp->icv_trunc_len);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	x->data = ahp;

	return 0;

error:
	if (ahp) {
		crypto_free_ahash(ahp->ahash);
		kfree(ahp);
	}
	return -EINVAL;
}
static void ah_destroy(struct xfrm_state *x)
{
	struct ah_data *ahp = x->data;

	if (!ahp)
		return;

	crypto_free_ahash(ahp->ahash);
	kfree(ahp);
}
static const struct xfrm_type ah_type =
{
	.description	= "AH4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_AH,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= ah_init_state,
	.destructor	= ah_destroy,
	.input		= ah_input,
	.output		= ah_output
};
static const struct net_protocol ah4_protocol = {
	.handler	= xfrm4_rcv,
	.err_handler	= ah4_err,
	.no_policy	= 1,
	.netns_ok	= 1,
};
static int __init ah4_init(void)
{
	if (xfrm_register_type(&ah_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (inet_add_protocol(&ah4_protocol, IPPROTO_AH) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&ah_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}
static void __exit ah4_fini(void)
{
	if (inet_del_protocol(&ah4_protocol, IPPROTO_AH) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&ah_type, AF_INET) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}
module_init(ah4_init);
module_exit(ah4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_AH);