/*
 * xfrm algorithm interface
 *
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/crypto.h>
#include <net/xfrm.h>
#if defined(CONFIG_INET_AH) || defined(CONFIG_INET_AH_MODULE) || defined(CONFIG_INET6_AH) || defined(CONFIG_INET6_AH_MODULE)
#include <net/ah.h>
#endif
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)
#include <net/esp.h>
#endif
#include <asm/scatterlist.h>
/*
 * Algorithms supported by IPsec.  These entries contain properties which
 * are used in key negotiation and xfrm processing, and are used to verify
 * that instantiated crypto transforms have correct parameters for IPsec
 * purposes.
 */
static struct xfrm_algo_desc aalg_list[] = {
{
	.name = "hmac(digest_null)",
	.compat = "digest_null",
	.desc = {
		.sadb_alg_id = SADB_X_AALG_NULL,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	.name = "hmac(md5)",
	.compat = "md5",
	.desc = {
		.sadb_alg_id = SADB_AALG_MD5HMAC,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 128
	}
},
{
	.name = "hmac(sha1)",
	.compat = "sha1",
	.desc = {
		.sadb_alg_id = SADB_AALG_SHA1HMAC,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
{
	.name = "hmac(sha256)",
	.compat = "sha256",
	.desc = {
		.sadb_alg_id = SADB_X_AALG_SHA2_256HMAC,
		.sadb_alg_minbits = 256,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "hmac(ripemd160)",
	.compat = "ripemd160",
	.desc = {
		.sadb_alg_id = SADB_X_AALG_RIPEMD160HMAC,
		.sadb_alg_minbits = 160,
		.sadb_alg_maxbits = 160
	}
},
{
	.name = "xcbc(aes)",
	.desc = {
		.sadb_alg_id = SADB_X_AALG_AES_XCBC_MAC,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 128
	}
},
};
static struct xfrm_algo_desc ealg_list[] = {
{
	.name = "ecb(cipher_null)",
	.compat = "cipher_null",
	.desc = {
		.sadb_alg_id = SADB_EALG_NULL,
		.sadb_alg_minbits = 0,
		.sadb_alg_maxbits = 0
	}
},
{
	.name = "cbc(des)",
	.compat = "des",
	.desc = {
		.sadb_alg_id = SADB_EALG_DESCBC,
		.sadb_alg_minbits = 64,
		.sadb_alg_maxbits = 64
	}
},
{
	.name = "cbc(des3_ede)",
	.compat = "des3_ede",
	.desc = {
		.sadb_alg_id = SADB_EALG_3DESCBC,
		.sadb_alg_minbits = 192,
		.sadb_alg_maxbits = 192
	}
},
{
	.name = "cbc(cast128)",
	.desc = {
		.sadb_alg_id = SADB_X_EALG_CASTCBC,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 128
	}
},
{
	.name = "cbc(blowfish)",
	.compat = "blowfish",
	.desc = {
		.sadb_alg_id = SADB_X_EALG_BLOWFISHCBC,
		.sadb_alg_minbits = 40,
		.sadb_alg_maxbits = 448
	}
},
{
	.name = "cbc(aes)",
	.compat = "aes",
	.desc = {
		.sadb_alg_id = SADB_X_EALG_AESCBC,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "cbc(serpent)",
	.desc = {
		.sadb_alg_id = SADB_X_EALG_SERPENTCBC,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "cbc(camellia)",
	.desc = {
		.sadb_alg_id = SADB_X_EALG_CAMELLIACBC,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
{
	.name = "cbc(twofish)",
	.desc = {
		.sadb_alg_id = SADB_X_EALG_TWOFISHCBC,
		.sadb_alg_minbits = 128,
		.sadb_alg_maxbits = 256
	}
},
};
static struct xfrm_algo_desc calg_list[] = {
{
	.name = "deflate",
	.desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
},
{
	.name = "lzs",
	.desc = { .sadb_alg_id = SADB_X_CALG_LZS }
},
{
	.name = "lzjh",
	.desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
},
};
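/*
 * Illustrative note (not part of the original file): each table entry ties
 * a pfkeyv2 SADB identifier to the Linux crypto API transform name used by
 * the kernel, plus the legacy pre-template name in .compat where one exists.
 * For example, a SADB_AALG_SHA1HMAC request and a netlink "hmac(sha1)"
 * request resolve to the same descriptor:
 *
 *	struct xfrm_algo_desc *d = xfrm_aalg_get_byid(SADB_AALG_SHA1HMAC);
 *	d->name is "hmac(sha1)", d->desc.sadb_alg_minbits is 160
 *
 * The lookup helpers below implement this mapping.
 */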
static inline int aalg_entries(void)
{
	return ARRAY_SIZE(aalg_list);
}

static inline int ealg_entries(void)
{
	return ARRAY_SIZE(ealg_list);
}

static inline int calg_entries(void)
{
	return ARRAY_SIZE(calg_list);
}
/* Todo: generic iterators */
struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < aalg_entries(); i++) {
		if (aalg_list[i].desc.sadb_alg_id == alg_id) {
			if (aalg_list[i].available)
				return &aalg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byid);

struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < ealg_entries(); i++) {
		if (ealg_list[i].desc.sadb_alg_id == alg_id) {
			if (ealg_list[i].available)
				return &ealg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byid);

struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id)
{
	int i;

	for (i = 0; i < calg_entries(); i++) {
		if (calg_list[i].desc.sadb_alg_id == alg_id) {
			if (calg_list[i].available)
				return &calg_list[i];
			else
				break;
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byid);
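/*
 * Usage sketch (illustrative only, not part of this file): af_key resolves
 * the numeric algorithm identifiers carried in SADB messages with these
 * helpers, roughly:
 *
 *	struct xfrm_algo_desc *a = xfrm_aalg_get_byid(sa->sadb_sa_auth);
 *	if (!a)
 *		return -ENOSYS;
 *
 * Note that an entry is only returned once its "available" flag has been
 * set, normally by xfrm_probe_algs() below.
 */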
static struct xfrm_algo_desc *xfrm_get_byname(struct xfrm_algo_desc *list,
					      int entries, u32 type, u32 mask,
					      char *name, int probe)
{
	int i, status;

	if (!name)
		return NULL;

	for (i = 0; i < entries; i++) {
		if (strcmp(name, list[i].name) &&
		    (!list[i].compat || strcmp(name, list[i].compat)))
			continue;

		if (list[i].available)
			return &list[i];

		if (!probe)
			break;

		status = crypto_has_alg(list[i].name, type,
					mask | CRYPTO_ALG_ASYNC);
		if (!status)
			break;

		list[i].available = status;
		return &list[i];
	}
	return NULL;
}
struct xfrm_algo_desc *xfrm_aalg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(aalg_list, aalg_entries(),
			       CRYPTO_ALG_TYPE_HASH, CRYPTO_ALG_TYPE_HASH_MASK,
			       name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byname);

struct xfrm_algo_desc *xfrm_ealg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(ealg_list, ealg_entries(),
			       CRYPTO_ALG_TYPE_BLKCIPHER, CRYPTO_ALG_TYPE_MASK,
			       name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byname);

struct xfrm_algo_desc *xfrm_calg_get_byname(char *name, int probe)
{
	return xfrm_get_byname(calg_list, calg_entries(),
			       CRYPTO_ALG_TYPE_COMPRESS, CRYPTO_ALG_TYPE_MASK,
			       name, probe);
}
EXPORT_SYMBOL_GPL(xfrm_calg_get_byname);
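/*
 * Usage sketch (illustrative only, not part of this file): the netlink
 * interface looks algorithms up by name and may ask for an on-demand probe:
 *
 *	struct xfrm_algo_desc *algo;
 *
 *	algo = xfrm_ealg_get_byname("cbc(aes)", 1);
 *	if (algo == NULL)
 *		return -ENOSYS;
 *
 * With probe set, a matching entry that is not yet marked available is
 * checked against the crypto layer via crypto_has_alg() and its
 * "available" flag updated, as implemented in xfrm_get_byname() above.
 */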
struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx)
{
	if (idx >= aalg_entries())
		return NULL;

	return &aalg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_aalg_get_byidx);

struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx)
{
	if (idx >= ealg_entries())
		return NULL;

	return &ealg_list[idx];
}
EXPORT_SYMBOL_GPL(xfrm_ealg_get_byidx);
/*
 * Probe for the availability of crypto algorithms, and set the available
 * flag for any algorithms found on the system.  This is typically called by
 * pfkey during userspace SA add, update or register.
 */
void xfrm_probe_algs(void)
{
	int i, status;

	BUG_ON(in_softirq());

	for (i = 0; i < aalg_entries(); i++) {
		status = crypto_has_hash(aalg_list[i].name, 0,
					 CRYPTO_ALG_ASYNC);
		if (aalg_list[i].available != status)
			aalg_list[i].available = status;
	}

	for (i = 0; i < ealg_entries(); i++) {
		status = crypto_has_blkcipher(ealg_list[i].name, 0,
					      CRYPTO_ALG_ASYNC);
		if (ealg_list[i].available != status)
			ealg_list[i].available = status;
	}

	for (i = 0; i < calg_entries(); i++) {
		status = crypto_has_comp(calg_list[i].name, 0,
					 CRYPTO_ALG_ASYNC);
		if (calg_list[i].available != status)
			calg_list[i].available = status;
	}
}
EXPORT_SYMBOL_GPL(xfrm_probe_algs);
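/*
 * Usage sketch (illustrative only, not part of this file): af_key calls
 * this from process context before reporting supported algorithms, e.g.
 *
 *	xfrm_probe_algs();
 *	n_auth = xfrm_count_auth_supported();
 *	n_enc  = xfrm_count_enc_supported();
 *
 * The BUG_ON(in_softirq()) above documents that probing may trigger crypto
 * module loading and therefore must not run in softirq context.
 */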
int xfrm_count_auth_supported(void)
{
	int i, n;

	for (i = 0, n = 0; i < aalg_entries(); i++)
		if (aalg_list[i].available)
			n++;
	return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_auth_supported);

int xfrm_count_enc_supported(void)
{
	int i, n;

	for (i = 0, n = 0; i < ealg_entries(); i++)
		if (ealg_list[i].available)
			n++;
	return n;
}
EXPORT_SYMBOL_GPL(xfrm_count_enc_supported);
/* Move to common area: it is shared with AH. */

int skb_icv_walk(const struct sk_buff *skb, struct hash_desc *desc,
		 int offset, int len, icv_update_fn_t icv_update)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int err;
	struct scatterlist sg;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;

		sg.page = virt_to_page(skb->data + offset);
		sg.offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg.length = copy;

		err = icv_update(desc, &sg, copy);
		if (unlikely(err))
			return err;

		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;

			sg.page = frag->page;
			sg.offset = frag->page_offset + offset-start;
			sg.length = copy;

			err = icv_update(desc, &sg, copy);
			if (unlikely(err))
				return err;

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				err = skb_icv_walk(list, desc, offset-start,
						   copy, icv_update);
				if (unlikely(err))
					return err;
				if ((len -= copy) == 0)
					return 0;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return 0;
}
EXPORT_SYMBOL_GPL(skb_icv_walk);
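/*
 * Usage sketch (illustrative only, not part of this file): AH computes its
 * ICV by walking the whole packet with a hash descriptor, roughly:
 *
 *	err = crypto_hash_init(&desc);
 *	if (!err)
 *		err = skb_icv_walk(skb, &desc, 0, skb->len,
 *				   crypto_hash_update);
 *
 * Each linear area, page fragment and frag_list member is fed to the
 * update callback through a single on-stack scatterlist entry.
 */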
#if defined(CONFIG_INET_ESP) || defined(CONFIG_INET_ESP_MODULE) || defined(CONFIG_INET6_ESP) || defined(CONFIG_INET6_ESP_MODULE)

/* Looking generic it is not used in another places. */

int
skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg[elt].page = virt_to_page(skb->data + offset);
		sg[elt].offset = (unsigned long)(skb->data + offset) % PAGE_SIZE;
		sg[elt].length = copy;
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		BUG_TRAP(start <= offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg[elt].page = frag->page;
			sg[elt].offset = frag->page_offset+offset-start;
			sg[elt].length = copy;
			elt++;
			if ((len -= copy) == 0)
				return elt;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			BUG_TRAP(start <= offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				elt += skb_to_sgvec(list, sg+elt, offset - start, copy);
				if ((len -= copy) == 0)
					return elt;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}
EXPORT_SYMBOL_GPL(skb_to_sgvec);
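/*
 * Usage sketch (illustrative only, not part of this file): ESP maps the
 * payload onto a scatterlist before handing it to the block cipher, along
 * the lines of
 *
 *	nelt = skb_to_sgvec(skb, sg, payload_offset, clen);
 *	err = crypto_blkcipher_encrypt(&desc, sg, sg, clen);
 *
 * The caller must size the scatterlist for the worst case; skb_cow_data()
 * below reports the required number of entries as its return value.
 */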
/* Check that skb data bits are writable. If they are not, copy data
 * to newly created private area. If "tailbits" is given, make sure that
 * tailbits bytes beyond current end of skb are writable.
 *
 * Returns amount of elements of scatterlist to load for subsequent
 * transformations and pointer to writable trailer skb.
 */

int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most of packets will go this way. */
	if (!skb_shinfo(skb)->frag_list) {
		/* A little of trouble, not enough of space for trailer.
		 * This should not happen, when stack is tuned to generate
		 * good frames. OK, on miss we reallocate and reserve even more
		 * space, 128 bytes is fair. */

		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery. We are in troubles, going to mincer fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone,
		 * this can happen on input. Copy it and everything
		 * after it. */

		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last, worry about trailer. */

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_shinfo(skb1)->frag_list ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_shinfo(skb1)->frag_list) {
			struct sk_buff *skb2;

			/* Fuck, we are miserable poor guys... */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link new skb, drop old one */

			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
EXPORT_SYMBOL_GPL(skb_cow_data);
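/*
 * Usage sketch (illustrative only, not part of this file): ESP output makes
 * the packet writable and reserves room for its trailer before encrypting
 * in place, roughly:
 *
 *	struct sk_buff *trailer;
 *	int nfrags = skb_cow_data(skb, trailer_len, &trailer);
 *	if (nfrags < 0)
 *		goto error;
 *
 * The return value doubles as the number of scatterlist entries needed by
 * skb_to_sgvec() above.
 */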
void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);

#endif
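/*
 * Usage sketch (illustrative only, not part of this file): pskb_put()
 * extends the writable trailer obtained from skb_cow_data() while keeping
 * the head skb's length accounting correct:
 *
 *	u8 *tail = pskb_put(skb, trailer, padlen + 2 + alen);
 *
 * Calling skb_put() on the trailer alone would leave skb->len and
 * skb->data_len of the head skb stale whenever trailer != skb.
 */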