/*
 * Ethernet Cheap Crypt (ccrypt).
 * (C) 2006 Dawid Ciezarkiewicz <dpc@asn.pl>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/ccrypt.h>

#include <linux/if_arp.h>
#include <linux/if_pppox.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/scatterlist.h>

#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <net/checksum.h>
/*
 * IP ethertype will be changed to this,
 * so that some bridges wouldn't try to be smarter
 * than they should be.
 */
#define ETHERTYPE_CCRYPTEDIP 0x0999
struct ccrypt_rx *ccrypt_rx_alloc(void)
{
	struct ccrypt_rx *new_cc = kmalloc(sizeof(struct ccrypt_rx), GFP_KERNEL);

	if (!new_cc)
		return NULL;

	memset(new_cc, 0, sizeof(struct ccrypt_rx));

	return new_cc;
}
struct ccrypt_tx *ccrypt_tx_alloc(void)
{
	struct ccrypt_tx *new_cc = kmalloc(sizeof(struct ccrypt_tx), GFP_KERNEL);

	if (!new_cc)
		return NULL;

	memset(new_cc, 0, sizeof(struct ccrypt_tx));

	return new_cc;
}
/*
 * Caller must hold ccrypt_rx_lock.
 */
static void ccrypt_rx_free(struct ccrypt_rx *cc_rx)
{
	unsigned int key_no, iv_no;

	for (key_no = 0; key_no < 2; key_no++) {
		if (cc_rx->tfms[key_no]) {
			crypto_free_blkcipher(cc_rx->tfms[key_no]);
			cc_rx->tfms[key_no] = NULL;
		}

		for (iv_no = 0; iv_no < 2; iv_no++) {
			if (cc_rx->last_recv_iv[key_no][iv_no]) {
				kfree(cc_rx->last_recv_iv[key_no][iv_no]);
				cc_rx->last_recv_iv[key_no][iv_no] = NULL;
			}
		}
	}
}
/*
 * Caller must hold ccrypt_tx_lock.
 */
void ccrypt_tx_free(struct ccrypt_tx *cc_tx)
{
	if (cc_tx->last_sent_iv) {
		kfree(cc_tx->last_sent_iv);
		cc_tx->last_sent_iv = NULL;
	}

	if (cc_tx->tfm) {
		crypto_free_blkcipher(cc_tx->tfm);
		cc_tx->tfm = NULL;
	}
}
/*
 * For key switching unification.
 */
typedef int key_switch_f(struct net_device *dev, char *algorithm,
			 u8 *key, unsigned int keylen);
/*
 * Switch key in ccrypt_tx.
 *
 * Caller must hold ccrypt_tx_lock.
 */
int ccrypt_tx_switch_key(struct ccrypt_tx *cc_tx, char *algorithm,
			 u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher *new_tfm;
	u8 *new_iv;
	unsigned int new_iv_size;
	int res;

	new_tfm = crypto_alloc_blkcipher(algorithm, 0, 0);
	if (IS_ERR(new_tfm))
		return PTR_ERR(new_tfm);

	res = crypto_blkcipher_setkey(new_tfm, key, keylen);
	if (res) {
		crypto_free_blkcipher(new_tfm);
		return res;
	}

	new_iv_size = crypto_blkcipher_ivsize(new_tfm);

	if (new_iv_size != crypto_blkcipher_blocksize(new_tfm)) {
		printk(KERN_ERR "ccrypt: iv_len != bsize - strange\n");
		crypto_free_blkcipher(new_tfm);
		return -EINVAL;
	}

	/* allocate new iv vector for new key */
	new_iv = kmalloc(new_iv_size, GFP_KERNEL);
	if (!new_iv) {
		printk(KERN_ERR "ccrypt: couldn't allocate %u bytes\n",
		       new_iv_size);
		crypto_free_blkcipher(new_tfm);
		return -ENOMEM;
	}

	memset(new_iv, 0, new_iv_size);

	if (cc_tx->last_sent_iv) {
		kfree(cc_tx->last_sent_iv);
	}

	cc_tx->last_sent_iv = new_iv;

	if (cc_tx->tfm) {
		crypto_free_blkcipher(cc_tx->tfm);
	}

	cc_tx->tfm = new_tfm;

	return 0;
}
/*
 * Switch key in ccrypt_rx.
 *
 * Caller must hold ccrypt_rx_lock.
 */
int ccrypt_rx_switch_key(struct ccrypt_rx *cc_rx, char *algorithm,
			 u8 *key, unsigned int keylen)
{
	struct crypto_blkcipher *new_tfm;
	u8 *new_iv[2];
	unsigned int new_iv_size;
	unsigned int cur_iv_no;
	int res;

	new_tfm = crypto_alloc_blkcipher(algorithm, 0, 0);
	if (IS_ERR(new_tfm))
		return PTR_ERR(new_tfm);

	res = crypto_blkcipher_setkey(new_tfm, key, keylen);
	if (res) {
		crypto_free_blkcipher(new_tfm);
		return res;
	}

	new_iv_size = crypto_blkcipher_ivsize(new_tfm);

	/* allocate new iv vectors for new key */
	new_iv[0] = kmalloc(new_iv_size, GFP_KERNEL);
	new_iv[1] = kmalloc(new_iv_size, GFP_KERNEL);

	if (!new_iv[0] || !new_iv[1]) {
		if (new_iv[0]) {
			kfree(new_iv[0]);
		}

		if (new_iv[1]) {
			kfree(new_iv[1]);
		}

		crypto_free_blkcipher(new_tfm);
		printk(KERN_ERR "ccrypt: kmalloc(%u) failed.\n",
		       new_iv_size);
		return -ENOMEM;
	}

	/* zero new ivs and free old ones, then replace them */
	for (cur_iv_no = 0; cur_iv_no < 2; ++cur_iv_no) {
		memset(new_iv[cur_iv_no], '\0', new_iv_size);

		if (cc_rx->last_recv_iv[1][cur_iv_no]) {
			kfree(cc_rx->last_recv_iv[1][cur_iv_no]);
		}

		cc_rx->last_recv_iv[1][cur_iv_no] =
			cc_rx->last_recv_iv[0][cur_iv_no];
		cc_rx->last_recv_iv[0][cur_iv_no] = new_iv[cur_iv_no];
	}

	if (cc_rx->tfms[1]) {
		crypto_free_blkcipher(cc_rx->tfms[1]);
	}

	cc_rx->tfms[1] = cc_rx->tfms[0];
	cc_rx->tfms[0] = new_tfm;

	cc_rx->last_recv_iv_matched[1] =
		cc_rx->last_recv_iv_matched[0];
	cc_rx->last_recv_iv_matched[0] = 1;

	cc_rx->after_switch = 1;

	return 0;
}
/*
 * Reset rx key. Stop using rx encryption.
 */
void ccrypt_rx_reset(struct net_device *dev)
{
	spin_lock(&dev->ccrypt_rx_lock);

	if (dev->ccrypt_rx) {
		ccrypt_rx_free(dev->ccrypt_rx);
		kfree(dev->ccrypt_rx);
		dev->ccrypt_rx = NULL;
	}

	spin_unlock(&dev->ccrypt_rx_lock);
}
/*
 * Reset tx key. Stop using tx encryption.
 */
void ccrypt_tx_reset(struct net_device *dev)
{
	spin_lock(&dev->ccrypt_tx_lock);

	if (dev->ccrypt_tx) {
		ccrypt_tx_free(dev->ccrypt_tx);
		kfree(dev->ccrypt_tx);
		dev->ccrypt_tx = NULL;
	}

	spin_unlock(&dev->ccrypt_tx_lock);
}
/*
 * Called from user context.
 */
int rx_switch(struct net_device *dev, char *algorithm,
	      u8 *key, unsigned int keylen)
{
	int res;

	if (strcmp(algorithm, "null") == 0) {
		ccrypt_rx_reset(dev);
		return 0;
	}

	spin_lock(&dev->ccrypt_rx_lock);

	if (!dev->ccrypt_rx) {
		dev->ccrypt_rx = ccrypt_rx_alloc();

		if (!dev->ccrypt_rx) {
			spin_unlock(&dev->ccrypt_rx_lock);
			return -ENOMEM;
		}
	}

	res = ccrypt_rx_switch_key(dev->ccrypt_rx, algorithm, key, keylen);

	spin_unlock(&dev->ccrypt_rx_lock);

	return res;
}
/*
 * Called from user context.
 */
int tx_switch(struct net_device *dev, char *algorithm,
	      u8 *key, unsigned int keylen)
{
	int res;

	if (strcmp(algorithm, "null") == 0) {
		ccrypt_tx_reset(dev);
		return 0;
	}

	spin_lock(&dev->ccrypt_tx_lock);

	if (!dev->ccrypt_tx) {
		dev->ccrypt_tx = ccrypt_tx_alloc();

		if (!dev->ccrypt_tx) {
			spin_unlock(&dev->ccrypt_tx_lock);
			return -ENOMEM;
		}
	}

	res = ccrypt_tx_switch_key(dev->ccrypt_tx, algorithm, key, keylen);

	spin_unlock(&dev->ccrypt_tx_lock);

	return res;
}
/*
 * Handle key writes - both rx and tx.
 *
 * Check permissions, copy data from user, parse it, call the appropriate
 * switch handler.
 *
 * Returns the number of bytes consumed on success.
 */
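/*
 * Illustrative usage (an assumption drawn from the parser below, not part
 * of the original description): the written string is expected to look
 * like "<algorithm>:<lowercase hex key>", two hex digits per key byte,
 * and "null" alone disables encryption for that direction.  For example,
 * through the corresponding sysfs attribute, something like:
 *
 *	echo -n "cbc(aes):00112233445566778899aabbccddeeff" > ccrypt_tx
 *
 * (the exact attribute path depends on how the device registers it).
 */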
int ccrypt_key_store_handle(struct net_device *dev,
			    const char *user_buffer,
			    size_t count,
			    key_switch_f switch_handler)
{
	const unsigned int max_alg_len = CRYPTO_MAX_ALG_NAME;

	/* key length in bytes */
	const unsigned int max_key_len = 64;

	/* key length as string */
	const unsigned int max_key_string_len = max_key_len * 2;

	/* alg + ':' + keystr + '\0' */
	const unsigned int max_buffer_len =
		max_alg_len + 1 + max_key_string_len + 1;

	unsigned int key_len;
	unsigned int i, j;
	int alg_string_ok = 0;
	int res;

	char buffer[max_buffer_len];
	char alg_string[max_alg_len];
	u8 key[max_key_len];

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (count > max_buffer_len - 1) {
		return -EINVAL;
	}

	/* sysfs hands us a kernel buffer here */
	memcpy(buffer, user_buffer, count);
	buffer[count] = '\0';

	/* cut the algorithm name off the "alg:hexkey" string */
	for (i = 0; i < max_alg_len && i <= count; ++i) {
		if (buffer[i] == ':' || buffer[i] == '\0') {
			alg_string[i] = '\0';
			alg_string_ok = 1;

			if (buffer[i] == ':')
				++i;

			break;
		}

		alg_string[i] = buffer[i];
	}

	if (!alg_string_ok) {
		return -EINVAL;
	}

	/* parse the lowercase hex key - two digits per key byte */
	key_len = 0;
	j = i;

	for (i = 0; i < max_key_len; i++, key_len++, j += 2) {
		u8 a, b;

		if (buffer[j] == 0) {
			break;
		}

		if (buffer[j] >= '0' && buffer[j] <= '9') {
			a = buffer[j] - '0';
		}
		else if (buffer[j] >= 'a' && buffer[j] <= 'f') {
			a = buffer[j] - 'a' + 10;
		}
		else {
			return -EINVAL;
		}

		if (buffer[j + 1] >= '0' && buffer[j + 1] <= '9') {
			b = buffer[j + 1] - '0';
		}
		else if (buffer[j + 1] >= 'a' && buffer[j + 1] <= 'f') {
			b = buffer[j + 1] - 'a' + 10;
		}
		else {
			return -EINVAL;
		}

		key[key_len] = (a << 4) | b;
	}

	res = switch_handler(dev, alg_string, key, key_len);

	if (!res) {
		return count;
	}

	if (res < 0) {
		return res;
	}

	printk(KERN_ERR "Error: ccrypt error - should not be here\n");
	return -EINVAL;
}
ssize_t ccrypt_rx_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t len)
{
	return ccrypt_key_store_handle(to_net_dev(dev), buf, len, rx_switch);
}
ssize_t ccrypt_tx_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t len)
{
	return ccrypt_key_store_handle(to_net_dev(dev), buf, len, tx_switch);
}
ssize_t ccrypt_tx_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	return -EINVAL; /* not implemented yet */
}
ssize_t ccrypt_rx_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	return -EINVAL; /* not implemented yet */
}
/*
 * Check if buffer has right ipv4 structures.
 */
static inline int is_valid_ipv4(struct iphdr *hdr, int len)
{
	u16 tmp_check;

	if (len < sizeof(struct iphdr)) {
		return 0;
	}

	if (hdr->ihl < 5 || hdr->ihl > 15) {
		return 0;
	}

	if (len < sizeof(struct iphdr) + hdr->ihl * 4) {
		return 0;
	}

	tmp_check = hdr->check;
	hdr->check = 0; /* required by ip_fast_csum */

	if (tmp_check != ip_fast_csum((unsigned char *)hdr, hdr->ihl)) {
		hdr->check = tmp_check;
		return 0;
	}

	hdr->check = tmp_check;

	return 1;
}
static inline int is_valid_ip(struct iphdr *hdr, int len)
{
	if (len < sizeof(struct iphdr)) {
		return 0;
	}

	if (hdr->version == 4) {
		return is_valid_ipv4(hdr, len);
	}

	/* only ipv4 is recognized for now */
	return 0;
}
static inline int is_valid_arp(struct arphdr *hdr, int len)
{
	if (len < sizeof(struct arphdr)) {
		return 0;
	}

	switch (hdr->ar_hrd) {
	/* supported hardware layers */
	case __constant_htons(ARPHRD_ETHER):
		break;
	default:
		return 0;
	}

	switch (hdr->ar_pro) {
	/* supported protocols */
	case __constant_htons(ETH_P_IP): /* ipv4 */
		break;
	default:
		return 0;
	}

	/* hardware address length -
	 * as we support only Ethernet ... */
	if (hdr->ar_hln != 6) {
		return 0;
	}

	return 1;
}
static int is_valid_pppoe(u16 ethertype, struct pppoe_hdr *hdr, int len)
{
	if (len < sizeof(struct pppoe_hdr)) {
		return 0;
	}

	if (hdr->type != 1) {
		return 0;
	}

	if (hdr->ver != 1) {
		return 0;
	}

	if (hdr->code) {
		/* discovery stage packets */
		if (ethertype != ETH_P_PPP_DISC) {
			return 0;
		}
	} else {
		/* session stage packets */
		if (ethertype != ETH_P_PPP_SES) {
			return 0;
		}
	}

	return 1;
}
/*
 * Check if decoded buffer is right in needed places.
 *
 * Ethertype should be after htons().
 */
static int is_decoded_buffer_valid(u16 ethertype, u8 *buffer, int len)
{
	/* TODO: add more protocols */
	/* XXX: keep documentation in sync */
	switch (ethertype) {
	case ETH_P_IP:
		if (!is_valid_ip((struct iphdr *)buffer, len)) {
			return 0;
		}
		break;
	case ETH_P_ARP:
		if (!is_valid_arp((struct arphdr *)buffer, len)) {
			return 0;
		}
		break;
	case ETH_P_PPP_DISC:
	case ETH_P_PPP_SES:
		if (!is_valid_pppoe(ethertype, (struct pppoe_hdr *)buffer, len)) {
			return 0;
		}
		break;
	default:
		return 0;
	}

	return 1;
}
/*
 * Save received iv vector in appropriate place.
 */
static inline void save_recv_iv(struct ccrypt_rx *cc_rx,
				unsigned int key_no, unsigned int iv_no,
				u8 *src_buffer, unsigned int len, unsigned int iv_len)
{
	if (likely(len >= iv_len)) {
		memcpy(cc_rx->last_recv_iv[key_no][iv_no],
		       src_buffer, iv_len);
	} else {
		memset(cc_rx->last_recv_iv[key_no][iv_no] + len,
		       0, iv_len - len);
		memcpy(cc_rx->last_recv_iv[key_no][iv_no],
		       src_buffer, len);
	}
}
/*
 * Try to decode incoming packet using skb->dev->ccrypt_rx group.
 *
 * Returns 0 on success,
 * -EINVAL on standard "drop it".
 *
 * Caller must hold ccrypt_rx_lock.
 */
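/*
 * Key/iv trial strategy, summarizing the loops below: up to two keys are
 * kept around a key switch - tfms[0] holds the new key, tfms[1] the old
 * one - and for each key up to two candidate ivs are tried.  A candidate
 * plaintext is only accepted if is_decoded_buffer_valid() recognizes it;
 * once the sender has used the new key, the old key is not tried anymore.
 */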
int ccrypt_decrypt(struct sk_buff **pskb)
{
	struct ccrypt_rx *cc_rx;
	struct crypto_blkcipher *tfm = NULL;
	struct blkcipher_desc desc;
	struct sk_buff *skb = NULL;
	u8 *data;
	u8 *decode_buffer;
	u8 *iv;
	unsigned int len;
	unsigned int aligned_len, unaligned_len;
	unsigned int bsize, iv_len;
	unsigned int key_no, key_no_org, iv_no, i;
	u16 ethertype;
	int res;
	struct scatterlist sg_out;
	struct scatterlist sg_residual;
	struct scatterlist sg;

	skb = *pskb;
	data = skb->data;
	len = skb->len;

	cc_rx = skb->dev->ccrypt_rx;
	cc_rx->valid_counter++;

	if (len < ETH_ZLEN - sizeof(struct ethhdr) - VLAN_HLEN) {
		/* if shorter - it couldn't have been sent by ccrypt_encode */
		return -EINVAL;
	}

	ethertype = htons(*((u16 *)(skb->data - 2)));

	if (ethertype == ETHERTYPE_CCRYPTEDIP) {
		ethertype = ETH_P_IP;
		*((u16 *)(data - 2)) = __constant_htons(ETH_P_IP);
		skb->protocol = __constant_htons(ETH_P_IP);
	} else if (ethertype == ETH_P_8021Q) {
		/* skip the vlan header and look at the encapsulated frame */
		data += VLAN_HLEN;
		len -= VLAN_HLEN;
		ethertype = htons(*((u16 *)(data - 2)));
	}

	/*
	 * original stays in data, all tries will
	 * be validated in decode_buffer
	 */
	decode_buffer = kmalloc(sizeof(u8) * len, GFP_ATOMIC);

	if (!decode_buffer) {
		printk(KERN_ERR "ccrypt_decrypt: kmalloc failed.\n");
		return -ENOMEM;
	}

	sg_set_buf(&sg_out, decode_buffer, len);

	/*
	 * be warned: fancy logic ahead
	 */
	for (key_no_org = 0; key_no_org < 2; ++key_no_org) {

		/* if we are right after key switch, use key 2 first
		 * until you get first msg encoded with new key */
		if (cc_rx->after_switch) {
			key_no = 1 - key_no_org;
		} else {
			key_no = key_no_org;
		}

		if (!cc_rx->after_switch && key_no == 1) {
			/* if sender used new key once - it should
			 * not use old key anymore */
			break;
		}

		tfm = cc_rx->tfms[key_no];
		if (!tfm) {
			continue;
		}

		memset(&desc, 0, sizeof(desc));
		desc.tfm = tfm;

		bsize = crypto_blkcipher_blocksize(tfm);
		unaligned_len = len % bsize;
		aligned_len = len - unaligned_len;
		iv_len = crypto_blkcipher_ivsize(tfm);

		for (iv_no = 0; iv_no < 2; ++iv_no) {
			if (cc_rx->last_recv_iv_matched[key_no] && iv_no == 1) {
				/* skip if there is no point trying
				 * because there is no iv from "wrong packet" */
				break;
			}

			iv = cc_rx->last_recv_iv[key_no][iv_no];

			sg_set_buf(&sg, data, aligned_len);
			crypto_blkcipher_set_iv(tfm, iv, iv_len);

			res = crypto_blkcipher_decrypt(&desc, &sg_out, &sg, aligned_len);
			if (res) {
				printk(KERN_ERR "cipher_decrypt_iv() failed flags=%x\n",
				       tfm->base.crt_flags);
				kfree(decode_buffer);
				return res;
			}

			if (unaligned_len) {
				/* residual block termination for the unaligned tail */
				u8 residual_block[bsize];
				sg_set_buf(&sg_residual, residual_block, bsize);

				if (unlikely(aligned_len < bsize * 2)) {
					sg_set_buf(&sg, iv, bsize);
				} else {
					sg_set_buf(&sg, data, bsize);
				}

				res = crypto_blkcipher_encrypt(&desc,
						&sg_residual, &sg, bsize);
				if (res) {
					printk(KERN_ERR "cipher_encrypt_iv() failed flags=%x\n",
					       tfm->base.crt_flags);
					kfree(decode_buffer);
					return res;
				}

				for (i = 0; i < unaligned_len; ++i) {
					decode_buffer[aligned_len + i] =
						residual_block[i] ^ data[aligned_len + i];
				}
			}

			/* it's a kind of magic ... magic ... magic ... */
			if (is_decoded_buffer_valid(ethertype, decode_buffer, len)) {
				if (key_no == 0) {
					cc_rx->after_switch = 0;
				}

				cc_rx->last_recv_iv_matched[key_no] = 1;
				save_recv_iv(cc_rx, key_no, 0, data, len, iv_len);

				goto finish_match;
			}
		}

		/* there was no match for both ivs for key - save "wrong iv" */
		cc_rx->last_recv_iv_matched[key_no] = 0;
		save_recv_iv(cc_rx, key_no, 1, data, len, iv_len);
	}

	/* finish_no_match: */
	kfree(decode_buffer);

	if (cc_rx->valid_counter < 1000) {
		if (++cc_rx->invalid_counter > 10) {
			if (net_ratelimit()) {
				printk(KERN_INFO "ccrypt_rx on %s detected frequent "
				       "invalid packets\n", skb->dev->name);
			}
			cc_rx->invalid_counter = 0;
		}
	}
	cc_rx->valid_counter = 0;

	return -EINVAL;

finish_match:
	memcpy(data, decode_buffer, len);
	kfree(decode_buffer);

	return 0;
}
/*
 * Returns 0 on success.
 *
 * Caller must hold ccrypt_tx_lock.
 *
 * (*pskb)->data points at the start of frame
 * (where mac.raw should point),
 * (*pskb)->len is overall packet len,
 * *pskb is linearized.
 */
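/*
 * Hypothetical call site (illustration only - the real hook lives outside
 * this file): a transmit-side caller would be expected to do roughly
 *
 *	spin_lock(&dev->ccrypt_tx_lock);
 *	if (dev->ccrypt_tx)
 *		res = ccrypt_encrypt(&skb);
 *	spin_unlock(&dev->ccrypt_tx_lock);
 *
 * with the skb already linearized and skb->data pointing at the start of
 * the frame, as required above.
 */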
int ccrypt_encrypt(struct sk_buff **pskb)
{
	struct crypto_blkcipher *tfm = NULL;
	struct blkcipher_desc desc;
	struct sk_buff *skb = NULL;
	struct sk_buff *nskb = NULL;
	u8 *data;
	u8 *iv;
	unsigned int len;
	unsigned int aligned_len, unaligned_len;
	unsigned int bsize, iv_len;
	unsigned int i, expand;
	struct scatterlist sg;
	struct scatterlist sg_residual;
	unsigned int old_len;
	struct ccrypt_tx *cc_tx = NULL;
	int res;

	skb = *pskb;
	cc_tx = skb->dev->ccrypt_tx;

	tfm = cc_tx->tfm;
	if (unlikely(!tfm))
		return -EINVAL;

	memset(&desc, 0, sizeof(desc));
	desc.tfm = tfm;

	/*
	 * we can't let packet be expanded in the future,
	 * do it now so the Ethernet device wouldn't have to
	 */
	if (skb->len < ETH_ZLEN) {
		if (skb_shared(skb)) {
			nskb = skb_clone(skb, GFP_ATOMIC);
			if (!nskb) {
				if (net_ratelimit()) {
					printk(KERN_ERR "ccrypt_tx: "
					       "couldn't unshare tiny packet\n");
				}
				return -ENOMEM;
			}
			kfree_skb(skb);
			skb = nskb;
			*pskb = nskb;
		}

		old_len = skb->len;
		expand = ETH_ZLEN - old_len;

		if (skb_tailroom(skb) < expand) {
			res = pskb_expand_head(skb, 0, expand, GFP_ATOMIC);
			if (res) {
				if (net_ratelimit()) {
					printk(KERN_ERR "ccrypt_tx: "
					       "couldn't expand tiny packet\n");
				}
				return res;
			}
		}

		skb_put(skb, expand);
		memset(skb->data + old_len, 0, expand);
	}

	data = skb->data + sizeof(struct ethhdr);
	len = skb->len - sizeof(struct ethhdr);

	switch (((struct ethhdr *)(skb->data))->h_proto) {
	case __constant_htons(ETH_P_8021Q):
		/* encrypt the payload behind the vlan header */
		data += VLAN_HLEN;
		len -= VLAN_HLEN;
		break;
	case __constant_htons(ETH_P_IP):
		((struct ethhdr *)(skb->data))->h_proto
			= __constant_htons(ETHERTYPE_CCRYPTEDIP);
		break;
	}

	bsize = crypto_blkcipher_blocksize(tfm);
	unaligned_len = len % bsize;
	aligned_len = len - unaligned_len;
	iv_len = crypto_blkcipher_ivsize(tfm);

	sg_set_buf(&sg, data, aligned_len);
	iv = cc_tx->last_sent_iv;

	crypto_blkcipher_set_iv(tfm, iv, iv_len);

	res = crypto_blkcipher_encrypt(&desc, &sg, &sg, aligned_len);
	if (res) {
		printk(KERN_ERR "cipher_encrypt_iv() failed flags=%x\n",
		       tfm->base.crt_flags);
		return res;
	}

	/* do residual block termination */
	if (unaligned_len) {
		u8 residual_block[bsize];
		sg_set_buf(&sg_residual, residual_block, bsize);

		if (unlikely(aligned_len < bsize * 2)) {
			sg_set_buf(&sg, iv, bsize);
		} else {
			sg_set_buf(&sg, data, bsize);
		}

		res = crypto_blkcipher_encrypt(&desc, &sg_residual, &sg, bsize);
		if (res) {
			printk(KERN_ERR "cipher_encrypt_iv() failed flags=%x\n",
			       tfm->base.crt_flags);
			return res;
		}

		/* xor the keystream block into the unaligned tail */
		for (i = 0; i < unaligned_len; ++i) {
			data[aligned_len + i] ^= residual_block[i];
		}
	}

	/* remember this frame's leading ciphertext bytes as the iv
	 * for the next packet */
	if (likely(len >= iv_len)) {
		memcpy(iv, data, iv_len);
	} else {
		memset(iv + len, 0, iv_len - len);
		memcpy(iv, data, len);
	}

	return 0;
}
EXPORT_SYMBOL(ccrypt_tx_store);
EXPORT_SYMBOL(ccrypt_rx_store);
EXPORT_SYMBOL(ccrypt_rx_reset);
EXPORT_SYMBOL(ccrypt_tx_reset);
EXPORT_SYMBOL(ccrypt_tx_show);
EXPORT_SYMBOL(ccrypt_rx_show);
EXPORT_SYMBOL(ccrypt_decrypt);
EXPORT_SYMBOL(ccrypt_encrypt);