/*
 * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/bio.h>
#include <linux/crypto.h>
#include <linux/dst.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
/*
 * Tricky bastard, but IV can be more complex with time...
 */
static inline u64 dst_gen_iv(struct dst_trans *t)
{
        return t->gen;
}
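/*
 * Right now the IV is simply the 64-bit transaction generation number;
 * dst_crypt_iterator() below zero-pads it into the cipher IV buffer, so
 * anything fancier only needs to touch dst_gen_iv().
 */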
/*
 * Crypto machinery: hash/cipher support for the given crypto controls.
 */
static struct crypto_hash *dst_init_hash(struct dst_crypto_ctl *ctl, u8 *key)
{
        int err;
        struct crypto_hash *hash;

        hash = crypto_alloc_hash(ctl->hash_algo, 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(hash)) {
                err = PTR_ERR(hash);
                dprintk("%s: failed to allocate hash '%s', err: %d.\n",
                                __func__, ctl->hash_algo, err);
                goto err_out_exit;
        }

        ctl->crypto_attached_size = crypto_hash_digestsize(hash);

        if (!ctl->hash_keysize)
                return hash;

        err = crypto_hash_setkey(hash, key, ctl->hash_keysize);
        if (err) {
                dprintk("%s: failed to set key for hash '%s', err: %d.\n",
                                __func__, ctl->hash_algo, err);
                goto err_out_free;
        }

        return hash;

err_out_free:
        crypto_free_hash(hash);
err_out_exit:
        return ERR_PTR(err);
}
static struct crypto_ablkcipher *dst_init_cipher(struct dst_crypto_ctl *ctl,
                u8 *key)
{
        int err = -EINVAL;
        struct crypto_ablkcipher *cipher;

        if (!ctl->cipher_keysize)
                goto err_out_exit;

        cipher = crypto_alloc_ablkcipher(ctl->cipher_algo, 0, 0);
        if (IS_ERR(cipher)) {
                err = PTR_ERR(cipher);
                dprintk("%s: failed to allocate cipher '%s', err: %d.\n",
                                __func__, ctl->cipher_algo, err);
                goto err_out_exit;
        }

        crypto_ablkcipher_clear_flags(cipher, ~0);

        err = crypto_ablkcipher_setkey(cipher, key, ctl->cipher_keysize);
        if (err) {
                dprintk("%s: failed to set key for cipher '%s', err: %d.\n",
                                __func__, ctl->cipher_algo, err);
                goto err_out_free;
        }

        return cipher;

err_out_free:
        crypto_free_ablkcipher(cipher);
err_out_exit:
        return ERR_PTR(err);
}
/*
 * Crypto engine has a pool of pages to encrypt data into before sending
 * it over the network. This pool is freed/allocated here.
 */
static void dst_crypto_pages_free(struct dst_crypto_engine *e)
{
        unsigned int i;

        for (i = 0; i < e->page_num; ++i)
                __free_page(e->pages[i]);
        kfree(e->pages);
}

static int dst_crypto_pages_alloc(struct dst_crypto_engine *e, int num)
{
        int i;

        e->pages = kmalloc(num * sizeof(struct page *), GFP_KERNEL);
        if (!e->pages)
                return -ENOMEM;

        for (i = 0; i < num; ++i) {
                e->pages[i] = alloc_page(GFP_KERNEL);
                if (!e->pages[i])
                        goto err_out_free_pages;
        }

        e->page_num = num;
        return 0;

err_out_free_pages:
        while (--i >= 0)
                __free_page(e->pages[i]);

        kfree(e->pages);
        return -ENOMEM;
}
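/*
 * The caller sizes this pool to n->max_pages, i.e. one temporary page per
 * bio segment, so dst_trans_iter_out() below can redirect segment i of a
 * bio into e->pages[i].
 */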
/*
 * Initialize crypto engine for given node.
 * Setup cipher/hash, keys, pool of threads and private data.
 */
static int dst_crypto_engine_init(struct dst_crypto_engine *e,
                struct dst_node *n)
{
        int err;
        struct dst_crypto_ctl *ctl = &n->crypto;

        err = dst_crypto_pages_alloc(e, n->max_pages);
        if (err)
                goto err_out_exit;

        e->size = PAGE_SIZE;
        e->data = kmalloc(e->size, GFP_KERNEL);
        if (!e->data) {
                err = -ENOMEM;
                goto err_out_free_pages;
        }

        if (ctl->hash_algo[0]) {
                e->hash = dst_init_hash(ctl, n->hash_key);
                if (IS_ERR(e->hash)) {
                        err = PTR_ERR(e->hash);
                        goto err_out_free;
                }
        }

        if (ctl->cipher_algo[0]) {
                e->cipher = dst_init_cipher(ctl, n->cipher_key);
                if (IS_ERR(e->cipher)) {
                        err = PTR_ERR(e->cipher);
                        goto err_out_free_hash;
                }
        }

        return 0;

err_out_free_hash:
        crypto_free_hash(e->hash);
err_out_free:
        kfree(e->data);
err_out_free_pages:
        dst_crypto_pages_free(e);
err_out_exit:
        return err;
}
static void dst_crypto_engine_exit(struct dst_crypto_engine *e)
{
        if (e->hash)
                crypto_free_hash(e->hash);
        if (e->cipher)
                crypto_free_ablkcipher(e->cipher);
        dst_crypto_pages_free(e);
        kfree(e->data);
}
/*
 * Waiting for cipher processing to be completed.
 */
struct dst_crypto_completion
{
        struct completion       complete;
        int                     error;
};

static void dst_crypto_complete(struct crypto_async_request *req, int err)
{
        struct dst_crypto_completion *c = req->data;

        if (err == -EINPROGRESS)
                return;

        dprintk("%s: req: %p, err: %d.\n", __func__, req, err);
        c->error = err;
        complete(&c->complete);
}
static int dst_crypto_process(struct ablkcipher_request *req,
                struct scatterlist *sg_dst, struct scatterlist *sg_src,
                void *iv, int enc, unsigned long timeout)
{
        int err;
        struct dst_crypto_completion c;

        init_completion(&c.complete);
        c.error = -EINPROGRESS;

        ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                        dst_crypto_complete, &c);

        ablkcipher_request_set_crypt(req, sg_src, sg_dst, sg_src->length, iv);

        if (enc)
                err = crypto_ablkcipher_encrypt(req);
        else
                err = crypto_ablkcipher_decrypt(req);

        switch (err) {
        case -EINPROGRESS:
        case -EBUSY:
                err = wait_for_completion_interruptible_timeout(&c.complete,
                                timeout);
                if (!err)
                        err = -ETIMEDOUT;
                else
                        err = c.error;
                break;
        default:
                break;
        }

        return err;
}
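/*
 * Note on the flow above: the ablkcipher API is asynchronous, so the
 * request may complete later; in that case the caller sleeps on the
 * completion until dst_crypto_complete() fires or the timeout expires,
 * and then picks up the final status from c.error.
 */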
/*
 * DST uses a generic iteration approach for data crypto processing.
 * A single block IO request is split into an array of scatterlists,
 * which are submitted to the crypto processing iterator.
 *
 * Input and output iterator initialization differ, since in the output
 * case we can not encrypt data in place and need temporary storage,
 * which is then sent to the remote peer.
 */
static int dst_trans_iter_out(struct bio *bio, struct dst_crypto_engine *e,
                int (*iterator) (struct dst_crypto_engine *e,
                                 struct scatterlist *dst,
                                 struct scatterlist *src))
{
        struct bio_vec *bv;
        int err, i;

        sg_init_table(e->src, bio->bi_vcnt);
        sg_init_table(e->dst, bio->bi_vcnt);

        bio_for_each_segment(bv, bio, i) {
                sg_set_page(&e->src[i], bv->bv_page, bv->bv_len, bv->bv_offset);
                sg_set_page(&e->dst[i], e->pages[i], bv->bv_len, bv->bv_offset);

                err = iterator(e, &e->dst[i], &e->src[i]);
                if (err)
                        return err;
        }

        return 0;
}
static int dst_trans_iter_in(struct bio *bio, struct dst_crypto_engine *e,
                int (*iterator) (struct dst_crypto_engine *e,
                                 struct scatterlist *dst,
                                 struct scatterlist *src))
{
        struct bio_vec *bv;
        int err, i;

        sg_init_table(e->src, bio->bi_vcnt);
        sg_init_table(e->dst, bio->bi_vcnt);

        bio_for_each_segment(bv, bio, i) {
                sg_set_page(&e->src[i], bv->bv_page, bv->bv_len, bv->bv_offset);
                sg_set_page(&e->dst[i], bv->bv_page, bv->bv_len, bv->bv_offset);

                err = iterator(e, &e->dst[i], &e->src[i]);
                if (err)
                        return err;
        }

        return 0;
}
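/*
 * Illustrative sketch (not built, hypothetical callback): anything with
 * the iterator signature can be plugged into dst_trans_iter_in()/
 * dst_trans_iter_out(); the crypto and hash iterators below are the two
 * real users. This one only checks that the per-segment scatterlists
 * were set up with matching lengths.
 */
#if 0
static int dst_example_iterator(struct dst_crypto_engine *e,
                struct scatterlist *sg_dst, struct scatterlist *sg_src)
{
        return (sg_dst->length == sg_src->length) ? 0 : -EINVAL;
}

/* Usage: err = dst_trans_iter_in(bio, e, dst_example_iterator); */
#endif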
static int dst_crypt_iterator(struct dst_crypto_engine *e,
                struct scatterlist *sg_dst, struct scatterlist *sg_src)
{
        struct ablkcipher_request *req = e->data;
        u8 iv[32];

        memset(iv, 0, sizeof(iv));

        memcpy(iv, &e->iv, sizeof(e->iv));

        return dst_crypto_process(req, sg_dst, sg_src, iv, e->enc, e->timeout);
}

static int dst_crypt(struct dst_crypto_engine *e, struct bio *bio)
{
        struct ablkcipher_request *req = e->data;

        memset(req, 0, sizeof(struct ablkcipher_request));
        ablkcipher_request_set_tfm(req, e->cipher);

        if (e->enc)
                return dst_trans_iter_out(bio, e, dst_crypt_iterator);
        else
                return dst_trans_iter_in(bio, e, dst_crypt_iterator);
}
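/*
 * Note: when encrypting (e->enc set) the ciphertext lands in the engine's
 * temporary pages via dst_trans_iter_out(), since the bio data cannot be
 * encrypted in place (see the comment above the iterators); decryption
 * runs in place through dst_trans_iter_in().
 */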
static int dst_hash_iterator(struct dst_crypto_engine *e,
                struct scatterlist *sg_dst, struct scatterlist *sg_src)
{
        return crypto_hash_update(e->data, sg_src, sg_src->length);
}

static int dst_hash(struct dst_crypto_engine *e, struct bio *bio, void *dst)
{
        struct hash_desc *desc = e->data;
        int err;

        desc->tfm = e->hash;
        desc->flags = 0;

        err = crypto_hash_init(desc);
        if (err)
                return err;

        err = dst_trans_iter_in(bio, e, dst_hash_iterator);
        if (err)
                return err;

        err = crypto_hash_final(desc, dst);
        return err;
}
/*
 * Initialize/cleanup a crypto thread. The only thing it should
 * do is to allocate a pool of pages as temporary storage.
 * And to setup cipher and/or hash.
 */
static void *dst_crypto_thread_init(void *data)
{
        struct dst_node *n = data;
        struct dst_crypto_engine *e;
        int err = -ENOMEM;

        e = kzalloc(sizeof(struct dst_crypto_engine), GFP_KERNEL);
        if (!e)
                goto err_out_exit;

        e->src = kcalloc(2 * n->max_pages, sizeof(struct scatterlist),
                        GFP_KERNEL);
        if (!e->src)
                goto err_out_free;

        e->dst = e->src + n->max_pages;

        err = dst_crypto_engine_init(e, n);
        if (err)
                goto err_out_free_all;

        return e;

err_out_free_all:
        kfree(e->src);
err_out_free:
        kfree(e);
err_out_exit:
        return ERR_PTR(err);
}
static void dst_crypto_thread_cleanup(void *private)
{
        struct dst_crypto_engine *e = private;

        dst_crypto_engine_exit(e);
        kfree(e->src);
        kfree(e);
}
/*
 * Initialize crypto engine for given node: store keys, create pool
 * of threads, initialize each one.
 *
 * Each thread has a unique ID, but 0 and 1 are reserved for the receiving
 * and accepting threads (if this is an export node), so IDs could start
 * from 2, but starting them from 10 makes it easy to see what a thread
 * is for.
 */
int dst_node_crypto_init(struct dst_node *n, struct dst_crypto_ctl *ctl)
{
        void *key = (ctl + 1);
        int err = -ENOMEM, i;
        char name[32];

        if (ctl->hash_keysize) {
                n->hash_key = kmalloc(ctl->hash_keysize, GFP_KERNEL);
                if (!n->hash_key)
                        goto err_out_exit;
                memcpy(n->hash_key, key, ctl->hash_keysize);
        }

        if (ctl->cipher_keysize) {
                n->cipher_key = kmalloc(ctl->cipher_keysize, GFP_KERNEL);
                if (!n->cipher_key)
                        goto err_out_free_hash;
                memcpy(n->cipher_key, key, ctl->cipher_keysize);
        }
        memcpy(&n->crypto, ctl, sizeof(struct dst_crypto_ctl));

        for (i = 0; i < ctl->thread_num; ++i) {
                snprintf(name, sizeof(name), "%s-crypto-%d", n->name, i);
                err = thread_pool_add_worker(n->pool, name, i + 10,
                                dst_crypto_thread_init,
                                dst_crypto_thread_cleanup, n);
                if (err)
                        goto err_out_free_threads;
        }

        return 0;

err_out_free_threads:
        while (--i >= 0)
                thread_pool_del_worker_id(n->pool, i + 10);

        if (ctl->cipher_keysize)
                kfree(n->cipher_key);
        ctl->cipher_keysize = 0;
err_out_free_hash:
        if (ctl->hash_keysize)
                kfree(n->hash_key);
        ctl->hash_keysize = 0;
err_out_exit:
        return err;
}
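/*
 * Note: the key bytes are expected to sit directly behind the control
 * structure in the buffer handed to this function ('key = ctl + 1'
 * above); both the hash and the cipher key are copied from that area.
 */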
void dst_node_crypto_exit(struct dst_node *n)
{
        struct dst_crypto_ctl *ctl = &n->crypto;

        if (ctl->cipher_algo[0] || ctl->hash_algo[0]) {
                kfree(n->hash_key);
                kfree(n->cipher_key);
        }
}
/*
 * Thread pool setup callback. Just stores a transaction in private data.
 */
static int dst_trans_crypto_setup(void *crypto_engine, void *trans)
{
        struct dst_crypto_engine *e = crypto_engine;

        e->private = trans;
        return 0;
}
static void dst_dump_bio(struct bio *bio)
{
        u8 *p;
        struct bio_vec *bv;
        unsigned int j;
        int i;

        bio_for_each_segment(bv, bio, i) {
                dprintk("%s: %llu/%u: size: %u, offset: %u, data: ",
                                __func__, (u64)bio->bi_sector, bio->bi_size,
                                bv->bv_len, bv->bv_offset);

                p = kmap(bv->bv_page) + bv->bv_offset;
                for (j = 0; j < bv->bv_len; ++j)
                        printk("%02x ", p[j]);
                kunmap(bv->bv_page);
                printk("\n");
        }
}
/*
 * Encrypt/hash data and send it to the network.
 */
static int dst_crypto_process_sending(struct dst_crypto_engine *e,
                struct bio *bio, u8 *hash)
{
        int err;

        if (e->cipher) {
                err = dst_crypt(e, bio);
                if (err)
                        goto err_out_exit;
        }

        if (e->hash) {
                err = dst_hash(e, bio, hash);
                if (err)
                        goto err_out_exit;

#ifdef CONFIG_DST_DEBUG
                {
                        unsigned int i;

                        /* dst_dump_bio(bio); */

                        printk(KERN_DEBUG "%s: bio: %llu/%u, rw: %lu, hash: ",
                                __func__, (u64)bio->bi_sector,
                                bio->bi_size, bio_data_dir(bio));
                        for (i = 0; i < crypto_hash_digestsize(e->hash); ++i)
                                printk("%02x ", hash[i]);
                        printk("\n");
                }
#endif
        }

        return 0;

err_out_exit:
        return err;
}
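/*
 * Note on ordering above: data is encrypted first (when a cipher is
 * configured), then the digest is calculated and returned through 'hash'
 * so the caller can attach it to the outgoing command.
 */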
/*
 * Check if received data is valid. Decipher if it is.
 */
static int dst_crypto_process_receiving(struct dst_crypto_engine *e,
                struct bio *bio, u8 *hash, u8 *recv_hash)
{
        int err;

        if (e->hash) {
                int mismatch;

                err = dst_hash(e, bio, hash);
                if (err)
                        goto err_out_exit;

                mismatch = !!memcmp(recv_hash, hash,
                                crypto_hash_digestsize(e->hash));
#ifdef CONFIG_DST_DEBUG
                /* dst_dump_bio(bio); */

                printk(KERN_DEBUG "%s: bio: %llu/%u, rw: %lu, hash mismatch: %d",
                        __func__, (u64)bio->bi_sector, bio->bi_size,
                        bio_data_dir(bio), mismatch);
                if (mismatch) {
                        unsigned int i;

                        printk(", recv/calc: ");
                        for (i = 0; i < crypto_hash_digestsize(e->hash); ++i)
                                printk("%02x/%02x ", recv_hash[i], hash[i]);
                }
                printk("\n");
#endif
                if (mismatch) {
                        err = -EINVAL;
                        goto err_out_exit;
                }
        }

        if (e->cipher) {
                err = dst_crypt(e, bio);
                if (err)
                        goto err_out_exit;
        }

        return 0;

err_out_exit:
        return err;
}
/*
 * Thread pool callback to encrypt data and send it to the network.
 */
static int dst_trans_crypto_action(void *crypto_engine, void *schedule_data)
{
        struct dst_crypto_engine *e = crypto_engine;
        struct dst_trans *t = schedule_data;
        struct bio *bio = t->bio;
        int err;

        dprintk("%s: t: %p, gen: %llu, cipher: %p, hash: %p.\n",
                        __func__, t, t->gen, e->cipher, e->hash);

        e->enc = t->enc;
        e->iv = dst_gen_iv(t);

        if (bio_data_dir(bio) == WRITE) {
                err = dst_crypto_process_sending(e, bio, t->cmd.hash);
                if (err)
                        goto err_out_exit;

                if (e->hash) {
                        t->cmd.csize = crypto_hash_digestsize(e->hash);
                        t->cmd.size += t->cmd.csize;
                }

                return dst_trans_send(t);
        } else {
                u8 *hash = e->data + e->size/2;

                err = dst_crypto_process_receiving(e, bio, hash, t->cmd.hash);
                if (err)
                        goto err_out_exit;

                dst_trans_remove(t);
                dst_trans_put(t);
        }

        return 0;

err_out_exit:
        t->error = err;
        dst_trans_put(t);
        return err;
}
/*
 * Schedule crypto processing for given transaction.
 */
int dst_trans_crypto(struct dst_trans *t)
{
        struct dst_node *n = t->n;
        int err;

        err = thread_pool_schedule(n->pool,
                dst_trans_crypto_setup, dst_trans_crypto_action,
                t, MAX_SCHEDULE_TIMEOUT);
        if (err)
                goto err_out_exit;

        return 0;

err_out_exit:
        t->error = err;
        dst_trans_put(t);
        return err;
}
/*
 * Crypto machinery for the export node.
 */
static int dst_export_crypto_setup(void *crypto_engine, void *bio)
{
        struct dst_crypto_engine *e = crypto_engine;

        e->private = bio;
        return 0;
}
static int dst_export_crypto_action(void *crypto_engine, void *schedule_data)
{
        struct dst_crypto_engine *e = crypto_engine;
        struct bio *bio = schedule_data;
        struct dst_export_priv *p = bio->bi_private;
        int err;

        dprintk("%s: e: %p, data: %p, bio: %llu/%u, dir: %lu.\n", __func__,
                e, e->data, (u64)bio->bi_sector, bio->bi_size, bio_data_dir(bio));

        e->enc = (bio_data_dir(bio) == READ);
        e->iv = p->cmd.id;

        if (bio_data_dir(bio) == WRITE) {
                u8 *hash = e->data + e->size/2;

                err = dst_crypto_process_receiving(e, bio, hash, p->cmd.hash);
                if (err)
                        goto err_out_exit;

                generic_make_request(bio);
        } else {
                err = dst_crypto_process_sending(e, bio, p->cmd.hash);
                if (err)
                        goto err_out_exit;

                if (e->hash) {
                        p->cmd.csize = crypto_hash_digestsize(e->hash);
                        p->cmd.size += p->cmd.csize;
                }

                err = dst_export_send_bio(bio);
        }
        return 0;

err_out_exit:
        bio_put(bio);
        return err;
}
int dst_export_crypto(struct dst_node *n, struct bio *bio)
{
        int err;

        err = thread_pool_schedule(n->pool,
                        dst_export_crypto_setup, dst_export_crypto_action,
                        bio, MAX_SCHEDULE_TIMEOUT);
        if (err)
                goto err_out_exit;

        return 0;

err_out_exit:
        bio_put(bio);
        return err;
}