/*
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Mimi Zohar <zohar@us.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, version 2 of the License.
 *
 * File: ima_crypto.c
 *	Calculates md5/sha1 file hash, template hash, boot-aggregate hash
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/ratelimit.h>
#include <linux/file.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>

#include "ima.h"

struct ahash_completion {
	struct completion completion;
	int err;
};

/* minimum file size for ahash use */
static unsigned long ima_ahash_minsize;
module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use");

/* default is 0 - 1 page. */
static int ima_maxorder;
static unsigned int ima_bufsize = PAGE_SIZE;

static int param_set_bufsize(const char *val, const struct kernel_param *kp)
{
	unsigned long long size;
	int order;

	size = memparse(val, NULL);
	order = get_order(size);
	if (order >= MAX_ORDER)
		return -EINVAL;
	ima_maxorder = order;
	ima_bufsize = PAGE_SIZE << order;
	return 0;
}

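/*
 * Example (sketch): "ahash_bufsize=32k" is parsed by memparse() as 32768
 * bytes; on a system with 4KB pages get_order() maps that to order 3, so
 * ima_bufsize becomes 32768. A size that is not a power-of-two number of
 * pages, e.g. 20k, is rounded up to the next order (32k here).
 */
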
static const struct kernel_param_ops param_ops_bufsize = {
	.set = param_set_bufsize,
	.get = param_get_uint,
};
#define param_check_bufsize(name, p) __param_check(name, p, unsigned int)

module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644);
MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");

static struct crypto_shash *ima_shash_tfm;
static struct crypto_ahash *ima_ahash_tfm;

int __init ima_init_crypto(void)
{
	long rc;

	ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
	if (IS_ERR(ima_shash_tfm)) {
		rc = PTR_ERR(ima_shash_tfm);
		pr_err("Can not allocate %s (reason: %ld)\n",
		       hash_algo_name[ima_hash_algo], rc);
		return rc;
	}
	return 0;
}

static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
{
	struct crypto_shash *tfm = ima_shash_tfm;
	int rc;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo != ima_hash_algo) {
		tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
		if (IS_ERR(tfm)) {
			rc = PTR_ERR(tfm);
			pr_err("Can not allocate %s (reason: %d)\n",
			       hash_algo_name[algo], rc);
		}
	}
	return tfm;
}

static void ima_free_tfm(struct crypto_shash *tfm)
{
	if (tfm != ima_shash_tfm)
		crypto_free_shash(tfm);
}

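/*
 * Only transforms allocated for a non-default algorithm are freed here;
 * the default ima_shash_tfm is allocated once in ima_init_crypto() and
 * cached for the lifetime of the kernel.
 */
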
/**
 * ima_alloc_pages() - Allocate contiguous pages.
 * @max_size: Maximum amount of memory to allocate.
 * @allocated_size: Returned size of actual allocation.
 * @last_warn: Should the min_size allocation warn or not.
 *
 * Tries to do opportunistic allocation for memory first trying to allocate
 * max_size amount of memory and then splitting that until zero order is
 * reached. Allocation is tried without generating allocation warnings unless
 * last_warn is set. Last_warn set affects only last allocation of zero order.
 *
 * By default, ima_maxorder is 0 and it is equivalent to kmalloc(GFP_KERNEL)
 *
 * Return pointer to allocated memory, or NULL on failure.
 */
static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
			     int last_warn)
{
	void *ptr;
	int order = ima_maxorder;
	gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;

	if (order)
		order = min(get_order(max_size), order);

	for (; order; order--) {
		ptr = (void *)__get_free_pages(gfp_mask, order);
		if (ptr) {
			*allocated_size = PAGE_SIZE << order;
			return ptr;
		}
	}

	/* order is zero - one page */

	gfp_mask = GFP_KERNEL;

	if (!last_warn)
		gfp_mask |= __GFP_NOWARN;

	ptr = (void *)__get_free_pages(gfp_mask, 0);
	if (ptr) {
		*allocated_size = PAGE_SIZE;
		return ptr;
	}

	*allocated_size = 0;
	return NULL;
}

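/*
 * Example (sketch): with ima_maxorder == 2 and a max_size of ten pages,
 * the loop above first attempts an order-2 (four page) allocation, then
 * order-1, and finally falls back to a single zero-order page, so callers
 * may receive less memory than requested and must honor *allocated_size.
 */
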
/**
 * ima_free_pages() - Free pages allocated by ima_alloc_pages().
 * @ptr: Pointer to allocated pages.
 * @size: Size of allocated buffer.
 */
static void ima_free_pages(void *ptr, size_t size)
{
	if (!ptr)
		return;
	free_pages((unsigned long)ptr, get_order(size));
}

static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
{
	struct crypto_ahash *tfm = ima_ahash_tfm;
	int rc;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo != ima_hash_algo || !tfm) {
		tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
		if (!IS_ERR(tfm)) {
			if (algo == ima_hash_algo)
				ima_ahash_tfm = tfm;
		} else {
			rc = PTR_ERR(tfm);
			pr_err("Can not allocate %s (reason: %d)\n",
			       hash_algo_name[algo], rc);
		}
	}
	return tfm;
}

static void ima_free_atfm(struct crypto_ahash *tfm)
{
	if (tfm != ima_ahash_tfm)
		crypto_free_ahash(tfm);
}

static void ahash_complete(struct crypto_async_request *req, int err)
{
	struct ahash_completion *res = req->data;

	if (err == -EINPROGRESS)
		return;
	res->err = err;
	complete(&res->completion);
}

static int ahash_wait(int err, struct ahash_completion *res)
{
	switch (err) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		wait_for_completion(&res->completion);
		reinit_completion(&res->completion);
		err = res->err;
		/* fall through */
	default:
		pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);
	}

	return err;
}

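/*
 * ima_calc_file_hash_atfm() below pipelines file I/O and hashing: while
 * the crypto engine digests one buffer, the next chunk of the file is
 * read into a second buffer. When only a single buffer could be
 * allocated, each ahash_update() must complete before the buffer can be
 * refilled.
 */
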
static int ima_calc_file_hash_atfm(struct file *file,
				   struct ima_digest_data *hash,
				   struct crypto_ahash *tfm)
{
	loff_t i_size, offset;
	char *rbuf[2] = { NULL, };
	int rc, read = 0, rbuf_len, active = 0, ahash_rc = 0;
	struct ahash_request *req;
	struct scatterlist sg[1];
	struct ahash_completion res;
	size_t rbuf_size[2];

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	init_completion(&res.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   ahash_complete, &res);

	rc = ahash_wait(crypto_ahash_init(req), &res);
	if (rc)
		goto out1;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out2;

	/*
	 * Try to allocate maximum size of memory.
	 * Fail if even a single page cannot be allocated.
	 */
	rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
	if (!rbuf[0]) {
		rc = -ENOMEM;
		goto out1;
	}

	/* Only allocate one buffer if that is enough. */
	if (i_size > rbuf_size[0]) {
		/*
		 * Try to allocate secondary buffer. If that fails fallback to
		 * using single buffering. Use previous memory allocation size
		 * as baseline for possible allocation size.
		 */
		rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
					  &rbuf_size[1], 0);
	}

	if (!(file->f_mode & FMODE_READ)) {
		file->f_mode |= FMODE_READ;
		read = 1;
	}

	for (offset = 0; offset < i_size; offset += rbuf_len) {
		if (!rbuf[1] && offset) {
			/* Not using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &res);
			if (rc)
				goto out3;
		}
		/* read buffer */
		rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
		rc = integrity_kernel_read(file, offset, rbuf[active],
					   rbuf_len);
		if (rc != rbuf_len)
			goto out3;

		if (rbuf[1] && offset) {
			/* Using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &res);
			if (rc)
				goto out3;
		}

		sg_init_one(&sg[0], rbuf[active], rbuf_len);
		ahash_request_set_crypt(req, sg, NULL, rbuf_len);

		ahash_rc = crypto_ahash_update(req);

		if (rbuf[1])
			active = !active; /* swap buffers, if we use two */
	}
	/* wait for the last update request to complete */
	rc = ahash_wait(ahash_rc, &res);
out3:
	if (read)
		file->f_mode &= ~FMODE_READ;
	ima_free_pages(rbuf[0], rbuf_size[0]);
	ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &res);
	}
out1:
	ahash_request_free(req);
	return rc;
}

static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_atfm(file, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}

static int ima_calc_file_hash_tfm(struct file *file,
				  struct ima_digest_data *hash,
				  struct crypto_shash *tfm)
{
	loff_t i_size, offset = 0;
	char *rbuf;
	int rc, read = 0;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out;

	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	if (!(file->f_mode & FMODE_READ)) {
		file->f_mode |= FMODE_READ;
		read = 1;
	}

	while (offset < i_size) {
		int rbuf_len;

		rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
		if (rbuf_len < 0) {
			rc = rbuf_len;
			break;
		}
		if (rbuf_len == 0)
			break;
		offset += rbuf_len;

		rc = crypto_shash_update(shash, rbuf, rbuf_len);
		if (rc)
			break;
	}
	if (read)
		file->f_mode &= ~FMODE_READ;
	kfree(rbuf);
out:
	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}

static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_tfm(file, hash, tfm);

	ima_free_tfm(tfm);

	return rc;
}

/**
 * ima_calc_file_hash - calculate file hash
 *
 * Asynchronous hash (ahash) allows using HW acceleration for calculating
 * a hash. ahash performance varies for different data sizes on different
 * crypto accelerators. shash performance might be better for smaller files.
 * The 'ima.ahash_minsize' module parameter allows specifying the best
 * minimum file size for using ahash on the system.
 *
 * If the ima.ahash_minsize parameter is not specified, this function uses
 * shash for the hash calculation. If ahash fails, it falls back to using
 * shash.
 */
int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
{
	loff_t i_size;
	int rc;

	i_size = i_size_read(file_inode(file));

	if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
		rc = ima_calc_file_ahash(file, hash);
		if (!rc)
			return 0;
	}

	return ima_calc_file_shash(file, hash);
}

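/*
 * Minimal usage sketch (hypothetical caller): the caller selects the
 * algorithm and provides the digest storage via struct ima_digest_data:
 *
 *	hash->algo = ima_hash_algo;
 *	rc = ima_calc_file_hash(file, hash);
 *	if (!rc)
 *		... hash->digest now holds hash->length digest bytes ...
 */
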
/*
 * Calculate the hash of template data
 */
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
					 struct ima_template_desc *td,
					 int num_fields,
					 struct ima_digest_data *hash,
					 struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	int rc, i;

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	for (i = 0; i < num_fields; i++) {
		u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
		u8 *data_to_hash = field_data[i].data;
		u32 datalen = field_data[i].len;
		u32 datalen_to_hash =
		    !ima_canonical_fmt ? datalen : cpu_to_le32(datalen);

		if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
			rc = crypto_shash_update(shash,
						(const u8 *) &datalen_to_hash,
						sizeof(datalen_to_hash));
			if (rc)
				break;
		} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
			memcpy(buffer, data_to_hash, datalen);
			data_to_hash = buffer;
			datalen = IMA_EVENT_NAME_LEN_MAX + 1;
		}
		rc = crypto_shash_update(shash, data_to_hash, datalen);
		if (rc)
			break;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);

	return rc;
}

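/*
 * Note: when the "ima_canonical_fmt" boot option is set, the field
 * lengths are hashed in little-endian form, so measurement lists
 * produced on big- and little-endian machines hash identically.
 */
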
int ima_calc_field_array_hash(struct ima_field_data *field_data,
			      struct ima_template_desc *desc, int num_fields,
			      struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields,
					   hash, tfm);

	ima_free_tfm(tfm);

	return rc;
}

static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
				  struct ima_digest_data *hash,
				  struct crypto_ahash *tfm)
{
	struct ahash_request *req;
	struct scatterlist sg;
	struct ahash_completion res;
	int rc, ahash_rc = 0;

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	init_completion(&res.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   ahash_complete, &res);

	rc = ahash_wait(crypto_ahash_init(req), &res);
	if (rc)
		goto out;

	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, NULL, len);

	ahash_rc = crypto_ahash_update(req);

	/* wait for the update request to complete */
	rc = ahash_wait(ahash_rc, &res);
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &res);
	}
out:
	ahash_request_free(req);
	return rc;
}

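/*
 * Note: sg_init_one() requires a linearly mapped buffer, so the
 * buffer-hash helpers must not be handed vmalloc()ed memory.
 */
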
static int calc_buffer_ahash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_ahash_atfm(buf, len, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}

static int calc_buffer_shash_tfm(const void *buf, loff_t size,
				 struct ima_digest_data *hash,
				 struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	unsigned int len = 0;
	loff_t offset = 0;
	int rc;

	shash->tfm = tfm;
	shash->flags = 0;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	while (offset < size) {
		/* hash at most one page per iteration, walking the buffer */
		len = size - offset < PAGE_SIZE ? size - offset : PAGE_SIZE;
		rc = crypto_shash_update(shash, buf + offset, len);
		if (rc)
			break;
		offset += len;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}

static int calc_buffer_shash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_shash_tfm(buf, len, hash, tfm);

	ima_free_tfm(tfm);
	return rc;
}

int ima_calc_buffer_hash(const void *buf, loff_t len,
			 struct ima_digest_data *hash)
{
	int rc;

	if (ima_ahash_minsize && len >= ima_ahash_minsize) {
		rc = calc_buffer_ahash(buf, len, hash);
		if (!rc)
			return 0;
	}

	return calc_buffer_shash(buf, len, hash);
}

static void __init ima_pcrread(int idx, u8 *pcr)
{
	if (!ima_used_chip)
		return;

	if (tpm_pcr_read(TPM_ANY_NUM, idx, pcr) != 0)
		pr_err("Error Communicating to TPM chip\n");
}

/*
 * Calculate the boot aggregate hash
 */
static int __init ima_calc_boot_aggregate_tfm(char *digest,
					      struct crypto_shash *tfm)
{
	u8 pcr_i[TPM_DIGEST_SIZE];
	int rc, i;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;
	shash->flags = 0;

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	/* cumulative sha1 over tpm registers 0-7 */
	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
		ima_pcrread(i, pcr_i);
		/* now accumulate with current aggregate */
		rc = crypto_shash_update(shash, pcr_i, TPM_DIGEST_SIZE);
	}
	if (!rc)
		crypto_shash_final(shash, digest);
	return rc;
}

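/*
 * The resulting "boot aggregate" binds the IMA measurement list to the
 * platform boot state: PCRs 0-7 hold the pre-OS (BIOS/bootloader)
 * measurements, and their combined hash seeds the first entry in the
 * measurement list.
 */
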
int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	hash->length = crypto_shash_digestsize(tfm);
	rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm);

	ima_free_tfm(tfm);

	return rc;
}