/*
 * linux/fs/ext4/crypto.c
 *
 * Copyright (C) 2015, Google, Inc.
 *
 * This contains encryption functions for ext4
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */
#include <crypto/skcipher.h>
#include <keys/user-type.h>
#include <keys/encrypted-type.h>
#include <linux/ecryptfs.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/key.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/spinlock_types.h>

#include "ext4_extents.h"
#include "xattr.h"

/* Encryption added and removed here! (L: */
static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		 "Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		 "Number of crypto contexts to preallocate");
static mempool_t *ext4_bounce_page_pool;

static LIST_HEAD(ext4_free_crypto_ctxs);
static DEFINE_SPINLOCK(ext4_crypto_ctx_lock);

static struct kmem_cache *ext4_crypto_ctx_cachep;
struct kmem_cache *ext4_crypt_info_cachep;
/**
 * ext4_release_crypto_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & EXT4_WRITE_PATH_FL && ctx->w.bounce_page)
		mempool_free(ctx->w.bounce_page, ext4_bounce_page_pool);
	ctx->w.bounce_page = NULL;
	ctx->w.control_page = NULL;
	if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(ext4_crypto_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
		spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	}
}
/**
 * ext4_get_crypto_ctx() - Gets an encryption context
 * @inode: The inode for which we are doing the crypto
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; error
 * value or NULL otherwise.
 */
struct ext4_crypto_ctx *ext4_get_crypto_ctx(struct inode *inode,
					    gfp_t gfp_flags)
{
	struct ext4_crypto_ctx *ctx = NULL;
	int res = 0;
	unsigned long flags;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&ext4_crypto_ctx_lock, flags);
	ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs,
				       struct ext4_crypto_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&ext4_crypto_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, gfp_flags);
		if (!ctx) {
			res = -ENOMEM;
			goto out;
		}
		ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~EXT4_WRITE_PATH_FL;

out:
	if (res) {
		if (!IS_ERR_OR_NULL(ctx))
			ext4_release_crypto_ctx(ctx);
		ctx = ERR_PTR(res);
	}
	return ctx;
}
struct workqueue_struct *ext4_read_workqueue;
static DEFINE_MUTEX(crypto_init);
/**
 * ext4_exit_crypto() - Shutdown the ext4 encryption system
 */
void ext4_exit_crypto(void)
{
	struct ext4_crypto_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &ext4_free_crypto_ctxs, free_list)
		kmem_cache_free(ext4_crypto_ctx_cachep, pos);
	INIT_LIST_HEAD(&ext4_free_crypto_ctxs);
	if (ext4_bounce_page_pool)
		mempool_destroy(ext4_bounce_page_pool);
	ext4_bounce_page_pool = NULL;
	if (ext4_read_workqueue)
		destroy_workqueue(ext4_read_workqueue);
	ext4_read_workqueue = NULL;
	if (ext4_crypto_ctx_cachep)
		kmem_cache_destroy(ext4_crypto_ctx_cachep);
	ext4_crypto_ctx_cachep = NULL;
	if (ext4_crypt_info_cachep)
		kmem_cache_destroy(ext4_crypt_info_cachep);
	ext4_crypt_info_cachep = NULL;
}
/**
 * ext4_init_crypto() - Set up for ext4 encryption.
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_init_crypto(void)
{
	int i, res = -ENOMEM;

	mutex_lock(&crypto_init);
	if (ext4_read_workqueue)
		goto already_initialized;
	ext4_read_workqueue = alloc_workqueue("ext4_crypto", WQ_HIGHPRI, 0);
	if (!ext4_read_workqueue)
		goto fail;

	ext4_crypto_ctx_cachep = KMEM_CACHE(ext4_crypto_ctx,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypto_ctx_cachep)
		goto fail;

	ext4_crypt_info_cachep = KMEM_CACHE(ext4_crypt_info,
					    SLAB_RECLAIM_ACCOUNT);
	if (!ext4_crypt_info_cachep)
		goto fail;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct ext4_crypto_ctx *ctx;

		ctx = kmem_cache_zalloc(ext4_crypto_ctx_cachep, GFP_NOFS);
		if (!ctx) {
			res = -ENOMEM;
			goto fail;
		}
		list_add(&ctx->free_list, &ext4_free_crypto_ctxs);
	}

	ext4_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!ext4_bounce_page_pool) {
		res = -ENOMEM;
		goto fail;
	}
already_initialized:
	mutex_unlock(&crypto_init);
	return 0;
fail:
	ext4_exit_crypto();
	mutex_unlock(&crypto_init);
	return res;
}
void ext4_restore_control_page(struct page *data_page)
{
	struct ext4_crypto_ctx *ctx =
		(struct ext4_crypto_ctx *)page_private(data_page);

	set_page_private(data_page, (unsigned long)NULL);
	ClearPagePrivate(data_page);
	unlock_page(data_page);
	ext4_release_crypto_ctx(ctx);
}
/**
 * ext4_crypt_complete() - The completion callback for page encryption
 * @req: The asynchronous encryption request context
 * @res: The result of the encryption operation
 */
static void ext4_crypt_complete(struct crypto_async_request *req, int res)
{
	struct ext4_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}
static int ext4_page_crypto(struct inode *inode,
			    ext4_direction_t rw,
			    pgoff_t index,
			    struct page *src_page,
			    struct page *dest_page,
			    gfp_t gfp_flags)
{
	u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
	struct skcipher_request *req = NULL;
	DECLARE_EXT4_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req) {
		printk_ratelimited(KERN_ERR
				   "%s: crypto_request_alloc() failed\n",
				   __func__);
		return -ENOMEM;
	}
	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		ext4_crypt_complete, &ecr);

	BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
	       EXT4_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_SIZE, 0);
	skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE,
				   xts_tweak);
	if (rw == EXT4_DECRYPT)
		res = crypto_skcipher_decrypt(req);
	else
		res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(
			KERN_ERR
			"%s: crypto_skcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}
static struct page *alloc_bounce_page(struct ext4_crypto_ctx *ctx,
				      gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(ext4_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= EXT4_WRITE_PATH_FL;
	return ctx->w.bounce_page;
}
/**
 * ext4_encrypt() - Encrypts a page
 * @inode:          The inode for which the encryption should take place
 * @plaintext_page: The page to encrypt. Must be locked.
 *
 * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
 * encryption context.
 *
 * Called on the page write path.  The caller must call
 * ext4_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * Return: An allocated page with the encrypted content on success. Else, an
 * error value or NULL.
 */
struct page *ext4_encrypt(struct inode *inode,
			  struct page *plaintext_page,
			  gfp_t gfp_flags)
{
	struct ext4_crypto_ctx *ctx;
	struct page *ciphertext_page = NULL;
	int err;

	BUG_ON(!PageLocked(plaintext_page));

	ctx = ext4_get_crypto_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *) ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;
	ctx->w.control_page = plaintext_page;
	err = ext4_page_crypto(inode, EXT4_ENCRYPT, plaintext_page->index,
			       plaintext_page, ciphertext_page, gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
	errout:
		ext4_release_crypto_ctx(ctx);
		return ciphertext_page;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;
}
/**
 * ext4_decrypt() - Decrypts a page in-place
 * @page: The page to decrypt. Must be locked.
 *
 * Decrypts page in-place using the inode's encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int ext4_decrypt(struct page *page)
{
	BUG_ON(!PageLocked(page));

	return ext4_page_crypto(page->mapping->host, EXT4_DECRYPT,
				page->index, page, page, GFP_NOFS);
}
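/*
 * Read-path note: unlike the write path, no bounce page is needed
 * here. The ciphertext is read into the page cache page itself and
 * decrypted in place from the read completion path (work queued on
 * ext4_read_workqueue), which is why this takes only the page.
 */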
int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
			   ext4_fsblk_t pblk, ext4_lblk_t len)
{
	struct ext4_crypto_ctx	*ctx;
	struct page		*ciphertext_page = NULL;
	struct bio		*bio;
	int			ret, err = 0;

#if 0
	ext4_msg(inode->i_sb, KERN_CRIT,
		 "ext4_encrypted_zeroout ino %lu lblk %u len %u",
		 (unsigned long) inode->i_ino, lblk, len);
#endif

	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);

	ctx = ext4_get_crypto_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = alloc_bounce_page(ctx, GFP_NOWAIT);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = ext4_page_crypto(inode, EXT4_ENCRYPT, lblk,
				       ZERO_PAGE(0), ciphertext_page,
				       GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		/* Convert the filesystem block number to a 512-byte sector */
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		ret = bio_add_page(bio, ciphertext_page,
				   inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			ext4_msg(inode->i_sb, KERN_ERR,
				 "bio_add_page failed: %d", ret);
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(WRITE, bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++; pblk++;
	}
	err = 0;
errout:
	ext4_release_crypto_ctx(ctx);
	return err;
}
bool ext4_valid_contents_enc_mode(uint32_t mode)
{
	return (mode == EXT4_ENCRYPTION_MODE_AES_256_XTS);
}
/**
 * ext4_validate_encryption_key_size() - Validate the encryption key size
 * @mode: The key mode.
 * @size: The key size to validate.
 *
 * Return: The validated key size for @mode. Zero if invalid.
 */
uint32_t ext4_validate_encryption_key_size(uint32_t mode, uint32_t size)
{
	if (size == ext4_encryption_key_size(mode))
		return size;
	return 0;
}
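/*
 * For example, for EXT4_ENCRYPTION_MODE_AES_256_XTS the expected size
 * should be 64 bytes, since XTS key material carries two AES-256 keys
 * (one for the block cipher, one for the tweak).
 */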
/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	struct ext4_crypt_info *ci;
	int dir_has_key, cached_with_key;

	dir = dget_parent(dentry);
	if (!ext4_encrypted_inode(d_inode(dir))) {
		dput(dir);
		return 0;
	}
	ci = EXT4_I(d_inode(dir))->i_crypt_info;
	if (ci && ci->ci_keyring_key &&
	    (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
					  (1 << KEY_FLAG_REVOKED) |
					  (1 << KEY_FLAG_DEAD))))
		ci = NULL;

	/* this should eventually be a flag in d_flags */
	cached_with_key = dentry->d_fsdata != NULL;
	dir_has_key = (ci != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name.  We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so ext4_lookup() can do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
	    (!cached_with_key && dir_has_key) ||
	    (cached_with_key && !dir_has_key)) {
#if 0				/* Revalidation debug */
		char buf[80];
		char *cp = simple_dname(dentry, buf, sizeof(buf));

		if (IS_ERR(cp))
			cp = (char *) "???";
		pr_err("revalidate: %s %p %d %d %d\n", cp, dentry->d_fsdata,
		       cached_with_key, d_is_negative(dentry),
		       dir_has_key);
#endif
		return 0;
	}
	return 1;
}
const struct dentry_operations ext4_encrypted_d_ops = {
	.d_revalidate = ext4_d_revalidate,
};