/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/ratelimit.h>
#include <linux/dcache.h>
#include <linux/namei.h>
#include <crypto/aes.h>
#include "fscrypt_private.h"
static unsigned int num_prealloc_crypto_pages = 32;
static unsigned int num_prealloc_crypto_ctxs = 128;

module_param(num_prealloc_crypto_pages, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_pages,
		"Number of crypto pages to preallocate");
module_param(num_prealloc_crypto_ctxs, uint, 0444);
MODULE_PARM_DESC(num_prealloc_crypto_ctxs,
		"Number of crypto contexts to preallocate");
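/*
 * Illustrative note (not in the original file): both parameters above
 * use mode 0444, so they are read-only through sysfs and can only be
 * set at load/boot time. Assuming this code is built in under the name
 * "fscrypt", the kernel command line form would be, e.g.:
 *
 *	fscrypt.num_prealloc_crypto_pages=64
 *	fscrypt.num_prealloc_crypto_ctxs=256
 */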
static mempool_t *fscrypt_bounce_page_pool = NULL;

static LIST_HEAD(fscrypt_free_ctxs);
static DEFINE_SPINLOCK(fscrypt_ctx_lock);

struct workqueue_struct *fscrypt_read_workqueue;
static DEFINE_MUTEX(fscrypt_init_mutex);

static struct kmem_cache *fscrypt_ctx_cachep;
struct kmem_cache *fscrypt_info_cachep;
/**
 * fscrypt_release_ctx() - Releases an encryption context
 * @ctx: The encryption context to release.
 *
 * If the encryption context was allocated from the pre-allocated pool, returns
 * it to that pool. Else, frees it.
 *
 * If there's a bounce page in the context, this frees that.
 */
void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
{
	unsigned long flags;

	if (ctx->flags & FS_CTX_HAS_BOUNCE_BUFFER_FL && ctx->w.bounce_page) {
		mempool_free(ctx->w.bounce_page, fscrypt_bounce_page_pool);
		ctx->w.bounce_page = NULL;
	}
	ctx->w.control_page = NULL;
	if (ctx->flags & FS_CTX_REQUIRES_FREE_ENCRYPT_FL) {
		kmem_cache_free(fscrypt_ctx_cachep, ctx);
	} else {
		spin_lock_irqsave(&fscrypt_ctx_lock, flags);
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
		spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	}
}
EXPORT_SYMBOL(fscrypt_release_ctx);
/**
 * fscrypt_get_ctx() - Gets an encryption context
 * @inode:     The inode for which we are doing the crypto
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Allocates and initializes an encryption context.
 *
 * Return: An allocated and initialized encryption context on success; error
 * value or NULL otherwise.
 */
struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx = NULL;
	struct fscrypt_info *ci = inode->i_crypt_info;
	unsigned long flags;

	if (ci == NULL)
		return ERR_PTR(-ENOKEY);

	/*
	 * We first try getting the ctx from a free list because in
	 * the common case the ctx will have an allocated and
	 * initialized crypto tfm, so it's probably a worthwhile
	 * optimization. For the bounce page, we first try getting it
	 * from the kernel allocator because that's just about as fast
	 * as getting it from a list and because a cache of free pages
	 * should generally be a "last resort" option for a filesystem
	 * to be able to do its job.
	 */
	spin_lock_irqsave(&fscrypt_ctx_lock, flags);
	ctx = list_first_entry_or_null(&fscrypt_free_ctxs,
					struct fscrypt_ctx, free_list);
	if (ctx)
		list_del(&ctx->free_list);
	spin_unlock_irqrestore(&fscrypt_ctx_lock, flags);
	if (!ctx) {
		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, gfp_flags);
		if (!ctx)
			return ERR_PTR(-ENOMEM);
		ctx->flags |= FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	} else {
		ctx->flags &= ~FS_CTX_REQUIRES_FREE_ENCRYPT_FL;
	}
	ctx->flags &= ~FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx;
}
EXPORT_SYMBOL(fscrypt_get_ctx);
/**
 * page_crypt_complete() - completion callback for page crypto
 * @req: The asynchronous cipher request context
 * @res: The result of the cipher operation
 */
static void page_crypt_complete(struct crypto_async_request *req, int res)
{
	struct fscrypt_completion_result *ecr = req->data;

	if (res == -EINPROGRESS)
		return;
	ecr->res = res;
	complete(&ecr->completion);
}
int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
			   u64 lblk_num, struct page *src_page,
			   struct page *dest_page, unsigned int len,
			   unsigned int offs, gfp_t gfp_flags)
{
	struct {
		__le64 index;
		u8 padding[FS_IV_SIZE - sizeof(__le64)];
	} iv;
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	if (WARN_ON_ONCE(len <= 0))
		return -EINVAL;
	if (WARN_ON_ONCE(len % FS_CRYPTO_BLOCK_SIZE != 0))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(iv) != FS_IV_SIZE);
	BUILD_BUG_ON(AES_BLOCK_SIZE != FS_IV_SIZE);
	iv.index = cpu_to_le64(lblk_num);
	memset(iv.padding, 0, sizeof(iv.padding));

	if (ci->ci_essiv_tfm != NULL) {
		crypto_cipher_encrypt_one(ci->ci_essiv_tfm, (u8 *)&iv,
					  (u8 *)&iv);
	}

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req) {
		printk_ratelimited(KERN_ERR
				"%s: crypto_request_alloc() failed\n",
				__func__);
		return -ENOMEM;
	}

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		page_crypt_complete, &ecr);

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_skcipher_decrypt(req);
	else
		res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_skcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}
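/*
 * Illustrative sketch (not in the original file): encrypting a single
 * 512-byte filesystem block in place with the helper above; the caller
 * and block size are hypothetical:
 *
 *	err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
 *				     page, page, 512, offs, GFP_NOFS);
 *	if (err)
 *		return err;
 */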
struct page *fscrypt_alloc_bounce_page(struct fscrypt_ctx *ctx,
				       gfp_t gfp_flags)
{
	ctx->w.bounce_page = mempool_alloc(fscrypt_bounce_page_pool, gfp_flags);
	if (ctx->w.bounce_page == NULL)
		return ERR_PTR(-ENOMEM);
	ctx->flags |= FS_CTX_HAS_BOUNCE_BUFFER_FL;
	return ctx->w.bounce_page;
}
/**
 * fscrypt_encrypt_page() - Encrypts a page
 * @inode:     The inode for which the encryption should take place
 * @page:      The page to encrypt. Must be locked for bounce-page
 *             encryption.
 * @len:       Length of data to encrypt in @page and encrypted
 *             data in returned page.
 * @offs:      Offset of data within @page and returned
 *             page holding encrypted data.
 * @lblk_num:  Logical block number. This must be unique for multiple
 *             calls with same inode, except when overwriting
 *             previously written data.
 * @gfp_flags: The gfp flag for memory allocation
 *
 * Encrypts @page using the ctx encryption context. Performs encryption
 * either in-place or into a newly allocated bounce page.
 * Called on the page write path.
 *
 * Bounce page allocation is the default.
 * In this case, the contents of @page are encrypted and stored in an
 * allocated bounce page. @page has to be locked and the caller must call
 * fscrypt_restore_control_page() on the returned ciphertext page to
 * release the bounce buffer and the encryption context.
 *
 * In-place encryption is used by setting the FS_CFLG_OWN_PAGES flag in
 * fscrypt_operations. Here, the input-page is returned with its content
 * encrypted.
 *
 * Return: A page with the encrypted content on success. Else, an
 * error value or NULL.
 */
struct page *fscrypt_encrypt_page(const struct inode *inode,
				  struct page *page,
				  unsigned int len,
				  unsigned int offs,
				  u64 lblk_num, gfp_t gfp_flags)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = page;
	int err;

	if (inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES) {
		/* with inplace-encryption we just encrypt the page */
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num, page,
					     ciphertext_page, len, offs,
					     gfp_flags);
		if (err)
			return ERR_PTR(err);

		return ciphertext_page;
	}

	if (WARN_ON_ONCE(!PageLocked(page)))
		return ERR_PTR(-EINVAL);

	ctx = fscrypt_get_ctx(inode, gfp_flags);
	if (IS_ERR(ctx))
		return (struct page *)ctx;

	/* The encryption operation will require a bounce page. */
	ciphertext_page = fscrypt_alloc_bounce_page(ctx, gfp_flags);
	if (IS_ERR(ciphertext_page))
		goto errout;

	ctx->w.control_page = page;
	err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk_num,
				     page, ciphertext_page, len, offs,
				     gfp_flags);
	if (err) {
		ciphertext_page = ERR_PTR(err);
		goto errout;
	}
	SetPagePrivate(ciphertext_page);
	set_page_private(ciphertext_page, (unsigned long)ctx);
	lock_page(ciphertext_page);
	return ciphertext_page;

errout:
	fscrypt_release_ctx(ctx);
	return ciphertext_page;
}
EXPORT_SYMBOL(fscrypt_encrypt_page);
/**
 * fscrypt_decrypt_page() - Decrypts a page in-place
 * @inode:     The corresponding inode for the page to decrypt.
 * @page:      The page to decrypt. Must be locked in case
 *             it is a writeback page (FS_CFLG_OWN_PAGES unset).
 * @len:       Number of bytes in @page to be decrypted.
 * @offs:      Start of data in @page.
 * @lblk_num:  Logical block number.
 *
 * Decrypts page in-place using the ctx encryption context.
 *
 * Called from the read completion callback.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_decrypt_page(const struct inode *inode, struct page *page,
			 unsigned int len, unsigned int offs, u64 lblk_num)
{
	if (WARN_ON_ONCE(!PageLocked(page) &&
			 !(inode->i_sb->s_cop->flags & FS_CFLG_OWN_PAGES)))
		return -EINVAL;

	return fscrypt_do_page_crypto(inode, FS_DECRYPT, lblk_num, page, page,
				      len, offs, GFP_NOFS);
}
EXPORT_SYMBOL(fscrypt_decrypt_page);
/*
 * Validate dentries for encrypted directories to make sure we aren't
 * potentially caching stale data after a key has been added or
 * removed.
 */
static int fscrypt_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct dentry *dir;
	int dir_has_key, cached_with_key;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dget_parent(dentry);
	if (!d_inode(dir)->i_sb->s_cop->is_encrypted(d_inode(dir))) {
		dput(dir);
		return 0;
	}

	/* this should eventually be a flag in d_flags */
	spin_lock(&dentry->d_lock);
	cached_with_key = dentry->d_flags & DCACHE_ENCRYPTED_WITH_KEY;
	spin_unlock(&dentry->d_lock);
	dir_has_key = (d_inode(dir)->i_crypt_info != NULL);
	dput(dir);

	/*
	 * If the dentry was cached without the key, and it is a
	 * negative dentry, it might be a valid name. We can't check
	 * if the key has since been made available due to locking
	 * reasons, so we fail the validation so ext4_lookup() can do
	 * this check.
	 *
	 * We also fail the validation if the dentry was created with
	 * the key present, but we no longer have the key, or vice versa.
	 */
	if ((!cached_with_key && d_is_negative(dentry)) ||
	    (!cached_with_key && dir_has_key) ||
	    (cached_with_key && !dir_has_key))
		return 0;
	return 1;
}

const struct dentry_operations fscrypt_d_ops = {
	.d_revalidate = fscrypt_d_revalidate,
};
EXPORT_SYMBOL(fscrypt_d_ops);
void fscrypt_restore_control_page(struct page *page)
{
	struct fscrypt_ctx *ctx;

	ctx = (struct fscrypt_ctx *)page_private(page);
	set_page_private(page, (unsigned long)NULL);
	ClearPagePrivate(page);
	unlock_page(page);
	fscrypt_release_ctx(ctx);
}
EXPORT_SYMBOL(fscrypt_restore_control_page);
static void fscrypt_destroy(void)
{
	struct fscrypt_ctx *pos, *n;

	list_for_each_entry_safe(pos, n, &fscrypt_free_ctxs, free_list)
		kmem_cache_free(fscrypt_ctx_cachep, pos);
	INIT_LIST_HEAD(&fscrypt_free_ctxs);
	mempool_destroy(fscrypt_bounce_page_pool);
	fscrypt_bounce_page_pool = NULL;
}
/**
 * fscrypt_initialize() - allocate major buffers for fs encryption.
 * @cop_flags: fscrypt operations flags
 *
 * We only call this when we start accessing encrypted files, since it
 * results in memory getting allocated that wouldn't otherwise be used.
 *
 * Return: Zero on success, non-zero otherwise.
 */
int fscrypt_initialize(unsigned int cop_flags)
{
	int i, res = -ENOMEM;

	/* No need to allocate a bounce page pool if this FS won't use it. */
	if (cop_flags & FS_CFLG_OWN_PAGES)
		return 0;

	mutex_lock(&fscrypt_init_mutex);
	if (fscrypt_bounce_page_pool)
		goto already_initialized;

	for (i = 0; i < num_prealloc_crypto_ctxs; i++) {
		struct fscrypt_ctx *ctx;

		ctx = kmem_cache_zalloc(fscrypt_ctx_cachep, GFP_NOFS);
		if (!ctx)
			goto fail;
		list_add(&ctx->free_list, &fscrypt_free_ctxs);
	}

	fscrypt_bounce_page_pool =
		mempool_create_page_pool(num_prealloc_crypto_pages, 0);
	if (!fscrypt_bounce_page_pool)
		goto fail;

already_initialized:
	mutex_unlock(&fscrypt_init_mutex);
	return 0;
fail:
	fscrypt_destroy();
	mutex_unlock(&fscrypt_init_mutex);
	return res;
}
/**
 * fscrypt_init() - Set up for fs encryption.
 */
static int __init fscrypt_init(void)
{
	/*
	 * Use an unbound workqueue to allow bios to be decrypted in parallel
	 * even when they happen to complete on the same CPU. This sacrifices
	 * locality, but it's worthwhile since decryption is CPU-intensive.
	 *
	 * Also use a high-priority workqueue to prioritize decryption work,
	 * which blocks reads from completing, over regular application tasks.
	 */
	fscrypt_read_workqueue = alloc_workqueue("fscrypt_read_queue",
						 WQ_UNBOUND | WQ_HIGHPRI,
						 num_online_cpus());
	if (!fscrypt_read_workqueue)
		goto fail;

	fscrypt_ctx_cachep = KMEM_CACHE(fscrypt_ctx, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_ctx_cachep)
		goto fail_free_queue;

	fscrypt_info_cachep = KMEM_CACHE(fscrypt_info, SLAB_RECLAIM_ACCOUNT);
	if (!fscrypt_info_cachep)
		goto fail_free_ctx;

	return 0;

fail_free_ctx:
	kmem_cache_destroy(fscrypt_ctx_cachep);
fail_free_queue:
	destroy_workqueue(fscrypt_read_workqueue);
fail:
	return -ENOMEM;
}
module_init(fscrypt_init)
/**
 * fscrypt_exit() - Shutdown the fs encryption system
 */
static void __exit fscrypt_exit(void)
{
	fscrypt_destroy();

	if (fscrypt_read_workqueue)
		destroy_workqueue(fscrypt_read_workqueue);
	kmem_cache_destroy(fscrypt_ctx_cachep);
	kmem_cache_destroy(fscrypt_info_cachep);

	fscrypt_essiv_cleanup();
}
module_exit(fscrypt_exit);

MODULE_LICENSE("GPL");