/*
 * This contains encryption functions for per-file encryption.
 *
 * Copyright (C) 2015, Google, Inc.
 * Copyright (C) 2015, Motorola Mobility
 *
 * Written by Michael Halcrow, 2014.
 *
 * Filename encryption additions
 *	Uday Savagaonkar, 2014
 * Encryption policy handling additions
 *	Ildar Muslukhov, 2014
 * Add fscrypt_pullback_bio_page()
 *	Jaegeuk Kim, 2015.
 *
 * This has not yet undergone a rigorous security audit.
 *
 * The usage of AES-XTS should conform to recommendations in NIST
 * Special Publication 800-38E and IEEE P1619/D16.
 */
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/namei.h>
#include "fscrypt_private.h"
/*
 * Call fscrypt_decrypt_page on every single page, reusing the encryption
 * context.
 */
static void completion_pages(struct work_struct *work)
{
	struct fscrypt_ctx *ctx =
		container_of(work, struct fscrypt_ctx, r.work);
	struct bio *bio = ctx->r.bio;
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;
		int ret = fscrypt_decrypt_page(page->mapping->host, page,
				PAGE_SIZE, 0, page->index);

		if (ret) {
			WARN_ON_ONCE(1);
			SetPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	fscrypt_release_ctx(ctx);
	bio_put(bio);
}
void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx, struct bio *bio)
{
	INIT_WORK(&ctx->r.work, completion_pages);
	ctx->r.bio = bio;
	queue_work(fscrypt_read_workqueue, &ctx->r.work);
}
EXPORT_SYMBOL(fscrypt_decrypt_bio_pages);
void fscrypt_pullback_bio_page(struct page **page, bool restore)
{
	struct fscrypt_ctx *ctx;
	struct page *bounce_page;

	/* The bounce data pages are unmapped. */
	if ((*page)->mapping)
		return;

	/* The bounce data page is unmapped. */
	bounce_page = *page;
	ctx = (struct fscrypt_ctx *)page_private(bounce_page);

	/* restore control page */
	*page = ctx->w.control_page;

	if (restore)
		fscrypt_restore_control_page(bounce_page);
}
EXPORT_SYMBOL(fscrypt_pullback_bio_page);
int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
				sector_t pblk, unsigned int len)
{
	struct fscrypt_ctx *ctx;
	struct page *ciphertext_page = NULL;
	struct bio *bio;
	int ret, err = 0;

	BUG_ON(inode->i_sb->s_blocksize != PAGE_SIZE);

	ctx = fscrypt_get_ctx(inode, GFP_NOFS);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	ciphertext_page = fscrypt_alloc_bounce_page(ctx, GFP_NOWAIT);
	if (IS_ERR(ciphertext_page)) {
		err = PTR_ERR(ciphertext_page);
		goto errout;
	}

	while (len--) {
		err = fscrypt_do_page_crypto(inode, FS_ENCRYPT, lblk,
					     ZERO_PAGE(0), ciphertext_page,
					     PAGE_SIZE, 0, GFP_NOFS);
		if (err)
			goto errout;

		bio = bio_alloc(GFP_NOWAIT, 1);
		if (!bio) {
			err = -ENOMEM;
			goto errout;
		}
		bio->bi_bdev = inode->i_sb->s_bdev;
		bio->bi_iter.bi_sector =
			pblk << (inode->i_sb->s_blocksize_bits - 9);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		ret = bio_add_page(bio, ciphertext_page,
					inode->i_sb->s_blocksize, 0);
		if (ret != inode->i_sb->s_blocksize) {
			/* should never happen! */
			WARN_ON(1);
			bio_put(bio);
			err = -EIO;
			goto errout;
		}
		err = submit_bio_wait(bio);
		if ((err == 0) && bio->bi_error)
			err = -EIO;
		bio_put(bio);
		if (err)
			goto errout;
		lblk++;
		pblk++;
	}
	err = 0;
errout:
	fscrypt_release_ctx(ctx);
	return err;
}
EXPORT_SYMBOL(fscrypt_zeroout_range);