1 // SPDX-License-Identifier: GPL-2.0
8 #include <linux/crc32c.h>
9 #include <linux/crypto.h>
10 #include <linux/xxhash.h>
11 #include <linux/key.h>
12 #include <linux/random.h>
13 #include <linux/ratelimit.h>
14 #include <linux/scatterlist.h>
15 #include <crypto/algapi.h>
16 #include <crypto/chacha.h>
17 #include <crypto/hash.h>
18 #include <crypto/poly1305.h>
19 #include <crypto/skcipher.h>
20 #include <keys/user-type.h>
23 * bch2_checksum state is an abstraction of the checksum state calculated over different pages.
24 * it features page merging without having the checksum algorithm lose its state.
 * for native checksum algorithms (like crc), a default seed value will do.
26 * for hash-like algorithms, a state needs to be stored
29 struct bch2_checksum_state
{
32 struct xxh64_state h64state
;
37 static void bch2_checksum_init(struct bch2_checksum_state
*state
)
39 switch (state
->type
) {
45 case BCH_CSUM_crc32c_nonzero
:
46 state
->seed
= U32_MAX
;
48 case BCH_CSUM_crc64_nonzero
:
49 state
->seed
= U64_MAX
;
52 xxh64_reset(&state
->h64state
, 0);
59 static u64
bch2_checksum_final(const struct bch2_checksum_state
*state
)
61 switch (state
->type
) {
66 case BCH_CSUM_crc32c_nonzero
:
67 return state
->seed
^ U32_MAX
;
68 case BCH_CSUM_crc64_nonzero
:
69 return state
->seed
^ U64_MAX
;
71 return xxh64_digest(&state
->h64state
);
77 static void bch2_checksum_update(struct bch2_checksum_state
*state
, const void *data
, size_t len
)
79 switch (state
->type
) {
82 case BCH_CSUM_crc32c_nonzero
:
84 state
->seed
= crc32c(state
->seed
, data
, len
);
86 case BCH_CSUM_crc64_nonzero
:
88 state
->seed
= crc64_be(state
->seed
, data
, len
);
91 xxh64_update(&state
->h64state
, data
, len
);
98 static inline int do_encrypt_sg(struct crypto_sync_skcipher
*tfm
,
100 struct scatterlist
*sg
, size_t len
)
102 SYNC_SKCIPHER_REQUEST_ON_STACK(req
, tfm
);
104 skcipher_request_set_sync_tfm(req
, tfm
);
105 skcipher_request_set_callback(req
, 0, NULL
, NULL
);
106 skcipher_request_set_crypt(req
, sg
, sg
, len
, nonce
.d
);
108 int ret
= crypto_skcipher_encrypt(req
);
110 pr_err("got error %i from crypto_skcipher_encrypt()", ret
);
115 static inline int do_encrypt(struct crypto_sync_skcipher
*tfm
,
117 void *buf
, size_t len
)
119 if (!is_vmalloc_addr(buf
)) {
120 struct scatterlist sg
= {};
123 sg_set_page(&sg
, virt_to_page(buf
), len
, offset_in_page(buf
));
124 return do_encrypt_sg(tfm
, nonce
, &sg
, len
);
126 DARRAY_PREALLOCATED(struct scatterlist
, 4) sgl
;
133 unsigned offset
= offset_in_page(buf
);
134 struct scatterlist sg
= {
135 .page_link
= (unsigned long) vmalloc_to_page(buf
),
137 .length
= min(len
, PAGE_SIZE
- offset
),
140 if (darray_push(&sgl
, sg
)) {
141 sg_mark_end(&darray_last(sgl
));
142 ret
= do_encrypt_sg(tfm
, nonce
, sgl
.data
, sgl_len
);
146 nonce
= nonce_add(nonce
, sgl_len
);
149 BUG_ON(darray_push(&sgl
, sg
));
154 sgl_len
+= sg
.length
;
157 sg_mark_end(&darray_last(sgl
));
158 ret
= do_encrypt_sg(tfm
, nonce
, sgl
.data
, sgl_len
);
165 int bch2_chacha_encrypt_key(struct bch_key
*key
, struct nonce nonce
,
166 void *buf
, size_t len
)
168 struct crypto_sync_skcipher
*chacha20
=
169 crypto_alloc_sync_skcipher("chacha20", 0, 0);
172 ret
= PTR_ERR_OR_ZERO(chacha20
);
174 pr_err("error requesting chacha20 cipher: %s", bch2_err_str(ret
));
178 ret
= crypto_skcipher_setkey(&chacha20
->base
,
179 (void *) key
, sizeof(*key
));
181 pr_err("error from crypto_skcipher_setkey(): %s", bch2_err_str(ret
));
185 ret
= do_encrypt(chacha20
, nonce
, buf
, len
);
187 crypto_free_sync_skcipher(chacha20
);
191 static int gen_poly_key(struct bch_fs
*c
, struct shash_desc
*desc
,
194 u8 key
[POLY1305_KEY_SIZE
];
197 nonce
.d
[3] ^= BCH_NONCE_POLY
;
199 memset(key
, 0, sizeof(key
));
200 ret
= do_encrypt(c
->chacha20
, nonce
, key
, sizeof(key
));
204 desc
->tfm
= c
->poly1305
;
205 crypto_shash_init(desc
);
206 crypto_shash_update(desc
, key
, sizeof(key
));
210 struct bch_csum
bch2_checksum(struct bch_fs
*c
, unsigned type
,
211 struct nonce nonce
, const void *data
, size_t len
)
215 case BCH_CSUM_crc32c_nonzero
:
216 case BCH_CSUM_crc64_nonzero
:
217 case BCH_CSUM_crc32c
:
218 case BCH_CSUM_xxhash
:
219 case BCH_CSUM_crc64
: {
220 struct bch2_checksum_state state
;
224 bch2_checksum_init(&state
);
225 bch2_checksum_update(&state
, data
, len
);
227 return (struct bch_csum
) { .lo
= cpu_to_le64(bch2_checksum_final(&state
)) };
230 case BCH_CSUM_chacha20_poly1305_80
:
231 case BCH_CSUM_chacha20_poly1305_128
: {
232 SHASH_DESC_ON_STACK(desc
, c
->poly1305
);
233 u8 digest
[POLY1305_DIGEST_SIZE
];
234 struct bch_csum ret
= { 0 };
236 gen_poly_key(c
, desc
, nonce
);
238 crypto_shash_update(desc
, data
, len
);
239 crypto_shash_final(desc
, digest
);
241 memcpy(&ret
, digest
, bch_crc_bytes
[type
]);
245 return (struct bch_csum
) {};
249 int bch2_encrypt(struct bch_fs
*c
, unsigned type
,
250 struct nonce nonce
, void *data
, size_t len
)
252 if (!bch2_csum_type_is_encryption(type
))
255 return do_encrypt(c
->chacha20
, nonce
, data
, len
);
258 static struct bch_csum
__bch2_checksum_bio(struct bch_fs
*c
, unsigned type
,
259 struct nonce nonce
, struct bio
*bio
,
260 struct bvec_iter
*iter
)
266 return (struct bch_csum
) { 0 };
267 case BCH_CSUM_crc32c_nonzero
:
268 case BCH_CSUM_crc64_nonzero
:
269 case BCH_CSUM_crc32c
:
270 case BCH_CSUM_xxhash
:
271 case BCH_CSUM_crc64
: {
272 struct bch2_checksum_state state
;
275 bch2_checksum_init(&state
);
277 #ifdef CONFIG_HIGHMEM
278 __bio_for_each_segment(bv
, bio
, *iter
, *iter
) {
279 void *p
= kmap_local_page(bv
.bv_page
) + bv
.bv_offset
;
281 bch2_checksum_update(&state
, p
, bv
.bv_len
);
285 __bio_for_each_bvec(bv
, bio
, *iter
, *iter
)
286 bch2_checksum_update(&state
, page_address(bv
.bv_page
) + bv
.bv_offset
,
289 return (struct bch_csum
) { .lo
= cpu_to_le64(bch2_checksum_final(&state
)) };
292 case BCH_CSUM_chacha20_poly1305_80
:
293 case BCH_CSUM_chacha20_poly1305_128
: {
294 SHASH_DESC_ON_STACK(desc
, c
->poly1305
);
295 u8 digest
[POLY1305_DIGEST_SIZE
];
296 struct bch_csum ret
= { 0 };
298 gen_poly_key(c
, desc
, nonce
);
300 #ifdef CONFIG_HIGHMEM
301 __bio_for_each_segment(bv
, bio
, *iter
, *iter
) {
302 void *p
= kmap_local_page(bv
.bv_page
) + bv
.bv_offset
;
304 crypto_shash_update(desc
, p
, bv
.bv_len
);
308 __bio_for_each_bvec(bv
, bio
, *iter
, *iter
)
309 crypto_shash_update(desc
,
310 page_address(bv
.bv_page
) + bv
.bv_offset
,
313 crypto_shash_final(desc
, digest
);
315 memcpy(&ret
, digest
, bch_crc_bytes
[type
]);
319 return (struct bch_csum
) {};
323 struct bch_csum
bch2_checksum_bio(struct bch_fs
*c
, unsigned type
,
324 struct nonce nonce
, struct bio
*bio
)
326 struct bvec_iter iter
= bio
->bi_iter
;
328 return __bch2_checksum_bio(c
, type
, nonce
, bio
, &iter
);
331 int __bch2_encrypt_bio(struct bch_fs
*c
, unsigned type
,
332 struct nonce nonce
, struct bio
*bio
)
335 struct bvec_iter iter
;
336 DARRAY_PREALLOCATED(struct scatterlist
, 4) sgl
;
340 if (!bch2_csum_type_is_encryption(type
))
345 bio_for_each_segment(bv
, bio
, iter
) {
346 struct scatterlist sg
= {
347 .page_link
= (unsigned long) bv
.bv_page
,
348 .offset
= bv
.bv_offset
,
352 if (darray_push(&sgl
, sg
)) {
353 sg_mark_end(&darray_last(sgl
));
354 ret
= do_encrypt_sg(c
->chacha20
, nonce
, sgl
.data
, sgl_len
);
358 nonce
= nonce_add(nonce
, sgl_len
);
362 BUG_ON(darray_push(&sgl
, sg
));
365 sgl_len
+= sg
.length
;
368 sg_mark_end(&darray_last(sgl
));
369 ret
= do_encrypt_sg(c
->chacha20
, nonce
, sgl
.data
, sgl_len
);
375 struct bch_csum
bch2_checksum_merge(unsigned type
, struct bch_csum a
,
376 struct bch_csum b
, size_t b_len
)
378 struct bch2_checksum_state state
;
381 bch2_checksum_init(&state
);
382 state
.seed
= le64_to_cpu(a
.lo
);
384 BUG_ON(!bch2_checksum_mergeable(type
));
387 unsigned page_len
= min_t(unsigned, b_len
, PAGE_SIZE
);
389 bch2_checksum_update(&state
,
390 page_address(ZERO_PAGE(0)), page_len
);
393 a
.lo
= cpu_to_le64(bch2_checksum_final(&state
));
399 int bch2_rechecksum_bio(struct bch_fs
*c
, struct bio
*bio
,
400 struct bversion version
,
401 struct bch_extent_crc_unpacked crc_old
,
402 struct bch_extent_crc_unpacked
*crc_a
,
403 struct bch_extent_crc_unpacked
*crc_b
,
404 unsigned len_a
, unsigned len_b
,
405 unsigned new_csum_type
)
407 struct bvec_iter iter
= bio
->bi_iter
;
408 struct nonce nonce
= extent_nonce(version
, crc_old
);
409 struct bch_csum merged
= { 0 };
411 struct bch_extent_crc_unpacked
*crc
;
414 struct bch_csum csum
;
416 { crc_a
, len_a
, new_csum_type
, { 0 }},
417 { crc_b
, len_b
, new_csum_type
, { 0 } },
418 { NULL
, bio_sectors(bio
) - len_a
- len_b
, new_csum_type
, { 0 } },
420 bool mergeable
= crc_old
.csum_type
== new_csum_type
&&
421 bch2_checksum_mergeable(new_csum_type
);
422 unsigned crc_nonce
= crc_old
.nonce
;
424 BUG_ON(len_a
+ len_b
> bio_sectors(bio
));
425 BUG_ON(crc_old
.uncompressed_size
!= bio_sectors(bio
));
426 BUG_ON(crc_is_compressed(crc_old
));
427 BUG_ON(bch2_csum_type_is_encryption(crc_old
.csum_type
) !=
428 bch2_csum_type_is_encryption(new_csum_type
));
430 for (i
= splits
; i
< splits
+ ARRAY_SIZE(splits
); i
++) {
431 iter
.bi_size
= i
->len
<< 9;
432 if (mergeable
|| i
->crc
)
433 i
->csum
= __bch2_checksum_bio(c
, i
->csum_type
,
436 bio_advance_iter(bio
, &iter
, i
->len
<< 9);
437 nonce
= nonce_add(nonce
, i
->len
<< 9);
441 for (i
= splits
; i
< splits
+ ARRAY_SIZE(splits
); i
++)
442 merged
= bch2_checksum_merge(new_csum_type
, merged
,
443 i
->csum
, i
->len
<< 9);
445 merged
= bch2_checksum_bio(c
, crc_old
.csum_type
,
446 extent_nonce(version
, crc_old
), bio
);
448 if (bch2_crc_cmp(merged
, crc_old
.csum
) && !c
->opts
.no_data_io
) {
449 struct printbuf buf
= PRINTBUF
;
450 prt_printf(&buf
, "checksum error in %s() (memory corruption or bug?)\n"
451 " expected %0llx:%0llx got %0llx:%0llx (old type ",
457 bch2_prt_csum_type(&buf
, crc_old
.csum_type
);
458 prt_str(&buf
, " new type ");
459 bch2_prt_csum_type(&buf
, new_csum_type
);
461 WARN_RATELIMIT(1, "%s", buf
.buf
);
466 for (i
= splits
; i
< splits
+ ARRAY_SIZE(splits
); i
++) {
468 *i
->crc
= (struct bch_extent_crc_unpacked
) {
469 .csum_type
= i
->csum_type
,
470 .compression_type
= crc_old
.compression_type
,
471 .compressed_size
= i
->len
,
472 .uncompressed_size
= i
->len
,
479 if (bch2_csum_type_is_encryption(new_csum_type
))
486 /* BCH_SB_FIELD_crypt: */
488 static int bch2_sb_crypt_validate(struct bch_sb
*sb
, struct bch_sb_field
*f
,
489 enum bch_validate_flags flags
, struct printbuf
*err
)
491 struct bch_sb_field_crypt
*crypt
= field_to_type(f
, crypt
);
493 if (vstruct_bytes(&crypt
->field
) < sizeof(*crypt
)) {
494 prt_printf(err
, "wrong size (got %zu should be %zu)",
495 vstruct_bytes(&crypt
->field
), sizeof(*crypt
));
496 return -BCH_ERR_invalid_sb_crypt
;
499 if (BCH_CRYPT_KDF_TYPE(crypt
)) {
500 prt_printf(err
, "bad kdf type %llu", BCH_CRYPT_KDF_TYPE(crypt
));
501 return -BCH_ERR_invalid_sb_crypt
;
/*
 * Print the crypt superblock field: the key derivation function type and the
 * scrypt cost parameters.
 *
 * Fix: the first label was misspelled "KFD" — it reports
 * BCH_CRYPT_KDF_TYPE(), i.e. the KDF type.
 */
static void bch2_sb_crypt_to_text(struct printbuf *out, struct bch_sb *sb,
                                  struct bch_sb_field *f)
{
        struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);

        prt_printf(out, "KDF: %llu\n", BCH_CRYPT_KDF_TYPE(crypt));
        prt_printf(out, "scrypt n: %llu\n", BCH_KDF_SCRYPT_N(crypt));
        prt_printf(out, "scrypt r: %llu\n", BCH_KDF_SCRYPT_R(crypt));
        prt_printf(out, "scrypt p: %llu\n", BCH_KDF_SCRYPT_P(crypt));
}
518 const struct bch_sb_field_ops bch_sb_field_ops_crypt
= {
519 .validate
= bch2_sb_crypt_validate
,
520 .to_text
= bch2_sb_crypt_to_text
,
524 static int __bch2_request_key(char *key_description
, struct bch_key
*key
)
526 struct key
*keyring_key
;
527 const struct user_key_payload
*ukp
;
530 keyring_key
= request_key(&key_type_user
, key_description
, NULL
);
531 if (IS_ERR(keyring_key
))
532 return PTR_ERR(keyring_key
);
534 down_read(&keyring_key
->sem
);
535 ukp
= dereference_key_locked(keyring_key
);
536 if (ukp
->datalen
== sizeof(*key
)) {
537 memcpy(key
, ukp
->data
, ukp
->datalen
);
542 up_read(&keyring_key
->sem
);
543 key_put(keyring_key
);
548 #include <keyutils.h>
550 static int __bch2_request_key(char *key_description
, struct bch_key
*key
)
554 key_id
= request_key("user", key_description
, NULL
,
555 KEY_SPEC_SESSION_KEYRING
);
559 key_id
= request_key("user", key_description
, NULL
,
560 KEY_SPEC_USER_KEYRING
);
564 key_id
= request_key("user", key_description
, NULL
,
565 KEY_SPEC_USER_SESSION_KEYRING
);
572 if (keyctl_read(key_id
, (void *) key
, sizeof(*key
)) != sizeof(*key
))
581 int bch2_request_key(struct bch_sb
*sb
, struct bch_key
*key
)
583 struct printbuf key_description
= PRINTBUF
;
586 prt_printf(&key_description
, "bcachefs:");
587 pr_uuid(&key_description
, sb
->user_uuid
.b
);
589 ret
= __bch2_request_key(key_description
.buf
, key
);
590 printbuf_exit(&key_description
);
594 char *passphrase
= read_passphrase("Enter passphrase: ");
595 struct bch_encrypted_key sb_key
;
597 bch2_passphrase_check(sb
, passphrase
,
603 /* stash with memfd, pass memfd fd to mount */
609 int bch2_revoke_key(struct bch_sb
*sb
)
612 struct printbuf key_description
= PRINTBUF
;
614 prt_printf(&key_description
, "bcachefs:");
615 pr_uuid(&key_description
, sb
->user_uuid
.b
);
617 key_id
= request_key("user", key_description
.buf
, NULL
, KEY_SPEC_USER_KEYRING
);
618 printbuf_exit(&key_description
);
622 keyctl_revoke(key_id
);
628 int bch2_decrypt_sb_key(struct bch_fs
*c
,
629 struct bch_sb_field_crypt
*crypt
,
632 struct bch_encrypted_key sb_key
= crypt
->key
;
633 struct bch_key user_key
;
636 /* is key encrypted? */
637 if (!bch2_key_is_encrypted(&sb_key
))
640 ret
= bch2_request_key(c
->disk_sb
.sb
, &user_key
);
642 bch_err(c
, "error requesting encryption key: %s", bch2_err_str(ret
));
646 /* decrypt real key: */
647 ret
= bch2_chacha_encrypt_key(&user_key
, bch2_sb_key_nonce(c
),
648 &sb_key
, sizeof(sb_key
));
652 if (bch2_key_is_encrypted(&sb_key
)) {
653 bch_err(c
, "incorrect encryption key");
660 memzero_explicit(&sb_key
, sizeof(sb_key
));
661 memzero_explicit(&user_key
, sizeof(user_key
));
665 static int bch2_alloc_ciphers(struct bch_fs
*c
)
670 struct crypto_sync_skcipher
*chacha20
= crypto_alloc_sync_skcipher("chacha20", 0, 0);
671 int ret
= PTR_ERR_OR_ZERO(chacha20
);
673 bch_err(c
, "error requesting chacha20 module: %s", bch2_err_str(ret
));
677 struct crypto_shash
*poly1305
= crypto_alloc_shash("poly1305", 0, 0);
678 ret
= PTR_ERR_OR_ZERO(poly1305
);
680 bch_err(c
, "error requesting poly1305 module: %s", bch2_err_str(ret
));
681 crypto_free_sync_skcipher(chacha20
);
685 c
->chacha20
= chacha20
;
686 c
->poly1305
= poly1305
;
690 int bch2_disable_encryption(struct bch_fs
*c
)
692 struct bch_sb_field_crypt
*crypt
;
696 mutex_lock(&c
->sb_lock
);
698 crypt
= bch2_sb_field_get(c
->disk_sb
.sb
, crypt
);
702 /* is key encrypted? */
704 if (bch2_key_is_encrypted(&crypt
->key
))
707 ret
= bch2_decrypt_sb_key(c
, crypt
, &key
);
711 crypt
->key
.magic
= cpu_to_le64(BCH_KEY_MAGIC
);
712 crypt
->key
.key
= key
;
714 SET_BCH_SB_ENCRYPTION_TYPE(c
->disk_sb
.sb
, 0);
717 mutex_unlock(&c
->sb_lock
);
722 int bch2_enable_encryption(struct bch_fs
*c
, bool keyed
)
724 struct bch_encrypted_key key
;
725 struct bch_key user_key
;
726 struct bch_sb_field_crypt
*crypt
;
729 mutex_lock(&c
->sb_lock
);
731 /* Do we already have an encryption key? */
732 if (bch2_sb_field_get(c
->disk_sb
.sb
, crypt
))
735 ret
= bch2_alloc_ciphers(c
);
739 key
.magic
= cpu_to_le64(BCH_KEY_MAGIC
);
740 get_random_bytes(&key
.key
, sizeof(key
.key
));
743 ret
= bch2_request_key(c
->disk_sb
.sb
, &user_key
);
745 bch_err(c
, "error requesting encryption key: %s", bch2_err_str(ret
));
749 ret
= bch2_chacha_encrypt_key(&user_key
, bch2_sb_key_nonce(c
),
755 ret
= crypto_skcipher_setkey(&c
->chacha20
->base
,
756 (void *) &key
.key
, sizeof(key
.key
));
760 crypt
= bch2_sb_field_resize(&c
->disk_sb
, crypt
,
761 sizeof(*crypt
) / sizeof(u64
));
763 ret
= -BCH_ERR_ENOSPC_sb_crypt
;
769 /* write superblock */
770 SET_BCH_SB_ENCRYPTION_TYPE(c
->disk_sb
.sb
, 1);
773 mutex_unlock(&c
->sb_lock
);
774 memzero_explicit(&user_key
, sizeof(user_key
));
775 memzero_explicit(&key
, sizeof(key
));
779 void bch2_fs_encryption_exit(struct bch_fs
*c
)
782 crypto_free_shash(c
->poly1305
);
784 crypto_free_sync_skcipher(c
->chacha20
);
786 crypto_free_shash(c
->sha256
);
789 int bch2_fs_encryption_init(struct bch_fs
*c
)
791 struct bch_sb_field_crypt
*crypt
;
795 c
->sha256
= crypto_alloc_shash("sha256", 0, 0);
796 ret
= PTR_ERR_OR_ZERO(c
->sha256
);
799 bch_err(c
, "error requesting sha256 module: %s", bch2_err_str(ret
));
803 crypt
= bch2_sb_field_get(c
->disk_sb
.sb
, crypt
);
807 ret
= bch2_alloc_ciphers(c
);
811 ret
= bch2_decrypt_sb_key(c
, crypt
, &key
);
815 ret
= crypto_skcipher_setkey(&c
->chacha20
->base
,
816 (void *) &key
.key
, sizeof(key
.key
));
820 memzero_explicit(&key
, sizeof(key
));