/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2017, Datto, Inc. All rights reserved.
 */
#include <sys/zio_crypt.h>
#include <sys/dmu_objset.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
/*
 * This file is responsible for handling all of the details of generating
 * encryption parameters and performing encryption and authentication.
 *
 * BLOCK ENCRYPTION PARAMETERS:
 * Encryption / Authentication Algorithm Suite (crypt):
 * The encryption algorithm, mode, and key length we are going to use. We
 * currently support AES in either GCM or CCM modes with 128, 192, and 256 bit
 * keys. All authentication is currently done with SHA512-HMAC.
 *
 * Plaintext:
 * The unencrypted data that we want to encrypt.
 *
 * Initialization Vector (IV):
 * An initialization vector for the encryption algorithms. This is used to
 * "tweak" the encryption algorithms so that two blocks of the same data are
 * encrypted into different ciphertext outputs, thus obfuscating block patterns.
 * The supported encryption modes (AES-GCM and AES-CCM) require that an IV is
 * never reused with the same encryption key. This value is stored unencrypted
 * and must simply be provided to the decryption function. We use a 96 bit IV
 * (as recommended by NIST) for all block encryption. For non-dedup blocks we
 * derive the IV randomly. The first 64 bits of the IV are stored in the second
 * word of DVA[2] and the remaining 32 bits are stored in the upper 32 bits of
 * blk_fill. This is safe because encrypted blocks can't use the upper 32 bits
 * of blk_fill. We only encrypt level 0 blocks, which normally have a fill count
 * of 1. The only exception is for DMU_OT_DNODE objects, where the fill count of
 * level 0 blocks is the number of allocated dnodes in that block. The on-disk
 * format supports at most 2^15 slots per L0 dnode block, because the maximum
 * block size is 16MB (2^24 bytes) and the minimum dnode size is 512 bytes
 * (2^9). In either case, for level 0 blocks this number will still be smaller
 * than UINT32_MAX so it is safe to store the IV in the top 32 bits of blk_fill,
 * while leaving the bottom 32 bits of the fill count for the dnode code.
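 *
 * As a quick reference (an illustrative summary added here, not part of the
 * original comment), the layout used by zio_crypt_encode_params_bp() below is:
 *
 *   DVA[2] word 0        <- 64 bit salt
 *   DVA[2] word 1        <- IV bits 0..63
 *   blk_fill bits 63:32  <- IV bits 64..95 (via BP_SET_IV2 / BP_GET_IV2)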
 *
 * Master key:
 * This is the most important secret data of an encrypted dataset. It is used
 * along with the salt to generate the actual encryption keys via HKDF. We
 * do not use the master key to directly encrypt any data because there are
 * theoretical limits on how much data can actually be safely encrypted with
 * any encryption mode. The master key is stored encrypted on disk with the
 * user's wrapping key. Its length is determined by the encryption algorithm.
 * For details on how this is stored see the block comment in dsl_crypt.c.
 *
 * Salt:
 * Used as an input to the HKDF function, along with the master key. We use a
 * 64 bit salt, stored unencrypted in the first word of DVA[2]. Any given salt
 * can be used for encrypting many blocks, so we cache the current salt and the
 * associated derived key in zio_crypt_t so we do not need to derive it again
 * needlessly.
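 *
 * As a sketch (added commentary, not from the original comment), the
 * derivation performed by hkdf_sha512() below is effectively:
 *
 *   current_key = HKDF-SHA512(master_key, salt)
 *
 * so rotating the salt yields a fresh block-encryption key without ever
 * touching the master key itself.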
 *
 * Encryption Key:
 * A secret binary key, generated from an HKDF function used to encrypt and
 * decrypt data.
 *
 * Message Authentication Code (MAC):
 * The MAC is an output of authenticated encryption modes such as AES-GCM and
 * AES-CCM. Its purpose is to ensure that an attacker cannot modify encrypted
 * data on disk and return garbage to the application. Effectively, it is a
 * checksum that cannot be reproduced by an attacker. We store the MAC in the
 * second 128 bits of blk_cksum, leaving the first 128 bits for a truncated
 * regular checksum of the ciphertext which can be used for scrubbing.
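 *
 * Concretely (a summary of zio_crypt_encode_mac_bp() below, not original
 * text): blk_cksum.zc_word[0..1] hold the truncated ciphertext checksum and
 * blk_cksum.zc_word[2..3] hold the 128 bit MAC.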
 *
 * OBJECT AUTHENTICATION:
 * Some object types, such as DMU_OT_MASTER_NODE, cannot be encrypted because
 * they contain some info that always needs to be readable. To prevent this
 * data from being altered, we authenticate this data using SHA512-HMAC. This
 * will produce a MAC (similar to the one produced via encryption) which can
 * be used to verify the object was not modified. HMACs do not require key
 * rotation or IVs, so we can keep up to the full 3 copies of authenticated
 * data.
 *
 * ZIL ENCRYPTION:
 * ZIL blocks have their bp written to disk ahead of the associated data, so we
 * cannot store the MAC there as we normally do. For these blocks the MAC is
 * stored in the embedded checksum within the zil_chain_t header. The salt and
 * IV are generated for the block on bp allocation instead of at encryption
 * time. In addition, ZIL blocks have some pieces that must be left in plaintext
 * for claiming even though all of the sensitive user data still needs to be
 * encrypted. The function zio_crypt_init_uios_zil() handles parsing which
 * pieces of the block need to be encrypted. All data that is not encrypted is
 * authenticated using the AAD mechanisms that the supported encryption modes
 * provide for. In order to preserve the semantics of the ZIL for encrypted
 * datasets, the ZIL is not protected at the objset level as described below.
 *
 * DNODE ENCRYPTION:
 * Similarly to ZIL blocks, the core part of each dnode_phys_t needs to be left
 * in plaintext for scrubbing and claiming, but the bonus buffers might contain
 * sensitive user data. The function zio_crypt_init_uios_dnode() handles parsing
 * which pieces of the block need to be encrypted. For more details about
 * dnode authentication and encryption, see zio_crypt_init_uios_dnode().
 *
 * OBJECT SET AUTHENTICATION:
 * Up to this point, everything we have encrypted and authenticated has been
 * at level 0 (or -2 for the ZIL). If we did not do any further work the
 * on-disk format would be susceptible to attacks that deleted or rearranged
 * the order of level 0 blocks. Ideally, the cleanest solution would be to
 * maintain a tree of authentication MACs going up the bp tree. However, this
 * presents a problem for raw sends. Send files do not send information about
 * indirect blocks so there would be no convenient way to transfer the MACs and
 * they cannot be recalculated on the receive side without the master key which
 * would defeat one of the purposes of raw sends in the first place. Instead,
 * for the indirect levels of the bp tree, we use a regular SHA512 of the MACs
 * from the level below. We also include some portable fields from blk_prop such
 * as the lsize and compression algorithm to prevent the data from being
 * misinterpreted.
 *
 * At the objset level, we maintain 2 separate 256 bit MACs in the
 * objset_phys_t. The first one is "portable" and is the logical root of the
 * MAC tree maintained in the metadnode's bps. The second is "local" and is
 * used as the root MAC for the user accounting objects, which are also not
 * transferred via "zfs send". The portable MAC is sent in the DRR_BEGIN payload
 * of the send file. The useraccounting code ensures that the useraccounting
 * info is not present upon a receive, so the local MAC can simply be cleared
 * out at that time. For more info about objset_phys_t authentication, see
 * zio_crypt_do_objset_hmacs().
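 *
 * (For orientation, added commentary rather than original text: these two
 * MACs live in the os_portable_mac and os_local_mac fields of objset_phys_t,
 * and zio_crypt_do_objset_hmacs() below computes or verifies both of them.)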
 *
 * CONSIDERATIONS FOR DEDUP:
 * In order for dedup to work, blocks that we want to dedup with one another
 * need to use the same IV and encryption key, so that they will have the same
 * ciphertext. Normally, one should never reuse an IV with the same encryption
 * key or else AES-GCM and AES-CCM can both actually leak the plaintext of both
 * blocks. In this case, however, since the plaintext is also the same, all we
 * end up with is a duplicate of the original ciphertext we already had. As a
 * result, an attacker with read access to the raw disk will be able to tell
 * which blocks are the same, but this information is given away by dedup
 * anyway. In order to get the same IVs and encryption keys for equivalent
 * blocks of data we use an HMAC of the plaintext. We use an HMAC here so that
 * a reproducible checksum of the plaintext is never available to the attacker.
 * The HMAC key is kept alongside the master key, encrypted on disk. The first
 * 64 bits of the HMAC are used in place of the random salt, and the next 96
 * bits are used as the IV. As a result of this mechanism, dedup will only work
 * within a clone family since encrypted dedup requires use of the same master
 * and HMAC keys.
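 *
 * As an illustrative breakdown (added here, not original text) of what
 * zio_crypt_generate_iv_salt_dedup() below does with the SHA512-HMAC digest
 * of the plaintext:
 *
 *   digest bytes 0..7   -> 64 bit salt
 *   digest bytes 8..19  -> 96 bit IV
 *
 * so two identical plaintexts under the same keys deterministically produce
 * the same (salt, IV) pair and therefore identical ciphertext.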
 *
 * ENCRYPTION LIMITATIONS:
 * After encrypting many blocks with the same key we may start to run up
 * against the theoretical limits of how much data can securely be encrypted
 * with a single key using the supported encryption modes. The most obvious
 * limitation is that our risk of generating 2 equivalent 96 bit IVs increases
 * the more IVs we generate (which both GCM and CCM modes strictly forbid).
 * This risk actually grows surprisingly quickly over time according to the
 * Birthday Problem. With a total IV space of 2^(96 bits), and assuming we have
 * generated n IVs with a cryptographically secure RNG, the approximate
 * probability p(n) of a collision is given as:
 *
 * p(n) ~= 1 - e^(-n*(n-1)/(2*(2^96)))
 *
 * [http://www.math.cornell.edu/~mec/2008-2009/TianyiZheng/Birthday.html]
 *
 * Assuming that we want to ensure that p(n) never goes over 1 / 1 trillion
 * we must not write more than 398,065,730 blocks with the same encryption key.
 * Therefore, we rotate our keys after 400,000,000 blocks have been written by
 * generating a new random 64 bit salt for our HKDF encryption key generation
 * function.
 */
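/*
 * Back-of-the-envelope check of the rotation bound above (an illustration
 * added here, not part of the original comment): with n = 398,065,730 and an
 * IV space of 2^96 ~= 7.9e28, n*(n-1)/(2*(2^96)) ~= 1.585e17 / 1.585e29
 * ~= 1.0e-12, so p(n) ~= 1 - e^(-1.0e-12) ~= 1e-12, i.e. roughly the
 * 1 / 1 trillion target, which is why the default salt-use limit below is
 * set at 400 million uses per salt.
 */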
#define	ZFS_KEY_MAX_SALT_USES_DEFAULT	400000000
#define	ZFS_CURRENT_MAX_SALT_USES \
	(MIN(zfs_key_max_salt_uses, ZFS_KEY_MAX_SALT_USES_DEFAULT))
static unsigned long zfs_key_max_salt_uses = ZFS_KEY_MAX_SALT_USES_DEFAULT;
typedef struct blkptr_auth_buf {
	uint64_t bab_prop;			/* blk_prop - portable mask */
	uint8_t bab_mac[ZIO_DATA_MAC_LEN];	/* MAC from blk_cksum */
	uint64_t bab_pad;			/* reserved for future use */
} blkptr_auth_buf_t;
const zio_crypt_info_t zio_crypt_table[ZIO_CRYPT_FUNCTIONS] = {
	{"", ZC_TYPE_NONE, 0, "inherit"},
	{"", ZC_TYPE_NONE, 0, "on"},
	{"", ZC_TYPE_NONE, 0, "off"},
	{SUN_CKM_AES_CCM, ZC_TYPE_CCM, 16, "aes-128-ccm"},
	{SUN_CKM_AES_CCM, ZC_TYPE_CCM, 24, "aes-192-ccm"},
	{SUN_CKM_AES_CCM, ZC_TYPE_CCM, 32, "aes-256-ccm"},
	{SUN_CKM_AES_GCM, ZC_TYPE_GCM, 16, "aes-128-gcm"},
	{SUN_CKM_AES_GCM, ZC_TYPE_GCM, 24, "aes-192-gcm"},
	{SUN_CKM_AES_GCM, ZC_TYPE_GCM, 32, "aes-256-gcm"}
};
void
zio_crypt_key_destroy_early(zio_crypt_key_t *key)
{
	rw_destroy(&key->zk_salt_lock);

	/* free crypto templates */
	memset(&key->zk_session, 0, sizeof (key->zk_session));

	/* zero out sensitive data */
	memset(key, 0, sizeof (zio_crypt_key_t));
}

void
zio_crypt_key_destroy(zio_crypt_key_t *key)
{
	freebsd_crypt_freesession(&key->zk_session);
	zio_crypt_key_destroy_early(key);
}
zio_crypt_key_init(uint64_t crypt, zio_crypt_key_t *key)
	crypto_mechanism_t mech __unused;
	const zio_crypt_info_t *ci = NULL;

	ASSERT3P(key, !=, NULL);
	ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);

	ci = &zio_crypt_table[crypt];
	if (ci->ci_crypt_type != ZC_TYPE_GCM &&
	    ci->ci_crypt_type != ZC_TYPE_CCM)

	keydata_len = zio_crypt_table[crypt].ci_keylen;
	memset(key, 0, sizeof (zio_crypt_key_t));
	rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL);

	/* fill keydata buffers and salt with random data */
	ret = random_get_bytes((uint8_t *)&key->zk_guid, sizeof (uint64_t));
	ret = random_get_bytes(key->zk_master_keydata, keydata_len);
	ret = random_get_bytes(key->zk_hmac_keydata, SHA512_HMAC_KEYLEN);
	ret = random_get_bytes(key->zk_salt, ZIO_DATA_SALT_LEN);

	/* derive the current key from the master key */
	ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
	    key->zk_salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata,
	    keydata_len);

	/* initialize keys for the ICP */
	key->zk_current_key.ck_data = key->zk_current_keydata;
	key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len);

	key->zk_hmac_key.ck_data = &key->zk_hmac_key;
	key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN);

	ci = &zio_crypt_table[crypt];
	if (ci->ci_crypt_type != ZC_TYPE_GCM &&
	    ci->ci_crypt_type != ZC_TYPE_CCM)

	ret = freebsd_crypt_newsession(&key->zk_session, ci,
	    &key->zk_current_key);

	key->zk_crypt = crypt;
	key->zk_version = ZIO_CRYPT_KEY_CURRENT_VERSION;
	key->zk_salt_count = 0;

	zio_crypt_key_destroy_early(key);
zio_crypt_key_change_salt(zio_crypt_key_t *key)
	uint8_t salt[ZIO_DATA_SALT_LEN];
	crypto_mechanism_t mech __unused;
	uint_t keydata_len = zio_crypt_table[key->zk_crypt].ci_keylen;

	/* generate a new salt */
	ret = random_get_bytes(salt, ZIO_DATA_SALT_LEN);

	rw_enter(&key->zk_salt_lock, RW_WRITER);

	/* someone beat us to the salt rotation, just unlock and return */
	if (key->zk_salt_count < ZFS_CURRENT_MAX_SALT_USES)

	/* derive the current key from the master key and the new salt */
	ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
	    salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata, keydata_len);

	/* assign the salt and reset the usage count */
	memcpy(key->zk_salt, salt, ZIO_DATA_SALT_LEN);
	key->zk_salt_count = 0;

	freebsd_crypt_freesession(&key->zk_session);
	ret = freebsd_crypt_newsession(&key->zk_session,
	    &zio_crypt_table[key->zk_crypt], &key->zk_current_key);

	rw_exit(&key->zk_salt_lock);

	rw_exit(&key->zk_salt_lock);

/* See comment above zfs_key_max_salt_uses definition for details */
zio_crypt_key_get_salt(zio_crypt_key_t *key, uint8_t *salt)
	boolean_t salt_change;

	rw_enter(&key->zk_salt_lock, RW_READER);

	memcpy(salt, key->zk_salt, ZIO_DATA_SALT_LEN);
	salt_change = (atomic_inc_64_nv(&key->zk_salt_count) >=
	    ZFS_CURRENT_MAX_SALT_USES);

	rw_exit(&key->zk_salt_lock);

	ret = zio_crypt_key_change_salt(key);
void *failed_decrypt_buf;
int failed_decrypt_size;

/*
 * This function handles all encryption and decryption in zfs. When
 * encrypting it expects puio to reference the plaintext and cuio to
 * reference the ciphertext. cuio must have enough space for the
 * ciphertext + room for a MAC. datalen should be the length of the
 * plaintext / ciphertext alone.
 */

/*
 * The implementation for FreeBSD's OpenCrypto.
 *
 * The big difference between ICP and FOC is that FOC uses a single
 * buffer for input and output. This means that (for AES-GCM, the
 * only one supported right now) the source must be copied into the
 * destination, and the destination must have the AAD, and the tag/MAC,
 * already associated with it. (Both implementations can use a uio.)
 *
 * Since the auth data is part of the iovec array, all we need to know
 * is the length: 0 means there's no AAD.
 */
zio_do_crypt_uio_opencrypto(boolean_t encrypt, freebsd_crypt_session_t *sess,
    uint64_t crypt, crypto_key_t *key, uint8_t *ivbuf, uint_t datalen,
    zfs_uio_t *uio, uint_t auth_len)
	const zio_crypt_info_t *ci = &zio_crypt_table[crypt];
	if (ci->ci_crypt_type != ZC_TYPE_GCM &&
	    ci->ci_crypt_type != ZC_TYPE_CCM)

	int ret = freebsd_crypt_uio(encrypt, sess, ci, uio, key, ivbuf,
	    datalen, auth_len);

		printf("%s(%d): Returning error %s\n",
		    __FUNCTION__, __LINE__, encrypt ? "EIO" : "ECKSUM");

	ret = SET_ERROR(encrypt ? EIO : ECKSUM);
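
/*
 * Layout note (added commentary, not from the original source): the callers
 * below build the uio handed to zio_do_crypt_uio_opencrypto() so that
 * iovec[0] holds the AAD (auth_len bytes), the middle iovecs hold the
 * datalen bytes that are encrypted or decrypted in place, and the final
 * iovec holds the GCM/CCM tag (the MAC), which is how FreeBSD's OpenCrypto
 * expects a single combined buffer to be presented.
 */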
421 zio_crypt_key_wrap(crypto_key_t
*cwkey
, zio_crypt_key_t
*key
, uint8_t *iv
,
422 uint8_t *mac
, uint8_t *keydata_out
, uint8_t *hmac_keydata_out
)
	/*
	 * With OpenCrypto in FreeBSD, the same buffer is used for
	 * input and output. Also, the AAD (for AES-GCM at least)
	 * needs to logically go in front.
	 */
434 uint64_t crypt
= key
->zk_crypt
;
435 uint_t enc_len
, keydata_len
, aad_len
;
437 ASSERT3U(crypt
, <, ZIO_CRYPT_FUNCTIONS
);
439 zfs_uio_init(&cuio
, &cuio_s
);
441 keydata_len
= zio_crypt_table
[crypt
].ci_keylen
;
443 /* generate iv for wrapping the master and hmac key */
444 ret
= random_get_pseudo_bytes(iv
, WRAPPING_IV_LEN
);
449 * Since we only support one buffer, we need to copy
450 * the plain text (source) to the cipher buffer (dest).
451 * We set iovecs[0] -- the authentication data -- below.
453 memcpy(keydata_out
, key
->zk_master_keydata
, keydata_len
);
454 memcpy(hmac_keydata_out
, key
->zk_hmac_keydata
, SHA512_HMAC_KEYLEN
);
455 iovecs
[1].iov_base
= keydata_out
;
456 iovecs
[1].iov_len
= keydata_len
;
457 iovecs
[2].iov_base
= hmac_keydata_out
;
458 iovecs
[2].iov_len
= SHA512_HMAC_KEYLEN
;
459 iovecs
[3].iov_base
= mac
;
460 iovecs
[3].iov_len
= WRAPPING_MAC_LEN
;
463 * Although we don't support writing to the old format, we do
464 * support rewrapping the key so that the user can move and
465 * quarantine datasets on the old format.
467 if (key
->zk_version
== 0) {
468 aad_len
= sizeof (uint64_t);
469 aad
[0] = LE_64(key
->zk_guid
);
471 ASSERT3U(key
->zk_version
, ==, ZIO_CRYPT_KEY_CURRENT_VERSION
);
472 aad_len
= sizeof (uint64_t) * 3;
473 aad
[0] = LE_64(key
->zk_guid
);
474 aad
[1] = LE_64(crypt
);
475 aad
[2] = LE_64(key
->zk_version
);
478 iovecs
[0].iov_base
= aad
;
479 iovecs
[0].iov_len
= aad_len
;
480 enc_len
= zio_crypt_table
[crypt
].ci_keylen
+ SHA512_HMAC_KEYLEN
;
482 GET_UIO_STRUCT(&cuio
)->uio_iov
= iovecs
;
483 zfs_uio_iovcnt(&cuio
) = 4;
484 zfs_uio_segflg(&cuio
) = UIO_SYSSPACE
;
486 /* encrypt the keys and store the resulting ciphertext and mac */
487 ret
= zio_do_crypt_uio_opencrypto(B_TRUE
, NULL
, crypt
, cwkey
,
488 iv
, enc_len
, &cuio
, aad_len
);
499 zio_crypt_key_unwrap(crypto_key_t
*cwkey
, uint64_t crypt
, uint64_t version
,
500 uint64_t guid
, uint8_t *keydata
, uint8_t *hmac_keydata
, uint8_t *iv
,
501 uint8_t *mac
, zio_crypt_key_t
*key
)
	/*
	 * With OpenCrypto in FreeBSD, the same buffer is used for
	 * input and output. Also, the AAD (for AES-GCM at least)
	 * needs to logically go in front.
	 */
514 uint_t enc_len
, keydata_len
, aad_len
;
516 ASSERT3U(crypt
, <, ZIO_CRYPT_FUNCTIONS
);
518 keydata_len
= zio_crypt_table
[crypt
].ci_keylen
;
519 rw_init(&key
->zk_salt_lock
, NULL
, RW_DEFAULT
, NULL
);
521 zfs_uio_init(&cuio
, &cuio_s
);
524 * Since we only support one buffer, we need to copy
525 * the encrypted buffer (source) to the plain buffer
526 * (dest). We set iovecs[0] -- the authentication data --
529 dst
= key
->zk_master_keydata
;
531 memcpy(dst
, src
, keydata_len
);
533 dst
= key
->zk_hmac_keydata
;
535 memcpy(dst
, src
, SHA512_HMAC_KEYLEN
);
537 iovecs
[1].iov_base
= key
->zk_master_keydata
;
538 iovecs
[1].iov_len
= keydata_len
;
539 iovecs
[2].iov_base
= key
->zk_hmac_keydata
;
540 iovecs
[2].iov_len
= SHA512_HMAC_KEYLEN
;
541 iovecs
[3].iov_base
= mac
;
542 iovecs
[3].iov_len
= WRAPPING_MAC_LEN
;
545 aad_len
= sizeof (uint64_t);
546 aad
[0] = LE_64(guid
);
548 ASSERT3U(version
, ==, ZIO_CRYPT_KEY_CURRENT_VERSION
);
549 aad_len
= sizeof (uint64_t) * 3;
550 aad
[0] = LE_64(guid
);
551 aad
[1] = LE_64(crypt
);
552 aad
[2] = LE_64(version
);
555 enc_len
= keydata_len
+ SHA512_HMAC_KEYLEN
;
556 iovecs
[0].iov_base
= aad
;
557 iovecs
[0].iov_len
= aad_len
;
559 GET_UIO_STRUCT(&cuio
)->uio_iov
= iovecs
;
560 zfs_uio_iovcnt(&cuio
) = 4;
561 zfs_uio_segflg(&cuio
) = UIO_SYSSPACE
;
563 /* decrypt the keys and store the result in the output buffers */
564 ret
= zio_do_crypt_uio_opencrypto(B_FALSE
, NULL
, crypt
, cwkey
,
565 iv
, enc_len
, &cuio
, aad_len
);
570 /* generate a fresh salt */
571 ret
= random_get_bytes(key
->zk_salt
, ZIO_DATA_SALT_LEN
);
575 /* derive the current key from the master key */
576 ret
= hkdf_sha512(key
->zk_master_keydata
, keydata_len
, NULL
, 0,
577 key
->zk_salt
, ZIO_DATA_SALT_LEN
, key
->zk_current_keydata
,
582 /* initialize keys for ICP */
583 key
->zk_current_key
.ck_data
= key
->zk_current_keydata
;
584 key
->zk_current_key
.ck_length
= CRYPTO_BYTES2BITS(keydata_len
);
586 key
->zk_hmac_key
.ck_data
= key
->zk_hmac_keydata
;
587 key
->zk_hmac_key
.ck_length
= CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN
);
589 ret
= freebsd_crypt_newsession(&key
->zk_session
,
590 &zio_crypt_table
[crypt
], &key
->zk_current_key
);
594 key
->zk_crypt
= crypt
;
595 key
->zk_version
= version
;
597 key
->zk_salt_count
= 0;
602 zio_crypt_key_destroy_early(key
);
zio_crypt_generate_iv(uint8_t *ivbuf)
	/* randomly generate the IV */
	ret = random_get_pseudo_bytes(ivbuf, ZIO_DATA_IV_LEN);

	memset(ivbuf, 0, ZIO_DATA_IV_LEN);

zio_crypt_do_hmac(zio_crypt_key_t *key, uint8_t *data, uint_t datalen,
    uint8_t *digestbuf, uint_t digestlen)
	uint8_t raw_digestbuf[SHA512_DIGEST_LENGTH];

	ASSERT3U(digestlen, <=, SHA512_DIGEST_LENGTH);

	crypto_mac(&key->zk_hmac_key, data, datalen,
	    raw_digestbuf, SHA512_DIGEST_LENGTH);

	memcpy(digestbuf, raw_digestbuf, digestlen);

zio_crypt_generate_iv_salt_dedup(zio_crypt_key_t *key, uint8_t *data,
    uint_t datalen, uint8_t *ivbuf, uint8_t *salt)
	uint8_t digestbuf[SHA512_DIGEST_LENGTH];

	ret = zio_crypt_do_hmac(key, data, datalen,
	    digestbuf, SHA512_DIGEST_LENGTH);

	memcpy(salt, digestbuf, ZIO_DATA_SALT_LEN);
	memcpy(ivbuf, digestbuf + ZIO_DATA_SALT_LEN, ZIO_DATA_IV_LEN);
/*
 * The following functions are used to encode and decode encryption parameters
 * into blkptr_t and zil_header_t. The ICP wants to use these parameters as
 * byte strings, which normally means that these strings would not need to deal
 * with byteswapping at all. However, both blkptr_t and zil_header_t may be
 * byteswapped by lower layers and so we must "undo" that byteswap here upon
 * decoding and encoding in a non-native byteorder. These functions require
 * that the byteorder bit is correct before being called.
 */
667 zio_crypt_encode_params_bp(blkptr_t
*bp
, uint8_t *salt
, uint8_t *iv
)
672 ASSERT(BP_IS_ENCRYPTED(bp
));
674 if (!BP_SHOULD_BYTESWAP(bp
)) {
675 memcpy(&bp
->blk_dva
[2].dva_word
[0], salt
, sizeof (uint64_t));
676 memcpy(&bp
->blk_dva
[2].dva_word
[1], iv
, sizeof (uint64_t));
677 memcpy(&val32
, iv
+ sizeof (uint64_t), sizeof (uint32_t));
678 BP_SET_IV2(bp
, val32
);
680 memcpy(&val64
, salt
, sizeof (uint64_t));
681 bp
->blk_dva
[2].dva_word
[0] = BSWAP_64(val64
);
683 memcpy(&val64
, iv
, sizeof (uint64_t));
684 bp
->blk_dva
[2].dva_word
[1] = BSWAP_64(val64
);
686 memcpy(&val32
, iv
+ sizeof (uint64_t), sizeof (uint32_t));
687 BP_SET_IV2(bp
, BSWAP_32(val32
));
692 zio_crypt_decode_params_bp(const blkptr_t
*bp
, uint8_t *salt
, uint8_t *iv
)
697 ASSERT(BP_IS_PROTECTED(bp
));
699 /* for convenience, so callers don't need to check */
700 if (BP_IS_AUTHENTICATED(bp
)) {
701 memset(salt
, 0, ZIO_DATA_SALT_LEN
);
702 memset(iv
, 0, ZIO_DATA_IV_LEN
);
706 if (!BP_SHOULD_BYTESWAP(bp
)) {
707 memcpy(salt
, &bp
->blk_dva
[2].dva_word
[0], sizeof (uint64_t));
708 memcpy(iv
, &bp
->blk_dva
[2].dva_word
[1], sizeof (uint64_t));
710 val32
= (uint32_t)BP_GET_IV2(bp
);
711 memcpy(iv
+ sizeof (uint64_t), &val32
, sizeof (uint32_t));
713 val64
= BSWAP_64(bp
->blk_dva
[2].dva_word
[0]);
714 memcpy(salt
, &val64
, sizeof (uint64_t));
716 val64
= BSWAP_64(bp
->blk_dva
[2].dva_word
[1]);
717 memcpy(iv
, &val64
, sizeof (uint64_t));
719 val32
= BSWAP_32((uint32_t)BP_GET_IV2(bp
));
720 memcpy(iv
+ sizeof (uint64_t), &val32
, sizeof (uint32_t));
725 zio_crypt_encode_mac_bp(blkptr_t
*bp
, uint8_t *mac
)
729 ASSERT(BP_USES_CRYPT(bp
));
730 ASSERT3U(BP_GET_TYPE(bp
), !=, DMU_OT_OBJSET
);
732 if (!BP_SHOULD_BYTESWAP(bp
)) {
733 memcpy(&bp
->blk_cksum
.zc_word
[2], mac
, sizeof (uint64_t));
734 memcpy(&bp
->blk_cksum
.zc_word
[3], mac
+ sizeof (uint64_t),
737 memcpy(&val64
, mac
, sizeof (uint64_t));
738 bp
->blk_cksum
.zc_word
[2] = BSWAP_64(val64
);
740 memcpy(&val64
, mac
+ sizeof (uint64_t), sizeof (uint64_t));
741 bp
->blk_cksum
.zc_word
[3] = BSWAP_64(val64
);
746 zio_crypt_decode_mac_bp(const blkptr_t
*bp
, uint8_t *mac
)
750 ASSERT(BP_USES_CRYPT(bp
) || BP_IS_HOLE(bp
));
752 /* for convenience, so callers don't need to check */
753 if (BP_GET_TYPE(bp
) == DMU_OT_OBJSET
) {
754 memset(mac
, 0, ZIO_DATA_MAC_LEN
);
758 if (!BP_SHOULD_BYTESWAP(bp
)) {
759 memcpy(mac
, &bp
->blk_cksum
.zc_word
[2], sizeof (uint64_t));
760 memcpy(mac
+ sizeof (uint64_t), &bp
->blk_cksum
.zc_word
[3],
763 val64
= BSWAP_64(bp
->blk_cksum
.zc_word
[2]);
764 memcpy(mac
, &val64
, sizeof (uint64_t));
766 val64
= BSWAP_64(bp
->blk_cksum
.zc_word
[3]);
767 memcpy(mac
+ sizeof (uint64_t), &val64
, sizeof (uint64_t));
772 zio_crypt_encode_mac_zil(void *data
, uint8_t *mac
)
774 zil_chain_t
*zilc
= data
;
776 memcpy(&zilc
->zc_eck
.zec_cksum
.zc_word
[2], mac
, sizeof (uint64_t));
777 memcpy(&zilc
->zc_eck
.zec_cksum
.zc_word
[3], mac
+ sizeof (uint64_t),
782 zio_crypt_decode_mac_zil(const void *data
, uint8_t *mac
)
785 * The ZIL MAC is embedded in the block it protects, which will
786 * not have been byteswapped by the time this function has been called.
787 * As a result, we don't need to worry about byteswapping the MAC.
789 const zil_chain_t
*zilc
= data
;
791 memcpy(mac
, &zilc
->zc_eck
.zec_cksum
.zc_word
[2], sizeof (uint64_t));
792 memcpy(mac
+ sizeof (uint64_t), &zilc
->zc_eck
.zec_cksum
.zc_word
[3],
/*
 * This routine takes a block of dnodes (src_abd) and copies only the bonus
 * buffers to the same offsets in the dst buffer. datalen should be the size
 * of both the src_abd and the dst buffer (not just the length of the bonus
 * buffers).
 */
803 zio_crypt_copy_dnode_bonus(abd_t
*src_abd
, uint8_t *dst
, uint_t datalen
)
805 uint_t i
, max_dnp
= datalen
>> DNODE_SHIFT
;
807 dnode_phys_t
*dnp
, *sdnp
, *ddnp
;
809 src
= abd_borrow_buf_copy(src_abd
, datalen
);
811 sdnp
= (dnode_phys_t
*)src
;
812 ddnp
= (dnode_phys_t
*)dst
;
814 for (i
= 0; i
< max_dnp
; i
+= sdnp
[i
].dn_extra_slots
+ 1) {
816 if (dnp
->dn_type
!= DMU_OT_NONE
&&
817 DMU_OT_IS_ENCRYPTED(dnp
->dn_bonustype
) &&
818 dnp
->dn_bonuslen
!= 0) {
819 memcpy(DN_BONUS(&ddnp
[i
]), DN_BONUS(dnp
),
820 DN_MAX_BONUS_LEN(dnp
));
824 abd_return_buf(src_abd
, src
, datalen
);
/*
 * This function decides what fields from blk_prop are included in
 * the various on-disk MAC algorithms.
 */
832 zio_crypt_bp_zero_nonportable_blkprop(blkptr_t
*bp
, uint64_t version
)
834 int avoidlint
= SPA_MINBLOCKSIZE
;
836 * Version 0 did not properly zero out all non-portable fields
837 * as it should have done. We maintain this code so that we can
838 * do read-only imports of pools on this version.
842 BP_SET_CHECKSUM(bp
, 0);
843 BP_SET_PSIZE(bp
, avoidlint
);
847 ASSERT3U(version
, ==, ZIO_CRYPT_KEY_CURRENT_VERSION
);
850 * The hole_birth feature might set these fields even if this bp
851 * is a hole. We zero them out here to guarantee that raw sends
852 * will function with or without the feature.
854 if (BP_IS_HOLE(bp
)) {
860 * At L0 we want to verify these fields to ensure that data blocks
861 * can not be reinterpreted. For instance, we do not want an attacker
862 * to trick us into returning raw lz4 compressed data to the user
863 * by modifying the compression bits. At higher levels, we cannot
864 * enforce this policy since raw sends do not convey any information
865 * about indirect blocks, so these values might be different on the
866 * receive side. Fortunately, this does not open any new attack
867 * vectors, since any alterations that can be made to a higher level
868 * bp must still verify the correct order of the layer below it.
870 if (BP_GET_LEVEL(bp
) != 0) {
871 BP_SET_BYTEORDER(bp
, 0);
872 BP_SET_COMPRESS(bp
, 0);
875 * psize cannot be set to zero or it will trigger
876 * asserts, but the value doesn't really matter as
877 * long as it is constant.
879 BP_SET_PSIZE(bp
, avoidlint
);
883 BP_SET_CHECKSUM(bp
, 0);
887 zio_crypt_bp_auth_init(uint64_t version
, boolean_t should_bswap
, blkptr_t
*bp
,
888 blkptr_auth_buf_t
*bab
, uint_t
*bab_len
)
890 blkptr_t tmpbp
= *bp
;
893 byteswap_uint64_array(&tmpbp
, sizeof (blkptr_t
));
895 ASSERT(BP_USES_CRYPT(&tmpbp
) || BP_IS_HOLE(&tmpbp
));
896 ASSERT0(BP_IS_EMBEDDED(&tmpbp
));
898 zio_crypt_decode_mac_bp(&tmpbp
, bab
->bab_mac
);
901 * We always MAC blk_prop in LE to ensure portability. This
902 * must be done after decoding the mac, since the endianness
903 * will get zero'd out here.
905 zio_crypt_bp_zero_nonportable_blkprop(&tmpbp
, version
);
906 bab
->bab_prop
= LE_64(tmpbp
.blk_prop
);
909 /* version 0 did not include the padding */
910 *bab_len
= sizeof (blkptr_auth_buf_t
);
912 *bab_len
-= sizeof (uint64_t);
916 zio_crypt_bp_do_hmac_updates(crypto_context_t ctx
, uint64_t version
,
917 boolean_t should_bswap
, blkptr_t
*bp
)
920 blkptr_auth_buf_t bab
;
922 zio_crypt_bp_auth_init(version
, should_bswap
, bp
, &bab
, &bab_len
);
923 crypto_mac_update(ctx
, &bab
, bab_len
);
929 zio_crypt_bp_do_indrect_checksum_updates(SHA2_CTX
*ctx
, uint64_t version
,
930 boolean_t should_bswap
, blkptr_t
*bp
)
933 blkptr_auth_buf_t bab
;
935 zio_crypt_bp_auth_init(version
, should_bswap
, bp
, &bab
, &bab_len
);
936 SHA2Update(ctx
, &bab
, bab_len
);
940 zio_crypt_bp_do_aad_updates(uint8_t **aadp
, uint_t
*aad_len
, uint64_t version
,
941 boolean_t should_bswap
, blkptr_t
*bp
)
944 blkptr_auth_buf_t bab
;
946 zio_crypt_bp_auth_init(version
, should_bswap
, bp
, &bab
, &bab_len
);
947 memcpy(*aadp
, &bab
, bab_len
);
953 zio_crypt_do_dnode_hmac_updates(crypto_context_t ctx
, uint64_t version
,
954 boolean_t should_bswap
, dnode_phys_t
*dnp
)
958 boolean_t le_bswap
= (should_bswap
== ZFS_HOST_BYTEORDER
);
959 uint8_t tmp_dncore
[offsetof(dnode_phys_t
, dn_blkptr
)];
961 /* authenticate the core dnode (masking out non-portable bits) */
962 memcpy(tmp_dncore
, dnp
, sizeof (tmp_dncore
));
963 adnp
= (dnode_phys_t
*)tmp_dncore
;
965 adnp
->dn_datablkszsec
= BSWAP_16(adnp
->dn_datablkszsec
);
966 adnp
->dn_bonuslen
= BSWAP_16(adnp
->dn_bonuslen
);
967 adnp
->dn_maxblkid
= BSWAP_64(adnp
->dn_maxblkid
);
968 adnp
->dn_used
= BSWAP_64(adnp
->dn_used
);
970 adnp
->dn_flags
&= DNODE_CRYPT_PORTABLE_FLAGS_MASK
;
973 crypto_mac_update(ctx
, adnp
, sizeof (tmp_dncore
));
975 for (i
= 0; i
< dnp
->dn_nblkptr
; i
++) {
976 ret
= zio_crypt_bp_do_hmac_updates(ctx
, version
,
977 should_bswap
, &dnp
->dn_blkptr
[i
]);
982 if (dnp
->dn_flags
& DNODE_FLAG_SPILL_BLKPTR
) {
983 ret
= zio_crypt_bp_do_hmac_updates(ctx
, version
,
984 should_bswap
, DN_SPILL_BLKPTR(dnp
));
/*
 * objset_phys_t blocks introduce a number of exceptions to the normal
 * authentication process. objset_phys_t's contain 2 separate HMACs for
 * protecting the integrity of their data. The portable_mac protects the
 * metadnode. This MAC can be sent with a raw send and protects against
 * reordering of data within the metadnode. The local_mac protects the user
 * accounting objects which are not sent from one system to another.
 *
 * In addition, objset blocks are the only blocks that can be modified and
 * written to disk without the key loaded under certain circumstances. During
 * zil_claim() we need to be able to update the zil_header_t to complete
 * claiming log blocks and during raw receives we need to write out the
 * portable_mac from the send file. Both of these actions are possible
 * because these fields are not protected by either MAC, so neither action
 * requires modifying the MACs without the key. However, when the modified
 * blocks are written out they will be byteswapped into the host machine's
 * native endianness which will modify fields protected by the MAC. As a
 * result, MAC calculation for objset blocks works slightly differently from
 * other block types. Where other block types MAC the data in whatever
 * endianness is written to disk, objset blocks always MAC the little endian
 * version of their values. In the code, should_bswap is the value from
 * BP_SHOULD_BYTESWAP() and le_bswap indicates whether a byteswap is needed
 * to get this block into little endian format.
 */
1020 zio_crypt_do_objset_hmacs(zio_crypt_key_t
*key
, void *data
, uint_t datalen
,
1021 boolean_t should_bswap
, uint8_t *portable_mac
, uint8_t *local_mac
)
1024 struct hmac_ctx hash_ctx
;
1025 struct hmac_ctx
*ctx
= &hash_ctx
;
1026 objset_phys_t
*osp
= data
;
1028 boolean_t le_bswap
= (should_bswap
== ZFS_HOST_BYTEORDER
);
1029 uint8_t raw_portable_mac
[SHA512_DIGEST_LENGTH
];
1030 uint8_t raw_local_mac
[SHA512_DIGEST_LENGTH
];
1033 /* calculate the portable MAC from the portable fields and metadnode */
1034 crypto_mac_init(ctx
, &key
->zk_hmac_key
);
1036 /* add in the os_type */
1037 intval
= (le_bswap
) ? osp
->os_type
: BSWAP_64(osp
->os_type
);
1038 crypto_mac_update(ctx
, &intval
, sizeof (uint64_t));
1040 /* add in the portable os_flags */
1041 intval
= osp
->os_flags
;
1043 intval
= BSWAP_64(intval
);
1044 intval
&= OBJSET_CRYPT_PORTABLE_FLAGS_MASK
;
1045 if (!ZFS_HOST_BYTEORDER
)
1046 intval
= BSWAP_64(intval
);
1048 crypto_mac_update(ctx
, &intval
, sizeof (uint64_t));
1050 /* add in fields from the metadnode */
1051 ret
= zio_crypt_do_dnode_hmac_updates(ctx
, key
->zk_version
,
1052 should_bswap
, &osp
->os_meta_dnode
);
1056 crypto_mac_final(ctx
, raw_portable_mac
, SHA512_DIGEST_LENGTH
);
1058 memcpy(portable_mac
, raw_portable_mac
, ZIO_OBJSET_MAC_LEN
);
	/*
	 * This is necessary here as we check next whether
	 * OBJSET_FLAG_USERACCOUNTING_COMPLETE is set in order to
	 * decide if the local_mac should be zeroed out. That flag will always
	 * be set by dmu_objset_id_quota_upgrade_cb() and
	 * dmu_objset_userspace_upgrade_cb() if useraccounting has been
	 * completed.
	 */
= osp
->os_flags
;
1070 intval
= BSWAP_64(intval
);
1071 boolean_t uacct_incomplete
=
1072 !(intval
& OBJSET_FLAG_USERACCOUNTING_COMPLETE
);
1075 * The local MAC protects the user, group and project accounting.
1076 * If these objects are not present, the local MAC is zeroed out.
1078 if (uacct_incomplete
||
1079 (datalen
>= OBJSET_PHYS_SIZE_V3
&&
1080 osp
->os_userused_dnode
.dn_type
== DMU_OT_NONE
&&
1081 osp
->os_groupused_dnode
.dn_type
== DMU_OT_NONE
&&
1082 osp
->os_projectused_dnode
.dn_type
== DMU_OT_NONE
) ||
1083 (datalen
>= OBJSET_PHYS_SIZE_V2
&&
1084 osp
->os_userused_dnode
.dn_type
== DMU_OT_NONE
&&
1085 osp
->os_groupused_dnode
.dn_type
== DMU_OT_NONE
) ||
1086 (datalen
<= OBJSET_PHYS_SIZE_V1
)) {
1087 memset(local_mac
, 0, ZIO_OBJSET_MAC_LEN
);
1091 /* calculate the local MAC from the userused and groupused dnodes */
1092 crypto_mac_init(ctx
, &key
->zk_hmac_key
);
1094 /* add in the non-portable os_flags */
1095 intval
= osp
->os_flags
;
1097 intval
= BSWAP_64(intval
);
1098 intval
&= ~OBJSET_CRYPT_PORTABLE_FLAGS_MASK
;
1099 if (!ZFS_HOST_BYTEORDER
)
1100 intval
= BSWAP_64(intval
);
1102 crypto_mac_update(ctx
, &intval
, sizeof (uint64_t));
1104 /* XXX check dnode type ... */
1105 /* add in fields from the user accounting dnodes */
1106 if (osp
->os_userused_dnode
.dn_type
!= DMU_OT_NONE
) {
1107 ret
= zio_crypt_do_dnode_hmac_updates(ctx
, key
->zk_version
,
1108 should_bswap
, &osp
->os_userused_dnode
);
1113 if (osp
->os_groupused_dnode
.dn_type
!= DMU_OT_NONE
) {
1114 ret
= zio_crypt_do_dnode_hmac_updates(ctx
, key
->zk_version
,
1115 should_bswap
, &osp
->os_groupused_dnode
);
1120 if (osp
->os_projectused_dnode
.dn_type
!= DMU_OT_NONE
&&
1121 datalen
>= OBJSET_PHYS_SIZE_V3
) {
1122 ret
= zio_crypt_do_dnode_hmac_updates(ctx
, key
->zk_version
,
1123 should_bswap
, &osp
->os_projectused_dnode
);
1128 crypto_mac_final(ctx
, raw_local_mac
, SHA512_DIGEST_LENGTH
);
1130 memcpy(local_mac
, raw_local_mac
, ZIO_OBJSET_MAC_LEN
);
1135 memset(portable_mac
, 0, ZIO_OBJSET_MAC_LEN
);
1136 memset(local_mac
, 0, ZIO_OBJSET_MAC_LEN
);
1141 zio_crypt_destroy_uio(zfs_uio_t
*uio
)
1143 if (GET_UIO_STRUCT(uio
)->uio_iov
)
1144 kmem_free(GET_UIO_STRUCT(uio
)->uio_iov
,
1145 zfs_uio_iovcnt(uio
) * sizeof (iovec_t
));
/*
 * This function parses an uncompressed indirect block and returns a checksum
 * of all the portable fields from all of the contained bps. The portable
 * fields are the MAC and all of the fields from blk_prop except for the dedup,
 * checksum, and psize bits. For an explanation of the purpose of this, see
 * the comment block on object set authentication.
 */
1156 zio_crypt_do_indirect_mac_checksum_impl(boolean_t generate
, void *buf
,
1157 uint_t datalen
, uint64_t version
, boolean_t byteswap
, uint8_t *cksum
)
1160 int i
, epb
= datalen
>> SPA_BLKPTRSHIFT
;
1162 uint8_t digestbuf
[SHA512_DIGEST_LENGTH
];
1164 /* checksum all of the MACs from the layer below */
1165 SHA2Init(SHA512
, &ctx
);
1166 for (i
= 0, bp
= buf
; i
< epb
; i
++, bp
++) {
1167 zio_crypt_bp_do_indrect_checksum_updates(&ctx
, version
,
1170 SHA2Final(digestbuf
, &ctx
);
1173 memcpy(cksum
, digestbuf
, ZIO_DATA_MAC_LEN
);
1177 if (memcmp(digestbuf
, cksum
, ZIO_DATA_MAC_LEN
) != 0) {
1178 #ifdef FCRYPTO_DEBUG
1179 printf("%s(%d): Setting ECKSUM\n", __FUNCTION__
, __LINE__
);
1181 return (SET_ERROR(ECKSUM
));
1187 zio_crypt_do_indirect_mac_checksum(boolean_t generate
, void *buf
,
1188 uint_t datalen
, boolean_t byteswap
, uint8_t *cksum
)
	/*
	 * Unfortunately, callers of this function will not always have
	 * easy access to the on-disk format version. This info is
	 * normally found in the DSL Crypto Key, but the checksum-of-MACs
	 * is expected to be verifiable even when the key isn't loaded.
	 * Here, instead of doing a ZAP lookup for the version for each
	 * zio, we simply try both existing formats.
	 */
1200 ret
= zio_crypt_do_indirect_mac_checksum_impl(generate
, buf
,
1201 datalen
, ZIO_CRYPT_KEY_CURRENT_VERSION
, byteswap
, cksum
);
1202 if (ret
== ECKSUM
) {
1204 ret
= zio_crypt_do_indirect_mac_checksum_impl(generate
,
1205 buf
, datalen
, 0, byteswap
, cksum
);
1212 zio_crypt_do_indirect_mac_checksum_abd(boolean_t generate
, abd_t
*abd
,
1213 uint_t datalen
, boolean_t byteswap
, uint8_t *cksum
)
1218 buf
= abd_borrow_buf_copy(abd
, datalen
);
1219 ret
= zio_crypt_do_indirect_mac_checksum(generate
, buf
, datalen
,
1221 abd_return_buf(abd
, buf
, datalen
);
/*
 * Special case handling routine for encrypting / decrypting ZIL blocks.
 * We do not check for the older ZIL chain because the encryption feature
 * was not available before the newer ZIL chain was introduced. The goal
 * here is to encrypt everything except the blkptr_t of a lr_write_t and
 * the zil_chain_t header. Everything that is not encrypted is authenticated.
 *
 * The OpenCrypto used in FreeBSD does not use separate source and
 * destination buffers; instead, the same buffer is used. Further, to
 * accommodate some of the drivers, the authbuf needs to be logically before
 * the data. This means that we need to copy the source to the destination,
 * and set up an extra iovec_t at the beginning to handle the authbuf.
 * It also means we'll only return one zfs_uio_t.
 */
1243 zio_crypt_init_uios_zil(boolean_t encrypt
, uint8_t *plainbuf
,
1244 uint8_t *cipherbuf
, uint_t datalen
, boolean_t byteswap
, zfs_uio_t
*puio
,
1245 zfs_uio_t
*out_uio
, uint_t
*enc_len
, uint8_t **authbuf
, uint_t
*auth_len
,
1246 boolean_t
*no_crypt
)
1249 uint8_t *aadbuf
= zio_buf_alloc(datalen
);
1250 uint8_t *src
, *dst
, *slrp
, *dlrp
, *blkend
, *aadp
;
1251 iovec_t
*dst_iovecs
;
1254 uint64_t txtype
, lr_len
, nused
;
1255 uint_t crypt_len
, nr_iovecs
, vec
;
1256 uint_t aad_len
= 0, total_len
= 0;
1265 memcpy(dst
, src
, datalen
);
1267 /* Find the start and end record of the log block. */
1268 zilc
= (zil_chain_t
*)src
;
1269 slrp
= src
+ sizeof (zil_chain_t
);
1271 nused
= ((byteswap
) ? BSWAP_64(zilc
->zc_nused
) : zilc
->zc_nused
);
1272 ASSERT3U(nused
, >=, sizeof (zil_chain_t
));
1273 ASSERT3U(nused
, <=, datalen
);
1274 blkend
= src
+ nused
;
1277 * Calculate the number of encrypted iovecs we will need.
1280 /* We need at least two iovecs -- one for the AAD, one for the MAC. */
1283 for (; slrp
< blkend
; slrp
+= lr_len
) {
1287 txtype
= BSWAP_64(lr
->lrc_txtype
);
1288 lr_len
= BSWAP_64(lr
->lrc_reclen
);
1290 txtype
= lr
->lrc_txtype
;
1291 lr_len
= lr
->lrc_reclen
;
1293 ASSERT3U(lr_len
, >=, sizeof (lr_t
));
1294 ASSERT3U(lr_len
, <=, blkend
- slrp
);
1297 if (txtype
== TX_WRITE
&& lr_len
!= sizeof (lr_write_t
))
1301 dst_iovecs
= kmem_alloc(nr_iovecs
* sizeof (iovec_t
), KM_SLEEP
);
1304 * Copy the plain zil header over and authenticate everything except
1305 * the checksum that will store our MAC. If we are writing the data
1306 * the embedded checksum will not have been calculated yet, so we don't
1307 * authenticate that.
1309 memcpy(aadp
, src
, sizeof (zil_chain_t
) - sizeof (zio_eck_t
));
1310 aadp
+= sizeof (zil_chain_t
) - sizeof (zio_eck_t
);
1311 aad_len
+= sizeof (zil_chain_t
) - sizeof (zio_eck_t
);
1313 slrp
= src
+ sizeof (zil_chain_t
);
1314 dlrp
= dst
+ sizeof (zil_chain_t
);
1317 * Loop over records again, filling in iovecs.
1320 /* The first iovec will contain the authbuf. */
1323 for (; slrp
< blkend
; slrp
+= lr_len
, dlrp
+= lr_len
) {
1327 txtype
= lr
->lrc_txtype
;
1328 lr_len
= lr
->lrc_reclen
;
1330 txtype
= BSWAP_64(lr
->lrc_txtype
);
1331 lr_len
= BSWAP_64(lr
->lrc_reclen
);
1334 /* copy the common lr_t */
1335 memcpy(dlrp
, slrp
, sizeof (lr_t
));
1336 memcpy(aadp
, slrp
, sizeof (lr_t
));
1337 aadp
+= sizeof (lr_t
);
1338 aad_len
+= sizeof (lr_t
);
1341 * If this is a TX_WRITE record we want to encrypt everything
1342 * except the bp if exists. If the bp does exist we want to
1345 if (txtype
== TX_WRITE
) {
1346 const size_t o
= offsetof(lr_write_t
, lr_blkptr
);
1347 crypt_len
= o
- sizeof (lr_t
);
1348 dst_iovecs
[vec
].iov_base
= (char *)dlrp
+ sizeof (lr_t
);
1349 dst_iovecs
[vec
].iov_len
= crypt_len
;
1351 /* copy the bp now since it will not be encrypted */
1352 memcpy(dlrp
+ o
, slrp
+ o
, sizeof (blkptr_t
));
1353 memcpy(aadp
, slrp
+ o
, sizeof (blkptr_t
));
1354 aadp
+= sizeof (blkptr_t
);
1355 aad_len
+= sizeof (blkptr_t
);
1357 total_len
+= crypt_len
;
1359 if (lr_len
!= sizeof (lr_write_t
)) {
1360 crypt_len
= lr_len
- sizeof (lr_write_t
);
1361 dst_iovecs
[vec
].iov_base
= (char *)
1362 dlrp
+ sizeof (lr_write_t
);
1363 dst_iovecs
[vec
].iov_len
= crypt_len
;
1365 total_len
+= crypt_len
;
1367 } else if (txtype
== TX_CLONE_RANGE
) {
1368 const size_t o
= offsetof(lr_clone_range_t
, lr_nbps
);
1369 crypt_len
= o
- sizeof (lr_t
);
1370 dst_iovecs
[vec
].iov_base
= (char *)dlrp
+ sizeof (lr_t
);
1371 dst_iovecs
[vec
].iov_len
= crypt_len
;
1373 /* copy the bps now since they will not be encrypted */
1374 memcpy(dlrp
+ o
, slrp
+ o
, lr_len
- o
);
1375 memcpy(aadp
, slrp
+ o
, lr_len
- o
);
1377 aad_len
+= lr_len
- o
;
1379 total_len
+= crypt_len
;
1381 crypt_len
= lr_len
- sizeof (lr_t
);
1382 dst_iovecs
[vec
].iov_base
= (char *)dlrp
+ sizeof (lr_t
);
1383 dst_iovecs
[vec
].iov_len
= crypt_len
;
1385 total_len
+= crypt_len
;
1389 /* The last iovec will contain the MAC. */
1390 ASSERT3U(vec
, ==, nr_iovecs
- 1);
1393 dst_iovecs
[0].iov_base
= aadbuf
;
1394 dst_iovecs
[0].iov_len
= aad_len
;
1396 dst_iovecs
[vec
].iov_base
= 0;
1397 dst_iovecs
[vec
].iov_len
= 0;
1399 *no_crypt
= (vec
== 1);
1400 *enc_len
= total_len
;
1402 *auth_len
= aad_len
;
1403 GET_UIO_STRUCT(out_uio
)->uio_iov
= dst_iovecs
;
1404 zfs_uio_iovcnt(out_uio
) = nr_iovecs
;
1410 * Special case handling routine for encrypting / decrypting dnode blocks.
1413 zio_crypt_init_uios_dnode(boolean_t encrypt
, uint64_t version
,
1414 uint8_t *plainbuf
, uint8_t *cipherbuf
, uint_t datalen
, boolean_t byteswap
,
1415 zfs_uio_t
*puio
, zfs_uio_t
*out_uio
, uint_t
*enc_len
, uint8_t **authbuf
,
1416 uint_t
*auth_len
, boolean_t
*no_crypt
)
1418 uint8_t *aadbuf
= zio_buf_alloc(datalen
);
1419 uint8_t *src
, *dst
, *aadp
;
1420 dnode_phys_t
*dnp
, *adnp
, *sdnp
, *ddnp
;
1421 iovec_t
*dst_iovecs
;
1422 uint_t nr_iovecs
, crypt_len
, vec
;
1423 uint_t aad_len
= 0, total_len
= 0;
1424 uint_t i
, j
, max_dnp
= datalen
>> DNODE_SHIFT
;
1433 memcpy(dst
, src
, datalen
);
1435 sdnp
= (dnode_phys_t
*)src
;
1436 ddnp
= (dnode_phys_t
*)dst
;
1440 * Count the number of iovecs we will need to do the encryption by
1441 * counting the number of bonus buffers that need to be encrypted.
1444 /* We need at least two iovecs -- one for the AAD, one for the MAC. */
1447 for (i
= 0; i
< max_dnp
; i
+= sdnp
[i
].dn_extra_slots
+ 1) {
1449 * This block may still be byteswapped. However, all of the
1450 * values we use are either uint8_t's (for which byteswapping
1451 * is a noop) or a * != 0 check, which will work regardless
1452 * of whether or not we byteswap.
1454 if (sdnp
[i
].dn_type
!= DMU_OT_NONE
&&
1455 DMU_OT_IS_ENCRYPTED(sdnp
[i
].dn_bonustype
) &&
1456 sdnp
[i
].dn_bonuslen
!= 0) {
1461 dst_iovecs
= kmem_alloc(nr_iovecs
* sizeof (iovec_t
), KM_SLEEP
);
1464 * Iterate through the dnodes again, this time filling in the uios
1465 * we allocated earlier. We also concatenate any data we want to
1466 * authenticate onto aadbuf.
1469 /* The first iovec will contain the authbuf. */
1472 for (i
= 0; i
< max_dnp
; i
+= sdnp
[i
].dn_extra_slots
+ 1) {
1475 /* copy over the core fields and blkptrs (kept as plaintext) */
1476 memcpy(&ddnp
[i
], dnp
,
1477 (uint8_t *)DN_BONUS(dnp
) - (uint8_t *)dnp
);
1479 if (dnp
->dn_flags
& DNODE_FLAG_SPILL_BLKPTR
) {
1480 memcpy(DN_SPILL_BLKPTR(&ddnp
[i
]), DN_SPILL_BLKPTR(dnp
),
1485 * Handle authenticated data. We authenticate everything in
1486 * the dnode that can be brought over when we do a raw send.
1487 * This includes all of the core fields as well as the MACs
1488 * stored in the bp checksums and all of the portable bits
1489 * from blk_prop. We include the dnode padding here in case it
1490 * ever gets used in the future. Some dn_flags and dn_used are
1491 * not portable so we mask those out values out of the
1492 * authenticated data.
1494 crypt_len
= offsetof(dnode_phys_t
, dn_blkptr
);
1495 memcpy(aadp
, dnp
, crypt_len
);
1496 adnp
= (dnode_phys_t
*)aadp
;
1497 adnp
->dn_flags
&= DNODE_CRYPT_PORTABLE_FLAGS_MASK
;
1500 aad_len
+= crypt_len
;
1502 for (j
= 0; j
< dnp
->dn_nblkptr
; j
++) {
1503 zio_crypt_bp_do_aad_updates(&aadp
, &aad_len
,
1504 version
, byteswap
, &dnp
->dn_blkptr
[j
]);
1507 if (dnp
->dn_flags
& DNODE_FLAG_SPILL_BLKPTR
) {
1508 zio_crypt_bp_do_aad_updates(&aadp
, &aad_len
,
1509 version
, byteswap
, DN_SPILL_BLKPTR(dnp
));
1513 * If this bonus buffer needs to be encrypted, we prepare an
1514 * iovec_t. The encryption / decryption functions will fill
1515 * this in for us with the encrypted or decrypted data.
1516 * Otherwise we add the bonus buffer to the authenticated
1517 * data buffer and copy it over to the destination. The
1518 * encrypted iovec extends to DN_MAX_BONUS_LEN(dnp) so that
1519 * we can guarantee alignment with the AES block size
1522 crypt_len
= DN_MAX_BONUS_LEN(dnp
);
1523 if (dnp
->dn_type
!= DMU_OT_NONE
&&
1524 DMU_OT_IS_ENCRYPTED(dnp
->dn_bonustype
) &&
1525 dnp
->dn_bonuslen
!= 0) {
1526 dst_iovecs
[vec
].iov_base
= DN_BONUS(&ddnp
[i
]);
1527 dst_iovecs
[vec
].iov_len
= crypt_len
;
1530 total_len
+= crypt_len
;
1532 memcpy(DN_BONUS(&ddnp
[i
]), DN_BONUS(dnp
), crypt_len
);
1533 memcpy(aadp
, DN_BONUS(dnp
), crypt_len
);
1535 aad_len
+= crypt_len
;
1539 /* The last iovec will contain the MAC. */
1540 ASSERT3U(vec
, ==, nr_iovecs
- 1);
1543 dst_iovecs
[0].iov_base
= aadbuf
;
1544 dst_iovecs
[0].iov_len
= aad_len
;
1546 dst_iovecs
[vec
].iov_base
= 0;
1547 dst_iovecs
[vec
].iov_len
= 0;
1549 *no_crypt
= (vec
== 1);
1550 *enc_len
= total_len
;
1552 *auth_len
= aad_len
;
1553 GET_UIO_STRUCT(out_uio
)->uio_iov
= dst_iovecs
;
1554 zfs_uio_iovcnt(out_uio
) = nr_iovecs
;
1560 zio_crypt_init_uios_normal(boolean_t encrypt
, uint8_t *plainbuf
,
1561 uint8_t *cipherbuf
, uint_t datalen
, zfs_uio_t
*puio
, zfs_uio_t
*out_uio
,
1566 uint_t nr_plain
= 1, nr_cipher
= 2;
1567 iovec_t
*plain_iovecs
= NULL
, *cipher_iovecs
= NULL
;
1570 cipher_iovecs
= kmem_zalloc(nr_cipher
* sizeof (iovec_t
),
1572 if (!cipher_iovecs
) {
1573 ret
= SET_ERROR(ENOMEM
);
1584 memcpy(dst
, src
, datalen
);
1585 cipher_iovecs
[0].iov_base
= dst
;
1586 cipher_iovecs
[0].iov_len
= datalen
;
1589 GET_UIO_STRUCT(out_uio
)->uio_iov
= cipher_iovecs
;
1590 zfs_uio_iovcnt(out_uio
) = nr_cipher
;
1595 if (plain_iovecs
!= NULL
)
1596 kmem_free(plain_iovecs
, nr_plain
* sizeof (iovec_t
));
1597 if (cipher_iovecs
!= NULL
)
1598 kmem_free(cipher_iovecs
, nr_cipher
* sizeof (iovec_t
));
1601 GET_UIO_STRUCT(out_uio
)->uio_iov
= NULL
;
1602 zfs_uio_iovcnt(out_uio
) = 0;
/*
 * This function builds up the plaintext (puio) and ciphertext (cuio) uios so
 * that they can be used for encryption and decryption by zio_do_crypt_uio().
 * Most blocks will use zio_crypt_init_uios_normal(), with ZIL and dnode blocks
 * requiring special handling to parse out pieces that are to be encrypted. The
 * authbuf is used by these special cases to store additional authenticated
 * data (AAD) for the encryption modes.
 */
1616 zio_crypt_init_uios(boolean_t encrypt
, uint64_t version
, dmu_object_type_t ot
,
1617 uint8_t *plainbuf
, uint8_t *cipherbuf
, uint_t datalen
, boolean_t byteswap
,
1618 uint8_t *mac
, zfs_uio_t
*puio
, zfs_uio_t
*cuio
, uint_t
*enc_len
,
1619 uint8_t **authbuf
, uint_t
*auth_len
, boolean_t
*no_crypt
)
1624 ASSERT(DMU_OT_IS_ENCRYPTED(ot
) || ot
== DMU_OT_NONE
);
1626 /* route to handler */
1628 case DMU_OT_INTENT_LOG
:
1629 ret
= zio_crypt_init_uios_zil(encrypt
, plainbuf
, cipherbuf
,
1630 datalen
, byteswap
, puio
, cuio
, enc_len
, authbuf
, auth_len
,
1634 ret
= zio_crypt_init_uios_dnode(encrypt
, version
, plainbuf
,
1635 cipherbuf
, datalen
, byteswap
, puio
, cuio
, enc_len
, authbuf
,
1636 auth_len
, no_crypt
);
1639 ret
= zio_crypt_init_uios_normal(encrypt
, plainbuf
, cipherbuf
,
1640 datalen
, puio
, cuio
, enc_len
);
1643 *no_crypt
= B_FALSE
;
1650 /* populate the uios */
1651 zfs_uio_segflg(cuio
) = UIO_SYSSPACE
;
1654 ((iovec_t
*)&(GET_UIO_STRUCT(cuio
)->
1655 uio_iov
[zfs_uio_iovcnt(cuio
) - 1]));
1656 mac_iov
->iov_base
= (void *)mac
;
1657 mac_iov
->iov_len
= ZIO_DATA_MAC_LEN
;
1665 void *failed_decrypt_buf
;
1666 int faile_decrypt_size
;
1669 * Primary encryption / decryption entrypoint for zio data.
1672 zio_do_crypt_data(boolean_t encrypt
, zio_crypt_key_t
*key
,
1673 dmu_object_type_t ot
, boolean_t byteswap
, uint8_t *salt
, uint8_t *iv
,
1674 uint8_t *mac
, uint_t datalen
, uint8_t *plainbuf
, uint8_t *cipherbuf
,
1675 boolean_t
*no_crypt
)
1678 boolean_t locked
= B_FALSE
;
1679 uint64_t crypt
= key
->zk_crypt
;
1680 uint_t keydata_len
= zio_crypt_table
[crypt
].ci_keylen
;
1681 uint_t enc_len
, auth_len
;
1682 zfs_uio_t puio
, cuio
;
1683 struct uio puio_s
, cuio_s
;
1684 uint8_t enc_keydata
[MASTER_KEY_MAX_LEN
];
1685 crypto_key_t tmp_ckey
, *ckey
= NULL
;
1686 freebsd_crypt_session_t
*tmpl
= NULL
;
1687 uint8_t *authbuf
= NULL
;
1689 memset(&puio_s
, 0, sizeof (puio_s
));
1690 memset(&cuio_s
, 0, sizeof (cuio_s
));
1691 zfs_uio_init(&puio
, &puio_s
);
1692 zfs_uio_init(&cuio
, &cuio_s
);
1694 #ifdef FCRYPTO_DEBUG
1695 printf("%s(%s, %p, %p, %d, %p, %p, %u, %s, %p, %p, %p)\n",
1697 encrypt
? "encrypt" : "decrypt",
1698 key
, salt
, ot
, iv
, mac
, datalen
,
1699 byteswap
? "byteswap" : "native_endian", plainbuf
,
1700 cipherbuf
, no_crypt
);
1702 printf("\tkey = {");
1703 for (int i
= 0; i
< key
->zk_current_key
.ck_length
/8; i
++)
1704 printf("%02x ", ((uint8_t *)key
->zk_current_key
.ck_data
)[i
]);
1707 /* create uios for encryption */
1708 ret
= zio_crypt_init_uios(encrypt
, key
->zk_version
, ot
, plainbuf
,
1709 cipherbuf
, datalen
, byteswap
, mac
, &puio
, &cuio
, &enc_len
,
1710 &authbuf
, &auth_len
, no_crypt
);
1715 * If the needed key is the current one, just use it. Otherwise we
1716 * need to generate a temporary one from the given salt + master key.
1717 * If we are encrypting, we must return a copy of the current salt
1718 * so that it can be stored in the blkptr_t.
1720 rw_enter(&key
->zk_salt_lock
, RW_READER
);
1723 if (memcmp(salt
, key
->zk_salt
, ZIO_DATA_SALT_LEN
) == 0) {
1724 ckey
= &key
->zk_current_key
;
1725 tmpl
= &key
->zk_session
;
1727 rw_exit(&key
->zk_salt_lock
);
1730 ret
= hkdf_sha512(key
->zk_master_keydata
, keydata_len
, NULL
, 0,
1731 salt
, ZIO_DATA_SALT_LEN
, enc_keydata
, keydata_len
);
1734 tmp_ckey
.ck_data
= enc_keydata
;
1735 tmp_ckey
.ck_length
= CRYPTO_BYTES2BITS(keydata_len
);
1741 /* perform the encryption / decryption */
1742 ret
= zio_do_crypt_uio_opencrypto(encrypt
, tmpl
, key
->zk_crypt
,
1743 ckey
, iv
, enc_len
, &cuio
, auth_len
);
1747 rw_exit(&key
->zk_salt_lock
);
1750 if (authbuf
!= NULL
)
1751 zio_buf_free(authbuf
, datalen
);
1752 if (ckey
== &tmp_ckey
)
1753 memset(enc_keydata
, 0, keydata_len
);
1754 zio_crypt_destroy_uio(&puio
);
1755 zio_crypt_destroy_uio(&cuio
);
1761 if (failed_decrypt_buf
!= NULL
)
1762 kmem_free(failed_decrypt_buf
, failed_decrypt_size
);
1763 failed_decrypt_buf
= kmem_alloc(datalen
, KM_SLEEP
);
1764 failed_decrypt_size
= datalen
;
1765 memcpy(failed_decrypt_buf
, cipherbuf
, datalen
);
1768 rw_exit(&key
->zk_salt_lock
);
1769 if (authbuf
!= NULL
)
1770 zio_buf_free(authbuf
, datalen
);
1771 if (ckey
== &tmp_ckey
)
1772 memset(enc_keydata
, 0, keydata_len
);
1773 zio_crypt_destroy_uio(&puio
);
1774 zio_crypt_destroy_uio(&cuio
);
1775 return (SET_ERROR(ret
));
1779 * Simple wrapper around zio_do_crypt_data() to work with abd's instead of
1783 zio_do_crypt_abd(boolean_t encrypt
, zio_crypt_key_t
*key
, dmu_object_type_t ot
,
1784 boolean_t byteswap
, uint8_t *salt
, uint8_t *iv
, uint8_t *mac
,
1785 uint_t datalen
, abd_t
*pabd
, abd_t
*cabd
, boolean_t
*no_crypt
)
1791 ptmp
= abd_borrow_buf_copy(pabd
, datalen
);
1792 ctmp
= abd_borrow_buf(cabd
, datalen
);
1794 ptmp
= abd_borrow_buf(pabd
, datalen
);
1795 ctmp
= abd_borrow_buf_copy(cabd
, datalen
);
1798 ret
= zio_do_crypt_data(encrypt
, key
, ot
, byteswap
, salt
, iv
, mac
,
1799 datalen
, ptmp
, ctmp
, no_crypt
);
1804 abd_return_buf(pabd
, ptmp
, datalen
);
1805 abd_return_buf_copy(cabd
, ctmp
, datalen
);
1807 abd_return_buf_copy(pabd
, ptmp
, datalen
);
1808 abd_return_buf(cabd
, ctmp
, datalen
);
1815 abd_return_buf(pabd
, ptmp
, datalen
);
1816 abd_return_buf_copy(cabd
, ctmp
, datalen
);
1818 abd_return_buf_copy(pabd
, ptmp
, datalen
);
1819 abd_return_buf(cabd
, ctmp
, datalen
);
1822 return (SET_ERROR(ret
));
#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(zfs_key_max_salt_uses, ulong, 0644);
MODULE_PARM_DESC(zfs_key_max_salt_uses, "Max number of times a salt value "
	"can be used for generating encryption keys before it is rotated");
#endif