/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright (c) 2017, Datto, Inc. All rights reserved.
 */
#include <sys/zio_crypt.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/sha2.h>
#include <sys/hkdf.h>
#include <sys/qat.h>
/*
 * This file is responsible for handling all of the details of generating
 * encryption parameters and performing encryption and authentication.
 *
 * BLOCK ENCRYPTION PARAMETERS:
 * Encryption / Authentication Algorithm Suite (crypt):
 * The encryption algorithm, mode, and key length we are going to use. We
 * currently support AES in either GCM or CCM modes with 128, 192, and 256 bit
 * keys. All authentication is currently done with SHA512-HMAC.
 *
 * Plaintext:
 * The unencrypted data that we want to encrypt.
 *
 * Initialization Vector (IV):
 * An initialization vector for the encryption algorithms. This is used to
 * "tweak" the encryption algorithms so that two blocks of the same data are
 * encrypted into different ciphertext outputs, thus obfuscating block patterns.
 * The supported encryption modes (AES-GCM and AES-CCM) require that an IV is
 * never reused with the same encryption key. This value is stored unencrypted
 * and must simply be provided to the decryption function. We use a 96 bit IV
 * (as recommended by NIST) for all block encryption. For non-dedup blocks we
 * derive the IV randomly. The first 64 bits of the IV are stored in the second
 * word of DVA[2] and the remaining 32 bits are stored in the upper 32 bits of
 * blk_fill. This is safe because encrypted blocks can't use the upper 32 bits
 * of blk_fill. We only encrypt level 0 blocks, which normally have a fill count
 * of 1. The only exception is for DMU_OT_DNODE objects, where the fill count of
 * level 0 blocks is the number of allocated dnodes in that block. The on-disk
 * format supports at most 2^15 slots per L0 dnode block, because the maximum
 * block size is 16MB (2^24). In either case, for level 0 blocks this number
 * will still be smaller than UINT32_MAX so it is safe to store the IV in the
 * top 32 bits of blk_fill, while leaving the bottom 32 bits of the fill count
 * for the dnode code.
 *
 * Master key:
 * This is the most important secret data of an encrypted dataset. It is used
 * along with the salt to generate the actual encryption keys via HKDF. We
 * do not use the master key to directly encrypt any data because there are
 * theoretical limits on how much data can actually be safely encrypted with
 * any encryption mode. The master key is stored encrypted on disk with the
 * user's wrapping key. Its length is determined by the encryption algorithm.
 * For details on how this is stored see the block comment in dsl_crypt.c.
 *
 * Salt:
 * Used as an input to the HKDF function, along with the master key. We use a
 * 64 bit salt, stored unencrypted in the first word of DVA[2]. Any given salt
 * can be used for encrypting many blocks, so we cache the current salt and the
 * associated derived key in zio_crypt_t so we do not need to derive it again
 * needlessly.
 *
 * Encryption Key:
 * A secret binary key, generated from an HKDF function, used to encrypt and
 * decrypt data.
 *
 * Message Authentication Code (MAC):
 * The MAC is an output of authenticated encryption modes such as AES-GCM and
 * AES-CCM. Its purpose is to ensure that an attacker cannot modify encrypted
 * data on disk and return garbage to the application. Effectively, it is a
 * checksum that cannot be reproduced by an attacker. We store the MAC in the
 * second 128 bits of blk_cksum, leaving the first 128 bits for a truncated
 * regular checksum of the ciphertext, which can be used for scrubbing.
 */
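/*
 * For reference, a sketch of where the per-block parameters described above
 * live inside the blkptr_t (field names as in spa.h; this is a summary of
 * the rules above, not new layout):
 *
 *	blk_dva[2].dva_word[0]		64 bit salt
 *	blk_dva[2].dva_word[1]		first 64 bits of the 96 bit IV
 *	blk_fill (upper 32 bits)	last 32 bits of the IV ("IV2")
 *	blk_cksum.zc_word[0-1]		truncated checksum of the ciphertext
 *	blk_cksum.zc_word[2-3]		128 bit MAC
 */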
/*
 * OBJECT AUTHENTICATION:
 * Some object types, such as DMU_OT_MASTER_NODE, cannot be encrypted because
 * they contain some info that always needs to be readable. To prevent this
 * data from being altered, we authenticate this data using SHA512-HMAC. This
 * will produce a MAC (similar to the one produced via encryption) which can
 * be used to verify the object was not modified. HMACs do not require key
 * rotation or IVs, so we can keep up to the full 3 copies of authenticated
 * data.
 *
 * ZIL ENCRYPTION:
 * ZIL blocks have their bp written to disk ahead of the associated data, so we
 * cannot store the MAC there as we normally do. For these blocks the MAC is
 * stored in the embedded checksum within the zil_chain_t header. The salt and
 * IV are generated for the block on bp allocation instead of at encryption
 * time. In addition, ZIL blocks have some pieces that must be left in plaintext
 * for claiming even though all of the sensitive user data still needs to be
 * encrypted. The function zio_crypt_init_uios_zil() handles parsing which
 * pieces of the block need to be encrypted. All data that is not encrypted is
 * authenticated using the AAD mechanisms that the supported encryption modes
 * provide for. In order to preserve the semantics of the ZIL for encrypted
 * datasets, the ZIL is not protected at the objset level as described below.
 *
 * DNODE ENCRYPTION:
 * Similarly to ZIL blocks, the core part of each dnode_phys_t needs to be left
 * in plaintext for scrubbing and claiming, but the bonus buffers might contain
 * sensitive user data. The function zio_crypt_init_uios_dnode() handles parsing
 * which pieces of the block need to be encrypted. For more details about
 * dnode authentication and encryption, see zio_crypt_init_uios_dnode().
 *
 * OBJECT SET AUTHENTICATION:
 * Up to this point, everything we have encrypted and authenticated has been
 * at level 0 (or -2 for the ZIL). If we did not do any further work the
 * on-disk format would be susceptible to attacks that deleted or rearranged
 * the order of level 0 blocks. Ideally, the cleanest solution would be to
 * maintain a tree of authentication MACs going up the bp tree. However, this
 * presents a problem for raw sends. Send files do not send information about
 * indirect blocks so there would be no convenient way to transfer the MACs and
 * they cannot be recalculated on the receive side without the master key which
 * would defeat one of the purposes of raw sends in the first place. Instead,
 * for the indirect levels of the bp tree, we use a regular SHA512 of the MACs
 * from the level below. We also include some portable fields from blk_prop such
 * as the lsize and compression algorithm to prevent the data from being
 * misinterpreted.
 *
 * At the objset level, we maintain 2 separate 256 bit MACs in the
 * objset_phys_t. The first one is "portable" and is the logical root of the
 * MAC tree maintained in the metadnode's bps. The second is "local" and is
 * used as the root MAC for the user accounting objects, which are also not
 * transferred via "zfs send". The portable MAC is sent in the DRR_BEGIN payload
 * of the send file. The useraccounting code ensures that the useraccounting
 * info is not present upon a receive, so the local MAC can simply be cleared
 * out at that time. For more info about objset_phys_t authentication, see
 * zio_crypt_do_objset_hmacs().
 *
 * CONSIDERATIONS FOR DEDUP:
 * In order for dedup to work, blocks that we want to dedup with one another
 * need to use the same IV and encryption key, so that they will have the same
 * ciphertext. Normally, one should never reuse an IV with the same encryption
 * key or else AES-GCM and AES-CCM can both actually leak the plaintext of both
 * blocks. In this case, however, since we are using the same plaintext as
 * well, all that we end up with is a duplicate of the original ciphertext we
 * already had. As a result, an attacker with read access to the raw disk will
 * be able to tell which blocks are the same, but this information is given away
 * by dedup anyway. In order to get the same IVs and encryption keys for
 * equivalent blocks of data we use an HMAC of the plaintext. We use an HMAC
 * here so that a reproducible checksum of the plaintext is never available to
 * the attacker. The HMAC key is kept alongside the master key, encrypted on
 * disk. The first 64 bits of the HMAC are used in place of the random salt, and
 * the next 96 bits are used as the IV. As a result of this mechanism, dedup
 * will only work within a clone family since encrypted dedup requires use of
 * the same master and HMAC keys.
 */
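/*
 * A minimal sketch of the dedup derivation described above, assuming a
 * generic hmac_sha512(key, data, len, digest) primitive (the real code is
 * zio_crypt_generate_iv_salt_dedup() below):
 *
 *	uint8_t digest[SHA512_DIGEST_LENGTH];
 *
 *	hmac_sha512(hmac_key, plaintext, datalen, digest);
 *	memcpy(salt, digest, ZIO_DATA_SALT_LEN);	// first 64 bits
 *	memcpy(iv, digest + ZIO_DATA_SALT_LEN,		// next 96 bits
 *	    ZIO_DATA_IV_LEN);
 */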
/*
 * After encrypting many blocks with the same key we may start to run up
 * against the theoretical limits of how much data can securely be encrypted
 * with a single key using the supported encryption modes. The most obvious
 * limitation is that our risk of generating 2 equivalent 96 bit IVs increases
 * the more IVs we generate (which both GCM and CCM modes strictly forbid).
 * This risk actually grows surprisingly quickly over time according to the
 * Birthday Problem. With a total IV space of 2^(96 bits), and assuming we have
 * generated n IVs with a cryptographically secure RNG, the approximate
 * probability p(n) of a collision is given as:
 *
 * p(n) ~= 1 - e^(-n*(n-1)/(2*(2^96)))
 *
 * [http://www.math.cornell.edu/~mec/2008-2009/TianyiZheng/Birthday.html]
 *
 * Assuming that we want to ensure that p(n) never goes over 1 / 1 trillion
 * we must not write more than 398,065,730 blocks with the same encryption key.
 * Therefore, we rotate our keys after 400,000,000 blocks have been written by
 * generating a new random 64 bit salt for our HKDF encryption key generation
 * function.
 */
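/*
 * For illustration, the bound above can be checked with a small standalone
 * userspace program (plain C with libm, not kernel code):
 *
 *	#include <math.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		double n = 398065730.0;
 *		double p = 1.0 - exp(-n * (n - 1.0) /
 *		    (2.0 * ldexp(1.0, 96)));
 *		printf("p(n) ~= %e\n", p);	// ~1e-12 = 1 / 1 trillion
 *		return (0);
 *	}
 */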
#define	ZFS_KEY_MAX_SALT_USES_DEFAULT	400000000
#define	ZFS_CURRENT_MAX_SALT_USES	\
	(MIN(zfs_key_max_salt_uses, ZFS_KEY_MAX_SALT_USES_DEFAULT))
static unsigned long zfs_key_max_salt_uses = ZFS_KEY_MAX_SALT_USES_DEFAULT;
typedef struct blkptr_auth_buf {
	uint64_t	bab_prop;			/* blk_prop - portable mask */
	uint8_t		bab_mac[ZIO_DATA_MAC_LEN];	/* MAC from blk_cksum */
	uint64_t	bab_pad;			/* reserved for future use */
} blkptr_auth_buf_t;
const zio_crypt_info_t zio_crypt_table[ZIO_CRYPT_FUNCTIONS] = {
	{"",			ZC_TYPE_NONE,	0,	"inherit"},
	{"",			ZC_TYPE_NONE,	0,	"on"},
	{"",			ZC_TYPE_NONE,	0,	"off"},
	{SUN_CKM_AES_CCM,	ZC_TYPE_CCM,	16,	"aes-128-ccm"},
	{SUN_CKM_AES_CCM,	ZC_TYPE_CCM,	24,	"aes-192-ccm"},
	{SUN_CKM_AES_CCM,	ZC_TYPE_CCM,	32,	"aes-256-ccm"},
	{SUN_CKM_AES_GCM,	ZC_TYPE_GCM,	16,	"aes-128-gcm"},
	{SUN_CKM_AES_GCM,	ZC_TYPE_GCM,	24,	"aes-192-gcm"},
	{SUN_CKM_AES_GCM,	ZC_TYPE_GCM,	32,	"aes-256-gcm"}
};
void
zio_crypt_key_destroy(zio_crypt_key_t *key)
{
	rw_destroy(&key->zk_salt_lock);

	/* free crypto templates */
	crypto_destroy_ctx_template(key->zk_current_tmpl);
	crypto_destroy_ctx_template(key->zk_hmac_tmpl);

	/* zero out sensitive data */
	memset(key, 0, sizeof (zio_crypt_key_t));
}
int
zio_crypt_key_init(uint64_t crypt, zio_crypt_key_t *key)
{
	int ret;
	crypto_mechanism_t mech;
	uint_t keydata_len;

	ASSERT(key != NULL);
	ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);

	keydata_len = zio_crypt_table[crypt].ci_keylen;
	memset(key, 0, sizeof (zio_crypt_key_t));
	rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL);

	/* fill keydata buffers and salt with random data */
	ret = random_get_bytes((uint8_t *)&key->zk_guid, sizeof (uint64_t));
	if (ret != 0)
		goto error;

	ret = random_get_bytes(key->zk_master_keydata, keydata_len);
	if (ret != 0)
		goto error;

	ret = random_get_bytes(key->zk_hmac_keydata, SHA512_HMAC_KEYLEN);
	if (ret != 0)
		goto error;

	ret = random_get_bytes(key->zk_salt, ZIO_DATA_SALT_LEN);
	if (ret != 0)
		goto error;

	/* derive the current key from the master key */
	ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
	    key->zk_salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata,
	    keydata_len);
	if (ret != 0)
		goto error;

	/* initialize keys for the ICP */
	key->zk_current_key.ck_data = key->zk_current_keydata;
	key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len);

	key->zk_hmac_key.ck_data = key->zk_hmac_keydata;
	key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN);

	/*
	 * Initialize the crypto templates. It's ok if this fails because
	 * this is just an optimization.
	 */
	mech.cm_type = crypto_mech2id(zio_crypt_table[crypt].ci_mechname);
	ret = crypto_create_ctx_template(&mech, &key->zk_current_key,
	    &key->zk_current_tmpl);
	if (ret != CRYPTO_SUCCESS)
		key->zk_current_tmpl = NULL;

	mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC);
	ret = crypto_create_ctx_template(&mech, &key->zk_hmac_key,
	    &key->zk_hmac_tmpl);
	if (ret != CRYPTO_SUCCESS)
		key->zk_hmac_tmpl = NULL;

	key->zk_crypt = crypt;
	key->zk_version = ZIO_CRYPT_KEY_CURRENT_VERSION;
	key->zk_salt_count = 0;

	return (0);

error:
	zio_crypt_key_destroy(key);
	return (ret);
}
static int
zio_crypt_key_change_salt(zio_crypt_key_t *key)
{
	int ret = 0;
	uint8_t salt[ZIO_DATA_SALT_LEN];
	crypto_mechanism_t mech;
	uint_t keydata_len = zio_crypt_table[key->zk_crypt].ci_keylen;

	/* generate a new salt */
	ret = random_get_bytes(salt, ZIO_DATA_SALT_LEN);
	if (ret != 0)
		goto error;

	rw_enter(&key->zk_salt_lock, RW_WRITER);

	/* someone beat us to the salt rotation, just unlock and return */
	if (key->zk_salt_count < ZFS_CURRENT_MAX_SALT_USES)
		goto out_unlock;

	/* derive the current key from the master key and the new salt */
	ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
	    salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata, keydata_len);
	if (ret != 0)
		goto out_unlock;

	/* assign the salt and reset the usage count */
	memcpy(key->zk_salt, salt, ZIO_DATA_SALT_LEN);
	key->zk_salt_count = 0;

	/* destroy the old context template and create the new one */
	mech.cm_type = crypto_mech2id(zio_crypt_table[key->zk_crypt].ci_mechname);
	crypto_destroy_ctx_template(key->zk_current_tmpl);
	ret = crypto_create_ctx_template(&mech, &key->zk_current_key,
	    &key->zk_current_tmpl);
	if (ret != CRYPTO_SUCCESS)
		key->zk_current_tmpl = NULL;

	rw_exit(&key->zk_salt_lock);

	return (0);

out_unlock:
	rw_exit(&key->zk_salt_lock);
error:
	return (ret);
}
/* See comment above zfs_key_max_salt_uses definition for details */
int
zio_crypt_key_get_salt(zio_crypt_key_t *key, uint8_t *salt)
{
	int ret = 0;
	boolean_t salt_change;

	rw_enter(&key->zk_salt_lock, RW_READER);

	memcpy(salt, key->zk_salt, ZIO_DATA_SALT_LEN);
	salt_change = (atomic_inc_64_nv(&key->zk_salt_count) >=
	    ZFS_CURRENT_MAX_SALT_USES);

	rw_exit(&key->zk_salt_lock);

	if (salt_change)
		ret = zio_crypt_key_change_salt(key);

	return (ret);
}
/*
 * This function handles all encryption and decryption in zfs. When
 * encrypting it expects puio to reference the plaintext and cuio to
 * reference the ciphertext. cuio must have enough space for the
 * ciphertext + room for a MAC. datalen should be the length of the
 * plaintext / ciphertext alone.
 */
static int
zio_do_crypt_uio(boolean_t encrypt, uint64_t crypt, crypto_key_t *key,
    crypto_ctx_template_t tmpl, uint8_t *ivbuf, uint_t datalen,
    zfs_uio_t *puio, zfs_uio_t *cuio, uint8_t *authbuf, uint_t auth_len)
{
	int ret;
	crypto_data_t plaindata, cipherdata;
	CK_AES_CCM_PARAMS ccmp;
	CK_AES_GCM_PARAMS gcmp;
	crypto_mechanism_t mech;
	zio_crypt_info_t crypt_info;
	uint_t plain_full_len, maclen;

	ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);

	/* lookup the encryption info */
	crypt_info = zio_crypt_table[crypt];

	/* the mac will always be the last iovec_t in the cipher uio */
	maclen = cuio->uio_iov[cuio->uio_iovcnt - 1].iov_len;

	ASSERT(maclen <= ZIO_DATA_MAC_LEN);

	/* setup encryption mechanism (same as crypt) */
	mech.cm_type = crypto_mech2id(crypt_info.ci_mechname);

	/*
	 * Strangely, the ICP requires that plain_full_len must include
	 * the MAC length when decrypting, even though the UIO does not
	 * need to have the extra space allocated.
	 */
	if (encrypt) {
		plain_full_len = datalen;
	} else {
		plain_full_len = datalen + maclen;
	}

	/*
	 * setup encryption params (currently only AES CCM and AES GCM
	 * are supported)
	 */
	if (crypt_info.ci_crypt_type == ZC_TYPE_CCM) {
		ccmp.ulNonceSize = ZIO_DATA_IV_LEN;
		ccmp.ulAuthDataSize = auth_len;
		ccmp.authData = authbuf;
		ccmp.ulMACSize = maclen;
		ccmp.nonce = ivbuf;
		ccmp.ulDataSize = plain_full_len;

		mech.cm_param = (char *)(&ccmp);
		mech.cm_param_len = sizeof (CK_AES_CCM_PARAMS);
	} else {
		gcmp.ulIvLen = ZIO_DATA_IV_LEN;
		gcmp.ulIvBits = CRYPTO_BYTES2BITS(ZIO_DATA_IV_LEN);
		gcmp.ulAADLen = auth_len;
		gcmp.pAAD = authbuf;
		gcmp.ulTagBits = CRYPTO_BYTES2BITS(maclen);
		gcmp.pIv = (uchar_t *)ivbuf;

		mech.cm_param = (char *)(&gcmp);
		mech.cm_param_len = sizeof (CK_AES_GCM_PARAMS);
	}

	/* populate the cipher and plain data structs. */
	plaindata.cd_format = CRYPTO_DATA_UIO;
	plaindata.cd_offset = 0;
	plaindata.cd_uio = puio;
	plaindata.cd_length = plain_full_len;

	cipherdata.cd_format = CRYPTO_DATA_UIO;
	cipherdata.cd_offset = 0;
	cipherdata.cd_uio = cuio;
	cipherdata.cd_length = datalen + maclen;

	/* perform the actual encryption */
	if (encrypt) {
		ret = crypto_encrypt(&mech, &plaindata, key, tmpl, &cipherdata);
		if (ret != CRYPTO_SUCCESS) {
			ret = SET_ERROR(EIO);
			goto error;
		}
	} else {
		ret = crypto_decrypt(&mech, &cipherdata, key, tmpl, &plaindata);
		if (ret != CRYPTO_SUCCESS) {
			ASSERT3U(ret, ==, CRYPTO_INVALID_MAC);
			ret = SET_ERROR(ECKSUM);
			goto error;
		}
	}

	return (0);

error:
	return (ret);
}
int
zio_crypt_key_wrap(crypto_key_t *cwkey, zio_crypt_key_t *key, uint8_t *iv,
    uint8_t *mac, uint8_t *keydata_out, uint8_t *hmac_keydata_out)
{
	int ret;
	zfs_uio_t puio, cuio;
	uint64_t aad[3];
	iovec_t plain_iovecs[2], cipher_iovecs[3];
	uint64_t crypt = key->zk_crypt;
	uint_t enc_len, keydata_len, aad_len;

	ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);

	keydata_len = zio_crypt_table[crypt].ci_keylen;

	/* generate iv for wrapping the master and hmac key */
	ret = random_get_pseudo_bytes(iv, WRAPPING_IV_LEN);
	if (ret != 0)
		goto error;

	/* initialize zfs_uio_ts */
	plain_iovecs[0].iov_base = key->zk_master_keydata;
	plain_iovecs[0].iov_len = keydata_len;
	plain_iovecs[1].iov_base = key->zk_hmac_keydata;
	plain_iovecs[1].iov_len = SHA512_HMAC_KEYLEN;

	cipher_iovecs[0].iov_base = keydata_out;
	cipher_iovecs[0].iov_len = keydata_len;
	cipher_iovecs[1].iov_base = hmac_keydata_out;
	cipher_iovecs[1].iov_len = SHA512_HMAC_KEYLEN;
	cipher_iovecs[2].iov_base = mac;
	cipher_iovecs[2].iov_len = WRAPPING_MAC_LEN;

	/*
	 * Although we don't support writing to the old format, we do
	 * support rewrapping the key so that the user can move and
	 * quarantine datasets on the old format.
	 */
	if (key->zk_version == 0) {
		aad_len = sizeof (uint64_t);
		aad[0] = LE_64(key->zk_guid);
	} else {
		ASSERT3U(key->zk_version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);
		aad_len = sizeof (uint64_t) * 3;
		aad[0] = LE_64(key->zk_guid);
		aad[1] = LE_64(crypt);
		aad[2] = LE_64(key->zk_version);
	}

	enc_len = zio_crypt_table[crypt].ci_keylen + SHA512_HMAC_KEYLEN;
	puio.uio_iov = plain_iovecs;
	puio.uio_iovcnt = 2;
	puio.uio_segflg = UIO_SYSSPACE;
	cuio.uio_iov = cipher_iovecs;
	cuio.uio_iovcnt = 3;
	cuio.uio_segflg = UIO_SYSSPACE;

	/* encrypt the keys and store the resulting ciphertext and mac */
	ret = zio_do_crypt_uio(B_TRUE, crypt, cwkey, NULL, iv, enc_len,
	    &puio, &cuio, (uint8_t *)aad, aad_len);
	if (ret != 0)
		goto error;

	return (0);

error:
	return (ret);
}
int
zio_crypt_key_unwrap(crypto_key_t *cwkey, uint64_t crypt, uint64_t version,
    uint64_t guid, uint8_t *keydata, uint8_t *hmac_keydata, uint8_t *iv,
    uint8_t *mac, zio_crypt_key_t *key)
{
	crypto_mechanism_t mech;
	zfs_uio_t puio, cuio;
	uint64_t aad[3];
	iovec_t plain_iovecs[2], cipher_iovecs[3];
	uint_t enc_len, keydata_len, aad_len;
	int ret;

	ASSERT3U(crypt, <, ZIO_CRYPT_FUNCTIONS);

	rw_init(&key->zk_salt_lock, NULL, RW_DEFAULT, NULL);

	keydata_len = zio_crypt_table[crypt].ci_keylen;

	/* initialize zfs_uio_ts */
	plain_iovecs[0].iov_base = key->zk_master_keydata;
	plain_iovecs[0].iov_len = keydata_len;
	plain_iovecs[1].iov_base = key->zk_hmac_keydata;
	plain_iovecs[1].iov_len = SHA512_HMAC_KEYLEN;

	cipher_iovecs[0].iov_base = keydata;
	cipher_iovecs[0].iov_len = keydata_len;
	cipher_iovecs[1].iov_base = hmac_keydata;
	cipher_iovecs[1].iov_len = SHA512_HMAC_KEYLEN;
	cipher_iovecs[2].iov_base = mac;
	cipher_iovecs[2].iov_len = WRAPPING_MAC_LEN;

	if (version == 0) {
		aad_len = sizeof (uint64_t);
		aad[0] = LE_64(guid);
	} else {
		ASSERT3U(version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);
		aad_len = sizeof (uint64_t) * 3;
		aad[0] = LE_64(guid);
		aad[1] = LE_64(crypt);
		aad[2] = LE_64(version);
	}

	enc_len = keydata_len + SHA512_HMAC_KEYLEN;
	puio.uio_iov = plain_iovecs;
	puio.uio_iovcnt = 2;
	puio.uio_segflg = UIO_SYSSPACE;
	cuio.uio_iov = cipher_iovecs;
	cuio.uio_iovcnt = 3;
	cuio.uio_segflg = UIO_SYSSPACE;

	/* decrypt the keys and store the result in the output buffers */
	ret = zio_do_crypt_uio(B_FALSE, crypt, cwkey, NULL, iv, enc_len,
	    &puio, &cuio, (uint8_t *)aad, aad_len);
	if (ret != 0)
		goto error;

	/* generate a fresh salt */
	ret = random_get_bytes(key->zk_salt, ZIO_DATA_SALT_LEN);
	if (ret != 0)
		goto error;

	/* derive the current key from the master key */
	ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
	    key->zk_salt, ZIO_DATA_SALT_LEN, key->zk_current_keydata,
	    keydata_len);
	if (ret != 0)
		goto error;

	/* initialize keys for ICP */
	key->zk_current_key.ck_data = key->zk_current_keydata;
	key->zk_current_key.ck_length = CRYPTO_BYTES2BITS(keydata_len);

	key->zk_hmac_key.ck_data = key->zk_hmac_keydata;
	key->zk_hmac_key.ck_length = CRYPTO_BYTES2BITS(SHA512_HMAC_KEYLEN);

	/*
	 * Initialize the crypto templates. It's ok if this fails because
	 * this is just an optimization.
	 */
	mech.cm_type = crypto_mech2id(zio_crypt_table[crypt].ci_mechname);
	ret = crypto_create_ctx_template(&mech, &key->zk_current_key,
	    &key->zk_current_tmpl);
	if (ret != CRYPTO_SUCCESS)
		key->zk_current_tmpl = NULL;

	mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC);
	ret = crypto_create_ctx_template(&mech, &key->zk_hmac_key,
	    &key->zk_hmac_tmpl);
	if (ret != CRYPTO_SUCCESS)
		key->zk_hmac_tmpl = NULL;

	key->zk_crypt = crypt;
	key->zk_version = version;
	key->zk_guid = guid;
	key->zk_salt_count = 0;

	return (0);

error:
	zio_crypt_key_destroy(key);
	return (ret);
}
int
zio_crypt_generate_iv(uint8_t *ivbuf)
{
	int ret;

	/* randomly generate the IV */
	ret = random_get_pseudo_bytes(ivbuf, ZIO_DATA_IV_LEN);
	if (ret != 0)
		goto error;

	return (0);

error:
	memset(ivbuf, 0, ZIO_DATA_IV_LEN);
	return (ret);
}
int
zio_crypt_do_hmac(zio_crypt_key_t *key, uint8_t *data, uint_t datalen,
    uint8_t *digestbuf, uint_t digestlen)
{
	int ret;
	crypto_mechanism_t mech;
	crypto_data_t in_data, digest_data;
	uint8_t raw_digestbuf[SHA512_DIGEST_LENGTH];

	ASSERT3U(digestlen, <=, SHA512_DIGEST_LENGTH);

	/* initialize sha512-hmac mechanism and crypto data */
	mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC);
	mech.cm_param = NULL;
	mech.cm_param_len = 0;

	/* initialize the crypto data */
	in_data.cd_format = CRYPTO_DATA_RAW;
	in_data.cd_offset = 0;
	in_data.cd_length = datalen;
	in_data.cd_raw.iov_base = (char *)data;
	in_data.cd_raw.iov_len = in_data.cd_length;

	digest_data.cd_format = CRYPTO_DATA_RAW;
	digest_data.cd_offset = 0;
	digest_data.cd_length = SHA512_DIGEST_LENGTH;
	digest_data.cd_raw.iov_base = (char *)raw_digestbuf;
	digest_data.cd_raw.iov_len = digest_data.cd_length;

	/* generate the hmac */
	ret = crypto_mac(&mech, &in_data, &key->zk_hmac_key, key->zk_hmac_tmpl,
	    &digest_data);
	if (ret != CRYPTO_SUCCESS) {
		ret = SET_ERROR(EIO);
		goto error;
	}

	memcpy(digestbuf, raw_digestbuf, digestlen);

	return (0);

error:
	memset(digestbuf, 0, digestlen);
	return (ret);
}
int
zio_crypt_generate_iv_salt_dedup(zio_crypt_key_t *key, uint8_t *data,
    uint_t datalen, uint8_t *ivbuf, uint8_t *salt)
{
	int ret;
	uint8_t digestbuf[SHA512_DIGEST_LENGTH];

	ret = zio_crypt_do_hmac(key, data, datalen,
	    digestbuf, SHA512_DIGEST_LENGTH);
	if (ret != 0)
		return (ret);

	memcpy(salt, digestbuf, ZIO_DATA_SALT_LEN);
	memcpy(ivbuf, digestbuf + ZIO_DATA_SALT_LEN, ZIO_DATA_IV_LEN);

	return (0);
}
/*
 * The following functions are used to encode and decode encryption parameters
 * into blkptr_t and zil_header_t. The ICP wants to use these parameters as
 * byte strings, which normally means that these strings would not need to deal
 * with byteswapping at all. However, both blkptr_t and zil_header_t may be
 * byteswapped by lower layers and so we must "undo" that byteswap here upon
 * decoding and encoding in a non-native byteorder. These functions require
 * that the byteorder bit is correct before being called.
 */
void
zio_crypt_encode_params_bp(blkptr_t *bp, uint8_t *salt, uint8_t *iv)
{
	uint64_t val64;
	uint32_t val32;

	ASSERT(BP_IS_ENCRYPTED(bp));

	if (!BP_SHOULD_BYTESWAP(bp)) {
		memcpy(&bp->blk_dva[2].dva_word[0], salt, sizeof (uint64_t));
		memcpy(&bp->blk_dva[2].dva_word[1], iv, sizeof (uint64_t));
		memcpy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
		BP_SET_IV2(bp, val32);
	} else {
		memcpy(&val64, salt, sizeof (uint64_t));
		bp->blk_dva[2].dva_word[0] = BSWAP_64(val64);

		memcpy(&val64, iv, sizeof (uint64_t));
		bp->blk_dva[2].dva_word[1] = BSWAP_64(val64);

		memcpy(&val32, iv + sizeof (uint64_t), sizeof (uint32_t));
		BP_SET_IV2(bp, BSWAP_32(val32));
	}
}
void
zio_crypt_decode_params_bp(const blkptr_t *bp, uint8_t *salt, uint8_t *iv)
{
	uint64_t val64;
	uint32_t val32;

	ASSERT(BP_IS_PROTECTED(bp));

	/* for convenience, so callers don't need to check */
	if (BP_IS_AUTHENTICATED(bp)) {
		memset(salt, 0, ZIO_DATA_SALT_LEN);
		memset(iv, 0, ZIO_DATA_IV_LEN);
		return;
	}

	if (!BP_SHOULD_BYTESWAP(bp)) {
		memcpy(salt, &bp->blk_dva[2].dva_word[0], sizeof (uint64_t));
		memcpy(iv, &bp->blk_dva[2].dva_word[1], sizeof (uint64_t));

		val32 = (uint32_t)BP_GET_IV2(bp);
		memcpy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
	} else {
		val64 = BSWAP_64(bp->blk_dva[2].dva_word[0]);
		memcpy(salt, &val64, sizeof (uint64_t));

		val64 = BSWAP_64(bp->blk_dva[2].dva_word[1]);
		memcpy(iv, &val64, sizeof (uint64_t));

		val32 = BSWAP_32((uint32_t)BP_GET_IV2(bp));
		memcpy(iv + sizeof (uint64_t), &val32, sizeof (uint32_t));
	}
}
void
zio_crypt_encode_mac_bp(blkptr_t *bp, uint8_t *mac)
{
	uint64_t val64;

	ASSERT(BP_USES_CRYPT(bp));
	ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_OBJSET);

	if (!BP_SHOULD_BYTESWAP(bp)) {
		memcpy(&bp->blk_cksum.zc_word[2], mac, sizeof (uint64_t));
		memcpy(&bp->blk_cksum.zc_word[3], mac + sizeof (uint64_t),
		    sizeof (uint64_t));
	} else {
		memcpy(&val64, mac, sizeof (uint64_t));
		bp->blk_cksum.zc_word[2] = BSWAP_64(val64);

		memcpy(&val64, mac + sizeof (uint64_t), sizeof (uint64_t));
		bp->blk_cksum.zc_word[3] = BSWAP_64(val64);
	}
}
void
zio_crypt_decode_mac_bp(const blkptr_t *bp, uint8_t *mac)
{
	uint64_t val64;

	ASSERT(BP_USES_CRYPT(bp) || BP_IS_HOLE(bp));

	/* for convenience, so callers don't need to check */
	if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
		memset(mac, 0, ZIO_DATA_MAC_LEN);
		return;
	}

	if (!BP_SHOULD_BYTESWAP(bp)) {
		memcpy(mac, &bp->blk_cksum.zc_word[2], sizeof (uint64_t));
		memcpy(mac + sizeof (uint64_t), &bp->blk_cksum.zc_word[3],
		    sizeof (uint64_t));
	} else {
		val64 = BSWAP_64(bp->blk_cksum.zc_word[2]);
		memcpy(mac, &val64, sizeof (uint64_t));

		val64 = BSWAP_64(bp->blk_cksum.zc_word[3]);
		memcpy(mac + sizeof (uint64_t), &val64, sizeof (uint64_t));
	}
}
void
zio_crypt_encode_mac_zil(void *data, uint8_t *mac)
{
	zil_chain_t *zilc = data;

	memcpy(&zilc->zc_eck.zec_cksum.zc_word[2], mac, sizeof (uint64_t));
	memcpy(&zilc->zc_eck.zec_cksum.zc_word[3], mac + sizeof (uint64_t),
	    sizeof (uint64_t));
}
void
zio_crypt_decode_mac_zil(const void *data, uint8_t *mac)
{
	/*
	 * The ZIL MAC is embedded in the block it protects, which will
	 * not have been byteswapped by the time this function has been called.
	 * As a result, we don't need to worry about byteswapping the MAC.
	 */
	const zil_chain_t *zilc = data;

	memcpy(mac, &zilc->zc_eck.zec_cksum.zc_word[2], sizeof (uint64_t));
	memcpy(mac + sizeof (uint64_t), &zilc->zc_eck.zec_cksum.zc_word[3],
	    sizeof (uint64_t));
}
/*
 * This routine takes a block of dnodes (src_abd) and copies only the bonus
 * buffers to the same offsets in the dst buffer. datalen should be the size
 * of both the src_abd and the dst buffer (not just the length of the bonus
 * buffers).
 */
void
zio_crypt_copy_dnode_bonus(abd_t *src_abd, uint8_t *dst, uint_t datalen)
{
	uint_t i, max_dnp = datalen >> DNODE_SHIFT;
	uint8_t *src;
	dnode_phys_t *dnp, *sdnp, *ddnp;

	src = abd_borrow_buf_copy(src_abd, datalen);

	sdnp = (dnode_phys_t *)src;
	ddnp = (dnode_phys_t *)dst;

	for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
		dnp = &sdnp[i];
		if (dnp->dn_type != DMU_OT_NONE &&
		    DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) &&
		    dnp->dn_bonuslen != 0) {
			memcpy(DN_BONUS(&ddnp[i]), DN_BONUS(dnp),
			    DN_MAX_BONUS_LEN(dnp));
		}
	}

	abd_return_buf(src_abd, src, datalen);
}
/*
 * This function decides what fields from blk_prop are included in
 * the various on-disk MACs.
 */
static void
zio_crypt_bp_zero_nonportable_blkprop(blkptr_t *bp, uint64_t version)
{
	/*
	 * Version 0 did not properly zero out all non-portable fields
	 * as it should have done. We maintain this code so that we can
	 * do read-only imports of pools on this version.
	 */
	if (version == 0) {
		BP_SET_DEDUP(bp, 0);
		BP_SET_CHECKSUM(bp, 0);
		BP_SET_PSIZE(bp, SPA_MINBLOCKSIZE);
		return;
	}

	ASSERT3U(version, ==, ZIO_CRYPT_KEY_CURRENT_VERSION);

	/*
	 * The hole_birth feature might set these fields even if this bp
	 * is a hole. We zero them out here to guarantee that raw sends
	 * will function with or without the feature.
	 */
	if (BP_IS_HOLE(bp)) {
		bp->blk_prop = 0ULL;
		return;
	}

	/*
	 * At L0 we want to verify these fields to ensure that data blocks
	 * can not be reinterpreted. For instance, we do not want an attacker
	 * to trick us into returning raw lz4 compressed data to the user
	 * by modifying the compression bits. At higher levels, we cannot
	 * enforce this policy since raw sends do not convey any information
	 * about indirect blocks, so these values might be different on the
	 * receive side. Fortunately, this does not open any new attack
	 * vectors, since any alterations that can be made to a higher level
	 * bp must still verify the correct order of the layer below it.
	 */
	if (BP_GET_LEVEL(bp) != 0) {
		BP_SET_BYTEORDER(bp, 0);
		BP_SET_COMPRESS(bp, 0);

		/*
		 * psize cannot be set to zero or it will trigger
		 * asserts, but the value doesn't really matter as
		 * long as it is constant.
		 */
		BP_SET_PSIZE(bp, SPA_MINBLOCKSIZE);
	}

	BP_SET_DEDUP(bp, 0);
	BP_SET_CHECKSUM(bp, 0);
}
static void
zio_crypt_bp_auth_init(uint64_t version, boolean_t should_bswap, blkptr_t *bp,
    blkptr_auth_buf_t *bab, uint_t *bab_len)
{
	blkptr_t tmpbp = *bp;

	if (should_bswap)
		byteswap_uint64_array(&tmpbp, sizeof (blkptr_t));

	ASSERT(BP_USES_CRYPT(&tmpbp) || BP_IS_HOLE(&tmpbp));
	ASSERT0(BP_IS_EMBEDDED(&tmpbp));

	zio_crypt_decode_mac_bp(&tmpbp, bab->bab_mac);

	/*
	 * We always MAC blk_prop in LE to ensure portability. This
	 * must be done after decoding the mac, since the endianness
	 * will get zero'd out here.
	 */
	zio_crypt_bp_zero_nonportable_blkprop(&tmpbp, version);
	bab->bab_prop = LE_64(tmpbp.blk_prop);
	bab->bab_pad = 0ULL;

	/* version 0 did not include the padding */
	*bab_len = sizeof (blkptr_auth_buf_t);
	if (version == 0)
		*bab_len -= sizeof (uint64_t);
}
static int
zio_crypt_bp_do_hmac_updates(crypto_context_t ctx, uint64_t version,
    boolean_t should_bswap, blkptr_t *bp)
{
	int ret;
	uint_t bab_len;
	blkptr_auth_buf_t bab;
	crypto_data_t cd;

	zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
	cd.cd_format = CRYPTO_DATA_RAW;
	cd.cd_offset = 0;
	cd.cd_length = bab_len;
	cd.cd_raw.iov_base = (char *)&bab;
	cd.cd_raw.iov_len = cd.cd_length;

	ret = crypto_mac_update(ctx, &cd);
	if (ret != CRYPTO_SUCCESS) {
		ret = SET_ERROR(EIO);
		goto error;
	}

	return (0);

error:
	return (ret);
}
static void
zio_crypt_bp_do_indrect_checksum_updates(SHA2_CTX *ctx, uint64_t version,
    boolean_t should_bswap, blkptr_t *bp)
{
	uint_t bab_len;
	blkptr_auth_buf_t bab;

	zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
	SHA2Update(ctx, &bab, bab_len);
}
static void
zio_crypt_bp_do_aad_updates(uint8_t **aadp, uint_t *aad_len, uint64_t version,
    boolean_t should_bswap, blkptr_t *bp)
{
	uint_t bab_len;
	blkptr_auth_buf_t bab;

	zio_crypt_bp_auth_init(version, should_bswap, bp, &bab, &bab_len);
	memcpy(*aadp, &bab, bab_len);
	*aadp += bab_len;
	*aad_len += bab_len;
}
static int
zio_crypt_do_dnode_hmac_updates(crypto_context_t ctx, uint64_t version,
    boolean_t should_bswap, dnode_phys_t *dnp)
{
	int ret, i;
	dnode_phys_t *adnp, tmp_dncore;
	size_t dn_core_size = offsetof(dnode_phys_t, dn_blkptr);
	boolean_t le_bswap = (should_bswap == ZFS_HOST_BYTEORDER);
	crypto_data_t cd;

	cd.cd_format = CRYPTO_DATA_RAW;
	cd.cd_offset = 0;

	/*
	 * Authenticate the core dnode (masking out non-portable bits).
	 * We only copy the first 64 bytes we operate on to avoid the overhead
	 * of copying 512-64 unneeded bytes. The compiler seems to be fine
	 * with that.
	 */
	memcpy(&tmp_dncore, dnp, dn_core_size);
	adnp = &tmp_dncore;

	if (le_bswap) {
		adnp->dn_datablkszsec = BSWAP_16(adnp->dn_datablkszsec);
		adnp->dn_bonuslen = BSWAP_16(adnp->dn_bonuslen);
		adnp->dn_maxblkid = BSWAP_64(adnp->dn_maxblkid);
		adnp->dn_used = BSWAP_64(adnp->dn_used);
	}
	adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK;
	adnp->dn_used = 0;

	cd.cd_length = dn_core_size;
	cd.cd_raw.iov_base = (char *)adnp;
	cd.cd_raw.iov_len = cd.cd_length;

	ret = crypto_mac_update(ctx, &cd);
	if (ret != CRYPTO_SUCCESS) {
		ret = SET_ERROR(EIO);
		goto error;
	}

	for (i = 0; i < dnp->dn_nblkptr; i++) {
		ret = zio_crypt_bp_do_hmac_updates(ctx, version,
		    should_bswap, &dnp->dn_blkptr[i]);
		if (ret != 0)
			goto error;
	}

	if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
		ret = zio_crypt_bp_do_hmac_updates(ctx, version,
		    should_bswap, DN_SPILL_BLKPTR(dnp));
		if (ret != 0)
			goto error;
	}

	return (0);

error:
	return (ret);
}
/*
 * objset_phys_t blocks introduce a number of exceptions to the normal
 * authentication process. objset_phys_t's contain 2 separate HMACs for
 * protecting the integrity of their data. The portable_mac protects the
 * metadnode. This MAC can be sent with a raw send and protects against
 * reordering of data within the metadnode. The local_mac protects the user
 * accounting objects which are not sent from one system to another.
 *
 * In addition, objset blocks are the only blocks that can be modified and
 * written to disk without the key loaded under certain circumstances. During
 * zil_claim() we need to be able to update the zil_header_t to complete
 * claiming log blocks and during raw receives we need to write out the
 * portable_mac from the send file. Both of these actions are possible
 * because these fields are not protected by either MAC, so neither one will
 * need to modify the MACs without the key. However, when the modified blocks
 * are written out they will be byteswapped into the host machine's native
 * endianness which will modify fields protected by the MAC. As a result, MAC
 * calculation for objset blocks works slightly differently from other block
 * types. Where other block types MAC the data in whatever endianness is
 * written to disk, objset blocks always MAC the little endian version of their
 * values. In the code, should_bswap is the value from BP_SHOULD_BYTESWAP()
 * and le_bswap indicates whether a byteswap is needed to get this block
 * into little endian format.
 */
int
zio_crypt_do_objset_hmacs(zio_crypt_key_t *key, void *data, uint_t datalen,
    boolean_t should_bswap, uint8_t *portable_mac, uint8_t *local_mac)
{
	int ret;
	crypto_mechanism_t mech;
	crypto_context_t ctx;
	crypto_data_t cd;
	objset_phys_t *osp = data;
	uint64_t intval;
	boolean_t le_bswap = (should_bswap == ZFS_HOST_BYTEORDER);
	uint8_t raw_portable_mac[SHA512_DIGEST_LENGTH];
	uint8_t raw_local_mac[SHA512_DIGEST_LENGTH];

	/* initialize HMAC mechanism */
	mech.cm_type = crypto_mech2id(SUN_CKM_SHA512_HMAC);
	mech.cm_param = NULL;
	mech.cm_param_len = 0;

	cd.cd_format = CRYPTO_DATA_RAW;
	cd.cd_offset = 0;

	/* calculate the portable MAC from the portable fields and metadnode */
	ret = crypto_mac_init(&mech, &key->zk_hmac_key, NULL, &ctx);
	if (ret != CRYPTO_SUCCESS) {
		ret = SET_ERROR(EIO);
		goto error;
	}

	/* add in the os_type */
	intval = (le_bswap) ? osp->os_type : BSWAP_64(osp->os_type);
	cd.cd_length = sizeof (uint64_t);
	cd.cd_raw.iov_base = (char *)&intval;
	cd.cd_raw.iov_len = cd.cd_length;

	ret = crypto_mac_update(ctx, &cd);
	if (ret != CRYPTO_SUCCESS) {
		ret = SET_ERROR(EIO);
		goto error;
	}

	/* add in the portable os_flags */
	intval = osp->os_flags;
	if (should_bswap)
		intval = BSWAP_64(intval);
	intval &= OBJSET_CRYPT_PORTABLE_FLAGS_MASK;
	if (!ZFS_HOST_BYTEORDER)
		intval = BSWAP_64(intval);

	cd.cd_length = sizeof (uint64_t);
	cd.cd_raw.iov_base = (char *)&intval;
	cd.cd_raw.iov_len = cd.cd_length;

	ret = crypto_mac_update(ctx, &cd);
	if (ret != CRYPTO_SUCCESS) {
		ret = SET_ERROR(EIO);
		goto error;
	}

	/* add in fields from the metadnode */
	ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
	    should_bswap, &osp->os_meta_dnode);
	if (ret)
		goto error;

	/* store the final digest in a temporary buffer and copy what we need */
	cd.cd_length = SHA512_DIGEST_LENGTH;
	cd.cd_raw.iov_base = (char *)raw_portable_mac;
	cd.cd_raw.iov_len = cd.cd_length;

	ret = crypto_mac_final(ctx, &cd);
	if (ret != CRYPTO_SUCCESS) {
		ret = SET_ERROR(EIO);
		goto error;
	}

	memcpy(portable_mac, raw_portable_mac, ZIO_OBJSET_MAC_LEN);

	/*
	 * This is necessary here as we check next whether
	 * OBJSET_FLAG_USERACCOUNTING_COMPLETE is set in order to
	 * decide if the local_mac should be zeroed out. That flag will always
	 * be set by dmu_objset_id_quota_upgrade_cb() and
	 * dmu_objset_userspace_upgrade_cb() if useraccounting has been
	 * completed.
	 */
	intval = osp->os_flags;
	if (should_bswap)
		intval = BSWAP_64(intval);
	boolean_t uacct_incomplete =
	    !(intval & OBJSET_FLAG_USERACCOUNTING_COMPLETE);

	/*
	 * The local MAC protects the user, group and project accounting.
	 * If these objects are not present, the local MAC is zeroed out.
	 */
	if (uacct_incomplete ||
	    (datalen >= OBJSET_PHYS_SIZE_V3 &&
	    osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
	    osp->os_groupused_dnode.dn_type == DMU_OT_NONE &&
	    osp->os_projectused_dnode.dn_type == DMU_OT_NONE) ||
	    (datalen >= OBJSET_PHYS_SIZE_V2 &&
	    osp->os_userused_dnode.dn_type == DMU_OT_NONE &&
	    osp->os_groupused_dnode.dn_type == DMU_OT_NONE) ||
	    (datalen <= OBJSET_PHYS_SIZE_V1)) {
		memset(local_mac, 0, ZIO_OBJSET_MAC_LEN);
		return (0);
	}

	/* calculate the local MAC from the userused and groupused dnodes */
	ret = crypto_mac_init(&mech, &key->zk_hmac_key, NULL, &ctx);
	if (ret != CRYPTO_SUCCESS) {
		ret = SET_ERROR(EIO);
		goto error;
	}

	/* add in the non-portable os_flags */
	intval = osp->os_flags;
	if (should_bswap)
		intval = BSWAP_64(intval);
	intval &= ~OBJSET_CRYPT_PORTABLE_FLAGS_MASK;
	if (!ZFS_HOST_BYTEORDER)
		intval = BSWAP_64(intval);

	cd.cd_length = sizeof (uint64_t);
	cd.cd_raw.iov_base = (char *)&intval;
	cd.cd_raw.iov_len = cd.cd_length;

	ret = crypto_mac_update(ctx, &cd);
	if (ret != CRYPTO_SUCCESS) {
		ret = SET_ERROR(EIO);
		goto error;
	}

	/* add in fields from the user accounting dnodes */
	if (osp->os_userused_dnode.dn_type != DMU_OT_NONE) {
		ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
		    should_bswap, &osp->os_userused_dnode);
		if (ret)
			goto error;
	}

	if (osp->os_groupused_dnode.dn_type != DMU_OT_NONE) {
		ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
		    should_bswap, &osp->os_groupused_dnode);
		if (ret)
			goto error;
	}

	if (osp->os_projectused_dnode.dn_type != DMU_OT_NONE &&
	    datalen >= OBJSET_PHYS_SIZE_V3) {
		ret = zio_crypt_do_dnode_hmac_updates(ctx, key->zk_version,
		    should_bswap, &osp->os_projectused_dnode);
		if (ret)
			goto error;
	}

	/* store the final digest in a temporary buffer and copy what we need */
	cd.cd_length = SHA512_DIGEST_LENGTH;
	cd.cd_raw.iov_base = (char *)raw_local_mac;
	cd.cd_raw.iov_len = cd.cd_length;

	ret = crypto_mac_final(ctx, &cd);
	if (ret != CRYPTO_SUCCESS) {
		ret = SET_ERROR(EIO);
		goto error;
	}

	memcpy(local_mac, raw_local_mac, ZIO_OBJSET_MAC_LEN);

	return (0);

error:
	memset(portable_mac, 0, ZIO_OBJSET_MAC_LEN);
	memset(local_mac, 0, ZIO_OBJSET_MAC_LEN);
	return (ret);
}
void
zio_crypt_destroy_uio(zfs_uio_t *uio)
{
	if (uio->uio_iov)
		kmem_free(uio->uio_iov, uio->uio_iovcnt * sizeof (iovec_t));
}
/*
 * This function parses an uncompressed indirect block and returns a checksum
 * of all the portable fields from all of the contained bps. The portable
 * fields are the MAC and all of the fields from blk_prop except for the dedup,
 * checksum, and psize bits. For an explanation of the purpose of this, see
 * the comment block on object set authentication.
 */
static int
zio_crypt_do_indirect_mac_checksum_impl(boolean_t generate, void *buf,
    uint_t datalen, uint64_t version, boolean_t byteswap, uint8_t *cksum)
{
	blkptr_t *bp;
	int i, epb = datalen >> SPA_BLKPTRSHIFT;
	SHA2_CTX ctx;
	uint8_t digestbuf[SHA512_DIGEST_LENGTH];

	/* checksum all of the MACs from the layer below */
	SHA2Init(SHA512, &ctx);
	for (i = 0, bp = buf; i < epb; i++, bp++) {
		zio_crypt_bp_do_indrect_checksum_updates(&ctx, version,
		    byteswap, bp);
	}
	SHA2Final(digestbuf, &ctx);

	if (generate) {
		memcpy(cksum, digestbuf, ZIO_DATA_MAC_LEN);
		return (0);
	}

	if (memcmp(digestbuf, cksum, ZIO_DATA_MAC_LEN) != 0)
		return (SET_ERROR(ECKSUM));

	return (0);
}
int
zio_crypt_do_indirect_mac_checksum(boolean_t generate, void *buf,
    uint_t datalen, boolean_t byteswap, uint8_t *cksum)
{
	int ret;

	/*
	 * Unfortunately, callers of this function will not always have
	 * easy access to the on-disk format version. This info is
	 * normally found in the DSL Crypto Key, but the checksum-of-MACs
	 * is expected to be verifiable even when the key isn't loaded.
	 * Here, instead of doing a ZAP lookup for the version for each
	 * zio, we simply try both existing formats.
	 */
	ret = zio_crypt_do_indirect_mac_checksum_impl(generate, buf,
	    datalen, ZIO_CRYPT_KEY_CURRENT_VERSION, byteswap, cksum);
	if (ret == ECKSUM) {
		ASSERT(!generate);
		ret = zio_crypt_do_indirect_mac_checksum_impl(generate,
		    buf, datalen, 0, byteswap, cksum);
	}

	return (ret);
}
int
zio_crypt_do_indirect_mac_checksum_abd(boolean_t generate, abd_t *abd,
    uint_t datalen, boolean_t byteswap, uint8_t *cksum)
{
	int ret;
	void *buf;

	buf = abd_borrow_buf_copy(abd, datalen);
	ret = zio_crypt_do_indirect_mac_checksum(generate, buf, datalen,
	    byteswap, cksum);
	abd_return_buf(abd, buf, datalen);

	return (ret);
}
/*
 * Special case handling routine for encrypting / decrypting ZIL blocks.
 * We do not check for the older ZIL chain because the encryption feature
 * was not available before the newer ZIL chain was introduced. The goal
 * here is to encrypt everything except the blkptr_t of a lr_write_t and
 * the zil_chain_t header. Everything that is not encrypted is authenticated.
 */
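/*
 * Schematically, a ZIL block is split up as follows (per the rules above);
 * everything not encrypted is fed into the AAD buffer for authentication:
 *
 *	zil_chain_t header		authenticated (minus the embedded
 *					checksum, which will hold the MAC)
 *	per log record:
 *	  lr_t (common header)		authenticated
 *	  record body			encrypted
 *	  blkptr_t (TX_WRITE only)	authenticated, copied in plaintext
 */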
static int
zio_crypt_init_uios_zil(boolean_t encrypt, uint8_t *plainbuf,
    uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap, zfs_uio_t *puio,
    zfs_uio_t *cuio, uint_t *enc_len, uint8_t **authbuf, uint_t *auth_len,
    boolean_t *no_crypt)
{
	int ret;
	uint64_t txtype, lr_len;
	uint_t nr_src, nr_dst, crypt_len;
	uint_t aad_len = 0, nr_iovecs = 0, total_len = 0;
	iovec_t *src_iovecs = NULL, *dst_iovecs = NULL;
	uint8_t *src, *dst, *slrp, *dlrp, *blkend, *aadp;
	zil_chain_t *zilc;
	lr_t *lr;
	uint8_t *aadbuf = zio_buf_alloc(datalen);

	/* cipherbuf always needs an extra iovec for the MAC */
	if (encrypt) {
		src = plainbuf;
		dst = cipherbuf;
		nr_src = 0;
		nr_dst = 1;
	} else {
		src = cipherbuf;
		dst = plainbuf;
		nr_src = 1;
		nr_dst = 0;
	}
	memset(dst, 0, datalen);

	/* find the start and end record of the log block */
	zilc = (zil_chain_t *)src;
	slrp = src + sizeof (zil_chain_t);
	aadp = aadbuf;
	blkend = src + ((byteswap) ? BSWAP_64(zilc->zc_nused) : zilc->zc_nused);

	/* calculate the number of encrypted iovecs we will need */
	for (; slrp < blkend; slrp += lr_len) {
		lr = (lr_t *)slrp;

		if (!byteswap) {
			txtype = lr->lrc_txtype;
			lr_len = lr->lrc_reclen;
		} else {
			txtype = BSWAP_64(lr->lrc_txtype);
			lr_len = BSWAP_64(lr->lrc_reclen);
		}

		nr_iovecs++;
		if (txtype == TX_WRITE && lr_len != sizeof (lr_write_t))
			nr_iovecs++;
	}

	nr_src += nr_iovecs;
	nr_dst += nr_iovecs;

	/* allocate the iovec arrays */
	if (nr_src != 0) {
		src_iovecs = kmem_alloc(nr_src * sizeof (iovec_t), KM_SLEEP);
		if (src_iovecs == NULL) {
			ret = SET_ERROR(ENOMEM);
			goto error;
		}
	}

	if (nr_dst != 0) {
		dst_iovecs = kmem_alloc(nr_dst * sizeof (iovec_t), KM_SLEEP);
		if (dst_iovecs == NULL) {
			ret = SET_ERROR(ENOMEM);
			goto error;
		}
	}

	/*
	 * Copy the plain zil header over and authenticate everything except
	 * the checksum that will store our MAC. If we are writing the data
	 * the embedded checksum will not have been calculated yet, so we don't
	 * authenticate that.
	 */
	memcpy(dst, src, sizeof (zil_chain_t));
	memcpy(aadp, src, sizeof (zil_chain_t) - sizeof (zio_eck_t));
	aadp += sizeof (zil_chain_t) - sizeof (zio_eck_t);
	aad_len += sizeof (zil_chain_t) - sizeof (zio_eck_t);

	/* loop over records again, filling in iovecs */
	nr_iovecs = 0;
	slrp = src + sizeof (zil_chain_t);
	dlrp = dst + sizeof (zil_chain_t);

	for (; slrp < blkend; slrp += lr_len, dlrp += lr_len) {
		lr = (lr_t *)slrp;

		if (!byteswap) {
			txtype = lr->lrc_txtype;
			lr_len = lr->lrc_reclen;
		} else {
			txtype = BSWAP_64(lr->lrc_txtype);
			lr_len = BSWAP_64(lr->lrc_reclen);
		}

		/* copy the common lr_t */
		memcpy(dlrp, slrp, sizeof (lr_t));
		memcpy(aadp, slrp, sizeof (lr_t));
		aadp += sizeof (lr_t);
		aad_len += sizeof (lr_t);

		ASSERT3P(src_iovecs, !=, NULL);
		ASSERT3P(dst_iovecs, !=, NULL);

		/*
		 * If this is a TX_WRITE record we want to encrypt everything
		 * except the bp if it exists. If the bp does exist we want to
		 * authenticate it.
		 */
		if (txtype == TX_WRITE) {
			crypt_len = sizeof (lr_write_t) -
			    sizeof (lr_t) - sizeof (blkptr_t);
			src_iovecs[nr_iovecs].iov_base = slrp + sizeof (lr_t);
			src_iovecs[nr_iovecs].iov_len = crypt_len;
			dst_iovecs[nr_iovecs].iov_base = dlrp + sizeof (lr_t);
			dst_iovecs[nr_iovecs].iov_len = crypt_len;

			/* copy the bp now since it will not be encrypted */
			memcpy(dlrp + sizeof (lr_write_t) - sizeof (blkptr_t),
			    slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
			    sizeof (blkptr_t));
			memcpy(aadp,
			    slrp + sizeof (lr_write_t) - sizeof (blkptr_t),
			    sizeof (blkptr_t));
			aadp += sizeof (blkptr_t);
			aad_len += sizeof (blkptr_t);

			nr_iovecs++;
			total_len += crypt_len;

			if (lr_len != sizeof (lr_write_t)) {
				crypt_len = lr_len - sizeof (lr_write_t);
				src_iovecs[nr_iovecs].iov_base =
				    slrp + sizeof (lr_write_t);
				src_iovecs[nr_iovecs].iov_len = crypt_len;
				dst_iovecs[nr_iovecs].iov_base =
				    dlrp + sizeof (lr_write_t);
				dst_iovecs[nr_iovecs].iov_len = crypt_len;

				nr_iovecs++;
				total_len += crypt_len;
			}
		} else {
			crypt_len = lr_len - sizeof (lr_t);
			src_iovecs[nr_iovecs].iov_base = slrp + sizeof (lr_t);
			src_iovecs[nr_iovecs].iov_len = crypt_len;
			dst_iovecs[nr_iovecs].iov_base = dlrp + sizeof (lr_t);
			dst_iovecs[nr_iovecs].iov_len = crypt_len;

			nr_iovecs++;
			total_len += crypt_len;
		}
	}

	*no_crypt = (nr_iovecs == 0);
	*enc_len = total_len;
	*authbuf = aadbuf;
	*auth_len = aad_len;

	if (encrypt) {
		puio->uio_iov = src_iovecs;
		puio->uio_iovcnt = nr_src;
		cuio->uio_iov = dst_iovecs;
		cuio->uio_iovcnt = nr_dst;
	} else {
		puio->uio_iov = dst_iovecs;
		puio->uio_iovcnt = nr_dst;
		cuio->uio_iov = src_iovecs;
		cuio->uio_iovcnt = nr_src;
	}

	return (0);

error:
	zio_buf_free(aadbuf, datalen);
	if (src_iovecs != NULL)
		kmem_free(src_iovecs, nr_src * sizeof (iovec_t));
	if (dst_iovecs != NULL)
		kmem_free(dst_iovecs, nr_dst * sizeof (iovec_t));

	*enc_len = 0;
	*authbuf = NULL;
	*auth_len = 0;
	*no_crypt = B_FALSE;
	puio->uio_iov = NULL;
	puio->uio_iovcnt = 0;
	cuio->uio_iov = NULL;
	cuio->uio_iovcnt = 0;

	return (ret);
}
/*
 * Special case handling routine for encrypting / decrypting dnode blocks.
 */
static int
zio_crypt_init_uios_dnode(boolean_t encrypt, uint64_t version,
    uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap,
    zfs_uio_t *puio, zfs_uio_t *cuio, uint_t *enc_len, uint8_t **authbuf,
    uint_t *auth_len, boolean_t *no_crypt)
{
	int ret;
	uint_t nr_src, nr_dst, crypt_len;
	uint_t aad_len = 0, nr_iovecs = 0, total_len = 0;
	uint_t i, j, max_dnp = datalen >> DNODE_SHIFT;
	iovec_t *src_iovecs = NULL, *dst_iovecs = NULL;
	uint8_t *src, *dst, *aadp;
	dnode_phys_t *dnp, *adnp, *sdnp, *ddnp;
	uint8_t *aadbuf = zio_buf_alloc(datalen);

	if (encrypt) {
		src = plainbuf;
		dst = cipherbuf;
		nr_src = 0;
		nr_dst = 1;
	} else {
		src = cipherbuf;
		dst = plainbuf;
		nr_src = 1;
		nr_dst = 0;
	}

	sdnp = (dnode_phys_t *)src;
	ddnp = (dnode_phys_t *)dst;
	aadp = aadbuf;

	/*
	 * Count the number of iovecs we will need to do the encryption by
	 * counting the number of bonus buffers that need to be encrypted.
	 */
	for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
		/*
		 * This block may still be byteswapped. However, all of the
		 * values we use are either uint8_t's (for which byteswapping
		 * is a noop) or a * != 0 check, which will work regardless
		 * of whether or not we byteswap.
		 */
		if (sdnp[i].dn_type != DMU_OT_NONE &&
		    DMU_OT_IS_ENCRYPTED(sdnp[i].dn_bonustype) &&
		    sdnp[i].dn_bonuslen != 0) {
			nr_iovecs++;
		}
	}

	nr_src += nr_iovecs;
	nr_dst += nr_iovecs;

	if (nr_src != 0) {
		src_iovecs = kmem_alloc(nr_src * sizeof (iovec_t), KM_SLEEP);
		if (src_iovecs == NULL) {
			ret = SET_ERROR(ENOMEM);
			goto error;
		}
	}

	if (nr_dst != 0) {
		dst_iovecs = kmem_alloc(nr_dst * sizeof (iovec_t), KM_SLEEP);
		if (dst_iovecs == NULL) {
			ret = SET_ERROR(ENOMEM);
			goto error;
		}
	}

	nr_iovecs = 0;

	/*
	 * Iterate through the dnodes again, this time filling in the uios
	 * we allocated earlier. We also concatenate any data we want to
	 * authenticate onto aadbuf.
	 */
	for (i = 0; i < max_dnp; i += sdnp[i].dn_extra_slots + 1) {
		dnp = &sdnp[i];

		/* copy over the core fields and blkptrs (kept as plaintext) */
		memcpy(&ddnp[i], dnp,
		    (uint8_t *)DN_BONUS(dnp) - (uint8_t *)dnp);

		if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
			memcpy(DN_SPILL_BLKPTR(&ddnp[i]), DN_SPILL_BLKPTR(dnp),
			    sizeof (blkptr_t));
		}

		/*
		 * Handle authenticated data. We authenticate everything in
		 * the dnode that can be brought over when we do a raw send.
		 * This includes all of the core fields as well as the MACs
		 * stored in the bp checksums and all of the portable bits
		 * from blk_prop. We include the dnode padding here in case it
		 * ever gets used in the future. Some dn_flags and dn_used are
		 * not portable so we mask those values out of the
		 * authenticated data.
		 */
		crypt_len = offsetof(dnode_phys_t, dn_blkptr);
		memcpy(aadp, dnp, crypt_len);
		adnp = (dnode_phys_t *)aadp;
		adnp->dn_flags &= DNODE_CRYPT_PORTABLE_FLAGS_MASK;
		adnp->dn_used = 0;
		aadp += crypt_len;
		aad_len += crypt_len;

		for (j = 0; j < dnp->dn_nblkptr; j++) {
			zio_crypt_bp_do_aad_updates(&aadp, &aad_len,
			    version, byteswap, &dnp->dn_blkptr[j]);
		}

		if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
			zio_crypt_bp_do_aad_updates(&aadp, &aad_len,
			    version, byteswap, DN_SPILL_BLKPTR(dnp));
		}

		/*
		 * If this bonus buffer needs to be encrypted, we prepare an
		 * iovec_t. The encryption / decryption functions will fill
		 * this in for us with the encrypted or decrypted data.
		 * Otherwise we add the bonus buffer to the authenticated
		 * data buffer and copy it over to the destination. The
		 * encrypted iovec extends to DN_MAX_BONUS_LEN(dnp) so that
		 * we can guarantee alignment with the AES block size
		 * (128 bits).
		 */
		crypt_len = DN_MAX_BONUS_LEN(dnp);
		if (dnp->dn_type != DMU_OT_NONE &&
		    DMU_OT_IS_ENCRYPTED(dnp->dn_bonustype) &&
		    dnp->dn_bonuslen != 0) {
			ASSERT3U(nr_iovecs, <, nr_src);
			ASSERT3U(nr_iovecs, <, nr_dst);
			ASSERT3P(src_iovecs, !=, NULL);
			ASSERT3P(dst_iovecs, !=, NULL);
			src_iovecs[nr_iovecs].iov_base = DN_BONUS(dnp);
			src_iovecs[nr_iovecs].iov_len = crypt_len;
			dst_iovecs[nr_iovecs].iov_base = DN_BONUS(&ddnp[i]);
			dst_iovecs[nr_iovecs].iov_len = crypt_len;

			nr_iovecs++;
			total_len += crypt_len;
		} else {
			memcpy(DN_BONUS(&ddnp[i]), DN_BONUS(dnp), crypt_len);
			memcpy(aadp, DN_BONUS(dnp), crypt_len);
			aadp += crypt_len;
			aad_len += crypt_len;
		}
	}

	*no_crypt = (nr_iovecs == 0);
	*enc_len = total_len;
	*authbuf = aadbuf;
	*auth_len = aad_len;

	if (encrypt) {
		puio->uio_iov = src_iovecs;
		puio->uio_iovcnt = nr_src;
		cuio->uio_iov = dst_iovecs;
		cuio->uio_iovcnt = nr_dst;
	} else {
		puio->uio_iov = dst_iovecs;
		puio->uio_iovcnt = nr_dst;
		cuio->uio_iov = src_iovecs;
		cuio->uio_iovcnt = nr_src;
	}

	return (0);

error:
	zio_buf_free(aadbuf, datalen);
	if (src_iovecs != NULL)
		kmem_free(src_iovecs, nr_src * sizeof (iovec_t));
	if (dst_iovecs != NULL)
		kmem_free(dst_iovecs, nr_dst * sizeof (iovec_t));

	*enc_len = 0;
	*authbuf = NULL;
	*auth_len = 0;
	*no_crypt = B_FALSE;
	puio->uio_iov = NULL;
	puio->uio_iovcnt = 0;
	cuio->uio_iov = NULL;
	cuio->uio_iovcnt = 0;

	return (ret);
}
static int
zio_crypt_init_uios_normal(boolean_t encrypt, uint8_t *plainbuf,
    uint8_t *cipherbuf, uint_t datalen, zfs_uio_t *puio, zfs_uio_t *cuio,
    uint_t *enc_len)
{
	int ret;
	uint_t nr_plain = 1, nr_cipher = 2;
	iovec_t *plain_iovecs = NULL, *cipher_iovecs = NULL;

	/* allocate the iovecs for the plain and cipher data */
	plain_iovecs = kmem_alloc(nr_plain * sizeof (iovec_t),
	    KM_SLEEP);
	if (!plain_iovecs) {
		ret = SET_ERROR(ENOMEM);
		goto error;
	}

	cipher_iovecs = kmem_alloc(nr_cipher * sizeof (iovec_t),
	    KM_SLEEP);
	if (!cipher_iovecs) {
		ret = SET_ERROR(ENOMEM);
		goto error;
	}

	plain_iovecs[0].iov_base = plainbuf;
	plain_iovecs[0].iov_len = datalen;
	cipher_iovecs[0].iov_base = cipherbuf;
	cipher_iovecs[0].iov_len = datalen;

	*enc_len = datalen;
	puio->uio_iov = plain_iovecs;
	puio->uio_iovcnt = nr_plain;
	cuio->uio_iov = cipher_iovecs;
	cuio->uio_iovcnt = nr_cipher;

	return (0);

error:
	if (plain_iovecs != NULL)
		kmem_free(plain_iovecs, nr_plain * sizeof (iovec_t));
	if (cipher_iovecs != NULL)
		kmem_free(cipher_iovecs, nr_cipher * sizeof (iovec_t));

	*enc_len = 0;
	puio->uio_iov = NULL;
	puio->uio_iovcnt = 0;
	cuio->uio_iov = NULL;
	cuio->uio_iovcnt = 0;

	return (ret);
}
/*
 * This function builds up the plaintext (puio) and ciphertext (cuio) uios so
 * that they can be used for encryption and decryption by zio_do_crypt_uio().
 * Most blocks will use zio_crypt_init_uios_normal(), with ZIL and dnode blocks
 * requiring special handling to parse out pieces that are to be encrypted. The
 * authbuf is used by these special cases to store additional authenticated
 * data (AAD) for the encryption modes.
 */
static int
zio_crypt_init_uios(boolean_t encrypt, uint64_t version, dmu_object_type_t ot,
    uint8_t *plainbuf, uint8_t *cipherbuf, uint_t datalen, boolean_t byteswap,
    uint8_t *mac, zfs_uio_t *puio, zfs_uio_t *cuio, uint_t *enc_len,
    uint8_t **authbuf, uint_t *auth_len, boolean_t *no_crypt)
{
	int ret;
	iovec_t *mac_iov;

	ASSERT(DMU_OT_IS_ENCRYPTED(ot) || ot == DMU_OT_NONE);

	/* route to handler */
	switch (ot) {
	case DMU_OT_INTENT_LOG:
		ret = zio_crypt_init_uios_zil(encrypt, plainbuf, cipherbuf,
		    datalen, byteswap, puio, cuio, enc_len, authbuf, auth_len,
		    no_crypt);
		break;
	case DMU_OT_DNODE:
		ret = zio_crypt_init_uios_dnode(encrypt, version, plainbuf,
		    cipherbuf, datalen, byteswap, puio, cuio, enc_len, authbuf,
		    auth_len, no_crypt);
		break;
	default:
		ret = zio_crypt_init_uios_normal(encrypt, plainbuf, cipherbuf,
		    datalen, puio, cuio, enc_len);
		*authbuf = NULL;
		*auth_len = 0;
		*no_crypt = B_FALSE;
		break;
	}

	if (ret != 0)
		goto error;

	/* populate the uios */
	puio->uio_segflg = UIO_SYSSPACE;
	cuio->uio_segflg = UIO_SYSSPACE;

	mac_iov = ((iovec_t *)&cuio->uio_iov[cuio->uio_iovcnt - 1]);
	mac_iov->iov_base = mac;
	mac_iov->iov_len = ZIO_DATA_MAC_LEN;

	return (0);

error:
	return (ret);
}
/*
 * Primary encryption / decryption entrypoint for zio data.
 */
int
zio_do_crypt_data(boolean_t encrypt, zio_crypt_key_t *key,
    dmu_object_type_t ot, boolean_t byteswap, uint8_t *salt, uint8_t *iv,
    uint8_t *mac, uint_t datalen, uint8_t *plainbuf, uint8_t *cipherbuf,
    boolean_t *no_crypt)
{
	int ret;
	boolean_t locked = B_FALSE;
	uint64_t crypt = key->zk_crypt;
	uint_t keydata_len = zio_crypt_table[crypt].ci_keylen;
	uint_t enc_len, auth_len;
	zfs_uio_t puio, cuio;
	uint8_t enc_keydata[MASTER_KEY_MAX_LEN];
	crypto_key_t tmp_ckey, *ckey = NULL;
	crypto_ctx_template_t tmpl;
	uint8_t *authbuf = NULL;

	memset(&puio, 0, sizeof (puio));
	memset(&cuio, 0, sizeof (cuio));

	/*
	 * If the needed key is the current one, just use it. Otherwise we
	 * need to generate a temporary one from the given salt + master key.
	 * If we are encrypting, we must return a copy of the current salt
	 * so that it can be stored in the blkptr_t.
	 */
	rw_enter(&key->zk_salt_lock, RW_READER);
	locked = B_TRUE;

	if (memcmp(salt, key->zk_salt, ZIO_DATA_SALT_LEN) == 0) {
		ckey = &key->zk_current_key;
		tmpl = key->zk_current_tmpl;
	} else {
		rw_exit(&key->zk_salt_lock);
		locked = B_FALSE;

		ret = hkdf_sha512(key->zk_master_keydata, keydata_len, NULL, 0,
		    salt, ZIO_DATA_SALT_LEN, enc_keydata, keydata_len);
		if (ret != 0)
			goto error;

		tmp_ckey.ck_data = enc_keydata;
		tmp_ckey.ck_length = CRYPTO_BYTES2BITS(keydata_len);

		ckey = &tmp_ckey;
		tmpl = NULL;
	}

	/*
	 * Attempt to use QAT acceleration if we can. We currently don't
	 * do this for metadnode and ZIL blocks, since they have a much
	 * more involved buffer layout and the qat_crypt() function only
	 * works in-place.
	 */
	if (qat_crypt_use_accel(datalen) &&
	    ot != DMU_OT_INTENT_LOG && ot != DMU_OT_DNODE) {
		uint8_t *srcbuf, *dstbuf;

		if (encrypt) {
			srcbuf = plainbuf;
			dstbuf = cipherbuf;
		} else {
			srcbuf = cipherbuf;
			dstbuf = plainbuf;
		}

		ret = qat_crypt((encrypt) ? QAT_ENCRYPT : QAT_DECRYPT, srcbuf,
		    dstbuf, NULL, 0, iv, mac, ckey, key->zk_crypt, datalen);
		if (ret == CPA_STATUS_SUCCESS) {
			if (locked)
				rw_exit(&key->zk_salt_lock);

			return (0);
		}
		/* If the hardware implementation fails fall back to software */
	}

	/* create uios for encryption */
	ret = zio_crypt_init_uios(encrypt, key->zk_version, ot, plainbuf,
	    cipherbuf, datalen, byteswap, mac, &puio, &cuio, &enc_len,
	    &authbuf, &auth_len, no_crypt);
	if (ret != 0)
		goto error;

	/* perform the encryption / decryption in software */
	ret = zio_do_crypt_uio(encrypt, key->zk_crypt, ckey, tmpl, iv, enc_len,
	    &puio, &cuio, authbuf, auth_len);
	if (ret != 0)
		goto error;

	if (locked)
		rw_exit(&key->zk_salt_lock);

	if (authbuf != NULL)
		zio_buf_free(authbuf, datalen);
	if (ckey == &tmp_ckey)
		memset(enc_keydata, 0, keydata_len);
	zio_crypt_destroy_uio(&puio);
	zio_crypt_destroy_uio(&cuio);

	return (0);

error:
	if (locked)
		rw_exit(&key->zk_salt_lock);
	if (authbuf != NULL)
		zio_buf_free(authbuf, datalen);
	if (ckey == &tmp_ckey)
		memset(enc_keydata, 0, keydata_len);
	zio_crypt_destroy_uio(&puio);
	zio_crypt_destroy_uio(&cuio);

	return (ret);
}
/*
 * Simple wrapper around zio_do_crypt_data() to work with abd's instead of
 * linear buffers.
 */
int
zio_do_crypt_abd(boolean_t encrypt, zio_crypt_key_t *key, dmu_object_type_t ot,
    boolean_t byteswap, uint8_t *salt, uint8_t *iv, uint8_t *mac,
    uint_t datalen, abd_t *pabd, abd_t *cabd, boolean_t *no_crypt)
{
	int ret;
	void *ptmp, *ctmp;

	if (encrypt) {
		ptmp = abd_borrow_buf_copy(pabd, datalen);
		ctmp = abd_borrow_buf(cabd, datalen);
	} else {
		ptmp = abd_borrow_buf(pabd, datalen);
		ctmp = abd_borrow_buf_copy(cabd, datalen);
	}

	ret = zio_do_crypt_data(encrypt, key, ot, byteswap, salt, iv, mac,
	    datalen, ptmp, ctmp, no_crypt);
	if (ret != 0)
		goto error;

	if (encrypt) {
		abd_return_buf(pabd, ptmp, datalen);
		abd_return_buf_copy(cabd, ctmp, datalen);
	} else {
		abd_return_buf_copy(pabd, ptmp, datalen);
		abd_return_buf(cabd, ctmp, datalen);
	}

	return (0);

error:
	if (encrypt) {
		abd_return_buf(pabd, ptmp, datalen);
		abd_return_buf_copy(cabd, ctmp, datalen);
	} else {
		abd_return_buf_copy(pabd, ptmp, datalen);
		abd_return_buf(cabd, ctmp, datalen);
	}

	return (ret);
}
#if defined(_KERNEL)
module_param(zfs_key_max_salt_uses, ulong, 0644);
MODULE_PARM_DESC(zfs_key_max_salt_uses, "Max number of times a salt value "
	"can be used for generating encryption keys before it is rotated");
#endif