/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/crypto/icp.h>
#include <sys/crypto/spi.h>
#include <modes/modes.h>
#include <aes/aes_impl.h>
/*
 * Initialize AES encryption and decryption key schedules.
 *
 * Parameters:
 * cipherKey	User key
 * keyBits	AES key size (128, 192, or 256 bits)
 * keysched	AES key schedule to be initialized, of type aes_key_t.
 *		Allocated by aes_alloc_keysched().
 */
void
aes_init_keysched(const uint8_t *cipherKey, uint_t keyBits, void *keysched)
{
	const aes_impl_ops_t *ops = aes_impl_get_ops();
	aes_key_t *newbie = keysched;
	uint_t keysize, i, j;
	union {
		uint64_t	ka64[4];
		uint32_t	ka32[8];
	} keyarr;

	switch (keyBits) {
	case 128:
		newbie->nr = 10;
		break;
	case 192:
		newbie->nr = 12;
		break;
	case 256:
		newbie->nr = 14;
		break;
	default:
		/* should never get here */
		return;
	}

	keysize = CRYPTO_BITS2BYTES(keyBits);
	/*
	 * Generic C implementation requires byteswap for little endian
	 * machines; various accelerated implementations for various
	 * architectures may not.
	 */
	if (!ops->needs_byteswap) {
		/* no byteswap needed */
		if (IS_P2ALIGNED(cipherKey, sizeof (uint64_t))) {
			for (i = 0, j = 0; j < keysize; i++, j += 8) {
				/* LINTED: pointer alignment */
				keyarr.ka64[i] = *((uint64_t *)&cipherKey[j]);
			}
		} else {
			memcpy(keyarr.ka32, cipherKey, keysize);
		}
	} else {
		/* byte swap */
		for (i = 0, j = 0; j < keysize; i++, j += 4) {
			keyarr.ka32[i] =
			    htonl(*(uint32_t *)(void *)&cipherKey[j]);
		}
	}

	ops->generate(newbie, keyarr.ka32, keyBits);

	/*
	 * Note: if there are systems that need the AES_64BIT_KS type in the
	 * future, move setting the key schedule type to the individual
	 * implementations.
	 */
	newbie->type = AES_32BIT_KS;
}
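
/*
 * Illustrative usage sketch (not part of this file's code; KM_SLEEP and the
 * 256-bit raw_key below are assumptions chosen for the example): a caller
 * obtains a schedule from aes_alloc_keysched(), expands the raw key into it,
 * and zeroes and frees it when done.
 *
 *	uint8_t raw_key[32];		(filled with the 256-bit key)
 *	size_t size;
 *	void *ks = aes_alloc_keysched(&size, KM_SLEEP);
 *
 *	if (ks != NULL) {
 *		aes_init_keysched(raw_key, 256, ks);
 *		(use ks with aes_encrypt_block() / aes_decrypt_block())
 *		memset(ks, 0, size);
 *		kmem_free(ks, size);
 *	}
 */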
/*
 * Encrypt one block using AES.
 * Align if needed and (for x86 32-bit only) byte-swap.
 *
 * Parameters:
 * ks	Key schedule, of type aes_key_t
 * pt	Input block (plain text)
 * ct	Output block (crypto text). Can overlap with pt
 */
int
aes_encrypt_block(const void *ks, const uint8_t *pt, uint8_t *ct)
{
	aes_key_t *ksch = (aes_key_t *)ks;
	const aes_impl_ops_t *ops = ksch->ops;

	if (IS_P2ALIGNED2(pt, ct, sizeof (uint32_t)) && !ops->needs_byteswap) {
		/* LINTED: pointer alignment */
		ops->encrypt(&ksch->encr_ks.ks32[0], ksch->nr,
		    /* LINTED: pointer alignment */
		    (uint32_t *)pt, (uint32_t *)ct);
	} else {
		uint32_t buffer[AES_BLOCK_LEN / sizeof (uint32_t)];

		/* Copy input block into buffer */
		if (ops->needs_byteswap) {
			buffer[0] = htonl(*(uint32_t *)(void *)&pt[0]);
			buffer[1] = htonl(*(uint32_t *)(void *)&pt[4]);
			buffer[2] = htonl(*(uint32_t *)(void *)&pt[8]);
			buffer[3] = htonl(*(uint32_t *)(void *)&pt[12]);
		} else {
			memcpy(&buffer, pt, AES_BLOCK_LEN);
		}

		ops->encrypt(&ksch->encr_ks.ks32[0], ksch->nr, buffer, buffer);

		/* Copy result from buffer to output block */
		if (ops->needs_byteswap) {
			*(uint32_t *)(void *)&ct[0] = htonl(buffer[0]);
			*(uint32_t *)(void *)&ct[4] = htonl(buffer[1]);
			*(uint32_t *)(void *)&ct[8] = htonl(buffer[2]);
			*(uint32_t *)(void *)&ct[12] = htonl(buffer[3]);
		} else {
			memcpy(ct, &buffer, AES_BLOCK_LEN);
		}
	}
	return (CRYPTO_SUCCESS);
}
/*
 * Decrypt one block using AES.
 * Align and byte-swap if needed.
 *
 * Parameters:
 * ks	Key schedule, of type aes_key_t
 * ct	Input block (crypto text)
 * pt	Output block (plain text). Can overlap with ct
 */
int
aes_decrypt_block(const void *ks, const uint8_t *ct, uint8_t *pt)
{
	aes_key_t *ksch = (aes_key_t *)ks;
	const aes_impl_ops_t *ops = ksch->ops;

	if (IS_P2ALIGNED2(ct, pt, sizeof (uint32_t)) && !ops->needs_byteswap) {
		/* LINTED: pointer alignment */
		ops->decrypt(&ksch->decr_ks.ks32[0], ksch->nr,
		    /* LINTED: pointer alignment */
		    (uint32_t *)ct, (uint32_t *)pt);
	} else {
		uint32_t buffer[AES_BLOCK_LEN / sizeof (uint32_t)];

		/* Copy input block into buffer */
		if (ops->needs_byteswap) {
			buffer[0] = htonl(*(uint32_t *)(void *)&ct[0]);
			buffer[1] = htonl(*(uint32_t *)(void *)&ct[4]);
			buffer[2] = htonl(*(uint32_t *)(void *)&ct[8]);
			buffer[3] = htonl(*(uint32_t *)(void *)&ct[12]);
		} else {
			memcpy(&buffer, ct, AES_BLOCK_LEN);
		}

		ops->decrypt(&ksch->decr_ks.ks32[0], ksch->nr, buffer, buffer);

		/* Copy result from buffer to output block */
		if (ops->needs_byteswap) {
			*(uint32_t *)(void *)&pt[0] = htonl(buffer[0]);
			*(uint32_t *)(void *)&pt[4] = htonl(buffer[1]);
			*(uint32_t *)(void *)&pt[8] = htonl(buffer[2]);
			*(uint32_t *)(void *)&pt[12] = htonl(buffer[3]);
		} else {
			memcpy(pt, &buffer, AES_BLOCK_LEN);
		}
	}
	return (CRYPTO_SUCCESS);
}
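
/*
 * Illustrative sketch (assumptions: "ks" was prepared as in the
 * aes_init_keysched() example above; "block" holds one 16-byte block):
 * because the input and output pointers may overlap, a single block can be
 * transformed in place.
 *
 *	uint8_t block[AES_BLOCK_LEN];
 *	int rv;
 *
 *	rv = aes_encrypt_block(ks, block, block);
 *	ASSERT3S(rv, ==, CRYPTO_SUCCESS);
 *	rv = aes_decrypt_block(ks, block, block);
 *	ASSERT3S(rv, ==, CRYPTO_SUCCESS);
 */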
/*
 * Allocate key schedule for AES.
 *
 * Return the pointer and set size to the number of bytes allocated.
 * Memory allocated must be freed by the caller when done.
 *
 * Parameters:
 * size		Size of key schedule allocated, in bytes
 * kmflag	Flag passed to kmem_alloc(9F); ignored in userland.
 */
void *
aes_alloc_keysched(size_t *size, int kmflag)
{
	aes_key_t *keysched;

	keysched = kmem_alloc(sizeof (aes_key_t), kmflag);
	if (keysched != NULL) {
		*size = sizeof (aes_key_t);
		return (keysched);
	}

	return (NULL);
}
/* AES implementation that contains the fastest methods */
static aes_impl_ops_t aes_fastest_impl = {
	.name = "fastest"
};

/* All compiled in implementations */
static const aes_impl_ops_t *aes_all_impl[] = {
	&aes_generic_impl,
#if defined(__x86_64)
	&aes_x86_64_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AES)
	&aes_aesni_impl,
#endif
};

/* Indicate that benchmark has been completed */
static boolean_t aes_impl_initialized = B_FALSE;
/* Select aes implementation */
#define	IMPL_FASTEST	(UINT32_MAX)
#define	IMPL_CYCLE	(UINT32_MAX-1)

#define	AES_IMPL_READ(i) (*(volatile uint32_t *) &(i))

static uint32_t icp_aes_impl = IMPL_FASTEST;
static uint32_t user_sel_impl = IMPL_FASTEST;

/* Hold all supported implementations */
static size_t aes_supp_impl_cnt = 0;
static aes_impl_ops_t *aes_supp_impl[ARRAY_SIZE(aes_all_impl)];
/*
 * Returns the AES operations for encrypt/decrypt/key setup. When a
 * SIMD implementation is not allowed in the current context, fall back
 * to the fastest generic implementation.
 */
const aes_impl_ops_t *
aes_impl_get_ops(void)
{
	if (!kfpu_allowed())
		return (&aes_generic_impl);

	const aes_impl_ops_t *ops = NULL;
	const uint32_t impl = AES_IMPL_READ(icp_aes_impl);

	switch (impl) {
	case IMPL_FASTEST:
		ASSERT(aes_impl_initialized);
		ops = &aes_fastest_impl;
		break;
	case IMPL_CYCLE:
		/* Cycle through supported implementations */
		ASSERT(aes_impl_initialized);
		ASSERT3U(aes_supp_impl_cnt, >, 0);
		static size_t cycle_impl_idx = 0;
		size_t idx = (++cycle_impl_idx) % aes_supp_impl_cnt;
		ops = aes_supp_impl[idx];
		break;
	default:
		ASSERT3U(impl, <, aes_supp_impl_cnt);
		ASSERT3U(aes_supp_impl_cnt, >, 0);
		if (impl < ARRAY_SIZE(aes_all_impl))
			ops = aes_supp_impl[impl];
		break;
	}

	ASSERT3P(ops, !=, NULL);

	return (ops);
}
/*
 * Initialize all supported implementations.
 */
void
aes_impl_init(void)
{
	aes_impl_ops_t *curr_impl;
	int i, c;

	/* Move supported implementations into aes_supp_impl */
	for (i = 0, c = 0; i < ARRAY_SIZE(aes_all_impl); i++) {
		curr_impl = (aes_impl_ops_t *)aes_all_impl[i];

		if (curr_impl->is_supported())
			aes_supp_impl[c++] = (aes_impl_ops_t *)curr_impl;
	}
	aes_supp_impl_cnt = c;
	/*
	 * Set the fastest implementation given the assumption that the
	 * hardware accelerated version is the fastest.
	 */
#if defined(__x86_64)
#if defined(HAVE_AES)
	if (aes_aesni_impl.is_supported()) {
		memcpy(&aes_fastest_impl, &aes_aesni_impl,
		    sizeof (aes_fastest_impl));
	} else
#endif
	{
		memcpy(&aes_fastest_impl, &aes_x86_64_impl,
		    sizeof (aes_fastest_impl));
	}
#else
	memcpy(&aes_fastest_impl, &aes_generic_impl,
	    sizeof (aes_fastest_impl));
#endif

	strlcpy(aes_fastest_impl.name, "fastest", AES_IMPL_NAME_MAX);

	/* Finish initialization */
	atomic_swap_32(&icp_aes_impl, user_sel_impl);
	aes_impl_initialized = B_TRUE;
}
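
/*
 * Illustrative ordering sketch (the calling context is an assumption, not
 * something this file defines): aes_impl_init() is expected to run once
 * during module initialization, before any key schedule is set up, e.g.:
 *
 *	aes_impl_init();
 *	...
 *	aes_init_keysched(raw_key, 128, ks);
 *
 * Before it runs, the IMPL_FASTEST and IMPL_CYCLE paths in
 * aes_impl_get_ops() would trip their ASSERTs in debug builds.
 */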
static const struct {
	const char *name;
	uint32_t sel;
} aes_impl_opts[] = {
		{ "cycle",	IMPL_CYCLE },
		{ "fastest",	IMPL_FASTEST },
};
/*
 * Set the desired aes implementation.
 *
 * If we are called before init(), the user preference will be saved in
 * user_sel_impl and applied in the later init() call. This occurs when the
 * module parameter is specified on module load. Otherwise, icp_aes_impl
 * is updated directly.
 *
 * @val		Name of aes implementation to use
 */
int
aes_impl_set(const char *val)
{
	int err = -EINVAL;
	char req_name[AES_IMPL_NAME_MAX];
	uint32_t impl = AES_IMPL_READ(user_sel_impl);
	size_t i;

	/* sanitize input */
	i = strnlen(val, AES_IMPL_NAME_MAX);
	if (i == 0 || i >= AES_IMPL_NAME_MAX)
		return (err);

	strlcpy(req_name, val, AES_IMPL_NAME_MAX);
	while (i > 0 && isspace(req_name[i-1]))
		i--;
	req_name[i] = '\0';

	/* Check mandatory options */
	for (i = 0; i < ARRAY_SIZE(aes_impl_opts); i++) {
		if (strcmp(req_name, aes_impl_opts[i].name) == 0) {
			impl = aes_impl_opts[i].sel;
			err = 0;
			break;
		}
	}

	/* check all supported impl if init() was already called */
	if (err != 0 && aes_impl_initialized) {
		/* check all supported implementations */
		for (i = 0; i < aes_supp_impl_cnt; i++) {
			if (strcmp(req_name, aes_supp_impl[i]->name) == 0) {
				impl = i;
				err = 0;
				break;
			}
		}
	}

	if (err == 0) {
		if (aes_impl_initialized)
			atomic_swap_32(&icp_aes_impl, impl);
		else
			atomic_swap_32(&user_sel_impl, impl);
	}

	return (err);
}
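
/*
 * Illustrative sketch of accepted values ("bogus" is just an example of an
 * unrecognized name): aes_impl_set() takes one of the mandatory options from
 * aes_impl_opts, or the name of any compiled-in and supported implementation
 * once aes_impl_init() has run.
 *
 *	int error;
 *
 *	error = aes_impl_set("fastest");	(returns 0)
 *	error = aes_impl_set("cycle");		(returns 0)
 *	error = aes_impl_set("bogus");		(returns -EINVAL)
 *
 * A selection made before aes_impl_init() is stored in user_sel_impl and
 * applied during initialization.
 */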
#if defined(_KERNEL) && defined(__linux__)

static int
icp_aes_impl_set(const char *val, zfs_kernel_param_t *kp)
{
	return (aes_impl_set(val));
}
static int
icp_aes_impl_get(char *buffer, zfs_kernel_param_t *kp)
{
	int i, cnt = 0;
	const char *fmt;
	const uint32_t impl = AES_IMPL_READ(icp_aes_impl);

	ASSERT(aes_impl_initialized);

	/* list mandatory options */
	for (i = 0; i < ARRAY_SIZE(aes_impl_opts); i++) {
		fmt = (impl == aes_impl_opts[i].sel) ? "[%s] " : "%s ";
		cnt += kmem_scnprintf(buffer + cnt, PAGE_SIZE - cnt, fmt,
		    aes_impl_opts[i].name);
	}

	/* list all supported implementations */
	for (i = 0; i < aes_supp_impl_cnt; i++) {
		fmt = (i == impl) ? "[%s] " : "%s ";
		cnt += kmem_scnprintf(buffer + cnt, PAGE_SIZE - cnt, fmt,
		    aes_supp_impl[i]->name);
	}

	return (cnt);
}
module_param_call(icp_aes_impl, icp_aes_impl_set, icp_aes_impl_get,
    NULL, 0644);
MODULE_PARM_DESC(icp_aes_impl, "Select aes implementation.");
#endif