/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/crypto/icp.h>
#include <sys/crypto/spi.h>
#include <sys/simd.h>
#include <modes/modes.h>
#include <aes/aes_impl.h>

/*
 * Initialize AES encryption and decryption key schedules.
 *
 * Parameters:
 * cipherKey	User key
 * keyBits	AES key size (128, 192, or 256 bits)
 * keysched	AES key schedule to be initialized, of type aes_key_t.
 *		Allocated by aes_alloc_keysched().
 */
void
aes_init_keysched(const uint8_t *cipherKey, uint_t keyBits, void *keysched)
{
	const aes_impl_ops_t *ops = aes_impl_get_ops();
	aes_key_t *newbie = keysched;
	uint_t keysize, i, j;
	union {
		uint64_t ka64[4];
		uint32_t ka32[8];
	} keyarr;

	switch (keyBits) {
	case 128:
		newbie->nr = 10;
		break;

	case 192:
		newbie->nr = 12;
		break;

	case 256:
		newbie->nr = 14;
		break;

	default:
		/* should never get here */
		return;
	}
	keysize = CRYPTO_BITS2BYTES(keyBits);

	/*
	 * Generic C implementation requires byteswap for little endian
	 * machines, various accelerated implementations for various
	 * architectures may not.
	 */
	if (!ops->needs_byteswap) {
		/* no byteswap needed */
		if (IS_P2ALIGNED(cipherKey, sizeof (uint64_t))) {
			for (i = 0, j = 0; j < keysize; i++, j += 8) {
				/* LINTED: pointer alignment */
				keyarr.ka64[i] = *((uint64_t *)&cipherKey[j]);
			}
		} else {
			memcpy(keyarr.ka32, cipherKey, keysize);
		}
	} else {
		/* byte swap */
		for (i = 0, j = 0; j < keysize; i++, j += 4) {
			keyarr.ka32[i] =
			    htonl(*(uint32_t *)(void *)&cipherKey[j]);
		}
	}

	ops->generate(newbie, keyarr.ka32, keyBits);
	newbie->ops = ops;

	/*
	 * Note: if there are systems that need the AES_64BIT_KS type in the
	 * future, move setting key schedule type to individual implementations
	 */
	newbie->type = AES_32BIT_KS;
}
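
/*
 * Example usage (sketch): build a 256-bit key schedule. "key" stands in
 * for the caller's raw key bytes and is illustrative only;
 * aes_alloc_keysched() is defined later in this file.
 *
 *	size_t size;
 *	void *ks = aes_alloc_keysched(&size, KM_SLEEP);
 *	if (ks != NULL)
 *		aes_init_keysched(key, 256, ks);
 */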

/*
 * Encrypt one block using AES.
 * Align if needed and (for x86 32-bit only) byte-swap.
 *
 * Parameters:
 * ks	Key schedule, of type aes_key_t
 * pt	Input block (plain text)
 * ct	Output block (crypto text). Can overlap with pt
 */
int
aes_encrypt_block(const void *ks, const uint8_t *pt, uint8_t *ct)
{
	aes_key_t *ksch = (aes_key_t *)ks;
	const aes_impl_ops_t *ops = ksch->ops;

	if (IS_P2ALIGNED2(pt, ct, sizeof (uint32_t)) && !ops->needs_byteswap) {
		/* LINTED: pointer alignment */
		ops->encrypt(&ksch->encr_ks.ks32[0], ksch->nr,
		    /* LINTED: pointer alignment */
		    (uint32_t *)pt, (uint32_t *)ct);
	} else {
		uint32_t buffer[AES_BLOCK_LEN / sizeof (uint32_t)];

		/* Copy input block into buffer */
		if (ops->needs_byteswap) {
			buffer[0] = htonl(*(uint32_t *)(void *)&pt[0]);
			buffer[1] = htonl(*(uint32_t *)(void *)&pt[4]);
			buffer[2] = htonl(*(uint32_t *)(void *)&pt[8]);
			buffer[3] = htonl(*(uint32_t *)(void *)&pt[12]);
		} else
			memcpy(&buffer, pt, AES_BLOCK_LEN);

		ops->encrypt(&ksch->encr_ks.ks32[0], ksch->nr, buffer, buffer);

		/* Copy result from buffer to output block */
		if (ops->needs_byteswap) {
			*(uint32_t *)(void *)&ct[0] = htonl(buffer[0]);
			*(uint32_t *)(void *)&ct[4] = htonl(buffer[1]);
			*(uint32_t *)(void *)&ct[8] = htonl(buffer[2]);
			*(uint32_t *)(void *)&ct[12] = htonl(buffer[3]);
		} else
			memcpy(ct, &buffer, AES_BLOCK_LEN);
	}
	return (CRYPTO_SUCCESS);
}
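
/*
 * Example usage (sketch): encrypt one 16-byte block with a schedule
 * prepared as above; "ks", "pt", and "ct" are illustrative names.
 *
 *	uint8_t pt[AES_BLOCK_LEN] = { 0 };
 *	uint8_t ct[AES_BLOCK_LEN];
 *	int rv = aes_encrypt_block(ks, pt, ct);
 *
 * rv is CRYPTO_SUCCESS (the only return value) on completion.
 */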

/*
 * Decrypt one block using AES.
 * Align and byte-swap if needed.
 *
 * Parameters:
 * ks	Key schedule, of type aes_key_t
 * ct	Input block (crypto text)
 * pt	Output block (plain text). Can overlap with ct
 */
int
aes_decrypt_block(const void *ks, const uint8_t *ct, uint8_t *pt)
{
	aes_key_t *ksch = (aes_key_t *)ks;
	const aes_impl_ops_t *ops = ksch->ops;

	if (IS_P2ALIGNED2(ct, pt, sizeof (uint32_t)) && !ops->needs_byteswap) {
		/* LINTED: pointer alignment */
		ops->decrypt(&ksch->decr_ks.ks32[0], ksch->nr,
		    /* LINTED: pointer alignment */
		    (uint32_t *)ct, (uint32_t *)pt);
	} else {
		uint32_t buffer[AES_BLOCK_LEN / sizeof (uint32_t)];

		/* Copy input block into buffer */
		if (ops->needs_byteswap) {
			buffer[0] = htonl(*(uint32_t *)(void *)&ct[0]);
			buffer[1] = htonl(*(uint32_t *)(void *)&ct[4]);
			buffer[2] = htonl(*(uint32_t *)(void *)&ct[8]);
			buffer[3] = htonl(*(uint32_t *)(void *)&ct[12]);
		} else
			memcpy(&buffer, ct, AES_BLOCK_LEN);

		ops->decrypt(&ksch->decr_ks.ks32[0], ksch->nr, buffer, buffer);

		/* Copy result from buffer to output block */
		if (ops->needs_byteswap) {
			*(uint32_t *)(void *)&pt[0] = htonl(buffer[0]);
			*(uint32_t *)(void *)&pt[4] = htonl(buffer[1]);
			*(uint32_t *)(void *)&pt[8] = htonl(buffer[2]);
			*(uint32_t *)(void *)&pt[12] = htonl(buffer[3]);
		} else
			memcpy(pt, &buffer, AES_BLOCK_LEN);
	}
	return (CRYPTO_SUCCESS);
}
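
/*
 * Example usage (sketch): decrypting "ct" from the example above with
 * the same key schedule recovers the original plaintext block.
 *
 *	uint8_t out[AES_BLOCK_LEN];
 *	(void) aes_decrypt_block(ks, ct, out);
 *	ASSERT0(memcmp(out, pt, AES_BLOCK_LEN));
 */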

/*
 * Allocate key schedule for AES.
 *
 * Return the pointer and set size to the number of bytes allocated.
 * Memory allocated must be freed by the caller when done.
 *
 * Parameters:
 * size		Size of key schedule allocated, in bytes
 * kmflag	Flag passed to kmem_alloc(9F); ignored in userland.
 */
void *
aes_alloc_keysched(size_t *size, int kmflag)
{
	aes_key_t *keysched;

	keysched = kmem_alloc(sizeof (aes_key_t), kmflag);
	if (keysched != NULL) {
		*size = sizeof (aes_key_t);
		return (keysched);
	}
	return (NULL);
}
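
/*
 * The schedule holds key material, so a caller that is done with it
 * would typically zero it before freeing, e.g. (sketch):
 *
 *	memset(ks, 0, size);
 *	kmem_free(ks, size);
 */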

/* AES implementation that contains the fastest methods */
static aes_impl_ops_t aes_fastest_impl = {
	.name = "fastest"
};

/* All compiled in implementations */
static const aes_impl_ops_t *aes_all_impl[] = {
	&aes_generic_impl,
#if defined(__x86_64)
	&aes_x86_64_impl,
#endif
#if defined(__x86_64) && defined(HAVE_AES)
	&aes_aesni_impl,
#endif
};

/* Indicate that benchmark has been completed */
static boolean_t aes_impl_initialized = B_FALSE;

/* Select aes implementation */
#define	IMPL_FASTEST	(UINT32_MAX)
#define	IMPL_CYCLE	(UINT32_MAX-1)

#define	AES_IMPL_READ(i) (*(volatile uint32_t *) &(i))

static uint32_t icp_aes_impl = IMPL_FASTEST;
static uint32_t user_sel_impl = IMPL_FASTEST;

/* Hold all supported implementations */
static size_t aes_supp_impl_cnt = 0;
static aes_impl_ops_t *aes_supp_impl[ARRAY_SIZE(aes_all_impl)];

/*
 * Returns the AES operations for encrypt/decrypt/key setup. When a
 * SIMD implementation is not allowed in the current context, fall
 * back to the generic C implementation.
 */
const aes_impl_ops_t *
aes_impl_get_ops(void)
{
	if (!kfpu_allowed())
		return (&aes_generic_impl);

	const aes_impl_ops_t *ops = NULL;
	const uint32_t impl = AES_IMPL_READ(icp_aes_impl);

	switch (impl) {
	case IMPL_FASTEST:
		ASSERT(aes_impl_initialized);
		ops = &aes_fastest_impl;
		break;
	case IMPL_CYCLE:
		/* Cycle through supported implementations */
		ASSERT(aes_impl_initialized);
		ASSERT3U(aes_supp_impl_cnt, >, 0);
		static size_t cycle_impl_idx = 0;
		size_t idx = (++cycle_impl_idx) % aes_supp_impl_cnt;
		ops = aes_supp_impl[idx];
		break;
	default:
		ASSERT3U(impl, <, aes_supp_impl_cnt);
		ASSERT3U(aes_supp_impl_cnt, >, 0);
		if (impl < ARRAY_SIZE(aes_all_impl))
			ops = aes_supp_impl[impl];
		break;
	}

	ASSERT3P(ops, !=, NULL);

	return (ops);
}

/*
 * Initialize all supported implementations.
 */
void
aes_impl_init(void)
{
	aes_impl_ops_t *curr_impl;
	int i, c;

	/* Move supported implementations into aes_supp_impl */
	for (i = 0, c = 0; i < ARRAY_SIZE(aes_all_impl); i++) {
		curr_impl = (aes_impl_ops_t *)aes_all_impl[i];

		if (curr_impl->is_supported())
			aes_supp_impl[c++] = (aes_impl_ops_t *)curr_impl;
	}
	aes_supp_impl_cnt = c;

	/*
	 * Set the fastest implementation given the assumption that the
	 * hardware accelerated version is the fastest.
	 */
#if defined(__x86_64)
#if defined(HAVE_AES)
	if (aes_aesni_impl.is_supported()) {
		memcpy(&aes_fastest_impl, &aes_aesni_impl,
		    sizeof (aes_fastest_impl));
	} else
#endif
	{
		memcpy(&aes_fastest_impl, &aes_x86_64_impl,
		    sizeof (aes_fastest_impl));
	}
#else
	memcpy(&aes_fastest_impl, &aes_generic_impl,
	    sizeof (aes_fastest_impl));
#endif

	strlcpy(aes_fastest_impl.name, "fastest", AES_IMPL_NAME_MAX);

	/* Finish initialization */
	atomic_swap_32(&icp_aes_impl, user_sel_impl);
	aes_impl_initialized = B_TRUE;
}
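
/*
 * This is expected to run once, from the ICP init path, before any of
 * the functions above are used; the ASSERTs on aes_impl_initialized
 * enforce that ordering. Sketch:
 *
 *	aes_impl_init();
 *	ops = aes_impl_get_ops();
 */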

static const struct {
	const char *name;
	uint32_t sel;
} aes_impl_opts[] = {
		{ "cycle",	IMPL_CYCLE },
		{ "fastest",	IMPL_FASTEST },
};

/*
 * Set the desired AES implementation.
 *
 * If we are called before init(), the user preference will be saved in
 * user_sel_impl and applied in the later init() call. This occurs when the
 * module parameter is specified on module load. Otherwise, update
 * icp_aes_impl directly.
 *
 * @val		Name of aes implementation to use
 * @param	Unused.
 */
int
aes_impl_set(const char *val)
{
	int err = -EINVAL;
	char req_name[AES_IMPL_NAME_MAX];
	uint32_t impl = AES_IMPL_READ(user_sel_impl);
	size_t i;

	/* sanitize input */
	i = strnlen(val, AES_IMPL_NAME_MAX);
	if (i == 0 || i >= AES_IMPL_NAME_MAX)
		return (err);

	strlcpy(req_name, val, AES_IMPL_NAME_MAX);
	while (i > 0 && isspace(req_name[i-1]))
		i--;
	req_name[i] = '\0';

	/* Check mandatory options */
	for (i = 0; i < ARRAY_SIZE(aes_impl_opts); i++) {
		if (strcmp(req_name, aes_impl_opts[i].name) == 0) {
			impl = aes_impl_opts[i].sel;
			err = 0;
			break;
		}
	}

	/* Check all supported implementations if init() was already called */
	if (err != 0 && aes_impl_initialized) {
		for (i = 0; i < aes_supp_impl_cnt; i++) {
			if (strcmp(req_name, aes_supp_impl[i]->name) == 0) {
				impl = i;
				err = 0;
				break;
			}
		}
	}

	if (err == 0) {
		if (aes_impl_initialized)
			atomic_swap_32(&icp_aes_impl, impl);
		else
			atomic_swap_32(&user_sel_impl, impl);
	}

	return (err);
}
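
/*
 * Example usage (sketch): select an implementation by name. "fastest"
 * and "cycle" are always accepted; per-implementation names such as
 * "aesni" are only accepted after init() and when the CPU supports them.
 *
 *	int err = aes_impl_set("fastest");
 *
 * err is 0 on success and -EINVAL for an unknown or unsupported name.
 */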

#if defined(_KERNEL) && defined(__linux__)

static int
icp_aes_impl_set(const char *val, zfs_kernel_param_t *kp)
{
	return (aes_impl_set(val));
}

static int
icp_aes_impl_get(char *buffer, zfs_kernel_param_t *kp)
{
	int i, cnt = 0;
	char *fmt;
	const uint32_t impl = AES_IMPL_READ(icp_aes_impl);

	ASSERT(aes_impl_initialized);

	/* list mandatory options */
	for (i = 0; i < ARRAY_SIZE(aes_impl_opts); i++) {
		fmt = (impl == aes_impl_opts[i].sel) ? "[%s] " : "%s ";
		cnt += kmem_scnprintf(buffer + cnt, PAGE_SIZE - cnt, fmt,
		    aes_impl_opts[i].name);
	}

	/* list all supported implementations */
	for (i = 0; i < aes_supp_impl_cnt; i++) {
		fmt = (i == impl) ? "[%s] " : "%s ";
		cnt += kmem_scnprintf(buffer + cnt, PAGE_SIZE - cnt, fmt,
		    aes_supp_impl[i]->name);
	}

	return (cnt);
}

module_param_call(icp_aes_impl, icp_aes_impl_set, icp_aes_impl_get,
    NULL, 0644);
MODULE_PARM_DESC(icp_aes_impl, "Select aes implementation.");
#endif
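
/*
 * Example (sketch, Linux only): the parameter registered above can be
 * read and set at runtime through sysfs. The exact path depends on how
 * the module is packaged (shown here for a monolithic zfs module), and
 * the listed names depend on the build and CPU; the output line is
 * illustrative.
 *
 *	# cat /sys/module/zfs/parameters/icp_aes_impl
 *	cycle [fastest] generic x86_64 aesni
 *	# echo generic > /sys/module/zfs/parameters/icp_aes_impl
 */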