// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004 Michal Ludvig <michal@logix.cz>
 */
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/skcipher.h>
#include <crypto/padlock.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cpu_device_id.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/fpu/api.h>
/*
 * Number of data blocks actually fetched for each xcrypt insn.
 * Processors with prefetch errata will fetch extra blocks.
 */
static unsigned int ecb_fetch_blocks = 2;
#define MAX_ECB_FETCH_BLOCKS (8)
#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)

static unsigned int cbc_fetch_blocks = 1;
#define MAX_CBC_FETCH_BLOCKS (4)
#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
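/*
 * Note: padlock_init() below raises these to MAX_ECB_FETCH_BLOCKS /
 * MAX_CBC_FETCH_BLOCKS when it detects a VIA Nano stepping 2, so the
 * bounce-buffer paths must always be sized for the worst-case fetch.
 */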
/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
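/*
 * How aes_set_key() programs these bits (a reading of this file, not an
 * official field reference): rounds is the AES round count (10/12/14),
 * encdec selects decryption when set, keygen tells the hardware the key
 * schedule was already expanded in software, and ksize encodes the key
 * length (0/1/2 for 128/192/256 bits).
 */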
/* Whenever making any changes to the following
 * structure *make sure* you keep E, d_data
 * and cword aligned on 16-byte boundaries, and that
 * the hardware can access 16 * 16 bytes of E and d_data
 * (only the first 15 * 16 bytes matter but the HW reads
 * full 16 * 16 bytes). */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
};
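/*
 * D is set by aes_set_key(): it aliases E when the hardware expands the
 * key itself (128-bit keys) and points at d_data when the decryption
 * schedule has to be generated in software.
 */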
static DEFINE_PER_CPU(struct cword *, paes_last_cword);
/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	   as it's possible that the capability will be
	   added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}
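/*
 * In practice this means only 128-bit keys take the hardware key
 * generation path; aes_set_key() falls back to aes_expandkey() for
 * 192- and 256-bit keys and feeds the expanded schedules to the engine.
 */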
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}
static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}

static inline struct aes_ctx *skcipher_aes_ctx(struct crypto_skcipher *tfm)
{
	return aes_ctx_common(crypto_skcipher_ctx(tfm));
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	struct crypto_aes_ctx gen_aes;
	int cpu;

	if (key_len % 8)
		return -EINVAL;

	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);
	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));

	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;
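	/*
	 * Worked example of the arithmetic above: key_len 16/24/32 bytes
	 * gives rounds = 10/12/14 and ksize = 0/1/2, matching AES-128/192/256.
	 */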
	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		goto ok;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	if (aes_expandkey(&gen_aes, in_key, key_len))
		return -EINVAL;

	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);
ok:
	/* Invalidate any cached control word that points into this ctx. */
	for_each_online_cpu(cpu)
		if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
		    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
			per_cpu(paes_last_cword, cpu) = NULL;

	return 0;
}
static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key,
				unsigned int key_len)
{
	return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len);
}
/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
static inline void padlock_reset_key(struct cword *cword)
{
	int cpu = raw_smp_processor_id();

	if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
#else
		asm volatile ("pushfq; popfq");
#endif
}

static inline void padlock_store_cword(struct cword *cword)
{
	per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}
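/*
 * Why the pushf/popf above: as we understand the PadLock engine, it
 * caches the key schedule for the control word it last used and only
 * reloads key material after EFLAGS has been written. The per-cpu
 * paes_last_cword cache lets us skip that reload when the same tfm runs
 * back-to-back on one CPU.
 */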
/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when CR0.TS is '1'.  Fortunately,
 * the kernel doesn't use CR0.TS.
 */

static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				  struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}
static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				 u8 *iv, struct cword *control_word, int count)
{
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}
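/*
 * The asm constraints above encode the xcrypt register convention:
 * SI = source, DI = destination, DX = control word, BX = key,
 * CX = block count, and (CBC only) AX = IV. AX is marked read-write
 * ("+a"), and its updated value is returned as the chaining IV.
 */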
static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	rep_xcrypt_ecb(tmp, out, key, cword, count);
}
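/*
 * Buffer sizing rationale (our reading): this path only ever sees
 * count < ecb_fetch_blocks, so at most MAX_ECB_FETCH_BLOCKS - 1 blocks
 * are copied, plus PADLOCK_ALIGNMENT - 1 slack for PTR_ALIGN; any
 * over-fetch beyond that lands in stack that is already in use.
 */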
static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
			  u8 *iv, struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}
static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword, int count)
{
	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
	 * We could avoid some copying here but it's probably not worth it.
	 */
	if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) {
		ecb_crypt_copy(in, out, key, cword, count);
		return;
	}

	rep_xcrypt_ecb(in, out, key, cword, count);
}
static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
			    u8 *iv, struct cword *cword, int count)
{
	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
	if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE))
		return cbc_crypt_copy(in, out, key, iv, cword, count);

	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	u32 initial = count & (ecb_fetch_blocks - 1);

	if (count < ecb_fetch_blocks) {
		ecb_crypt(input, output, key, control_word, count);
		return;
	}

	count -= initial;

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
			      : "+S"(input), "+D"(output)
			      : "d"(control_word), "b"(key), "c"(initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}
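/*
 * Why the remainder runs first (our reading of the errata workaround):
 * a rep with fewer blocks than the fetch size over-fetches, but here
 * the over-fetch lands in the bulk data that immediately follows and is
 * mapped. The remaining bulk count is then an exact multiple of
 * ecb_fetch_blocks, so it never reads past the end of the buffer.
 */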
static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	u32 initial = count & (cbc_fetch_blocks - 1);

	if (count < cbc_fetch_blocks)
		return cbc_crypt(input, output, key, iv, control_word, count);

	count -= initial;

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
			      : "+S" (input), "+D" (output), "+a" (iv)
			      : "d" (control_word), "b" (key), "c" (initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}
static void padlock_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);

	padlock_reset_key(&ctx->cword.encrypt);
	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
	padlock_store_cword(&ctx->cword.encrypt);
}

static void padlock_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);

	padlock_reset_key(&ctx->cword.encrypt);
	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
	padlock_store_cword(&ctx->cword.encrypt);
}
static struct crypto_alg aes_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "aes-padlock",
	.cra_priority		= PADLOCK_CRA_PRIORITY,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct aes_ctx),
	.cra_alignmask		= PADLOCK_ALIGNMENT - 1,
	.cra_module		= THIS_MODULE,
	.cra_u			= {
		.cipher = {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= aes_set_key,
			.cia_encrypt		= padlock_aes_encrypt,
			.cia_decrypt		= padlock_aes_decrypt,
		}
	}
};
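/*
 * aes_alg is the single-block cipher interface; the skcipher algorithms
 * below are what service full ECB/CBC requests, walking the
 * scatterlists and passing multi-block runs to the engine in one go.
 */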
static int ecb_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}
static int ecb_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.decrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}
static struct skcipher_alg ecb_aes_alg = {
	.base.cra_name		= "ecb(aes)",
	.base.cra_driver_name	= "ecb-aes-padlock",
	.base.cra_priority	= PADLOCK_COMPOSITE_PRIORITY,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aes_ctx),
	.base.cra_alignmask	= PADLOCK_ALIGNMENT - 1,
	.base.cra_module	= THIS_MODULE,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.setkey			= aes_set_key_skcipher,
	.encrypt		= ecb_aes_encrypt,
	.decrypt		= ecb_aes_decrypt,
};
static int cbc_aes_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.decrypt);

	return err;
}
static int cbc_aes_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct aes_ctx *ctx = skcipher_aes_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	padlock_reset_key(&ctx->cword.encrypt);

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = skcipher_walk_done(&walk, nbytes);
	}

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}
static struct skcipher_alg cbc_aes_alg = {
	.base.cra_name		= "cbc(aes)",
	.base.cra_driver_name	= "cbc-aes-padlock",
	.base.cra_priority	= PADLOCK_COMPOSITE_PRIORITY,
	.base.cra_blocksize	= AES_BLOCK_SIZE,
	.base.cra_ctxsize	= sizeof(struct aes_ctx),
	.base.cra_alignmask	= PADLOCK_ALIGNMENT - 1,
	.base.cra_module	= THIS_MODULE,
	.min_keysize		= AES_MIN_KEY_SIZE,
	.max_keysize		= AES_MAX_KEY_SIZE,
	.ivsize			= AES_BLOCK_SIZE,
	.setkey			= aes_set_key_skcipher,
	.encrypt		= cbc_aes_encrypt,
	.decrypt		= cbc_aes_decrypt,
};
static const struct x86_cpu_id padlock_cpu_id[] = {
	X86_MATCH_FEATURE(X86_FEATURE_XCRYPT, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id);
static int __init padlock_init(void)
{
	int ret;
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!x86_match_cpu(padlock_cpu_id))
		return -ENODEV;

	if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)) != 0)
		goto aes_err;

	if ((ret = crypto_register_skcipher(&ecb_aes_alg)) != 0)
		goto ecb_aes_err;

	if ((ret = crypto_register_skcipher(&cbc_aes_alg)) != 0)
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");

	if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
	}

out:
	return ret;

cbc_aes_err:
	crypto_unregister_skcipher(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}
static void __exit padlock_fini(void)
{
	crypto_unregister_skcipher(&cbc_aes_alg);
	crypto_unregister_skcipher(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}
module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");

MODULE_ALIAS_CRYPTO("aes");