/*
 * Support for VIA PadLock hardware crypto engine.
 *
 * Copyright (c) 2004  Michal Ludvig <michal@logix.cz>
 */

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/byteorder.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include "padlock.h"
/*
 * Number of data blocks actually fetched for each xcrypt insn.
 * Processors with prefetch errata will fetch extra blocks.
 */
static unsigned int ecb_fetch_blocks = 2;
#define MAX_ECB_FETCH_BLOCKS (8)
#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE)

static unsigned int cbc_fetch_blocks = 1;
#define MAX_CBC_FETCH_BLOCKS (4)
#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE)
/* Control word. */
struct cword {
	unsigned int __attribute__ ((__packed__))
		rounds:4,
		algo:3,
		keygen:1,
		interm:1,
		encdec:1,
		ksize:2;
} __attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
/* Whenever making any changes to the following
 * structure *make sure* you keep E, d_data
 * and cword aligned on 16-byte boundaries and that
 * the hardware can access 16 * 16 bytes of E and d_data
 * (only the first 15 * 16 bytes matter but the HW reads
 * more).
 */
struct aes_ctx {
	u32 E[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	u32 d_data[AES_MAX_KEYLENGTH_U32]
		__attribute__ ((__aligned__(PADLOCK_ALIGNMENT)));
	struct {
		struct cword encrypt;
		struct cword decrypt;
	} cword;
	u32 *D;
};
static DEFINE_PER_CPU(struct cword *, paes_last_cword);
/* Tells whether the ACE is capable of generating
   the extended key for a given key_len. */
static inline int
aes_hw_extkey_available(uint8_t key_len)
{
	/* TODO: We should check the actual CPU model/stepping
	   as it's possible that the capability will be
	   added in the next CPU revisions. */
	if (key_len == 16)
		return 1;
	return 0;
}
static inline struct aes_ctx *aes_ctx_common(void *ctx)
{
	unsigned long addr = (unsigned long)ctx;
	unsigned long align = PADLOCK_ALIGNMENT;

	if (align <= crypto_tfm_ctx_alignment())
		align = 1;
	return (struct aes_ctx *)ALIGN(addr, align);
}
static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm)
{
	return aes_ctx_common(crypto_tfm_ctx(tfm));
}
static inline struct aes_ctx *blk_aes_ctx(struct crypto_blkcipher *tfm)
{
	return aes_ctx_common(crypto_blkcipher_ctx(tfm));
}
static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
		       unsigned int key_len)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	const __le32 *key = (const __le32 *)in_key;
	u32 *flags = &tfm->crt_flags;
	struct crypto_aes_ctx gen_aes;
	int cpu;

	if (key_len % 8) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	/*
	 * If the hardware is capable of generating the extended key
	 * itself we must supply the plain key for both encryption
	 * and decryption.
	 */
	ctx->D = ctx->E;

	ctx->E[0] = le32_to_cpu(key[0]);
	ctx->E[1] = le32_to_cpu(key[1]);
	ctx->E[2] = le32_to_cpu(key[2]);
	ctx->E[3] = le32_to_cpu(key[3]);

	/* Prepare control words. */
	memset(&ctx->cword, 0, sizeof(ctx->cword));
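
	/*
	 * rounds is the AES round count (10/12/14 for 128/192/256-bit
	 * keys) and ksize encodes the key size as 0/1/2 for the same
	 * three sizes; encdec = 1 selects decryption in the hardware
	 * control word.
	 */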
	ctx->cword.decrypt.encdec = 1;
	ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4;
	ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds;
	ctx->cword.encrypt.ksize = (key_len - 16) / 8;
	ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize;

	/* Don't generate extended keys if the hardware can do it. */
	if (aes_hw_extkey_available(key_len))
		goto ok;

	ctx->D = ctx->d_data;
	ctx->cword.encrypt.keygen = 1;
	ctx->cword.decrypt.keygen = 1;

	if (crypto_aes_expand_key(&gen_aes, in_key, key_len)) {
		*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
		return -EINVAL;
	}

	memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH);
	memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH);

ok:
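	/*
	 * The key has changed: any CPU still remembering one of this
	 * context's control words as the last one used must reload it
	 * on the next operation.
	 */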
	for_each_online_cpu(cpu)
		if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) ||
		    &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu))
			per_cpu(paes_last_cword, cpu) = NULL;

	return 0;
}
/* ====== Encryption/decryption routines ====== */

/* These are the real calls to PadLock. */
static inline void padlock_reset_key(struct cword *cword)
{
	int cpu = raw_smp_processor_id();
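
	/*
	 * The hardware caches the loaded key material and, per VIA's
	 * PadLock documentation, reloads it from memory only after an
	 * EFLAGS write; a pushf/popf pair forces that reload.  Do it
	 * only when the control word last used on this CPU differs
	 * from the one requested.
	 */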
	if (cword != per_cpu(paes_last_cword, cpu))
#ifndef CONFIG_X86_64
		asm volatile ("pushfl; popfl");
#else
		asm volatile ("pushfq; popfq");
#endif
}
static inline void padlock_store_cword(struct cword *cword)
{
	per_cpu(paes_last_cword, raw_smp_processor_id()) = cword;
}
/*
 * While the padlock instructions don't use FP/SSE registers, they
 * generate a spurious DNA fault when cr0.ts is '1'. These instructions
 * should be used only inside the irq_ts_save/restore() context.
 */
static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				  struct cword *control_word, int count)
{
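	/*
	 * The opcode is emitted as raw bytes so the file assembles even
	 * with binutils that lack PadLock support.  Per the constraints
	 * below, XCRYPT takes input in ESI, output in EDI, the key in
	 * EBX, the control word in EDX and the block count in ECX.
	 */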
	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count));
}
static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				 u8 *iv, struct cword *control_word, int count)
{
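	/*
	 * XCRYPTCBC additionally takes the IV in EAX and leaves EAX
	 * pointing at the last ciphertext block, which is the IV for
	 * the next chunk; hand that pointer back to the caller.
	 */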
	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count));
	return iv;
}
static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key,
			   struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
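	/*
	 * This path is only taken with count < ecb_fetch_blocks, so at
	 * most MAX_ECB_FETCH_BLOCKS - 1 blocks ever need to fit in buf;
	 * any prefetch beyond them lands in that in-use stack.
	 */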
	u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	rep_xcrypt_ecb(tmp, out, key, cword, count);
}
static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key,
			  u8 *iv, struct cword *cword, int count)
{
	/*
	 * Padlock prefetches extra data so we must provide mapped input buffers.
	 * Assume there are at least 16 bytes of stack already in use.
	 */
	u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1];
	u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);

	memcpy(tmp, in, count * AES_BLOCK_SIZE);
	return rep_xcrypt_cbc(tmp, out, key, iv, cword, count);
}
static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
			     struct cword *cword, int count)
{
	/* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
	 * We could avoid some copying here but it's probably not worth it.
	 */
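	/*
	 * If in's offset within its page plus the hardware fetch window
	 * crosses the page end, the prefetch could touch a possibly
	 * unmapped next page; bounce through a stack buffer instead.
	 */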
	if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) {
		ecb_crypt_copy(in, out, key, cword, count);
		return;
	}

	rep_xcrypt_ecb(in, out, key, cword, count);
}
static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
			    u8 *iv, struct cword *cword, int count)
{
	/* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
	if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE))
		return cbc_crypt_copy(in, out, key, iv, cword, count);

	return rep_xcrypt_cbc(in, out, key, iv, cword, count);
}
static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
				      void *control_word, u32 count)
{
	u32 initial = count & (ecb_fetch_blocks - 1);
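
	/*
	 * Process the count % ecb_fetch_blocks leading blocks first so
	 * the main rep xcryptecb below runs on a multiple of the fetch
	 * size and its prefetching never reads past the supplied data.
	 */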
	if (count < ecb_fetch_blocks) {
		ecb_crypt(input, output, key, control_word, count);
		return;
	}

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
			      : "+S"(input), "+D"(output)
			      : "d"(control_word), "b"(key), "c"(initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xc8"	/* rep xcryptecb */
		      : "+S"(input), "+D"(output)
		      : "d"(control_word), "b"(key), "c"(count - initial));
}
static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
				     u8 *iv, void *control_word, u32 count)
{
	u32 initial = count & (cbc_fetch_blocks - 1);

	if (count < cbc_fetch_blocks)
		return cbc_crypt(input, output, key, iv, control_word, count);

	if (initial)
		asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
			      : "+S" (input), "+D" (output), "+a" (iv)
			      : "d" (control_word), "b" (key), "c" (initial));

	asm volatile (".byte 0xf3,0x0f,0xa7,0xd0"	/* rep xcryptcbc */
		      : "+S" (input), "+D" (output), "+a" (iv)
		      : "d" (control_word), "b" (key), "c" (count - initial));
	return iv;
}
static void aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}
static void aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	struct aes_ctx *ctx = aes_ctx(tfm);
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);
	ts_state = irq_ts_save();
	ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1);
	irq_ts_restore(ts_state);
	padlock_store_cword(&ctx->cword.encrypt);
}
static struct crypto_alg aes_alg = {
	.cra_name		=	"aes",
	.cra_driver_name	=	"aes-padlock",
	.cra_priority		=	PADLOCK_CRA_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_CIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(aes_alg.cra_list),
	.cra_u			=	{
		.cipher = {
			.cia_min_keysize	=	AES_MIN_KEY_SIZE,
			.cia_max_keysize	=	AES_MAX_KEY_SIZE,
			.cia_setkey		=	aes_set_key,
			.cia_encrypt		=	aes_encrypt,
			.cia_decrypt		=	aes_decrypt,
		}
	}
};
static int ecb_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
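	/*
	 * The walk yields virtually contiguous spans; encrypt the whole
	 * blocks in each span and report the sub-block remainder back
	 * to blkcipher_walk_done().
	 */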
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->E, &ctx->cword.encrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}
static int ecb_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.decrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}
static struct crypto_alg ecb_aes_alg = {
	.cra_name		=	"ecb(aes)",
	.cra_driver_name	=	"ecb-aes-padlock",
	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(ecb_aes_alg.cra_list),
	.cra_u			=	{
		.blkcipher = {
			.min_keysize	=	AES_MIN_KEY_SIZE,
			.max_keysize	=	AES_MAX_KEY_SIZE,
			.setkey		=	aes_set_key,
			.encrypt	=	ecb_aes_encrypt,
			.decrypt	=	ecb_aes_decrypt,
		}
	}
};
static int cbc_aes_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
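		/*
		 * The hardware returns a pointer to the last ciphertext
		 * block; copy it back into walk.iv so chaining carries
		 * over correctly into the next scatterlist segment.
		 */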
		u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr,
					    walk.dst.virt.addr, ctx->E,
					    walk.iv, &ctx->cword.encrypt,
					    nbytes / AES_BLOCK_SIZE);
		memcpy(walk.iv, iv, AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.decrypt);

	return err;
}
static int cbc_aes_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
{
	struct aes_ctx *ctx = blk_aes_ctx(desc->tfm);
	struct blkcipher_walk walk;
	int err;
	int ts_state;

	padlock_reset_key(&ctx->cword.encrypt);

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	ts_state = irq_ts_save();
	while ((nbytes = walk.nbytes)) {
		padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr,
				   ctx->D, walk.iv, &ctx->cword.decrypt,
				   nbytes / AES_BLOCK_SIZE);
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}
	irq_ts_restore(ts_state);

	padlock_store_cword(&ctx->cword.encrypt);

	return err;
}
static struct crypto_alg cbc_aes_alg = {
	.cra_name		=	"cbc(aes)",
	.cra_driver_name	=	"cbc-aes-padlock",
	.cra_priority		=	PADLOCK_COMPOSITE_PRIORITY,
	.cra_flags		=	CRYPTO_ALG_TYPE_BLKCIPHER,
	.cra_blocksize		=	AES_BLOCK_SIZE,
	.cra_ctxsize		=	sizeof(struct aes_ctx),
	.cra_alignmask		=	PADLOCK_ALIGNMENT - 1,
	.cra_type		=	&crypto_blkcipher_type,
	.cra_module		=	THIS_MODULE,
	.cra_list		=	LIST_HEAD_INIT(cbc_aes_alg.cra_list),
	.cra_u			=	{
		.blkcipher = {
			.min_keysize	=	AES_MIN_KEY_SIZE,
			.max_keysize	=	AES_MAX_KEY_SIZE,
			.ivsize		=	AES_BLOCK_SIZE,
			.setkey		=	aes_set_key,
			.encrypt	=	cbc_aes_encrypt,
			.decrypt	=	cbc_aes_decrypt,
		}
	}
};
static int __init padlock_init(void)
{
	int ret;
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (!cpu_has_xcrypt) {
		printk(KERN_NOTICE PFX "VIA PadLock not detected.\n");
		return -ENODEV;
	}

	if (!cpu_has_xcrypt_enabled) {
		printk(KERN_NOTICE PFX "VIA PadLock detected, but not enabled. Hmm, strange...\n");
		return -ENODEV;
	}

	if ((ret = crypto_register_alg(&aes_alg)))
		goto aes_err;

	if ((ret = crypto_register_alg(&ecb_aes_alg)))
		goto ecb_aes_err;

	if ((ret = crypto_register_alg(&cbc_aes_alg)))
		goto cbc_aes_err;

	printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
	if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
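		/*
		 * Family 6, model 15, stepping 2 is the VIA Nano, whose
		 * prefetch erratum makes the engine fetch a maximum-size
		 * window of data; size the fetch-window bookkeeping
		 * above for that worst case.
		 */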
		ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
		cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
		printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
	}

out:
	return ret;

cbc_aes_err:
	crypto_unregister_alg(&ecb_aes_alg);
ecb_aes_err:
	crypto_unregister_alg(&aes_alg);
aes_err:
	printk(KERN_ERR PFX "VIA PadLock AES initialization failed.\n");
	goto out;
}
static void __exit padlock_fini(void)
{
	crypto_unregister_alg(&cbc_aes_alg);
	crypto_unregister_alg(&ecb_aes_alg);
	crypto_unregister_alg(&aes_alg);
}
module_init(padlock_init);
module_exit(padlock_fini);

MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michal Ludvig");