1 /* $OpenBSD: eng_padlock.c,v 1.15 2016/11/04 13:56:05 miod Exp $ */
3 * Support for VIA PadLock Advanced Cryptography Engine (ACE)
4 * Written by Michal Ludvig <michal@logix.cz>
5 * http://www.logix.cz/michal
7 * Big thanks to Andy Polyakov for a help with optimization,
8 * assembler fixes, port to MS Windows and a lot of other
9 * valuable work on this engine!
12 /* ====================================================================
13 * Copyright (c) 1999-2001 The OpenSSL Project. All rights reserved.
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in
24 * the documentation and/or other materials provided with the
27 * 3. All advertising materials mentioning features or use of this
28 * software must display the following acknowledgment:
29 * "This product includes software developed by the OpenSSL Project
30 * for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
32 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
33 * endorse or promote products derived from this software without
34 * prior written permission. For written permission, please contact
35 * licensing@OpenSSL.org.
37 * 5. Products derived from this software may not be called "OpenSSL"
38 * nor may "OpenSSL" appear in their names without prior written
39 * permission of the OpenSSL Project.
41 * 6. Redistributions of any form whatsoever must retain the following
43 * "This product includes software developed by the OpenSSL Project
44 * for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
46 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
47 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
49 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
50 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
51 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
52 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
53 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
55 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
56 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
57 * OF THE POSSIBILITY OF SUCH DAMAGE.
58 * ====================================================================
60 * This product includes cryptographic software written by Eric Young
61 * (eay@cryptsoft.com). This product includes software written by Tim
62 * Hudson (tjh@cryptsoft.com).
69 #include <openssl/opensslconf.h>
71 #include <openssl/crypto.h>
72 #include <openssl/dso.h>
73 #include <openssl/engine.h>
74 #include <openssl/evp.h>
75 #ifndef OPENSSL_NO_AES
76 #include <openssl/aes.h>
78 #include <openssl/err.h>
81 #ifndef OPENSSL_NO_HW_PADLOCK
83 /* Attempt to have a single source for both 0.9.7 and 0.9.8 :-) */
84 #if (OPENSSL_VERSION_NUMBER >= 0x00908000L)
85 # ifndef OPENSSL_NO_DYNAMIC_ENGINE
86 # define DYNAMIC_ENGINE
88 #elif (OPENSSL_VERSION_NUMBER >= 0x00907000L)
89 # ifdef ENGINE_DYNAMIC_SUPPORT
90 # define DYNAMIC_ENGINE
93 # error "Only OpenSSL >= 0.9.7 is supported"
96 /* VIA PadLock AES is available *ONLY* on some x86 CPUs.
97 Not only does it not exist elsewhere, it
98 cannot even be compiled on other platforms!
100 In addition, because of the heavy use of inline assembler,
101 compiler choice is limited to GCC and Microsoft C. */
102 #undef COMPILE_HW_PADLOCK
103 #if !defined(OPENSSL_NO_INLINE_ASM)
104 # if (defined(__GNUC__) && (defined(__i386__) || defined(__i386)))
105 # define COMPILE_HW_PADLOCK
109 #ifdef OPENSSL_NO_DYNAMIC_ENGINE
110 #ifdef COMPILE_HW_PADLOCK
111 static ENGINE
*ENGINE_padlock (void);
114 void ENGINE_load_padlock (void)
116 /* On non-x86 CPUs it just returns. */
117 #ifdef COMPILE_HW_PADLOCK
118 ENGINE
*toadd
= ENGINE_padlock ();
129 #ifdef COMPILE_HW_PADLOCK
130 /* We do these includes here to avoid header problems on platforms that
131 do not have the VIA padlock anyway... */
133 #if defined(__GNUC__)
135 # define alloca(s) __builtin_alloca(s)
139 /* Function for ENGINE detection and control */
140 static int padlock_available(void);
141 static int padlock_init(ENGINE
*e
);
144 static RAND_METHOD padlock_rand
;
147 #ifndef OPENSSL_NO_AES
148 static int padlock_ciphers(ENGINE
*e
, const EVP_CIPHER
**cipher
, const int **nids
, int nid
);
152 static const char *padlock_id
= "padlock"; /* engine identifier handed to ENGINE_set_id() */
153 static char padlock_name
[100]; /* human-readable name; filled by snprintf() in padlock_bind_helper() */
155 /* Available features */
156 static int padlock_use_ace
= 0; /* Advanced Cryptography Engine present; set by padlock_available() */
157 static int padlock_use_rng
= 0; /* hardware Random Number Generator present; set by padlock_available() */
158 #ifndef OPENSSL_NO_AES
159 static int padlock_aes_align_required
= 1;
162 /* ===== Engine "management" functions ===== */
164 /* Prepare the ENGINE structure for registration */
166 padlock_bind_helper(ENGINE
*e
)
168 /* Check available features */
172 * RNG is currently disabled for reasons discussed in commentary just
173 * before padlock_rand_bytes function.
177 /* Generate a nice engine name with available features */
178 (void) snprintf(padlock_name
, sizeof(padlock_name
),
179 "VIA PadLock (%s, %s)",
180 padlock_use_rng
? "RNG" : "no-RNG",
181 padlock_use_ace
? "ACE" : "no-ACE");
183 /* Register everything or return with an error */
184 if (!ENGINE_set_id(e
, padlock_id
) ||
185 !ENGINE_set_name(e
, padlock_name
) ||
186 !ENGINE_set_init_function(e
, padlock_init
) ||
187 #ifndef OPENSSL_NO_AES
188 (padlock_use_ace
&& !ENGINE_set_ciphers (e
, padlock_ciphers
)) ||
190 (padlock_use_rng
&& !ENGINE_set_RAND (e
, &padlock_rand
))) {
194 /* Everything looks good */
198 #ifdef OPENSSL_NO_DYNAMIC_ENGINE
204 ENGINE
*eng
= ENGINE_new();
210 if (!padlock_bind_helper(eng
)) {
220 /* Check availability of the engine */
222 padlock_init(ENGINE
*e
)
224 return (padlock_use_rng
|| padlock_use_ace
);
227 /* This stuff is needed if this ENGINE is being compiled into a self-contained
230 #ifdef DYNAMIC_ENGINE
232 padlock_bind_fn(ENGINE
*e
, const char *id
)
234 if (id
&& (strcmp(id
, padlock_id
) != 0)) {
238 if (!padlock_bind_helper(e
)) {
245 IMPLEMENT_DYNAMIC_CHECK_FN()
246 IMPLEMENT_DYNAMIC_BIND_FN (padlock_bind_fn
)
247 #endif /* DYNAMIC_ENGINE */
249 /* ===== Here comes the "real" engine ===== */
251 #ifndef OPENSSL_NO_AES
252 /* Some AES-related constants */
253 #define AES_BLOCK_SIZE 16 /* AES block size in bytes (same for all key sizes) */
254 #define AES_KEY_SIZE_128 16 /* 128-bit key, in bytes */
255 #define AES_KEY_SIZE_192 24 /* 192-bit key, in bytes */
256 #define AES_KEY_SIZE_256 32 /* 256-bit key, in bytes */
258 /* Here we store the status information relevant to the
261 * Inline assembler in PADLOCK_XCRYPT_ASM()
262 * depends on the order of items in this structure.
263 * Don't blindly modify, reorder, etc!
265 struct padlock_cipher_data
{
266 unsigned char iv
[AES_BLOCK_SIZE
]; /* Initialization vector */
271 int dgst
: 1; /* n/a in C3 */
272 int align
: 1; /* n/a in C3 */
273 int ciphr
: 1; /* n/a in C3 */
274 unsigned int keygen
: 1;
276 unsigned int encdec
: 1;
279 } cword
; /* Control word */
280 AES_KEY ks
; /* Encryption key */
284 * Essentially this variable belongs in thread local storage.
285 * Having this variable global on the other hand can only cause
286 * few bogus key reloads [if any at all on single-CPU system],
287 * so we accept the penalty...
289 static volatile struct padlock_cipher_data
*padlock_saved_context
;
293 * =======================================================
294 * Inline assembler section(s).
295 * =======================================================
296 * Order of arguments is chosen to facilitate Windows port
297 * using __fastcall calling convention. If you wish to add
298 * more routines, keep in mind that first __fastcall
299 * argument is passed in %ecx and second - in %edx.
300 * =======================================================
302 #if defined(__GNUC__) && __GNUC__>=2
304 * As for excessive "push %ebx"/"pop %ebx" found all over.
305 * When generating position-independent code GCC won't let
306 * us use "b" in assembler templates nor even respect "ebx"
307 * in "clobber description." Therefore the trouble...
310 /* Helper function - check if a CPUID instruction
311 is available on this CPU */
313 padlock_insn_cpuid_available(void)
317 /* We're checking if the bit #21 of EFLAGS
318 can be toggled. If yes = CPUID is available. */
322 "xorl $0x200000, %%eax\n"
323 "movl %%eax, %%ecx\n"
324 "andl $0x200000, %%ecx\n"
329 "andl $0x200000, %%eax\n"
330 "xorl %%eax, %%ecx\n"
332 : "=r" (result
) : : "eax", "ecx");
334 return (result
== 0);
337 /* Load supported features of the CPU to see if
338 the PadLock is available. */
340 padlock_available(void)
342 char vendor_string
[16];
343 unsigned int eax
, edx
;
345 /* First check if the CPUID instruction is available at all... */
346 if (! padlock_insn_cpuid_available())
349 /* Are we running on the Centaur (VIA) CPU? */
351 vendor_string
[12] = 0;
355 "movl %%ebx,(%%edi)\n"
356 "movl %%edx,4(%%edi)\n"
357 "movl %%ecx,8(%%edi)\n"
359 : "+a"(eax
) : "D"(vendor_string
) : "ecx", "edx");
360 if (strcmp(vendor_string
, "CentaurHauls") != 0)
363 /* Check for Centaur Extended Feature Flags presence */
365 asm volatile ("pushl %%ebx; cpuid; popl %%ebx"
366 : "+a"(eax
) : : "ecx", "edx");
367 if (eax
< 0xC0000001)
370 /* Read the Centaur Extended Feature Flags */
372 asm volatile ("pushl %%ebx; cpuid; popl %%ebx"
373 : "+a"(eax
), "=d"(edx
) : : "ecx");
375 /* Fill up some flags */
376 padlock_use_ace
= ((edx
& (0x3 << 6)) == (0x3 << 6));
377 padlock_use_rng
= ((edx
& (0x3 << 2)) == (0x3 << 2));
379 return padlock_use_ace
+ padlock_use_rng
;
382 #ifndef OPENSSL_NO_AES
383 /* Our own htonl()/ntohl() */
385 padlock_bswapl(AES_KEY
*ks
)
387 size_t i
= sizeof(ks
->rd_key
)/sizeof(ks
->rd_key
[0]);
388 unsigned int *key
= ks
->rd_key
;
391 asm volatile ("bswapl %0" : "+r"(*key
));
397 /* Force key reload from memory to the CPU microcode.
398 Loading EFLAGS from the stack clears EFLAGS[30]
399 which does the trick. */
401 padlock_reload_key(void)
403 asm volatile ("pushfl; popfl");
406 #ifndef OPENSSL_NO_AES
408 * This is heuristic key context tracing. At first one
409 * believes that one should use atomic swap instructions,
410 * but it's not actually necessary. Point is that if
411 * padlock_saved_context was changed by another thread
412 * after we've read it and before we compare it with cdata,
413 * our key *shall* be reloaded upon thread context switch
414 * and we are therefore set in either case...
417 padlock_verify_context(struct padlock_cipher_data
*cdata
)
429 :"+m"(padlock_saved_context
)
430 : "r"(padlock_saved_context
), "r"(cdata
) : "cc");
433 /* Template for padlock_xcrypt_* modes */
435 * The offsets used with 'leal' instructions
436 * describe items of the 'padlock_cipher_data'
439 #define PADLOCK_XCRYPT_ASM(name,rep_xcrypt) \
440 static inline void *name(size_t cnt, \
441 struct padlock_cipher_data *cdata, \
442 void *out, const void *inp) \
444 asm volatile ( "pushl %%ebx\n" \
445 " leal 16(%0),%%edx\n" \
446 " leal 32(%0),%%ebx\n" \
449 : "=a"(iv), "=c"(cnt), "=D"(out), "=S"(inp) \
450 : "0"(cdata), "1"(cnt), "2"(out), "3"(inp) \
451 : "edx", "cc", "memory"); \
455 /* Generate all functions with appropriate opcodes */
456 PADLOCK_XCRYPT_ASM(padlock_xcrypt_ecb
, ".byte 0xf3,0x0f,0xa7,0xc8") /* rep xcryptecb */
457 PADLOCK_XCRYPT_ASM(padlock_xcrypt_cbc
, ".byte 0xf3,0x0f,0xa7,0xd0") /* rep xcryptcbc */
458 PADLOCK_XCRYPT_ASM(padlock_xcrypt_cfb
, ".byte 0xf3,0x0f,0xa7,0xe0") /* rep xcryptcfb */
459 PADLOCK_XCRYPT_ASM(padlock_xcrypt_ofb
, ".byte 0xf3,0x0f,0xa7,0xe8") /* rep xcryptofb */
462 /* The RNG call itself */
463 static inline unsigned int
464 padlock_xstore(void *addr
, unsigned int edx_in
)
466 unsigned int eax_out
;
468 asm volatile (".byte 0x0f,0xa7,0xc0" /* xstore */
469 : "=a"(eax_out
),"=m"(*(unsigned *)addr
)
470 : "D"(addr
), "d" (edx_in
)
476 /* Why not inline 'rep movsd'? I failed to find information on what
477 * value in Direction Flag one can expect and consequently have to
478 * apply "better-safe-than-sorry" approach and assume "undefined."
479 * I could explicitly clear it and restore the original value upon
480 * return from padlock_aes_cipher, but it's presumably too much
481 * trouble for too little gain...
483 * In case you wonder 'rep xcrypt*' instructions above are *not*
484 * affected by the Direction Flag and pointers advance toward
485 * larger addresses unconditionally.
487 static inline unsigned char *
488 padlock_memcpy(void *dst
, const void *src
, size_t n
)
501 /* ===== AES encryption/decryption ===== */
502 #ifndef OPENSSL_NO_AES
504 #if defined(NID_aes_128_cfb128) && ! defined (NID_aes_128_cfb)
505 #define NID_aes_128_cfb NID_aes_128_cfb128
508 #if defined(NID_aes_128_ofb128) && ! defined (NID_aes_128_ofb)
509 #define NID_aes_128_ofb NID_aes_128_ofb128
512 #if defined(NID_aes_192_cfb128) && ! defined (NID_aes_192_cfb)
513 #define NID_aes_192_cfb NID_aes_192_cfb128
516 #if defined(NID_aes_192_ofb128) && ! defined (NID_aes_192_ofb)
517 #define NID_aes_192_ofb NID_aes_192_ofb128
520 #if defined(NID_aes_256_cfb128) && ! defined (NID_aes_256_cfb)
521 #define NID_aes_256_cfb NID_aes_256_cfb128
524 #if defined(NID_aes_256_ofb128) && ! defined (NID_aes_256_ofb)
525 #define NID_aes_256_ofb NID_aes_256_ofb128
528 /* List of supported ciphers. */
529 static int padlock_cipher_nids
[] = {
/* Number of entries in the padlock_cipher_nids table. */
545 static int padlock_cipher_nids_num
= (sizeof(padlock_cipher_nids
)/
546 sizeof(padlock_cipher_nids
[0]));
548 /* Function prototypes ... */
549 static int padlock_aes_init_key(EVP_CIPHER_CTX
*ctx
, const unsigned char *key
,
550 const unsigned char *iv
, int enc
);
551 static int padlock_aes_cipher(EVP_CIPHER_CTX
*ctx
, unsigned char *out
,
552 const unsigned char *in
, size_t nbytes
);
/* Round `ptr` up to the next 16-byte boundary (no-op when already
   aligned).  The PadLock xcrypt instructions work on the
   padlock_cipher_data structure, which must be 16-byte aligned;
   ALIGNED_CIPHER_DATA applies the rounding to ctx->cipher_data,
   which is over-allocated by 16 bytes for that purpose. */
554 #define NEAREST_ALIGNED(ptr) ( (unsigned char *)(ptr) + \
555 ( (0x10 - ((size_t)(ptr) & 0x0F)) & 0x0F ) )
556 #define ALIGNED_CIPHER_DATA(ctx) ((struct padlock_cipher_data *)\
557 NEAREST_ALIGNED(ctx->cipher_data))
/* Per-mode EVP block sizes used by DECLARE_AES_EVP below. */
559 #define EVP_CIPHER_block_size_ECB AES_BLOCK_SIZE
560 #define EVP_CIPHER_block_size_CBC AES_BLOCK_SIZE
561 #define EVP_CIPHER_block_size_OFB 1 /* byte-oriented mode */
562 #define EVP_CIPHER_block_size_CFB 1 /* byte-oriented mode */
564 /* Declaring so many ciphers by hand would be a pain.
565 Instead introduce a bit of preprocessor magic :-) */
566 #define DECLARE_AES_EVP(ksize,lmode,umode) \
567 static const EVP_CIPHER padlock_aes_##ksize##_##lmode = { \
568 NID_aes_##ksize##_##lmode, \
569 EVP_CIPHER_block_size_##umode, \
570 AES_KEY_SIZE_##ksize, \
572 0 | EVP_CIPH_##umode##_MODE, \
573 padlock_aes_init_key, \
574 padlock_aes_cipher, \
576 sizeof(struct padlock_cipher_data) + 16, \
577 EVP_CIPHER_set_asn1_iv, \
578 EVP_CIPHER_get_asn1_iv, \
583 DECLARE_AES_EVP(128, ecb
, ECB
);
584 DECLARE_AES_EVP(128, cbc
, CBC
);
585 DECLARE_AES_EVP(128, cfb
, CFB
);
586 DECLARE_AES_EVP(128, ofb
, OFB
);
588 DECLARE_AES_EVP(192, ecb
, ECB
);
589 DECLARE_AES_EVP(192, cbc
, CBC
);
590 DECLARE_AES_EVP(192, cfb
, CFB
);
591 DECLARE_AES_EVP(192, ofb
, OFB
);
593 DECLARE_AES_EVP(256, ecb
, ECB
);
594 DECLARE_AES_EVP(256, cbc
, CBC
);
595 DECLARE_AES_EVP(256, cfb
, CFB
);
596 DECLARE_AES_EVP(256, ofb
, OFB
);
599 padlock_ciphers(ENGINE
*e
, const EVP_CIPHER
**cipher
, const int **nids
, int nid
)
601 /* No specific cipher => return a list of supported nids ... */
603 *nids
= padlock_cipher_nids
;
604 return padlock_cipher_nids_num
;
607 /* ... or the requested "cipher" otherwise */
609 case NID_aes_128_ecb
:
610 *cipher
= &padlock_aes_128_ecb
;
612 case NID_aes_128_cbc
:
613 *cipher
= &padlock_aes_128_cbc
;
615 case NID_aes_128_cfb
:
616 *cipher
= &padlock_aes_128_cfb
;
618 case NID_aes_128_ofb
:
619 *cipher
= &padlock_aes_128_ofb
;
621 case NID_aes_192_ecb
:
622 *cipher
= &padlock_aes_192_ecb
;
624 case NID_aes_192_cbc
:
625 *cipher
= &padlock_aes_192_cbc
;
627 case NID_aes_192_cfb
:
628 *cipher
= &padlock_aes_192_cfb
;
630 case NID_aes_192_ofb
:
631 *cipher
= &padlock_aes_192_ofb
;
633 case NID_aes_256_ecb
:
634 *cipher
= &padlock_aes_256_ecb
;
636 case NID_aes_256_cbc
:
637 *cipher
= &padlock_aes_256_cbc
;
639 case NID_aes_256_cfb
:
640 *cipher
= &padlock_aes_256_cfb
;
642 case NID_aes_256_ofb
:
643 *cipher
= &padlock_aes_256_ofb
;
646 /* Sorry, we don't support this NID */
654 /* Prepare the encryption key for PadLock usage */
656 padlock_aes_init_key (EVP_CIPHER_CTX
*ctx
, const unsigned char *key
,
657 const unsigned char *iv
, int enc
)
659 struct padlock_cipher_data
*cdata
;
660 int key_len
= EVP_CIPHER_CTX_key_length(ctx
) * 8;
663 return 0; /* ERROR */
665 cdata
= ALIGNED_CIPHER_DATA(ctx
);
666 memset(cdata
, 0, sizeof(struct padlock_cipher_data
));
668 /* Prepare Control word. */
669 if (EVP_CIPHER_CTX_mode(ctx
) == EVP_CIPH_OFB_MODE
)
670 cdata
->cword
.b
.encdec
= 0;
672 cdata
->cword
.b
.encdec
= (ctx
->encrypt
== 0);
673 cdata
->cword
.b
.rounds
= 10 + (key_len
- 128) / 32;
674 cdata
->cword
.b
.ksize
= (key_len
- 128) / 64;
678 /* PadLock can generate an extended key for
679 AES128 in hardware */
680 memcpy(cdata
->ks
.rd_key
, key
, AES_KEY_SIZE_128
);
681 cdata
->cword
.b
.keygen
= 0;
686 /* Generate an extended AES key in software.
687 Needed for AES192/AES256 */
688 /* Well, the above applies to Stepping 8 CPUs
689 and is listed as hardware errata. They most
690 likely will fix it at some point and then
691 a check for stepping would be due here. */
692 if (EVP_CIPHER_CTX_mode(ctx
) == EVP_CIPH_CFB_MODE
||
693 EVP_CIPHER_CTX_mode(ctx
) == EVP_CIPH_OFB_MODE
||
695 AES_set_encrypt_key(key
, key_len
, &cdata
->ks
);
697 AES_set_decrypt_key(key
, key_len
, &cdata
->ks
);
699 /* OpenSSL C functions use byte-swapped extended key. */
700 padlock_bswapl(&cdata
->ks
);
702 cdata
->cword
.b
.keygen
= 1;
711 * This is done to cover for cases when user reuses the
712 * context for new key. The catch is that if we don't do
713 * this, padlock_aes_cipher might proceed with old key...
715 padlock_reload_key ();
721 * Simplified version of padlock_aes_cipher() used when
722 * 1) both input and output buffers are at aligned addresses.
724 * 2) running on a newer CPU that doesn't require aligned buffers.
727 padlock_aes_cipher_omnivorous(EVP_CIPHER_CTX
*ctx
, unsigned char *out_arg
,
728 const unsigned char *in_arg
, size_t nbytes
)
730 struct padlock_cipher_data
*cdata
;
733 cdata
= ALIGNED_CIPHER_DATA(ctx
);
734 padlock_verify_context(cdata
);
736 switch (EVP_CIPHER_CTX_mode(ctx
)) {
737 case EVP_CIPH_ECB_MODE
:
738 padlock_xcrypt_ecb(nbytes
/ AES_BLOCK_SIZE
, cdata
,
742 case EVP_CIPH_CBC_MODE
:
743 memcpy(cdata
->iv
, ctx
->iv
, AES_BLOCK_SIZE
);
744 iv
= padlock_xcrypt_cbc(nbytes
/ AES_BLOCK_SIZE
, cdata
,
746 memcpy(ctx
->iv
, iv
, AES_BLOCK_SIZE
);
749 case EVP_CIPH_CFB_MODE
:
750 memcpy(cdata
->iv
, ctx
->iv
, AES_BLOCK_SIZE
);
751 iv
= padlock_xcrypt_cfb(nbytes
/ AES_BLOCK_SIZE
, cdata
,
753 memcpy(ctx
->iv
, iv
, AES_BLOCK_SIZE
);
756 case EVP_CIPH_OFB_MODE
:
757 memcpy(cdata
->iv
, ctx
->iv
, AES_BLOCK_SIZE
);
758 padlock_xcrypt_ofb(nbytes
/ AES_BLOCK_SIZE
, cdata
,
760 memcpy(ctx
->iv
, cdata
->iv
, AES_BLOCK_SIZE
);
767 memset(cdata
->iv
, 0, AES_BLOCK_SIZE
);
772 #ifndef PADLOCK_CHUNK
773 # define PADLOCK_CHUNK 512 /* Must be a power of 2 larger than 16 */
775 #if PADLOCK_CHUNK<16 || PADLOCK_CHUNK&(PADLOCK_CHUNK-1)
776 # error "insane PADLOCK_CHUNK..."
779 /* Re-align the arguments to 16-Bytes boundaries and run the
780 encryption function itself. This function is not AES-specific. */
782 padlock_aes_cipher(EVP_CIPHER_CTX
*ctx
, unsigned char *out_arg
,
783 const unsigned char *in_arg
, size_t nbytes
)
785 struct padlock_cipher_data
*cdata
;
789 int inp_misaligned
, out_misaligned
, realign_in_loop
;
790 size_t chunk
, allocated
= 0;
792 /* ctx->num is maintained in byte-oriented modes,
793 such as CFB and OFB... */
794 if ((chunk
= ctx
->num
)) {
795 /* borrow chunk variable */
796 unsigned char *ivp
= ctx
->iv
;
798 switch (EVP_CIPHER_CTX_mode(ctx
)) {
799 case EVP_CIPH_CFB_MODE
:
800 if (chunk
>= AES_BLOCK_SIZE
)
801 return 0; /* bogus value */
804 while (chunk
< AES_BLOCK_SIZE
&& nbytes
!= 0) {
805 ivp
[chunk
] = *(out_arg
++) = *(in_arg
++) ^ ivp
[chunk
];
809 while (chunk
< AES_BLOCK_SIZE
&& nbytes
!= 0) {
810 unsigned char c
= *(in_arg
++);
811 *(out_arg
++) = c
^ ivp
[chunk
];
812 ivp
[chunk
++] = c
, nbytes
--;
815 ctx
->num
= chunk
% AES_BLOCK_SIZE
;
817 case EVP_CIPH_OFB_MODE
:
818 if (chunk
>= AES_BLOCK_SIZE
)
819 return 0; /* bogus value */
821 while (chunk
< AES_BLOCK_SIZE
&& nbytes
!= 0) {
822 *(out_arg
++) = *(in_arg
++) ^ ivp
[chunk
];
826 ctx
->num
= chunk
% AES_BLOCK_SIZE
;
834 if (nbytes
% AES_BLOCK_SIZE
)
835 return 0; /* are we expected to do tail processing? */
837 /* nbytes is always multiple of AES_BLOCK_SIZE in ECB and CBC
838 modes and arbitrary value in byte-oriented modes, such as
842 /* VIA promises CPUs that won't require alignment in the future.
843 For now padlock_aes_align_required is initialized to 1 and
844 the condition is never met... */
845 /* C7 core is capable to manage unaligned input in non-ECB[!]
846 mode, but performance penalties appear to be approximately
847 same as for software alignment below or ~3x. They promise to
848 improve it in the future, but for now we can just as well
849 pretend that it can only handle aligned input... */
850 if (!padlock_aes_align_required
&& (nbytes
% AES_BLOCK_SIZE
) == 0)
851 return padlock_aes_cipher_omnivorous(ctx
, out_arg
, in_arg
,
854 inp_misaligned
= (((size_t)in_arg
) & 0x0F);
855 out_misaligned
= (((size_t)out_arg
) & 0x0F);
857 /* Note that even if output is aligned and input not,
858 * I still prefer to loop instead of copy the whole
859 * input and then encrypt in one stroke. This is done
860 * in order to improve L1 cache utilization... */
861 realign_in_loop
= out_misaligned
|inp_misaligned
;
863 if (!realign_in_loop
&& (nbytes
% AES_BLOCK_SIZE
) == 0)
864 return padlock_aes_cipher_omnivorous(ctx
, out_arg
, in_arg
,
867 /* this takes one "if" out of the loops */
869 chunk
%= PADLOCK_CHUNK
;
871 chunk
= PADLOCK_CHUNK
;
873 if (out_misaligned
) {
874 /* optimize for small input */
875 allocated
= (chunk
< nbytes
? PADLOCK_CHUNK
: nbytes
);
876 out
= alloca(0x10 + allocated
);
877 out
= NEAREST_ALIGNED(out
);
881 cdata
= ALIGNED_CIPHER_DATA(ctx
);
882 padlock_verify_context(cdata
);
884 switch (EVP_CIPHER_CTX_mode(ctx
)) {
885 case EVP_CIPH_ECB_MODE
:
888 inp
= padlock_memcpy(out
, in_arg
, chunk
);
893 padlock_xcrypt_ecb(chunk
/ AES_BLOCK_SIZE
, cdata
,
897 out_arg
= padlock_memcpy(out_arg
, out
, chunk
) +
900 out
= out_arg
+= chunk
;
903 chunk
= PADLOCK_CHUNK
;
907 case EVP_CIPH_CBC_MODE
:
908 memcpy(cdata
->iv
, ctx
->iv
, AES_BLOCK_SIZE
);
912 memcpy(cdata
->iv
, iv
, AES_BLOCK_SIZE
);
913 chunk
= PADLOCK_CHUNK
;
914 cbc_shortcut
: /* optimize for small input */
916 inp
= padlock_memcpy(out
, in_arg
, chunk
);
921 iv
= padlock_xcrypt_cbc(chunk
/ AES_BLOCK_SIZE
, cdata
,
925 out_arg
= padlock_memcpy(out_arg
, out
, chunk
) +
928 out
= out_arg
+= chunk
;
929 } while (nbytes
-= chunk
);
930 memcpy(ctx
->iv
, iv
, AES_BLOCK_SIZE
);
933 case EVP_CIPH_CFB_MODE
:
934 memcpy (iv
= cdata
->iv
, ctx
->iv
, AES_BLOCK_SIZE
);
935 chunk
&= ~(AES_BLOCK_SIZE
- 1);
942 memcpy(cdata
->iv
, iv
, AES_BLOCK_SIZE
);
943 chunk
= PADLOCK_CHUNK
;
944 cfb_shortcut
: /* optimize for small input */
946 inp
= padlock_memcpy(out
, in_arg
, chunk
);
951 iv
= padlock_xcrypt_cfb(chunk
/ AES_BLOCK_SIZE
, cdata
,
955 out_arg
= padlock_memcpy(out_arg
, out
, chunk
) +
958 out
= out_arg
+= chunk
;
961 } while (nbytes
>= AES_BLOCK_SIZE
);
965 unsigned char *ivp
= cdata
->iv
;
968 memcpy(ivp
, iv
, AES_BLOCK_SIZE
);
972 if (cdata
->cword
.b
.encdec
) {
973 cdata
->cword
.b
.encdec
= 0;
974 padlock_reload_key();
975 padlock_xcrypt_ecb(1, cdata
, ivp
, ivp
);
976 cdata
->cword
.b
.encdec
= 1;
977 padlock_reload_key();
979 unsigned char c
= *(in_arg
++);
980 *(out_arg
++) = c
^ *ivp
;
981 *(ivp
++) = c
, nbytes
--;
984 padlock_reload_key();
985 padlock_xcrypt_ecb(1, cdata
, ivp
, ivp
);
986 padlock_reload_key();
988 *ivp
= *(out_arg
++) = *(in_arg
++) ^ *ivp
;
994 memcpy(ctx
->iv
, iv
, AES_BLOCK_SIZE
);
997 case EVP_CIPH_OFB_MODE
:
998 memcpy(cdata
->iv
, ctx
->iv
, AES_BLOCK_SIZE
);
999 chunk
&= ~(AES_BLOCK_SIZE
- 1);
1002 inp
= padlock_memcpy(out
, in_arg
, chunk
);
1007 padlock_xcrypt_ofb(chunk
/ AES_BLOCK_SIZE
, cdata
,
1011 out_arg
= padlock_memcpy(out_arg
, out
, chunk
) +
1014 out
= out_arg
+= chunk
;
1017 chunk
= PADLOCK_CHUNK
;
1018 } while (nbytes
>= AES_BLOCK_SIZE
);
1021 unsigned char *ivp
= cdata
->iv
;
1024 padlock_reload_key(); /* empirically found */
1025 padlock_xcrypt_ecb(1, cdata
, ivp
, ivp
);
1026 padlock_reload_key(); /* empirically found */
1028 *(out_arg
++) = *(in_arg
++) ^ *ivp
;
1033 memcpy(ctx
->iv
, cdata
->iv
, AES_BLOCK_SIZE
);
1040 /* Clean the realign buffer if it was used */
1041 if (out_misaligned
) {
1042 volatile unsigned long *p
= (void *)out
;
1043 size_t n
= allocated
/sizeof(*p
);
1048 memset(cdata
->iv
, 0, AES_BLOCK_SIZE
);
1053 #endif /* OPENSSL_NO_AES */
1055 /* ===== Random Number Generator ===== */
1057 * This code is not engaged. The reason is that it does not comply
1058 * with recommendations for VIA RNG usage for secure applications
1059 * (posted at http://www.via.com.tw/en/viac3/c3.jsp) nor does it
1060 * provide meaningful error control...
1062 /* Wrapper that provides an interface between the API and
1063 the raw PadLock RNG */
1065 padlock_rand_bytes(unsigned char *output
, int count
)
1067 unsigned int eax
, buf
;
1069 while (count
>= 8) {
1070 eax
= padlock_xstore(output
, 0);
1071 if (!(eax
& (1 << 6)))
1072 return 0; /* RNG disabled */
1073 /* this ---vv--- covers DC bias, Raw Bits and String Filter */
1074 if (eax
& (0x1F << 10))
1076 if ((eax
& 0x1F) == 0)
1077 continue; /* no data, retry... */
1078 if ((eax
& 0x1F) != 8)
1079 return 0; /* fatal failure... */
1084 eax
= padlock_xstore(&buf
, 3);
1085 if (!(eax
& (1 << 6)))
1086 return 0; /* RNG disabled */
1087 /* this ---vv--- covers DC bias, Raw Bits and String Filter */
1088 if (eax
& (0x1F << 10))
1090 if ((eax
& 0x1F) == 0)
1091 continue; /* no data, retry... */
1092 if ((eax
& 0x1F) != 1)
1093 return 0; /* fatal failure... */
1094 *output
++ = (unsigned char)buf
;
1097 *(volatile unsigned int *)&buf
= 0;
1102 /* Dummy but necessary function */
1104 padlock_rand_status(void)
1109 /* Prepare structure for registration */
1110 static RAND_METHOD padlock_rand
= {
1111 .bytes
= padlock_rand_bytes
,
1112 .pseudorand
= padlock_rand_bytes
,
1113 .status
= padlock_rand_status
1116 #else /* !COMPILE_HW_PADLOCK */
1117 #ifndef OPENSSL_NO_DYNAMIC_ENGINE
1118 extern int bind_engine(ENGINE
*e
, const char *id
, const dynamic_fns
*fns
);
1120 bind_engine(ENGINE
*e
, const char *id
, const dynamic_fns
*fns
) {
1123 IMPLEMENT_DYNAMIC_CHECK_FN()
1125 #endif /* COMPILE_HW_PADLOCK */
1127 #endif /* !OPENSSL_NO_HW_PADLOCK */
1128 #endif /* !OPENSSL_NO_HW */