/*
 * Copyright (c) 2013, 2014 Kenneth MacKay. All rights reserved.
 * Copyright (c) 2019 Vitaly Chikunov <vt@altlinux.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <crypto/ecc_curve.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/swab.h>
#include <linux/fips.h>
#include <crypto/ecdh.h>
#include <crypto/rng.h>
#include <crypto/internal/ecc.h>
#include <linux/unaligned.h>
#include <linux/ratelimit.h>

#include "ecc_curve_defs.h"

typedef struct {
	u64 m_low;
	u64 m_high;
} uint128_t;

/* Returns curve25519 curve param */
const struct ecc_curve *ecc_get_curve25519(void)
{
	return &ecc_25519;
}
EXPORT_SYMBOL(ecc_get_curve25519);

const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
{
	switch (curve_id) {
	/* In FIPS mode only allow P256 and higher */
	case ECC_CURVE_NIST_P192:
		return fips_enabled ? NULL : &nist_p192;
	case ECC_CURVE_NIST_P256:
		return &nist_p256;
	case ECC_CURVE_NIST_P384:
		return &nist_p384;
	case ECC_CURVE_NIST_P521:
		return &nist_p521;
	default:
		return NULL;
	}
}
EXPORT_SYMBOL(ecc_get_curve);

void ecc_digits_from_bytes(const u8 *in, unsigned int nbytes,
			   u64 *out, unsigned int ndigits)
{
	int diff = ndigits - DIV_ROUND_UP(nbytes, sizeof(u64));
	unsigned int o = nbytes & 7;
	__be64 msd = 0;

	/* diff > 0: not enough input bytes: set most significant digits to 0 */
	if (diff > 0) {
		ndigits -= diff;
		memset(&out[ndigits], 0, diff * sizeof(u64));
	}

	if (o) {
		memcpy((u8 *)&msd + sizeof(msd) - o, in, o);
		out[--ndigits] = be64_to_cpu(msd);
		in += o;
	}
	ecc_swap_digits(in, out, ndigits);
}
EXPORT_SYMBOL(ecc_digits_from_bytes);

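/*
 * Example (illustrative sketch only): a 24-byte big-endian P-192 value
 * "raw" becomes three little-endian u64 digits, least significant digit
 * first. Input shorter than ndigits * 8 bytes is zero-padded at the most
 * significant end:
 *
 *	u64 digits[ECC_CURVE_NIST_P192_DIGITS];
 *
 *	ecc_digits_from_bytes(raw, 24, digits, ECC_CURVE_NIST_P192_DIGITS);
 */
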
static u64 *ecc_alloc_digits_space(unsigned int ndigits)
{
	size_t len = ndigits * sizeof(u64);

	if (!len)
		return NULL;

	return kmalloc(len, GFP_KERNEL);
}

static void ecc_free_digits_space(u64 *space)
{
	kfree_sensitive(space);
}

struct ecc_point *ecc_alloc_point(unsigned int ndigits)
{
	struct ecc_point *p = kmalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;

	p->x = ecc_alloc_digits_space(ndigits);
	if (!p->x)
		goto err_alloc_x;

	p->y = ecc_alloc_digits_space(ndigits);
	if (!p->y)
		goto err_alloc_y;

	p->ndigits = ndigits;

	return p;

err_alloc_y:
	ecc_free_digits_space(p->x);
err_alloc_x:
	kfree(p);
	return NULL;
}
EXPORT_SYMBOL(ecc_alloc_point);

void ecc_free_point(struct ecc_point *p)
{
	if (!p)
		return;

	kfree_sensitive(p->x);
	kfree_sensitive(p->y);
	kfree_sensitive(p);
}
EXPORT_SYMBOL(ecc_free_point);

static void vli_clear(u64 *vli, unsigned int ndigits)
{
	int i;

	for (i = 0; i < ndigits; i++)
		vli[i] = 0;
}

/* Returns true if vli == 0, false otherwise. */
bool vli_is_zero(const u64 *vli, unsigned int ndigits)
{
	int i;

	for (i = 0; i < ndigits; i++) {
		if (vli[i])
			return false;
	}

	return true;
}
EXPORT_SYMBOL(vli_is_zero);

/* Returns nonzero if bit 'bit' of vli is set. */
static u64 vli_test_bit(const u64 *vli, unsigned int bit)
{
	return (vli[bit / 64] & ((u64)1 << (bit % 64)));
}

static bool vli_is_negative(const u64 *vli, unsigned int ndigits)
{
	return vli_test_bit(vli, ndigits * 64 - 1);
}

/* Counts the number of 64-bit "digits" in vli. */
static unsigned int vli_num_digits(const u64 *vli, unsigned int ndigits)
{
	int i;

	/* Search from the end until we find a non-zero digit.
	 * We do it in reverse because we expect that most digits will
	 * be nonzero.
	 */
	for (i = ndigits - 1; i >= 0 && vli[i] == 0; i--);

	return (i + 1);
}

/* Counts the number of bits required for vli. */
unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits)
{
	unsigned int i, num_digits;
	u64 digit;

	num_digits = vli_num_digits(vli, ndigits);
	if (num_digits == 0)
		return 0;

	digit = vli[num_digits - 1];
	for (i = 0; digit; i++)
		digit >>= 1;

	return ((num_digits - 1) * 64 + i);
}
EXPORT_SYMBOL(vli_num_bits);

/* Set dest from unaligned bit string src. */
void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits)
{
	int i;
	const u64 *from = src;

	for (i = 0; i < ndigits; i++)
		dest[i] = get_unaligned_be64(&from[ndigits - 1 - i]);
}
EXPORT_SYMBOL(vli_from_be64);

void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits)
{
	int i;
	const u64 *from = src;

	for (i = 0; i < ndigits; i++)
		dest[i] = get_unaligned_le64(&from[i]);
}
EXPORT_SYMBOL(vli_from_le64);

/* Sets dest = src. */
static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits)
{
	int i;

	for (i = 0; i < ndigits; i++)
		dest[i] = src[i];
}

/* Returns sign of left - right. */
int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
{
	int i;

	for (i = ndigits - 1; i >= 0; i--) {
		if (left[i] > right[i])
			return 1;
		else if (left[i] < right[i])
			return -1;
	}

	return 0;
}
EXPORT_SYMBOL(vli_cmp);

/* Computes result = in << shift, returning carry. Can modify in place
 * (if result == in). 0 < shift < 64.
 */
static u64 vli_lshift(u64 *result, const u64 *in, unsigned int shift,
		      unsigned int ndigits)
{
	u64 carry = 0;
	int i;

	for (i = 0; i < ndigits; i++) {
		u64 temp = in[i];

		result[i] = (temp << shift) | carry;
		carry = temp >> (64 - shift);
	}

	return carry;
}

/* Computes vli = vli >> 1. */
static void vli_rshift1(u64 *vli, unsigned int ndigits)
{
	u64 *end = vli;
	u64 carry = 0;

	vli += ndigits;
	while (vli-- > end) {
		u64 temp = *vli;
		*vli = (temp >> 1) | carry;
		carry = temp << 63;
	}
}

/* Computes result = left + right, returning carry. Can modify in place. */
static u64 vli_add(u64 *result, const u64 *left, const u64 *right,
		   unsigned int ndigits)
{
	u64 carry = 0;
	int i;

	for (i = 0; i < ndigits; i++) {
		u64 sum;

		sum = left[i] + right[i] + carry;
		if (sum != left[i])
			carry = (sum < left[i]);

		result[i] = sum;
	}

	return carry;
}

/* Computes result = left + right, returning carry. Can modify in place. */
static u64 vli_uadd(u64 *result, const u64 *left, u64 right,
		    unsigned int ndigits)
{
	u64 carry = right;
	int i;

	for (i = 0; i < ndigits; i++) {
		u64 sum;

		sum = left[i] + carry;
		if (sum != left[i])
			carry = (sum < left[i]);
		else
			carry = !!carry;

		result[i] = sum;
	}

	return carry;
}

/* Computes result = left - right, returning borrow. Can modify in place. */
u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
	    unsigned int ndigits)
{
	u64 borrow = 0;
	int i;

	for (i = 0; i < ndigits; i++) {
		u64 diff;

		diff = left[i] - right[i] - borrow;
		if (diff != left[i])
			borrow = (diff > left[i]);

		result[i] = diff;
	}

	return borrow;
}
EXPORT_SYMBOL(vli_sub);

/* Computes result = left - right, returning borrow. Can modify in place. */
static u64 vli_usub(u64 *result, const u64 *left, u64 right,
		    unsigned int ndigits)
{
	u64 borrow = right;
	int i;

	for (i = 0; i < ndigits; i++) {
		u64 diff;

		diff = left[i] - borrow;
		if (diff != left[i])
			borrow = (diff > left[i]);

		result[i] = diff;
	}

	return borrow;
}

static uint128_t mul_64_64(u64 left, u64 right)
{
	uint128_t result;
#if defined(CONFIG_ARCH_SUPPORTS_INT128)
	unsigned __int128 m = (unsigned __int128)left * right;

	result.m_low  = m;
	result.m_high = m >> 64;
#else
	u64 a0 = left & 0xffffffffull;
	u64 a1 = left >> 32;
	u64 b0 = right & 0xffffffffull;
	u64 b1 = right >> 32;

	u64 m0 = a0 * b0;
	u64 m1 = a0 * b1;
	u64 m2 = a1 * b0;
	u64 m3 = a1 * b1;

	m2 += (m0 >> 32);
	m2 += m1;

	/* Overflow */
	if (m2 < m1)
		m3 += 0x100000000ull;

	result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
	result.m_high = m3 + (m2 >> 32);
#endif
	return result;
}

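/*
 * The fallback path above is schoolbook multiplication on 32-bit halves
 * (a sketch of the identity, for reference): with left = a1*2^32 + a0 and
 * right = b1*2^32 + b0,
 *
 *	left * right = a1*b1*2^64 + (a0*b1 + a1*b0)*2^32 + a0*b0,
 *
 * where the middle terms are accumulated in m2 and their carry is folded
 * into m3 before the two 64-bit result words are assembled.
 */
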
static uint128_t add_128_128(uint128_t a, uint128_t b)
{
	uint128_t result;

	result.m_low = a.m_low + b.m_low;
	result.m_high = a.m_high + b.m_high + (result.m_low < a.m_low);

	return result;
}

static void vli_mult(u64 *result, const u64 *left, const u64 *right,
		     unsigned int ndigits)
{
	uint128_t r01 = { 0, 0 };
	u64 r2 = 0;
	unsigned int i, k;

	/* Compute each digit of result in sequence, maintaining the
	 * carries.
	 */
	for (k = 0; k < ndigits * 2 - 1; k++) {
		unsigned int min;

		if (k < ndigits)
			min = 0;
		else
			min = (k + 1) - ndigits;

		for (i = min; i <= k && i < ndigits; i++) {
			uint128_t product;

			product = mul_64_64(left[i], right[k - i]);

			r01 = add_128_128(r01, product);
			r2 += (r01.m_high < product.m_high);
		}

		result[k] = r01.m_low;
		r01.m_low = r01.m_high;
		r01.m_high = r2;
		r2 = 0;
	}

	result[ndigits * 2 - 1] = r01.m_low;
}

/* Compute product = left * right, for a small right value. */
static void vli_umult(u64 *result, const u64 *left, u32 right,
		      unsigned int ndigits)
{
	uint128_t r01 = { 0 };
	unsigned int k;

	for (k = 0; k < ndigits; k++) {
		uint128_t product;

		product = mul_64_64(left[k], right);
		r01 = add_128_128(r01, product);
		/* no carry */
		result[k] = r01.m_low;
		r01.m_low = r01.m_high;
		r01.m_high = 0;
	}
	result[k] = r01.m_low;
	for (++k; k < ndigits * 2; k++)
		result[k] = 0;
}

static void vli_square(u64 *result, const u64 *left, unsigned int ndigits)
{
	uint128_t r01 = { 0, 0 };
	u64 r2 = 0;
	int i, k;

	for (k = 0; k < ndigits * 2 - 1; k++) {
		unsigned int min;

		if (k < ndigits)
			min = 0;
		else
			min = (k + 1) - ndigits;

		for (i = min; i <= k && i <= k - i; i++) {
			uint128_t product;

			product = mul_64_64(left[i], left[k - i]);

			if (i < k - i) {
				r2 += product.m_high >> 63;
				product.m_high = (product.m_high << 1) |
						 (product.m_low >> 63);
				product.m_low <<= 1;
			}

			r01 = add_128_128(r01, product);
			r2 += (r01.m_high < product.m_high);
		}

		result[k] = r01.m_low;
		r01.m_low = r01.m_high;
		r01.m_high = r2;
		r2 = 0;
	}

	result[ndigits * 2 - 1] = r01.m_low;
}

/* Computes result = (left + right) % mod.
 * Assumes that left < mod and right < mod, result != mod.
 */
static void vli_mod_add(u64 *result, const u64 *left, const u64 *right,
			const u64 *mod, unsigned int ndigits)
{
	u64 carry;

	carry = vli_add(result, left, right, ndigits);

	/* result > mod (result = mod + remainder), so subtract mod to
	 * get remainder.
	 */
	if (carry || vli_cmp(result, mod, ndigits) >= 0)
		vli_sub(result, result, mod, ndigits);
}

/* Computes result = (left - right) % mod.
 * Assumes that left < mod and right < mod, result != mod.
 */
static void vli_mod_sub(u64 *result, const u64 *left, const u64 *right,
			const u64 *mod, unsigned int ndigits)
{
	u64 borrow = vli_sub(result, left, right, ndigits);

	/* In this case, result == -diff == (max int) - diff.
	 * Since -x % d == d - x, we can get the correct result from
	 * result + mod (with overflow).
	 */
	if (borrow)
		vli_add(result, result, mod, ndigits);
}

/*
 * Computes result = product % mod
 * for special form moduli: p = 2^k-c, for small c (note the minus sign)
 *
 * References:
 * R. Crandall, C. Pomerance. Prime Numbers: A Computational Perspective.
 * 9 Fast Algorithms for Large-Integer Arithmetic. 9.2.3 Moduli of special form
 * Algorithm 9.2.13 (Fast mod operation for special-form moduli).
 */
static void vli_mmod_special(u64 *result, const u64 *product,
			     const u64 *mod, unsigned int ndigits)
{
	u64 c = -mod[0];
	u64 t[ECC_MAX_DIGITS * 2];
	u64 r[ECC_MAX_DIGITS * 2];

	vli_set(r, product, ndigits * 2);
	while (!vli_is_zero(r + ndigits, ndigits)) {
		vli_umult(t, r + ndigits, c, ndigits);
		vli_clear(r + ndigits, ndigits);
		vli_add(r, r, t, ndigits * 2);
	}
	vli_set(t, mod, ndigits);
	vli_clear(t + ndigits, ndigits);
	while (vli_cmp(r, t, ndigits * 2) >= 0)
		vli_sub(r, r, t, ndigits * 2);
	vli_set(result, r, ndigits);
}

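/*
 * Sketch of why the loop above terminates (illustrative): for p = 2^k - c,
 * with k = 64 * ndigits, we have 2^k == c (mod p), so splitting r into
 * hi * 2^k + lo gives hi * 2^k + lo == hi * c + lo (mod p). Each iteration
 * replaces the high half of r with the much smaller hi * c, and the final
 * while loop subtracts p at most a few times.
 */
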
/*
 * Computes result = product % mod
 * for special form moduli: p = 2^{k-1}+c, for small c (note the plus sign)
 * where k-1 does not fit into qword boundary by -1 bit (such as 255).
 *
 * References (loosely based on):
 * A. Menezes, P. van Oorschot, S. Vanstone. Handbook of Applied Cryptography.
 * 14.3.4 Reduction methods for moduli of special form. Algorithm 14.47.
 * URL: http://cacr.uwaterloo.ca/hac/about/chap14.pdf
 *
 * H. Cohen, G. Frey, R. Avanzi, C. Doche, T. Lange, K. Nguyen, F. Vercauteren.
 * Handbook of Elliptic and Hyperelliptic Curve Cryptography.
 * Algorithm 10.25 Fast reduction for special form moduli
 */
static void vli_mmod_special2(u64 *result, const u64 *product,
			      const u64 *mod, unsigned int ndigits)
{
	u64 c2 = mod[0] * 2;
	u64 q[ECC_MAX_DIGITS];
	u64 r[ECC_MAX_DIGITS * 2];
	u64 m[ECC_MAX_DIGITS * 2]; /* expanded mod */
	int carry; /* last bit that doesn't fit into q */
	int i;

	vli_set(m, mod, ndigits);
	vli_clear(m + ndigits, ndigits);

	vli_set(r, product, ndigits);
	/* q and carry are top bits */
	vli_set(q, product + ndigits, ndigits);
	vli_clear(r + ndigits, ndigits);
	carry = vli_is_negative(r, ndigits);
	if (carry)
		r[ndigits - 1] &= (1ull << 63) - 1;
	for (i = 1; carry || !vli_is_zero(q, ndigits); i++) {
		u64 qc[ECC_MAX_DIGITS * 2];

		vli_umult(qc, q, c2, ndigits);
		if (carry)
			vli_uadd(qc, qc, mod[0], ndigits * 2);
		vli_set(q, qc + ndigits, ndigits);
		vli_clear(qc + ndigits, ndigits);
		carry = vli_is_negative(qc, ndigits);
		if (carry)
			qc[ndigits - 1] &= (1ull << 63) - 1;
		if (i & 1)
			vli_sub(r, r, qc, ndigits * 2);
		else
			vli_add(r, r, qc, ndigits * 2);
	}
	while (vli_is_negative(r, ndigits * 2))
		vli_add(r, r, m, ndigits * 2);
	while (vli_cmp(r, m, ndigits * 2) >= 0)
		vli_sub(r, r, m, ndigits * 2);

	vli_set(result, r, ndigits);
}

/*
 * Computes result = product % mod, where product is 2N words long.
 * Reference: Ken MacKay's micro-ecc.
 * Currently only designed to work for curve_p or curve_n.
 */
static void vli_mmod_slow(u64 *result, u64 *product, const u64 *mod,
			  unsigned int ndigits)
{
	u64 mod_m[2 * ECC_MAX_DIGITS];
	u64 tmp[2 * ECC_MAX_DIGITS];
	u64 *v[2] = { tmp, product };
	u64 carry = 0;
	unsigned int i;
	/* Shift mod so its highest set bit is at the maximum position. */
	int shift = (ndigits * 2 * 64) - vli_num_bits(mod, ndigits);
	int word_shift = shift / 64;
	int bit_shift = shift % 64;

	vli_clear(mod_m, word_shift);
	if (bit_shift > 0) {
		for (i = 0; i < ndigits; ++i) {
			mod_m[word_shift + i] = (mod[i] << bit_shift) | carry;
			carry = mod[i] >> (64 - bit_shift);
		}
	} else
		vli_set(mod_m + word_shift, mod, ndigits);

	for (i = 1; shift >= 0; --shift) {
		u64 borrow = 0;
		unsigned int j;

		for (j = 0; j < ndigits * 2; ++j) {
			u64 diff = v[i][j] - mod_m[j] - borrow;

			if (diff != v[i][j])
				borrow = (diff > v[i][j]);
			v[1 - i][j] = diff;
		}
		i = !(i ^ borrow); /* Swap the index if there was no borrow */
		vli_rshift1(mod_m, ndigits);
		mod_m[ndigits - 1] |= mod_m[ndigits] << (64 - 1);
		vli_rshift1(mod_m + ndigits, ndigits);
	}
	vli_set(result, v[i], ndigits);
}

/* Computes result = product % mod using Barrett's reduction with precomputed
 * value mu appended to the mod after ndigits, mu = (2^{2w} / mod), which has
 * length ndigits + 1, where mu * (2^w - 1) should not overflow the ndigits
 * boundary.
 *
 * Reference:
 * R. Brent, P. Zimmermann. Modern Computer Arithmetic. 2010.
 * 2.4.1 Barrett's algorithm. Algorithm 2.5.
 */
static void vli_mmod_barrett(u64 *result, u64 *product, const u64 *mod,
			     unsigned int ndigits)
{
	u64 q[ECC_MAX_DIGITS * 2];
	u64 r[ECC_MAX_DIGITS * 2];
	const u64 *mu = mod + ndigits;

	vli_mult(q, product + ndigits, mu, ndigits);
	if (mu[ndigits])
		vli_add(q + ndigits, q + ndigits, product + ndigits, ndigits);
	vli_mult(r, mod, q + ndigits, ndigits);
	vli_sub(r, product, r, ndigits * 2);
	while (!vli_is_zero(r + ndigits, ndigits) ||
	       vli_cmp(r, mod, ndigits) != -1) {
		u64 carry;

		carry = vli_sub(r, r, mod, ndigits);
		vli_usub(r + ndigits, r + ndigits, carry, ndigits);
	}
	vli_set(result, r, ndigits);
}

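/*
 * Shape of the algorithm above (illustrative): with w = ndigits * 64,
 * q = floor(floor(product / 2^w) * mu / 2^w) approximates product / mod
 * from below, so r = product - q * mod equals the remainder plus a small
 * multiple of mod; the trailing loop subtracts mod until r < mod.
 */
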
/* Computes p_result = p_product % curve_p.
 * See algorithm 5 and 6 from
 * http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf
 */
static void vli_mmod_fast_192(u64 *result, const u64 *product,
			      const u64 *curve_prime, u64 *tmp)
{
	const unsigned int ndigits = ECC_CURVE_NIST_P192_DIGITS;
	int carry;

	vli_set(result, product, ndigits);

	vli_set(tmp, &product[3], ndigits);
	carry = vli_add(result, result, tmp, ndigits);

	tmp[0] = 0;
	tmp[1] = product[3];
	tmp[2] = product[4];
	carry += vli_add(result, result, tmp, ndigits);

	tmp[0] = tmp[1] = product[5];
	tmp[2] = 0;
	carry += vli_add(result, result, tmp, ndigits);

	while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
		carry -= vli_sub(result, result, curve_prime, ndigits);
}

/* Computes result = product % curve_prime
 * from http://www.nsa.gov/ia/_files/nist-routines.pdf
 */
static void vli_mmod_fast_256(u64 *result, const u64 *product,
			      const u64 *curve_prime, u64 *tmp)
{
	int carry;
	const unsigned int ndigits = ECC_CURVE_NIST_P256_DIGITS;

	/* t */
	vli_set(result, product, ndigits);

	/* s1 */
	tmp[0] = 0;
	tmp[1] = product[5] & 0xffffffff00000000ull;
	tmp[2] = product[6];
	tmp[3] = product[7];
	carry = vli_lshift(tmp, tmp, 1, ndigits);
	carry += vli_add(result, result, tmp, ndigits);

	/* s2 */
	tmp[1] = product[6] << 32;
	tmp[2] = (product[6] >> 32) | (product[7] << 32);
	tmp[3] = product[7] >> 32;
	carry += vli_lshift(tmp, tmp, 1, ndigits);
	carry += vli_add(result, result, tmp, ndigits);

	/* s3 */
	tmp[0] = product[4];
	tmp[1] = product[5] & 0xffffffff;
	tmp[2] = 0;
	tmp[3] = product[7];
	carry += vli_add(result, result, tmp, ndigits);

	/* s4 */
	tmp[0] = (product[4] >> 32) | (product[5] << 32);
	tmp[1] = (product[5] >> 32) | (product[6] & 0xffffffff00000000ull);
	tmp[2] = product[7];
	tmp[3] = (product[6] >> 32) | (product[4] << 32);
	carry += vli_add(result, result, tmp, ndigits);

	/* d1 */
	tmp[0] = (product[5] >> 32) | (product[6] << 32);
	tmp[1] = (product[6] >> 32);
	tmp[2] = 0;
	tmp[3] = (product[4] & 0xffffffff) | (product[5] << 32);
	carry -= vli_sub(result, result, tmp, ndigits);

	/* d2 */
	tmp[0] = product[6];
	tmp[1] = product[7];
	tmp[2] = 0;
	tmp[3] = (product[4] >> 32) | (product[5] & 0xffffffff00000000ull);
	carry -= vli_sub(result, result, tmp, ndigits);

	/* d3 */
	tmp[0] = (product[6] >> 32) | (product[7] << 32);
	tmp[1] = (product[7] >> 32) | (product[4] << 32);
	tmp[2] = (product[4] >> 32) | (product[5] << 32);
	tmp[3] = (product[6] << 32);
	carry -= vli_sub(result, result, tmp, ndigits);

	/* d4 */
	tmp[0] = product[7];
	tmp[1] = product[4] & 0xffffffff00000000ull;
	tmp[2] = product[5];
	tmp[3] = product[6] & 0xffffffff00000000ull;
	carry -= vli_sub(result, result, tmp, ndigits);

	if (carry < 0) {
		do {
			carry += vli_add(result, result, curve_prime, ndigits);
		} while (carry < 0);
	} else {
		while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
			carry -= vli_sub(result, result, curve_prime, ndigits);
	}
}

#define SL32OR32(x32, y32) (((u64)x32 << 32) | y32)
#define AND64H(x64)  (x64 & 0xffFFffFF00000000ull)
#define AND64L(x64)  (x64 & 0x00000000ffFFffFFull)

/* Computes result = product % curve_prime
 * from "Mathematical routines for the NIST prime elliptic curves"
 */
static void vli_mmod_fast_384(u64 *result, const u64 *product,
			      const u64 *curve_prime, u64 *tmp)
{
	int carry;
	const unsigned int ndigits = ECC_CURVE_NIST_P384_DIGITS;

	/* t */
	vli_set(result, product, ndigits);

	/* s1 */
	tmp[0] = 0;		// 0 || 0
	tmp[1] = 0;		// 0 || 0
	tmp[2] = SL32OR32(product[11], (product[10]>>32));	//a22||a21
	tmp[3] = product[11]>>32;	// 0 ||a23
	tmp[4] = 0;		// 0 || 0
	tmp[5] = 0;		// 0 || 0
	carry = vli_lshift(tmp, tmp, 1, ndigits);
	carry += vli_add(result, result, tmp, ndigits);

	/* s2 */
	tmp[0] = product[6];	//a13||a12
	tmp[1] = product[7];	//a15||a14
	tmp[2] = product[8];	//a17||a16
	tmp[3] = product[9];	//a19||a18
	tmp[4] = product[10];	//a21||a20
	tmp[5] = product[11];	//a23||a22
	carry += vli_add(result, result, tmp, ndigits);

	/* s3 */
	tmp[0] = SL32OR32(product[11], (product[10]>>32));	//a22||a21
	tmp[1] = SL32OR32(product[6], (product[11]>>32));	//a12||a23
	tmp[2] = SL32OR32(product[7], (product[6])>>32);	//a14||a13
	tmp[3] = SL32OR32(product[8], (product[7]>>32));	//a16||a15
	tmp[4] = SL32OR32(product[9], (product[8]>>32));	//a18||a17
	tmp[5] = SL32OR32(product[10], (product[9]>>32));	//a20||a19
	carry += vli_add(result, result, tmp, ndigits);

	/* s4 */
	tmp[0] = AND64H(product[11]);	//a23|| 0
	tmp[1] = (product[10]<<32);	//a20|| 0
	tmp[2] = product[6];	//a13||a12
	tmp[3] = product[7];	//a15||a14
	tmp[4] = product[8];	//a17||a16
	tmp[5] = product[9];	//a19||a18
	carry += vli_add(result, result, tmp, ndigits);

	/* s5 */
	tmp[0] = 0;		// 0 || 0
	tmp[1] = 0;		// 0 || 0
	tmp[2] = product[10];	//a21||a20
	tmp[3] = product[11];	//a23||a22
	tmp[4] = 0;		// 0 || 0
	tmp[5] = 0;		// 0 || 0
	carry += vli_add(result, result, tmp, ndigits);

	/* s6 */
	tmp[0] = AND64L(product[10]);	// 0 ||a20
	tmp[1] = AND64H(product[10]);	//a21|| 0
	tmp[2] = product[11];	//a23||a22
	tmp[3] = 0;		// 0 || 0
	tmp[4] = 0;		// 0 || 0
	tmp[5] = 0;		// 0 || 0
	carry += vli_add(result, result, tmp, ndigits);

	/* d1 */
	tmp[0] = SL32OR32(product[6], (product[11]>>32));	//a12||a23
	tmp[1] = SL32OR32(product[7], (product[6]>>32));	//a14||a13
	tmp[2] = SL32OR32(product[8], (product[7]>>32));	//a16||a15
	tmp[3] = SL32OR32(product[9], (product[8]>>32));	//a18||a17
	tmp[4] = SL32OR32(product[10], (product[9]>>32));	//a20||a19
	tmp[5] = SL32OR32(product[11], (product[10]>>32));	//a22||a21
	carry -= vli_sub(result, result, tmp, ndigits);

	/* d2 */
	tmp[0] = (product[10]<<32);	//a20|| 0
	tmp[1] = SL32OR32(product[11], (product[10]>>32));	//a22||a21
	tmp[2] = (product[11]>>32);	// 0 ||a23
	tmp[3] = 0;		// 0 || 0
	tmp[4] = 0;		// 0 || 0
	tmp[5] = 0;		// 0 || 0
	carry -= vli_sub(result, result, tmp, ndigits);

	/* d3 */
	tmp[0] = 0;		// 0 || 0
	tmp[1] = AND64H(product[11]);	//a23|| 0
	tmp[2] = product[11]>>32;	// 0 ||a23
	tmp[3] = 0;		// 0 || 0
	tmp[4] = 0;		// 0 || 0
	tmp[5] = 0;		// 0 || 0
	carry -= vli_sub(result, result, tmp, ndigits);

	if (carry < 0) {
		do {
			carry += vli_add(result, result, curve_prime, ndigits);
		} while (carry < 0);
	} else {
		while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
			carry -= vli_sub(result, result, curve_prime, ndigits);
	}
}

#undef SL32OR32
#undef AND64H
#undef AND64L

/*
 * Computes result = product % curve_prime
 * from "Recommendations for Discrete Logarithm-Based Cryptography:
 *       Elliptic Curve Domain Parameters" section G.1.4
 */
static void vli_mmod_fast_521(u64 *result, const u64 *product,
			      const u64 *curve_prime, u64 *tmp)
{
	const unsigned int ndigits = ECC_CURVE_NIST_P521_DIGITS;
	size_t i;

	/* Initialize result with lowest 521 bits from product */
	vli_set(result, product, ndigits);
	result[8] &= 0x1ff;

	for (i = 0; i < ndigits; i++)
		tmp[i] = (product[8 + i] >> 9) | (product[9 + i] << 55);
	tmp[8] &= 0x1ff;

	vli_mod_add(result, result, tmp, curve_prime, ndigits);
}

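/*
 * The fold above relies on 2^521 == 1 (mod p) for p = 2^521 - 1
 * (illustrative): writing the product as hi * 2^521 + lo, we get
 * hi * 2^521 + lo == hi + lo (mod p), so one 521-bit right shift plus one
 * modular addition completes the reduction.
 */
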
/* Computes result = product % curve_prime for different curve_primes.
 *
 * Note that curve_primes are distinguished just by heuristic check and
 * not by complete conformance check.
 */
static bool vli_mmod_fast(u64 *result, u64 *product,
			  const struct ecc_curve *curve)
{
	u64 tmp[2 * ECC_MAX_DIGITS];
	const u64 *curve_prime = curve->p;
	const unsigned int ndigits = curve->g.ndigits;

	/* All NIST curves have name prefix 'nist_' */
	if (strncmp(curve->name, "nist_", 5) != 0) {
		/* Try to handle Pseudo-Mersenne primes. */
		if (curve_prime[ndigits - 1] == -1ull) {
			vli_mmod_special(result, product, curve_prime,
					 ndigits);
			return true;
		} else if (curve_prime[ndigits - 1] == 1ull << 63 &&
			   curve_prime[ndigits - 2] == 0) {
			vli_mmod_special2(result, product, curve_prime,
					  ndigits);
			return true;
		}
		vli_mmod_barrett(result, product, curve_prime, ndigits);
		return true;
	}

	switch (ndigits) {
	case ECC_CURVE_NIST_P192_DIGITS:
		vli_mmod_fast_192(result, product, curve_prime, tmp);
		break;
	case ECC_CURVE_NIST_P256_DIGITS:
		vli_mmod_fast_256(result, product, curve_prime, tmp);
		break;
	case ECC_CURVE_NIST_P384_DIGITS:
		vli_mmod_fast_384(result, product, curve_prime, tmp);
		break;
	case ECC_CURVE_NIST_P521_DIGITS:
		vli_mmod_fast_521(result, product, curve_prime, tmp);
		break;
	default:
		pr_err_ratelimited("ecc: unsupported digits size!\n");
		return false;
	}

	return true;
}

/* Computes result = (left * right) % mod.
 * Assumes that mod is a sufficiently large curve order.
 */
void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
		       const u64 *mod, unsigned int ndigits)
{
	u64 product[ECC_MAX_DIGITS * 2];

	vli_mult(product, left, right, ndigits);
	vli_mmod_slow(result, product, mod, ndigits);
}
EXPORT_SYMBOL(vli_mod_mult_slow);

/* Computes result = (left * right) % curve_prime. */
static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
			      const struct ecc_curve *curve)
{
	u64 product[2 * ECC_MAX_DIGITS];

	vli_mult(product, left, right, curve->g.ndigits);
	vli_mmod_fast(result, product, curve);
}

/* Computes result = left^2 % curve_prime. */
static void vli_mod_square_fast(u64 *result, const u64 *left,
				const struct ecc_curve *curve)
{
	u64 product[2 * ECC_MAX_DIGITS];

	vli_square(product, left, curve->g.ndigits);
	vli_mmod_fast(result, product, curve);
}

#define EVEN(vli) (!(vli[0] & 1))
/* Computes result = (1 / input) % mod. All VLIs are the same size.
 * See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
 * https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf
 */
void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
		 unsigned int ndigits)
{
	u64 a[ECC_MAX_DIGITS], b[ECC_MAX_DIGITS];
	u64 u[ECC_MAX_DIGITS], v[ECC_MAX_DIGITS];
	u64 carry;
	int cmp_result;

	if (vli_is_zero(input, ndigits)) {
		vli_clear(result, ndigits);
		return;
	}

	vli_set(a, input, ndigits);
	vli_set(b, mod, ndigits);
	vli_clear(u, ndigits);
	u[0] = 1;
	vli_clear(v, ndigits);

	while ((cmp_result = vli_cmp(a, b, ndigits)) != 0) {
		carry = 0;

		if (EVEN(a)) {
			vli_rshift1(a, ndigits);

			if (!EVEN(u))
				carry = vli_add(u, u, mod, ndigits);

			vli_rshift1(u, ndigits);
			if (carry)
				u[ndigits - 1] |= 0x8000000000000000ull;
		} else if (EVEN(b)) {
			vli_rshift1(b, ndigits);

			if (!EVEN(v))
				carry = vli_add(v, v, mod, ndigits);

			vli_rshift1(v, ndigits);
			if (carry)
				v[ndigits - 1] |= 0x8000000000000000ull;
		} else if (cmp_result > 0) {
			vli_sub(a, a, b, ndigits);
			vli_rshift1(a, ndigits);

			if (vli_cmp(u, v, ndigits) < 0)
				vli_add(u, u, mod, ndigits);

			vli_sub(u, u, v, ndigits);
			if (!EVEN(u))
				carry = vli_add(u, u, mod, ndigits);

			vli_rshift1(u, ndigits);
			if (carry)
				u[ndigits - 1] |= 0x8000000000000000ull;
		} else {
			vli_sub(b, b, a, ndigits);
			vli_rshift1(b, ndigits);

			if (vli_cmp(v, u, ndigits) < 0)
				vli_add(v, v, mod, ndigits);

			vli_sub(v, v, u, ndigits);
			if (!EVEN(v))
				carry = vli_add(v, v, mod, ndigits);

			vli_rshift1(v, ndigits);
			if (carry)
				v[ndigits - 1] |= 0x8000000000000000ull;
		}
	}

	vli_set(result, u, ndigits);
}
EXPORT_SYMBOL(vli_mod_inv);

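/*
 * Example (illustrative sketch only): inverting a nonzero scalar s modulo
 * the curve order, with all values as little-endian digit arrays:
 *
 *	u64 s_inv[ECC_MAX_DIGITS];
 *
 *	vli_mod_inv(s_inv, s, curve->n, curve->g.ndigits);
 *
 * The routine is the binary (shift-and-subtract) extended Euclidean
 * variant from the paper cited above, so no multi-digit division is ever
 * performed.
 */
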
/* ------ Point operations ------ */

/* Returns true if p_point is the point at infinity, false otherwise. */
bool ecc_point_is_zero(const struct ecc_point *point)
{
	return (vli_is_zero(point->x, point->ndigits) &&
		vli_is_zero(point->y, point->ndigits));
}
EXPORT_SYMBOL(ecc_point_is_zero);

/* Point multiplication algorithm using Montgomery's ladder with co-Z
 * coordinates. From https://eprint.iacr.org/2011/338.pdf
 */

/* Double in place */
static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
				      const struct ecc_curve *curve)
{
	/* t1 = x, t2 = y, t3 = z */
	u64 t4[ECC_MAX_DIGITS];
	u64 t5[ECC_MAX_DIGITS];
	const u64 *curve_prime = curve->p;
	const unsigned int ndigits = curve->g.ndigits;

	if (vli_is_zero(z1, ndigits))
		return;

	/* t4 = y1^2 */
	vli_mod_square_fast(t4, y1, curve);
	/* t5 = x1*y1^2 = A */
	vli_mod_mult_fast(t5, x1, t4, curve);
	/* t4 = y1^4 */
	vli_mod_square_fast(t4, t4, curve);
	/* t2 = y1*z1 = z3 */
	vli_mod_mult_fast(y1, y1, z1, curve);
	/* t3 = z1^2 */
	vli_mod_square_fast(z1, z1, curve);

	/* t1 = x1 + z1^2 */
	vli_mod_add(x1, x1, z1, curve_prime, ndigits);
	/* t3 = 2*z1^2 */
	vli_mod_add(z1, z1, z1, curve_prime, ndigits);
	/* t3 = x1 - z1^2 */
	vli_mod_sub(z1, x1, z1, curve_prime, ndigits);
	/* t1 = x1^2 - z1^4 */
	vli_mod_mult_fast(x1, x1, z1, curve);

	/* t3 = 2*(x1^2 - z1^4) */
	vli_mod_add(z1, x1, x1, curve_prime, ndigits);
	/* t1 = 3*(x1^2 - z1^4) */
	vli_mod_add(x1, x1, z1, curve_prime, ndigits);
	if (vli_test_bit(x1, 0)) {
		u64 carry = vli_add(x1, x1, curve_prime, ndigits);

		vli_rshift1(x1, ndigits);
		x1[ndigits - 1] |= carry << 63;
	} else {
		vli_rshift1(x1, ndigits);
	}
	/* t1 = 3/2*(x1^2 - z1^4) = B */

	/* t3 = B^2 */
	vli_mod_square_fast(z1, x1, curve);
	/* t3 = B^2 - A */
	vli_mod_sub(z1, z1, t5, curve_prime, ndigits);
	/* t3 = B^2 - 2A = x3 */
	vli_mod_sub(z1, z1, t5, curve_prime, ndigits);
	/* t5 = A - x3 */
	vli_mod_sub(t5, t5, z1, curve_prime, ndigits);
	/* t1 = B * (A - x3) */
	vli_mod_mult_fast(x1, x1, t5, curve);
	/* t4 = B * (A - x3) - y1^4 = y3 */
	vli_mod_sub(t4, x1, t4, curve_prime, ndigits);

	vli_set(x1, z1, ndigits);
	vli_set(z1, y1, ndigits);
	vli_set(y1, t4, ndigits);
}

/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */
static void apply_z(u64 *x1, u64 *y1, u64 *z, const struct ecc_curve *curve)
{
	u64 t1[ECC_MAX_DIGITS];

	vli_mod_square_fast(t1, z, curve);	/* z^2 */
	vli_mod_mult_fast(x1, x1, t1, curve);	/* x1 * z^2 */
	vli_mod_mult_fast(t1, t1, z, curve);	/* z^3 */
	vli_mod_mult_fast(y1, y1, t1, curve);	/* y1 * z^3 */
}

/* P = (x1, y1) => 2P, (x2, y2) => P' */
static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
				u64 *p_initial_z, const struct ecc_curve *curve)
{
	u64 z[ECC_MAX_DIGITS];
	const unsigned int ndigits = curve->g.ndigits;

	vli_set(x2, x1, ndigits);
	vli_set(y2, y1, ndigits);

	vli_clear(z, ndigits);
	z[0] = 1;

	if (p_initial_z)
		vli_set(z, p_initial_z, ndigits);

	apply_z(x1, y1, z, curve);

	ecc_point_double_jacobian(x1, y1, z, curve);

	apply_z(x2, y2, z, curve);
}

/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
 * Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3)
 * or P => P', Q => P + Q
 */
static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
		     const struct ecc_curve *curve)
{
	/* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
	u64 t5[ECC_MAX_DIGITS];
	const u64 *curve_prime = curve->p;
	const unsigned int ndigits = curve->g.ndigits;

	/* t5 = x2 - x1 */
	vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
	/* t5 = (x2 - x1)^2 = A */
	vli_mod_square_fast(t5, t5, curve);
	/* t1 = x1*A = B */
	vli_mod_mult_fast(x1, x1, t5, curve);
	/* t3 = x2*A = C */
	vli_mod_mult_fast(x2, x2, t5, curve);
	/* t4 = y2 - y1 */
	vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
	/* t5 = (y2 - y1)^2 = D */
	vli_mod_square_fast(t5, y2, curve);

	/* t5 = D - B */
	vli_mod_sub(t5, t5, x1, curve_prime, ndigits);
	/* t5 = D - B - C = x3 */
	vli_mod_sub(t5, t5, x2, curve_prime, ndigits);
	/* t3 = C - B */
	vli_mod_sub(x2, x2, x1, curve_prime, ndigits);
	/* t2 = y1*(C - B) */
	vli_mod_mult_fast(y1, y1, x2, curve);
	/* t3 = B - x3 */
	vli_mod_sub(x2, x1, t5, curve_prime, ndigits);
	/* t4 = (y2 - y1)*(B - x3) */
	vli_mod_mult_fast(y2, y2, x2, curve);
	/* t4 = y3 */
	vli_mod_sub(y2, y2, y1, curve_prime, ndigits);

	vli_set(x2, t5, ndigits);
}

/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
 * Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
 * or P => P - Q, Q => P + Q
 */
static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
		       const struct ecc_curve *curve)
{
	/* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
	u64 t5[ECC_MAX_DIGITS];
	u64 t6[ECC_MAX_DIGITS];
	u64 t7[ECC_MAX_DIGITS];
	const u64 *curve_prime = curve->p;
	const unsigned int ndigits = curve->g.ndigits;

	/* t5 = x2 - x1 */
	vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
	/* t5 = (x2 - x1)^2 = A */
	vli_mod_square_fast(t5, t5, curve);
	/* t1 = x1*A = B */
	vli_mod_mult_fast(x1, x1, t5, curve);
	/* t3 = x2*A = C */
	vli_mod_mult_fast(x2, x2, t5, curve);
	/* t4 = y2 + y1 */
	vli_mod_add(t5, y2, y1, curve_prime, ndigits);
	/* t4 = y2 - y1 */
	vli_mod_sub(y2, y2, y1, curve_prime, ndigits);

	/* t6 = C - B */
	vli_mod_sub(t6, x2, x1, curve_prime, ndigits);
	/* t2 = y1 * (C - B) */
	vli_mod_mult_fast(y1, y1, t6, curve);
	/* t6 = B + C */
	vli_mod_add(t6, x1, x2, curve_prime, ndigits);
	/* t3 = (y2 - y1)^2 */
	vli_mod_square_fast(x2, y2, curve);
	/* t3 = x3 */
	vli_mod_sub(x2, x2, t6, curve_prime, ndigits);

	/* t7 = B - x3 */
	vli_mod_sub(t7, x1, x2, curve_prime, ndigits);
	/* t4 = (y2 - y1)*(B - x3) */
	vli_mod_mult_fast(y2, y2, t7, curve);
	/* t4 = y3 */
	vli_mod_sub(y2, y2, y1, curve_prime, ndigits);

	/* t7 = (y2 + y1)^2 = F */
	vli_mod_square_fast(t7, t5, curve);
	/* t7 = x3' */
	vli_mod_sub(t7, t7, t6, curve_prime, ndigits);
	/* t6 = x3' - B */
	vli_mod_sub(t6, t7, x1, curve_prime, ndigits);
	/* t6 = (y2 + y1)*(x3' - B) */
	vli_mod_mult_fast(t6, t6, t5, curve);
	/* t2 = y3' */
	vli_mod_sub(y1, t6, y1, curve_prime, ndigits);

	vli_set(x1, t7, ndigits);
}

static void ecc_point_mult(struct ecc_point *result,
			   const struct ecc_point *point, const u64 *scalar,
			   u64 *initial_z, const struct ecc_curve *curve,
			   unsigned int ndigits)
{
	/* R0 and R1 */
	u64 rx[2][ECC_MAX_DIGITS];
	u64 ry[2][ECC_MAX_DIGITS];
	u64 z[ECC_MAX_DIGITS];
	u64 sk[2][ECC_MAX_DIGITS];
	u64 *curve_prime = curve->p;
	int i, nb;
	int num_bits;
	int carry;

	carry = vli_add(sk[0], scalar, curve->n, ndigits);
	vli_add(sk[1], sk[0], curve->n, ndigits);
	scalar = sk[!carry];
	if (curve->nbits == 521)	/* NIST P521 */
		num_bits = curve->nbits + 2;
	else
		num_bits = sizeof(u64) * ndigits * 8 + 1;

	vli_set(rx[1], point->x, ndigits);
	vli_set(ry[1], point->y, ndigits);

	xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z, curve);

	for (i = num_bits - 2; i > 0; i--) {
		nb = !vli_test_bit(scalar, i);
		xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve);
		xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve);
	}

	nb = !vli_test_bit(scalar, 0);
	xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve);

	/* Find final 1/Z value. */
	/* X1 - X0 */
	vli_mod_sub(z, rx[1], rx[0], curve_prime, ndigits);
	/* Yb * (X1 - X0) */
	vli_mod_mult_fast(z, z, ry[1 - nb], curve);
	/* xP * Yb * (X1 - X0) */
	vli_mod_mult_fast(z, z, point->x, curve);

	/* 1 / (xP * Yb * (X1 - X0)) */
	vli_mod_inv(z, z, curve_prime, point->ndigits);

	/* yP / (xP * Yb * (X1 - X0)) */
	vli_mod_mult_fast(z, z, point->y, curve);
	/* Xb * yP / (xP * Yb * (X1 - X0)) */
	vli_mod_mult_fast(z, z, rx[1 - nb], curve);
	/* End 1/Z calculation */

	xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve);

	apply_z(rx[0], ry[0], z, curve);

	vli_set(result->x, rx[0], ndigits);
	vli_set(result->y, ry[0], ndigits);
}

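/*
 * Note on the sk[] selection above (illustrative): the scalar is
 * regularized by adding either n or 2n, so the ladder always walks a
 * fixed number of bits regardless of the numeric value of the scalar;
 * together with the Montgomery-ladder structure this avoids an obvious
 * timing side channel on the scalar's bit length.
 */
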
/* Computes R = P + Q mod p */
static void ecc_point_add(const struct ecc_point *result,
			  const struct ecc_point *p, const struct ecc_point *q,
			  const struct ecc_curve *curve)
{
	u64 z[ECC_MAX_DIGITS];
	u64 px[ECC_MAX_DIGITS];
	u64 py[ECC_MAX_DIGITS];
	unsigned int ndigits = curve->g.ndigits;

	vli_set(result->x, q->x, ndigits);
	vli_set(result->y, q->y, ndigits);
	vli_mod_sub(z, result->x, p->x, curve->p, ndigits);
	vli_set(px, p->x, ndigits);
	vli_set(py, p->y, ndigits);
	xycz_add(px, py, result->x, result->y, curve);
	vli_mod_inv(z, z, curve->p, ndigits);
	apply_z(result->x, result->y, z, curve);
}

/* Computes R = u1P + u2Q mod p using Shamir's trick.
 * Based on: Kenneth MacKay's micro-ecc (2014).
 */
void ecc_point_mult_shamir(const struct ecc_point *result,
			   const u64 *u1, const struct ecc_point *p,
			   const u64 *u2, const struct ecc_point *q,
			   const struct ecc_curve *curve)
{
	u64 z[ECC_MAX_DIGITS];
	u64 sump[2][ECC_MAX_DIGITS];
	u64 *rx = result->x;
	u64 *ry = result->y;
	unsigned int ndigits = curve->g.ndigits;
	unsigned int num_bits;
	struct ecc_point sum = ECC_POINT_INIT(sump[0], sump[1], ndigits);
	const struct ecc_point *points[4];
	const struct ecc_point *point;
	unsigned int idx;
	int i;

	ecc_point_add(&sum, p, q, curve);
	points[0] = NULL;
	points[1] = p;
	points[2] = q;
	points[3] = &sum;

	num_bits = max(vli_num_bits(u1, ndigits), vli_num_bits(u2, ndigits));
	i = num_bits - 1;
	idx = !!vli_test_bit(u1, i);
	idx |= (!!vli_test_bit(u2, i)) << 1;
	point = points[idx];

	vli_set(rx, point->x, ndigits);
	vli_set(ry, point->y, ndigits);
	vli_clear(z + 1, ndigits - 1);
	z[0] = 1;

	for (--i; i >= 0; i--) {
		ecc_point_double_jacobian(rx, ry, z, curve);
		idx = !!vli_test_bit(u1, i);
		idx |= (!!vli_test_bit(u2, i)) << 1;
		point = points[idx];
		if (point) {
			u64 tx[ECC_MAX_DIGITS];
			u64 ty[ECC_MAX_DIGITS];
			u64 tz[ECC_MAX_DIGITS];

			vli_set(tx, point->x, ndigits);
			vli_set(ty, point->y, ndigits);
			apply_z(tx, ty, z, curve);
			vli_mod_sub(tz, rx, tx, curve->p, ndigits);
			xycz_add(tx, ty, rx, ry, curve);
			vli_mod_mult_fast(z, z, tz, curve);
		}
	}
	vli_mod_inv(z, z, curve->p, ndigits);
	apply_z(rx, ry, z, curve);
}
EXPORT_SYMBOL(ecc_point_mult_shamir);

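/*
 * Why the joint walk above helps (illustrative): computing u1*P + u2*Q
 * bit-by-bit over max(|u1|, |u2|) bits shares one Jacobian doubling per
 * bit between both scalars, instead of doubling separately for u1*P and
 * u2*Q and adding the results; the table { NULL, P, Q, P+Q } supplies the
 * point to add for each pair of scalar bits.
 */
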
/*
 * This function performs checks equivalent to Appendix A.4.2 of FIPS 186-5.
 * Whereas A.4.2 results in an integer in the interval [1, n-1], this function
 * ensures that the integer is in the range of [2, n-3]. We are slightly
 * stricter because of the currently used scalar multiplication algorithm.
 */
static int __ecc_is_key_valid(const struct ecc_curve *curve,
			      const u64 *private_key, unsigned int ndigits)
{
	u64 one[ECC_MAX_DIGITS] = { 1, };
	u64 res[ECC_MAX_DIGITS];

	if (!private_key)
		return -EINVAL;

	if (curve->g.ndigits != ndigits)
		return -EINVAL;

	/* Make sure the private key is in the range [2, n-3]. */
	if (vli_cmp(one, private_key, ndigits) != -1)
		return -EINVAL;
	vli_sub(res, curve->n, one, ndigits);
	vli_sub(res, res, one, ndigits);
	if (vli_cmp(res, private_key, ndigits) != 1)
		return -EINVAL;

	return 0;
}

int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
		     const u64 *private_key, unsigned int private_key_len)
{
	int nbytes;
	const struct ecc_curve *curve = ecc_get_curve(curve_id);

	nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;

	if (private_key_len != nbytes)
		return -EINVAL;

	return __ecc_is_key_valid(curve, private_key, ndigits);
}
EXPORT_SYMBOL(ecc_is_key_valid);

/*
 * ECC private keys are generated using the method of rejection sampling,
 * equivalent to that described in FIPS 186-5, Appendix A.2.2.
 *
 * This method generates a private key uniformly distributed in the range
 * [2, n-3].
 */
int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits,
		    u64 *private_key)
{
	const struct ecc_curve *curve = ecc_get_curve(curve_id);
	unsigned int nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
	unsigned int nbits = vli_num_bits(curve->n, ndigits);
	int err;

	/*
	 * Step 1 & 2: check that N is included in Table 1 of FIPS 186-5,
	 * section 6.1.1.
	 */
	if (nbits < 224)
		return -EINVAL;

	/*
	 * FIPS 186-5 recommends that the private key should be obtained from a
	 * RBG with a security strength equal to or greater than the security
	 * strength associated with N.
	 *
	 * The maximum security strength identified by NIST SP800-57pt1r4 for
	 * ECC is 256 (N >= 512).
	 *
	 * This condition is met by the default RNG because it selects a favored
	 * DRBG with a security strength of 256.
	 */
	if (crypto_get_default_rng())
		return -EFAULT;

	/* Step 3: obtain N returned_bits from the DRBG. */
	err = crypto_rng_get_bytes(crypto_default_rng,
				   (u8 *)private_key, nbytes);
	crypto_put_default_rng();
	if (err)
		return err;

	/* Step 4: make sure the private key is in the valid range. */
	if (__ecc_is_key_valid(curve, private_key, ndigits))
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(ecc_gen_privkey);

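/*
 * Example (illustrative sketch only): generating a P-256 private key and
 * deriving the matching public key with ecc_make_pub_key() below:
 *
 *	u64 priv[ECC_CURVE_NIST_P256_DIGITS];
 *	u64 pub[2 * ECC_CURVE_NIST_P256_DIGITS];
 *	int err;
 *
 *	err = ecc_gen_privkey(ECC_CURVE_NIST_P256,
 *			      ECC_CURVE_NIST_P256_DIGITS, priv);
 *	if (!err)
 *		err = ecc_make_pub_key(ECC_CURVE_NIST_P256,
 *				       ECC_CURVE_NIST_P256_DIGITS, priv, pub);
 */
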
int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
		     const u64 *private_key, u64 *public_key)
{
	int ret = 0;
	struct ecc_point *pk;
	const struct ecc_curve *curve = ecc_get_curve(curve_id);

	if (!private_key) {
		ret = -EINVAL;
		goto out;
	}

	pk = ecc_alloc_point(ndigits);
	if (!pk) {
		ret = -ENOMEM;
		goto out;
	}

	ecc_point_mult(pk, &curve->g, private_key, NULL, curve, ndigits);

	/* SP800-56A rev 3 5.6.2.1.3 key check */
	if (ecc_is_pubkey_valid_full(curve, pk)) {
		ret = -EAGAIN;
		goto err_free_point;
	}

	ecc_swap_digits(pk->x, public_key, ndigits);
	ecc_swap_digits(pk->y, &public_key[ndigits], ndigits);

err_free_point:
	ecc_free_point(pk);
out:
	return ret;
}
EXPORT_SYMBOL(ecc_make_pub_key);

/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
				struct ecc_point *pk)
{
	u64 yy[ECC_MAX_DIGITS], xxx[ECC_MAX_DIGITS], w[ECC_MAX_DIGITS];

	if (WARN_ON(pk->ndigits != curve->g.ndigits))
		return -EINVAL;

	/* Check 1: Verify key is not the zero point. */
	if (ecc_point_is_zero(pk))
		return -EINVAL;

	/* Check 2: Verify key is in the range [1, p-1]. */
	if (vli_cmp(curve->p, pk->x, pk->ndigits) != 1)
		return -EINVAL;
	if (vli_cmp(curve->p, pk->y, pk->ndigits) != 1)
		return -EINVAL;

	/* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */
	vli_mod_square_fast(yy, pk->y, curve); /* y^2 */
	vli_mod_square_fast(xxx, pk->x, curve); /* x^2 */
	vli_mod_mult_fast(xxx, xxx, pk->x, curve); /* x^3 */
	vli_mod_mult_fast(w, curve->a, pk->x, curve); /* a·x */
	vli_mod_add(w, w, curve->b, curve->p, pk->ndigits); /* a·x + b */
	vli_mod_add(w, w, xxx, curve->p, pk->ndigits); /* x^3 + a·x + b */
	if (vli_cmp(yy, w, pk->ndigits) != 0) /* Equation */
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL(ecc_is_pubkey_valid_partial);

/* SP800-56A section 5.6.2.3.3 full verification */
int ecc_is_pubkey_valid_full(const struct ecc_curve *curve,
			     struct ecc_point *pk)
{
	struct ecc_point *nQ;

	/* Checks 1 through 3 */
	int ret = ecc_is_pubkey_valid_partial(curve, pk);

	if (ret)
		return ret;

	/* Check 4: Verify that nQ is the zero point. */
	nQ = ecc_alloc_point(pk->ndigits);
	if (!nQ)
		return -ENOMEM;

	ecc_point_mult(nQ, pk, curve->n, NULL, curve, pk->ndigits);
	if (!ecc_point_is_zero(nQ))
		ret = -EINVAL;

	ecc_free_point(nQ);

	return ret;
}
EXPORT_SYMBOL(ecc_is_pubkey_valid_full);

int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
			      const u64 *private_key, const u64 *public_key,
			      u64 *secret)
{
	int ret = 0;
	struct ecc_point *product, *pk;
	u64 rand_z[ECC_MAX_DIGITS];
	unsigned int nbytes;
	const struct ecc_curve *curve = ecc_get_curve(curve_id);

	if (!private_key || !public_key || ndigits > ARRAY_SIZE(rand_z)) {
		ret = -EINVAL;
		goto out;
	}

	nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;

	get_random_bytes(rand_z, nbytes);

	pk = ecc_alloc_point(ndigits);
	if (!pk) {
		ret = -ENOMEM;
		goto out;
	}

	ecc_swap_digits(public_key, pk->x, ndigits);
	ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
	ret = ecc_is_pubkey_valid_partial(curve, pk);
	if (ret)
		goto err_alloc_product;

	product = ecc_alloc_point(ndigits);
	if (!product) {
		ret = -ENOMEM;
		goto err_alloc_product;
	}

	ecc_point_mult(product, pk, private_key, rand_z, curve, ndigits);

	if (ecc_point_is_zero(product)) {
		ret = -EFAULT;
		goto err_validity;
	}

	ecc_swap_digits(product->x, secret, ndigits);

err_validity:
	memzero_explicit(rand_z, sizeof(rand_z));
	ecc_free_point(product);
err_alloc_product:
	ecc_free_point(pk);
out:
	return ret;
}
EXPORT_SYMBOL(crypto_ecdh_shared_secret);

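/*
 * Example (illustrative sketch only): deriving a P-256 ECDH shared secret
 * from our private key and a peer public key laid out as x followed by y,
 * in the same digit format the rest of this API uses:
 *
 *	u64 secret[ECC_CURVE_NIST_P256_DIGITS];
 *	int err;
 *
 *	err = crypto_ecdh_shared_secret(ECC_CURVE_NIST_P256,
 *					ECC_CURVE_NIST_P256_DIGITS,
 *					priv, peer_pub, secret);
 */
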
MODULE_DESCRIPTION("core elliptic curve module");
MODULE_LICENSE("Dual BSD/GPL");