/*
 * Copyright (c) 2013, 2014 Kenneth MacKay. All rights reserved.
 * Copyright (c) 2019 Vitaly Chikunov <vt@altlinux.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/swab.h>
#include <linux/fips.h>
#include <crypto/ecdh.h>
#include <crypto/rng.h>
#include <asm/unaligned.h>
#include <linux/ratelimit.h>

#include "ecc.h"
#include "ecc_curve_defs.h"

typedef struct {
        u64 m_low;
        u64 m_high;
} uint128_t;
static inline const struct ecc_curve *ecc_get_curve(unsigned int curve_id)
{
        switch (curve_id) {
        /* In FIPS mode only allow P256 and higher */
        case ECC_CURVE_NIST_P192:
                return fips_enabled ? NULL : &nist_p192;
        case ECC_CURVE_NIST_P256:
                return &nist_p256;
        default:
                return NULL;
        }
}
static u64 *ecc_alloc_digits_space(unsigned int ndigits)
{
        size_t len = ndigits * sizeof(u64);

        if (!len)
                return NULL;

        return kmalloc(len, GFP_KERNEL);
}
static void ecc_free_digits_space(u64 *space)
{
        kzfree(space);
}
static struct ecc_point *ecc_alloc_point(unsigned int ndigits)
{
        struct ecc_point *p = kmalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return NULL;

        p->x = ecc_alloc_digits_space(ndigits);
        if (!p->x)
                goto err_alloc_x;

        p->y = ecc_alloc_digits_space(ndigits);
        if (!p->y)
                goto err_alloc_y;

        p->ndigits = ndigits;

        return p;

err_alloc_y:
        ecc_free_digits_space(p->x);
err_alloc_x:
        kfree(p);
        return NULL;
}
static void ecc_free_point(struct ecc_point *p)
{
        if (!p)
                return;

        kzfree(p->x);
        kzfree(p->y);
        kfree(p);
}
static void vli_clear(u64 *vli, unsigned int ndigits)
{
        int i;

        for (i = 0; i < ndigits; i++)
                vli[i] = 0;
}
/* Returns true if vli == 0, false otherwise. */
bool vli_is_zero(const u64 *vli, unsigned int ndigits)
{
        int i;

        for (i = 0; i < ndigits; i++) {
                if (vli[i])
                        return false;
        }

        return true;
}
EXPORT_SYMBOL(vli_is_zero);
/* Returns nonzero if bit 'bit' of vli is set. */
static u64 vli_test_bit(const u64 *vli, unsigned int bit)
{
        return (vli[bit / 64] & ((u64)1 << (bit % 64)));
}
/* Returns true if the top (sign) bit of vli is set. */
static bool vli_is_negative(const u64 *vli, unsigned int ndigits)
{
        return vli_test_bit(vli, ndigits * 64 - 1);
}
/* Counts the number of 64-bit "digits" in vli. */
static unsigned int vli_num_digits(const u64 *vli, unsigned int ndigits)
{
        int i;

        /* Search from the end until we find a non-zero digit.
         * We do it in reverse because we expect that most digits will
         * be nonzero.
         */
        for (i = ndigits - 1; i >= 0 && vli[i] == 0; i--);

        return (i + 1);
}
/* Counts the number of bits required to represent vli. */
static unsigned int vli_num_bits(const u64 *vli, unsigned int ndigits)
{
        unsigned int i, num_digits;
        u64 digit;

        num_digits = vli_num_digits(vli, ndigits);
        if (num_digits == 0)
                return 0;

        digit = vli[num_digits - 1];
        for (i = 0; digit; i++)
                digit >>= 1;

        return ((num_digits - 1) * 64 + i);
}
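/* Example (illustrative only): for vli = { 0x5, 0x0, 0x0 } with ndigits = 3,
 * vli_num_digits() returns 1, since only the lowest digit is nonzero, and
 * vli_num_bits() returns 3, since 0x5 = 0b101 needs three bits.
 */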
/* Set dest from unaligned big-endian bit string src. */
void vli_from_be64(u64 *dest, const void *src, unsigned int ndigits)
{
        int i;
        const u64 *from = src;

        for (i = 0; i < ndigits; i++)
                dest[i] = get_unaligned_be64(&from[ndigits - 1 - i]);
}
EXPORT_SYMBOL(vli_from_be64);
/* Set dest from unaligned little-endian bit string src. */
void vli_from_le64(u64 *dest, const void *src, unsigned int ndigits)
{
        int i;
        const u64 *from = src;

        for (i = 0; i < ndigits; i++)
                dest[i] = get_unaligned_le64(&from[i]);
}
EXPORT_SYMBOL(vli_from_le64);
/* Sets dest = src. */
static void vli_set(u64 *dest, const u64 *src, unsigned int ndigits)
{
        int i;

        for (i = 0; i < ndigits; i++)
                dest[i] = src[i];
}
/* Returns sign of left - right. */
int vli_cmp(const u64 *left, const u64 *right, unsigned int ndigits)
{
        int i;

        for (i = ndigits - 1; i >= 0; i--) {
                if (left[i] > right[i])
                        return 1;
                else if (left[i] < right[i])
                        return -1;
        }

        return 0;
}
EXPORT_SYMBOL(vli_cmp);
/* Computes result = in << shift, returning carry. Can modify in place
 * (if result == in). 0 < shift < 64.
 */
static u64 vli_lshift(u64 *result, const u64 *in, unsigned int shift,
                      unsigned int ndigits)
{
        u64 carry = 0;
        int i;

        for (i = 0; i < ndigits; i++) {
                u64 temp = in[i];

                result[i] = (temp << shift) | carry;
                carry = temp >> (64 - shift);
        }

        return carry;
}
/* Computes vli = vli >> 1. */
static void vli_rshift1(u64 *vli, unsigned int ndigits)
{
        u64 *end = vli;
        u64 carry = 0;

        vli += ndigits;
        while (vli-- > end) {
                u64 temp = *vli;
                *vli = (temp >> 1) | carry;
                carry = temp << 63;
        }
}
/* Computes result = left + right, returning carry. Can modify in place. */
static u64 vli_add(u64 *result, const u64 *left, const u64 *right,
                   unsigned int ndigits)
{
        u64 carry = 0;
        int i;

        for (i = 0; i < ndigits; i++) {
                u64 sum;

                sum = left[i] + right[i] + carry;
                if (sum != left[i])
                        carry = (sum < left[i]);

                result[i] = sum;
        }

        return carry;
}
/* Computes result = left + right for a single-digit right, returning carry.
 * Can modify in place.
 */
static u64 vli_uadd(u64 *result, const u64 *left, u64 right,
                    unsigned int ndigits)
{
        u64 carry = right;
        int i;

        for (i = 0; i < ndigits; i++) {
                u64 sum;

                sum = left[i] + carry;
                if (sum != left[i])
                        carry = (sum < left[i]);
                else
                        carry = !!carry;

                result[i] = sum;
        }

        return carry;
}
/* Computes result = left - right, returning borrow. Can modify in place. */
u64 vli_sub(u64 *result, const u64 *left, const u64 *right,
            unsigned int ndigits)
{
        u64 borrow = 0;
        int i;

        for (i = 0; i < ndigits; i++) {
                u64 diff;

                diff = left[i] - right[i] - borrow;
                if (diff != left[i])
                        borrow = (diff > left[i]);

                result[i] = diff;
        }

        return borrow;
}
EXPORT_SYMBOL(vli_sub);
/* Computes result = left - right for a single-digit right, returning borrow.
 * Can modify in place.
 */
static u64 vli_usub(u64 *result, const u64 *left, u64 right,
                    unsigned int ndigits)
{
        u64 borrow = right;
        int i;

        for (i = 0; i < ndigits; i++) {
                u64 diff;

                diff = left[i] - borrow;
                if (diff != left[i])
                        borrow = (diff > left[i]);

                result[i] = diff;
        }

        return borrow;
}
/* Computes the full 128-bit product of two 64-bit digits. */
static uint128_t mul_64_64(u64 left, u64 right)
{
        uint128_t result;
#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
        unsigned __int128 m = (unsigned __int128)left * right;

        result.m_low = m;
        result.m_high = m >> 64;
#else
        u64 a0 = left & 0xffffffffull;
        u64 a1 = left >> 32;
        u64 b0 = right & 0xffffffffull;
        u64 b1 = right >> 32;
        u64 m0 = a0 * b0;
        u64 m1 = a0 * b1;
        u64 m2 = a1 * b0;
        u64 m3 = a1 * b1;

        m2 += (m0 >> 32);
        m2 += m1;

        /* Overflow */
        if (m2 < m1)
                m3 += 0x100000000ull;

        result.m_low = (m0 & 0xffffffffull) | (m2 << 32);
        result.m_high = m3 + (m2 >> 32);
#endif
        return result;
}
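/* The portable path above uses the schoolbook identity
 *   (a1*2^32 + a0) * (b1*2^32 + b0) =
 *      a1*b1*2^64 + (a0*b1 + a1*b0)*2^32 + a0*b0,
 * accumulating the two middle terms in m2 and propagating the single
 * possible carry from that accumulation into m3.
 */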
/* Computes a + b over 128 bits, propagating the low-half carry. */
static uint128_t add_128_128(uint128_t a, uint128_t b)
{
        uint128_t result;

        result.m_low = a.m_low + b.m_low;
        result.m_high = a.m_high + b.m_high + (result.m_low < a.m_low);

        return result;
}
/* Computes result = left * right (column-wise product scanning). */
static void vli_mult(u64 *result, const u64 *left, const u64 *right,
                     unsigned int ndigits)
{
        uint128_t r01 = { 0, 0 };
        u64 r2 = 0;
        unsigned int i, k;

        /* Compute each digit of result in sequence, maintaining the
         * carries.
         */
        for (k = 0; k < ndigits * 2 - 1; k++) {
                unsigned int min;

                if (k < ndigits)
                        min = 0;
                else
                        min = (k + 1) - ndigits;

                for (i = min; i <= k && i < ndigits; i++) {
                        uint128_t product;

                        product = mul_64_64(left[i], right[k - i]);

                        r01 = add_128_128(r01, product);
                        r2 += (r01.m_high < product.m_high);
                }

                result[k] = r01.m_low;
                r01.m_low = r01.m_high;
                r01.m_high = r2;
                r2 = 0;
        }

        result[ndigits * 2 - 1] = r01.m_low;
}
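/* Column k collects every partial product left[i] * right[k - i]; the
 * 192-bit accumulator (r01 plus the spill word r2) holds the running column
 * sum and one 64-bit digit of the result is retired per column. A minimal
 * sketch of the column layout for ndigits = 2 (illustrative only):
 *
 *      column 0: left[0]*right[0]
 *      column 1: left[0]*right[1] + left[1]*right[0] + carry
 *      column 2: left[1]*right[1] + carry
 */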
/* Computes result = left * right, for a small (u32) right value. */
static void vli_umult(u64 *result, const u64 *left, u32 right,
                      unsigned int ndigits)
{
        uint128_t r01 = { 0 };
        unsigned int k;

        for (k = 0; k < ndigits; k++) {
                uint128_t product;

                product = mul_64_64(left[k], right);
                r01 = add_128_128(r01, product);
                /* no carry */
                result[k] = r01.m_low;
                r01.m_low = r01.m_high;
                r01.m_high = 0;
        }
        result[k] = r01.m_low;
        for (++k; k < ndigits * 2; k++)
                result[k] = 0;
}
/* Computes result = left^2, doubling each cross term left[i]*left[k-i]
 * (i != k - i) instead of computing it twice.
 */
static void vli_square(u64 *result, const u64 *left, unsigned int ndigits)
{
        uint128_t r01 = { 0, 0 };
        u64 r2 = 0;
        unsigned int i, k;

        for (k = 0; k < ndigits * 2 - 1; k++) {
                unsigned int min;

                if (k < ndigits)
                        min = 0;
                else
                        min = (k + 1) - ndigits;

                for (i = min; i <= k && i <= k - i; i++) {
                        uint128_t product;

                        product = mul_64_64(left[i], left[k - i]);

                        if (i < k - i) {
                                r2 += product.m_high >> 63;
                                product.m_high = (product.m_high << 1) |
                                                 (product.m_low >> 63);
                                product.m_low <<= 1;
                        }

                        r01 = add_128_128(r01, product);
                        r2 += (r01.m_high < product.m_high);
                }

                result[k] = r01.m_low;
                r01.m_low = r01.m_high;
                r01.m_high = r2;
                r2 = 0;
        }

        result[ndigits * 2 - 1] = r01.m_low;
}
/* Computes result = (left + right) % mod.
 * Assumes that left < mod and right < mod, result != mod.
 */
static void vli_mod_add(u64 *result, const u64 *left, const u64 *right,
                        const u64 *mod, unsigned int ndigits)
{
        u64 carry;

        carry = vli_add(result, left, right, ndigits);

        /* result > mod (result = mod + remainder), so subtract mod to
         * get remainder.
         */
        if (carry || vli_cmp(result, mod, ndigits) >= 0)
                vli_sub(result, result, mod, ndigits);
}
/* Computes result = (left - right) % mod.
 * Assumes that left < mod and right < mod, result != mod.
 */
static void vli_mod_sub(u64 *result, const u64 *left, const u64 *right,
                        const u64 *mod, unsigned int ndigits)
{
        u64 borrow = vli_sub(result, left, right, ndigits);

        /* In this case, result == -diff == (max int) - diff.
         * Since -x % d == d - x, we can get the correct result from
         * result + mod (with overflow).
         */
        if (borrow)
                vli_add(result, result, mod, ndigits);
}
/*
 * Computes result = product % mod
 * for special form moduli: p = 2^k - c, for small c (note the minus sign).
 *
 * Reference:
 * R. Crandall, C. Pomerance. Prime Numbers: A Computational Perspective.
 * 9 Fast Algorithms for Large-Integer Arithmetic. 9.2.3 Moduli of special form
 * Algorithm 9.2.13 (Fast mod operation for special-form moduli).
 */
static void vli_mmod_special(u64 *result, const u64 *product,
                             const u64 *mod, unsigned int ndigits)
{
        u64 c = -mod[0];
        u64 t[ECC_MAX_DIGITS * 2];
        u64 r[ECC_MAX_DIGITS * 2];

        vli_set(r, product, ndigits * 2);
        while (!vli_is_zero(r + ndigits, ndigits)) {
                vli_umult(t, r + ndigits, c, ndigits);
                vli_clear(r + ndigits, ndigits);
                vli_add(r, r, t, ndigits * 2);
        }
        vli_set(t, mod, ndigits);
        vli_clear(t + ndigits, ndigits);
        while (vli_cmp(r, t, ndigits * 2) >= 0)
                vli_sub(r, r, t, ndigits * 2);
        vli_set(result, r, ndigits);
}
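/* The fold works because 2^k == c (mod p) when p = 2^k - c, so the high
 * half of r may be replaced by high * c. A small worked example
 * (illustrative only): take p = 251 = 2^8 - 5 and product = 60000.
 * high = 234, low = 96, so fold to 234 * 5 + 96 = 1266; fold again to
 * 4 * 5 + 242 = 262; one final subtraction of p leaves 11, which is
 * indeed 60000 mod 251.
 */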
/*
 * Computes result = product % mod
 * for special form moduli: p = 2^{k-1} + c, for small c (note the plus sign),
 * where k-1 does not fit into a qword boundary by -1 bit (such as 255).
 *
 * References (loosely based on):
 * A. Menezes, P. van Oorschot, S. Vanstone. Handbook of Applied Cryptography.
 * 14.3.4 Reduction methods for moduli of special form. Algorithm 14.47.
 * URL: http://cacr.uwaterloo.ca/hac/about/chap14.pdf
 *
 * H. Cohen, G. Frey, R. Avanzi, C. Doche, T. Lange, K. Nguyen, F. Vercauteren.
 * Handbook of Elliptic and Hyperelliptic Curve Cryptography.
 * Algorithm 10.25 Fast reduction for special form moduli.
 */
static void vli_mmod_special2(u64 *result, const u64 *product,
                              const u64 *mod, unsigned int ndigits)
{
        u64 c2 = mod[0] * 2;
        u64 q[ECC_MAX_DIGITS];
        u64 r[ECC_MAX_DIGITS * 2];
        u64 m[ECC_MAX_DIGITS * 2]; /* expanded mod */
        int carry; /* last bit that doesn't fit into q */
        int i;

        vli_set(m, mod, ndigits);
        vli_clear(m + ndigits, ndigits);

        vli_set(r, product, ndigits);
        /* q and carry are top bits */
        vli_set(q, product + ndigits, ndigits);
        vli_clear(r + ndigits, ndigits);
        carry = vli_is_negative(r, ndigits);
        if (carry)
                r[ndigits - 1] &= (1ull << 63) - 1;
        for (i = 1; carry || !vli_is_zero(q, ndigits); i++) {
                u64 qc[ECC_MAX_DIGITS * 2];

                vli_umult(qc, q, c2, ndigits);
                if (carry)
                        vli_uadd(qc, qc, mod[0], ndigits * 2);
                vli_set(q, qc + ndigits, ndigits);
                vli_clear(qc + ndigits, ndigits);
                carry = vli_is_negative(qc, ndigits);
                if (carry)
                        qc[ndigits - 1] &= (1ull << 63) - 1;
                if (i & 1)
                        vli_sub(r, r, qc, ndigits * 2);
                else
                        vli_add(r, r, qc, ndigits * 2);
        }
        while (vli_is_negative(r, ndigits * 2))
                vli_add(r, r, m, ndigits * 2);
        while (vli_cmp(r, m, ndigits * 2) >= 0)
                vli_sub(r, r, m, ndigits * 2);

        vli_set(result, r, ndigits);
}
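/* Here 2^{k-1} == -c and therefore 2^k == -2c (mod p) for p = 2^{k-1} + c,
 * so each fold multiplies the overflow q by c2 = 2*c (plus mod[0] = c for
 * the stray bit k-1) and the partial remainders enter with alternating
 * signs, which is why the loop subtracts on odd iterations and adds on
 * even ones.
 */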
/*
 * Computes result = product % mod, where product is 2N words long.
 * Reference: Ken MacKay's micro-ecc.
 * Currently only designed to work for curve_p or curve_n.
 */
static void vli_mmod_slow(u64 *result, u64 *product, const u64 *mod,
                          unsigned int ndigits)
{
        u64 mod_m[2 * ECC_MAX_DIGITS];
        u64 tmp[2 * ECC_MAX_DIGITS];
        u64 *v[2] = { tmp, product };
        u64 carry = 0;
        unsigned int i;
        /* Shift mod so its highest set bit is at the maximum position. */
        int shift = (ndigits * 2 * 64) - vli_num_bits(mod, ndigits);
        int word_shift = shift / 64;
        int bit_shift = shift % 64;

        vli_clear(mod_m, word_shift);
        if (bit_shift > 0) {
                for (i = 0; i < ndigits; ++i) {
                        mod_m[word_shift + i] = (mod[i] << bit_shift) | carry;
                        carry = mod[i] >> (64 - bit_shift);
                }
        } else
                vli_set(mod_m + word_shift, mod, ndigits);

        for (i = 1; shift >= 0; --shift) {
                u64 borrow = 0;
                unsigned int j;

                for (j = 0; j < ndigits * 2; ++j) {
                        u64 diff = v[i][j] - mod_m[j] - borrow;

                        if (diff != v[i][j])
                                borrow = (diff > v[i][j]);
                        v[1 - i][j] = diff;
                }
                i = !(i ^ borrow); /* Swap the index if there was no borrow */
                vli_rshift1(mod_m, ndigits);
                mod_m[ndigits - 1] |= mod_m[ndigits] << (64 - 1);
                vli_rshift1(mod_m + ndigits, ndigits);
        }
        vli_set(result, v[i], ndigits);
}
/* Computes result = product % mod using Barrett's reduction with a
 * precomputed value mu appended to mod after ndigits. mu = (2^{2w} / mod)
 * has length ndigits + 1, where mu * (2^w - 1) must not overflow the
 * ndigits boundary.
 *
 * Reference:
 * R. Brent, P. Zimmermann. Modern Computer Arithmetic. 2010.
 * 2.4.1 Barrett's algorithm. Algorithm 2.5.
 */
static void vli_mmod_barrett(u64 *result, u64 *product, const u64 *mod,
                             unsigned int ndigits)
{
        u64 q[ECC_MAX_DIGITS * 2];
        u64 r[ECC_MAX_DIGITS * 2];
        const u64 *mu = mod + ndigits;

        vli_mult(q, product + ndigits, mu, ndigits);
        if (mu[ndigits])
                vli_add(q + ndigits, q + ndigits, product + ndigits, ndigits);
        vli_mult(r, mod, q + ndigits, ndigits);
        vli_sub(r, product, r, ndigits * 2);
        while (!vli_is_zero(r + ndigits, ndigits) ||
               vli_cmp(r, mod, ndigits) != -1) {
                u64 carry;

                carry = vli_sub(r, r, mod, ndigits);
                vli_usub(r + ndigits, r + ndigits, carry, ndigits);
        }
        vli_set(result, r, ndigits);
}
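/* The quotient estimate above is q = floor(floor(product / 2^w) * mu / 2^w)
 * with mu = floor(2^{2w} / mod), which undershoots the true quotient by at
 * most a small constant, so the while loop performs only a bounded number
 * of corrective subtractions. E.g. (illustrative only), for w = 8 and
 * mod = 201: mu = floor(65536 / 201) = 326, and for product = 50000 the
 * estimate floor(floor(50000 / 256) * 326 / 256) = 248 already equals the
 * true quotient floor(50000 / 201) = 248.
 */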
/* Computes result = product % curve_prime for the NIST P-192 prime.
 * See algorithms 5 and 6 from
 * http://www.isys.uni-klu.ac.at/PDF/2001-0126-MT.pdf
 */
static void vli_mmod_fast_192(u64 *result, const u64 *product,
                              const u64 *curve_prime, u64 *tmp)
{
        const unsigned int ndigits = 3;
        int carry;

        vli_set(result, product, ndigits);

        vli_set(tmp, &product[3], ndigits);
        carry = vli_add(result, result, tmp, ndigits);

        tmp[0] = 0;
        tmp[1] = product[3];
        tmp[2] = product[4];
        carry += vli_add(result, result, tmp, ndigits);

        tmp[0] = tmp[1] = product[5];
        tmp[2] = 0;
        carry += vli_add(result, result, tmp, ndigits);

        while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
                carry -= vli_sub(result, result, curve_prime, ndigits);
}
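/* The three additions implement the reduction identity for
 * p192 = 2^192 - 2^64 - 1: since 2^192 == 2^64 + 1 (mod p192), every
 * 64-bit word of the product above bit 191 folds back in at bit
 * positions 64 and 0.
 */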
/* Computes result = product % curve_prime for the NIST P-256 prime,
 * from http://www.nsa.gov/ia/_files/nist-routines.pdf
 */
static void vli_mmod_fast_256(u64 *result, const u64 *product,
                              const u64 *curve_prime, u64 *tmp)
{
        int carry;
        const unsigned int ndigits = 4;

        /* t */
        vli_set(result, product, ndigits);

        /* s1 */
        tmp[0] = 0;
        tmp[1] = product[5] & 0xffffffff00000000ull;
        tmp[2] = product[6];
        tmp[3] = product[7];
        carry = vli_lshift(tmp, tmp, 1, ndigits);
        carry += vli_add(result, result, tmp, ndigits);

        /* s2 */
        tmp[1] = product[6] << 32;
        tmp[2] = (product[6] >> 32) | (product[7] << 32);
        tmp[3] = product[7] >> 32;
        carry += vli_lshift(tmp, tmp, 1, ndigits);
        carry += vli_add(result, result, tmp, ndigits);

        /* s3 */
        tmp[0] = product[4];
        tmp[1] = product[5] & 0xffffffff;
        tmp[2] = 0;
        tmp[3] = product[7];
        carry += vli_add(result, result, tmp, ndigits);

        /* s4 */
        tmp[0] = (product[4] >> 32) | (product[5] << 32);
        tmp[1] = (product[5] >> 32) | (product[6] & 0xffffffff00000000ull);
        tmp[2] = product[7];
        tmp[3] = (product[6] >> 32) | (product[4] << 32);
        carry += vli_add(result, result, tmp, ndigits);

        /* d1 */
        tmp[0] = (product[5] >> 32) | (product[6] << 32);
        tmp[1] = (product[6] >> 32);
        tmp[2] = 0;
        tmp[3] = (product[4] & 0xffffffff) | (product[5] << 32);
        carry -= vli_sub(result, result, tmp, ndigits);

        /* d2 */
        tmp[0] = product[6];
        tmp[1] = product[7];
        tmp[2] = 0;
        tmp[3] = (product[4] >> 32) | (product[5] & 0xffffffff00000000ull);
        carry -= vli_sub(result, result, tmp, ndigits);

        /* d3 */
        tmp[0] = (product[6] >> 32) | (product[7] << 32);
        tmp[1] = (product[7] >> 32) | (product[4] << 32);
        tmp[2] = (product[4] >> 32) | (product[5] << 32);
        tmp[3] = (product[6] << 32);
        carry -= vli_sub(result, result, tmp, ndigits);

        /* d4 */
        tmp[0] = product[7];
        tmp[1] = product[4] & 0xffffffff00000000ull;
        tmp[2] = product[5];
        tmp[3] = product[6] & 0xffffffff00000000ull;
        carry -= vli_sub(result, result, tmp, ndigits);

        if (carry < 0) {
                do {
                        carry += vli_add(result, result, curve_prime, ndigits);
                } while (carry < 0);
        } else {
                while (carry || vli_cmp(curve_prime, result, ndigits) != 1)
                        carry -= vli_sub(result, result, curve_prime, ndigits);
        }
}
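/* s1..s4 and d1..d4 are the terms of the NIST fast-reduction identity for
 * p256 = 2^256 - 2^224 + 2^192 + 2^96 - 1: the high half of the 512-bit
 * product is rewritten as a signed sum of 32-bit-word rearrangements,
 * added (s-terms, with s1 and s2 doubled) or subtracted (d-terms), and the
 * accumulated carry or borrow is repaired at the end.
 */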
/* Computes result = product % curve_prime for different curve_primes.
 *
 * Note that curve_primes are distinguished just by a heuristic check and
 * not by a complete conformance check.
 */
static bool vli_mmod_fast(u64 *result, u64 *product,
                          const u64 *curve_prime, unsigned int ndigits)
{
        u64 tmp[2 * ECC_MAX_DIGITS];

        /* Currently, both NIST primes have -1 in the lowest qword. */
        if (curve_prime[0] != -1ull) {
                /* Try to handle pseudo-Mersenne primes. */
                if (curve_prime[ndigits - 1] == -1ull) {
                        vli_mmod_special(result, product, curve_prime,
                                         ndigits);
                        return true;
                } else if (curve_prime[ndigits - 1] == 1ull << 63 &&
                           curve_prime[ndigits - 2] == 0) {
                        vli_mmod_special2(result, product, curve_prime,
                                          ndigits);
                        return true;
                }
                vli_mmod_barrett(result, product, curve_prime, ndigits);
                return true;
        }

        switch (ndigits) {
        case 3:
                vli_mmod_fast_192(result, product, curve_prime, tmp);
                break;
        case 4:
                vli_mmod_fast_256(result, product, curve_prime, tmp);
                break;
        default:
                pr_err_ratelimited("ecc: unsupported digits size!\n");
                return false;
        }

        return true;
}
/* Computes result = (left * right) % mod.
 * Assumes that mod is a curve order large enough for the operands.
 */
void vli_mod_mult_slow(u64 *result, const u64 *left, const u64 *right,
                       const u64 *mod, unsigned int ndigits)
{
        u64 product[ECC_MAX_DIGITS * 2];

        vli_mult(product, left, right, ndigits);
        vli_mmod_slow(result, product, mod, ndigits);
}
EXPORT_SYMBOL(vli_mod_mult_slow);
/* Computes result = (left * right) % curve_prime. */
static void vli_mod_mult_fast(u64 *result, const u64 *left, const u64 *right,
                              const u64 *curve_prime, unsigned int ndigits)
{
        u64 product[2 * ECC_MAX_DIGITS];

        vli_mult(product, left, right, ndigits);
        vli_mmod_fast(result, product, curve_prime, ndigits);
}
/* Computes result = left^2 % curve_prime. */
static void vli_mod_square_fast(u64 *result, const u64 *left,
                                const u64 *curve_prime, unsigned int ndigits)
{
        u64 product[2 * ECC_MAX_DIGITS];

        vli_square(product, left, ndigits);
        vli_mmod_fast(result, product, curve_prime, ndigits);
}
#define EVEN(vli) (!(vli[0] & 1))
/* Computes result = (1 / input) % mod. All VLIs are the same size.
 * See "From Euclid's GCD to Montgomery Multiplication to the Great Divide"
 * https://labs.oracle.com/techrep/2001/smli_tr-2001-95.pdf
 */
void vli_mod_inv(u64 *result, const u64 *input, const u64 *mod,
                 unsigned int ndigits)
{
        u64 a[ECC_MAX_DIGITS], b[ECC_MAX_DIGITS];
        u64 u[ECC_MAX_DIGITS], v[ECC_MAX_DIGITS];
        u64 carry;
        int cmp_result;

        if (vli_is_zero(input, ndigits)) {
                vli_clear(result, ndigits);
                return;
        }

        vli_set(a, input, ndigits);
        vli_set(b, mod, ndigits);
        vli_clear(u, ndigits);
        u[0] = 1;
        vli_clear(v, ndigits);

        while ((cmp_result = vli_cmp(a, b, ndigits)) != 0) {
                carry = 0;

                if (EVEN(a)) {
                        vli_rshift1(a, ndigits);

                        if (!EVEN(u))
                                carry = vli_add(u, u, mod, ndigits);

                        vli_rshift1(u, ndigits);
                        if (carry)
                                u[ndigits - 1] |= 0x8000000000000000ull;
                } else if (EVEN(b)) {
                        vli_rshift1(b, ndigits);

                        if (!EVEN(v))
                                carry = vli_add(v, v, mod, ndigits);

                        vli_rshift1(v, ndigits);
                        if (carry)
                                v[ndigits - 1] |= 0x8000000000000000ull;
                } else if (cmp_result > 0) {
                        vli_sub(a, a, b, ndigits);
                        vli_rshift1(a, ndigits);

                        if (vli_cmp(u, v, ndigits) < 0)
                                vli_add(u, u, mod, ndigits);

                        vli_sub(u, u, v, ndigits);
                        if (!EVEN(u))
                                carry = vli_add(u, u, mod, ndigits);

                        vli_rshift1(u, ndigits);
                        if (carry)
                                u[ndigits - 1] |= 0x8000000000000000ull;
                } else {
                        vli_sub(b, b, a, ndigits);
                        vli_rshift1(b, ndigits);

                        if (vli_cmp(v, u, ndigits) < 0)
                                vli_add(v, v, mod, ndigits);

                        vli_sub(v, v, u, ndigits);
                        if (!EVEN(v))
                                carry = vli_add(v, v, mod, ndigits);

                        vli_rshift1(v, ndigits);
                        if (carry)
                                v[ndigits - 1] |= 0x8000000000000000ull;
                }
        }

        vli_set(result, u, ndigits);
}
EXPORT_SYMBOL(vli_mod_inv);
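/* This is the binary extended Euclidean algorithm: the loop maintains the
 * invariants a == u * input (mod mod) and b == v * input (mod mod), and
 * halving u or v modulo an odd mod is done by first adding mod whenever
 * the value is odd. When a and b converge to their gcd (1 for a prime
 * mod), u holds the inverse.
 */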
/* ------ Point operations ------ */

/* Returns true if point is the point at infinity, false otherwise. */
static bool ecc_point_is_zero(const struct ecc_point *point)
{
        return (vli_is_zero(point->x, point->ndigits) &&
                vli_is_zero(point->y, point->ndigits));
}
/* Point multiplication algorithm using Montgomery's ladder with co-Z
 * coordinates. From http://eprint.iacr.org/2011/338.pdf
 */

/* Double in place */
static void ecc_point_double_jacobian(u64 *x1, u64 *y1, u64 *z1,
                                      u64 *curve_prime, unsigned int ndigits)
{
        /* t1 = x, t2 = y, t3 = z */
        u64 t4[ECC_MAX_DIGITS];
        u64 t5[ECC_MAX_DIGITS];

        if (vli_is_zero(z1, ndigits))
                return;

        /* t4 = y1^2 */
        vli_mod_square_fast(t4, y1, curve_prime, ndigits);
        /* t5 = x1*y1^2 = A */
        vli_mod_mult_fast(t5, x1, t4, curve_prime, ndigits);
        /* t4 = y1^4 */
        vli_mod_square_fast(t4, t4, curve_prime, ndigits);
        /* t2 = y1*z1 = z3 */
        vli_mod_mult_fast(y1, y1, z1, curve_prime, ndigits);
        /* t3 = z1^2 */
        vli_mod_square_fast(z1, z1, curve_prime, ndigits);

        /* t1 = x1 + z1^2 */
        vli_mod_add(x1, x1, z1, curve_prime, ndigits);
        /* t3 = 2*z1^2 */
        vli_mod_add(z1, z1, z1, curve_prime, ndigits);
        /* t3 = x1 - z1^2 */
        vli_mod_sub(z1, x1, z1, curve_prime, ndigits);
        /* t1 = x1^2 - z1^4 */
        vli_mod_mult_fast(x1, x1, z1, curve_prime, ndigits);

        /* t3 = 2*(x1^2 - z1^4) */
        vli_mod_add(z1, x1, x1, curve_prime, ndigits);
        /* t1 = 3*(x1^2 - z1^4) */
        vli_mod_add(x1, x1, z1, curve_prime, ndigits);
        /* Halve t1 modulo the prime: if it is odd, add the (odd) prime
         * first so the shifted-out bit is zero, then shift right.
         */
        if (vli_test_bit(x1, 0)) {
                u64 carry = vli_add(x1, x1, curve_prime, ndigits);

                vli_rshift1(x1, ndigits);
                x1[ndigits - 1] |= carry << 63;
        } else {
                vli_rshift1(x1, ndigits);
        }
        /* t1 = 3/2*(x1^2 - z1^4) = B */

        /* t3 = B^2 */
        vli_mod_square_fast(z1, x1, curve_prime, ndigits);
        /* t3 = B^2 - A */
        vli_mod_sub(z1, z1, t5, curve_prime, ndigits);
        /* t3 = B^2 - 2A = x3 */
        vli_mod_sub(z1, z1, t5, curve_prime, ndigits);
        /* t5 = A - x3 */
        vli_mod_sub(t5, t5, z1, curve_prime, ndigits);
        /* t1 = B * (A - x3) */
        vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
        /* t4 = B * (A - x3) - y1^4 = y3 */
        vli_mod_sub(t4, x1, t4, curve_prime, ndigits);

        vli_set(x1, z1, ndigits);
        vli_set(z1, y1, ndigits);
        vli_set(y1, t4, ndigits);
}
/* Modify (x1, y1) => (x1 * z^2, y1 * z^3) */
static void apply_z(u64 *x1, u64 *y1, u64 *z, u64 *curve_prime,
                    unsigned int ndigits)
{
        u64 t1[ECC_MAX_DIGITS];

        vli_mod_square_fast(t1, z, curve_prime, ndigits);    /* z^2 */
        vli_mod_mult_fast(x1, x1, t1, curve_prime, ndigits); /* x1 * z^2 */
        vli_mod_mult_fast(t1, t1, z, curve_prime, ndigits);  /* z^3 */
        vli_mod_mult_fast(y1, y1, t1, curve_prime, ndigits); /* y1 * z^3 */
}
/* P = (x1, y1) => 2P, (x2, y2) => P' */
static void xycz_initial_double(u64 *x1, u64 *y1, u64 *x2, u64 *y2,
                                u64 *p_initial_z, u64 *curve_prime,
                                unsigned int ndigits)
{
        u64 z[ECC_MAX_DIGITS];

        vli_set(x2, x1, ndigits);
        vli_set(y2, y1, ndigits);

        vli_clear(z, ndigits);
        z[0] = 1;

        if (p_initial_z)
                vli_set(z, p_initial_z, ndigits);

        apply_z(x1, y1, z, curve_prime, ndigits);

        ecc_point_double_jacobian(x1, y1, z, curve_prime, ndigits);

        apply_z(x2, y2, z, curve_prime, ndigits);
}
/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
 * Output P' = (x1', y1', Z3), P + Q = (x3, y3, Z3)
 * or P => P', Q => P + Q
 */
static void xycz_add(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
                     unsigned int ndigits)
{
        /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
        u64 t5[ECC_MAX_DIGITS];

        /* t5 = x2 - x1 */
        vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
        /* t5 = (x2 - x1)^2 = A */
        vli_mod_square_fast(t5, t5, curve_prime, ndigits);
        /* t1 = x1*A = B */
        vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
        /* t3 = x2*A = C */
        vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits);
        /* t4 = y2 - y1 */
        vli_mod_sub(y2, y2, y1, curve_prime, ndigits);
        /* t5 = (y2 - y1)^2 = D */
        vli_mod_square_fast(t5, y2, curve_prime, ndigits);

        /* t5 = D - B */
        vli_mod_sub(t5, t5, x1, curve_prime, ndigits);
        /* t5 = D - B - C = x3 */
        vli_mod_sub(t5, t5, x2, curve_prime, ndigits);
        /* t3 = C - B */
        vli_mod_sub(x2, x2, x1, curve_prime, ndigits);
        /* t2 = y1*(C - B) */
        vli_mod_mult_fast(y1, y1, x2, curve_prime, ndigits);
        /* t3 = B - x3 */
        vli_mod_sub(x2, x1, t5, curve_prime, ndigits);
        /* t4 = (y2 - y1)*(B - x3) */
        vli_mod_mult_fast(y2, y2, x2, curve_prime, ndigits);
        /* t4 = y3 */
        vli_mod_sub(y2, y2, y1, curve_prime, ndigits);

        vli_set(x2, t5, ndigits);
}
/* Input P = (x1, y1, Z), Q = (x2, y2, Z)
 * Output P + Q = (x3, y3, Z3), P - Q = (x3', y3', Z3)
 * or P => P - Q, Q => P + Q
 */
static void xycz_add_c(u64 *x1, u64 *y1, u64 *x2, u64 *y2, u64 *curve_prime,
                       unsigned int ndigits)
{
        /* t1 = X1, t2 = Y1, t3 = X2, t4 = Y2 */
        u64 t5[ECC_MAX_DIGITS];
        u64 t6[ECC_MAX_DIGITS];
        u64 t7[ECC_MAX_DIGITS];

        /* t5 = x2 - x1 */
        vli_mod_sub(t5, x2, x1, curve_prime, ndigits);
        /* t5 = (x2 - x1)^2 = A */
        vli_mod_square_fast(t5, t5, curve_prime, ndigits);
        /* t1 = x1*A = B */
        vli_mod_mult_fast(x1, x1, t5, curve_prime, ndigits);
        /* t3 = x2*A = C */
        vli_mod_mult_fast(x2, x2, t5, curve_prime, ndigits);
        /* t5 = y2 + y1 */
        vli_mod_add(t5, y2, y1, curve_prime, ndigits);
        /* t4 = y2 - y1 */
        vli_mod_sub(y2, y2, y1, curve_prime, ndigits);

        /* t6 = C - B */
        vli_mod_sub(t6, x2, x1, curve_prime, ndigits);
        /* t2 = y1 * (C - B) */
        vli_mod_mult_fast(y1, y1, t6, curve_prime, ndigits);
        /* t6 = B + C */
        vli_mod_add(t6, x1, x2, curve_prime, ndigits);
        /* t3 = (y2 - y1)^2 */
        vli_mod_square_fast(x2, y2, curve_prime, ndigits);
        /* t3 = x3 */
        vli_mod_sub(x2, x2, t6, curve_prime, ndigits);

        /* t7 = B - x3 */
        vli_mod_sub(t7, x1, x2, curve_prime, ndigits);
        /* t4 = (y2 - y1)*(B - x3) */
        vli_mod_mult_fast(y2, y2, t7, curve_prime, ndigits);
        /* t4 = y3 */
        vli_mod_sub(y2, y2, y1, curve_prime, ndigits);

        /* t7 = (y2 + y1)^2 = F */
        vli_mod_square_fast(t7, t5, curve_prime, ndigits);
        /* t7 = x3' */
        vli_mod_sub(t7, t7, t6, curve_prime, ndigits);
        /* t6 = x3' - B */
        vli_mod_sub(t6, t7, x1, curve_prime, ndigits);
        /* t6 = (y2 + y1)*(x3' - B) */
        vli_mod_mult_fast(t6, t6, t5, curve_prime, ndigits);
        /* t2 = y3' */
        vli_mod_sub(y1, t6, y1, curve_prime, ndigits);

        vli_set(x1, t7, ndigits);
}
static void ecc_point_mult(struct ecc_point *result,
                           const struct ecc_point *point, const u64 *scalar,
                           u64 *initial_z, const struct ecc_curve *curve,
                           unsigned int ndigits)
{
        /* R0 and R1 */
        u64 rx[2][ECC_MAX_DIGITS];
        u64 ry[2][ECC_MAX_DIGITS];
        u64 z[ECC_MAX_DIGITS];
        u64 sk[2][ECC_MAX_DIGITS];
        u64 *curve_prime = curve->p;
        int i, nb;
        int num_bits;
        int carry;

        /* Regularize the scalar to a fixed bit length by adding the curve
         * order n once or twice, so the ladder always runs the same number
         * of iterations.
         */
        carry = vli_add(sk[0], scalar, curve->n, ndigits);
        vli_add(sk[1], sk[0], curve->n, ndigits);
        scalar = sk[!carry];
        num_bits = sizeof(u64) * ndigits * 8 + 1;

        vli_set(rx[1], point->x, ndigits);
        vli_set(ry[1], point->y, ndigits);

        xycz_initial_double(rx[1], ry[1], rx[0], ry[0], initial_z, curve_prime,
                            ndigits);

        for (i = num_bits - 2; i > 0; i--) {
                nb = !vli_test_bit(scalar, i);
                xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime,
                           ndigits);
                xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime,
                         ndigits);
        }

        nb = !vli_test_bit(scalar, 0);
        xycz_add_c(rx[1 - nb], ry[1 - nb], rx[nb], ry[nb], curve_prime,
                   ndigits);

        /* Find final 1/Z value. */
        /* X1 - X0 */
        vli_mod_sub(z, rx[1], rx[0], curve_prime, ndigits);
        /* Yb * (X1 - X0) */
        vli_mod_mult_fast(z, z, ry[1 - nb], curve_prime, ndigits);
        /* xP * Yb * (X1 - X0) */
        vli_mod_mult_fast(z, z, point->x, curve_prime, ndigits);

        /* 1 / (xP * Yb * (X1 - X0)) */
        vli_mod_inv(z, z, curve_prime, point->ndigits);

        /* yP / (xP * Yb * (X1 - X0)) */
        vli_mod_mult_fast(z, z, point->y, curve_prime, ndigits);
        /* Xb * yP / (xP * Yb * (X1 - X0)) */
        vli_mod_mult_fast(z, z, rx[1 - nb], curve_prime, ndigits);
        /* End 1/Z calculation */

        xycz_add(rx[nb], ry[nb], rx[1 - nb], ry[1 - nb], curve_prime, ndigits);

        apply_z(rx[0], ry[0], z, curve_prime, ndigits);

        vli_set(result->x, rx[0], ndigits);
        vli_set(result->y, ry[0], ndigits);
}
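/* Each ladder iteration performs the same xycz_add_c/xycz_add pair whether
 * the scalar bit is 0 or 1; only the index nb selects which of R0/R1 plays
 * which role. Together with the fixed-length regularized scalar, this keeps
 * the sequence of field operations independent of the secret scalar.
 */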
/* Computes R = P + Q mod p */
static void ecc_point_add(const struct ecc_point *result,
                          const struct ecc_point *p, const struct ecc_point *q,
                          const struct ecc_curve *curve)
{
        u64 z[ECC_MAX_DIGITS];
        u64 px[ECC_MAX_DIGITS];
        u64 py[ECC_MAX_DIGITS];
        unsigned int ndigits = curve->g.ndigits;

        vli_set(result->x, q->x, ndigits);
        vli_set(result->y, q->y, ndigits);
        vli_mod_sub(z, result->x, p->x, curve->p, ndigits);
        vli_set(px, p->x, ndigits);
        vli_set(py, p->y, ndigits);
        xycz_add(px, py, result->x, result->y, curve->p, ndigits);
        vli_mod_inv(z, z, curve->p, ndigits);
        apply_z(result->x, result->y, z, curve->p, ndigits);
}
/* Computes R = u1P + u2Q mod p using Shamir's trick.
 * Based on: Kenneth MacKay's micro-ecc (2014).
 */
void ecc_point_mult_shamir(const struct ecc_point *result,
                           const u64 *u1, const struct ecc_point *p,
                           const u64 *u2, const struct ecc_point *q,
                           const struct ecc_curve *curve)
{
        u64 z[ECC_MAX_DIGITS];
        u64 sump[2][ECC_MAX_DIGITS];
        u64 *rx = result->x;
        u64 *ry = result->y;
        unsigned int ndigits = curve->g.ndigits;
        unsigned int num_bits;
        struct ecc_point sum = ECC_POINT_INIT(sump[0], sump[1], ndigits);
        const struct ecc_point *points[4];
        const struct ecc_point *point;
        unsigned int idx;
        int i;

        ecc_point_add(&sum, p, q, curve);
        points[0] = NULL;
        points[1] = p;
        points[2] = q;
        points[3] = &sum;

        num_bits = max(vli_num_bits(u1, ndigits),
                       vli_num_bits(u2, ndigits));
        i = num_bits - 1;
        idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
        point = points[idx];

        vli_set(rx, point->x, ndigits);
        vli_set(ry, point->y, ndigits);
        vli_clear(z + 1, ndigits - 1);
        z[0] = 1;

        for (--i; i >= 0; i--) {
                ecc_point_double_jacobian(rx, ry, z, curve->p, ndigits);
                idx = (!!vli_test_bit(u1, i)) | ((!!vli_test_bit(u2, i)) << 1);
                point = points[idx];
                if (point) {
                        u64 tx[ECC_MAX_DIGITS];
                        u64 ty[ECC_MAX_DIGITS];
                        u64 tz[ECC_MAX_DIGITS];

                        vli_set(tx, point->x, ndigits);
                        vli_set(ty, point->y, ndigits);
                        apply_z(tx, ty, z, curve->p, ndigits);
                        vli_mod_sub(tz, rx, tx, curve->p, ndigits);
                        xycz_add(tx, ty, rx, ry, curve->p, ndigits);
                        vli_mod_mult_fast(z, z, tz, curve->p, ndigits);
                }
        }
        vli_mod_inv(z, z, curve->p, ndigits);
        apply_z(rx, ry, z, curve->p, ndigits);
}
EXPORT_SYMBOL(ecc_point_mult_shamir);
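/* Shamir's trick shares one Jacobian doubling per bit between the two
 * scalars: at each bit position the two-bit index idx selects nothing, P,
 * Q or the precomputed P + Q as the point to add, roughly halving the cost
 * of computing u1*P and u2*Q separately. Unlike the Montgomery ladder
 * above, the work here depends on the scalar bits, so this routine is
 * intended for public values such as signature verification inputs.
 */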
/* Converts between the external digit layout (big-endian digits, most
 * significant digit first) and the internal one (CPU-endian digits, least
 * significant digit first).
 */
static inline void ecc_swap_digits(const u64 *in, u64 *out,
                                   unsigned int ndigits)
{
        const __be64 *src = (__force __be64 *)in;
        int i;

        for (i = 0; i < ndigits; i++)
                out[i] = be64_to_cpu(src[ndigits - 1 - i]);
}
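/* Example (illustrative only): for ndigits = 2 and input words holding the
 * big-endian byte sequences 00 11 22 33 44 55 66 77 and
 * 88 99 aa bb cc dd ee ff, a little-endian CPU gets
 * out = { 0x8899aabbccddeeff, 0x0011223344556677 }: the digit order and
 * the byte order within each digit are both reversed.
 */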
static int __ecc_is_key_valid(const struct ecc_curve *curve,
                              const u64 *private_key, unsigned int ndigits)
{
        u64 one[ECC_MAX_DIGITS] = { 1, };
        u64 res[ECC_MAX_DIGITS];

        if (!private_key)
                return -EINVAL;

        if (curve->g.ndigits != ndigits)
                return -EINVAL;

        /* Make sure the private key is in the range [2, n-3]. */
        if (vli_cmp(one, private_key, ndigits) != -1)
                return -EINVAL;
        vli_sub(res, curve->n, one, ndigits);
        vli_sub(res, res, one, ndigits);
        if (vli_cmp(res, private_key, ndigits) != 1)
                return -EINVAL;

        return 0;
}
int ecc_is_key_valid(unsigned int curve_id, unsigned int ndigits,
                     const u64 *private_key, unsigned int private_key_len)
{
        int nbytes;
        const struct ecc_curve *curve = ecc_get_curve(curve_id);

        nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;

        if (private_key_len != nbytes)
                return -EINVAL;

        return __ecc_is_key_valid(curve, private_key, ndigits);
}
EXPORT_SYMBOL(ecc_is_key_valid);
/*
 * ECC private keys are generated using the method of extra random bits,
 * equivalent to that described in FIPS 186-4, Appendix B.4.1.
 *
 * d = (c mod (n - 1)) + 1    where c is a string of random bits, 64 bits
 *                            longer than requested
 * 0 <= c mod (n - 1) <= n - 2  and implies that
 * 1 <= d <= n - 1
 *
 * This method generates a private key uniformly distributed in the range
 * [1, n - 1].
 */
int ecc_gen_privkey(unsigned int curve_id, unsigned int ndigits, u64 *privkey)
{
        const struct ecc_curve *curve = ecc_get_curve(curve_id);
        u64 priv[ECC_MAX_DIGITS];
        unsigned int nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;
        unsigned int nbits = vli_num_bits(curve->n, ndigits);
        int err;

        /* Check that N is included in Table 1 of FIPS 186-4, section 6.1.1 */
        if (nbits < 160 || ndigits > ARRAY_SIZE(priv))
                return -EINVAL;

        /*
         * FIPS 186-4 recommends that the private key should be obtained from a
         * RBG with a security strength equal to or greater than the security
         * strength associated with N.
         *
         * The maximum security strength identified by NIST SP800-57pt1r4 for
         * ECC is 256 (N >= 512).
         *
         * This condition is met by the default RNG because it selects a
         * favored DRBG with a security strength of 256.
         */
        if (crypto_get_default_rng())
                return -EFAULT;

        err = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
        crypto_put_default_rng();
        if (err)
                return err;

        /* Make sure the private key is in the valid range. */
        if (__ecc_is_key_valid(curve, priv, ndigits))
                return -EINVAL;

        ecc_swap_digits(priv, privkey, ndigits);

        return 0;
}
EXPORT_SYMBOL(ecc_gen_privkey);
int ecc_make_pub_key(unsigned int curve_id, unsigned int ndigits,
                     const u64 *private_key, u64 *public_key)
{
        int ret = 0;
        struct ecc_point *pk;
        u64 priv[ECC_MAX_DIGITS];
        const struct ecc_curve *curve = ecc_get_curve(curve_id);

        if (!private_key || !curve || ndigits > ARRAY_SIZE(priv)) {
                ret = -EINVAL;
                goto out;
        }

        ecc_swap_digits(private_key, priv, ndigits);

        pk = ecc_alloc_point(ndigits);
        if (!pk) {
                ret = -ENOMEM;
                goto out;
        }

        ecc_point_mult(pk, &curve->g, priv, NULL, curve, ndigits);
        if (ecc_point_is_zero(pk)) {
                ret = -EAGAIN;
                goto err_free_point;
        }

        ecc_swap_digits(pk->x, public_key, ndigits);
        ecc_swap_digits(pk->y, &public_key[ndigits], ndigits);

err_free_point:
        ecc_free_point(pk);
out:
        return ret;
}
EXPORT_SYMBOL(ecc_make_pub_key);
/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
int ecc_is_pubkey_valid_partial(const struct ecc_curve *curve,
                                struct ecc_point *pk)
{
        u64 yy[ECC_MAX_DIGITS], xxx[ECC_MAX_DIGITS], w[ECC_MAX_DIGITS];

        if (WARN_ON(pk->ndigits != curve->g.ndigits))
                return -EINVAL;

        /* Check 1: Verify key is not the zero point. */
        if (ecc_point_is_zero(pk))
                return -EINVAL;

        /* Check 2: Verify key is in the range [1, p-1]. */
        if (vli_cmp(curve->p, pk->x, pk->ndigits) != 1)
                return -EINVAL;
        if (vli_cmp(curve->p, pk->y, pk->ndigits) != 1)
                return -EINVAL;

        /* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */
        vli_mod_square_fast(yy, pk->y, curve->p, pk->ndigits); /* y^2 */
        vli_mod_square_fast(xxx, pk->x, curve->p, pk->ndigits); /* x^2 */
        vli_mod_mult_fast(xxx, xxx, pk->x, curve->p, pk->ndigits); /* x^3 */
        vli_mod_mult_fast(w, curve->a, pk->x, curve->p, pk->ndigits); /* a·x */
        vli_mod_add(w, w, curve->b, curve->p, pk->ndigits); /* a·x + b */
        vli_mod_add(w, w, xxx, curve->p, pk->ndigits); /* x^3 + a·x + b */
        if (vli_cmp(yy, w, pk->ndigits) != 0) /* Equation */
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL(ecc_is_pubkey_valid_partial);
int crypto_ecdh_shared_secret(unsigned int curve_id, unsigned int ndigits,
                              const u64 *private_key, const u64 *public_key,
                              u64 *secret)
{
        int ret = 0;
        struct ecc_point *product, *pk;
        u64 priv[ECC_MAX_DIGITS];
        u64 rand_z[ECC_MAX_DIGITS];
        unsigned int nbytes;
        const struct ecc_curve *curve = ecc_get_curve(curve_id);

        if (!private_key || !public_key || !curve ||
            ndigits > ARRAY_SIZE(priv) || ndigits > ARRAY_SIZE(rand_z)) {
                ret = -EINVAL;
                goto out;
        }

        nbytes = ndigits << ECC_DIGITS_TO_BYTES_SHIFT;

        /* Random initial Z blinds the point representation during the
         * ladder (projective coordinate randomization).
         */
        get_random_bytes(rand_z, nbytes);

        pk = ecc_alloc_point(ndigits);
        if (!pk) {
                ret = -ENOMEM;
                goto out;
        }

        ecc_swap_digits(public_key, pk->x, ndigits);
        ecc_swap_digits(&public_key[ndigits], pk->y, ndigits);
        ret = ecc_is_pubkey_valid_partial(curve, pk);
        if (ret)
                goto err_alloc_product;

        ecc_swap_digits(private_key, priv, ndigits);

        product = ecc_alloc_point(ndigits);
        if (!product) {
                ret = -ENOMEM;
                goto err_alloc_product;
        }

        ecc_point_mult(product, pk, priv, rand_z, curve, ndigits);

        ecc_swap_digits(product->x, secret, ndigits);

        if (ecc_point_is_zero(product))
                ret = -EFAULT;

        ecc_free_point(product);
err_alloc_product:
        ecc_free_point(pk);
out:
        return ret;
}
EXPORT_SYMBOL(crypto_ecdh_shared_secret);
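/* A minimal usage sketch of the exported ECDH primitives (illustrative
 * only; error handling elided, and in-kernel users normally go through the
 * kpp API rather than calling these directly). peer_pub stands for the
 * other party's public key and is hypothetical here:
 *
 *      u64 priv[ECC_MAX_DIGITS], pub[2 * ECC_MAX_DIGITS];
 *      u64 shared[ECC_MAX_DIGITS];
 *      const unsigned int ndigits = 4; // NIST P-256
 *
 *      ecc_gen_privkey(ECC_CURVE_NIST_P256, ndigits, priv);
 *      ecc_make_pub_key(ECC_CURVE_NIST_P256, ndigits, priv, pub);
 *      crypto_ecdh_shared_secret(ECC_CURVE_NIST_P256, ndigits, priv,
 *                                peer_pub, shared);
 */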
MODULE_LICENSE("Dual BSD/GPL");