[libtommath.git] / mp_prime_frobenius_underwood.c
#include "tommath_private.h"
#ifdef MP_PRIME_FROBENIUS_UNDERWOOD_C

/* LibTomMath, multiple-precision integer library -- Tom St Denis */
/* SPDX-License-Identifier: Unlicense */

/*
 * See file mp_prime_is_prime.c or the documentation in doc/bn.tex for the details
 */
#ifndef LTM_USE_ONLY_MR
/*
 * floor of positive solution of
 * (2^31)-1 = (a+4)*(2*a+5)
 * TODO: Both values are smaller than N^(1/4); we would have to use a bigint
 *       for a instead, but any a bigger than about 120 is already so rare that
 *       it is possible to ignore them and still get enough pseudoprimes.
 *       But it is still a restriction of the set of available pseudoprimes,
 *       which makes this implementation less secure if used stand-alone.
 */
#define LTM_FROBENIUS_UNDERWOOD_A 32764
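
/*
 * Sanity check of the bound: a = 32764 gives (a+4)*(2*a+5) = 32768*65533 =
 * 2147385344 <= 2^31-1, while a = 32765 gives 32769*65535 = 2147516415 >
 * 2^31-1, so 32764 is the floor and the product (a+4)*(2*a+5) used below
 * always fits a 32-bit value.
 */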

mp_err mp_prime_frobenius_underwood(const mp_int *N, bool *result)
{
   mp_int T1z, T2z, Np1z, sz, tz;
   int a, ap2, i;
   mp_err err;

   if ((err = mp_init_multi(&T1z, &T2z, &Np1z, &sz, &tz, NULL)) != MP_OKAY) {
      return err;
   }

   for (a = 0; a < LTM_FROBENIUS_UNDERWOOD_A; a++) {
      int j;

      /* TODO: That's ugly! No, really, it is! */
      if ((a==2) || (a==4) || (a==7) || (a==8) || (a==10) ||
          (a==14) || (a==18) || (a==23) || (a==26) || (a==28)) {
         continue;
      }

      mp_set_i32(&T1z, (int32_t)((a * a) - 4));

      if ((err = mp_kronecker(&T1z, N, &j)) != MP_OKAY) goto LBL_END;

      if (j == -1) {
         break;
      }

      if (j == 0) {
         /* composite */
         *result = false;
         goto LBL_END;
      }
   }
   /* No a with (a*a - 4 | N) == -1 was found below the limit: give up and report MP_ITER */
   if (a >= LTM_FROBENIUS_UNDERWOOD_A) {
      err = MP_ITER;
      goto LBL_END;
   }
   /* Composite if N and (a+4)*(2*a+5) are not coprime */
   mp_set_u32(&T1z, (uint32_t)((a+4)*((2*a)+5)));

   if ((err = mp_gcd(N, &T1z, &T1z)) != MP_OKAY) goto LBL_END;

   if (!((T1z.used == 1) && (T1z.dp[0] == 1u))) {
      /* composite */
      *result = false;
      goto LBL_END;
   }
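
   /*
    * Note: a+4 and 2*a+5 are the trace and norm of x+2 in Z[x]/(x^2 - a*x + 1),
    * the element whose (N+1)-th power is computed below, which is presumably
    * why N must not share a factor with their product.
    */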

   ap2 = a + 2;
   if ((err = mp_add_d(N, 1uL, &Np1z)) != MP_OKAY) goto LBL_END;

   mp_set(&sz, 1uL);
   mp_set(&tz, 2uL);
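
   /*
    * sz and tz are the coefficients of sz*x + tz in Z[x]/(N, x^2 - a*x + 1).
    * They start as x + 2 (accounting for the top bit of N+1); the loop below
    * is a left-to-right square-and-multiply computing (x+2)^(N+1) in that ring.
    */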

   for (i = mp_count_bits(&Np1z) - 2; i >= 0; i--) {
      /*
       * temp = (sz*(a*sz+2*tz))%N;
       * tz   = ((tz-sz)*(tz+sz))%N;
       * sz   = temp;
       */
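      /*
       * I.e. squaring of sz*x + tz: with x^2 = a*x - 1,
       * (s*x + t)^2 = (a*s^2 + 2*s*t)*x + (t^2 - s^2).
       */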
      if ((err = mp_mul_2(&tz, &T2z)) != MP_OKAY) goto LBL_END;

      /* a = 0 at about 50% of the cases (non-square and odd input) */
      if (a != 0) {
         if ((err = mp_mul_d(&sz, (mp_digit)a, &T1z)) != MP_OKAY) goto LBL_END;
         if ((err = mp_add(&T1z, &T2z, &T2z)) != MP_OKAY) goto LBL_END;
      }

      if ((err = mp_mul(&T2z, &sz, &T1z)) != MP_OKAY) goto LBL_END;
      if ((err = mp_sub(&tz, &sz, &T2z)) != MP_OKAY) goto LBL_END;
      if ((err = mp_add(&sz, &tz, &sz)) != MP_OKAY) goto LBL_END;
      if ((err = mp_mul(&sz, &T2z, &tz)) != MP_OKAY) goto LBL_END;
      if ((err = mp_mod(&tz, N, &tz)) != MP_OKAY) goto LBL_END;
      if ((err = mp_mod(&T1z, N, &sz)) != MP_OKAY) goto LBL_END;
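      /* multiply by x + 2 when the corresponding bit of N+1 is set */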
      if (s_mp_get_bit(&Np1z, i)) {
         /*
          * temp = (a+2) * sz + tz
          * tz   = 2 * tz - sz
          * sz   = temp
          */
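         /*
          * I.e. multiplication of sz*x + tz by x + 2: with x^2 = a*x - 1,
          * (s*x + t)*(x + 2) = ((a+2)*s + t)*x + (2*t - s).
          */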
         if (a == 0) {
            if ((err = mp_mul_2(&sz, &T1z)) != MP_OKAY) goto LBL_END;
         } else {
            if ((err = mp_mul_d(&sz, (mp_digit)ap2, &T1z)) != MP_OKAY) goto LBL_END;
         }
         if ((err = mp_add(&T1z, &tz, &T1z)) != MP_OKAY) goto LBL_END;
         if ((err = mp_mul_2(&tz, &T2z)) != MP_OKAY) goto LBL_END;
         if ((err = mp_sub(&T2z, &sz, &tz)) != MP_OKAY) goto LBL_END;
         mp_exch(&sz, &T1z);
      }
   }
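
   /*
    * For prime N with (a^2-4 | N) == -1 the Frobenius map sends x to its
    * conjugate, so (x+2)^(N+1) reduces to the norm of x+2, i.e. 2*a+5 mod N.
    * The test therefore accepts N exactly when sz == 0 and tz == 2*a+5 mod N.
    */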
   mp_set_u32(&T1z, (uint32_t)((2 * a) + 5));
   if ((err = mp_mod(&T1z, N, &T1z)) != MP_OKAY) goto LBL_END;

   *result = mp_iszero(&sz) && (mp_cmp(&tz, &T1z) == MP_EQ);

LBL_END:
   mp_clear_multi(&tz, &sz, &Np1z, &T2z, &T1z, NULL);
   return err;
}

#endif
#endif
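
/*
 * Minimal usage sketch, assuming mp_prime_frobenius_underwood is exposed
 * through tommath.h as in current LibTomMath (the test value 2^61-1 is a
 * Mersenne prime; the surrounding main() is illustrative only):
 *
 *    #include <stdbool.h>
 *    #include <stdio.h>
 *    #include "tommath.h"
 *
 *    int main(void)
 *    {
 *       mp_int n;
 *       bool probably_prime;
 *       if (mp_init(&n) != MP_OKAY) return 1;
 *       if ((mp_read_radix(&n, "2305843009213693951", 10) == MP_OKAY) &&
 *           (mp_prime_frobenius_underwood(&n, &probably_prime) == MP_OKAY)) {
 *          printf("probable prime: %s\n", probably_prime ? "yes" : "no");
 *       }
 *       mp_clear(&n);
 *       return 0;
 *    }
 */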