1 /* $OpenBSD: ecp_nistputil.c,v 1.6 2014/07/10 22:45:57 jsing Exp $ */
/*
 * Written by Bodo Moeller for the OpenSSL project.
 *
 * Copyright (c) 2011 Google Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
23 #include <openssl/opensslconf.h>
25 #ifndef OPENSSL_NO_EC_NISTP_64_GCC_128
/*
 * Common utility functions for ecp_nistp224.c, ecp_nistp256.c, ecp_nistp521.c.
 */
/* Convert an array of points into affine coordinates.
 * (If the point at infinity is found (Z = 0), it remains unchanged.)
 * This function is essentially an equivalent to EC_POINTs_make_affine(), but
 * works with the internal representation of points as used by ecp_nistp###.c
 * rather than with (BIGNUM-based) EC_POINT data structures.
 *
 * point_array is the input/output buffer ('num' points in projective form,
 * i.e. three coordinates each), based on an internal representation of
 * field elements of size 'felem_size'.
 *
 * tmp_felems needs to point to a temporary array of 'num'+1 field elements
 * for storage of intermediate values.
 */
/*
 * Convert 'num' points from Jacobian projective coordinates (X, Y, Z) to
 * affine coordinates (X/Z^2, Y/Z^3, 1), in place.  A point at infinity
 * (Z = 0) is left unchanged.
 *
 * Instead of one modular inversion per point, this uses simultaneous
 * inversion (Montgomery's trick): accumulate the running products
 * Z(0)..Z(i), invert only the final product, then walk backwards
 * recovering each individual 1/Z(i) with multiplications only.
 *
 * point_array: input/output buffer of 'num' points, three field elements
 *              per point, 'felem_size' bytes per field element.
 * tmp_felems:  scratch space for 'num' + 1 field elements.
 * felem_*:     callbacks implementing the field arithmetic of the curve
 *              in question (supplied by ecp_nistp224/256/521.c).
 */
void
ec_GFp_nistp_points_make_affine_internal(size_t num, void *point_array,
    size_t felem_size, void *tmp_felems,
    void (*felem_one) (void *out),
    int (*felem_is_zero) (const void *in),
    void (*felem_assign) (void *out, const void *in),
    void (*felem_square) (void *out, const void *in),
    void (*felem_mul) (void *out, const void *in1, const void *in2),
    void (*felem_inv) (void *out, const void *in),
    void (*felem_contract) (void *out, const void *in))
{
	int i = 0;

	/* Accessors into the caller's opaque felem arrays. */
#define tmp_felem(I) (&((char *)tmp_felems)[(I) * felem_size])
#define X(I) (&((char *)point_array)[3*(I) * felem_size])
#define Y(I) (&((char *)point_array)[(3*(I) + 1) * felem_size])
#define Z(I) (&((char *)point_array)[(3*(I) + 2) * felem_size])

	/* Build prefix products of the Z coordinates. */
	if (!felem_is_zero(Z(0)))
		felem_assign(tmp_felem(0), Z(0));
	else
		felem_one(tmp_felem(0));
	for (i = 1; i < (int) num; i++) {
		if (!felem_is_zero(Z(i)))
			felem_mul(tmp_felem(i), tmp_felem(i - 1), Z(i));
		else
			felem_assign(tmp_felem(i), tmp_felem(i - 1));
	}

	/*
	 * Now each tmp_felem(i) is the product of Z(0) .. Z(i), skipping any
	 * zero-valued factors: if Z(i) = 0, we essentially pretend that Z(i)
	 * = 1.
	 */

	felem_inv(tmp_felem(num - 1), tmp_felem(num - 1));
	for (i = (int) num - 1; i >= 0; i--) {
		if (i > 0)
			/*
			 * tmp_felem(i-1) is the product of Z(0) .. Z(i-1),
			 * tmp_felem(i) is the inverse of the product of Z(0)
			 * .. Z(i).
			 */
			felem_mul(tmp_felem(num), tmp_felem(i - 1),
			    tmp_felem(i));	/* 1/Z(i) */
		else
			felem_assign(tmp_felem(num), tmp_felem(0));	/* 1/Z(0) */

		if (!felem_is_zero(Z(i))) {
			if (i > 0)
				/*
				 * For next iteration, replace tmp_felem(i-1)
				 * by its inverse.
				 */
				felem_mul(tmp_felem(i - 1), tmp_felem(i),
				    Z(i));

			/*
			 * Convert point (X, Y, Z) into affine form (X/(Z^2),
			 * Y/(Z^3), 1).
			 */
			felem_square(Z(i), tmp_felem(num));	/* 1/(Z^2) */
			felem_mul(X(i), X(i), Z(i));	/* X/(Z^2) */
			felem_mul(Z(i), Z(i), tmp_felem(num));	/* 1/(Z^3) */
			felem_mul(Y(i), Y(i), Z(i));	/* Y/(Z^3) */
			felem_contract(X(i), X(i));
			felem_contract(Y(i), Y(i));
		} else {
			if (i > 0)
				/*
				 * For next iteration, replace tmp_felem(i-1)
				 * by its inverse.
				 */
				felem_assign(tmp_felem(i - 1), tmp_felem(i));
		}
	}

	/* Keep the short macro names from leaking into the rest of the file. */
#undef tmp_felem
#undef X
#undef Y
#undef Z
}
/*
 * This function looks at 5+1 scalar bits (5 current, 1 adjacent less
 * significant bit), and recodes them into a signed digit for use in fast point
 * multiplication: the use of signed rather than unsigned digits means that
 * fewer points need to be precomputed, given that point inversion is easy
 * (a precomputed point dP makes -dP available as well).
 *
 * Signed digits for multiplication were introduced by Booth ("A signed binary
 * multiplication technique", Quart. Journ. Mech. and Applied Math., vol. IV,
 * pt. 2 (1951), pp. 236-240), in that case for multiplication of integers.
 * Booth's original encoding did not generally improve the density of nonzero
 * digits over the binary representation, and was merely meant to simplify the
 * handling of signed factors given in two's complement; but it has since been
 * shown to be the basis of various signed-digit representations that do have
 * further advantages, including the wNAF, using the following general approach:
 *
 * (1) Given a binary representation
 *
 *       b_k ... b_2 b_1 b_0,
 *
 *     of a nonnegative integer (b_k in {0, 1}), rewrite it in digits 0, 1, -1
 *     by using bit-wise subtraction as follows:
 *
 *        b_k b_(k-1) ... b_2 b_1 b_0
 *      -     b_k     ... b_3 b_2 b_1 b_0
 *       -------------------------------------
 *        s_k b_(k-1) ... s_3 s_2 s_1 s_0
 *
 *     A left-shift followed by subtraction of the original value yields a new
 *     representation of the same value, using signed bits s_i = b_(i+1) - b_i.
 *     This representation from Booth's paper has since appeared in the
 *     literature under a variety of different names including "reversed binary
 *     form", "alternating greedy expansion", "mutual opposite form", and
 *     "sign-alternating {+-1}-representation".
 *
 *     An interesting property is that among the nonzero bits, values 1 and -1
 *     strictly alternate.
 *
 * (2) Various window schemes can be applied to the Booth representation of
 *     integers: for example, right-to-left sliding windows yield the wNAF
 *     (a signed-digit encoding independently discovered by various researchers
 *     in the 1990s), and left-to-right sliding windows yield a left-to-right
 *     equivalent of the wNAF (independently discovered by various researchers
 *     in the 2000s).
 *
 * To prevent leaking information through side channels in point multiplication,
 * we need to recode the given integer into a regular pattern: sliding windows
 * as in wNAFs won't do, we need their fixed-window equivalent -- which is a few
 * decades older: we'll be using the so-called "modified Booth encoding" due to
 * MacSorley ("High-speed arithmetic in binary computers", Proc. IRE, vol. 49
 * (1961), pp. 67-91), in a radix-2^5 setting. That is, we always combine five
 * signed bits into a signed digit:
 *
 *       s_(4j + 4) s_(4j + 3) s_(4j + 2) s_(4j + 1) s_(4j)
 *
 * The sign-alternating property implies that the resulting digit values are
 * integers from -16 to 16.
 *
 * Of course, we don't actually need to compute the signed digits s_i as an
 * intermediate step (that's just a nice way to see how this scheme relates
 * to the wNAF): a direct computation obtains the recoded digit from the
 * six bits b_(4j + 4) ... b_(4j - 1).
 *
 * This function takes those six bits as an integer (0 .. 63), writing the
 * recoded digit to *sign (0 for positive, 1 for negative) and *digit (absolute
 * value, in the range 0 .. 16). Note that this integer essentially provides the
 * input bits "shifted to the left" by one position: for example, the input to
 * compute the least significant recoded digit, given that there's no bit b_-1,
 * has to be b_4 b_3 b_2 b_1 b_0 0.
 */
196 ec_GFp_nistp_recode_scalar_bits(unsigned char *sign
, unsigned char *digit
, unsigned char in
)
200 s
= ~((in
>> 5) - 1); /* sets all bits to MSB(in), 'in' seen as
202 d
= (1 << 6) - in
- 1;
203 d
= (d
& s
) | (in
& ~s
);
204 d
= (d
>> 1) + (d
& 1);