Correct PPTP server firewall rules chain.
[tomato/davidwu.git] / release / src / router / nettle / yarrow256.c
blob39cb936643262440ba2854d98d398cd8965e116c
1 /* yarrow256.c
3 * The yarrow pseudo-randomness generator.
4 */
6 /* nettle, low-level cryptographics library
8 * Copyright (C) 2001 Niels Möller
9 *
10 * The nettle library is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU Lesser General Public License as published by
12 * the Free Software Foundation; either version 2.1 of the License, or (at your
13 * option) any later version.
15 * The nettle library is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
17 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
18 * License for more details.
20 * You should have received a copy of the GNU Lesser General Public License
21 * along with the nettle library; see the file COPYING.LIB. If not, write to
22 * the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
23 * MA 02111-1301, USA.
26 #if HAVE_CONFIG_H
27 # include "config.h"
28 #endif
30 #include <assert.h>
31 #include <stdlib.h>
32 #include <string.h>
34 #include "yarrow.h"
36 #include "macros.h"
38 #ifndef YARROW_DEBUG
39 #define YARROW_DEBUG 0
40 #endif
42 #if YARROW_DEBUG
43 #include <stdio.h>
44 #endif
46 /* Parameters */
48 /* An upper limit on the entropy (in bits) in one octet of sample
49 * data. */
50 #define YARROW_MULTIPLIER 4
52 /* Entropy threshold for reseeding from the fast pool */
53 #define YARROW_FAST_THRESHOLD 100
55 /* Entropy threshold for reseeding from the slow pool */
56 #define YARROW_SLOW_THRESHOLD 160
58 /* Number of sources that must exceed the threshold for slow reseed */
59 #define YARROW_SLOW_K 2
61 /* The number of iterations when reseeding, P_t in the yarrow paper.
62 * Should be chosen so that reseeding takes on the order of 0.1-1
63 * seconds. */
64 #define YARROW_RESEED_ITERATIONS 1500
66 /* Entropy estimates sticks to this value, it is treated as infinity
67 * in calculations. It should fit comfortably in an uint32_t, to avoid
68 * overflows. */
69 #define YARROW_MAX_ENTROPY 0x100000
71 /* Forward declarations */
72 static void
73 yarrow_gate(struct yarrow256_ctx *ctx);
75 void
76 yarrow256_init(struct yarrow256_ctx *ctx,
77 unsigned n,
78 struct yarrow_source *s)
80 unsigned i;
82 sha256_init(&ctx->pools[0]);
83 sha256_init(&ctx->pools[1]);
85 ctx->seeded = 0;
87 /* Not strictly necessary, but it makes it easier to see if the
88 * values are sane. */
89 memset(ctx->counter, 0, sizeof(ctx->counter));
91 ctx->nsources = n;
92 ctx->sources = s;
94 for (i = 0; i<n; i++)
96 ctx->sources[i].estimate[YARROW_FAST] = 0;
97 ctx->sources[i].estimate[YARROW_SLOW] = 0;
98 ctx->sources[i].next = YARROW_FAST;
102 void
103 yarrow256_seed(struct yarrow256_ctx *ctx,
104 unsigned length,
105 const uint8_t *seed_file)
107 assert(length > 0);
109 sha256_update(&ctx->pools[YARROW_FAST], length, seed_file);
110 yarrow256_fast_reseed(ctx);
113 /* FIXME: Generalize so that it generates a few more blocks at a
114 * time. */
115 static void
116 yarrow_generate_block(struct yarrow256_ctx *ctx,
117 uint8_t *block)
119 unsigned i;
121 aes_encrypt(&ctx->key, sizeof(ctx->counter), block, ctx->counter);
123 /* Increment counter, treating it as a big-endian number. This is
124 * machine independent, and follows appendix B of the NIST
125 * specification of cipher modes of operation.
127 * We could keep a representation of the counter as 4 32-bit values,
128 * and write entire words (in big-endian byteorder) into the counter
129 * block, whenever they change. */
130 for (i = sizeof(ctx->counter); i--; )
132 if (++ctx->counter[i])
133 break;
/* Repeated hashing of the pool digest (P_t iterations in the yarrow
 * paper), intended to make each reseed computationally expensive.
 * Transforms digest in place. */
static void
yarrow_iterate(uint8_t *digest)
{
  uint8_t v0[SHA256_DIGEST_SIZE];
  unsigned i;

  /* v_0 is the initial digest; it is mixed into every round. */
  memcpy(v0, digest, SHA256_DIGEST_SIZE);

  /* When hashed inside the loop, i should run from 1 to
   * YARROW_RESEED_ITERATIONS */
  /* NOTE(review): with the condition `++i < YARROW_RESEED_ITERATIONS`
   * i actually runs 1..YARROW_RESEED_ITERATIONS-1, i.e. one round
   * fewer than the comment above states.  Changing it would alter
   * every derived key and break compatibility with existing seed
   * files, so the loop is left as-is. */
  for (i = 0; ++i < YARROW_RESEED_ITERATIONS; )
    {
      uint8_t count[4];
      struct sha256_ctx hash;

      sha256_init(&hash);

      /* Hash v_i | v_0 | i, with i serialized via WRITE_UINT32. */
      WRITE_UINT32(count, i);
      sha256_update(&hash, SHA256_DIGEST_SIZE, digest);
      sha256_update(&hash, sizeof(v0), v0);
      sha256_update(&hash, sizeof(count), count);

      /* Overwrite digest with this round's output. */
      sha256_digest(&hash, SHA256_DIGEST_SIZE, digest);
    }
}
/* NOTE: The SHA-256 digest size equals the AES key size, so we need
 * no "size adaptor". */

/* Reseed from the fast pool: extract the pool digest, harden it with
 * yarrow_iterate(), and install the result as the new cipher key and
 * counter.  Marks the generator as seeded and clears all fast-pool
 * entropy estimates. */
void
yarrow256_fast_reseed(struct yarrow256_ctx *ctx)
{
  uint8_t digest[SHA256_DIGEST_SIZE];
  unsigned i;

#if YARROW_DEBUG
  fprintf(stderr, "yarrow256_fast_reseed\n");
#endif

  /* We feed two blocks of output using the current key into the pool
   * before emptying it, so the new key also depends on the old
   * generator state. */
  if (ctx->seeded)
    {
      uint8_t blocks[AES_BLOCK_SIZE * 2];

      yarrow_generate_block(ctx, blocks);
      yarrow_generate_block(ctx, blocks + AES_BLOCK_SIZE);
      sha256_update(&ctx->pools[YARROW_FAST], sizeof(blocks), blocks);
    }

  sha256_digest(&ctx->pools[YARROW_FAST], sizeof(digest), digest);

  /* Iterate (the expensive step that slows down guessing attacks). */
  yarrow_iterate(digest);

  /* The iterated digest becomes the new generator key; the sizes
   * match per the NOTE at the top of this function. */
  aes_set_encrypt_key(&ctx->key, sizeof(digest), digest);
  ctx->seeded = 1;

  /* Derive new counter value by encrypting an all-zero block under
   * the new key. */
  memset(ctx->counter, 0, sizeof(ctx->counter));
  aes_encrypt(&ctx->key, sizeof(ctx->counter), ctx->counter, ctx->counter);

  /* Reset estimates. */
  for (i = 0; i<ctx->nsources; i++)
    ctx->sources[i].estimate[YARROW_FAST] = 0;
}
205 void
206 yarrow256_slow_reseed(struct yarrow256_ctx *ctx)
208 uint8_t digest[SHA256_DIGEST_SIZE];
209 unsigned i;
211 #if YARROW_DEBUG
212 fprintf(stderr, "yarrow256_slow_reseed\n");
213 #endif
215 /* Get digest of the slow pool*/
216 sha256_digest(&ctx->pools[YARROW_SLOW], sizeof(digest), digest);
218 /* Feed it into the fast pool */
219 sha256_update(&ctx->pools[YARROW_FAST], sizeof(digest), digest);
221 yarrow256_fast_reseed(ctx);
223 /* Reset estimates. */
224 for (i = 0; i<ctx->nsources; i++)
225 ctx->sources[i].estimate[YARROW_SLOW] = 0;
/* Feed a sample from source source_index into the generator: length
 * octets of data, credited with an estimated entropy bits of entropy.
 * Returns 1 if the sample triggered a (fast or slow) reseed, 0
 * otherwise. */
int
yarrow256_update(struct yarrow256_ctx *ctx,
		 unsigned source_index, unsigned entropy,
		 unsigned length, const uint8_t *data)
{
  enum yarrow_pool_id current;
  struct yarrow_source *source;

  assert(source_index < ctx->nsources);

  if (!length)
    /* Nothing happens */
    return 0;

  source = &ctx->sources[source_index];

  if (!ctx->seeded)
    /* While seeding, use the slow pool */
    current = YARROW_SLOW;
  else
    {
      /* Once seeded, each source alternates between the two pools. */
      current = source->next;
      source->next = !source->next;
    }

  sha256_update(&ctx->pools[current], length, data);

  /* NOTE: We should be careful to avoid overflows in the estimates. */
  if (source->estimate[current] < YARROW_MAX_ENTROPY)
    {
      /* Clamp the caller's claim to the global cap... */
      if (entropy > YARROW_MAX_ENTROPY)
	entropy = YARROW_MAX_ENTROPY;

      /* ...and to at most YARROW_MULTIPLIER bits per octet of sample
       * data.  The length guard keeps the multiplication below from
       * overflowing. */
      if ( (length < (YARROW_MAX_ENTROPY / YARROW_MULTIPLIER))
	   && (entropy > YARROW_MULTIPLIER * length) )
	entropy = YARROW_MULTIPLIER * length;

      /* Accumulate, saturating at YARROW_MAX_ENTROPY (treated as
       * infinity). */
      entropy += source->estimate[current];
      if (entropy > YARROW_MAX_ENTROPY)
	entropy = YARROW_MAX_ENTROPY;

      source->estimate[current] = entropy;
    }

  /* Check for seed/reseed */
  switch(current)
    {
    case YARROW_FAST:
#if YARROW_DEBUG
      fprintf(stderr,
	      "yarrow256_update: source_index = %d,\n"
	      "            fast pool estimate = %d\n",
	      source_index, source->estimate[YARROW_FAST]);
#endif
      /* Fast reseed needs only this one source to pass its
       * threshold. */
      if (source->estimate[YARROW_FAST] >= YARROW_FAST_THRESHOLD)
	{
	  yarrow256_fast_reseed(ctx);
	  return 1;
	}
      else
	return 0;

    case YARROW_SLOW:
      {
	/* Slow reseed requires YARROW_SLOW_K distinct sources above
	 * the slow threshold. */
	if (!yarrow256_needed_sources(ctx))
	  {
	    yarrow256_slow_reseed(ctx);
	    return 1;
	  }
	else
	  return 0;
      }
    default:
      abort();
    }
}
305 static void
306 yarrow_gate(struct yarrow256_ctx *ctx)
308 uint8_t key[AES_MAX_KEY_SIZE];
309 unsigned i;
311 for (i = 0; i < sizeof(key); i+= AES_BLOCK_SIZE)
312 yarrow_generate_block(ctx, key + i);
314 aes_set_encrypt_key(&ctx->key, sizeof(key), key);
317 void
318 yarrow256_random(struct yarrow256_ctx *ctx, unsigned length, uint8_t *dst)
320 assert(ctx->seeded);
322 while (length >= AES_BLOCK_SIZE)
324 yarrow_generate_block(ctx, dst);
325 dst += AES_BLOCK_SIZE;
326 length -= AES_BLOCK_SIZE;
328 if (length)
330 uint8_t buffer[AES_BLOCK_SIZE];
332 assert(length < AES_BLOCK_SIZE);
333 yarrow_generate_block(ctx, buffer);
334 memcpy(dst, buffer, length);
336 yarrow_gate(ctx);
/* Returns nonzero once the generator has been seeded (via
 * yarrow256_seed() or a reseed), zero before that. */
int
yarrow256_is_seeded(struct yarrow256_ctx *ctx)
{
  return ctx->seeded;
}
345 unsigned
346 yarrow256_needed_sources(struct yarrow256_ctx *ctx)
348 /* FIXME: This is somewhat inefficient. It would be better to
349 * either maintain the count, or do this loop only if the
350 * current source just crossed the threshold. */
351 unsigned k, i;
353 for (i = k = 0; i < ctx->nsources; i++)
354 if (ctx->sources[i].estimate[YARROW_SLOW] >= YARROW_SLOW_THRESHOLD)
355 k++;
357 #if YARROW_DEBUG
358 fprintf(stderr,
359 "yarrow256_needed_sources: source_index = %d,\n"
360 " slow pool estimate = %d,\n"
361 " number of sources above threshold = %d\n",
362 source_index, source->estimate[YARROW_SLOW], k);
363 #endif
365 return (k < YARROW_SLOW_K) ? (YARROW_SLOW_K - k) : 0;