/*	$NetBSD: rnd.c,v 1.76 2009/09/14 09:26:28 pooka Exp $	*/

/*-
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Michael Graff <explorer@flame.org>.  This code uses ideas and
 * algorithms from the Linux driver written by Ted Ts'o.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rnd.c,v 1.76 2009/09/14 09:26:28 pooka Exp $");

#include <sys/param.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/rnd.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/kauth.h>
#include <sys/once.h>

#if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL) /* XXX: bad pooka */
#include <machine/cpu_counter.h>
#endif

#ifdef RND_DEBUG
#define DPRINTF(l,x)      if (rnd_debug & (l)) printf x
int     rnd_debug = 0;
#else
#define DPRINTF(l,x)
#endif

#define RND_DEBUG_WRITE         0x0001
#define RND_DEBUG_READ          0x0002
#define RND_DEBUG_IOCTL         0x0004
#define RND_DEBUG_SNOOZE        0x0008
/*
 * Define RND_VERBOSE to log devices as they attach and detach as
 * entropy sources.
 */
#if 0
#define RND_VERBOSE
#endif
/*
 * The size of a temporary buffer, malloc()ed when needed, and used for
 * reading and writing data.
 */
#define RND_TEMP_BUFFER_SIZE    128
/*
 * This is a little bit of state information attached to each device that we
 * collect entropy from.  This is simply a collection buffer, and when it
 * is full it will be "detached" from the source and added to the entropy
 * pool after entropy is distilled as much as possible.
 */
#define RND_SAMPLE_COUNT        64      /* collect N samples, then compress */
typedef struct _rnd_sample_t {
        SIMPLEQ_ENTRY(_rnd_sample_t) next;
        rndsource_t     *source;
        int             cursor;
        int             entropy;
        u_int32_t       ts[RND_SAMPLE_COUNT];
        u_int32_t       values[RND_SAMPLE_COUNT];
} rnd_sample_t;
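
/*
 * Rough lifecycle of a sample buffer, as implemented below: each source
 * owns at most one rnd_sample_t (its "state"); rnd_add_uint32() fills
 * ts[] and values[] until cursor reaches RND_SAMPLE_COUNT, then queues
 * the full buffer on rnd_samples under rnd_mtx, and rnd_timeout() later
 * mixes it into rnd_pool and frees it.
 */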
/*
 * The event queue.  Fields are altered at an interrupt level.
 * All accesses must be protected with the mutex.
 */
volatile int                    rnd_timeout_pending;
SIMPLEQ_HEAD(, _rnd_sample_t)   rnd_samples;
kmutex_t                        rnd_mtx;

/*
 * our select/poll queue
 */
struct selinfo rnd_selq;

/*
 * Set when there are readers blocking on data from us
 */
#define RND_READWAITING 0x00000001
volatile u_int32_t rnd_status;

/*
 * Memory pool for sample buffers
 */
static struct pool rnd_mempool;

/*
 * Our random pool.  This is defined here rather than using the general
 * purpose one defined in rndpool.c.
 *
 * Samples are collected and queued into a separate mutex-protected queue
 * (rnd_samples, see above), and processed in a timeout routine; therefore,
 * the mutex protecting the random pool is at IPL_SOFTCLOCK() as well.
 */
rndpool_t rnd_pool;
kmutex_t  rndpool_mtx;

/*
 * This source is used to easily "remove" queue entries when the source
 * which actually generated the events is going away.
 */
static rndsource_t rnd_source_no_collect = {
        { 'N', 'o', 'C', 'o', 'l', 'l', 'e', 'c', 't', 0, 0, 0, 0, 0, 0, 0 },
        0, 0, 0, 0,
        RND_TYPE_UNKNOWN,
        (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE | RND_TYPE_UNKNOWN),
        NULL
};
struct callout rnd_callout;

void    rndattach(int);

dev_type_open(rndopen);
dev_type_read(rndread);
dev_type_write(rndwrite);
dev_type_ioctl(rndioctl);
dev_type_poll(rndpoll);
dev_type_kqfilter(rndkqfilter);

const struct cdevsw rnd_cdevsw = {
        rndopen, nullclose, rndread, rndwrite, rndioctl,
        nostop, notty, rndpoll, nommap, rndkqfilter, D_OTHER,
};

static inline void      rnd_wakeup_readers(void);
static inline u_int32_t rnd_estimate_entropy(rndsource_t *, u_int32_t);
static inline u_int32_t rnd_counter(void);
static        void      rnd_timeout(void *);

static int              rnd_ready = 0;
static int              rnd_have_entropy = 0;

LIST_HEAD(, __rndsource_element)        rnd_sources;
/*
 * Generate a 32-bit counter.  This should be more machine dependent,
 * using cycle counters and the like when possible.
 */
static inline u_int32_t
rnd_counter(void)
{
        struct timeval tv;

#if defined(__HAVE_CPU_COUNTER) && !defined(_RUMPKERNEL) /* XXX: bad pooka */
        if (cpu_hascounter())
                return (cpu_counter32());
#endif
        if (rnd_ready) {
                microtime(&tv);
                return (tv.tv_sec * 1000000 + tv.tv_usec);
        }
        /* when called from rnd_init, it's too early to call microtime safely */
        return (0);
}
/*
 * Check to see if there are readers waiting on us.  If so, kick them.
 */
static inline void
rnd_wakeup_readers(void)
{

        /*
         * If we have added new bits, and now have enough to do something,
         * wake up sleeping readers.
         */
        mutex_enter(&rndpool_mtx);
        if (rndpool_get_entropy_count(&rnd_pool) > RND_ENTROPY_THRESHOLD * 8) {
                if (rnd_status & RND_READWAITING) {
                        DPRINTF(RND_DEBUG_SNOOZE,
                            ("waking up pending readers.\n"));
                        rnd_status &= ~RND_READWAITING;
                        wakeup(&rnd_selq);
                }
                selnotify(&rnd_selq, 0, 0);

#ifdef RND_VERBOSE
                if (!rnd_have_entropy)
                        printf("rnd: have initial entropy (%u)\n",
                            rndpool_get_entropy_count(&rnd_pool));
#endif
                rnd_have_entropy = 1;
        }
        mutex_exit(&rndpool_mtx);
}
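
/*
 * Note on units (an assumption; the definitions live in sys/rnd.h, not in
 * this file): the pool's entropy count is kept in bits, while
 * RND_ENTROPY_THRESHOLD is expressed in bytes, hence the "* 8" in the
 * comparison above and in the similar checks in rndread(), rndpoll() and
 * filt_rndread() below.
 */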
/*
 * Use the timing of the event to estimate the entropy gathered.
 * If all the differentials (first, second, and third) are non-zero, return
 * non-zero.  If any of these are zero, return zero.
 */
static inline u_int32_t
rnd_estimate_entropy(rndsource_t *rs, u_int32_t t)
{
        int32_t delta, delta2, delta3;

        /*
         * If the time counter has overflowed, calculate the real difference.
         * If it has not, it is simpler.
         */
        if (t < rs->last_time)
                delta = UINT_MAX - rs->last_time + t;
        else
                delta = rs->last_time - t;

        if (delta < 0)
                delta = -delta;

        /*
         * Calculate the second and third order differentials
         */
        delta2 = rs->last_delta - delta;
        if (delta2 < 0)
                delta2 = -delta2;

        delta3 = rs->last_delta2 - delta2;
        if (delta3 < 0)
                delta3 = -delta3;

        rs->last_time = t;
        rs->last_delta = delta;
        rs->last_delta2 = delta2;

        /*
         * If any delta is 0, we got no entropy.  If all are non-zero, we
         * might have something.
         */
        if (delta == 0 || delta2 == 0 || delta3 == 0)
                return (0);

        return (1);
}
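
/*
 * Illustrative trace of the test above (example values, not from the
 * original source): with last_time = 100, last_delta = 2, last_delta2 = 5
 * and a new timestamp t = 107, we get delta = |100 - 107| = 7,
 * delta2 = |2 - 7| = 5 and delta3 = |5 - 5| = 0, so no entropy is
 * credited; had all three differentials been non-zero, the sample would
 * have been credited one bit.
 */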
static int
rnd_mempool_init(void)
{

        pool_init(&rnd_mempool, sizeof(rnd_sample_t), 0, 0, 0, "rndsample",
            NULL, IPL_VM);
        return 0;
}
static ONCE_DECL(rnd_mempoolinit_ctrl);
/*
 * "Attach" the random device.  This is an (almost) empty stub, since
 * pseudo-devices don't get attached until after config, after the
 * entropy sources will have attached.  We just use the timing of this
 * event as another potential source of initial entropy.
 */
void
rndattach(int num)
{
        u_int32_t c;

        RUN_ONCE(&rnd_mempoolinit_ctrl, rnd_mempool_init);

        /* Trap unwary players who don't call rnd_init() early */
        KASSERT(rnd_ready);

        /* mix in another counter */
        c = rnd_counter();
        rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
}
/*
 * initialize the global random pool for our use.
 * rnd_init() must be called very early on in the boot process, so
 * the pool is ready for other devices to attach as sources.
 */
void
rnd_init(void)
{
        u_int32_t c;

        if (rnd_ready)
                return;

        mutex_init(&rnd_mtx, MUTEX_DEFAULT, IPL_VM);

        callout_init(&rnd_callout, CALLOUT_MPSAFE);

        /*
         * take a counter early, hoping that there's some variance in
         * the following operations
         */
        c = rnd_counter();

        LIST_INIT(&rnd_sources);
        SIMPLEQ_INIT(&rnd_samples);
        selinit(&rnd_selq);

        rndpool_init(&rnd_pool);
        mutex_init(&rndpool_mtx, MUTEX_DEFAULT, IPL_SOFTCLOCK);

        /* Mix *something*, *anything* into the pool to help it get started.
         * However, it's not safe for rnd_counter() to call microtime() yet,
         * so on some platforms we might just end up with zeros anyway.
         * XXX more things to add would be nice.
         */
        if (c) {
                rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
                c = rnd_counter();
                rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
        }

        rnd_ready = 1;

#ifdef RND_VERBOSE
        printf("rnd: initialised (%u)%s", RND_POOLBITS,
            c ? " with counter\n" : "\n");
#endif
}
int
rndopen(dev_t dev, int flags, int ifmt,
    struct lwp *l)
{

        if (rnd_ready == 0)
                return (ENXIO);

        if (minor(dev) == RND_DEV_URANDOM || minor(dev) == RND_DEV_RANDOM)
                return (0);

        return (ENXIO);
}
int
rndread(dev_t dev, struct uio *uio, int ioflag)
{
        u_int8_t *bf;
        u_int32_t entcnt, mode, n, nread;
        int ret;

        DPRINTF(RND_DEBUG_READ,
            ("Random: Read of %d requested, flags 0x%08x\n",
            uio->uio_resid, ioflag));

        if (uio->uio_resid == 0)
                return (0);

        switch (minor(dev)) {
        case RND_DEV_RANDOM:
                mode = RND_EXTRACT_GOOD;
                break;
        case RND_DEV_URANDOM:
                mode = RND_EXTRACT_ANY;
                break;
        default:
                /* Can't happen, but this is cheap */
                return (ENXIO);
        }

        ret = 0;

        bf = malloc(RND_TEMP_BUFFER_SIZE, M_TEMP, M_WAITOK);

        while (uio->uio_resid > 0) {
                n = min(RND_TEMP_BUFFER_SIZE, uio->uio_resid);

                /*
                 * Make certain there is data available.  If there
                 * is, do the I/O even if it is partial.  If not,
                 * sleep unless the user has requested non-blocking
                 * I/O.
                 */
                for (;;) {
                        /*
                         * If not requesting strong randomness, we
                         * can always read.
                         */
                        if (mode == RND_EXTRACT_ANY)
                                break;

                        /*
                         * How much entropy do we have?  If it is enough for
                         * one hash, we can read.
                         */
                        mutex_enter(&rndpool_mtx);
                        entcnt = rndpool_get_entropy_count(&rnd_pool);
                        mutex_exit(&rndpool_mtx);
                        if (entcnt >= RND_ENTROPY_THRESHOLD * 8)
                                break;

                        /*
                         * Data is not available.
                         */
                        if (ioflag & IO_NDELAY) {
                                ret = EWOULDBLOCK;
                                goto out;
                        }

                        rnd_status |= RND_READWAITING;
                        ret = tsleep(&rnd_selq, PRIBIO|PCATCH,
                            "rndread", 0);

                        if (ret)
                                goto out;
                }

                nread = rnd_extract_data(bf, n, mode);

                /*
                 * Copy (possibly partial) data to the user.
                 * If an error occurs, or this is a partial
                 * read, bail out.
                 */
                ret = uiomove((void *)bf, nread, uio);
                if (ret != 0 || nread != n)
                        goto out;
        }

out:
        free(bf, M_TEMP);
        return (ret);
}
int
rndwrite(dev_t dev, struct uio *uio, int ioflag)
{
        u_int8_t *bf;
        int n, ret;

        DPRINTF(RND_DEBUG_WRITE,
            ("Random: Write of %d requested\n", uio->uio_resid));

        if (uio->uio_resid == 0)
                return (0);

        ret = 0;

        bf = malloc(RND_TEMP_BUFFER_SIZE, M_TEMP, M_WAITOK);

        while (uio->uio_resid > 0) {
                n = min(RND_TEMP_BUFFER_SIZE, uio->uio_resid);

                ret = uiomove((void *)bf, n, uio);
                if (ret != 0)
                        break;

                /*
                 * Mix in the bytes.
                 */
                mutex_enter(&rndpool_mtx);
                rndpool_add_data(&rnd_pool, bf, n, 0);
                mutex_exit(&rndpool_mtx);

                DPRINTF(RND_DEBUG_WRITE, ("Random: Copied in %d bytes\n", n));
        }

        free(bf, M_TEMP);
        return (ret);
}
int
rndioctl(dev_t dev, u_long cmd, void *addr, int flag,
    struct lwp *l)
{
        rndsource_element_t *rse;
        rndstat_t *rst;
        rndstat_name_t *rstnm;
        rndctl_t *rctl;
        rnddata_t *rnddata;
        u_int32_t count, start;
        int ret;

        ret = 0;

        switch (cmd) {
        case FIONBIO:
        case FIOASYNC:
        case RNDGETENTCNT:
                break;

        case RNDGETPOOLSTAT:
        case RNDGETSRCNUM:
        case RNDGETSRCNAME:
                ret = kauth_authorize_device(l->l_cred,
                    KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
                if (ret)
                        return (ret);
                break;

        case RNDCTL:
                ret = kauth_authorize_device(l->l_cred,
                    KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
                if (ret)
                        return (ret);
                break;

        case RNDADDDATA:
                ret = kauth_authorize_device(l->l_cred,
                    KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
                if (ret)
                        return (ret);
                break;

        default:
                return (EINVAL);
        }

        switch (cmd) {

        /*
         * Handled in upper layer really, but we have to return zero
         * for it to be accepted by the upper layer.
         */
        case FIONBIO:
        case FIOASYNC:
                break;

        case RNDGETENTCNT:
                mutex_enter(&rndpool_mtx);
                *(u_int32_t *)addr = rndpool_get_entropy_count(&rnd_pool);
                mutex_exit(&rndpool_mtx);
                break;

        case RNDGETPOOLSTAT:
                mutex_enter(&rndpool_mtx);
                rndpool_get_stats(&rnd_pool, addr, sizeof(rndpoolstat_t));
                mutex_exit(&rndpool_mtx);
                break;

        case RNDGETSRCNUM:
                rst = (rndstat_t *)addr;

                if (rst->count == 0)
                        break;

                if (rst->count > RND_MAXSTATCOUNT)
                        return (EINVAL);

                /*
                 * Find the starting source by running through the
                 * list of sources.
                 */
                rse = rnd_sources.lh_first;
                start = rst->start;
                while (rse != NULL && start >= 1) {
                        rse = rse->list.le_next;
                        start--;
                }

                /*
                 * Return up to as many structures as the user asked
                 * for.  If we run out of sources, a count of zero
                 * will be returned, without an error.
                 */
                for (count = 0; count < rst->count && rse != NULL; count++) {
                        memcpy(&rst->source[count], &rse->data,
                            sizeof(rndsource_t));
                        /* Zero out information which may leak */
                        rst->source[count].last_time = 0;
                        rst->source[count].last_delta = 0;
                        rst->source[count].last_delta2 = 0;
                        rst->source[count].state = 0;
                        rse = rse->list.le_next;
                }

                rst->count = count;

                break;

        case RNDGETSRCNAME:
                /*
                 * Scan through the list, trying to find the name.
                 */
                rstnm = (rndstat_name_t *)addr;
                rse = rnd_sources.lh_first;
                while (rse != NULL) {
                        if (strncmp(rse->data.name, rstnm->name, 16) == 0) {
                                memcpy(&rstnm->source, &rse->data,
                                    sizeof(rndsource_t));

                                return (0);
                        }
                        rse = rse->list.le_next;
                }

                ret = ENOENT;           /* name not found */

                break;

        case RNDCTL:
                /*
                 * Set flags to enable/disable entropy counting and/or
                 * collection.
                 */
                rctl = (rndctl_t *)addr;
                rse = rnd_sources.lh_first;

                /*
                 * Flags set apply to all sources of this type.
                 */
                if (rctl->type != 0xff) {
                        while (rse != NULL) {
                                if (rse->data.type == rctl->type) {
                                        rse->data.flags &= ~rctl->mask;
                                        rse->data.flags |=
                                            (rctl->flags & rctl->mask);
                                }
                                rse = rse->list.le_next;
                        }

                        return (0);
                }

                /*
                 * scan through the list, trying to find the name
                 */
                while (rse != NULL) {
                        if (strncmp(rse->data.name, rctl->name, 16) == 0) {
                                rse->data.flags &= ~rctl->mask;
                                rse->data.flags |= (rctl->flags & rctl->mask);

                                return (0);
                        }

                        rse = rse->list.le_next;
                }

                ret = ENOENT;           /* name not found */

                break;

        case RNDADDDATA:
                rnddata = (rnddata_t *)addr;

                mutex_enter(&rndpool_mtx);
                rndpool_add_data(&rnd_pool, rnddata->data, rnddata->len,
                    rnddata->entropy);
                mutex_exit(&rndpool_mtx);

                rnd_wakeup_readers();

                break;

        default:
                return (EINVAL);
        }

        return (ret);
}
int
rndpoll(dev_t dev, int events, struct lwp *l)
{
        u_int32_t entcnt;
        int revents;

        /*
         * We are always writable.
         */
        revents = events & (POLLOUT | POLLWRNORM);

        /*
         * Save some work if not checking for reads.
         */
        if ((events & (POLLIN | POLLRDNORM)) == 0)
                return (revents);

        /*
         * If the minor device is not /dev/random, we are always readable.
         */
        if (minor(dev) != RND_DEV_RANDOM) {
                revents |= events & (POLLIN | POLLRDNORM);
                return (revents);
        }

        /*
         * Make certain we have enough entropy to be readable.
         */
        mutex_enter(&rndpool_mtx);
        entcnt = rndpool_get_entropy_count(&rnd_pool);
        mutex_exit(&rndpool_mtx);

        if (entcnt >= RND_ENTROPY_THRESHOLD * 8)
                revents |= events & (POLLIN | POLLRDNORM);
        else
                selrecord(l, &rnd_selq);

        return (revents);
}
static void
filt_rnddetach(struct knote *kn)
{
        mutex_enter(&rndpool_mtx);
        SLIST_REMOVE(&rnd_selq.sel_klist, kn, knote, kn_selnext);
        mutex_exit(&rndpool_mtx);
}

static int
filt_rndread(struct knote *kn, long hint)
{
        uint32_t entcnt;

        entcnt = rndpool_get_entropy_count(&rnd_pool);
        if (entcnt >= RND_ENTROPY_THRESHOLD * 8) {
                kn->kn_data = RND_TEMP_BUFFER_SIZE;
                return (1);
        }

        return (0);
}
static const struct filterops rnd_seltrue_filtops =
        { 1, NULL, filt_rnddetach, filt_seltrue };

static const struct filterops rndread_filtops =
        { 1, NULL, filt_rnddetach, filt_rndread };

int
rndkqfilter(dev_t dev, struct knote *kn)
{
        struct klist *klist;

        switch (kn->kn_filter) {
        case EVFILT_READ:
                klist = &rnd_selq.sel_klist;
                if (minor(dev) == RND_DEV_URANDOM)
                        kn->kn_fop = &rnd_seltrue_filtops;
                else
                        kn->kn_fop = &rndread_filtops;
                break;

        case EVFILT_WRITE:
                klist = &rnd_selq.sel_klist;
                kn->kn_fop = &rnd_seltrue_filtops;
                break;

        default:
                return (EINVAL);
        }

        kn->kn_hook = NULL;

        mutex_enter(&rndpool_mtx);
        SLIST_INSERT_HEAD(klist, kn, kn_selnext);
        mutex_exit(&rndpool_mtx);

        return (0);
}
static rnd_sample_t *
rnd_sample_allocate(rndsource_t *source)
{
        rnd_sample_t *c;

        c = pool_get(&rnd_mempool, PR_WAITOK);
        if (c == NULL)
                return (NULL);

        c->source = source;
        c->cursor = 0;
        c->entropy = 0;

        return (c);
}

/*
 * Don't wait on allocation.  To be used in an interrupt context.
 */
static rnd_sample_t *
rnd_sample_allocate_isr(rndsource_t *source)
{
        rnd_sample_t *c;

        c = pool_get(&rnd_mempool, PR_NOWAIT);
        if (c == NULL)
                return (NULL);

        c->source = source;
        c->cursor = 0;
        c->entropy = 0;

        return (c);
}

static void
rnd_sample_free(rnd_sample_t *c)
{
        memset(c, 0, sizeof(rnd_sample_t));
        pool_put(&rnd_mempool, c);
}
/*
 * Add a source to our list of sources.
 */
void
rnd_attach_source(rndsource_element_t *rs, const char *name, u_int32_t type,
    u_int32_t flags)
{
        u_int32_t ts;

        RUN_ONCE(&rnd_mempoolinit_ctrl, rnd_mempool_init);

        ts = rnd_counter();

        strlcpy(rs->data.name, name, sizeof(rs->data.name));
        rs->data.last_time = ts;
        rs->data.last_delta = 0;
        rs->data.last_delta2 = 0;
        rs->data.total = 0;

        /*
         * Force network devices to not collect any entropy by
         * default.
         */
        if (type == RND_TYPE_NET)
                flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE);

        rs->data.type = type;
        rs->data.flags = flags;

        rs->data.state = rnd_sample_allocate(&rs->data);

        LIST_INSERT_HEAD(&rnd_sources, rs, list);

#ifdef RND_VERBOSE
        printf("rnd: %s attached as an entropy source (", rs->data.name);
        if (!(flags & RND_FLAG_NO_COLLECT)) {
                printf("collecting");
                if (flags & RND_FLAG_NO_ESTIMATE)
                        printf(" without estimation");
        } else
                printf("off");
        printf(")\n");
#endif

        /*
         * Again, put some more initial junk in the pool.
         * XXX Bogus, but harder to guess than zeros.
         */
        rndpool_add_data(&rnd_pool, &ts, sizeof(u_int32_t), 1);
}
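
/*
 * Illustrative driver usage (a sketch only; the softc member, device and
 * value names are hypothetical, and RND_TYPE_TTY is assumed to come from
 * sys/rnd.h alongside the RND_TYPE_NET used above):
 *
 *      struct foo_softc {
 *              rndsource_element_t     sc_rndsource;
 *      };
 *
 *      at attach time:
 *              rnd_attach_source(&sc->sc_rndsource, device_xname(self),
 *                  RND_TYPE_TTY, 0);
 *
 *      from the interrupt or event path:
 *              rnd_add_uint32(&sc->sc_rndsource, event_value);
 *
 *      at detach time:
 *              rnd_detach_source(&sc->sc_rndsource);
 */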
/*
 * Remove a source from our list of sources.
 */
void
rnd_detach_source(rndsource_element_t *rs)
{
        rnd_sample_t *sample;
        rndsource_t *source;

        mutex_enter(&rnd_mtx);

        LIST_REMOVE(rs, list);

        source = &rs->data;

        if (source->state) {
                rnd_sample_free(source->state);
                source->state = NULL;
        }

        /*
         * If there are samples queued up "remove" them from the sample queue
         * by setting the source to the no-collect pseudosource.
         */
        sample = SIMPLEQ_FIRST(&rnd_samples);
        while (sample != NULL) {
                if (sample->source == source)
                        sample->source = &rnd_source_no_collect;

                sample = SIMPLEQ_NEXT(sample, next);
        }

        mutex_exit(&rnd_mtx);
#ifdef RND_VERBOSE
        printf("rnd: %s detached as an entropy source\n", rs->data.name);
#endif
}
/*
 * Add a value to the entropy pool.  The rs parameter should point to the
 * source-specific source structure.
 */
void
rnd_add_uint32(rndsource_element_t *rs, u_int32_t val)
{
        rndsource_t *rst;
        rnd_sample_t *state;
        u_int32_t ts;

        rst = &rs->data;

        if (rst->flags & RND_FLAG_NO_COLLECT)
                return;

        /*
         * Sample the counter as soon as possible to avoid
         * entropy overestimation.
         */
        ts = rnd_counter();

        /*
         * If the sample buffer is NULL, try to allocate one here.  If this
         * fails, drop this sample.
         */
        state = rst->state;
        if (state == NULL) {
                state = rnd_sample_allocate_isr(rst);
                if (state == NULL)
                        return;
                rst->state = state;
        }

        /*
         * If we are estimating entropy on this source,
         * calculate differentials.
         */
        if ((rst->flags & RND_FLAG_NO_ESTIMATE) == 0)
                state->entropy += rnd_estimate_entropy(rst, ts);

        state->ts[state->cursor] = ts;
        state->values[state->cursor] = val;
        state->cursor++;

        /*
         * If the state arrays are not full, we're done.
         */
        if (state->cursor < RND_SAMPLE_COUNT)
                return;

        /*
         * State arrays are full.  Queue this chunk on the processing queue.
         */
        mutex_enter(&rnd_mtx);
        SIMPLEQ_INSERT_HEAD(&rnd_samples, state, next);
        rst->state = NULL;

        /*
         * If the timeout isn't pending, have it run in the near future.
         */
        if (rnd_timeout_pending == 0) {
                rnd_timeout_pending = 1;
                callout_reset(&rnd_callout, 1, rnd_timeout, NULL);
        }
        mutex_exit(&rnd_mtx);

        /*
         * To get here we have to have queued the state up, and therefore
         * we need a new state buffer.  If we can, allocate one now;
         * if we don't get it, it doesn't matter; we'll try again on
         * the next random event.
         */
        rst->state = rnd_sample_allocate_isr(rst);
}
void
rnd_add_data(rndsource_element_t *rs, void *data, u_int32_t len,
    u_int32_t entropy)
{
        rndsource_t *rst;

        /* Mix in the random data directly into the pool. */
        rndpool_add_data(&rnd_pool, data, len, entropy);

        if (rs != NULL) {
                rst = &rs->data;
                rst->total += entropy;

                if ((rst->flags & RND_FLAG_NO_ESTIMATE) == 0)
                        /* Estimate entropy using timing information */
                        rnd_add_uint32(rs, *(u_int8_t *)data);
        }

        /* Wake up any potential readers since we've just added some data. */
        rnd_wakeup_readers();
}
/*
 * Timeout, run to process the events in the sample queue.
 */
static void
rnd_timeout(void *arg)
{
        rnd_sample_t *sample;
        rndsource_t *source;
        u_int32_t entropy;

        /*
         * Sample queue is protected by rnd_mtx, take it briefly to dequeue.
         */
        mutex_enter(&rnd_mtx);
        rnd_timeout_pending = 0;

        sample = SIMPLEQ_FIRST(&rnd_samples);
        while (sample != NULL) {
                SIMPLEQ_REMOVE_HEAD(&rnd_samples, next);
                mutex_exit(&rnd_mtx);

                source = sample->source;

                /*
                 * We repeat this check here, since it is possible the source
                 * was disabled before we were called, but after the entry
                 * was queued.
                 */
                if ((source->flags & RND_FLAG_NO_COLLECT) == 0) {
                        entropy = sample->entropy;
                        if (source->flags & RND_FLAG_NO_ESTIMATE)
                                entropy = 0;

                        mutex_enter(&rndpool_mtx);
                        rndpool_add_data(&rnd_pool, sample->values,
                            RND_SAMPLE_COUNT * 4, 0);

                        rndpool_add_data(&rnd_pool, sample->ts,
                            RND_SAMPLE_COUNT * 4,
                            entropy);
                        mutex_exit(&rndpool_mtx);

                        source->total += sample->entropy;
                }

                rnd_sample_free(sample);

                /* Get mtx back to dequeue the next one. */
                mutex_enter(&rnd_mtx);
                sample = SIMPLEQ_FIRST(&rnd_samples);
        }
        mutex_exit(&rnd_mtx);

        /*
         * Wake up any potential readers waiting.
         */
        rnd_wakeup_readers();
}
u_int32_t
rnd_extract_data(void *p, u_int32_t len, u_int32_t flags)
{
        int retval;
        u_int32_t c;

        mutex_enter(&rndpool_mtx);
        if (!rnd_have_entropy) {
#ifdef RND_VERBOSE
                printf("rnd: WARNING! initial entropy low (%u).\n",
                    rndpool_get_entropy_count(&rnd_pool));
#endif
                /* Try once again to put something in the pool */
                c = rnd_counter();
                rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
        }

        retval = rndpool_extract_data(&rnd_pool, p, len, flags);
        mutex_exit(&rndpool_mtx);

        return (retval);
}