/*
 * hw_random/core.c: HWRNG core API
 *
 * Copyright 2006 Michael Buesch <m@bues.ch>
 * Copyright 2005 (c) MontaVista Software, Inc.
 *
 * Please read Documentation/admin-guide/hw_random.rst for details on use.
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hw_random.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#define RNG_MODULE_NAME		"hw_random"

#define RNG_BUFFER_SIZE (SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES)
static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
static struct task_struct *hwrng_fill;
/* list of registered rngs */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality = 1024; /* default to maximum */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per 1024 bits of input -- obsolete, use rng_quality instead");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default maximum entropy content of hwrng per 1024 bits of input");
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static int hwrng_fillfn(void *unused);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);

static size_t rng_buffer_size(void)
{
	return RNG_BUFFER_SIZE;
}
static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	complete(&rng->cleanup_done);
}
static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	drop_current_rng();
	current_rng = rng;

	/* if necessary, start hwrng thread */
	if (!hwrng_fill) {
		hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
		if (IS_ERR(hwrng_fill)) {
			pr_err("hwrng_fill thread creation failed\n");
			hwrng_fill = NULL;
		}
	}

	return 0;
}
static void drop_current_rng(void)
{
	BUG_ON(!mutex_is_locked(&rng_mutex));
	if (!current_rng)
		return;

	/* decrease last reference for triggering the cleanup */
	kref_put(&current_rng->ref, cleanup_rng);
	current_rng = NULL;
}
/* Returns ERR_PTR(), NULL or refcounted hwrng */
static struct hwrng *get_current_rng_nolock(void)
{
	if (current_rng)
		kref_get(&current_rng->ref);

	return current_rng;
}
static struct hwrng *get_current_rng(void)
{
	struct hwrng *rng;

	if (mutex_lock_interruptible(&rng_mutex))
		return ERR_PTR(-ERESTARTSYS);

	rng = get_current_rng_nolock();

	mutex_unlock(&rng_mutex);
	return rng;
}
static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}
static int hwrng_init(struct hwrng *rng)
{
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	current_quality = rng->quality; /* obsolete */

	return 0;
}
static int rng_dev_open(struct inode *inode, struct file *filp)
{
	/* enforce read-only access to this chrdev */
	if ((filp->f_mode & FMODE_READ) == 0)
		return -EINVAL;
	if (filp->f_mode & FMODE_WRITE)
		return -EINVAL;
	return 0;
}
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait)
{
	int present;

	BUG_ON(!mutex_is_locked(&reading_mutex));
	if (rng->read) {
		int err;

		err = rng->read(rng, buffer, size, wait);
		if (WARN_ON_ONCE(err > 0 && err > size))
			err = size;
		return err;
	}

	if (rng->data_present)
		present = rng->data_present(rng, wait);
	else
		present = 1;

	if (!present)
		return 0;

	return rng->data_read(rng, (u32 *)buffer);
}
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	u8 buffer[RNG_BUFFER_SIZE];
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			} else if (bytes_read == 0 &&
				   (filp->f_flags & O_NONBLOCK)) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}

			data_avail = bytes_read;
		}

		len = data_avail;
		if (len) {
			if (len > size)
				len = size;

			data_avail -= len;

			memcpy(buffer, rng_buffer + data_avail, len);
		}
		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (len) {
			if (copy_to_user(buf + ret, buffer, len)) {
				err = -EFAULT;
				goto out;
			}

			size -= len;
			ret += len;
		}

		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	memzero_explicit(buffer, sizeof(buffer));
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}
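
/*
 * Userspace view of rng_dev_read() above (a hypothetical standalone
 * program, shown here only for illustration): /dev/hwrng is read-only,
 * and callers must cope with short reads and, under O_NONBLOCK, EAGAIN.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char data[32];
	ssize_t n;
	int fd = open("/dev/hwrng", O_RDONLY);

	if (fd < 0)
		return 1;
	n = read(fd, data, sizeof(data));	/* blocks until data arrives */
	for (ssize_t i = 0; i < n; i++)
		printf("%02x", data[i]);
	printf("\n");
	close(fd);
	return n > 0 ? 0 : 1;
}
#endif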
static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};
static const struct attribute_group *rng_dev_groups[];

static struct miscdevice rng_miscdev = {
	.minor		= HWRNG_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};
static int enable_best_rng(void)
{
	struct hwrng *rng, *new_rng = NULL;
	int ret = -ENODEV;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	/* no rng to use? */
	if (list_empty(&rng_list)) {
		drop_current_rng();
		cur_rng_set_by_user = 0;
		return 0;
	}

	/* use the rng which offers the best quality */
	list_for_each_entry(rng, &rng_list, list) {
		if (!new_rng || rng->quality > new_rng->quality)
			new_rng = rng;
	}

	ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
	if (!ret)
		cur_rng_set_by_user = 0;

	return ret;
}
static ssize_t rng_current_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	int err;
	struct hwrng *rng, *new_rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;

	if (sysfs_streq(buf, "")) {
		err = enable_best_rng();
	} else {
		err = -ENODEV;
		list_for_each_entry(rng, &rng_list, list) {
			if (sysfs_streq(rng->name, buf)) {
				err = set_current_rng(rng);
				if (!err)
					cur_rng_set_by_user = 1;
				break;
			}
		}
	}
	new_rng = get_current_rng_nolock();
	mutex_unlock(&rng_mutex);

	if (new_rng)
		put_rng(new_rng);

	return err ? : len;
}
static ssize_t rng_current_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = sysfs_emit(buf, "%s\n", rng ? rng->name : "none");
	put_rng(rng);

	return ret;
}
static ssize_t rng_available_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	int err;
	struct hwrng *rng;

	err = mutex_lock_interruptible(&rng_mutex);
	if (err)
		return -ERESTARTSYS;
	buf[0] = '\0';
	list_for_each_entry(rng, &rng_list, list) {
		strlcat(buf, rng->name, PAGE_SIZE);
		strlcat(buf, " ", PAGE_SIZE);
	}
	strlcat(buf, "\n", PAGE_SIZE);
	mutex_unlock(&rng_mutex);

	return strlen(buf);
}
static ssize_t rng_selected_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
}
static ssize_t rng_quality_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t ret;
	struct hwrng *rng;

	rng = get_current_rng();
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	if (!rng) /* no need to put_rng */
		return -ENODEV;

	ret = sysfs_emit(buf, "%hu\n", rng->quality);
	put_rng(rng);

	return ret;
}
static ssize_t rng_quality_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	u16 quality;
	int ret = -EINVAL;

	if (len < 2)
		return -EINVAL;

	ret = mutex_lock_interruptible(&rng_mutex);
	if (ret)
		return -ERESTARTSYS;

	ret = kstrtou16(buf, 0, &quality);
	if (ret || quality > 1024) {
		ret = -EINVAL;
		goto out;
	}

	if (!current_rng) {
		ret = -ENODEV;
		goto out;
	}

	current_rng->quality = quality;
	current_quality = quality; /* obsolete */

	/* the best available RNG may have changed */
	ret = enable_best_rng();

out:
	mutex_unlock(&rng_mutex);
	return ret ? ret : len;
}
static DEVICE_ATTR_RW(rng_current);
static DEVICE_ATTR_RO(rng_available);
static DEVICE_ATTR_RO(rng_selected);
static DEVICE_ATTR_RW(rng_quality);

static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	&dev_attr_rng_selected.attr,
	&dev_attr_rng_quality.attr,
	NULL
};

ATTRIBUTE_GROUPS(rng_dev);
static int hwrng_fillfn(void *unused)
{
	size_t entropy, entropy_credit = 0; /* in 1/1024 of a bit */
	long rc;

	while (!kthread_should_stop()) {
		unsigned short quality;
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		if (current_quality != rng->quality)
			rng->quality = current_quality; /* obsolete */
		quality = rng->quality;
		mutex_unlock(&reading_mutex);

		if (rc <= 0)
			hwrng_msleep(rng, 10000);

		put_rng(rng);

		if (rc <= 0)
			continue;

		/* If we cannot credit at least one bit of entropy,
		 * keep track of the remainder for the next iteration
		 */
		entropy = rc * quality * 8 + entropy_credit;
		if ((entropy >> 10) == 0)
			entropy_credit = entropy;

		/* Outside lock, sure, but y'know: randomness. */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   entropy >> 10, true);
	}
	hwrng_fill = NULL;
	return 0;
}
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *tmp;

	if (!rng->name || (!rng->data_read && !rng->read))
		goto out;

	mutex_lock(&rng_mutex);

	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}
	list_add_tail(&rng->list, &rng_list);

	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);
	init_completion(&rng->dying);

	/* Adjust quality field to always have a proper value */
	rng->quality = min_t(u16, min_t(u16, default_quality, 1024), rng->quality ?: 1024);
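
	/*
	 * Worked example of the clamp above (editor's note, values are
	 * illustrative): a driver leaving quality at 0 is treated as
	 * claiming the maximum, 1024. Booting with
	 * hw_random.default_quality=500 would cap a claimed quality of
	 * 700 down to 500, since the smaller bound wins in min_t().
	 */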
	if (!current_rng ||
	    (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
		/*
		 * Set new rng as current as the new rng source
		 * provides better entropy quality and was not
		 * chosen by userspace.
		 */
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
	}
	mutex_unlock(&rng_mutex);
	return 0;
out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
EXPORT_SYMBOL_GPL(hwrng_register);
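
/*
 * Minimal driver-side sketch of the registration API above. The device,
 * callback and quality value are hypothetical; a real driver would fill
 * the buffer from hardware and pick a quality it can justify.
 */
#if 0
static int example_rng_read(struct hwrng *rng, void *buf, size_t max,
			    bool wait)
{
	memset(buf, 0, max);	/* stand-in for reads from real hardware */
	return max;		/* bytes produced */
}

static struct hwrng example_rng = {
	.name	 = "example",
	.read	 = example_rng_read,
	.quality = 700,		/* claimed entropy per 1024 bits of input */
};

static int example_probe(void)
{
	return hwrng_register(&example_rng);
}
#endif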
void hwrng_unregister(struct hwrng *rng)
{
	struct hwrng *new_rng;
	int err;

	mutex_lock(&rng_mutex);

	list_del(&rng->list);
	complete_all(&rng->dying);
	if (current_rng == rng) {
		err = enable_best_rng();
		if (err) {
			drop_current_rng();
			cur_rng_set_by_user = 0;
		}
	}

	new_rng = get_current_rng_nolock();
	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	if (new_rng)
		put_rng(new_rng);

	wait_for_completion(&rng->cleanup_done);
}
EXPORT_SYMBOL_GPL(hwrng_unregister);
static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}

static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}
int devm_hwrng_register(struct device *dev, struct hwrng *rng)
{
	struct hwrng **ptr;
	int error;

	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	error = hwrng_register(rng);
	if (error) {
		devres_free(ptr);
		return error;
	}

	*ptr = rng;
	devres_add(dev, ptr);
	return 0;
}
EXPORT_SYMBOL_GPL(devm_hwrng_register);
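
/*
 * Device-managed counterpart in use (hypothetical probe, reusing the
 * example_rng sketched after hwrng_register() above): the registration
 * is undone automatically on unbind, so no explicit hwrng_unregister()
 * is needed in the error or remove paths.
 */
#if 0
static int example_probe_devm(struct device *dev)
{
	return devm_hwrng_register(dev, &example_rng);
}
#endif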
void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
long hwrng_msleep(struct hwrng *rng, unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	return wait_for_completion_interruptible_timeout(&rng->dying, timeout);
}
EXPORT_SYMBOL_GPL(hwrng_msleep);
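
/*
 * The extra jiffy above guarantees a non-zero timeout after rounding.
 * Driver-side sketch of why hwrng_msleep() exists (hypothetical polling
 * read callback and helpers): unlike msleep(), the wait returns early
 * once the RNG is unregistered and rng->dying completes.
 */
#if 0
static int example_poll_read(struct hwrng *rng, void *buf, size_t max,
			     bool wait)
{
	while (!example_data_ready()) {		/* hypothetical helper */
		if (!wait)
			return 0;
		if (hwrng_msleep(rng, 100))	/* != 0: dying or signal */
			return -ENODEV;
	}
	return example_fifo_read(buf, max);	/* hypothetical helper */
}
#endif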
long hwrng_yield(struct hwrng *rng)
{
	return wait_for_completion_interruptible_timeout(&rng->dying, 1);
}
EXPORT_SYMBOL_GPL(hwrng_yield);
static int __init hwrng_modinit(void)
{
	int ret;

	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_buffer)
		return -ENOMEM;

	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
	if (!rng_fillbuf) {
		kfree(rng_buffer);
		return -ENOMEM;
	}

	ret = misc_register(&rng_miscdev);
	if (ret) {
		kfree(rng_fillbuf);
		kfree(rng_buffer);
	}

	return ret;
}
static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	misc_deregister(&rng_miscdev);
}
fs_initcall(hwrng_modinit); /* depends on misc_register() */
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");