1 /*
2  * hw_random/core.c: HWRNG core API
3  *
4  * Copyright 2006 Michael Buesch <m@bues.ch>
5  * Copyright 2005 (c) MontaVista Software, Inc.
6  *
7  * Please read Documentation/admin-guide/hw_random.rst for details on use.
8  *
9  * This software may be used and distributed according to the terms
10  * of the GNU General Public License, incorporated herein by reference.
11  */
12 
13 #include <linux/delay.h>
14 #include <linux/device.h>
15 #include <linux/err.h>
16 #include <linux/fs.h>
17 #include <linux/hw_random.h>
18 #include <linux/random.h>
19 #include <linux/kernel.h>
20 #include <linux/kthread.h>
21 #include <linux/sched/signal.h>
22 #include <linux/miscdevice.h>
23 #include <linux/module.h>
24 #include <linux/random.h>
25 #include <linux/sched.h>
26 #include <linux/slab.h>
27 #include <linux/string.h>
28 #include <linux/uaccess.h>
29 
#define RNG_MODULE_NAME		"hw_random"

/* Staging buffers are at least 32 bytes and at least one cache line. */
#define RNG_BUFFER_SIZE (SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES)

/* The rng currently backing /dev/hwrng; protected by rng_mutex. */
static struct hwrng *current_rng;
/* the current rng has been explicitly chosen by user via sysfs */
static int cur_rng_set_by_user;
/* khwrngd fill thread; NULL when not running (see start_khwrngd()) */
static struct task_struct *hwrng_fill;
/* list of registered rngs, sorted descending by quality */
static LIST_HEAD(rng_list);
/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
static DEFINE_MUTEX(reading_mutex);
/* bytes still unconsumed in rng_buffer from the last device read */
static int data_avail;
/* staging buffers for /dev/hwrng reads and for the fill thread */
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
static unsigned short default_quality; /* = 0; default to "off" */

module_param(current_quality, ushort, 0644);
MODULE_PARM_DESC(current_quality,
		 "current hwrng entropy estimation per 1024 bits of input");
module_param(default_quality, ushort, 0644);
MODULE_PARM_DESC(default_quality,
		 "default entropy content of hwrng per 1024 bits of input");

/* Forward declarations for helpers that reference each other below. */
static void drop_current_rng(void);
static int hwrng_init(struct hwrng *rng);
static void start_khwrngd(void);

static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			       int wait);
62 
rng_buffer_size(void)63 static size_t rng_buffer_size(void)
64 {
65 	return RNG_BUFFER_SIZE;
66 }
67 
add_early_randomness(struct hwrng *rng)68 static void add_early_randomness(struct hwrng *rng)
69 {
70 	int bytes_read;
71 	size_t size = min_t(size_t, 16, rng_buffer_size());
72 
73 	mutex_lock(&reading_mutex);
74 	bytes_read = rng_get_data(rng, rng_buffer, size, 0);
75 	mutex_unlock(&reading_mutex);
76 	if (bytes_read > 0)
77 		add_device_randomness(rng_buffer, bytes_read);
78 }
79 
cleanup_rng(struct kref *kref)80 static inline void cleanup_rng(struct kref *kref)
81 {
82 	struct hwrng *rng = container_of(kref, struct hwrng, ref);
83 
84 	if (rng->cleanup)
85 		rng->cleanup(rng);
86 
87 	complete(&rng->cleanup_done);
88 }
89 
set_current_rng(struct hwrng *rng)90 static int set_current_rng(struct hwrng *rng)
91 {
92 	int err;
93 
94 	BUG_ON(!mutex_is_locked(&rng_mutex));
95 
96 	err = hwrng_init(rng);
97 	if (err)
98 		return err;
99 
100 	drop_current_rng();
101 	current_rng = rng;
102 
103 	return 0;
104 }
105 
drop_current_rng(void)106 static void drop_current_rng(void)
107 {
108 	BUG_ON(!mutex_is_locked(&rng_mutex));
109 	if (!current_rng)
110 		return;
111 
112 	/* decrease last reference for triggering the cleanup */
113 	kref_put(&current_rng->ref, cleanup_rng);
114 	current_rng = NULL;
115 }
116 
117 /* Returns ERR_PTR(), NULL or refcounted hwrng */
get_current_rng_nolock(void)118 static struct hwrng *get_current_rng_nolock(void)
119 {
120 	if (current_rng)
121 		kref_get(&current_rng->ref);
122 
123 	return current_rng;
124 }
125 
get_current_rng(void)126 static struct hwrng *get_current_rng(void)
127 {
128 	struct hwrng *rng;
129 
130 	if (mutex_lock_interruptible(&rng_mutex))
131 		return ERR_PTR(-ERESTARTSYS);
132 
133 	rng = get_current_rng_nolock();
134 
135 	mutex_unlock(&rng_mutex);
136 	return rng;
137 }
138 
/*
 * Drop a reference taken by get_current_rng()/get_current_rng_nolock().
 * Tolerates a NULL @rng (the mutex is still taken and released).
 */
static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize in case they set_current_rng
	 * on rng again immediately.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}
150 
hwrng_init(struct hwrng *rng)151 static int hwrng_init(struct hwrng *rng)
152 {
153 	if (kref_get_unless_zero(&rng->ref))
154 		goto skip_init;
155 
156 	if (rng->init) {
157 		int ret;
158 
159 		ret =  rng->init(rng);
160 		if (ret)
161 			return ret;
162 	}
163 
164 	kref_init(&rng->ref);
165 	reinit_completion(&rng->cleanup_done);
166 
167 skip_init:
168 	current_quality = rng->quality ? : default_quality;
169 	if (current_quality > 1024)
170 		current_quality = 1024;
171 
172 	if (current_quality == 0 && hwrng_fill)
173 		kthread_stop(hwrng_fill);
174 	if (current_quality > 0 && !hwrng_fill)
175 		start_khwrngd();
176 
177 	return 0;
178 }
179 
rng_dev_open(struct inode *inode, struct file *filp)180 static int rng_dev_open(struct inode *inode, struct file *filp)
181 {
182 	/* enforce read-only access to this chrdev */
183 	if ((filp->f_mode & FMODE_READ) == 0)
184 		return -EINVAL;
185 	if (filp->f_mode & FMODE_WRITE)
186 		return -EINVAL;
187 	return 0;
188 }
189 
/*
 * Pull up to @size bytes from @rng into @buffer, blocking only when
 * @wait is set.  Prefers the modern ->read callback; otherwise falls
 * back to the legacy ->data_present/->data_read pair.  Caller must
 * hold reading_mutex.  Returns bytes read, 0 for no data, or negative
 * error from the driver.
 */
static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
			int wait) {
	BUG_ON(!mutex_is_locked(&reading_mutex));

	if (rng->read)
		return rng->read(rng, (void *)buffer, size, wait);

	/* legacy two-step interface: poll availability, then read a word */
	if (rng->data_present && !rng->data_present(rng, wait))
		return 0;

	return rng->data_read(rng, (u32 *)buffer);
}
208 
/*
 * Read handler for /dev/hwrng.  Refills the shared rng_buffer from the
 * current rng under reading_mutex, stages the bytes into an on-stack
 * buffer, and copies them to userspace outside the lock.  Honors
 * O_NONBLOCK and pending signals.  Returns the number of bytes copied,
 * or a negative error only when nothing was copied.
 */
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	u8 buffer[RNG_BUFFER_SIZE];
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			/* refill; block only when the fd is blocking */
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			} else if (bytes_read == 0 &&
				   (filp->f_flags & O_NONBLOCK)) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}

			data_avail = bytes_read;
		}

		len = data_avail;
		if (len) {
			if (len > size)
				len = size;

			/* consume from the tail of rng_buffer */
			data_avail -= len;

			/* stage locally so copy_to_user() (which may
			 * fault/sleep) runs outside reading_mutex */
			memcpy(buffer, rng_buffer + data_avail, len);
		}
		mutex_unlock(&reading_mutex);
		put_rng(rng);

		if (len) {
			if (copy_to_user(buf + ret, buffer, len)) {
				err = -EFAULT;
				goto out;
			}

			size -= len;
			ret += len;
		}


		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	/* don't leave random bytes lying on the kernel stack */
	memzero_explicit(buffer, sizeof(buffer));
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}
290 
/* File operations for the /dev/hwrng character device. */
static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};
297 
static const struct attribute_group *rng_dev_groups[];

/* Misc device backing /dev/hwrng, with the sysfs attribute groups. */
static struct miscdevice rng_miscdev = {
	.minor		= HWRNG_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};
307 
enable_best_rng(void)308 static int enable_best_rng(void)
309 {
310 	int ret = -ENODEV;
311 
312 	BUG_ON(!mutex_is_locked(&rng_mutex));
313 
314 	/* rng_list is sorted by quality, use the best (=first) one */
315 	if (!list_empty(&rng_list)) {
316 		struct hwrng *new_rng;
317 
318 		new_rng = list_entry(rng_list.next, struct hwrng, list);
319 		ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
320 		if (!ret)
321 			cur_rng_set_by_user = 0;
322 	} else {
323 		drop_current_rng();
324 		cur_rng_set_by_user = 0;
325 		ret = 0;
326 	}
327 
328 	return ret;
329 }
330 
hwrng_attr_current_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len)331 static ssize_t hwrng_attr_current_store(struct device *dev,
332 					struct device_attribute *attr,
333 					const char *buf, size_t len)
334 {
335 	int err = -ENODEV;
336 	struct hwrng *rng, *old_rng, *new_rng;
337 
338 	err = mutex_lock_interruptible(&rng_mutex);
339 	if (err)
340 		return -ERESTARTSYS;
341 
342 	old_rng = current_rng;
343 	if (sysfs_streq(buf, "")) {
344 		err = enable_best_rng();
345 	} else {
346 		list_for_each_entry(rng, &rng_list, list) {
347 			if (sysfs_streq(rng->name, buf)) {
348 				cur_rng_set_by_user = 1;
349 				err = set_current_rng(rng);
350 				break;
351 			}
352 		}
353 	}
354 	new_rng = get_current_rng_nolock();
355 	mutex_unlock(&rng_mutex);
356 
357 	if (new_rng) {
358 		if (new_rng != old_rng)
359 			add_early_randomness(new_rng);
360 		put_rng(new_rng);
361 	}
362 
363 	return err ? : len;
364 }
365 
hwrng_attr_current_show(struct device *dev, struct device_attribute *attr, char *buf)366 static ssize_t hwrng_attr_current_show(struct device *dev,
367 				       struct device_attribute *attr,
368 				       char *buf)
369 {
370 	ssize_t ret;
371 	struct hwrng *rng;
372 
373 	rng = get_current_rng();
374 	if (IS_ERR(rng))
375 		return PTR_ERR(rng);
376 
377 	ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
378 	put_rng(rng);
379 
380 	return ret;
381 }
382 
hwrng_attr_available_show(struct device *dev, struct device_attribute *attr, char *buf)383 static ssize_t hwrng_attr_available_show(struct device *dev,
384 					 struct device_attribute *attr,
385 					 char *buf)
386 {
387 	int err;
388 	struct hwrng *rng;
389 
390 	err = mutex_lock_interruptible(&rng_mutex);
391 	if (err)
392 		return -ERESTARTSYS;
393 	buf[0] = '\0';
394 	list_for_each_entry(rng, &rng_list, list) {
395 		strlcat(buf, rng->name, PAGE_SIZE);
396 		strlcat(buf, " ", PAGE_SIZE);
397 	}
398 	strlcat(buf, "\n", PAGE_SIZE);
399 	mutex_unlock(&rng_mutex);
400 
401 	return strlen(buf);
402 }
403 
/* sysfs show for "rng_selected": 1 if the user pinned an rng via sysfs. */
static ssize_t hwrng_attr_selected_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", cur_rng_set_by_user);
}
410 
/* sysfs attributes exposed under the hwrng misc device. */
static DEVICE_ATTR(rng_current, S_IRUGO | S_IWUSR,
		   hwrng_attr_current_show,
		   hwrng_attr_current_store);
static DEVICE_ATTR(rng_available, S_IRUGO,
		   hwrng_attr_available_show,
		   NULL);
static DEVICE_ATTR(rng_selected, S_IRUGO,
		   hwrng_attr_selected_show,
		   NULL);

static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	&dev_attr_rng_selected.attr,
	NULL
};

/* generates rng_dev_groups, referenced by rng_miscdev above */
ATTRIBUTE_GROUPS(rng_dev);
429 
/* Remove the /dev/hwrng misc device on module exit. */
static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}
434 
/* Create the /dev/hwrng misc device on module init. */
static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}
439 
hwrng_fillfn(void *unused)440 static int hwrng_fillfn(void *unused)
441 {
442 	long rc;
443 
444 	while (!kthread_should_stop()) {
445 		struct hwrng *rng;
446 
447 		rng = get_current_rng();
448 		if (IS_ERR(rng) || !rng)
449 			break;
450 		mutex_lock(&reading_mutex);
451 		rc = rng_get_data(rng, rng_fillbuf,
452 				  rng_buffer_size(), 1);
453 		mutex_unlock(&reading_mutex);
454 		put_rng(rng);
455 		if (rc <= 0) {
456 			pr_warn("hwrng: no data available\n");
457 			msleep_interruptible(10000);
458 			continue;
459 		}
460 		/* Outside lock, sure, but y'know: randomness. */
461 		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
462 					   rc * current_quality * 8 >> 10);
463 	}
464 	hwrng_fill = NULL;
465 	return 0;
466 }
467 
/*
 * Launch the khwrngd fill thread.  On failure hwrng_fill is reset to
 * NULL so hwrng_init() may retry later.
 */
static void start_khwrngd(void)
{
	hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
	if (IS_ERR(hwrng_fill)) {
		pr_err("hwrng_fill thread creation failed\n");
		hwrng_fill = NULL;
	}
}
476 
/*
 * hwrng_register - register a hardware random number generator
 * @rng: the rng to register; needs a name and a ->read or ->data_read
 *
 * Inserts @rng into rng_list (kept sorted by decreasing quality) and
 * promotes it to current rng when none exists yet, or when it beats the
 * current one's quality and the user has not pinned a choice via sysfs.
 *
 * Returns 0 on success, -EINVAL for a malformed @rng, -EEXIST for a
 * duplicate name, or the set_current_rng() error.
 */
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *tmp;
	struct list_head *rng_list_ptr;
	bool is_new_current = false;

	if (!rng->name || (!rng->data_read && !rng->read))
		goto out;

	mutex_lock(&rng_mutex);

	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}

	/* start out "completed" so hwrng_unregister() never waits on a
	 * cleanup that was never started */
	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

	/* rng_list is sorted by decreasing quality */
	list_for_each(rng_list_ptr, &rng_list) {
		tmp = list_entry(rng_list_ptr, struct hwrng, list);
		if (tmp->quality < rng->quality)
			break;
	}
	list_add_tail(&rng->list, rng_list_ptr);

	if (!current_rng ||
	    (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
		/*
		 * Set new rng as current as the new rng source
		 * provides better entropy quality and was not
		 * chosen by userspace.
		 */
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;
		/* to use current_rng in add_early_randomness() we need
		 * to take a ref
		 */
		is_new_current = true;
		kref_get(&rng->ref);
	}
	mutex_unlock(&rng_mutex);
	if (is_new_current || !rng->init) {
		/*
		 * Use a new device's input to add some randomness to
		 * the system.  If this rng device isn't going to be
		 * used right away, its init function hasn't been
		 * called yet by set_current_rng(); so only use the
		 * randomness from devices that don't need an init callback
		 */
		add_early_randomness(rng);
	}
	if (is_new_current)
		put_rng(rng);
	return 0;
out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
542 EXPORT_SYMBOL_GPL(hwrng_register);
543 
/*
 * hwrng_unregister - unregister a hardware random number generator
 * @rng: the rng to remove
 *
 * Removes @rng from rng_list; if it was the current rng, the best
 * remaining one is promoted (clearing the user's sysfs pick).  The last
 * rng going away also stops the khwrngd fill thread.  Blocks until
 * @rng's cleanup (via cleanup_rng()) has completed.
 */
void hwrng_unregister(struct hwrng *rng)
{
	struct hwrng *old_rng, *new_rng;
	int err;

	mutex_lock(&rng_mutex);

	old_rng = current_rng;
	list_del(&rng->list);
	if (current_rng == rng) {
		err = enable_best_rng();
		if (err) {
			/* promotion failed: run without a current rng */
			drop_current_rng();
			cur_rng_set_by_user = 0;
		}
	}

	new_rng = get_current_rng_nolock();
	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		/* no rngs left to feed the entropy pool */
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	/* seed the entropy pool from a newly promoted rng */
	if (new_rng) {
		if (old_rng != new_rng)
			add_early_randomness(new_rng);
		put_rng(new_rng);
	}

	wait_for_completion(&rng->cleanup_done);
}
577 EXPORT_SYMBOL_GPL(hwrng_unregister);
578 
/* devres release callback: unregister the managed rng. */
static void devm_hwrng_release(struct device *dev, void *res)
{
	struct hwrng **rngp = res;

	hwrng_unregister(*rngp);
}
583 
/* devres match callback: true when @res holds the rng given in @data. */
static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}
593 
devm_hwrng_register(struct device *dev, struct hwrng *rng)594 int devm_hwrng_register(struct device *dev, struct hwrng *rng)
595 {
596 	struct hwrng **ptr;
597 	int error;
598 
599 	ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
600 	if (!ptr)
601 		return -ENOMEM;
602 
603 	error = hwrng_register(rng);
604 	if (error) {
605 		devres_free(ptr);
606 		return error;
607 	}
608 
609 	*ptr = rng;
610 	devres_add(dev, ptr);
611 	return 0;
612 }
613 EXPORT_SYMBOL_GPL(devm_hwrng_register);
614 
/* Undo devm_hwrng_register() before @dev is released. */
void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
619 EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
620 
hwrng_modinit(void)621 static int __init hwrng_modinit(void)
622 {
623 	int ret;
624 
625 	/* kmalloc makes this safe for virt_to_page() in virtio_rng.c */
626 	rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
627 	if (!rng_buffer)
628 		return -ENOMEM;
629 
630 	rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
631 	if (!rng_fillbuf) {
632 		kfree(rng_buffer);
633 		return -ENOMEM;
634 	}
635 
636 	ret = register_miscdev();
637 	if (ret) {
638 		kfree(rng_fillbuf);
639 		kfree(rng_buffer);
640 	}
641 
642 	return ret;
643 }
644 
/*
 * Module exit: free the staging buffers and remove the misc device.
 * All rngs must already be unregistered (current_rng must be NULL).
 */
static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}
655 
module_init(hwrng_modinit);
module_exit(hwrng_modexit);

MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");
661