0001
0002
0003
0004
0005
0006
0007
0008
0009
0010
0011
0012
0013 #include <linux/delay.h>
0014 #include <linux/device.h>
0015 #include <linux/err.h>
0016 #include <linux/fs.h>
0017 #include <linux/hw_random.h>
0018 #include <linux/random.h>
0019 #include <linux/kernel.h>
0020 #include <linux/kthread.h>
0021 #include <linux/sched/signal.h>
0022 #include <linux/miscdevice.h>
0023 #include <linux/module.h>
0024 #include <linux/random.h>
0025 #include <linux/sched.h>
0026 #include <linux/slab.h>
0027 #include <linux/uaccess.h>
0028
0029 #define RNG_MODULE_NAME "hw_random"
0030
0031 static struct hwrng *current_rng;
0032
0033 static int cur_rng_set_by_user;
0034 static struct task_struct *hwrng_fill;
0035
0036 static LIST_HEAD(rng_list);
0037
0038 static DEFINE_MUTEX(rng_mutex);
0039
0040 static DEFINE_MUTEX(reading_mutex);
0041 static int data_avail;
0042 static u8 *rng_buffer, *rng_fillbuf;
0043 static unsigned short current_quality;
0044 static unsigned short default_quality;
0045
0046 module_param(current_quality, ushort, 0644);
0047 MODULE_PARM_DESC(current_quality,
0048 "current hwrng entropy estimation per 1024 bits of input -- obsolete, use rng_quality instead");
0049 module_param(default_quality, ushort, 0644);
0050 MODULE_PARM_DESC(default_quality,
0051 "default entropy content of hwrng per 1024 bits of input");
0052
0053 static void drop_current_rng(void);
0054 static int hwrng_init(struct hwrng *rng);
0055 static void hwrng_manage_rngd(struct hwrng *rng);
0056
0057 static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
0058 int wait);
0059
0060 static size_t rng_buffer_size(void)
0061 {
0062 return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
0063 }
0064
0065 static void add_early_randomness(struct hwrng *rng)
0066 {
0067 int bytes_read;
0068
0069 mutex_lock(&reading_mutex);
0070 bytes_read = rng_get_data(rng, rng_fillbuf, 32, 0);
0071 mutex_unlock(&reading_mutex);
0072 if (bytes_read > 0)
0073 add_device_randomness(rng_fillbuf, bytes_read);
0074 }
0075
/*
 * cleanup_rng - kref release callback for a struct hwrng.
 *
 * Runs when the last reference to the device is dropped.  Invokes the
 * driver's optional ->cleanup() hook, then completes ->cleanup_done so
 * hwrng_unregister() can finish waiting out the teardown.
 */
static inline void cleanup_rng(struct kref *kref)
{
	struct hwrng *rng = container_of(kref, struct hwrng, ref);

	if (rng->cleanup)
		rng->cleanup(rng);

	/* wake anyone blocked in hwrng_unregister() */
	complete(&rng->cleanup_done);
}
0085
/*
 * set_current_rng - make @rng the device served by /dev/hwrng.
 *
 * Caller must hold rng_mutex.  Initializes @rng (which takes a reference
 * via hwrng_init()) before dropping the previous current_rng, so a failed
 * init leaves the old device in place.  Returns 0 or a negative errno.
 */
static int set_current_rng(struct hwrng *rng)
{
	int err;

	BUG_ON(!mutex_is_locked(&rng_mutex));

	err = hwrng_init(rng);
	if (err)
		return err;

	drop_current_rng();
	current_rng = rng;

	return 0;
}
0101
0102 static void drop_current_rng(void)
0103 {
0104 BUG_ON(!mutex_is_locked(&rng_mutex));
0105 if (!current_rng)
0106 return;
0107
0108
0109 kref_put(¤t_rng->ref, cleanup_rng);
0110 current_rng = NULL;
0111 }
0112
0113
0114 static struct hwrng *get_current_rng_nolock(void)
0115 {
0116 if (current_rng)
0117 kref_get(¤t_rng->ref);
0118
0119 return current_rng;
0120 }
0121
0122 static struct hwrng *get_current_rng(void)
0123 {
0124 struct hwrng *rng;
0125
0126 if (mutex_lock_interruptible(&rng_mutex))
0127 return ERR_PTR(-ERESTARTSYS);
0128
0129 rng = get_current_rng_nolock();
0130
0131 mutex_unlock(&rng_mutex);
0132 return rng;
0133 }
0134
/*
 * put_rng - drop a reference obtained from get_current_rng[_nolock]().
 *
 * Accepts NULL for convenience.  The final put may run cleanup_rng().
 */
static void put_rng(struct hwrng *rng)
{
	/*
	 * Hold rng_mutex here so we serialize against a concurrent
	 * set_current_rng()/hwrng_init() re-taking a reference on the
	 * same device while its refcount is being dropped.
	 */
	mutex_lock(&rng_mutex);
	if (rng)
		kref_put(&rng->ref, cleanup_rng);
	mutex_unlock(&rng_mutex);
}
0146
/*
 * hwrng_init - take a reference on @rng, initializing it on first use.
 *
 * Caller must hold rng_mutex.  If the device already has live references,
 * kref_get_unless_zero() succeeds and the driver ->init() hook is skipped;
 * otherwise ->init() runs and the refcount is (re)started at one.  Also
 * clamps the entropy quality into 0..1024 and starts or stops the rngd
 * feeder kthread to match.  Returns 0 or the ->init() error.
 */
static int hwrng_init(struct hwrng *rng)
{
	if (kref_get_unless_zero(&rng->ref))
		goto skip_init;

	if (rng->init) {
		int ret;

		ret = rng->init(rng);
		if (ret)
			return ret;
	}

	/* first user: start the refcount and re-arm the teardown signal */
	kref_init(&rng->ref);
	reinit_completion(&rng->cleanup_done);

skip_init:
	if (!rng->quality)
		rng->quality = default_quality;
	if (rng->quality > 1024)
		rng->quality = 1024;	/* quality is per 1024 bits of input */
	current_quality = rng->quality; /* obsolete; mirrors module param */

	hwrng_manage_rngd(rng);

	return 0;
}
0174
0175 static int rng_dev_open(struct inode *inode, struct file *filp)
0176 {
0177
0178 if ((filp->f_mode & FMODE_READ) == 0)
0179 return -EINVAL;
0180 if (filp->f_mode & FMODE_WRITE)
0181 return -EINVAL;
0182 return 0;
0183 }
0184
0185 static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
0186 int wait) {
0187 int present;
0188
0189 BUG_ON(!mutex_is_locked(&reading_mutex));
0190 if (rng->read)
0191 return rng->read(rng, (void *)buffer, size, wait);
0192
0193 if (rng->data_present)
0194 present = rng->data_present(rng, wait);
0195 else
0196 present = 1;
0197
0198 if (present)
0199 return rng->data_read(rng, (u32 *)buffer);
0200
0201 return 0;
0202 }
0203
/*
 * rng_dev_read - read() handler for /dev/hwrng.
 *
 * Repeatedly refills the shared staging buffer (rng_buffer, guarded by
 * reading_mutex along with data_avail) from the current RNG and copies
 * it to userspace.  The current RNG is re-resolved each iteration so a
 * device switch or removal is honored mid-read.  Returns the number of
 * bytes copied, or a negative errno if nothing was copied.
 */
static ssize_t rng_dev_read(struct file *filp, char __user *buf,
			    size_t size, loff_t *offp)
{
	ssize_t ret = 0;
	int err = 0;
	int bytes_read, len;
	struct hwrng *rng;

	while (size) {
		rng = get_current_rng();
		if (IS_ERR(rng)) {
			err = PTR_ERR(rng);
			goto out;
		}
		if (!rng) {
			err = -ENODEV;
			goto out;
		}

		if (mutex_lock_interruptible(&reading_mutex)) {
			err = -ERESTARTSYS;
			goto out_put;
		}
		if (!data_avail) {
			/* staging buffer empty: refill; block unless O_NONBLOCK */
			bytes_read = rng_get_data(rng, rng_buffer,
				rng_buffer_size(),
				!(filp->f_flags & O_NONBLOCK));
			if (bytes_read < 0) {
				err = bytes_read;
				goto out_unlock_reading;
			}
			data_avail = bytes_read;
		}

		if (!data_avail) {
			if (filp->f_flags & O_NONBLOCK) {
				err = -EAGAIN;
				goto out_unlock_reading;
			}
		} else {
			len = data_avail;
			if (len > size)
				len = size;

			/* consume from the tail of the staging buffer */
			data_avail -= len;

			if (copy_to_user(buf + ret, rng_buffer + data_avail,
								len)) {
				err = -EFAULT;
				goto out_unlock_reading;
			}

			size -= len;
			ret += len;
		}

		mutex_unlock(&reading_mutex);
		put_rng(rng);

		/* be polite on long reads: yield and honor signals */
		if (need_resched())
			schedule_timeout_interruptible(1);

		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto out;
		}
	}
out:
	/* partial reads succeed; report the error only if nothing was copied */
	return ret ? : err;

out_unlock_reading:
	mutex_unlock(&reading_mutex);
out_put:
	put_rng(rng);
	goto out;
}
0280
/* File operations for the /dev/hwrng character device (read-only). */
static const struct file_operations rng_chrdev_ops = {
	.owner		= THIS_MODULE,
	.open		= rng_dev_open,
	.read		= rng_dev_read,
	.llseek		= noop_llseek,
};
0287
0288 static const struct attribute_group *rng_dev_groups[];
0289
/* The /dev/hwrng misc device; sysfs attributes attach via .groups. */
static struct miscdevice rng_miscdev = {
	.minor		= HWRNG_MINOR,
	.name		= RNG_MODULE_NAME,
	.nodename	= "hwrng",
	.fops		= &rng_chrdev_ops,
	.groups		= rng_dev_groups,
};
0297
0298 static int enable_best_rng(void)
0299 {
0300 struct hwrng *rng, *new_rng = NULL;
0301 int ret = -ENODEV;
0302
0303 BUG_ON(!mutex_is_locked(&rng_mutex));
0304
0305
0306 if (list_empty(&rng_list)) {
0307 drop_current_rng();
0308 cur_rng_set_by_user = 0;
0309 return 0;
0310 }
0311
0312
0313 list_for_each_entry(rng, &rng_list, list) {
0314 if (!new_rng || rng->quality > new_rng->quality)
0315 new_rng = rng;
0316 }
0317
0318 ret = ((new_rng == current_rng) ? 0 : set_current_rng(new_rng));
0319 if (!ret)
0320 cur_rng_set_by_user = 0;
0321
0322 return ret;
0323 }
0324
0325 static ssize_t rng_current_store(struct device *dev,
0326 struct device_attribute *attr,
0327 const char *buf, size_t len)
0328 {
0329 int err;
0330 struct hwrng *rng, *old_rng, *new_rng;
0331
0332 err = mutex_lock_interruptible(&rng_mutex);
0333 if (err)
0334 return -ERESTARTSYS;
0335
0336 old_rng = current_rng;
0337 if (sysfs_streq(buf, "")) {
0338 err = enable_best_rng();
0339 } else {
0340 list_for_each_entry(rng, &rng_list, list) {
0341 if (sysfs_streq(rng->name, buf)) {
0342 err = set_current_rng(rng);
0343 if (!err)
0344 cur_rng_set_by_user = 1;
0345 break;
0346 }
0347 }
0348 }
0349 new_rng = get_current_rng_nolock();
0350 mutex_unlock(&rng_mutex);
0351
0352 if (new_rng) {
0353 if (new_rng != old_rng)
0354 add_early_randomness(new_rng);
0355 put_rng(new_rng);
0356 }
0357
0358 return err ? : len;
0359 }
0360
0361 static ssize_t rng_current_show(struct device *dev,
0362 struct device_attribute *attr,
0363 char *buf)
0364 {
0365 ssize_t ret;
0366 struct hwrng *rng;
0367
0368 rng = get_current_rng();
0369 if (IS_ERR(rng))
0370 return PTR_ERR(rng);
0371
0372 ret = snprintf(buf, PAGE_SIZE, "%s\n", rng ? rng->name : "none");
0373 put_rng(rng);
0374
0375 return ret;
0376 }
0377
0378 static ssize_t rng_available_show(struct device *dev,
0379 struct device_attribute *attr,
0380 char *buf)
0381 {
0382 int err;
0383 struct hwrng *rng;
0384
0385 err = mutex_lock_interruptible(&rng_mutex);
0386 if (err)
0387 return -ERESTARTSYS;
0388 buf[0] = '\0';
0389 list_for_each_entry(rng, &rng_list, list) {
0390 strlcat(buf, rng->name, PAGE_SIZE);
0391 strlcat(buf, " ", PAGE_SIZE);
0392 }
0393 strlcat(buf, "\n", PAGE_SIZE);
0394 mutex_unlock(&rng_mutex);
0395
0396 return strlen(buf);
0397 }
0398
/* Shows 1 if the current RNG was pinned via rng_current, 0 if auto-picked. */
static ssize_t rng_selected_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	return sysfs_emit(buf, "%d\n", cur_rng_set_by_user);
}
0405
0406 static ssize_t rng_quality_show(struct device *dev,
0407 struct device_attribute *attr,
0408 char *buf)
0409 {
0410 ssize_t ret;
0411 struct hwrng *rng;
0412
0413 rng = get_current_rng();
0414 if (IS_ERR(rng))
0415 return PTR_ERR(rng);
0416
0417 if (!rng)
0418 return -ENODEV;
0419
0420 ret = sysfs_emit(buf, "%hu\n", rng->quality);
0421 put_rng(rng);
0422
0423 return ret;
0424 }
0425
/*
 * rng_quality_store - sysfs write handler for rng_quality.
 *
 * Overrides the current device's entropy estimate (bits per 1024 bits of
 * input, so 0..1024).  Changing quality can change which device is
 * "best", so auto-selection is re-run, and the rngd feeder kthread is
 * started or stopped to match the (possibly new) current device.
 * Returns @len on success or a negative errno.
 */
static ssize_t rng_quality_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t len)
{
	u16 quality;
	int ret = -EINVAL;

	/* need at least one digit plus the newline */
	if (len < 2)
		return -EINVAL;

	ret = mutex_lock_interruptible(&rng_mutex);
	if (ret)
		return -ERESTARTSYS;

	ret = kstrtou16(buf, 0, &quality);
	if (ret || quality > 1024) {
		ret = -EINVAL;
		goto out;
	}

	if (!current_rng) {
		ret = -ENODEV;
		goto out;
	}

	current_rng->quality = quality;
	current_quality = quality; /* obsolete module-param mirror */

	/* the quality changed; the best available rng may differ now */
	ret = enable_best_rng();

	/* start/stop the rngd kthread if necessary */
	if (current_rng)
		hwrng_manage_rngd(current_rng);

out:
	mutex_unlock(&rng_mutex);
	return ret ? ret : len;
}
0465
static DEVICE_ATTR_RW(rng_current);
static DEVICE_ATTR_RO(rng_available);
static DEVICE_ATTR_RO(rng_selected);
static DEVICE_ATTR_RW(rng_quality);

/* sysfs attributes exposed by the hw_random misc device */
static struct attribute *rng_dev_attrs[] = {
	&dev_attr_rng_current.attr,
	&dev_attr_rng_available.attr,
	&dev_attr_rng_selected.attr,
	&dev_attr_rng_quality.attr,
	NULL
};

/* generates rng_dev_groups, referenced by rng_miscdev.groups */
ATTRIBUTE_GROUPS(rng_dev);
0480
/* Tear down the /dev/hwrng misc device at module exit. */
static void __exit unregister_miscdev(void)
{
	misc_deregister(&rng_miscdev);
}
0485
/* Create the /dev/hwrng misc device; returns 0 or a negative errno. */
static int __init register_miscdev(void)
{
	return misc_register(&rng_miscdev);
}
0490
/*
 * hwrng_fillfn - body of the "hwrng" kthread (rngd).
 *
 * Continuously reads from the current RNG and feeds the kernel's
 * entropy pool via add_hwgenerator_randomness(), crediting entropy
 * according to the device's quality setting.  Exits when there is no
 * usable device, when quality drops to zero, or when kthread_stop()
 * is called.
 */
static int hwrng_fillfn(void *unused)
{
	size_t entropy, entropy_credit = 0;
	long rc;

	while (!kthread_should_stop()) {
		unsigned short quality;
		struct hwrng *rng;

		rng = get_current_rng();
		if (IS_ERR(rng) || !rng)
			break;
		mutex_lock(&reading_mutex);
		rc = rng_get_data(rng, rng_fillbuf,
				  rng_buffer_size(), 1);
		/* sync with the obsolete current_quality module parameter */
		if (current_quality != rng->quality)
			rng->quality = current_quality;
		quality = rng->quality;
		mutex_unlock(&reading_mutex);
		put_rng(rng);

		/* zero quality means "credit no entropy": stop feeding */
		if (!quality)
			break;

		if (rc <= 0) {
			pr_warn("hwrng: no data available\n");
			msleep_interruptible(10000);
			continue;
		}

		/*
		 * Entropy is accounted in units of 1/1024 bit (quality is
		 * bits per 1024 bits of input).  If this batch does not
		 * amount to a whole creditable bit, carry the remainder
		 * over to the next iteration via entropy_credit.
		 */
		entropy = rc * quality * 8 + entropy_credit;
		if ((entropy >> 10) == 0)
			entropy_credit = entropy;

		/* feed the pool, crediting whole bits only */
		add_hwgenerator_randomness((void *)rng_fillbuf, rc,
					   entropy >> 10);
	}
	/* mark the thread gone so hwrng_manage_rngd() can restart it */
	hwrng_fill = NULL;
	return 0;
}
0535
/*
 * hwrng_manage_rngd - start or stop the rngd kthread to match @rng.
 *
 * Caller must hold rng_mutex (WARN and bail otherwise).  A device with
 * zero quality contributes no creditable entropy, so the feeder thread
 * is stopped; non-zero quality (re)starts it if it is not running.
 */
static void hwrng_manage_rngd(struct hwrng *rng)
{
	if (WARN_ON(!mutex_is_locked(&rng_mutex)))
		return;

	if (rng->quality == 0 && hwrng_fill)
		kthread_stop(hwrng_fill);
	if (rng->quality > 0 && !hwrng_fill) {
		hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
		if (IS_ERR(hwrng_fill)) {
			pr_err("hwrng_fill thread creation failed\n");
			hwrng_fill = NULL;
		}
	}
}
0551
/**
 * hwrng_register - register a hardware random number generator.
 * @rng: device to add; needs a unique ->name and either a ->read or a
 *	 legacy ->data_read callback.
 *
 * Adds @rng to the device list and, if it is the first device or beats
 * the current one on quality (and the user has not pinned a device via
 * sysfs), makes it current.  Returns 0, -EINVAL for a malformed @rng,
 * or -EEXIST for a duplicate name.
 */
int hwrng_register(struct hwrng *rng)
{
	int err = -EINVAL;
	struct hwrng *tmp;
	bool is_new_current = false;

	if (!rng->name || (!rng->data_read && !rng->read))
		goto out;

	mutex_lock(&rng_mutex);

	/* Must not register two RNGs with the same name. */
	err = -EEXIST;
	list_for_each_entry(tmp, &rng_list, list) {
		if (strcmp(tmp->name, rng->name) == 0)
			goto out_unlock;
	}
	list_add_tail(&rng->list, &rng_list);

	/* pre-complete so a never-current device can still be unregistered */
	init_completion(&rng->cleanup_done);
	complete(&rng->cleanup_done);

	if (!current_rng ||
	    (!cur_rng_set_by_user && rng->quality > current_rng->quality)) {
		/*
		 * Make the new device current: either nothing is current
		 * yet, or this one offers better quality and userspace
		 * has not pinned a specific device.
		 */
		err = set_current_rng(rng);
		if (err)
			goto out_unlock;

		/* take a ref for add_early_randomness() while still
		 * holding rng_mutex, so the device cannot go away */
		is_new_current = true;
		kref_get(&rng->ref);
	}
	mutex_unlock(&rng_mutex);
	if (is_new_current || !rng->init) {
		/*
		 * Use the new device's output to seed the entropy pools.
		 * If the device is not becoming current, its ->init()
		 * has not run yet (set_current_rng() calls it), so only
		 * sample devices that need no init callback.
		 */
		add_early_randomness(rng);
	}
	if (is_new_current)
		put_rng(rng);
	return 0;
out_unlock:
	mutex_unlock(&rng_mutex);
out:
	return err;
}
0609 EXPORT_SYMBOL_GPL(hwrng_register);
0610
/**
 * hwrng_unregister - remove a previously registered RNG.
 * @rng: device to remove.
 *
 * Takes @rng off the device list; if it was current, falls back to the
 * best remaining device (or none).  Stops the rngd kthread when the
 * list becomes empty, then blocks until all references to @rng are
 * dropped and its cleanup has completed.
 */
void hwrng_unregister(struct hwrng *rng)
{
	struct hwrng *old_rng, *new_rng;
	int err;

	mutex_lock(&rng_mutex);

	old_rng = current_rng;
	list_del(&rng->list);
	if (current_rng == rng) {
		err = enable_best_rng();
		if (err) {
			/* fallback failed: run with no current device */
			drop_current_rng();
			cur_rng_set_by_user = 0;
		}
	}

	new_rng = get_current_rng_nolock();
	if (list_empty(&rng_list)) {
		mutex_unlock(&rng_mutex);
		/* no devices left: stop the entropy-feeding kthread */
		if (hwrng_fill)
			kthread_stop(hwrng_fill);
	} else
		mutex_unlock(&rng_mutex);

	if (new_rng) {
		/* a different device became current: seed from it */
		if (old_rng != new_rng)
			add_early_randomness(new_rng);
		put_rng(new_rng);
	}

	/* wait for outstanding users; cleanup_rng() completes this */
	wait_for_completion(&rng->cleanup_done);
}
0644 EXPORT_SYMBOL_GPL(hwrng_unregister);
0645
/* devres release callback: unregister the RNG stored in the devres node. */
static void devm_hwrng_release(struct device *dev, void *res)
{
	hwrng_unregister(*(struct hwrng **)res);
}
0650
/* devres match callback: does this devres node hold the RNG in @data? */
static int devm_hwrng_match(struct device *dev, void *res, void *data)
{
	struct hwrng **rp = res;

	if (WARN_ON(!rp || !*rp))
		return 0;

	return *rp == data;
}
0660
0661 int devm_hwrng_register(struct device *dev, struct hwrng *rng)
0662 {
0663 struct hwrng **ptr;
0664 int error;
0665
0666 ptr = devres_alloc(devm_hwrng_release, sizeof(*ptr), GFP_KERNEL);
0667 if (!ptr)
0668 return -ENOMEM;
0669
0670 error = hwrng_register(rng);
0671 if (error) {
0672 devres_free(ptr);
0673 return error;
0674 }
0675
0676 *ptr = rng;
0677 devres_add(dev, ptr);
0678 return 0;
0679 }
0680 EXPORT_SYMBOL_GPL(devm_hwrng_register);
0681
/* Undo a devm_hwrng_register() before @dev is unbound; runs the release. */
void devm_hwrng_unregister(struct device *dev, struct hwrng *rng)
{
	devres_release(dev, devm_hwrng_release, devm_hwrng_match, rng);
}
0686 EXPORT_SYMBOL_GPL(devm_hwrng_unregister);
0687
0688 static int __init hwrng_modinit(void)
0689 {
0690 int ret;
0691
0692
0693 rng_buffer = kmalloc(rng_buffer_size(), GFP_KERNEL);
0694 if (!rng_buffer)
0695 return -ENOMEM;
0696
0697 rng_fillbuf = kmalloc(rng_buffer_size(), GFP_KERNEL);
0698 if (!rng_fillbuf) {
0699 kfree(rng_buffer);
0700 return -ENOMEM;
0701 }
0702
0703 ret = register_miscdev();
0704 if (ret) {
0705 kfree(rng_fillbuf);
0706 kfree(rng_buffer);
0707 }
0708
0709 return ret;
0710 }
0711
/*
 * hwrng_modexit - module teardown.
 *
 * All drivers must have unregistered by now (BUG if one is still
 * current); frees the staging buffers under rng_mutex, then removes
 * /dev/hwrng.
 */
static void __exit hwrng_modexit(void)
{
	mutex_lock(&rng_mutex);
	BUG_ON(current_rng);
	kfree(rng_buffer);
	kfree(rng_fillbuf);
	mutex_unlock(&rng_mutex);

	unregister_miscdev();
}
0722
0723 fs_initcall(hwrng_modinit);
0724 module_exit(hwrng_modexit);
0725
0726 MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
0727 MODULE_LICENSE("GPL");