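/*
 * This driver produces cryptographically secure pseudorandom data. Entropy
 * gathered from the environment is accumulated into a BLAKE2s-based input
 * pool, from which seeds are extracted for a ChaCha20-based "fast key
 * erasure" CRNG with per-CPU instances. That CRNG backs /dev/random,
 * /dev/urandom, getrandom(2), and the in-kernel get_random_bytes() family.
 */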
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <linux/uaccess.h>
#include <linux/suspend.h>
#include <linux/siphash.h>
#include <crypto/chacha.h>
#include <crypto/blake2s.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>
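
/*
 * crng_init is protected by base_crng.lock, and only increases its value
 * (from empty -> early -> ready).
 */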
static enum {
	CRNG_EMPTY = 0, /* Little to no entropy collected */
	CRNG_EARLY = 1, /* At least POOL_EARLY_BITS collected */
	CRNG_READY = 2  /* Fully initialized with POOL_READY_BITS collected */
} crng_init __read_mostly = CRNG_EMPTY;
static DEFINE_STATIC_KEY_FALSE(crng_is_ready);
#define crng_ready() (static_branch_likely(&crng_is_ready) || crng_init >= CRNG_READY)

/* Various types of waiters for the crng_init -> CRNG_READY transition. */
static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct fasync_struct *fasync;

/* Control how we warn userspace about unseeded reads. */
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE);
static int ratelimit_disable __read_mostly =
	IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM);
module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
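
/*
 * Returns whether or not the input pool has been seeded and thus guaranteed
 * to supply cryptographically secure random numbers. This applies to
 * get_random_bytes(), the get_random_{u32,u64} family, and the /dev/urandom
 * and getrandom(2) interfaces.
 */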
bool rng_is_initialized(void)
{
	return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);

static void __cold crng_set_ready(struct work_struct *work)
{
	static_branch_enable(&crng_is_ready);
}

/* Defined below; bootstraps entropy from cycle-counter jitter while waiting. */
static void try_to_generate_entropy(void);
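
/*
 * Wait for the input pool to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers. Using any of the random
 * interfaces without first getting a 0 return from this function forfeits
 * the guarantee of security.
 *
 * Returns: 0 if the input pool has been seeded.
 *          -ERESTARTSYS if the function was interrupted by a signal.
 */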
int wait_for_random_bytes(void)
{
	while (!crng_ready()) {
		int ret;

		try_to_generate_entropy();
		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
		if (ret)
			return ret > 0 ? 0 : ret;
	}
	return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);

#define warn_unseeded_randomness() \
	if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
				__func__, (void *)_RET_IP_, crng_init)
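
/**********************************************************************
 *
 * Fast key erasure RNG, the "crng".
 *
 * These functions expand entropy from the entropy extractor into long
 * streams for external consumption using the "fast key erasure" RNG
 * described at <https://blog.cr.yp.to/20170723-random.html>: each
 * generated ChaCha block first overwrites the key that produced it,
 * so output can never be wound backward to recover earlier output.
 *
 **********************************************************************/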
enum {
	CRNG_RESEED_START_INTERVAL = HZ,
	CRNG_RESEED_INTERVAL = 60 * HZ
};

static struct {
	u8 key[CHACHA_KEY_SIZE] __aligned(__alignof__(long));
	unsigned long birth;
	unsigned long generation;
	spinlock_t lock;
} base_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(base_crng.lock)
};

struct crng {
	u8 key[CHACHA_KEY_SIZE];
	unsigned long generation;
	local_lock_t lock;
};

static DEFINE_PER_CPU(struct crng, crngs) = {
	.generation = ULONG_MAX,
	.lock = INIT_LOCAL_LOCK(crngs.lock),
};

/* Used by crng_reseed() and crng_make_state() to extract a new seed from the input pool. */
static void extract_entropy(void *buf, size_t len);
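
/* This extracts a new crng key from the input pool. */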
static void crng_reseed(void)
{
	unsigned long flags;
	unsigned long next_gen;
	u8 key[CHACHA_KEY_SIZE];

	extract_entropy(key, sizeof(key));

	/*
	 * We copy the new key into the base_crng, overwriting the old one,
	 * and update the generation counter. We avoid hitting ULONG_MAX,
	 * because the per-cpu crngs are initialized to ULONG_MAX, so this
	 * forces new CPUs that come online to always initialize.
	 */
	spin_lock_irqsave(&base_crng.lock, flags);
	memcpy(base_crng.key, key, sizeof(base_crng.key));
	next_gen = base_crng.generation + 1;
	if (next_gen == ULONG_MAX)
		++next_gen;
	WRITE_ONCE(base_crng.generation, next_gen);
	WRITE_ONCE(base_crng.birth, jiffies);
	if (!static_branch_likely(&crng_is_ready))
		crng_init = CRNG_READY;
	spin_unlock_irqrestore(&base_crng.lock, flags);
	memzero_explicit(key, sizeof(key));
}
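
/*
 * This generates a ChaCha block using the provided key, and then
 * immediately overwrites that key with half the block. It returns
 * the resultant ChaCha state to the user, along with the second
 * half of the block containing 32 bytes of random data that may
 * be used; random_data_len may not be greater than 32.
 *
 * The returned ChaCha state contains within it a copy of the old
 * key value, at index 4, so the state should always be zeroed out
 * immediately after using in order to maintain forward secrecy.
 * If the state cannot be zeroed immediately, then it is expected
 * that the user wipes the random data it has just read.
 */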
static void crng_fast_key_erasure(u8 key[CHACHA_KEY_SIZE],
				  u32 chacha_state[CHACHA_STATE_WORDS],
				  u8 *random_data, size_t random_data_len)
{
	u8 first_block[CHACHA_BLOCK_SIZE];

	BUG_ON(random_data_len > 32);

	chacha_init_consts(chacha_state);
	memcpy(&chacha_state[4], key, CHACHA_KEY_SIZE);
	memset(&chacha_state[12], 0, sizeof(u32) * 4);
	chacha20_block(chacha_state, first_block);

	memcpy(key, first_block, CHACHA_KEY_SIZE);
	memcpy(random_data, first_block + CHACHA_KEY_SIZE, random_data_len);
	memzero_explicit(first_block, sizeof(first_block));
}
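
/*
 * Return whether the crng seed is considered to be sufficiently old
 * that a reseeding is needed. This happens if the last reseeding
 * was CRNG_RESEED_INTERVAL ago, or during early boot, at an interval
 * proportional to the uptime.
 */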
static bool crng_has_old_seed(void)
{
	static bool early_boot = true;
	unsigned long interval = CRNG_RESEED_INTERVAL;

	if (unlikely(READ_ONCE(early_boot))) {
		time64_t uptime = ktime_get_seconds();
		if (uptime >= CRNG_RESEED_INTERVAL / HZ * 2)
			WRITE_ONCE(early_boot, false);
		else
			interval = max_t(unsigned int, CRNG_RESEED_START_INTERVAL,
					 (unsigned int)uptime / 2 * HZ);
	}
	return time_is_before_jiffies(READ_ONCE(base_crng.birth) + interval);
}
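
/*
 * This function returns a ChaCha state that you may use for generating
 * random data. It also returns up to 32 bytes on its own of random data
 * that may be used; random_data_len may not be greater than 32.
 */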
static void crng_make_state(u32 chacha_state[CHACHA_STATE_WORDS],
			    u8 *random_data, size_t random_data_len)
{
	unsigned long flags;
	struct crng *crng;

	BUG_ON(random_data_len > 32);

	/*
	 * For the fast path, we check whether we're ready, unlocked first, and
	 * then re-check once locked later. In the case where we're really not
	 * ready, we do fast key erasure with the base_crng directly, extracting
	 * when crng_init is CRNG_EMPTY.
	 */
	if (!crng_ready()) {
		bool ready;

		spin_lock_irqsave(&base_crng.lock, flags);
		ready = crng_ready();
		if (!ready) {
			if (crng_init == CRNG_EMPTY)
				extract_entropy(base_crng.key, sizeof(base_crng.key));
			crng_fast_key_erasure(base_crng.key, chacha_state,
					      random_data, random_data_len);
		}
		spin_unlock_irqrestore(&base_crng.lock, flags);
		if (!ready)
			return;
	}

	/*
	 * If the base_crng is old enough, we reseed, which in turn bumps the
	 * generation counter that we check below.
	 */
	if (unlikely(crng_has_old_seed()))
		crng_reseed();

	local_lock_irqsave(&crngs.lock, flags);
	crng = raw_cpu_ptr(&crngs);

	/*
	 * If our per-cpu crng is older than the base_crng, then it means
	 * somebody reseeded the base_crng. In that case, we do fast key
	 * erasure on the base_crng, and use its output as the new key
	 * for our per-cpu crng. This brings us up to date with base_crng.
	 */
	if (unlikely(crng->generation != READ_ONCE(base_crng.generation))) {
		spin_lock(&base_crng.lock);
		crng_fast_key_erasure(base_crng.key, chacha_state,
				      crng->key, sizeof(crng->key));
		crng->generation = base_crng.generation;
		spin_unlock(&base_crng.lock);
	}

	/*
	 * Finally, when we've made it this far, our per-cpu crng has an up
	 * to date key, and we can do fast key erasure with it to produce
	 * some random data and a ChaCha state for the caller. All other
	 * branches of this function are "unlikely", so most of the time we
	 * should wind up here immediately.
	 */
	crng_fast_key_erasure(crng->key, chacha_state, random_data, random_data_len);
	local_unlock_irqrestore(&crngs.lock, flags);
}

static void _get_random_bytes(void *buf, size_t len)
{
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 tmp[CHACHA_BLOCK_SIZE];
	size_t first_block_len;

	if (!len)
		return;

	first_block_len = min_t(size_t, 32, len);
	crng_make_state(chacha_state, buf, first_block_len);
	len -= first_block_len;
	buf += first_block_len;

	while (len) {
		if (len < CHACHA_BLOCK_SIZE) {
			chacha20_block(chacha_state, tmp);
			memcpy(buf, tmp, len);
			memzero_explicit(tmp, sizeof(tmp));
			break;
		}

		chacha20_block(chacha_state, buf);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];
		len -= CHACHA_BLOCK_SIZE;
		buf += CHACHA_BLOCK_SIZE;
	}

	memzero_explicit(chacha_state, sizeof(chacha_state));
}
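
/*
 * This function is the exported kernel interface. It returns some number of
 * good random numbers, suitable for key generation, seeding TCP sequence
 * numbers, etc. In order to ensure that the randomness returned by this
 * function is okay, the function wait_for_random_bytes() should be called
 * and return 0 at least once at any point prior.
 */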
void get_random_bytes(void *buf, size_t len)
{
	warn_unseeded_randomness();
	_get_random_bytes(buf, len);
}
EXPORT_SYMBOL(get_random_bytes);
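
/*
 * Example (illustrative sketch, not code from this file): a driver that
 * needs a fresh 128-bit nonce and wants the seeding guarantee might do:
 *
 *	u8 nonce[16];
 *
 *	if (wait_for_random_bytes())
 *		return -ERESTARTSYS;	// interrupted before the pool was ready
 *	get_random_bytes(nonce, sizeof(nonce));
 *
 * The error handling shown is one plausible caller choice, not mandated here.
 */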

static ssize_t get_random_bytes_user(struct iov_iter *iter)
{
	u32 chacha_state[CHACHA_STATE_WORDS];
	u8 block[CHACHA_BLOCK_SIZE];
	size_t ret = 0, copied;

	if (unlikely(!iov_iter_count(iter)))
		return 0;

	/*
	 * Immediately overwrite the ChaCha key at index 4 with random
	 * bytes, in case userspace causes copy_to_iter() below to sleep
	 * forever, so that we still retain forward secrecy in that case.
	 */
	crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE);

	/*
	 * The fast key erasure above already gave us 32 bytes of fresh key
	 * material at &chacha_state[4]. If the caller wants no more than
	 * that, copy it out directly instead of generating another block.
	 */
	if (iov_iter_count(iter) <= CHACHA_KEY_SIZE) {
		ret = copy_to_iter(&chacha_state[4], CHACHA_KEY_SIZE, iter);
		goto out_zero_chacha;
	}

	for (;;) {
		chacha20_block(chacha_state, block);
		if (unlikely(chacha_state[12] == 0))
			++chacha_state[13];

		copied = copy_to_iter(block, sizeof(block), iter);
		ret += copied;
		if (!iov_iter_count(iter) || copied != sizeof(block))
			break;

		BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
		if (ret % PAGE_SIZE == 0) {
			if (signal_pending(current))
				break;
			cond_resched();
		}
	}

	memzero_explicit(block, sizeof(block));
out_zero_chacha:
	memzero_explicit(chacha_state, sizeof(chacha_state));
	return ret ? ret : -EFAULT;
}
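
/*
 * Batched entropy returns random integers. The quality of the random
 * number is as good as /dev/urandom. In order to ensure that the
 * randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */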
#define DEFINE_BATCHED_ENTROPY(type)						\
struct batch_ ##type {								\
	/*									\
	 * We make this 1.5x a ChaCha block, so that we get the			\
	 * remaining 32 bytes from fast key erasure, plus one full		\
	 * block from the detached ChaCha state.				\
	 */									\
	type entropy[CHACHA_BLOCK_SIZE * 3 / (2 * sizeof(type))];		\
	local_lock_t lock;							\
	unsigned long generation;						\
	unsigned int position;							\
};										\
										\
static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = {	\
	.lock = INIT_LOCAL_LOCK(batched_entropy_ ##type.lock),			\
	.position = UINT_MAX							\
};										\
										\
type get_random_ ##type(void)							\
{										\
	type ret;								\
	unsigned long flags;							\
	struct batch_ ##type *batch;						\
	unsigned long next_gen;							\
										\
	warn_unseeded_randomness();						\
										\
	if (!crng_ready()) {							\
		_get_random_bytes(&ret, sizeof(ret));				\
		return ret;							\
	}									\
										\
	local_lock_irqsave(&batched_entropy_ ##type.lock, flags);		\
	batch = raw_cpu_ptr(&batched_entropy_##type);				\
										\
	next_gen = READ_ONCE(base_crng.generation);				\
	if (batch->position >= ARRAY_SIZE(batch->entropy) ||			\
	    next_gen != batch->generation) {					\
		_get_random_bytes(batch->entropy, sizeof(batch->entropy));	\
		batch->position = 0;						\
		batch->generation = next_gen;					\
	}									\
										\
	ret = batch->entropy[batch->position];					\
	batch->entropy[batch->position] = 0;					\
	++batch->position;							\
	local_unlock_irqrestore(&batched_entropy_ ##type.lock, flags);		\
	return ret;								\
}										\
EXPORT_SYMBOL(get_random_ ##type);

DEFINE_BATCHED_ENTROPY(u64)
DEFINE_BATCHED_ENTROPY(u32)
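
/*
 * Example (illustrative sketch, not code from this file): callers that need
 * a random integer rather than a buffer can use the batched interface, e.g.
 * to add jitter to a delay:
 *
 *	unsigned long jitter = get_random_u32() % HZ;
 *
 *	schedule_timeout_interruptible(HZ + jitter);
 *
 * The modulo introduces a small bias, which is fine for jitter but not for
 * cryptographic use.
 */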

#ifdef CONFIG_SMP
/*
 * This function is called when the CPU is coming up, with entry
 * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
 */
int __cold random_prepare_cpu(unsigned int cpu)
{
	/*
	 * When the cpu comes back online, immediately invalidate both
	 * the per-cpu crng and all batches, so that we serve fresh
	 * randomness.
	 */
	per_cpu_ptr(&crngs, cpu)->generation = ULONG_MAX;
	per_cpu_ptr(&batched_entropy_u32, cpu)->position = UINT_MAX;
	per_cpu_ptr(&batched_entropy_u64, cpu)->position = UINT_MAX;
	return 0;
}
#endif
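
/**********************************************************************
 *
 * Entropy accumulation and extraction routines.
 *
 * Callers may add entropy via:
 *
 *	static void mix_pool_bytes(const void *buf, size_t len)
 *
 * After which, if added entropy should be credited:
 *
 *	static void credit_init_bits(size_t bits)
 *
 * Finally, extract entropy via:
 *
 *	static void extract_entropy(void *buf, size_t len)
 *
 **********************************************************************/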
enum {
	POOL_BITS = BLAKE2S_HASH_SIZE * 8,
	POOL_READY_BITS = POOL_BITS, /* When crng_init -> CRNG_READY */
	POOL_EARLY_BITS = POOL_READY_BITS / 2 /* When crng_init -> CRNG_EARLY */
};

static struct {
	struct blake2s_state hash;
	spinlock_t lock;
	unsigned int init_bits;
} input_pool = {
	.hash.h = { BLAKE2S_IV0 ^ (0x01010000 | BLAKE2S_HASH_SIZE),
		    BLAKE2S_IV1, BLAKE2S_IV2, BLAKE2S_IV3, BLAKE2S_IV4,
		    BLAKE2S_IV5, BLAKE2S_IV6, BLAKE2S_IV7 },
	.hash.outlen = BLAKE2S_HASH_SIZE,
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};

static void _mix_pool_bytes(const void *buf, size_t len)
{
	blake2s_update(&input_pool.hash, buf, len);
}
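
/*
 * This function adds bytes into the input pool. It does not
 * update the initialization bit counter; the caller should call
 * credit_init_bits if this is appropriate.
 */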
static void mix_pool_bytes(const void *buf, size_t len)
{
	unsigned long flags;

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(buf, len);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
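
/*
 * This is an HKDF-like construction for using the hashed collected entropy
 * as a PRF key, that's then expanded block-by-block.
 */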
static void extract_entropy(void *buf, size_t len)
{
	unsigned long flags;
	u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
	struct {
		unsigned long rdseed[32 / sizeof(long)];
		size_t counter;
	} block;
	size_t i, longs;

	for (i = 0; i < ARRAY_SIZE(block.rdseed);) {
		longs = arch_get_random_seed_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
		if (longs) {
			i += longs;
			continue;
		}
		longs = arch_get_random_longs(&block.rdseed[i], ARRAY_SIZE(block.rdseed) - i);
		if (longs) {
			i += longs;
			continue;
		}
		block.rdseed[i++] = random_get_entropy();
	}

	spin_lock_irqsave(&input_pool.lock, flags);

	/* seed = HASHPRF(last_key, entropy_input) */
	blake2s_final(&input_pool.hash, seed);

	/* next_key = HASHPRF(seed, RDSEED || 0) */
	block.counter = 0;
	blake2s(next_key, (u8 *)&block, seed, sizeof(next_key), sizeof(block), sizeof(seed));
	blake2s_init_key(&input_pool.hash, BLAKE2S_HASH_SIZE, next_key, sizeof(next_key));

	spin_unlock_irqrestore(&input_pool.lock, flags);
	memzero_explicit(next_key, sizeof(next_key));

	while (len) {
		i = min_t(size_t, len, BLAKE2S_HASH_SIZE);
		/* output = HASHPRF(seed, RDSEED || ++counter) */
		++block.counter;
		blake2s(buf, (u8 *)&block, seed, i, sizeof(block), sizeof(seed));
		len -= i;
		buf += i;
	}

	memzero_explicit(seed, sizeof(seed));
	memzero_explicit(&block, sizeof(block));
}

#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)

static void __cold _credit_init_bits(size_t bits)
{
	static struct execute_work set_ready;
	unsigned int new, orig, add;
	unsigned long flags;

	if (!bits)
		return;

	add = min_t(size_t, bits, POOL_BITS);

	orig = READ_ONCE(input_pool.init_bits);
	do {
		new = min_t(unsigned int, POOL_BITS, orig + add);
	} while (!try_cmpxchg(&input_pool.init_bits, &orig, new));

	if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) {
		crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. */
		if (static_key_initialized)
			execute_in_process_context(crng_set_ready, &set_ready);
		wake_up_interruptible(&crng_init_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
		pr_notice("crng init done\n");
		if (urandom_warning.missed)
			pr_notice("%d urandom warning(s) missed due to ratelimiting\n",
				  urandom_warning.missed);
	} else if (orig < POOL_EARLY_BITS && new >= POOL_EARLY_BITS) {
		spin_lock_irqsave(&base_crng.lock, flags);
		/* Check if crng_init is CRNG_EMPTY, to avoid race with crng_reseed(). */
		if (crng_init == CRNG_EMPTY) {
			extract_entropy(base_crng.key, sizeof(base_crng.key));
			crng_init = CRNG_EARLY;
		}
		spin_unlock_irqrestore(&base_crng.lock, flags);
	}
}
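
/**********************************************************************
 *
 * Entropy collection routines.
 *
 * The following exported functions are used for pushing entropy into
 * the above entropy accumulation routines:
 *
 *	void add_device_randomness(const void *buf, size_t len);
 *	void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy);
 *	void add_bootloader_randomness(const void *buf, size_t len);
 *	void add_vmfork_randomness(const void *unique_vm_id, size_t len);
 *	void add_interrupt_randomness(int irq);
 *	void add_input_randomness(unsigned int type, unsigned int code, unsigned int value);
 *	void add_disk_randomness(struct gendisk *disk);
 *
 * Of these, add_device_randomness() credits no entropy; the others
 * credit entropy according to either a caller-provided estimate or,
 * for the timing-based sources, conservative heuristics computed below.
 *
 **********************************************************************/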
static bool trust_cpu __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static bool trust_bootloader __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}
static int __init parse_trust_bootloader(char *arg)
{
	return kstrtobool(arg, &trust_bootloader);
}
early_param("random.trust_cpu", parse_trust_cpu);
early_param("random.trust_bootloader", parse_trust_bootloader);

static int random_pm_notification(struct notifier_block *nb, unsigned long action, void *data)
{
	unsigned long flags, entropy = random_get_entropy();

	/*
	 * Mix in timestamps from several clocks: across a suspend/resume or
	 * hibernation cycle these diverge from one another, providing a
	 * little fresh unpredictability on top of the cycle counter.
	 */
	ktime_t stamps[] = { ktime_get(), ktime_get_boottime(), ktime_get_real() };

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&action, sizeof(action));
	_mix_pool_bytes(stamps, sizeof(stamps));
	_mix_pool_bytes(&entropy, sizeof(entropy));
	spin_unlock_irqrestore(&input_pool.lock, flags);

	if (crng_ready() && (action == PM_RESTORE_PREPARE ||
	    (action == PM_POST_SUSPEND && !IS_ENABLED(CONFIG_PM_AUTOSLEEP) &&
	     !IS_ENABLED(CONFIG_PM_USERSPACE_AUTOSLEEP)))) {
		crng_reseed();
		pr_notice("crng reseeded on system resumption\n");
	}
	return 0;
}

static struct notifier_block pm_notifier = { .notifier_call = random_pm_notification };
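
/*
 * The first collection of entropy occurs at system boot while interrupts
 * are still turned off. Here we push in latent entropy, RDSEED, a timestamp,
 * utsname(), and the command line. Depending on the above configuration knob,
 * RDSEED may be considered sufficient for initialization. Note that much
 * earlier setup may already have pushed entropy into the input pool by the
 * time we get here.
 */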
int __init random_init(const char *command_line)
{
	ktime_t now = ktime_get_real();
	size_t i, longs, arch_bits;
	unsigned long entropy[BLAKE2S_BLOCK_SIZE / sizeof(long)];

#if defined(LATENT_ENTROPY_PLUGIN)
	static const u8 compiletime_seed[BLAKE2S_BLOCK_SIZE] __initconst __latent_entropy;
	_mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed));
#endif

	for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
		longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i);
		if (longs) {
			_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
			i += longs;
			continue;
		}
		longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i);
		if (longs) {
			_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
			i += longs;
			continue;
		}
		entropy[0] = random_get_entropy();
		_mix_pool_bytes(entropy, sizeof(*entropy));
		arch_bits -= sizeof(*entropy) * 8;
		++i;
	}
	_mix_pool_bytes(&now, sizeof(now));
	_mix_pool_bytes(utsname(), sizeof(*(utsname())));
	_mix_pool_bytes(command_line, strlen(command_line));
	add_latent_entropy();

	/*
	 * If we were initialized by the cpu or bootloader before jump labels
	 * are initialized, then we should enable the static branch here, where
	 * it's guaranteed that jump labels have been initialized.
	 */
	if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY)
		crng_set_ready(NULL);

	if (crng_ready())
		crng_reseed();
	else if (trust_cpu)
		_credit_init_bits(arch_bits);

	WARN_ON(register_pm_notifier(&pm_notifier));

	WARN(!random_get_entropy(), "Missing cycle counter and fallback timer; RNG "
				    "entropy collection will consequently suffer.");
	return 0;
}
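
/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */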
void add_device_randomness(const void *buf, size_t len)
{
	unsigned long entropy = random_get_entropy();
	unsigned long flags;

	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&entropy, sizeof(entropy));
	_mix_pool_bytes(buf, len);
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);
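
/*
 * Interface for in-kernel drivers of true hardware RNGs. Those devices
 * may produce endless random bits, so this function throttles callers
 * once the pool is fully initialized.
 */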
void add_hwgenerator_randomness(const void *buf, size_t len, size_t entropy)
{
	mix_pool_bytes(buf, len);
	credit_init_bits(entropy);

	/*
	 * Throttle writing to once every CRNG_RESEED_INTERVAL, unless
	 * we're not yet initialized.
	 */
	if (!kthread_should_stop() && crng_ready())
		schedule_timeout_interruptible(CRNG_RESEED_INTERVAL);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
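
/*
 * Handle random seed passed by bootloader, and credit it depending on
 * the command line option 'random.trust_bootloader'.
 */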
void __init add_bootloader_randomness(const void *buf, size_t len)
{
	mix_pool_bytes(buf, len);
	if (trust_bootloader)
		credit_init_bits(len * 8);
}

#if IS_ENABLED(CONFIG_VMGENID)
static BLOCKING_NOTIFIER_HEAD(vmfork_chain);

/*
 * Handle a new unique VM ID, which is unique, not secret, so we
 * don't credit it, but we do immediately force a reseed after so
 * that it's used by the crng posthaste.
 */
void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len)
{
	add_device_randomness(unique_vm_id, len);
	if (crng_ready()) {
		crng_reseed();
		pr_notice("crng reseeded due to virtual machine fork\n");
	}
	blocking_notifier_call_chain(&vmfork_chain, 0, NULL);
}
#if IS_MODULE(CONFIG_VMGENID)
EXPORT_SYMBOL_GPL(add_vmfork_randomness);
#endif

int __cold register_random_vmfork_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&vmfork_chain, nb);
}
EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);

int __cold unregister_random_vmfork_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&vmfork_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_random_vmfork_notifier);
#endif

struct fast_pool {
	struct work_struct mix;
	unsigned long pool[4];
	unsigned long last;
	unsigned int count;
};

static DEFINE_PER_CPU(struct fast_pool, irq_randomness) = {
#ifdef CONFIG_64BIT
#define FASTMIX_PERM SIPHASH_PERMUTATION
	.pool = { SIPHASH_CONST_0, SIPHASH_CONST_1, SIPHASH_CONST_2, SIPHASH_CONST_3 }
#else
#define FASTMIX_PERM HSIPHASH_PERMUTATION
	.pool = { HSIPHASH_CONST_0, HSIPHASH_CONST_1, HSIPHASH_CONST_2, HSIPHASH_CONST_3 }
#endif
};
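
/*
 * This is [Half]SipHash-1-x, starting from an empty key. Because
 * the key is fixed, it assumes that its inputs are non-malicious, and
 * therefore this has no security on its own. s represents the 128 or
 * 256-bit SipHash state, while v represents a 128-bit input.
 */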
static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
{
	s[3] ^= v1;
	FASTMIX_PERM(s[0], s[1], s[2], s[3]);
	s[0] ^= v1;
	s[3] ^= v2;
	FASTMIX_PERM(s[0], s[1], s[2], s[3]);
	s[0] ^= v2;
}

#ifdef CONFIG_SMP
/*
 * This function is called when the CPU has just come online, with
 * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
 */
int __cold random_online_cpu(unsigned int cpu)
{
	/*
	 * During CPU shutdown and before CPU onlining, add_interrupt_
	 * randomness() may schedule mix_interrupt_randomness(), and
	 * set the MIX_INFLIGHT flag. However, because the worker can
	 * be scheduled on a different CPU during this period, that
	 * flag will never be cleared. For that reason, we zero out
	 * the flag here, which runs just after workqueues are onlined
	 * for the CPU again. This also has the effect of setting the
	 * irq randomness count to zero so that new accumulated irqs
	 * are fresh.
	 */
	per_cpu_ptr(&irq_randomness, cpu)->count = 0;
	return 0;
}
#endif

static void mix_interrupt_randomness(struct work_struct *work)
{
	struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
	/*
	 * The size of the copied stack pool is explicitly 2 longs so that we
	 * only ever ingest half of the siphash output each time, retaining
	 * the other half as the next "key" that carries over. The entropy is
	 * supposed to be sufficiently dispersed between bits so on average
	 * we don't wind up "losing" some.
	 */
	unsigned long pool[2];
	unsigned int count;

	/* Check to see if we're running on the wrong CPU due to hotplug. */
	local_irq_disable();
	if (fast_pool != this_cpu_ptr(&irq_randomness)) {
		local_irq_enable();
		return;
	}

	/*
	 * Copy the pool to the stack so that the mixer always has a
	 * consistent view, before we reenable irqs again.
	 */
	memcpy(pool, fast_pool->pool, sizeof(pool));
	count = fast_pool->count;
	fast_pool->count = 0;
	fast_pool->last = jiffies;
	local_irq_enable();

	mix_pool_bytes(pool, sizeof(pool));
	credit_init_bits(max(1u, (count & U16_MAX) / 64));

	memzero_explicit(pool, sizeof(pool));
}

void add_interrupt_randomness(int irq)
{
	enum { MIX_INFLIGHT = 1U << 31 };
	unsigned long entropy = random_get_entropy();
	struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs *regs = get_irq_regs();
	unsigned int new_count;

	fast_mix(fast_pool->pool, entropy,
		 (regs ? instruction_pointer(regs) : _RET_IP_) ^ swab(irq));
	new_count = ++fast_pool->count;

	if (new_count & MIX_INFLIGHT)
		return;

	if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ))
		return;

	if (unlikely(!fast_pool->mix.func))
		INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
	fast_pool->count |= MIX_INFLIGHT;
	queue_work_on(raw_smp_processor_id(), system_highpri_wq, &fast_pool->mix);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);

struct timer_rand_state {
	unsigned long last_time;
	long last_delta, last_delta2;
};
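
/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays. It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool. The
 * value "num" is also added to the pool; it should somehow describe
 * the type of event that just happened.
 */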
static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
{
	unsigned long entropy = random_get_entropy(), now = jiffies, flags;
	long delta, delta2, delta3;
	unsigned int bits;

	/*
	 * If we're in a hard IRQ, add_interrupt_randomness() will be called
	 * sometime after, so mix into the fast pool.
	 */
	if (in_hardirq()) {
		fast_mix(this_cpu_ptr(&irq_randomness)->pool, entropy, num);
	} else {
		spin_lock_irqsave(&input_pool.lock, flags);
		_mix_pool_bytes(&entropy, sizeof(entropy));
		_mix_pool_bytes(&num, sizeof(num));
		spin_unlock_irqrestore(&input_pool.lock, flags);
	}

	if (crng_ready())
		return;

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order deltas
	 * in order to make our estimate.
	 */
	delta = now - READ_ONCE(state->last_time);
	WRITE_ONCE(state->last_time, now);

	delta2 = delta - READ_ONCE(state->last_delta);
	WRITE_ONCE(state->last_delta, delta);

	delta3 = delta2 - READ_ONCE(state->last_delta2);
	WRITE_ONCE(state->last_delta2, delta2);

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now minimum absolute delta. Round down by 1 bit
	 * on general principles, and limit entropy estimate to 11 bits.
	 */
	bits = min(fls(delta >> 1), 11);

	/*
	 * As mentioned above, if we're in a hard IRQ, add_interrupt_randomness()
	 * will run after this, which credits 1 bit per every 64 interrupts. In
	 * order to let that function do accounting close to the one in this
	 * function, we credit a full 64/64 bit per bit, and then subtract one
	 * to account for the extra one added there.
	 */
	if (in_hardirq())
		this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
	else
		_credit_init_bits(bits);
}

void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
{
	static unsigned char last_value;
	static struct timer_rand_state input_timer_state = { INITIAL_JIFFIES };

	/* Ignore autorepeat and the like. */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
}
EXPORT_SYMBOL_GPL(add_input_randomness);

#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;

	/* First major is 1, so we get >= 0x200 here. */
	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);

void __cold rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state) {
		state->last_time = INITIAL_JIFFIES;
		disk->random = state;
	}
}
#endif

struct entropy_timer_state {
	unsigned long entropy;
	struct timer_list timer;
	unsigned int samples, samples_per_bit;
};
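
/*
 * Each time the timer fires, we expect that we got an unpredictable jump in
 * the cycle counter. Even if the timer is running on another CPU, the timer
 * activity will be touching the stack of the CPU that is generating entropy.
 *
 * Note that we don't re-arm the timer in the timer itself - we are happy to
 * be scheduled away, since that just makes the load more complex, but we do
 * not want the timer to keep ticking unless the entropy loop is running.
 *
 * So the re-arming always happens in the entropy loop itself.
 */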
static void __cold entropy_timer(struct timer_list *timer)
{
	struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer);

	if (++state->samples == state->samples_per_bit) {
		credit_init_bits(1);
		state->samples = 0;
	}
}
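
/*
 * If we have an actual cycle counter, see if we can generate enough entropy
 * with timing noise.
 */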
static void __cold try_to_generate_entropy(void)
{
	enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = HZ / 30 };
	struct entropy_timer_state stack;
	unsigned int i, num_different = 0;
	unsigned long last = random_get_entropy();

	for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
		stack.entropy = random_get_entropy();
		if (stack.entropy != last)
			++num_different;
		last = stack.entropy;
	}
	stack.samples_per_bit = DIV_ROUND_UP(NUM_TRIAL_SAMPLES, num_different + 1);
	if (stack.samples_per_bit > MAX_SAMPLES_PER_BIT)
		return;

	stack.samples = 0;
	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
	while (!crng_ready() && !signal_pending(current)) {
		if (!timer_pending(&stack.timer))
			mod_timer(&stack.timer, jiffies + 1);
		mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
		schedule();
		stack.entropy = random_get_entropy();
	}

	del_timer_sync(&stack.timer);
	destroy_timer_on_stack(&stack.timer);
	mix_pool_bytes(&stack.entropy, sizeof(stack.entropy));
}
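
/**********************************************************************
 *
 * Userspace reader/writer interfaces.
 *
 * getrandom(2) is the primary modern interface into the RNG and should
 * be used in preference to anything else.
 *
 * Reading from /dev/random has the same functionality as calling
 * getrandom(2) with flags=0. In earlier versions of the kernel it had
 * vastly different semantics and should therefore be avoided if at all
 * possible.
 *
 * Reading from /dev/urandom has the same functionality as calling
 * getrandom(2) with flags=GRND_INSECURE. Because it never blocks, even
 * before the RNG is initialized, it should be avoided when possible.
 *
 * Writing to either /dev/random or /dev/urandom adds entropy to the
 * input pool but does not credit it.
 *
 **********************************************************************/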
SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
{
	struct iov_iter iter;
	struct iovec iov;
	int ret;

	if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
		return -EINVAL;

	/*
	 * Requesting insecure and blocking randomness at the same time makes
	 * no sense.
	 */
	if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
		return -EINVAL;

	if (!crng_ready() && !(flags & GRND_INSECURE)) {
		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		ret = wait_for_random_bytes();
		if (unlikely(ret))
			return ret;
	}

	ret = import_single_range(READ, ubuf, len, &iov, &iter);
	if (unlikely(ret))
		return ret;
	return get_random_bytes_user(&iter);
}
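
/*
 * Example (illustrative sketch of the userspace side, not code from this
 * file): without a libc wrapper, the syscall can be invoked directly:
 *
 *	unsigned char buf[32];
 *	ssize_t n = syscall(SYS_getrandom, buf, sizeof(buf), 0);
 *
 * With flags == 0 this blocks until the crng is initialized, then fills
 * buf with cryptographically secure bytes.
 */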

static __poll_t random_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &crng_init_wait, wait);
	return crng_ready() ? EPOLLIN | EPOLLRDNORM : EPOLLOUT | EPOLLWRNORM;
}

static ssize_t write_pool_user(struct iov_iter *iter)
{
	u8 block[BLAKE2S_BLOCK_SIZE];
	ssize_t ret = 0;
	size_t copied;

	if (unlikely(!iov_iter_count(iter)))
		return 0;

	for (;;) {
		copied = copy_from_iter(block, sizeof(block), iter);
		ret += copied;
		mix_pool_bytes(block, copied);
		if (!iov_iter_count(iter) || copied != sizeof(block))
			break;

		BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
		if (ret % PAGE_SIZE == 0) {
			if (signal_pending(current))
				break;
			cond_resched();
		}
	}

	memzero_explicit(block, sizeof(block));
	return ret ? ret : -EFAULT;
}

static ssize_t random_write_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
	return write_pool_user(iter);
}

static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
	static int maxwarn = 10;

	/*
	 * Opportunistically attempt to initialize the RNG on platforms that
	 * have fast cycle counters, but don't (for now) require it to succeed.
	 */
	if (!crng_ready())
		try_to_generate_entropy();

	if (!crng_ready()) {
		if (!ratelimit_disable && maxwarn <= 0)
			++urandom_warning.missed;
		else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
			--maxwarn;
			pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
				  current->comm, iov_iter_count(iter));
		}
	}

	return get_random_bytes_user(iter);
}

static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
	int ret;

	ret = wait_for_random_bytes();
	if (ret != 0)
		return ret;
	return get_random_bytes_user(iter);
}

static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int __user *p = (int __user *)arg;
	int ent_count;

	switch (cmd) {
	case RNDGETENTCNT:
		/* Inherently racy, no point locking. */
		if (put_user(input_pool.init_bits, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		credit_init_bits(ent_count);
		return 0;
	case RNDADDENTROPY: {
		struct iov_iter iter;
		struct iovec iov;
		ssize_t ret;
		int len;

		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(len, p++))
			return -EFAULT;
		ret = import_single_range(WRITE, p, len, &iov, &iter);
		if (unlikely(ret))
			return ret;
		ret = write_pool_user(&iter);
		if (unlikely(ret < 0))
			return ret;
		/* Since we're crediting, enforce that it was all written into the pool. */
		if (unlikely(ret != len))
			return -EFAULT;
		credit_init_bits(ent_count);
		return 0;
	}
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/* No longer has any effect. */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		return 0;
	case RNDRESEEDCRNG:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (!crng_ready())
			return -ENODATA;
		crng_reseed();
		return 0;
	default:
		return -EINVAL;
	}
}

static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}

const struct file_operations random_fops = {
	.read_iter = random_read_iter,
	.write_iter = random_write_iter,
	.poll = random_poll,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
};

const struct file_operations urandom_fops = {
	.read_iter = urandom_read_iter,
	.write_iter = random_write_iter,
	.unlocked_ioctl = random_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
};
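
/********************************************************************
 *
 * Sysctl interface.
 *
 * These are partly unused legacy knobs with dummy values to not break
 * userspace and partly still useful things. They are usually accessible
 * in /proc/sys/kernel/random/ and are as follows:
 *
 * - boot_id - a UUID representing the current boot.
 *
 * - uuid - a random UUID, different each time the file is read.
 *
 * - poolsize - the number of bits of entropy that the input pool can
 *   hold, tied to the POOL_BITS constant.
 *
 * - entropy_avail - the number of bits of entropy currently in the
 *   input pool. Always <= poolsize.
 *
 * - write_wakeup_threshold - it is writable to avoid breaking old
 *   userspaces, but writing to it does not change any behavior of
 *   the RNG.
 *
 * - urandom_min_reseed_secs - fixed to the value CRNG_RESEED_INTERVAL.
 *   It is writable to avoid breaking old userspaces, but writing to it
 *   does not change any behavior of the RNG.
 *
 ********************************************************************/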
#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int sysctl_random_min_urandom_seed = CRNG_RESEED_INTERVAL / HZ;
static int sysctl_random_write_wakeup_bits = POOL_READY_BITS;
static int sysctl_poolsize = POOL_BITS;
static u8 sysctl_bootid[UUID_SIZE];

/*
 * This function is used to return both the bootid UUID, and random
 * UUID. The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 */
static int proc_do_uuid(struct ctl_table *table, int write, void *buf,
			size_t *lenp, loff_t *ppos)
{
	u8 tmp_uuid[UUID_SIZE], *uuid;
	char uuid_string[UUID_STRING_LEN + 1];
	struct ctl_table fake_table = {
		.data = uuid_string,
		.maxlen = UUID_STRING_LEN
	};

	if (write)
		return -EPERM;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		generate_random_uuid(uuid);
	} else {
		static DEFINE_SPINLOCK(bootid_spinlock);

		spin_lock(&bootid_spinlock);
		if (!uuid[8])
			generate_random_uuid(uuid);
		spin_unlock(&bootid_spinlock);
	}

	snprintf(uuid_string, sizeof(uuid_string), "%pU", uuid);
	return proc_dostring(&fake_table, 0, buf, lenp, ppos);
}

/* The same as proc_dointvec, but writes don't change anything. */
static int proc_do_rointvec(struct ctl_table *table, int write, void *buf,
			    size_t *lenp, loff_t *ppos)
{
	return write ? 0 : proc_dointvec(table, 0, buf, lenp, ppos);
}

static struct ctl_table random_table[] = {
	{
		.procname = "poolsize",
		.data = &sysctl_poolsize,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "entropy_avail",
		.data = &input_pool.init_bits,
		.maxlen = sizeof(int),
		.mode = 0444,
		.proc_handler = proc_dointvec,
	},
	{
		.procname = "write_wakeup_threshold",
		.data = &sysctl_random_write_wakeup_bits,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_do_rointvec,
	},
	{
		.procname = "urandom_min_reseed_secs",
		.data = &sysctl_random_min_urandom_seed,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_do_rointvec,
	},
	{
		.procname = "boot_id",
		.data = &sysctl_bootid,
		.mode = 0444,
		.proc_handler = proc_do_uuid,
	},
	{
		.procname = "uuid",
		.mode = 0444,
		.proc_handler = proc_do_uuid,
	},
	{ }
};

/*
 * Register the sysctl table via device_initcall(), so that the files
 * above appear under /proc/sys/kernel/random/ without any later
 * registration step.
 */
static int __init random_sysctls_init(void)
{
	register_sysctl_init("kernel/random", random_table);
	return 0;
}
device_initcall(random_sysctls_init);
#endif