Diffstat (limited to 'drivers/char/random.c')
-rw-r--r--  drivers/char/random.c | 148
1 file changed, 125 insertions(+), 23 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0ab024918907..afa3ce7d3e72 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,6 +1,9 @@
/*
* random.c -- A strong random number generator
*
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
+ * Rights Reserved.
+ *
* Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
*
* Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
@@ -285,7 +288,6 @@
#define SEC_XFER_SIZE 512
#define EXTRACT_SIZE 10
-#define DEBUG_RANDOM_BOOT 0
#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
@@ -434,6 +436,7 @@ static void _extract_crng(struct crng_state *crng,
static void _crng_backtrack_protect(struct crng_state *crng,
__u8 tmp[CHACHA20_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
+static void _get_random_bytes(void *buf, int nbytes);
/**********************************************************************
*
@@ -762,6 +765,8 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
static struct crng_state **crng_node_pool __read_mostly;
#endif
+static void invalidate_batched_entropy(void);
+
static void crng_initialize(struct crng_state *crng)
{
int i;
@@ -772,7 +777,7 @@ static void crng_initialize(struct crng_state *crng)
_extract_entropy(&input_pool, &crng->state[4],
sizeof(__u32) * 12, 0);
else
- get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+ _get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
for (i = 4; i < 16; i++) {
if (!arch_get_random_seed_long(&rv) &&
!arch_get_random_long(&rv))
@@ -798,12 +803,13 @@ static int crng_fast_load(const char *cp, size_t len)
p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
cp++; crng_init_cnt++; len--;
}
+ spin_unlock_irqrestore(&primary_crng.lock, flags);
if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+ invalidate_batched_entropy();
crng_init = 1;
wake_up_interruptible(&crng_init_wait);
pr_notice("random: fast init done\n");
}
- spin_unlock_irqrestore(&primary_crng.lock, flags);
return 1;
}
@@ -835,18 +841,14 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
}
memzero_explicit(&buf, sizeof(buf));
crng->init_time = jiffies;
+ spin_unlock_irqrestore(&primary_crng.lock, flags);
if (crng == &primary_crng && crng_init < 2) {
+ invalidate_batched_entropy();
crng_init = 2;
process_random_ready_list();
wake_up_interruptible(&crng_init_wait);
pr_notice("random: crng init done\n");
}
- spin_unlock_irqrestore(&primary_crng.lock, flags);
-}
-
-static inline void crng_wait_ready(void)
-{
- wait_event_interruptible(crng_init_wait, crng_ready());
}
static void _extract_crng(struct crng_state *crng,
@@ -980,6 +982,11 @@ void add_device_randomness(const void *buf, unsigned int size)
unsigned long time = random_get_entropy() ^ jiffies;
unsigned long flags;
+ if (!crng_ready()) {
+ crng_fast_load(buf, size);
+ return;
+ }
+
trace_add_device_randomness(size, _RET_IP_);
spin_lock_irqsave(&input_pool.lock, flags);
_mix_pool_bytes(&input_pool, buf, size);
@@ -1097,12 +1104,16 @@ static void add_interrupt_bench(cycles_t start)
static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
__u32 *ptr = (__u32 *) regs;
+ unsigned int idx;
if (regs == NULL)
return 0;
- if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
- f->reg_idx = 0;
- return *(ptr + f->reg_idx++);
+ idx = READ_ONCE(f->reg_idx);
+ if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
+ idx = 0;
+ ptr += idx++;
+ WRITE_ONCE(f->reg_idx, idx);
+ return *ptr;
}
void add_interrupt_randomness(int irq, int irq_flags)
@@ -1461,22 +1472,44 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
return ret;
}
+#define warn_unseeded_randomness(previous) \
+ _warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
+
+static void _warn_unseeded_randomness(const char *func_name, void *caller,
+ void **previous)
+{
+#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+ const bool print_once = false;
+#else
+ static bool print_once __read_mostly;
+#endif
+
+ if (print_once ||
+ crng_ready() ||
+ (previous && (caller == READ_ONCE(*previous))))
+ return;
+ WRITE_ONCE(*previous, caller);
+#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+ print_once = true;
+#endif
+ pr_notice("random: %s called from %pF with crng_init=%d\n",
+ func_name, caller, crng_init);
+}
+
/*
* This function is the exported kernel interface. It returns some
* number of good random numbers, suitable for key generation, seeding
* TCP sequence numbers, etc. It does not rely on the hardware random
* number generator. For random bytes direct from the hardware RNG
- * (when available), use get_random_bytes_arch().
+ * (when available), use get_random_bytes_arch(). In order to ensure
+ * that the randomness provided by this function is okay, the function
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
*/
-void get_random_bytes(void *buf, int nbytes)
+static void _get_random_bytes(void *buf, int nbytes)
{
__u8 tmp[CHACHA20_BLOCK_SIZE];
-#if DEBUG_RANDOM_BOOT > 0
- if (!crng_ready())
- printk(KERN_NOTICE "random: %pF get_random_bytes called "
- "with crng_init = %d\n", (void *) _RET_IP_, crng_init);
-#endif
trace_get_random_bytes(nbytes, _RET_IP_);
while (nbytes >= CHACHA20_BLOCK_SIZE) {
@@ -1493,9 +1526,35 @@ void get_random_bytes(void *buf, int nbytes)
crng_backtrack_protect(tmp, CHACHA20_BLOCK_SIZE);
memzero_explicit(tmp, sizeof(tmp));
}
+
+void get_random_bytes(void *buf, int nbytes)
+{
+ static void *previous;
+
+ warn_unseeded_randomness(&previous);
+ _get_random_bytes(buf, nbytes);
+}
EXPORT_SYMBOL(get_random_bytes);
/*
+ * Wait for the urandom pool to be seeded and thus guaranteed to supply
+ * cryptographically secure random numbers. This applies to: the /dev/urandom
+ * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
+ * family of functions. Using any of these functions without first calling
+ * this function forfeits the guarantee of security.
+ *
+ * Returns: 0 if the urandom pool has been seeded.
+ * -ERESTARTSYS if the function was interrupted by a signal.
+ */
+int wait_for_random_bytes(void)
+{
+ if (likely(crng_ready()))
+ return 0;
+ return wait_event_interruptible(crng_init_wait, crng_ready());
+}
+EXPORT_SYMBOL(wait_for_random_bytes);
+
+/*
* Add a callback function that will be invoked when the nonblocking
* pool is initialised.
*
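A minimal sketch of how an in-kernel caller might combine the two exported interfaces above. The function and its surrounding driver context are hypothetical, not part of this patch:

/* Illustrative caller: a hypothetical driver that needs key material
 * only after the CRNG is known to be seeded.
 */
#include <linux/random.h>

static int example_generate_key(u8 *key, size_t len)
{
	int ret;

	ret = wait_for_random_bytes();	/* 0 once crng_ready(), -ERESTARTSYS on signal */
	if (unlikely(ret))
		return ret;

	get_random_bytes(key, len);	/* pool is guaranteed seeded at this point */
	return 0;
}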
@@ -1849,6 +1908,8 @@ const struct file_operations urandom_fops = {
SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
unsigned int, flags)
{
+ int ret;
+
if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
return -EINVAL;
@@ -1861,9 +1922,9 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
if (!crng_ready()) {
if (flags & GRND_NONBLOCK)
return -EAGAIN;
- crng_wait_ready();
- if (signal_pending(current))
- return -ERESTARTSYS;
+ ret = wait_for_random_bytes();
+ if (unlikely(ret))
+ return ret;
}
return urandom_read(NULL, buf, count, NULL);
}
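For completeness, a userspace-side sketch of how the new blocking path surfaces through the syscall, assuming the glibc getrandom() wrapper from <sys/random.h>; nothing below is part of this patch:

#include <sys/random.h>
#include <errno.h>

/* If the pool is not yet seeded, a GRND_NONBLOCK call fails with EAGAIN;
 * retrying without the flag blocks (via wait_for_random_bytes()) until
 * "random: crng init done", then succeeds.
 */
static int fill_with_random(void *buf, size_t len)
{
	ssize_t n = getrandom(buf, len, GRND_NONBLOCK);

	if (n < 0 && errno == EAGAIN)
		n = getrandom(buf, len, 0);	/* blocking variant */
	return n == (ssize_t)len ? 0 : -1;
}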
@@ -2019,17 +2080,24 @@ struct batched_entropy {
};
unsigned int position;
};
+static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
/*
* Get a random word for internal kernel use only. The quality of the random
* number is either as good as RDRAND or as good as /dev/urandom, with the
- * goal of being quite fast and not depleting entropy.
+ * goal of being quite fast and not depleting entropy. In order to ensure
+ * that the randomness provided by this function is okay, the function
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
*/
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
u64 get_random_u64(void)
{
u64 ret;
+ bool use_lock;
+ unsigned long flags = 0;
struct batched_entropy *batch;
+ static void *previous;
#if BITS_PER_LONG == 64
if (arch_get_random_long((unsigned long *)&ret))
@@ -2040,12 +2108,19 @@ u64 get_random_u64(void)
return ret;
#endif
+ warn_unseeded_randomness(&previous);
+
+ use_lock = READ_ONCE(crng_init) < 2;
batch = &get_cpu_var(batched_entropy_u64);
+ if (use_lock)
+ read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
extract_crng((u8 *)batch->entropy_u64);
batch->position = 0;
}
ret = batch->entropy_u64[batch->position++];
+ if (use_lock)
+ read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
put_cpu_var(batched_entropy_u64);
return ret;
}
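A hypothetical non-cryptographic caller, purely illustrative and not part of this patch, showing the intended use of the batched interface described in the comment above:

#include <linux/random.h>

/* Per-CPU batched words are cheap to draw, but they are only
 * cryptographically secure if wait_for_random_bytes() has returned 0
 * at some earlier point.
 */
static u32 example_pick_jitter(u32 range)
{
	if (!range)
		return 0;
	return get_random_u32() % range;	/* modulo bias is acceptable for jitter */
}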
@@ -2055,22 +2130,49 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
u32 get_random_u32(void)
{
u32 ret;
+ bool use_lock;
+ unsigned long flags = 0;
struct batched_entropy *batch;
+ static void *previous;
if (arch_get_random_int(&ret))
return ret;
+ warn_unseeded_randomness(&previous);
+
+ use_lock = READ_ONCE(crng_init) < 2;
batch = &get_cpu_var(batched_entropy_u32);
+ if (use_lock)
+ read_lock_irqsave(&batched_entropy_reset_lock, flags);
if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
extract_crng((u8 *)batch->entropy_u32);
batch->position = 0;
}
ret = batch->entropy_u32[batch->position++];
+ if (use_lock)
+ read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
put_cpu_var(batched_entropy_u32);
return ret;
}
EXPORT_SYMBOL(get_random_u32);
+/* It's important to invalidate all potential batched entropy that might
+ * be stored before the crng is initialized, which we can do lazily by
+ * simply resetting the counter to zero so that it's re-extracted on the
+ * next usage. */
+static void invalidate_batched_entropy(void)
+{
+ int cpu;
+ unsigned long flags;
+
+ write_lock_irqsave(&batched_entropy_reset_lock, flags);
+ for_each_possible_cpu (cpu) {
+ per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+ per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+ }
+ write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+}
+
/**
* randomize_page - Generate a random, page aligned address
* @start: The smallest acceptable address the caller will take.