author		Rusty Russell <rusty@rustcorp.com.au>	2014-12-08 16:50:35 +0800
committer	Herbert Xu <herbert@gondor.apana.org.au>	2014-12-22 23:02:38 +1100
commit		9372b35e11149c5314f56f939775e67d83057604 (patch)
tree		5f5d8e33af4f3f02a56299c88348853b2582bca7 /drivers/char/hw_random
parent		25fb8638e919bc7431a73f2fb4a9713818ae2c9d (diff)
download	talos-obmc-linux-9372b35e11149c5314f56f939775e67d83057604.tar.gz
		talos-obmc-linux-9372b35e11149c5314f56f939775e67d83057604.zip
hwrng: place mutex around read functions and buffers.
There's currently a big lock around everything, and it means that we
can't query sysfs (eg /sys/devices/virtual/misc/hw_random/rng_current)
while the rng is reading. This is a real problem when the rng is slow,
or blocked (eg. virtio_rng with qemu's default /dev/random backend)

This doesn't help (it leaves the current lock untouched), just adds a
lock to protect the read function and the static buffers, in
preparation for transition.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
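A minimal sketch of the locking split this patch moves towards, condensed from the hunks below (the helper name is made up, the rest of core.c is omitted, and rng_get_data() is assumed to be the static helper inside core.c):

#include <linux/hw_random.h>
#include <linux/mutex.h>
#include <linux/random.h>

/*
 * rng_mutex keeps protecting the bookkeeping (rng_list, current_rng);
 * the new reading_mutex only covers the read path and the shared static
 * buffers, so a sysfs query of rng_current no longer has to wait behind
 * a slow or blocked read.
 */
static DEFINE_MUTEX(rng_mutex);
static DEFINE_MUTEX(reading_mutex);

static void example_pull_bytes(struct hwrng *rng)	/* hypothetical name */
{
	u8 bytes[16];
	int n;

	mutex_lock(&reading_mutex);
	n = rng_get_data(rng, bytes, sizeof(bytes), 1);	/* touches shared buffers */
	mutex_unlock(&reading_mutex);

	if (n > 0)
		add_device_randomness(bytes, n);
}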
Diffstat (limited to 'drivers/char/hw_random')
-rw-r--r--	drivers/char/hw_random/core.c	20
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 1500cfd799a7..2dd3144d69b1 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -53,7 +53,10 @@
static struct hwrng *current_rng;
static struct task_struct *hwrng_fill;
static LIST_HEAD(rng_list);
+/* Protects rng_list and current_rng */
static DEFINE_MUTEX(rng_mutex);
+/* Protects rng read functions, data_avail, rng_buffer and rng_fillbuf */
+static DEFINE_MUTEX(reading_mutex);
static int data_avail;
static u8 *rng_buffer, *rng_fillbuf;
static unsigned short current_quality;
@@ -81,7 +84,9 @@ static void add_early_randomness(struct hwrng *rng)
unsigned char bytes[16];
int bytes_read;
+ mutex_lock(&reading_mutex);
bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
+ mutex_unlock(&reading_mutex);
if (bytes_read > 0)
add_device_randomness(bytes, bytes_read);
}
@@ -128,6 +133,7 @@ static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
int wait) {
int present;
+ BUG_ON(!mutex_is_locked(&reading_mutex));
if (rng->read)
return rng->read(rng, (void *)buffer, size, wait);
@@ -160,13 +166,14 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
goto out_unlock;
}
+ mutex_lock(&reading_mutex);
if (!data_avail) {
bytes_read = rng_get_data(current_rng, rng_buffer,
rng_buffer_size(),
!(filp->f_flags & O_NONBLOCK));
if (bytes_read < 0) {
err = bytes_read;
- goto out_unlock;
+ goto out_unlock_reading;
}
data_avail = bytes_read;
}
@@ -174,7 +181,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
if (!data_avail) {
if (filp->f_flags & O_NONBLOCK) {
err = -EAGAIN;
- goto out_unlock;
+ goto out_unlock_reading;
}
} else {
len = data_avail;
@@ -186,7 +193,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
if (copy_to_user(buf + ret, rng_buffer + data_avail,
len)) {
err = -EFAULT;
- goto out_unlock;
+ goto out_unlock_reading;
}
size -= len;
@@ -194,6 +201,7 @@ static ssize_t rng_dev_read(struct file *filp, char __user *buf,
}
mutex_unlock(&rng_mutex);
+ mutex_unlock(&reading_mutex);
if (need_resched())
schedule_timeout_interruptible(1);
@@ -208,6 +216,9 @@ out:
out_unlock:
mutex_unlock(&rng_mutex);
goto out;
+out_unlock_reading:
+ mutex_unlock(&reading_mutex);
+ goto out_unlock;
}
@@ -344,13 +355,16 @@ static int hwrng_fillfn(void *unused)
while (!kthread_should_stop()) {
if (!current_rng)
break;
+ mutex_lock(&reading_mutex);
rc = rng_get_data(current_rng, rng_fillbuf,
rng_buffer_size(), 1);
+ mutex_unlock(&reading_mutex);
if (rc <= 0) {
pr_warn("hwrng: no data available\n");
msleep_interruptible(10000);
continue;
}
+ /* Outside lock, sure, but y'know: randomness. */
add_hwgenerator_randomness((void *)rng_fillbuf, rc,
rc * current_quality * 8 >> 10);
}
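In rng_dev_read() this works out to the following per-iteration lock ordering; a condensed paraphrase of the hunks above, not the literal code: rng_mutex is taken first, then reading_mutex, and the new out_unlock_reading label lets the error paths release both in reverse order.

	mutex_lock(&rng_mutex);
	/* check current_rng, bail out via out_unlock if none */
	mutex_lock(&reading_mutex);
	/* rng_get_data() refills rng_buffer, copy_to_user() drains it */
	mutex_unlock(&rng_mutex);
	mutex_unlock(&reading_mutex);

	/* error paths taken while both locks are held: */
out_unlock_reading:
	mutex_unlock(&reading_mutex);
	goto out_unlock;	/* out_unlock drops rng_mutex and returns */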