author    | Ralf Baechle <ralf@linux-mips.org> | 2007-02-19 16:59:24 +0000
committer | Ralf Baechle <ralf@linux-mips.org> | 2007-02-20 01:26:42 +0000
commit    | e03b526932a9ae1ff20b47459c040f3c6407f625
tree      | 6c1753fc5a0497621b05c7dae9d3d686503bc5d7 /include/asm-mips
parent    | 269dd2b2526d046d8b43554ff27b486e2ddb3f08
[MIPS] Fixup copy_from_user_inatomic
From the 01408c4939479ec46c15aa7ef6e2406be50eeeca log message:
The problem is that when we write to a file, the copy from userspace to
pagecache is first done with preemption disabled, so if the source
address is not immediately available the copy fails *and* *zeros* *the*
*destination*.
This is a problem because a concurrent read (which admittedly is an odd
thing to do) might see zeros rather than what was there before the write, or
what was there after, or some mixture of the two (any of these being a
reasonable thing to see).
If the copy did fail, it will immediately be retried with preemption
re-enabled so any transient problem with accessing the source won't
cause an error.
The first copy does not need to zero any uncopied bytes, and doing
so causes the problem. It uses __copy_from_user_inatomic rather than
copy_from_user, so the simple expedient is to change __copy_from_user_inatomic
to *not* zero out bytes on failure.
< --- end cite --- >
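To make the quoted failure mode concrete, below is a condensed sketch of the two-pass pagecache write it describes. This is only a sketch: write_into_page and its control flow are simplified stand-ins for the generic write path of that era, not code from this patch.

#include <linux/highmem.h>
#include <asm/uaccess.h>

/*
 * Illustrative two-pass copy into a pagecache page (simplified; not the
 * exact mainline write path).  Pass 1 runs atomically and may stop short
 * if the user buffer would fault; pass 2 retries with faults allowed.
 * If pass 1 zero-fills the tail of the page (the old behaviour), a
 * concurrent reader can observe transient zeros between the two passes.
 */
static size_t write_into_page(struct page *page, unsigned long offset,
			      const char __user *buf, size_t bytes)
{
	char *kaddr;
	size_t left;

	kaddr = kmap_atomic(page, KM_USER0);	/* runs with preemption off */
	left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
	kunmap_atomic(kaddr, KM_USER0);

	if (unlikely(left)) {
		/* Slow path: faults (and sleeping) are allowed here. */
		kaddr = kmap(page);
		left = __copy_from_user(kaddr + offset, buf, bytes);
		kunmap(page);
	}
	return bytes - left;			/* bytes actually copied */
}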
This patch finally implements a solution, albeit not a pretty one, by
duplicating the relevant part of __copy_user as a variant that does not
zero the destination on a fault.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
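The behavioural change the fix depends on can also be shown with a small userspace analogue. Everything below is invented for illustration: raw_copy and its 'readable' argument merely simulate a faulting user source, and only the semantics (zero-filling versus preserving the destination tail) mirror the kernel routines.

#include <stdio.h>
#include <string.h>
#include <stddef.h>

/* Simulated low-level copy: pretend the source faults after 'readable'
 * bytes and return how many of the requested 'n' bytes were NOT copied. */
static size_t raw_copy(void *to, const void *from, size_t n, size_t readable)
{
	size_t ok = n < readable ? n : readable;

	memcpy(to, from, ok);
	return n - ok;
}

/* Old behaviour (__copy_from_user_inatomic aliased to __copy_from_user):
 * the uncopied tail of the destination is zero-filled. */
static size_t copy_zeroing(void *to, const void *from, size_t n, size_t readable)
{
	size_t left = raw_copy(to, from, n, readable);

	if (left)
		memset((char *)to + (n - left), 0, left);
	return left;
}

/* New behaviour: report the short copy, leave the destination untouched. */
static size_t copy_preserving(void *to, const void *from, size_t n, size_t readable)
{
	return raw_copy(to, from, n, readable);
}

int main(void)
{
	char page[8] = "ABCDEFG";	/* existing pagecache contents */
	char user[8] = "1234567";	/* data being written */

	copy_zeroing(page, user, 7, 3);
	printf("zeroing copy   : %.7s  (tail wiped to zeros)\n", page);

	memcpy(page, "ABCDEFG", 8);
	copy_preserving(page, user, 7, 3);
	printf("preserving copy: %.7s  (old data still visible)\n", page);
	return 0;
}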
Diffstat (limited to 'include/asm-mips')
-rw-r--r-- | include/asm-mips/uaccess.h | 51
1 file changed, 49 insertions(+), 2 deletions(-)
diff --git a/include/asm-mips/uaccess.h b/include/asm-mips/uaccess.h
index 3eff8d8fe28a..c62c20e7b5c6 100644
--- a/include/asm-mips/uaccess.h
+++ b/include/asm-mips/uaccess.h
@@ -435,8 +435,32 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 	__cu_len;							\
 })
 
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+#define __copy_to_user_inatomic(to,from,n)				\
+({									\
+	void __user *__cu_to;						\
+	const void *__cu_from;						\
+	long __cu_len;							\
+									\
+	__cu_to = (to);							\
+	__cu_from = (from);						\
+	__cu_len = (n);							\
+	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len);\
+	__cu_len;							\
+})
+
+#define __copy_from_user_inatomic(to,from,n)				\
+({									\
+	void *__cu_to;							\
+	const void __user *__cu_from;					\
+	long __cu_len;							\
+									\
+	__cu_to = (to);							\
+	__cu_from = (from);						\
+	__cu_len = (n);							\
+	__cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from,\
+	                                            __cu_len);		\
+	__cu_len;							\
+})
 
 /*
  * copy_to_user: - Copy a block of data into user space.
@@ -490,6 +514,29 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
 	__cu_len_r;							\
 })
 
+#define __invoke_copy_from_user_inatomic(to,from,n)			\
+({									\
+	register void *__cu_to_r __asm__ ("$4");			\
+	register const void __user *__cu_from_r __asm__ ("$5");	\
+	register long __cu_len_r __asm__ ("$6");			\
+									\
+	__cu_to_r = (to);						\
+	__cu_from_r = (from);						\
+	__cu_len_r = (n);						\
+	__asm__ __volatile__(						\
+	".set\tnoreorder\n\t"						\
+	__MODULE_JAL(__copy_user_inatomic)				\
+	".set\tnoat\n\t"						\
+	__UA_ADDU "\t$1, %1, %2\n\t"					\
+	".set\tat\n\t"							\
+	".set\treorder"							\
+	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
+	:								\
+	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",	\
+	  "memory");							\
+	__cu_len_r;							\
+})
+
 /*
  * __copy_from_user: - Copy a block of data from user space, with less checking.
  * @to: Destination address, in kernel space.
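A note on the new __invoke_copy_from_user_inatomic stub above: it pins the three macro arguments to the MIPS argument registers $4-$6, enters the assembly routine through __MODULE_JAL, and names the caller-saved registers plus $31 as clobbers because the routine is reached like an ordinary function call. Conceptually the routine it calls has the contract sketched below; the prototype and the atomic_user_copy_ok helper are illustrative, since __copy_user_inatomic itself is hand-written assembly and is not part of this hunk.

/*
 * Conceptual C-level contract of the routine reached through
 * __MODULE_JAL(__copy_user_inatomic) (illustrative prototype, not taken
 * from this hunk): copy up to __n bytes and return how many bytes were
 * left uncopied, without zero-filling the destination when the user
 * source faults.
 */
extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);

/* Hypothetical caller in an atomic context, shown for usage only. */
static inline int atomic_user_copy_ok(void *dst, const void __user *src,
				      size_t len)
{
	return __copy_from_user_inatomic(dst, src, len) == 0;
}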