author		Helge Deller <deller@gmx.de>	2017-03-25 11:59:15 +0100
committer	Helge Deller <deller@gmx.de>	2017-03-29 21:50:36 +0200
commit		d19f5e41b344a057bb2450024a807476f30978d2 (patch)
tree		4f7f4fa52e5e845ec0abe38d1906404d575ca8ab /arch/parisc/include/asm
parent		554bfeceb8a22d448cd986fc9efce25e833278a1 (diff)
parisc: Clean up fixup routines for get_user()/put_user()
Al Viro noticed that userspace accesses via get_user()/put_user() can be
simplified a lot with regard to usage of the exception handling.

This patch implements a fixup routine for get_user() and put_user() such that
the exception handler will automatically load -EFAULT into register %r8 (the
error value) in case of a fault on userspace. Additionally, the fixup routine
will zero the target register on fault in case of a get_user() call. The
target register is extracted out of the faulting assembly instruction.

This patch brings a few benefits over the old implementation:
1. Exception handling gets much cleaner, easier and smaller in size.
2. Helper functions like fixup_get_user_skip_1 (all of fixup.S) can be dropped.
3. No need to hardcode %r9 as target register for get_user() any longer. This
   helps the compiler register allocator and thus creates fewer assembler
   statements.
4. No dependency on the exception_data contents any longer.
5. Nested faults will be handled cleanly.

Reported-by: Al Viro <viro@ZenIV.linux.org.uk>
Cc: <stable@vger.kernel.org> # v4.9+
Signed-off-by: Helge Deller <deller@gmx.de>
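The fault-side half of this scheme lives in the parisc fault handler rather than in this header (the diffstat below is limited to arch/parisc/include/asm). The following is a rough, non-authoritative sketch of how such a handler can consume the tagged exception table entries; the helper insn_is_load() and the exact register and field accesses are illustrative assumptions, not copied from the patch:

#include <linux/extable.h>
#include <linux/uaccess.h>
#include <asm/ptrace.h>

/* Hypothetical helper: decide whether the faulting insn was a user load. */
static bool insn_is_load(unsigned int insn);

/*
 * Sketch only: handle a fault whose exception table entry was created by
 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT(), i.e. whose fixup offset has the
 * lowest bit set.
 */
int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fix;

	fix = search_exception_tables(regs->iaoq[0]);
	if (!fix)
		return 0;

	if (fix->fixup & 1) {
		/* get_user()/put_user() fault: report -EFAULT in %r8 */
		regs->gr[8] = -EFAULT;

		/*
		 * For a faulting load (get_user()) also zero the target
		 * register, which on parisc is encoded in the low 5 bits
		 * of the instruction word saved in the IIR.
		 */
		if (insn_is_load(regs->iir))
			regs->gr[regs->iir & 0x1f] = 0;
	}

	/* resume at the fixup address, with the tag bit masked off */
	regs->iaoq[0] = ((unsigned long)&fix->fixup + fix->fixup) & ~3UL;
	regs->iaoq[1] = regs->iaoq[0] + 4;
	return 1;
}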
Diffstat (limited to 'arch/parisc/include/asm')
-rw-r--r--	arch/parisc/include/asm/uaccess.h | 59
1 file changed, 34 insertions, 25 deletions
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index edfbf9d6a6dd..8442727f28d2 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -65,6 +65,15 @@ struct exception_table_entry {
".previous\n"
/*
+ * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
+ * (with lowest bit set) for which the fault handler in fixup_exception() will
+ * load -EFAULT into %r8 for a read or write fault, and zeroes the target
+ * register in case of a read fault in get_user().
+ */
+#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
+ ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
+
+/*
* The page fault handler stores, in a per-cpu area, the following information
* if a fixup routine is available.
*/
@@ -91,7 +100,7 @@ struct exception_data {
#define __get_user(x, ptr) \
({ \
register long __gu_err __asm__ ("r8") = 0; \
- register long __gu_val __asm__ ("r9") = 0; \
+ register long __gu_val; \
\
load_sr2(); \
switch (sizeof(*(ptr))) { \
@@ -107,22 +116,23 @@ struct exception_data {
})
#define __get_user_asm(ldx, ptr) \
- __asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
+ __asm__("1: " ldx " 0(%%sr2,%2),%0\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
: "=r"(__gu_val), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err) \
- : "r1");
+ : "r"(ptr), "1"(__gu_err));
#if !defined(CONFIG_64BIT)
#define __get_user_asm64(ptr) \
- __asm__("\n1:\tldw 0(%%sr2,%2),%0" \
- "\n2:\tldw 4(%%sr2,%2),%R0\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\
- ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\
+ __asm__(" copy %%r0,%R0\n" \
+ "1: ldw 0(%%sr2,%2),%0\n" \
+ "2: ldw 4(%%sr2,%2),%R0\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
: "=r"(__gu_val), "=r"(__gu_err) \
- : "r"(ptr), "1"(__gu_err) \
- : "r1");
+ : "r"(ptr), "1"(__gu_err));
#endif /* !defined(CONFIG_64BIT) */
@@ -148,32 +158,31 @@ struct exception_data {
* The "__put_user/kernel_asm()" macros tell gcc they read from memory
* instead of writing. This is because they do not write to any memory
* gcc knows about, so there are no aliasing issues. These macros must
- * also be aware that "fixup_put_user_skip_[12]" are executed in the
- * context of the fault, and any registers used there must be listed
- * as clobbers. In this case only "r1" is used by the current routines.
- * r8/r9 are already listed as err/val.
+ * also be aware that fixups are executed in the context of the fault,
+ * and any registers used there must be listed as clobbers.
+ * r8 is already listed as err.
*/
#define __put_user_asm(stx, x, ptr) \
__asm__ __volatile__ ( \
- "\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
+ "1: " stx " %2,0(%%sr2,%1)\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
: "=r"(__pu_err) \
- : "r"(ptr), "r"(x), "0"(__pu_err) \
- : "r1")
+ : "r"(ptr), "r"(x), "0"(__pu_err))
#if !defined(CONFIG_64BIT)
#define __put_user_asm64(__val, ptr) do { \
__asm__ __volatile__ ( \
- "\n1:\tstw %2,0(%%sr2,%1)" \
- "\n2:\tstw %R2,4(%%sr2,%1)\n\t" \
- ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
- ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
+ "1: stw %2,0(%%sr2,%1)\n" \
+ "2: stw %R2,4(%%sr2,%1)\n" \
+ "9:\n" \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \
: "=r"(__pu_err) \
- : "r"(ptr), "r"(__val), "0"(__pu_err) \
- : "r1"); \
+ : "r"(ptr), "r"(__val), "0"(__pu_err)); \
} while (0)
#endif /* !defined(CONFIG_64BIT) */
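
For completeness, here is a caller-level view of the contract these macros implement. This example is not part of the patch and its function and variable names are made up: get_user() returns 0 on success and -EFAULT on a faulting user access, and with this change the destination is zeroed on a failed read without any fixed choice of target register.

#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Illustrative only: fetch one 32-bit word from user space. */
static int read_user_word(const u32 __user *uptr, u32 *out)
{
	u32 val;

	/*
	 * On a faulting access the exception fixup loads -EFAULT into %r8
	 * (the error register) and zeroes 'val'; the compiler is free to
	 * pick any target register for the load.
	 */
	if (get_user(val, uptr))
		return -EFAULT;

	*out = val;
	return 0;
}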