author		Ville Syrjälä <ville.syrjala@linux.intel.com>	2012-12-12 13:34:03 +0200
committer	H. Peter Anvin <hpa@linux.intel.com>	2013-02-07 15:07:28 -0800
commit		96477b4cd705c5416346aef262b0a1116cfcdd80 (patch)
tree		fcc031d7eb37e0eeef361e65102c260c8b62490b
parent		07f4207a305c834f528d08428df4531744e25678 (diff)
x86-32: Add support for 64bit get_user()
Implement __get_user_8() for x86-32. It will return the 64-bit result in
the edx:eax register pair, and ecx is used to pass in the address and
return the error value.

For consistency, change the register assignment for all other
__get_user_x() variants, so that the address is passed in ecx/rcx, the
error value is returned in ecx/rcx, and eax/rax contains the actual
value.

[ hpa: I modified the patch so that it does NOT change the calling
  conventions for the existing callsites; this also means that the code
  is completely unchanged for 64 bits.

  Instead, continue to use eax for address input / error output and use
  the ecx:edx register pair for the output. ]

This is a partial refresh of a patch [1] by Jamie Lokier from 2004. Only
the minimal changes to implement 64bit get_user() were picked from the
original patch.

[1] http://article.gmane.org/gmane.linux.kernel/198823

Originally-by: Jamie Lokier <jamie@shareable.org>
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: http://lkml.kernel.org/r/1355312043-11467-1-git-send-email-ville.syrjala@linux.intel.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
-rw-r--r--	arch/x86/include/asm/uaccess.h	19
-rw-r--r--	arch/x86/kernel/i386_ksyms_32.c	1
-rw-r--r--	arch/x86/lib/getuser.S	37
3 files changed, 48 insertions, 9 deletions
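
For illustration only (not part of the commit), here is a minimal sketch of
what this change enables for callers on x86-32: get_user() can now be used
directly on an 8-byte object instead of falling back to copy_from_user().
The helper name and its parameters below are hypothetical.

	#include <linux/types.h>	/* u64 */
	#include <linux/uaccess.h>	/* get_user() */

	/*
	 * Hypothetical helper: read a single 64-bit value from userspace.
	 * With __get_user_8() implemented for x86-32, get_user() handles
	 * sizeof(*uptr) == 8 on both 32-bit and 64-bit kernels.
	 */
	static int read_user_counter(const u64 __user *uptr, u64 *out)
	{
		u64 val;

		/* get_user() returns 0 on success or -EFAULT on a fault. */
		if (get_user(val, uptr))
			return -EFAULT;

		*out = val;
		return 0;
	}
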
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 1709801d18ec..1e963267d44e 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -151,8 +151,15 @@ extern int __get_user_bad(void);
* On error, the variable @x is set to zero.
*/
#ifdef CONFIG_X86_32
-#define __get_user_8(__ret_gu, __val_gu, ptr) \
- __get_user_x(X, __ret_gu, __val_gu, ptr)
+#define __get_user_8(ret, x, ptr) \
+do { \
+ register unsigned long long __xx asm("%edx"); \
+ asm volatile("call __get_user_8" \
+ : "=a" (ret), "=r" (__xx) \
+ : "0" (ptr)); \
+ (x) = __xx; \
+} while (0)
+
#else
#define __get_user_8(__ret_gu, __val_gu, ptr) \
__get_user_x(8, __ret_gu, __val_gu, ptr)
@@ -162,6 +169,7 @@ extern int __get_user_bad(void);
({ \
int __ret_gu; \
unsigned long __val_gu; \
+ unsigned long long __val_gu8; \
__chk_user_ptr(ptr); \
might_fault(); \
switch (sizeof(*(ptr))) { \
@@ -175,13 +183,16 @@ extern int __get_user_bad(void);
__get_user_x(4, __ret_gu, __val_gu, ptr); \
break; \
case 8: \
- __get_user_8(__ret_gu, __val_gu, ptr); \
+ __get_user_8(__ret_gu, __val_gu8, ptr); \
break; \
default: \
__get_user_x(X, __ret_gu, __val_gu, ptr); \
break; \
} \
- (x) = (__typeof__(*(ptr)))__val_gu; \
+ if (sizeof(*(ptr)) == 8) \
+ (x) = (__typeof__(*(ptr)))__val_gu8; \
+ else \
+ (x) = (__typeof__(*(ptr)))__val_gu; \
__ret_gu; \
})
diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
index 9c3bd4a2050e..0fa69127209a 100644
--- a/arch/x86/kernel/i386_ksyms_32.c
+++ b/arch/x86/kernel/i386_ksyms_32.c
@@ -26,6 +26,7 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
EXPORT_SYMBOL(__get_user_1);
EXPORT_SYMBOL(__get_user_2);
EXPORT_SYMBOL(__get_user_4);
+EXPORT_SYMBOL(__get_user_8);
EXPORT_SYMBOL(__put_user_1);
EXPORT_SYMBOL(__put_user_2);
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 156b9c804670..d3bf9f99ca77 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -15,11 +15,10 @@
* __get_user_X
*
* Inputs: %[r|e]ax contains the address.
- * The register is modified, but all changes are undone
- * before returning because the C code doesn't know about it.
*
* Outputs: %[r|e]ax is error code (0 or -EFAULT)
* %[r|e]dx contains zero-extended value
+ * %ecx contains the high half for 32-bit __get_user_8
*
*
* These functions should not modify any other registers,
@@ -79,22 +78,35 @@ ENTRY(__get_user_4)
CFI_ENDPROC
ENDPROC(__get_user_4)
-#ifdef CONFIG_X86_64
ENTRY(__get_user_8)
CFI_STARTPROC
+#ifdef CONFIG_X86_64
add $7,%_ASM_AX
jc bad_get_user
GET_THREAD_INFO(%_ASM_DX)
cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
- jae bad_get_user
+ jae bad_get_user
ASM_STAC
4: movq -7(%_ASM_AX),%_ASM_DX
xor %eax,%eax
ASM_CLAC
ret
+#else
+ add $7,%_ASM_AX
+ jc bad_get_user_8
+ GET_THREAD_INFO(%_ASM_DX)
+ cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
+ jae bad_get_user_8
+ ASM_STAC
+4: mov -7(%_ASM_AX),%edx
+5: mov -3(%_ASM_AX),%ecx
+ xor %eax,%eax
+ ASM_CLAC
+ ret
+#endif
CFI_ENDPROC
ENDPROC(__get_user_8)
-#endif
+
bad_get_user:
CFI_STARTPROC
@@ -105,9 +117,24 @@ bad_get_user:
CFI_ENDPROC
END(bad_get_user)
+#ifdef CONFIG_X86_32
+bad_get_user_8:
+ CFI_STARTPROC
+ xor %edx,%edx
+ xor %ecx,%ecx
+ mov $(-EFAULT),%_ASM_AX
+ ASM_CLAC
+ ret
+ CFI_ENDPROC
+END(bad_get_user_8)
+#endif
+
_ASM_EXTABLE(1b,bad_get_user)
_ASM_EXTABLE(2b,bad_get_user)
_ASM_EXTABLE(3b,bad_get_user)
#ifdef CONFIG_X86_64
_ASM_EXTABLE(4b,bad_get_user)
+#else
+ _ASM_EXTABLE(4b,bad_get_user_8)
+ _ASM_EXTABLE(5b,bad_get_user_8)
#endif