author | Glauber Costa <gcosta@redhat.com> | 2008-06-25 11:48:29 -0300
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2008-07-09 09:14:22 +0200
commit | dc70ddf4098de043ac52f623c7573a11f2ae3d09 (patch) |
tree | 0641f33c243a7d1feec051670cdced9ffec36535 /include/asm-x86/uaccess_32.h |
parent | d42e6af613375be7a9a431628ecd742e87230554 (diff) |
x86: merge __put_user_asm and its user.
Move both __put_user_asm and __put_user_size to
uaccess.h. i386 already had a special function for 64-bit access,
so for x86_64, we just define a macro with the same name.
Note that for X86_64, CONFIG_X86_WP_WORKS_OK will always
be defined, so the #else part will never even be compiled in.
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
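
For context on the changelog above: the size-dispatch macro and the single-instruction store template are the architecture-independent part, and only the 8-byte case differs between i386 and x86_64. Below is a hypothetical userspace sketch (the sketch_* names are invented, and it drops the kernel's exception-table fault handling entirely) of why, on x86_64, the 64-bit case can collapse into "a macro with the same name" that reuses the common template with the "q" size suffix. It builds with GCC on x86_64 only and is not the kernel's code.

```c
#include <stdio.h>

/* One store template: "mov<itype> %<rtype>1,%0" (no fault handling here). */
#define sketch_put_asm(x, addr, itype, rtype, ltype)		\
	asm volatile("mov" itype " %" rtype "1,%0"		\
		     : "=m" (*(addr))				\
		     : ltype (x))

/*
 * On x86_64 an 8-byte store is just the same template with the "q"
 * suffix, so the 64-bit case can be a plain alias macro instead of the
 * two-movl special case that i386 needs.
 */
#define sketch_put_u64(x, addr)					\
	sketch_put_asm((unsigned long long)(x), addr, "q", "", "r")

/* Size dispatch, shaped like the kernel's __put_user_size(). */
#define sketch_put_size(x, ptr)					\
do {								\
	switch (sizeof(*(ptr))) {				\
	case 1: sketch_put_asm(x, ptr, "b", "b", "q"); break;	\
	case 2: sketch_put_asm(x, ptr, "w", "w", "r"); break;	\
	case 4: sketch_put_asm(x, ptr, "l", "k", "r"); break;	\
	case 8: sketch_put_u64(x, ptr); break;			\
	}							\
} while (0)

/* Mirror __put_user(): cast the value to the pointee type first. */
#define sketch_put(x, ptr)					\
	sketch_put_size((__typeof__(*(ptr)))(x), ptr)

int main(void)
{
	unsigned int v32 = 0;
	unsigned long long v64 = 0;

	sketch_put(0x12345678u, &v32);
	sketch_put(0x1122334455667788ull, &v64);
	printf("%x %llx\n", v32, v64);
	return 0;
}
```

The cast in sketch_put() mirrors __put_user()'s cast to __typeof__(*(ptr)); it is what keeps the not-taken switch arms type-correct for any pointee size.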
Diffstat (limited to 'include/asm-x86/uaccess_32.h')
-rw-r--r-- | include/asm-x86/uaccess_32.h | 77 |
1 file changed, 0 insertions, 77 deletions
```diff
diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h
index 4c47a5ba65e3..fab755781b9b 100644
--- a/include/asm-x86/uaccess_32.h
+++ b/include/asm-x86/uaccess_32.h
@@ -145,83 +145,6 @@ extern void __put_user_8(void);
 #define __put_user(x, ptr) \
 	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
 
-#define __put_user_nocheck(x, ptr, size) \
-({ \
-	long __pu_err; \
-	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
-	__pu_err; \
-})
-
-
-#define __put_user_u64(x, addr, err) \
-	asm volatile("1: movl %%eax,0(%2)\n" \
-		     "2: movl %%edx,4(%2)\n" \
-		     "3:\n" \
-		     ".section .fixup,\"ax\"\n" \
-		     "4: movl %3,%0\n" \
-		     "	jmp 3b\n" \
-		     ".previous\n" \
-		     _ASM_EXTABLE(1b, 4b) \
-		     _ASM_EXTABLE(2b, 4b) \
-		     : "=r" (err) \
-		     : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
-
-#ifdef CONFIG_X86_WP_WORKS_OK
-
-#define __put_user_size(x, ptr, size, retval, errret) \
-do { \
-	retval = 0; \
-	__chk_user_ptr(ptr); \
-	switch (size) { \
-	case 1: \
-		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret); \
-		break; \
-	case 2: \
-		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret); \
-		break; \
-	case 4: \
-		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);\
-		break; \
-	case 8: \
-		__put_user_u64((__typeof__(*ptr))(x), ptr, retval); \
-		break; \
-	default: \
-		__put_user_bad(); \
-	} \
-} while (0)
-
-#else
-
-#define __put_user_size(x, ptr, size, retval, errret) \
-do { \
-	__typeof__(*(ptr))__pus_tmp = x; \
-	retval = 0; \
-	\
-	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0)) \
-		retval = errret; \
-} while (0)
-
-#endif
-struct __large_struct { unsigned long buf[100]; };
-#define __m(x) (*(struct __large_struct __user *)(x))
-
-/*
- * Tell gcc we read from memory instead of writing: this is because
- * we do not write to any memory gcc knows about, so there are no
- * aliasing issues.
- */
-#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
-	asm volatile("1: mov"itype" %"rtype"1,%2\n" \
-		     "2:\n" \
-		     ".section .fixup,\"ax\"\n" \
-		     "3: movl %3,%0\n" \
-		     "	jmp 2b\n" \
-		     ".previous\n" \
-		     _ASM_EXTABLE(1b, 3b) \
-		     : "=r"(err) \
-		     : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
-
-
 #define __get_user_nocheck(x, ptr, size) \
 ({ \
 	long __gu_err; \
```
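
A note on the fault handling visible in the removed hunk (annotation only, nothing beyond what the diff shows): both templates rely on the kernel's exception-table mechanism, which is why every store instruction that touches a user pointer carries its own _ASM_EXTABLE entry.

```c
/*
 * How the removed __put_user_asm() template recovers from a faulting
 * store (annotation of the hunk above):
 *
 *   1: mov<size> value, user_address    may fault on a bad user pointer
 *   2:                                  normal continuation point
 *   .section .fixup
 *   3: movl $errret, err                fixup: record the error code
 *      jmp  2b                          and resume after the store
 *   .previous
 *   _ASM_EXTABLE(1b, 3b)                fault at 1 -> branch to 3
 *
 * __put_user_u64() on i386 stores the value with two movl instructions
 * (%eax then %edx), so it needs two _ASM_EXTABLE entries, one per
 * instruction that can fault, both pointing at the same fixup label.
 */
```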