Diffstat (limited to 'arch/s390/lib')
-rw-r--r--   arch/s390/lib/Makefile          |    5
-rw-r--r--   arch/s390/lib/delay.c           |   12
-rw-r--r--   arch/s390/lib/div64.c           |  151
-rw-r--r--   arch/s390/lib/string.c          |    4
-rw-r--r--   arch/s390/lib/uaccess.S         |  210
-rw-r--r--   arch/s390/lib/uaccess64.S       |  206
-rw-r--r--   arch/s390/lib/uaccess_mvcos.c   |  166
-rw-r--r--   arch/s390/lib/uaccess_std.c     |  356
8 files changed, 682 insertions, 428 deletions
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index e05d087a6eae..b0cfa6c4883d 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -4,6 +4,7 @@
 
 EXTRA_AFLAGS := -traditional
 
-lib-y += delay.o string.o
-lib-y += $(if $(CONFIG_64BIT),uaccess64.o,uaccess.o)
+lib-y += delay.o string.o uaccess_std.o
+lib-$(CONFIG_32BIT) += div64.o
+lib-$(CONFIG_64BIT) += uaccess_mvcos.o
 
 lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 71f0a2fb3078..027c4742a001 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -11,7 +11,6 @@
  * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz>
  */
 
-#include <linux/config.h>
 #include <linux/sched.h>
 #include <linux/delay.h>
 
@@ -28,9 +27,7 @@ void __delay(unsigned long loops)
	 * yield the megahertz number of the cpu. The important function
	 * is udelay and that is done using the tod clock. -- martin.
	 */
-	__asm__ __volatile__(
-		"0: brct %0,0b"
-		: /* no outputs */ : "r" ((loops/2) + 1));
+	asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1));
 }
 
 /*
@@ -39,13 +36,12 @@ void __delay(unsigned long loops)
  */
 void __udelay(unsigned long usecs)
 {
-	uint64_t start_cc, end_cc;
+	uint64_t start_cc;
 
	if (usecs == 0)
		return;
-	asm volatile ("STCK %0" : "=m" (start_cc));
+	start_cc = get_clock();
	do {
		cpu_relax();
-		asm volatile ("STCK %0" : "=m" (end_cc));
-	} while (((end_cc - start_cc)/4096) < usecs);
+	} while (((get_clock() - start_cc)/4096) < usecs);
 }
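The rewritten __udelay leans on the s390 TOD clock format: bit 51 of the TOD value carries a weight of one microsecond, so a TOD delta divided by 4096 (2^12 sub-microsecond units) is the elapsed time in microseconds. A minimal user-space model of that arithmetic, with tod_clock() as a hypothetical stand-in for the kernel's get_clock():

#include <stdint.h>

extern uint64_t tod_clock(void);	/* hypothetical STCK wrapper */

static void model_udelay(unsigned long usecs)
{
	uint64_t start_cc;

	if (usecs == 0)
		return;
	start_cc = tod_clock();
	do {
		/* cpu_relax() would go here in the kernel */
	} while (((tod_clock() - start_cc) / 4096) < usecs);
}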
diff --git a/arch/s390/lib/div64.c b/arch/s390/lib/div64.c
new file mode 100644
index 000000000000..0481f3424a13
--- /dev/null
+++ b/arch/s390/lib/div64.c
@@ -0,0 +1,151 @@
+/*
+ *  arch/s390/lib/div64.c
+ *
+ *  __div64_32 implementation for 31 bit.
+ *
+ *  Copyright (C) IBM Corp. 2006
+ *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+
+#ifdef CONFIG_MARCH_G5
+
+/*
+ * Function to divide an unsigned 64 bit integer by an unsigned
+ * 31 bit integer using signed 64/32 bit division.
+ */
+static uint32_t __div64_31(uint64_t *n, uint32_t base)
+{
+	register uint32_t reg2 asm("2");
+	register uint32_t reg3 asm("3");
+	uint32_t *words = (uint32_t *) n;
+	uint32_t tmp;
+
+	/* Special case base==1, remainder = 0, quotient = n */
+	if (base == 1)
+		return 0;
+	/*
+	 * Special case base==0 will cause a fixed point divide exception
+	 * on the dr instruction and may not happen anyway. For the
+	 * following calculation we can assume base > 1. The first
+	 * signed 64 / 32 bit division with an upper half of 0 will
+	 * give the correct upper half of the 64 bit quotient.
+	 */
+	reg2 = 0UL;
+	reg3 = words[0];
+	asm volatile(
+		"	dr %0,%2\n"
+		: "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
+	words[0] = reg3;
+	reg3 = words[1];
+	/*
+	 * To get the lower half of the 64 bit quotient and the 32 bit
+	 * remainder we have to use a little trick. Since we only have
+	 * a signed division the quotient can get too big. To avoid this
+	 * the 64 bit dividend is halved, then the signed division will
+	 * work. Afterwards the quotient and the remainder are doubled.
+	 * If the last bit of the dividend has been one the remainder
+	 * is increased by one then checked against the base. If the
+	 * remainder has overflown subtract base and increase the
+	 * quotient. Simple, no ?
+	 */
+	asm volatile(
+		"	nr %2,%1\n"
+		"	srdl %0,1\n"
+		"	dr %0,%3\n"
+		"	alr %0,%0\n"
+		"	alr %1,%1\n"
+		"	alr %0,%2\n"
+		"	clr %0,%3\n"
+		"	jl 0f\n"
+		"	slr %0,%3\n"
+		"	alr %1,%2\n"
+		"0:\n"
+		: "+d" (reg2), "+d" (reg3), "=d" (tmp)
+		: "d" (base), "2" (1UL) : "cc" );
+	words[1] = reg3;
+	return reg2;
+}
+
+/*
+ * Function to divide an unsigned 64 bit integer by an unsigned
+ * 32 bit integer using the unsigned 64/31 bit division.
+ */
+uint32_t __div64_32(uint64_t *n, uint32_t base)
+{
+	uint32_t r;
+
+	/*
+	 * If the most significant bit of base is set, divide n by
+	 * (base/2). That allows to use 64/31 bit division and gives a
+	 * good approximation of the result: n = (base/2)*q + r. The
+	 * result needs to be corrected with two simple transformations.
+	 * If base is already < 2^31-1 __div64_31 can be used directly.
+	 */
+	r = __div64_31(n, ((signed) base < 0) ? (base/2) : base);
+	if ((signed) base < 0) {
+		uint64_t q = *n;
+		/*
+		 * First transformation:
+		 * n = (base/2)*q + r
+		 *   = ((base/2)*2)*(q/2) + ((q&1) ? (base/2) : 0) + r
+		 * Since r < (base/2), r + (base/2) < base.
+		 * With q1 = (q/2) and r1 = r + ((q&1) ? (base/2) : 0)
+		 * n = ((base/2)*2)*q1 + r1 with r1 < base.
+		 */
+		if (q & 1)
+			r += base/2;
+		q >>= 1;
+		/*
+		 * Second transformation. ((base/2)*2) could have lost the
+		 * last bit.
+		 * n = ((base/2)*2)*q1 + r1
+		 *   = base*q1 - ((base&1) ? q1 : 0) + r1
+		 */
+		if (base & 1) {
+			int64_t rx = r - q;
+			/*
+			 * base is >= 2^31. The worst case for the while
+			 * loop is n=2^64-1 base=2^31+1. That gives a
+			 * maximum for q=(2^64-1)/2^31 = 0x1ffffffff. Since
+			 * base >= 2^31 the loop is finished after a maximum
+			 * of three iterations.
+			 */
+			while (rx < 0) {
+				rx += base;
+				q--;
+			}
+			r = rx;
+		}
+		*n = q;
+	}
+	return r;
+}
+
+#else /* MARCH_G5 */
+
+uint32_t __div64_32(uint64_t *n, uint32_t base)
+{
+	register uint32_t reg2 asm("2");
+	register uint32_t reg3 asm("3");
+	uint32_t *words = (uint32_t *) n;
+
+	reg2 = 0UL;
+	reg3 = words[0];
+	asm volatile(
+		"	dlr %0,%2\n"
+		: "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
+	words[0] = reg3;
+	reg3 = words[1];
+	asm volatile(
+		"	dlr %0,%2\n"
+		: "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
+	words[1] = reg3;
+	return reg2;
+}
+
+#endif /* MARCH_G5 */
+
+EXPORT_SYMBOL(__div64_32);
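On G5-class machines s390 has only a signed 64/32 bit divide (dr), and the comments above carry the whole correctness argument for the two correction steps. A plain C rendition of __div64_32's correction logic, handy for checking the math in user space — div64_31() here is an illustrative stand-in for the asm routine, using native division:

#include <assert.h>
#include <stdint.h>

/* Stand-in for the asm __div64_31(): requires base < 2^31. */
static uint32_t div64_31(uint64_t *n, uint32_t base)
{
	uint32_t r = *n % base;

	*n = *n / base;
	return r;
}

static uint32_t model_div64_32(uint64_t *n, uint32_t base)
{
	uint32_t r;

	/* For base >= 2^31 divide by base/2, then correct. */
	r = div64_31(n, ((int32_t) base < 0) ? base / 2 : base);
	if ((int32_t) base < 0) {
		uint64_t q = *n;

		/* First transformation: n = ((base/2)*2)*q1 + r1 */
		if (q & 1)
			r += base / 2;
		q >>= 1;
		/* Second transformation: (base/2)*2 == base - (base & 1) */
		if (base & 1) {
			int64_t rx = (int64_t) r - (int64_t) q;

			while (rx < 0) {	/* bounded; see comment above */
				rx += base;
				q--;
			}
			r = (uint32_t) rx;
		}
		*n = q;
	}
	return r;
}

int main(void)
{
	uint64_t n = 0xfedcba9876543210ULL, q = n;
	uint32_t base = 0x80000001U;
	uint32_t r = model_div64_32(&q, base);

	assert(q == n / base && r == n % base);
	return 0;
}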
diff --git a/arch/s390/lib/string.c b/arch/s390/lib/string.c
index 8240cc77e06e..ae5cf5d03d41 100644
--- a/arch/s390/lib/string.c
+++ b/arch/s390/lib/string.c
@@ -233,7 +233,7 @@ char * strrchr(const char * s, int c)
	       if (s[len] == (char) c)
		 return (char *) s + len;
	     } while (--len > 0);
-	return 0;
+	return NULL;
 }
 EXPORT_SYMBOL(strrchr);
 
@@ -267,7 +267,7 @@ char * strstr(const char * s1,const char * s2)
			return (char *) s1;
		s1++;
	}
-	return 0;
+	return NULL;
 }
 EXPORT_SYMBOL(strstr);
diff --git a/arch/s390/lib/uaccess.S b/arch/s390/lib/uaccess.S
deleted file mode 100644
index 5d59e2625048..000000000000
--- a/arch/s390/lib/uaccess.S
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- *  arch/s390/lib/uaccess.S
- *	__copy_{from|to}_user functions.
- *
- *  s390
- *  Copyright (C) 2000,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *  Authors(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- *  These functions have standard call interface
- */
-
-#include <linux/errno.h>
-#include <asm/lowcore.h>
-#include <asm/asm-offsets.h>
-
-	.text
-	.align 4
-	.globl __copy_from_user_asm
-	# %r2 = to, %r3 = n, %r4 = from
-__copy_from_user_asm:
-	slr	%r0,%r0
-0:	mvcp	0(%r3,%r2),0(%r4),%r0
-	jnz	1f
-	slr	%r2,%r2
-	br	%r14
-1:	la	%r2,256(%r2)
-	la	%r4,256(%r4)
-	ahi	%r3,-256
-2:	mvcp	0(%r3,%r2),0(%r4),%r0
-	jnz	1b
-3:	slr	%r2,%r2
-	br	%r14
-4:	lhi	%r0,-4096
-	lr	%r5,%r4
-	slr	%r5,%r0
-	nr	%r5,%r0		# %r5 = (%r4 + 4096) & -4096
-	slr	%r5,%r4		# %r5 = #bytes to next user page boundary
-	clr	%r3,%r5		# copy crosses next page boundary ?
-	jnh	6f		# no, the current page faulted
-	# move with the reduced length which is < 256
-5:	mvcp	0(%r5,%r2),0(%r4),%r0
-	slr	%r3,%r5
-6:	lr	%r2,%r3
-	br	%r14
-	.section __ex_table,"a"
-	.long	0b,4b
-	.long	2b,4b
-	.long	5b,6b
-	.previous
-
-	.align 4
-	.text
-	.globl __copy_to_user_asm
-	# %r2 = from, %r3 = n, %r4 = to
-__copy_to_user_asm:
-	slr	%r0,%r0
-0:	mvcs	0(%r3,%r4),0(%r2),%r0
-	jnz	1f
-	slr	%r2,%r2
-	br	%r14
-1:	la	%r2,256(%r2)
-	la	%r4,256(%r4)
-	ahi	%r3,-256
-2:	mvcs	0(%r3,%r4),0(%r2),%r0
-	jnz	1b
-3:	slr	%r2,%r2
-	br	%r14
-4:	lhi	%r0,-4096
-	lr	%r5,%r4
-	slr	%r5,%r0
-	nr	%r5,%r0		# %r5 = (%r4 + 4096) & -4096
-	slr	%r5,%r4		# %r5 = #bytes to next user page boundary
-	clr	%r3,%r5		# copy crosses next page boundary ?
-	jnh	6f		# no, the current page faulted
-	# move with the reduced length which is < 256
-5:	mvcs	0(%r5,%r4),0(%r2),%r0
-	slr	%r3,%r5
-6:	lr	%r2,%r3
-	br	%r14
-	.section __ex_table,"a"
-	.long	0b,4b
-	.long	2b,4b
-	.long	5b,6b
-	.previous
-
-	.align 4
-	.text
-	.globl __copy_in_user_asm
-	# %r2 = from, %r3 = n, %r4 = to
-__copy_in_user_asm:
-	sacf	256
-	bras	1,1f
-	mvc	0(1,%r4),0(%r2)
-0:	mvc	0(256,%r4),0(%r2)
-	la	%r2,256(%r2)
-	la	%r4,256(%r4)
-1:	ahi	%r3,-256
-	jnm	0b
-2:	ex	%r3,0(%r1)
-	sacf	0
-	slr	%r2,%r2
-	br	14
-3:	mvc	0(1,%r4),0(%r2)
-	la	%r2,1(%r2)
-	la	%r4,1(%r4)
-	ahi	%r3,-1
-	jnm	3b
-4:	lr	%r2,%r3
-	sacf	0
-	br	%r14
-	.section __ex_table,"a"
-	.long	0b,3b
-	.long	2b,3b
-	.long	3b,4b
-	.previous
-
-	.align 4
-	.text
-	.globl __clear_user_asm
-	# %r2 = to, %r3 = n
-__clear_user_asm:
-	bras	%r5,0f
-	.long	empty_zero_page
-0:	l	%r5,0(%r5)
-	slr	%r0,%r0
-1:	mvcs	0(%r3,%r2),0(%r5),%r0
-	jnz	2f
-	slr	%r2,%r2
-	br	%r14
-2:	la	%r2,256(%r2)
-	ahi	%r3,-256
-3:	mvcs	0(%r3,%r2),0(%r5),%r0
-	jnz	2b
-4:	slr	%r2,%r2
-	br	%r14
-5:	lhi	%r0,-4096
-	lr	%r4,%r2
-	slr	%r4,%r0
-	nr	%r4,%r0		# %r4 = (%r2 + 4096) & -4096
-	slr	%r4,%r2		# %r4 = #bytes to next user page boundary
-	clr	%r3,%r4		# clear crosses next page boundary ?
-	jnh	7f		# no, the current page faulted
-	# clear with the reduced length which is < 256
-6:	mvcs	0(%r4,%r2),0(%r5),%r0
-	slr	%r3,%r4
-7:	lr	%r2,%r3
-	br	%r14
-	.section __ex_table,"a"
-	.long	1b,5b
-	.long	3b,5b
-	.long	6b,7b
-	.previous
-
-	.align 4
-	.text
-	.globl __strncpy_from_user_asm
-	# %r2 = count, %r3 = dst, %r4 = src
-__strncpy_from_user_asm:
-	lhi	%r0,0
-	lr	%r1,%r4
-	la	%r4,0(%r4)	# clear high order bit from %r4
-	la	%r2,0(%r2,%r4)	# %r2 points to first byte after string
-	sacf	256
-0:	srst	%r2,%r1
-	jo	0b
-	sacf	0
-	lr	%r1,%r2
-	jh	1f		# \0 found in string ?
-	ahi	%r1,1		# include \0 in copy
-1:	slr	%r1,%r4		# %r1 = copy length (without \0)
-	slr	%r2,%r4		# %r2 = return length (including \0)
-2:	mvcp	0(%r1,%r3),0(%r4),%r0
-	jnz	3f
-	br	%r14
-3:	la	%r3,256(%r3)
-	la	%r4,256(%r4)
-	ahi	%r1,-256
-	mvcp	0(%r1,%r3),0(%r4),%r0
-	jnz	3b
-	br	%r14
-4:	sacf	0
-	lhi	%r2,-EFAULT
-	br	%r14
-	.section __ex_table,"a"
-	.long	0b,4b
-	.previous
-
-	.align 4
-	.text
-	.globl __strnlen_user_asm
-	# %r2 = count, %r3 = src
-__strnlen_user_asm:
-	lhi	%r0,0
-	lr	%r1,%r3
-	la	%r3,0(%r3)	# clear high order bit from %r4
-	la	%r2,0(%r2,%r3)	# %r2 points to first byte after string
-	sacf	256
-0:	srst	%r2,%r1
-	jo	0b
-	sacf	0
-	ahi	%r2,1		# strnlen_user result includes the \0
-				# or return count+1 if \0 not found
-	slr	%r2,%r3
-	br	%r14
-2:	sacf	0
-	slr	%r2,%r2		# return 0 on exception
-	br	%r14
-	.section __ex_table,"a"
-	.long	0b,2b
-	.previous
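All of the fixup paths above (labels 4:/5: in __copy_from_user_asm and friends) share one piece of arithmetic: on a fault, compute how many bytes remain up to the next 4 KB page boundary and retry the copy with the length capped to that distance; if the requested length already fit in the current page, nothing more can be copied. A small C model of that computation — the function name is illustrative:

#include <stddef.h>

#define PAGE_SIZE 4096UL

/* Mirrors: lhi %r0,-4096; lr %r5,%r4; slr %r5,%r0; nr %r5,%r0; slr %r5,%r4 */
static size_t bytes_to_page_boundary(unsigned long addr)
{
	return ((addr + PAGE_SIZE) & ~(PAGE_SIZE - 1)) - addr;
}

Note that an aligned address yields a full 4096 rather than 0, which is exactly what the retry logic wants: the fault must have been in the following page.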
diff --git a/arch/s390/lib/uaccess64.S b/arch/s390/lib/uaccess64.S
deleted file mode 100644
index 19b41a33c230..000000000000
--- a/arch/s390/lib/uaccess64.S
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- *  arch/s390x/lib/uaccess.S
- *	__copy_{from|to}_user functions.
- *
- *  s390
- *  Copyright (C) 2000,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *  Authors(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- *  These functions have standard call interface
- */
-
-#include <linux/errno.h>
-#include <asm/lowcore.h>
-#include <asm/asm-offsets.h>
-
-	.text
-	.align 4
-	.globl __copy_from_user_asm
-	# %r2 = to, %r3 = n, %r4 = from
-__copy_from_user_asm:
-	slgr	%r0,%r0
-0:	mvcp	0(%r3,%r2),0(%r4),%r0
-	jnz	1f
-	slgr	%r2,%r2
-	br	%r14
-1:	la	%r2,256(%r2)
-	la	%r4,256(%r4)
-	aghi	%r3,-256
-2:	mvcp	0(%r3,%r2),0(%r4),%r0
-	jnz	1b
-3:	slgr	%r2,%r2
-	br	%r14
-4:	lghi	%r0,-4096
-	lgr	%r5,%r4
-	slgr	%r5,%r0
-	ngr	%r5,%r0		# %r5 = (%r4 + 4096) & -4096
-	slgr	%r5,%r4		# %r5 = #bytes to next user page boundary
-	clgr	%r3,%r5		# copy crosses next page boundary ?
-	jnh	6f		# no, the current page faulted
-	# move with the reduced length which is < 256
-5:	mvcp	0(%r5,%r2),0(%r4),%r0
-	slgr	%r3,%r5
-6:	lgr	%r2,%r3
-	br	%r14
-	.section __ex_table,"a"
-	.quad	0b,4b
-	.quad	2b,4b
-	.quad	5b,6b
-	.previous
-
-	.align 4
-	.text
-	.globl __copy_to_user_asm
-	# %r2 = from, %r3 = n, %r4 = to
-__copy_to_user_asm:
-	slgr	%r0,%r0
-0:	mvcs	0(%r3,%r4),0(%r2),%r0
-	jnz	1f
-	slgr	%r2,%r2
-	br	%r14
-1:	la	%r2,256(%r2)
-	la	%r4,256(%r4)
-	aghi	%r3,-256
-2:	mvcs	0(%r3,%r4),0(%r2),%r0
-	jnz	1b
-3:	slgr	%r2,%r2
-	br	%r14
-4:	lghi	%r0,-4096
-	lgr	%r5,%r4
-	slgr	%r5,%r0
-	ngr	%r5,%r0		# %r5 = (%r4 + 4096) & -4096
-	slgr	%r5,%r4		# %r5 = #bytes to next user page boundary
-	clgr	%r3,%r5		# copy crosses next page boundary ?
-	jnh	6f		# no, the current page faulted
-	# move with the reduced length which is < 256
-5:	mvcs	0(%r5,%r4),0(%r2),%r0
-	slgr	%r3,%r5
-6:	lgr	%r2,%r3
-	br	%r14
-	.section __ex_table,"a"
-	.quad	0b,4b
-	.quad	2b,4b
-	.quad	5b,6b
-	.previous
-
-	.align 4
-	.text
-	.globl __copy_in_user_asm
-	# %r2 = from, %r3 = n, %r4 = to
-__copy_in_user_asm:
-	sacf	256
-	bras	1,1f
-	mvc	0(1,%r4),0(%r2)
-0:	mvc	0(256,%r4),0(%r2)
-	la	%r2,256(%r2)
-	la	%r4,256(%r4)
-1:	aghi	%r3,-256
-	jnm	0b
-2:	ex	%r3,0(%r1)
-	sacf	0
-	slgr	%r2,%r2
-	br	14
-3:	mvc	0(1,%r4),0(%r2)
-	la	%r2,1(%r2)
-	la	%r4,1(%r4)
-	aghi	%r3,-1
-	jnm	3b
-4:	lgr	%r2,%r3
-	sacf	0
-	br	%r14
-	.section __ex_table,"a"
-	.quad	0b,3b
-	.quad	2b,3b
-	.quad	3b,4b
-	.previous
-
-	.align 4
-	.text
-	.globl __clear_user_asm
-	# %r2 = to, %r3 = n
-__clear_user_asm:
-	slgr	%r0,%r0
-	larl	%r5,empty_zero_page
-1:	mvcs	0(%r3,%r2),0(%r5),%r0
-	jnz	2f
-	slgr	%r2,%r2
-	br	%r14
-2:	la	%r2,256(%r2)
-	aghi	%r3,-256
-3:	mvcs	0(%r3,%r2),0(%r5),%r0
-	jnz	2b
-4:	slgr	%r2,%r2
-	br	%r14
-5:	lghi	%r0,-4096
-	lgr	%r4,%r2
-	slgr	%r4,%r0
-	ngr	%r4,%r0		# %r4 = (%r2 + 4096) & -4096
-	slgr	%r4,%r2		# %r4 = #bytes to next user page boundary
-	clgr	%r3,%r4		# clear crosses next page boundary ?
-	jnh	7f		# no, the current page faulted
-	# clear with the reduced length which is < 256
-6:	mvcs	0(%r4,%r2),0(%r5),%r0
-	slgr	%r3,%r4
-7:	lgr	%r2,%r3
-	br	%r14
-	.section __ex_table,"a"
-	.quad	1b,5b
-	.quad	3b,5b
-	.quad	6b,7b
-	.previous
-
-	.align 4
-	.text
-	.globl __strncpy_from_user_asm
-	# %r2 = count, %r3 = dst, %r4 = src
-__strncpy_from_user_asm:
-	lghi	%r0,0
-	lgr	%r1,%r4
-	la	%r2,0(%r2,%r4)	# %r2 points to first byte after string
-	sacf	256
-0:	srst	%r2,%r1
-	jo	0b
-	sacf	0
-	lgr	%r1,%r2
-	jh	1f		# \0 found in string ?
-	aghi	%r1,1		# include \0 in copy
-1:	slgr	%r1,%r4		# %r1 = copy length (without \0)
-	slgr	%r2,%r4		# %r2 = return length (including \0)
-2:	mvcp	0(%r1,%r3),0(%r4),%r0
-	jnz	3f
-	br	%r14
-3:	la	%r3,256(%r3)
-	la	%r4,256(%r4)
-	aghi	%r1,-256
-	mvcp	0(%r1,%r3),0(%r4),%r0
-	jnz	3b
-	br	%r14
-4:	sacf	0
-	lghi	%r2,-EFAULT
-	br	%r14
-	.section __ex_table,"a"
-	.quad	0b,4b
-	.previous
-
-	.align 4
-	.text
-	.globl __strnlen_user_asm
-	# %r2 = count, %r3 = src
-__strnlen_user_asm:
-	lghi	%r0,0
-	lgr	%r1,%r3
-	la	%r2,0(%r2,%r3)	# %r2 points to first byte after string
-	sacf	256
-0:	srst	%r2,%r1
-	jo	0b
-	sacf	0
-	aghi	%r2,1		# strnlen_user result includes the \0
-				# or return count+1 if \0 not found
-	slgr	%r2,%r3
-	br	%r14
-2:	sacf	0
-	slgr	%r2,%r2		# return 0 on exception
-	br	%r14
-	.section __ex_table,"a"
-	.quad	0b,2b
-	.previous
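Both uaccess.S variants implement strnlen_user with srst (search string) over [src, src+count) in the secondary address space, and the convention they implement is easy to miss: the result counts the terminating \0, and degrades to count+1 when no terminator is found within count bytes. A C model of just that convention, with fault handling omitted and an illustrative name:

#include <stddef.h>

static size_t model_strnlen_user(size_t count, const char *src)
{
	size_t i;

	for (i = 0; i < count; i++)	/* what srst does in hardware */
		if (src[i] == '\0')
			return i + 1;	/* length including the '\0' */
	return count + 1;		/* no terminator found */
}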
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
new file mode 100644
index 000000000000..121b2935a422
--- /dev/null
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -0,0 +1,166 @@
+/*
+ *  arch/s390/lib/uaccess_mvcos.c
+ *
+ *  Optimized user space space access functions based on mvcos.
+ *
+ *  Copyright (C) IBM Corp. 2006
+ *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *	       Gerald Schaefer (gerald.schaefer@de.ibm.com)
+ */
+
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <asm/uaccess.h>
+#include <asm/futex.h>
+
+#ifndef __s390x__
+#define AHI	"ahi"
+#define ALR	"alr"
+#define CLR	"clr"
+#define LHI	"lhi"
+#define SLR	"slr"
+#else
+#define AHI	"aghi"
+#define ALR	"algr"
+#define CLR	"clgr"
+#define LHI	"lghi"
+#define SLR	"slgr"
+#endif
+
+size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
+{
+	register unsigned long reg0 asm("0") = 0x81UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
+		"   jz    7f\n"
+		"1:"ALR"  %0,%3\n"
+		"  "SLR"  %1,%3\n"
+		"  "SLR"  %2,%3\n"
+		"   j     0b\n"
+		"2: la    %4,4095(%1)\n"/* %4 = ptr + 4095 */
+		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
+		"  "SLR"  %4,%1\n"
+		"  "CLR"  %0,%4\n"	/* copy crosses next page boundary? */
+		"   jnh   4f\n"
+		"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
+		"  "SLR"  %0,%4\n"
+		"  "ALR"  %2,%4\n"
+		"4:"LHI"  %4,-1\n"
+		"  "ALR"  %4,%0\n"	/* copy remaining size, subtract 1 */
+		"   bras  %3,6f\n"	/* memset loop */
+		"   xc    0(1,%2),0(%2)\n"
+		"5: xc    0(256,%2),0(%2)\n"
+		"   la    %2,256(%2)\n"
+		"6:"AHI"  %4,-256\n"
+		"   jnm   5b\n"
+		"   ex    %4,0(%3)\n"
+		"   j     8f\n"
+		"7:"SLR"  %0,%0\n"
+		"8: \n"
+		EX_TABLE(0b,2b) EX_TABLE(3b,4b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
+{
+	register unsigned long reg0 asm("0") = 0x810000UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
+		"   jz    4f\n"
+		"1:"ALR"  %0,%3\n"
+		"  "SLR"  %1,%3\n"
+		"  "SLR"  %2,%3\n"
+		"   j     0b\n"
+		"2: la    %4,4095(%1)\n"/* %4 = ptr + 4095 */
+		"   nr    %4,%3\n"	/* %4 = (ptr + 4095) & -4096 */
+		"  "SLR"  %4,%1\n"
+		"  "CLR"  %0,%4\n"	/* copy crosses next page boundary? */
+		"   jnh   5f\n"
+		"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
+		"  "SLR"  %0,%4\n"
+		"   j     5f\n"
+		"4:"SLR"  %0,%0\n"
+		"5: \n"
+		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from)
+{
+	register unsigned long reg0 asm("0") = 0x810081UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	/* FIXME: copy with reduced length. */
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
+		"   jz    2f\n"
+		"1:"ALR"  %0,%3\n"
+		"  "SLR"  %1,%3\n"
+		"  "SLR"  %2,%3\n"
+		"   j     0b\n"
+		"2:"SLR"  %0,%0\n"
+		"3: \n"
+		EX_TABLE(0b,3b)
+		: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+size_t clear_user_mvcos(size_t size, void __user *to)
+{
+	register unsigned long reg0 asm("0") = 0x810000UL;
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -4096UL;
+	asm volatile(
+		"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
+		"   jz    4f\n"
+		"1:"ALR"  %0,%2\n"
+		"  "SLR"  %1,%2\n"
+		"   j     0b\n"
+		"2: la    %3,4095(%1)\n"/* %4 = to + 4095 */
+		"   nr    %3,%2\n"	/* %4 = (to + 4095) & -4096 */
+		"  "SLR"  %3,%1\n"
+		"  "CLR"  %0,%3\n"	/* copy crosses next page boundary? */
+		"   jnh   5f\n"
+		"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
+		"  "SLR"  %0,%3\n"
+		"   j     5f\n"
+		"4:"SLR"  %0,%0\n"
+		"5: \n"
+		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
+		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
+		: "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+extern size_t copy_from_user_std_small(size_t, const void __user *, void *);
+extern size_t copy_to_user_std_small(size_t, void __user *, const void *);
+extern size_t strnlen_user_std(size_t, const char __user *);
+extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
+extern int futex_atomic_op(int, int __user *, int, int *);
+extern int futex_atomic_cmpxchg(int __user *, int, int);
+
+struct uaccess_ops uaccess_mvcos = {
+	.copy_from_user = copy_from_user_mvcos,
+	.copy_from_user_small = copy_from_user_std_small,
+	.copy_to_user = copy_to_user_mvcos,
+	.copy_to_user_small = copy_to_user_std_small,
+	.copy_in_user = copy_in_user_mvcos,
+	.clear_user = clear_user_mvcos,
+	.strnlen_user = strnlen_user_std,
+	.strncpy_from_user = strncpy_from_user_std,
+	.futex_atomic_op = futex_atomic_op,
+	.futex_atomic_cmpxchg = futex_atomic_cmpxchg,
+};
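The ".insn ss,0xc80000000000,..." lines hand-assemble MVCOS (move with optional specifications), which did not yet have a mnemonic in binutils. As the three routines show, the value loaded into register 0 selects which operand lives in the user address space: 0x81 puts the source there (copy_from), 0x810000 the destination (copy_to and clear), 0x810081 both (copy_in); each routine returns the number of bytes it could not transfer. A hedged sketch of how the uaccess_ops table is meant to be consumed — the machine_has_mvcos() probe, the uaccess pointer, and the init hook are illustrative assumptions, not code from this commit:

extern struct uaccess_ops uaccess_mvcos;	/* from uaccess_mvcos.c */
extern struct uaccess_ops uaccess_std;		/* from uaccess_std.c  */

static struct uaccess_ops *uaccess = &uaccess_std;

/* Illustrative probe; the real facility test is not part of this diff. */
static int machine_has_mvcos(void)
{
	return 0;
}

void uaccess_init(void)
{
	if (machine_has_mvcos())
		uaccess = &uaccess_mvcos;
}

static inline size_t
sketch_copy_from_user(void *to, const void __user *from, size_t n)
{
	/* the ops return the number of bytes that were NOT copied */
	return uaccess->copy_from_user(n, from, to);
}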
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
new file mode 100644
index 000000000000..f44f0078b354
--- /dev/null
+++ b/arch/s390/lib/uaccess_std.c
@@ -0,0 +1,356 @@
+/*
+ *  arch/s390/lib/uaccess_std.c
+ *
+ *  Standard user space access functions based on mvcp/mvcs and doing
+ *  interesting things in the secondary space mode.
+ *
+ *  Copyright (C) IBM Corp. 2006
+ *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ *	       Gerald Schaefer (gerald.schaefer@de.ibm.com)
+ */
+
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <asm/uaccess.h>
+#include <asm/futex.h>
+
+#ifndef __s390x__
+#define AHI	"ahi"
+#define ALR	"alr"
+#define CLR	"clr"
+#define LHI	"lhi"
+#define SLR	"slr"
+#else
+#define AHI	"aghi"
+#define ALR	"algr"
+#define CLR	"clgr"
+#define LHI	"lghi"
+#define SLR	"slgr"
+#endif
+
+size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
+{
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -256UL;
+	asm volatile(
+		"0: mvcp  0(%0,%2),0(%1),%3\n"
+		"   jz    8f\n"
+		"1:"ALR"  %0,%3\n"
+		"   la    %1,256(%1)\n"
+		"   la    %2,256(%2)\n"
+		"2: mvcp  0(%0,%2),0(%1),%3\n"
+		"   jnz   1b\n"
+		"   j     8f\n"
+		"3: la    %4,255(%1)\n"	/* %4 = ptr + 255 */
+		"  "LHI"  %3,-4096\n"
+		"   nr    %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
+		"  "SLR"  %4,%1\n"
+		"  "CLR"  %0,%4\n"	/* copy crosses next page boundary? */
+		"   jnh   5f\n"
+		"4: mvcp  0(%4,%2),0(%1),%3\n"
+		"  "SLR"  %0,%4\n"
+		"  "ALR"  %2,%4\n"
+		"5:"LHI"  %4,-1\n"
+		"  "ALR"  %4,%0\n"	/* copy remaining size, subtract 1 */
+		"   bras  %3,7f\n"	/* memset loop */
+		"   xc    0(1,%2),0(%2)\n"
+		"6: xc    0(256,%2),0(%2)\n"
+		"   la    %2,256(%2)\n"
+		"7:"AHI"  %4,-256\n"
+		"   jnm   6b\n"
+		"   ex    %4,0(%3)\n"
+		"   j     9f\n"
+		"8:"SLR"  %0,%0\n"
+		"9: \n"
+		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+size_t copy_from_user_std_small(size_t size, const void __user *ptr, void *x)
+{
+	unsigned long tmp1, tmp2;
+
+	tmp1 = 0UL;
+	asm volatile(
+		"0: mvcp  0(%0,%2),0(%1),%3\n"
+		"  "SLR"  %0,%0\n"
+		"   j     5f\n"
+		"1: la    %4,255(%1)\n"	/* %4 = ptr + 255 */
+		"  "LHI"  %3,-4096\n"
+		"   nr    %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
+		"  "SLR"  %4,%1\n"
+		"  "CLR"  %0,%4\n"	/* copy crosses next page boundary? */
+		"   jnh   5f\n"
+		"2: mvcp  0(%4,%2),0(%1),%3\n"
+		"  "SLR"  %0,%4\n"
+		"  "ALR"  %2,%4\n"
+		"3:"LHI"  %4,-1\n"
+		"  "ALR"  %4,%0\n"	/* copy remaining size, subtract 1 */
+		"   bras  %3,4f\n"
+		"   xc    0(1,%2),0(%2)\n"
+		"4: ex    %4,0(%3)\n"
+		"5:\n"
+		EX_TABLE(0b,1b) EX_TABLE(2b,3b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
+{
+	unsigned long tmp1, tmp2;
+
+	tmp1 = -256UL;
+	asm volatile(
+		"0: mvcs  0(%0,%1),0(%2),%3\n"
+		"   jz    5f\n"
+		"1:"ALR"  %0,%3\n"
+		"   la    %1,256(%1)\n"
+		"   la    %2,256(%2)\n"
+		"2: mvcs  0(%0,%1),0(%2),%3\n"
+		"   jnz   1b\n"
+		"   j     5f\n"
+		"3: la    %4,255(%1)\n"	/* %4 = ptr + 255 */
+		"  "LHI"  %3,-4096\n"
+		"   nr    %4,%3\n"	/* %4 = (ptr + 255) & -4096 */
+		"  "SLR"  %4,%1\n"
+		"  "CLR"  %0,%4\n"	/* copy crosses next page boundary? */
+		"   jnh   6f\n"
+		"4: mvcs  0(%4,%1),0(%2),%3\n"
+		"  "SLR"  %0,%4\n"
+		"   j     6f\n"
+		"5:"SLR"  %0,%0\n"
+		"6: \n"
+		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+size_t copy_to_user_std_small(size_t size, void __user *ptr, const void *x)
+{
+	unsigned long tmp1, tmp2;
+
+	tmp1 = 0UL;
+	asm volatile(
+		"0: mvcs  0(%0,%1),0(%2),%3\n"
+		"  "SLR"  %0,%0\n"
+		"   j     3f\n"
+		"1: la    %4,255(%1)\n"	/* ptr + 255 */
+		"  "LHI"  %3,-4096\n"
+		"   nr    %4,%3\n"	/* (ptr + 255) & -4096UL */
+		"  "SLR"  %4,%1\n"
+		"  "CLR"  %0,%4\n"	/* copy crosses next page boundary? */
+		"   jnh   3f\n"
+		"2: mvcs  0(%4,%1),0(%2),%3\n"
+		"  "SLR"  %0,%4\n"
+		"3:\n"
+		EX_TABLE(0b,1b) EX_TABLE(2b,3b)
+		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+size_t copy_in_user_std(size_t size, void __user *to, const void __user *from)
+{
+	unsigned long tmp1;
+
+	asm volatile(
+		"  "AHI"  %0,-1\n"
+		"   jo    5f\n"
+		"   sacf  256\n"
+		"   bras  %3,3f\n"
+		"0:"AHI"  %0,257\n"
+		"1: mvc   0(1,%1),0(%2)\n"
+		"   la    %1,1(%1)\n"
+		"   la    %2,1(%2)\n"
+		"  "AHI"  %0,-1\n"
+		"   jnz   1b\n"
+		"   j     5f\n"
+		"2: mvc   0(256,%1),0(%2)\n"
+		"   la    %1,256(%1)\n"
+		"   la    %2,256(%2)\n"
+		"3:"AHI"  %0,-256\n"
+		"   jnm   2b\n"
+		"4: ex    %0,1b-0b(%3)\n"
+		"   sacf  0\n"
+		"5: "SLR"  %0,%0\n"
+		"6:\n"
+		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+		: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
+		: : "cc", "memory");
+	return size;
+}
+
+size_t clear_user_std(size_t size, void __user *to)
+{
+	unsigned long tmp1, tmp2;
+
+	asm volatile(
+		"  "AHI"  %0,-1\n"
+		"   jo    5f\n"
+		"   sacf  256\n"
+		"   bras  %3,3f\n"
+		"   xc    0(1,%1),0(%1)\n"
+		"0:"AHI"  %0,257\n"
+		"   la    %2,255(%1)\n"	/* %2 = ptr + 255 */
+		"   srl   %2,12\n"
+		"   sll   %2,12\n"	/* %2 = (ptr + 255) & -4096 */
+		"  "SLR"  %2,%1\n"
+		"  "CLR"  %0,%2\n"	/* clear crosses next page boundary? */
+		"   jnh   5f\n"
+		"  "AHI"  %2,-1\n"
+		"1: ex    %2,0(%3)\n"
+		"  "AHI"  %2,1\n"
+		"  "SLR"  %0,%2\n"
+		"   j     5f\n"
+		"2: xc    0(256,%1),0(%1)\n"
+		"   la    %1,256(%1)\n"
+		"3:"AHI"  %0,-256\n"
+		"   jnm   2b\n"
+		"4: ex    %0,0(%3)\n"
+		"   sacf  0\n"
+		"5: "SLR"  %0,%0\n"
+		"6:\n"
+		EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+		: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
+		: : "cc", "memory");
+	return size;
+}
+
+size_t strnlen_user_std(size_t size, const char __user *src)
+{
+	register unsigned long reg0 asm("0") = 0UL;
+	unsigned long tmp1, tmp2;
+
+	asm volatile(
+		"   la    %2,0(%1)\n"
+		"   la    %3,0(%0,%1)\n"
+		"  "SLR"  %0,%0\n"
+		"   sacf  256\n"
+		"0: srst  %3,%2\n"
+		"   jo    0b\n"
+		"   la    %0,1(%3)\n"	/* strnlen_user results includes \0 */
+		"  "SLR"  %0,%1\n"
+		"1: sacf  0\n"
+		EX_TABLE(0b,1b)
+		: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
+		: "d" (reg0) : "cc", "memory");
+	return size;
+}
+
+size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
+{
+	register unsigned long reg0 asm("0") = 0UL;
+	unsigned long tmp1, tmp2;
+
+	asm volatile(
+		"   la    %3,0(%1)\n"
+		"   la    %4,0(%0,%1)\n"
+		"   sacf  256\n"
+		"0: srst  %4,%3\n"
+		"   jo    0b\n"
+		"   sacf  0\n"
+		"   la    %0,0(%4)\n"
+		"   jh    1f\n"		/* found \0 in string ? */
+		"  "AHI"  %4,1\n"	/* include \0 in copy */
+		"1:"SLR"  %0,%1\n"	/* %0 = return length (without \0) */
+		"  "SLR"  %4,%1\n"	/* %4 = copy length (including \0) */
+		"2: mvcp  0(%4,%2),0(%1),%5\n"
+		"   jz    9f\n"
+		"3:"AHI"  %4,-256\n"
+		"   la    %1,256(%1)\n"
+		"   la    %2,256(%2)\n"
+		"4: mvcp  0(%4,%2),0(%1),%5\n"
+		"   jnz   3b\n"
+		"   j     9f\n"
+		"7: sacf  0\n"
+		"8:"LHI"  %0,%6\n"
+		"9:\n"
+		EX_TABLE(0b,7b) EX_TABLE(2b,8b) EX_TABLE(4b,8b)
+		: "+a" (size), "+a" (src), "+d" (dst), "=a" (tmp1), "=a" (tmp2)
+		: "d" (reg0), "K" (-EFAULT) : "cc", "memory");
+	return size;
+}
+
+#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg)	\
+	asm volatile(							\
+		"   sacf 256\n"						\
+		"0: l   %1,0(%6)\n"					\
+		"1:"insn						\
+		"2: cs  %1,%2,0(%6)\n"					\
+		"3: jl  1b\n"						\
+		"   lhi %0,0\n"						\
+		"4: sacf 0\n"						\
+		EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)		\
+		: "=d" (ret), "=&d" (oldval), "=&d" (newval),		\
+		  "=m" (*uaddr)						\
+		: "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
+		  "m" (*uaddr) : "cc");
+
+int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
+{
+	int oldval = 0, newval, ret;
+
+	inc_preempt_count();
+
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_atomic_op("lr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ADD:
+		__futex_atomic_op("lr %2,%1\nar %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op("lr %2,%1\nor %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op("lr %2,%1\nnr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op("lr %2,%1\nxr %2,%5\n",
+				  ret, oldval, newval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+	dec_preempt_count();
+	*old = oldval;
+	return ret;
+}
+
+int futex_atomic_cmpxchg(int __user *uaddr, int oldval, int newval)
+{
+	int ret;
+
+	asm volatile(
+		"   sacf 256\n"
+		"   cs   %1,%4,0(%5)\n"
+		"0: lr   %0,%1\n"
+		"1: sacf 0\n"
+		EX_TABLE(0b,1b)
+		: "=d" (ret), "+d" (oldval), "=m" (*uaddr)
+		: "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
+		: "cc", "memory" );
+	return ret;
+}
+
+struct uaccess_ops uaccess_std = {
+	.copy_from_user = copy_from_user_std,
+	.copy_from_user_small = copy_from_user_std_small,
+	.copy_to_user = copy_to_user_std,
+	.copy_to_user_small = copy_to_user_std_small,
+	.copy_in_user = copy_in_user_std,
+	.clear_user = clear_user_std,
+	.strnlen_user = strnlen_user_std,
+	.strncpy_from_user = strncpy_from_user_std,
+	.futex_atomic_op = futex_atomic_op,
+	.futex_atomic_cmpxchg = futex_atomic_cmpxchg,
+};
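__futex_atomic_op wraps each futex operation in a load / modify / compare-and-swap retry loop (labels 0: through 3: above), executed against the user page while in the secondary address space. The same control flow in plain C, with the GCC __sync builtin standing in for the cs instruction, fault handling omitted, and an illustrative name:

static int model_futex_add(int *uaddr, int oparg, int *old)
{
	int oldval, newval;

	do {
		oldval = *uaddr;		/* 0: l  %1,0(%6)      */
		newval = oldval + oparg;	/* 1: lr %2,%1; ar %2,%5 */
		/* 2: cs %1,%2,0(%6); 3: jl 1b */
	} while (!__sync_bool_compare_and_swap(uaddr, oldval, newval));
	*old = oldval;
	return 0;
}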