Diffstat (limited to 'arch/s390/lib')
-rw-r--r--  arch/s390/lib/Makefile    |   3
-rw-r--r--  arch/s390/lib/div64.c     | 147
-rw-r--r--  arch/s390/lib/mem32.S     |  92
-rw-r--r--  arch/s390/lib/qrnnd.S     |  78
-rw-r--r--  arch/s390/lib/uaccess.c   | 136
-rw-r--r--  arch/s390/lib/ucmpdi2.c   |  26
6 files changed, 62 insertions, 420 deletions
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index a01df233856f..15536da68e18 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -3,8 +3,7 @@
#
lib-y += delay.o string.o uaccess.o find.o
-obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
-obj-$(CONFIG_64BIT) += mem64.o
+obj-y += mem64.o
lib-$(CONFIG_SMP) += spinlock.o
lib-$(CONFIG_KPROBES) += probes.o
lib-$(CONFIG_UPROBES) += probes.o
diff --git a/arch/s390/lib/div64.c b/arch/s390/lib/div64.c
deleted file mode 100644
index 261152f83242..000000000000
--- a/arch/s390/lib/div64.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * __div64_32 implementation for 31 bit.
- *
- * Copyright IBM Corp. 2006
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-
-#ifdef CONFIG_MARCH_G5
-
-/*
- * Function to divide an unsigned 64 bit integer by an unsigned
- * 31 bit integer using signed 64/32 bit division.
- */
-static uint32_t __div64_31(uint64_t *n, uint32_t base)
-{
- register uint32_t reg2 asm("2");
- register uint32_t reg3 asm("3");
- uint32_t *words = (uint32_t *) n;
- uint32_t tmp;
-
- /* Special case base==1, remainder = 0, quotient = n */
- if (base == 1)
- return 0;
- /*
- * Special case base==0 will cause a fixed point divide exception
- * on the dr instruction and may not happen anyway. For the
- * following calculation we can assume base > 1. The first
- * signed 64 / 32 bit division with an upper half of 0 will
- * give the correct upper half of the 64 bit quotient.
- */
- reg2 = 0UL;
- reg3 = words[0];
- asm volatile(
- " dr %0,%2\n"
- : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
- words[0] = reg3;
- reg3 = words[1];
- /*
- * To get the lower half of the 64 bit quotient and the 32 bit
- * remainder we have to use a little trick. Since we only have
- * a signed division the quotient can get too big. To avoid this
- * the 64 bit dividend is halved, then the signed division will
- * work. Afterwards the quotient and the remainder are doubled.
- * If the last bit of the dividend has been one the remainder
- * is increased by one then checked against the base. If the
- * remainder has overflown subtract base and increase the
- * quotient. Simple, no ?
- */
- asm volatile(
- " nr %2,%1\n"
- " srdl %0,1\n"
- " dr %0,%3\n"
- " alr %0,%0\n"
- " alr %1,%1\n"
- " alr %0,%2\n"
- " clr %0,%3\n"
- " jl 0f\n"
- " slr %0,%3\n"
- " ahi %1,1\n"
- "0:\n"
- : "+d" (reg2), "+d" (reg3), "=d" (tmp)
- : "d" (base), "2" (1UL) : "cc" );
- words[1] = reg3;
- return reg2;
-}
-
-/*
- * Function to divide an unsigned 64 bit integer by an unsigned
- * 32 bit integer using the unsigned 64/31 bit division.
- */
-uint32_t __div64_32(uint64_t *n, uint32_t base)
-{
- uint32_t r;
-
- /*
- * If the most significant bit of base is set, divide n by
- * (base/2). That allows to use 64/31 bit division and gives a
- * good approximation of the result: n = (base/2)*q + r. The
- * result needs to be corrected with two simple transformations.
- * If base is already < 2^31-1 __div64_31 can be used directly.
- */
- r = __div64_31(n, ((signed) base < 0) ? (base/2) : base);
- if ((signed) base < 0) {
- uint64_t q = *n;
- /*
- * First transformation:
- * n = (base/2)*q + r
- * = ((base/2)*2)*(q/2) + ((q&1) ? (base/2) : 0) + r
- * Since r < (base/2), r + (base/2) < base.
- * With q1 = (q/2) and r1 = r + ((q&1) ? (base/2) : 0)
- * n = ((base/2)*2)*q1 + r1 with r1 < base.
- */
- if (q & 1)
- r += base/2;
- q >>= 1;
- /*
- * Second transformation. ((base/2)*2) could have lost the
- * last bit.
- * n = ((base/2)*2)*q1 + r1
- * = base*q1 - ((base&1) ? q1 : 0) + r1
- */
- if (base & 1) {
- int64_t rx = r - q;
- /*
- * base is >= 2^31. The worst case for the while
- * loop is n=2^64-1 base=2^31+1. That gives a
- * maximum for q=(2^64-1)/2^31 = 0x1ffffffff. Since
- * base >= 2^31 the loop is finished after a maximum
- * of three iterations.
- */
- while (rx < 0) {
- rx += base;
- q--;
- }
- r = rx;
- }
- *n = q;
- }
- return r;
-}
-
-#else /* MARCH_G5 */
-
-uint32_t __div64_32(uint64_t *n, uint32_t base)
-{
- register uint32_t reg2 asm("2");
- register uint32_t reg3 asm("3");
- uint32_t *words = (uint32_t *) n;
-
- reg2 = 0UL;
- reg3 = words[0];
- asm volatile(
- " dlr %0,%2\n"
- : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
- words[0] = reg3;
- reg3 = words[1];
- asm volatile(
- " dlr %0,%2\n"
- : "+d" (reg2), "+d" (reg3) : "d" (base) : "cc" );
- words[1] = reg3;
- return reg2;
-}
-
-#endif /* MARCH_G5 */
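
The correction steps in the deleted __div64_32 are easier to follow in plain C. The sketch below is a host-side model only: it covers the base >= 2^31 path, substitutes an ordinary 64-bit division for __div64_31, and uses an illustrative name (div64_32_model) that is not kernel code.

#include <assert.h>
#include <stdint.h>

/* Host-side model of the deleted correction path (base has its top bit set). */
static uint32_t div64_32_model(uint64_t *n, uint32_t base)
{
	uint64_t q, r;

	assert(base & 0x80000000u);	/* model covers only the corrected path */
	q = *n / (base / 2);		/* stands in for __div64_31(n, base/2) */
	r = *n % (base / 2);

	/* First transformation: n = ((base/2)*2)*q1 + r1 with r1 < base. */
	if (q & 1)
		r += base / 2;
	q >>= 1;

	/* Second transformation: ((base/2)*2) may have dropped base's low bit. */
	if (base & 1) {
		int64_t rx = (int64_t)r - (int64_t)q;

		while (rx < 0) {	/* only a few rounds since base >= 2^31 */
			rx += base;
			q--;
		}
		r = (uint64_t)rx;
	}
	*n = q;
	return (uint32_t)r;
}

For any dividend and any base with the top bit set, the returned remainder and the updated *n satisfy dividend == (*n) * base + remainder, which is the simplest way to sanity-check the two transformations.
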
diff --git a/arch/s390/lib/mem32.S b/arch/s390/lib/mem32.S
deleted file mode 100644
index 14ca9244b615..000000000000
--- a/arch/s390/lib/mem32.S
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * String handling functions.
- *
- * Copyright IBM Corp. 2012
- */
-
-#include <linux/linkage.h>
-
-/*
- * memset implementation
- *
- * This code corresponds to the C construct below. We do distinguish
- * between clearing (c == 0) and setting a memory array (c != 0) simply
- * because nearly all memset invocations in the kernel clear memory and
- * the xc instruction is preferred in such cases.
- *
- * void *memset(void *s, int c, size_t n)
- * {
- * if (likely(c == 0))
- * return __builtin_memset(s, 0, n);
- * return __builtin_memset(s, c, n);
- * }
- */
-ENTRY(memset)
- basr %r5,%r0
-.Lmemset_base:
- ltr %r4,%r4
- bzr %r14
- ltr %r3,%r3
- jnz .Lmemset_fill
- ahi %r4,-1
- lr %r3,%r4
- srl %r3,8
- ltr %r3,%r3
- lr %r1,%r2
- je .Lmemset_clear_rest
-.Lmemset_clear_loop:
- xc 0(256,%r1),0(%r1)
- la %r1,256(%r1)
- brct %r3,.Lmemset_clear_loop
-.Lmemset_clear_rest:
- ex %r4,.Lmemset_xc-.Lmemset_base(%r5)
- br %r14
-.Lmemset_fill:
- stc %r3,0(%r2)
- chi %r4,1
- lr %r1,%r2
- ber %r14
- ahi %r4,-2
- lr %r3,%r4
- srl %r3,8
- ltr %r3,%r3
- je .Lmemset_fill_rest
-.Lmemset_fill_loop:
- mvc 1(256,%r1),0(%r1)
- la %r1,256(%r1)
- brct %r3,.Lmemset_fill_loop
-.Lmemset_fill_rest:
- ex %r4,.Lmemset_mvc-.Lmemset_base(%r5)
- br %r14
-.Lmemset_xc:
- xc 0(1,%r1),0(%r1)
-.Lmemset_mvc:
- mvc 1(1,%r1),0(%r1)
-
-/*
- * memcpy implementation
- *
- * void *memcpy(void *dest, const void *src, size_t n)
- */
-ENTRY(memcpy)
- basr %r5,%r0
-.Lmemcpy_base:
- ltr %r4,%r4
- bzr %r14
- ahi %r4,-1
- lr %r0,%r4
- srl %r0,8
- ltr %r0,%r0
- lr %r1,%r2
- jnz .Lmemcpy_loop
-.Lmemcpy_rest:
- ex %r4,.Lmemcpy_mvc-.Lmemcpy_base(%r5)
- br %r14
-.Lmemcpy_loop:
- mvc 0(256,%r1),0(%r3)
- la %r1,256(%r1)
- la %r3,256(%r3)
- brct %r0,.Lmemcpy_loop
- j .Lmemcpy_rest
-.Lmemcpy_mvc:
- mvc 0(1,%r1),0(%r3)
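
For readers not fluent in s390 assembly, both deleted routines follow the usual chunking shape: full 256-byte blocks are handled in an xc/mvc loop, and the 1..256 byte tail by a single ex (execute) of the trailing xc/mvc template. A rough host-side C rendering of that shape, illustrative only (the real code counts blocks off n-1 and propagates the fill byte with an overlapping mvc rather than calling memset):

#include <stddef.h>
#include <string.h>

/* Rough model of the chunk-plus-tail pattern used by the deleted mem32.S. */
static void *memset_chunked_model(void *s, int c, size_t n)
{
	unsigned char *p = s;

	while (n >= 256) {	/* the xc/mvc loop over full 256-byte blocks */
		memset(p, c, 256);
		p += 256;
		n -= 256;
	}
	if (n)			/* the "ex" of the one-byte template, length n */
		memset(p, c, n);
	return s;
}
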
diff --git a/arch/s390/lib/qrnnd.S b/arch/s390/lib/qrnnd.S
deleted file mode 100644
index d321329130ec..000000000000
--- a/arch/s390/lib/qrnnd.S
+++ /dev/null
@@ -1,78 +0,0 @@
-# S/390 __udiv_qrnnd
-
-#include <linux/linkage.h>
-
-# r2 : &__r
-# r3 : upper half of 64 bit word n
-# r4 : lower half of 64 bit word n
-# r5 : divisor d
-# the reminder r of the division is to be stored to &__r and
-# the quotient q is to be returned
-
- .text
-ENTRY(__udiv_qrnnd)
- st %r2,24(%r15) # store pointer to reminder for later
- lr %r0,%r3 # reload n
- lr %r1,%r4
- ltr %r2,%r5 # reload and test divisor
- jp 5f
- # divisor >= 0x80000000
- srdl %r0,2 # n/4
- srl %r2,1 # d/2
- slr %r1,%r2 # special case if last bit of d is set
- brc 3,0f # (n/4) div (n/2) can overflow by 1
- ahi %r0,-1 # trick: subtract n/2, then divide
-0: dr %r0,%r2 # signed division
- ahi %r1,1 # trick part 2: add 1 to the quotient
- # now (n >> 2) = (d >> 1) * %r1 + %r0
- lhi %r3,1
- nr %r3,%r1 # test last bit of q
- jz 1f
- alr %r0,%r2 # add (d>>1) to r
-1: srl %r1,1 # q >>= 1
- # now (n >> 2) = (d&-2) * %r1 + %r0
- lhi %r3,1
- nr %r3,%r5 # test last bit of d
- jz 2f
- slr %r0,%r1 # r -= q
- brc 3,2f # borrow ?
- alr %r0,%r5 # r += d
- ahi %r1,-1
-2: # now (n >> 2) = d * %r1 + %r0
- alr %r1,%r1 # q <<= 1
- alr %r0,%r0 # r <<= 1
- brc 12,3f # overflow on r ?
- slr %r0,%r5 # r -= d
- ahi %r1,1 # q += 1
-3: lhi %r3,2
- nr %r3,%r4 # test next to last bit of n
- jz 4f
- ahi %r0,1 # r += 1
-4: clr %r0,%r5 # r >= d ?
- jl 6f
- slr %r0,%r5 # r -= d
- ahi %r1,1 # q += 1
- # now (n >> 1) = d * %r1 + %r0
- j 6f
-5: # divisor < 0x80000000
- srdl %r0,1
- dr %r0,%r2 # signed division
- # now (n >> 1) = d * %r1 + %r0
-6: alr %r1,%r1 # q <<= 1
- alr %r0,%r0 # r <<= 1
- brc 12,7f # overflow on r ?
- slr %r0,%r5 # r -= d
- ahi %r1,1 # q += 1
-7: lhi %r3,1
- nr %r3,%r4 # isolate last bit of n
- alr %r0,%r3 # r += (n & 1)
- clr %r0,%r5 # r >= d ?
- jl 8f
- slr %r0,%r5 # r -= d
- ahi %r1,1 # q += 1
-8: # now n = d * %r1 + %r0
- l %r2,24(%r15)
- st %r0,0(%r2)
- lr %r2,%r1
- br %r14
- .end __udiv_qrnnd
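
The register comments above pin down the calling contract: r2 = &__r, r3:r4 = the 64-bit dividend, r5 = the divisor, quotient returned. A host-side model of that contract, illustrative only, using plain 64-bit arithmetic in place of the signed-division tricks; in the usual GMP-style convention the caller guarantees n1 < d so the quotient fits in 32 bits.

#include <stdint.h>

/*
 * Model of the deleted __udiv_qrnnd: divide the 64-bit value n1:n0 by d,
 * store the remainder through r and return the quotient.  Assumes n1 < d.
 */
static uint32_t udiv_qrnnd_model(uint32_t *r, uint32_t n1, uint32_t n0,
				 uint32_t d)
{
	uint64_t n = ((uint64_t)n1 << 32) | n0;

	*r = (uint32_t)(n % d);
	return (uint32_t)(n / d);
}
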
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index 53dd5d7a0c96..4614d415bb58 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -15,20 +15,6 @@
#include <asm/mmu_context.h>
#include <asm/facility.h>
-#ifndef CONFIG_64BIT
-#define AHI "ahi"
-#define ALR "alr"
-#define CLR "clr"
-#define LHI "lhi"
-#define SLR "slr"
-#else
-#define AHI "aghi"
-#define ALR "algr"
-#define CLR "clgr"
-#define LHI "lghi"
-#define SLR "slgr"
-#endif
-
static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE;
static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
@@ -41,29 +27,29 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
asm volatile(
"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
"9: jz 7f\n"
- "1:"ALR" %0,%3\n"
- " "SLR" %1,%3\n"
- " "SLR" %2,%3\n"
+ "1: algr %0,%3\n"
+ " slgr %1,%3\n"
+ " slgr %2,%3\n"
" j 0b\n"
"2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
" nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " slgr %4,%1\n"
+ " clgr %0,%4\n" /* copy crosses next page boundary? */
" jnh 4f\n"
"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
- "10:"SLR" %0,%4\n"
- " "ALR" %2,%4\n"
- "4:"LHI" %4,-1\n"
- " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
+ "10:slgr %0,%4\n"
+ " algr %2,%4\n"
+ "4: lghi %4,-1\n"
+ " algr %4,%0\n" /* copy remaining size, subtract 1 */
" bras %3,6f\n" /* memset loop */
" xc 0(1,%2),0(%2)\n"
"5: xc 0(256,%2),0(%2)\n"
" la %2,256(%2)\n"
- "6:"AHI" %4,-256\n"
+ "6: aghi %4,-256\n"
" jnm 5b\n"
" ex %4,0(%3)\n"
" j 8f\n"
- "7:"SLR" %0,%0\n"
+ "7:slgr %0,%0\n"
"8:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
@@ -82,32 +68,32 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
" sacf 0\n"
"0: mvcp 0(%0,%2),0(%1),%3\n"
"10:jz 8f\n"
- "1:"ALR" %0,%3\n"
+ "1: algr %0,%3\n"
" la %1,256(%1)\n"
" la %2,256(%2)\n"
"2: mvcp 0(%0,%2),0(%1),%3\n"
"11:jnz 1b\n"
" j 8f\n"
"3: la %4,255(%1)\n" /* %4 = ptr + 255 */
- " "LHI" %3,-4096\n"
+ " lghi %3,-4096\n"
" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " slgr %4,%1\n"
+ " clgr %0,%4\n" /* copy crosses next page boundary? */
" jnh 5f\n"
"4: mvcp 0(%4,%2),0(%1),%3\n"
- "12:"SLR" %0,%4\n"
- " "ALR" %2,%4\n"
- "5:"LHI" %4,-1\n"
- " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
+ "12:slgr %0,%4\n"
+ " algr %2,%4\n"
+ "5: lghi %4,-1\n"
+ " algr %4,%0\n" /* copy remaining size, subtract 1 */
" bras %3,7f\n" /* memset loop */
" xc 0(1,%2),0(%2)\n"
"6: xc 0(256,%2),0(%2)\n"
" la %2,256(%2)\n"
- "7:"AHI" %4,-256\n"
+ "7: aghi %4,-256\n"
" jnm 6b\n"
" ex %4,0(%3)\n"
" j 9f\n"
- "8:"SLR" %0,%0\n"
+ "8:slgr %0,%0\n"
"9: sacf 768\n"
EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
@@ -134,19 +120,19 @@ static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
asm volatile(
"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
"6: jz 4f\n"
- "1:"ALR" %0,%3\n"
- " "SLR" %1,%3\n"
- " "SLR" %2,%3\n"
+ "1: algr %0,%3\n"
+ " slgr %1,%3\n"
+ " slgr %2,%3\n"
" j 0b\n"
"2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
" nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " slgr %4,%1\n"
+ " clgr %0,%4\n" /* copy crosses next page boundary? */
" jnh 5f\n"
"3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
- "7:"SLR" %0,%4\n"
+ "7: slgr %0,%4\n"
" j 5f\n"
- "4:"SLR" %0,%0\n"
+ "4: slgr %0,%0\n"
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
@@ -165,22 +151,22 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
" sacf 0\n"
"0: mvcs 0(%0,%1),0(%2),%3\n"
"7: jz 5f\n"
- "1:"ALR" %0,%3\n"
+ "1: algr %0,%3\n"
" la %1,256(%1)\n"
" la %2,256(%2)\n"
"2: mvcs 0(%0,%1),0(%2),%3\n"
"8: jnz 1b\n"
" j 5f\n"
"3: la %4,255(%1)\n" /* %4 = ptr + 255 */
- " "LHI" %3,-4096\n"
+ " lghi %3,-4096\n"
" nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " slgr %4,%1\n"
+ " clgr %0,%4\n" /* copy crosses next page boundary? */
" jnh 6f\n"
"4: mvcs 0(%4,%1),0(%2),%3\n"
- "9:"SLR" %0,%4\n"
+ "9: slgr %0,%4\n"
" j 6f\n"
- "5:"SLR" %0,%0\n"
+ "5: slgr %0,%0\n"
"6: sacf 768\n"
EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
@@ -208,11 +194,11 @@ static inline unsigned long copy_in_user_mvcos(void __user *to, const void __use
asm volatile(
"0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
" jz 2f\n"
- "1:"ALR" %0,%3\n"
- " "SLR" %1,%3\n"
- " "SLR" %2,%3\n"
+ "1: algr %0,%3\n"
+ " slgr %1,%3\n"
+ " slgr %2,%3\n"
" j 0b\n"
- "2:"SLR" %0,%0\n"
+ "2:slgr %0,%0\n"
"3: \n"
EX_TABLE(0b,3b)
: "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
@@ -228,23 +214,23 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user
load_kernel_asce();
asm volatile(
" sacf 256\n"
- " "AHI" %0,-1\n"
+ " aghi %0,-1\n"
" jo 5f\n"
" bras %3,3f\n"
- "0:"AHI" %0,257\n"
+ "0: aghi %0,257\n"
"1: mvc 0(1,%1),0(%2)\n"
" la %1,1(%1)\n"
" la %2,1(%2)\n"
- " "AHI" %0,-1\n"
+ " aghi %0,-1\n"
" jnz 1b\n"
" j 5f\n"
"2: mvc 0(256,%1),0(%2)\n"
" la %1,256(%1)\n"
" la %2,256(%2)\n"
- "3:"AHI" %0,-256\n"
+ "3: aghi %0,-256\n"
" jnm 2b\n"
"4: ex %0,1b-0b(%3)\n"
- "5: "SLR" %0,%0\n"
+ "5: slgr %0,%0\n"
"6: sacf 768\n"
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
@@ -269,18 +255,18 @@ static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size
asm volatile(
"0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
" jz 4f\n"
- "1:"ALR" %0,%2\n"
- " "SLR" %1,%2\n"
+ "1: algr %0,%2\n"
+ " slgr %1,%2\n"
" j 0b\n"
"2: la %3,4095(%1)\n"/* %4 = to + 4095 */
" nr %3,%2\n" /* %4 = (to + 4095) & -4096 */
- " "SLR" %3,%1\n"
- " "CLR" %0,%3\n" /* copy crosses next page boundary? */
+ " slgr %3,%1\n"
+ " clgr %0,%3\n" /* copy crosses next page boundary? */
" jnh 5f\n"
"3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
- " "SLR" %0,%3\n"
+ " slgr %0,%3\n"
" j 5f\n"
- "4:"SLR" %0,%0\n"
+ "4:slgr %0,%0\n"
"5:\n"
EX_TABLE(0b,2b) EX_TABLE(3b,5b)
: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
@@ -295,28 +281,28 @@ static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
load_kernel_asce();
asm volatile(
" sacf 256\n"
- " "AHI" %0,-1\n"
+ " aghi %0,-1\n"
" jo 5f\n"
" bras %3,3f\n"
" xc 0(1,%1),0(%1)\n"
- "0:"AHI" %0,257\n"
+ "0: aghi %0,257\n"
" la %2,255(%1)\n" /* %2 = ptr + 255 */
" srl %2,12\n"
" sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
- " "SLR" %2,%1\n"
- " "CLR" %0,%2\n" /* clear crosses next page boundary? */
+ " slgr %2,%1\n"
+ " clgr %0,%2\n" /* clear crosses next page boundary? */
" jnh 5f\n"
- " "AHI" %2,-1\n"
+ " aghi %2,-1\n"
"1: ex %2,0(%3)\n"
- " "AHI" %2,1\n"
- " "SLR" %0,%2\n"
+ " aghi %2,1\n"
+ " slgr %0,%2\n"
" j 5f\n"
"2: xc 0(256,%1),0(%1)\n"
" la %1,256(%1)\n"
- "3:"AHI" %0,-256\n"
+ "3: aghi %0,-256\n"
" jnm 2b\n"
"4: ex %0,0(%3)\n"
- "5: "SLR" %0,%0\n"
+ "5: slgr %0,%0\n"
"6: sacf 768\n"
EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
: "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
@@ -341,12 +327,12 @@ static inline unsigned long strnlen_user_srst(const char __user *src,
asm volatile(
" la %2,0(%1)\n"
" la %3,0(%0,%1)\n"
- " "SLR" %0,%0\n"
+ " slgr %0,%0\n"
" sacf 256\n"
"0: srst %3,%2\n"
" jo 0b\n"
" la %0,1(%3)\n" /* strnlen_user results includes \0 */
- " "SLR" %0,%1\n"
+ " slgr %0,%1\n"
"1: sacf 768\n"
EX_TABLE(0b,1b)
: "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
@@ -399,7 +385,7 @@ early_param("uaccess_primary", parse_uaccess_pt);
static int __init uaccess_init(void)
{
- if (IS_ENABLED(CONFIG_64BIT) && !uaccess_primary && test_facility(27))
+ if (!uaccess_primary && test_facility(27))
static_key_slow_inc(&have_mvcos);
return 0;
}
diff --git a/arch/s390/lib/ucmpdi2.c b/arch/s390/lib/ucmpdi2.c
deleted file mode 100644
index 3e05ff532582..000000000000
--- a/arch/s390/lib/ucmpdi2.c
+++ /dev/null
@@ -1,26 +0,0 @@
-#include <linux/module.h>
-
-union ull_union {
- unsigned long long ull;
- struct {
- unsigned int high;
- unsigned int low;
- } ui;
-};
-
-int __ucmpdi2(unsigned long long a, unsigned long long b)
-{
- union ull_union au = {.ull = a};
- union ull_union bu = {.ull = b};
-
- if (au.ui.high < bu.ui.high)
- return 0;
- else if (au.ui.high > bu.ui.high)
- return 2;
- if (au.ui.low < bu.ui.low)
- return 0;
- else if (au.ui.low > bu.ui.low)
- return 2;
- return 1;
-}
-EXPORT_SYMBOL(__ucmpdi2);