author     Linus Torvalds <torvalds@linux-foundation.org>  2014-04-08 12:02:28 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-04-08 12:02:28 -0700
commit     d586c86d50cefa0897a51a2dbc714060ccedae76 (patch)
tree       76a7f454637badb74390047aebca5c071c0988fe /arch/s390
parent     e9f37d3a8d126e73f5737ef548cdf6f618e295e4 (diff)
parent     457f2180951cdcbfb4657ddcc83b486e93497f56 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
Pull second set of s390 patches from Martin Schwidefsky:
 "The second part of Heiko's uaccess rework; the page table walker for
  uaccess is now a thing of the past (yay!)

  The code change to fix the theoretical TLB flush problem allows us to
  add a TLB flush optimization for zEC12: this machine has new
  instructions that allow CPU-local TLB flushes for single pages and for
  all pages of a specific address space.

  Plus the usual bug fixing and some more cleanup"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux:
  s390/uaccess: rework uaccess code - fix locking issues
  s390/mm,tlb: optimize TLB flushing for zEC12
  s390/mm,tlb: safeguard against speculative TLB creation
  s390/irq: Use defines for external interruption codes
  s390/irq: Add defines for external interruption codes
  s390/sclp: add timeout for queued requests
  kvm/s390: also set guest pages back to stable on kexec/kdump
  lcs: Add missing destroy_timer_on_stack()
  s390/tape: Add missing destroy_timer_on_stack()
  s390/tape: Use del_timer_sync()
  s390/3270: fix crash with multiple reset device requests
  s390/bitops,atomic: add missing memory barriers
  s390/zcrypt: add length check for aligned data to avoid overflow in msg-type 6
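For orientation before the full diff: the sketch below condenses the local-flush decision that the zEC12 TLB patches introduce. It is taken from the __tlb_flush_asce() hunk in asm/tlbflush.h further down; the wrapper name tlb_flush_asce_sketch is mine, and dropping the non-IDTE fallback and the cpumask reset are my simplifications, not part of the patch. Roughly: the low 16 bits of attach_count track how many CPUs have the mm attached, adding 0x10000 marks a flush in flight, and the flush stays CPU-local only when the mm is attached nowhere but the current CPU.

static inline void tlb_flush_asce_sketch(struct mm_struct *mm, unsigned long asce)
{
	int active, count;

	preempt_disable();
	/* Is this mm attached anywhere besides the current CPU? */
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		/* zEC12: IDTE with the local-clearing bit flushes only this CPU */
		__tlb_flush_idte_local(asce);
	else
		/* otherwise fall back to a machine-wide flush of the ASCE */
		__tlb_flush_idte(asce);
	atomic_sub(0x10000, &mm->context.attach_count);
	preempt_enable();
}

The same pattern recurs for single-page and segment flushes via __ptep_ipte_local() and __pmdp_idte_local() in the asm/pgtable.h hunks below.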
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/include/asm/atomic.h       |  70
-rw-r--r--  arch/s390/include/asm/bitops.h       |  41
-rw-r--r--  arch/s390/include/asm/futex.h        |  66
-rw-r--r--  arch/s390/include/asm/irq.h          |  18
-rw-r--r--  arch/s390/include/asm/mmu.h          |   2
-rw-r--r--  arch/s390/include/asm/mmu_context.h  |  45
-rw-r--r--  arch/s390/include/asm/pgtable.h      | 128
-rw-r--r--  arch/s390/include/asm/setup.h        |   3
-rw-r--r--  arch/s390/include/asm/switch_to.h    |   1
-rw-r--r--  arch/s390/include/asm/thread_info.h  |   2
-rw-r--r--  arch/s390/include/asm/tlb.h          |  14
-rw-r--r--  arch/s390/include/asm/tlbflush.h     | 115
-rw-r--r--  arch/s390/include/asm/uaccess.h      |   2
-rw-r--r--  arch/s390/kernel/asm-offsets.c       |   1
-rw-r--r--  arch/s390/kernel/early.c             |   2
-rw-r--r--  arch/s390/kernel/entry.S             |  24
-rw-r--r--  arch/s390/kernel/entry64.S           |  24
-rw-r--r--  arch/s390/kernel/irq.c               |  10
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c      |   6
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c      |   6
-rw-r--r--  arch/s390/kernel/runtime_instr.c     |   3
-rw-r--r--  arch/s390/kernel/sclp.S              |   5
-rw-r--r--  arch/s390/kernel/smp.c               |  10
-rw-r--r--  arch/s390/kernel/time.c              |   6
-rw-r--r--  arch/s390/kvm/diag.c                 |   4
-rw-r--r--  arch/s390/lib/Makefile               |   2
-rw-r--r--  arch/s390/lib/uaccess.c              | 407
-rw-r--r--  arch/s390/lib/uaccess.h              |  16
-rw-r--r--  arch/s390/lib/uaccess_mvcos.c        | 263
-rw-r--r--  arch/s390/lib/uaccess_pt.c           | 471
-rw-r--r--  arch/s390/mm/fault.c                 |  53
-rw-r--r--  arch/s390/mm/hugetlbpage.c           |   5
-rw-r--r--  arch/s390/mm/init.c                  |   7
-rw-r--r--  arch/s390/mm/pgtable.c               |  12
-rw-r--r--  arch/s390/mm/vmem.c                  |   2
-rw-r--r--  arch/s390/oprofile/hwsampler.c       |   4
36 files changed, 892 insertions, 958 deletions
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index fa9aaf7144b7..1d4706114a45 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -15,23 +15,29 @@
#include <linux/compiler.h>
#include <linux/types.h>
+#include <asm/barrier.h>
#include <asm/cmpxchg.h>
#define ATOMIC_INIT(i) { (i) }
+#define __ATOMIC_NO_BARRIER "\n"
+
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define __ATOMIC_OR "lao"
#define __ATOMIC_AND "lan"
#define __ATOMIC_ADD "laa"
+#define __ATOMIC_BARRIER "bcr 14,0\n"
-#define __ATOMIC_LOOP(ptr, op_val, op_string) \
+#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
({ \
int old_val; \
\
typecheck(atomic_t *, ptr); \
asm volatile( \
+ __barrier \
op_string " %0,%2,%1\n" \
+ __barrier \
: "=d" (old_val), "+Q" ((ptr)->counter) \
: "d" (op_val) \
: "cc", "memory"); \
@@ -43,8 +49,9 @@
#define __ATOMIC_OR "or"
#define __ATOMIC_AND "nr"
#define __ATOMIC_ADD "ar"
+#define __ATOMIC_BARRIER "\n"
-#define __ATOMIC_LOOP(ptr, op_val, op_string) \
+#define __ATOMIC_LOOP(ptr, op_val, op_string, __barrier) \
({ \
int old_val, new_val; \
\
@@ -82,7 +89,7 @@ static inline void atomic_set(atomic_t *v, int i)
static inline int atomic_add_return(int i, atomic_t *v)
{
- return __ATOMIC_LOOP(v, i, __ATOMIC_ADD) + i;
+ return __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_BARRIER) + i;
}
static inline void atomic_add(int i, atomic_t *v)
@@ -94,12 +101,10 @@ static inline void atomic_add(int i, atomic_t *v)
: "+Q" (v->counter)
: "i" (i)
: "cc", "memory");
- } else {
- atomic_add_return(i, v);
+ return;
}
-#else
- atomic_add_return(i, v);
#endif
+ __ATOMIC_LOOP(v, i, __ATOMIC_ADD, __ATOMIC_NO_BARRIER);
}
#define atomic_add_negative(_i, _v) (atomic_add_return(_i, _v) < 0)
@@ -115,12 +120,12 @@ static inline void atomic_add(int i, atomic_t *v)
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
- __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND);
+ __ATOMIC_LOOP(v, ~mask, __ATOMIC_AND, __ATOMIC_NO_BARRIER);
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
- __ATOMIC_LOOP(v, mask, __ATOMIC_OR);
+ __ATOMIC_LOOP(v, mask, __ATOMIC_OR, __ATOMIC_NO_BARRIER);
}
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -157,19 +162,24 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#ifdef CONFIG_64BIT
+#define __ATOMIC64_NO_BARRIER "\n"
+
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
#define __ATOMIC64_OR "laog"
#define __ATOMIC64_AND "lang"
#define __ATOMIC64_ADD "laag"
+#define __ATOMIC64_BARRIER "bcr 14,0\n"
-#define __ATOMIC64_LOOP(ptr, op_val, op_string) \
+#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
({ \
long long old_val; \
\
typecheck(atomic64_t *, ptr); \
asm volatile( \
+ __barrier \
op_string " %0,%2,%1\n" \
+ __barrier \
: "=d" (old_val), "+Q" ((ptr)->counter) \
: "d" (op_val) \
: "cc", "memory"); \
@@ -181,8 +191,9 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
#define __ATOMIC64_OR "ogr"
#define __ATOMIC64_AND "ngr"
#define __ATOMIC64_ADD "agr"
+#define __ATOMIC64_BARRIER "\n"
-#define __ATOMIC64_LOOP(ptr, op_val, op_string) \
+#define __ATOMIC64_LOOP(ptr, op_val, op_string, __barrier) \
({ \
long long old_val, new_val; \
\
@@ -220,17 +231,32 @@ static inline void atomic64_set(atomic64_t *v, long long i)
static inline long long atomic64_add_return(long long i, atomic64_t *v)
{
- return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD) + i;
+ return __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_BARRIER) + i;
+}
+
+static inline void atomic64_add(long long i, atomic64_t *v)
+{
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+ if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
+ asm volatile(
+ "agsi %0,%1\n"
+ : "+Q" (v->counter)
+ : "i" (i)
+ : "cc", "memory");
+ return;
+ }
+#endif
+ __ATOMIC64_LOOP(v, i, __ATOMIC64_ADD, __ATOMIC64_NO_BARRIER);
}
static inline void atomic64_clear_mask(unsigned long mask, atomic64_t *v)
{
- __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND);
+ __ATOMIC64_LOOP(v, ~mask, __ATOMIC64_AND, __ATOMIC64_NO_BARRIER);
}
static inline void atomic64_set_mask(unsigned long mask, atomic64_t *v)
{
- __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR);
+ __ATOMIC64_LOOP(v, mask, __ATOMIC64_OR, __ATOMIC64_NO_BARRIER);
}
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
@@ -334,25 +360,13 @@ static inline void atomic64_clear_mask(unsigned long long mask, atomic64_t *v)
} while (atomic64_cmpxchg(v, old, new) != old);
}
-#endif /* CONFIG_64BIT */
-
static inline void atomic64_add(long long i, atomic64_t *v)
{
-#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
- if (__builtin_constant_p(i) && (i > -129) && (i < 128)) {
- asm volatile(
- "agsi %0,%1\n"
- : "+Q" (v->counter)
- : "i" (i)
- : "cc", "memory");
- } else {
- atomic64_add_return(i, v);
- }
-#else
atomic64_add_return(i, v);
-#endif
}
+#endif /* CONFIG_64BIT */
+
static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
long long c, old;
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index ec5ef891db6b..520542477678 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -47,14 +47,18 @@
#include <linux/typecheck.h>
#include <linux/compiler.h>
+#include <asm/barrier.h>
+
+#define __BITOPS_NO_BARRIER "\n"
#ifndef CONFIG_64BIT
#define __BITOPS_OR "or"
#define __BITOPS_AND "nr"
#define __BITOPS_XOR "xr"
+#define __BITOPS_BARRIER "\n"
-#define __BITOPS_LOOP(__addr, __val, __op_string) \
+#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
({ \
unsigned long __old, __new; \
\
@@ -67,7 +71,7 @@
" jl 0b" \
: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
: "d" (__val) \
- : "cc"); \
+ : "cc", "memory"); \
__old; \
})
@@ -78,17 +82,20 @@
#define __BITOPS_OR "laog"
#define __BITOPS_AND "lang"
#define __BITOPS_XOR "laxg"
+#define __BITOPS_BARRIER "bcr 14,0\n"
-#define __BITOPS_LOOP(__addr, __val, __op_string) \
+#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
({ \
unsigned long __old; \
\
typecheck(unsigned long *, (__addr)); \
asm volatile( \
+ __barrier \
__op_string " %0,%2,%1\n" \
+ __barrier \
: "=d" (__old), "+Q" (*(__addr)) \
: "d" (__val) \
- : "cc"); \
+ : "cc", "memory"); \
__old; \
})
@@ -97,8 +104,9 @@
#define __BITOPS_OR "ogr"
#define __BITOPS_AND "ngr"
#define __BITOPS_XOR "xgr"
+#define __BITOPS_BARRIER "\n"
-#define __BITOPS_LOOP(__addr, __val, __op_string) \
+#define __BITOPS_LOOP(__addr, __val, __op_string, __barrier) \
({ \
unsigned long __old, __new; \
\
@@ -111,7 +119,7 @@
" jl 0b" \
: "=&d" (__old), "=&d" (__new), "+Q" (*(__addr))\
: "d" (__val) \
- : "cc"); \
+ : "cc", "memory"); \
__old; \
})
@@ -149,12 +157,12 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *ptr)
"oi %0,%b1\n"
: "+Q" (*caddr)
: "i" (1 << (nr & 7))
- : "cc");
+ : "cc", "memory");
return;
}
#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- __BITOPS_LOOP(addr, mask, __BITOPS_OR);
+ __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_NO_BARRIER);
}
static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -170,12 +178,12 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *ptr)
"ni %0,%b1\n"
: "+Q" (*caddr)
: "i" (~(1 << (nr & 7)))
- : "cc");
+ : "cc", "memory");
return;
}
#endif
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- __BITOPS_LOOP(addr, mask, __BITOPS_AND);
+ __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_NO_BARRIER);
}
static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
@@ -191,12 +199,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *ptr)
"xi %0,%b1\n"
: "+Q" (*caddr)
: "i" (1 << (nr & 7))
- : "cc");
+ : "cc", "memory");
return;
}
#endif
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
+ __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_NO_BARRIER);
}
static inline int
@@ -206,8 +214,7 @@ test_and_set_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- old = __BITOPS_LOOP(addr, mask, __BITOPS_OR);
- barrier();
+ old = __BITOPS_LOOP(addr, mask, __BITOPS_OR, __BITOPS_BARRIER);
return (old & mask) != 0;
}
@@ -218,8 +225,7 @@ test_and_clear_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = ~(1UL << (nr & (BITS_PER_LONG - 1)));
- old = __BITOPS_LOOP(addr, mask, __BITOPS_AND);
- barrier();
+ old = __BITOPS_LOOP(addr, mask, __BITOPS_AND, __BITOPS_BARRIER);
return (old & ~mask) != 0;
}
@@ -230,8 +236,7 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *ptr)
unsigned long old, mask;
mask = 1UL << (nr & (BITS_PER_LONG - 1));
- old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR);
- barrier();
+ old = __BITOPS_LOOP(addr, mask, __BITOPS_XOR, __BITOPS_BARRIER);
return (old & mask) != 0;
}
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index fda46bd38c99..69cf5b5eddc9 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -1,12 +1,25 @@
#ifndef _ASM_S390_FUTEX_H
#define _ASM_S390_FUTEX_H
-#include <linux/futex.h>
#include <linux/uaccess.h>
+#include <linux/futex.h>
+#include <asm/mmu_context.h>
#include <asm/errno.h>
-int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval);
-int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old);
+#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
+ asm volatile( \
+ " sacf 256\n" \
+ "0: l %1,0(%6)\n" \
+ "1:"insn \
+ "2: cs %1,%2,0(%6)\n" \
+ "3: jl 1b\n" \
+ " lhi %0,0\n" \
+ "4: sacf 768\n" \
+ EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
+ : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
+ "=m" (*uaddr) \
+ : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
+ "m" (*uaddr) : "cc");
static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
@@ -14,13 +27,37 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
int cmp = (encoded_op >> 24) & 15;
int oparg = (encoded_op << 8) >> 20;
int cmparg = (encoded_op << 20) >> 20;
- int oldval, ret;
+ int oldval = 0, newval, ret;
+ update_primary_asce(current);
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;
pagefault_disable();
- ret = __futex_atomic_op_inuser(op, uaddr, oparg, &oldval);
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("lr %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("lr %2,%1\nar %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("lr %2,%1\nor %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
pagefault_enable();
if (!ret) {
@@ -37,4 +74,23 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
return ret;
}
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret;
+
+ update_primary_asce(current);
+ asm volatile(
+ " sacf 256\n"
+ "0: cs %1,%4,0(%5)\n"
+ "1: la %0,0\n"
+ "2: sacf 768\n"
+ EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+ : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
+ : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
+ : "cc", "memory");
+ *uval = oldval;
+ return ret;
+}
+
#endif /* _ASM_S390_FUTEX_H */
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 35f0faab5361..c4dd400a2791 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -16,6 +16,20 @@
/* This number is used when no interrupt has been assigned */
#define NO_IRQ 0
+/* External interruption codes */
+#define EXT_IRQ_INTERRUPT_KEY 0x0040
+#define EXT_IRQ_CLK_COMP 0x1004
+#define EXT_IRQ_CPU_TIMER 0x1005
+#define EXT_IRQ_WARNING_TRACK 0x1007
+#define EXT_IRQ_MALFUNC_ALERT 0x1200
+#define EXT_IRQ_EMERGENCY_SIG 0x1201
+#define EXT_IRQ_EXTERNAL_CALL 0x1202
+#define EXT_IRQ_TIMING_ALERT 0x1406
+#define EXT_IRQ_MEASURE_ALERT 0x1407
+#define EXT_IRQ_SERVICE_SIG 0x2401
+#define EXT_IRQ_CP_SERVICE 0x2603
+#define EXT_IRQ_IUCV 0x4000
+
#ifndef __ASSEMBLY__
#include <linux/hardirq.h>
@@ -77,8 +91,8 @@ struct ext_code {
typedef void (*ext_int_handler_t)(struct ext_code, unsigned int, unsigned long);
-int register_external_interrupt(u16 code, ext_int_handler_t handler);
-int unregister_external_interrupt(u16 code, ext_int_handler_t handler);
+int register_external_irq(u16 code, ext_int_handler_t handler);
+int unregister_external_irq(u16 code, ext_int_handler_t handler);
enum irq_subclass {
IRQ_SUBCLASS_MEASUREMENT_ALERT = 5,
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index ff132ac64ddd..f77695a82f64 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -1,9 +1,11 @@
#ifndef __MMU_H
#define __MMU_H
+#include <linux/cpumask.h>
#include <linux/errno.h>
typedef struct {
+ cpumask_t cpu_attach_mask;
atomic_t attach_count;
unsigned int flush_mm;
spinlock_t list_lock;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 38149b63dc44..71be346d0e3c 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -15,6 +15,7 @@
static inline int init_new_context(struct task_struct *tsk,
struct mm_struct *mm)
{
+ cpumask_clear(&mm->context.cpu_attach_mask);
atomic_set(&mm->context.attach_count, 0);
mm->context.flush_mm = 0;
mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
@@ -29,41 +30,61 @@ static inline int init_new_context(struct task_struct *tsk,
#define destroy_context(mm) do { } while (0)
-#ifndef CONFIG_64BIT
-#define LCTL_OPCODE "lctl"
-#else
-#define LCTL_OPCODE "lctlg"
-#endif
-
-static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
+static inline void update_user_asce(struct mm_struct *mm, int load_primary)
{
pgd_t *pgd = mm->pgd;
S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
- /* Load primary space page table origin. */
- asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
+ if (load_primary)
+ __ctl_load(S390_lowcore.user_asce, 1, 1);
set_fs(current->thread.mm_segment);
}
+static inline void clear_user_asce(struct mm_struct *mm, int load_primary)
+{
+ S390_lowcore.user_asce = S390_lowcore.kernel_asce;
+
+ if (load_primary)
+ __ctl_load(S390_lowcore.user_asce, 1, 1);
+ __ctl_load(S390_lowcore.user_asce, 7, 7);
+}
+
+static inline void update_primary_asce(struct task_struct *tsk)
+{
+ unsigned long asce;
+
+ __ctl_store(asce, 1, 1);
+ if (asce != S390_lowcore.kernel_asce)
+ __ctl_load(S390_lowcore.kernel_asce, 1, 1);
+ set_tsk_thread_flag(tsk, TIF_ASCE);
+}
+
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
int cpu = smp_processor_id();
+ update_primary_asce(tsk);
if (prev == next)
return;
+ if (MACHINE_HAS_TLB_LC)
+ cpumask_set_cpu(cpu, &next->context.cpu_attach_mask);
if (atomic_inc_return(&next->context.attach_count) >> 16) {
- /* Delay update_mm until all TLB flushes are done. */
+ /* Delay update_user_asce until all TLB flushes are done. */
set_tsk_thread_flag(tsk, TIF_TLB_WAIT);
+ /* Clear old ASCE by loading the kernel ASCE. */
+ clear_user_asce(next, 0);
} else {
cpumask_set_cpu(cpu, mm_cpumask(next));
- update_mm(next, tsk);
+ update_user_asce(next, 0);
if (next->context.flush_mm)
/* Flush pending TLBs */
__tlb_flush_mm(next);
}
atomic_dec(&prev->context.attach_count);
WARN_ON(atomic_read(&prev->context.attach_count) < 0);
+ if (MACHINE_HAS_TLB_LC)
+ cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
}
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
@@ -80,7 +101,7 @@ static inline void finish_arch_post_lock_switch(void)
cpu_relax();
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
- update_mm(mm, tsk);
+ update_user_asce(mm, 0);
if (mm->context.flush_mm)
__tlb_flush_mm(mm);
preempt_enable();
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 50a75d96f939..12f75313e086 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1070,12 +1070,35 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}
+static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
+{
+ unsigned long pto = (unsigned long) ptep;
+
+#ifndef CONFIG_64BIT
+ /* pto in ESA mode must point to the start of the segment table */
+ pto &= 0x7ffffc00;
+#endif
+ /* Invalidation + local TLB flush for the pte */
+ asm volatile(
+ " .insn rrf,0xb2210000,%2,%3,0,1"
+ : "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
+}
+
static inline void ptep_flush_direct(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
{
+ int active, count;
+
if (pte_val(*ptep) & _PAGE_INVALID)
return;
- __ptep_ipte(address, ptep);
+ active = (mm == current->active_mm) ? 1 : 0;
+ count = atomic_add_return(0x10000, &mm->context.attach_count);
+ if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+ cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
+ __ptep_ipte_local(address, ptep);
+ else
+ __ptep_ipte(address, ptep);
+ atomic_sub(0x10000, &mm->context.attach_count);
}
static inline void ptep_flush_lazy(struct mm_struct *mm,
@@ -1384,35 +1407,6 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
-static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
-{
- unsigned long sto = (unsigned long) pmdp -
- pmd_index(address) * sizeof(pmd_t);
-
- if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) {
- asm volatile(
- " .insn rrf,0xb98e0000,%2,%3,0,0"
- : "=m" (*pmdp)
- : "m" (*pmdp), "a" (sto),
- "a" ((address & HPAGE_MASK))
- : "cc"
- );
- }
-}
-
-static inline void __pmd_csp(pmd_t *pmdp)
-{
- register unsigned long reg2 asm("2") = pmd_val(*pmdp);
- register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
- _SEGMENT_ENTRY_INVALID;
- register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
-
- asm volatile(
- " csp %1,%3"
- : "=m" (*pmdp)
- : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
-}
-
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
@@ -1481,18 +1475,80 @@ static inline pmd_t pmd_mkwrite(pmd_t pmd)
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
+static inline void __pmdp_csp(pmd_t *pmdp)
+{
+ register unsigned long reg2 asm("2") = pmd_val(*pmdp);
+ register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
+ _SEGMENT_ENTRY_INVALID;
+ register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
+
+ asm volatile(
+ " csp %1,%3"
+ : "=m" (*pmdp)
+ : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
+}
+
+static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
+{
+ unsigned long sto;
+
+ sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
+ asm volatile(
+ " .insn rrf,0xb98e0000,%2,%3,0,0"
+ : "=m" (*pmdp)
+ : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
+ : "cc" );
+}
+
+static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
+{
+ unsigned long sto;
+
+ sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
+ asm volatile(
+ " .insn rrf,0xb98e0000,%2,%3,0,1"
+ : "=m" (*pmdp)
+ : "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
+ : "cc" );
+}
+
+static inline void pmdp_flush_direct(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ int active, count;
+
+ if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
+ return;
+ if (!MACHINE_HAS_IDTE) {
+ __pmdp_csp(pmdp);
+ return;
+ }
+ active = (mm == current->active_mm) ? 1 : 0;
+ count = atomic_add_return(0x10000, &mm->context.attach_count);
+ if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+ cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
+ __pmdp_idte_local(address, pmdp);
+ else
+ __pmdp_idte(address, pmdp);
+ atomic_sub(0x10000, &mm->context.attach_count);
+}
+
static inline void pmdp_flush_lazy(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp)
{
int active, count;
+ if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
+ return;
active = (mm == current->active_mm) ? 1 : 0;
count = atomic_add_return(0x10000, &mm->context.attach_count);
if ((count & 0xffff) <= active) {
pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
mm->context.flush_mm = 1;
- } else
- __pmd_idte(address, pmdp);
+ } else if (MACHINE_HAS_IDTE)
+ __pmdp_idte(address, pmdp);
+ else
+ __pmdp_csp(pmdp);
atomic_sub(0x10000, &mm->context.attach_count);
}
@@ -1545,7 +1601,7 @@ static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
pmd_t pmd;
pmd = *pmdp;
- __pmd_idte(address, pmdp);
+ pmdp_flush_direct(vma->vm_mm, address, pmdp);
*pmdp = pmd_mkold(pmd);
return pmd_young(pmd);
}
@@ -1556,7 +1612,7 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
{
pmd_t pmd = *pmdp;
- __pmd_idte(address, pmdp);
+ pmdp_flush_direct(mm, address, pmdp);
pmd_clear(pmdp);
return pmd;
}
@@ -1572,7 +1628,7 @@ static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
static inline void pmdp_invalidate(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
- __pmd_idte(address, pmdp);
+ pmdp_flush_direct(vma->vm_mm, address, pmdp);
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
@@ -1582,7 +1638,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
pmd_t pmd = *pmdp;
if (pmd_write(pmd)) {
- __pmd_idte(address, pmdp);
+ pmdp_flush_direct(mm, address, pmdp);
set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
}
}
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 406f3a1e63ef..b31b22dba948 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -68,6 +68,7 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
#define MACHINE_FLAG_TOPOLOGY (1UL << 14)
#define MACHINE_FLAG_TE (1UL << 15)
#define MACHINE_FLAG_RRBM (1UL << 16)
+#define MACHINE_FLAG_TLB_LC (1UL << 17)
#define MACHINE_IS_VM (S390_lowcore.machine_flags & MACHINE_FLAG_VM)
#define MACHINE_IS_KVM (S390_lowcore.machine_flags & MACHINE_FLAG_KVM)
@@ -90,6 +91,7 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
#define MACHINE_HAS_TOPOLOGY (0)
#define MACHINE_HAS_TE (0)
#define MACHINE_HAS_RRBM (0)
+#define MACHINE_HAS_TLB_LC (0)
#else /* CONFIG_64BIT */
#define MACHINE_HAS_IEEE (1)
#define MACHINE_HAS_CSP (1)
@@ -102,6 +104,7 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
#define MACHINE_HAS_TE (S390_lowcore.machine_flags & MACHINE_FLAG_TE)
#define MACHINE_HAS_RRBM (S390_lowcore.machine_flags & MACHINE_FLAG_RRBM)
+#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
#endif /* CONFIG_64BIT */
/*
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
index 29c81f82705e..e759181357fc 100644
--- a/arch/s390/include/asm/switch_to.h
+++ b/arch/s390/include/asm/switch_to.h
@@ -132,6 +132,7 @@ static inline void restore_access_regs(unsigned int *acrs)
update_cr_regs(next); \
} \
prev = __switch_to(prev,next); \
+ update_primary_asce(current); \
} while (0)
#define finish_arch_switch(prev) do { \
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 3ccd71b90345..50630e6a35de 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -82,6 +82,7 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_TLB_WAIT 4 /* wait for TLB flush completion */
+#define TIF_ASCE 5 /* primary asce needs fixup / uaccess */
#define TIF_PER_TRAP 6 /* deliver sigtrap on return to user */
#define TIF_MCCK_PENDING 7 /* machine check handling is pending */
#define TIF_SYSCALL_TRACE 8 /* syscall trace active */
@@ -99,6 +100,7 @@ static inline struct thread_info *current_thread_info(void)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_TLB_WAIT (1<<TIF_TLB_WAIT)
+#define _TIF_ASCE (1<<TIF_ASCE)
#define _TIF_PER_TRAP (1<<TIF_PER_TRAP)
#define _TIF_MCCK_PENDING (1<<TIF_MCCK_PENDING)
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 2cb846c4b37f..c544b6f05d95 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -57,8 +57,6 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb,
tlb->end = end;
tlb->fullmm = !(start | (end+1));
tlb->batch = NULL;
- if (tlb->fullmm)
- __tlb_flush_mm(mm);
}
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
@@ -96,9 +94,7 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
unsigned long address)
{
- if (!tlb->fullmm)
- return page_table_free_rcu(tlb, (unsigned long *) pte);
- page_table_free(tlb->mm, (unsigned long *) pte);
+ page_table_free_rcu(tlb, (unsigned long *) pte);
}
/*
@@ -114,9 +110,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
#ifdef CONFIG_64BIT
if (tlb->mm->context.asce_limit <= (1UL << 31))
return;
- if (!tlb->fullmm)
- return tlb_remove_table(tlb, pmd);
- crst_table_free(tlb->mm, (unsigned long *) pmd);
+ tlb_remove_table(tlb, pmd);
#endif
}
@@ -133,9 +127,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
#ifdef CONFIG_64BIT
if (tlb->mm->context.asce_limit <= (1UL << 42))
return;
- if (!tlb->fullmm)
- return tlb_remove_table(tlb, pud);
- crst_table_free(tlb->mm, (unsigned long *) pud);
+ tlb_remove_table(tlb, pud);
#endif
}
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index f9fef0425fee..16c9c88658c8 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -7,19 +7,41 @@
#include <asm/pgalloc.h>
/*
- * Flush all tlb entries on the local cpu.
+ * Flush all TLB entries on the local CPU.
*/
static inline void __tlb_flush_local(void)
{
asm volatile("ptlb" : : : "memory");
}
-#ifdef CONFIG_SMP
/*
- * Flush all tlb entries on all cpus.
+ * Flush TLB entries for a specific ASCE on all CPUs
*/
+static inline void __tlb_flush_idte(unsigned long asce)
+{
+ /* Global TLB flush for the mm */
+ asm volatile(
+ " .insn rrf,0xb98e0000,0,%0,%1,0"
+ : : "a" (2048), "a" (asce) : "cc");
+}
+
+/*
+ * Flush TLB entries for a specific ASCE on the local CPU
+ */
+static inline void __tlb_flush_idte_local(unsigned long asce)
+{
+ /* Local TLB flush for the mm */
+ asm volatile(
+ " .insn rrf,0xb98e0000,0,%0,%1,1"
+ : : "a" (2048), "a" (asce) : "cc");
+}
+
+#ifdef CONFIG_SMP
void smp_ptlb_all(void);
+/*
+ * Flush all TLB entries on all CPUs.
+ */
static inline void __tlb_flush_global(void)
{
register unsigned long reg2 asm("2");
@@ -42,36 +64,89 @@ static inline void __tlb_flush_global(void)
: : "d" (reg2), "d" (reg3), "d" (reg4), "m" (dummy) : "cc" );
}
+/*
+ * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
+ * this implicates multiple ASCEs!).
+ */
static inline void __tlb_flush_full(struct mm_struct *mm)
{
- cpumask_t local_cpumask;
-
preempt_disable();
- /*
- * If the process only ran on the local cpu, do a local flush.
- */
- cpumask_copy(&local_cpumask, cpumask_of(smp_processor_id()));
- if (cpumask_equal(mm_cpumask(mm), &local_cpumask))
+ atomic_add(0x10000, &mm->context.attach_count);
+ if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
+ /* Local TLB flush */
__tlb_flush_local();
- else
+ } else {
+ /* Global TLB flush */
__tlb_flush_global();
+ /* Reset TLB flush mask */
+ if (MACHINE_HAS_TLB_LC)
+ cpumask_copy(mm_cpumask(mm),
+ &mm->context.cpu_attach_mask);
+ }
+ atomic_sub(0x10000, &mm->context.attach_count);
preempt_enable();
}
+
+/*
+ * Flush TLB entries for a specific ASCE on all CPUs.
+ */
+static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
+{
+ int active, count;
+
+ preempt_disable();
+ active = (mm == current->active_mm) ? 1 : 0;
+ count = atomic_add_return(0x10000, &mm->context.attach_count);
+ if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
+ cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
+ __tlb_flush_idte_local(asce);
+ } else {
+ if (MACHINE_HAS_IDTE)
+ __tlb_flush_idte(asce);
+ else
+ __tlb_flush_global();
+ /* Reset TLB flush mask */
+ if (MACHINE_HAS_TLB_LC)
+ cpumask_copy(mm_cpumask(mm),
+ &mm->context.cpu_attach_mask);
+ }
+ atomic_sub(0x10000, &mm->context.attach_count);
+ preempt_enable();
+}
+
+static inline void __tlb_flush_kernel(void)
+{
+ if (MACHINE_HAS_IDTE)
+ __tlb_flush_idte((unsigned long) init_mm.pgd |
+ init_mm.context.asce_bits);
+ else
+ __tlb_flush_global();
+}
#else
-#define __tlb_flush_full(mm) __tlb_flush_local()
#define __tlb_flush_global() __tlb_flush_local()
-#endif
+#define __tlb_flush_full(mm) __tlb_flush_local()
/*
- * Flush all tlb entries of a page table on all cpus.
+ * Flush TLB entries for a specific ASCE on all CPUs.
*/
-static inline void __tlb_flush_idte(unsigned long asce)
+static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
{
- asm volatile(
- " .insn rrf,0xb98e0000,0,%0,%1,0"
- : : "a" (2048), "a" (asce) : "cc" );
+ if (MACHINE_HAS_TLB_LC)
+ __tlb_flush_idte_local(asce);
+ else
+ __tlb_flush_local();
}
+static inline void __tlb_flush_kernel(void)
+{
+ if (MACHINE_HAS_TLB_LC)
+ __tlb_flush_idte_local((unsigned long) init_mm.pgd |
+ init_mm.context.asce_bits);
+ else
+ __tlb_flush_local();
+}
+#endif
+
static inline void __tlb_flush_mm(struct mm_struct * mm)
{
/*
@@ -80,7 +155,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
* only ran on the local cpu.
*/
if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
- __tlb_flush_idte((unsigned long) mm->pgd |
+ __tlb_flush_asce(mm, (unsigned long) mm->pgd |
mm->context.asce_bits);
else
__tlb_flush_full(mm);
@@ -130,7 +205,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
static inline void flush_tlb_kernel_range(unsigned long start,
unsigned long end)
{
- __tlb_flush_mm(&init_mm);
+ __tlb_flush_kernel();
}
#endif /* _S390_TLBFLUSH_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 4133b3f72fb0..1be64a1506d0 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -92,8 +92,6 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
#define ARCH_HAS_SORT_EXTABLE
#define ARCH_HAS_SEARCH_EXTABLE
-int __handle_fault(unsigned long, unsigned long, int);
-
/**
* __copy_from_user: - Copy a block of data from user space, with less checking.
* @to: Destination address, in kernel space.
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index e4c99a183651..cc10cdd4d6a2 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -136,6 +136,7 @@ int main(void)
DEFINE(__LC_RESTART_FN, offsetof(struct _lowcore, restart_fn));
DEFINE(__LC_RESTART_DATA, offsetof(struct _lowcore, restart_data));
DEFINE(__LC_RESTART_SOURCE, offsetof(struct _lowcore, restart_source));
+ DEFINE(__LC_KERNEL_ASCE, offsetof(struct _lowcore, kernel_asce));
DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce));
DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 6b594439cca5..a734f3585ceb 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -386,6 +386,8 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
if (test_facility(66))
S390_lowcore.machine_flags |= MACHINE_FLAG_RRBM;
+ if (test_facility(51))
+ S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
#endif
}
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 526d3735ed29..1662038516c0 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -38,9 +38,9 @@ __PT_R14 = __PT_GPRS + 56
__PT_R15 = __PT_GPRS + 60
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
- _TIF_MCCK_PENDING | _TIF_PER_TRAP )
+ _TIF_MCCK_PENDING | _TIF_PER_TRAP | _TIF_ASCE)
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
- _TIF_MCCK_PENDING)
+ _TIF_MCCK_PENDING | _TIF_ASCE)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
_TIF_SYSCALL_TRACEPOINT)
_TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT)
@@ -241,6 +241,8 @@ sysc_work:
jo sysc_sigpending
tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
jo sysc_notify_resume
+ tm __TI_flags+3(%r12),_TIF_ASCE
+ jo sysc_uaccess
j sysc_return # beware of critical section cleanup
#
@@ -260,6 +262,14 @@ sysc_mcck_pending:
br %r1 # TIF bit will be cleared by handler
#
+# _TIF_ASCE is set, load user space asce
+#
+sysc_uaccess:
+ ni __TI_flags+3(%r12),255-_TIF_ASCE
+ lctl %c1,%c1,__LC_USER_ASCE # load primary asce
+ j sysc_return
+
+#
# _TIF_SIGPENDING is set, call do_signal
#
sysc_sigpending:
@@ -522,6 +532,8 @@ io_work_tif:
jo io_sigpending
tm __TI_flags+3(%r12),_TIF_NOTIFY_RESUME
jo io_notify_resume
+ tm __TI_flags+3(%r12),_TIF_ASCE
+ jo io_uaccess
j io_return # beware of critical section cleanup
#
@@ -535,6 +547,14 @@ io_mcck_pending:
j io_return
#
+# _TIF_ASCE is set, load user space asce
+#
+io_uaccess:
+ ni __TI_flags+3(%r12),255-_TIF_ASCE
+ lctl %c1,%c1,__LC_USER_ASCE # load primary asce
+ j io_return
+
+#
# _TIF_NEED_RESCHED is set, call schedule
#
io_reschedule:
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index e09dbe5f2901..5963e43618bb 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -43,9 +43,9 @@ STACK_SIZE = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE
_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
- _TIF_MCCK_PENDING | _TIF_PER_TRAP )
+ _TIF_MCCK_PENDING | _TIF_PER_TRAP | _TIF_ASCE)
_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
- _TIF_MCCK_PENDING)
+ _TIF_MCCK_PENDING | _TIF_ASCE)
_TIF_TRACE = (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
_TIF_SYSCALL_TRACEPOINT)
_TIF_TRANSFER = (_TIF_MCCK_PENDING | _TIF_TLB_WAIT)
@@ -275,6 +275,8 @@ sysc_work:
jo sysc_sigpending
tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
jo sysc_notify_resume
+ tm __TI_flags+7(%r12),_TIF_ASCE
+ jo sysc_uaccess
j sysc_return # beware of critical section cleanup
#
@@ -292,6 +294,14 @@ sysc_mcck_pending:
jg s390_handle_mcck # TIF bit will be cleared by handler
#
+# _TIF_ASCE is set, load user space asce
+#
+sysc_uaccess:
+ ni __TI_flags+7(%r12),255-_TIF_ASCE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+ j sysc_return
+
+#
# _TIF_SIGPENDING is set, call do_signal
#
sysc_sigpending:
@@ -559,6 +569,8 @@ io_work_tif:
jo io_sigpending
tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
jo io_notify_resume
+ tm __TI_flags+7(%r12),_TIF_ASCE
+ jo io_uaccess
j io_return # beware of critical section cleanup
#
@@ -571,6 +583,14 @@ io_mcck_pending:
j io_return
#
+# _TIF_ASCE is set, load user space asce
+#
+io_uaccess:
+ ni __TI_flags+7(%r12),255-_TIF_ASCE
+ lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
+ j io_return
+
+#
# _TIF_NEED_RESCHED is set, call schedule
#
io_reschedule:
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index d42b14cc72a4..c7463aa0014b 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -207,7 +207,7 @@ static inline int ext_hash(u16 code)
return (code + (code >> 9)) & (ARRAY_SIZE(ext_int_hash) - 1);
}
-int register_external_interrupt(u16 code, ext_int_handler_t handler)
+int register_external_irq(u16 code, ext_int_handler_t handler)
{
struct ext_int_info *p;
unsigned long flags;
@@ -225,9 +225,9 @@ int register_external_interrupt(u16 code, ext_int_handler_t handler)
spin_unlock_irqrestore(&ext_int_hash_lock, flags);
return 0;
}
-EXPORT_SYMBOL(register_external_interrupt);
+EXPORT_SYMBOL(register_external_irq);
-int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
+int unregister_external_irq(u16 code, ext_int_handler_t handler)
{
struct ext_int_info *p;
unsigned long flags;
@@ -243,7 +243,7 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
spin_unlock_irqrestore(&ext_int_hash_lock, flags);
return 0;
}
-EXPORT_SYMBOL(unregister_external_interrupt);
+EXPORT_SYMBOL(unregister_external_irq);
static irqreturn_t do_ext_interrupt(int irq, void *dummy)
{
@@ -253,7 +253,7 @@ static irqreturn_t do_ext_interrupt(int irq, void *dummy)
int index;
ext_code = *(struct ext_code *) &regs->int_code;
- if (ext_code.code != 0x1004)
+ if (ext_code.code != EXT_IRQ_CLK_COMP)
__get_cpu_var(s390_idle).nohz_delay = 1;
index = ext_hash(ext_code.code);
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index f51214c04858..ea75d011a6fc 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -673,7 +673,8 @@ static int __init cpumf_pmu_init(void)
ctl_clear_bit(0, 48);
/* register handler for measurement-alert interruptions */
- rc = register_external_interrupt(0x1407, cpumf_measurement_alert);
+ rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
+ cpumf_measurement_alert);
if (rc) {
pr_err("Registering for CPU-measurement alerts "
"failed with rc=%i\n", rc);
@@ -684,7 +685,8 @@ static int __init cpumf_pmu_init(void)
rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
if (rc) {
pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
- unregister_external_interrupt(0x1407, cpumf_measurement_alert);
+ unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
+ cpumf_measurement_alert);
goto out;
}
perf_cpu_notifier(cpumf_pmu_notifier);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 6c0d29827cb6..ea0c7b2ef030 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1621,7 +1621,8 @@ static int __init init_cpum_sampling_pmu(void)
pr_err("Registering for s390dbf failed\n");
debug_register_view(sfdbg, &debug_sprintf_view);
- err = register_external_interrupt(0x1407, cpumf_measurement_alert);
+ err = register_external_irq(EXT_IRQ_MEASURE_ALERT,
+ cpumf_measurement_alert);
if (err) {
pr_cpumsf_err(RS_INIT_FAILURE_ALRT);
goto out;
@@ -1630,7 +1631,8 @@ static int __init init_cpum_sampling_pmu(void)
err = perf_pmu_register(&cpumf_sampling, "cpum_sf", PERF_TYPE_RAW);
if (err) {
pr_cpumsf_err(RS_INIT_FAILURE_PERF);
- unregister_external_interrupt(0x1407, cpumf_measurement_alert);
+ unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
+ cpumf_measurement_alert);
goto out;
}
perf_cpu_notifier(cpumf_pmu_notifier);
diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
index d817cce7e72d..26b4ae96fdd7 100644
--- a/arch/s390/kernel/runtime_instr.c
+++ b/arch/s390/kernel/runtime_instr.c
@@ -138,7 +138,8 @@ static int __init runtime_instr_init(void)
return 0;
irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
- rc = register_external_interrupt(0x1407, runtime_instr_int_handler);
+ rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
+ runtime_instr_int_handler);
if (rc)
irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
else
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S
index 29bd7bec4176..a41f2c99dcc8 100644
--- a/arch/s390/kernel/sclp.S
+++ b/arch/s390/kernel/sclp.S
@@ -9,6 +9,7 @@
*/
#include <linux/linkage.h>
+#include <asm/irq.h>
LC_EXT_NEW_PSW = 0x58 # addr of ext int handler
LC_EXT_NEW_PSW_64 = 0x1b0 # addr of ext int handler 64 bit
@@ -73,9 +74,9 @@ _sclp_wait_int:
lpsw .LwaitpswS1-.LbaseS1(%r13) # wait until interrupt
.LwaitS1:
lh %r7,LC_EXT_INT_CODE
- chi %r7,0x1004 # timeout?
+ chi %r7,EXT_IRQ_CLK_COMP # timeout?
je .LtimeoutS1
- chi %r7,0x2401 # service int?
+ chi %r7,EXT_IRQ_SERVICE_SIG # service int?
jne .LloopS1
sr %r2,%r2
l %r3,LC_EXT_INT_PARAM
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 5a640b395bd4..512ce1cde2a4 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -236,6 +236,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
struct _lowcore *lc = pcpu->lowcore;
+ if (MACHINE_HAS_TLB_LC)
+ cpumask_set_cpu(cpu, &init_mm.context.cpu_attach_mask);
+ cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
atomic_inc(&init_mm.context.attach_count);
lc->cpu_nr = cpu;
lc->percpu_offset = __per_cpu_offset[cpu];
@@ -760,6 +763,9 @@ void __cpu_die(unsigned int cpu)
cpu_relax();
pcpu_free_lowcore(pcpu);
atomic_dec(&init_mm.context.attach_count);
+ cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
+ if (MACHINE_HAS_TLB_LC)
+ cpumask_clear_cpu(cpu, &init_mm.context.cpu_attach_mask);
}
void __noreturn cpu_die(void)
@@ -785,10 +791,10 @@ void __init smp_fill_possible_mask(void)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
/* request the 0x1201 emergency signal external interrupt */
- if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
+ if (register_external_irq(EXT_IRQ_EMERGENCY_SIG, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1201");
/* request the 0x1202 external call external interrupt */
- if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
+ if (register_external_irq(EXT_IRQ_EXTERNAL_CALL, do_ext_call_interrupt))
panic("Couldn't request external interrupt 0x1202");
smp_detect_cpus();
}
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index dd95f1631621..386d37a228bb 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -262,11 +262,11 @@ void __init time_init(void)
stp_reset();
/* request the clock comparator external interrupt */
- if (register_external_interrupt(0x1004, clock_comparator_interrupt))
- panic("Couldn't request external interrupt 0x1004");
+ if (register_external_irq(EXT_IRQ_CLK_COMP, clock_comparator_interrupt))
+ panic("Couldn't request external interrupt 0x1004");
/* request the timing alert external interrupt */
- if (register_external_interrupt(0x1406, timing_alert_interrupt))
+ if (register_external_irq(EXT_IRQ_TIMING_ALERT, timing_alert_interrupt))
panic("Couldn't request external interrupt 0x1406");
if (clocksource_register(&clocksource_tod) != 0)
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index 03a05ffb662f..08dfc839a6cf 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -167,6 +167,10 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode);
switch (subcode) {
+ case 0:
+ case 1:
+ page_table_reset_pgste(current->mm, 0, TASK_SIZE);
+ return -EOPNOTSUPP;
case 3:
vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
page_table_reset_pgste(current->mm, 0, TASK_SIZE);
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index e3fffe1dff51..c6d752e8bf28 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -2,7 +2,7 @@
# Makefile for s390-specific library files..
#
-lib-y += delay.o string.o uaccess_pt.o uaccess_mvcos.o find.o
+lib-y += delay.o string.o uaccess.o find.o
obj-$(CONFIG_32BIT) += div64.o qrnnd.o ucmpdi2.o mem32.o
obj-$(CONFIG_64BIT) += mem64.o
lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
new file mode 100644
index 000000000000..23f866b4c7f1
--- /dev/null
+++ b/arch/s390/lib/uaccess.c
@@ -0,0 +1,407 @@
+/*
+ * Standard user space access functions based on mvcp/mvcs and doing
+ * interesting things in the secondary space mode.
+ *
+ * Copyright IBM Corp. 2006,2014
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ * Gerald Schaefer (gerald.schaefer@de.ibm.com)
+ */
+
+#include <linux/jump_label.h>
+#include <linux/uaccess.h>
+#include <linux/export.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <asm/mmu_context.h>
+#include <asm/facility.h>
+
+#ifndef CONFIG_64BIT
+#define AHI "ahi"
+#define ALR "alr"
+#define CLR "clr"
+#define LHI "lhi"
+#define SLR "slr"
+#else
+#define AHI "aghi"
+#define ALR "algr"
+#define CLR "clgr"
+#define LHI "lghi"
+#define SLR "slgr"
+#endif
+
+static struct static_key have_mvcos = STATIC_KEY_INIT_FALSE;
+
+static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
+ unsigned long size)
+{
+ register unsigned long reg0 asm("0") = 0x81UL;
+ unsigned long tmp1, tmp2;
+
+ tmp1 = -4096UL;
+ asm volatile(
+ "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
+ "9: jz 7f\n"
+ "1:"ALR" %0,%3\n"
+ " "SLR" %1,%3\n"
+ " "SLR" %2,%3\n"
+ " j 0b\n"
+ "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
+ " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
+ " "SLR" %4,%1\n"
+ " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " jnh 4f\n"
+ "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
+ "10:"SLR" %0,%4\n"
+ " "ALR" %2,%4\n"
+ "4:"LHI" %4,-1\n"
+ " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
+ " bras %3,6f\n" /* memset loop */
+ " xc 0(1,%2),0(%2)\n"
+ "5: xc 0(256,%2),0(%2)\n"
+ " la %2,256(%2)\n"
+ "6:"AHI" %4,-256\n"
+ " jnm 5b\n"
+ " ex %4,0(%3)\n"
+ " j 8f\n"
+ "7:"SLR" %0,%0\n"
+ "8:\n"
+ EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
+ : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+ : "d" (reg0) : "cc", "memory");
+ return size;
+}
+
+static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
+ unsigned long size)
+{
+ unsigned long tmp1, tmp2;
+
+ update_primary_asce(current);
+ tmp1 = -256UL;
+ asm volatile(
+ " sacf 0\n"
+ "0: mvcp 0(%0,%2),0(%1),%3\n"
+ "10:jz 8f\n"
+ "1:"ALR" %0,%3\n"
+ " la %1,256(%1)\n"
+ " la %2,256(%2)\n"
+ "2: mvcp 0(%0,%2),0(%1),%3\n"
+ "11:jnz 1b\n"
+ " j 8f\n"
+ "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
+ " "LHI" %3,-4096\n"
+ " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
+ " "SLR" %4,%1\n"
+ " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " jnh 5f\n"
+ "4: mvcp 0(%4,%2),0(%1),%3\n"
+ "12:"SLR" %0,%4\n"
+ " "ALR" %2,%4\n"
+ "5:"LHI" %4,-1\n"
+ " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
+ " bras %3,7f\n" /* memset loop */
+ " xc 0(1,%2),0(%2)\n"
+ "6: xc 0(256,%2),0(%2)\n"
+ " la %2,256(%2)\n"
+ "7:"AHI" %4,-256\n"
+ " jnm 6b\n"
+ " ex %4,0(%3)\n"
+ " j 9f\n"
+ "8:"SLR" %0,%0\n"
+ "9: sacf 768\n"
+ EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
+ EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
+ : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+ : : "cc", "memory");
+ return size;
+}
+
+unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ if (static_key_false(&have_mvcos))
+ return copy_from_user_mvcos(to, from, n);
+ return copy_from_user_mvcp(to, from, n);
+}
+EXPORT_SYMBOL(__copy_from_user);
+
+static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
+ unsigned long size)
+{
+ register unsigned long reg0 asm("0") = 0x810000UL;
+ unsigned long tmp1, tmp2;
+
+ tmp1 = -4096UL;
+ asm volatile(
+ "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
+ "6: jz 4f\n"
+ "1:"ALR" %0,%3\n"
+ " "SLR" %1,%3\n"
+ " "SLR" %2,%3\n"
+ " j 0b\n"
+ "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
+ " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
+ " "SLR" %4,%1\n"
+ " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " jnh 5f\n"
+ "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
+ "7:"SLR" %0,%4\n"
+ " j 5f\n"
+ "4:"SLR" %0,%0\n"
+ "5:\n"
+ EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
+ : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+ : "d" (reg0) : "cc", "memory");
+ return size;
+}
+
+static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
+ unsigned long size)
+{
+ unsigned long tmp1, tmp2;
+
+ update_primary_asce(current);
+ tmp1 = -256UL;
+ asm volatile(
+ " sacf 0\n"
+ "0: mvcs 0(%0,%1),0(%2),%3\n"
+ "7: jz 5f\n"
+ "1:"ALR" %0,%3\n"
+ " la %1,256(%1)\n"
+ " la %2,256(%2)\n"
+ "2: mvcs 0(%0,%1),0(%2),%3\n"
+ "8: jnz 1b\n"
+ " j 5f\n"
+ "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
+ " "LHI" %3,-4096\n"
+ " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
+ " "SLR" %4,%1\n"
+ " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " jnh 6f\n"
+ "4: mvcs 0(%4,%1),0(%2),%3\n"
+ "9:"SLR" %0,%4\n"
+ " j 6f\n"
+ "5:"SLR" %0,%0\n"
+ "6: sacf 768\n"
+ EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
+ EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
+ : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+ : : "cc", "memory");
+ return size;
+}
+
+unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ if (static_key_false(&have_mvcos))
+ return copy_to_user_mvcos(to, from, n);
+ return copy_to_user_mvcs(to, from, n);
+}
+EXPORT_SYMBOL(__copy_to_user);
+
+static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
+ unsigned long size)
+{
+ register unsigned long reg0 asm("0") = 0x810081UL;
+ unsigned long tmp1, tmp2;
+
+ tmp1 = -4096UL;
+ /* FIXME: copy with reduced length. */
+ asm volatile(
+ "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
+ " jz 2f\n"
+ "1:"ALR" %0,%3\n"
+ " "SLR" %1,%3\n"
+ " "SLR" %2,%3\n"
+ " j 0b\n"
+ "2:"SLR" %0,%0\n"
+ "3: \n"
+ EX_TABLE(0b,3b)
+ : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
+ : "d" (reg0) : "cc", "memory");
+ return size;
+}
+
+static inline unsigned long copy_in_user_mvc(void __user *to, const void __user *from,
+ unsigned long size)
+{
+ unsigned long tmp1;
+
+ update_primary_asce(current);
+ asm volatile(
+ " sacf 256\n"
+ " "AHI" %0,-1\n"
+ " jo 5f\n"
+ " bras %3,3f\n"
+ "0:"AHI" %0,257\n"
+ "1: mvc 0(1,%1),0(%2)\n"
+ " la %1,1(%1)\n"
+ " la %2,1(%2)\n"
+ " "AHI" %0,-1\n"
+ " jnz 1b\n"
+ " j 5f\n"
+ "2: mvc 0(256,%1),0(%2)\n"
+ " la %1,256(%1)\n"
+ " la %2,256(%2)\n"
+ "3:"AHI" %0,-256\n"
+ " jnm 2b\n"
+ "4: ex %0,1b-0b(%3)\n"
+ "5: "SLR" %0,%0\n"
+ "6: sacf 768\n"
+ EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+ : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
+ : : "cc", "memory");
+ return size;
+}
+
+unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+ if (static_key_false(&have_mvcos))
+ return copy_in_user_mvcos(to, from, n);
+ return copy_in_user_mvc(to, from, n);
+}
+EXPORT_SYMBOL(__copy_in_user);
+
+static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
+{
+ register unsigned long reg0 asm("0") = 0x810000UL;
+ unsigned long tmp1, tmp2;
+
+ tmp1 = -4096UL;
+ asm volatile(
+ "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
+ " jz 4f\n"
+ "1:"ALR" %0,%2\n"
+ " "SLR" %1,%2\n"
+ " j 0b\n"
+ "2: la %3,4095(%1)\n"/* %4 = to + 4095 */
+ " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */
+ " "SLR" %3,%1\n"
+ " "CLR" %0,%3\n" /* copy crosses next page boundary? */
+ " jnh 5f\n"
+ "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
+ " "SLR" %0,%3\n"
+ " j 5f\n"
+ "4:"SLR" %0,%0\n"
+ "5:\n"
+ EX_TABLE(0b,2b) EX_TABLE(3b,5b)
+ : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
+ : "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
+ return size;
+}
+
+static inline unsigned long clear_user_xc(void __user *to, unsigned long size)
+{
+ unsigned long tmp1, tmp2;
+
+ update_primary_asce(current);
+ asm volatile(
+ " sacf 256\n"
+ " "AHI" %0,-1\n"
+ " jo 5f\n"
+ " bras %3,3f\n"
+ " xc 0(1,%1),0(%1)\n"
+ "0:"AHI" %0,257\n"
+ " la %2,255(%1)\n" /* %2 = ptr + 255 */
+ " srl %2,12\n"
+ " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
+ " "SLR" %2,%1\n"
+ " "CLR" %0,%2\n" /* clear crosses next page boundary? */
+ " jnh 5f\n"
+ " "AHI" %2,-1\n"
+ "1: ex %2,0(%3)\n"
+ " "AHI" %2,1\n"
+ " "SLR" %0,%2\n"
+ " j 5f\n"
+ "2: xc 0(256,%1),0(%1)\n"
+ " la %1,256(%1)\n"
+ "3:"AHI" %0,-256\n"
+ " jnm 2b\n"
+ "4: ex %0,0(%3)\n"
+ "5: "SLR" %0,%0\n"
+ "6: sacf 768\n"
+ EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+ : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
+ : : "cc", "memory");
+ return size;
+}
+
+unsigned long __clear_user(void __user *to, unsigned long size)
+{
+ if (static_key_false(&have_mvcos))
+ return clear_user_mvcos(to, size);
+ return clear_user_xc(to, size);
+}
+EXPORT_SYMBOL(__clear_user);
+
+static inline unsigned long strnlen_user_srst(const char __user *src,
+ unsigned long size)
+{
+ register unsigned long reg0 asm("0") = 0;
+ unsigned long tmp1, tmp2;
+
+ if (unlikely(!size))
+ return 0;
+ update_primary_asce(current);
+ asm volatile(
+ " la %2,0(%1)\n"
+ " la %3,0(%0,%1)\n"
+ " "SLR" %0,%0\n"
+ " sacf 256\n"
+ "0: srst %3,%2\n"
+ " jo 0b\n"
+ " la %0,1(%3)\n" /* strnlen_user results includes \0 */
+ " "SLR" %0,%1\n"
+ "1: sacf 768\n"
+ EX_TABLE(0b,1b)
+ : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
+ : "d" (reg0) : "cc", "memory");
+ return size;
+}
+
+unsigned long __strnlen_user(const char __user *src, unsigned long size)
+{
+ update_primary_asce(current);
+ return strnlen_user_srst(src, size);
+}
+EXPORT_SYMBOL(__strnlen_user);
+
+long __strncpy_from_user(char *dst, const char __user *src, long size)
+{
+ size_t done, len, offset, len_str;
+
+ if (unlikely(size <= 0))
+ return 0;
+ done = 0;
+ do {
+ offset = (size_t)src & ~PAGE_MASK;
+ len = min(size - done, PAGE_SIZE - offset);
+ if (copy_from_user(dst, src, len))
+ return -EFAULT;
+ len_str = strnlen(dst, len);
+ done += len_str;
+ src += len_str;
+ dst += len_str;
+ } while ((len_str == len) && (done < size));
+ return done;
+}
+EXPORT_SYMBOL(__strncpy_from_user);
+
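__strncpy_from_user() above copies page-sized chunks with copy_from_user() and stops as soon as strnlen() finds a NUL inside a chunk; it returns the string length without the NUL, or -EFAULT on a fault. The destination is not guaranteed to be NUL-terminated when the string is truncated at `size`. A hedged usage sketch (buffer size and names are illustrative):

	/* Illustrative only: fetch a short, NUL-terminated string from user space. */
	static long example_get_user_name(char buf[64], const char __user *uname)
	{
		long len = __strncpy_from_user(buf, uname, 63);

		if (len < 0)
			return len;	/* -EFAULT from the underlying copy */
		buf[len] = '\0';	/* terminate: truncation copies no NUL */
		return len;
	}
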
+/*
+ * The "old" uaccess variant without mvcos can be enforced with the
+ * uaccess_primary kernel parameter. This is mainly for debugging purposes.
+ */
+static int uaccess_primary __initdata;
+
+static int __init parse_uaccess_pt(char *__unused)
+{
+ uaccess_primary = 1;
+ return 0;
+}
+early_param("uaccess_primary", parse_uaccess_pt);
+
+static int __init uaccess_init(void)
+{
+ if (IS_ENABLED(CONFIG_64BIT) && !uaccess_primary && test_facility(27))
+ static_key_slow_inc(&have_mvcos);
+ return 0;
+}
+early_initcall(uaccess_init);
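The variant selection happens once at early boot: test_facility(27) reports whether MVCOS is installed, and the static key patches the branch in the hot paths so no run-time flag needs to be tested on every call. A reduced, generic sketch of that pattern with hypothetical names (not taken from this patch):

	#include <linux/init.h>
	#include <linux/jump_label.h>
	#include <asm/facility.h>

	/* hypothetical fast/slow variants, declared only for the sketch */
	unsigned long do_copy_fast(void *, const void __user *, unsigned long);
	unsigned long do_copy_slow(void *, const void __user *, unsigned long);

	static struct static_key have_fast_path = STATIC_KEY_INIT_FALSE;

	static int __init fast_path_init(void)
	{
		if (test_facility(27))		/* MVCOS available */
			static_key_slow_inc(&have_fast_path);
		return 0;
	}
	early_initcall(fast_path_init);

	static unsigned long do_copy(void *dst, const void __user *src, unsigned long n)
	{
		if (static_key_false(&have_fast_path))
			return do_copy_fast(dst, src, n);	/* MVCOS-style path */
		return do_copy_slow(dst, src, n);		/* fallback path */
	}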
diff --git a/arch/s390/lib/uaccess.h b/arch/s390/lib/uaccess.h
deleted file mode 100644
index c7e0e81f4b4e..000000000000
--- a/arch/s390/lib/uaccess.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Copyright IBM Corp. 2007
- *
- */
-
-#ifndef __ARCH_S390_LIB_UACCESS_H
-#define __ARCH_S390_LIB_UACCESS_H
-
-unsigned long copy_from_user_pt(void *to, const void __user *from, unsigned long n);
-unsigned long copy_to_user_pt(void __user *to, const void *from, unsigned long n);
-unsigned long copy_in_user_pt(void __user *to, const void __user *from, unsigned long n);
-unsigned long clear_user_pt(void __user *to, unsigned long n);
-unsigned long strnlen_user_pt(const char __user *src, unsigned long count);
-long strncpy_from_user_pt(char *dst, const char __user *src, long count);
-
-#endif /* __ARCH_S390_LIB_UACCESS_H */
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
deleted file mode 100644
index ae97b8df11aa..000000000000
--- a/arch/s390/lib/uaccess_mvcos.c
+++ /dev/null
@@ -1,263 +0,0 @@
-/*
- * Optimized user space space access functions based on mvcos.
- *
- * Copyright IBM Corp. 2006
- * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- * Gerald Schaefer (gerald.schaefer@de.ibm.com)
- */
-
-#include <linux/jump_label.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/mm.h>
-#include <asm/facility.h>
-#include <asm/uaccess.h>
-#include <asm/futex.h>
-#include "uaccess.h"
-
-#ifndef CONFIG_64BIT
-#define AHI "ahi"
-#define ALR "alr"
-#define CLR "clr"
-#define LHI "lhi"
-#define SLR "slr"
-#else
-#define AHI "aghi"
-#define ALR "algr"
-#define CLR "clgr"
-#define LHI "lghi"
-#define SLR "slgr"
-#endif
-
-static struct static_key have_mvcos = STATIC_KEY_INIT_TRUE;
-
-static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr,
- unsigned long size)
-{
- register unsigned long reg0 asm("0") = 0x81UL;
- unsigned long tmp1, tmp2;
-
- tmp1 = -4096UL;
- asm volatile(
- "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
- "9: jz 7f\n"
- "1:"ALR" %0,%3\n"
- " "SLR" %1,%3\n"
- " "SLR" %2,%3\n"
- " j 0b\n"
- "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
- " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
- " jnh 4f\n"
- "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
- "10:"SLR" %0,%4\n"
- " "ALR" %2,%4\n"
- "4:"LHI" %4,-1\n"
- " "ALR" %4,%0\n" /* copy remaining size, subtract 1 */
- " bras %3,6f\n" /* memset loop */
- " xc 0(1,%2),0(%2)\n"
- "5: xc 0(256,%2),0(%2)\n"
- " la %2,256(%2)\n"
- "6:"AHI" %4,-256\n"
- " jnm 5b\n"
- " ex %4,0(%3)\n"
- " j 8f\n"
- "7:"SLR" %0,%0\n"
- "8: \n"
- EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
- : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
- : "d" (reg0) : "cc", "memory");
- return size;
-}
-
-unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- if (static_key_true(&have_mvcos))
- return copy_from_user_mvcos(to, from, n);
- return copy_from_user_pt(to, from, n);
-}
-EXPORT_SYMBOL(__copy_from_user);
-
-static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
- unsigned long size)
-{
- register unsigned long reg0 asm("0") = 0x810000UL;
- unsigned long tmp1, tmp2;
-
- tmp1 = -4096UL;
- asm volatile(
- "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
- "6: jz 4f\n"
- "1:"ALR" %0,%3\n"
- " "SLR" %1,%3\n"
- " "SLR" %2,%3\n"
- " j 0b\n"
- "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
- " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
- " "SLR" %4,%1\n"
- " "CLR" %0,%4\n" /* copy crosses next page boundary? */
- " jnh 5f\n"
- "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
- "7:"SLR" %0,%4\n"
- " j 5f\n"
- "4:"SLR" %0,%0\n"
- "5: \n"
- EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
- : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
- : "d" (reg0) : "cc", "memory");
- return size;
-}
-
-unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- if (static_key_true(&have_mvcos))
- return copy_to_user_mvcos(to, from, n);
- return copy_to_user_pt(to, from, n);
-}
-EXPORT_SYMBOL(__copy_to_user);
-
-static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
- unsigned long size)
-{
- register unsigned long reg0 asm("0") = 0x810081UL;
- unsigned long tmp1, tmp2;
-
- tmp1 = -4096UL;
- /* FIXME: copy with reduced length. */
- asm volatile(
- "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
- " jz 2f\n"
- "1:"ALR" %0,%3\n"
- " "SLR" %1,%3\n"
- " "SLR" %2,%3\n"
- " j 0b\n"
- "2:"SLR" %0,%0\n"
- "3: \n"
- EX_TABLE(0b,3b)
- : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
- : "d" (reg0) : "cc", "memory");
- return size;
-}
-
-unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
-{
- if (static_key_true(&have_mvcos))
- return copy_in_user_mvcos(to, from, n);
- return copy_in_user_pt(to, from, n);
-}
-EXPORT_SYMBOL(__copy_in_user);
-
-static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
-{
- register unsigned long reg0 asm("0") = 0x810000UL;
- unsigned long tmp1, tmp2;
-
- tmp1 = -4096UL;
- asm volatile(
- "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
- " jz 4f\n"
- "1:"ALR" %0,%2\n"
- " "SLR" %1,%2\n"
- " j 0b\n"
- "2: la %3,4095(%1)\n"/* %4 = to + 4095 */
- " nr %3,%2\n" /* %4 = (to + 4095) & -4096 */
- " "SLR" %3,%1\n"
- " "CLR" %0,%3\n" /* copy crosses next page boundary? */
- " jnh 5f\n"
- "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
- " "SLR" %0,%3\n"
- " j 5f\n"
- "4:"SLR" %0,%0\n"
- "5: \n"
- EX_TABLE(0b,2b) EX_TABLE(3b,5b)
- : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
- : "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
- return size;
-}
-
-unsigned long __clear_user(void __user *to, unsigned long size)
-{
- if (static_key_true(&have_mvcos))
- return clear_user_mvcos(to, size);
- return clear_user_pt(to, size);
-}
-EXPORT_SYMBOL(__clear_user);
-
-static inline unsigned long strnlen_user_mvcos(const char __user *src,
- unsigned long count)
-{
- unsigned long done, len, offset, len_str;
- char buf[256];
-
- done = 0;
- do {
- offset = (unsigned long)src & ~PAGE_MASK;
- len = min(256UL, PAGE_SIZE - offset);
- len = min(count - done, len);
- if (copy_from_user_mvcos(buf, src, len))
- return 0;
- len_str = strnlen(buf, len);
- done += len_str;
- src += len_str;
- } while ((len_str == len) && (done < count));
- return done + 1;
-}
-
-unsigned long __strnlen_user(const char __user *src, unsigned long count)
-{
- if (static_key_true(&have_mvcos))
- return strnlen_user_mvcos(src, count);
- return strnlen_user_pt(src, count);
-}
-EXPORT_SYMBOL(__strnlen_user);
-
-static inline long strncpy_from_user_mvcos(char *dst, const char __user *src,
- long count)
-{
- unsigned long done, len, offset, len_str;
-
- if (unlikely(count <= 0))
- return 0;
- done = 0;
- do {
- offset = (unsigned long)src & ~PAGE_MASK;
- len = min(count - done, PAGE_SIZE - offset);
- if (copy_from_user_mvcos(dst, src, len))
- return -EFAULT;
- len_str = strnlen(dst, len);
- done += len_str;
- src += len_str;
- dst += len_str;
- } while ((len_str == len) && (done < count));
- return done;
-}
-
-long __strncpy_from_user(char *dst, const char __user *src, long count)
-{
- if (static_key_true(&have_mvcos))
- return strncpy_from_user_mvcos(dst, src, count);
- return strncpy_from_user_pt(dst, src, count);
-}
-EXPORT_SYMBOL(__strncpy_from_user);
-
-/*
- * The uaccess page tabe walk variant can be enforced with the "uaccesspt"
- * kernel parameter. This is mainly for debugging purposes.
- */
-static int force_uaccess_pt __initdata;
-
-static int __init parse_uaccess_pt(char *__unused)
-{
- force_uaccess_pt = 1;
- return 0;
-}
-early_param("uaccesspt", parse_uaccess_pt);
-
-static int __init uaccess_init(void)
-{
- if (IS_ENABLED(CONFIG_32BIT) || force_uaccess_pt || !test_facility(27))
- static_key_slow_dec(&have_mvcos);
- return 0;
-}
-early_initcall(uaccess_init);
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
deleted file mode 100644
index 8d39760bae68..000000000000
--- a/arch/s390/lib/uaccess_pt.c
+++ /dev/null
@@ -1,471 +0,0 @@
-/*
- * User access functions based on page table walks for enhanced
- * system layout without hardware support.
- *
- * Copyright IBM Corp. 2006, 2012
- * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
- */
-
-#include <linux/errno.h>
-#include <linux/hardirq.h>
-#include <linux/mm.h>
-#include <linux/hugetlb.h>
-#include <asm/uaccess.h>
-#include <asm/futex.h>
-#include "uaccess.h"
-
-#ifndef CONFIG_64BIT
-#define AHI "ahi"
-#define SLR "slr"
-#else
-#define AHI "aghi"
-#define SLR "slgr"
-#endif
-
-static unsigned long strnlen_kernel(const char __user *src, unsigned long count)
-{
- register unsigned long reg0 asm("0") = 0UL;
- unsigned long tmp1, tmp2;
-
- asm volatile(
- " la %2,0(%1)\n"
- " la %3,0(%0,%1)\n"
- " "SLR" %0,%0\n"
- "0: srst %3,%2\n"
- " jo 0b\n"
- " la %0,1(%3)\n" /* strnlen_kernel results includes \0 */
- " "SLR" %0,%1\n"
- "1:\n"
- EX_TABLE(0b,1b)
- : "+a" (count), "+a" (src), "=a" (tmp1), "=a" (tmp2)
- : "d" (reg0) : "cc", "memory");
- return count;
-}
-
-static unsigned long copy_in_kernel(void __user *to, const void __user *from,
- unsigned long count)
-{
- unsigned long tmp1;
-
- asm volatile(
- " "AHI" %0,-1\n"
- " jo 5f\n"
- " bras %3,3f\n"
- "0:"AHI" %0,257\n"
- "1: mvc 0(1,%1),0(%2)\n"
- " la %1,1(%1)\n"
- " la %2,1(%2)\n"
- " "AHI" %0,-1\n"
- " jnz 1b\n"
- " j 5f\n"
- "2: mvc 0(256,%1),0(%2)\n"
- " la %1,256(%1)\n"
- " la %2,256(%2)\n"
- "3:"AHI" %0,-256\n"
- " jnm 2b\n"
- "4: ex %0,1b-0b(%3)\n"
- "5:"SLR" %0,%0\n"
- "6:\n"
- EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
- : "+a" (count), "+a" (to), "+a" (from), "=a" (tmp1)
- : : "cc", "memory");
- return count;
-}
-
-/*
- * Returns kernel address for user virtual address. If the returned address is
- * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occurred and the
- * address contains the (negative) exception code.
- */
-#ifdef CONFIG_64BIT
-
-static unsigned long follow_table(struct mm_struct *mm,
- unsigned long address, int write)
-{
- unsigned long *table = (unsigned long *)__pa(mm->pgd);
-
- if (unlikely(address > mm->context.asce_limit - 1))
- return -0x38UL;
- switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
- case _ASCE_TYPE_REGION1:
- table = table + ((address >> 53) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INVALID))
- return -0x39UL;
- table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- /* fallthrough */
- case _ASCE_TYPE_REGION2:
- table = table + ((address >> 42) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INVALID))
- return -0x3aUL;
- table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- /* fallthrough */
- case _ASCE_TYPE_REGION3:
- table = table + ((address >> 31) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INVALID))
- return -0x3bUL;
- table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- /* fallthrough */
- case _ASCE_TYPE_SEGMENT:
- table = table + ((address >> 20) & 0x7ff);
- if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
- return -0x10UL;
- if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
- if (write && (*table & _SEGMENT_ENTRY_PROTECT))
- return -0x04UL;
- return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
- (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
- }
- table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
- }
- table = table + ((address >> 12) & 0xff);
- if (unlikely(*table & _PAGE_INVALID))
- return -0x11UL;
- if (write && (*table & _PAGE_PROTECT))
- return -0x04UL;
- return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
-}
-
-#else /* CONFIG_64BIT */
-
-static unsigned long follow_table(struct mm_struct *mm,
- unsigned long address, int write)
-{
- unsigned long *table = (unsigned long *)__pa(mm->pgd);
-
- table = table + ((address >> 20) & 0x7ff);
- if (unlikely(*table & _SEGMENT_ENTRY_INVALID))
- return -0x10UL;
- table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
- table = table + ((address >> 12) & 0xff);
- if (unlikely(*table & _PAGE_INVALID))
- return -0x11UL;
- if (write && (*table & _PAGE_PROTECT))
- return -0x04UL;
- return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
-}
-
-#endif /* CONFIG_64BIT */
-
-static inline unsigned long __user_copy_pt(unsigned long uaddr, void *kptr,
- unsigned long n, int write_user)
-{
- struct mm_struct *mm = current->mm;
- unsigned long offset, done, size, kaddr;
- void *from, *to;
-
- if (!mm)
- return n;
- done = 0;
-retry:
- spin_lock(&mm->page_table_lock);
- do {
- kaddr = follow_table(mm, uaddr, write_user);
- if (IS_ERR_VALUE(kaddr))
- goto fault;
-
- offset = uaddr & ~PAGE_MASK;
- size = min(n - done, PAGE_SIZE - offset);
- if (write_user) {
- to = (void *) kaddr;
- from = kptr + done;
- } else {
- from = (void *) kaddr;
- to = kptr + done;
- }
- memcpy(to, from, size);
- done += size;
- uaddr += size;
- } while (done < n);
- spin_unlock(&mm->page_table_lock);
- return n - done;
-fault:
- spin_unlock(&mm->page_table_lock);
- if (__handle_fault(uaddr, -kaddr, write_user))
- return n - done;
- goto retry;
-}
-
-/*
- * Do DAT for user address by page table walk, return kernel address.
- * This function needs to be called with current->mm->page_table_lock held.
- */
-static inline unsigned long __dat_user_addr(unsigned long uaddr, int write)
-{
- struct mm_struct *mm = current->mm;
- unsigned long kaddr;
- int rc;
-
-retry:
- kaddr = follow_table(mm, uaddr, write);
- if (IS_ERR_VALUE(kaddr))
- goto fault;
-
- return kaddr;
-fault:
- spin_unlock(&mm->page_table_lock);
- rc = __handle_fault(uaddr, -kaddr, write);
- spin_lock(&mm->page_table_lock);
- if (!rc)
- goto retry;
- return 0;
-}
-
-unsigned long copy_from_user_pt(void *to, const void __user *from, unsigned long n)
-{
- unsigned long rc;
-
- if (segment_eq(get_fs(), KERNEL_DS))
- return copy_in_kernel((void __user *) to, from, n);
- rc = __user_copy_pt((unsigned long) from, to, n, 0);
- if (unlikely(rc))
- memset(to + n - rc, 0, rc);
- return rc;
-}
-
-unsigned long copy_to_user_pt(void __user *to, const void *from, unsigned long n)
-{
- if (segment_eq(get_fs(), KERNEL_DS))
- return copy_in_kernel(to, (void __user *) from, n);
- return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
-}
-
-unsigned long clear_user_pt(void __user *to, unsigned long n)
-{
- void *zpage = (void *) empty_zero_page;
- unsigned long done, size, ret;
-
- done = 0;
- do {
- if (n - done > PAGE_SIZE)
- size = PAGE_SIZE;
- else
- size = n - done;
- if (segment_eq(get_fs(), KERNEL_DS))
- ret = copy_in_kernel(to, (void __user *) zpage, n);
- else
- ret = __user_copy_pt((unsigned long) to, zpage, size, 1);
- done += size;
- to += size;
- if (ret)
- return ret + n - done;
- } while (done < n);
- return 0;
-}
-
-unsigned long strnlen_user_pt(const char __user *src, unsigned long count)
-{
- unsigned long uaddr = (unsigned long) src;
- struct mm_struct *mm = current->mm;
- unsigned long offset, done, len, kaddr;
- unsigned long len_str;
-
- if (unlikely(!count))
- return 0;
- if (segment_eq(get_fs(), KERNEL_DS))
- return strnlen_kernel(src, count);
- if (!mm)
- return 0;
- done = 0;
-retry:
- spin_lock(&mm->page_table_lock);
- do {
- kaddr = follow_table(mm, uaddr, 0);
- if (IS_ERR_VALUE(kaddr))
- goto fault;
-
- offset = uaddr & ~PAGE_MASK;
- len = min(count - done, PAGE_SIZE - offset);
- len_str = strnlen((char *) kaddr, len);
- done += len_str;
- uaddr += len_str;
- } while ((len_str == len) && (done < count));
- spin_unlock(&mm->page_table_lock);
- return done + 1;
-fault:
- spin_unlock(&mm->page_table_lock);
- if (__handle_fault(uaddr, -kaddr, 0))
- return 0;
- goto retry;
-}
-
-long strncpy_from_user_pt(char *dst, const char __user *src, long count)
-{
- unsigned long done, len, offset, len_str;
-
- if (unlikely(count <= 0))
- return 0;
- done = 0;
- do {
- offset = (unsigned long)src & ~PAGE_MASK;
- len = min(count - done, PAGE_SIZE - offset);
- if (segment_eq(get_fs(), KERNEL_DS)) {
- if (copy_in_kernel((void __user *) dst, src, len))
- return -EFAULT;
- } else {
- if (__user_copy_pt((unsigned long) src, dst, len, 0))
- return -EFAULT;
- }
- len_str = strnlen(dst, len);
- done += len_str;
- src += len_str;
- dst += len_str;
- } while ((len_str == len) && (done < count));
- return done;
-}
-
-unsigned long copy_in_user_pt(void __user *to, const void __user *from,
- unsigned long n)
-{
- struct mm_struct *mm = current->mm;
- unsigned long offset_max, uaddr, done, size, error_code;
- unsigned long uaddr_from = (unsigned long) from;
- unsigned long uaddr_to = (unsigned long) to;
- unsigned long kaddr_to, kaddr_from;
- int write_user;
-
- if (segment_eq(get_fs(), KERNEL_DS))
- return copy_in_kernel(to, from, n);
- if (!mm)
- return n;
- done = 0;
-retry:
- spin_lock(&mm->page_table_lock);
- do {
- write_user = 0;
- uaddr = uaddr_from;
- kaddr_from = follow_table(mm, uaddr_from, 0);
- error_code = kaddr_from;
- if (IS_ERR_VALUE(error_code))
- goto fault;
-
- write_user = 1;
- uaddr = uaddr_to;
- kaddr_to = follow_table(mm, uaddr_to, 1);
- error_code = (unsigned long) kaddr_to;
- if (IS_ERR_VALUE(error_code))
- goto fault;
-
- offset_max = max(uaddr_from & ~PAGE_MASK,
- uaddr_to & ~PAGE_MASK);
- size = min(n - done, PAGE_SIZE - offset_max);
-
- memcpy((void *) kaddr_to, (void *) kaddr_from, size);
- done += size;
- uaddr_from += size;
- uaddr_to += size;
- } while (done < n);
- spin_unlock(&mm->page_table_lock);
- return n - done;
-fault:
- spin_unlock(&mm->page_table_lock);
- if (__handle_fault(uaddr, -error_code, write_user))
- return n - done;
- goto retry;
-}
-
-#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
- asm volatile("0: l %1,0(%6)\n" \
- "1: " insn \
- "2: cs %1,%2,0(%6)\n" \
- "3: jl 1b\n" \
- " lhi %0,0\n" \
- "4:\n" \
- EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
- : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
- "=m" (*uaddr) \
- : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
- "m" (*uaddr) : "cc" );
-
-static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
-{
- int oldval = 0, newval, ret;
-
- switch (op) {
- case FUTEX_OP_SET:
- __futex_atomic_op("lr %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- case FUTEX_OP_ADD:
- __futex_atomic_op("lr %2,%1\nar %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- case FUTEX_OP_OR:
- __futex_atomic_op("lr %2,%1\nor %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- case FUTEX_OP_ANDN:
- __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- case FUTEX_OP_XOR:
- __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
- ret, oldval, newval, uaddr, oparg);
- break;
- default:
- ret = -ENOSYS;
- }
- if (ret == 0)
- *old = oldval;
- return ret;
-}
-
-int __futex_atomic_op_inuser(int op, u32 __user *uaddr, int oparg, int *old)
-{
- int ret;
-
- if (segment_eq(get_fs(), KERNEL_DS))
- return __futex_atomic_op_pt(op, uaddr, oparg, old);
- if (unlikely(!current->mm))
- return -EFAULT;
- spin_lock(&current->mm->page_table_lock);
- uaddr = (u32 __force __user *)
- __dat_user_addr((__force unsigned long) uaddr, 1);
- if (!uaddr) {
- spin_unlock(&current->mm->page_table_lock);
- return -EFAULT;
- }
- get_page(virt_to_page(uaddr));
- spin_unlock(&current->mm->page_table_lock);
- ret = __futex_atomic_op_pt(op, uaddr, oparg, old);
- put_page(virt_to_page(uaddr));
- return ret;
-}
-
-static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
-{
- int ret;
-
- asm volatile("0: cs %1,%4,0(%5)\n"
- "1: la %0,0\n"
- "2:\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
- : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
- : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
- : "cc", "memory" );
- *uval = oldval;
- return ret;
-}
-
-int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
- u32 oldval, u32 newval)
-{
- int ret;
-
- if (segment_eq(get_fs(), KERNEL_DS))
- return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
- if (unlikely(!current->mm))
- return -EFAULT;
- spin_lock(&current->mm->page_table_lock);
- uaddr = (u32 __force __user *)
- __dat_user_addr((__force unsigned long) uaddr, 1);
- if (!uaddr) {
- spin_unlock(&current->mm->page_table_lock);
- return -EFAULT;
- }
- get_page(virt_to_page(uaddr));
- spin_unlock(&current->mm->page_table_lock);
- ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
- put_page(virt_to_page(uaddr));
- return ret;
-}
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 88cef505453b..19f623f1f21c 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -106,21 +106,24 @@ void bust_spinlocks(int yes)
* Returns the address space associated with the fault.
* Returns 0 for kernel space and 1 for user space.
*/
-static inline int user_space_fault(unsigned long trans_exc_code)
+static inline int user_space_fault(struct pt_regs *regs)
{
+ unsigned long trans_exc_code;
+
/*
* The lowest two bits of the translation exception
* identification indicate which paging table was used.
*/
- trans_exc_code &= 3;
- if (trans_exc_code == 2)
- /* Access via secondary space, set_fs setting decides */
+ trans_exc_code = regs->int_parm_long & 3;
+ if (trans_exc_code == 3) /* home space -> kernel */
+ return 0;
+ if (user_mode(regs))
+ return 1;
+ if (trans_exc_code == 2) /* secondary space -> set_fs */
return current->thread.mm_segment.ar4;
- /*
- * Access via primary space or access register is from user space
- * and access via home space is from the kernel.
- */
- return trans_exc_code != 3;
+ if (current->flags & PF_VCPU)
+ return 1;
+ return 0;
}
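The reworked user_space_fault() above derives the faulting address space from the two low bits of the translation-exception identification: 3 is home space (kernel), 0 and 1 (primary space, access register) count as user space unless the PSW indicates kernel mode, 2 (secondary space) is decided by the current set_fs() setting, and a KVM vCPU context counts as user space. A purely explanatory restatement of that decision with hypothetical parameter names:

	/* Explanatory sketch only; mirrors the logic of user_space_fault(). */
	static int example_fault_is_user(unsigned int teid, bool psw_user,
					 bool secondary_is_user, bool is_vcpu)
	{
		switch (teid & 3) {
		case 3:
			return 0;				/* home space: kernel */
		case 2:
			return psw_user || secondary_is_user;	/* set_fs() decides */
		default:
			return psw_user || is_vcpu;		/* primary / access register */
		}
	}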
static inline void report_user_fault(struct pt_regs *regs, long signr)
@@ -172,7 +175,7 @@ static noinline void do_no_context(struct pt_regs *regs)
* terminate things with extreme prejudice.
*/
address = regs->int_parm_long & __FAIL_ADDR_MASK;
- if (!user_space_fault(regs->int_parm_long))
+ if (!user_space_fault(regs))
printk(KERN_ALERT "Unable to handle kernel pointer dereference"
" at virtual kernel address %p\n", (void *)address);
else
@@ -296,7 +299,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
* user context.
*/
fault = VM_FAULT_BADCONTEXT;
- if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm))
+ if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
goto out;
address = trans_exc_code & __FAIL_ADDR_MASK;
@@ -441,30 +444,6 @@ void __kprobes do_dat_exception(struct pt_regs *regs)
do_fault_error(regs, fault);
}
-int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
-{
- struct pt_regs regs;
- int access, fault;
-
- /* Emulate a uaccess fault from kernel mode. */
- regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT | PSW_MASK_MCHECK;
- if (!irqs_disabled())
- regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
- regs.psw.addr = (unsigned long) __builtin_return_address(0);
- regs.psw.addr |= PSW_ADDR_AMODE;
- regs.int_code = pgm_int_code;
- regs.int_parm_long = (uaddr & PAGE_MASK) | 2;
- access = write ? VM_WRITE : VM_READ;
- fault = do_exception(&regs, access);
- /*
- * Since the fault happened in kernel mode while performing a uaccess
- * all we need to do now is emulating a fixup in case "fault" is not
- * zero.
- * For the calling uaccess functions this results always in -EFAULT.
- */
- return fault ? -EFAULT : 0;
-}
-
#ifdef CONFIG_PFAULT
/*
* 'pfault' pseudo page faults routines.
@@ -645,7 +624,7 @@ static int __init pfault_irq_init(void)
{
int rc;
- rc = register_external_interrupt(0x2603, pfault_interrupt);
+ rc = register_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
if (rc)
goto out_extint;
rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP;
@@ -656,7 +635,7 @@ static int __init pfault_irq_init(void)
return 0;
out_pfault:
- unregister_external_interrupt(0x2603, pfault_interrupt);
+ unregister_external_irq(EXT_IRQ_CP_SERVICE, pfault_interrupt);
out_extint:
pfault_disable = 1;
return rc;
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index d261c62e40a6..0727a55d87d9 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -123,10 +123,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
pmd_t *pmdp = (pmd_t *) ptep;
pte_t pte = huge_ptep_get(ptep);
- if (MACHINE_HAS_IDTE)
- __pmd_idte(addr, pmdp);
- else
- __pmd_csp(pmdp);
+ pmdp_flush_direct(mm, addr, pmdp);
pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
return pte;
}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index ad446b0c55b6..0c1073ed1e84 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -124,8 +124,6 @@ void __init paging_init(void)
__ctl_load(S390_lowcore.kernel_asce, 13, 13);
arch_local_irq_restore(4UL << (BITS_PER_LONG - 8));
- atomic_set(&init_mm.context.attach_count, 1);
-
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init();
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
@@ -136,6 +134,11 @@ void __init paging_init(void)
void __init mem_init(void)
{
+ if (MACHINE_HAS_TLB_LC)
+ cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
+ cpumask_set_cpu(0, mm_cpumask(&init_mm));
+ atomic_set(&init_mm.context.attach_count, 1);
+
max_mapnr = max_low_pfn;
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5d8324cd866b..d7cfd57815fb 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -54,7 +54,7 @@ static void __crst_table_upgrade(void *arg)
struct mm_struct *mm = arg;
if (current->active_mm == mm)
- update_mm(mm, current);
+ update_user_asce(mm, 1);
__tlb_flush_local();
}
@@ -107,8 +107,10 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
pgd_t *pgd;
- if (current->active_mm == mm)
+ if (current->active_mm == mm) {
+ clear_user_asce(mm, 1);
__tlb_flush_mm(mm);
+ }
while (mm->context.asce_limit > limit) {
pgd = mm->pgd;
switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
@@ -132,7 +134,7 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
crst_table_free(mm, (unsigned long *) pgd);
}
if (current->active_mm == mm)
- update_mm(mm, current);
+ update_user_asce(mm, 1);
}
#endif
@@ -198,7 +200,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
static void gmap_flush_tlb(struct gmap *gmap)
{
if (MACHINE_HAS_IDTE)
- __tlb_flush_idte((unsigned long) gmap->table |
+ __tlb_flush_asce(gmap->mm, (unsigned long) gmap->table |
_ASCE_TYPE_REGION1);
else
__tlb_flush_global();
@@ -217,7 +219,7 @@ void gmap_free(struct gmap *gmap)
/* Flush tlb. */
if (MACHINE_HAS_IDTE)
- __tlb_flush_idte((unsigned long) gmap->table |
+ __tlb_flush_asce(gmap->mm, (unsigned long) gmap->table |
_ASCE_TYPE_REGION1);
else
__tlb_flush_global();
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index bcfb70b60be6..72b04de18283 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -138,7 +138,6 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
}
ret = 0;
out:
- flush_tlb_kernel_range(start, end);
return ret;
}
@@ -265,7 +264,6 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
memset((void *)start, 0, end - start);
ret = 0;
out:
- flush_tlb_kernel_range(start, end);
return ret;
}
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index a32c96761eab..276f2e26c761 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -1033,7 +1033,7 @@ int hwsampler_setup(void)
max_sampler_rate = cb->qsi.max_sampl_rate;
}
}
- register_external_interrupt(0x1407, hws_ext_handler);
+ register_external_irq(EXT_IRQ_MEASURE_ALERT, hws_ext_handler);
hws_state = HWS_DEALLOCATED;
rc = 0;
@@ -1068,7 +1068,7 @@ int hwsampler_shutdown(void)
hws_wq = NULL;
}
- unregister_external_interrupt(0x1407, hws_ext_handler);
+ unregister_external_irq(EXT_IRQ_MEASURE_ALERT, hws_ext_handler);
hws_state = HWS_INIT;
rc = 0;
}