From 2ec420b26f7b6ff332393f0bb5a7d245f7ad87f0 Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Wed, 31 May 2017 16:19:47 +0100
Subject: MIPS: Fix mips_atomic_set() retry condition

The inline asm retry check in the MIPS_ATOMIC_SET operation of the
sysmips system call has been backwards since commit f1e39a4a616c
("MIPS: Rewrite sysmips(MIPS_ATOMIC_SET, ...) in C with inline
assembler") merged in v2.6.32, resulting in the non R10000_LLSC_WAR
case retrying until the operation was inatomic, before returning the
new value that was probably just written multiple times instead of the
old value.

Invert the branch condition to fix that particular issue.

Fixes: f1e39a4a616c ("MIPS: Rewrite sysmips(MIPS_ATOMIC_SET, ...) in C with inline assembler")
Signed-off-by: James Hogan
Cc: linux-mips@linux-mips.org
Cc: stable@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/16148/
Signed-off-by: Ralf Baechle
---
 arch/mips/kernel/syscall.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/mips/kernel/syscall.c')

diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 1dfa7f5796c7..95e1b300ac0d 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -134,7 +134,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
 		"1:	ll	%[old], (%[addr])			\n"
 		"	move	%[tmp], %[new]				\n"
 		"2:	sc	%[tmp], (%[addr])			\n"
-		"	bnez	%[tmp], 4f				\n"
+		"	beqz	%[tmp], 4f				\n"
 		"3:							\n"
 		"	.insn						\n"
 		"	.subsection 2					\n"
--
cgit v1.2.1

From 49955d84cd9ccdca5a16a495e448e1a06fad9e49 Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Wed, 31 May 2017 16:19:48 +0100
Subject: MIPS: Save static registers before sysmips

The MIPS sysmips system call handler may return directly from the
MIPS_ATOMIC_SET case (mips_atomic_set()) to syscall_exit. This path
restores the static (callee saved) registers, however they won't have
been saved on entry to the system call.

Use the save_static_function() macro to create a __sys_sysmips wrapper
function which saves the static registers before calling sys_sysmips,
so that the correct static register state is restored by syscall_exit.

Fixes: f1e39a4a616c ("MIPS: Rewrite sysmips(MIPS_ATOMIC_SET, ...) in C with inline assembler")
Signed-off-by: James Hogan
Cc: linux-mips@linux-mips.org
Cc: stable@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/16149/
Signed-off-by: Ralf Baechle
---
 arch/mips/kernel/syscall.c | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'arch/mips/kernel/syscall.c')

diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 95e1b300ac0d..3e5d82e744ac 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -192,6 +192,12 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
 	unreachable();
 }
 
+/*
+ * mips_atomic_set() normally returns directly via syscall_exit potentially
+ * clobbering static registers, so be sure to preserve them.
+ */
+save_static_function(sys_sysmips);
+
 SYSCALL_DEFINE3(sysmips, long, cmd, long, arg1, long, arg2)
 {
 	switch (cmd) {
--
cgit v1.2.1
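The two fixes above hinge on the MIPS ll/sc contract: sc writes 1 back to its
source register when the store completed atomically and 0 when the link to the
load was broken, so the loop must retry on *zero* (beqz), which is exactly what
the original bnez got backwards. As a reference point, here is a minimal,
self-contained sketch of the corrected retry pattern, assuming a pre-R6 MIPS
target and a GCC-style toolchain; mips_atomic_swap() is a hypothetical
illustration, not the kernel's mips_atomic_set(), and it omits the user-access
fixup handling the real syscall needs.

/* Illustrative ll/sc retry loop with the branch condition the right way
 * around: retry while sc reports failure (tmp == 0), return the old value. */
static unsigned long mips_atomic_swap(volatile unsigned long *addr,
				      unsigned long new)
{
	unsigned long old, tmp;

	__asm__ __volatile__(
	"	.set	push					\n"
	"	.set	mips2					\n" /* ll/sc need ISA II+ */
	"1:	ll	%[old], (%[addr])			\n" /* linked load of old value */
	"	move	%[tmp], %[new]				\n" /* value to store */
	"	sc	%[tmp], (%[addr])			\n" /* tmp := 1 on success, 0 on failure */
	"	beqz	%[tmp], 1b				\n" /* retry until the sc succeeds */
	"	.set	pop					\n"
	: [old] "=&r" (old), [tmp] "=&r" (tmp)
	: [addr] "r" (addr), [new] "r" (new)
	: "memory");

	return old;
}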
From 4915e1b043d6286928207b1f6968197b50407294 Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Wed, 31 May 2017 16:19:49 +0100
Subject: MIPS: Fix mips_atomic_set() with EVA

EVA linked loads (LLE) and conditional stores (SCE) should be used on
EVA kernels for the MIPS_ATOMIC_SET operation of the sysmips system
call, or else the atomic set will apply to the kernel view of the
virtual address space (potentially unmapped on EVA kernels) rather
than the user view (TLB mapped).

Signed-off-by: James Hogan
Cc: linux-mips@linux-mips.org
Cc: stable@vger.kernel.org # 3.15.x-
Patchwork: https://patchwork.linux-mips.org/patch/16151/
Signed-off-by: Ralf Baechle
---
 arch/mips/kernel/syscall.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'arch/mips/kernel/syscall.c')

diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 3e5d82e744ac..863f958c126c 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -29,6 +29,7 @@
 #include <linux/sched/task_stack.h>
 
 #include <asm/asm.h>
+#include <asm/asm-eva.h>
 #include <asm/branch.h>
 #include <asm/cachectl.h>
 #include <asm/cacheflush.h>
@@ -131,9 +132,11 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
 	__asm__ __volatile__ (
 		"	.set	"MIPS_ISA_ARCH_LEVEL"			\n"
 		"	li	%[err], 0				\n"
-		"1:	ll	%[old], (%[addr])			\n"
+		"1:							\n"
+		user_ll("%[old]", "(%[addr])")
 		"	move	%[tmp], %[new]				\n"
-		"2:	sc	%[tmp], (%[addr])			\n"
+		"2:							\n"
+		user_sc("%[tmp]", "(%[addr])")
 		"	beqz	%[tmp], 4f				\n"
 		"3:							\n"
 		"	.insn						\n"
--
cgit v1.2.1

From 203e090ade7357101e40a3d3aad3af77653da8df Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Wed, 31 May 2017 16:19:50 +0100
Subject: MIPS: Branch straight to ll in mips_atomic_set()

Adjust the atomic loop in the MIPS_ATOMIC_SET operation of the sysmips
system call to branch straight back to the linked load rather than
jumping via a different subsection (whose purpose remains a mystery to
me).

Signed-off-by: James Hogan
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/16150/
Signed-off-by: Ralf Baechle
---
 arch/mips/kernel/syscall.c | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

(limited to 'arch/mips/kernel/syscall.c')

diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 863f958c126c..58c6f634b550 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -137,13 +137,9 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
 		"	move	%[tmp], %[new]				\n"
 		"2:							\n"
 		user_sc("%[tmp]", "(%[addr])")
-		"	beqz	%[tmp], 4f				\n"
+		"	beqz	%[tmp], 1b				\n"
 		"3:							\n"
 		"	.insn						\n"
-		"	.subsection 2					\n"
-		"4:	b	1b					\n"
-		"	.previous					\n"
-		"							\n"
 		"	.section .fixup,\"ax\"				\n"
 		"5:	li	%[err], %[efault]			\n"
 		"	j	3b					\n"
--
cgit v1.2.1
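Taken together, the series leaves MIPS_ATOMIC_SET as an unconditional atomic
exchange that returns the old value: the load and store go through
user_ll/user_sc so EVA kernels operate on the user view of the address space,
and a failed store conditional branches straight back to the linked load. For
a portable view of those semantics, here is a self-contained C11 analogue;
atomic_set_like() is an illustrative name, not a kernel interface, and on MIPS
atomic_exchange() compiles down to the same kind of ll/sc retry loop sketched
earlier.

#include <stdatomic.h>
#include <stdio.h>

/* C11 analogue of the fixed syscall semantics: unconditionally store a
 * new value and return the old one. */
static unsigned long atomic_set_like(_Atomic unsigned long *addr,
				     unsigned long new)
{
	return atomic_exchange(addr, new);
}

int main(void)
{
	_Atomic unsigned long word = 42;
	unsigned long old = atomic_set_like(&word, 7);

	/* sysmips(MIPS_ATOMIC_SET, ...) likewise returns the old value;
	 * the inverted branch condition made it return the new one. */
	printf("old=%lu new=%lu\n", old, (unsigned long)word);
	return 0;
}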