Diffstat (limited to 'arch/sparc64/lib')
-rw-r--r--  arch/sparc64/lib/Makefile             |  2
-rw-r--r--  arch/sparc64/lib/VISsave.S            |  8
-rw-r--r--  arch/sparc64/lib/dec_and_lock.S       | 80
-rw-r--r--  arch/sparc64/lib/strncpy_from_user.S  | 16
-rw-r--r--  arch/sparc64/lib/user_fixup.c         | 63
5 files changed, 40 insertions, 129 deletions
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index d968aebe83b2..c295806500f7 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -14,6 +14,4 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
copy_in_user.o user_fixup.o memmove.o \
mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
-
obj-y += iomap.o
diff --git a/arch/sparc64/lib/VISsave.S b/arch/sparc64/lib/VISsave.S
index 4e18989bd602..a0ded5c5aa5c 100644
--- a/arch/sparc64/lib/VISsave.S
+++ b/arch/sparc64/lib/VISsave.S
@@ -59,15 +59,17 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
be,pn %icc, 9b
add %g6, TI_FPREGS, %g2
andcc %o5, FPRS_DL, %g0
- membar #StoreStore | #LoadStore
be,pn %icc, 4f
add %g6, TI_FPREGS+0x40, %g3
+ membar #Sync
stda %f0, [%g2 + %g1] ASI_BLK_P
stda %f16, [%g3 + %g1] ASI_BLK_P
+ membar #Sync
andcc %o5, FPRS_DU, %g0
be,pn %icc, 5f
4: add %g1, 128, %g1
+ membar #Sync
stda %f32, [%g2 + %g1] ASI_BLK_P
stda %f48, [%g3 + %g1] ASI_BLK_P
@@ -87,7 +89,7 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
sll %g1, 5, %g1
add %g6, TI_FPREGS+0xc0, %g3
wr %g0, FPRS_FEF, %fprs
- membar #StoreStore | #LoadStore
+ membar #Sync
stda %f32, [%g2 + %g1] ASI_BLK_P
stda %f48, [%g3 + %g1] ASI_BLK_P
membar #Sync
@@ -128,8 +130,8 @@ VISenterhalf:
be,pn %icc, 4f
add %g6, TI_FPREGS, %g2
- membar #StoreStore | #LoadStore
add %g6, TI_FPREGS+0x40, %g3
+ membar #Sync
stda %f0, [%g2 + %g1] ASI_BLK_P
stda %f16, [%g3 + %g1] ASI_BLK_P
membar #Sync
diff --git a/arch/sparc64/lib/dec_and_lock.S b/arch/sparc64/lib/dec_and_lock.S
deleted file mode 100644
index 8ee288dd0afc..000000000000
--- a/arch/sparc64/lib/dec_and_lock.S
+++ /dev/null
@@ -1,80 +0,0 @@
-/* $Id: dec_and_lock.S,v 1.5 2001/11/18 00:12:56 davem Exp $
- * dec_and_lock.S: Sparc64 version of "atomic_dec_and_lock()"
- * using cas and ldstub instructions.
- *
- * Copyright (C) 2000 David S. Miller (davem@redhat.com)
- */
-#include <linux/config.h>
-#include <asm/thread_info.h>
-
- .text
- .align 64
-
- /* CAS basically works like this:
- *
- * void CAS(MEM, REG1, REG2)
- * {
- * START_ATOMIC();
- * if (*(MEM) == REG1) {
- * TMP = *(MEM);
- * *(MEM) = REG2;
- * REG2 = TMP;
- * } else
- * REG2 = *(MEM);
- * END_ATOMIC();
- * }
- */
-
- .globl _atomic_dec_and_lock
-_atomic_dec_and_lock: /* %o0 = counter, %o1 = lock */
-loop1: lduw [%o0], %g2
- subcc %g2, 1, %g7
- be,pn %icc, start_to_zero
- nop
-nzero: cas [%o0], %g2, %g7
- cmp %g2, %g7
- bne,pn %icc, loop1
- mov 0, %g1
-
-out:
- membar #StoreLoad | #StoreStore
- retl
- mov %g1, %o0
-start_to_zero:
-#ifdef CONFIG_PREEMPT
- ldsw [%g6 + TI_PRE_COUNT], %g3
- add %g3, 1, %g3
- stw %g3, [%g6 + TI_PRE_COUNT]
-#endif
-to_zero:
- ldstub [%o1], %g3
- membar #StoreLoad | #StoreStore
- brnz,pn %g3, spin_on_lock
- nop
-loop2: cas [%o0], %g2, %g7 /* ASSERT(g7 == 0) */
- cmp %g2, %g7
-
- be,pt %icc, out
- mov 1, %g1
- lduw [%o0], %g2
- subcc %g2, 1, %g7
- be,pn %icc, loop2
- nop
- membar #StoreStore | #LoadStore
- stb %g0, [%o1]
-#ifdef CONFIG_PREEMPT
- ldsw [%g6 + TI_PRE_COUNT], %g3
- sub %g3, 1, %g3
- stw %g3, [%g6 + TI_PRE_COUNT]
-#endif
-
- b,pt %xcc, nzero
- nop
-spin_on_lock:
- ldub [%o1], %g3
- membar #LoadLoad
- brnz,pt %g3, spin_on_lock
- nop
- ba,pt %xcc, to_zero
- nop
- nop
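
For reference, the CAS pseudocode in the deleted comment above describes the compare-and-swap primitive the removed _atomic_dec_and_lock routine was built around. The sketch below restates that algorithm in C11 atomics purely as an illustration: decrement the counter with a CAS loop, and only when the decrement would reach zero take the spinlock first, re-checking the counter under the lock. The demo_* names are hypothetical and are not kernel interfaces.

#include <stdatomic.h>

struct demo_spinlock { atomic_flag locked; };

static void demo_spin_lock(struct demo_spinlock *l)
{
	/* ldstub-style spin: set the flag, keep spinning while it was already set */
	while (atomic_flag_test_and_set_explicit(&l->locked, memory_order_acquire))
		;
}

static void demo_spin_unlock(struct demo_spinlock *l)
{
	atomic_flag_clear_explicit(&l->locked, memory_order_release);
}

/* Returns 1 with the lock held when the counter drops to zero, else 0. */
static int demo_atomic_dec_and_lock(atomic_int *count, struct demo_spinlock *lock)
{
	int old = atomic_load(count);

	/* Fast path: plain CAS decrement as long as the result stays non-zero. */
	while (old != 1) {
		if (atomic_compare_exchange_weak(count, &old, old - 1))
			return 0;
	}

	/* The next decrement would hit zero: take the lock before doing it. */
	demo_spin_lock(lock);
	old = atomic_load(count);
	for (;;) {
		if (old != 1) {
			/* Someone raised the count meanwhile; decrement and bail out. */
			if (atomic_compare_exchange_weak(count, &old, old - 1)) {
				demo_spin_unlock(lock);
				return 0;
			}
			continue;
		}
		if (atomic_compare_exchange_weak(count, &old, 0))
			return 1;	/* counter reached zero, lock stays held */
	}
}

A caller would use it roughly as: if (demo_atomic_dec_and_lock(&obj->refcount, &obj->lock)) { tear down obj; demo_spin_unlock(&obj->lock); }, mirroring how atomic_dec_and_lock() is used with a real spinlock_t.
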
diff --git a/arch/sparc64/lib/strncpy_from_user.S b/arch/sparc64/lib/strncpy_from_user.S
index 09cbbaa0ebf4..e1264650ca7a 100644
--- a/arch/sparc64/lib/strncpy_from_user.S
+++ b/arch/sparc64/lib/strncpy_from_user.S
@@ -125,15 +125,11 @@ __strncpy_from_user:
add %o2, %o3, %o0
.size __strncpy_from_user, .-__strncpy_from_user
- .section .fixup,#alloc,#execinstr
- .align 4
-4: retl
- mov -EFAULT, %o0
-
.section __ex_table,#alloc
.align 4
- .word 60b, 4b
- .word 61b, 4b
- .word 62b, 4b
- .word 63b, 4b
- .word 64b, 4b
+ .word 60b, __retl_efault
+ .word 61b, __retl_efault
+ .word 62b, __retl_efault
+ .word 63b, __retl_efault
+ .word 64b, __retl_efault
+ .previous
diff --git a/arch/sparc64/lib/user_fixup.c b/arch/sparc64/lib/user_fixup.c
index 0278e34125db..19d1fdb17d0e 100644
--- a/arch/sparc64/lib/user_fixup.c
+++ b/arch/sparc64/lib/user_fixup.c
@@ -11,61 +11,56 @@
/* Calculating the exact fault address when using
* block loads and stores can be very complicated.
+ *
* Instead of trying to be clever and handling all
* of the cases, just fix things up simply here.
*/
-unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
+static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset)
{
- char *dst = to;
- const char __user *src = from;
+ unsigned long fault_addr = current_thread_info()->fault_address;
+ unsigned long end = start + size;
- while (size) {
- if (__get_user(*dst, src))
- break;
- dst++;
- src++;
- size--;
+ if (fault_addr < start || fault_addr >= end) {
+ *offset = 0;
+ } else {
+ *offset = fault_addr - start;
+ size = end - fault_addr;
}
+ return size;
+}
- if (size)
- memset(dst, 0, size);
+unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
+{
+ unsigned long offset;
+
+ size = compute_size((unsigned long) from, size, &offset);
+ if (likely(size))
+ memset(to + offset, 0, size);
return size;
}
unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
{
- char __user *dst = to;
- const char *src = from;
-
- while (size) {
- if (__put_user(*src, dst))
- break;
- dst++;
- src++;
- size--;
- }
+ unsigned long offset;
- return size;
+ return compute_size((unsigned long) to, size, &offset);
}
unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
{
- char __user *dst = to;
- char __user *src = from;
+ unsigned long fault_addr = current_thread_info()->fault_address;
+ unsigned long start = (unsigned long) to;
+ unsigned long end = start + size;
- while (size) {
- char tmp;
+ if (fault_addr >= start && fault_addr < end)
+ return end - fault_addr;
- if (__get_user(tmp, src))
- break;
- if (__put_user(tmp, dst))
- break;
- dst++;
- src++;
- size--;
- }
+ start = (unsigned long) from;
+ end = start + size;
+ if (fault_addr >= start && fault_addr < end)
+ return end - fault_addr;
return size;
}
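
The replacement fixup code above relies on the trap handler having recorded the faulting user address in current_thread_info()->fault_address; compute_size() then reports how many bytes of the original request were not transferred and, for copy_from_user_fixup(), at which destination offset zero-filling of the untouched tail should begin (copy_to_user_fixup() needs no zeroing, since the fault is on the user side). The following standalone sketch reproduces that arithmetic with a fake fault address so the return values can be seen directly; the demo_* names are invented for illustration and are not part of the kernel.

#include <stdio.h>

/* Hypothetical stand-in for current_thread_info()->fault_address. */
static unsigned long demo_fault_address;

/* Same arithmetic as compute_size(): returns the number of bytes of
 * [start, start + size) at and beyond the fault, and sets *offset to
 * the number of bytes that were transferred before the fault hit.
 */
static unsigned long demo_compute_size(unsigned long start, unsigned long size,
				       unsigned long *offset)
{
	unsigned long fault_addr = demo_fault_address;
	unsigned long end = start + size;

	if (fault_addr < start || fault_addr >= end) {
		*offset = 0;
	} else {
		*offset = fault_addr - start;
		size = end - fault_addr;
	}
	return size;
}

int main(void)
{
	unsigned long offset;

	/* Pretend a 256-byte copy from 0x1000 faulted at 0x10c0. */
	demo_fault_address = 0x10c0;
	unsigned long left = demo_compute_size(0x1000, 256, &offset);

	/* The caller would zero-fill the last `left` bytes of the destination
	 * and report `left` back as the number of bytes not copied.
	 */
	printf("copied %lu, not copied %lu\n", offset, left);
	return 0;
}

Running the sketch prints "copied 192, not copied 64": the first 0xc0 bytes of the destination were filled before the fault at 0x10c0, and the remaining 0x40 bytes are reported back as uncopied.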