Diffstat (limited to 'arch/sparc64')
-rw-r--r--	arch/sparc64/Kconfig.debug		|  8
-rw-r--r--	arch/sparc64/kernel/entry.S		| 39
-rw-r--r--	arch/sparc64/kernel/ptrace.c		|  7
-rw-r--r--	arch/sparc64/kernel/sparc64_ksyms.c	|  3
-rw-r--r--	arch/sparc64/kernel/una_asm.S		|  2
-rw-r--r--	arch/sparc64/kernel/unaligned.c		| 64
-rw-r--r--	arch/sparc64/lib/Makefile		|  2
-rw-r--r--	arch/sparc64/lib/dec_and_lock.S		| 80
8 files changed, 81 insertions(+), 124 deletions(-)
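
The headline change is the removal of sparc64's hand-rolled _atomic_dec_and_lock (dec_and_lock.S, plus its HAVE_DEC_LOCK Kconfig knob and its ksyms export), falling back to the generic C implementation in lib/. For orientation, both versions provide roughly the contract below; this is a hedged sketch of the generic routine's semantics, not the kernel's exact source:

	/* Sketch: decrement *atomic and return nonzero with the lock held
	 * only when the count reaches zero; otherwise return zero with the
	 * lock released.  Kernel context assumed (atomic_t, spinlock_t).
	 */
	int atomic_dec_and_lock_sketch(atomic_t *atomic, spinlock_t *lock)
	{
		spin_lock(lock);
		if (atomic_dec_and_test(atomic))
			return 1;	/* caller must spin_unlock() later */
		spin_unlock(lock);
		return 0;
	}

The deleted assembly implemented the same contract locklessly on the fast path using cas, which is why (per the removed Kconfig comment) it could not coexist with spinlock debugging.
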
diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug
index cd8d39fb954d..af0e9411b83e 100644
--- a/arch/sparc64/Kconfig.debug
+++ b/arch/sparc64/Kconfig.debug
@@ -33,14 +33,6 @@ config DEBUG_BOOTMEM
 	depends on DEBUG_KERNEL
 	bool "Debug BOOTMEM initialization"
 
-# We have a custom atomic_dec_and_lock() implementation but it's not
-# compatible with spinlock debugging so we need to fall back on
-# the generic version in that case.
-config HAVE_DEC_LOCK
-	bool
-	depends on SMP && !DEBUG_SPINLOCK
-	default y
-
 config MCOUNT
 	bool
 	depends on STACK_DEBUG
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 3e0badb820c5..b48349527853 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -42,19 +42,15 @@
  * executing (see inherit_locked_prom_mappings() rant).
  */
 sparc64_vpte_nucleus:
-	/* Load 0xf0000000, which is LOW_OBP_ADDRESS.  */
-	mov	0xf, %g5
-	sllx	%g5, 28, %g5
-
-	/* Is addr >= LOW_OBP_ADDRESS?  */
+	/* Note that kvmap below has verified that the address is
+	 * in the range MODULES_VADDR --> VMALLOC_END already.  So
+	 * here we need only check if it is an OBP address or not.
+	 */
+	sethi	%hi(LOW_OBP_ADDRESS), %g5
 	cmp	%g4, %g5
 	blu,pn	%xcc, sparc64_vpte_patchme1
 	 mov	0x1, %g5
-
-	/* Load 0x100000000, which is HI_OBP_ADDRESS.  */
 	sllx	%g5, 32, %g5
-
-	/* Is addr < HI_OBP_ADDRESS?  */
 	cmp	%g4, %g5
 	blu,pn	%xcc, obp_iaddr_patch
 	 nop
@@ -156,26 +152,29 @@ obp_daddr_patch:
  * rather, use information saved during inherit_prom_mappings() using 8k
  * pagesize.
  */
+	.align	32
 kvmap:
-	/* Load 0xf0000000, which is LOW_OBP_ADDRESS.  */
-	mov	0xf, %g5
-	sllx	%g5, 28, %g5
+	sethi	%hi(MODULES_VADDR), %g5
+	cmp	%g4, %g5
+	blu,pn	%xcc, longpath
+	 mov	(VMALLOC_END >> 24), %g5
+	sllx	%g5, 24, %g5
+	cmp	%g4, %g5
+	bgeu,pn	%xcc, longpath
+	 nop
 
-	/* Is addr >= LOW_OBP_ADDRESS?  */
+kvmap_check_obp:
+	sethi	%hi(LOW_OBP_ADDRESS), %g5
 	cmp	%g4, %g5
-	blu,pn	%xcc, vmalloc_addr
+	blu,pn	%xcc, kvmap_vmalloc_addr
 	 mov	0x1, %g5
-
-	/* Load 0x100000000, which is HI_OBP_ADDRESS.  */
 	sllx	%g5, 32, %g5
-
-	/* Is addr < HI_OBP_ADDRESS?  */
 	cmp	%g4, %g5
 	blu,pn	%xcc, obp_daddr_patch
 	 nop
 
-vmalloc_addr:
-	/* If we get here, a vmalloc addr accessed, load kernel VPTE.  */
+kvmap_vmalloc_addr:
+	/* If we get here, a vmalloc addr was accessed, load kernel VPTE.  */
 	ldxa	[%g3 + %g6] ASI_N, %g5
 	brgez,pn	%g5, longpath
 	 nop
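
In C terms, the reworked kvmap rejects anything outside the kernel mapping range up front and only then distinguishes OBP addresses from vmalloc addresses. A rough rendering of the new flow, using the constants named in the assembly (the deleted comments give LOW_OBP_ADDRESS = 0xf0000000 and HI_OBP_ADDRESS = 0x100000000); illustrative only, not kernel source:

	/* kvmap classification sketch.  The assembly approximates the
	 * VMALLOC_END compare as (VMALLOC_END >> 24) << 24.
	 */
	if (addr < MODULES_VADDR || addr >= VMALLOC_END)
		goto longpath;		/* not a TLB miss we handle here */
	if (addr >= LOW_OBP_ADDRESS && addr < HI_OBP_ADDRESS)
		goto obp_daddr_patch;	/* firmware (OBP) mapping */
	/* otherwise: vmalloc address, load the kernel VPTE */

This is also why sparc64_vpte_nucleus can drop its own MODULES_VADDR/VMALLOC_END test: as the new comment notes, kvmap has already verified the range by the time it runs.
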
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index 23ad839d113f..5efbff90d668 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -30,6 +30,7 @@
 #include <asm/psrcompat.h>
 #include <asm/visasm.h>
 #include <asm/spitfire.h>
+#include <asm/page.h>
 
 /* Returning from ptrace is a bit tricky because the syscall return
  * low level code assumes any value returned which is negative and
@@ -128,20 +129,20 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 	 * is mapped to in the user's address space, we can skip the
 	 * D-cache flush.
 	 */
-	if ((uaddr ^ kaddr) & (1UL << 13)) {
+	if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
 		unsigned long start = __pa(kaddr);
 		unsigned long end = start + len;
 
 		if (tlb_type == spitfire) {
 			for (; start < end; start += 32)
-				spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
+				spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
 		} else {
 			for (; start < end; start += 32)
 				__asm__ __volatile__(
 					"stxa %%g0, [%0] %1\n\t"
 					"membar #Sync"
 					: /* no outputs */
-					: "r" (va),
+					: "r" (start),
 					  "i" (ASI_DCACHE_INVALIDATE));
 		}
 	}
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index cbb5e59824e5..fb7a5370dbfc 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -163,9 +163,6 @@ EXPORT_SYMBOL(atomic64_add);
 EXPORT_SYMBOL(atomic64_add_ret);
 EXPORT_SYMBOL(atomic64_sub);
 EXPORT_SYMBOL(atomic64_sub_ret);
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
 
 /* Atomic bit operations. */
 EXPORT_SYMBOL(test_and_set_bit);
diff --git a/arch/sparc64/kernel/una_asm.S b/arch/sparc64/kernel/una_asm.S
index cbb40585253c..da48400bcc95 100644
--- a/arch/sparc64/kernel/una_asm.S
+++ b/arch/sparc64/kernel/una_asm.S
@@ -17,7 +17,7 @@ kernel_unaligned_trap_fault:
 __do_int_store:
 	rd	%asi, %o4
 	wr	%o3, 0, %asi
-	ldx	[%o2], %g3
+	mov	%o2, %g3
 	cmp	%o1, 2
 	be,pn	%icc, 2f
 	 cmp	%o1, 4
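
The flush_ptrace_access() hunk fixes two genuine bugs: kaddr was XORed against uaddr without a cast, and the flush loops referenced a stale variable name (va) instead of the loop cursor (start). The bit-13 test itself is a D-cache page-coloring check; the sketch below shows the idea, assuming a virtually-indexed D-cache with exactly one index bit (bit 13) above the 8K page offset; that cache geometry is an assumption here, not something stated in the patch:

	/* Alias-check sketch: a flush is only needed when the user and
	 * kernel virtual addresses fall in different D-cache colors.
	 */
	static int dcache_alias_possible(unsigned long uaddr, unsigned long kaddr)
	{
		return ((uaddr ^ kaddr) & (1UL << 13)) != 0;
	}

The una_asm.S one-liner pairs with the prototype change in unaligned.c below: __do_int_store now receives the store value itself in %o2 rather than a pointer to it, so the value must be moved, not dereferenced, into %g3.
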
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
index da9739f0d437..42718f6a7d36 100644
--- a/arch/sparc64/kernel/unaligned.c
+++ b/arch/sparc64/kernel/unaligned.c
@@ -184,13 +184,14 @@ extern void do_int_load(unsigned long *dest_reg, int size,
 			unsigned long *saddr, int is_signed, int asi);
 
 extern void __do_int_store(unsigned long *dst_addr, int size,
-			   unsigned long *src_val, int asi);
+			   unsigned long src_val, int asi);
 
 static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr,
-				struct pt_regs *regs, int asi)
+				struct pt_regs *regs, int asi, int orig_asi)
 {
 	unsigned long zero = 0;
-	unsigned long *src_val = &zero;
+	unsigned long *src_val_p = &zero;
+	unsigned long src_val;
 
 	if (size == 16) {
 		size = 8;
@@ -198,7 +199,25 @@ static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr,
 			 (unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
 			(unsigned)fetch_reg(reg_num + 1, regs);
 	} else if (reg_num) {
-		src_val = fetch_reg_addr(reg_num, regs);
+		src_val_p = fetch_reg_addr(reg_num, regs);
+	}
+	src_val = *src_val_p;
+	if (unlikely(asi != orig_asi)) {
+		switch (size) {
+		case 2:
+			src_val = swab16(src_val);
+			break;
+		case 4:
+			src_val = swab32(src_val);
+			break;
+		case 8:
+			src_val = swab64(src_val);
+			break;
+		case 16:
+		default:
+			BUG();
+			break;
+		};
 	}
 	__do_int_store(dst_addr, size, src_val, asi);
 }
@@ -276,6 +295,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
 		kernel_mna_trap_fault();
 	} else {
 		unsigned long addr;
+		int orig_asi, asi;
 
 		addr = compute_effective_address(regs, insn,
 						 ((insn >> 25) & 0x1f));
@@ -285,18 +305,48 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
 		       regs->tpc, dirstrings[dir], addr, size,
 		       regs->u_regs[UREG_RETPC]);
 #endif
+		orig_asi = asi = decode_asi(insn, regs);
+		switch (asi) {
+		case ASI_NL:
+		case ASI_AIUPL:
+		case ASI_AIUSL:
+		case ASI_PL:
+		case ASI_SL:
+		case ASI_PNFL:
+		case ASI_SNFL:
+			asi &= ~0x08;
+			break;
+		};
 		switch (dir) {
 		case load:
 			do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
 				    size, (unsigned long *) addr,
-				    decode_signedness(insn),
-				    decode_asi(insn, regs));
+				    decode_signedness(insn), asi);
+			if (unlikely(asi != orig_asi)) {
+				unsigned long val_in = *(unsigned long *) addr;
+				switch (size) {
+				case 2:
+					val_in = swab16(val_in);
+					break;
+				case 4:
+					val_in = swab32(val_in);
+					break;
+				case 8:
+					val_in = swab64(val_in);
+					break;
+				case 16:
+				default:
+					BUG();
+					break;
+				};
+				*(unsigned long *) addr = val_in;
+			}
 			break;
 
 		case store:
 			do_int_store(((insn>>25)&0x1f), size,
 				     (unsigned long *) addr, regs,
-				     decode_asi(insn, regs));
+				     asi, orig_asi);
 			break;
 
 		default:
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index d968aebe83b2..c295806500f7 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -14,6 +14,4 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
 	 copy_in_user.o user_fixup.o memmove.o \
 	 mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
 
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
-
 obj-y += iomap.o
diff --git a/arch/sparc64/lib/dec_and_lock.S b/arch/sparc64/lib/dec_and_lock.S
deleted file mode 100644
index 8ee288dd0afc..000000000000
--- a/arch/sparc64/lib/dec_and_lock.S
+++ /dev/null
@@ -1,80 +0,0 @@
-/* $Id: dec_and_lock.S,v 1.5 2001/11/18 00:12:56 davem Exp $
- * dec_and_lock.S: Sparc64 version of "atomic_dec_and_lock()"
- *                 using cas and ldstub instructions.
- *
- * Copyright (C) 2000 David S. Miller (davem@redhat.com)
- */
-#include <linux/config.h>
-#include <asm/thread_info.h>
-
-	.text
-	.align	64
-
-	/* CAS basically works like this:
-	 *
-	 * void CAS(MEM, REG1, REG2)
-	 * {
-	 *	START_ATOMIC();
-	 *	if (*(MEM) == REG1) {
-	 *		TMP = *(MEM);
-	 *		*(MEM) = REG2;
-	 *		REG2 = TMP;
-	 *	} else
-	 *		REG2 = *(MEM);
-	 *	END_ATOMIC();
-	 * }
-	 */
-
-	.globl	_atomic_dec_and_lock
-_atomic_dec_and_lock:	/* %o0 = counter, %o1 = lock */
-loop1:	lduw	[%o0], %g2
-	subcc	%g2, 1, %g7
-	be,pn	%icc, start_to_zero
-	 nop
-nzero:	cas	[%o0], %g2, %g7
-	cmp	%g2, %g7
-	bne,pn	%icc, loop1
-	 mov	0, %g1
-
-out:
-	membar	#StoreLoad | #StoreStore
-	retl
-	 mov	%g1, %o0
-start_to_zero:
-#ifdef CONFIG_PREEMPT
-	ldsw	[%g6 + TI_PRE_COUNT], %g3
-	add	%g3, 1, %g3
-	stw	%g3, [%g6 + TI_PRE_COUNT]
-#endif
-to_zero:
-	ldstub	[%o1], %g3
-	membar	#StoreLoad | #StoreStore
-	brnz,pn	%g3, spin_on_lock
-	 nop
-loop2:	cas	[%o0], %g2, %g7	/* ASSERT(g7 == 0) */
-	cmp	%g2, %g7
-
-	be,pt	%icc, out
-	 mov	1, %g1
-	lduw	[%o0], %g2
-	subcc	%g2, 1, %g7
-	be,pn	%icc, loop2
-	 nop
-	membar	#StoreStore | #LoadStore
-	stb	%g0, [%o1]
-#ifdef CONFIG_PREEMPT
-	ldsw	[%g6 + TI_PRE_COUNT], %g3
-	sub	%g3, 1, %g3
-	stw	%g3, [%g6 + TI_PRE_COUNT]
-#endif
-
-	b,pt	%xcc, nzero
-	 nop
-spin_on_lock:
-	ldub	[%o1], %g3
-	membar	#LoadLoad
-	brnz,pt	%g3, spin_on_lock
-	 nop
-	ba,pt	%xcc, to_zero
-	nop
-	nop
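
The net effect of the unaligned.c changes is correct handling of little-endian ASI accesses in the unaligned-trap path: the access is performed with the corresponding big-endian ASI (the little-endian variants differ only in bit 3, hence asi &= ~0x08) and the loaded or stored value is byte-swapped to compensate. The swab logic added in the hunks above condenses to roughly the helper below; an illustrative sketch, kernel context assumed (swab16/swab32/swab64 and BUG come from the kernel headers):

	/* Fixup sketch: if the instruction's original ASI was little-endian,
	 * the access was done with the big-endian counterpart, so swap the
	 * bytes to match what the instruction expected.
	 */
	static unsigned long le_asi_fixup(unsigned long val, int size,
					  int asi, int orig_asi)
	{
		if (asi == orig_asi)
			return val;	/* big-endian ASI: nothing to do */
		switch (size) {
		case 2:  return swab16(val);
		case 4:  return swab32(val);
		case 8:  return swab64(val);
		default: BUG();		/* 16-byte stores were split earlier */
		}
		return val;
	}
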