author    Tony Luck <tony.luck@intel.com>  2005-10-20 10:41:44 -0700
committer Tony Luck <tony.luck@intel.com>  2005-10-20 10:41:44 -0700
commit    9cec58dc138d6fcad9f447a19c8ff69f6540e667 (patch)
tree      4fe1cca94fdba8b705c87615bee06d3346f687ce /arch/sparc64
parent    17e5ad6c0ce5a970e2830d0de8bdd60a2f077d38 (diff)
parent    ac9b9c667c2e1194e22ebe0a441ae1c37aaa9b90 (diff)
Update from upstream with manual merge of Yasunori Goto's changes to
swiotlb.c made in commit 281dd25cdc0d6903929b79183816d151ea626341, since
this file has been moved from arch/ia64/lib/swiotlb.c to lib/swiotlb.c.

Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/sparc64')
-rw-r--r--  arch/sparc64/Kconfig.debug            |  14
-rw-r--r--  arch/sparc64/kernel/cpu.c             |   4
-rw-r--r--  arch/sparc64/kernel/devices.c         |  22
-rw-r--r--  arch/sparc64/kernel/dtlb_backend.S    |  13
-rw-r--r--  arch/sparc64/kernel/dtlb_base.S       |  18
-rw-r--r--  arch/sparc64/kernel/dtlb_prot.S       |  12
-rw-r--r--  arch/sparc64/kernel/entry.S           | 263
-rw-r--r--  arch/sparc64/kernel/etrap.S           |  51
-rw-r--r--  arch/sparc64/kernel/head.S            | 748
-rw-r--r--  arch/sparc64/kernel/irq.c             |   1
-rw-r--r--  arch/sparc64/kernel/itlb_base.S       |  26
-rw-r--r--  arch/sparc64/kernel/ktlb.S            | 194
-rw-r--r--  arch/sparc64/kernel/pci_iommu.c       | 363
-rw-r--r--  arch/sparc64/kernel/pci_psycho.c      |  44
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c       |  39
-rw-r--r--  arch/sparc64/kernel/pci_schizo.c      |  59
-rw-r--r--  arch/sparc64/kernel/power.c           |  64
-rw-r--r--  arch/sparc64/kernel/ptrace.c          |  21
-rw-r--r--  arch/sparc64/kernel/rtrap.S           |  30
-rw-r--r--  arch/sparc64/kernel/setup.c           |  58
-rw-r--r--  arch/sparc64/kernel/smp.c             |  28
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c   |   3
-rw-r--r--  arch/sparc64/kernel/sys32.S           | 170
-rw-r--r--  arch/sparc64/kernel/trampoline.S      |  31
-rw-r--r--  arch/sparc64/kernel/traps.c           | 100
-rw-r--r--  arch/sparc64/kernel/una_asm.S         |  67
-rw-r--r--  arch/sparc64/kernel/unaligned.c       | 101
-rw-r--r--  arch/sparc64/kernel/us3_cpufreq.c     |   5
-rw-r--r--  arch/sparc64/kernel/vmlinux.lds.S     |   3
-rw-r--r--  arch/sparc64/kernel/winfixup.S        |  33
-rw-r--r--  arch/sparc64/lib/Makefile             |   2
-rw-r--r--  arch/sparc64/lib/VISsave.S            |   8
-rw-r--r--  arch/sparc64/lib/dec_and_lock.S       |  80
-rw-r--r--  arch/sparc64/lib/strncpy_from_user.S  |  16
-rw-r--r--  arch/sparc64/lib/user_fixup.c         |  63
-rw-r--r--  arch/sparc64/mm/Makefile              |   2
-rw-r--r--  arch/sparc64/mm/extable.c             |  80
-rw-r--r--  arch/sparc64/mm/fault.c               |  69
-rw-r--r--  arch/sparc64/mm/init.c                | 868
-rw-r--r--  arch/sparc64/mm/ultra.S               | 112
-rw-r--r--  arch/sparc64/prom/Makefile            |   4
-rw-r--r--  arch/sparc64/prom/console.c           |   2
-rw-r--r--  arch/sparc64/prom/devops.c            |   2
-rw-r--r--  arch/sparc64/prom/init.c              |   5
-rw-r--r--  arch/sparc64/prom/map.S               |  72
-rw-r--r--  arch/sparc64/prom/memory.c            | 152
-rw-r--r--  arch/sparc64/prom/misc.c              |  46
-rw-r--r--  arch/sparc64/prom/p1275.c             |   2
-rw-r--r--  arch/sparc64/prom/printf.c            |   2
-rw-r--r--  arch/sparc64/prom/tree.c              |  50
50 files changed, 1614 insertions, 2608 deletions
diff --git a/arch/sparc64/Kconfig.debug b/arch/sparc64/Kconfig.debug
index cd8d39fb954d..fa06ea04837b 100644
--- a/arch/sparc64/Kconfig.debug
+++ b/arch/sparc64/Kconfig.debug
@@ -33,13 +33,13 @@ config DEBUG_BOOTMEM
depends on DEBUG_KERNEL
bool "Debug BOOTMEM initialization"
-# We have a custom atomic_dec_and_lock() implementation but it's not
-# compatible with spinlock debugging so we need to fall back on
-# the generic version in that case.
-config HAVE_DEC_LOCK
- bool
- depends on SMP && !DEBUG_SPINLOCK
- default y
+config DEBUG_PAGEALLOC
+ bool "Page alloc debugging"
+ depends on DEBUG_KERNEL && !SOFTWARE_SUSPEND
+ help
+ Unmap pages from the kernel linear mapping after free_pages().
+ This results in a large slowdown, but helps to find certain types
+ of memory corruptions.
config MCOUNT
bool
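The DEBUG_PAGEALLOC help text above is terse; concretely, the option turns
stray accesses to freed pages into immediate faults. A minimal sketch of the
bug class it exposes, assuming ordinary page-allocator usage (illustrative
only, not code from this patch):

/* A use-after-free that DEBUG_PAGEALLOC converts from silent
 * corruption into an immediate trap (hypothetical example).
 */
unsigned long addr = __get_free_pages(GFP_KERNEL, 0);
char *p = (char *) addr;

free_pages(addr, 0);

/* With DEBUG_PAGEALLOC the linear mapping for the freed page has
 * been unmapped, so this store faults right here instead of
 * corrupting whoever reuses the page later.
 */
p[0] = 0x42;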
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c
index 48756958116b..77ef5df4e5a7 100644
--- a/arch/sparc64/kernel/cpu.c
+++ b/arch/sparc64/kernel/cpu.c
@@ -39,6 +39,8 @@ struct cpu_fp_info linux_sparc_fpu[] = {
{ 0x3e, 0x15, 0, "UltraSparc III+ integrated FPU"},
{ 0x3e, 0x16, 0, "UltraSparc IIIi integrated FPU"},
{ 0x3e, 0x18, 0, "UltraSparc IV integrated FPU"},
+ { 0x3e, 0x19, 0, "UltraSparc IV+ integrated FPU"},
+ { 0x3e, 0x22, 0, "UltraSparc IIIi+ integrated FPU"},
};
#define NSPARCFPU (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info))
@@ -53,6 +55,8 @@ struct cpu_iu_info linux_sparc_chips[] = {
{ 0x3e, 0x15, "TI UltraSparc III+ (Cheetah+)"},
{ 0x3e, 0x16, "TI UltraSparc IIIi (Jalapeno)"},
{ 0x3e, 0x18, "TI UltraSparc IV (Jaguar)"},
+ { 0x3e, 0x19, "TI UltraSparc IV+ (Panther)"},
+ { 0x3e, 0x22, "TI UltraSparc IIIi+ (Serrano)"},
};
#define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
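For context, these tables are consumed by scanning for a (manufacturer,
implementation) pair read from the processor version register; a hedged
sketch of that lookup follows (the loop shape, the field names, and the
sparc_cpu_type variable are assumptions, not part of this hunk):

/* Assumed shape of the table scan that reports the CPU name once
 * manuf/impl have been extracted from %ver. Field names are
 * illustrative assumptions.
 */
const char *sparc_cpu_type = "Unknown CPU";
int i;

for (i = 0; i < NSPARCCHIPS; i++) {
	if (linux_sparc_chips[i].manuf == manuf &&
	    linux_sparc_chips[i].impl == impl) {
		sparc_cpu_type = linux_sparc_chips[i].cpu_name;
		break;
	}
}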
diff --git a/arch/sparc64/kernel/devices.c b/arch/sparc64/kernel/devices.c
index d710274e516b..df9a1ca8fd77 100644
--- a/arch/sparc64/kernel/devices.c
+++ b/arch/sparc64/kernel/devices.c
@@ -135,6 +135,28 @@ void __init device_scan(void)
cpu_data(0).clock_tick = prom_getintdefault(cpu_node,
"clock-frequency",
0);
+ cpu_data(0).dcache_size = prom_getintdefault(cpu_node,
+ "dcache-size",
+ 16 * 1024);
+ cpu_data(0).dcache_line_size =
+ prom_getintdefault(cpu_node, "dcache-line-size", 32);
+ cpu_data(0).icache_size = prom_getintdefault(cpu_node,
+ "icache-size",
+ 16 * 1024);
+ cpu_data(0).icache_line_size =
+ prom_getintdefault(cpu_node, "icache-line-size", 32);
+ cpu_data(0).ecache_size = prom_getintdefault(cpu_node,
+ "ecache-size",
+ 4 * 1024 * 1024);
+ cpu_data(0).ecache_line_size =
+ prom_getintdefault(cpu_node, "ecache-line-size", 64);
+ printk("CPU[0]: Caches "
+ "D[sz(%d):line_sz(%d)] "
+ "I[sz(%d):line_sz(%d)] "
+ "E[sz(%d):line_sz(%d)]\n",
+ cpu_data(0).dcache_size, cpu_data(0).dcache_line_size,
+ cpu_data(0).icache_size, cpu_data(0).icache_line_size,
+ cpu_data(0).ecache_size, cpu_data(0).ecache_line_size);
}
#endif
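The hunk above leans on prom_getintdefault(node, property, default), which
returns the firmware property's value when present and the supplied fallback
otherwise. The same pattern extends to any integer property; a sketch with a
hypothetical property name:

/* Same pattern as the hunk above: read an integer property from the
 * boot CPU's firmware node, falling back to a default when absent.
 * The "l2-cache-associativity" property name is hypothetical.
 */
int assoc = prom_getintdefault(cpu_node, "l2-cache-associativity", 4);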
diff --git a/arch/sparc64/kernel/dtlb_backend.S b/arch/sparc64/kernel/dtlb_backend.S
index 538522848ad4..acc889a7f9c1 100644
--- a/arch/sparc64/kernel/dtlb_backend.S
+++ b/arch/sparc64/kernel/dtlb_backend.S
@@ -9,17 +9,7 @@
#include <asm/pgtable.h>
#include <asm/mmu.h>
-#if PAGE_SHIFT == 13
-#define SZ_BITS _PAGE_SZ8K
-#elif PAGE_SHIFT == 16
-#define SZ_BITS _PAGE_SZ64K
-#elif PAGE_SHIFT == 19
-#define SZ_BITS _PAGE_SZ512K
-#elif PAGE_SHIFT == 22
-#define SZ_BITS _PAGE_SZ4MB
-#endif
-
-#define VALID_SZ_BITS (_PAGE_VALID | SZ_BITS)
+#define VALID_SZ_BITS (_PAGE_VALID | _PAGE_SZBITS)
#define VPTE_BITS (_PAGE_CP | _PAGE_CV | _PAGE_P )
#define VPTE_SHIFT (PAGE_SHIFT - 3)
@@ -163,7 +153,6 @@ sparc64_vpte_continue:
stxa %g4, [%g1 + %g1] ASI_DMMU ! Restore previous TAG_ACCESS
retry ! Load PTE once again
-#undef SZ_BITS
#undef VALID_SZ_BITS
#undef VPTE_SHIFT
#undef VPTE_BITS
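The deleted #if ladder has presumably been centralized behind _PAGE_SZBITS
in <asm/pgtable.h>, removing one copy of it per TLB-miss handler. Assuming
the shared definition mirrors the removed code, it would look like:

#if PAGE_SHIFT == 13
#define _PAGE_SZBITS	_PAGE_SZ8K
#elif PAGE_SHIFT == 16
#define _PAGE_SZBITS	_PAGE_SZ64K
#elif PAGE_SHIFT == 19
#define _PAGE_SZBITS	_PAGE_SZ512K
#elif PAGE_SHIFT == 22
#define _PAGE_SZBITS	_PAGE_SZ4MB
#endif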
diff --git a/arch/sparc64/kernel/dtlb_base.S b/arch/sparc64/kernel/dtlb_base.S
index ded2fed23fcc..6528786840c0 100644
--- a/arch/sparc64/kernel/dtlb_base.S
+++ b/arch/sparc64/kernel/dtlb_base.S
@@ -53,39 +53,36 @@
* be guaranteed to be 0 ... mmu_context.h does guarantee this
* by only using 10 bits in the hwcontext value.
*/
-#define CREATE_VPTE_OFFSET1(r1, r2)
+#define CREATE_VPTE_OFFSET1(r1, r2) nop
#define CREATE_VPTE_OFFSET2(r1, r2) \
srax r1, 10, r2
-#define CREATE_VPTE_NOP nop
#else
#define CREATE_VPTE_OFFSET1(r1, r2) \
srax r1, PAGE_SHIFT, r2
#define CREATE_VPTE_OFFSET2(r1, r2) \
sllx r2, 3, r2
-#define CREATE_VPTE_NOP
#endif
/* DTLB ** ICACHE line 1: Quick user TLB misses */
+ mov TLB_SFSR, %g1
ldxa [%g1 + %g1] ASI_DMMU, %g4 ! Get TAG_ACCESS
andcc %g4, TAG_CONTEXT_BITS, %g0 ! From Nucleus?
from_tl1_trap:
rdpr %tl, %g5 ! For TL==3 test
CREATE_VPTE_OFFSET1(%g4, %g6) ! Create VPTE offset
- be,pn %xcc, 3f ! Yep, special processing
+ be,pn %xcc, kvmap ! Yep, special processing
CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset
cmp %g5, 4 ! Last trap level?
- be,pn %xcc, longpath ! Yep, cannot risk VPTE miss
- nop ! delay slot
/* DTLB ** ICACHE line 2: User finish + quick kernel TLB misses */
+ be,pn %xcc, longpath ! Yep, cannot risk VPTE miss
+ nop ! delay slot
ldxa [%g3 + %g6] ASI_S, %g5 ! Load VPTE
1: brgez,pn %g5, longpath ! Invalid, branch out
nop ! Delay-slot
9: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
retry ! Trap return
-3: brlz,pt %g4, 9b ! Kernel virtual map?
- xor %g2, %g4, %g5 ! Finish bit twiddles
- ba,a,pt %xcc, kvmap ! Yep, go check for obp/vmalloc
+ nop
/* DTLB ** ICACHE line 3: winfixups+real_faults */
longpath:
@@ -106,8 +103,7 @@ longpath:
nop
nop
nop
- CREATE_VPTE_NOP
+ nop
#undef CREATE_VPTE_OFFSET1
#undef CREATE_VPTE_OFFSET2
-#undef CREATE_VPTE_NOP
diff --git a/arch/sparc64/kernel/dtlb_prot.S b/arch/sparc64/kernel/dtlb_prot.S
index d848bb7374bb..e0a920162604 100644
--- a/arch/sparc64/kernel/dtlb_prot.S
+++ b/arch/sparc64/kernel/dtlb_prot.S
@@ -14,14 +14,14 @@
*/
/* PROT ** ICACHE line 1: User DTLB protection trap */
- stxa %g0, [%g1] ASI_DMMU ! Clear SFSR FaultValid bit
- membar #Sync ! Synchronize ASI stores
- rdpr %pstate, %g5 ! Move into alternate globals
+ mov TLB_SFSR, %g1
+ stxa %g0, [%g1] ASI_DMMU ! Clear FaultValid bit
+ membar #Sync ! Synchronize stores
+ rdpr %pstate, %g5 ! Move into alt-globals
wrpr %g5, PSTATE_AG|PSTATE_MG, %pstate
- rdpr %tl, %g1 ! Need to do a winfixup?
+ rdpr %tl, %g1 ! Need a winfixup?
cmp %g1, 1 ! Trap level >1?
- mov TLB_TAG_ACCESS, %g4 ! Prepare reload of vaddr
- nop
+ mov TLB_TAG_ACCESS, %g4 ! For reload of vaddr
/* PROT ** ICACHE line 2: More real fault processing */
bgu,pn %xcc, winfix_trampoline ! Yes, perform winfixup
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 3e0badb820c5..11a848402fb1 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -30,164 +30,10 @@
.text
.align 32
- .globl sparc64_vpte_patchme1
- .globl sparc64_vpte_patchme2
-/*
- * On a second level vpte miss, check whether the original fault is to the OBP
- * range (note that this is only possible for instruction miss, data misses to
- * obp range do not use vpte). If so, go back directly to the faulting address.
- * This is because we want to read the tpc, otherwise we have no way of knowing
- * the 8k aligned faulting address if we are using >8k kernel pagesize. This
- * also ensures no vpte range addresses are dropped into tlb while obp is
- * executing (see inherit_locked_prom_mappings() rant).
- */
-sparc64_vpte_nucleus:
- /* Load 0xf0000000, which is LOW_OBP_ADDRESS. */
- mov 0xf, %g5
- sllx %g5, 28, %g5
-
- /* Is addr >= LOW_OBP_ADDRESS? */
- cmp %g4, %g5
- blu,pn %xcc, sparc64_vpte_patchme1
- mov 0x1, %g5
-
- /* Load 0x100000000, which is HI_OBP_ADDRESS. */
- sllx %g5, 32, %g5
-
- /* Is addr < HI_OBP_ADDRESS? */
- cmp %g4, %g5
- blu,pn %xcc, obp_iaddr_patch
- nop
-
- /* These two instructions are patched by paginig_init(). */
-sparc64_vpte_patchme1:
- sethi %hi(0), %g5
-sparc64_vpte_patchme2:
- or %g5, %lo(0), %g5
-
- /* With kernel PGD in %g5, branch back into dtlb_backend. */
- ba,pt %xcc, sparc64_kpte_continue
- andn %g1, 0x3, %g1 /* Finish PMD offset adjustment. */
-
-vpte_noent:
- /* Restore previous TAG_ACCESS, %g5 is zero, and we will
- * skip over the trap instruction so that the top level
- * TLB miss handler will thing this %g5 value is just an
- * invalid PTE, thus branching to full fault processing.
- */
- mov TLB_SFSR, %g1
- stxa %g4, [%g1 + %g1] ASI_DMMU
- done
-
- .globl obp_iaddr_patch
-obp_iaddr_patch:
- /* These two instructions patched by inherit_prom_mappings(). */
- sethi %hi(0), %g5
- or %g5, %lo(0), %g5
-
- /* Behave as if we are at TL0. */
- wrpr %g0, 1, %tl
- rdpr %tpc, %g4 /* Find original faulting iaddr */
- srlx %g4, 13, %g4 /* Throw out context bits */
- sllx %g4, 13, %g4 /* g4 has vpn + ctx0 now */
-
- /* Restore previous TAG_ACCESS. */
- mov TLB_SFSR, %g1
- stxa %g4, [%g1 + %g1] ASI_IMMU
-
- /* Get PMD offset. */
- srlx %g4, 23, %g6
- and %g6, 0x7ff, %g6
- sllx %g6, 2, %g6
-
- /* Load PMD, is it valid? */
- lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
- brz,pn %g5, longpath
- sllx %g5, 11, %g5
-
- /* Get PTE offset. */
- srlx %g4, 13, %g6
- and %g6, 0x3ff, %g6
- sllx %g6, 3, %g6
-
- /* Load PTE. */
- ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
- brgez,pn %g5, longpath
- nop
-
- /* TLB load and return from trap. */
- stxa %g5, [%g0] ASI_ITLB_DATA_IN
- retry
-
- .globl obp_daddr_patch
-obp_daddr_patch:
- /* These two instructions patched by inherit_prom_mappings(). */
- sethi %hi(0), %g5
- or %g5, %lo(0), %g5
-
- /* Get PMD offset. */
- srlx %g4, 23, %g6
- and %g6, 0x7ff, %g6
- sllx %g6, 2, %g6
-
- /* Load PMD, is it valid? */
- lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
- brz,pn %g5, longpath
- sllx %g5, 11, %g5
-
- /* Get PTE offset. */
- srlx %g4, 13, %g6
- and %g6, 0x3ff, %g6
- sllx %g6, 3, %g6
-
- /* Load PTE. */
- ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
- brgez,pn %g5, longpath
- nop
-
- /* TLB load and return from trap. */
- stxa %g5, [%g0] ASI_DTLB_DATA_IN
- retry
-
-/*
- * On a first level data miss, check whether this is to the OBP range (note
- * that such accesses can be made by prom, as well as by kernel using
- * prom_getproperty on "address"), and if so, do not use vpte access ...
- * rather, use information saved during inherit_prom_mappings() using 8k
- * pagesize.
- */
-kvmap:
- /* Load 0xf0000000, which is LOW_OBP_ADDRESS. */
- mov 0xf, %g5
- sllx %g5, 28, %g5
-
- /* Is addr >= LOW_OBP_ADDRESS? */
- cmp %g4, %g5
- blu,pn %xcc, vmalloc_addr
- mov 0x1, %g5
-
- /* Load 0x100000000, which is HI_OBP_ADDRESS. */
- sllx %g5, 32, %g5
-
- /* Is addr < HI_OBP_ADDRESS? */
- cmp %g4, %g5
- blu,pn %xcc, obp_daddr_patch
- nop
-
-vmalloc_addr:
- /* If we get here, a vmalloc addr accessed, load kernel VPTE. */
- ldxa [%g3 + %g6] ASI_N, %g5
- brgez,pn %g5, longpath
- nop
-
- /* PTE is valid, load into TLB and return from trap. */
- stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
- retry
-
/* This is trivial with the new code... */
.globl do_fpdis
do_fpdis:
- sethi %hi(TSTATE_PEF), %g4 ! IEU0
+ sethi %hi(TSTATE_PEF), %g4
rdpr %tstate, %g5
andcc %g5, %g4, %g0
be,pt %xcc, 1f
@@ -204,18 +50,18 @@ do_fpdis:
add %g0, %g0, %g0
ba,a,pt %xcc, rtrap_clr_l6
-1: ldub [%g6 + TI_FPSAVED], %g5 ! Load Group
- wr %g0, FPRS_FEF, %fprs ! LSU Group+4bubbles
- andcc %g5, FPRS_FEF, %g0 ! IEU1 Group
- be,a,pt %icc, 1f ! CTI
- clr %g7 ! IEU0
- ldx [%g6 + TI_GSR], %g7 ! Load Group
-1: andcc %g5, FPRS_DL, %g0 ! IEU1
- bne,pn %icc, 2f ! CTI
- fzero %f0 ! FPA
- andcc %g5, FPRS_DU, %g0 ! IEU1 Group
- bne,pn %icc, 1f ! CTI
- fzero %f2 ! FPA
+1: ldub [%g6 + TI_FPSAVED], %g5
+ wr %g0, FPRS_FEF, %fprs
+ andcc %g5, FPRS_FEF, %g0
+ be,a,pt %icc, 1f
+ clr %g7
+ ldx [%g6 + TI_GSR], %g7
+1: andcc %g5, FPRS_DL, %g0
+ bne,pn %icc, 2f
+ fzero %f0
+ andcc %g5, FPRS_DU, %g0
+ bne,pn %icc, 1f
+ fzero %f2
faddd %f0, %f2, %f4
fmuld %f0, %f2, %f6
faddd %f0, %f2, %f8
@@ -251,15 +97,17 @@ do_fpdis:
faddd %f0, %f2, %f4
fmuld %f0, %f2, %f6
ldxa [%g3] ASI_DMMU, %g5
-cplus_fptrap_insn_1:
- sethi %hi(0), %g2
+ sethi %hi(sparc64_kern_sec_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
stxa %g2, [%g3] ASI_DMMU
membar #Sync
add %g6, TI_FPREGS + 0xc0, %g2
faddd %f0, %f2, %f8
fmuld %f0, %f2, %f10
- ldda [%g1] ASI_BLK_S, %f32 ! grrr, where is ASI_BLK_NUCLEUS 8-(
+ membar #Sync
+ ldda [%g1] ASI_BLK_S, %f32
ldda [%g2] ASI_BLK_S, %f48
+ membar #Sync
faddd %f0, %f2, %f12
fmuld %f0, %f2, %f14
faddd %f0, %f2, %f16
@@ -270,7 +118,6 @@ cplus_fptrap_insn_1:
fmuld %f0, %f2, %f26
faddd %f0, %f2, %f28
fmuld %f0, %f2, %f30
- membar #Sync
b,pt %xcc, fpdis_exit
nop
2: andcc %g5, FPRS_DU, %g0
@@ -280,15 +127,17 @@ cplus_fptrap_insn_1:
fzero %f34
ldxa [%g3] ASI_DMMU, %g5
add %g6, TI_FPREGS, %g1
-cplus_fptrap_insn_2:
- sethi %hi(0), %g2
+ sethi %hi(sparc64_kern_sec_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
stxa %g2, [%g3] ASI_DMMU
membar #Sync
add %g6, TI_FPREGS + 0x40, %g2
faddd %f32, %f34, %f36
fmuld %f32, %f34, %f38
- ldda [%g1] ASI_BLK_S, %f0 ! grrr, where is ASI_BLK_NUCLEUS 8-(
+ membar #Sync
+ ldda [%g1] ASI_BLK_S, %f0
ldda [%g2] ASI_BLK_S, %f16
+ membar #Sync
faddd %f32, %f34, %f40
fmuld %f32, %f34, %f42
faddd %f32, %f34, %f44
@@ -301,18 +150,18 @@ cplus_fptrap_insn_2:
fmuld %f32, %f34, %f58
faddd %f32, %f34, %f60
fmuld %f32, %f34, %f62
- membar #Sync
ba,pt %xcc, fpdis_exit
nop
3: mov SECONDARY_CONTEXT, %g3
add %g6, TI_FPREGS, %g1
ldxa [%g3] ASI_DMMU, %g5
-cplus_fptrap_insn_3:
- sethi %hi(0), %g2
+ sethi %hi(sparc64_kern_sec_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
stxa %g2, [%g3] ASI_DMMU
membar #Sync
mov 0x40, %g2
- ldda [%g1] ASI_BLK_S, %f0 ! grrr, where is ASI_BLK_NUCLEUS 8-(
+ membar #Sync
+ ldda [%g1] ASI_BLK_S, %f0
ldda [%g1 + %g2] ASI_BLK_S, %f16
add %g1, 0x80, %g1
ldda [%g1] ASI_BLK_S, %f32
@@ -473,8 +322,8 @@ do_fptrap_after_fsr:
stx %g3, [%g6 + TI_GSR]
mov SECONDARY_CONTEXT, %g3
ldxa [%g3] ASI_DMMU, %g5
-cplus_fptrap_insn_4:
- sethi %hi(0), %g2
+ sethi %hi(sparc64_kern_sec_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
stxa %g2, [%g3] ASI_DMMU
membar #Sync
add %g6, TI_FPREGS, %g2
@@ -495,45 +344,17 @@ cplus_fptrap_insn_4:
ba,pt %xcc, etrap
wr %g0, 0, %fprs
-cplus_fptrap_1:
- sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
-
- .globl cheetah_plus_patch_fpdis
-cheetah_plus_patch_fpdis:
- /* We configure the dTLB512_0 for 4MB pages and the
- * dTLB512_1 for 8K pages when in context zero.
- */
- sethi %hi(cplus_fptrap_1), %o0
- lduw [%o0 + %lo(cplus_fptrap_1)], %o1
-
- set cplus_fptrap_insn_1, %o2
- stw %o1, [%o2]
- flush %o2
- set cplus_fptrap_insn_2, %o2
- stw %o1, [%o2]
- flush %o2
- set cplus_fptrap_insn_3, %o2
- stw %o1, [%o2]
- flush %o2
- set cplus_fptrap_insn_4, %o2
- stw %o1, [%o2]
- flush %o2
-
- retl
- nop
-
/* The registers for cross calls will be:
*
* DATA 0: [low 32-bits] Address of function to call, jmp to this
* [high 32-bits] MMU Context Argument 0, place in %g5
- * DATA 1: Address Argument 1, place in %g6
+ * DATA 1: Address Argument 1, place in %g1
* DATA 2: Address Argument 2, place in %g7
*
* With this method we can do most of the cross-call tlb/cache
* flushing very quickly.
*
- * Current CPU's IRQ worklist table is locked into %g1,
- * don't touch.
+ * Current CPU's IRQ worklist table is locked into %g6, don't touch.
*/
.text
.align 32
@@ -1007,13 +828,14 @@ cheetah_plus_dcpe_trap_vector:
nop
do_cheetah_plus_data_parity:
- ba,pt %xcc, etrap
+ rdpr %pil, %g2
+ wrpr %g0, 15, %pil
+ ba,pt %xcc, etrap_irq
rd %pc, %g7
mov 0x0, %o0
call cheetah_plus_parity_error
add %sp, PTREGS_OFF, %o1
- ba,pt %xcc, rtrap
- clr %l6
+ ba,a,pt %xcc, rtrap_irq
cheetah_plus_dcpe_trap_vector_tl1:
membar #Sync
@@ -1037,13 +859,14 @@ cheetah_plus_icpe_trap_vector:
nop
do_cheetah_plus_insn_parity:
- ba,pt %xcc, etrap
+ rdpr %pil, %g2
+ wrpr %g0, 15, %pil
+ ba,pt %xcc, etrap_irq
rd %pc, %g7
mov 0x1, %o0
call cheetah_plus_parity_error
add %sp, PTREGS_OFF, %o1
- ba,pt %xcc, rtrap
- clr %l6
+ ba,a,pt %xcc, rtrap_irq
cheetah_plus_icpe_trap_vector_tl1:
membar #Sync
@@ -1076,6 +899,10 @@ do_dcpe_tl1:
nop
wrpr %g1, %tl ! Restore original trap level
do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
+ sethi %hi(dcache_parity_tl1_occurred), %g2
+ lduw [%g2 + %lo(dcache_parity_tl1_occurred)], %g1
+ add %g1, 1, %g1
+ stw %g1, [%g2 + %lo(dcache_parity_tl1_occurred)]
/* Reset D-cache parity */
sethi %hi(1 << 16), %g1 ! D-cache size
mov (1 << 5), %g2 ! D-cache line size
@@ -1122,6 +949,10 @@ do_icpe_tl1:
nop
wrpr %g1, %tl ! Restore original trap level
do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
+ sethi %hi(icache_parity_tl1_occurred), %g2
+ lduw [%g2 + %lo(icache_parity_tl1_occurred)], %g1
+ add %g1, 1, %g1
+ stw %g1, [%g2 + %lo(icache_parity_tl1_occurred)]
/* Flush I-cache */
sethi %hi(1 << 15), %g1 ! I-cache size
mov (1 << 5), %g2 ! I-cache line size
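The two nonfatal parity paths above bump new event counters with a plain
lduw/add/stw sequence; the C side presumably only needs matching
declarations, roughly:

/* Assumed C-side declarations for the counters incremented by
 * do_dcpe_tl1_nonfatal and do_icpe_tl1_nonfatal above; a plain
 * read is enough to report them.
 */
unsigned int dcache_parity_tl1_occurred;
unsigned int icache_parity_tl1_occurred;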
diff --git a/arch/sparc64/kernel/etrap.S b/arch/sparc64/kernel/etrap.S
index 50d2af1d98ae..0d8eba21111b 100644
--- a/arch/sparc64/kernel/etrap.S
+++ b/arch/sparc64/kernel/etrap.S
@@ -68,12 +68,8 @@ etrap_irq:
wrpr %g3, 0, %otherwin
wrpr %g2, 0, %wstate
-cplus_etrap_insn_1:
- sethi %hi(0), %g3
- sllx %g3, 32, %g3
-cplus_etrap_insn_2:
- sethi %hi(0), %g2
- or %g3, %g2, %g3
+ sethi %hi(sparc64_kern_pri_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3
stxa %g3, [%l4] ASI_DMMU
flush %l6
wr %g0, ASI_AIUS, %asi
@@ -215,12 +211,8 @@ scetrap: rdpr %pil, %g2
mov PRIMARY_CONTEXT, %l4
wrpr %g3, 0, %otherwin
wrpr %g2, 0, %wstate
-cplus_etrap_insn_3:
- sethi %hi(0), %g3
- sllx %g3, 32, %g3
-cplus_etrap_insn_4:
- sethi %hi(0), %g2
- or %g3, %g2, %g3
+ sethi %hi(sparc64_kern_pri_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3
stxa %g3, [%l4] ASI_DMMU
flush %l6
@@ -264,38 +256,3 @@ cplus_etrap_insn_4:
#undef TASK_REGOFF
#undef ETRAP_PSTATE1
-
-cplus_einsn_1:
- sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3
-cplus_einsn_2:
- sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
-
- .globl cheetah_plus_patch_etrap
-cheetah_plus_patch_etrap:
- /* We configure the dTLB512_0 for 4MB pages and the
- * dTLB512_1 for 8K pages when in context zero.
- */
- sethi %hi(cplus_einsn_1), %o0
- sethi %hi(cplus_etrap_insn_1), %o2
- lduw [%o0 + %lo(cplus_einsn_1)], %o1
- or %o2, %lo(cplus_etrap_insn_1), %o2
- stw %o1, [%o2]
- flush %o2
- sethi %hi(cplus_etrap_insn_3), %o2
- or %o2, %lo(cplus_etrap_insn_3), %o2
- stw %o1, [%o2]
- flush %o2
-
- sethi %hi(cplus_einsn_2), %o0
- sethi %hi(cplus_etrap_insn_2), %o2
- lduw [%o0 + %lo(cplus_einsn_2)], %o1
- or %o2, %lo(cplus_etrap_insn_2), %o2
- stw %o1, [%o2]
- flush %o2
- sethi %hi(cplus_etrap_insn_4), %o2
- or %o2, %lo(cplus_etrap_insn_4), %o2
- stw %o1, [%o2]
- flush %o2
-
- retl
- nop
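Throughout etrap.S (and entry.S above), the boot-time instruction patching
(cplus_etrap_insn_* stamped by cheetah_plus_patch_etrap) is replaced by a
runtime sethi/ldx of a variable. The C side presumably just computes these
context values once at boot, e.g.:

/* Assumed C-side variables replacing the patched immediates: the
 * kernel's primary/secondary MMU context register values, computed
 * once during MM initialization instead of being stamped into the
 * instruction stream.
 */
unsigned long sparc64_kern_pri_context;
unsigned long sparc64_kern_sec_context;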
diff --git a/arch/sparc64/kernel/head.S b/arch/sparc64/kernel/head.S
index 1fa06c4e3bdb..b49dcd4504b0 100644
--- a/arch/sparc64/kernel/head.S
+++ b/arch/sparc64/kernel/head.S
@@ -28,19 +28,14 @@
#include <asm/mmu.h>
/* This section from from _start to sparc64_boot_end should fit into
- * 0x0000.0000.0040.4000 to 0x0000.0000.0040.8000 and will be sharing space
- * with bootup_user_stack, which is from 0x0000.0000.0040.4000 to
- * 0x0000.0000.0040.6000 and empty_bad_page, which is from
- * 0x0000.0000.0040.6000 to 0x0000.0000.0040.8000.
+ * 0x0000000000404000 to 0x0000000000408000.
*/
-
.text
.globl start, _start, stext, _stext
_start:
start:
_stext:
stext:
-bootup_user_stack:
! 0x0000000000404000
b sparc64_boot
flushw /* Flush register file. */
@@ -80,15 +75,169 @@ sparc_ramdisk_image64:
.xword 0
.word _end
- /* We must be careful, 32-bit OpenBOOT will get confused if it
- * tries to save away a register window to a 64-bit kernel
- * stack address. Flush all windows, disable interrupts,
- * remap if necessary, jump onto kernel trap table, then kernel
- * stack, or else we die.
+ /* PROM cif handler code address is in %o4. */
+sparc64_boot:
+1: rd %pc, %g7
+ set 1b, %g1
+ cmp %g1, %g7
+ be,pn %xcc, sparc64_boot_after_remap
+ mov %o4, %l7
+
+ /* We need to remap the kernel. Use position independent
+ * code to remap us to KERNBASE.
*
- * PROM entry point is on %o4
+ * SILO can invoke us with 32-bit address masking enabled,
+ * so make sure that's clear.
*/
-sparc64_boot:
+ rdpr %pstate, %g1
+ andn %g1, PSTATE_AM, %g1
+ wrpr %g1, 0x0, %pstate
+ ba,a,pt %xcc, 1f
+
+ .globl prom_finddev_name, prom_chosen_path
+ .globl prom_getprop_name, prom_mmu_name
+ .globl prom_callmethod_name, prom_translate_name
+ .globl prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache
+ .globl prom_boot_mapped_pc, prom_boot_mapping_mode
+ .globl prom_boot_mapping_phys_high, prom_boot_mapping_phys_low
+prom_finddev_name:
+ .asciz "finddevice"
+prom_chosen_path:
+ .asciz "/chosen"
+prom_getprop_name:
+ .asciz "getprop"
+prom_mmu_name:
+ .asciz "mmu"
+prom_callmethod_name:
+ .asciz "call-method"
+prom_translate_name:
+ .asciz "translate"
+prom_map_name:
+ .asciz "map"
+prom_unmap_name:
+ .asciz "unmap"
+ .align 4
+prom_mmu_ihandle_cache:
+ .word 0
+prom_boot_mapped_pc:
+ .word 0
+prom_boot_mapping_mode:
+ .word 0
+ .align 8
+prom_boot_mapping_phys_high:
+ .xword 0
+prom_boot_mapping_phys_low:
+ .xword 0
+1:
+ rd %pc, %l0
+ mov (1b - prom_finddev_name), %l1
+ mov (1b - prom_chosen_path), %l2
+ mov (1b - prom_boot_mapped_pc), %l3
+ sub %l0, %l1, %l1
+ sub %l0, %l2, %l2
+ sub %l0, %l3, %l3
+ stw %l0, [%l3]
+ sub %sp, (192 + 128), %sp
+
+ /* chosen_node = prom_finddevice("/chosen") */
+ stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "finddevice"
+ mov 1, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1
+ stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
+ stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, "/chosen"
+ stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1
+ call %l7
+ add %sp, (2047 + 128), %o0 ! argument array
+
+ ldx [%sp + 2047 + 128 + 0x20], %l4 ! chosen device node
+
+ mov (1b - prom_getprop_name), %l1
+ mov (1b - prom_mmu_name), %l2
+ mov (1b - prom_mmu_ihandle_cache), %l5
+ sub %l0, %l1, %l1
+ sub %l0, %l2, %l2
+ sub %l0, %l5, %l5
+
+ /* prom_mmu_ihandle_cache = prom_getint(chosen_node, "mmu") */
+ stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop"
+ mov 4, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4
+ mov 1, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
+ stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, chosen_node
+ stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "mmu"
+ stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_mmu_ihandle_cache
+ mov 4, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, sizeof(arg3)
+ stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1
+ call %l7
+ add %sp, (2047 + 128), %o0 ! argument array
+
+ mov (1b - prom_callmethod_name), %l1
+ mov (1b - prom_translate_name), %l2
+ sub %l0, %l1, %l1
+ sub %l0, %l2, %l2
+ lduw [%l5], %l5 ! prom_mmu_ihandle_cache
+
+ stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "call-method"
+ mov 3, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 3
+ mov 5, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 5
+ stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1: "translate"
+ stx %l5, [%sp + 2047 + 128 + 0x20] ! arg2: prom_mmu_ihandle_cache
+ /* PAGE align */
+ srlx %l0, 13, %l3
+ sllx %l3, 13, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: vaddr, our PC
+ stx %g0, [%sp + 2047 + 128 + 0x30] ! res1
+ stx %g0, [%sp + 2047 + 128 + 0x38] ! res2
+ stx %g0, [%sp + 2047 + 128 + 0x40] ! res3
+ stx %g0, [%sp + 2047 + 128 + 0x48] ! res4
+ stx %g0, [%sp + 2047 + 128 + 0x50] ! res5
+ call %l7
+ add %sp, (2047 + 128), %o0 ! argument array
+
+ ldx [%sp + 2047 + 128 + 0x40], %l1 ! translation mode
+ mov (1b - prom_boot_mapping_mode), %l4
+ sub %l0, %l4, %l4
+ stw %l1, [%l4]
+ mov (1b - prom_boot_mapping_phys_high), %l4
+ sub %l0, %l4, %l4
+ ldx [%sp + 2047 + 128 + 0x48], %l2 ! physaddr high
+ stx %l2, [%l4 + 0x0]
+ ldx [%sp + 2047 + 128 + 0x50], %l3 ! physaddr low
+ /* 4MB align */
+ srlx %l3, 22, %l3
+ sllx %l3, 22, %l3
+ stx %l3, [%l4 + 0x8]
+
+ /* Leave service as-is, "call-method" */
+ mov 7, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 7
+ mov 1, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
+ mov (1b - prom_map_name), %l3
+ sub %l0, %l3, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x18] ! arg1: "map"
+ /* Leave arg2 as-is, prom_mmu_ihandle_cache */
+ mov -1, %l3
+ stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: mode (-1 default)
+ sethi %hi(8 * 1024 * 1024), %l3
+ stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4: size (8MB)
+ sethi %hi(KERNBASE), %l3
+ stx %l3, [%sp + 2047 + 128 + 0x38] ! arg5: vaddr (KERNBASE)
+ stx %g0, [%sp + 2047 + 128 + 0x40] ! arg6: empty
+ mov (1b - prom_boot_mapping_phys_low), %l3
+ sub %l0, %l3, %l3
+ ldx [%l3], %l3
+ stx %l3, [%sp + 2047 + 128 + 0x48] ! arg7: phys addr
+ call %l7
+ add %sp, (2047 + 128), %o0 ! argument array
+
+ add %sp, (192 + 128), %sp
+
+sparc64_boot_after_remap:
BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot)
BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot)
ba,pt %xcc, spitfire_boot
@@ -125,185 +274,7 @@ cheetah_generic_boot:
stxa %g0, [%g3] ASI_IMMU
membar #Sync
- wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
- wr %g0, 0, %fprs
-
- /* Just like for Spitfire, we probe itlb-2 for a mapping which
- * matches our current %pc. We take the physical address in
- * that mapping and use it to make our own.
- */
-
- /* %g5 holds the tlb data */
- sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
- sllx %g5, 32, %g5
- or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
-
- /* Put PADDR tlb data mask into %g3. */
- sethi %uhi(_PAGE_PADDR), %g3
- or %g3, %ulo(_PAGE_PADDR), %g3
- sllx %g3, 32, %g3
- sethi %hi(_PAGE_PADDR), %g7
- or %g7, %lo(_PAGE_PADDR), %g7
- or %g3, %g7, %g3
-
- set 2 << 16, %l0 /* TLB entry walker. */
- set 0x1fff, %l2 /* Page mask. */
- rd %pc, %l3
- andn %l3, %l2, %g2 /* vaddr comparator */
-
-1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
- membar #Sync
- andn %g1, %l2, %g1
- cmp %g1, %g2
- be,pn %xcc, cheetah_got_tlbentry
- nop
- and %l0, (127 << 3), %g1
- cmp %g1, (127 << 3)
- blu,pt %xcc, 1b
- add %l0, (1 << 3), %l0
-
- /* Search the small TLB. OBP never maps us like that but
- * newer SILO can.
- */
- clr %l0
-
-1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
- membar #Sync
- andn %g1, %l2, %g1
- cmp %g1, %g2
- be,pn %xcc, cheetah_got_tlbentry
- nop
- cmp %l0, (15 << 3)
- blu,pt %xcc, 1b
- add %l0, (1 << 3), %l0
-
- /* BUG() if we get here... */
- ta 0x5
-
-cheetah_got_tlbentry:
- ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g0
- ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1
- membar #Sync
- and %g1, %g3, %g1
- set 0x5fff, %l0
- andn %g1, %l0, %g1
- or %g5, %g1, %g5
-
- /* Clear out any KERNBASE area entries. */
- set 2 << 16, %l0
- sethi %hi(KERNBASE), %g3
- sethi %hi(KERNBASE<<1), %g7
- mov TLB_TAG_ACCESS, %l7
-
- /* First, check ITLB */
-1: ldxa [%l0] ASI_ITLB_TAG_READ, %g1
- membar #Sync
- andn %g1, %l2, %g1
- cmp %g1, %g3
- blu,pn %xcc, 2f
- cmp %g1, %g7
- bgeu,pn %xcc, 2f
- nop
- stxa %g0, [%l7] ASI_IMMU
- membar #Sync
- stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS
- membar #Sync
-
-2: and %l0, (127 << 3), %g1
- cmp %g1, (127 << 3)
- blu,pt %xcc, 1b
- add %l0, (1 << 3), %l0
-
- /* Next, check DTLB */
- set 2 << 16, %l0
-1: ldxa [%l0] ASI_DTLB_TAG_READ, %g1
- membar #Sync
- andn %g1, %l2, %g1
- cmp %g1, %g3
- blu,pn %xcc, 2f
- cmp %g1, %g7
- bgeu,pn %xcc, 2f
- nop
- stxa %g0, [%l7] ASI_DMMU
- membar #Sync
- stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
- membar #Sync
-
-2: and %l0, (511 << 3), %g1
- cmp %g1, (511 << 3)
- blu,pt %xcc, 1b
- add %l0, (1 << 3), %l0
-
- /* On Cheetah+, have to check second DTLB. */
- BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,l0,2f)
- ba,pt %xcc, 9f
- nop
-
-2: set 3 << 16, %l0
-1: ldxa [%l0] ASI_DTLB_TAG_READ, %g1
- membar #Sync
- andn %g1, %l2, %g1
- cmp %g1, %g3
- blu,pn %xcc, 2f
- cmp %g1, %g7
- bgeu,pn %xcc, 2f
- nop
- stxa %g0, [%l7] ASI_DMMU
- membar #Sync
- stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
- membar #Sync
-
-2: and %l0, (511 << 3), %g1
- cmp %g1, (511 << 3)
- blu,pt %xcc, 1b
- add %l0, (1 << 3), %l0
-
-9:
-
- /* Now lock the TTE we created into ITLB-0 and DTLB-0,
- * entry 15 (and maybe 14 too).
- */
- sethi %hi(KERNBASE), %g3
- set (0 << 16) | (15 << 3), %g7
- stxa %g3, [%l7] ASI_DMMU
- membar #Sync
- stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
- membar #Sync
- stxa %g3, [%l7] ASI_IMMU
- membar #Sync
- stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
- membar #Sync
- flush %g3
- membar #Sync
- sethi %hi(_end), %g3 /* Check for bigkernel case */
- or %g3, %lo(_end), %g3
- srl %g3, 23, %g3 /* Check if _end > 8M */
- brz,pt %g3, 1f
- sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
- sethi %hi(0x400000), %g3
- or %g3, %lo(0x400000), %g3
- add %g5, %g3, %g5 /* New tte data */
- andn %g5, (_PAGE_G), %g5
- sethi %hi(KERNBASE+0x400000), %g3
- or %g3, %lo(KERNBASE+0x400000), %g3
- set (0 << 16) | (14 << 3), %g7
- stxa %g3, [%l7] ASI_DMMU
- membar #Sync
- stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
- membar #Sync
- stxa %g3, [%l7] ASI_IMMU
- membar #Sync
- stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
- membar #Sync
- flush %g3
- membar #Sync
- sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
- ba,pt %xcc, 1f
- nop
-
-1: set sun4u_init, %g2
- jmpl %g2 + %g0, %g0
- nop
+ ba,a,pt %xcc, jump_to_sun4u_init
spitfire_boot:
/* Typically PROM has already enabled both MMU's and both on-chip
@@ -313,6 +284,7 @@ spitfire_boot:
stxa %g1, [%g0] ASI_LSU_CONTROL
membar #Sync
+jump_to_sun4u_init:
/*
* Make sure we are in privileged mode, have address masking,
* using the ordinary globals and have enabled floating
@@ -324,151 +296,6 @@ spitfire_boot:
wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
wr %g0, 0, %fprs
-spitfire_create_mappings:
- /* %g5 holds the tlb data */
- sethi %uhi(_PAGE_VALID | _PAGE_SZ4MB), %g5
- sllx %g5, 32, %g5
- or %g5, (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W | _PAGE_G), %g5
-
- /* Base of physical memory cannot reliably be assumed to be
- * at 0x0! Figure out where it happens to be. -DaveM
- */
-
- /* Put PADDR tlb data mask into %g3. */
- sethi %uhi(_PAGE_PADDR_SF), %g3
- or %g3, %ulo(_PAGE_PADDR_SF), %g3
- sllx %g3, 32, %g3
- sethi %hi(_PAGE_PADDR_SF), %g7
- or %g7, %lo(_PAGE_PADDR_SF), %g7
- or %g3, %g7, %g3
-
- /* Walk through entire ITLB, looking for entry which maps
- * our %pc currently, stick PADDR from there into %g5 tlb data.
- */
- clr %l0 /* TLB entry walker. */
- set 0x1fff, %l2 /* Page mask. */
- rd %pc, %l3
- andn %l3, %l2, %g2 /* vaddr comparator */
-1:
- /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
- ldxa [%l0] ASI_ITLB_TAG_READ, %g1
- nop
- nop
- nop
- andn %g1, %l2, %g1 /* Get vaddr */
- cmp %g1, %g2
- be,a,pn %xcc, spitfire_got_tlbentry
- ldxa [%l0] ASI_ITLB_DATA_ACCESS, %g1
- cmp %l0, (63 << 3)
- blu,pt %xcc, 1b
- add %l0, (1 << 3), %l0
-
- /* BUG() if we get here... */
- ta 0x5
-
-spitfire_got_tlbentry:
- /* Nops here again, perhaps Cheetah/Blackbird are better behaved... */
- nop
- nop
- nop
- and %g1, %g3, %g1 /* Mask to just get paddr bits. */
- set 0x5fff, %l3 /* Mask offset to get phys base. */
- andn %g1, %l3, %g1
-
- /* NOTE: We hold on to %g1 paddr base as we need it below to lock
- * NOTE: the PROM cif code into the TLB.
- */
-
- or %g5, %g1, %g5 /* Or it into TAG being built. */
-
- clr %l0 /* TLB entry walker. */
- sethi %hi(KERNBASE), %g3 /* 4M lower limit */
- sethi %hi(KERNBASE<<1), %g7 /* 8M upper limit */
- mov TLB_TAG_ACCESS, %l7
-1:
- /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
- ldxa [%l0] ASI_ITLB_TAG_READ, %g1
- nop
- nop
- nop
- andn %g1, %l2, %g1 /* Get vaddr */
- cmp %g1, %g3
- blu,pn %xcc, 2f
- cmp %g1, %g7
- bgeu,pn %xcc, 2f
- nop
- stxa %g0, [%l7] ASI_IMMU
- stxa %g0, [%l0] ASI_ITLB_DATA_ACCESS
- membar #Sync
-2:
- cmp %l0, (63 << 3)
- blu,pt %xcc, 1b
- add %l0, (1 << 3), %l0
-
- nop; nop; nop
-
- clr %l0 /* TLB entry walker. */
-1:
- /* Yes, the nops seem to be necessary for now, don't ask me why. -DaveM */
- ldxa [%l0] ASI_DTLB_TAG_READ, %g1
- nop
- nop
- nop
- andn %g1, %l2, %g1 /* Get vaddr */
- cmp %g1, %g3
- blu,pn %xcc, 2f
- cmp %g1, %g7
- bgeu,pn %xcc, 2f
- nop
- stxa %g0, [%l7] ASI_DMMU
- stxa %g0, [%l0] ASI_DTLB_DATA_ACCESS
- membar #Sync
-2:
- cmp %l0, (63 << 3)
- blu,pt %xcc, 1b
- add %l0, (1 << 3), %l0
-
- nop; nop; nop
-
-
- /* PROM never puts any TLB entries into the MMU with the lock bit
- * set. So we gladly use tlb entry 63 for KERNBASE. And maybe 62 too.
- */
-
- sethi %hi(KERNBASE), %g3
- mov (63 << 3), %g7
- stxa %g3, [%l7] ASI_DMMU /* KERNBASE into TLB TAG */
- stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS /* TTE into TLB DATA */
- membar #Sync
- stxa %g3, [%l7] ASI_IMMU /* KERNBASE into TLB TAG */
- stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS /* TTE into TLB DATA */
- membar #Sync
- flush %g3
- membar #Sync
- sethi %hi(_end), %g3 /* Check for bigkernel case */
- or %g3, %lo(_end), %g3
- srl %g3, 23, %g3 /* Check if _end > 8M */
- brz,pt %g3, 2f
- sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
- sethi %hi(0x400000), %g3
- or %g3, %lo(0x400000), %g3
- add %g5, %g3, %g5 /* New tte data */
- andn %g5, (_PAGE_G), %g5
- sethi %hi(KERNBASE+0x400000), %g3
- or %g3, %lo(KERNBASE+0x400000), %g3
- mov (62 << 3), %g7
- stxa %g3, [%l7] ASI_DMMU
- stxa %g5, [%g7] ASI_DTLB_DATA_ACCESS
- membar #Sync
- stxa %g3, [%l7] ASI_IMMU
- stxa %g5, [%g7] ASI_ITLB_DATA_ACCESS
- membar #Sync
- flush %g3
- membar #Sync
- sethi %hi(KERNBASE), %g3 /* Restore for fixup code below */
-2: ba,pt %xcc, 1f
- nop
-1:
set sun4u_init, %g2
jmpl %g2 + %g0, %g0
nop
@@ -483,38 +310,12 @@ sun4u_init:
stxa %g0, [%g7] ASI_DMMU
membar #Sync
- /* We are now safely (we hope) in Nucleus context (0), rewrite
- * the KERNBASE TTE's so they no longer have the global bit set.
- * Don't forget to setup TAG_ACCESS first 8-)
- */
- mov TLB_TAG_ACCESS, %g2
- stxa %g3, [%g2] ASI_IMMU
- stxa %g3, [%g2] ASI_DMMU
- membar #Sync
-
BRANCH_IF_ANY_CHEETAH(g1,g7,cheetah_tlb_fixup)
ba,pt %xcc, spitfire_tlb_fixup
nop
cheetah_tlb_fixup:
- set (0 << 16) | (15 << 3), %g7
- ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g0
- ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1
- andn %g1, (_PAGE_G), %g1
- stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS
- membar #Sync
-
- ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g0
- ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1
- andn %g1, (_PAGE_G), %g1
- stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS
- membar #Sync
-
- /* Kill instruction prefetch queues. */
- flush %g3
- membar #Sync
-
mov 2, %g2 /* Set TLB type to cheetah+. */
BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
@@ -523,23 +324,7 @@ cheetah_tlb_fixup:
1: sethi %hi(tlb_type), %g1
stw %g2, [%g1 + %lo(tlb_type)]
- BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
- ba,pt %xcc, 2f
- nop
-
-1: /* Patch context register writes to support nucleus page
- * size correctly.
- */
- call cheetah_plus_patch_etrap
- nop
- call cheetah_plus_patch_rtrap
- nop
- call cheetah_plus_patch_fpdis
- nop
- call cheetah_plus_patch_winfixup
- nop
-
-2: /* Patch copy/page operations to cheetah optimized versions. */
+ /* Patch copy/page operations to cheetah optimized versions. */
call cheetah_patch_copyops
nop
call cheetah_patch_copy_page
@@ -551,21 +336,6 @@ cheetah_tlb_fixup:
nop
spitfire_tlb_fixup:
- mov (63 << 3), %g7
- ldxa [%g7] ASI_ITLB_DATA_ACCESS, %g1
- andn %g1, (_PAGE_G), %g1
- stxa %g1, [%g7] ASI_ITLB_DATA_ACCESS
- membar #Sync
-
- ldxa [%g7] ASI_DTLB_DATA_ACCESS, %g1
- andn %g1, (_PAGE_G), %g1
- stxa %g1, [%g7] ASI_DTLB_DATA_ACCESS
- membar #Sync
-
- /* Kill instruction prefetch queues. */
- flush %g3
- membar #Sync
-
/* Set TLB type to spitfire. */
mov 0, %g2
sethi %hi(tlb_type), %g1
@@ -578,24 +348,6 @@ tlb_fixup_done:
mov %sp, %l6
mov %o4, %l7
-#if 0 /* We don't do it like this anymore, but for historical hack value
- * I leave this snippet here to show how crazy we can be sometimes. 8-)
- */
-
- /* Setup "Linux Current Register", thanks Sun 8-) */
- wr %g0, 0x1, %pcr
-
- /* Blackbird errata workaround. See commentary in
- * smp.c:smp_percpu_timer_interrupt() for more
- * information.
- */
- ba,pt %xcc, 99f
- nop
- .align 64
-99: wr %g6, %g0, %pic
- rd %pic, %g0
-#endif
-
wr %g0, ASI_P, %asi
mov 1, %g1
sllx %g1, THREAD_SHIFT, %g1
@@ -629,32 +381,78 @@ tlb_fixup_done:
nop
/* Not reached... */
-/* IMPORTANT NOTE: Whenever making changes here, check
- * trampoline.S as well. -jj */
- .globl setup_tba
-setup_tba: /* i0 = is_starfire */
- save %sp, -160, %sp
+ /* This is meant to allow the sharing of this code between
+ * boot processor invocation (via setup_tba() below) and
+ * secondary processor startup (via trampoline.S). The
+ * former does use this code, the latter does not yet due
+ * to some complexities. That should be fixed up at some
+ * point.
+ *
+ * There used to be enormous complexity wrt. transferring
+ * over from the firmware's trap table to the Linux kernel's.
+ * For example, there was a chicken & egg problem wrt. building
+ * the OBP page tables, yet needing to be on the Linux kernel
+ * trap table (to translate PAGE_OFFSET addresses) in order to
+ * do that.
+ *
+ * We now handle OBP tlb misses differently, via linear lookups
+ * into the prom_trans[] array. So that specific problem no
+ * longer exists. Yet, unfortunately there are still some issues
+ * preventing trampoline.S from using this code... ho hum.
+ */
+ .globl setup_trap_table
+setup_trap_table:
+ save %sp, -192, %sp
- rdpr %tba, %g7
- sethi %hi(prom_tba), %o1
- or %o1, %lo(prom_tba), %o1
- stx %g7, [%o1]
+ /* Force interrupts to be disabled. */
+ rdpr %pstate, %o1
+ andn %o1, PSTATE_IE, %o1
+ wrpr %o1, 0x0, %pstate
+ wrpr %g0, 15, %pil
+
+ /* Make the firmware call to jump over to the Linux trap table. */
+ call prom_set_trap_table
+ sethi %hi(sparc64_ttable_tl0), %o0
+
+ /* Start using proper page size encodings in ctx register. */
+ sethi %hi(sparc64_kern_pri_context), %g3
+ ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2
+ mov PRIMARY_CONTEXT, %g1
+ stxa %g2, [%g1] ASI_DMMU
+ membar #Sync
+
+ /* The Linux trap handlers expect various trap global registers
+ * to be setup with some fixed values. So here we set these
+ * up very carefully. These globals are:
+ *
+ * Alternate Globals (PSTATE_AG):
+ *
+ * %g6 --> current_thread_info()
+ *
+ * MMU Globals (PSTATE_MG):
+ *
+ * %g1 --> TLB_SFSR
+ * %g2 --> ((_PAGE_VALID | _PAGE_SZ4MB |
+ * _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
+ * ^ 0xfffff80000000000)
+ * (this %g2 value is used for computing the PAGE_OFFSET kernel
+ * TLB entries quickly, the virtual address of the fault XOR'd
+ * with this %g2 value is the PTE to load into the TLB)
+ * %g3 --> VPTE_BASE_CHEETAH or VPTE_BASE_SPITFIRE
+ *
+ * Interrupt Globals (PSTATE_IG, setup by init_irqwork_curcpu()):
+ *
+ * %g6 --> __irq_work[smp_processor_id()]
+ */
- /* Setup "Linux" globals 8-) */
rdpr %pstate, %o1
mov %g6, %o2
- wrpr %o1, (PSTATE_AG|PSTATE_IE), %pstate
- sethi %hi(sparc64_ttable_tl0), %g1
- wrpr %g1, %tba
+ wrpr %o1, PSTATE_AG, %pstate
mov %o2, %g6
- /* Set up MMU globals */
- wrpr %o1, (PSTATE_MG|PSTATE_IE), %pstate
-
- /* Set fixed globals used by dTLB miss handler. */
#define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZ4MB)^0xfffff80000000000)
#define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W)
-
+ wrpr %o1, PSTATE_MG, %pstate
mov TSB_REG, %g1
stxa %g0, [%g1] ASI_DMMU
membar #Sync
@@ -666,17 +464,17 @@ setup_tba: /* i0 = is_starfire */
sllx %g2, 32, %g2
or %g2, KERN_LOWBITS, %g2
- BRANCH_IF_ANY_CHEETAH(g3,g7,cheetah_vpte_base)
- ba,pt %xcc, spitfire_vpte_base
+ BRANCH_IF_ANY_CHEETAH(g3,g7,8f)
+ ba,pt %xcc, 9f
nop
-cheetah_vpte_base:
+8:
sethi %uhi(VPTE_BASE_CHEETAH), %g3
or %g3, %ulo(VPTE_BASE_CHEETAH), %g3
ba,pt %xcc, 2f
sllx %g3, 32, %g3
-spitfire_vpte_base:
+9:
sethi %uhi(VPTE_BASE_SPITFIRE), %g3
or %g3, %ulo(VPTE_BASE_SPITFIRE), %g3
sllx %g3, 32, %g3
@@ -702,48 +500,55 @@ spitfire_vpte_base:
sllx %o2, 32, %o2
wr %o2, %asr25
- /* Ok, we're done setting up all the state our trap mechanims needs,
- * now get back into normal globals and let the PROM know what is up.
- */
2:
wrpr %g0, %g0, %wstate
- wrpr %o1, PSTATE_IE, %pstate
+ wrpr %o1, 0x0, %pstate
call init_irqwork_curcpu
nop
- call prom_set_trap_table
- sethi %hi(sparc64_ttable_tl0), %o0
-
- BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f)
- ba,pt %xcc, 2f
- nop
-
-1: /* Start using proper page size encodings in ctx register. */
- sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3
- mov PRIMARY_CONTEXT, %g1
- sllx %g3, 32, %g3
- sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
- or %g3, %g2, %g3
- stxa %g3, [%g1] ASI_DMMU
- membar #Sync
-
-2:
+ /* Now we can turn interrupts back on. */
rdpr %pstate, %o1
or %o1, PSTATE_IE, %o1
wrpr %o1, 0, %pstate
+ wrpr %g0, 0x0, %pil
+
+ ret
+ restore
+
+ .globl setup_tba
+setup_tba: /* i0 = is_starfire */
+ save %sp, -192, %sp
+
+ /* The boot processor is the only cpu which invokes this
+ * routine, the other cpus set things up via trampoline.S.
+ * So save the OBP trap table address here.
+ */
+ rdpr %tba, %g7
+ sethi %hi(prom_tba), %o1
+ or %o1, %lo(prom_tba), %o1
+ stx %g7, [%o1]
+
+ call setup_trap_table
+ nop
ret
restore
+sparc64_boot_end:
+
+#include "systbls.S"
+#include "ktlb.S"
+#include "etrap.S"
+#include "rtrap.S"
+#include "winfixup.S"
+#include "entry.S"
/*
- * The following skips make sure the trap table in ttable.S is aligned
+ * The following skip makes sure the trap table in ttable.S is aligned
* on a 32K boundary as required by the v9 specs for TBA register.
*/
-sparc64_boot_end:
- .skip 0x2000 + _start - sparc64_boot_end
-bootup_user_stack_end:
- .skip 0x2000
+1:
+ .skip 0x4000 + _start - 1b
#ifdef CONFIG_SBUS
/* This is just a hack to fool make depend config.h discovering
@@ -755,20 +560,6 @@ bootup_user_stack_end:
! 0x0000000000408000
#include "ttable.S"
-#include "systbls.S"
-
- .align 1024
- .globl swapper_pg_dir
-swapper_pg_dir:
- .word 0
-
-#include "etrap.S"
-#include "rtrap.S"
-#include "winfixup.S"
-#include "entry.S"
-
- /* This is just anal retentiveness on my part... */
- .align 16384
.data
.align 8
@@ -776,8 +567,11 @@ swapper_pg_dir:
prom_tba: .xword 0
tlb_type: .word 0 /* Must NOT end up in BSS */
.section ".fixup",#alloc,#execinstr
- .globl __ret_efault
+
+ .globl __ret_efault, __retl_efault
__ret_efault:
ret
restore %g0, -EFAULT, %o0
-
+__retl_efault:
+ retl
+ mov -EFAULT, %o0
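The PROM calls added to sparc64_boot all follow the IEEE 1275 client
interface convention: a flat array of 64-bit cells on the stack holding the
service-name pointer, the argument count, the return count, and then the
argument/return cells themselves. A hedged C model of the layout the
assembly builds at %sp + 2047 + 128 (struct and field names hypothetical):

/* C model of the cell array handed to the PROM cif entry point
 * (saved in %l7). The 0x00/0x08/0x10/0x18... offsets match the stx
 * instructions in the hunk above; the names are illustrative.
 */
struct p1275_cell_args {
	unsigned long service;	/* 0x00: e.g. pointer to "finddevice" */
	unsigned long nargs;	/* 0x08: number of input cells */
	unsigned long nrets;	/* 0x10: number of return cells */
	unsigned long cells[8];	/* 0x18+: arguments, then returns */
};

So prom_finddevice("/chosen") is nargs = 1, nrets = 1, cells[0] pointing at
"/chosen", with the node handle coming back in cells[1], exactly as the
first call sequence above stores and reloads it.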
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index c9b69167632a..233526ba3abe 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -27,6 +27,7 @@
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
+#include <asm/io.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
diff --git a/arch/sparc64/kernel/itlb_base.S b/arch/sparc64/kernel/itlb_base.S
index b5e32dfa4fbc..4951ff8f6877 100644
--- a/arch/sparc64/kernel/itlb_base.S
+++ b/arch/sparc64/kernel/itlb_base.S
@@ -15,14 +15,12 @@
*/
#define CREATE_VPTE_OFFSET1(r1, r2) \
srax r1, 10, r2
-#define CREATE_VPTE_OFFSET2(r1, r2)
-#define CREATE_VPTE_NOP nop
+#define CREATE_VPTE_OFFSET2(r1, r2) nop
#else /* PAGE_SHIFT */
#define CREATE_VPTE_OFFSET1(r1, r2) \
srax r1, PAGE_SHIFT, r2
#define CREATE_VPTE_OFFSET2(r1, r2) \
sllx r2, 3, r2
-#define CREATE_VPTE_NOP
#endif /* PAGE_SHIFT */
@@ -36,6 +34,7 @@
*/
/* ITLB ** ICACHE line 1: Quick user TLB misses */
+ mov TLB_SFSR, %g1
ldxa [%g1 + %g1] ASI_IMMU, %g4 ! Get TAG_ACCESS
CREATE_VPTE_OFFSET1(%g4, %g6) ! Create VPTE offset
CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset
@@ -43,41 +42,38 @@
1: brgez,pn %g5, 3f ! Not valid, branch out
sethi %hi(_PAGE_EXEC), %g4 ! Delay-slot
andcc %g5, %g4, %g0 ! Executable?
+
+/* ITLB ** ICACHE line 2: Real faults */
be,pn %xcc, 3f ! Nope, branch.
nop ! Delay-slot
2: stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load PTE into TLB
retry ! Trap return
-3: rdpr %pstate, %g4 ! Move into alternate globals
-
-/* ITLB ** ICACHE line 2: Real faults */
+3: rdpr %pstate, %g4 ! Move into alt-globals
wrpr %g4, PSTATE_AG|PSTATE_MG, %pstate
rdpr %tpc, %g5 ! And load faulting VA
mov FAULT_CODE_ITLB, %g4 ! It was read from ITLB
-sparc64_realfault_common: ! Called by TL0 dtlb_miss too
+
+/* ITLB ** ICACHE line 3: Finish faults */
+sparc64_realfault_common: ! Called by dtlb_miss
stb %g4, [%g6 + TI_FAULT_CODE]
stx %g5, [%g6 + TI_FAULT_ADDR]
ba,pt %xcc, etrap ! Save state
1: rd %pc, %g7 ! ...
- nop
-
-/* ITLB ** ICACHE line 3: Finish faults + window fixups */
call do_sparc64_fault ! Call fault handler
add %sp, PTREGS_OFF, %o0! Compute pt_regs arg
ba,pt %xcc, rtrap_clr_l6 ! Restore cpu state
nop
+
+/* ITLB ** ICACHE line 4: Window fixups */
winfix_trampoline:
rdpr %tpc, %g3 ! Prepare winfixup TNPC
- or %g3, 0x7c, %g3 ! Compute offset to branch
+ or %g3, 0x7c, %g3 ! Compute branch offset
wrpr %g3, %tnpc ! Write it into TNPC
done ! Do it to it
-
-/* ITLB ** ICACHE line 4: Unused... */
nop
nop
nop
nop
- CREATE_VPTE_NOP
#undef CREATE_VPTE_OFFSET1
#undef CREATE_VPTE_OFFSET2
-#undef CREATE_VPTE_NOP
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
new file mode 100644
index 000000000000..d9244d3c9f73
--- /dev/null
+++ b/arch/sparc64/kernel/ktlb.S
@@ -0,0 +1,194 @@
+/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
+ *
+ * Copyright (C) 1995, 1997, 2005 David S. Miller <davem@davemloft.net>
+ * Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
+ * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
+ * Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
+*/
+
+#include <linux/config.h>
+#include <asm/head.h>
+#include <asm/asi.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+ .text
+ .align 32
+
+/*
+ * On a second level vpte miss, check whether the original fault is to the OBP
+ * range (note that this is only possible for instruction miss, data misses to
+ * obp range do not use vpte). If so, go back directly to the faulting address.
+ * This is because we want to read the tpc, otherwise we have no way of knowing
+ * the 8k aligned faulting address if we are using >8k kernel pagesize. This
+ * also ensures no vpte range addresses are dropped into tlb while obp is
+ * executing (see inherit_locked_prom_mappings() rant).
+ */
+sparc64_vpte_nucleus:
+ /* Note that kvmap below has verified that the address is
+ * in the range MODULES_VADDR --> VMALLOC_END already. So
+ * here we need only check if it is an OBP address or not.
+ */
+ sethi %hi(LOW_OBP_ADDRESS), %g5
+ cmp %g4, %g5
+ blu,pn %xcc, kern_vpte
+ mov 0x1, %g5
+ sllx %g5, 32, %g5
+ cmp %g4, %g5
+ blu,pn %xcc, vpte_insn_obp
+ nop
+
+ /* These two instructions are patched by paging_init(). */
+kern_vpte:
+ sethi %hi(swapper_pgd_zero), %g5
+ lduw [%g5 + %lo(swapper_pgd_zero)], %g5
+
+ /* With kernel PGD in %g5, branch back into dtlb_backend. */
+ ba,pt %xcc, sparc64_kpte_continue
+ andn %g1, 0x3, %g1 /* Finish PMD offset adjustment. */
+
+vpte_noent:
+ /* Restore previous TAG_ACCESS, %g5 is zero, and we will
+ * skip over the trap instruction so that the top level
+ * TLB miss handler will thing this %g5 value is just an
+ * invalid PTE, thus branching to full fault processing.
+ */
+ mov TLB_SFSR, %g1
+ stxa %g4, [%g1 + %g1] ASI_DMMU
+ done
+
+vpte_insn_obp:
+ /* Behave as if we are at TL0. */
+ wrpr %g0, 1, %tl
+ rdpr %tpc, %g4 /* Find original faulting iaddr */
+ srlx %g4, 13, %g4 /* Throw out context bits */
+ sllx %g4, 13, %g4 /* g4 has vpn + ctx0 now */
+
+ /* Restore previous TAG_ACCESS. */
+ mov TLB_SFSR, %g1
+ stxa %g4, [%g1 + %g1] ASI_IMMU
+
+ sethi %hi(prom_trans), %g5
+ or %g5, %lo(prom_trans), %g5
+
+1: ldx [%g5 + 0x00], %g6 ! base
+ brz,a,pn %g6, longpath ! no more entries, fail
+ mov TLB_SFSR, %g1 ! and restore %g1
+ ldx [%g5 + 0x08], %g1 ! len
+ add %g6, %g1, %g1 ! end
+ cmp %g6, %g4
+ bgu,pt %xcc, 2f
+ cmp %g4, %g1
+ bgeu,pt %xcc, 2f
+ ldx [%g5 + 0x10], %g1 ! PTE
+
+ /* TLB load, restore %g1, and return from trap. */
+ sub %g4, %g6, %g6
+ add %g1, %g6, %g5
+ mov TLB_SFSR, %g1
+ stxa %g5, [%g0] ASI_ITLB_DATA_IN
+ retry
+
+2: ba,pt %xcc, 1b
+ add %g5, (3 * 8), %g5 ! next entry
+
+kvmap_do_obp:
+ sethi %hi(prom_trans), %g5
+ or %g5, %lo(prom_trans), %g5
+ srlx %g4, 13, %g4
+ sllx %g4, 13, %g4
+
+1: ldx [%g5 + 0x00], %g6 ! base
+ brz,a,pn %g6, longpath ! no more entries, fail
+ mov TLB_SFSR, %g1 ! and restore %g1
+ ldx [%g5 + 0x08], %g1 ! len
+ add %g6, %g1, %g1 ! end
+ cmp %g6, %g4
+ bgu,pt %xcc, 2f
+ cmp %g4, %g1
+ bgeu,pt %xcc, 2f
+ ldx [%g5 + 0x10], %g1 ! PTE
+
+ /* TLB load, restore %g1, and return from trap. */
+ sub %g4, %g6, %g6
+ add %g1, %g6, %g5
+ mov TLB_SFSR, %g1
+ stxa %g5, [%g0] ASI_DTLB_DATA_IN
+ retry
+
+2: ba,pt %xcc, 1b
+ add %g5, (3 * 8), %g5 ! next entry
+
+/*
+ * On a first level data miss, check whether this is to the OBP range (note
+ * that such accesses can be made by prom, as well as by kernel using
+ * prom_getproperty on "address"), and if so, do not use vpte access ...
+ * rather, use information saved during inherit_prom_mappings() using 8k
+ * pagesize.
+ */
+ .align 32
+kvmap:
+ brgez,pn %g4, kvmap_nonlinear
+ nop
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ .globl kvmap_linear_patch
+kvmap_linear_patch:
+#endif
+ ba,pt %xcc, kvmap_load
+ xor %g2, %g4, %g5
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ sethi %hi(swapper_pg_dir), %g5
+ or %g5, %lo(swapper_pg_dir), %g5
+ sllx %g4, 64 - (PGDIR_SHIFT + PGDIR_BITS), %g6
+ srlx %g6, 64 - PAGE_SHIFT, %g6
+ andn %g6, 0x3, %g6
+ lduw [%g5 + %g6], %g5
+ brz,pn %g5, longpath
+ sllx %g4, 64 - (PMD_SHIFT + PMD_BITS), %g6
+ srlx %g6, 64 - PAGE_SHIFT, %g6
+ sllx %g5, 11, %g5
+ andn %g6, 0x3, %g6
+ lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
+ brz,pn %g5, longpath
+ sllx %g4, 64 - PMD_SHIFT, %g6
+ srlx %g6, 64 - PAGE_SHIFT, %g6
+ sllx %g5, 11, %g5
+ andn %g6, 0x7, %g6
+ ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
+ brz,pn %g5, longpath
+ nop
+ ba,a,pt %xcc, kvmap_load
+#endif
+
+kvmap_nonlinear:
+ sethi %hi(MODULES_VADDR), %g5
+ cmp %g4, %g5
+ blu,pn %xcc, longpath
+ mov (VMALLOC_END >> 24), %g5
+ sllx %g5, 24, %g5
+ cmp %g4, %g5
+ bgeu,pn %xcc, longpath
+ nop
+
+kvmap_check_obp:
+ sethi %hi(LOW_OBP_ADDRESS), %g5
+ cmp %g4, %g5
+ blu,pn %xcc, kvmap_vmalloc_addr
+ mov 0x1, %g5
+ sllx %g5, 32, %g5
+ cmp %g4, %g5
+ blu,pn %xcc, kvmap_do_obp
+ nop
+
+kvmap_vmalloc_addr:
+ /* If we get here, a vmalloc addr was accessed, load kernel VPTE. */
+ ldxa [%g3 + %g6] ASI_N, %g5
+ brgez,pn %g5, longpath
+ nop
+
+kvmap_load:
+ /* PTE is valid, load into TLB and return from trap. */
+ stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
+ retry
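Both OBP lookup loops in the new ktlb.S walk a table of (base, length, PTE)
triples spaced 3 * 8 bytes apart (the 0x00/0x08/0x10 loads above). In C the
same walk would read roughly as follows; the entry layout comes straight
from the assembly, while the struct and function names are assumed:

/* C rendering of the prom_trans[] walk done in assembly above.
 * A zero base terminates the table; on a hit, the PTE is advanced
 * by the distance into the mapping. Names are assumptions.
 */
struct prom_trans_entry {
	unsigned long virt;	/* 0x00: mapped virtual base */
	unsigned long size;	/* 0x08: length in bytes */
	unsigned long data;	/* 0x10: PTE for the base page */
};

static unsigned long prom_trans_lookup(const struct prom_trans_entry *t,
				       unsigned long vaddr)
{
	for (; t->virt; t++) {
		if (vaddr >= t->virt && vaddr < t->virt + t->size)
			return t->data + (vaddr - t->virt);
	}
	return 0;	/* miss: caller takes the longpath fault */
}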
diff --git a/arch/sparc64/kernel/pci_iommu.c b/arch/sparc64/kernel/pci_iommu.c
index 425c60cfea19..a11910be1013 100644
--- a/arch/sparc64/kernel/pci_iommu.c
+++ b/arch/sparc64/kernel/pci_iommu.c
@@ -49,12 +49,6 @@ static void __iommu_flushall(struct pci_iommu *iommu)
/* Ensure completion of previous PIO writes. */
(void) pci_iommu_read(iommu->write_complete_reg);
-
- /* Now update everyone's flush point. */
- for (entry = 0; entry < PBM_NCLUSTERS; entry++) {
- iommu->alloc_info[entry].flush =
- iommu->alloc_info[entry].next;
- }
}
#define IOPTE_CONSISTENT(CTX) \
@@ -80,120 +74,117 @@ static void inline iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
iopte_val(*iopte) = val;
}
-void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize)
+/* Based largely upon the ppc64 iommu allocator. */
+static long pci_arena_alloc(struct pci_iommu *iommu, unsigned long npages)
{
- int i;
-
- tsbsize /= sizeof(iopte_t);
-
- for (i = 0; i < tsbsize; i++)
- iopte_make_dummy(iommu, &iommu->page_table[i]);
-}
-
-static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages)
-{
- iopte_t *iopte, *limit, *first;
- unsigned long cnum, ent, flush_point;
-
- cnum = 0;
- while ((1UL << cnum) < npages)
- cnum++;
- iopte = (iommu->page_table +
- (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
-
- if (cnum == 0)
- limit = (iommu->page_table +
- iommu->lowest_consistent_map);
- else
- limit = (iopte +
- (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
-
- iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
- flush_point = iommu->alloc_info[cnum].flush;
-
- first = iopte;
- for (;;) {
- if (IOPTE_IS_DUMMY(iommu, iopte)) {
- if ((iopte + (1 << cnum)) >= limit)
- ent = 0;
- else
- ent = ent + 1;
- iommu->alloc_info[cnum].next = ent;
- if (ent == flush_point)
- __iommu_flushall(iommu);
- break;
+ struct pci_iommu_arena *arena = &iommu->arena;
+ unsigned long n, i, start, end, limit;
+ int pass;
+
+ limit = arena->limit;
+ start = arena->hint;
+ pass = 0;
+
+again:
+ n = find_next_zero_bit(arena->map, limit, start);
+ end = n + npages;
+ if (unlikely(end >= limit)) {
+ if (likely(pass < 1)) {
+ limit = start;
+ start = 0;
+ __iommu_flushall(iommu);
+ pass++;
+ goto again;
+ } else {
+ /* Scanned the whole thing, give up. */
+ return -1;
}
- iopte += (1 << cnum);
- ent++;
- if (iopte >= limit) {
- iopte = (iommu->page_table +
- (cnum <<
- (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
- ent = 0;
+ }
+
+ for (i = n; i < end; i++) {
+ if (test_bit(i, arena->map)) {
+ start = i + 1;
+ goto again;
}
- if (ent == flush_point)
- __iommu_flushall(iommu);
- if (iopte == first)
- goto bad;
}
- /* I've got your streaming cluster right here buddy boy... */
- return iopte;
+ for (i = n; i < end; i++)
+ __set_bit(i, arena->map);
-bad:
- printk(KERN_EMERG "pci_iommu: alloc_streaming_cluster of npages(%ld) failed!\n",
- npages);
- return NULL;
+ arena->hint = end;
+
+ return n;
}
-static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base,
- unsigned long npages, unsigned long ctx)
+static void pci_arena_free(struct pci_iommu_arena *arena, unsigned long base, unsigned long npages)
{
- unsigned long cnum, ent;
+ unsigned long i;
- cnum = 0;
- while ((1UL << cnum) < npages)
- cnum++;
+ for (i = base; i < (base + npages); i++)
+ __clear_bit(i, arena->map);
+}
- ent = (base << (32 - IO_PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits))
- >> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits);
+void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize, u32 dma_offset, u32 dma_addr_mask)
+{
+ unsigned long i, tsbbase, order, sz, num_tsb_entries;
+
+ num_tsb_entries = tsbsize / sizeof(iopte_t);
+
+ /* Setup initial software IOMMU state. */
+ spin_lock_init(&iommu->lock);
+ iommu->ctx_lowest_free = 1;
+ iommu->page_table_map_base = dma_offset;
+ iommu->dma_addr_mask = dma_addr_mask;
+
+ /* Allocate and initialize the free area map. */
+ sz = num_tsb_entries / 8;
+ sz = (sz + 7UL) & ~7UL;
+ iommu->arena.map = kmalloc(sz, GFP_KERNEL);
+ if (!iommu->arena.map) {
+ prom_printf("PCI_IOMMU: Error, kmalloc(arena.map) failed.\n");
+ prom_halt();
+ }
+ memset(iommu->arena.map, 0, sz);
+ iommu->arena.limit = num_tsb_entries;
- /* If the global flush might not have caught this entry,
- * adjust the flush point such that we will flush before
- * ever trying to reuse it.
+ /* Allocate and initialize the dummy page which we
+ * set inactive IO PTEs to point to.
*/
-#define between(X,Y,Z) (((Z) - (Y)) >= ((X) - (Y)))
- if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
- iommu->alloc_info[cnum].flush = ent;
-#undef between
+ iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
+ if (!iommu->dummy_page) {
+ prom_printf("PCI_IOMMU: Error, gfp(dummy_page) failed.\n");
+ prom_halt();
+ }
+ memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
+ iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
+
+ /* Now allocate and setup the IOMMU page table itself. */
+ order = get_order(tsbsize);
+ tsbbase = __get_free_pages(GFP_KERNEL, order);
+ if (!tsbbase) {
+ prom_printf("PCI_IOMMU: Error, gfp(tsb) failed.\n");
+ prom_halt();
+ }
+ iommu->page_table = (iopte_t *)tsbbase;
+
+ for (i = 0; i < num_tsb_entries; i++)
+ iopte_make_dummy(iommu, &iommu->page_table[i]);
}
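
With the allocations centralized here, each controller driver reduces its IOMMU bring-up to one call. A hedged summary of the new contract, using the three call sites visible later in this patch (arguments: TSB size in bytes, DMA window base, DMA address mask):

/* psycho: fixed 1MB TSB, 32-bit window at 0xc0000000 */
pci_iommu_table_init(iommu, IO_TSB_SIZE, 0xc0000000, 0xffffffff);

/* sabre: TSB size probed per board */
pci_iommu_table_init(iommu, tsbsize * 1024 * 8, dvma_offset, dma_mask);

/* schizo: window and mask taken from firmware-probed values */
pci_iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);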
-/* We allocate consistent mappings from the end of cluster zero. */
-static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
+static inline iopte_t *alloc_npages(struct pci_iommu *iommu, unsigned long npages)
{
- iopte_t *iopte;
+ long entry;
- iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS));
- while (iopte > iommu->page_table) {
- iopte--;
- if (IOPTE_IS_DUMMY(iommu, iopte)) {
- unsigned long tmp = npages;
+ entry = pci_arena_alloc(iommu, npages);
+ if (unlikely(entry < 0))
+ return NULL;
- while (--tmp) {
- iopte--;
- if (!IOPTE_IS_DUMMY(iommu, iopte))
- break;
- }
- if (tmp == 0) {
- u32 entry = (iopte - iommu->page_table);
+ return iommu->page_table + entry;
+}
- if (entry < iommu->lowest_consistent_map)
- iommu->lowest_consistent_map = entry;
- return iopte;
- }
- }
- }
- return NULL;
+static inline void free_npages(struct pci_iommu *iommu, dma_addr_t base, unsigned long npages)
+{
+ pci_arena_free(&iommu->arena, base >> IO_PAGE_SHIFT, npages);
}
static int iommu_alloc_ctx(struct pci_iommu *iommu)
@@ -233,7 +224,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
struct pcidev_cookie *pcp;
struct pci_iommu *iommu;
iopte_t *iopte;
- unsigned long flags, order, first_page, ctx;
+ unsigned long flags, order, first_page;
void *ret;
int npages;
@@ -251,9 +242,10 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
iommu = pcp->pbm->iommu;
spin_lock_irqsave(&iommu->lock, flags);
- iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
- if (iopte == NULL) {
- spin_unlock_irqrestore(&iommu->lock, flags);
+ iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ if (unlikely(iopte == NULL)) {
free_pages(first_page, order);
return NULL;
}
@@ -262,31 +254,15 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
((iopte - iommu->page_table) << IO_PAGE_SHIFT));
ret = (void *) first_page;
npages = size >> IO_PAGE_SHIFT;
- ctx = 0;
- if (iommu->iommu_ctxflush)
- ctx = iommu_alloc_ctx(iommu);
first_page = __pa(first_page);
while (npages--) {
- iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
+ iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
IOPTE_WRITE |
(first_page & IOPTE_PAGE));
iopte++;
first_page += IO_PAGE_SIZE;
}
- {
- int i;
- u32 daddr = *dma_addrp;
-
- npages = size >> IO_PAGE_SHIFT;
- for (i = 0; i < npages; i++) {
- pci_iommu_write(iommu->iommu_flush, daddr);
- daddr += IO_PAGE_SIZE;
- }
- }
-
- spin_unlock_irqrestore(&iommu->lock, flags);
-
return ret;
}
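
The hunk above also narrows the lock scope: only the bitmap reservation runs under iommu->lock, and once a range is reserved no other CPU can hand it out, so the IOPTE fill proceeds unlocked. A hedged restatement of the pattern, with the same variable names as the function above:

spin_lock_irqsave(&iommu->lock, flags);
iopte = alloc_npages(iommu, size >> IO_PAGE_SHIFT);  /* reserve range */
spin_unlock_irqrestore(&iommu->lock, flags);

if (unlikely(iopte == NULL)) {
        free_pages(first_page, order);               /* failure path */
        return NULL;
}

while (npages--) {
        /* fill unlocked; first_page already converted with __pa() */
        iopte_val(*iopte) = IOPTE_CONSISTENT(0UL) |
                            IOPTE_WRITE |
                            (first_page & IOPTE_PAGE);
        iopte++;
        first_page += IO_PAGE_SIZE;
}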
@@ -296,7 +272,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
struct pcidev_cookie *pcp;
struct pci_iommu *iommu;
iopte_t *iopte;
- unsigned long flags, order, npages, i, ctx;
+ unsigned long flags, order, npages;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
pcp = pdev->sysdata;
@@ -306,46 +282,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
spin_lock_irqsave(&iommu->lock, flags);
- if ((iopte - iommu->page_table) ==
- iommu->lowest_consistent_map) {
- iopte_t *walk = iopte + npages;
- iopte_t *limit;
-
- limit = (iommu->page_table +
- (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
- while (walk < limit) {
- if (!IOPTE_IS_DUMMY(iommu, walk))
- break;
- walk++;
- }
- iommu->lowest_consistent_map =
- (walk - iommu->page_table);
- }
-
- /* Data for consistent mappings cannot enter the streaming
- * buffers, so we only need to update the TSB. We flush
- * the IOMMU here as well to prevent conflicts with the
- * streaming mapping deferred tlb flush scheme.
- */
-
- ctx = 0;
- if (iommu->iommu_ctxflush)
- ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
-
- for (i = 0; i < npages; i++, iopte++)
- iopte_make_dummy(iommu, iopte);
-
- if (iommu->iommu_ctxflush) {
- pci_iommu_write(iommu->iommu_ctxflush, ctx);
- } else {
- for (i = 0; i < npages; i++) {
- u32 daddr = dvma + (i << IO_PAGE_SHIFT);
-
- pci_iommu_write(iommu->iommu_flush, daddr);
- }
- }
-
- iommu_free_ctx(iommu, ctx);
+ free_npages(iommu, dvma, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
@@ -372,25 +309,27 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
iommu = pcp->pbm->iommu;
strbuf = &pcp->pbm->stc;
- if (direction == PCI_DMA_NONE)
- BUG();
+ if (unlikely(direction == PCI_DMA_NONE))
+ goto bad_no_ctx;
oaddr = (unsigned long)ptr;
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
spin_lock_irqsave(&iommu->lock, flags);
+ base = alloc_npages(iommu, npages);
+ ctx = 0;
+ if (iommu->iommu_ctxflush)
+ ctx = iommu_alloc_ctx(iommu);
+ spin_unlock_irqrestore(&iommu->lock, flags);
- base = alloc_streaming_cluster(iommu, npages);
- if (base == NULL)
+ if (unlikely(!base))
goto bad;
+
bus_addr = (iommu->page_table_map_base +
((base - iommu->page_table) << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
base_paddr = __pa(oaddr & IO_PAGE_MASK);
- ctx = 0;
- if (iommu->iommu_ctxflush)
- ctx = iommu_alloc_ctx(iommu);
if (strbuf->strbuf_enabled)
iopte_protection = IOPTE_STREAMING(ctx);
else
@@ -401,12 +340,13 @@ dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direct
for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
iopte_val(*base) = iopte_protection | base_paddr;
- spin_unlock_irqrestore(&iommu->lock, flags);
-
return ret;
bad:
- spin_unlock_irqrestore(&iommu->lock, flags);
+ iommu_free_ctx(iommu, ctx);
+bad_no_ctx:
+ if (printk_ratelimit())
+ WARN_ON(1);
return PCI_DMA_ERROR_CODE;
}
@@ -481,10 +421,13 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
struct pci_iommu *iommu;
struct pci_strbuf *strbuf;
iopte_t *base;
- unsigned long flags, npages, ctx;
+ unsigned long flags, npages, ctx, i;
- if (direction == PCI_DMA_NONE)
- BUG();
+ if (unlikely(direction == PCI_DMA_NONE)) {
+ if (printk_ratelimit())
+ WARN_ON(1);
+ return;
+ }
pcp = pdev->sysdata;
iommu = pcp->pbm->iommu;
@@ -510,13 +453,14 @@ void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int
/* Step 1: Kick data out of streaming buffers if necessary. */
if (strbuf->strbuf_enabled)
- pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
+ pci_strbuf_flush(strbuf, iommu, bus_addr, ctx,
+ npages, direction);
- /* Step 2: Clear out first TSB entry. */
- iopte_make_dummy(iommu, base);
+ /* Step 2: Clear out TSB entries. */
+ for (i = 0; i < npages; i++)
+ iopte_make_dummy(iommu, base + i);
- free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
- npages, ctx);
+ free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
iommu_free_ctx(iommu, ctx);
@@ -621,6 +565,8 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
pci_map_single(pdev,
(page_address(sglist->page) + sglist->offset),
sglist->length, direction);
+ if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
+ return 0;
sglist->dma_length = sglist->length;
return 1;
}
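
pci_map_sg() reports the number of DMA segments actually mapped, so the single-entry fast path above now has to propagate a pci_map_single() failure as 0 rather than blindly reporting success. The shape of the check, restated:

if (nelems == 1) {
        sglist->dma_address = pci_map_single(pdev,
                        page_address(sglist->page) + sglist->offset,
                        sglist->length, direction);
        if (unlikely(sglist->dma_address == PCI_DMA_ERROR_CODE))
                return 0;               /* no entries mapped */
        sglist->dma_length = sglist->length;
        return 1;                       /* one entry mapped */
}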
@@ -629,21 +575,29 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
iommu = pcp->pbm->iommu;
strbuf = &pcp->pbm->stc;
- if (direction == PCI_DMA_NONE)
- BUG();
+ if (unlikely(direction == PCI_DMA_NONE))
+ goto bad_no_ctx;
/* Step 1: Prepare scatter list. */
npages = prepare_sg(sglist, nelems);
- /* Step 2: Allocate a cluster. */
+ /* Step 2: Allocate a cluster and context, if necessary. */
spin_lock_irqsave(&iommu->lock, flags);
- base = alloc_streaming_cluster(iommu, npages);
+ base = alloc_npages(iommu, npages);
+ ctx = 0;
+ if (iommu->iommu_ctxflush)
+ ctx = iommu_alloc_ctx(iommu);
+
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
if (base == NULL)
goto bad;
- dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT);
+
+ dma_base = iommu->page_table_map_base +
+ ((base - iommu->page_table) << IO_PAGE_SHIFT);
/* Step 3: Normalize DMA addresses. */
used = nelems;
@@ -656,30 +610,28 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
}
used = nelems - used;
- /* Step 4: Choose a context if necessary. */
- ctx = 0;
- if (iommu->iommu_ctxflush)
- ctx = iommu_alloc_ctx(iommu);
-
- /* Step 5: Create the mappings. */
+ /* Step 4: Create the mappings. */
if (strbuf->strbuf_enabled)
iopte_protection = IOPTE_STREAMING(ctx);
else
iopte_protection = IOPTE_CONSISTENT(ctx);
if (direction != PCI_DMA_TODEVICE)
iopte_protection |= IOPTE_WRITE;
- fill_sg (base, sglist, used, nelems, iopte_protection);
+
+ fill_sg(base, sglist, used, nelems, iopte_protection);
+
#ifdef VERIFY_SG
verify_sglist(sglist, nelems, base, npages);
#endif
- spin_unlock_irqrestore(&iommu->lock, flags);
-
return used;
bad:
- spin_unlock_irqrestore(&iommu->lock, flags);
- return PCI_DMA_ERROR_CODE;
+ iommu_free_ctx(iommu, ctx);
+bad_no_ctx:
+ if (printk_ratelimit())
+ WARN_ON(1);
+ return 0;
}
/* Unmap a set of streaming mode DMA translations. */
@@ -692,8 +644,10 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
unsigned long flags, ctx, i, npages;
u32 bus_addr;
- if (direction == PCI_DMA_NONE)
- BUG();
+ if (unlikely(direction == PCI_DMA_NONE)) {
+ if (printk_ratelimit())
+ WARN_ON(1);
+ }
pcp = pdev->sysdata;
iommu = pcp->pbm->iommu;
@@ -705,7 +659,8 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
if (sglist[i].dma_length == 0)
break;
i--;
- npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;
+ npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+ bus_addr) >> IO_PAGE_SHIFT;
base = iommu->page_table +
((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
@@ -726,11 +681,11 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
if (strbuf->strbuf_enabled)
pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
- /* Step 2: Clear out first TSB entry. */
- iopte_make_dummy(iommu, base);
+ /* Step 2: Clear out the TSB entries. */
+ for (i = 0; i < npages; i++)
+ iopte_make_dummy(iommu, base + i);
- free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
- npages, ctx);
+ free_npages(iommu, bus_addr - iommu->page_table_map_base, npages);
iommu_free_ctx(iommu, ctx);
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 6ed1ef25e0ac..c03ed5f49d31 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1207,13 +1207,9 @@ static void psycho_scan_bus(struct pci_controller_info *p)
static void psycho_iommu_init(struct pci_controller_info *p)
{
struct pci_iommu *iommu = p->pbm_A.iommu;
- unsigned long tsbbase, i;
+ unsigned long i;
u64 control;
- /* Setup initial software IOMMU state. */
- spin_lock_init(&iommu->lock);
- iommu->ctx_lowest_free = 1;
-
/* Register addresses. */
iommu->iommu_control = p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL;
iommu->iommu_tsbbase = p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE;
@@ -1240,40 +1236,10 @@ static void psycho_iommu_init(struct pci_controller_info *p)
/* Leave diag mode enabled for full-flushing done
* in pci_iommu.c
*/
+ pci_iommu_table_init(iommu, IO_TSB_SIZE, 0xc0000000, 0xffffffff);
- iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
- if (!iommu->dummy_page) {
- prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
- prom_halt();
- }
- memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
- iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
-
- /* Using assumed page size 8K with 128K entries we need 1MB iommu page
- * table (128K ioptes * 8 bytes per iopte). This is
- * page order 7 on UltraSparc.
- */
- tsbbase = __get_free_pages(GFP_KERNEL, get_order(IO_TSB_SIZE));
- if (!tsbbase) {
- prom_printf("PSYCHO_IOMMU: Error, gfp(tsb) failed.\n");
- prom_halt();
- }
- iommu->page_table = (iopte_t *)tsbbase;
- iommu->page_table_sz_bits = 17;
- iommu->page_table_map_base = 0xc0000000;
- iommu->dma_addr_mask = 0xffffffff;
- pci_iommu_table_init(iommu, IO_TSB_SIZE);
-
- /* We start with no consistent mappings. */
- iommu->lowest_consistent_map =
- 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
-
- for (i = 0; i < PBM_NCLUSTERS; i++) {
- iommu->alloc_info[i].flush = 0;
- iommu->alloc_info[i].next = 0;
- }
-
- psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE, __pa(tsbbase));
+ psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_TSBBASE,
+ __pa(iommu->page_table));
control = psycho_read(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL);
control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ);
@@ -1281,7 +1247,7 @@ static void psycho_iommu_init(struct pci_controller_info *p)
psycho_write(p->pbm_A.controller_regs + PSYCHO_IOMMU_CONTROL, control);
/* If necessary, hook us up for starfire IRQ translations. */
- if(this_is_starfire)
+ if (this_is_starfire)
p->starfire_cookie = starfire_hookup(p->pbm_A.portid);
else
p->starfire_cookie = NULL;
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 0ee6bd5b9ac6..da8e1364194f 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -1267,13 +1267,9 @@ static void sabre_iommu_init(struct pci_controller_info *p,
u32 dma_mask)
{
struct pci_iommu *iommu = p->pbm_A.iommu;
- unsigned long tsbbase, i, order;
+ unsigned long i;
u64 control;
- /* Setup initial software IOMMU state. */
- spin_lock_init(&iommu->lock);
- iommu->ctx_lowest_free = 1;
-
/* Register addresses. */
iommu->iommu_control = p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL;
iommu->iommu_tsbbase = p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE;
@@ -1295,26 +1291,10 @@ static void sabre_iommu_init(struct pci_controller_info *p,
/* Leave diag mode enabled for full-flushing done
* in pci_iommu.c
*/
+ pci_iommu_table_init(iommu, tsbsize * 1024 * 8, dvma_offset, dma_mask);
- iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
- if (!iommu->dummy_page) {
- prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
- prom_halt();
- }
- memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
- iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
-
- tsbbase = __get_free_pages(GFP_KERNEL, order = get_order(tsbsize * 1024 * 8));
- if (!tsbbase) {
- prom_printf("SABRE_IOMMU: Error, gfp(tsb) failed.\n");
- prom_halt();
- }
- iommu->page_table = (iopte_t *)tsbbase;
- iommu->page_table_map_base = dvma_offset;
- iommu->dma_addr_mask = dma_mask;
- pci_iommu_table_init(iommu, PAGE_SIZE << order);
-
- sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE, __pa(tsbbase));
+ sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_TSBBASE,
+ __pa(iommu->page_table));
control = sabre_read(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL);
control &= ~(SABRE_IOMMUCTRL_TSBSZ | SABRE_IOMMUCTRL_TBWSZ);
@@ -1322,11 +1302,9 @@ static void sabre_iommu_init(struct pci_controller_info *p,
switch(tsbsize) {
case 64:
control |= SABRE_IOMMU_TSBSZ_64K;
- iommu->page_table_sz_bits = 16;
break;
case 128:
control |= SABRE_IOMMU_TSBSZ_128K;
- iommu->page_table_sz_bits = 17;
break;
default:
prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
@@ -1334,15 +1312,6 @@ static void sabre_iommu_init(struct pci_controller_info *p,
break;
}
sabre_write(p->pbm_A.controller_regs + SABRE_IOMMU_CONTROL, control);
-
- /* We start with no consistent mappings. */
- iommu->lowest_consistent_map =
- 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
-
- for (i = 0; i < PBM_NCLUSTERS; i++) {
- iommu->alloc_info[i].flush = 0;
- iommu->alloc_info[i].next = 0;
- }
}
static void pbm_register_toplevel_resources(struct pci_controller_info *p,
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index 331382e1a75d..d8c4e0919b4e 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -330,7 +330,7 @@ static int schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2)
{
unsigned long sync_reg = (unsigned long) _arg2;
- u64 mask = 1 << (__irq_ino(__irq(bucket)) & IMAP_INO);
+ u64 mask = 1UL << (__irq_ino(__irq(bucket)) & IMAP_INO);
u64 val;
int limit;
@@ -1765,7 +1765,7 @@ static void schizo_pbm_strbuf_init(struct pci_pbm_info *pbm)
static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
{
struct pci_iommu *iommu = pbm->iommu;
- unsigned long tsbbase, i, tagbase, database, order;
+ unsigned long i, tagbase, database;
u32 vdma[2], dma_mask;
u64 control;
int err, tsbsize;
@@ -1800,10 +1800,6 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
prom_halt();
};
- /* Setup initial software IOMMU state. */
- spin_lock_init(&iommu->lock);
- iommu->ctx_lowest_free = 1;
-
/* Register addresses, SCHIZO has iommu ctx flushing. */
iommu->iommu_control = pbm->pbm_regs + SCHIZO_IOMMU_CONTROL;
iommu->iommu_tsbbase = pbm->pbm_regs + SCHIZO_IOMMU_TSBBASE;
@@ -1832,56 +1828,9 @@ static void schizo_pbm_iommu_init(struct pci_pbm_info *pbm)
/* Leave diag mode enabled for full-flushing done
* in pci_iommu.c
*/
+ pci_iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);
- iommu->dummy_page = __get_free_pages(GFP_KERNEL, 0);
- if (!iommu->dummy_page) {
- prom_printf("PSYCHO_IOMMU: Error, gfp(dummy_page) failed.\n");
- prom_halt();
- }
- memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
- iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
-
- /* Using assumed page size 8K with 128K entries we need 1MB iommu page
- * table (128K ioptes * 8 bytes per iopte). This is
- * page order 7 on UltraSparc.
- */
- order = get_order(tsbsize * 8 * 1024);
- tsbbase = __get_free_pages(GFP_KERNEL, order);
- if (!tsbbase) {
- prom_printf("%s: Error, gfp(tsb) failed.\n", pbm->name);
- prom_halt();
- }
-
- iommu->page_table = (iopte_t *)tsbbase;
- iommu->page_table_map_base = vdma[0];
- iommu->dma_addr_mask = dma_mask;
- pci_iommu_table_init(iommu, PAGE_SIZE << order);
-
- switch (tsbsize) {
- case 64:
- iommu->page_table_sz_bits = 16;
- break;
-
- case 128:
- iommu->page_table_sz_bits = 17;
- break;
-
- default:
- prom_printf("iommu_init: Illegal TSB size %d\n", tsbsize);
- prom_halt();
- break;
- };
-
- /* We start with no consistent mappings. */
- iommu->lowest_consistent_map =
- 1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS);
-
- for (i = 0; i < PBM_NCLUSTERS; i++) {
- iommu->alloc_info[i].flush = 0;
- iommu->alloc_info[i].next = 0;
- }
-
- schizo_write(iommu->iommu_tsbbase, __pa(tsbbase));
+ schizo_write(iommu->iommu_tsbbase, __pa(iommu->page_table));
control = schizo_read(iommu->iommu_control);
control &= ~(SCHIZO_IOMMU_CTRL_TSBSZ | SCHIZO_IOMMU_CTRL_TBWSZ);
diff --git a/arch/sparc64/kernel/power.c b/arch/sparc64/kernel/power.c
index 946cee0257ea..9e8362ea3104 100644
--- a/arch/sparc64/kernel/power.c
+++ b/arch/sparc64/kernel/power.c
@@ -17,6 +17,7 @@
#include <asm/system.h>
#include <asm/ebus.h>
+#include <asm/isa.h>
#include <asm/auxio.h>
#include <linux/unistd.h>
@@ -100,46 +101,83 @@ again:
return 0;
}
-static int __init has_button_interrupt(struct linux_ebus_device *edev)
+static int __init has_button_interrupt(unsigned int irq, int prom_node)
{
- if (edev->irqs[0] == PCI_IRQ_NONE)
+ if (irq == PCI_IRQ_NONE)
return 0;
- if (!prom_node_has_property(edev->prom_node, "button"))
+ if (!prom_node_has_property(prom_node, "button"))
return 0;
return 1;
}
-void __init power_init(void)
+static int __init power_probe_ebus(struct resource **resp, unsigned int *irq_p, int *prom_node_p)
{
struct linux_ebus *ebus;
struct linux_ebus_device *edev;
+
+ for_each_ebus(ebus) {
+ for_each_ebusdev(edev, ebus) {
+ if (!strcmp(edev->prom_name, "power")) {
+ *resp = &edev->resource[0];
+ *irq_p = edev->irqs[0];
+ *prom_node_p = edev->prom_node;
+ return 0;
+ }
+ }
+ }
+ return -ENODEV;
+}
+
+static int __init power_probe_isa(struct resource **resp, unsigned int *irq_p, int *prom_node_p)
+{
+ struct sparc_isa_bridge *isa_bus;
+ struct sparc_isa_device *isa_dev;
+
+ for_each_isa(isa_bus) {
+ for_each_isadev(isa_dev, isa_bus) {
+ if (!strcmp(isa_dev->prom_name, "power")) {
+ *resp = &isa_dev->resource;
+ *irq_p = isa_dev->irq;
+ *prom_node_p = isa_dev->prom_node;
+ return 0;
+ }
+ }
+ }
+ return -ENODEV;
+}
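
Both helpers fill the same (resource, irq, prom_node) triple, so power_init() below can stay bus-agnostic. A hedged sketch of how the two probes compose; power_probe() is an illustrative wrapper, not part of the patch:

static int __init power_probe(struct resource **resp, unsigned int *irq_p,
                              int *prom_node_p)
{
        /* EBUS first, then ISA; 0 means the "power" device was found. */
        if (!power_probe_ebus(resp, irq_p, prom_node_p))
                return 0;
        return power_probe_isa(resp, irq_p, prom_node_p);
}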
+
+void __init power_init(void)
+{
+ struct resource *res = NULL;
+ unsigned int irq;
+ int prom_node;
static int invoked;
if (invoked)
return;
invoked = 1;
- for_each_ebus(ebus) {
- for_each_ebusdev(edev, ebus) {
- if (!strcmp(edev->prom_name, "power"))
- goto found;
- }
- }
+ if (!power_probe_ebus(&res, &irq, &prom_node))
+ goto found;
+
+ if (!power_probe_isa(&res, &irq, &prom_node))
+ goto found;
+
return;
found:
- power_reg = ioremap(edev->resource[0].start, 0x4);
+ power_reg = ioremap(res->start, 0x4);
printk("power: Control reg at %p ... ", power_reg);
poweroff_method = machine_halt; /* able to use the standard halt */
- if (has_button_interrupt(edev)) {
+ if (has_button_interrupt(irq, prom_node)) {
if (kernel_thread(powerd, NULL, CLONE_FS) < 0) {
printk("Failed to start power daemon.\n");
return;
}
printk("powerd running.\n");
- if (request_irq(edev->irqs[0],
+ if (request_irq(irq,
power_handler, SA_SHIRQ, "power", NULL) < 0)
printk("power: Error, cannot register IRQ handler.\n");
} else {
diff --git a/arch/sparc64/kernel/ptrace.c b/arch/sparc64/kernel/ptrace.c
index 23ad839d113f..774ecbb8a031 100644
--- a/arch/sparc64/kernel/ptrace.c
+++ b/arch/sparc64/kernel/ptrace.c
@@ -30,6 +30,8 @@
#include <asm/psrcompat.h>
#include <asm/visasm.h>
#include <asm/spitfire.h>
+#include <asm/page.h>
+#include <asm/cpudata.h>
/* Returning from ptrace is a bit tricky because the syscall return
* low level code assumes any value returned which is negative and
@@ -128,20 +130,24 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
* is mapped to in the user's address space, we can skip the
* D-cache flush.
*/
- if ((uaddr ^ kaddr) & (1UL << 13)) {
+ if ((uaddr ^ (unsigned long) kaddr) & (1UL << 13)) {
unsigned long start = __pa(kaddr);
unsigned long end = start + len;
+ unsigned long dcache_line_size;
+
+ dcache_line_size = local_cpu_data().dcache_line_size;
if (tlb_type == spitfire) {
- for (; start < end; start += 32)
- spitfire_put_dcache_tag(va & 0x3fe0, 0x0);
+ for (; start < end; start += dcache_line_size)
+ spitfire_put_dcache_tag(start & 0x3fe0, 0x0);
} else {
- for (; start < end; start += 32)
+ start &= ~(dcache_line_size - 1);
+ for (; start < end; start += dcache_line_size)
__asm__ __volatile__(
"stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* no outputs */
- : "r" (va),
+ : "r" (start),
"i" (ASI_DCACHE_INVALIDATE));
}
}
@@ -149,8 +155,11 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
if (write && tlb_type == spitfire) {
unsigned long start = (unsigned long) kaddr;
unsigned long end = start + len;
+ unsigned long icache_line_size;
+
+ icache_line_size = local_cpu_data().icache_line_size;
- for (; start < end; start += 32)
+ for (; start < end; start += icache_line_size)
flushi(start);
}
}
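
Both loops above replace a hardwired 32-byte stride with the line size probed into cpu_data at boot. The idiom, as a hedged generic helper — flush_range/flush_line are illustrative names; the real loops inline the stxa/membar and flushi sequences:

static void flush_range(unsigned long start, unsigned long end,
                        unsigned long line_size,
                        void (*flush_line)(unsigned long))
{
        /* line_size is assumed to be a power of two */
        start &= ~(line_size - 1UL);
        for (; start < end; start += line_size)
                flush_line(start);
}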
diff --git a/arch/sparc64/kernel/rtrap.S b/arch/sparc64/kernel/rtrap.S
index fafd227735fa..090dcca00d2a 100644
--- a/arch/sparc64/kernel/rtrap.S
+++ b/arch/sparc64/kernel/rtrap.S
@@ -256,9 +256,8 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
brnz,pn %l3, kern_rtt
mov PRIMARY_CONTEXT, %l7
ldxa [%l7 + %l7] ASI_DMMU, %l0
-cplus_rtrap_insn_1:
- sethi %hi(0), %l1
- sllx %l1, 32, %l1
+ sethi %hi(sparc64_kern_pri_nuc_bits), %l1
+ ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
or %l0, %l1, %l0
stxa %l0, [%l7] ASI_DMMU
flush %g6
@@ -313,53 +312,36 @@ kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5
wr %g1, FPRS_FEF, %fprs
ldx [%o1 + %o5], %g1
add %g6, TI_XFSR, %o1
- membar #StoreLoad | #LoadLoad
sll %o0, 8, %o2
add %g6, TI_FPREGS, %o3
brz,pn %l6, 1f
add %g6, TI_FPREGS+0x40, %o4
+ membar #Sync
ldda [%o3 + %o2] ASI_BLK_P, %f0
ldda [%o4 + %o2] ASI_BLK_P, %f16
+ membar #Sync
1: andcc %l2, FPRS_DU, %g0
be,pn %icc, 1f
wr %g1, 0, %gsr
add %o2, 0x80, %o2
+ membar #Sync
ldda [%o3 + %o2] ASI_BLK_P, %f32
ldda [%o4 + %o2] ASI_BLK_P, %f48
-
1: membar #Sync
ldx [%o1 + %o5], %fsr
2: stb %l5, [%g6 + TI_FPDEPTH]
ba,pt %xcc, rt_continue
nop
5: wr %g0, FPRS_FEF, %fprs
- membar #StoreLoad | #LoadLoad
sll %o0, 8, %o2
add %g6, TI_FPREGS+0x80, %o3
add %g6, TI_FPREGS+0xc0, %o4
+ membar #Sync
ldda [%o3 + %o2] ASI_BLK_P, %f32
ldda [%o4 + %o2] ASI_BLK_P, %f48
membar #Sync
wr %g0, FPRS_DU, %fprs
ba,pt %xcc, rt_continue
stb %l5, [%g6 + TI_FPDEPTH]
-
-cplus_rinsn_1:
- sethi %uhi(CTX_CHEETAH_PLUS_NUC), %l1
-
- .globl cheetah_plus_patch_rtrap
-cheetah_plus_patch_rtrap:
- /* We configure the dTLB512_0 for 4MB pages and the
- * dTLB512_1 for 8K pages when in context zero.
- */
- sethi %hi(cplus_rinsn_1), %o0
- sethi %hi(cplus_rtrap_insn_1), %o2
- lduw [%o0 + %lo(cplus_rinsn_1)], %o1
- or %o2, %lo(cplus_rtrap_insn_1), %o2
- stw %o1, [%o2]
- flush %o2
-
- retl
- nop
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index ddbed3341a23..c1f34237cdf2 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -187,17 +187,13 @@ int prom_callback(long *args)
}
if ((va >= KERNBASE) && (va < (KERNBASE + (4 * 1024 * 1024)))) {
- unsigned long kernel_pctx = 0;
-
- if (tlb_type == cheetah_plus)
- kernel_pctx |= (CTX_CHEETAH_PLUS_NUC |
- CTX_CHEETAH_PLUS_CTX0);
+ extern unsigned long sparc64_kern_pri_context;
/* Spitfire Errata #32 workaround */
__asm__ __volatile__("stxa %0, [%1] %2\n\t"
"flush %%g6"
: /* No outputs */
- : "r" (kernel_pctx),
+ : "r" (sparc64_kern_pri_context),
"r" (PRIMARY_CONTEXT),
"i" (ASI_DMMU));
@@ -464,8 +460,6 @@ static void __init boot_flags_init(char *commands)
}
}
-extern int prom_probe_memory(void);
-extern unsigned long start, end;
extern void panic_setup(char *, int *);
extern unsigned short root_flags;
@@ -492,13 +486,8 @@ void register_prom_callbacks(void)
"' linux-.soft2 to .soft2");
}
-extern void paging_init(void);
-
void __init setup_arch(char **cmdline_p)
{
- unsigned long highest_paddr;
- int i;
-
/* Initialize PROM console and command line. */
*cmdline_p = prom_getbootargs();
strcpy(saved_command_line, *cmdline_p);
@@ -517,40 +506,6 @@ void __init setup_arch(char **cmdline_p)
boot_flags_init(*cmdline_p);
idprom_init();
- (void) prom_probe_memory();
-
- /* In paging_init() we tip off this value to see if we need
- * to change init_mm.pgd to point to the real alias mapping.
- */
- phys_base = 0xffffffffffffffffUL;
- highest_paddr = 0UL;
- for (i = 0; sp_banks[i].num_bytes != 0; i++) {
- unsigned long top;
-
- if (sp_banks[i].base_addr < phys_base)
- phys_base = sp_banks[i].base_addr;
- top = sp_banks[i].base_addr +
- sp_banks[i].num_bytes;
- if (highest_paddr < top)
- highest_paddr = top;
- }
- pfn_base = phys_base >> PAGE_SHIFT;
-
- switch (tlb_type) {
- default:
- case spitfire:
- kern_base = spitfire_get_itlb_data(sparc64_highest_locked_tlbent());
- kern_base &= _PAGE_PADDR_SF;
- break;
-
- case cheetah:
- case cheetah_plus:
- kern_base = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
- kern_base &= _PAGE_PADDR;
- break;
- };
-
- kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
if (!root_flags)
root_mountflags &= ~MS_RDONLY;
@@ -625,6 +580,9 @@ extern void smp_info(struct seq_file *);
extern void smp_bogo(struct seq_file *);
extern void mmu_info(struct seq_file *);
+unsigned int dcache_parity_tl1_occurred;
+unsigned int icache_parity_tl1_occurred;
+
static int show_cpuinfo(struct seq_file *m, void *__unused)
{
seq_printf(m,
@@ -635,6 +593,8 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
"type\t\t: sun4u\n"
"ncpus probed\t: %ld\n"
"ncpus active\t: %ld\n"
+ "D$ parity tl1\t: %u\n"
+ "I$ parity tl1\t: %u\n"
#ifndef CONFIG_SMP
"Cpu0Bogo\t: %lu.%02lu\n"
"Cpu0ClkTck\t: %016lx\n"
@@ -647,7 +607,9 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
(prom_prev >> 8) & 0xff,
prom_prev & 0xff,
(long)num_possible_cpus(),
- (long)num_online_cpus()
+ (long)num_online_cpus(),
+ dcache_parity_tl1_occurred,
+ icache_parity_tl1_occurred
#ifndef CONFIG_SMP
, cpu_data(0).udelay_val/(500000/HZ),
(cpu_data(0).udelay_val/(5000/HZ)) % 100,
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index b4fc6a5462b2..b137fd63f5e1 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -93,6 +93,27 @@ void __init smp_store_cpu_info(int id)
cpu_data(id).pte_cache[1] = NULL;
cpu_data(id).pgd_cache = NULL;
cpu_data(id).idle_volume = 1;
+
+ cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
+ 16 * 1024);
+ cpu_data(id).dcache_line_size =
+ prom_getintdefault(cpu_node, "dcache-line-size", 32);
+ cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size",
+ 16 * 1024);
+ cpu_data(id).icache_line_size =
+ prom_getintdefault(cpu_node, "icache-line-size", 32);
+ cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size",
+ 4 * 1024 * 1024);
+ cpu_data(id).ecache_line_size =
+ prom_getintdefault(cpu_node, "ecache-line-size", 64);
+ printk("CPU[%d]: Caches "
+ "D[sz(%d):line_sz(%d)] "
+ "I[sz(%d):line_sz(%d)] "
+ "E[sz(%d):line_sz(%d)]\n",
+ id,
+ cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
+ cpu_data(id).icache_size, cpu_data(id).icache_line_size,
+ cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
}
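
The cache geometry consumed by the flush loops elsewhere in this patch is probed here, once per CPU. prom_getintdefault(), as used above, falls back to the supplied default when the firmware node lacks the property, so every field is always populated. A minimal sketch of the same idiom; struct cache_geom and probe_dcache are illustrative:

struct cache_geom {
        int dcache_size, dcache_line_size;
};

static void probe_dcache(struct cache_geom *g, int cpu_node)
{
        /* Property value if present, otherwise the conservative default. */
        g->dcache_size = prom_getintdefault(cpu_node, "dcache-size",
                                            16 * 1024);
        g->dcache_line_size = prom_getintdefault(cpu_node,
                                                 "dcache-line-size", 32);
}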
static void smp_setup_percpu_timer(void);
@@ -980,13 +1001,6 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
preempt_enable();
}
-extern unsigned long xcall_promstop;
-
-void smp_promstop_others(void)
-{
- smp_cross_call(&xcall_promstop, 0, 0, 0);
-}
-
#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
#define prof_counter(__cpu) cpu_data(__cpu).counter
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index cbb5e59824e5..fb7a5370dbfc 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -163,9 +163,6 @@ EXPORT_SYMBOL(atomic64_add);
EXPORT_SYMBOL(atomic64_add_ret);
EXPORT_SYMBOL(atomic64_sub);
EXPORT_SYMBOL(atomic64_sub_ret);
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(_atomic_dec_and_lock);
-#endif
/* Atomic bit operations. */
EXPORT_SYMBOL(test_and_set_bit);
diff --git a/arch/sparc64/kernel/sys32.S b/arch/sparc64/kernel/sys32.S
index 5f9e4fae612e..9cd272ac3ac1 100644
--- a/arch/sparc64/kernel/sys32.S
+++ b/arch/sparc64/kernel/sys32.S
@@ -157,173 +157,199 @@ sys32_socketcall: /* %o0=call, %o1=args */
or %g2, %lo(__socketcall_table_begin), %g2
jmpl %g2 + %o0, %g0
nop
+do_einval:
+ retl
+ mov -EINVAL, %o0
- /* Each entry is exactly 32 bytes. */
.align 32
__socketcall_table_begin:
+
+ /* Each entry is exactly 32 bytes. */
do_sys_socket: /* sys_socket(int, int, int) */
- ldswa [%o1 + 0x0] %asi, %o0
+1: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_socket), %g1
- ldswa [%o1 + 0x8] %asi, %o2
+2: ldswa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_socket), %g0
- ldswa [%o1 + 0x4] %asi, %o1
+3: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */
- ldswa [%o1 + 0x0] %asi, %o0
+4: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_bind), %g1
- ldswa [%o1 + 0x8] %asi, %o2
+5: ldswa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_bind), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+6: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */
- ldswa [%o1 + 0x0] %asi, %o0
+7: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_connect), %g1
- ldswa [%o1 + 0x8] %asi, %o2
+8: ldswa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_connect), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+9: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_listen: /* sys_listen(int, int) */
- ldswa [%o1 + 0x0] %asi, %o0
+10: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_listen), %g1
jmpl %g1 + %lo(sys_listen), %g0
- ldswa [%o1 + 0x4] %asi, %o1
+11: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
nop
nop
do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */
- ldswa [%o1 + 0x0] %asi, %o0
+12: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_accept), %g1
- lduwa [%o1 + 0x8] %asi, %o2
+13: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_accept), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+14: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */
- ldswa [%o1 + 0x0] %asi, %o0
+15: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_getsockname), %g1
- lduwa [%o1 + 0x8] %asi, %o2
+16: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_getsockname), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+17: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */
- ldswa [%o1 + 0x0] %asi, %o0
+18: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_getpeername), %g1
- lduwa [%o1 + 0x8] %asi, %o2
+19: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_getpeername), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+20: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */
- ldswa [%o1 + 0x0] %asi, %o0
+21: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_socketpair), %g1
- ldswa [%o1 + 0x8] %asi, %o2
- lduwa [%o1 + 0xc] %asi, %o3
+22: ldswa [%o1 + 0x8] %asi, %o2
+23: lduwa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_socketpair), %g0
- ldswa [%o1 + 0x4] %asi, %o1
+24: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */
- ldswa [%o1 + 0x0] %asi, %o0
+25: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_send), %g1
- lduwa [%o1 + 0x8] %asi, %o2
- lduwa [%o1 + 0xc] %asi, %o3
+26: lduwa [%o1 + 0x8] %asi, %o2
+27: lduwa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_send), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+28: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */
- ldswa [%o1 + 0x0] %asi, %o0
+29: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_recv), %g1
- lduwa [%o1 + 0x8] %asi, %o2
- lduwa [%o1 + 0xc] %asi, %o3
+30: lduwa [%o1 + 0x8] %asi, %o2
+31: lduwa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_recv), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+32: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */
- ldswa [%o1 + 0x0] %asi, %o0
+33: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_sendto), %g1
- lduwa [%o1 + 0x8] %asi, %o2
- lduwa [%o1 + 0xc] %asi, %o3
- lduwa [%o1 + 0x10] %asi, %o4
- ldswa [%o1 + 0x14] %asi, %o5
+34: lduwa [%o1 + 0x8] %asi, %o2
+35: lduwa [%o1 + 0xc] %asi, %o3
+36: lduwa [%o1 + 0x10] %asi, %o4
+37: ldswa [%o1 + 0x14] %asi, %o5
jmpl %g1 + %lo(sys_sendto), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+38: lduwa [%o1 + 0x4] %asi, %o1
do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */
- ldswa [%o1 + 0x0] %asi, %o0
+39: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_recvfrom), %g1
- lduwa [%o1 + 0x8] %asi, %o2
- lduwa [%o1 + 0xc] %asi, %o3
- lduwa [%o1 + 0x10] %asi, %o4
- lduwa [%o1 + 0x14] %asi, %o5
+40: lduwa [%o1 + 0x8] %asi, %o2
+41: lduwa [%o1 + 0xc] %asi, %o3
+42: lduwa [%o1 + 0x10] %asi, %o4
+43: lduwa [%o1 + 0x14] %asi, %o5
jmpl %g1 + %lo(sys_recvfrom), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+44: lduwa [%o1 + 0x4] %asi, %o1
do_sys_shutdown: /* sys_shutdown(int, int) */
- ldswa [%o1 + 0x0] %asi, %o0
+45: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_shutdown), %g1
jmpl %g1 + %lo(sys_shutdown), %g0
- ldswa [%o1 + 0x4] %asi, %o1
+46: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
nop
nop
do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */
- ldswa [%o1 + 0x0] %asi, %o0
+47: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(compat_sys_setsockopt), %g1
- ldswa [%o1 + 0x8] %asi, %o2
- lduwa [%o1 + 0xc] %asi, %o3
- ldswa [%o1 + 0x10] %asi, %o4
+48: ldswa [%o1 + 0x8] %asi, %o2
+49: lduwa [%o1 + 0xc] %asi, %o3
+50: ldswa [%o1 + 0x10] %asi, %o4
jmpl %g1 + %lo(compat_sys_setsockopt), %g0
- ldswa [%o1 + 0x4] %asi, %o1
+51: ldswa [%o1 + 0x4] %asi, %o1
nop
do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */
- ldswa [%o1 + 0x0] %asi, %o0
+52: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(compat_sys_getsockopt), %g1
- ldswa [%o1 + 0x8] %asi, %o2
- lduwa [%o1 + 0xc] %asi, %o3
- lduwa [%o1 + 0x10] %asi, %o4
+53: ldswa [%o1 + 0x8] %asi, %o2
+54: lduwa [%o1 + 0xc] %asi, %o3
+55: lduwa [%o1 + 0x10] %asi, %o4
jmpl %g1 + %lo(compat_sys_getsockopt), %g0
- ldswa [%o1 + 0x4] %asi, %o1
+56: ldswa [%o1 + 0x4] %asi, %o1
nop
do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
- ldswa [%o1 + 0x0] %asi, %o0
+57: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(compat_sys_sendmsg), %g1
- lduwa [%o1 + 0x8] %asi, %o2
+58: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(compat_sys_sendmsg), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+59: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */
- ldswa [%o1 + 0x0] %asi, %o0
+60: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(compat_sys_recvmsg), %g1
- lduwa [%o1 + 0x8] %asi, %o2
+61: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(compat_sys_recvmsg), %g0
- lduwa [%o1 + 0x4] %asi, %o1
+62: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
-__socketcall_table_end:
-
-do_einval:
- retl
- mov -EINVAL, %o0
-do_efault:
- retl
- mov -EFAULT, %o0
.section __ex_table
.align 4
- .word __socketcall_table_begin, 0, __socketcall_table_end, do_efault
+ .word 1b, __retl_efault, 2b, __retl_efault
+ .word 3b, __retl_efault, 4b, __retl_efault
+ .word 5b, __retl_efault, 6b, __retl_efault
+ .word 7b, __retl_efault, 8b, __retl_efault
+ .word 9b, __retl_efault, 10b, __retl_efault
+ .word 11b, __retl_efault, 12b, __retl_efault
+ .word 13b, __retl_efault, 14b, __retl_efault
+ .word 15b, __retl_efault, 16b, __retl_efault
+ .word 17b, __retl_efault, 18b, __retl_efault
+ .word 19b, __retl_efault, 20b, __retl_efault
+ .word 21b, __retl_efault, 22b, __retl_efault
+ .word 23b, __retl_efault, 24b, __retl_efault
+ .word 25b, __retl_efault, 26b, __retl_efault
+ .word 27b, __retl_efault, 28b, __retl_efault
+ .word 29b, __retl_efault, 30b, __retl_efault
+ .word 31b, __retl_efault, 32b, __retl_efault
+ .word 33b, __retl_efault, 34b, __retl_efault
+ .word 35b, __retl_efault, 36b, __retl_efault
+ .word 37b, __retl_efault, 38b, __retl_efault
+ .word 39b, __retl_efault, 40b, __retl_efault
+ .word 41b, __retl_efault, 42b, __retl_efault
+ .word 43b, __retl_efault, 44b, __retl_efault
+ .word 45b, __retl_efault, 46b, __retl_efault
+ .word 47b, __retl_efault, 48b, __retl_efault
+ .word 49b, __retl_efault, 50b, __retl_efault
+ .word 51b, __retl_efault, 52b, __retl_efault
+ .word 53b, __retl_efault, 54b, __retl_efault
+ .word 55b, __retl_efault, 56b, __retl_efault
+ .word 57b, __retl_efault, 58b, __retl_efault
+ .word 59b, __retl_efault, 60b, __retl_efault
+ .word 61b, __retl_efault, 62b, __retl_efault
.previous
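
Each numbered load above now owns its own __ex_table entry pointing at __retl_efault (a stub that returns -EFAULT), replacing the old whole-table range that funneled into do_efault. For reference, the consumer side — hedged, but matching the traps.c hunks later in this patch — looks like:

const struct exception_table_entry *entry;

entry = search_exception_tables(regs->tpc);
if (entry) {
        regs->tpc = entry->fixup;       /* resume at the fixup stub */
        regs->tnpc = regs->tpc + 4;
        return;
}
/* No entry: a genuine kernel fault, fall through to die_if_kernel(). */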
diff --git a/arch/sparc64/kernel/trampoline.S b/arch/sparc64/kernel/trampoline.S
index 3a145fc39cf2..9478551cb020 100644
--- a/arch/sparc64/kernel/trampoline.S
+++ b/arch/sparc64/kernel/trampoline.S
@@ -119,8 +119,8 @@ startup_continue:
sethi %hi(itlb_load), %g2
or %g2, %lo(itlb_load), %g2
stx %g2, [%sp + 2047 + 128 + 0x18]
- sethi %hi(mmu_ihandle_cache), %g2
- lduw [%g2 + %lo(mmu_ihandle_cache)], %g2
+ sethi %hi(prom_mmu_ihandle_cache), %g2
+ lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
stx %g2, [%sp + 2047 + 128 + 0x20]
sethi %hi(KERNBASE), %g2
stx %g2, [%sp + 2047 + 128 + 0x28]
@@ -156,8 +156,8 @@ startup_continue:
sethi %hi(itlb_load), %g2
or %g2, %lo(itlb_load), %g2
stx %g2, [%sp + 2047 + 128 + 0x18]
- sethi %hi(mmu_ihandle_cache), %g2
- lduw [%g2 + %lo(mmu_ihandle_cache)], %g2
+ sethi %hi(prom_mmu_ihandle_cache), %g2
+ lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
stx %g2, [%sp + 2047 + 128 + 0x20]
sethi %hi(KERNBASE + 0x400000), %g2
stx %g2, [%sp + 2047 + 128 + 0x28]
@@ -190,8 +190,8 @@ do_dtlb:
sethi %hi(dtlb_load), %g2
or %g2, %lo(dtlb_load), %g2
stx %g2, [%sp + 2047 + 128 + 0x18]
- sethi %hi(mmu_ihandle_cache), %g2
- lduw [%g2 + %lo(mmu_ihandle_cache)], %g2
+ sethi %hi(prom_mmu_ihandle_cache), %g2
+ lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
stx %g2, [%sp + 2047 + 128 + 0x20]
sethi %hi(KERNBASE), %g2
stx %g2, [%sp + 2047 + 128 + 0x28]
@@ -228,8 +228,8 @@ do_dtlb:
sethi %hi(dtlb_load), %g2
or %g2, %lo(dtlb_load), %g2
stx %g2, [%sp + 2047 + 128 + 0x18]
- sethi %hi(mmu_ihandle_cache), %g2
- lduw [%g2 + %lo(mmu_ihandle_cache)], %g2
+ sethi %hi(prom_mmu_ihandle_cache), %g2
+ lduw [%g2 + %lo(prom_mmu_ihandle_cache)], %g2
stx %g2, [%sp + 2047 + 128 + 0x20]
sethi %hi(KERNBASE + 0x400000), %g2
stx %g2, [%sp + 2047 + 128 + 0x28]
@@ -336,20 +336,13 @@ do_unlock:
call init_irqwork_curcpu
nop
- BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g2,g3,1f)
- ba,pt %xcc, 2f
- nop
-
-1: /* Start using proper page size encodings in ctx register. */
- sethi %uhi(CTX_CHEETAH_PLUS_NUC), %g3
+ /* Start using proper page size encodings in ctx register. */
+ sethi %hi(sparc64_kern_pri_context), %g3
+ ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2
mov PRIMARY_CONTEXT, %g1
- sllx %g3, 32, %g3
- sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
- or %g3, %g2, %g3
- stxa %g3, [%g1] ASI_DMMU
+ stxa %g2, [%g1] ASI_DMMU
membar #Sync
-2:
rdpr %pstate, %o1
or %o1, PSTATE_IE, %o1
wrpr %o1, 0, %pstate
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index b280b2ef674f..5570e7bb22bb 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -189,19 +189,18 @@ void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, un
if (regs->tstate & TSTATE_PRIV) {
/* Test if this comes from uaccess places. */
- unsigned long fixup;
- unsigned long g2 = regs->u_regs[UREG_G2];
+ const struct exception_table_entry *entry;
- if ((fixup = search_extables_range(regs->tpc, &g2))) {
- /* Ouch, somebody is trying ugly VM hole tricks on us... */
+ entry = search_exception_tables(regs->tpc);
+ if (entry) {
+ /* Ouch, somebody is trying VM hole tricks on us... */
#ifdef DEBUG_EXCEPTIONS
printk("Exception: PC<%016lx> faddr<UNKNOWN>\n", regs->tpc);
- printk("EX_TABLE: insn<%016lx> fixup<%016lx> "
- "g2<%016lx>\n", regs->tpc, fixup, g2);
+ printk("EX_TABLE: insn<%016lx> fixup<%016lx>\n",
+ regs->tpc, entry->fixup);
#endif
- regs->tpc = fixup;
+ regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
- regs->u_regs[UREG_G2] = g2;
return;
}
/* Shit... */
@@ -758,26 +757,12 @@ void __init cheetah_ecache_flush_init(void)
ecache_flush_size = (2 * largest_size);
ecache_flush_linesize = smallest_linesize;
- /* Discover a physically contiguous chunk of physical
- * memory in 'sp_banks' of size ecache_flush_size calculated
- * above. Store the physical base of this area at
- * ecache_flush_physbase.
- */
- for (node = 0; ; node++) {
- if (sp_banks[node].num_bytes == 0)
- break;
- if (sp_banks[node].num_bytes >= ecache_flush_size) {
- ecache_flush_physbase = sp_banks[node].base_addr;
- break;
- }
- }
+ ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size);
- /* Note: Zero would be a valid value of ecache_flush_physbase so
- * don't use that as the success test. :-)
- */
- if (sp_banks[node].num_bytes == 0) {
+ if (ecache_flush_physbase == ~0UL) {
prom_printf("cheetah_ecache_flush_init: Cannot find %d byte "
- "contiguous physical memory.\n", ecache_flush_size);
+ "contiguous physical memory.\n",
+ ecache_flush_size);
prom_halt();
}
@@ -869,14 +854,19 @@ static void cheetah_flush_ecache_line(unsigned long physaddr)
*/
static void __cheetah_flush_icache(void)
{
- unsigned long i;
+ unsigned int icache_size, icache_line_size;
+ unsigned long addr;
+
+ icache_size = local_cpu_data().icache_size;
+ icache_line_size = local_cpu_data().icache_line_size;
/* Clear the valid bits in all the tags. */
- for (i = 0; i < (1 << 15); i += (1 << 5)) {
+ for (addr = 0; addr < icache_size; addr += icache_line_size) {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* no outputs */
- : "r" (i | (2 << 3)), "i" (ASI_IC_TAG));
+ : "r" (addr | (2 << 3)),
+ "i" (ASI_IC_TAG));
}
}
@@ -904,13 +894,17 @@ static void cheetah_flush_icache(void)
static void cheetah_flush_dcache(void)
{
- unsigned long i;
+ unsigned int dcache_size, dcache_line_size;
+ unsigned long addr;
- for (i = 0; i < (1 << 16); i += (1 << 5)) {
+ dcache_size = local_cpu_data().dcache_size;
+ dcache_line_size = local_cpu_data().dcache_line_size;
+
+ for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* no outputs */
- : "r" (i), "i" (ASI_DCACHE_TAG));
+ : "r" (addr), "i" (ASI_DCACHE_TAG));
}
}
@@ -921,24 +915,29 @@ static void cheetah_flush_dcache(void)
*/
static void cheetah_plus_zap_dcache_parity(void)
{
- unsigned long i;
+ unsigned int dcache_size, dcache_line_size;
+ unsigned long addr;
+
+ dcache_size = local_cpu_data().dcache_size;
+ dcache_line_size = local_cpu_data().dcache_line_size;
- for (i = 0; i < (1 << 16); i += (1 << 5)) {
- unsigned long tag = (i >> 14);
- unsigned long j;
+ for (addr = 0; addr < dcache_size; addr += dcache_line_size) {
+ unsigned long tag = (addr >> 14);
+ unsigned long line;
__asm__ __volatile__("membar #Sync\n\t"
"stxa %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
- : "r" (tag), "r" (i),
+ : "r" (tag), "r" (addr),
"i" (ASI_DCACHE_UTAG));
- for (j = i; j < i + (1 << 5); j += (1 << 3))
+ for (line = addr; line < addr + dcache_line_size; line += 8)
__asm__ __volatile__("membar #Sync\n\t"
"stxa %%g0, [%0] %1\n\t"
"membar #Sync"
: /* no outputs */
- : "r" (j), "i" (ASI_DCACHE_DATA));
+ : "r" (line),
+ "i" (ASI_DCACHE_DATA));
}
}
@@ -1332,16 +1331,12 @@ static int cheetah_fix_ce(unsigned long physaddr)
/* Return non-zero if PADDR is a valid physical memory address. */
static int cheetah_check_main_memory(unsigned long paddr)
{
- int i;
+ unsigned long vaddr = PAGE_OFFSET + paddr;
- for (i = 0; ; i++) {
- if (sp_banks[i].num_bytes == 0)
- break;
- if (paddr >= sp_banks[i].base_addr &&
- paddr < (sp_banks[i].base_addr + sp_banks[i].num_bytes))
- return 1;
- }
- return 0;
+ if (vaddr > (unsigned long) high_memory)
+ return 0;
+
+ return kern_addr_valid(vaddr);
}
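
The rewritten check leans on the kernel linear mapping: PAGE_OFFSET + paddr is the kernel's alias of a physical address, high_memory bounds it, and kern_addr_valid() asks the page tables whether anything backs it. Restated as a hedged standalone helper; paddr_is_ram is an illustrative name:

static int paddr_is_ram(unsigned long paddr)
{
        unsigned long vaddr = PAGE_OFFSET + paddr;   /* linear alias */

        if (vaddr > (unsigned long) high_memory)     /* past end of RAM */
                return 0;
        return kern_addr_valid(vaddr);               /* page-table backed? */
}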
void cheetah_cee_handler(struct pt_regs *regs, unsigned long afsr, unsigned long afar)
@@ -1596,10 +1591,10 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned
/* OK, usermode access. */
recoverable = 1;
} else {
- unsigned long g2 = regs->u_regs[UREG_G2];
- unsigned long fixup = search_extables_range(regs->tpc, &g2);
+ const struct exception_table_entry *entry;
- if (fixup != 0UL) {
+ entry = search_exception_tables(regs->tpc);
+ if (entry) {
/* OK, kernel access to userspace. */
recoverable = 1;
@@ -1618,9 +1613,8 @@ void cheetah_deferred_handler(struct pt_regs *regs, unsigned long afsr, unsigned
* recoverable condition.
*/
if (recoverable) {
- regs->tpc = fixup;
+ regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
- regs->u_regs[UREG_G2] = g2;
}
}
}
diff --git a/arch/sparc64/kernel/una_asm.S b/arch/sparc64/kernel/una_asm.S
index cbb40585253c..1f5b5b708ce7 100644
--- a/arch/sparc64/kernel/una_asm.S
+++ b/arch/sparc64/kernel/una_asm.S
@@ -6,18 +6,11 @@
.text
-kernel_unaligned_trap_fault:
- call kernel_mna_trap_fault
- nop
- retl
- nop
- .size kern_unaligned_trap_fault, .-kern_unaligned_trap_fault
-
.globl __do_int_store
__do_int_store:
rd %asi, %o4
wr %o3, 0, %asi
- ldx [%o2], %g3
+ mov %o2, %g3
cmp %o1, 2
be,pn %icc, 2f
cmp %o1, 4
@@ -51,24 +44,24 @@ __do_int_store:
0:
wr %o4, 0x0, %asi
retl
- nop
+ mov 0, %o0
.size __do_int_store, .-__do_int_store
.section __ex_table
- .word 4b, kernel_unaligned_trap_fault
- .word 5b, kernel_unaligned_trap_fault
- .word 6b, kernel_unaligned_trap_fault
- .word 7b, kernel_unaligned_trap_fault
- .word 8b, kernel_unaligned_trap_fault
- .word 9b, kernel_unaligned_trap_fault
- .word 10b, kernel_unaligned_trap_fault
- .word 11b, kernel_unaligned_trap_fault
- .word 12b, kernel_unaligned_trap_fault
- .word 13b, kernel_unaligned_trap_fault
- .word 14b, kernel_unaligned_trap_fault
- .word 15b, kernel_unaligned_trap_fault
- .word 16b, kernel_unaligned_trap_fault
- .word 17b, kernel_unaligned_trap_fault
+ .word 4b, __retl_efault
+ .word 5b, __retl_efault
+ .word 6b, __retl_efault
+ .word 7b, __retl_efault
+ .word 8b, __retl_efault
+ .word 9b, __retl_efault
+ .word 10b, __retl_efault
+ .word 11b, __retl_efault
+ .word 12b, __retl_efault
+ .word 13b, __retl_efault
+ .word 14b, __retl_efault
+ .word 15b, __retl_efault
+ .word 16b, __retl_efault
+ .word 17b, __retl_efault
.previous
.globl do_int_load
@@ -133,21 +126,21 @@ do_int_load:
0:
wr %o5, 0x0, %asi
retl
- nop
+ mov 0, %o0
.size __do_int_load, .-__do_int_load
.section __ex_table
- .word 4b, kernel_unaligned_trap_fault
- .word 5b, kernel_unaligned_trap_fault
- .word 6b, kernel_unaligned_trap_fault
- .word 7b, kernel_unaligned_trap_fault
- .word 8b, kernel_unaligned_trap_fault
- .word 9b, kernel_unaligned_trap_fault
- .word 10b, kernel_unaligned_trap_fault
- .word 11b, kernel_unaligned_trap_fault
- .word 12b, kernel_unaligned_trap_fault
- .word 13b, kernel_unaligned_trap_fault
- .word 14b, kernel_unaligned_trap_fault
- .word 15b, kernel_unaligned_trap_fault
- .word 16b, kernel_unaligned_trap_fault
+ .word 4b, __retl_efault
+ .word 5b, __retl_efault
+ .word 6b, __retl_efault
+ .word 7b, __retl_efault
+ .word 8b, __retl_efault
+ .word 9b, __retl_efault
+ .word 10b, __retl_efault
+ .word 11b, __retl_efault
+ .word 12b, __retl_efault
+ .word 13b, __retl_efault
+ .word 14b, __retl_efault
+ .word 15b, __retl_efault
+ .word 16b, __retl_efault
.previous
diff --git a/arch/sparc64/kernel/unaligned.c b/arch/sparc64/kernel/unaligned.c
index da9739f0d437..70faf630603b 100644
--- a/arch/sparc64/kernel/unaligned.c
+++ b/arch/sparc64/kernel/unaligned.c
@@ -180,17 +180,18 @@ static void __attribute_used__ unaligned_panic(char *str, struct pt_regs *regs)
die_if_kernel(str, regs);
}
-extern void do_int_load(unsigned long *dest_reg, int size,
- unsigned long *saddr, int is_signed, int asi);
+extern int do_int_load(unsigned long *dest_reg, int size,
+ unsigned long *saddr, int is_signed, int asi);
-extern void __do_int_store(unsigned long *dst_addr, int size,
- unsigned long *src_val, int asi);
+extern int __do_int_store(unsigned long *dst_addr, int size,
+ unsigned long src_val, int asi);
-static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr,
- struct pt_regs *regs, int asi)
+static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
+ struct pt_regs *regs, int asi, int orig_asi)
{
unsigned long zero = 0;
- unsigned long *src_val = &zero;
+ unsigned long *src_val_p = &zero;
+ unsigned long src_val;
if (size == 16) {
size = 8;
@@ -198,9 +199,27 @@ static inline void do_int_store(int reg_num, int size, unsigned long *dst_addr,
(unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
(unsigned)fetch_reg(reg_num + 1, regs);
} else if (reg_num) {
- src_val = fetch_reg_addr(reg_num, regs);
+ src_val_p = fetch_reg_addr(reg_num, regs);
}
- __do_int_store(dst_addr, size, src_val, asi);
+ src_val = *src_val_p;
+ if (unlikely(asi != orig_asi)) {
+ switch (size) {
+ case 2:
+ src_val = swab16(src_val);
+ break;
+ case 4:
+ src_val = swab32(src_val);
+ break;
+ case 8:
+ src_val = swab64(src_val);
+ break;
+ case 16:
+ default:
+ BUG();
+ break;
+ };
+ }
+ return __do_int_store(dst_addr, size, src_val, asi);
}
static inline void advance(struct pt_regs *regs)
@@ -223,14 +242,14 @@ static inline int ok_for_kernel(unsigned int insn)
return !floating_point_load_or_store_p(insn);
}
-void kernel_mna_trap_fault(void)
+static void kernel_mna_trap_fault(void)
{
struct pt_regs *regs = current_thread_info()->kern_una_regs;
unsigned int insn = current_thread_info()->kern_una_insn;
- unsigned long g2 = regs->u_regs[UREG_G2];
- unsigned long fixup = search_extables_range(regs->tpc, &g2);
+ const struct exception_table_entry *entry;
- if (!fixup) {
+ entry = search_exception_tables(regs->tpc);
+ if (!entry) {
unsigned long address;
address = compute_effective_address(regs, insn,
@@ -251,9 +270,8 @@ void kernel_mna_trap_fault(void)
die_if_kernel("Oops", regs);
/* Not reached */
}
- regs->tpc = fixup;
+ regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
- regs->u_regs [UREG_G2] = g2;
regs->tstate &= ~TSTATE_ASI;
regs->tstate |= (ASI_AIUS << 24UL);
@@ -275,7 +293,8 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
kernel_mna_trap_fault();
} else {
- unsigned long addr;
+ unsigned long addr, *reg_addr;
+ int orig_asi, asi, err;
addr = compute_effective_address(regs, insn,
((insn >> 25) & 0x1f));
@@ -285,25 +304,59 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn, u
regs->tpc, dirstrings[dir], addr, size,
regs->u_regs[UREG_RETPC]);
#endif
+ orig_asi = asi = decode_asi(insn, regs);
+ switch (asi) {
+ case ASI_NL:
+ case ASI_AIUPL:
+ case ASI_AIUSL:
+ case ASI_PL:
+ case ASI_SL:
+ case ASI_PNFL:
+ case ASI_SNFL:
+ asi &= ~0x08;
+ break;
+ };
switch (dir) {
case load:
- do_int_load(fetch_reg_addr(((insn>>25)&0x1f), regs),
- size, (unsigned long *) addr,
- decode_signedness(insn),
- decode_asi(insn, regs));
+ reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
+ err = do_int_load(reg_addr, size,
+ (unsigned long *) addr,
+ decode_signedness(insn), asi);
+ if (likely(!err) && unlikely(asi != orig_asi)) {
+ unsigned long val_in = *reg_addr;
+ switch (size) {
+ case 2:
+ val_in = swab16(val_in);
+ break;
+ case 4:
+ val_in = swab32(val_in);
+ break;
+ case 8:
+ val_in = swab64(val_in);
+ break;
+ case 16:
+ default:
+ BUG();
+ break;
+ };
+ *reg_addr = val_in;
+ }
break;
case store:
- do_int_store(((insn>>25)&0x1f), size,
- (unsigned long *) addr, regs,
- decode_asi(insn, regs));
+ err = do_int_store(((insn>>25)&0x1f), size,
+ (unsigned long *) addr, regs,
+ asi, orig_asi);
break;
default:
panic("Impossible kernel unaligned trap.");
/* Not reached... */
}
- advance(regs);
+ if (unlikely(err))
+ kernel_mna_trap_fault();
+ else
+ advance(regs);
}
}
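
The ASI switch above clears bit 0x08 to turn each little-endian ASI into its big-endian twin, performs the access big-endian, and byte-swaps the value afterwards, so one load/store path serves both endiannesses. A hedged sketch of the swap step; le_fixup is an illustrative name, and the patch open-codes this switch twice:

static unsigned long le_fixup(unsigned long val, int size)
{
        switch (size) {
        case 2:
                return swab16(val);
        case 4:
                return swab32(val);
        case 8:
                return swab64(val);
        }
        /* The trap code BUG()s on other sizes; the sketch stays total. */
        return val;
}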
diff --git a/arch/sparc64/kernel/us3_cpufreq.c b/arch/sparc64/kernel/us3_cpufreq.c
index 9080e7cd4bb0..0340041f6143 100644
--- a/arch/sparc64/kernel/us3_cpufreq.c
+++ b/arch/sparc64/kernel/us3_cpufreq.c
@@ -208,7 +208,10 @@ static int __init us3_freq_init(void)
impl = ((ver >> 32) & 0xffff);
if (manuf == CHEETAH_MANUF &&
- (impl == CHEETAH_IMPL || impl == CHEETAH_PLUS_IMPL)) {
+ (impl == CHEETAH_IMPL ||
+ impl == CHEETAH_PLUS_IMPL ||
+ impl == JAGUAR_IMPL ||
+ impl == PANTHER_IMPL)) {
struct cpufreq_driver *driver;
ret = -ENOMEM;
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index f47d0be39378..2af0cf0a8640 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -9,8 +9,7 @@ ENTRY(_start)
jiffies = jiffies_64;
SECTIONS
{
- swapper_pmd_dir = 0x0000000000402000;
- empty_pg_dir = 0x0000000000403000;
+ swapper_low_pmd_dir = 0x0000000000402000;
. = 0x4000;
.text 0x0000000000404000 :
{
diff --git a/arch/sparc64/kernel/winfixup.S b/arch/sparc64/kernel/winfixup.S
index 99c809a1e5ac..39160926267b 100644
--- a/arch/sparc64/kernel/winfixup.S
+++ b/arch/sparc64/kernel/winfixup.S
@@ -16,23 +16,14 @@
.text
set_pcontext:
-cplus_winfixup_insn_1:
- sethi %hi(0), %l1
+ sethi %hi(sparc64_kern_pri_context), %l1
+ ldx [%l1 + %lo(sparc64_kern_pri_context)], %l1
mov PRIMARY_CONTEXT, %g1
- sllx %l1, 32, %l1
-cplus_winfixup_insn_2:
- sethi %hi(0), %g2
- or %l1, %g2, %l1
stxa %l1, [%g1] ASI_DMMU
flush %g6
retl
nop
-cplus_wfinsn_1:
- sethi %uhi(CTX_CHEETAH_PLUS_NUC), %l1
-cplus_wfinsn_2:
- sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
-
.align 32
/* Here are the rules, pay attention.
@@ -395,23 +386,3 @@ window_dax_from_user_common:
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
clr %l6
-
-
- .globl cheetah_plus_patch_winfixup
-cheetah_plus_patch_winfixup:
- sethi %hi(cplus_wfinsn_1), %o0
- sethi %hi(cplus_winfixup_insn_1), %o2
- lduw [%o0 + %lo(cplus_wfinsn_1)], %o1
- or %o2, %lo(cplus_winfixup_insn_1), %o2
- stw %o1, [%o2]
- flush %o2
-
- sethi %hi(cplus_wfinsn_2), %o0
- sethi %hi(cplus_winfixup_insn_2), %o2
- lduw [%o0 + %lo(cplus_wfinsn_2)], %o1
- or %o2, %lo(cplus_winfixup_insn_2), %o2
- stw %o1, [%o2]
- flush %o2
-
- retl
- nop
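
The deleted routine copied template instruction words (cplus_wfinsn_*) over the placeholder sethi's at boot; set_pcontext now simply loads sparc64_kern_pri_context from memory. A sketch of the old patching idiom in C, with flushi() modeling the "flush" instruction this patch uses elsewhere (e.g. on kvmap_linear_patch):

	extern void flushi(void *addr);	/* models the sparc64 "flush" insn */

	/* Overwrite a placeholder instruction with a template word,
	 * then flush so the I-cache refetches the new instruction.
	 */
	static void patch_one_insn(unsigned int *target, const unsigned int *tmpl)
	{
		*target = *tmpl;	/* the stw %o1, [%o2] above */
		flushi(target);		/* the flush %o2 above */
	}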
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile
index d968aebe83b2..c295806500f7 100644
--- a/arch/sparc64/lib/Makefile
+++ b/arch/sparc64/lib/Makefile
@@ -14,6 +14,4 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \
copy_in_user.o user_fixup.o memmove.o \
mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o
-lib-$(CONFIG_HAVE_DEC_LOCK) += dec_and_lock.o
-
obj-y += iomap.o
diff --git a/arch/sparc64/lib/VISsave.S b/arch/sparc64/lib/VISsave.S
index 4e18989bd602..a0ded5c5aa5c 100644
--- a/arch/sparc64/lib/VISsave.S
+++ b/arch/sparc64/lib/VISsave.S
@@ -59,15 +59,17 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
be,pn %icc, 9b
add %g6, TI_FPREGS, %g2
andcc %o5, FPRS_DL, %g0
- membar #StoreStore | #LoadStore
be,pn %icc, 4f
add %g6, TI_FPREGS+0x40, %g3
+ membar #Sync
stda %f0, [%g2 + %g1] ASI_BLK_P
stda %f16, [%g3 + %g1] ASI_BLK_P
+ membar #Sync
andcc %o5, FPRS_DU, %g0
be,pn %icc, 5f
4: add %g1, 128, %g1
+ membar #Sync
stda %f32, [%g2 + %g1] ASI_BLK_P
stda %f48, [%g3 + %g1] ASI_BLK_P
@@ -87,7 +89,7 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
sll %g1, 5, %g1
add %g6, TI_FPREGS+0xc0, %g3
wr %g0, FPRS_FEF, %fprs
- membar #StoreStore | #LoadStore
+ membar #Sync
stda %f32, [%g2 + %g1] ASI_BLK_P
stda %f48, [%g3 + %g1] ASI_BLK_P
membar #Sync
@@ -128,8 +130,8 @@ VISenterhalf:
be,pn %icc, 4f
add %g6, TI_FPREGS, %g2
- membar #StoreStore | #LoadStore
add %g6, TI_FPREGS+0x40, %g3
+ membar #Sync
stda %f0, [%g2 + %g1] ASI_BLK_P
stda %f16, [%g3 + %g1] ASI_BLK_P
membar #Sync
diff --git a/arch/sparc64/lib/dec_and_lock.S b/arch/sparc64/lib/dec_and_lock.S
deleted file mode 100644
index 8ee288dd0afc..000000000000
--- a/arch/sparc64/lib/dec_and_lock.S
+++ /dev/null
@@ -1,80 +0,0 @@
-/* $Id: dec_and_lock.S,v 1.5 2001/11/18 00:12:56 davem Exp $
- * dec_and_lock.S: Sparc64 version of "atomic_dec_and_lock()"
- * using cas and ldstub instructions.
- *
- * Copyright (C) 2000 David S. Miller (davem@redhat.com)
- */
-#include <linux/config.h>
-#include <asm/thread_info.h>
-
- .text
- .align 64
-
- /* CAS basically works like this:
- *
- * void CAS(MEM, REG1, REG2)
- * {
- * START_ATOMIC();
- * if (*(MEM) == REG1) {
- * TMP = *(MEM);
- * *(MEM) = REG2;
- * REG2 = TMP;
- * } else
- * REG2 = *(MEM);
- * END_ATOMIC();
- * }
- */
-
- .globl _atomic_dec_and_lock
-_atomic_dec_and_lock: /* %o0 = counter, %o1 = lock */
-loop1: lduw [%o0], %g2
- subcc %g2, 1, %g7
- be,pn %icc, start_to_zero
- nop
-nzero: cas [%o0], %g2, %g7
- cmp %g2, %g7
- bne,pn %icc, loop1
- mov 0, %g1
-
-out:
- membar #StoreLoad | #StoreStore
- retl
- mov %g1, %o0
-start_to_zero:
-#ifdef CONFIG_PREEMPT
- ldsw [%g6 + TI_PRE_COUNT], %g3
- add %g3, 1, %g3
- stw %g3, [%g6 + TI_PRE_COUNT]
-#endif
-to_zero:
- ldstub [%o1], %g3
- membar #StoreLoad | #StoreStore
- brnz,pn %g3, spin_on_lock
- nop
-loop2: cas [%o0], %g2, %g7 /* ASSERT(g7 == 0) */
- cmp %g2, %g7
-
- be,pt %icc, out
- mov 1, %g1
- lduw [%o0], %g2
- subcc %g2, 1, %g7
- be,pn %icc, loop2
- nop
- membar #StoreStore | #LoadStore
- stb %g0, [%o1]
-#ifdef CONFIG_PREEMPT
- ldsw [%g6 + TI_PRE_COUNT], %g3
- sub %g3, 1, %g3
- stw %g3, [%g6 + TI_PRE_COUNT]
-#endif
-
- b,pt %xcc, nzero
- nop
-spin_on_lock:
- ldub [%o1], %g3
- membar #LoadLoad
- brnz,pt %g3, spin_on_lock
- nop
- ba,pt %xcc, to_zero
- nop
- nop
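
In C11 atomics, the contract of the deleted routine (decrement; return non-zero holding the lock only when the count reaches zero) reads roughly as below. This is a simplified sketch: the assembly additionally re-enters the fast path when the CAS under the lock loses a race, and adjusts the preempt count around the spin.

	#include <stdatomic.h>

	/* Returns nonzero, with *lock held, iff *counter dropped to zero. */
	int atomic_dec_and_lock_sketch(atomic_int *counter, atomic_flag *lock)
	{
		int old = atomic_load(counter);

		/* Fast path: not about to hit zero, CAS the decrement
		 * in without ever touching the lock.
		 */
		while (old != 1) {
			if (atomic_compare_exchange_weak(counter, &old, old - 1))
				return 0;
		}

		/* Slow path: take the lock, then decrement under it. */
		while (atomic_flag_test_and_set(lock))
			;	/* spin */
		if (atomic_fetch_sub(counter, 1) == 1)
			return 1;	/* hit zero: caller releases the lock */
		atomic_flag_clear(lock);
		return 0;
	}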
diff --git a/arch/sparc64/lib/strncpy_from_user.S b/arch/sparc64/lib/strncpy_from_user.S
index 09cbbaa0ebf4..e1264650ca7a 100644
--- a/arch/sparc64/lib/strncpy_from_user.S
+++ b/arch/sparc64/lib/strncpy_from_user.S
@@ -125,15 +125,11 @@ __strncpy_from_user:
add %o2, %o3, %o0
.size __strncpy_from_user, .-__strncpy_from_user
- .section .fixup,#alloc,#execinstr
- .align 4
-4: retl
- mov -EFAULT, %o0
-
.section __ex_table,#alloc
.align 4
- .word 60b, 4b
- .word 61b, 4b
- .word 62b, 4b
- .word 63b, 4b
- .word 64b, 4b
+ .word 60b, __retl_efault
+ .word 61b, __retl_efault
+ .word 62b, __retl_efault
+ .word 63b, __retl_efault
+ .word 64b, __retl_efault
+ .previous
diff --git a/arch/sparc64/lib/user_fixup.c b/arch/sparc64/lib/user_fixup.c
index 0278e34125db..19d1fdb17d0e 100644
--- a/arch/sparc64/lib/user_fixup.c
+++ b/arch/sparc64/lib/user_fixup.c
@@ -11,61 +11,56 @@
/* Calculating the exact fault address when using
* block loads and stores can be very complicated.
+ *
* Instead of trying to be clever and handling all
* of the cases, just fix things up simply here.
*/
-unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
+static unsigned long compute_size(unsigned long start, unsigned long size, unsigned long *offset)
{
- char *dst = to;
- const char __user *src = from;
+ unsigned long fault_addr = current_thread_info()->fault_address;
+ unsigned long end = start + size;
- while (size) {
- if (__get_user(*dst, src))
- break;
- dst++;
- src++;
- size--;
+ if (fault_addr < start || fault_addr >= end) {
+ *offset = 0;
+ } else {
+		*offset = fault_addr - start;
+ size = end - fault_addr;
}
+ return size;
+}
- if (size)
- memset(dst, 0, size);
+unsigned long copy_from_user_fixup(void *to, const void __user *from, unsigned long size)
+{
+ unsigned long offset;
+
+ size = compute_size((unsigned long) from, size, &offset);
+ if (likely(size))
+ memset(to + offset, 0, size);
return size;
}
unsigned long copy_to_user_fixup(void __user *to, const void *from, unsigned long size)
{
- char __user *dst = to;
- const char *src = from;
-
- while (size) {
- if (__put_user(*src, dst))
- break;
- dst++;
- src++;
- size--;
- }
+ unsigned long offset;
- return size;
+ return compute_size((unsigned long) to, size, &offset);
}
unsigned long copy_in_user_fixup(void __user *to, void __user *from, unsigned long size)
{
- char __user *dst = to;
- char __user *src = from;
+ unsigned long fault_addr = current_thread_info()->fault_address;
+ unsigned long start = (unsigned long) to;
+ unsigned long end = start + size;
- while (size) {
- char tmp;
+ if (fault_addr >= start && fault_addr < end)
+ return end - fault_addr;
- if (__get_user(tmp, src))
- break;
- if (__put_user(tmp, dst))
- break;
- dst++;
- src++;
- size--;
- }
+ start = (unsigned long) from;
+ end = start + size;
+ if (fault_addr >= start && fault_addr < end)
+ return end - fault_addr;
return size;
}
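
Worked example of the new scheme: the trap handler records the faulting address in thread_info, and compute_size() turns that into (bytes copied, bytes remaining) so copy_from_user_fixup() can zero exactly the uncopied tail. Self-contained sketch with made-up addresses:

	#include <assert.h>

	static unsigned long compute_size_sketch(unsigned long start,
						 unsigned long size,
						 unsigned long fault_addr,
						 unsigned long *offset)
	{
		unsigned long end = start + size;

		if (fault_addr < start || fault_addr >= end) {
			*offset = 0;	/* fault elsewhere: report everything */
			return size;
		}
		*offset = fault_addr - start;	/* bytes that were copied */
		return end - fault_addr;	/* bytes left uncopied */
	}

	int main(void)
	{
		unsigned long off;

		/* A 0x100-byte copy from 0x1000 faulting at 0x1080:
		 * 0x80 bytes made it, 0x80 remain to be zeroed.
		 */
		assert(compute_size_sketch(0x1000, 0x100, 0x1080, &off) == 0x80);
		assert(off == 0x80);
		return 0;
	}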
diff --git a/arch/sparc64/mm/Makefile b/arch/sparc64/mm/Makefile
index cda87333a77b..9d0960e69f48 100644
--- a/arch/sparc64/mm/Makefile
+++ b/arch/sparc64/mm/Makefile
@@ -5,6 +5,6 @@
EXTRA_AFLAGS := -ansi
EXTRA_CFLAGS := -Werror
-obj-y := ultra.o tlb.o fault.o init.o generic.o extable.o
+obj-y := ultra.o tlb.o fault.o init.o generic.o
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/sparc64/mm/extable.c b/arch/sparc64/mm/extable.c
deleted file mode 100644
index ec334297ff4f..000000000000
--- a/arch/sparc64/mm/extable.c
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * linux/arch/sparc64/mm/extable.c
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <asm/uaccess.h>
-
-extern const struct exception_table_entry __start___ex_table[];
-extern const struct exception_table_entry __stop___ex_table[];
-
-void sort_extable(struct exception_table_entry *start,
- struct exception_table_entry *finish)
-{
-}
-
-/* Caller knows they are in a range if ret->fixup == 0 */
-const struct exception_table_entry *
-search_extable(const struct exception_table_entry *start,
- const struct exception_table_entry *last,
- unsigned long value)
-{
- const struct exception_table_entry *walk;
-
- /* Single insn entries are encoded as:
- * word 1: insn address
- * word 2: fixup code address
- *
- * Range entries are encoded as:
- * word 1: first insn address
- * word 2: 0
- * word 3: last insn address + 4 bytes
- * word 4: fixup code address
- *
- * See asm/uaccess.h for more details.
- */
-
- /* 1. Try to find an exact match. */
- for (walk = start; walk <= last; walk++) {
- if (walk->fixup == 0) {
- /* A range entry, skip both parts. */
- walk++;
- continue;
- }
-
- if (walk->insn == value)
- return walk;
- }
-
- /* 2. Try to find a range match. */
- for (walk = start; walk <= (last - 1); walk++) {
- if (walk->fixup)
- continue;
-
- if (walk[0].insn <= value && walk[1].insn > value)
- return walk;
-
- walk++;
- }
-
- return NULL;
-}
-
-/* Special extable search, which handles ranges. Returns fixup */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
-{
- const struct exception_table_entry *entry;
-
- entry = search_exception_tables(addr);
- if (!entry)
- return 0;
-
- /* Inside range? Fix g2 and return correct fixup */
- if (!entry->fixup) {
- *g2 = (addr - entry->insn) / 4;
- return (entry + 1)->fixup;
- }
-
- return entry->fixup;
-}
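
With the range encoding gone, sparc64 uses the generic exception-table machinery: plain (insn, fixup) pairs sorted by faulting address and binary-searched. Roughly (types simplified; the real lookup lives in lib/extable.c):

	#include <stddef.h>

	struct extable_entry {
		unsigned long insn;	/* faulting instruction address */
		unsigned long fixup;	/* fixup code address */
	};

	static const struct extable_entry *
	search_extable_sketch(const struct extable_entry *base,
			      unsigned long num, unsigned long addr)
	{
		unsigned long lo = 0, hi = num;

		while (lo < hi) {
			unsigned long mid = lo + (hi - lo) / 2;

			if (base[mid].insn == addr)
				return &base[mid];
			if (base[mid].insn < addr)
				lo = mid + 1;
			else
				hi = mid;
		}
		return NULL;	/* no fixup: the fault is fatal */
	}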
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index db1e3310e907..31fbc67719a1 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -32,8 +32,6 @@
#define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))
-extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
-
/*
* To debug kernel to catch accesses to certain virtual/physical addresses.
* Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints.
@@ -71,53 +69,6 @@ void set_brkpt(unsigned long addr, unsigned char mask, int flags, int mode)
: "memory");
}
-/* Nice, simple, prom library does all the sweating for us. ;) */
-unsigned long __init prom_probe_memory (void)
-{
- register struct linux_mlist_p1275 *mlist;
- register unsigned long bytes, base_paddr, tally;
- register int i;
-
- i = 0;
- mlist = *prom_meminfo()->p1275_available;
- bytes = tally = mlist->num_bytes;
- base_paddr = mlist->start_adr;
-
- sp_banks[0].base_addr = base_paddr;
- sp_banks[0].num_bytes = bytes;
-
- while (mlist->theres_more != (void *) 0) {
- i++;
- mlist = mlist->theres_more;
- bytes = mlist->num_bytes;
- tally += bytes;
- if (i >= SPARC_PHYS_BANKS-1) {
- printk ("The machine has more banks than "
- "this kernel can support\n"
- "Increase the SPARC_PHYS_BANKS "
- "setting (currently %d)\n",
- SPARC_PHYS_BANKS);
- i = SPARC_PHYS_BANKS-1;
- break;
- }
-
- sp_banks[i].base_addr = mlist->start_adr;
- sp_banks[i].num_bytes = mlist->num_bytes;
- }
-
- i++;
- sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
- sp_banks[i].num_bytes = 0;
-
- /* Now mask all bank sizes on a page boundary, it is all we can
- * use anyways.
- */
- for (i = 0; sp_banks[i].num_bytes != 0; i++)
- sp_banks[i].num_bytes &= PAGE_MASK;
-
- return tally;
-}
-
static void __kprobes unhandled_fault(unsigned long address,
struct task_struct *tsk,
struct pt_regs *regs)
@@ -242,7 +193,6 @@ static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
unsigned int insn, unsigned long address)
{
- unsigned long g2;
unsigned char asi = ASI_P;
if ((!insn) && (regs->tstate & TSTATE_PRIV))
@@ -273,11 +223,9 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
}
}
- g2 = regs->u_regs[UREG_G2];
-
/* Is this in ex_table? */
if (regs->tstate & TSTATE_PRIV) {
- unsigned long fixup;
+ const struct exception_table_entry *entry;
if (asi == ASI_P && (insn & 0xc0800000) == 0xc0800000) {
if (insn & 0x2000)
@@ -288,10 +236,9 @@ static void do_kernel_fault(struct pt_regs *regs, int si_code, int fault_code,
/* Look in asi.h: All _S asis have LS bit set */
if ((asi & 0x1) &&
- (fixup = search_extables_range(regs->tpc, &g2))) {
- regs->tpc = fixup;
+ (entry = search_exception_tables(regs->tpc))) {
+ regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
- regs->u_regs[UREG_G2] = g2;
return;
}
} else {
@@ -461,7 +408,7 @@ good_area:
}
up_read(&mm->mmap_sem);
- goto fault_done;
+ return;
/*
* Something tried to access memory that isn't in our memory map..
@@ -473,8 +420,7 @@ bad_area:
handle_kernel_fault:
do_kernel_fault(regs, si_code, fault_code, insn, address);
-
- goto fault_done;
+ return;
/*
* We ran out of memory, or some other thing happened to us that made
@@ -505,9 +451,4 @@ do_sigbus:
/* Kernel mode? Handle exceptions or die */
if (regs->tstate & TSTATE_PRIV)
goto handle_kernel_fault;
-
-fault_done:
- /* These values are no longer needed, clear them. */
- set_thread_fault_code(0);
- current_thread_info()->fault_address = 0;
}
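
Net effect of the fault.c changes: a privileged fault whose PC has an exception-table entry is redirected straight to the fixup stub, and %g2 no longer carries any fixup state. Sketch, with the same simplified entry type as above:

	struct extable_entry { unsigned long insn, fixup; };
	struct pt_regs_sketch { unsigned long tpc, tnpc; };

	/* Returns 1 if the fault was fixed up, 0 if it is fatal. */
	static int fixup_kernel_fault(struct pt_regs_sketch *regs,
				      const struct extable_entry *entry)
	{
		if (!entry)
			return 0;
		regs->tpc = entry->fixup;	/* resume in the stub */
		regs->tnpc = regs->tpc + 4;
		return 1;
	}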
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index fdb1ebb308c9..1e44ee26cee8 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -20,6 +20,8 @@
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kprobes.h>
+#include <linux/cache.h>
+#include <linux/sort.h>
#include <asm/head.h>
#include <asm/system.h>
@@ -40,24 +42,80 @@
extern void device_scan(void);
-struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
+#define MAX_BANKS 32
-unsigned long *sparc64_valid_addr_bitmap;
+static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
+static struct linux_prom64_registers pavail_rescan[MAX_BANKS] __initdata;
+static int pavail_ents __initdata;
+static int pavail_rescan_ents __initdata;
+
+static int cmp_p64(const void *a, const void *b)
+{
+ const struct linux_prom64_registers *x = a, *y = b;
+
+ if (x->phys_addr > y->phys_addr)
+ return 1;
+ if (x->phys_addr < y->phys_addr)
+ return -1;
+ return 0;
+}
+
+static void __init read_obp_memory(const char *property,
+ struct linux_prom64_registers *regs,
+ int *num_ents)
+{
+ int node = prom_finddevice("/memory");
+ int prop_size = prom_getproplen(node, property);
+ int ents, ret, i;
+
+ ents = prop_size / sizeof(struct linux_prom64_registers);
+ if (ents > MAX_BANKS) {
+ prom_printf("The machine has more %s property entries than "
+ "this kernel can support (%d).\n",
+ property, MAX_BANKS);
+ prom_halt();
+ }
+
+ ret = prom_getproperty(node, property, (char *) regs, prop_size);
+ if (ret == -1) {
+		prom_printf("Couldn't get %s property from /memory.\n",
+			    property);
+ prom_halt();
+ }
+
+ *num_ents = ents;
+
+ /* Sanitize what we got from the firmware, by page aligning
+ * everything.
+ */
+ for (i = 0; i < ents; i++) {
+ unsigned long base, size;
+
+ base = regs[i].phys_addr;
+ size = regs[i].reg_size;
+
+ size &= PAGE_MASK;
+ if (base & ~PAGE_MASK) {
+ unsigned long new_base = PAGE_ALIGN(base);
+
+ size -= new_base - base;
+ if ((long) size < 0L)
+ size = 0UL;
+ base = new_base;
+ }
+ regs[i].phys_addr = base;
+ regs[i].reg_size = size;
+ }
+ sort(regs, ents, sizeof(struct linux_prom64_registers),
+ cmp_p64, NULL);
+}
+
+unsigned long *sparc64_valid_addr_bitmap __read_mostly;
/* Ugly, but necessary... -DaveM */
-unsigned long phys_base;
-unsigned long kern_base;
-unsigned long kern_size;
-unsigned long pfn_base;
-
-/* This is even uglier. We have a problem where the kernel may not be
- * located at phys_base. However, initial __alloc_bootmem() calls need to
- * be adjusted to be within the 4-8Megs that the kernel is mapped to, else
- * those page mappings wont work. Things are ok after inherit_prom_mappings
- * is called though. Dave says he'll clean this up some other time.
- * -- BenC
- */
-static unsigned long bootmap_base;
+unsigned long phys_base __read_mostly;
+unsigned long kern_base __read_mostly;
+unsigned long kern_size __read_mostly;
+unsigned long pfn_base __read_mostly;
/* get_new_mmu_context() uses "cache + 1". */
DEFINE_SPINLOCK(ctx_alloc_lock);
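
The sanitizing loop in read_obp_memory() above rounds each bank's base up to a page boundary and shrinks its size to match, clamping at zero. Equivalent arithmetic as a standalone sketch (8K pages assumed, as on sparc64):

	#define PAGE_SZ		8192UL
	#define PAGE_MSK	(~(PAGE_SZ - 1))
	#define PG_ALIGN(x)	(((x) + PAGE_SZ - 1) & PAGE_MSK)

	static void align_bank(unsigned long *base, unsigned long *size)
	{
		*size &= PAGE_MSK;		/* whole pages only */
		if (*base & ~PAGE_MSK) {
			unsigned long delta = PG_ALIGN(*base) - *base;

			*size = (*size > delta) ? *size - delta : 0;
			*base += delta;
		}
	}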
@@ -73,7 +131,13 @@ extern unsigned long sparc_ramdisk_image64;
extern unsigned int sparc_ramdisk_image;
extern unsigned int sparc_ramdisk_size;
-struct page *mem_map_zero;
+struct page *mem_map_zero __read_mostly;
+
+unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
+
+unsigned long sparc64_kern_pri_context __read_mostly;
+unsigned long sparc64_kern_pri_nuc_bits __read_mostly;
+unsigned long sparc64_kern_sec_context __read_mostly;
int bigkernel = 0;
@@ -179,8 +243,6 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
: "g1", "g7");
}
-extern void __update_mmu_cache(unsigned long mmu_context_hw, unsigned long address, pte_t pte, int code);
-
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
struct page *page;
@@ -207,10 +269,6 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t p
put_cpu();
}
-
- if (get_thread_fault_code())
- __update_mmu_cache(CTX_NRBITS(vma->vm_mm->context),
- address, pte, get_thread_fault_code());
}
void flush_dcache_page(struct page *page)
@@ -310,6 +368,11 @@ struct linux_prom_translation {
unsigned long data;
};
+/* Exported for kernel TLB miss handling in ktlb.S */
+struct linux_prom_translation prom_trans[512] __read_mostly;
+unsigned int prom_trans_ents __read_mostly;
+unsigned int swapper_pgd_zero __read_mostly;
+
extern unsigned long prom_boot_page;
extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
extern int prom_get_mmu_ihandle(void);
@@ -318,297 +381,162 @@ extern void register_prom_callbacks(void);
/* Exported for SMP bootup purposes. */
unsigned long kern_locked_tte_data;
-void __init early_pgtable_allocfail(char *type)
-{
- prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
- prom_halt();
-}
-
-#define BASE_PAGE_SIZE 8192
-static pmd_t *prompmd;
-
/*
* Translate PROM's mapping we capture at boot time into physical address.
* The second parameter is only set from prom_callback() invocations.
*/
unsigned long prom_virt_to_phys(unsigned long promva, int *error)
{
- pmd_t *pmdp = prompmd + ((promva >> 23) & 0x7ff);
- pte_t *ptep;
- unsigned long base;
-
- if (pmd_none(*pmdp)) {
- if (error)
- *error = 1;
- return(0);
- }
- ptep = (pte_t *)__pmd_page(*pmdp) + ((promva >> 13) & 0x3ff);
- if (!pte_present(*ptep)) {
- if (error)
- *error = 1;
- return(0);
- }
- if (error) {
- *error = 0;
- return(pte_val(*ptep));
+ int i;
+
+ for (i = 0; i < prom_trans_ents; i++) {
+ struct linux_prom_translation *p = &prom_trans[i];
+
+ if (promva >= p->virt &&
+ promva < (p->virt + p->size)) {
+ unsigned long base = p->data & _PAGE_PADDR;
+
+ if (error)
+ *error = 0;
+ return base + (promva & (8192 - 1));
+ }
}
- base = pte_val(*ptep) & _PAGE_PADDR;
- return(base + (promva & (BASE_PAGE_SIZE - 1)));
+ if (error)
+ *error = 1;
+ return 0UL;
}
-static void inherit_prom_mappings(void)
+/* The obp translations are saved based on 8k pagesize, since obp can
+ * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
+ * HI_OBP_ADDRESS range are handled in ktlb.S and do not use the vpte
+ * scheme (also, see rant in inherit_locked_prom_mappings()).
+ */
+static inline int in_obp_range(unsigned long vaddr)
{
- struct linux_prom_translation *trans;
- unsigned long phys_page, tte_vaddr, tte_data;
- void (*remap_func)(unsigned long, unsigned long, int);
- pmd_t *pmdp;
- pte_t *ptep;
- int node, n, i, tsz;
- extern unsigned int obp_iaddr_patch[2], obp_daddr_patch[2];
+ return (vaddr >= LOW_OBP_ADDRESS &&
+ vaddr < HI_OBP_ADDRESS);
+}
+
+static int cmp_ptrans(const void *a, const void *b)
+{
+ const struct linux_prom_translation *x = a, *y = b;
+
+ if (x->virt > y->virt)
+ return 1;
+ if (x->virt < y->virt)
+ return -1;
+ return 0;
+}
+
+/* Read OBP translations property into 'prom_trans[]'. */
+static void __init read_obp_translations(void)
+{
+ int n, node, ents, first, last, i;
node = prom_finddevice("/virtual-memory");
n = prom_getproplen(node, "translations");
- if (n == 0 || n == -1) {
- prom_printf("Couldn't get translation property\n");
+ if (unlikely(n == 0 || n == -1)) {
+ prom_printf("prom_mappings: Couldn't get size.\n");
prom_halt();
}
- n += 5 * sizeof(struct linux_prom_translation);
- for (tsz = 1; tsz < n; tsz <<= 1)
- /* empty */;
- trans = __alloc_bootmem(tsz, SMP_CACHE_BYTES, bootmap_base);
- if (trans == NULL) {
- prom_printf("inherit_prom_mappings: Cannot alloc translations.\n");
+ if (unlikely(n > sizeof(prom_trans))) {
+		prom_printf("prom_mappings: Size %d is too big.\n", n);
prom_halt();
}
- memset(trans, 0, tsz);
- if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
- prom_printf("Couldn't get translation property\n");
+ if ((n = prom_getproperty(node, "translations",
+ (char *)&prom_trans[0],
+ sizeof(prom_trans))) == -1) {
+ prom_printf("prom_mappings: Couldn't get property.\n");
prom_halt();
}
- n = n / sizeof(*trans);
- /*
- * The obp translations are saved based on 8k pagesize, since obp can
- * use a mixture of pagesizes. Misses to the 0xf0000000 - 0x100000000,
- * ie obp range, are handled in entry.S and do not use the vpte scheme
- * (see rant in inherit_locked_prom_mappings()).
- */
-#define OBP_PMD_SIZE 2048
- prompmd = __alloc_bootmem(OBP_PMD_SIZE, OBP_PMD_SIZE, bootmap_base);
- if (prompmd == NULL)
- early_pgtable_allocfail("pmd");
- memset(prompmd, 0, OBP_PMD_SIZE);
- for (i = 0; i < n; i++) {
- unsigned long vaddr;
-
- if (trans[i].virt >= LOW_OBP_ADDRESS && trans[i].virt < HI_OBP_ADDRESS) {
- for (vaddr = trans[i].virt;
- ((vaddr < trans[i].virt + trans[i].size) &&
- (vaddr < HI_OBP_ADDRESS));
- vaddr += BASE_PAGE_SIZE) {
- unsigned long val;
-
- pmdp = prompmd + ((vaddr >> 23) & 0x7ff);
- if (pmd_none(*pmdp)) {
- ptep = __alloc_bootmem(BASE_PAGE_SIZE,
- BASE_PAGE_SIZE,
- bootmap_base);
- if (ptep == NULL)
- early_pgtable_allocfail("pte");
- memset(ptep, 0, BASE_PAGE_SIZE);
- pmd_set(pmdp, ptep);
- }
- ptep = (pte_t *)__pmd_page(*pmdp) +
- ((vaddr >> 13) & 0x3ff);
+ n = n / sizeof(struct linux_prom_translation);
- val = trans[i].data;
+ ents = n;
- /* Clear diag TTE bits. */
- if (tlb_type == spitfire)
- val &= ~0x0003fe0000000000UL;
+ sort(prom_trans, ents, sizeof(struct linux_prom_translation),
+ cmp_ptrans, NULL);
- set_pte_at(&init_mm, vaddr,
- ptep, __pte(val | _PAGE_MODIFIED));
- trans[i].data += BASE_PAGE_SIZE;
- }
- }
+ /* Now kick out all the non-OBP entries. */
+ for (i = 0; i < ents; i++) {
+ if (in_obp_range(prom_trans[i].virt))
+ break;
+ }
+ first = i;
+ for (; i < ents; i++) {
+ if (!in_obp_range(prom_trans[i].virt))
+ break;
}
- phys_page = __pa(prompmd);
- obp_iaddr_patch[0] |= (phys_page >> 10);
- obp_iaddr_patch[1] |= (phys_page & 0x3ff);
- flushi((long)&obp_iaddr_patch[0]);
- obp_daddr_patch[0] |= (phys_page >> 10);
- obp_daddr_patch[1] |= (phys_page & 0x3ff);
- flushi((long)&obp_daddr_patch[0]);
+ last = i;
- /* Now fixup OBP's idea about where we really are mapped. */
- prom_printf("Remapping the kernel... ");
+ for (i = 0; i < (last - first); i++) {
+ struct linux_prom_translation *src = &prom_trans[i + first];
+ struct linux_prom_translation *dest = &prom_trans[i];
- /* Spitfire Errata #32 workaround */
- /* NOTE: Using plain zero for the context value is
- * correct here, we are not using the Linux trap
- * tables yet so we should not use the special
- * UltraSPARC-III+ page size encodings yet.
- */
- __asm__ __volatile__("stxa %0, [%1] %2\n\t"
- "flush %%g6"
- : /* No outputs */
- : "r" (0), "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
- switch (tlb_type) {
- default:
- case spitfire:
- phys_page = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
- break;
-
- case cheetah:
- case cheetah_plus:
- phys_page = cheetah_get_litlb_data(sparc64_highest_locked_tlbent());
- break;
- };
-
- phys_page &= _PAGE_PADDR;
- phys_page += ((unsigned long)&prom_boot_page -
- (unsigned long)KERNBASE);
+ *dest = *src;
+ }
+ for (; i < ents; i++) {
+ struct linux_prom_translation *dest = &prom_trans[i];
+ dest->virt = dest->size = dest->data = 0x0UL;
+ }
+
+ prom_trans_ents = last - first;
if (tlb_type == spitfire) {
- /* Lock this into i/d tlb entry 59 */
- __asm__ __volatile__(
- "stxa %%g0, [%2] %3\n\t"
- "stxa %0, [%1] %4\n\t"
- "membar #Sync\n\t"
- "flush %%g6\n\t"
- "stxa %%g0, [%2] %5\n\t"
- "stxa %0, [%1] %6\n\t"
- "membar #Sync\n\t"
- "flush %%g6"
- : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
- _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
- "r" (59 << 3), "r" (TLB_TAG_ACCESS),
- "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
- "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
- : "memory");
- } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
- /* Lock this into i/d tlb-0 entry 11 */
- __asm__ __volatile__(
- "stxa %%g0, [%2] %3\n\t"
- "stxa %0, [%1] %4\n\t"
- "membar #Sync\n\t"
- "flush %%g6\n\t"
- "stxa %%g0, [%2] %5\n\t"
- "stxa %0, [%1] %6\n\t"
- "membar #Sync\n\t"
- "flush %%g6"
- : : "r" (phys_page | _PAGE_VALID | _PAGE_SZ8K | _PAGE_CP |
- _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W),
- "r" ((0 << 16) | (11 << 3)), "r" (TLB_TAG_ACCESS),
- "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS),
- "i" (ASI_IMMU), "i" (ASI_ITLB_DATA_ACCESS)
- : "memory");
- } else {
- /* Implement me :-) */
- BUG();
+ /* Clear diag TTE bits. */
+ for (i = 0; i < prom_trans_ents; i++)
+ prom_trans[i].data &= ~0x0003fe0000000000UL;
}
+}
- tte_vaddr = (unsigned long) KERNBASE;
+static void __init remap_kernel(void)
+{
+ unsigned long phys_page, tte_vaddr, tte_data;
+ int tlb_ent = sparc64_highest_locked_tlbent();
- /* Spitfire Errata #32 workaround */
- /* NOTE: Using plain zero for the context value is
- * correct here, we are not using the Linux trap
- * tables yet so we should not use the special
- * UltraSPARC-III+ page size encodings yet.
- */
- __asm__ __volatile__("stxa %0, [%1] %2\n\t"
- "flush %%g6"
- : /* No outputs */
- : "r" (0),
- "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
- if (tlb_type == spitfire)
- tte_data = spitfire_get_dtlb_data(sparc64_highest_locked_tlbent());
- else
- tte_data = cheetah_get_ldtlb_data(sparc64_highest_locked_tlbent());
+ tte_vaddr = (unsigned long) KERNBASE;
+ phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+ tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB |
+ _PAGE_CP | _PAGE_CV | _PAGE_P |
+ _PAGE_L | _PAGE_W));
kern_locked_tte_data = tte_data;
- remap_func = (void *) ((unsigned long) &prom_remap -
- (unsigned long) &prom_boot_page);
-
-
- /* Spitfire Errata #32 workaround */
- /* NOTE: Using plain zero for the context value is
- * correct here, we are not using the Linux trap
- * tables yet so we should not use the special
- * UltraSPARC-III+ page size encodings yet.
- */
- __asm__ __volatile__("stxa %0, [%1] %2\n\t"
- "flush %%g6"
- : /* No outputs */
- : "r" (0),
- "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
- remap_func((tlb_type == spitfire ?
- (spitfire_get_dtlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR) :
- (cheetah_get_litlb_data(sparc64_highest_locked_tlbent()) & _PAGE_PADDR)),
- (unsigned long) KERNBASE,
- prom_get_mmu_ihandle());
-
- if (bigkernel)
- remap_func(((tte_data + 0x400000) & _PAGE_PADDR),
- (unsigned long) KERNBASE + 0x400000, prom_get_mmu_ihandle());
-
- /* Flush out that temporary mapping. */
- spitfire_flush_dtlb_nucleus_page(0x0);
- spitfire_flush_itlb_nucleus_page(0x0);
-
- /* Now lock us back into the TLBs via OBP. */
- prom_dtlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
- prom_itlb_load(sparc64_highest_locked_tlbent(), tte_data, tte_vaddr);
+ /* Now lock us into the TLBs via OBP. */
+ prom_dtlb_load(tlb_ent, tte_data, tte_vaddr);
+ prom_itlb_load(tlb_ent, tte_data, tte_vaddr);
if (bigkernel) {
- prom_dtlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000,
- tte_vaddr + 0x400000);
- prom_itlb_load(sparc64_highest_locked_tlbent()-1, tte_data + 0x400000,
- tte_vaddr + 0x400000);
+ tlb_ent -= 1;
+ prom_dtlb_load(tlb_ent,
+ tte_data + 0x400000,
+ tte_vaddr + 0x400000);
+ prom_itlb_load(tlb_ent,
+ tte_data + 0x400000,
+ tte_vaddr + 0x400000);
}
-
- /* Re-read translations property. */
- if ((n = prom_getproperty(node, "translations", (char *)trans, tsz)) == -1) {
- prom_printf("Couldn't get translation property\n");
- prom_halt();
+ sparc64_highest_unlocked_tlb_ent = tlb_ent - 1;
+ if (tlb_type == cheetah_plus) {
+ sparc64_kern_pri_context = (CTX_CHEETAH_PLUS_CTX0 |
+ CTX_CHEETAH_PLUS_NUC);
+ sparc64_kern_pri_nuc_bits = CTX_CHEETAH_PLUS_NUC;
+ sparc64_kern_sec_context = CTX_CHEETAH_PLUS_CTX0;
}
- n = n / sizeof(*trans);
-
- for (i = 0; i < n; i++) {
- unsigned long vaddr = trans[i].virt;
- unsigned long size = trans[i].size;
-
- if (vaddr < 0xf0000000UL) {
- unsigned long avoid_start = (unsigned long) KERNBASE;
- unsigned long avoid_end = avoid_start + (4 * 1024 * 1024);
-
- if (bigkernel)
- avoid_end += (4 * 1024 * 1024);
- if (vaddr < avoid_start) {
- unsigned long top = vaddr + size;
+}
- if (top > avoid_start)
- top = avoid_start;
- prom_unmap(top - vaddr, vaddr);
- }
- if ((vaddr + size) > avoid_end) {
- unsigned long bottom = vaddr;
- if (bottom < avoid_end)
- bottom = avoid_end;
- prom_unmap((vaddr + size) - bottom, bottom);
- }
- }
- }
+static void __init inherit_prom_mappings(void)
+{
+ read_obp_translations();
+ /* Now fixup OBP's idea about where we really are mapped. */
+ prom_printf("Remapping the kernel... ");
+ remap_kernel();
prom_printf("done.\n");
+ prom_printf("Registering callbacks... ");
register_prom_callbacks();
+ prom_printf("done.\n");
}
/* The OBP specifications for sun4u mark 0xfffffffc00000000 and
@@ -792,8 +720,8 @@ void inherit_locked_prom_mappings(int save_p)
}
}
if (tlb_type == spitfire) {
- int high = SPITFIRE_HIGHEST_LOCKED_TLBENT - bigkernel;
- for (i = 0; i < high; i++) {
+ int high = sparc64_highest_unlocked_tlb_ent;
+ for (i = 0; i <= high; i++) {
unsigned long data;
/* Spitfire Errata #32 workaround */
@@ -881,9 +809,9 @@ void inherit_locked_prom_mappings(int save_p)
}
}
} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
- int high = CHEETAH_HIGHEST_LOCKED_TLBENT - bigkernel;
+ int high = sparc64_highest_unlocked_tlb_ent;
- for (i = 0; i < high; i++) {
+ for (i = 0; i <= high; i++) {
unsigned long data;
data = cheetah_get_ldtlb_data(i);
@@ -1276,14 +1204,14 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
int i;
#ifdef CONFIG_DEBUG_BOOTMEM
- prom_printf("bootmem_init: Scan sp_banks, ");
+ prom_printf("bootmem_init: Scan pavail, ");
#endif
bytes_avail = 0UL;
- for (i = 0; sp_banks[i].num_bytes != 0; i++) {
- end_of_phys_memory = sp_banks[i].base_addr +
- sp_banks[i].num_bytes;
- bytes_avail += sp_banks[i].num_bytes;
+ for (i = 0; i < pavail_ents; i++) {
+ end_of_phys_memory = pavail[i].phys_addr +
+ pavail[i].reg_size;
+ bytes_avail += pavail[i].reg_size;
if (cmdline_memory_size) {
if (bytes_avail > cmdline_memory_size) {
unsigned long slack = bytes_avail - cmdline_memory_size;
@@ -1291,12 +1219,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
bytes_avail -= slack;
end_of_phys_memory -= slack;
- sp_banks[i].num_bytes -= slack;
- if (sp_banks[i].num_bytes == 0) {
- sp_banks[i].base_addr = 0xdeadbeef;
+ pavail[i].reg_size -= slack;
+ if ((long)pavail[i].reg_size <= 0L) {
+ pavail[i].phys_addr = 0xdeadbeefUL;
+ pavail[i].reg_size = 0UL;
+ pavail_ents = i;
} else {
- sp_banks[i+1].num_bytes = 0;
- sp_banks[i+1].base_addr = 0xdeadbeef;
+			pavail[i+1].reg_size = 0UL;
+ pavail[i+1].phys_addr = 0xdeadbeefUL;
+ pavail_ents = i + 1;
}
break;
}
@@ -1347,17 +1278,15 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
#endif
bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap_pfn, pfn_base, end_pfn);
- bootmap_base = bootmap_pfn << PAGE_SHIFT;
-
/* Now register the available physical memory with the
* allocator.
*/
- for (i = 0; sp_banks[i].num_bytes != 0; i++) {
+ for (i = 0; i < pavail_ents; i++) {
#ifdef CONFIG_DEBUG_BOOTMEM
- prom_printf("free_bootmem(sp_banks:%d): base[%lx] size[%lx]\n",
- i, sp_banks[i].base_addr, sp_banks[i].num_bytes);
+ prom_printf("free_bootmem(pavail:%d): base[%lx] size[%lx]\n",
+ i, pavail[i].phys_addr, pavail[i].reg_size);
#endif
- free_bootmem(sp_banks[i].base_addr, sp_banks[i].num_bytes);
+ free_bootmem(pavail[i].phys_addr, pavail[i].reg_size);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -1398,121 +1327,167 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
return end_pfn;
}
+#ifdef CONFIG_DEBUG_PAGEALLOC
+static unsigned long kernel_map_range(unsigned long pstart, unsigned long pend, pgprot_t prot)
+{
+ unsigned long vstart = PAGE_OFFSET + pstart;
+ unsigned long vend = PAGE_OFFSET + pend;
+ unsigned long alloc_bytes = 0UL;
+
+ if ((vstart & ~PAGE_MASK) || (vend & ~PAGE_MASK)) {
+ prom_printf("kernel_map: Unaligned physmem[%lx:%lx]\n",
+ vstart, vend);
+ prom_halt();
+ }
+
+ while (vstart < vend) {
+ unsigned long this_end, paddr = __pa(vstart);
+ pgd_t *pgd = pgd_offset_k(vstart);
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+
+ pud = pud_offset(pgd, vstart);
+ if (pud_none(*pud)) {
+ pmd_t *new;
+
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ alloc_bytes += PAGE_SIZE;
+ pud_populate(&init_mm, pud, new);
+ }
+
+ pmd = pmd_offset(pud, vstart);
+ if (!pmd_present(*pmd)) {
+ pte_t *new;
+
+ new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
+ alloc_bytes += PAGE_SIZE;
+ pmd_populate_kernel(&init_mm, pmd, new);
+ }
+
+ pte = pte_offset_kernel(pmd, vstart);
+ this_end = (vstart + PMD_SIZE) & PMD_MASK;
+ if (this_end > vend)
+ this_end = vend;
+
+ while (vstart < this_end) {
+ pte_val(*pte) = (paddr | pgprot_val(prot));
+
+ vstart += PAGE_SIZE;
+ paddr += PAGE_SIZE;
+ pte++;
+ }
+ }
+
+ return alloc_bytes;
+}
+
+static struct linux_prom64_registers pall[MAX_BANKS] __initdata;
+static int pall_ents __initdata;
+
+extern unsigned int kvmap_linear_patch[1];
+
+static void __init kernel_physical_mapping_init(void)
+{
+ unsigned long i, mem_alloced = 0UL;
+
+ read_obp_memory("reg", &pall[0], &pall_ents);
+
+ for (i = 0; i < pall_ents; i++) {
+ unsigned long phys_start, phys_end;
+
+ phys_start = pall[i].phys_addr;
+ phys_end = phys_start + pall[i].reg_size;
+ mem_alloced += kernel_map_range(phys_start, phys_end,
+ PAGE_KERNEL);
+ }
+
+ printk("Allocated %ld bytes for kernel page tables.\n",
+ mem_alloced);
+
+ kvmap_linear_patch[0] = 0x01000000; /* nop */
+ flushi(&kvmap_linear_patch[0]);
+
+ __flush_tlb_all();
+}
+
+void kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
+ unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
+
+ kernel_map_range(phys_start, phys_end,
+ (enable ? PAGE_KERNEL : __pgprot(0)));
+
+	/* We should perform an IPI and flush all TLBs,
+	 * but that can deadlock, so we only flush the
+	 * current cpu's TLB.
+	 */
+ __flush_tlb_kernel_range(PAGE_OFFSET + phys_start,
+ PAGE_OFFSET + phys_end);
+}
+#endif
+
+unsigned long __init find_ecache_flush_span(unsigned long size)
+{
+ int i;
+
+ for (i = 0; i < pavail_ents; i++) {
+ if (pavail[i].reg_size >= size)
+ return pavail[i].phys_addr;
+ }
+
+ return ~0UL;
+}
+
/* paging_init() sets up the page tables */
extern void cheetah_ecache_flush_init(void);
static unsigned long last_valid_pfn;
+pgd_t swapper_pg_dir[2048];
void __init paging_init(void)
{
- extern pmd_t swapper_pmd_dir[1024];
- extern unsigned int sparc64_vpte_patchme1[1];
- extern unsigned int sparc64_vpte_patchme2[1];
- unsigned long alias_base = kern_base + PAGE_OFFSET;
- unsigned long second_alias_page = 0;
- unsigned long pt, flags, end_pfn, pages_avail;
- unsigned long shift = alias_base - ((unsigned long)KERNBASE);
- unsigned long real_end;
+ unsigned long end_pfn, pages_avail, shift;
+ unsigned long real_end, i;
+
+ /* Find available physical memory... */
+ read_obp_memory("available", &pavail[0], &pavail_ents);
+
+ phys_base = 0xffffffffffffffffUL;
+ for (i = 0; i < pavail_ents; i++)
+ phys_base = min(phys_base, pavail[i].phys_addr);
+
+ pfn_base = phys_base >> PAGE_SHIFT;
+
+ kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
+ kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
set_bit(0, mmu_context_bmap);
+ shift = kern_base + PAGE_OFFSET - ((unsigned long)KERNBASE);
+
real_end = (unsigned long)_end;
if ((real_end > ((unsigned long)KERNBASE + 0x400000)))
bigkernel = 1;
-#ifdef CONFIG_BLK_DEV_INITRD
- if (sparc_ramdisk_image || sparc_ramdisk_image64)
- real_end = (PAGE_ALIGN(real_end) + PAGE_ALIGN(sparc_ramdisk_size));
-#endif
-
- /* We assume physical memory starts at some 4mb multiple,
- * if this were not true we wouldn't boot up to this point
- * anyways.
- */
- pt = kern_base | _PAGE_VALID | _PAGE_SZ4MB;
- pt |= _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_L | _PAGE_W;
- local_irq_save(flags);
- if (tlb_type == spitfire) {
- __asm__ __volatile__(
- " stxa %1, [%0] %3\n"
- " stxa %2, [%5] %4\n"
- " membar #Sync\n"
- " flush %%g6\n"
- " nop\n"
- " nop\n"
- " nop\n"
- : /* No outputs */
- : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
- "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (61 << 3)
- : "memory");
- if (real_end >= KERNBASE + 0x340000) {
- second_alias_page = alias_base + 0x400000;
- __asm__ __volatile__(
- " stxa %1, [%0] %3\n"
- " stxa %2, [%5] %4\n"
- " membar #Sync\n"
- " flush %%g6\n"
- " nop\n"
- " nop\n"
- " nop\n"
- : /* No outputs */
- : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
- "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" (60 << 3)
- : "memory");
- }
- } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
- __asm__ __volatile__(
- " stxa %1, [%0] %3\n"
- " stxa %2, [%5] %4\n"
- " membar #Sync\n"
- " flush %%g6\n"
- " nop\n"
- " nop\n"
- " nop\n"
- : /* No outputs */
- : "r" (TLB_TAG_ACCESS), "r" (alias_base), "r" (pt),
- "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (13<<3))
- : "memory");
- if (real_end >= KERNBASE + 0x340000) {
- second_alias_page = alias_base + 0x400000;
- __asm__ __volatile__(
- " stxa %1, [%0] %3\n"
- " stxa %2, [%5] %4\n"
- " membar #Sync\n"
- " flush %%g6\n"
- " nop\n"
- " nop\n"
- " nop\n"
- : /* No outputs */
- : "r" (TLB_TAG_ACCESS), "r" (second_alias_page), "r" (pt + 0x400000),
- "i" (ASI_DMMU), "i" (ASI_DTLB_DATA_ACCESS), "r" ((0<<16) | (12<<3))
- : "memory");
- }
+ if ((real_end > ((unsigned long)KERNBASE + 0x800000))) {
+ prom_printf("paging_init: Kernel > 8MB, too large.\n");
+ prom_halt();
}
- local_irq_restore(flags);
-
- /* Now set kernel pgd to upper alias so physical page computations
+
+ /* Set kernel pgd to upper alias so physical page computations
* work.
*/
init_mm.pgd += ((shift) / (sizeof(pgd_t)));
- memset(swapper_pmd_dir, 0, sizeof(swapper_pmd_dir));
+ memset(swapper_low_pmd_dir, 0, sizeof(swapper_low_pmd_dir));
/* Now can init the kernel/bad page tables. */
pud_set(pud_offset(&swapper_pg_dir[0], 0),
- swapper_pmd_dir + (shift / sizeof(pgd_t)));
+ swapper_low_pmd_dir + (shift / sizeof(pgd_t)));
- sparc64_vpte_patchme1[0] |=
- (((unsigned long)pgd_val(init_mm.pgd[0])) >> 10);
- sparc64_vpte_patchme2[0] |=
- (((unsigned long)pgd_val(init_mm.pgd[0])) & 0x3ff);
- flushi((long)&sparc64_vpte_patchme1[0]);
+ swapper_pgd_zero = pgd_val(swapper_pg_dir[0]);
- /* Setup bootmem... */
- pages_avail = 0;
- last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
-
- /* Inherit non-locked OBP mappings. */
inherit_prom_mappings();
/* Ok, we can use our TLB miss and window trap handlers safely.
@@ -1527,13 +1502,16 @@ void __init paging_init(void)
inherit_locked_prom_mappings(1);
- /* We only created DTLB mapping of this stuff. */
- spitfire_flush_dtlb_nucleus_page(alias_base);
- if (second_alias_page)
- spitfire_flush_dtlb_nucleus_page(second_alias_page);
-
__flush_tlb_all();
+ /* Setup bootmem... */
+ pages_avail = 0;
+ last_valid_pfn = end_pfn = bootmem_init(&pages_avail);
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+ kernel_physical_mapping_init();
+#endif
+
{
unsigned long zones_size[MAX_NR_ZONES];
unsigned long zholes_size[MAX_NR_ZONES];
@@ -1554,128 +1532,35 @@ void __init paging_init(void)
device_scan();
}
-/* Ok, it seems that the prom can allocate some more memory chunks
- * as a side effect of some prom calls we perform during the
- * boot sequence. My most likely theory is that it is from the
- * prom_set_traptable() call, and OBP is allocating a scratchpad
- * for saving client program register state etc.
- */
-static void __init sort_memlist(struct linux_mlist_p1275 *thislist)
-{
- int swapi = 0;
- int i, mitr;
- unsigned long tmpaddr, tmpsize;
- unsigned long lowest;
-
- for (i = 0; thislist[i].theres_more != 0; i++) {
- lowest = thislist[i].start_adr;
- for (mitr = i+1; thislist[mitr-1].theres_more != 0; mitr++)
- if (thislist[mitr].start_adr < lowest) {
- lowest = thislist[mitr].start_adr;
- swapi = mitr;
- }
- if (lowest == thislist[i].start_adr)
- continue;
- tmpaddr = thislist[swapi].start_adr;
- tmpsize = thislist[swapi].num_bytes;
- for (mitr = swapi; mitr > i; mitr--) {
- thislist[mitr].start_adr = thislist[mitr-1].start_adr;
- thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
- }
- thislist[i].start_adr = tmpaddr;
- thislist[i].num_bytes = tmpsize;
- }
-}
-
-void __init rescan_sp_banks(void)
-{
- struct linux_prom64_registers memlist[64];
- struct linux_mlist_p1275 avail[64], *mlist;
- unsigned long bytes, base_paddr;
- int num_regs, node = prom_finddevice("/memory");
- int i;
-
- num_regs = prom_getproperty(node, "available",
- (char *) memlist, sizeof(memlist));
- num_regs = (num_regs / sizeof(struct linux_prom64_registers));
- for (i = 0; i < num_regs; i++) {
- avail[i].start_adr = memlist[i].phys_addr;
- avail[i].num_bytes = memlist[i].reg_size;
- avail[i].theres_more = &avail[i + 1];
- }
- avail[i - 1].theres_more = NULL;
- sort_memlist(avail);
-
- mlist = &avail[0];
- i = 0;
- bytes = mlist->num_bytes;
- base_paddr = mlist->start_adr;
-
- sp_banks[0].base_addr = base_paddr;
- sp_banks[0].num_bytes = bytes;
-
- while (mlist->theres_more != NULL){
- i++;
- mlist = mlist->theres_more;
- bytes = mlist->num_bytes;
- if (i >= SPARC_PHYS_BANKS-1) {
- printk ("The machine has more banks than "
- "this kernel can support\n"
- "Increase the SPARC_PHYS_BANKS "
- "setting (currently %d)\n",
- SPARC_PHYS_BANKS);
- i = SPARC_PHYS_BANKS-1;
- break;
- }
-
- sp_banks[i].base_addr = mlist->start_adr;
- sp_banks[i].num_bytes = mlist->num_bytes;
- }
-
- i++;
- sp_banks[i].base_addr = 0xdeadbeefbeefdeadUL;
- sp_banks[i].num_bytes = 0;
-
- for (i = 0; sp_banks[i].num_bytes != 0; i++)
- sp_banks[i].num_bytes &= PAGE_MASK;
-}
-
static void __init taint_real_pages(void)
{
- struct sparc_phys_banks saved_sp_banks[SPARC_PHYS_BANKS];
int i;
- for (i = 0; i < SPARC_PHYS_BANKS; i++) {
- saved_sp_banks[i].base_addr =
- sp_banks[i].base_addr;
- saved_sp_banks[i].num_bytes =
- sp_banks[i].num_bytes;
- }
-
- rescan_sp_banks();
+ read_obp_memory("available", &pavail_rescan[0], &pavail_rescan_ents);
- /* Find changes discovered in the sp_bank rescan and
+ /* Find changes discovered in the physmem available rescan and
* reserve the lost portions in the bootmem maps.
*/
- for (i = 0; saved_sp_banks[i].num_bytes; i++) {
+ for (i = 0; i < pavail_ents; i++) {
unsigned long old_start, old_end;
- old_start = saved_sp_banks[i].base_addr;
+ old_start = pavail[i].phys_addr;
old_end = old_start +
- saved_sp_banks[i].num_bytes;
+ pavail[i].reg_size;
while (old_start < old_end) {
int n;
- for (n = 0; sp_banks[n].num_bytes; n++) {
+			for (n = 0; n < pavail_rescan_ents; n++) {
unsigned long new_start, new_end;
- new_start = sp_banks[n].base_addr;
- new_end = new_start + sp_banks[n].num_bytes;
+ new_start = pavail_rescan[n].phys_addr;
+ new_end = new_start +
+ pavail_rescan[n].reg_size;
if (new_start <= old_start &&
new_end >= (old_start + PAGE_SIZE)) {
- set_bit (old_start >> 22,
- sparc64_valid_addr_bitmap);
+ set_bit(old_start >> 22,
+ sparc64_valid_addr_bitmap);
goto do_next_page;
}
}
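
The containment test above marks a page valid only when some bank in the rescanned "available" list covers it entirely; partially-covered pages stay reserved in the bootmem map. As a standalone predicate:

	struct bank { unsigned long phys_addr, reg_size; };
	#define PAGE_SZ 8192UL	/* sparc64 base page size */

	static int page_still_available(unsigned long page,
					const struct bank *b, int nbanks)
	{
		int n;

		for (n = 0; n < nbanks; n++) {
			unsigned long start = b[n].phys_addr;
			unsigned long end = start + b[n].reg_size;

			if (start <= page && end >= page + PAGE_SZ)
				return 1;
		}
		return 0;
	}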
@@ -1695,8 +1580,7 @@ void __init mem_init(void)
i = last_valid_pfn >> ((22 - PAGE_SHIFT) + 6);
i += 1;
- sparc64_valid_addr_bitmap = (unsigned long *)
- __alloc_bootmem(i << 3, SMP_CACHE_BYTES, bootmap_base);
+ sparc64_valid_addr_bitmap = (unsigned long *) alloc_bootmem(i << 3);
if (sparc64_valid_addr_bitmap == NULL) {
prom_printf("mem_init: Cannot alloc valid_addr_bitmap.\n");
prom_halt();
@@ -1749,7 +1633,7 @@ void __init mem_init(void)
cheetah_ecache_flush_init();
}
-void free_initmem (void)
+void free_initmem(void)
{
unsigned long addr, initend;
diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
index b2ee9b53227f..e4c9151fa116 100644
--- a/arch/sparc64/mm/ultra.S
+++ b/arch/sparc64/mm/ultra.S
@@ -144,42 +144,29 @@ __flush_icache_page: /* %o0 = phys_page */
#define DTAG_MASK 0x3
+ /* This routine is Spitfire specific so the hardcoded
+ * D-cache size and line-size are OK.
+ */
.align 64
.globl __flush_dcache_page
__flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
sethi %uhi(PAGE_OFFSET), %g1
sllx %g1, 32, %g1
- sub %o0, %g1, %o0
- clr %o4
- srlx %o0, 11, %o0
- sethi %hi(1 << 14), %o2
-1: ldxa [%o4] ASI_DCACHE_TAG, %o3 ! LSU Group
- add %o4, (1 << 5), %o4 ! IEU0
- ldxa [%o4] ASI_DCACHE_TAG, %g1 ! LSU Group
- add %o4, (1 << 5), %o4 ! IEU0
- ldxa [%o4] ASI_DCACHE_TAG, %g2 ! LSU Group o3 available
- add %o4, (1 << 5), %o4 ! IEU0
- andn %o3, DTAG_MASK, %o3 ! IEU1
- ldxa [%o4] ASI_DCACHE_TAG, %g3 ! LSU Group
- add %o4, (1 << 5), %o4 ! IEU0
- andn %g1, DTAG_MASK, %g1 ! IEU1
- cmp %o0, %o3 ! IEU1 Group
- be,a,pn %xcc, dflush1 ! CTI
- sub %o4, (4 << 5), %o4 ! IEU0 (Group)
- cmp %o0, %g1 ! IEU1 Group
- andn %g2, DTAG_MASK, %g2 ! IEU0
- be,a,pn %xcc, dflush2 ! CTI
- sub %o4, (3 << 5), %o4 ! IEU0 (Group)
- cmp %o0, %g2 ! IEU1 Group
- andn %g3, DTAG_MASK, %g3 ! IEU0
- be,a,pn %xcc, dflush3 ! CTI
- sub %o4, (2 << 5), %o4 ! IEU0 (Group)
- cmp %o0, %g3 ! IEU1 Group
- be,a,pn %xcc, dflush4 ! CTI
- sub %o4, (1 << 5), %o4 ! IEU0
-2: cmp %o4, %o2 ! IEU1 Group
- bne,pt %xcc, 1b ! CTI
- nop ! IEU0
+ sub %o0, %g1, %o0 ! physical address
+ srlx %o0, 11, %o0 ! make D-cache TAG
+ sethi %hi(1 << 14), %o2 ! D-cache size
+ sub %o2, (1 << 5), %o2 ! D-cache line size
+1: ldxa [%o2] ASI_DCACHE_TAG, %o3 ! load D-cache TAG
+ andcc %o3, DTAG_MASK, %g0 ! Valid?
+ be,pn %xcc, 2f ! Nope, branch
+ andn %o3, DTAG_MASK, %o3 ! Clear valid bits
+ cmp %o3, %o0 ! TAG match?
+ bne,pt %xcc, 2f ! Nope, branch
+ nop
+ stxa %g0, [%o2] ASI_DCACHE_TAG ! Invalidate TAG
+ membar #Sync
+2: brnz,pt %o2, 1b
+ sub %o2, (1 << 5), %o2 ! D-cache line size
/* The I-cache does not snoop local stores so we
* better flush that too when necessary.
@@ -189,48 +176,9 @@ __flush_dcache_page: /* %o0=kaddr, %o1=flush_icache */
retl
nop
-dflush1:stxa %g0, [%o4] ASI_DCACHE_TAG
- add %o4, (1 << 5), %o4
-dflush2:stxa %g0, [%o4] ASI_DCACHE_TAG
- add %o4, (1 << 5), %o4
-dflush3:stxa %g0, [%o4] ASI_DCACHE_TAG
- add %o4, (1 << 5), %o4
-dflush4:stxa %g0, [%o4] ASI_DCACHE_TAG
- add %o4, (1 << 5), %o4
- membar #Sync
- ba,pt %xcc, 2b
- nop
#endif /* DCACHE_ALIASING_POSSIBLE */
- .previous .text
- .align 32
-__prefill_dtlb:
- rdpr %pstate, %g7
- wrpr %g7, PSTATE_IE, %pstate
- mov TLB_TAG_ACCESS, %g1
- stxa %o5, [%g1] ASI_DMMU
- stxa %o2, [%g0] ASI_DTLB_DATA_IN
- flush %g6
- retl
- wrpr %g7, %pstate
-__prefill_itlb:
- rdpr %pstate, %g7
- wrpr %g7, PSTATE_IE, %pstate
- mov TLB_TAG_ACCESS, %g1
- stxa %o5, [%g1] ASI_IMMU
- stxa %o2, [%g0] ASI_ITLB_DATA_IN
- flush %g6
- retl
- wrpr %g7, %pstate
-
- .globl __update_mmu_cache
-__update_mmu_cache: /* %o0=hw_context, %o1=address, %o2=pte, %o3=fault_code */
- srlx %o1, PAGE_SHIFT, %o1
- andcc %o3, FAULT_CODE_DTLB, %g0
- sllx %o1, PAGE_SHIFT, %o5
- bne,pt %xcc, __prefill_dtlb
- or %o5, %o0, %o5
- ba,a,pt %xcc, __prefill_itlb
+ .previous
/* Cheetah specific versions, patched at boot time. */
__cheetah_flush_tlb_mm: /* 18 insns */
@@ -283,7 +231,7 @@ __cheetah_flush_tlb_pending: /* 26 insns */
wrpr %g7, 0x0, %pstate
#ifdef DCACHE_ALIASING_POSSIBLE
-flush_dcpage_cheetah: /* 11 insns */
+__cheetah_flush_dcache_page: /* 11 insns */
sethi %uhi(PAGE_OFFSET), %g1
sllx %g1, 32, %g1
sub %o0, %g1, %o0
@@ -329,8 +277,8 @@ cheetah_patch_cachetlbops:
#ifdef DCACHE_ALIASING_POSSIBLE
sethi %hi(__flush_dcache_page), %o0
or %o0, %lo(__flush_dcache_page), %o0
- sethi %hi(flush_dcpage_cheetah), %o1
- or %o1, %lo(flush_dcpage_cheetah), %o1
+ sethi %hi(__cheetah_flush_dcache_page), %o1
+ or %o1, %lo(__cheetah_flush_dcache_page), %o1
call cheetah_patch_one
mov 11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */
@@ -505,22 +453,6 @@ xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
nop
nop
- .globl xcall_promstop
-xcall_promstop:
- rdpr %pstate, %g2
- wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
- rdpr %pil, %g2
- wrpr %g0, 15, %pil
- sethi %hi(109f), %g7
- b,pt %xcc, etrap_irq
-109: or %g7, %lo(109b), %g7
- flushw
- call prom_stopself
- nop
- /* We should not return, just spin if we do... */
-1: b,a,pt %xcc, 1b
- nop
-
.data
errata32_hwbug:
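
The rewritten Spitfire __flush_dcache_page walks every 32-byte line of the 16K D-cache from the top down, skipping invalid lines and invalidating those whose tag matches the page. In C, with read_dcache_tag()/clear_dcache_tag() as hypothetical stand-ins for the ldxa/stxa ASI_DCACHE_TAG accesses:

	#define DCACHE_SIZE	(1 << 14)	/* 16K, Spitfire-specific */
	#define DCACHE_LINE	(1 << 5)	/* 32-byte lines */
	#define DTAG_MASK	0x3UL

	extern unsigned long read_dcache_tag(unsigned long off); /* hypothetical */
	extern void clear_dcache_tag(unsigned long off);         /* hypothetical */

	static void flush_dcache_page_sketch(unsigned long paddr)
	{
		unsigned long want = paddr >> 11;	/* D-cache TAG format */
		long off;

		for (off = DCACHE_SIZE - DCACHE_LINE; off >= 0; off -= DCACHE_LINE) {
			unsigned long tag = read_dcache_tag(off);

			if (!(tag & DTAG_MASK))
				continue;		/* line not valid */
			if ((tag & ~DTAG_MASK) != want)
				continue;		/* different page */
			clear_dcache_tag(off);		/* invalidate it */
		}
	}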
diff --git a/arch/sparc64/prom/Makefile b/arch/sparc64/prom/Makefile
index 8f2420d9e9e6..3d33ed27bc27 100644
--- a/arch/sparc64/prom/Makefile
+++ b/arch/sparc64/prom/Makefile
@@ -6,5 +6,5 @@
EXTRA_AFLAGS := -ansi
EXTRA_CFLAGS := -Werror
-lib-y := bootstr.o devops.o init.o memory.o misc.o \
- tree.o console.o printf.o p1275.o map.o cif.o
+lib-y := bootstr.o devops.o init.o misc.o \
+ tree.o console.o printf.o p1275.o cif.o
diff --git a/arch/sparc64/prom/console.c b/arch/sparc64/prom/console.c
index 028a53fcb1ec..eae5db8dda56 100644
--- a/arch/sparc64/prom/console.c
+++ b/arch/sparc64/prom/console.c
@@ -67,7 +67,7 @@ prom_putchar(char c)
}
void
-prom_puts(char *s, int len)
+prom_puts(const char *s, int len)
{
p1275_cmd("write", P1275_ARG(1,P1275_ARG_IN_BUF)|
P1275_INOUT(3,1),
diff --git a/arch/sparc64/prom/devops.c b/arch/sparc64/prom/devops.c
index 2c99b21b6981..4641839eb39a 100644
--- a/arch/sparc64/prom/devops.c
+++ b/arch/sparc64/prom/devops.c
@@ -16,7 +16,7 @@
* Returns 0 on failure.
*/
int
-prom_devopen(char *dstr)
+prom_devopen(const char *dstr)
{
return p1275_cmd ("open", P1275_ARG(0,P1275_ARG_IN_STRING)|
P1275_INOUT(1,1),
diff --git a/arch/sparc64/prom/init.c b/arch/sparc64/prom/init.c
index 817faae058cd..f3cc2d8578b2 100644
--- a/arch/sparc64/prom/init.c
+++ b/arch/sparc64/prom/init.c
@@ -27,7 +27,6 @@ int prom_chosen_node;
* failure. It gets passed the pointer to the PROM vector.
*/
-extern void prom_meminit(void);
extern void prom_cif_init(void *, void *);
void __init prom_init(void *cif_handler, void *cif_stack)
@@ -46,7 +45,7 @@ void __init prom_init(void *cif_handler, void *cif_stack)
if((prom_root_node == 0) || (prom_root_node == -1))
prom_halt();
- prom_chosen_node = prom_finddevice("/chosen");
+ prom_chosen_node = prom_finddevice(prom_chosen_path);
if (!prom_chosen_node || prom_chosen_node == -1)
prom_halt();
@@ -90,8 +89,6 @@ void __init prom_init(void *cif_handler, void *cif_stack)
printk ("PROMLIB: Sun IEEE Boot Prom %s\n", buffer + bufadjust);
- prom_meminit();
-
/* Initialization successful. */
return;
diff --git a/arch/sparc64/prom/map.S b/arch/sparc64/prom/map.S
deleted file mode 100644
index 21b3f9c99ea7..000000000000
--- a/arch/sparc64/prom/map.S
+++ /dev/null
@@ -1,72 +0,0 @@
-/* $Id: map.S,v 1.2 1999/11/19 05:53:02 davem Exp $
- * map.S: Tricky coding required to fixup the kernel OBP maps
- * properly.
- *
- * Copyright (C) 1999 David S. Miller (davem@redhat.com)
- */
-
- .text
- .align 8192
- .globl prom_boot_page
-prom_boot_page:
-call_method:
- .asciz "call-method"
- .align 8
-map:
- .asciz "map"
- .align 8
-
- /* When we are invoked, our caller has remapped us to
- * page zero, therefore we must use PC relative addressing
- * for everything after we begin performing the unmap/map
- * calls.
- */
- .globl prom_remap
-prom_remap: /* %o0 = physpage, %o1 = virtpage, %o2 = mmu_ihandle */
- rd %pc, %g1
- srl %o2, 0, %o2 ! kill sign extension
- sethi %hi(p1275buf), %g2
- or %g2, %lo(p1275buf), %g2
- ldx [%g2 + 0x10], %g3 ! prom_cif_stack
- save %g3, -(192 + 128), %sp
- ldx [%g2 + 0x08], %l0 ! prom_cif_handler
- mov %g6, %i3
- mov %g4, %i4
- mov %g5, %i5
- flushw
-
- sethi %hi(prom_remap - call_method), %g7
- or %g7, %lo(prom_remap - call_method), %g7
- sub %g1, %g7, %l2 ! call-method string
- sethi %hi(prom_remap - map), %g7
- or %g7, %lo(prom_remap - map), %g7
- sub %g1, %g7, %l4 ! map string
-
- /* OK, map the 4MB region we really live at. */
- stx %l2, [%sp + 2047 + 128 + 0x00] ! call-method
- mov 7, %l5
- stx %l5, [%sp + 2047 + 128 + 0x08] ! num_args
- mov 1, %l5
- stx %l5, [%sp + 2047 + 128 + 0x10] ! num_rets
- stx %l4, [%sp + 2047 + 128 + 0x18] ! map
- stx %i2, [%sp + 2047 + 128 + 0x20] ! mmu_ihandle
- mov -1, %l5
- stx %l5, [%sp + 2047 + 128 + 0x28] ! mode == default
- sethi %hi(4 * 1024 * 1024), %l5
- stx %l5, [%sp + 2047 + 128 + 0x30] ! size
- stx %i1, [%sp + 2047 + 128 + 0x38] ! vaddr
- stx %g0, [%sp + 2047 + 128 + 0x40] ! filler
- stx %i0, [%sp + 2047 + 128 + 0x48] ! paddr
- call %l0
- add %sp, (2047 + 128), %o0 ! argument array
-
- /* Restore hard-coded globals. */
- mov %i3, %g6
- mov %i4, %g4
- mov %i5, %g5
-
- /* Wheee.... we are done. */
- ret
- restore
-
- .align 8192
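
For reference, the stack slots the deleted prom_remap filled by hand form an IEEE 1275 client-interface argument array for the MMU node's "map" method. Laid out as a struct (one cell per 8-byte slot, offsets as in the assembly above):

	struct p1275_map_args {
		const char *service;	/* 0x00: "call-method" */
		unsigned long nargs;	/* 0x08: 7 */
		unsigned long nrets;	/* 0x10: 1 */
		const char *method;	/* 0x18: "map" */
		unsigned long ihandle;	/* 0x20: mmu_ihandle */
		long mode;		/* 0x28: -1 (default) */
		unsigned long size;	/* 0x30: 4MB */
		unsigned long vaddr;	/* 0x38 */
		unsigned long filler;	/* 0x40: 0 */
		unsigned long paddr;	/* 0x48 */
		unsigned long ret;	/* result cell */
	};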
diff --git a/arch/sparc64/prom/memory.c b/arch/sparc64/prom/memory.c
deleted file mode 100644
index f4a8143e052c..000000000000
--- a/arch/sparc64/prom/memory.c
+++ /dev/null
@@ -1,152 +0,0 @@
-/* $Id: memory.c,v 1.5 1999/08/31 06:55:04 davem Exp $
- * memory.c: Prom routine for acquiring various bits of information
- * about RAM on the machine, both virtual and physical.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-
-#include <asm/openprom.h>
-#include <asm/oplib.h>
-
-/* This routine, for consistency, returns the ram parameters in the
- * V0 prom memory descriptor format. I choose this format because I
- * think it was the easiest to work with. I feel the religious
- * arguments now... ;) Also, I return the linked lists sorted to
- * prevent paging_init() upset stomach as I have not yet written
- * the pepto-bismol kernel module yet.
- */
-
-struct linux_prom64_registers prom_reg_memlist[64];
-struct linux_prom64_registers prom_reg_tmp[64];
-
-struct linux_mlist_p1275 prom_phys_total[64];
-struct linux_mlist_p1275 prom_prom_taken[64];
-struct linux_mlist_p1275 prom_phys_avail[64];
-
-struct linux_mlist_p1275 *prom_ptot_ptr = prom_phys_total;
-struct linux_mlist_p1275 *prom_ptak_ptr = prom_prom_taken;
-struct linux_mlist_p1275 *prom_pavl_ptr = prom_phys_avail;
-
-struct linux_mem_p1275 prom_memlist;
-
-
-/* Internal Prom library routine to sort a linux_mlist_p1275 memory
- * list. Used below in initialization.
- */
-static void __init
-prom_sortmemlist(struct linux_mlist_p1275 *thislist)
-{
- int swapi = 0;
- int i, mitr;
- unsigned long tmpaddr, tmpsize;
- unsigned long lowest;
-
- for(i=0; thislist[i].theres_more; i++) {
- lowest = thislist[i].start_adr;
- for(mitr = i+1; thislist[mitr-1].theres_more; mitr++)
- if(thislist[mitr].start_adr < lowest) {
- lowest = thislist[mitr].start_adr;
- swapi = mitr;
- }
- if(lowest == thislist[i].start_adr) continue;
- tmpaddr = thislist[swapi].start_adr;
- tmpsize = thislist[swapi].num_bytes;
- for(mitr = swapi; mitr > i; mitr--) {
- thislist[mitr].start_adr = thislist[mitr-1].start_adr;
- thislist[mitr].num_bytes = thislist[mitr-1].num_bytes;
- }
- thislist[i].start_adr = tmpaddr;
- thislist[i].num_bytes = tmpsize;
- }
-}
-
-/* Initialize the memory lists based upon the prom version. */
-void __init prom_meminit(void)
-{
- int node = 0;
- unsigned int iter, num_regs;
-
- node = prom_finddevice("/memory");
- num_regs = prom_getproperty(node, "available",
- (char *) prom_reg_memlist,
- sizeof(prom_reg_memlist));
- num_regs = (num_regs/sizeof(struct linux_prom64_registers));
- for(iter=0; iter<num_regs; iter++) {
- prom_phys_avail[iter].start_adr =
- prom_reg_memlist[iter].phys_addr;
- prom_phys_avail[iter].num_bytes =
- prom_reg_memlist[iter].reg_size;
- prom_phys_avail[iter].theres_more =
- &prom_phys_avail[iter+1];
- }
- prom_phys_avail[iter-1].theres_more = NULL;
-
- num_regs = prom_getproperty(node, "reg",
- (char *) prom_reg_memlist,
- sizeof(prom_reg_memlist));
- num_regs = (num_regs/sizeof(struct linux_prom64_registers));
- for(iter=0; iter<num_regs; iter++) {
- prom_phys_total[iter].start_adr =
- prom_reg_memlist[iter].phys_addr;
- prom_phys_total[iter].num_bytes =
- prom_reg_memlist[iter].reg_size;
- prom_phys_total[iter].theres_more =
- &prom_phys_total[iter+1];
- }
- prom_phys_total[iter-1].theres_more = NULL;
-
- node = prom_finddevice("/virtual-memory");
- num_regs = prom_getproperty(node, "available",
- (char *) prom_reg_memlist,
- sizeof(prom_reg_memlist));
- num_regs = (num_regs/sizeof(struct linux_prom64_registers));
-
- /* Convert available virtual areas to taken virtual
- * areas. First sort, then convert.
- */
- for(iter=0; iter<num_regs; iter++) {
- prom_prom_taken[iter].start_adr =
- prom_reg_memlist[iter].phys_addr;
- prom_prom_taken[iter].num_bytes =
- prom_reg_memlist[iter].reg_size;
- prom_prom_taken[iter].theres_more =
- &prom_prom_taken[iter+1];
- }
- prom_prom_taken[iter-1].theres_more = NULL;
-
- prom_sortmemlist(prom_prom_taken);
-
- /* Finally, convert. */
- for(iter=0; iter<num_regs; iter++) {
- prom_prom_taken[iter].start_adr =
- prom_prom_taken[iter].start_adr +
- prom_prom_taken[iter].num_bytes;
- prom_prom_taken[iter].num_bytes =
- prom_prom_taken[iter+1].start_adr -
- prom_prom_taken[iter].start_adr;
- }
- prom_prom_taken[iter-1].num_bytes =
- -1UL - prom_prom_taken[iter-1].start_adr;
-
- /* Sort the other two lists. */
- prom_sortmemlist(prom_phys_total);
- prom_sortmemlist(prom_phys_avail);
-
- /* Link all the lists into the top-level descriptor. */
- prom_memlist.p1275_totphys=&prom_ptot_ptr;
- prom_memlist.p1275_prommap=&prom_ptak_ptr;
- prom_memlist.p1275_available=&prom_pavl_ptr;
-}
-
-/* This returns a pointer to our library's internal p1275 format
- * memory descriptor.
- */
-struct linux_mem_p1275 *
-prom_meminfo(void)
-{
- return &prom_memlist;
-}
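
The subtle step in the removed prom_meminit() above is the "available
to taken" conversion: once the available virtual regions are sorted,
each PROM-taken region starts where an available region ends and runs
up to the start of the next one, and the final region is extended to
the top of the 64-bit address space. A minimal standalone sketch of
that inversion (struct and function names are hypothetical, mirroring
the fields of the removed linux_mlist_p1275 code):

	struct mlist {
		unsigned long start_adr;
		unsigned long num_bytes;
		struct mlist *theres_more;
	};

	/* Turn a sorted array of 'n' available regions into the regions
	 * the PROM has taken: the gaps between them, plus everything
	 * above the last region (-1UL being the top of the address
	 * space). Equivalent to the removed loop plus its final fixup.
	 */
	static void avail_to_taken(struct mlist *list, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			list[i].start_adr += list[i].num_bytes;
			if (i == n - 1)
				list[i].num_bytes =
					-1UL - list[i].start_adr;
			else
				list[i].num_bytes =
					list[i + 1].start_adr -
					list[i].start_adr;
		}
	}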
diff --git a/arch/sparc64/prom/misc.c b/arch/sparc64/prom/misc.c
index 19c44e97e9ee..87f5cfce23bb 100644
--- a/arch/sparc64/prom/misc.c
+++ b/arch/sparc64/prom/misc.c
@@ -17,14 +17,14 @@
#include <asm/system.h>
/* Reset and reboot the machine with the command 'bcommand'. */
-void prom_reboot(char *bcommand)
+void prom_reboot(const char *bcommand)
{
p1275_cmd("boot", P1275_ARG(0, P1275_ARG_IN_STRING) |
P1275_INOUT(1, 0), bcommand);
}
/* Forth evaluate the expression contained in 'fstring'. */
-void prom_feval(char *fstring)
+void prom_feval(const char *fstring)
{
if (!fstring || fstring[0] == 0)
return;
@@ -68,19 +68,11 @@ void prom_cmdline(void)
local_irq_restore(flags);
}
-#ifdef CONFIG_SMP
-extern void smp_promstop_others(void);
-#endif
-
/* Drop into the prom, but completely terminate the program.
* No chance of continuing.
*/
void prom_halt(void)
{
-#ifdef CONFIG_SMP
- smp_promstop_others();
- udelay(8000);
-#endif
again:
p1275_cmd("exit", P1275_INOUT(0, 0));
goto again; /* PROM is out to get me -DaveM */
@@ -88,10 +80,6 @@ again:
void prom_halt_power_off(void)
{
-#ifdef CONFIG_SMP
- smp_promstop_others();
- udelay(8000);
-#endif
p1275_cmd("SUNW,power-off", P1275_INOUT(0, 0));
/* if nothing else helps, we just halt */
@@ -148,21 +136,19 @@ void prom_set_trap_table(unsigned long tba)
p1275_cmd("SUNW,set-trap-table", P1275_INOUT(1, 0), tba);
}
-int mmu_ihandle_cache = 0;
-
int prom_get_mmu_ihandle(void)
{
int node, ret;
- if (mmu_ihandle_cache != 0)
- return mmu_ihandle_cache;
+ if (prom_mmu_ihandle_cache != 0)
+ return prom_mmu_ihandle_cache;
- node = prom_finddevice("/chosen");
- ret = prom_getint(node, "mmu");
+ node = prom_finddevice(prom_chosen_path);
+ ret = prom_getint(node, prom_mmu_name);
if (ret == -1 || ret == 0)
- mmu_ihandle_cache = -1;
+ prom_mmu_ihandle_cache = -1;
else
- mmu_ihandle_cache = ret;
+ prom_mmu_ihandle_cache = ret;
return ret;
}
@@ -190,7 +176,7 @@ long prom_itlb_load(unsigned long index,
unsigned long tte_data,
unsigned long vaddr)
{
- return p1275_cmd("call-method",
+ return p1275_cmd(prom_callmethod_name,
(P1275_ARG(0, P1275_ARG_IN_STRING) |
P1275_ARG(2, P1275_ARG_IN_64B) |
P1275_ARG(3, P1275_ARG_IN_64B) |
@@ -207,7 +193,7 @@ long prom_dtlb_load(unsigned long index,
unsigned long tte_data,
unsigned long vaddr)
{
- return p1275_cmd("call-method",
+ return p1275_cmd(prom_callmethod_name,
(P1275_ARG(0, P1275_ARG_IN_STRING) |
P1275_ARG(2, P1275_ARG_IN_64B) |
P1275_ARG(3, P1275_ARG_IN_64B) |
@@ -223,13 +209,13 @@ long prom_dtlb_load(unsigned long index,
int prom_map(int mode, unsigned long size,
unsigned long vaddr, unsigned long paddr)
{
- int ret = p1275_cmd("call-method",
+ int ret = p1275_cmd(prom_callmethod_name,
(P1275_ARG(0, P1275_ARG_IN_STRING) |
P1275_ARG(3, P1275_ARG_IN_64B) |
P1275_ARG(4, P1275_ARG_IN_64B) |
P1275_ARG(6, P1275_ARG_IN_64B) |
P1275_INOUT(7, 1)),
- "map",
+ prom_map_name,
prom_get_mmu_ihandle(),
mode,
size,
@@ -244,12 +230,12 @@ int prom_map(int mode, unsigned long size,
void prom_unmap(unsigned long size, unsigned long vaddr)
{
- p1275_cmd("call-method",
+ p1275_cmd(prom_callmethod_name,
(P1275_ARG(0, P1275_ARG_IN_STRING) |
P1275_ARG(2, P1275_ARG_IN_64B) |
P1275_ARG(3, P1275_ARG_IN_64B) |
P1275_INOUT(4, 0)),
- "unmap",
+ prom_unmap_name,
prom_get_mmu_ihandle(),
size,
vaddr);
@@ -258,7 +244,7 @@ void prom_unmap(unsigned long size, unsigned long vaddr)
/* Set aside physical memory which is not touched or modified
* across soft resets.
*/
-unsigned long prom_retain(char *name,
+unsigned long prom_retain(const char *name,
unsigned long pa_low, unsigned long pa_high,
long size, long align)
{
@@ -290,7 +276,7 @@ int prom_getunumber(int syndrome_code,
unsigned long phys_addr,
char *buf, int buflen)
{
- return p1275_cmd("call-method",
+ return p1275_cmd(prom_callmethod_name,
(P1275_ARG(0, P1275_ARG_IN_STRING) |
P1275_ARG(3, P1275_ARG_OUT_BUF) |
P1275_ARG(6, P1275_ARG_IN_64B) |
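
All of the call sites renamed in this file share one p1275_cmd()
calling convention: a service-name string, a format word flagging
which argument cells are strings, output buffers, or 64-bit values,
then the cells themselves. An annotated reading of the prom_unmap()
call above (the interpretation of the macros is inferred from these
call sites, not from their definitions):

	p1275_cmd(prom_callmethod_name,		  /* service: "call-method"   */
		  (P1275_ARG(0, P1275_ARG_IN_STRING) | /* cell 0 is a string  */
		   P1275_ARG(2, P1275_ARG_IN_64B) |    /* cell 2 is 64-bit    */
		   P1275_ARG(3, P1275_ARG_IN_64B) |    /* cell 3 is 64-bit    */
		   P1275_INOUT(4, 0)),		  /* 4 inputs, no results    */
		  prom_unmap_name,		  /* cell 0: method ("unmap") */
		  prom_get_mmu_ihandle(),	  /* cell 1: MMU ihandle      */
		  size,				  /* cell 2                   */
		  vaddr);			  /* cell 3                   */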
diff --git a/arch/sparc64/prom/p1275.c b/arch/sparc64/prom/p1275.c
index 59fe38bba39e..a5a7c5712028 100644
--- a/arch/sparc64/prom/p1275.c
+++ b/arch/sparc64/prom/p1275.c
@@ -46,7 +46,7 @@ static inline unsigned long spitfire_get_primary_context(void)
*/
DEFINE_SPINLOCK(prom_entry_lock);
-long p1275_cmd (char *service, long fmt, ...)
+long p1275_cmd(const char *service, long fmt, ...)
{
char *p, *q;
unsigned long flags;
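
With the service name now const char *, callers can hand p1275_cmd() a
string literal or a shared read-only name table without a cast. A
hedged sketch of a direct call (the cell layout for "getproplen" is an
assumption based on how the tree.c wrappers below use the service):

	/* Assumed layout: two input cells (node, property-name string),
	 * one result cell carrying the length.
	 */
	static int example_getproplen(int node)
	{
		return p1275_cmd("getproplen",
				 P1275_ARG(1, P1275_ARG_IN_STRING) |
				 P1275_INOUT(2, 1),
				 node, "reg");
	}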
diff --git a/arch/sparc64/prom/printf.c b/arch/sparc64/prom/printf.c
index a6df82cafa0d..660943ee4c2a 100644
--- a/arch/sparc64/prom/printf.c
+++ b/arch/sparc64/prom/printf.c
@@ -34,7 +34,7 @@ prom_write(const char *buf, unsigned int n)
}
void
-prom_printf(char *fmt, ...)
+prom_printf(const char *fmt, ...)
{
va_list args;
int i;
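
Only the signatures of prom_write() and prom_printf() appear in this
hunk, but the shape of such a routine is: format into a local buffer,
then push the bytes to the PROM console. A hedged sketch under that
assumption (buffer name and size are invented for illustration):

	void example_prom_printf(const char *fmt, ...)
	{
		char buf[512];	/* size is an assumption */
		va_list args;
		int i;

		va_start(args, fmt);
		i = vscnprintf(buf, sizeof(buf), fmt, args);
		va_end(args);
		prom_write(buf, i);
	}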
diff --git a/arch/sparc64/prom/tree.c b/arch/sparc64/prom/tree.c
index ccf73258ebf7..b1ff9e87dcc6 100644
--- a/arch/sparc64/prom/tree.c
+++ b/arch/sparc64/prom/tree.c
@@ -69,7 +69,7 @@ prom_getsibling(int node)
* Return -1 on error.
*/
__inline__ int
-prom_getproplen(int node, char *prop)
+prom_getproplen(int node, const char *prop)
{
if((!node) || (!prop)) return -1;
return p1275_cmd ("getproplen",
@@ -83,20 +83,20 @@ prom_getproplen(int node, char *prop)
* was successful the length will be returned, else -1 is returned.
*/
__inline__ int
-prom_getproperty(int node, char *prop, char *buffer, int bufsize)
+prom_getproperty(int node, const char *prop, char *buffer, int bufsize)
{
int plen;
plen = prom_getproplen(node, prop);
- if((plen > bufsize) || (plen == 0) || (plen == -1))
+ if ((plen > bufsize) || (plen == 0) || (plen == -1)) {
return -1;
- else {
+ } else {
/* Ok, things seem all right. */
- return p1275_cmd ("getprop",
- P1275_ARG(1,P1275_ARG_IN_STRING)|
- P1275_ARG(2,P1275_ARG_OUT_BUF)|
- P1275_INOUT(4, 1),
- node, prop, buffer, P1275_SIZE(plen));
+ return p1275_cmd(prom_getprop_name,
+ P1275_ARG(1,P1275_ARG_IN_STRING)|
+ P1275_ARG(2,P1275_ARG_OUT_BUF)|
+ P1275_INOUT(4, 1),
+ node, prop, buffer, P1275_SIZE(plen));
}
}
@@ -104,7 +104,7 @@ prom_getproperty(int node, char *prop, char *buffer, int bufsize)
* on failure.
*/
__inline__ int
-prom_getint(int node, char *prop)
+prom_getint(int node, const char *prop)
{
int intprop;
@@ -119,7 +119,7 @@ prom_getint(int node, char *prop)
*/
int
-prom_getintdefault(int node, char *property, int deflt)
+prom_getintdefault(int node, const char *property, int deflt)
{
int retval;
@@ -131,7 +131,7 @@ prom_getintdefault(int node, char *property, int deflt)
/* Acquire a boolean property, 1=TRUE 0=FALSE. */
int
-prom_getbool(int node, char *prop)
+prom_getbool(int node, const char *prop)
{
int retval;
@@ -145,7 +145,7 @@ prom_getbool(int node, char *prop)
* buffer.
*/
void
-prom_getstring(int node, char *prop, char *user_buf, int ubuf_size)
+prom_getstring(int node, const char *prop, char *user_buf, int ubuf_size)
{
int len;
@@ -160,7 +160,7 @@ prom_getstring(int node, char *prop, char *user_buf, int ubuf_size)
* YES = 1 NO = 0
*/
int
-prom_nodematch(int node, char *name)
+prom_nodematch(int node, const char *name)
{
char namebuf[128];
prom_getproperty(node, "name", namebuf, sizeof(namebuf));
@@ -172,7 +172,7 @@ prom_nodematch(int node, char *name)
* 'nodename'. Return node if successful, zero if not.
*/
int
-prom_searchsiblings(int node_start, char *nodename)
+prom_searchsiblings(int node_start, const char *nodename)
{
int thisnode, error;
@@ -294,7 +294,7 @@ prom_firstprop(int node, char *buffer)
* property types for this node.
*/
__inline__ char *
-prom_nextprop(int node, char *oprop, char *buffer)
+prom_nextprop(int node, const char *oprop, char *buffer)
{
char buf[32];
@@ -314,15 +314,17 @@ prom_nextprop(int node, char *oprop, char *buffer)
}
int
-prom_finddevice(char *name)
+prom_finddevice(const char *name)
{
- if(!name) return 0;
- return p1275_cmd ("finddevice", P1275_ARG(0,P1275_ARG_IN_STRING)|
- P1275_INOUT(1, 1),
- name);
+ if (!name)
+ return 0;
+ return p1275_cmd(prom_finddev_name,
+ P1275_ARG(0,P1275_ARG_IN_STRING)|
+ P1275_INOUT(1, 1),
+ name);
}
-int prom_node_has_property(int node, char *prop)
+int prom_node_has_property(int node, const char *prop)
{
char buf [32];
@@ -339,7 +341,7 @@ int prom_node_has_property(int node, char *prop)
* of 'size' bytes. Return the number of bytes the prom accepted.
*/
int
-prom_setprop(int node, char *pname, char *value, int size)
+prom_setprop(int node, const char *pname, char *value, int size)
{
if(size == 0) return 0;
if((pname == 0) || (value == 0)) return 0;
@@ -364,7 +366,7 @@ prom_inst2pkg(int inst)
* FIXME: Should work for v0 as well
*/
int
-prom_pathtoinode(char *path)
+prom_pathtoinode(const char *path)
{
int node, inst;
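
A usage sketch tying the constified accessors above together. The
"/openprom" path and "version" property are illustrative only; the
zero-on-failure check for prom_finddevice() follows its NULL-name
check, and prom_getproperty() returns -1 on error as its comment
states:

	static int read_prom_version(char *buf, int buflen)
	{
		int node = prom_finddevice("/openprom");

		if (!node)
			return -1;
		/* Returns the property length on success, -1 when the
		 * property is missing, empty, or larger than the buffer.
		 */
		return prom_getproperty(node, "version", buf, buflen);
	}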