Diffstat (limited to 'arch/sparc64/kernel/entry.S')
-rw-r--r--  arch/sparc64/kernel/entry.S  263
1 file changed, 47 insertions(+), 216 deletions(-)
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 3e0badb820c5..11a848402fb1 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -30,164 +30,10 @@
.text
.align 32
- .globl sparc64_vpte_patchme1
- .globl sparc64_vpte_patchme2
-/*
- * On a second level vpte miss, check whether the original fault is to the OBP
- * range (note that this is only possible for an instruction miss; data misses
- * to the OBP range do not use vpte). If so, go back directly to the faulting
- * address: we want to read the tpc, since otherwise we have no way of knowing
- * the 8k-aligned faulting address when using a >8k kernel page size. This
- * also ensures that no vpte range addresses are dropped into the TLB while
- * OBP is executing (see the inherit_locked_prom_mappings() rant).
- */
-sparc64_vpte_nucleus:
- /* Load 0xf0000000, which is LOW_OBP_ADDRESS. */
- mov 0xf, %g5
- sllx %g5, 28, %g5
-
- /* Is addr >= LOW_OBP_ADDRESS? */
- cmp %g4, %g5
- blu,pn %xcc, sparc64_vpte_patchme1
- mov 0x1, %g5
-
- /* Load 0x100000000, which is HI_OBP_ADDRESS. */
- sllx %g5, 32, %g5
-
- /* Is addr < HI_OBP_ADDRESS? */
- cmp %g4, %g5
- blu,pn %xcc, obp_iaddr_patch
- nop
-
- /* These two instructions are patched by paging_init(). */
-sparc64_vpte_patchme1:
- sethi %hi(0), %g5
-sparc64_vpte_patchme2:
- or %g5, %lo(0), %g5
-
- /* With kernel PGD in %g5, branch back into dtlb_backend. */
- ba,pt %xcc, sparc64_kpte_continue
- andn %g1, 0x3, %g1 /* Finish PMD offset adjustment. */
-
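
The test above (and the identical one in kvmap further down) is an unsigned range check against the OBP window [0xf0000000, 0x100000000). A minimal C sketch of the same predicate, with the constants as named in the code's own comments and a hypothetical helper name:

#include <stdint.h>

/* Constants as built by the mov/sllx pairs above. */
#define LOW_OBP_ADDRESS	0x00000000f0000000UL	/* mov 0xf; sllx 28 */
#define HI_OBP_ADDRESS	0x0000000100000000UL	/* mov 0x1; sllx 32 */

/* Hypothetical helper: nonzero when vaddr falls inside the OBP window. */
static inline int in_obp_range(uint64_t vaddr)
{
	return vaddr >= LOW_OBP_ADDRESS && vaddr < HI_OBP_ADDRESS;
}
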
-vpte_noent:
- /* Restore previous TAG_ACCESS; %g5 is zero, and we will
- * skip over the trap instruction so that the top level
- * TLB miss handler will think this %g5 value is just an
- * invalid PTE, thus branching to full fault processing.
- */
- mov TLB_SFSR, %g1
- stxa %g4, [%g1 + %g1] ASI_DMMU
- done
-
- .globl obp_iaddr_patch
-obp_iaddr_patch:
- /* These two instructions patched by inherit_prom_mappings(). */
- sethi %hi(0), %g5
- or %g5, %lo(0), %g5
-
- /* Behave as if we are at TL0. */
- wrpr %g0, 1, %tl
- rdpr %tpc, %g4 /* Find original faulting iaddr */
- srlx %g4, 13, %g4 /* Throw out context bits */
- sllx %g4, 13, %g4 /* g4 has vpn + ctx0 now */
-
- /* Restore previous TAG_ACCESS. */
- mov TLB_SFSR, %g1
- stxa %g4, [%g1 + %g1] ASI_IMMU
-
- /* Get PMD offset. */
- srlx %g4, 23, %g6
- and %g6, 0x7ff, %g6
- sllx %g6, 2, %g6
-
- /* Load PMD, is it valid? */
- lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
- brz,pn %g5, longpath
- sllx %g5, 11, %g5
-
- /* Get PTE offset. */
- srlx %g4, 13, %g6
- and %g6, 0x3ff, %g6
- sllx %g6, 3, %g6
-
- /* Load PTE. */
- ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
- brgez,pn %g5, longpath
- nop
-
- /* TLB load and return from trap. */
- stxa %g5, [%g0] ASI_ITLB_DATA_IN
- retry
-
- .globl obp_daddr_patch
-obp_daddr_patch:
- /* These two instructions patched by inherit_prom_mappings(). */
- sethi %hi(0), %g5
- or %g5, %lo(0), %g5
-
- /* Get PMD offset. */
- srlx %g4, 23, %g6
- and %g6, 0x7ff, %g6
- sllx %g6, 2, %g6
-
- /* Load PMD, is it valid? */
- lduwa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
- brz,pn %g5, longpath
- sllx %g5, 11, %g5
-
- /* Get PTE offset. */
- srlx %g4, 13, %g6
- and %g6, 0x3ff, %g6
- sllx %g6, 3, %g6
-
- /* Load PTE. */
- ldxa [%g5 + %g6] ASI_PHYS_USE_EC, %g5
- brgez,pn %g5, longpath
- nop
-
- /* TLB load and return from trap. */
- stxa %g5, [%g0] ASI_DTLB_DATA_IN
- retry
-
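
Both OBP-miss walkers above use the same two-level layout: a PMD table of 32-bit entries indexed by address bits [33:23], then a PTE table of 64-bit entries indexed by bits [22:13]. The offset arithmetic done with srlx/and/sllx, transcribed into C (a sketch; helper names hypothetical):

#include <stdint.h>

static inline uint64_t pmd_offset_bytes(uint64_t vaddr)
{
	return ((vaddr >> 23) & 0x7ff) << 2;	/* 11-bit index, 4-byte PMDs */
}

static inline uint64_t pte_offset_bytes(uint64_t vaddr)
{
	return ((vaddr >> 13) & 0x3ff) << 3;	/* 10-bit index, 8-byte PTEs */
}

/* A 32-bit PMD entry holds a physical table address >> 11 (see sllx 11). */
static inline uint64_t pte_table_paddr(uint32_t pmd_val)
{
	return (uint64_t)pmd_val << 11;
}
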
-/*
- * On a first level data miss, check whether this is to the OBP range (note
- * that such accesses can be made by the PROM, as well as by the kernel using
- * prom_getproperty on "address"), and if so, do not use vpte access ...
- * rather, use the information saved during inherit_prom_mappings() with the
- * 8k page size.
- */
-kvmap:
- /* Load 0xf0000000, which is LOW_OBP_ADDRESS. */
- mov 0xf, %g5
- sllx %g5, 28, %g5
-
- /* Is addr >= LOW_OBP_ADDRESS? */
- cmp %g4, %g5
- blu,pn %xcc, vmalloc_addr
- mov 0x1, %g5
-
- /* Load 0x100000000, which is HI_OBP_ADDRESS. */
- sllx %g5, 32, %g5
-
- /* Is addr < HI_OBP_ADDRESS? */
- cmp %g4, %g5
- blu,pn %xcc, obp_daddr_patch
- nop
-
-vmalloc_addr:
- /* If we get here, a vmalloc address was accessed; load the kernel VPTE. */
- ldxa [%g3 + %g6] ASI_N, %g5
- brgez,pn %g5, longpath
- nop
-
- /* PTE is valid, load into TLB and return from trap. */
- stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
- retry
-
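
vmalloc_addr above indexes a linear "virtual PTE" table: %g3 holds the table base, %g6 an offset precomputed by the TLB-miss front end from the faulting page number, and the brgez test rejects entries whose valid bit (bit 63) is clear. Roughly, in C (a sketch under those assumptions):

#include <stdint.h>

typedef uint64_t pte_t;

/* Hypothetical sketch: fetch the PTE for an 8k page from the linear VPTE
 * table; a PTE is valid when bit 63 is set, i.e. negative when signed. */
static int vpte_lookup(const pte_t *vpte_base, uint64_t vaddr, pte_t *out)
{
	pte_t pte = vpte_base[vaddr >> 13];	/* one entry per 8k page */

	*out = pte;
	return (int64_t)pte < 0;	/* inverse of brgez,pn ... longpath */
}
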
/* This is trivial with the new code... */
.globl do_fpdis
do_fpdis:
- sethi %hi(TSTATE_PEF), %g4 ! IEU0
+ sethi %hi(TSTATE_PEF), %g4
rdpr %tstate, %g5
andcc %g5, %g4, %g0
be,pt %xcc, 1f
@@ -204,18 +50,18 @@ do_fpdis:
add %g0, %g0, %g0
ba,a,pt %xcc, rtrap_clr_l6
-1: ldub [%g6 + TI_FPSAVED], %g5 ! Load Group
- wr %g0, FPRS_FEF, %fprs ! LSU Group+4bubbles
- andcc %g5, FPRS_FEF, %g0 ! IEU1 Group
- be,a,pt %icc, 1f ! CTI
- clr %g7 ! IEU0
- ldx [%g6 + TI_GSR], %g7 ! Load Group
-1: andcc %g5, FPRS_DL, %g0 ! IEU1
- bne,pn %icc, 2f ! CTI
- fzero %f0 ! FPA
- andcc %g5, FPRS_DU, %g0 ! IEU1 Group
- bne,pn %icc, 1f ! CTI
- fzero %f2 ! FPA
+1: ldub [%g6 + TI_FPSAVED], %g5
+ wr %g0, FPRS_FEF, %fprs
+ andcc %g5, FPRS_FEF, %g0
+ be,a,pt %icc, 1f
+ clr %g7
+ ldx [%g6 + TI_GSR], %g7
+1: andcc %g5, FPRS_DL, %g0
+ bne,pn %icc, 2f
+ fzero %f0
+ andcc %g5, FPRS_DU, %g0
+ bne,pn %icc, 1f
+ fzero %f2
faddd %f0, %f2, %f4
fmuld %f0, %f2, %f6
faddd %f0, %f2, %f8
@@ -251,15 +97,17 @@ do_fpdis:
faddd %f0, %f2, %f4
fmuld %f0, %f2, %f6
ldxa [%g3] ASI_DMMU, %g5
-cplus_fptrap_insn_1:
- sethi %hi(0), %g2
+ sethi %hi(sparc64_kern_sec_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
stxa %g2, [%g3] ASI_DMMU
membar #Sync
add %g6, TI_FPREGS + 0xc0, %g2
faddd %f0, %f2, %f8
fmuld %f0, %f2, %f10
- ldda [%g1] ASI_BLK_S, %f32 ! grrr, where is ASI_BLK_NUCLEUS 8-(
+ membar #Sync
+ ldda [%g1] ASI_BLK_S, %f32
ldda [%g2] ASI_BLK_S, %f48
+ membar #Sync
faddd %f0, %f2, %f12
fmuld %f0, %f2, %f14
faddd %f0, %f2, %f16
@@ -270,7 +118,6 @@ cplus_fptrap_insn_1:
fmuld %f0, %f2, %f26
faddd %f0, %f2, %f28
fmuld %f0, %f2, %f30
- membar #Sync
b,pt %xcc, fpdis_exit
nop
2: andcc %g5, FPRS_DU, %g0
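
The sethi/ldx pair introduced above (and at the three sites below) replaces the boot-time instruction patching that cplus_fptrap_insn_* and cheetah_plus_patch_fpdis used to do: the kernel's secondary context number now lives in an ordinary variable. In C terms (a sketch; the unsigned long type is assumed from the 64-bit ldx):

/* sethi %hi(sym), %g2; ldx [%g2 + %lo(sym)], %g2   ==   g2 = sym; */
extern unsigned long sparc64_kern_sec_context;

static unsigned long kernel_secondary_ctx(void)
{
	return sparc64_kern_sec_context;
}
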
@@ -280,15 +127,17 @@ cplus_fptrap_insn_1:
fzero %f34
ldxa [%g3] ASI_DMMU, %g5
add %g6, TI_FPREGS, %g1
-cplus_fptrap_insn_2:
- sethi %hi(0), %g2
+ sethi %hi(sparc64_kern_sec_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
stxa %g2, [%g3] ASI_DMMU
membar #Sync
add %g6, TI_FPREGS + 0x40, %g2
faddd %f32, %f34, %f36
fmuld %f32, %f34, %f38
- ldda [%g1] ASI_BLK_S, %f0 ! grrr, where is ASI_BLK_NUCLEUS 8-(
+ membar #Sync
+ ldda [%g1] ASI_BLK_S, %f0
ldda [%g2] ASI_BLK_S, %f16
+ membar #Sync
faddd %f32, %f34, %f40
fmuld %f32, %f34, %f42
faddd %f32, %f34, %f44
@@ -301,18 +150,18 @@ cplus_fptrap_insn_2:
fmuld %f32, %f34, %f58
faddd %f32, %f34, %f60
fmuld %f32, %f34, %f62
- membar #Sync
ba,pt %xcc, fpdis_exit
nop
3: mov SECONDARY_CONTEXT, %g3
add %g6, TI_FPREGS, %g1
ldxa [%g3] ASI_DMMU, %g5
-cplus_fptrap_insn_3:
- sethi %hi(0), %g2
+ sethi %hi(sparc64_kern_sec_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
stxa %g2, [%g3] ASI_DMMU
membar #Sync
mov 0x40, %g2
- ldda [%g1] ASI_BLK_S, %f0 ! grrr, where is ASI_BLK_NUCLEUS 8-(
+ membar #Sync
+ ldda [%g1] ASI_BLK_S, %f0
ldda [%g1 + %g2] ASI_BLK_S, %f16
add %g1, 0x80, %g1
ldda [%g1] ASI_BLK_S, %f32
@@ -473,8 +322,8 @@ do_fptrap_after_fsr:
stx %g3, [%g6 + TI_GSR]
mov SECONDARY_CONTEXT, %g3
ldxa [%g3] ASI_DMMU, %g5
-cplus_fptrap_insn_4:
- sethi %hi(0), %g2
+ sethi %hi(sparc64_kern_sec_context), %g2
+ ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
stxa %g2, [%g3] ASI_DMMU
membar #Sync
add %g6, TI_FPREGS, %g2
@@ -495,45 +344,17 @@ cplus_fptrap_insn_4:
ba,pt %xcc, etrap
wr %g0, 0, %fprs
-cplus_fptrap_1:
- sethi %hi(CTX_CHEETAH_PLUS_CTX0), %g2
-
- .globl cheetah_plus_patch_fpdis
-cheetah_plus_patch_fpdis:
- /* We configure the dTLB512_0 for 4MB pages and the
- * dTLB512_1 for 8K pages when in context zero.
- */
- sethi %hi(cplus_fptrap_1), %o0
- lduw [%o0 + %lo(cplus_fptrap_1)], %o1
-
- set cplus_fptrap_insn_1, %o2
- stw %o1, [%o2]
- flush %o2
- set cplus_fptrap_insn_2, %o2
- stw %o1, [%o2]
- flush %o2
- set cplus_fptrap_insn_3, %o2
- stw %o1, [%o2]
- flush %o2
- set cplus_fptrap_insn_4, %o2
- stw %o1, [%o2]
- flush %o2
-
- retl
- nop
-
/* The registers for cross calls will be:
*
* DATA 0: [low 32-bits] Address of function to call, jmp to this
* [high 32-bits] MMU Context Argument 0, place in %g5
- * DATA 1: Address Argument 1, place in %g6
+ * DATA 1: Address Argument 1, place in %g1
* DATA 2: Address Argument 2, place in %g7
*
* With this method we can do most of the cross-call tlb/cache
* flushing very quickly.
*
- * Current CPU's IRQ worklist table is locked into %g1,
- * don't touch.
+ * Current CPU's IRQ worklist table is locked into %g6, don't touch.
*/
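
A structural view of the three cross-call data words described above (the struct itself is illustrative only; the register mapping is taken from the comment):

#include <stdint.h>

struct xcall_payload {
	uint64_t data0;	/* [31:0] PC to jump to; [63:32] MMU ctx arg -> %g5 */
	uint64_t data1;	/* address argument 1 -> %g1 */
	uint64_t data2;	/* address argument 2 -> %g7 */
};
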
.text
.align 32
@@ -1007,13 +828,14 @@ cheetah_plus_dcpe_trap_vector:
nop
do_cheetah_plus_data_parity:
- ba,pt %xcc, etrap
+ rdpr %pil, %g2
+ wrpr %g0, 15, %pil
+ ba,pt %xcc, etrap_irq
rd %pc, %g7
mov 0x0, %o0
call cheetah_plus_parity_error
add %sp, PTREGS_OFF, %o1
- ba,pt %xcc, rtrap
- clr %l6
+ ba,a,pt %xcc, rtrap_irq
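
The rewritten entry raises PIL to 15 and goes through etrap_irq/rtrap_irq, so the C handler runs with interrupts blocked. Its call signature, as inferred from the %o0/%o1 setup here and in the I-cache path below (0 = D-cache parity, 1 = I-cache parity):

struct pt_regs;

/* Inferred from the call sites; %o1 carries the saved register frame. */
void cheetah_plus_parity_error(int type, struct pt_regs *regs);
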
cheetah_plus_dcpe_trap_vector_tl1:
membar #Sync
@@ -1037,13 +859,14 @@ cheetah_plus_icpe_trap_vector:
nop
do_cheetah_plus_insn_parity:
- ba,pt %xcc, etrap
+ rdpr %pil, %g2
+ wrpr %g0, 15, %pil
+ ba,pt %xcc, etrap_irq
rd %pc, %g7
mov 0x1, %o0
call cheetah_plus_parity_error
add %sp, PTREGS_OFF, %o1
- ba,pt %xcc, rtrap
- clr %l6
+ ba,a,pt %xcc, rtrap_irq
cheetah_plus_icpe_trap_vector_tl1:
membar #Sync
@@ -1076,6 +899,10 @@ do_dcpe_tl1:
nop
wrpr %g1, %tl ! Restore original trap level
do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
+ sethi %hi(dcache_parity_tl1_occurred), %g2
+ lduw [%g2 + %lo(dcache_parity_tl1_occurred)], %g1
+ add %g1, 1, %g1
+ stw %g1, [%g2 + %lo(dcache_parity_tl1_occurred)]
/* Reset D-cache parity */
sethi %hi(1 << 16), %g1 ! D-cache size
mov (1 << 5), %g2 ! D-cache line size
@@ -1122,6 +949,10 @@ do_icpe_tl1:
nop
wrpr %g1, %tl ! Restore original trap level
do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
+ sethi %hi(icache_parity_tl1_occurred), %g2
+ lduw [%g2 + %lo(icache_parity_tl1_occurred)], %g1
+ add %g1, 1, %g1
+ stw %g1, [%g2 + %lo(icache_parity_tl1_occurred)]
/* Flush I-cache */
sethi %hi(1 << 15), %g1 ! I-cache size
mov (1 << 5), %g2 ! I-cache line size
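
The two nonfatal paths above now keep per-flavor event counts; the lduw/stw pairs imply 32-bit counters. A matching C-side sketch (the real definitions live elsewhere in the tree), with each four-instruction sethi/lduw/add/stw sequence reducing to a plain increment:

extern unsigned int dcache_parity_tl1_occurred;
extern unsigned int icache_parity_tl1_occurred;

/* C equivalent of the counter bump in do_dcpe_tl1_nonfatal. */
static inline void note_dcache_parity_tl1(void)
{
	dcache_parity_tl1_occurred++;
}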