From 9f4f04ba2b117a5c741d019629d7ffccdc621122 Mon Sep 17 00:00:00 2001
From: Joakim Tjernlund
Date: Tue, 29 Dec 2009 05:10:58 +0000
Subject: powerpc/8xx: Always pin kernel instruction TLB

Various kernel asm modifies SRR0/SRR1 just before executing a rfi.
If such code crosses a page boundary you risk a TLB miss which will
clobber SRR0/SRR1. Avoid this by always pinning kernel instruction
TLB space.

Signed-off-by: Joakim Tjernlund
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/head_8xx.S | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'arch/powerpc/kernel/head_8xx.S')

diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 678f98cd5e64..a2ed3422fa3d 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -768,12 +768,12 @@ start_here:
  */
 initial_mmu:
 	tlbia			/* Invalidate all TLB entries */
-#ifdef CONFIG_PIN_TLB
+/* Always pin the first 8 MB ITLB to prevent ITLB
+   misses while mucking around with SRR0/SRR1 in asm
+*/
 	lis	r8, MI_RSV4I@h
 	ori	r8, r8, 0x1c00
-#else
-	li	r8, 0
-#endif
+
 	mtspr	SPRN_MI_CTR, r8	/* Set instruction MMU control */
 
 #ifdef CONFIG_PIN_TLB
--
cgit v1.2.1
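
For context, the hazard the commit message describes looks roughly like the
minimal sketch below. It is illustrative only, not taken from head_8xx.S; the
registers r10/r11 are arbitrary. rfi consumes SRR0 (return address) and SRR1
(MSR image), and any exception taken in between, including an instruction TLB
miss, overwrites both:

	mtspr	SPRN_SRR0, r11		/* return address consumed by rfi */
	mtspr	SPRN_SRR1, r10		/* MSR image consumed by rfi */
	/* if the next instruction sits on an unmapped page, the ITLB miss
	   exception taken here rewrites SRR0/SRR1 before rfi runs */
	rfi

With the start of kernel text always pinned in the ITLB, such sequences can no
longer take an instruction TLB miss, so the rfi sees the intended SRR0/SRR1
values regardless of CONFIG_PIN_TLB.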