Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig       18
-rw-r--r--  arch/arm/mm/cache-l2x0.c   6
-rw-r--r--  arch/arm/mm/mmu.c         14
-rw-r--r--  arch/arm/mm/proc-syms.c    2
-rw-r--r--  arch/arm/mm/proc-v7.S     33
5 files changed, 38 insertions(+), 35 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index e7904bc92c73..12161ae445da 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -345,13 +345,14 @@ config CPU_XSC3
# ARMv6
config CPU_V6
bool "Support ARM V6 processor"
- depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB || ARCH_OMAP2
+ depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB || ARCH_OMAP2 || ARCH_MX3
+ default y if ARCH_MX3
select CPU_32v6
select CPU_ABRT_EV6
select CPU_CACHE_V6
select CPU_CACHE_VIPT
select CPU_CP15_MMU
- select CPU_HAS_ASID
+ select CPU_HAS_ASID if MMU
select CPU_COPY_V6 if MMU
select CPU_TLB_V6 if MMU
@@ -359,7 +360,7 @@ config CPU_V6
config CPU_32v6K
bool "Support ARM V6K processor extensions" if !SMP
depends on CPU_V6
- default y if SMP
+ default y if SMP && !ARCH_MX3
help
Say Y here if your ARMv6 processor supports the 'K' extension.
This enables the kernel to use some instructions not present
@@ -377,7 +378,7 @@ config CPU_V7
select CPU_CACHE_V7
select CPU_CACHE_VIPT
select CPU_CP15_MMU
- select CPU_HAS_ASID
+ select CPU_HAS_ASID if MMU
select CPU_COPY_V6 if MMU
select CPU_TLB_V7 if MMU
@@ -405,6 +406,7 @@ config CPU_32v5
config CPU_32v6
bool
+ select TLS_REG_EMUL if !CPU_32v6K && !MMU
config CPU_32v7
bool
@@ -598,7 +600,7 @@ config CPU_DCACHE_SIZE
config CPU_DCACHE_WRITETHROUGH
bool "Force write through D-cache"
- depends on (CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_V6) && !CPU_DCACHE_DISABLE
+ depends on (CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020) && !CPU_DCACHE_DISABLE
default y if CPU_ARM925T
help
Say Y here to use the data cache in writethrough mode. Unless you
@@ -611,12 +613,6 @@ config CPU_CACHE_ROUND_ROBIN
Say Y here to use the predictable round-robin cache replacement
policy. Unless you specifically require this or are unsure, say N.
-config CPU_L2CACHE_DISABLE
- bool "Disable level 2 cache"
- depends on CPU_V7
- help
- Say Y here to disable the level 2 cache. If unsure, say N.
-
config CPU_BPREDICT_DISABLE
bool "Disable branch prediction"
depends on CPU_ARM1020 || CPU_V6 || CPU_XSC3 || CPU_V7
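[Note: each Kconfig bool above is just a CONFIG_* preprocessor symbol once the kernel is built, which is why dropping CPU_V6 from the CPU_DCACHE_WRITETHROUGH dependency changes which branch ARMv6 builds take in the mmu.c hunk further down. A minimal, hypothetical C sketch of that consumption pattern; the helper name is illustrative and not part of the patch.]

/* Hypothetical helper: shows how a Kconfig bool such as
 * CPU_DCACHE_WRITETHROUGH surfaces in C as an #ifdef on the
 * generated CONFIG_* symbol. */
static inline int dcache_forced_writethrough(void)
{
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	return 1;	/* option selected at configuration time */
#else
	return 0;
#endif
}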
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 08a36f1b35d2..b4e9b734e0bd 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -17,6 +17,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
+#include <linux/spinlock.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
@@ -25,14 +26,19 @@
#define CACHE_LINE_SIZE 32
static void __iomem *l2x0_base;
+static DEFINE_SPINLOCK(l2x0_lock);
static inline void sync_writel(unsigned long val, unsigned long reg,
unsigned long complete_mask)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&l2x0_lock, flags);
writel(val, l2x0_base + reg);
/* wait for the operation to complete */
while (readl(l2x0_base + reg) & complete_mask)
;
+ spin_unlock_irqrestore(&l2x0_lock, flags);
}
static inline void cache_sync(void)
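[Note: the cache-l2x0.c hunk above wraps the existing write-then-poll sequence in a spinlock taken with interrupts disabled, serialising concurrent maintenance operations on the L2 background registers. A minimal sketch of the resulting helper, assuming the usual ioremap()'d l2x0_base and the headers shown in the hunk.]

#include <linux/spinlock.h>
#include <asm/io.h>

static void __iomem *l2x0_base;		/* mapped elsewhere by the init code */
static DEFINE_SPINLOCK(l2x0_lock);

static inline void sync_writel(unsigned long val, unsigned long reg,
			       unsigned long complete_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);	/* serialise callers, block local IRQs */
	writel(val, l2x0_base + reg);
	while (readl(l2x0_base + reg) & complete_mask)
		;				/* wait for the operation to complete */
	spin_unlock_irqrestore(&l2x0_lock, flags);
}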
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 3b5e47dc0c97..e5d61ee3d4a1 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -114,6 +114,10 @@ static void __init early_cachepolicy(char **p)
}
if (i == ARRAY_SIZE(cache_policies))
printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
+ if (cpu_architecture() >= CPU_ARCH_ARMv6) {
+ printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
+ cachepolicy = CPOLICY_WRITEBACK;
+ }
flush_cache_all();
set_cr(cr_alignment);
}
@@ -252,13 +256,15 @@ static void __init build_mem_type_table(void)
int cpu_arch = cpu_architecture();
int i;
+ if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
- if (cachepolicy > CPOLICY_BUFFERED)
- cachepolicy = CPOLICY_BUFFERED;
+ if (cachepolicy > CPOLICY_BUFFERED)
+ cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
- if (cachepolicy > CPOLICY_WRITETHROUGH)
- cachepolicy = CPOLICY_WRITETHROUGH;
+ if (cachepolicy > CPOLICY_WRITETHROUGH)
+ cachepolicy = CPOLICY_WRITETHROUGH;
#endif
+ }
if (cpu_arch < CPU_ARCH_ARMv5) {
if (cachepolicy >= CPOLICY_WRITEALLOC)
cachepolicy = CPOLICY_WRITEBACK;
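[Note: reading the two mmu.c hunks together, early_cachepolicy() now forces write-back on ARMv6 and later, the compile-time CPU_DCACHE_DISABLE / CPU_DCACHE_WRITETHROUGH clamps apply only to earlier architectures, and pre-ARMv5 cores still cannot use write-allocate. A hedged consolidation of that decision logic as a standalone helper, assuming the CPOLICY_* constants are ordered as in the kernel's cache_policies[] table.]

/* Sketch only, not part of the patch: collapses the policy clamping
 * from both hunks into one function for clarity.  Assumes
 * CPOLICY_UNCACHED < CPOLICY_BUFFERED < CPOLICY_WRITETHROUGH <
 * CPOLICY_WRITEBACK < CPOLICY_WRITEALLOC. */
static int clamp_cachepolicy(int requested, int cpu_arch)
{
	int policy = requested;

	if (cpu_arch >= CPU_ARCH_ARMv6)
		return CPOLICY_WRITEBACK;	/* only write-back supported here */

#if defined(CONFIG_CPU_DCACHE_DISABLE)
	if (policy > CPOLICY_BUFFERED)
		policy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
	if (policy > CPOLICY_WRITETHROUGH)
		policy = CPOLICY_WRITETHROUGH;
#endif

	if (cpu_arch < CPU_ARCH_ARMv5 && policy >= CPOLICY_WRITEALLOC)
		policy = CPOLICY_WRITEBACK;	/* pre-ARMv5 cannot write-allocate */

	return policy;
}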
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 9f396b4fa0b7..2b5ba396e3a6 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -31,12 +31,14 @@ EXPORT_SYMBOL(__cpuc_coherent_kern_range);
EXPORT_SYMBOL(cpu_cache);
#endif
+#ifdef CONFIG_MMU
#ifndef MULTI_USER
EXPORT_SYMBOL(__cpu_clear_user_page);
EXPORT_SYMBOL(__cpu_copy_user_page);
#else
EXPORT_SYMBOL(cpu_user);
#endif
+#endif
/*
* No module should need to touch the TLB (and currently
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 718f4782ee8b..e0acc5ae6f6f 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -77,6 +77,7 @@ ENTRY(cpu_v7_dcache_clean_area)
* - we are not using split page tables
*/
ENTRY(cpu_v7_switch_mm)
+#ifdef CONFIG_MMU
mov r2, #0
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
orr r0, r0, #TTB_RGN_OC_WB @ mark PTWs outer cacheable, WB
@@ -86,6 +87,7 @@ ENTRY(cpu_v7_switch_mm)
isb
mcr p15, 0, r1, c13, c0, 1 @ set context ID
isb
+#endif
mov pc, lr
/*
@@ -109,6 +111,7 @@ ENTRY(cpu_v7_switch_mm)
* 1111 0 1 1 r/w r/w
*/
ENTRY(cpu_v7_set_pte_ext)
+#ifdef CONFIG_MMU
str r1, [r0], #-2048 @ linux version
bic r3, r1, #0x000003f0
@@ -136,6 +139,7 @@ ENTRY(cpu_v7_set_pte_ext)
str r3, [r0]
mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+#endif
mov pc, lr
cpu_v7_name:
@@ -169,6 +173,7 @@ __v7_setup:
mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
#endif
dsb
+#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r10, c2, c0, 2 @ TTB control register
orr r4, r4, #TTB_RGN_OC_WB @ mark PTWs outer cacheable, WB
@@ -176,21 +181,12 @@ __v7_setup:
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
mov r10, #0x1f @ domains 0, 1 = manager
mcr p15, 0, r10, c3, c0, 0 @ load domain access register
-#ifndef CONFIG_CPU_L2CACHE_DISABLE
- @ L2 cache configuration in the L2 aux control register
- mrc p15, 1, r10, c9, c0, 2
- bic r10, r10, #(1 << 16) @ L2 outer cache
- mcr p15, 1, r10, c9, c0, 2
- @ L2 cache is enabled in the aux control register
- mrc p15, 0, r10, c1, c0, 1
- orr r10, r10, #2
- mcr p15, 0, r10, c1, c0, 1
#endif
- mrc p15, 0, r0, c1, c0, 0 @ read control register
- ldr r10, cr1_clear @ get mask for bits to clear
- bic r0, r0, r10 @ clear bits them
- ldr r10, cr1_set @ get mask for bits to set
- orr r0, r0, r10 @ set them
+ adr r5, v7_crval
+ ldmia r5, {r5, r6}
+ mrc p15, 0, r0, c1, c0, 0 @ read control register
+ bic r0, r0, r5 @ clear them
+ orr r0, r0, r6 @ set them
mov pc, lr @ return to head.S:__ret
/*
@@ -199,12 +195,9 @@ __v7_setup:
* rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
* 0 110 0011 1.00 .111 1101 < we want
*/
- .type cr1_clear, #object
- .type cr1_set, #object
-cr1_clear:
- .word 0x0120c302
-cr1_set:
- .word 0x00c0387d
+ .type v7_crval, #object
+v7_crval:
+ crval clear=0x0120c302, mmuset=0x00c0387d, ucset=0x00c0187c
__v7_setup_stack:
.space 4 * 11 @ 11 registers