Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig              |  65
-rw-r--r--  arch/arm/mm/cache-feroceon-l2.c  |   4
-rw-r--r--  arch/arm/mm/cache-xsc3l2.c       |   4
-rw-r--r--  arch/arm/mm/mmu.c                | 119
-rw-r--r--  arch/arm/mm/pgd.c                |   2
-rw-r--r--  arch/arm/mm/proc-v7.S            |  12
-rw-r--r--  arch/arm/mm/proc-xsc3.S          |   2
7 files changed, 110 insertions, 98 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index ab5f7a21350b..cf44de512830 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -10,8 +10,7 @@ config CPU_32
# ARM610
config CPU_ARM610
- bool "Support ARM610 processor"
- depends on ARCH_RPC
+ bool "Support ARM610 processor" if ARCH_RPC
select CPU_32v3
select CPU_CACHE_V3
select CPU_CACHE_VIVT
@@ -43,8 +42,7 @@ config CPU_ARM7TDMI
# ARM710
config CPU_ARM710
- bool "Support ARM710 processor" if !ARCH_CLPS7500 && ARCH_RPC
- default y if ARCH_CLPS7500
+ bool "Support ARM710 processor" if ARCH_RPC
select CPU_32v3
select CPU_CACHE_V3
select CPU_CACHE_VIVT
@@ -63,8 +61,7 @@ config CPU_ARM710
# ARM720T
config CPU_ARM720T
- bool "Support ARM720T processor" if !ARCH_CLPS711X && !ARCH_L7200 && !ARCH_CDB89712 && ARCH_INTEGRATOR
- default y if ARCH_CLPS711X || ARCH_L7200 || ARCH_CDB89712 || ARCH_H720X
+ bool "Support ARM720T processor" if ARCH_INTEGRATOR
select CPU_32v4T
select CPU_ABRT_LV4T
select CPU_PABRT_NOIFAR
@@ -114,9 +111,7 @@ config CPU_ARM9TDMI
# ARM920T
config CPU_ARM920T
- bool "Support ARM920T processor"
- depends on ARCH_EP93XX || ARCH_INTEGRATOR || CPU_S3C2410 || CPU_S3C2440 || CPU_S3C2442 || ARCH_IMX || ARCH_AAEC2000 || ARCH_AT91RM9200
- default y if CPU_S3C2410 || CPU_S3C2440 || CPU_S3C2442 || ARCH_AT91RM9200
+ bool "Support ARM920T processor" if ARCH_INTEGRATOR
select CPU_32v4T
select CPU_ABRT_EV4T
select CPU_PABRT_NOIFAR
@@ -138,8 +133,6 @@ config CPU_ARM920T
# ARM922T
config CPU_ARM922T
bool "Support ARM922T processor" if ARCH_INTEGRATOR
- depends on ARCH_LH7A40X || ARCH_INTEGRATOR || ARCH_KS8695
- default y if ARCH_LH7A40X || ARCH_KS8695
select CPU_32v4T
select CPU_ABRT_EV4T
select CPU_PABRT_NOIFAR
@@ -159,8 +152,6 @@ config CPU_ARM922T
# ARM925T
config CPU_ARM925T
bool "Support ARM925T processor" if ARCH_OMAP1
- depends on ARCH_OMAP15XX
- default y if ARCH_OMAP15XX
select CPU_32v4T
select CPU_ABRT_EV4T
select CPU_PABRT_NOIFAR
@@ -179,22 +170,7 @@ config CPU_ARM925T
# ARM926T
config CPU_ARM926T
- bool "Support ARM926T processor"
- depends on ARCH_INTEGRATOR || ARCH_VERSATILE_PB || \
- MACH_VERSATILE_AB || ARCH_OMAP730 || \
- ARCH_OMAP16XX || MACH_REALVIEW_EB || \
- ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || \
- ARCH_AT91SAM9260 || ARCH_AT91SAM9261 || \
- ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || \
- ARCH_AT91SAM9G20 || ARCH_AT91CAP9 || \
- ARCH_NS9XXX || ARCH_DAVINCI || ARCH_MX2
- default y if ARCH_VERSATILE_PB || MACH_VERSATILE_AB || \
- ARCH_OMAP730 || ARCH_OMAP16XX || \
- ARCH_PNX4008 || ARCH_NETX || CPU_S3C2412 || \
- ARCH_AT91SAM9260 || ARCH_AT91SAM9261 || \
- ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || \
- ARCH_AT91SAM9G20 || ARCH_AT91CAP9 || \
- ARCH_NS9XXX || ARCH_DAVINCI || ARCH_MX2
+ bool "Support ARM926T processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB
select CPU_32v5
select CPU_ABRT_EV5TJ
select CPU_PABRT_NOIFAR
@@ -247,8 +223,7 @@ config CPU_ARM946E
# ARM1020 - needs validating
config CPU_ARM1020
- bool "Support ARM1020T (rev 0) processor"
- depends on ARCH_INTEGRATOR
+ bool "Support ARM1020T (rev 0) processor" if ARCH_INTEGRATOR
select CPU_32v5
select CPU_ABRT_EV4T
select CPU_PABRT_NOIFAR
@@ -266,8 +241,7 @@ config CPU_ARM1020
# ARM1020E - needs validating
config CPU_ARM1020E
- bool "Support ARM1020E processor"
- depends on ARCH_INTEGRATOR
+ bool "Support ARM1020E processor" if ARCH_INTEGRATOR
select CPU_32v5
select CPU_ABRT_EV4T
select CPU_PABRT_NOIFAR
@@ -280,8 +254,7 @@ config CPU_ARM1020E
# ARM1022E
config CPU_ARM1022
- bool "Support ARM1022E processor"
- depends on ARCH_INTEGRATOR
+ bool "Support ARM1022E processor" if ARCH_INTEGRATOR
select CPU_32v5
select CPU_ABRT_EV4T
select CPU_PABRT_NOIFAR
@@ -299,8 +272,7 @@ config CPU_ARM1022
# ARM1026EJ-S
config CPU_ARM1026
- bool "Support ARM1026EJ-S processor"
- depends on ARCH_INTEGRATOR
+ bool "Support ARM1026EJ-S processor" if ARCH_INTEGRATOR
select CPU_32v5
select CPU_ABRT_EV5T # But need Jazelle, but EV5TJ ignores bit 10
select CPU_PABRT_NOIFAR
@@ -317,8 +289,7 @@ config CPU_ARM1026
# SA110
config CPU_SA110
- bool "Support StrongARM(R) SA-110 processor" if !ARCH_EBSA110 && !FOOTBRIDGE && !ARCH_TBOX && !ARCH_SHARK && !ARCH_NEXUSPCI && ARCH_RPC
- default y if ARCH_EBSA110 || FOOTBRIDGE || ARCH_TBOX || ARCH_SHARK || ARCH_NEXUSPCI
+ bool "Support StrongARM(R) SA-110 processor" if ARCH_RPC
select CPU_32v3 if ARCH_RPC
select CPU_32v4 if !ARCH_RPC
select CPU_ABRT_EV4
@@ -340,8 +311,6 @@ config CPU_SA110
# SA1100
config CPU_SA1100
bool
- depends on ARCH_SA1100
- default y
select CPU_32v4
select CPU_ABRT_EV4
select CPU_PABRT_NOIFAR
@@ -353,8 +322,6 @@ config CPU_SA1100
# XScale
config CPU_XSCALE
bool
- depends on ARCH_IOP32X || ARCH_IOP33X || PXA25x || PXA27x || ARCH_IXP4XX || ARCH_IXP2000
- default y
select CPU_32v5
select CPU_ABRT_EV5T
select CPU_PABRT_NOIFAR
@@ -365,8 +332,6 @@ config CPU_XSCALE
# XScale Core Version 3
config CPU_XSC3
bool
- depends on ARCH_IXP23XX || ARCH_IOP13XX || PXA3xx
- default y
select CPU_32v5
select CPU_ABRT_EV5T
select CPU_PABRT_NOIFAR
@@ -378,8 +343,6 @@ config CPU_XSC3
# Feroceon
config CPU_FEROCEON
bool
- depends on ARCH_ORION5X || ARCH_LOKI || ARCH_KIRKWOOD || ARCH_MV78XX0
- default y
select CPU_32v5
select CPU_ABRT_EV5T
select CPU_PABRT_NOIFAR
@@ -399,10 +362,7 @@ config CPU_FEROCEON_OLD_ID
# ARMv6
config CPU_V6
- bool "Support ARM V6 processor"
- depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB || ARCH_OMAP2 || ARCH_MX3 || ARCH_MSM || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176
- default y if ARCH_MX3
- default y if ARCH_MSM
+ bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB
select CPU_32v6
select CPU_ABRT_EV6
select CPU_PABRT_NOIFAR
@@ -427,8 +387,7 @@ config CPU_32v6K
# ARMv7
config CPU_V7
- bool "Support ARM V7 processor"
- depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB || ARCH_OMAP3
+ bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB
select CPU_32v6K
select CPU_32v7
select CPU_ABRT_EV7
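
The pattern repeated throughout this Kconfig is the same: the old unconditional prompt with its long "depends on"/"default y" platform lists is collapsed into a prompt that is only user-visible on the reference platforms (ARCH_INTEGRATOR, ARCH_RPC, MACH_REALVIEW_EB and friends), and every other platform is now expected to pull its CPU support in via "select" from its own machine option. A hypothetical machine entry, purely illustrative and not part of the patch, would look like this:

	config MACH_EXAMPLE_BOARD
		bool "Example ARM920T based board"
		select CPU_ARM920T
		help
		  Hypothetical board entry showing how a platform now
		  enables its CPU support itself instead of relying on
		  "depends on"/"default y" in arch/arm/mm/Kconfig.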
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 13cdae8b0d44..80cd207cbaea 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -150,7 +150,7 @@ static void feroceon_l2_inv_range(unsigned long start, unsigned long end)
/*
* Clean and invalidate partial last cache line.
*/
- if (end & (CACHE_LINE_SIZE - 1)) {
+ if (start < end && end & (CACHE_LINE_SIZE - 1)) {
l2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
end &= ~(CACHE_LINE_SIZE - 1);
}
@@ -158,7 +158,7 @@ static void feroceon_l2_inv_range(unsigned long start, unsigned long end)
/*
* Invalidate all full cache lines between 'start' and 'end'.
*/
- while (start != end) {
+ while (start < end) {
unsigned long range_end = calc_range_end(start, end);
l2_inv_pa_range(start, range_end - CACHE_LINE_SIZE);
start = range_end;
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index 10b1bae1a258..464de893a988 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -98,7 +98,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
/*
* Clean and invalidate partial last cache line.
*/
- if (end & (CACHE_LINE_SIZE - 1)) {
+ if (start < end && (end & (CACHE_LINE_SIZE - 1))) {
xsc3_l2_clean_pa(end & ~(CACHE_LINE_SIZE - 1));
xsc3_l2_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
end &= ~(CACHE_LINE_SIZE - 1);
@@ -107,7 +107,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
/*
* Invalidate all full cache lines between 'start' and 'end'.
*/
- while (start != end) {
+ while (start < end) {
xsc3_l2_inv_pa(start);
start += CACHE_LINE_SIZE;
}
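
Both L2 cache handlers above receive the same two guards. A simplified C sketch of the corrected flow (not the kernel functions themselves; the line size and callback names are assumptions, and the partial-first-line handling that precedes this in the real code is omitted):

	#define CACHE_LINE_SIZE	32	/* assumed L2 line size for the sketch */

	static void l2_inv_range_sketch(unsigned long start, unsigned long end,
					void (*clean_inv_pa)(unsigned long),
					void (*inv_pa)(unsigned long))
	{
		/* 'start' is assumed to be line-aligned already. */

		/* Clean and invalidate a partial last cache line, but only
		 * if any of the range is actually left to process. */
		if (start < end && (end & (CACHE_LINE_SIZE - 1))) {
			clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
			end &= ~(CACHE_LINE_SIZE - 1);
		}

		/* Invalidate the full lines in between; testing "start < end"
		 * instead of "start != end" terminates correctly even when
		 * the rounding above leaves nothing to do. */
		while (start < end) {
			inv_pa(start);
			start += CACHE_LINE_SIZE;
		}
	}

The point of the change is visible here: with an empty or sub-line-sized range, rounding 'start' up can carry it beyond 'end', and the old "start != end" loop then walked past the end of the request, invalidating cache lines it was never asked to touch.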
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 8ba754064559..f24803c1fb0b 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -180,20 +180,20 @@ void adjust_cr(unsigned long mask, unsigned long set)
#endif
#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
-#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_XN|PMD_SECT_AP_WRITE
+#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
static struct mem_type mem_types[] = {
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
L_PTE_SHARED,
.prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PROT_SECT_DEVICE | PMD_SECT_UNCACHED,
+ .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
.domain = DOMAIN_IO,
},
[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
.prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PROT_SECT_DEVICE | PMD_SECT_TEX(2),
+ .prot_sect = PROT_SECT_DEVICE,
.domain = DOMAIN_IO,
},
[MT_DEVICE_CACHED] = { /* ioremap_cached */
@@ -205,7 +205,13 @@ static struct mem_type mem_types[] = {
[MT_DEVICE_WC] = { /* ioremap_wc */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
.prot_l1 = PMD_TYPE_TABLE,
- .prot_sect = PROT_SECT_DEVICE | PMD_SECT_BUFFERABLE,
+ .prot_sect = PROT_SECT_DEVICE,
+ .domain = DOMAIN_IO,
+ },
+ [MT_UNCACHED] = {
+ .prot_pte = PROT_PTE_DEVICE,
+ .prot_l1 = PMD_TYPE_TABLE,
+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
.domain = DOMAIN_IO,
},
[MT_CACHECLEAN] = {
@@ -273,22 +279,23 @@ static void __init build_mem_type_table(void)
#endif
/*
- * On non-Xscale3 ARMv5-and-older systems, use CB=01
- * (Uncached/Buffered) for ioremap_wc() mappings. On XScale3
- * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable
- * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
+ * Strip out features not present on earlier architectures.
+ * Pre-ARMv5 CPUs don't have TEX bits. Pre-ARMv6 CPUs or those
+ * without extended page tables don't have the 'Shared' bit.
*/
- if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
- mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
- mem_types[MT_DEVICE_WC].prot_sect &= ~PMD_SECT_BUFFERABLE;
- }
+ if (cpu_arch < CPU_ARCH_ARMv5)
+ for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+ mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
+ if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
+ for (i = 0; i < ARRAY_SIZE(mem_types); i++)
+ mem_types[i].prot_sect &= ~PMD_SECT_S;
/*
- * ARMv5 and lower, bit 4 must be set for page tables.
- * (was: cache "update-able on write" bit on ARM610)
- * However, Xscale cores require this bit to be cleared.
+ * ARMv5 and lower, bit 4 must be set for page tables (was: cache
+ * "update-able on write" bit on ARM610). However, Xscale and
+ * Xscale3 require this bit to be cleared.
*/
- if (cpu_is_xscale()) {
+ if (cpu_is_xscale() || cpu_is_xsc3()) {
for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
mem_types[i].prot_sect &= ~PMD_BIT4;
mem_types[i].prot_l1 &= ~PMD_BIT4;
@@ -302,6 +309,64 @@ static void __init build_mem_type_table(void)
}
}
+ /*
+ * Mark the device areas according to the CPU/architecture.
+ */
+ if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
+ if (!cpu_is_xsc3()) {
+ /*
+ * Mark device regions on ARMv6+ as execute-never
+ * to prevent speculative instruction fetches.
+ */
+ mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
+ mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
+ mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+ }
+ if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
+ /*
+ * For ARMv7 with TEX remapping,
+ * - shared device is SXCB=1100
+ * - nonshared device is SXCB=0100
+ * - write combine device mem is SXCB=0001
+ * (Uncached Normal memory)
+ */
+ mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
+ mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+ } else if (cpu_is_xsc3()) {
+ /*
+ * For Xscale3,
+ * - shared device is TEXCB=00101
+ * - nonshared device is TEXCB=01000
+ * - write combine device mem is TEXCB=00100
+ * (Inner/Outer Uncacheable in xsc3 parlance)
+ */
+ mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
+ mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+ } else {
+ /*
+ * For ARMv6 and ARMv7 without TEX remapping,
+ * - shared device is TEXCB=00001
+ * - nonshared device is TEXCB=01000
+ * - write combine device mem is TEXCB=00100
+ * (Uncached Normal in ARMv6 parlance).
+ */
+ mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
+ mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+ }
+ } else {
+ /*
+ * On others, write combining is "Uncached/Buffered"
+ */
+ mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+ }
+
+ /*
+ * Now deal with the memory-type mappings
+ */
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
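
The device mem_types set up above are what the ioremap family hands back to drivers: plain ioremap() uses the strongly-ordered MT_DEVICE type, ioremap_wc() the write-combining MT_DEVICE_WC type, and ioremap_cached() MT_DEVICE_CACHED. A minimal, illustrative driver fragment (the physical address, length and helper name are hypothetical):

	#include <linux/types.h>
	#include <linux/errno.h>
	#include <linux/io.h>

	static void __iomem *fb_base;	/* hypothetical frame buffer window */

	static int example_map_wc(unsigned long phys, size_t len)
	{
		/* Write-combined mapping: picks up whichever MT_DEVICE_WC
		 * TEX/C/B encoding was selected per CPU class above. */
		fb_base = ioremap_wc(phys, len);
		if (!fb_base)
			return -ENOMEM;
		return 0;
	}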
@@ -317,12 +382,8 @@ static void __init build_mem_type_table(void)
* Enable CPU-specific coherency if supported.
* (Only available on XSC3 at the moment.)
*/
- if (arch_is_coherent()) {
- if (cpu_is_xsc3()) {
- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
- }
- }
+ if (arch_is_coherent() && cpu_is_xsc3())
+ mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
/*
* ARMv6 and above have extended page tables.
@@ -336,11 +397,6 @@ static void __init build_mem_type_table(void)
mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
- /*
- * Mark the device area as "shared device"
- */
- mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
-
#ifdef CONFIG_SMP
/*
* Mark memory with the "shared" attribute for SMP systems
@@ -360,9 +416,6 @@ static void __init build_mem_type_table(void)
mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;
- if (cpu_arch < CPU_ARCH_ARMv5)
- mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
-
pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
L_PTE_DIRTY | L_PTE_WRITE |
@@ -654,7 +707,7 @@ static inline void prepare_page_table(struct meminfo *mi)
/*
* Clear out all the mappings below the kernel image.
*/
- for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
+ for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
#ifdef CONFIG_XIP_KERNEL
@@ -766,7 +819,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
*/
#ifdef CONFIG_XIP_KERNEL
map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
- map.virtual = MODULE_START;
+ map.virtual = MODULES_VADDR;
map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
map.type = MT_ROM;
create_mapping(&map);
@@ -843,7 +896,7 @@ void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
* allocate the zero page. Note that we count on this going ok.
*/
zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
- memzero(zero_page, PAGE_SIZE);
+ memset(zero_page, 0, PAGE_SIZE);
empty_zero_page = virt_to_page(zero_page);
flush_dcache_page(empty_zero_page);
}
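
The memzero() removed here (and again in pgd.c below) is an ARM-private helper; the standard memset() it is converted to is equivalent, as in this trivial sketch:

	#include <linux/string.h>

	static inline void zero_buffer(void *p, size_t n)
	{
		memset(p, 0, n);	/* was: memzero(p, n) on ARM */
	}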
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index e0f19ab91163..2690146161ba 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -31,7 +31,7 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
if (!new_pgd)
goto no_pgd;
- memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
+ memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
/*
* Copy over the kernel and IO PGD entries
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 0e11d9716a7d..d1ebec42521d 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -124,7 +124,7 @@ ENTRY(cpu_v7_set_pte_ext)
orr r3, r3, r2
orr r3, r3, #PTE_EXT_AP0 | 2
- tst r2, #1 << 4
+ tst r1, #1 << 4
orrne r3, r3, #PTE_EXT_TEX(1)
tst r1, #L_PTE_WRITE
@@ -205,11 +205,11 @@ __v7_setup:
mov pc, lr @ return to head.S:__ret
ENDPROC(__v7_setup)
- /*
- * V X F I D LR
- * .... ...E PUI. .T.T 4RVI ZFRS BLDP WCAM
- * rrrr rrrx xxx0 0101 xxxx xxxx x111 xxxx < forced
- * 0 110 0011 1.00 .111 1101 < we want
+ /* AT
+ * TFR EV X F I D LR
+ * .EEE ..EE PUI. .T.T 4RVI ZFRS BLDP WCAM
+ * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
+ * 1 0 110 0011 1.00 .111 1101 < we want
*/
.type v7_crval, #object
v7_crval:
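
The corrected comment block documents which control register bits are forced and which the kernel wants set; v7_crval encodes that as a clear mask and a set mask that __v7_setup applies before returning to head.S. A minimal C sketch of applying such a pair (assumed semantics, illustrative names only):

	static inline unsigned long apply_crval(unsigned long cr,
						unsigned long clear,
						unsigned long set)
	{
		/* clear the masked bits, then OR in the wanted bits */
		return (cr & ~clear) | set;
	}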
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 04dc8b65401b..8f6cf56c11c0 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -349,7 +349,7 @@ ENTRY(cpu_xsc3_switch_mm)
cpu_xsc3_mt_table:
.long 0x00 @ L_PTE_MT_UNCACHED
.long PTE_EXT_TEX(1) @ L_PTE_MT_BUFFERABLE
- .long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH
+ .long PTE_EXT_TEX(5) | PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH
.long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK
.long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED
.long 0x00 @ unused