Diffstat (limited to 'arch/arm/mm')
 arch/arm/mm/Kconfig             |   5
 arch/arm/mm/Makefile            |   1
 arch/arm/mm/cache-feroceon-l2.c |   1
 arch/arm/mm/cache-l2x0.c        |  11
 arch/arm/mm/cache-v3.S          | 137
 arch/arm/mm/cache-v4.S          |   2
 arch/arm/mm/context.c           |  32
 arch/arm/mm/dma-mapping.c       |  20
 arch/arm/mm/flush.c             |  15
 arch/arm/mm/idmap.c             |  33
 arch/arm/mm/init.c              |  50
 arch/arm/mm/mmu.c               |  75
 arch/arm/mm/proc-arm740.S       |  30
 arch/arm/mm/proc-arm920.S       |   2
 arch/arm/mm/proc-arm926.S       |   2
 arch/arm/mm/proc-mohawk.S       |   2
 arch/arm/mm/proc-sa1100.S       |   2
 arch/arm/mm/proc-syms.c         |   2
 arch/arm/mm/proc-v6.S           |   4
 arch/arm/mm/proc-v7-2level.S    |   3
 arch/arm/mm/proc-v7-3level.S    |   5
 arch/arm/mm/proc-v7.S           |  26
 arch/arm/mm/proc-xsc3.S         |   2
 arch/arm/mm/proc-xscale.S       |   2
 arch/arm/mm/tcm.h               |  17
 25 files changed, 184 insertions(+), 297 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index cce78b070825..9e8101ecd63e 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -43,7 +43,7 @@ config CPU_ARM740T
depends on !MMU
select CPU_32v4T
select CPU_ABRT_LV4T
- select CPU_CACHE_V3 # although the core is v4t
+ select CPU_CACHE_V4
select CPU_CP15_MPU
select CPU_PABRT_LEGACY
help
@@ -488,9 +488,6 @@ config CPU_PABRT_V7
bool
# The cache model
-config CPU_CACHE_V3
- bool
-
config CPU_CACHE_V4
bool
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 317b57559340..ee558a01f390 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -33,7 +33,6 @@ obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o
obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o
obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o
-obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o
obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o
obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o
obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index dd3d59122cc3..48bc3c0a87ce 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -343,6 +343,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
outer_cache.inv_range = feroceon_l2_inv_range;
outer_cache.clean_range = feroceon_l2_clean_range;
outer_cache.flush_range = feroceon_l2_flush_range;
+ outer_cache.inv_all = l2_inv_all;
enable_l2();
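Generic code never calls the Feroceon routines directly; everything goes through the outer_cache ops table, so a hook left NULL makes that operation a silent no-op. A sketch of the dispatch wrapper, assuming the usual shape of the helpers in asm/outer_cache.h (illustrative, not copied from this tree):

    /* The wrapper only acts when a driver such as
     * feroceon_l2_init() above has installed the hook. */
    static inline void outer_inv_all(void)
    {
            if (outer_cache.inv_all)
                    outer_cache.inv_all();
    }
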
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c2f37390308a..c465faca51b0 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id)
int lockregs;
int i;
- switch (cache_id) {
+ switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L310:
lockregs = 8;
break;
@@ -333,15 +333,14 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
if (cache_id_part_number_from_dt)
cache_id = cache_id_part_number_from_dt;
else
- cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID)
- & L2X0_CACHE_ID_PART_MASK;
+ cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
aux &= aux_mask;
aux |= aux_val;
/* Determine the number of ways */
- switch (cache_id) {
+ switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L310:
if (aux & (1 << 16))
ways = 16;
@@ -725,7 +724,6 @@ static const struct l2x0_of_data pl310_data = {
.flush_all = l2x0_flush_all,
.inv_all = l2x0_inv_all,
.disable = l2x0_disable,
- .set_debug = pl310_set_debug,
},
};
@@ -814,9 +812,8 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
data->save();
of_init = true;
- l2x0_init(l2x0_base, aux_val, aux_mask);
-
memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
+ l2x0_init(l2x0_base, aux_val, aux_mask);
return 0;
}
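Masking at the switch rather than at the read keeps the full CACHE_ID value around, so the RTL release bits remain available to revision-specific code. A hedged sketch of the field decode, assuming the conventional layout (part number in bits [9:6], RTL release in bits [5:0]):

    #include <stdint.h>

    #define L2X0_CACHE_ID_PART_MASK  (0xf << 6)  /* assumed bits [9:6] */
    #define L2X0_CACHE_ID_PART_L310  (3 << 6)
    #define L2X0_CACHE_ID_RTL_MASK   0x3f        /* assumed bits [5:0] */

    /* Part-number checks mask on use; the same word still carries
     * the RTL revision for errata decisions. */
    static int is_l310(uint32_t cache_id)
    {
            return (cache_id & L2X0_CACHE_ID_PART_MASK) ==
                    L2X0_CACHE_ID_PART_L310;
    }
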
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
deleted file mode 100644
index 8a3fadece8d3..000000000000
--- a/arch/arm/mm/cache-v3.S
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * linux/arch/arm/mm/cache-v3.S
- *
- * Copyright (C) 1997-2002 Russell king
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/page.h>
-#include "proc-macros.S"
-
-/*
- * flush_icache_all()
- *
- * Unconditionally clean and invalidate the entire icache.
- */
-ENTRY(v3_flush_icache_all)
- mov pc, lr
-ENDPROC(v3_flush_icache_all)
-
-/*
- * flush_user_cache_all()
- *
- * Invalidate all cache entries in a particular address
- * space.
- *
- * - mm - mm_struct describing address space
- */
-ENTRY(v3_flush_user_cache_all)
- /* FALLTHROUGH */
-/*
- * flush_kern_cache_all()
- *
- * Clean and invalidate the entire cache.
- */
-ENTRY(v3_flush_kern_cache_all)
- /* FALLTHROUGH */
-
-/*
- * flush_user_cache_range(start, end, flags)
- *
- * Invalidate a range of cache entries in the specified
- * address space.
- *
- * - start - start address (may not be aligned)
- * - end - end address (exclusive, may not be aligned)
- * - flags - vma_area_struct flags describing address space
- */
-ENTRY(v3_flush_user_cache_range)
- mov ip, #0
- mcreq p15, 0, ip, c7, c0, 0 @ flush ID cache
- mov pc, lr
-
-/*
- * coherent_kern_range(start, end)
- *
- * Ensure coherency between the Icache and the Dcache in the
- * region described by start. If you have non-snooping
- * Harvard caches, you need to implement this function.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_coherent_kern_range)
- /* FALLTHROUGH */
-
-/*
- * coherent_user_range(start, end)
- *
- * Ensure coherency between the Icache and the Dcache in the
- * region described by start. If you have non-snooping
- * Harvard caches, you need to implement this function.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_coherent_user_range)
- mov r0, #0
- mov pc, lr
-
-/*
- * flush_kern_dcache_area(void *page, size_t size)
- *
- * Ensure no D cache aliasing occurs, either with itself or
- * the I cache
- *
- * - addr - kernel address
- * - size - region size
- */
-ENTRY(v3_flush_kern_dcache_area)
- /* FALLTHROUGH */
-
-/*
- * dma_flush_range(start, end)
- *
- * Clean and invalidate the specified virtual address range.
- *
- * - start - virtual start address
- * - end - virtual end address
- */
-ENTRY(v3_dma_flush_range)
- mov r0, #0
- mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
- mov pc, lr
-
-/*
- * dma_unmap_area(start, size, dir)
- * - start - kernel virtual start address
- * - size - size of region
- * - dir - DMA direction
- */
-ENTRY(v3_dma_unmap_area)
- teq r2, #DMA_TO_DEVICE
- bne v3_dma_flush_range
- /* FALLTHROUGH */
-
-/*
- * dma_map_area(start, size, dir)
- * - start - kernel virtual start address
- * - size - size of region
- * - dir - DMA direction
- */
-ENTRY(v3_dma_map_area)
- mov pc, lr
-ENDPROC(v3_dma_unmap_area)
-ENDPROC(v3_dma_map_area)
-
- .globl v3_flush_kern_cache_louis
- .equ v3_flush_kern_cache_louis, v3_flush_kern_cache_all
-
- __INITDATA
-
- @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions v3
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 43e5d77be677..a7ba68f59f0c 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -58,7 +58,7 @@ ENTRY(v4_flush_kern_cache_all)
ENTRY(v4_flush_user_cache_range)
#ifdef CONFIG_CPU_CP15
mov ip, #0
- mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache
+ mcr p15, 0, ip, c7, c7, 0 @ flush ID cache
mov pc, lr
#else
/* FALLTHROUGH */
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 7a0511191f6b..2ac37372ef52 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
-static DEFINE_PER_CPU(atomic64_t, active_asids);
+DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
@@ -152,9 +152,9 @@ static int is_reserved_asid(u64 asid)
return 0;
}
-static void new_context(struct mm_struct *mm, unsigned int cpu)
+static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
- u64 asid = mm->context.id;
+ u64 asid = atomic64_read(&mm->context.id);
u64 generation = atomic64_read(&asid_generation);
if (asid != 0 && is_reserved_asid(asid)) {
@@ -181,13 +181,14 @@ static void new_context(struct mm_struct *mm, unsigned int cpu)
cpumask_clear(mm_cpumask(mm));
}
- mm->context.id = asid;
+ return asid;
}
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
unsigned long flags;
unsigned int cpu = smp_processor_id();
+ u64 asid;
if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
__check_vmalloc_seq(mm);
@@ -198,20 +199,27 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
*/
cpu_set_reserved_ttbr0();
- if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
- && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
+ asid = atomic64_read(&mm->context.id);
+ if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+ && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
goto switch_mm_fastpath;
raw_spin_lock_irqsave(&cpu_asid_lock, flags);
/* Check that our ASID belongs to the current generation. */
- if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
- new_context(mm, cpu);
-
- atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
- cpumask_set_cpu(cpu, mm_cpumask(mm));
+ asid = atomic64_read(&mm->context.id);
+ if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
+ asid = new_context(mm, cpu);
+ atomic64_set(&mm->context.id, asid);
+ }
- if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+ if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
+ local_flush_bp_all();
local_flush_tlb_all();
+ dummy_flush_tlb_a15_erratum();
+ }
+
+ atomic64_set(&per_cpu(active_asids, cpu), asid);
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
switch_mm_fastpath:
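The fastpath test rests on one invariant: an ASID is usable only while its generation bits match the global asid_generation. A standalone sketch of that check, assuming an 8-bit hardware ASID:

    #include <stdbool.h>
    #include <stdint.h>

    #define ASID_BITS 8  /* assumption: 8-bit hardware ASID space */

    /*
     * The low ASID_BITS hold the hardware ASID; the bits above them
     * are a rollover generation counter. XOR clears the high bits
     * only when the generations match, so a nonzero result after the
     * shift means the ASID is stale and needs new_context().
     */
    static bool asid_is_current(uint64_t asid, uint64_t generation)
    {
            return ((asid ^ generation) >> ASID_BITS) == 0;
    }
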
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c7e3759f16d3..ef3e0f3aac96 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -342,6 +342,7 @@ static int __init atomic_pool_init(void)
{
struct dma_pool *pool = &atomic_pool;
pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
+ gfp_t gfp = GFP_KERNEL | GFP_DMA;
unsigned long nr_pages = pool->size >> PAGE_SHIFT;
unsigned long *bitmap;
struct page *page;
@@ -361,8 +362,8 @@ static int __init atomic_pool_init(void)
ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
atomic_pool_init);
else
- ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
- &page, atomic_pool_init);
+ ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
+ atomic_pool_init);
if (ptr) {
int i;
@@ -822,16 +823,17 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
if (PageHighMem(page)) {
if (len + offset > PAGE_SIZE)
len = PAGE_SIZE - offset;
- vaddr = kmap_high_get(page);
- if (vaddr) {
- vaddr += offset;
- op(vaddr, len, dir);
- kunmap_high(page);
- } else if (cache_is_vipt()) {
- /* unmapped pages might still be cached */
+
+ if (cache_is_vipt_nonaliasing()) {
vaddr = kmap_atomic(page);
op(vaddr + offset, len, dir);
kunmap_atomic(vaddr);
+ } else {
+ vaddr = kmap_high_get(page);
+ if (vaddr) {
+ op(vaddr + offset, len, dir);
+ kunmap_high(page);
+ }
}
} else {
vaddr = page_address(page) + offset;
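The same rule appears in __flush_dcache_page() below: only a non-aliasing VIPT cache may do maintenance through an arbitrary temporary mapping, so kmap_atomic() is reserved for that case, while aliasing caches must use the page's own kmap address if one exists. A sketch of the decision with the cache predicates reduced to booleans (illustrative only):

    enum maint_via { VIA_KMAP_ATOMIC, VIA_KMAP_HIGH, VIA_NONE };

    static enum maint_via pick_mapping(int vipt_nonaliasing,
                                       int page_has_kmap)
    {
            if (vipt_nonaliasing)
                    return VIA_KMAP_ATOMIC; /* any alias hits the lines */
            if (page_has_kmap)
                    return VIA_KMAP_HIGH;   /* must use the real alias */
            return VIA_NONE;                /* no kernel alias to flush */
    }
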
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 1c8f7f564175..0d473cce501c 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -170,15 +170,18 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
if (!PageHighMem(page)) {
__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
} else {
- void *addr = kmap_high_get(page);
- if (addr) {
- __cpuc_flush_dcache_area(addr, PAGE_SIZE);
- kunmap_high(page);
- } else if (cache_is_vipt()) {
- /* unmapped pages might still be cached */
+ void *addr;
+
+ if (cache_is_vipt_nonaliasing()) {
addr = kmap_atomic(page);
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
kunmap_atomic(addr);
+ } else {
+ addr = kmap_high_get(page);
+ if (addr) {
+ __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+ kunmap_high(page);
+ }
}
}
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 2dffc010cc41..83cb3ac27095 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -8,7 +8,6 @@
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/system_info.h>
-#include <asm/virt.h>
pgd_t *idmap_pgd;
@@ -83,37 +82,10 @@ static void identity_mapping_add(pgd_t *pgd, const char *text_start,
} while (pgd++, addr = next, addr != end);
}
-#if defined(CONFIG_ARM_VIRT_EXT) && defined(CONFIG_ARM_LPAE)
-pgd_t *hyp_pgd;
-
-extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
-
-static int __init init_static_idmap_hyp(void)
-{
- hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
- if (!hyp_pgd)
- return -ENOMEM;
-
- pr_info("Setting up static HYP identity map for 0x%p - 0x%p\n",
- __hyp_idmap_text_start, __hyp_idmap_text_end);
- identity_mapping_add(hyp_pgd, __hyp_idmap_text_start,
- __hyp_idmap_text_end, PMD_SECT_AP1);
-
- return 0;
-}
-#else
-static int __init init_static_idmap_hyp(void)
-{
- return 0;
-}
-#endif
-
extern char __idmap_text_start[], __idmap_text_end[];
static int __init init_static_idmap(void)
{
- int ret;
-
idmap_pgd = pgd_alloc(&init_mm);
if (!idmap_pgd)
return -ENOMEM;
@@ -123,12 +95,10 @@ static int __init init_static_idmap(void)
identity_mapping_add(idmap_pgd, __idmap_text_start,
__idmap_text_end, 0);
- ret = init_static_idmap_hyp();
-
/* Flush L1 for the hardware to see this page table content */
flush_cache_louis();
- return ret;
+ return 0;
}
early_initcall(init_static_idmap);
@@ -141,6 +111,7 @@ void setup_mm_for_reboot(void)
{
/* Switch to the identity mapping. */
cpu_switch_mm(idmap_pgd, &init_mm);
+ local_flush_bp_all();
#ifdef CONFIG_CPU_HAS_ASID
/*
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index ad722f1208a5..9a5cdc01fcdf 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -99,6 +99,9 @@ void show_mem(unsigned int filter)
printk("Mem-info:\n");
show_free_areas(filter);
+ if (filter & SHOW_MEM_FILTER_PAGE_COUNT)
+ return;
+
for_each_bank (i, mi) {
struct membank *bank = &mi->bank[i];
unsigned int pfn1, pfn2;
@@ -424,24 +427,6 @@ void __init bootmem_init(void)
max_pfn = max_high - PHYS_PFN_OFFSET;
}
-static inline int free_area(unsigned long pfn, unsigned long end, char *s)
-{
- unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
-
- for (; pfn < end; pfn++) {
- struct page *page = pfn_to_page(pfn);
- ClearPageReserved(page);
- init_page_count(page);
- __free_page(page);
- pages++;
- }
-
- if (size && s)
- printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
-
- return pages;
-}
-
/*
* Poison init memory with an undefined instruction (ARM) or a branch to an
* undefined instruction (Thumb).
@@ -534,6 +519,14 @@ static void __init free_unused_memmap(struct meminfo *mi)
#endif
}
+#ifdef CONFIG_HIGHMEM
+static inline void free_area_high(unsigned long pfn, unsigned long end)
+{
+ for (; pfn < end; pfn++)
+ free_highmem_page(pfn_to_page(pfn));
+}
+#endif
+
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
@@ -569,8 +562,7 @@ static void __init free_highpages(void)
if (res_end > end)
res_end = end;
if (res_start != start)
- totalhigh_pages += free_area(start, res_start,
- NULL);
+ free_area_high(start, res_start);
start = res_end;
if (start == end)
break;
@@ -578,9 +570,8 @@ static void __init free_highpages(void)
/* And now free anything which remains */
if (start < end)
- totalhigh_pages += free_area(start, end, NULL);
+ free_area_high(start, end);
}
- totalram_pages += totalhigh_pages;
#endif
}
@@ -609,8 +600,7 @@ void __init mem_init(void)
#ifdef CONFIG_SA1111
/* now that our DMA memory is actually so designated, we can free it */
- totalram_pages += free_area(PHYS_PFN_OFFSET,
- __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
+ free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, 0, NULL);
#endif
free_highpages();
@@ -738,16 +728,12 @@ void free_initmem(void)
extern char __tcm_start, __tcm_end;
poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
- totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
- __phys_to_pfn(__pa(&__tcm_end)),
- "TCM link");
+ free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link");
#endif
poison_init_mem(__init_begin, __init_end - __init_begin);
if (!machine_is_integrator() && !machine_is_cintegrator())
- totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
- __phys_to_pfn(__pa(__init_end)),
- "init");
+ free_initmem_default(0);
}
#ifdef CONFIG_BLK_DEV_INITRD
@@ -758,9 +744,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
{
if (!keep_initrd) {
poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
- totalram_pages += free_area(__phys_to_pfn(__pa(start)),
- __phys_to_pfn(__pa(end)),
- "initrd");
+ free_reserved_area(start, end, 0, "initrd");
}
}
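free_reserved_area() and free_highmem_page() are the generic replacements for the deleted local free_area(); per page they do essentially what the removed helper did. A sketch mirroring that removed loop:

    /* Illustrative per-page work when releasing a reserved range,
     * mirroring the free_area() deleted above. */
    static unsigned long free_range(unsigned long pfn, unsigned long end)
    {
            unsigned long pages = 0;

            for (; pfn < end; pfn++) {
                    struct page *page = pfn_to_page(pfn);

                    ClearPageReserved(page); /* page becomes managed */
                    init_page_count(page);   /* refcount back to one */
                    __free_page(page);       /* hand to the allocator */
                    pages++;
            }
            return pages;
    }
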
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c6d45c87540e..e0d8565671a6 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -34,6 +34,7 @@
#include <asm/mach/pci.h>
#include "mm.h"
+#include "tcm.h"
/*
* empty_zero_page is a special page that is used for
@@ -615,39 +616,60 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
} while (pte++, addr += PAGE_SIZE, addr != end);
}
-static void __init alloc_init_section(pud_t *pud, unsigned long addr,
- unsigned long end, phys_addr_t phys,
- const struct mem_type *type)
+static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+ unsigned long end, phys_addr_t phys,
+ const struct mem_type *type)
{
- pmd_t *pmd = pmd_offset(pud, addr);
-
+#ifndef CONFIG_ARM_LPAE
/*
- * Try a section mapping - end, addr and phys must all be aligned
- * to a section boundary. Note that PMDs refer to the individual
- * L1 entries, whereas PGDs refer to a group of L1 entries making
- * up one logical pointer to an L2 table.
+ * In classic MMU format, puds and pmds are folded in to
+ * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
+ * group of L1 entries making up one logical pointer to
+ * an L2 table (2MB), where as PMDs refer to the individual
+ * L1 entries (1MB). Hence increment to get the correct
+ * offset for odd 1MB sections.
+ * (See arch/arm/include/asm/pgtable-2level.h)
*/
- if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
- pmd_t *p = pmd;
-
-#ifndef CONFIG_ARM_LPAE
- if (addr & SECTION_SIZE)
- pmd++;
+ if (addr & SECTION_SIZE)
+ pmd++;
#endif
+ do {
+ *pmd = __pmd(phys | type->prot_sect);
+ phys += SECTION_SIZE;
+ } while (pmd++, addr += SECTION_SIZE, addr != end);
- do {
- *pmd = __pmd(phys | type->prot_sect);
- phys += SECTION_SIZE;
- } while (pmd++, addr += SECTION_SIZE, addr != end);
+ flush_pmd_entry(pmd);
+}
- flush_pmd_entry(p);
- } else {
+static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
+ unsigned long end, phys_addr_t phys,
+ const struct mem_type *type)
+{
+ pmd_t *pmd = pmd_offset(pud, addr);
+ unsigned long next;
+
+ do {
/*
- * No need to loop; pte's aren't interested in the
- * individual L1 entries.
+ * With LPAE, we must loop over to map
+ * all the pmds for the given range.
*/
- alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
- }
+ next = pmd_addr_end(addr, end);
+
+ /*
+ * Try a section mapping - addr, next and phys must all be
+ * aligned to a section boundary.
+ */
+ if (type->prot_sect &&
+ ((addr | next | phys) & ~SECTION_MASK) == 0) {
+ map_init_section(pmd, addr, next, phys, type);
+ } else {
+ alloc_init_pte(pmd, addr, next,
+ __phys_to_pfn(phys), type);
+ }
+
+ phys += next - addr;
+
+ } while (pmd++, addr = next, addr != end);
}
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -658,7 +680,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
do {
next = pud_addr_end(addr, end);
- alloc_init_section(pud, addr, next, phys, type);
+ alloc_init_pmd(pud, addr, next, phys, type);
phys += next - addr;
} while (pud++, addr = next, addr != end);
}
@@ -1273,6 +1295,7 @@ void __init paging_init(struct machine_desc *mdesc)
dma_contiguous_remap();
devicemaps_init(mdesc);
kmap_init();
+ tcm_init();
top_pmd = pmd_off_k(0xffff0000);
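pmd_addr_end() is what lets alloc_init_pmd() advance one pmd at a time on LPAE, where a pud spans several pmds; with the classic 2-level format the clamp simply returns end on the first pass. A standalone sketch of the boundary computation, assuming 2MB pmds:

    #include <stdio.h>

    #define PMD_SHIFT 21                 /* assumption: LPAE 2MB pmd */
    #define PMD_SIZE  (1UL << PMD_SHIFT)
    #define PMD_MASK  (~(PMD_SIZE - 1))

    /* Next pmd boundary, clamped to the end of the range. */
    static unsigned long pmd_addr_end(unsigned long addr,
                                      unsigned long end)
    {
            unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

            return boundary < end ? boundary : end;
    }

    int main(void)
    {
            /* A range crossing one 2MB boundary takes two steps. */
            printf("%#lx\n", pmd_addr_end(0x100000, 0x300000)); /* 0x200000 */
            return 0;
    }
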
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index dc5de5d53f20..fde2d2a794cf 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -77,24 +77,27 @@ __arm740_setup:
mcr p15, 0, r0, c6, c0 @ set area 0, default
ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
- ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
- mov r2, #10 @ 11 is the minimum (4KB)
-1: add r2, r2, #1 @ area size *= 2
- mov r1, r1, lsr #1
+ ldr r3, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
+ mov r4, #10 @ 11 is the minimum (4KB)
+1: add r4, r4, #1 @ area size *= 2
+ movs r3, r3, lsr #1
bne 1b @ count not zero r-shift
- orr r0, r0, r2, lsl #1 @ the area register value
+ orr r0, r0, r4, lsl #1 @ the area register value
orr r0, r0, #1 @ set enable bit
mcr p15, 0, r0, c6, c1 @ set area 1, RAM
ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
- ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
- mov r2, #10 @ 11 is the minimum (4KB)
-1: add r2, r2, #1 @ area size *= 2
- mov r1, r1, lsr #1
+ ldr r3, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
+ cmp r3, #0
+ moveq r0, #0
+ beq 2f
+ mov r4, #10 @ 11 is the minimum (4KB)
+1: add r4, r4, #1 @ area size *= 2
+ movs r3, r3, lsr #1
bne 1b @ count not zero r-shift
- orr r0, r0, r2, lsl #1 @ the area register value
+ orr r0, r0, r4, lsl #1 @ the area register value
orr r0, r0, #1 @ set enable bit
- mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH
+2: mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH
mov r0, #0x06
mcr p15, 0, r0, c2, c0 @ Region 1&2 cacheable
@@ -137,13 +140,14 @@ __arm740_proc_info:
.long 0x41807400
.long 0xfffffff0
.long 0
+ .long 0
b __arm740_setup
.long cpu_arch_name
.long cpu_elf_name
- .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT
+ .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT
.long cpu_arm740_name
.long arm740_processor_functions
.long 0
.long 0
- .long v3_cache_fns @ cache model
+ .long v4_cache_fns @ cache model
.size __arm740_proc_info, . - __arm740_proc_info
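The setup loop encodes an MPU region's size as log2(bytes) - 1 with 11 (4KB) as the floor; switching to movs makes the bne test the freshly shifted count rather than stale flags, and a zero-sized FLASH region now disables area 2 outright. The same computation in C, assuming the size arrives as a count of 4KB pages:

    /* ARM740T MPU size field: 11 + log2(pages), 11 == 4KB minimum.
     * Mirrors the assembly loop above (sketch). */
    static unsigned int mpu_area_size_field(unsigned long pages)
    {
            unsigned int field = 10; /* 11 is the minimum (4KB) */

            while (pages) {          /* one step per doubling */
                    field++;
                    pages >>= 1;
            }
            return field;
    }
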
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 2c3b9421ab5e..2556cf1c2da1 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm920_suspend_size
.equ cpu_arm920_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_arm920_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index f1803f7e2972..344c8a548cc0 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm926_suspend_size
.equ cpu_arm926_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_arm926_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 82f9cdc751d6..0b60dd3d742a 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
.globl cpu_mohawk_suspend_size
.equ cpu_mohawk_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_mohawk_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 3aa0da11fd84..d92dfd081429 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
.globl cpu_sa1100_suspend_size
.equ cpu_sa1100_suspend_size, 4 * 3
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_sa1100_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c3, c0, 0 @ domain ID
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 3e6210b4d6d4..054b491ff764 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -17,7 +17,9 @@
#ifndef MULTI_CPU
EXPORT_SYMBOL(cpu_dcache_clean_area);
+#ifdef CONFIG_MMU
EXPORT_SYMBOL(cpu_set_pte_ext);
+#endif
#else
EXPORT_SYMBOL(processor);
#endif
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index bcaaa8de9325..919405e20b80 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -80,12 +80,10 @@ ENTRY(cpu_v6_do_idle)
mov pc, lr
ENTRY(cpu_v6_dcache_clean_area)
-#ifndef TLB_CAN_READ_FROM_L1_CACHE
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #D_CACHE_LINE_SIZE
subs r1, r1, #D_CACHE_LINE_SIZE
bhi 1b
-#endif
mov pc, lr
/*
@@ -138,7 +136,7 @@ ENTRY(cpu_v6_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
.globl cpu_v6_suspend_size
.equ cpu_v6_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v6_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 78f520bc0e99..9704097c450e 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -110,7 +110,8 @@ ENTRY(cpu_v7_set_pte_ext)
ARM( str r3, [r0, #2048]! )
THUMB( add r0, r0, #2048 )
THUMB( str r3, [r0] )
- mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+ ALT_SMP(mov pc,lr)
+ ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
#endif
mov pc, lr
ENDPROC(cpu_v7_set_pte_ext)
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 50bf1dafc9ea..363027e811d6 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -48,7 +48,7 @@
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mmid r1, r1 @ get mm->context.id
- and r3, r1, #0xff
+ asid r3, r1
mov r3, r3, lsl #(48 - 32) @ ASID
mcrr p15, 0, r0, r3, c2 @ set TTB 0
isb
@@ -73,7 +73,8 @@ ENTRY(cpu_v7_set_pte_ext)
tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
orreq r2, #L_PTE_RDONLY
1: strd r2, r3, [r0]
- mcr p15, 0, r0, c7, c10, 1 @ flush_pte
+ ALT_SMP(mov pc, lr)
+ ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte
#endif
mov pc, lr
ENDPROC(cpu_v7_set_pte_ext)
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 3a3c015f8d5c..2c73a7301ff7 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -75,14 +75,14 @@ ENTRY(cpu_v7_do_idle)
ENDPROC(cpu_v7_do_idle)
ENTRY(cpu_v7_dcache_clean_area)
-#ifndef TLB_CAN_READ_FROM_L1_CACHE
+ ALT_SMP(mov pc, lr) @ MP extensions imply L1 PTW
+ ALT_UP(W(nop))
dcache_line_size r2, r3
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, r2
subs r1, r1, r2
bhi 1b
dsb
-#endif
mov pc, lr
ENDPROC(cpu_v7_dcache_clean_area)
@@ -402,6 +402,8 @@ __v7_ca9mp_proc_info:
__v7_proc __v7_ca9mp_setup
.size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
+#endif /* CONFIG_ARM_LPAE */
+
/*
* Marvell PJ4B processor.
*/
@@ -411,7 +413,6 @@ __v7_pj4b_proc_info:
.long 0xfffffff0
__v7_proc __v7_pj4b_setup
.size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info
-#endif /* CONFIG_ARM_LPAE */
/*
* ARM Ltd. Cortex A7 processor.
@@ -420,7 +421,7 @@ __v7_pj4b_proc_info:
__v7_ca7mp_proc_info:
.long 0x410fc070
.long 0xff0ffff0
- __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
+ __v7_proc __v7_ca7mp_setup
.size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
/*
@@ -430,10 +431,25 @@ __v7_ca7mp_proc_info:
__v7_ca15mp_proc_info:
.long 0x410fc0f0
.long 0xff0ffff0
- __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV
+ __v7_proc __v7_ca15mp_setup
.size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
/*
+ * Qualcomm Inc. Krait processors.
+ */
+ .type __krait_proc_info, #object
+__krait_proc_info:
+ .long 0x510f0400 @ Required ID value
+ .long 0xff0ffc00 @ Mask for ID
+ /*
+ * Some Krait processors don't indicate support for SDIV and UDIV
+ * instructions in the ARM instruction set, even though they actually
+ * do support them.
+ */
+ __v7_proc __v7_setup, hwcaps = HWCAP_IDIV
+ .size __krait_proc_info, . - __krait_proc_info
+
+ /*
* Match any ARMv7 processor core.
*/
.type __v7_proc_info, #object
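The forced HWCAP_IDIV surfaces in the ELF auxiliary vector, so userspace can gate on it before using sdiv/udiv. A hedged userspace probe (bit values assumed from the asm/hwcap.h of this era):

    #include <stdio.h>
    #include <sys/auxv.h>           /* getauxval(), AT_HWCAP */

    #ifndef HWCAP_IDIVA
    #define HWCAP_IDIVA (1 << 17)   /* assumed: ARM-state sdiv/udiv */
    #endif
    #ifndef HWCAP_IDIVT
    #define HWCAP_IDIVT (1 << 18)   /* assumed: Thumb-state sdiv/udiv */
    #endif

    int main(void)
    {
            unsigned long hwcap = getauxval(AT_HWCAP);

            printf("ARM idiv:   %s\n", hwcap & HWCAP_IDIVA ? "yes" : "no");
            printf("Thumb idiv: %s\n", hwcap & HWCAP_IDIVT ? "yes" : "no");
            return 0;
    }
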
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index eb93d6487f35..e8efd83b6f25 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
.globl cpu_xsc3_suspend_size
.equ cpu_xsc3_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_xsc3_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 25510361aa18..e766f889bfd6 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext)
.globl cpu_xscale_suspend_size
.equ cpu_xscale_suspend_size, 4 * 6
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_xscale_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/arch/arm/mm/tcm.h b/arch/arm/mm/tcm.h
new file mode 100644
index 000000000000..8015ad434a40
--- /dev/null
+++ b/arch/arm/mm/tcm.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * License terms: GNU General Public License (GPL) version 2
+ * TCM memory handling for ARM systems
+ *
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ * Author: Rickard Andersson <rickard.andersson@stericsson.com>
+ */
+
+#ifdef CONFIG_HAVE_TCM
+void __init tcm_init(void);
+#else
+/* No TCM support, just blank inlines to be optimized out */
+inline void tcm_init(void)
+{
+}
+#endif
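The stub is why paging_init() in the mmu.c hunk above can call tcm_init() unconditionally, with no #ifdef at the call site. A minimal model of the idiom (header stubs are conventionally spelled static inline so each includer gets its own copy that compiles away):

    /* Sketch of the conditional-stub pattern tcm.h uses. */
    #ifdef CONFIG_HAVE_TCM
    void tcm_init(void);                    /* real implementation elsewhere */
    #else
    static inline void tcm_init(void) { }   /* optimized out */
    #endif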