author		Russell King <rmk+kernel@arm.linux.org.uk>	2009-12-04 14:59:47 +0000
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2009-12-04 14:59:47 +0000
commit		5cb2faa6ede7ada9cb2bffc832c4ce60f53d6834 (patch)
tree		7b72b66081d042a41dc822575503133364857ce2 /arch/arm/mm
parent		e0ee98513d1a2e24d2ddbdecf4216bcca29d1158 (diff)
parent		6060e8df517847bf445ebc61de7d4d9c7faae990 (diff)
Merge branch 'pending-misc' (early part) into devel
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig        |  5
-rw-r--r--  arch/arm/mm/context.c      |  5
-rw-r--r--  arch/arm/mm/copypage-v6.c  |  8
-rw-r--r--  arch/arm/mm/dma-mapping.c  |  4
-rw-r--r--  arch/arm/mm/fault-armv.c   |  9
-rw-r--r--  arch/arm/mm/flush.c        | 74
-rw-r--r--  arch/arm/mm/init.c         | 20
-rw-r--r--  arch/arm/mm/mm.h           |  2
-rw-r--r--  arch/arm/mm/mmu.c          |  9
-rw-r--r--  arch/arm/mm/proc-v6.S      |  7
-rw-r--r--  arch/arm/mm/proc-v7.S      | 14
11 files changed, 94 insertions(+), 63 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 9cf7706e0be0..7b7d4c36c11c 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -122,10 +122,7 @@ config CPU_ARM920T
select CPU_TLB_V4WBI if MMU
help
The ARM920T is licensed to be produced by numerous vendors,
- and is used in the Maverick EP9312 and the Samsung S3C2410.
-
- More information on the Maverick EP9312 at
- <http://linuxdevices.com/products/PD2382866068.html>.
+ and is used in the Cirrus EP93xx and the Samsung S3C2410.
Say Y if you want support for the ARM920T processor.
Otherwise, say N.
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 6bda76a43199..a9e22e31eaa1 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -50,10 +50,7 @@ void __new_context(struct mm_struct *mm)
isb();
flush_tlb_all();
if (icache_is_vivt_asid_tagged()) {
- asm("mcr p15, 0, %0, c7, c5, 0 @ invalidate I-cache\n"
- "mcr p15, 0, %0, c7, c5, 6 @ flush BTAC/BTB\n"
- :
- : "r" (0));
+ __flush_icache_all();
dsb();
}
}
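The inline assembly dropped here is folded into __flush_icache_all(). A minimal sketch of such a helper, assuming it wraps the same two coprocessor writes (the real definition lives in the cacheflush header, not in this diff):

static inline void __flush_icache_all(void)
{
	/* invalidate entire I-cache, then flush the branch predictor */
	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
	    "mcr	p15, 0, %0, c7, c5, 6	@ flush BTAC/BTB\n"
	    :
	    : "r" (0));
}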
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 4127a7bddfe5..841f355319bf 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,6 +41,14 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
kfrom = kmap_atomic(from, KM_USER0);
kto = kmap_atomic(to, KM_USER1);
copy_page(kto, kfrom);
+#ifdef CONFIG_HIGHMEM
+ /*
+ * kmap_atomic() doesn't set the page virtual address, and
+ * kunmap_atomic() takes care of cache flushing already.
+ */
+ if (page_address(to) != NULL)
+#endif
+ __cpuc_flush_dcache_page(kto);
kunmap_atomic(kto, KM_USER1);
kunmap_atomic(kfrom, KM_USER0);
}
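Condensed, the added hunk behaves as the sketch below: pages with a permanent kernel mapping get an explicit D-cache flush after the copy, while transient highmem mappings are left alone because kunmap_atomic() already flushes them. PageHighMem() stands in for the #ifdef, so this is an illustration, not the patch's literal form:

	copy_page(kto, kfrom);
	/* lowmem pages always have a kernel mapping to flush; highmem
	 * pages are flushed by kunmap_atomic() unless they happen to
	 * hold a permanent kmap */
	if (!PageHighMem(to) || page_address(to) != NULL)
		__cpuc_flush_dcache_page(kto);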
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index b30925fcbcdc..b9590a7085ca 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -205,7 +205,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
order = get_order(size);
- if (mask != 0xffffffff)
+ if (mask < 0xffffffffULL)
gfp |= GFP_DMA;
page = alloc_pages(gfp, order);
@@ -289,7 +289,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
if (!mask)
goto error;
- if (mask != 0xffffffff)
+ if (mask < 0xffffffffULL)
gfp |= GFP_DMA;
virt = kmalloc(size, gfp);
if (!virt)
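The old test compared the 64-bit coherent DMA mask against a 32-bit unsigned constant, so any device with a mask wider than 32 bits also failed the equality and was needlessly pushed into ZONE_DMA. A standalone illustration of the two predicates (the 40-bit mask value is hypothetical, not from the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mask = 0xffffffffffULL;	/* 40-bit-capable device */

	/* old test: true, so GFP_DMA was set even though the
	 * device can reach all of lowmem */
	printf("mask != 0xffffffff    -> %d\n", mask != 0xffffffff);

	/* new test: false, GFP_DMA correctly left unset */
	printf("mask <  0xffffffffULL -> %d\n", mask < 0xffffffffULL);
	return 0;
}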
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index d0d17b6a3703..729602291958 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -23,6 +23,8 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
+#include "mm.h"
+
static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
/*
@@ -151,7 +153,14 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
if (!pfn_valid(pfn))
return;
+ /*
+ * The zero page is never written to, so never has any dirty
+ * cache lines, and therefore never needs to be flushed.
+ */
page = pfn_to_page(pfn);
+ if (page == ZERO_PAGE(0))
+ return;
+
mapping = page_mapping(page);
#ifndef CONFIG_SMP
if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
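flush_dcache_page() gains the same guard further down. The reasoning behind both: the empty zero page backs every untouched anonymous mapping and is only ever mapped read-only, so it can never accumulate dirty D-cache lines and any flush of it is pure overhead. As a predicate (illustrative helper name, not part of the patch):

static inline int page_may_be_dirty(struct page *page)
{
	/* ZERO_PAGE(0) is read-only everywhere: never dirty */
	return page != ZERO_PAGE(0);
}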
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index b27942909b23..329594e760cd 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -18,10 +18,6 @@
#include "mm.h"
-#ifdef CONFIG_ARM_ERRATA_411920
-extern void v6_icache_inval_all(void);
-#endif
-
#ifdef CONFIG_CPU_CACHE_VIPT
#define ALIAS_FLUSH_START 0xffff4000
@@ -35,77 +31,61 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
flush_tlb_kernel_page(to);
asm( "mcrr p15, 0, %1, %0, c14\n"
- " mcr p15, 0, %2, c7, c10, 4\n"
-#ifndef CONFIG_ARM_ERRATA_411920
- " mcr p15, 0, %2, c7, c5, 0\n"
-#endif
+ " mcr p15, 0, %2, c7, c10, 4"
:
: "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
: "cc");
-#ifdef CONFIG_ARM_ERRATA_411920
- v6_icache_inval_all();
-#endif
}
void flush_cache_mm(struct mm_struct *mm)
{
if (cache_is_vivt()) {
- if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
- __cpuc_flush_user_all();
+ vivt_flush_cache_mm(mm);
return;
}
if (cache_is_vipt_aliasing()) {
asm( "mcr p15, 0, %0, c7, c14, 0\n"
- " mcr p15, 0, %0, c7, c10, 4\n"
-#ifndef CONFIG_ARM_ERRATA_411920
- " mcr p15, 0, %0, c7, c5, 0\n"
-#endif
+ " mcr p15, 0, %0, c7, c10, 4"
:
: "r" (0)
: "cc");
-#ifdef CONFIG_ARM_ERRATA_411920
- v6_icache_inval_all();
-#endif
}
}
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
if (cache_is_vivt()) {
- if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
- __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
- vma->vm_flags);
+ vivt_flush_cache_range(vma, start, end);
return;
}
if (cache_is_vipt_aliasing()) {
asm( "mcr p15, 0, %0, c7, c14, 0\n"
- " mcr p15, 0, %0, c7, c10, 4\n"
-#ifndef CONFIG_ARM_ERRATA_411920
- " mcr p15, 0, %0, c7, c5, 0\n"
-#endif
+ " mcr p15, 0, %0, c7, c10, 4"
:
: "r" (0)
: "cc");
-#ifdef CONFIG_ARM_ERRATA_411920
- v6_icache_inval_all();
-#endif
}
+
+ if (vma->vm_flags & VM_EXEC)
+ __flush_icache_all();
}
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
if (cache_is_vivt()) {
- if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
- unsigned long addr = user_addr & PAGE_MASK;
- __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
- }
+ vivt_flush_cache_page(vma, user_addr, pfn);
return;
}
- if (cache_is_vipt_aliasing())
+ if (cache_is_vipt_aliasing()) {
flush_pfn_alias(pfn, user_addr);
+ __flush_icache_all();
+ }
+
+ if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
+ __flush_icache_all();
}
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
@@ -113,15 +93,13 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
unsigned long len, int write)
{
if (cache_is_vivt()) {
- if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
- unsigned long addr = (unsigned long)kaddr;
- __cpuc_coherent_kern_range(addr, addr + len);
- }
+ vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write);
return;
}
if (cache_is_vipt_aliasing()) {
flush_pfn_alias(page_to_pfn(page), uaddr);
+ __flush_icache_all();
return;
}
@@ -139,6 +117,8 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
+ void *addr = page_address(page);
+
/*
* Writeback any data associated with the kernel mapping of this
* page. This ensures that data in the physical page is mutually
@@ -149,9 +129,9 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
* kmap_atomic() doesn't set the page virtual address, and
* kunmap_atomic() takes care of cache flushing already.
*/
- if (page_address(page))
+ if (addr)
#endif
- __cpuc_flush_dcache_page(page_address(page));
+ __cpuc_flush_dcache_page(addr);
/*
* If this is a page cache page, and we have an aliasing VIPT cache,
@@ -215,7 +195,16 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
*/
void flush_dcache_page(struct page *page)
{
- struct address_space *mapping = page_mapping(page);
+ struct address_space *mapping;
+
+ /*
+ * The zero page is never written to, so never has any dirty
+ * cache lines, and therefore never needs to be flushed.
+ */
+ if (page == ZERO_PAGE(0))
+ return;
+
+ mapping = page_mapping(page);
#ifndef CONFIG_SMP
if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
@@ -261,6 +250,7 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
* userspace address only.
*/
flush_pfn_alias(pfn, vmaddr);
+ __flush_icache_all();
}
/*
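Two themes run through the flush.c hunks. First, the open-coded VIVT paths move behind vivt_flush_cache_* wrappers; judging from the removed lines, the mm variant presumably reduces to:

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	/* matches the lines removed from flush_cache_mm() above */
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

Second, every "invalidate I-cache" MCR bracketed by CONFIG_ARM_ERRATA_411920 conditionals collapses into __flush_icache_all(), which is assumed to dispatch to v6_icache_inval_all() when that workaround is enabled, so the erratum handling lives in one place instead of being repeated at each call site.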
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 40940d7ce4ff..52c40d155672 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -273,7 +273,6 @@ static void __init bootmem_init_node(int node, struct meminfo *mi,
struct membank *bank = &mi->bank[i];
if (!bank->highmem)
free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
- memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
}
/*
@@ -370,6 +369,19 @@ int pfn_valid(unsigned long pfn)
return 0;
}
EXPORT_SYMBOL(pfn_valid);
+
+static void arm_memory_present(struct meminfo *mi, int node)
+{
+}
+#else
+static void arm_memory_present(struct meminfo *mi, int node)
+{
+ int i;
+ for_each_nodebank(i, mi, node) {
+ struct membank *bank = &mi->bank[i];
+ memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
+ }
+}
#endif
static int __init meminfo_cmp(const void *_a, const void *_b)
@@ -427,6 +439,12 @@ void __init bootmem_init(void)
*/
if (node == initrd_node)
bootmem_reserve_initrd(node);
+
+ /*
+ * Sparsemem tries to allocate bootmem in memory_present(),
+ * so it must be done after the fixed reservations.
+ */
+ arm_memory_present(mi, node);
}
/*
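memory_present() moves out of bootmem_init_node() into a separate arm_memory_present() pass because, with SPARSEMEM, memory_present() allocates its section maps from bootmem; calling it before the kernel and initrd reservations could hand sparsemem pages that were about to be reserved. The per-node ordering this produces, condensed and paraphrased (the surrounding calls are from context, not quoted verbatim):

	for_each_node(node) {
		bootmem_init_node(node, mi);	/* bootmem allocator up */
		...				/* fixed reservations */
		if (node == initrd_node)
			bootmem_reserve_initrd(node);
		arm_memory_present(mi, node);	/* safe to allocate now */
	}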
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index c4f6f05198e0..a888363398f8 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -24,6 +24,8 @@ struct mem_type {
const struct mem_type *get_mem_type(unsigned int type);
+extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
+
#endif
struct map_desc;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 02243eeccf50..2427cdcd9098 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -117,6 +117,13 @@ static void __init early_cachepolicy(char **p)
}
if (i == ARRAY_SIZE(cache_policies))
printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
+ /*
+ * This restriction is partly to do with the way we boot; it is
+ * unpredictable to have memory mapped using two different sets of
+ * memory attributes (shared, type, and cache attribs). We cannot
+ * change these attributes once the initial assembly has set up the
+ * page tables.
+ */
if (cpu_architecture() >= CPU_ARCH_ARMv6) {
printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
cachepolicy = CPOLICY_WRITEBACK;
@@ -1029,7 +1036,7 @@ void __init paging_init(struct machine_desc *mdesc)
*/
zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
empty_zero_page = virt_to_page(zero_page);
- flush_dcache_page(empty_zero_page);
+ __flush_dcache_page(NULL, empty_zero_page);
}
/*
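Read together with the flush.c hunk above, the paging_init() change closes a subtle ordering hole: flush_dcache_page() now returns early for ZERO_PAGE(0), yet the page allocated here has just been zeroed through its kernel mapping and may still own dirty lines, so the low-level helper is called directly, exactly once:

	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
	empty_zero_page = virt_to_page(zero_page);
	/*
	 * flush_dcache_page() would skip ZERO_PAGE(0) from now on,
	 * so clean its just-zeroed lines via the low-level helper;
	 * the NULL mapping bypasses the aliasing bookkeeping.
	 */
	__flush_dcache_page(NULL, empty_zero_page);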
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 194737d60a22..70f75d2e3ead 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -32,8 +32,10 @@
#ifndef CONFIG_SMP
#define TTB_FLAGS TTB_RGN_WBWA
+#define PMD_FLAGS PMD_SECT_WB
#else
#define TTB_FLAGS TTB_RGN_WBWA|TTB_S
+#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S
#endif
ENTRY(cpu_v6_proc_init)
@@ -222,10 +224,9 @@ __v6_proc_info:
.long 0x0007b000
.long 0x0007f000
.long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
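The proc_info section flags stop hardcoding plain cacheable+bufferable and instead follow the same UP/SMP split as TTB_FLAGS, so the initial 1:1 mapping carries the same cacheability and shareability attributes as the hardware page-table walks; mapping memory under two different attribute sets is exactly the unpredictability the new mmu.c comment warns about. The split, restated side by side (condensed from this file's defines):

#ifndef CONFIG_SMP
#define TTB_FLAGS	TTB_RGN_WBWA			/* walks: WBWA */
#define PMD_FLAGS	PMD_SECT_WB			/* sections: WB */
#else
#define TTB_FLAGS	(TTB_RGN_WBWA | TTB_S)		/* walks: WBWA, shared */
#define PMD_FLAGS	(PMD_SECT_WBWA | PMD_SECT_S)	/* sections: WBWA, shared */
#endif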
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 23ebcf6eab9f..3a285218fd15 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -33,9 +33,11 @@
#ifndef CONFIG_SMP
/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
#define TTB_FLAGS TTB_IRGN_WB|TTB_RGN_OC_WB
+#define PMD_FLAGS PMD_SECT_WB
#else
/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
#define TTB_FLAGS TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
+#define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S
#endif
ENTRY(cpu_v7_proc_init)
@@ -184,9 +186,10 @@ cpu_v7_name:
*/
__v7_setup:
#ifdef CONFIG_SMP
- mrc p15, 0, r0, c1, c0, 1 @ Enable SMP/nAMP mode and
- orr r0, r0, #(1 << 6) | (1 << 0) @ TLB ops broadcasting
- mcr p15, 0, r0, c1, c0, 1
+ mrc p15, 0, r0, c1, c0, 1
+ tst r0, #(1 << 6) @ SMP/nAMP mode enabled?
+ orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and
+ mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting
#endif
adr r12, __v7_setup_stack @ the local stack
stmia r12, {r0-r5, r7, r9, r11, lr}
@@ -326,10 +329,9 @@ __v7_proc_info:
.long 0x000f0000 @ Required ID value
.long 0x000f0000 @ Mask for ID
.long PMD_TYPE_SECT | \
- PMD_SECT_BUFFERABLE | \
- PMD_SECT_CACHEABLE | \
PMD_SECT_AP_WRITE | \
- PMD_SECT_AP_READ
+ PMD_SECT_AP_READ | \
+ PMD_FLAGS
.long PMD_TYPE_SECT | \
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
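__v7_setup now reads the auxiliary control register first and writes it back only when the SMP/nAMP bit is still clear; on platforms where secure firmware has already configured ACTLR, the non-secure side may not be able to write it, so the unconditional mcr was at best redundant. A C rendering of the new sequence (read_actlr()/write_actlr() are hypothetical wrappers for the mrc/mcr pair):

	u32 actlr = read_actlr();		/* mrc p15, 0, r0, c1, c0, 1 */
	if (!(actlr & (1 << 6))) {		/* SMP/nAMP not yet enabled */
		actlr |= (1 << 6) | (1 << 0);	/* SMP mode + TLB-ops broadcast */
		write_actlr(actlr);		/* mcr p15, 0, r0, c1, c0, 1 */
	}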