Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/abort-ev6.S          |  3
-rw-r--r--  arch/arm/mm/copypage-feroceon.c  |  2
-rw-r--r--  arch/arm/mm/copypage-v3.c        |  2
-rw-r--r--  arch/arm/mm/copypage-v4mc.c      |  2
-rw-r--r--  arch/arm/mm/copypage-v4wb.c      |  2
-rw-r--r--  arch/arm/mm/copypage-v4wt.c      |  2
-rw-r--r--  arch/arm/mm/copypage-xsc3.c      |  2
-rw-r--r--  arch/arm/mm/copypage-xscale.c    |  2
-rw-r--r--  arch/arm/mm/dma-mapping.c        | 20
-rw-r--r--  arch/arm/mm/fault-armv.c         |  5
-rw-r--r--  arch/arm/mm/init.c               |  2
-rw-r--r--  arch/arm/mm/mmap.c               |  2
-rw-r--r--  arch/arm/mm/mmu.c                |  3
13 files changed, 29 insertions(+), 20 deletions(-)
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 8a7f65ba14b7..94077fbd96b7 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -23,7 +23,8 @@ ENTRY(v6_early_abort)
#ifdef CONFIG_CPU_32v6K
clrex
#else
- strex r0, r1, [sp] @ Clear the exclusive monitor
+ sub r1, sp, #4 @ Get unused stack location
+ strex r0, r1, [r1] @ Clear the exclusive monitor
#endif
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
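On CPUs without the v6K CLREX instruction, the only way to force the local exclusive monitor out of the exclusive state is a dummy STREX, and that store really goes through when the monitor is exclusive, which is why the patch points it at an unused word below sp instead of at [sp]. A minimal sketch of the same idea as C inline assembly (clear_exclusive() and its scratch variable are illustrative, not kernel code):

/* Clear the local exclusive monitor with a dummy STREX when CLREX is
 * unavailable.  The store may actually be performed, so it targets a
 * scratch word rather than live data (the patch uses sp - 4 likewise).
 */
static inline void clear_exclusive(void)
{
	unsigned long scratch, status;

	asm volatile("strex %0, %1, [%2]"
		     : "=&r" (status)
		     : "r" (0UL), "r" (&scratch)
		     : "memory");
}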
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index c3ba6a94da0c..70997d5bee2d 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -13,7 +13,7 @@
#include <linux/init.h>
#include <linux/highmem.h>
-static void __attribute__((naked))
+static void __naked
feroceon_copy_user_page(void *kto, const void *kfrom)
{
asm("\
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index 70ed96c8af8e..de9c06854ad7 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -15,7 +15,7 @@
*
* FIXME: do we need to handle cache stuff...
*/
-static void __attribute__((naked))
+static void __naked
v3_copy_user_page(void *kto, const void *kfrom)
{
asm("\n\
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 1601698b9800..7370a7142b04 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(minicache_lock);
* instruction. If your processor does not supply this, you have to write your
* own copy_user_highpage that does the right thing.
*/
-static void __attribute__((naked))
+static void __naked
mc_copy_user_page(void *from, void *to)
{
asm volatile(
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index 3ec93dab7656..9ab098414227 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -22,7 +22,7 @@
* instruction. If your processor does not supply this, you have to write your
* own copy_user_highpage that does the right thing.
*/
-static void __attribute__((naked))
+static void __naked
v4wb_copy_user_page(void *kto, const void *kfrom)
{
asm("\
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index 0f1188efae45..300efafd6643 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -20,7 +20,7 @@
* dirty data in the cache. However, we do have to ensure that
* subsequent reads are up to date.
*/
-static void __attribute__((naked))
+static void __naked
v4wt_copy_user_page(void *kto, const void *kfrom)
{
asm("\
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index 39a994542cad..bc4525f5ab23 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -29,7 +29,7 @@
* if we eventually end up using our copied page.
*
*/
-static void __attribute__((naked))
+static void __naked
xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{
asm("\
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index d18f2397ee2d..76824d3e966a 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(minicache_lock);
* Dcache aliasing issue. The writes will be forwarded to the write buffer,
* and merged as appropriate.
*/
-static void __attribute__((naked))
+static void __naked
mc_copy_user_page(void *from, void *to)
{
/*
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 310e479309ef..f1ef5613ccd4 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -490,26 +490,30 @@ core_initcall(consistent_init);
*/
void dma_cache_maint(const void *start, size_t size, int direction)
{
- const void *end = start + size;
+ void (*inner_op)(const void *, const void *);
+ void (*outer_op)(unsigned long, unsigned long);
- BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(end - 1));
+ BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));
switch (direction) {
case DMA_FROM_DEVICE: /* invalidate only */
- dmac_inv_range(start, end);
- outer_inv_range(__pa(start), __pa(end));
+ inner_op = dmac_inv_range;
+ outer_op = outer_inv_range;
break;
case DMA_TO_DEVICE: /* writeback only */
- dmac_clean_range(start, end);
- outer_clean_range(__pa(start), __pa(end));
+ inner_op = dmac_clean_range;
+ outer_op = outer_clean_range;
break;
case DMA_BIDIRECTIONAL: /* writeback and invalidate */
- dmac_flush_range(start, end);
- outer_flush_range(__pa(start), __pa(end));
+ inner_op = dmac_flush_range;
+ outer_op = outer_flush_range;
break;
default:
BUG();
}
+
+ inner_op(start, start + size);
+ outer_op(__pa(start), __pa(start) + size);
}
EXPORT_SYMBOL(dma_cache_maint);
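The rework keeps the per-direction switch but only selects the inner and outer cache operations there; the range arithmetic is done once at the end instead of being repeated in every case. An illustrative caller, not taken from this tree (example_map_single() and its 1:1 __pa() bus mapping are assumptions for the sketch):

#include <linux/dma-mapping.h>

/* Hypothetical streaming-map helper: perform the CPU-side cache
 * maintenance for the buffer, then hand its bus address to the device.
 */
static dma_addr_t example_map_single(void *cpu_addr, size_t size,
				     enum dma_data_direction dir)
{
	dma_cache_maint(cpu_addr, size, dir);
	return __pa(cpu_addr);
}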
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 81d0b8772de3..bc0099d5ae85 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -66,7 +66,10 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
* fault (ie, is old), we can safely ignore any issues.
*/
if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
- flush_cache_page(vma, address, pte_pfn(entry));
+ unsigned long pfn = pte_pfn(entry);
+ flush_cache_page(vma, address, pfn);
+ outer_flush_range((pfn << PAGE_SHIFT),
+ (pfn << PAGE_SHIFT) + PAGE_SIZE);
pte_val(entry) &= ~L_PTE_MT_MASK;
pte_val(entry) |= shared_pte_mask;
set_pte_at(vma->vm_mm, address, pte, entry);
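The added outer_flush_range() call works on physical addresses, so the page frame number is converted with a shift and the flush covers exactly one page. A short worked sketch, assuming 4 KiB pages (PAGE_SHIFT == 12):

/* pfn 0x12345 covers physical addresses 0x12345000..0x12345fff, so the
 * exclusive end of the flushed range is start + PAGE_SIZE.
 */
static void example_flush_page_outer(unsigned long pfn)
{
	unsigned long start = pfn << PAGE_SHIFT;

	outer_flush_range(start, start + PAGE_SIZE);
}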
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 34df4d9d03a6..80fd3b69ae1f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -382,7 +382,7 @@ void __init bootmem_init(void)
for_each_node(node)
bootmem_free_node(node, mi);
- high_memory = __va(memend_pfn << PAGE_SHIFT);
+ high_memory = __va((memend_pfn << PAGE_SHIFT) - 1) + 1;
/*
* This doesn't seem to be used by the Linux memory manager any
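The high_memory change translates the last valid byte of memory and only then steps past it, rather than translating the one-past-the-end address directly; that end address may not itself be a translatable physical address (it can even wrap to zero when memory ends at the top of the physical space). The same pattern written out as a hypothetical helper:

/* Translate the exclusive end of a physical region safely: end_phys
 * itself may lie outside the range __va() can translate, so convert
 * the last valid byte and add one afterwards.
 */
static void *safe_end_va(unsigned long end_phys)
{
	return __va(end_phys - 1) + 1;
}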
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 5358fcc7f61e..f7457fea6de8 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -124,7 +124,7 @@ int valid_phys_addr_range(unsigned long addr, size_t size)
{
if (addr < PHYS_OFFSET)
return 0;
- if (addr + size > __pa(high_memory))
+ if (addr + size >= __pa(high_memory - 1))
return 0;
return 1;
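valid_phys_addr_range() is consulted by drivers/char/mem.c before /dev/mem is allowed to touch a physical window, and the same corner-case rule is applied here from the virtual side: high_memory is one past the last lowmem byte, so the bound is derived from __pa(high_memory - 1) rather than from __pa(high_memory). A hypothetical check in the same spirit:

/* Accept a physical window only if it lies entirely between PHYS_OFFSET
 * and the last byte of lowmem; the upper bound comes from translating
 * high_memory - 1, never high_memory itself.
 */
static int phys_window_ok(unsigned long addr, size_t size)
{
	unsigned long last_lowmem = __pa(high_memory - 1);

	return addr >= PHYS_OFFSET && addr + size - 1 <= last_lowmem;
}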
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9b36c5cb5e9f..d4d082c5c2d4 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -693,7 +693,8 @@ static void __init sanity_check_meminfo(void)
* Check whether this memory bank would entirely overlap
* the vmalloc area.
*/
- if (__va(bank->start) >= VMALLOC_MIN) {
+ if (__va(bank->start) >= VMALLOC_MIN ||
+ __va(bank->start) < PAGE_OFFSET) {
printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
"(vmalloc region overlap).\n",
bank->start, bank->start + bank->size - 1);
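The extra test catches banks whose start address is so high that __va() wraps past the top of the 32-bit virtual space and lands below PAGE_OFFSET; without it, such a bank would slip past the >= VMALLOC_MIN test and be kept even though it cannot be part of the linear mapping. A worked example with assumed values (PAGE_OFFSET = 0xc0000000 and a plain linear __va(p) = p + PAGE_OFFSET):

/* Illustrative wrap-around check only, not kernel code. */
static int bank_start_wraps(unsigned long start)
{
	unsigned long va = start + 0xc0000000UL;	/* truncates on 32-bit */

	/* e.g. start = 0x80000000 gives va = 0x40000000, below PAGE_OFFSET,
	 * so the old test alone would wrongly keep the bank.
	 */
	return va < 0xc0000000UL;
}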