Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig              | 35
-rw-r--r--  arch/arm/mm/cache-feroceon-l2.c  | 37
-rw-r--r--  arch/arm/mm/cache-v6.S           | 28
-rw-r--r--  arch/arm/mm/cache-v7.S           | 27
-rw-r--r--  arch/arm/mm/cache-xsc3l2.c       | 57
-rw-r--r--  arch/arm/mm/dma-mapping.c        | 35
-rw-r--r--  arch/arm/mm/flush.c              |  7
-rw-r--r--  arch/arm/mm/highmem.c            | 87
-rw-r--r--  arch/arm/mm/ioremap.c            |  8
-rw-r--r--  arch/arm/mm/mmu.c                |  6
-rw-r--r--  arch/arm/mm/proc-macros.S        | 29
-rw-r--r--  arch/arm/mm/proc-v7.S            | 13
12 files changed, 180 insertions(+), 189 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 4414a01e1e8a..8493ed04797a 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -599,6 +599,14 @@ config CPU_CP15_MPU
help
Processor has the CP15 register, which has MPU related registers.
+config CPU_USE_DOMAINS
+ bool
+ depends on MMU
+ default y if !CPU_32v6K
+ help
+ This option enables or disables the use of domain switching
+ via the set_fs() function.
+
#
# CPU supports 36-bit I/O
#
@@ -628,6 +636,33 @@ config ARM_THUMBEE
Say Y here if you have a CPU with the ThumbEE extension and code to
make use of it. Say N for code that can run on CPUs without ThumbEE.
+config SWP_EMULATE
+ bool "Emulate SWP/SWPB instructions"
+ depends on CPU_V7
+ select HAVE_PROC_CPU if PROC_FS
+ default y if SMP
+ help
+ The ARMv6 architecture deprecates use of the SWP/SWPB instructions.
+ The ARMv7 multiprocessing extensions introduce the ability to disable
+ these instructions, triggering an undefined instruction exception
+ when executed. Say Y here to enable software emulation of these
+ instructions for userspace (not kernel) using LDREX/STREX.
+ Also creates /proc/cpu/swp_emulation for statistics.
+
+ In some older versions of glibc [<=2.8] SWP is used during futex
+ trylock() operations with the assumption that the code will not
+ be preempted. This invalid assumption may be more likely to fail
+ with SWP emulation enabled, leading to deadlock of the user
+ application.
+
+ NOTE: when accessing uncached shared regions, LDREX/STREX rely
+ on an external transaction monitoring block called a global
+ monitor to maintain update atomicity. If your system does not
+ implement a global monitor, this option can cause programs that
+ perform SWP operations to uncached memory to deadlock.
+
+ If unsure, say Y.
+
config CPU_BIG_ENDIAN
bool "Build big-endian kernel"
depends on ARCH_SUPPORTS_BIG_ENDIAN
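
For reference, the emulation handler replaces each trapped SWP/SWPB with an
LDREX/STREX retry loop. A minimal sketch of the equivalent atomic-swap
sequence (hypothetical helper, not the handler's actual code):

	static inline unsigned int emulated_swp(unsigned int newval,
						volatile unsigned int *addr)
	{
		unsigned int oldval, failed;

		__asm__ __volatile__(
		"1:	ldrex	%0, [%3]\n"	/* exclusive load of old value */
		"	strex	%1, %2, [%3]\n"	/* try to store the new value  */
		"	teq	%1, #0\n"	/* exclusive monitor lost?     */
		"	bne	1b\n"		/* then retry                  */
		: "=&r" (oldval), "=&r" (failed)
		: "r" (newval), "r" (addr)
		: "cc", "memory");

		return oldval;
	}

Unlike a true SWP, this sequence can be interrupted between the load and the
store, which is exactly the window behind the glibc futex caveat above.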
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 6e77c042d8e9..e0b0e7a4ec68 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -13,13 +13,9 @@
*/
#include <linux/init.h>
+#include <linux/highmem.h>
#include <asm/cacheflush.h>
-#include <asm/kmap_types.h>
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
#include <plat/cache-feroceon-l2.h>
-#include "mm.h"
/*
* Low-level cache maintenance operations.
@@ -39,27 +35,30 @@
* between which we don't want to be preempted.
*/
-static inline unsigned long l2_start_va(unsigned long paddr)
+static inline unsigned long l2_get_va(unsigned long paddr)
{
#ifdef CONFIG_HIGHMEM
/*
- * Let's do our own fixmap stuff in a minimal way here.
* Because range ops can't be done on physical addresses,
* we simply install a virtual mapping for it only for the
* TLB lookup to occur, hence no need to flush the untouched
- * memory mapping. This is protected with the disabling of
- * interrupts by the caller.
+ * memory mapping afterwards (note: a cache flush may happen
+ * in some circumstances depending on the path taken in kunmap_atomic).
*/
- unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
- unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0);
- local_flush_tlb_kernel_page(vaddr);
- return vaddr + (paddr & ~PAGE_MASK);
+ void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
+ return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
#else
return __phys_to_virt(paddr);
#endif
}
+static inline void l2_put_va(unsigned long vaddr)
+{
+#ifdef CONFIG_HIGHMEM
+ kunmap_atomic((void *)vaddr);
+#endif
+}
+
static inline void l2_clean_pa(unsigned long addr)
{
__asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
@@ -76,13 +75,14 @@ static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
*/
BUG_ON((start ^ end) >> PAGE_SHIFT);
- raw_local_irq_save(flags);
- va_start = l2_start_va(start);
+ va_start = l2_get_va(start);
va_end = va_start + (end - start);
+ raw_local_irq_save(flags);
__asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
"mcr p15, 1, %1, c15, c9, 5"
: : "r" (va_start), "r" (va_end));
raw_local_irq_restore(flags);
+ l2_put_va(va_start);
}
static inline void l2_clean_inv_pa(unsigned long addr)
@@ -106,13 +106,14 @@ static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
*/
BUG_ON((start ^ end) >> PAGE_SHIFT);
- raw_local_irq_save(flags);
- va_start = l2_start_va(start);
+ va_start = l2_get_va(start);
va_end = va_start + (end - start);
+ raw_local_irq_save(flags);
__asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
"mcr p15, 1, %1, c15, c11, 5"
: : "r" (va_start), "r" (va_end));
raw_local_irq_restore(flags);
+ l2_put_va(va_start);
}
static inline void l2_inv_all(void)
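
The conversion above replaces the hand-rolled fixmap PTE manipulation with
the generic stack-based kmap_atomic API. kmap_atomic_pfn() disables
preemption until the matching kunmap_atomic(), so only the two-instruction
range operation itself still needs to run with interrupts masked. A
condensed sketch of the resulting pattern (helper names as in the patch):

	va_start = l2_get_va(start);		/* kmap_atomic_pfn() on highmem */
	va_end = va_start + (end - start);

	raw_local_irq_save(flags);
	/* ... the paired "start"/"end" cp15 range ops go here ... */
	raw_local_irq_restore(flags);

	l2_put_va(va_start);			/* kunmap_atomic() on highmem */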
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 99fa688dfadd..c96fa1b3f49f 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -203,6 +203,10 @@ ENTRY(v6_flush_kern_dcache_area)
* - end - virtual end address of region
*/
v6_dma_inv_range:
+#ifdef CONFIG_DMA_CACHE_RWFO
+ ldrb r2, [r0] @ read for ownership
+ strb r2, [r0] @ write for ownership
+#endif
tst r0, #D_CACHE_LINE_SIZE - 1
bic r0, r0, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
@@ -211,6 +215,10 @@ v6_dma_inv_range:
mcrne p15, 0, r0, c7, c11, 1 @ clean unified line
#endif
tst r1, #D_CACHE_LINE_SIZE - 1
+#ifdef CONFIG_DMA_CACHE_RWFO
+ ldrneb r2, [r1, #-1] @ read for ownership
+ strneb r2, [r1, #-1] @ write for ownership
+#endif
bic r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line
@@ -218,10 +226,6 @@ v6_dma_inv_range:
mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line
#endif
1:
-#ifdef CONFIG_DMA_CACHE_RWFO
- ldr r2, [r0] @ read for ownership
- str r2, [r0] @ write for ownership
-#endif
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c6, 1 @ invalidate D line
#else
@@ -229,6 +233,10 @@ v6_dma_inv_range:
#endif
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
+#ifdef CONFIG_DMA_CACHE_RWFO
+ ldrlo r2, [r0] @ read for ownership
+ strlo r2, [r0] @ write for ownership
+#endif
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
@@ -263,12 +271,12 @@ v6_dma_clean_range:
* - end - virtual end address of region
*/
ENTRY(v6_dma_flush_range)
- bic r0, r0, #D_CACHE_LINE_SIZE - 1
-1:
#ifdef CONFIG_DMA_CACHE_RWFO
- ldr r2, [r0] @ read for ownership
- str r2, [r0] @ write for ownership
+ ldrb r2, [r0] @ read for ownership
+ strb r2, [r0] @ write for ownership
#endif
+ bic r0, r0, #D_CACHE_LINE_SIZE - 1
+1:
#ifdef HARVARD_CACHE
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line
#else
@@ -276,6 +284,10 @@ ENTRY(v6_dma_flush_range)
#endif
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
+#ifdef CONFIG_DMA_CACHE_RWFO
+ ldrlob r2, [r0] @ read for ownership
+ strlob r2, [r0] @ write for ownership
+#endif
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
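
On SMP ARMv6, cache maintenance operations are not broadcast to other cores,
so CONFIG_DMA_CACHE_RWFO makes the local CPU read and then write each
location first, migrating the cache line into its own cache before operating
on it. A hypothetical C equivalent of the idiom used by the ldrb/strb pairs
above:

	static inline void rwfo_touch(volatile unsigned char *p)
	{
		unsigned char v = *p;	/* read for ownership  */
		*p = v;			/* write for ownership */
	}

Moving the touches ahead of the line-size alignment and using byte-wide
accesses keeps the ownership writes inside the caller's buffer, rather than
performing a racy read-back-and-rewrite of neighbouring data that another
CPU may be modifying concurrently.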
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index a3ebf7a4f49b..6136e68ce953 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -173,15 +173,22 @@ ENTRY(v7_coherent_user_range)
UNWIND(.fnstart )
dcache_line_size r2, r3
sub r3, r2, #1
- bic r0, r0, r3
+ bic r12, r0, r3
1:
- USER( mcr p15, 0, r0, c7, c11, 1 ) @ clean D line to the point of unification
+ USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification
+ add r12, r12, r2
+ cmp r12, r1
+ blo 1b
dsb
- USER( mcr p15, 0, r0, c7, c5, 1 ) @ invalidate I line
- add r0, r0, r2
+ icache_line_size r2, r3
+ sub r3, r2, #1
+ bic r12, r0, r3
2:
- cmp r0, r1
- blo 1b
+ USER( mcr p15, 0, r12, c7, c5, 1 ) @ invalidate I line
+ add r12, r12, r2
+ cmp r12, r1
+ blo 2b
+3:
mov r0, #0
ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable
ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB
@@ -194,10 +201,10 @@ ENTRY(v7_coherent_user_range)
* isn't mapped, just try the next page.
*/
9001:
- mov r0, r0, lsr #12
- mov r0, r0, lsl #12
- add r0, r0, #4096
- b 2b
+ mov r12, r12, lsr #12
+ mov r12, r12, lsl #12
+ add r12, r12, #4096
+ b 3b
UNWIND(.fnend )
ENDPROC(v7_coherent_kern_range)
ENDPROC(v7_coherent_user_range)
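
The restructured routine no longer assumes the I-cache line size equals the
D-cache line size: the D-side clean and the I-side invalidate now run as two
separate loops, each stepping by its own minimum line size from CTR. A
hypothetical C rendering of the new control flow (the line-size helpers are
sketched after the proc-macros.S diff below):

	static void coherent_user_range(unsigned long start, unsigned long end)
	{
		unsigned long dsz = dcache_min_line_size();	/* CTR.DminLine */
		unsigned long isz = icache_min_line_size();	/* CTR.IminLine */
		unsigned long addr;

		for (addr = start & ~(dsz - 1); addr < end; addr += dsz)
			asm volatile("mcr p15, 0, %0, c7, c11, 1"
				     : : "r" (addr));	/* clean D to PoU */

		asm volatile("dsb");	/* cleans complete before invalidates */

		for (addr = start & ~(isz - 1); addr < end; addr += isz)
			asm volatile("mcr p15, 0, %0, c7, c5, 1"
				     : : "r" (addr));	/* invalidate I line */
	}

Keeping the loop cursor in r12 also preserves r0, which is needed again to
realign the start address for the second loop.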
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index c3154928bccd..5a32020471e3 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -17,14 +17,10 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
+#include <linux/highmem.h>
#include <asm/system.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
-#include <asm/kmap_types.h>
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#include "mm.h"
#define CR_L2 (1 << 26)
@@ -71,16 +67,15 @@ static inline void xsc3_l2_inv_all(void)
dsb();
}
+static inline void l2_unmap_va(unsigned long va)
+{
#ifdef CONFIG_HIGHMEM
-#define l2_map_save_flags(x) raw_local_save_flags(x)
-#define l2_map_restore_flags(x) raw_local_irq_restore(x)
-#else
-#define l2_map_save_flags(x) ((x) = 0)
-#define l2_map_restore_flags(x) ((void)(x))
+ if (va != -1)
+ kunmap_atomic((void *)va);
#endif
+}
-static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
- unsigned long flags)
+static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
{
#ifdef CONFIG_HIGHMEM
unsigned long va = prev_va & PAGE_MASK;
@@ -89,17 +84,10 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
/*
* Switching to a new page. Because cache ops are
* using virtual addresses only, we must put a mapping
- * in place for it. We also enable interrupts for a
- * short while and disable them again to protect this
- * mapping.
+ * in place for it.
*/
- unsigned long idx;
- raw_local_irq_restore(flags);
- idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
- va = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- raw_local_irq_restore(flags | PSR_I_BIT);
- set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0);
- local_flush_tlb_kernel_page(va);
+ l2_unmap_va(prev_va);
+ va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
}
return va + (pa_offset >> (32 - PAGE_SHIFT));
#else
@@ -109,7 +97,7 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
{
- unsigned long vaddr, flags;
+ unsigned long vaddr;
if (start == 0 && end == -1ul) {
xsc3_l2_inv_all();
@@ -117,13 +105,12 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
}
vaddr = -1; /* to force the first mapping */
- l2_map_save_flags(flags);
/*
* Clean and invalidate partial first cache line.
*/
if (start & (CACHE_LINE_SIZE - 1)) {
- vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags);
+ vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
xsc3_l2_clean_mva(vaddr);
xsc3_l2_inv_mva(vaddr);
start = (start | (CACHE_LINE_SIZE - 1)) + 1;
@@ -133,7 +120,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
* Invalidate all full cache lines between 'start' and 'end'.
*/
while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
- vaddr = l2_map_va(start, vaddr, flags);
+ vaddr = l2_map_va(start, vaddr);
xsc3_l2_inv_mva(vaddr);
start += CACHE_LINE_SIZE;
}
@@ -142,31 +129,30 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
* Clean and invalidate partial last cache line.
*/
if (start < end) {
- vaddr = l2_map_va(start, vaddr, flags);
+ vaddr = l2_map_va(start, vaddr);
xsc3_l2_clean_mva(vaddr);
xsc3_l2_inv_mva(vaddr);
}
- l2_map_restore_flags(flags);
+ l2_unmap_va(vaddr);
dsb();
}
static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
{
- unsigned long vaddr, flags;
+ unsigned long vaddr;
vaddr = -1; /* to force the first mapping */
- l2_map_save_flags(flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
- vaddr = l2_map_va(start, vaddr, flags);
+ vaddr = l2_map_va(start, vaddr);
xsc3_l2_clean_mva(vaddr);
start += CACHE_LINE_SIZE;
}
- l2_map_restore_flags(flags);
+ l2_unmap_va(vaddr);
dsb();
}
@@ -193,7 +179,7 @@ static inline void xsc3_l2_flush_all(void)
static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
{
- unsigned long vaddr, flags;
+ unsigned long vaddr;
if (start == 0 && end == -1ul) {
xsc3_l2_flush_all();
@@ -201,17 +187,16 @@ static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
}
vaddr = -1; /* to force the first mapping */
- l2_map_save_flags(flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
- vaddr = l2_map_va(start, vaddr, flags);
+ vaddr = l2_map_va(start, vaddr);
xsc3_l2_clean_mva(vaddr);
xsc3_l2_inv_mva(vaddr);
start += CACHE_LINE_SIZE;
}
- l2_map_restore_flags(flags);
+ l2_unmap_va(vaddr);
dsb();
}
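
With kmap_atomic_pfn() the save/restore-flags dance around the old fixmap
slot disappears entirely: each loop keeps a single atomic mapping live, and
l2_map_va() only replaces it (unmapping the previous one) when the physical
address crosses into a new page, with l2_unmap_va() dropping the last
mapping after the loop. Condensed, the pattern is:

	vaddr = -1;			/* forces the first mapping */
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		vaddr = l2_map_va(start, vaddr);  /* remaps on page crossing */
		xsc3_l2_clean_mva(vaddr);
		start += CACHE_LINE_SIZE;
	}
	l2_unmap_va(vaddr);		/* drop the last mapping, if any */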
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ac6a36142fcd..6b48e0a3d7aa 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
#include <asm/memory.h>
#include <asm/highmem.h>
@@ -311,7 +312,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
addr = page_address(page);
if (addr)
- *handle = page_to_dma(dev, page);
+ *handle = pfn_to_dma(dev, page_to_pfn(page));
return addr;
}
@@ -406,7 +407,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
if (!arch_is_coherent())
__dma_free_remap(cpu_addr, size);
- __dma_free_buffer(dma_to_page(dev, handle), size);
+ __dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
}
EXPORT_SYMBOL(dma_free_coherent);
@@ -480,10 +481,10 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
op(vaddr, len, dir);
kunmap_high(page);
} else if (cache_is_vipt()) {
- pte_t saved_pte;
- vaddr = kmap_high_l1_vipt(page, &saved_pte);
+ /* unmapped pages might still be cached */
+ vaddr = kmap_atomic(page);
op(vaddr + offset, len, dir);
- kunmap_high_l1_vipt(page, saved_pte);
+ kunmap_atomic(vaddr);
}
} else {
vaddr = page_address(page) + offset;
@@ -554,17 +555,20 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
struct scatterlist *s;
int i, j;
+ BUG_ON(!valid_dma_direction(dir));
+
for_each_sg(sg, s, nents, i) {
- s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+ s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
s->length, dir);
if (dma_mapping_error(dev, s->dma_address))
goto bad_mapping;
}
+ debug_dma_map_sg(dev, sg, nents, nents, dir);
return nents;
bad_mapping:
for_each_sg(sg, s, i, j)
- dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+ __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
return 0;
}
EXPORT_SYMBOL(dma_map_sg);
@@ -585,8 +589,10 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
struct scatterlist *s;
int i;
+ debug_dma_unmap_sg(dev, sg, nents, dir);
+
for_each_sg(sg, s, nents, i)
- dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+ __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
EXPORT_SYMBOL(dma_unmap_sg);
@@ -611,6 +617,8 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
__dma_page_dev_to_cpu(sg_page(s), s->offset,
s->length, dir);
}
+
+ debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
@@ -635,5 +643,16 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
__dma_page_cpu_to_dev(sg_page(s), s->offset,
s->length, dir);
}
+
+ debug_dma_sync_sg_for_device(dev, sg, nents, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+#define PREALLOC_DMA_DEBUG_ENTRIES 4096
+
+static int __init dma_debug_do_init(void)
+{
+ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+ return 0;
+}
+fs_initcall(dma_debug_do_init);
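
The scatterlist paths now call the __dma_map_page()/__dma_unmap_page()
internals directly and report each operation to the dma-debug core, so with
CONFIG_DMA_API_DEBUG enabled the kernel can cross-check driver usage such as
this (hypothetical driver fragment):

	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	if (count == 0)
		return -ENOMEM;		/* mapping failed */

	/* ... program the device using sg_dma_address()/sg_dma_len() ... */

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);

A leaked or double-unmapped entry then shows up as a dma-debug warning once
dma_debug_init() has run from the fs_initcall registered above.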
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 391ffae75098..c29f2839f1d2 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
+#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
@@ -180,10 +181,10 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
kunmap_high(page);
} else if (cache_is_vipt()) {
- pte_t saved_pte;
- addr = kmap_high_l1_vipt(page, &saved_pte);
+ /* unmapped pages might still be cached */
+ addr = kmap_atomic(page);
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
- kunmap_high_l1_vipt(page, saved_pte);
+ kunmap_atomic(addr);
}
}
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index c435fd9e1da9..807c0573abbe 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -140,90 +140,3 @@ struct page *kmap_atomic_to_page(const void *ptr)
pte = TOP_PTE(vaddr);
return pte_page(*pte);
}
-
-#ifdef CONFIG_CPU_CACHE_VIPT
-
-#include <linux/percpu.h>
-
-/*
- * The VIVT cache of a highmem page is always flushed before the page
- * is unmapped. Hence unmapped highmem pages need no cache maintenance
- * in that case.
- *
- * However unmapped pages may still be cached with a VIPT cache, and
- * it is not possible to perform cache maintenance on them using physical
- * addresses unfortunately. So we have no choice but to set up a temporary
- * virtual mapping for that purpose.
- *
- * Yet this VIPT cache maintenance may be triggered from DMA support
- * functions which are possibly called from interrupt context. As we don't
- * want to keep interrupt disabled all the time when such maintenance is
- * taking place, we therefore allow for some reentrancy by preserving and
- * restoring the previous fixmap entry before the interrupted context is
- * resumed. If the reentrancy depth is 0 then there is no need to restore
- * the previous fixmap, and leaving the current one in place allow it to
- * be reused the next time without a TLB flush (common with DMA).
- */
-
-static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
-
-void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
-{
- unsigned int idx, cpu;
- int *depth;
- unsigned long vaddr, flags;
- pte_t pte, *ptep;
-
- if (!in_interrupt())
- preempt_disable();
-
- cpu = smp_processor_id();
- depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-
- idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- ptep = TOP_PTE(vaddr);
- pte = mk_pte(page, kmap_prot);
-
- raw_local_irq_save(flags);
- (*depth)++;
- if (pte_val(*ptep) == pte_val(pte)) {
- *saved_pte = pte;
- } else {
- *saved_pte = *ptep;
- set_pte_ext(ptep, pte, 0);
- local_flush_tlb_kernel_page(vaddr);
- }
- raw_local_irq_restore(flags);
-
- return (void *)vaddr;
-}
-
-void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
-{
- unsigned int idx, cpu = smp_processor_id();
- int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
- unsigned long vaddr, flags;
- pte_t pte, *ptep;
-
- idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- ptep = TOP_PTE(vaddr);
- pte = mk_pte(page, kmap_prot);
-
- BUG_ON(pte_val(*ptep) != pte_val(pte));
- BUG_ON(*depth <= 0);
-
- raw_local_irq_save(flags);
- (*depth)--;
- if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
- set_pte_ext(ptep, saved_pte, 0);
- local_flush_tlb_kernel_page(vaddr);
- }
- raw_local_irq_restore(flags);
-
- if (!in_interrupt())
- preempt_enable();
-}
-
-#endif /* CONFIG_CPU_CACHE_VIPT */
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 17e7b0b57e49..ab506272b2d3 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -204,12 +204,8 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
/*
* Don't allow RAM to be mapped - this causes problems with ARMv6+
*/
- if (pfn_valid(pfn)) {
- printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n"
- KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
- KERN_WARNING "will fail in the next kernel release. Please fix your driver.\n");
- WARN_ON(1);
- }
+ if (WARN_ON(pfn_valid(pfn)))
+ return NULL;
type = get_mem_type(mtype);
if (!type)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 546e44734db0..3c67e92f7d59 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -24,6 +24,7 @@
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
+#include <asm/traps.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -913,12 +914,11 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
{
struct map_desc map;
unsigned long addr;
- void *vectors;
/*
* Allocate the vector page early.
*/
- vectors = early_alloc(PAGE_SIZE);
+ vectors_page = early_alloc(PAGE_SIZE);
for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
@@ -958,7 +958,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
* location (0xffff0000). If we aren't using high-vectors, also
* create a mapping at the low-vectors virtual address.
*/
- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+ map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
map.virtual = 0xffff0000;
map.length = PAGE_SIZE;
map.type = MT_HIGH_VECTORS;
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index f5ca6aaecdbd..e32fa499194c 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -61,17 +61,27 @@
.endm
/*
- * cache_line_size - get the cache line size from the CSIDR register
- * (available on ARMv7+). It assumes that the CSSR register was configured
- * to access the L1 data cache CSIDR.
+ * dcache_line_size - get the minimum D-cache line size from the CTR register
+ * on ARMv7.
*/
.macro dcache_line_size, reg, tmp
- mrc p15, 1, \tmp, c0, c0, 0 @ read CSIDR
- and \tmp, \tmp, #7 @ cache line size encoding
- mov \reg, #16 @ size offset
+ mrc p15, 0, \tmp, c0, c0, 1 @ read ctr
+ lsr \tmp, \tmp, #16
+ and \tmp, \tmp, #0xf @ cache line size encoding
+ mov \reg, #4 @ bytes per word
mov \reg, \reg, lsl \tmp @ actual cache line size
.endm
+/*
+ * icache_line_size - get the minimum I-cache line size from the CTR register
+ * on ARMv7.
+ */
+ .macro icache_line_size, reg, tmp
+ mrc p15, 0, \tmp, c0, c0, 1 @ read ctr
+ and \tmp, \tmp, #0xf @ cache line size encoding
+ mov \reg, #4 @ bytes per word
+ mov \reg, \reg, lsl \tmp @ actual cache line size
+ .endm
/*
* Sanity check the PTE configuration for the code below - which makes
@@ -99,6 +109,10 @@
* 110x 0 1 0 r/w r/o
* 11x0 0 1 0 r/w r/o
* 1111 0 1 1 r/w r/w
+ *
+ * If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed:
+ * 110x 1 1 1 r/o r/o
+ * 11x0 1 1 1 r/o r/o
*/
.macro armv6_mt_table pfx
\pfx\()_mt_table:
@@ -138,8 +152,11 @@
tst r1, #L_PTE_USER
orrne r3, r3, #PTE_EXT_AP1
+#ifdef CONFIG_CPU_USE_DOMAINS
+ @ allow kernel read/write access to read-only user pages
tstne r3, #PTE_EXT_APX
bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+#endif
tst r1, #L_PTE_XN
orrne r3, r3, #PTE_EXT_XN
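
The old macro read CCSIDR, which only describes the cache level currently
selected through CSSELR; the new macros read CTR, whose DminLine (bits
[19:16]) and IminLine (bits [3:0]) fields encode the log2 of the smallest
line size, in words, across all cache levels. In C the decoding amounts to
(a sketch with hypothetical helper names):

	static inline unsigned int read_ctr(void)
	{
		unsigned int ctr;

		asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));	/* read CTR */
		return ctr;
	}

	static inline unsigned int dcache_min_line_size(void)
	{
		return 4U << ((read_ctr() >> 16) & 0xf);  /* DminLine */
	}

	static inline unsigned int icache_min_line_size(void)
	{
		return 4U << (read_ctr() & 0xf);	  /* IminLine */
	}

This is the "mov \reg, #4; mov \reg, \reg, lsl \tmp" computation performed
by both macros above.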
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 210d051c54d7..b49fab21517c 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -146,8 +146,11 @@ ENTRY(cpu_v7_set_pte_ext)
tst r1, #L_PTE_USER
orrne r3, r3, #PTE_EXT_AP1
+#ifdef CONFIG_CPU_USE_DOMAINS
+ @ allow kernel read/write access to read-only user pages
tstne r3, #PTE_EXT_APX
bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
+#endif
tst r1, #L_PTE_XN
orrne r3, r3, #PTE_EXT_XN
@@ -271,8 +274,6 @@ __v7_setup:
ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
- mov r10, #0x1f @ domains 0, 1 = manager
- mcr p15, 0, r10, c3, c0, 0 @ load domain access register
/*
* Memory region attributes with SCTLR.TRE=1
*
@@ -311,6 +312,10 @@ __v7_setup:
#ifdef CONFIG_CPU_ENDIAN_BE8
orr r6, r6, #1 << 25 @ big-endian page tables
#endif
+#ifdef CONFIG_SWP_EMULATE
+ orr r5, r5, #(1 << 10) @ set SW bit in "clear"
+ bic r6, r6, #(1 << 10) @ clear it in "mmuset"
+#endif
mrc p15, 0, r0, c1, c0, 0 @ read control register
bic r0, r0, r5 @ clear the unwanted bits
orr r0, r0, r6 @ set them
@@ -379,7 +384,7 @@ __v7_ca9mp_proc_info:
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __v7_ca9mp_setup
+ W(b) __v7_ca9mp_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
@@ -411,7 +416,7 @@ __v7_proc_info:
PMD_SECT_XN | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
- b __v7_setup
+ W(b) __v7_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
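
For CONFIG_SWP_EMULATE, __v7_setup adds SCTLR bit 10 (the SW bit) to the
"clear" mask in r5 and removes it from the "set" mask in r6, so the final
mrc/bic/orr/mcr sequence leaves SWP/SWPB undefined and every use traps into
the emulation handler. Illustratively, the register update amounts to:

	unsigned int sctlr;

	asm("mrc p15, 0, %0, c1, c0, 0" : "=r" (sctlr));   /* read SCTLR */
	sctlr &= ~(1U << 10);	/* SCTLR.SW = 0: SWP/SWPB now undefined */
	asm("mcr p15, 0, %0, c1, c0, 0" : : "r" (sctlr));  /* write SCTLR */

The W(b) branches force a 32-bit instruction encoding when the kernel is
assembled as Thumb-2, keeping the fixed-layout proc_info records the size
the lookup code expects.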