Diffstat (limited to 'arch/arm/mm')
 -rw-r--r--  arch/arm/mm/Kconfig        |   8
 -rw-r--r--  arch/arm/mm/alignment.c    |   6
 -rw-r--r--  arch/arm/mm/cache-l2x0.c   |  72
 -rw-r--r--  arch/arm/mm/context.c      | 124
 -rw-r--r--  arch/arm/mm/dma-mapping.c  |   3
 -rw-r--r--  arch/arm/mm/init.c         | 113
 -rw-r--r--  arch/arm/mm/ioremap.c      |  57
 -rw-r--r--  arch/arm/mm/mmu.c          |  41
 -rw-r--r--  arch/arm/mm/nommu.c        |  12
 9 files changed, 343 insertions(+), 93 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index baf638487a2d..c4ed9f93f646 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -399,7 +399,7 @@ config CPU_V6
config CPU_32v6K
bool "Support ARM V6K processor extensions" if !SMP
depends on CPU_V6
- default y if SMP && !ARCH_MX3
+ default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
help
Say Y here if your ARMv6 processor supports the 'K' extension.
This enables the kernel to use some instructions not present
@@ -410,7 +410,7 @@ config CPU_32v6K
# ARMv7
config CPU_V7
bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
- select CPU_32v6K
+ select CPU_32v6K if !ARCH_OMAP2
select CPU_32v7
select CPU_ABRT_EV7
select CPU_PABRT_V7
@@ -754,7 +754,7 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
config CACHE_L2X0
bool "Enable the L2x0 outer cache controller"
depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
- REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK
+ REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4
default y
select OUTER_CACHE
help
@@ -779,5 +779,5 @@ config CACHE_XSC3L2
config ARM_L1_CACHE_SHIFT
int
- default 6 if ARCH_OMAP3 || ARCH_S5PC1XX
+ default 6 if ARM_L1_CACHE_SHIFT_6
default 5
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index b270d6228fe2..0c5eb6983cef 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -898,11 +898,7 @@ static int __init alignment_init(void)
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *res;
- res = proc_mkdir("cpu", NULL);
- if (!res)
- return -ENOMEM;
-
- res = create_proc_entry("alignment", S_IWUSR | S_IRUGO, res);
+ res = create_proc_entry("cpu/alignment", S_IWUSR | S_IRUGO, NULL);
if (!res)
return -ENOMEM;
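
For context, the simplified init path would look roughly like the sketch below; the handler assignments are illustrative of how the existing file wires up the proc entry, not an exact copy of the final code.

static int __init alignment_init(void)
{
	struct proc_dir_entry *res;

	/* one call creates /proc/cpu/alignment; no separate proc_mkdir() */
	res = create_proc_entry("cpu/alignment", S_IWUSR | S_IRUGO, NULL);
	if (!res)
		return -ENOMEM;

	/* hypothetical handler names for this sketch */
	res->read_proc = proc_alignment_read;
	res->write_proc = proc_alignment_write;

	/* ... register the alignment fault hooks as before ... */
	return 0;
}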
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index cb8fc6573b1b..07334632d3e2 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -42,6 +42,57 @@ static inline void cache_sync(void)
cache_wait(base + L2X0_CACHE_SYNC, 1);
}
+static inline void l2x0_clean_line(unsigned long addr)
+{
+ void __iomem *base = l2x0_base;
+ cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+ writel(addr, base + L2X0_CLEAN_LINE_PA);
+}
+
+static inline void l2x0_inv_line(unsigned long addr)
+{
+ void __iomem *base = l2x0_base;
+ cache_wait(base + L2X0_INV_LINE_PA, 1);
+ writel(addr, base + L2X0_INV_LINE_PA);
+}
+
+#ifdef CONFIG_PL310_ERRATA_588369
+static void debug_writel(unsigned long val)
+{
+ extern void omap_smc1(u32 fn, u32 arg);
+
+ /*
+ * Texas Instruments secure monitor API to modify the
+ * PL310 Debug Control Register.
+ */
+ omap_smc1(0x100, val);
+}
+
+static inline void l2x0_flush_line(unsigned long addr)
+{
+ void __iomem *base = l2x0_base;
+
+ /* Clean by PA followed by Invalidate by PA */
+ cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
+ writel(addr, base + L2X0_CLEAN_LINE_PA);
+ cache_wait(base + L2X0_INV_LINE_PA, 1);
+ writel(addr, base + L2X0_INV_LINE_PA);
+}
+#else
+
+/* Optimised out for non-errata case */
+static inline void debug_writel(unsigned long val)
+{
+}
+
+static inline void l2x0_flush_line(unsigned long addr)
+{
+ void __iomem *base = l2x0_base;
+ cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
+ writel(addr, base + L2X0_CLEAN_INV_LINE_PA);
+}
+#endif
+
static inline void l2x0_inv_all(void)
{
unsigned long flags;
@@ -62,23 +113,24 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
spin_lock_irqsave(&l2x0_lock, flags);
if (start & (CACHE_LINE_SIZE - 1)) {
start &= ~(CACHE_LINE_SIZE - 1);
- cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
- writel(start, base + L2X0_CLEAN_INV_LINE_PA);
+ debug_writel(0x03);
+ l2x0_flush_line(start);
+ debug_writel(0x00);
start += CACHE_LINE_SIZE;
}
if (end & (CACHE_LINE_SIZE - 1)) {
end &= ~(CACHE_LINE_SIZE - 1);
- cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
- writel(end, base + L2X0_CLEAN_INV_LINE_PA);
+ debug_writel(0x03);
+ l2x0_flush_line(end);
+ debug_writel(0x00);
}
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);
while (start < blk_end) {
- cache_wait(base + L2X0_INV_LINE_PA, 1);
- writel(start, base + L2X0_INV_LINE_PA);
+ l2x0_inv_line(start);
start += CACHE_LINE_SIZE;
}
@@ -103,8 +155,7 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
unsigned long blk_end = start + min(end - start, 4096UL);
while (start < blk_end) {
- cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
- writel(start, base + L2X0_CLEAN_LINE_PA);
+ l2x0_clean_line(start);
start += CACHE_LINE_SIZE;
}
@@ -128,11 +179,12 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);
+ debug_writel(0x03);
while (start < blk_end) {
- cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
- writel(start, base + L2X0_CLEAN_INV_LINE_PA);
+ l2x0_flush_line(start);
start += CACHE_LINE_SIZE;
}
+ debug_writel(0x00);
if (blk_end < end) {
spin_unlock_irqrestore(&l2x0_lock, flags);
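
For reference, these per-range routines are the backends that l2x0_init() installs into the global outer_cache operations; a rough sketch of that wiring (not part of this diff, details abbreviated):

void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
	l2x0_base = base;

	/* ... auxiliary control register setup and l2x0_inv_all() ... */

	outer_cache.inv_range = l2x0_inv_range;
	outer_cache.clean_range = l2x0_clean_range;
	outer_cache.flush_range = l2x0_flush_range;
}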
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index a9e22e31eaa1..b0ee9ba3cfab 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -10,12 +10,17 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
static DEFINE_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(struct mm_struct *, current_mm);
+#endif
/*
* We fork()ed a process, and we need a new context for the child
@@ -26,13 +31,109 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION;
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
mm->context.id = 0;
+ spin_lock_init(&mm->context.id_lock);
}
+static void flush_context(void)
+{
+ /* set the reserved ASID before flushing the TLB */
+ asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0));
+ isb();
+ local_flush_tlb_all();
+ if (icache_is_vivt_asid_tagged()) {
+ __flush_icache_all();
+ dsb();
+ }
+}
+
+#ifdef CONFIG_SMP
+
+static void set_mm_context(struct mm_struct *mm, unsigned int asid)
+{
+ unsigned long flags;
+
+ /*
+ * Locking needed for multi-threaded applications where the
+ * same mm->context.id could be set from different CPUs during
+ * the broadcast. This function is also called via IPI so the
+ * mm->context.id_lock has to be IRQ-safe.
+ */
+ spin_lock_irqsave(&mm->context.id_lock, flags);
+ if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
+ /*
+ * Old version of ASID found. Set the new one and
+ * reset mm_cpumask(mm).
+ */
+ mm->context.id = asid;
+ cpumask_clear(mm_cpumask(mm));
+ }
+ spin_unlock_irqrestore(&mm->context.id_lock, flags);
+
+ /*
+ * Set the mm_cpumask(mm) bit for the current CPU.
+ */
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+}
+
+/*
+ * Reset the ASID on the current CPU. This function call is broadcast
+ * from the CPU handling the ASID rollover and holding cpu_asid_lock.
+ */
+static void reset_context(void *info)
+{
+ unsigned int asid;
+ unsigned int cpu = smp_processor_id();
+ struct mm_struct *mm = per_cpu(current_mm, cpu);
+
+ /*
+ * Check if a current_mm was set on this CPU as it might still
+ * be in the early booting stages and using the reserved ASID.
+ */
+ if (!mm)
+ return;
+
+ smp_rmb();
+ asid = cpu_last_asid + cpu + 1;
+
+ flush_context();
+ set_mm_context(mm, asid);
+
+ /* set the new ASID */
+ asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
+ isb();
+}
+
+#else
+
+static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
+{
+ mm->context.id = asid;
+ cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
+}
+
+#endif
+
void __new_context(struct mm_struct *mm)
{
unsigned int asid;
spin_lock(&cpu_asid_lock);
+#ifdef CONFIG_SMP
+ /*
+ * Check the ASID again, in case the change was broadcast from
+ * another CPU before we acquired the lock.
+ */
+ if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
+ cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+ spin_unlock(&cpu_asid_lock);
+ return;
+ }
+#endif
+ /*
+ * At this point, it is guaranteed that the current mm (with
+ * an old ASID) isn't active on any other CPU since the ASIDs
+ * are changed simultaneously via IPI.
+ */
asid = ++cpu_last_asid;
if (asid == 0)
asid = cpu_last_asid = ASID_FIRST_VERSION;
@@ -42,20 +143,15 @@ void __new_context(struct mm_struct *mm)
* to start a new version and flush the TLB.
*/
if (unlikely((asid & ~ASID_MASK) == 0)) {
- asid = ++cpu_last_asid;
- /* set the reserved ASID before flushing the TLB */
- asm("mcr p15, 0, %0, c13, c0, 1 @ set reserved context ID\n"
- :
- : "r" (0));
- isb();
- flush_tlb_all();
- if (icache_is_vivt_asid_tagged()) {
- __flush_icache_all();
- dsb();
- }
+ asid = cpu_last_asid + smp_processor_id() + 1;
+ flush_context();
+#ifdef CONFIG_SMP
+ smp_wmb();
+ smp_call_function(reset_context, NULL, 1);
+#endif
+ cpu_last_asid += NR_CPUS;
}
- spin_unlock(&cpu_asid_lock);
- cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
- mm->context.id = asid;
+ set_mm_context(mm, asid);
+ spin_unlock(&cpu_asid_lock);
}
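
For context, __new_context() is reached from the context-switch path roughly as sketched below (per this era's asm/mmu_context.h); this is why the SMP re-check of cpu_last_asid under the lock above is needed.

static inline void check_context(struct mm_struct *mm)
{
	/* only allocate a new ASID when the mm's one is from an old generation */
	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
		__new_context(mm);

	/* on SMP, the switch code also records the mm in the per-CPU current_mm */
}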
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 64daef2173bd..0da7eccf7749 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -29,9 +29,6 @@
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif
-#define CONSISTENT_END (0xffe00000)
-#define CONSISTENT_BASE (CONSISTENT_END - CONSISTENT_DMA_SIZE)
-
#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
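
The two removed definitions presumably move into a shared header such as <asm/memory.h>, since the init.c changes below print the DMA remap region via CONSISTENT_BASE/CONSISTENT_END. As removed here they were:

#define CONSISTENT_END	(0xffe00000)
#define CONSISTENT_BASE	(CONSISTENT_END - CONSISTENT_DMA_SIZE)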
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index a04ffbbbe253..7829cb5425f5 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -23,6 +23,7 @@
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
+#include <asm/fixmap.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -32,19 +33,21 @@
static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;
-static void __init early_initrd(char **p)
+static int __init early_initrd(char *p)
{
unsigned long start, size;
+ char *endp;
- start = memparse(*p, p);
- if (**p == ',') {
- size = memparse((*p) + 1, p);
+ start = memparse(p, &endp);
+ if (*endp == ',') {
+ size = memparse(endp + 1, NULL);
phys_initrd_start = start;
phys_initrd_size = size;
}
+ return 0;
}
-__early_param("initrd=", early_initrd);
+early_param("initrd", early_initrd);
static int __init parse_tag_initrd(const struct tag *tag)
{
@@ -560,7 +563,7 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
*/
void __init mem_init(void)
{
- unsigned int codesize, datasize, initsize;
+ unsigned long reserved_pages, free_pages;
int i, node;
#ifndef CONFIG_DISCONTIGMEM
@@ -596,6 +599,33 @@ void __init mem_init(void)
totalram_pages += totalhigh_pages;
#endif
+ reserved_pages = free_pages = 0;
+
+ for_each_online_node(node) {
+ pg_data_t *n = NODE_DATA(node);
+ struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;
+
+ for_each_nodebank(i, &meminfo, node) {
+ struct membank *bank = &meminfo.bank[i];
+ unsigned int pfn1, pfn2;
+ struct page *page, *end;
+
+ pfn1 = bank_pfn_start(bank);
+ pfn2 = bank_pfn_end(bank);
+
+ page = map + pfn1;
+ end = map + pfn2;
+
+ do {
+ if (PageReserved(page))
+ reserved_pages++;
+ else if (!page_count(page))
+ free_pages++;
+ page++;
+ } while (page < end);
+ }
+ }
+
/*
* Since our memory may not be contiguous, calculate the
* real number of pages we have in this system
@@ -608,16 +638,71 @@ void __init mem_init(void)
}
printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
- codesize = _etext - _text;
- datasize = _end - _data;
- initsize = __init_end - __init_begin;
-
- printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
- "%dK data, %dK init, %luK highmem)\n",
- nr_free_pages() << (PAGE_SHIFT-10), codesize >> 10,
- datasize >> 10, initsize >> 10,
+ printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
+ nr_free_pages() << (PAGE_SHIFT-10),
+ free_pages << (PAGE_SHIFT-10),
+ reserved_pages << (PAGE_SHIFT-10),
totalhigh_pages << (PAGE_SHIFT-10));
+#define MLK(b, t) b, t, ((t) - (b)) >> 10
+#define MLM(b, t) b, t, ((t) - (b)) >> 20
+#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
+
+ printk(KERN_NOTICE "Virtual kernel memory layout:\n"
+ " vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
+ " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
+#ifdef CONFIG_MMU
+ " DMA : 0x%08lx - 0x%08lx (%4ld MB)\n"
+#endif
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
+#ifdef CONFIG_HIGHMEM
+ " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
+#endif
+ " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
+ " .init : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .text : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .data : 0x%p" " - 0x%p" " (%4d kB)\n",
+
+ MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
+ (PAGE_SIZE)),
+ MLK(FIXADDR_START, FIXADDR_TOP),
+#ifdef CONFIG_MMU
+ MLM(CONSISTENT_BASE, CONSISTENT_END),
+#endif
+ MLM(VMALLOC_START, VMALLOC_END),
+ MLM(PAGE_OFFSET, (unsigned long)high_memory),
+#ifdef CONFIG_HIGHMEM
+ MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
+ (PAGE_SIZE)),
+#endif
+ MLM(MODULES_VADDR, MODULES_END),
+
+ MLK_ROUNDUP(__init_begin, __init_end),
+ MLK_ROUNDUP(_text, _etext),
+ MLK_ROUNDUP(_data, _edata));
+
+#undef MLK
+#undef MLM
+#undef MLK_ROUNDUP
+
+ /*
+ * Check boundaries twice: Some fundamental inconsistencies can
+ * be detected at build time already.
+ */
+#ifdef CONFIG_MMU
+ BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE);
+ BUG_ON(VMALLOC_END > CONSISTENT_BASE);
+
+ BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
+ BUG_ON(TASK_SIZE > MODULES_VADDR);
+#endif
+
+#ifdef CONFIG_HIGHMEM
+ BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
+ BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
+#endif
+
if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
extern int sysctl_overcommit_memory;
/*
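
As a worked example of the helper macros used in the layout printk above, MLM(b, t) expands to the three arguments for one line: the bounds and the size in megabytes.

#define MLM(b, t)	b, t, ((t) - (b)) >> 20

/*
 * e.g. MLM(0xc0000000UL, 0xe0000000UL) -> 0xc0000000, 0xe0000000, 512,
 * which the "0x%08lx - 0x%08lx (%4ld MB)" format prints as a 512 MB range.
 */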
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 0ab75c60f7cf..28c8b950ef04 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -139,8 +139,8 @@ void __check_kvm_seq(struct mm_struct *mm)
* which requires the new ioremap'd region to be referenced, the CPU will
* reference the _old_ region.
*
- * Note that get_vm_area() allocates a guard 4K page, so we need to mask
- * the size back to 1MB aligned or we will overflow in the loop below.
+ * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
+ * mask the size back to 1MB aligned or we will overflow in the loop below.
*/
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
@@ -254,22 +254,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
}
#endif
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- *
- * 'flags' are the extra L_PTE_ flags that you want to specify for this
- * mapping. See <asm/pgtable.h> for more information.
- */
-void __iomem *
-__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
- unsigned int mtype)
+void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
+ unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
const struct mem_type *type;
int err;
@@ -291,7 +277,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
*/
size = PAGE_ALIGN(offset + size);
- area = get_vm_area(size, VM_IOREMAP);
+ area = get_vm_area_caller(size, VM_IOREMAP, caller);
if (!area)
return NULL;
addr = (unsigned long)area->addr;
@@ -318,10 +304,9 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
flush_cache_vmap(addr, addr + size);
return (void __iomem *) (offset + addr);
}
-EXPORT_SYMBOL(__arm_ioremap_pfn);
-void __iomem *
-__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+ unsigned int mtype, void *caller)
{
unsigned long last_addr;
unsigned long offset = phys_addr & ~PAGE_MASK;
@@ -334,7 +319,33 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
if (!size || last_addr < phys_addr)
return NULL;
- return __arm_ioremap_pfn(pfn, offset, size, mtype);
+ return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
+ caller);
+}
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem *
+__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
+ unsigned int mtype)
+{
+ return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__arm_ioremap_pfn);
+
+void __iomem *
+__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+{
+ return __arm_ioremap_caller(phys_addr, size, mtype,
+ __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);
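
To illustrate the point of the _caller variants: an ordinary driver mapping like the hypothetical sketch below can now be attributed to its call site (for example in /proc/vmallocinfo), since ioremap() expands to __arm_ioremap(), which records __builtin_return_address(0). The address and size are invented for the example.

static void __iomem *regs;

static int __init example_probe(void)
{
	/* hypothetical device register window */
	regs = ioremap(0x48000000, SZ_4K);
	if (!regs)
		return -ENOMEM;
	return 0;
}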
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 1708da82da96..88f5d71248d9 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -100,18 +100,17 @@ static struct cachepolicy cache_policies[] __initdata = {
* writebuffer to be turned off. (Note: the write
* buffer should not be on and the cache off).
*/
-static void __init early_cachepolicy(char **p)
+static int __init early_cachepolicy(char *p)
{
int i;
for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
int len = strlen(cache_policies[i].policy);
- if (memcmp(*p, cache_policies[i].policy, len) == 0) {
+ if (memcmp(p, cache_policies[i].policy, len) == 0) {
cachepolicy = i;
cr_alignment &= ~cache_policies[i].cr_mask;
cr_no_alignment &= ~cache_policies[i].cr_mask;
- *p += len;
break;
}
}
@@ -130,36 +129,37 @@ static void __init early_cachepolicy(char **p)
}
flush_cache_all();
set_cr(cr_alignment);
+ return 0;
}
-__early_param("cachepolicy=", early_cachepolicy);
+early_param("cachepolicy", early_cachepolicy);
-static void __init early_nocache(char **__unused)
+static int __init early_nocache(char *__unused)
{
char *p = "buffered";
printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
- early_cachepolicy(&p);
+ early_cachepolicy(p);
+ return 0;
}
-__early_param("nocache", early_nocache);
+early_param("nocache", early_nocache);
-static void __init early_nowrite(char **__unused)
+static int __init early_nowrite(char *__unused)
{
char *p = "uncached";
printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
- early_cachepolicy(&p);
+ early_cachepolicy(p);
+ return 0;
}
-__early_param("nowb", early_nowrite);
+early_param("nowb", early_nowrite);
-static void __init early_ecc(char **p)
+static int __init early_ecc(char *p)
{
- if (memcmp(*p, "on", 2) == 0) {
+ if (memcmp(p, "on", 2) == 0)
ecc_mask = PMD_PROTECTION;
- *p += 2;
- } else if (memcmp(*p, "off", 3) == 0) {
+ else if (memcmp(p, "off", 3) == 0)
ecc_mask = 0;
- *p += 3;
- }
+ return 0;
}
-__early_param("ecc=", early_ecc);
+early_param("ecc", early_ecc);
static int __init noalign_setup(char *__unused)
{
@@ -670,9 +670,9 @@ static unsigned long __initdata vmalloc_reserve = SZ_128M;
* bytes. This can be used to increase (or decrease) the vmalloc
* area - the default is 128m.
*/
-static void __init early_vmalloc(char **arg)
+static int __init early_vmalloc(char *arg)
{
- vmalloc_reserve = memparse(*arg, arg);
+ vmalloc_reserve = memparse(arg, NULL);
if (vmalloc_reserve < SZ_16M) {
vmalloc_reserve = SZ_16M;
@@ -687,8 +687,9 @@ static void __init early_vmalloc(char **arg)
"vmalloc area is too big, limiting to %luMB\n",
vmalloc_reserve >> 20);
}
+ return 0;
}
-__early_param("vmalloc=", early_vmalloc);
+early_param("vmalloc", early_vmalloc);
#define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve)
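
These conversions all follow the generic early_param() pattern from include/linux/init.h: the handler is registered as an early option, runs from parse_early_param() before the regular __setup handlers, and receives the value string directly instead of a pointer into the command line. A minimal sketch of the pattern:

static int __init early_example(char *p)
{
	/* p points at the text following "example=" on the command line */
	if (p && *p)
		printk(KERN_INFO "example=%s\n", p);
	return 0;
}
early_param("example", early_example);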
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 374a8311bc84..9bfeb6b9509a 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -74,6 +74,12 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
+void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
+ size_t size, unsigned int mtype, void *caller)
+{
+ return __arm_ioremap_pfn(pfn, offset, size, mtype);
+}
+
void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
unsigned int mtype)
{
@@ -81,6 +87,12 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size,
}
EXPORT_SYMBOL(__arm_ioremap);
+void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+ unsigned int mtype, void *caller)
+{
+ return __arm_ioremap(phys_addr, size, mtype);
+}
+
void __iounmap(volatile void __iomem *addr)
{
}