From a849088aa1552b1a28eea3daff599ee22a734ae3 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Sat, 25 Aug 2012 09:03:15 +0100
Subject: ARM: Fix ioremap() of address zero

Murali Nalajala reports a regression that ioremapping address zero
results in an oops dump:

Unable to handle kernel paging request at virtual address fa200000
pgd = d4f80000
[fa200000] *pgd=00000000
Internal error: Oops: 5 [#1] PREEMPT SMP ARM
Modules linked in:
CPU: 0    Tainted: G        W    (3.4.0-g3b5f728-00009-g638207a #13)
PC is at msm_pm_config_rst_vector_before_pc+0x8/0x30
LR is at msm_pm_boot_config_before_pc+0x18/0x20
pc : []    lr : []    psr: a0000093
sp : c0837ef0  ip : cfe00000  fp : 0000000d
r10: da7efc17  r9 : 225c4278  r8 : 00000006
r7 : 0003c000  r6 : c085c824  r5 : 00000001
r4 : fa101000  r3 : fa200000  r2 : c095080c
r1 : 002250fc  r0 : 00000000
Flags: NzCv  IRQs off  FIQs on  Mode SVC_32  ISA ARM  Segment kernel
Control: 10c5387d  Table: 25180059  DAC: 00000015
[] (msm_pm_config_rst_vector_before_pc+0x8/0x30) from [] (msm_pm_boot_config_before_pc+0x18/0x20)
[] (msm_pm_boot_config_before_pc+0x18/0x20) from [] (msm_pm_power_collapse+0x410/0xb04)
[] (msm_pm_power_collapse+0x410/0xb04) from [] (arch_idle+0x294/0x3e0)
[] (arch_idle+0x294/0x3e0) from [] (default_idle+0x18/0x2c)
[] (default_idle+0x18/0x2c) from [] (cpu_idle+0x90/0xe4)
[] (cpu_idle+0x90/0xe4) from [] (rest_init+0x88/0xa0)
[] (rest_init+0x88/0xa0) from [] (start_kernel+0x3a8/0x40c)
Code: c0704256 e12fff1e e59f2020 e5923000 (e5930000)

This is caused by the 'reserved' entries which we insert (see
19b52abe3c5d7 - ARM: 7438/1: fill possible PMD empty section gaps)
which get matched for physical address zero.

Resolve this by marking these reserved entries with a different flag.

Cc:
Tested-by: Murali Nalajala
Acked-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/mm/mmu.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/arm/mm/mmu.c')

diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4c2d0451e84a..eab94bc6f805 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -807,7 +807,7 @@ static void __init pmd_empty_section_gap(unsigned long addr)
 	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
 	vm->addr = (void *)addr;
 	vm->size = SECTION_SIZE;
-	vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
 	vm->caller = pmd_empty_section_gap;
 	vm_area_add_early(vm);
 }
@@ -820,7 +820,7 @@ static void __init fill_pmd_gaps(void)
 
 	/* we're still single threaded hence no lock needed here */
 	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
 			continue;
 		addr = (unsigned long)vm->addr;
 		if (addr < next)
--
cgit v1.2.1
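
Editor's note (not part of the patch): this cgit view is limited to mmu.c, so the
definition of VM_ARM_EMPTY_MAPPING is not shown here; presumably the full commit
introduces it as a separate bit alongside VM_ARM_STATIC_MAPPING in arch/arm/mm/mm.h.
The standalone C sketch below uses placeholder bit values to illustrate the idea of
the fix: the gap-filler entries now carry a flag that ioremap()'s static-mapping
lookup does not match, while fill_pmd_gaps() checks for either flag. Names and
values in the sketch are illustrative assumptions, not the kernel's definitions.

/*
 * Standalone sketch (not kernel code) of the flag split this patch relies on.
 * The bit values and struct below are placeholders for illustration only.
 */
#include <stdio.h>

#define VM_IOREMAP		0x00000001	/* placeholder value */
#define VM_ARM_STATIC_MAPPING	0x40000000	/* placeholder value */
#define VM_ARM_EMPTY_MAPPING	0x20000000	/* placeholder value */

struct vm_sketch {
	unsigned long flags;
};

/*
 * ioremap() should only reuse genuine static mappings.  Before the patch the
 * gap-filler entries also carried VM_ARM_STATIC_MAPPING, so a lookup could
 * wrongly match one of them (e.g. for physical address zero); with the new
 * flag they simply fail this test.
 */
static int matches_static_mapping(const struct vm_sketch *vm)
{
	return (vm->flags & VM_ARM_STATIC_MAPPING) != 0;
}

/* fill_pmd_gaps() still wants to walk both kinds of entry, hence both flags. */
static int interesting_for_pmd_gaps(const struct vm_sketch *vm)
{
	return (vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)) != 0;
}

int main(void)
{
	struct vm_sketch real_map  = { VM_IOREMAP | VM_ARM_STATIC_MAPPING };
	struct vm_sketch gap_entry = { VM_IOREMAP | VM_ARM_EMPTY_MAPPING };

	printf("real mapping: static=%d gap-walk=%d\n",
	       matches_static_mapping(&real_map), interesting_for_pmd_gaps(&real_map));
	printf("gap entry:    static=%d gap-walk=%d\n",
	       matches_static_mapping(&gap_entry), interesting_for_pmd_gaps(&gap_entry));
	return 0;
}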