author		Kevin Hao <haokexin@gmail.com>	2013-12-24 15:12:10 +0800
committer	Scott Wood <scottwood@freescale.com>	2014-01-09 17:52:17 -0600
commit		7d2471f9fa85089beb1cb9436ffc28f9e11e518d (patch)
tree		1d3c786feb97e2ef82a417a9a95dcecf82bcd5da /arch/powerpc/mm/fsl_booke_mmu.c
parent		813125d83372e19edecaba811d4d0dc115d36819 (diff)
powerpc/fsl_booke: make sure PAGE_OFFSET map to memstart_addr for relocatable kernel
This is always true for a non-relocatable kernel; otherwise the kernel would get stuck. For a relocatable kernel it is a little more complicated: when booting, we simply align the kernel start address down to 64M and map PAGE_OFFSET from there, and the relocation is based on that virtual address. If this address is not the same as memstart_addr, we have to change the mapping of PAGE_OFFSET to the real memstart_addr and perform a second relocation.

Signed-off-by: Kevin Hao <haokexin@gmail.com>
[scottwood@freescale.com: make offset long and non-negative in simple case]
Signed-off-by: Scott Wood <scottwood@freescale.com>
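To make the two-pass scheme concrete, the following standalone C sketch mirrors the first-pass computation (illustration only, not kernel code: the 0xc0000000 base address and the example load/RAM addresses are assumptions; the 64M mask and the variable names follow the patch below):

/*
 * Standalone sketch of the first-pass offset computation described above.
 * In the kernel, kernstart_addr comes from the boot code and memstart_addr
 * from the device tree via early_get_first_memblock_info(); here they are
 * just example values.
 */
#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_KERNELBASE 0xc0000000UL   /* assumed KERNELBASE/PAGE_OFFSET */
#define ALIGN_64M_MASK     (~0x3ffffffUL) /* 64M alignment mask used by the patch */

int main(void)
{
        uint64_t kernstart_addr = 0x05000000; /* example: kernel loaded at 80M */
        uint64_t memstart_addr  = 0x00000000; /* example: RAM starts at 0 */

        /* First pass: PAGE_OFFSET is mapped at the 64M-aligned load address. */
        uint64_t start = kernstart_addr & ALIGN_64M_MASK;
        uint64_t base  = EXAMPLE_KERNELBASE & ALIGN_64M_MASK;
        long virt_phys_offset = (long)(base - start);

        printf("first-pass virt_phys_offset = 0x%lx\n",
               (unsigned long)virt_phys_offset);

        /* A second relocation is needed only if PAGE_OFFSET does not map memstart_addr. */
        if (start != memstart_addr)
                printf("second relocation needed, offset = 0x%lx\n",
                       (unsigned long)(start - memstart_addr));
        else
                printf("PAGE_OFFSET already maps memstart_addr, done\n");
        return 0;
}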
Diffstat (limited to 'arch/powerpc/mm/fsl_booke_mmu.c')
-rw-r--r--	arch/powerpc/mm/fsl_booke_mmu.c	42
1 file changed, 36 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index ca956c83e3a2..95deb9fdf92f 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -231,7 +231,7 @@ void __init adjust_total_lowmem(void)
         i = switch_to_as1();
         __max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM);
-        restore_to_as0(i);
+        restore_to_as0(i, 0, 0);
         pr_info("Memory CAM mapping: ");
         for (i = 0; i < tlbcam_index - 1; i++)
@@ -252,17 +252,25 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
}
#ifdef CONFIG_RELOCATABLE
-notrace void __init relocate_init(phys_addr_t start)
+int __initdata is_second_reloc;
+notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
 {
         unsigned long base = KERNELBASE;
+        kernstart_addr = start;
+        if (is_second_reloc) {
+                virt_phys_offset = PAGE_OFFSET - memstart_addr;
+                return;
+        }
+
         /*
          * Relocatable kernel support based on processing of dynamic
-         * relocation entries.
-         * Compute the virt_phys_offset :
+         * relocation entries. Before we get the real memstart_addr,
+         * We will compute the virt_phys_offset like this:
          * virt_phys_offset = stext.run - kernstart_addr
          *
-         * stext.run = (KERNELBASE & ~0x3ffffff) + (kernstart_addr & 0x3ffffff)
+         * stext.run = (KERNELBASE & ~0x3ffffff) +
+         *                 (kernstart_addr & 0x3ffffff)
          * When we relocate, we have :
          *
          *     (kernstart_addr & 0x3ffffff) = (stext.run & 0x3ffffff)
@@ -272,10 +280,32 @@ notrace void __init relocate_init(phys_addr_t start)
          *             (kernstart_addr & ~0x3ffffff)
          *
          */
-        kernstart_addr = start;
         start &= ~0x3ffffff;
         base &= ~0x3ffffff;
         virt_phys_offset = base - start;
+        early_get_first_memblock_info(__va(dt_ptr), NULL);
+        /*
+         * We now get the memstart_addr, then we should check if this
+         * address is the same as what the PAGE_OFFSET map to now. If
+         * not we have to change the map of PAGE_OFFSET to memstart_addr
+         * and do a second relocation.
+         */
+        if (start != memstart_addr) {
+                int n;
+                long offset = start - memstart_addr;
+
+                is_second_reloc = 1;
+                n = switch_to_as1();
+                /* map a 64M area for the second relocation */
+                if (memstart_addr > start)
+                        map_mem_in_cams(0x4000000, CONFIG_LOWMEM_CAM_NUM);
+                else
+                        map_mem_in_cams_addr(start, PAGE_OFFSET + offset,
+                                        0x4000000, CONFIG_LOWMEM_CAM_NUM);
+                restore_to_as0(n, offset, __va(dt_ptr));
+                /* We should never reach here */
+                panic("Relocation error");
+        }
}
#endif
#endif
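For a concrete view of the branch added at the end of relocate_init() above, here is a hedged standalone sketch; the printf() calls only stand in for map_mem_in_cams(), map_mem_in_cams_addr() and restore_to_as0(), and the EXAMPLE_PAGE_OFFSET value and addresses are assumptions:

/*
 * Illustration (not kernel code) of how the temporary 64M mapping for the
 * second relocation is chosen, using the same condition as the hunk above.
 */
#include <stdio.h>
#include <stdint.h>

#define EXAMPLE_PAGE_OFFSET 0xc0000000UL /* assumed PAGE_OFFSET */

int main(void)
{
        uint64_t start = 0x04000000;         /* 64M-aligned kernel load address */
        uint64_t memstart_addr = 0x00000000; /* real start of RAM */

        if (start != memstart_addr) {
                long offset = (long)(start - memstart_addr);

                if (memstart_addr > start)
                        /* RAM starts above the load address: map memstart_addr at PAGE_OFFSET */
                        printf("map_mem_in_cams(64M), then restore_to_as0(offset=0x%lx)\n",
                               (unsigned long)offset);
                else
                        /* RAM starts below: map the load address at PAGE_OFFSET + offset */
                        printf("map_mem_in_cams_addr(phys 0x%llx -> virt 0x%lx), then restore_to_as0()\n",
                               (unsigned long long)start,
                               (unsigned long)(EXAMPLE_PAGE_OFFSET + offset));

                /* On the second pass, relocate_init() only has to set: */
                printf("second pass: virt_phys_offset = PAGE_OFFSET - memstart_addr = 0x%lx\n",
                       (unsigned long)(EXAMPLE_PAGE_OFFSET - memstart_addr));
        }
        return 0;
}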