author		Russell King <rmk+kernel@arm.linux.org.uk>	2014-07-29 09:27:13 +0100
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2014-08-02 08:51:55 +0100
commit		3bb70de692f70861f5c5729cd2b870d0104a7cc9 (patch)
tree		e805359f9252e9c51373e98c876e539f195d7b95 /arch/arm/mm/mmu.c
parent		c716483c3db10b31bf9bf43c5f45f2c3117ca13a (diff)
ARM: add comments to the early page table remap code
Add further comments to the early page table remap code to explain
what the code is doing, why it is doing it, but more importantly to
explain that the code is not architecturally compliant and is squarely
in "UNPREDICTABLE" behaviour territory.

Add a warning and tainting of the kernel too.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
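For reference, the "via an invalid mapping" sequence that the architecture
recommends (and that this code deliberately cannot use, since it would unmap
the instructions performing the update) looks roughly like the sketch below.
remap_section_bbm is a hypothetical helper, not part of this commit; the
accessors are the usual ARM page-table/TLB primitives:

	/*
	 * Illustrative only -- not part of this commit. Break-before-make
	 * update of one section mapping: go via an invalid entry so the
	 * TLB can never hold two entries matching one translation.
	 */
	static void remap_section_bbm(pmd_t *pmdp, pmd_t new_entry)
	{
		pmd_clear(pmdp);	/* 1. write an invalid entry... */
		flush_pmd_entry(pmdp);	/*    ...and push it out to the table walker */
		local_flush_tlb_all();	/* 2. discard any cached translation */

		*pmdp = new_entry;	/* 3. install the new mapping */
		flush_pmd_entry(pmdp);
	}

Applied to early_paging_init() itself, step 1 would unmap the running
kernel, which is exactly why the code below warns and taints instead.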
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--	arch/arm/mm/mmu.c	51
1 file changed, 46 insertions(+), 5 deletions(-)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ab14b79b03f0..8348ed6b2efe 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1406,8 +1406,8 @@ void __init early_paging_init(const struct machine_desc *mdesc,
 		return;
 
 	/* remap kernel code and data */
-	map_start = init_mm.start_code;
-	map_end   = init_mm.brk;
+	map_start = init_mm.start_code & PMD_MASK;
+	map_end   = ALIGN(init_mm.brk, PMD_SIZE);
 
 	/* get a handle on things... */
 	pgd0 = pgd_offset_k(0);
@@ -1434,23 +1434,64 @@ void __init early_paging_init(const struct machine_desc *mdesc,
 	dsb(ishst);
 	isb();
 
-	/* remap level 1 table */
+	/*
+	 * FIXME: This code is not architecturally compliant: we modify
+	 * the mappings in-place, indeed while they are in use by this
+	 * very same code. This may lead to unpredictable behaviour of
+	 * the CPU.
+	 *
+	 * Even modifying the mappings in a separate page table does
+	 * not resolve this.
+	 *
+	 * The architecture strongly recommends that when a mapping is
+	 * changed, that it is changed by first going via an invalid
+	 * mapping and back to the new mapping. This is to ensure that
+	 * no TLB conflicts (caused by the TLB having more than one TLB
+	 * entry match a translation) can occur. However, doing that
+	 * here will result in unmapping the code we are running.
+	 */
+	pr_warn("WARNING: unsafe modification of in-place page tables - tainting kernel\n");
+	add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
+
+	/*
+	 * Remap level 1 table. This changes the physical addresses
+	 * used to refer to the level 2 page tables to the high
+	 * physical address alias, leaving everything else the same.
+	 */
 	for (i = 0; i < PTRS_PER_PGD; pud0++, i++) {
 		set_pud(pud0,
 			__pud(__pa(pmd0) | PMD_TYPE_TABLE | L_PGD_SWAPPER));
 		pmd0 += PTRS_PER_PMD;
 	}
 
-	/* remap pmds for kernel mapping */
-	phys = __pa(map_start) & PMD_MASK;
+	/*
+	 * Remap the level 2 table, pointing the mappings at the high
+	 * physical address alias of these pages.
+	 */
+	phys = __pa(map_start);
 	do {
 		*pmdk++ = __pmd(phys | pmdprot);
 		phys += PMD_SIZE;
 	} while (phys < map_end);
 
+	/*
+	 * Ensure that the above updates are flushed out of the cache.
+	 * This is not strictly correct; on a system where the caches
+	 * are coherent with each other, but the MMU page table walks
+	 * may not be coherent, flush_cache_all() may be a no-op, and
+	 * this will fail.
+	 */
 	flush_cache_all();
+
+	/*
+	 * Re-write the TTBR values to point them at the high physical
+	 * alias of the page tables. We expect __va() will work on
+	 * cpu_get_pgd(), which returns the value of TTBR0.
+	 */
 	cpu_switch_mm(pgd0, &init_mm);
 	cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET);
+
+	/* Finally flush any stale TLB values. */
 	local_flush_bp_all();
 	local_flush_tlb_all();
 }
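As a footnote to the first hunk: masking map_start down with PMD_MASK and
rounding map_end up with ALIGN() widens the remapped range to whole sections,
so the do/while loop above always writes complete PMD entries. A standalone
sketch of that arithmetic (user-space C; the 2 MiB LPAE PMD_SIZE and the
example addresses are assumptions, not values from the commit):

	#include <stdio.h>

	/* Kernel-style helpers; PMD_SIZE is 2 MiB with LPAE, assumed here. */
	#define PMD_SIZE	(1UL << 21)
	#define PMD_MASK	(~(PMD_SIZE - 1))
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long start_code = 0xc0208000UL;	/* made-up addresses */
		unsigned long brk        = 0xc0a41000UL;

		unsigned long map_start = start_code & PMD_MASK;	/* round down */
		unsigned long map_end   = ALIGN(brk, PMD_SIZE);		/* round up */

		printf("map_start = %#lx, map_end = %#lx\n", map_start, map_end);
		return 0;
	}

With these example values it prints map_start = 0xc0200000 and
map_end = 0xc0c00000: the whole kernel image, rounded out to 2 MiB
section boundaries.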