path: root/arch/x86/mm/init_64.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2016-07-25 17:32:28 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-07-25 17:32:28 -0700
commit    77cd3d0c43b7e6c0bb49ca641cf936891f6e1766 (patch)
tree      44885bb7ec9def5a34b8e9f2073a166f78bddcf9 /arch/x86/mm/init_64.c
parent    0f657262d5f99ad86b9a63fb5dcd29036c2ed916 (diff)
parent    6a79296cb15d947bcb4558011fe066e5d8252b35 (diff)
Merge branch 'x86-boot-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 boot updates from Ingo Molnar:
 "The main changes:

   - add initial commits to randomize kernel memory section virtual
     addresses, enabled via a new kernel option: RANDOMIZE_MEMORY
     (Thomas Garnier, Kees Cook, Baoquan He, Yinghai Lu)

   - enhance KASLR (RANDOMIZE_BASE) physical memory randomization
     (Kees Cook)

   - EBDA/BIOS region boot quirk cleanups (Andy Lutomirski, Ingo Molnar)

   - misc cleanups/fixes"

* 'x86-boot-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/boot: Simplify EBDA-vs-BIOS reservation logic
  x86/boot: Clarify what x86_legacy_features.reserve_bios_regions does
  x86/boot: Reorganize and clean up the BIOS area reservation code
  x86/mm: Do not reference phys addr beyond kernel
  x86/mm: Add memory hotplug support for KASLR memory randomization
  x86/mm: Enable KASLR for vmalloc memory regions
  x86/mm: Enable KASLR for physical mapping memory regions
  x86/mm: Implement ASLR for kernel memory regions
  x86/mm: Separate variable for trampoline PGD
  x86/mm: Add PUD VA support for physical mapping
  x86/mm: Update physical mapping variable names
  x86/mm: Refactor KASLR entropy functions
  x86/KASLR: Fix boot crash with certain memory configurations
  x86/boot/64: Add forgotten end of function marker
  x86/KASLR: Allow randomization below the load address
  x86/KASLR: Extend kernel image physical address randomization to addresses larger than 4G
  x86/KASLR: Randomize virtual address separately
  x86/KASLR: Clarify identity map interface
  x86/boot: Refuse to build with data relocations
  x86/KASLR, x86/power: Remove x86 hibernation restrictions
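
For context on the RANDOMIZE_MEMORY work pulled in above: the idea is to shift the base of each kernel memory region (physical mapping, vmalloc, etc.) by a random number of PUD-sized (1 GiB) steps within the spare virtual address space. The following is a minimal userspace sketch of that idea only; the region bases, slack sizes, and the randomize_base() helper are illustrative assumptions, not the kernel's implementation (which draws entropy from the boot-time RNG rather than rand()).

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define PUD_SHIFT 30    /* regions are shifted in 1 GiB (PUD-sized) steps */

    /*
     * Illustrative helper: move a region base up by a random number of
     * 1 GiB steps, bounded by the slack available before the next region.
     */
    static unsigned long randomize_base(unsigned long base, unsigned long slack_gib,
                                        unsigned long entropy)
    {
            return base + (entropy % (slack_gib + 1)) * (1UL << PUD_SHIFT);
    }

    int main(void)
    {
            /* rand() is for illustration only; the kernel uses its own entropy. */
            srand((unsigned int)time(NULL));

            unsigned long direct_map = 0xffff880000000000UL; /* example base */
            unsigned long vmalloc    = 0xffffc90000000000UL; /* example base */

            printf("randomized direct-map base: %#lx\n",
                   randomize_base(direct_map, 1000, (unsigned long)rand()));
            printf("randomized vmalloc base:    %#lx\n",
                   randomize_base(vmalloc, 1000, (unsigned long)rand()));
            return 0;
    }
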
Diffstat (limited to 'arch/x86/mm/init_64.c')
-rw-r--r--  arch/x86/mm/init_64.c  167
1 file changed, 101 insertions(+), 66 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index e14f87057c3f..53cc2256cf23 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -328,22 +328,30 @@ void __init cleanup_highmap(void)
}
}
+/*
+ * Create PTE level page table mapping for physical addresses.
+ * It returns the last physical address mapped.
+ */
static unsigned long __meminit
-phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
+phys_pte_init(pte_t *pte_page, unsigned long paddr, unsigned long paddr_end,
pgprot_t prot)
{
- unsigned long pages = 0, next;
- unsigned long last_map_addr = end;
+ unsigned long pages = 0, paddr_next;
+ unsigned long paddr_last = paddr_end;
+ pte_t *pte;
int i;
- pte_t *pte = pte_page + pte_index(addr);
+ pte = pte_page + pte_index(paddr);
+ i = pte_index(paddr);
- for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) {
- next = (addr & PAGE_MASK) + PAGE_SIZE;
- if (addr >= end) {
+ for (; i < PTRS_PER_PTE; i++, paddr = paddr_next, pte++) {
+ paddr_next = (paddr & PAGE_MASK) + PAGE_SIZE;
+ if (paddr >= paddr_end) {
if (!after_bootmem &&
- !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) &&
- !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN))
+ !e820_any_mapped(paddr & PAGE_MASK, paddr_next,
+ E820_RAM) &&
+ !e820_any_mapped(paddr & PAGE_MASK, paddr_next,
+ E820_RESERVED_KERN))
set_pte(pte, __pte(0));
continue;
}
@@ -361,37 +369,44 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
}
if (0)
- printk(" pte=%p addr=%lx pte=%016lx\n",
- pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
+ pr_info(" pte=%p addr=%lx pte=%016lx\n", pte, paddr,
+ pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL).pte);
pages++;
- set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
- last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
+ set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, prot));
+ paddr_last = (paddr & PAGE_MASK) + PAGE_SIZE;
}
update_page_count(PG_LEVEL_4K, pages);
- return last_map_addr;
+ return paddr_last;
}
+/*
+ * Create PMD level page table mapping for physical addresses. The virtual
+ * and physical address have to be aligned at this level.
+ * It returns the last physical address mapped.
+ */
static unsigned long __meminit
-phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
+phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
unsigned long page_size_mask, pgprot_t prot)
{
- unsigned long pages = 0, next;
- unsigned long last_map_addr = end;
+ unsigned long pages = 0, paddr_next;
+ unsigned long paddr_last = paddr_end;
- int i = pmd_index(address);
+ int i = pmd_index(paddr);
- for (; i < PTRS_PER_PMD; i++, address = next) {
- pmd_t *pmd = pmd_page + pmd_index(address);
+ for (; i < PTRS_PER_PMD; i++, paddr = paddr_next) {
+ pmd_t *pmd = pmd_page + pmd_index(paddr);
pte_t *pte;
pgprot_t new_prot = prot;
- next = (address & PMD_MASK) + PMD_SIZE;
- if (address >= end) {
+ paddr_next = (paddr & PMD_MASK) + PMD_SIZE;
+ if (paddr >= paddr_end) {
if (!after_bootmem &&
- !e820_any_mapped(address & PMD_MASK, next, E820_RAM) &&
- !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN))
+ !e820_any_mapped(paddr & PMD_MASK, paddr_next,
+ E820_RAM) &&
+ !e820_any_mapped(paddr & PMD_MASK, paddr_next,
+ E820_RESERVED_KERN))
set_pmd(pmd, __pmd(0));
continue;
}
@@ -400,8 +415,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
if (!pmd_large(*pmd)) {
spin_lock(&init_mm.page_table_lock);
pte = (pte_t *)pmd_page_vaddr(*pmd);
- last_map_addr = phys_pte_init(pte, address,
- end, prot);
+ paddr_last = phys_pte_init(pte, paddr,
+ paddr_end, prot);
spin_unlock(&init_mm.page_table_lock);
continue;
}
@@ -420,7 +435,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
if (page_size_mask & (1 << PG_LEVEL_2M)) {
if (!after_bootmem)
pages++;
- last_map_addr = next;
+ paddr_last = paddr_next;
continue;
}
new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
@@ -430,42 +445,54 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
pages++;
spin_lock(&init_mm.page_table_lock);
set_pte((pte_t *)pmd,
- pfn_pte((address & PMD_MASK) >> PAGE_SHIFT,
+ pfn_pte((paddr & PMD_MASK) >> PAGE_SHIFT,
__pgprot(pgprot_val(prot) | _PAGE_PSE)));
spin_unlock(&init_mm.page_table_lock);
- last_map_addr = next;
+ paddr_last = paddr_next;
continue;
}
pte = alloc_low_page();
- last_map_addr = phys_pte_init(pte, address, end, new_prot);
+ paddr_last = phys_pte_init(pte, paddr, paddr_end, new_prot);
spin_lock(&init_mm.page_table_lock);
pmd_populate_kernel(&init_mm, pmd, pte);
spin_unlock(&init_mm.page_table_lock);
}
update_page_count(PG_LEVEL_2M, pages);
- return last_map_addr;
+ return paddr_last;
}
+/*
+ * Create PUD level page table mapping for physical addresses. The virtual
+ * and physical address do not have to be aligned at this level. KASLR can
+ * randomize virtual addresses up to this level.
+ * It returns the last physical address mapped.
+ */
static unsigned long __meminit
-phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
- unsigned long page_size_mask)
+phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
+ unsigned long page_size_mask)
{
- unsigned long pages = 0, next;
- unsigned long last_map_addr = end;
- int i = pud_index(addr);
+ unsigned long pages = 0, paddr_next;
+ unsigned long paddr_last = paddr_end;
+ unsigned long vaddr = (unsigned long)__va(paddr);
+ int i = pud_index(vaddr);
- for (; i < PTRS_PER_PUD; i++, addr = next) {
- pud_t *pud = pud_page + pud_index(addr);
+ for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
+ pud_t *pud;
pmd_t *pmd;
pgprot_t prot = PAGE_KERNEL;
- next = (addr & PUD_MASK) + PUD_SIZE;
- if (addr >= end) {
+ vaddr = (unsigned long)__va(paddr);
+ pud = pud_page + pud_index(vaddr);
+ paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
+
+ if (paddr >= paddr_end) {
if (!after_bootmem &&
- !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) &&
- !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN))
+ !e820_any_mapped(paddr & PUD_MASK, paddr_next,
+ E820_RAM) &&
+ !e820_any_mapped(paddr & PUD_MASK, paddr_next,
+ E820_RESERVED_KERN))
set_pud(pud, __pud(0));
continue;
}
@@ -473,8 +500,10 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
if (!pud_none(*pud)) {
if (!pud_large(*pud)) {
pmd = pmd_offset(pud, 0);
- last_map_addr = phys_pmd_init(pmd, addr, end,
- page_size_mask, prot);
+ paddr_last = phys_pmd_init(pmd, paddr,
+ paddr_end,
+ page_size_mask,
+ prot);
__flush_tlb_all();
continue;
}
@@ -493,7 +522,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
if (page_size_mask & (1 << PG_LEVEL_1G)) {
if (!after_bootmem)
pages++;
- last_map_addr = next;
+ paddr_last = paddr_next;
continue;
}
prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
@@ -503,16 +532,16 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
pages++;
spin_lock(&init_mm.page_table_lock);
set_pte((pte_t *)pud,
- pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT,
+ pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
PAGE_KERNEL_LARGE));
spin_unlock(&init_mm.page_table_lock);
- last_map_addr = next;
+ paddr_last = paddr_next;
continue;
}
pmd = alloc_low_page();
- last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
- prot);
+ paddr_last = phys_pmd_init(pmd, paddr, paddr_end,
+ page_size_mask, prot);
spin_lock(&init_mm.page_table_lock);
pud_populate(&init_mm, pud, pmd);
@@ -522,38 +551,44 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
update_page_count(PG_LEVEL_1G, pages);
- return last_map_addr;
+ return paddr_last;
}
+/*
+ * Create page table mapping for the physical memory for specific physical
+ * addresses. The virtual and physical addresses have to be aligned on PMD level
+ * down. It returns the last physical address mapped.
+ */
unsigned long __meminit
-kernel_physical_mapping_init(unsigned long start,
- unsigned long end,
+kernel_physical_mapping_init(unsigned long paddr_start,
+ unsigned long paddr_end,
unsigned long page_size_mask)
{
bool pgd_changed = false;
- unsigned long next, last_map_addr = end;
- unsigned long addr;
+ unsigned long vaddr, vaddr_start, vaddr_end, vaddr_next, paddr_last;
- start = (unsigned long)__va(start);
- end = (unsigned long)__va(end);
- addr = start;
+ paddr_last = paddr_end;
+ vaddr = (unsigned long)__va(paddr_start);
+ vaddr_end = (unsigned long)__va(paddr_end);
+ vaddr_start = vaddr;
- for (; start < end; start = next) {
- pgd_t *pgd = pgd_offset_k(start);
+ for (; vaddr < vaddr_end; vaddr = vaddr_next) {
+ pgd_t *pgd = pgd_offset_k(vaddr);
pud_t *pud;
- next = (start & PGDIR_MASK) + PGDIR_SIZE;
+ vaddr_next = (vaddr & PGDIR_MASK) + PGDIR_SIZE;
if (pgd_val(*pgd)) {
pud = (pud_t *)pgd_page_vaddr(*pgd);
- last_map_addr = phys_pud_init(pud, __pa(start),
- __pa(end), page_size_mask);
+ paddr_last = phys_pud_init(pud, __pa(vaddr),
+ __pa(vaddr_end),
+ page_size_mask);
continue;
}
pud = alloc_low_page();
- last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
- page_size_mask);
+ paddr_last = phys_pud_init(pud, __pa(vaddr), __pa(vaddr_end),
+ page_size_mask);
spin_lock(&init_mm.page_table_lock);
pgd_populate(&init_mm, pgd, pud);
@@ -562,11 +597,11 @@ kernel_physical_mapping_init(unsigned long start,
}
if (pgd_changed)
- sync_global_pgds(addr, end - 1, 0);
+ sync_global_pgds(vaddr_start, vaddr_end - 1, 0);
__flush_tlb_all();
- return last_map_addr;
+ return paddr_last;
}
#ifndef CONFIG_NUMA
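
The functional core of the diff above is in phys_pud_init(): the PUD slot is now selected by the virtual address obtained from __va(paddr) rather than by the physical address itself, so a KASLR-shifted direct-map base changes which slot a given physical page lands in. Below is a small standalone sketch of that index calculation; the constants and example offsets are assumptions for illustration, not values taken from the kernel headers.

    #include <stdio.h>

    #define PUD_SHIFT    30                    /* each PUD entry covers 1 GiB on x86-64 */
    #define PTRS_PER_PUD 512
    #define PAGE_OFFSET  0xffff880000000000UL  /* example direct-map base, no KASLR shift */

    /* Which of the 512 PUD slots a virtual address selects. */
    static unsigned long pud_index(unsigned long vaddr)
    {
            return (vaddr >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
    }

    int main(void)
    {
            unsigned long paddr    = 0x40000000UL;        /* 1 GiB physical address */
            unsigned long va_plain = PAGE_OFFSET + paddr; /* __va() without a shift */
            unsigned long va_kaslr = PAGE_OFFSET + (13UL << PUD_SHIFT) + paddr; /* hypothetical 13 GiB shift */

            printf("PUD index without KASLR shift: %lu\n", pud_index(va_plain));
            printf("PUD index with a 13 GiB shift: %lu\n", pud_index(va_kaslr));
            return 0;
    }
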