author     Thomas Garnier <thgarnie@google.com>    2016-06-21 17:47:00 -0700
committer  Ingo Molnar <mingo@kernel.org>          2016-07-08 17:33:46 +0200
commit     faa379332f3cb3375db1849e27386f8bc9b97da4 (patch)
tree       0fa6986ca49db4f1c4528fc3ebed248e9fe14c57 /arch
parent     59b3d0206d74a700069e49160e8194b2ca93b703 (diff)
x86/mm: Add PUD VA support for physical mapping
Minor change that allows early boot physical mapping of PUD level virtual
addresses. The current implementation expects the virtual address to be
PUD aligned. For KASLR memory randomization, we need to be able to
randomize the offset used on the PUD table.

It has no impact on current usage.

Signed-off-by: Thomas Garnier <thgarnie@google.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Kuleshov <kuleshovmail@gmail.com>
Cc: Alexander Popov <alpopov@ptsecurity.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Baoquan He <bhe@redhat.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bp@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jan Beulich <JBeulich@suse.com>
Cc: Joerg Roedel <jroedel@suse.de>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Lv Zheng <lv.zheng@intel.com>
Cc: Mark Salter <msalter@redhat.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephen Smalley <sds@tycho.nsa.gov>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Toshi Kani <toshi.kani@hpe.com>
Cc: Xiao Guangrong <guangrong.xiao@linux.intel.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: kernel-hardening@lists.openwall.com
Cc: linux-doc@vger.kernel.org
Link: http://lkml.kernel.org/r/1466556426-32664-4-git-send-email-keescook@chromium.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch')
-rw-r--r--    arch/x86/mm/init_64.c    13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 6714712bd5da..7bf1ddb54537 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -465,7 +465,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long paddr, unsigned long paddr_end,
/*
* Create PUD level page table mapping for physical addresses. The virtual
- * and physical address have to be aligned at this level.
+ * and physical address do not have to be aligned at this level. KASLR can
+ * randomize virtual addresses up to this level.
* It returns the last physical address mapped.
*/
static unsigned long __meminit
@@ -474,14 +475,18 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
{
unsigned long pages = 0, paddr_next;
unsigned long paddr_last = paddr_end;
- int i = pud_index(paddr);
+ unsigned long vaddr = (unsigned long)__va(paddr);
+ int i = pud_index(vaddr);
for (; i < PTRS_PER_PUD; i++, paddr = paddr_next) {
- pud_t *pud = pud_page + pud_index(paddr);
+ pud_t *pud;
pmd_t *pmd;
pgprot_t prot = PAGE_KERNEL;
+ vaddr = (unsigned long)__va(paddr);
+ pud = pud_page + pud_index(vaddr);
paddr_next = (paddr & PUD_MASK) + PUD_SIZE;
+
if (paddr >= paddr_end) {
if (!after_bootmem &&
!e820_any_mapped(paddr & PUD_MASK, paddr_next,
@@ -551,7 +556,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
/*
* Create page table mapping for the physical memory for specific physical
- * addresses. The virtual and physical addresses have to be aligned on PUD level
+ * addresses. The virtual and physical addresses have to be aligned on PMD level
* down. It returns the last physical address mapped.
*/
unsigned long __meminit
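
For context, below is a minimal, self-contained sketch (plain userspace C, not kernel code) of the index arithmetic the hunks above depend on. The PUD_SHIFT and PTRS_PER_PUD values match the standard 4-level x86-64 paging layout; the page_offset value, the local pud_index() helper, and the simplified __va() arithmetic are illustrative stand-ins, not the kernel's actual symbols. It shows why, once KASLR may place the direct mapping at an arbitrary PUD slot, the walk has to index the PUD table by the virtual address rather than the physical one.

/*
 * Sketch of the PUD index arithmetic, assuming 4-level x86-64 paging.
 * page_offset below is a hypothetical randomized direct-mapping base.
 */
#include <stdio.h>
#include <stdint.h>

#define PUD_SHIFT     30          /* each PUD entry maps 1 GiB        */
#define PTRS_PER_PUD  512         /* entries per PUD table            */

/* Simplified local equivalent of the kernel's pud_index() macro. */
static unsigned int pud_index(uint64_t addr)
{
	return (addr >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

int main(void)
{
	/* Hypothetical randomized base: 137 PUD slots above a 1 GiB-aligned
	 * direct-mapping start, so it no longer lines up with the physical
	 * index. */
	uint64_t page_offset = 0xffff880000000000ULL + (137ULL << PUD_SHIFT);
	uint64_t paddr       = 0x100000000ULL;        /* 4 GiB physical     */
	uint64_t vaddr       = page_offset + paddr;   /* simplified __va()  */

	/* Before the patch the walk used pud_index(paddr); with a randomized
	 * base that picks the wrong slot, so the index must come from the
	 * virtual address instead. */
	printf("pud_index(paddr) = %u\n", pud_index(paddr));  /* prints 4   */
	printf("pud_index(vaddr) = %u\n", pud_index(vaddr));  /* prints 141 */
	return 0;
}

With a non-randomized, PUD-aligned base the two indices coincide, which is why indexing by the physical address used to be sufficient; once the base can land on any PUD slot, only the index derived from the virtual address selects the right entry, matching the pud_index(vaddr) recomputation in the loop above.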