Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/amdtopology.c          |  1
-rw-r--r--  arch/x86/mm/dump_pagetables.c      |  6
-rw-r--r--  arch/x86/mm/highmem_32.c           |  2
-rw-r--r--  arch/x86/mm/ident_map.c            | 19
-rw-r--r--  arch/x86/mm/init.c                 | 25
-rw-r--r--  arch/x86/mm/init_32.c              |  1
-rw-r--r--  arch/x86/mm/init_64.c              |  1
-rw-r--r--  arch/x86/mm/iomap_32.c             |  2
-rw-r--r--  arch/x86/mm/ioremap.c              |  1
-rw-r--r--  arch/x86/mm/kaslr.c                |  4
-rw-r--r--  arch/x86/mm/kmemcheck/kmemcheck.c  |  1
-rw-r--r--  arch/x86/mm/kmemcheck/shadow.c     |  2
-rw-r--r--  arch/x86/mm/kmmio.c                |  2
-rw-r--r--  arch/x86/mm/mmio-mod.c             |  2
-rw-r--r--  arch/x86/mm/numa.c                 |  1
-rw-r--r--  arch/x86/mm/numa_32.c              |  2
-rw-r--r--  arch/x86/mm/pageattr.c             | 21
-rw-r--r--  arch/x86/mm/pat.c                  | 18
-rw-r--r--  arch/x86/mm/pat_rbtree.c           |  1
-rw-r--r--  arch/x86/mm/pf_in.c                |  1
-rw-r--r--  arch/x86/mm/pgtable_32.c           |  1
-rw-r--r--  arch/x86/mm/physaddr.c             |  2
-rw-r--r--  arch/x86/mm/srat.c                 |  2
-rw-r--r--  arch/x86/mm/tlb.c                  |  2
24 files changed, 58 insertions(+), 62 deletions(-)
diff --git a/arch/x86/mm/amdtopology.c b/arch/x86/mm/amdtopology.c
index 2ca15b59fb3f..ba47524f56e8 100644
--- a/arch/x86/mm/amdtopology.c
+++ b/arch/x86/mm/amdtopology.c
@@ -9,7 +9,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
-#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 9a17250bcbe0..ea9c49adaa1f 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -14,7 +14,7 @@
#include <linux/debugfs.h>
#include <linux/mm.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/seq_file.h>
#include <asm/pgtable.h>
@@ -454,8 +454,4 @@ static int __init pt_dump_init(void)
return 0;
}
-
__initcall(pt_dump_init);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
-MODULE_DESCRIPTION("Kernel debugging helper that dumps pagetables");
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index a6d739258137..6d18b70ed5a9 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -1,5 +1,5 @@
#include <linux/highmem.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/bootmem.h>
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index ec21796ac5fd..4473cb4f8b90 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -3,15 +3,17 @@
* included by both the compressed kernel and the regular kernel.
*/
-static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
+static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
unsigned long addr, unsigned long end)
{
addr &= PMD_MASK;
for (; addr < end; addr += PMD_SIZE) {
pmd_t *pmd = pmd_page + pmd_index(addr);
- if (!pmd_present(*pmd))
- set_pmd(pmd, __pmd(addr | pmd_flag));
+ if (pmd_present(*pmd))
+ continue;
+
+ set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
}
}
@@ -30,13 +32,13 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
if (pud_present(*pud)) {
pmd = pmd_offset(pud, 0);
- ident_pmd_init(info->pmd_flag, pmd, addr, next);
+ ident_pmd_init(info, pmd, addr, next);
continue;
}
pmd = (pmd_t *)info->alloc_pgt_page(info->context);
if (!pmd)
return -ENOMEM;
- ident_pmd_init(info->pmd_flag, pmd, addr, next);
+ ident_pmd_init(info, pmd, addr, next);
set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
}
@@ -44,14 +46,15 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
}
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
- unsigned long addr, unsigned long end)
+ unsigned long pstart, unsigned long pend)
{
+ unsigned long addr = pstart + info->offset;
+ unsigned long end = pend + info->offset;
unsigned long next;
int result;
- int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
for (; addr < end; addr = next) {
- pgd_t *pgd = pgd_page + pgd_index(addr) + off;
+ pgd_t *pgd = pgd_page + pgd_index(addr);
pud_t *pud;
next = (addr & PGDIR_MASK) + PGDIR_SIZE;
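
[Editor's note: a hedged usage sketch of the reworked ident_map.c interface above, not part of the patch. With the kernel_mapping flag and pgd_index(__PAGE_OFFSET) bias gone, the caller expresses virtual placement through the new info->offset field, so virt = phys + offset; a plain 1:1 identity map simply leaves offset at 0. The helpers alloc_pgt_page_example() and example_build_direct_map() are hypothetical names; kernel_ident_mapping_init(), __PAGE_OFFSET, __PAGE_KERNEL_LARGE and get_zeroed_page() are the existing kernel symbols, and the snippet assumes a kernel build context.]

/* Hypothetical caller sketch, kernel context assumed. */
static void *alloc_pgt_page_example(void *context)
{
	/* assumption: a simple GFP allocation is acceptable for this caller */
	return (void *)get_zeroed_page(GFP_KERNEL);
}

static int example_build_direct_map(pgd_t *pgd, unsigned long pstart,
				    unsigned long pend)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_pgt_page_example,
		.pmd_flag	= __PAGE_KERNEL_LARGE,
		.offset		= __PAGE_OFFSET,	/* virt = phys + offset */
	};

	/* maps physical [pstart, pend) at __PAGE_OFFSET + phys; offset = 0 would give a 1:1 map */
	return kernel_ident_mapping_init(&info, pgd, pstart, pend);
}
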
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index cc82830bc8c4..d28a2d741f9e 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -122,8 +122,18 @@ __ref void *alloc_low_pages(unsigned int num)
return __va(pfn << PAGE_SHIFT);
}
-/* need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS */
-#define INIT_PGT_BUF_SIZE (6 * PAGE_SIZE)
+/*
+ * By default need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS.
+ * With KASLR memory randomization, depending on the machine e820 memory
+ * and the PUD alignment. We may need twice more pages when KASLR memory
+ * randomization is enabled.
+ */
+#ifndef CONFIG_RANDOMIZE_MEMORY
+#define INIT_PGD_PAGE_COUNT 6
+#else
+#define INIT_PGD_PAGE_COUNT 12
+#endif
+#define INIT_PGT_BUF_SIZE (INIT_PGD_PAGE_COUNT * PAGE_SIZE)
RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
void __init early_alloc_pgt_buf(void)
{
@@ -208,7 +218,7 @@ static int __meminit save_mr(struct map_range *mr, int nr_range,
* adjust the page_size_mask for small range to go with
* big page size instead small one if nearby are ram too.
*/
-static void __init_refok adjust_range_page_size_mask(struct map_range *mr,
+static void __ref adjust_range_page_size_mask(struct map_range *mr,
int nr_range)
{
int i;
@@ -396,7 +406,7 @@ bool pfn_range_is_mapped(unsigned long start_pfn, unsigned long end_pfn)
* This runs before bootmem is initialized and gets pages directly from
* the physical memory. To access them they are temporarily mapped.
*/
-unsigned long __init_refok init_memory_mapping(unsigned long start,
+unsigned long __ref init_memory_mapping(unsigned long start,
unsigned long end)
{
struct map_range mr[NR_RANGE_MR];
@@ -700,13 +710,6 @@ void free_initmem(void)
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
/*
- * Remember, initrd memory may contain microcode or other useful things.
- * Before we lose initrd mem, we need to find a place to hold them
- * now that normal virtual memory is enabled.
- */
- save_microcode_in_initrd();
-
- /*
* end could be not aligned, and We can not align that,
* decompresser could be confused by aligned initrd_end
* We already reserve the end partial page before in
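
[Editor's note: a rough, hedged reading of the INIT_PGD_PAGE_COUNT hunk earlier in this file's diff; the figures are illustrative, not stated in the patch. Each early range needs at most one 4k table per level below the PGD, and a randomized, possibly non-PUD-aligned base can make a range straddle one extra boundary per level, roughly doubling the worst case.]

/* Illustrative arithmetic only; constants mirror the hunk above. */
#include <stdio.h>

#define EARLY_PAGE_SIZE	4096UL

int main(void)
{
	/* 3 pages for the initial PMD_SIZE mapping + 3 for 0-ISA_END_ADDRESS */
	printf("default: %lu KiB\n", 6 * EARLY_PAGE_SIZE / 1024);	/* 24 KiB */
	/* a randomized, non-PUD-aligned base can roughly double the worst case */
	printf("KASLR:   %lu KiB\n", 12 * EARLY_PAGE_SIZE / 1024);	/* 48 KiB */
	return 0;
}
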
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 84df150ee77e..cf8059016ec8 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -5,7 +5,6 @@
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
*/
-#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 53cc2256cf23..14b9dd71d9e8 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -27,7 +27,6 @@
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
-#include <linux/module.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/memremap.h>
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index 9c0ff045fdd4..ada98b39b8ad 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -18,7 +18,7 @@
#include <asm/iomap.h>
#include <asm/pat.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/highmem.h>
static int is_io_mapping_possible(resource_size_t base, unsigned long size)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index f0894910bdd7..7aaa2635862d 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -9,7 +9,6 @@
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
-#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 26dccd6c0df1..bda8d5eef04d 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -77,7 +77,7 @@ static inline unsigned long get_padding(struct kaslr_memory_region *region)
*/
static inline bool kaslr_memory_enabled(void)
{
- return kaslr_enabled() && !config_enabled(CONFIG_KASAN);
+ return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
}
/* Initialize base and padding for each memory region randomized with KASLR */
@@ -97,7 +97,7 @@ void __init kernel_randomize_memory(void)
* add padding if needed (especially for memory hotplug support).
*/
BUG_ON(kaslr_regions[0].base != &page_offset_base);
- memory_tb = ((max_pfn << PAGE_SHIFT) >> TB_SHIFT) +
+ memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
/* Adapt phyiscal memory region size based on available memory */
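
[Editor's note: a hedged illustration of the rounding change above; the 1.5 TB figure is an example, not taken from the patch. The old shift silently dropped any partial terabyte, so the randomized direct-map region could be sized one terabyte short of physical memory; DIV_ROUND_UP keeps the partial unit.]

#include <stdio.h>

#define TB_SHIFT 40
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long long bytes = 3ULL << (TB_SHIFT - 1);	/* 1.5 TB of RAM */

	/* old: truncating shift under-counts the last partial terabyte */
	printf("shift:        %llu TB\n", bytes >> TB_SHIFT);			/* 1 */
	/* new: round up so the region covers all of physical memory */
	printf("DIV_ROUND_UP: %llu TB\n",
	       DIV_ROUND_UP(bytes, 1ULL << TB_SHIFT));				/* 2 */
	return 0;
}
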
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
index b4f2e7e9e907..4515bae36bbe 100644
--- a/arch/x86/mm/kmemcheck/kmemcheck.c
+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
-#include <linux/module.h>
#include <linux/page-flags.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
diff --git a/arch/x86/mm/kmemcheck/shadow.c b/arch/x86/mm/kmemcheck/shadow.c
index aec124214d97..c2638a7d2c10 100644
--- a/arch/x86/mm/kmemcheck/shadow.c
+++ b/arch/x86/mm/kmemcheck/shadow.c
@@ -1,5 +1,5 @@
#include <linux/kmemcheck.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/mm.h>
#include <asm/page.h>
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index ddb2244b06a1..afc47f5c9531 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -11,7 +11,7 @@
#include <linux/rculist.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 0057a7accfb1..bef36622e408 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -24,7 +24,7 @@
#define DEBUG 1
-#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 968ac028c34e..fb682108f4dc 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -8,7 +8,6 @@
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
-#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 47b6436e41c2..6b7ce6279133 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -24,7 +24,7 @@
#include <linux/bootmem.h>
#include <linux/memblock.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include "numa_internal.h"
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 849dc09fa4f0..e3353c97d086 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -917,11 +917,11 @@ static void populate_pte(struct cpa_data *cpa,
}
}
-static int populate_pmd(struct cpa_data *cpa,
- unsigned long start, unsigned long end,
- unsigned num_pages, pud_t *pud, pgprot_t pgprot)
+static long populate_pmd(struct cpa_data *cpa,
+ unsigned long start, unsigned long end,
+ unsigned num_pages, pud_t *pud, pgprot_t pgprot)
{
- unsigned int cur_pages = 0;
+ long cur_pages = 0;
pmd_t *pmd;
pgprot_t pmd_pgprot;
@@ -991,12 +991,12 @@ static int populate_pmd(struct cpa_data *cpa,
return num_pages;
}
-static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
- pgprot_t pgprot)
+static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
+ pgprot_t pgprot)
{
pud_t *pud;
unsigned long end;
- int cur_pages = 0;
+ long cur_pages = 0;
pgprot_t pud_pgprot;
end = start + (cpa->numpages << PAGE_SHIFT);
@@ -1052,7 +1052,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
/* Map trailing leftover */
if (start < end) {
- int tmp;
+ long tmp;
pud = pud_offset(pgd, start);
if (pud_none(*pud))
@@ -1078,7 +1078,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
pud_t *pud = NULL; /* shut up gcc */
pgd_t *pgd_entry;
- int ret;
+ long ret;
pgd_entry = cpa->pgd + pgd_index(addr);
@@ -1327,7 +1327,8 @@ static int cpa_process_alias(struct cpa_data *cpa)
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
- int ret, numpages = cpa->numpages;
+ unsigned long numpages = cpa->numpages;
+ int ret;
while (numpages) {
/*
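
[Editor's note: a hedged illustration of the overflow the int-to-long widening above avoids; the 16 TiB range is an example, not from the patch, and the snippet assumes an LP64 target where long is 64 bits.]

#include <stdio.h>
#include <limits.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long range    = 16UL << 40;		/* 16 TiB of address space */
	unsigned long numpages = range >> PAGE_SHIFT;	/* 4294967296 pages */

	printf("pages     = %lu\n", numpages);
	/* more than INT_MAX pages, so an int page counter would truncate/overflow */
	printf("fits int? = %s\n", numpages <= INT_MAX ? "yes" : "no");	/* no */
	return 0;
}
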
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index db00e3e2f3dc..170cc4ff057b 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -11,7 +11,6 @@
#include <linux/bootmem.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/slab.h>
#include <linux/mm.h>
@@ -928,9 +927,10 @@ int track_pfn_copy(struct vm_area_struct *vma)
}
/*
- * prot is passed in as a parameter for the new mapping. If the vma has a
- * linear pfn mapping for the entire range reserve the entire vma range with
- * single reserve_pfn_range call.
+ * prot is passed in as a parameter for the new mapping. If the vma has
+ * a linear pfn mapping for the entire range, or no vma is provided,
+ * reserve the entire pfn + size range with single reserve_pfn_range
+ * call.
*/
int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
unsigned long pfn, unsigned long addr, unsigned long size)
@@ -939,11 +939,12 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
enum page_cache_mode pcm;
/* reserve the whole chunk starting from paddr */
- if (addr == vma->vm_start && size == (vma->vm_end - vma->vm_start)) {
+ if (!vma || (addr == vma->vm_start
+ && size == (vma->vm_end - vma->vm_start))) {
int ret;
ret = reserve_pfn_range(paddr, size, prot, 0);
- if (!ret)
+ if (ret == 0 && vma)
vma->vm_flags |= VM_PAT;
return ret;
}
@@ -998,7 +999,7 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
resource_size_t paddr;
unsigned long prot;
- if (!(vma->vm_flags & VM_PAT))
+ if (vma && !(vma->vm_flags & VM_PAT))
return;
/* free the chunk starting from pfn or the whole chunk */
@@ -1012,7 +1013,8 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
size = vma->vm_end - vma->vm_start;
}
free_pfn_range(paddr, size);
- vma->vm_flags &= ~VM_PAT;
+ if (vma)
+ vma->vm_flags &= ~VM_PAT;
}
/*
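
[Editor's note: a hedged caller sketch for the pat.c hunks above; example_reserve_io_range() is a hypothetical name, while track_pfn_remap() and reserve_pfn_range() are the functions shown in the diff, and a kernel build context is assumed. With these hunks, a caller that has no struct vm_area_struct can pass NULL and still get the whole pfn range reserved; only the VM_PAT flag bookkeeping on the vma is skipped.]

/* Hypothetical helper, kernel context assumed. */
static int example_reserve_io_range(unsigned long pfn, unsigned long size,
				    pgprot_t *prot)
{
	/* no vma: reserves [pfn << PAGE_SHIFT, + size) via reserve_pfn_range() */
	return track_pfn_remap(NULL, prot, pfn, 0, size);
}
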
diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
index 2f7702253ccf..de391b7bc19a 100644
--- a/arch/x86/mm/pat_rbtree.c
+++ b/arch/x86/mm/pat_rbtree.c
@@ -11,7 +11,6 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/rbtree_augmented.h>
#include <linux/sched.h>
#include <linux/gfp.h>
diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
index 9f0614daea85..a235869532bc 100644
--- a/arch/x86/mm/pf_in.c
+++ b/arch/x86/mm/pf_in.c
@@ -26,7 +26,6 @@
* Bjorn Steinbrink (B.Steinbrink@gmx.de), 2007
*/
-#include <linux/module.h>
#include <linux/ptrace.h> /* struct pt_regs */
#include "pf_in.h"
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index e67ae0e6c59d..9adce776852b 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -8,7 +8,6 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
-#include <linux/module.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
index e666cbbb9261..cfc3b9121ce4 100644
--- a/arch/x86/mm/physaddr.c
+++ b/arch/x86/mm/physaddr.c
@@ -1,6 +1,6 @@
#include <linux/bootmem.h>
#include <linux/mmdebug.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/mm.h>
#include <asm/page.h>
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index b1ecff460a46..35fe69529bc1 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -13,7 +13,7 @@
#include <linux/acpi.h>
#include <linux/mmzone.h>
#include <linux/bitmap.h>
-#include <linux/module.h>
+#include <linux/init.h>
#include <linux/topology.h>
#include <linux/mm.h>
#include <asm/proto.h>
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 5643fd0b1a7d..4dbe65622810 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -4,7 +4,7 @@
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/cpu.h>
#include <asm/tlbflush.h>