From caff0710ebf6f2c44cbd2b8b31fd6329148bed2e Mon Sep 17 00:00:00 2001
From: Jan Beulich
Date: Tue, 26 Sep 2006 10:52:31 +0200
Subject: [PATCH] initialize end of memory variables as early as possible

While an earlier patch already did a small step in that direction, this
patch moves the initialization of all memory end variables to as early as
possible, so that dependent code doesn't need to check whether these
variables have already been set.

Also, remove a misleading (perhaps just outdated) comment, and make a
variable static that is only used in a single file.

Signed-off-by: Jan Beulich
Signed-off-by: Andi Kleen
---
 arch/x86_64/mm/init.c | 6 ------
 1 file changed, 6 deletions(-)

(limited to 'arch/x86_64/mm/init.c')

diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index d14fb2dfbfc4..d40134bd6399 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -597,12 +597,6 @@ void __init mem_init(void)
 
 	pci_iommu_alloc();
 
-	/* How many end-of-memory variables you have, grandma! */
-	max_low_pfn = end_pfn;
-	max_pfn = end_pfn;
-	num_physpages = end_pfn;
-	high_memory = (void *) __va(end_pfn * PAGE_SIZE);
-
 	/* clear the zero-page */
 	memset(empty_zero_page, 0, PAGE_SIZE);
--
cgit v1.2.1


From 6ad916581181a105d7832a7dec9e1eb58f7a1621 Mon Sep 17 00:00:00 2001
From: Keith Mannthey
Date: Tue, 26 Sep 2006 10:52:36 +0200
Subject: [PATCH] x86_64 kernel mapping fix

Fix for the x86_64 kernel mapping code.  Without this patch the update
path only initializes one pmd_page worth of memory and tramples any
entries already on it.  Now the calling convention for phys_pmd_init and
phys_pud_init is to always pass a [pmd/pud] page, not an offset within a
page.

Signed-off-by: Keith Mannthey
Signed-off-by: Andi Kleen
Cc: KAMEZAWA Hiroyuki
Signed-off-by: Andrew Morton
---
 arch/x86_64/mm/init.c | 51 ++++++++++++++++++++++++++-------------------------
 1 file changed, 26 insertions(+), 25 deletions(-)

(limited to 'arch/x86_64/mm/init.c')

diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index d40134bd6399..984155b75e4c 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -250,12 +250,13 @@ __init void early_iounmap(void *addr, unsigned long size)
 }
 
 static void __meminit
-phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
+phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
 {
-	int i;
+	int i = pmd_index(address);
 
-	for (i = 0; i < PTRS_PER_PMD; pmd++, i++, address += PMD_SIZE) {
+	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
 		unsigned long entry;
+		pmd_t *pmd = pmd_page + pmd_index(address);
 
 		if (address >= end) {
 			if (!after_bootmem)
@@ -263,6 +264,10 @@ phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
 			set_pmd(pmd, __pmd(0));
 			break;
 		}
+
+		if (pmd_val(*pmd))
+			continue;
+
 		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
 		entry &= __supported_pte_mask;
 		set_pmd(pmd, __pmd(entry));
@@ -272,45 +277,41 @@ static void __meminit phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
 {
-	pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));
-
-	if (pmd_none(*pmd)) {
-		spin_lock(&init_mm.page_table_lock);
-		phys_pmd_init(pmd, address, end);
-		spin_unlock(&init_mm.page_table_lock);
-		__flush_tlb_all();
-	}
+	pmd_t *pmd = pmd_offset(pud,0);
+	spin_lock(&init_mm.page_table_lock);
+	phys_pmd_init(pmd, address, end);
+	spin_unlock(&init_mm.page_table_lock);
+	__flush_tlb_all();
 }
 
-static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
+static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end)
 {
-	long i = pud_index(address);
-
-	pud = pud + i;
+	int i = pud_index(addr);
 
-	if (after_bootmem && pud_val(*pud)) {
-		phys_pmd_update(pud, address, end);
-		return;
-	}
-	for (; i < PTRS_PER_PUD; pud++, i++) {
+	for (; i < PTRS_PER_PUD; i++, addr = (addr & PUD_MASK) + PUD_SIZE ) {
 		int map;
-		unsigned long paddr, pmd_phys;
+		unsigned long pmd_phys;
+		pud_t *pud = pud_page + pud_index(addr);
 		pmd_t *pmd;
 
-		paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
-		if (paddr >= end)
+		if (addr >= end)
 			break;
 
-		if (!after_bootmem && !e820_any_mapped(paddr, paddr+PUD_SIZE, 0)) {
+		if (!after_bootmem && !e820_any_mapped(addr,addr+PUD_SIZE,0)) {
 			set_pud(pud, __pud(0));
 			continue;
 		}
 
+		if (pud_val(*pud)) {
+			phys_pmd_update(pud, addr, end);
+			continue;
+		}
+
 		pmd = alloc_low_page(&map, &pmd_phys);
 		spin_lock(&init_mm.page_table_lock);
 		set_pud(pud, __pud(pmd_phys | _KERNPG_TABLE));
-		phys_pmd_init(pmd, paddr, end);
+		phys_pmd_init(pmd, addr, end);
 		spin_unlock(&init_mm.page_table_lock);
 		unmap_low_page(map);
 	}
--
cgit v1.2.1


From d3cf7f061521c78ad62e275eb6fbdc8f43fc75a7 Mon Sep 17 00:00:00 2001
From: Andi Kleen
Date: Tue, 26 Sep 2006 10:52:37 +0200
Subject: [PATCH] Remove bogus warning from early_ioremap

It is correct for its only caller right now, but not for possible future
others.

Signed-off-by: Andi Kleen
---
 arch/x86_64/mm/init.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'arch/x86_64/mm/init.c')

diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 984155b75e4c..53b49c86d7e0 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -229,7 +229,6 @@ __init void *early_ioremap(unsigned long addr, unsigned long size)
 
 	/* actually usually some more */
 	if (size >= LARGE_PAGE_SIZE) {
-		printk("SMBIOS area too long %lu\n", size);
 		return NULL;
 	}
 	set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
--
cgit v1.2.1
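
As background to the second patch above ("x86_64 kernel mapping fix"), the sketch below models its calling convention outside the kernel: the caller always hands phys_pmd_init a whole pmd page, and the routine indexes into it starting at the slot for the given address, skipping slots that are already populated, so overlapping ranges no longer trample each other's entries. This is a minimal user-space illustration with simplified stand-in names and constants (the toy pmd_index, PMD_SIZE, and "present" bit below are assumptions for the demo), not the kernel code itself.

#include <stdint.h>
#include <stdio.h>

#define PMD_SHIFT	21			/* 2 MB per pmd entry, as on x86_64 */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PTRS_PER_PMD	512
#define PMD_PRESENT	1UL			/* toy "present" bit */

/* toy pmd_index(): which slot of the pmd page covers this address */
static unsigned int pmd_index(uint64_t address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Takes the whole pmd page, never a pointer offset into it, and starts at
 * the slot for 'address'; slots that are already present are left alone.
 */
static void phys_pmd_init(uint64_t *pmd_page, uint64_t address, uint64_t end)
{
	unsigned int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address += PMD_SIZE) {
		uint64_t *pmd = pmd_page + pmd_index(address);

		if (address >= end)
			break;
		if (*pmd & PMD_PRESENT)		/* already mapped: don't trample it */
			continue;
		*pmd = address | PMD_PRESENT;	/* toy 2 MB "mapping" */
	}
}

int main(void)
{
	static uint64_t pmd_page[PTRS_PER_PMD];
	unsigned int i;

	/* two overlapping ranges: the second call must not clobber the first */
	phys_pmd_init(pmd_page, 0, 4 * PMD_SIZE);
	phys_pmd_init(pmd_page, 2 * PMD_SIZE, 8 * PMD_SIZE);

	for (i = 0; i < 8; i++)
		printf("slot %u -> %#llx\n", i, (unsigned long long)pmd_page[i]);
	return 0;
}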