author     Ingo Molnar <mingo@elte.hu>  2008-01-30 13:34:09 +0100
committer  Ingo Molnar <mingo@elte.hu>  2008-01-30 13:34:09 +0100
commit     86f03989d99cfa2e1216cdd7aa996852236909cf (patch)
tree       6fae63f51c4adf08f94975b48e656b31c6bced62
parent     aba8391f7323294e88e3a665513434aba4042a7d (diff)
download   blackbird-op-linux-86f03989d99cfa2e1216cdd7aa996852236909cf.tar.gz
           blackbird-op-linux-86f03989d99cfa2e1216cdd7aa996852236909cf.zip
x86: cpa: fix the self-test
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--  arch/x86/Kconfig.debug       |  2
-rw-r--r--  arch/x86/mm/init_32.c        |  4
-rw-r--r--  arch/x86/mm/init_64.c        | 16
-rw-r--r--  arch/x86/mm/pageattr-test.c  | 15
-rw-r--r--  arch/x86/mm/pageattr.c       | 74
-rw-r--r--  include/asm-x86/pgtable.h    |  1
6 files changed, 48 insertions, 64 deletions
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 2d0bd33b73aa..2e1e3af28c3a 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -40,7 +40,7 @@ comment "Page alloc debug is incompatible with Software Suspend on i386"
config DEBUG_PAGEALLOC
bool "Debug page memory allocations"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && X86_32
help
Unmap pages from the kernel linear mapping after free_pages().
This results in a large slowdown, but helps to find certain types
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 8d7f723cfc28..8ed5c189d7aa 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -781,8 +781,6 @@ void mark_rodata_ro(void)
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
- unsigned long addr;
-
#ifdef CONFIG_DEBUG_PAGEALLOC
/*
* If debugging page accesses then do not free this memory but
@@ -793,6 +791,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
begin, PAGE_ALIGN(end));
set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
+ unsigned long addr;
+
/*
* We just marked the kernel text read only above, now that
* we are going to free part of that, we need to make that
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index e0c1e98ad1bf..8a7b725ce3c7 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -569,22 +569,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
free_page(addr);
totalram_pages++;
}
-#ifdef CONFIG_DEBUG_RODATA
- /*
- * This will make the __init pages not present and
- * not executable, so that any attempt to use a
- * __init function from now on will fault immediately
- * rather than supriously later when memory gets reused.
- *
- * We only do this for DEBUG_RODATA to not break up the
- * 2Mb kernel mapping just for this debug feature.
- */
- if (begin >= __START_KERNEL_map) {
- set_memory_rw(begin, (end - begin)/PAGE_SIZE);
- set_memory_np(begin, (end - begin)/PAGE_SIZE);
- set_memory_nx(begin, (end - begin)/PAGE_SIZE);
- }
-#endif
#endif
}
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index 554820265b95..06353d43f72e 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -15,8 +15,7 @@
#include <asm/kdebug.h>
enum {
- NTEST = 400,
- LOWEST_LEVEL = PG_LEVEL_4K,
+ NTEST = 4000,
#ifdef CONFIG_X86_64
LPS = (1 << PMD_SHIFT),
#elif defined(CONFIG_X86_PAE)
@@ -59,10 +58,10 @@ static __init int print_split(struct split_state *s)
continue;
}
- if (level == 2 && sizeof(long) == 8) {
+ if (level == PG_LEVEL_1G && sizeof(long) == 8) {
s->gpg++;
i += GPS/PAGE_SIZE;
- } else if (level != LOWEST_LEVEL) {
+ } else if (level == PG_LEVEL_2M) {
if (!(pte_val(*pte) & _PAGE_PSE)) {
printk(KERN_ERR
"%lx level %d but not PSE %Lx\n",
@@ -162,7 +161,7 @@ static __init int exercise_pageattr(void)
continue;
}
- err = __change_page_attr_clear(addr[i], len[i],
+ err = change_page_attr_clear(addr[i], len[i],
__pgprot(_PAGE_GLOBAL));
if (err < 0) {
printk(KERN_ERR "CPA %d failed %d\n", i, err);
@@ -175,7 +174,7 @@ static __init int exercise_pageattr(void)
pte ? (u64)pte_val(*pte) : 0ULL);
failed++;
}
- if (level != LOWEST_LEVEL) {
+ if (level != PG_LEVEL_4K) {
printk(KERN_ERR "CPA %lx: unexpected level %d\n",
addr[i], level);
failed++;
@@ -183,7 +182,6 @@ static __init int exercise_pageattr(void)
}
vfree(bm);
- cpa_flush_all();
failed += print_split(&sb);
@@ -197,7 +195,7 @@ static __init int exercise_pageattr(void)
failed++;
continue;
}
- err = __change_page_attr_set(addr[i], len[i],
+ err = change_page_attr_set(addr[i], len[i],
__pgprot(_PAGE_GLOBAL));
if (err < 0) {
printk(KERN_ERR "CPA reverting failed: %d\n", err);
@@ -211,7 +209,6 @@ static __init int exercise_pageattr(void)
}
}
- cpa_flush_all();
failed += print_split(&sc);
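
For readers unfamiliar with the self-test's structure, a minimal user-space sketch of the pattern it follows (illustrative only; the array, flag value and helper names below are stand-ins, not kernel code): pick random ranges, clear a protection bit through the set/clear interface, verify the result, then set the bit back.

/*
 * Illustrative model of the self-test loop, not part of the patch.
 * FLAG stands in for _PAGE_GLOBAL, prot[] for the page tables.
 */
#include <stdio.h>
#include <stdlib.h>

#define NPAGES  1024
#define FLAG    0x1u

static unsigned int prot[NPAGES];

static void change_range(unsigned int start, unsigned int len,
                         unsigned int set, unsigned int clr)
{
        for (unsigned int i = start; i < start + len; i++)
                prot[i] = (prot[i] & ~clr) | set;
}

int main(void)
{
        int failed = 0;

        for (unsigned int i = 0; i < NPAGES; i++)
                prot[i] = FLAG;

        for (int t = 0; t < 100; t++) {
                unsigned int start = rand() % (NPAGES - 16);
                unsigned int len = 1 + rand() % 16;

                change_range(start, len, 0, FLAG);      /* "clear" pass */
                for (unsigned int i = start; i < start + len; i++)
                        if (prot[i] & FLAG)
                                failed++;

                change_range(start, len, FLAG, 0);      /* "revert" pass */
        }
        printf("failed: %d\n", failed);
        return failed != 0;
}
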
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 97ec9e7d29d9..532a40bc0e7e 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -197,10 +197,11 @@ static int split_large_page(pte_t *kpte, unsigned long address)
unsigned long addr;
pte_t *pbase, *tmp;
struct page *base;
- int i, level;
+ unsigned int i, level;
#ifdef CONFIG_DEBUG_PAGEALLOC
- gfp_flags = GFP_ATOMIC;
+ gfp_flags = __GFP_HIGH | __GFP_NOFAIL | __GFP_NOWARN;
+ gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
#endif
base = alloc_pages(gfp_flags, 0);
if (!base)
@@ -224,6 +225,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif
+ pgprot_val(ref_prot) &= ~_PAGE_NX;
for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));
@@ -248,7 +250,8 @@ out_unlock:
}
static int
-__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
+__change_page_attr(unsigned long address, unsigned long pfn,
+ pgprot_t mask_set, pgprot_t mask_clr)
{
struct page *kpte_page;
int level, err = 0;
@@ -267,15 +270,20 @@ repeat:
BUG_ON(PageLRU(kpte_page));
BUG_ON(PageCompound(kpte_page));
- prot = static_protections(prot, address);
-
if (level == PG_LEVEL_4K) {
- WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PSE);
- set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
- } else {
- /* Clear the PSE bit for the 4k level pages ! */
- pgprot_val(prot) = pgprot_val(prot) & ~_PAGE_PSE;
+ pgprot_t new_prot = pte_pgprot(*kpte);
+ pte_t new_pte, old_pte = *kpte;
+
+ pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
+ pgprot_val(new_prot) |= pgprot_val(mask_set);
+
+ new_prot = static_protections(new_prot, address);
+
+ new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
+ BUG_ON(pte_pfn(new_pte) != pte_pfn(old_pte));
+ set_pte_atomic(kpte, new_pte);
+ } else {
err = split_large_page(kpte, address);
if (!err)
goto repeat;
@@ -297,22 +305,26 @@ repeat:
* Modules and drivers should use the set_memory_* APIs instead.
*/
-static int change_page_attr_addr(unsigned long address, pgprot_t prot)
+static int
+change_page_attr_addr(unsigned long address, pgprot_t mask_set,
+ pgprot_t mask_clr)
{
int err = 0, kernel_map = 0;
- unsigned long pfn = __pa(address) >> PAGE_SHIFT;
+ unsigned long pfn;
#ifdef CONFIG_X86_64
if (address >= __START_KERNEL_map &&
address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
- address = (unsigned long)__va(__pa(address));
+ address = (unsigned long)__va(__pa((void *)address));
kernel_map = 1;
}
#endif
- if (!kernel_map || pte_present(pfn_pte(0, prot))) {
- err = __change_page_attr(address, pfn, prot);
+ pfn = __pa(address) >> PAGE_SHIFT;
+
+ if (!kernel_map || 1) {
+ err = __change_page_attr(address, pfn, mask_set, mask_clr);
if (err)
return err;
}
@@ -324,12 +336,15 @@ static int change_page_attr_addr(unsigned long address, pgprot_t prot)
*/
if (__pa(address) < KERNEL_TEXT_SIZE) {
unsigned long addr2;
- pgprot_t prot2;
- addr2 = __START_KERNEL_map + __pa(address);
+ addr2 = __pa(address) + __START_KERNEL_map - phys_base;
/* Make sure the kernel mappings stay executable */
- prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
- err = __change_page_attr(addr2, pfn, prot2);
+ pgprot_val(mask_clr) |= _PAGE_NX;
+ /*
+ * Our high aliases are imprecise, so do not propagate
+ * failures back to users:
+ */
+ __change_page_attr(addr2, pfn, mask_set, mask_clr);
}
#endif
@@ -339,26 +354,13 @@ static int change_page_attr_addr(unsigned long address, pgprot_t prot)
static int __change_page_attr_set_clr(unsigned long addr, int numpages,
pgprot_t mask_set, pgprot_t mask_clr)
{
- pgprot_t new_prot;
- int level;
- pte_t *pte;
- int i, ret;
-
- for (i = 0; i < numpages ; i++) {
-
- pte = lookup_address(addr, &level);
- if (!pte)
- return -EINVAL;
-
- new_prot = pte_pgprot(*pte);
-
- pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
- pgprot_val(new_prot) |= pgprot_val(mask_set);
+ unsigned int i;
+ int ret;
- ret = change_page_attr_addr(addr, new_prot);
+ for (i = 0; i < numpages ; i++, addr += PAGE_SIZE) {
+ ret = change_page_attr_addr(addr, mask_set, mask_clr);
if (ret)
return ret;
- addr += PAGE_SIZE;
}
return 0;
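
The central change above is that __change_page_attr() now receives the requested change as a set mask and a clear mask and derives the new protection from the bits already present in the PTE, instead of being handed a complete pgprot. A minimal sketch of that composition (illustrative only; the bit value and helper name are stand-ins for the kernel's _PAGE_* flags and static_protections()):

/*
 * Illustrative sketch, not part of the patch: how mask_set/mask_clr
 * compose with the protection bits already present in the PTE.
 */
#include <stdio.h>

#define _PAGE_GLOBAL 0x100UL    /* stand-in; bit 8, as on x86 */

static unsigned long apply_masks(unsigned long old_prot,
                                 unsigned long mask_set,
                                 unsigned long mask_clr)
{
        unsigned long new_prot = old_prot;

        new_prot &= ~mask_clr;          /* drop the bits to be cleared */
        new_prot |= mask_set;           /* add the bits to be set      */
        /* the kernel then applies static_protections() and keeps the pfn */
        return new_prot;
}

int main(void)
{
        unsigned long prot = 0x163;     /* example of existing PTE flags */

        /* change_page_attr_clear(..., __pgprot(_PAGE_GLOBAL)) boils down to: */
        prot = apply_masks(prot, 0, _PAGE_GLOBAL);
        printf("after clear: %#lx\n", prot);

        /* change_page_attr_set(..., __pgprot(_PAGE_GLOBAL)) to: */
        prot = apply_masks(prot, _PAGE_GLOBAL, 0);
        printf("after set:   %#lx\n", prot);
        return 0;
}
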
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index ee40a88882f6..269e7e29ea8e 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -240,6 +240,7 @@ enum {
PG_LEVEL_NONE,
PG_LEVEL_4K,
PG_LEVEL_2M,
+ PG_LEVEL_1G,
};
/*
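
The new PG_LEVEL_1G enumerator lets print_split() in the self-test distinguish 1 GB mappings from 2 MB and 4 KB ones. A quick reference sketch of what each level covers (illustrative only; nominal sizes for x86-64/PAE with 4 KB base pages, on 32-bit non-PAE the PMD-level mapping is 4 MB instead):

/* Illustrative only: page-level enumerators and the size each level
 * nominally covers on x86 with 4 KB base pages. */
#include <stdio.h>

enum pg_level { PG_LEVEL_NONE, PG_LEVEL_4K, PG_LEVEL_2M, PG_LEVEL_1G };

static unsigned long long level_size(enum pg_level level)
{
        switch (level) {
        case PG_LEVEL_4K: return 4ULL << 10;    /* PTE-mapped page   */
        case PG_LEVEL_2M: return 2ULL << 20;    /* PMD-level mapping */
        case PG_LEVEL_1G: return 1ULL << 30;    /* PUD-level mapping */
        default:          return 0;
        }
}

int main(void)
{
        printf("4K: %llu bytes\n2M: %llu bytes\n1G: %llu bytes\n",
               level_size(PG_LEVEL_4K), level_size(PG_LEVEL_2M),
               level_size(PG_LEVEL_1G));
        return 0;
}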