author    | Venki Pallipadi <venkatesh.pallipadi@intel.com> | 2008-05-14 16:05:51 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org>  | 2008-05-14 19:11:15 -0700
commit    | 1c12c4cf9411eb130b245fa8d0fbbaf989477c7b
tree      | f61d58e955b3159007ef77513c1c4f1ed2c9ec23
parent    | 44c81433e8b05dbc85985d939046f10f95901184
mprotect: prevent alteration of the PAT bits
There is a defect in mprotect that lets user space change the page cache
type bits, bypassing the kernel's reserve_memtype and free_memtype
wrappers. Fix the problem by not letting mprotect change the PAT bits.
Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
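
The core of the fix is the new pgprot_modify() helper: bits covered by
_PAGE_CHG_MASK, which now includes the _PAGE_PCD/_PAGE_PWT cache-type bits,
are kept from the existing vm_page_prot, while the remaining bits come from
the protection that mprotect requests. Below is a minimal user-space sketch
of that behaviour, not part of the commit; the bit positions and the reduced
_PAGE_CHG_MASK are illustrative stand-ins (the real mask also contains
PTE_MASK), and the simplified pgprot_modify() here only mirrors the intended
semantics.

/*
 * Illustrative sketch only -- simplified stand-ins for the kernel types and
 * x86 bit definitions, showing what pgprot_modify() is meant to guarantee:
 * bits inside _PAGE_CHG_MASK (including the PCD/PWT cache-type bits) are
 * preserved from the old protection; everything else is taken from the new.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t pgprotval_t;

#define _PAGE_PRESENT  (1ULL << 0)
#define _PAGE_RW       (1ULL << 1)
#define _PAGE_PWT      (1ULL << 3)
#define _PAGE_PCD      (1ULL << 4)
#define _PAGE_ACCESSED (1ULL << 5)
#define _PAGE_DIRTY    (1ULL << 6)

/* Simplified: the real mask also contains PTE_MASK (the PFN bits). */
#define _PAGE_CHG_MASK (_PAGE_PCD | _PAGE_PWT | _PAGE_ACCESSED | _PAGE_DIRTY)

static pgprotval_t pgprot_modify(pgprotval_t oldprot, pgprotval_t newprot)
{
        pgprotval_t preservebits = oldprot & _PAGE_CHG_MASK; /* keep cache type */
        pgprotval_t addbits = newprot;                       /* take new access bits */

        return preservebits | addbits;
}

int main(void)
{
        /* Old mapping: present, read-only, uncached (PCD set). */
        pgprotval_t old = _PAGE_PRESENT | _PAGE_PCD;
        /* mprotect() asks for read/write with the default (cached) type. */
        pgprotval_t req = _PAGE_PRESENT | _PAGE_RW;

        pgprotval_t result = pgprot_modify(old, req);

        /* PCD survives: the caller cannot silently switch the cache type. */
        printf("PCD preserved: %s\n", (result & _PAGE_PCD) ? "yes" : "no");
        printf("RW granted:    %s\n", (result & _PAGE_RW) ? "yes" : "no");
        return 0;
}

With the old code path, the protection requested by mprotect replaced
vm_page_prot wholesale, so cache-type bits established through
reserve_memtype could be silently cleared; the mm/mprotect.c hunk below is
where pgprot_modify() is wired in to prevent that.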
-rw-r--r-- | include/asm-x86/pgtable.h | 16
-rw-r--r-- | mm/mprotect.c             | 11
2 files changed, 23 insertions, 4 deletions
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index 801b31f71452..55c3a0e3a8ce 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -57,7 +57,8 @@
 #define _KERNPG_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |   \
                          _PAGE_DIRTY)
 
-#define _PAGE_CHG_MASK  (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_CHG_MASK  (PTE_MASK | _PAGE_PCD | _PAGE_PWT |            \
+                         _PAGE_ACCESSED | _PAGE_DIRTY)
 
 #define _PAGE_CACHE_MASK        (_PAGE_PCD | _PAGE_PWT)
 #define _PAGE_CACHE_WB          (0)
@@ -288,12 +289,21 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
         * Chop off the NX bit (if present), and add the NX portion of
         * the newprot (if present):
         */
-       val &= _PAGE_CHG_MASK & ~_PAGE_NX;
-       val |= pgprot_val(newprot) & __supported_pte_mask;
+       val &= _PAGE_CHG_MASK;
+       val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;
 
        return __pte(val);
 }
 
+/* mprotect needs to preserve PAT bits when updating vm_page_prot */
+#define pgprot_modify pgprot_modify
+static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+{
+       pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
+       pgprotval_t addbits = pgprot_val(newprot);
+       return __pgprot(preservebits | addbits);
+}
+
 #define pte_pgprot(x) __pgprot(pte_val(x) & (0xfff | _PAGE_NX))
 
 #define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 4de546899dc1..a5bf31c27375 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -26,6 +26,13 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
+#ifndef pgprot_modify
+static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
+{
+       return newprot;
+}
+#endif
+
 static void change_pte_range(struct mm_struct *mm, pmd_t *pmd,
                unsigned long addr, unsigned long end, pgprot_t newprot,
                int dirty_accountable)
@@ -192,7 +199,9 @@ success:
         * held in write mode.
         */
        vma->vm_flags = newflags;
-       vma->vm_page_prot = vm_get_page_prot(newflags);
+       vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
+                                         vm_get_page_prot(newflags));
+
        if (vma_wants_writenotify(vma)) {
                vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
                dirty_accountable = 1;