-rw-r--r--   arch/sh/mm/tlb-sh4.c                    30
-rw-r--r--   include/asm-sh/cpu-sh4/mmu_context.h    14
-rw-r--r--   include/asm-sh/page.h                    4
-rw-r--r--   include/asm-sh/pgtable.h                112
4 files changed, 88 insertions(+), 72 deletions(-)
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 13fde8cc7179..2d1dd6044307 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -4,27 +4,14 @@
* SH-4 specific TLB operations
*
* Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2002 Paul Mundt
+ * Copyright (C) 2002 - 2007 Paul Mundt
*
* Released under the terms of the GNU GPL v2.0.
*/
-#include <linux/signal.h>
-#include <linux/sched.h>
#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-
+#include <linux/io.h>
#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
@@ -62,12 +49,22 @@ void update_mmu_cache(struct vm_area_struct * vma,
vpn = (address & MMU_VPN_MASK) | get_asid();
ctrl_outl(vpn, MMU_PTEH);
- pteval = pte_val(pte);
+ pteval = pte.pte_low;
/* Set PTEA register */
+#ifdef CONFIG_X2TLB
+ /*
+ * For the extended mode TLB this is trivial, only the ESZ and
+ * EPR bits need to be written out to PTEA, with the remainder of
+ * the protection bits (with the exception of the compat-mode SZ
+ * and PR bits, which are cleared) being written out in PTEL.
+ */
+ ctrl_outl(pte.pte_high, MMU_PTEA);
+#else
if (cpu_data->flags & CPU_HAS_PTEA)
/* TODO: make this look less hacky */
ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
+#endif
/* Set PTEL register */
pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
@@ -98,4 +95,3 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
ctrl_outl(data, addr);
back_to_P1();
}
-
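With the X2TLB case folded in, the register sequence performed by update_mmu_cache() boils
down to roughly the following. This is only a sketch under the assumptions visible in the
patch (the helper name sh4_x2tlb_load_sketch is made up for illustration, and the fault
checks, cache writeback and irq handling of the real function are omitted):

	static void sh4_x2tlb_load_sketch(unsigned long address, pte_t pte)
	{
		unsigned long vpn, pteval;

		/* The VPN of the faulting address plus the current ASID
		 * select the entry to load, via PTEH. */
		vpn = (address & MMU_VPN_MASK) | get_asid();
		ctrl_outl(vpn, MMU_PTEH);

		/* Extended mode: the ESZ/EPR bits live in the upper pte word
		 * and go straight to PTEA ... */
		ctrl_outl(pte.pte_high, MMU_PTEA);

		/* ... while PTEL only gets the hardware flags from the lower
		 * word; the legacy SZ/PR bits were already stripped out by
		 * _PAGE_CLEAR_FLAGS. */
		pteval = pte.pte_low & _PAGE_FLAGS_HARDWARE_MASK;
		ctrl_outl(pteval, MMU_PTEL);

		/* ldtlb latches PTEH/PTEL/PTEA into the UTLB entry selected
		 * by MMUCR.URC. */
		asm volatile("ldtlb" : : : "memory");
	}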
diff --git a/include/asm-sh/cpu-sh4/mmu_context.h b/include/asm-sh/cpu-sh4/mmu_context.h
index ff4c5fbbfaf0..979acddc0f8e 100644
--- a/include/asm-sh/cpu-sh4/mmu_context.h
+++ b/include/asm-sh/cpu-sh4/mmu_context.h
@@ -22,13 +22,21 @@
#define MMU_UTLB_ADDRESS_ARRAY 0xF6000000
#define MMU_PAGE_ASSOC_BIT 0x80
-#define MMU_NTLB_ENTRIES 64 /* for 7750 */
+#ifdef CONFIG_X2TLB
+#define MMUCR_ME (1 << 7)
+#else
+#define MMUCR_ME (0)
+#endif
+
#ifdef CONFIG_SH_STORE_QUEUES
-#define MMU_CONTROL_INIT 0x05 /* SQMD=0, SV=0, TI=1, AT=1 */
+#define MMUCR_SQMD (1 << 9)
#else
-#define MMU_CONTROL_INIT 0x205 /* SQMD=1, SV=0, TI=1, AT=1 */
+#define MMUCR_SQMD (0)
#endif
+#define MMU_NTLB_ENTRIES 64
+#define MMU_CONTROL_INIT (0x05|MMUCR_SQMD|MMUCR_ME)
+
#define MMU_ITLB_DATA_ARRAY 0xF3000000
#define MMU_UTLB_DATA_ARRAY 0xF7000000
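For reference, MMU_CONTROL_INIT now builds the MMUCR initialisation value from the base
0x05 (AT and TI set, as the old comments noted) plus the optional bits, rather than
hard-coding two magic constants. The configurations expand as follows (MMUCR_AT and
MMUCR_TI are named here only for the illustration; this header does not define them):

	/* 0x05 == MMUCR_AT (1 << 0) | MMUCR_TI (1 << 2), as before           */
	/* CONFIG_SH_STORE_QUEUES + CONFIG_X2TLB: 0x05 | 0x200 | 0x80 = 0x285 */
	/* CONFIG_SH_STORE_QUEUES only:           0x05 | 0x200        = 0x205 */
	/* CONFIG_X2TLB only:                     0x05 | 0x80         = 0x085 */
	/* neither:                               0x05                = 0x005 */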
diff --git a/include/asm-sh/page.h b/include/asm-sh/page.h
index 48b718e7455c..cb3d46c59eab 100644
--- a/include/asm-sh/page.h
+++ b/include/asm-sh/page.h
@@ -88,6 +88,7 @@ extern void __copy_user_page(void *to, void *from, void *orig_to);
#ifdef CONFIG_X2TLB
typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
+typedef struct { unsigned long long pgd; } pgd_t;
#define pte_val(x) \
((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x) \
@@ -95,12 +96,11 @@ typedef struct { unsigned long long pgprot; } pgprot_t;
#else
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low)
#define __pte(x) ((pte_t) { (x) } )
#endif
-typedef struct { unsigned long pgd; } pgd_t;
-
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
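The net effect is that under CONFIG_X2TLB a pte is carried as two 32-bit words which
pte_val()/__pte() splice into and out of a 64-bit value. A standalone demonstration of
the round trip (the __pte() definition below is a plausible reconstruction, since the
hunk above truncates it, and the flag values used are placeholders):

	#include <stdio.h>

	typedef struct { unsigned long pte_low, pte_high; } pte_t;

	#define pte_val(x) \
		((x).pte_low | ((unsigned long long)(x).pte_high << 32))
	#define __pte(x) \
		((pte_t) { (unsigned long)((x) & 0xffffffffULL), \
			   (unsigned long)((unsigned long long)(x) >> 32) })

	int main(void)
	{
		/* high word: extended ESZ/EPR bits, low word: legacy hardware flags */
		unsigned long long val = ((unsigned long long)0x2200 << 32) | 0x10d;
		pte_t pte = __pte(val);

		printf("pte_low  = %#lx\n", pte.pte_low);    /* written to PTEL */
		printf("pte_high = %#lx\n", pte.pte_high);   /* written to PTEA */
		printf("pte_val  = %#llx\n", pte_val(pte));  /* 64-bit view     */
		return 0;
	}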
diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index 54ad5037fe40..cf0dd2b648c2 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -42,13 +42,12 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
/* PGD bits */
#define PGDIR_SHIFT (PTE_SHIFT + PTE_BITS)
-#define PGDIR_BITS (32 - PGDIR_SHIFT)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
/* Entries per level */
#define PTRS_PER_PTE (PAGE_SIZE / (1 << PTE_MAGNITUDE))
-#define PTRS_PER_PGD (PAGE_SIZE / 4)
+#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0
@@ -100,17 +99,18 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define _PAGE_HW_SHARED 0x002 /* SH-bit : shared among processes */
#define _PAGE_DIRTY 0x004 /* D-bit : page changed */
#define _PAGE_CACHABLE 0x008 /* C-bit : cachable */
-#ifndef CONFIG_X2TLB
-# define _PAGE_SZ0 0x010 /* SZ0-bit : Size of page */
-# define _PAGE_RW 0x020 /* PR0-bit : write access allowed */
-# define _PAGE_USER 0x040 /* PR1-bit : user space access allowed*/
-# define _PAGE_SZ1 0x080 /* SZ1-bit : Size of page (on SH-4) */
-#endif
+#define _PAGE_SZ0 0x010 /* SZ0-bit : Size of page */
+#define _PAGE_RW 0x020 /* PR0-bit : write access allowed */
+#define _PAGE_USER 0x040 /* PR1-bit : user space access allowed*/
+#define _PAGE_SZ1 0x080 /* SZ1-bit : Size of page (on SH-4) */
#define _PAGE_PRESENT 0x100 /* V-bit : page is valid */
#define _PAGE_PROTNONE 0x200 /* software: if not present */
#define _PAGE_ACCESSED 0x400 /* software: page referenced */
#define _PAGE_FILE _PAGE_WT /* software: pagecache or swap? */
+#define _PAGE_SZ_MASK (_PAGE_SZ0 | _PAGE_SZ1)
+#define _PAGE_PR_MASK (_PAGE_RW | _PAGE_USER)
+
/* Extended mode bits */
#define _PAGE_EXT_ESZ0 0x0010 /* ESZ0-bit: Size of page */
#define _PAGE_EXT_ESZ1 0x0020 /* ESZ1-bit: Size of page */
@@ -126,11 +126,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define _PAGE_EXT_KERN_READ 0x2000 /* EPR5-bit: Kernel space readable */
/* Wrapper for extended mode pgprot twiddling */
-#ifdef CONFIG_X2TLB
-# define _PAGE_EXT(x) ((unsigned long long)(x) << 32)
-#else
-# define _PAGE_EXT(x) (0)
-#endif
+#define _PAGE_EXT(x) ((unsigned long long)(x) << 32)
/* software: moves to PTEA.TC (Timing Control) */
#define _PAGE_PCC_AREA5 0x00000000 /* use BSC registers for area5 */
@@ -146,10 +142,14 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define _PAGE_PCC_ATR16 0x60000001 /* Attribute Memory space, 6 bit bus */
/* Mask which drops unused bits from the PTEL value */
-#ifdef CONFIG_CPU_SH3
+#if defined(CONFIG_CPU_SH3)
#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED| \
_PAGE_FILE | _PAGE_SZ1 | \
_PAGE_HW_SHARED)
+#elif defined(CONFIG_X2TLB)
+/* Get rid of the legacy PR/SZ bits when using extended mode */
+#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | \
+ _PAGE_FILE | _PAGE_PR_MASK | _PAGE_SZ_MASK)
#else
#define _PAGE_CLEAR_FLAGS (_PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE)
#endif
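In extended mode the hardware honours only the ESZ/EPR copies of the size and protection
bits in the upper pte word, so the legacy SZ/PR bits are folded into the clear mask and
never reach PTEL. Numerically, and assuming _PAGE_WT is 0x001 as elsewhere in this
header, the X2TLB variant expands to:

	/* _PAGE_PROTNONE | _PAGE_ACCESSED | _PAGE_FILE | _PAGE_PR_MASK | _PAGE_SZ_MASK
	 *      0x200     |     0x400      |   0x001    |  (0x20|0x40)  |  (0x10|0x80)
	 *  = 0x6f1
	 */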
@@ -212,27 +212,36 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
_PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
- _PAGE_EXT(_PAGE_EXT_USER_READ | \
+ _PAGE_EXT(_PAGE_EXT_KERN_READ | \
+ _PAGE_EXT_KERN_WRITE | \
+ _PAGE_EXT_USER_READ | \
_PAGE_EXT_USER_WRITE))
#define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
_PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
- _PAGE_EXT(_PAGE_EXT_USER_EXEC | \
+ _PAGE_EXT(_PAGE_EXT_KERN_EXEC | \
+ _PAGE_EXT_KERN_READ | \
+ _PAGE_EXT_USER_EXEC | \
_PAGE_EXT_USER_READ))
#define PAGE_COPY PAGE_EXECREAD
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
_PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
- _PAGE_EXT(_PAGE_EXT_USER_READ))
+ _PAGE_EXT(_PAGE_EXT_KERN_READ | \
+ _PAGE_EXT_USER_READ))
#define PAGE_WRITEONLY __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
_PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
- _PAGE_EXT(_PAGE_EXT_USER_WRITE))
+ _PAGE_EXT(_PAGE_EXT_KERN_WRITE | \
+ _PAGE_EXT_USER_WRITE))
#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
_PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
- _PAGE_EXT(_PAGE_EXT_USER_WRITE | \
+ _PAGE_EXT(_PAGE_EXT_KERN_WRITE | \
+ _PAGE_EXT_KERN_READ | \
+ _PAGE_EXT_KERN_EXEC | \
+ _PAGE_EXT_USER_WRITE | \
_PAGE_EXT_USER_READ | \
_PAGE_EXT_USER_EXEC))
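Unlike the legacy PR encoding, where privileged-mode access rights were implied by the PR
bits rather than specified separately, the extended EPR bits spell out kernel and user
permissions independently, which is why each protection now carries the matching
_PAGE_EXT_KERN_* bits as well. As a worked summary, the extended halves reduce to:

	/*
	 * PAGE_READONLY  : _PAGE_EXT(KERN_READ | USER_READ)
	 * PAGE_WRITEONLY : _PAGE_EXT(KERN_WRITE | USER_WRITE)
	 * PAGE_EXECREAD  : _PAGE_EXT(KERN_EXEC | KERN_READ | USER_EXEC | USER_READ)
	 * PAGE_SHARED    : _PAGE_EXT(KERN_READ | KERN_WRITE | USER_READ | USER_WRITE)
	 *
	 * _PAGE_EXT() shifts each of these into the top 32 bits, i.e. into
	 * pte_high, which update_mmu_cache() then writes out through PTEA.
	 */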
@@ -373,11 +382,15 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
#define pte_pfn(x) ((unsigned long)(((x).pte_low >> PAGE_SHIFT)))
-#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
-#define pte_none(x) (!pte_val(x))
-#define pte_present(x) (pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
+#define pfn_pte(pfn, prot) \
+ __pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define pfn_pmd(pfn, prot) \
+ __pmd(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
+
+#define pte_none(x) (!pte_val(x))
+#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
+
#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
#define pmd_none(x) (!pmd_val(x))
@@ -392,15 +405,15 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
-#define pte_not_present(pte) (!(pte_val(pte) & _PAGE_PRESENT))
-#define pte_dirty(pte) (pte_val(pte) & _PAGE_DIRTY)
-#define pte_young(pte) (pte_val(pte) & _PAGE_ACCESSED)
-#define pte_file(pte) (pte_val(pte) & _PAGE_FILE)
+#define pte_not_present(pte) (!((pte).pte_low & _PAGE_PRESENT))
+#define pte_dirty(pte) ((pte).pte_low & _PAGE_DIRTY)
+#define pte_young(pte) ((pte).pte_low & _PAGE_ACCESSED)
+#define pte_file(pte) ((pte).pte_low & _PAGE_FILE)
#ifdef CONFIG_X2TLB
#define pte_write(pte) ((pte).pte_high & _PAGE_EXT_USER_WRITE)
#else
-#define pte_write(pte) (pte_val(pte) & _PAGE_RW)
+#define pte_write(pte) ((pte).pte_low & _PAGE_RW)
#endif
#define PTE_BIT_FUNC(h,fn,op) \
@@ -429,17 +442,10 @@ PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED);
/*
* Macro and implementation to make a page protection as uncachable.
*/
-#define pgprot_noncached pgprot_noncached
+#define pgprot_writecombine(prot) \
+ __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
-static inline pgprot_t pgprot_noncached(pgprot_t _prot)
-{
- unsigned long prot = pgprot_val(_prot);
-
- prot &= ~_PAGE_CACHABLE;
- return __pgprot(prot);
-}
-
-#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
+#define pgprot_noncached pgprot_writecombine
/*
* Conversion functions: convert a page and protection to a page entry,
@@ -451,28 +457,33 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) |
- pgprot_val(newprot)));
+ pte.pte_low &= _PAGE_CHG_MASK;
+ pte.pte_low |= pgprot_val(newprot);
+
+#ifdef CONFIG_X2TLB
+ pte.pte_high |= pgprot_val(newprot) >> 32;
+#endif
+
return pte;
}
-#define pmd_page_vaddr(pmd) pmd_val(pmd)
+#define pmd_page_vaddr(pmd) ((unsigned long)pmd_val(pmd))
#define pmd_page(pmd) (virt_to_page(pmd_val(pmd)))
/* to find an entry in a page-table-directory. */
-#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
/* to find an entry in a kernel page-table-directory */
-#define pgd_offset_k(address) pgd_offset(&init_mm, address)
+#define pgd_offset_k(address) pgd_offset(&init_mm, address)
/* Find an entry in the third-level page table.. */
-#define pte_index(address) \
- ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_index(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
-#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
-#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
+#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
+#define pte_offset_map_nested(dir, address) pte_offset_kernel(dir, address)
+
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
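Tying the pte_modify() rework above back to the two-word pte: the low word is masked and
refilled as before, while the extended protection bits from the new pgprot are ORed into
the high word. A standalone sketch of the same logic (the function name and the
_PAGE_CHG_MASK value below are placeholders for the demo, not the header's real
definitions):

	typedef struct { unsigned long pte_low, pte_high; } pte_t;
	typedef struct { unsigned long long pgprot; } pgprot_t;

	#define pgprot_val(x)   ((x).pgprot)
	#define _PAGE_CHG_MASK  0xfffff40cUL	/* placeholder: pfn + preserved flags */

	static pte_t pte_modify_sketch(pte_t pte, pgprot_t newprot)
	{
		/* keep the pfn and the preserved flags ... */
		pte.pte_low &= _PAGE_CHG_MASK;
		/* ... take the new protection's low half into PTEL's word ... */
		pte.pte_low |= (unsigned long)pgprot_val(newprot);
		/* ... and its extended (ESZ/EPR) half into PTEA's word. */
		pte.pte_high |= (unsigned long)(pgprot_val(newprot) >> 32);

		return pte;
	}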
@@ -480,13 +491,14 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pte_ERROR(e) \
printk("%s:%d: bad pte %p(%08lx%08lx).\n", __FILE__, __LINE__, \
&(e), (e).pte_high, (e).pte_low)
+#define pgd_ERROR(e) \
+ printk("%s:%d: bad pgd %016llx.\n", __FILE__, __LINE__, pgd_val(e))
#else
#define pte_ERROR(e) \
printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
-#endif
-
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
+#endif
struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct * vma,