author    | Matt Fleming <matt@console-pimps.org> | 2009-11-17 21:05:31 +0000
committer | Matt Fleming <matt@console-pimps.org> | 2010-01-16 14:28:57 +0000
commit    | 8eda55142080f0373b1f0268fe6d6807f193e713 (patch)
tree      | 6d103af69153dc5bfd78ebe89930cf3c66ec5b2b /arch
parent    | 7dcaa8e8e67b2cfbe0097c9bb52e23aed5443b8b (diff)
sh: New extended page flag to wire/unwire TLB entries
Provide a new extended page flag, _PAGE_WIRED, and an SH4 implementation
for wiring TLB entries. Use it in the fixmap code path so that we can
wire the fixmap TLB entry.
Signed-off-by: Matt Fleming <matt@console-pimps.org>
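
The fixmap changes that actually call this interface live outside arch/ and are therefore not part of the diffstat below. As a rough, hypothetical illustration only (the caller and its vma/addr/pte setup are placeholders, not taken from this patch), a user of the new API might look like the following, remembering that wired entries must be released in reverse order:

/*
 * Hypothetical caller of the API added by this patch. Only
 * tlb_wire_entry()/tlb_unwire_entry() come from the patch itself;
 * everything else here is an illustrative placeholder.
 */
#include <linux/mm.h>
#include <asm/tlb.h>

static void wire_temporary_mapping(struct vm_area_struct *vma,
				   unsigned long addr, pte_t pte)
{
	/* Load the translation for 'addr' and pin it in a wired UTLB slot. */
	tlb_wire_entry(vma, addr, pte);

	/* ... use the mapping; it cannot be evicted while wired ... */

	/*
	 * Wired entries behave like a stack (LIFO): the most recently
	 * wired entry must be the first one unwired.
	 */
	tlb_unwire_entry();
}
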
Diffstat (limited to 'arch')
-rw-r--r-- | arch/sh/include/asm/pgtable_32.h           |  4
-rw-r--r-- | arch/sh/include/asm/tlb.h                  | 16
-rw-r--r-- | arch/sh/include/cpu-sh4/cpu/mmu_context.h  |  4
-rw-r--r-- | arch/sh/mm/tlb-pteaex.c                    | 66
-rw-r--r-- | arch/sh/mm/tlb-sh4.c                       | 66
5 files changed, 156 insertions, 0 deletions
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index 5003ee86f67b..c573d45f1286 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -71,6 +71,8 @@
 #define _PAGE_EXT_KERN_WRITE	0x1000	/* EPR4-bit: Kernel space writable */
 #define _PAGE_EXT_KERN_READ	0x2000	/* EPR5-bit: Kernel space readable */
 
+#define _PAGE_EXT_WIRED	0x4000	/* software: Wire TLB entry */
+
 /* Wrapper for extended mode pgprot twiddling */
 #define _PAGE_EXT(x)		((unsigned long long)(x) << 32)
 
@@ -164,6 +166,8 @@ static inline unsigned long copy_ptea_attributes(unsigned long x)
 	(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | \
 	 _PAGE_DIRTY | _PAGE_SPECIAL)
 
+#define _PAGE_WIRED	(_PAGE_EXT(_PAGE_EXT_WIRED))
+
 #ifndef __ASSEMBLY__
 
 #if defined(CONFIG_X2TLB) /* SH-X2 TLB */
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index da8fe7ab8728..3ed2f7a05416 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -97,6 +97,22 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 
 #define tlb_migrate_finish(mm) do { } while (0)
 
+#ifdef CONFIG_CPU_SH4
+extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
+extern void tlb_unwire_entry(void);
+#else
+static inline void tlb_wire_entry(struct vm_area_struct *vma,
+				  unsigned long addr, pte_t pte)
+{
+	BUG();
+}
+
+static inline void tlb_unwire_entry(void)
+{
+	BUG();
+}
+#endif /* CONFIG_CPU_SH4 */
+
 #else /* CONFIG_MMU */
 
 #define tlb_start_vma(tlb, vma) do { } while (0)
diff --git a/arch/sh/include/cpu-sh4/cpu/mmu_context.h b/arch/sh/include/cpu-sh4/cpu/mmu_context.h
index 3ce7ef6c2978..03ea75c5315d 100644
--- a/arch/sh/include/cpu-sh4/cpu/mmu_context.h
+++ b/arch/sh/include/cpu-sh4/cpu/mmu_context.h
@@ -25,6 +25,10 @@
 
 #define MMUCR_TI		(1<<2)
 
+#define MMUCR_URB		0x00FC0000
+#define MMUCR_URB_SHIFT		18
+#define MMUCR_URB_NENTRIES	64
+
 #if defined(CONFIG_32BIT) && defined(CONFIG_CPU_SUBTYPE_ST40)
 #define MMUCR_SE		(1 << 4)
 #else
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c
index 409b7c2b4b9d..88c8bb05e16d 100644
--- a/arch/sh/mm/tlb-pteaex.c
+++ b/arch/sh/mm/tlb-pteaex.c
@@ -76,3 +76,69 @@ void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
 	__raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
 	back_to_cached();
 }
+
+/*
+ * Load the entry for 'addr' into the TLB and wire the entry.
+ */
+void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	/* Load the entry into the TLB */
+	__update_tlb(vma, addr, pte);
+
+	/* ... and wire it up. */
+	status = ctrl_inl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to wire the last TLB entry slot.
+	 */
+	BUG_ON(!--urb);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Unwire the last wired TLB entry.
+ *
+ * It should also be noted that it is not possible to wire and unwire
+ * TLB entries in an arbitrary order. If you wire TLB entry N, followed
+ * by entry N+1, you must unwire entry N+1 first, then entry N. In this
+ * respect, it works like a stack or LIFO queue.
+ */
+void tlb_unwire_entry(void)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	status = ctrl_inl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to unwire a TLB entry when none
+	 * have been wired.
+	 */
+	BUG_ON(urb++ == MMUCR_URB_NENTRIES);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 8cf550e2570f..4c6234743318 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -81,3 +81,69 @@ void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
 	ctrl_outl(data, addr);
 	back_to_cached();
 }
+
+/*
+ * Load the entry for 'addr' into the TLB and wire the entry.
+ */
+void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	/* Load the entry into the TLB */
+	__update_tlb(vma, addr, pte);
+
+	/* ... and wire it up. */
+	status = ctrl_inl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to wire the last TLB entry slot.
+	 */
+	BUG_ON(!--urb);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Unwire the last wired TLB entry.
+ *
+ * It should also be noted that it is not possible to wire and unwire
+ * TLB entries in an arbitrary order. If you wire TLB entry N, followed
+ * by entry N+1, you must unwire entry N+1 first, then entry N. In this
+ * respect, it works like a stack or LIFO queue.
+ */
+void tlb_unwire_entry(void)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	status = ctrl_inl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to unwire a TLB entry when none
+	 * have been wired.
+	 */
+	BUG_ON(urb++ == MMUCR_URB_NENTRIES);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}