author     Paul Mundt <lethal@linux-sh.org>   2010-01-19 15:20:35 +0900
committer  Paul Mundt <lethal@linux-sh.org>   2010-01-19 15:20:35 +0900
commit     bb29c677b366fdf4f6522cd82228a32567aa98c7 (patch)
tree       0235c7477ed635c8c21131b90094d151663ae889
parent     046581f9623b53f551a93864bb74e15ad2514f0c (diff)
sh: Split out MMUCR.URB-based entry wiring into a shared helper.
Presently this is duplicated between tlb-sh4 and tlb-pteaex. Split the helpers out into a generic tlb-urb that can be used by any parts equipped with MMUCR.URB.

At the same time, move the SH-5 code out of line, as we require a single, global copy of the state used for DTLB entry wiring.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
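For context, tlb-sh4 and tlb-pteaex builds now pick up the same pair of helpers from tlb-urb.c via <asm/tlb.h>. The sketch below is a hypothetical caller, not part of this patch (the function name and the vma/addr/pte variables are illustrative only), showing how the shared interface is meant to be used:

    /*
     * Hypothetical caller on an URB-equipped part: wire one translation
     * for the duration of an operation, then release it again.
     */
    static void access_with_wired_tlb(struct vm_area_struct *vma,
                                      unsigned long addr, pte_t pte)
    {
        tlb_wire_entry(vma, addr, pte);   /* load 'addr' into the TLB and wire it */

        /* ... touch 'addr' with no risk of the entry being replaced ... */

        tlb_unwire_entry();               /* drop the most recently wired entry */
    }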
-rw-r--r--  arch/sh/include/asm/tlb.h  |  44
-rw-r--r--  arch/sh/mm/Makefile        |   4
-rw-r--r--  arch/sh/mm/tlb-pteaex.c    |  66
-rw-r--r--  arch/sh/mm/tlb-sh4.c       |  66
-rw-r--r--  arch/sh/mm/tlb-sh5.c       |  39
-rw-r--r--  arch/sh/mm/tlb-urb.c       |  81
6 files changed, 124 insertions(+), 176 deletions(-)
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index dfc8fcd8ee50..75abb38dffd5 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -98,49 +98,9 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
#define tlb_migrate_finish(mm) do { } while (0)
-#ifdef CONFIG_CPU_SH4
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
extern void tlb_unwire_entry(void);
-#elif defined(CONFIG_SUPERH64)
-static int dtlb_entry;
-static unsigned long long dtlb_entries[64];
-
-static inline void tlb_wire_entry(struct vm_area_struct *vma,
- unsigned long addr, pte_t pte)
-{
- unsigned long long entry;
- unsigned long paddr, flags;
-
- BUG_ON(dtlb_entry == 64);
-
- local_irq_save(flags);
-
- entry = sh64_get_wired_dtlb_entry();
- dtlb_entries[dtlb_entry++] = entry;
-
- paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK;
- paddr &= ~PAGE_MASK;
-
- sh64_setup_tlb_slot(entry, addr, get_asid(), paddr);
-
- local_irq_restore(flags);
-}
-
-static inline void tlb_unwire_entry(void)
-{
- unsigned long long entry;
- unsigned long flags;
-
- BUG_ON(!dtlb_entry);
-
- local_irq_save(flags);
- entry = dtlb_entries[dtlb_entry--];
-
- sh64_teardown_tlb_slot(entry);
- sh64_put_wired_dtlb_entry(entry);
-
- local_irq_restore(flags);
-}
#else
static inline void tlb_wire_entry(struct vm_area_struct *vma ,
unsigned long addr, pte_t pte)
@@ -152,7 +112,7 @@ static inline void tlb_unwire_entry(void)
{
BUG();
}
-#endif /* CONFIG_CPU_SH4 */
+#endif
#else /* CONFIG_MMU */
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 0027cdea2c20..de714cbd961a 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -26,9 +26,9 @@ endif
ifdef CONFIG_MMU
tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o
-tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o
+tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o tlb-urb.o
tlb-$(CONFIG_CPU_SH5) := tlb-sh5.o
-tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o
+tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o tlb-urb.o
obj-y += $(tlb-y)
endif
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c
index 88c8bb05e16d..409b7c2b4b9d 100644
--- a/arch/sh/mm/tlb-pteaex.c
+++ b/arch/sh/mm/tlb-pteaex.c
@@ -76,69 +76,3 @@ void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
__raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
back_to_cached();
}
-
-/*
- * Load the entry for 'addr' into the TLB and wire the entry.
- */
-void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
-{
- unsigned long status, flags;
- int urb;
-
- local_irq_save(flags);
-
- /* Load the entry into the TLB */
- __update_tlb(vma, addr, pte);
-
- /* ... and wire it up. */
- status = ctrl_inl(MMUCR);
- urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
- status &= ~MMUCR_URB;
-
- /*
- * Make sure we're not trying to wire the last TLB entry slot.
- */
- BUG_ON(!--urb);
-
- urb = urb % MMUCR_URB_NENTRIES;
-
- status |= (urb << MMUCR_URB_SHIFT);
- ctrl_outl(status, MMUCR);
- ctrl_barrier();
-
- local_irq_restore(flags);
-}
-
-/*
- * Unwire the last wired TLB entry.
- *
- * It should also be noted that it is not possible to wire and unwire
- * TLB entries in an arbitrary order. If you wire TLB entry N, followed
- * by entry N+1, you must unwire entry N+1 first, then entry N. In this
- * respect, it works like a stack or LIFO queue.
- */
-void tlb_unwire_entry(void)
-{
- unsigned long status, flags;
- int urb;
-
- local_irq_save(flags);
-
- status = ctrl_inl(MMUCR);
- urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
- status &= ~MMUCR_URB;
-
- /*
- * Make sure we're not trying to unwire a TLB entry when none
- * have been wired.
- */
- BUG_ON(urb++ == MMUCR_URB_NENTRIES);
-
- urb = urb % MMUCR_URB_NENTRIES;
-
- status |= (urb << MMUCR_URB_SHIFT);
- ctrl_outl(status, MMUCR);
- ctrl_barrier();
-
- local_irq_restore(flags);
-}
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 4c6234743318..8cf550e2570f 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -81,69 +81,3 @@ void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
ctrl_outl(data, addr);
back_to_cached();
}
-
-/*
- * Load the entry for 'addr' into the TLB and wire the entry.
- */
-void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
-{
- unsigned long status, flags;
- int urb;
-
- local_irq_save(flags);
-
- /* Load the entry into the TLB */
- __update_tlb(vma, addr, pte);
-
- /* ... and wire it up. */
- status = ctrl_inl(MMUCR);
- urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
- status &= ~MMUCR_URB;
-
- /*
- * Make sure we're not trying to wire the last TLB entry slot.
- */
- BUG_ON(!--urb);
-
- urb = urb % MMUCR_URB_NENTRIES;
-
- status |= (urb << MMUCR_URB_SHIFT);
- ctrl_outl(status, MMUCR);
- ctrl_barrier();
-
- local_irq_restore(flags);
-}
-
-/*
- * Unwire the last wired TLB entry.
- *
- * It should also be noted that it is not possible to wire and unwire
- * TLB entries in an arbitrary order. If you wire TLB entry N, followed
- * by entry N+1, you must unwire entry N+1 first, then entry N. In this
- * respect, it works like a stack or LIFO queue.
- */
-void tlb_unwire_entry(void)
-{
- unsigned long status, flags;
- int urb;
-
- local_irq_save(flags);
-
- status = ctrl_inl(MMUCR);
- urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
- status &= ~MMUCR_URB;
-
- /*
- * Make sure we're not trying to unwire a TLB entry when none
- * have been wired.
- */
- BUG_ON(urb++ == MMUCR_URB_NENTRIES);
-
- urb = urb % MMUCR_URB_NENTRIES;
-
- status |= (urb << MMUCR_URB_SHIFT);
- ctrl_outl(status, MMUCR);
- ctrl_barrier();
-
- local_irq_restore(flags);
-}
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c
index fdb64e41ec50..f27dbe1c1599 100644
--- a/arch/sh/mm/tlb-sh5.c
+++ b/arch/sh/mm/tlb-sh5.c
@@ -143,3 +143,42 @@ void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
*/
void sh64_teardown_tlb_slot(unsigned long long config_addr)
__attribute__ ((alias("__flush_tlb_slot")));
+
+static int dtlb_entry;
+static unsigned long long dtlb_entries[64];
+
+void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+ unsigned long long entry;
+ unsigned long paddr, flags;
+
+ BUG_ON(dtlb_entry == ARRAY_SIZE(dtlb_entries));
+
+ local_irq_save(flags);
+
+ entry = sh64_get_wired_dtlb_entry();
+ dtlb_entries[dtlb_entry++] = entry;
+
+ paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK;
+ paddr &= ~PAGE_MASK;
+
+ sh64_setup_tlb_slot(entry, addr, get_asid(), paddr);
+
+ local_irq_restore(flags);
+}
+
+void tlb_unwire_entry(void)
+{
+ unsigned long long entry;
+ unsigned long flags;
+
+ BUG_ON(!dtlb_entry);
+
+ local_irq_save(flags);
+ entry = dtlb_entries[dtlb_entry--];
+
+ sh64_teardown_tlb_slot(entry);
+ sh64_put_wired_dtlb_entry(entry);
+
+ local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlb-urb.c b/arch/sh/mm/tlb-urb.c
new file mode 100644
index 000000000000..bb5b9098956d
--- /dev/null
+++ b/arch/sh/mm/tlb-urb.c
@@ -0,0 +1,81 @@
+/*
+ * arch/sh/mm/tlb-urb.c
+ *
+ * TLB entry wiring helpers for URB-equipped parts.
+ *
+ * Copyright (C) 2010 Matt Fleming
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+
+/*
+ * Load the entry for 'addr' into the TLB and wire the entry.
+ */
+void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+ unsigned long status, flags;
+ int urb;
+
+ local_irq_save(flags);
+
+ /* Load the entry into the TLB */
+ __update_tlb(vma, addr, pte);
+
+ /* ... and wire it up. */
+ status = __raw_readl(MMUCR);
+ urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+ status &= ~MMUCR_URB;
+
+ /*
+ * Make sure we're not trying to wire the last TLB entry slot.
+ */
+ BUG_ON(!--urb);
+
+ urb = urb % MMUCR_URB_NENTRIES;
+
+ status |= (urb << MMUCR_URB_SHIFT);
+ __raw_writel(status, MMUCR);
+ ctrl_barrier();
+
+ local_irq_restore(flags);
+}
+
+/*
+ * Unwire the last wired TLB entry.
+ *
+ * It should also be noted that it is not possible to wire and unwire
+ * TLB entries in an arbitrary order. If you wire TLB entry N, followed
+ * by entry N+1, you must unwire entry N+1 first, then entry N. In this
+ * respect, it works like a stack or LIFO queue.
+ */
+void tlb_unwire_entry(void)
+{
+ unsigned long status, flags;
+ int urb;
+
+ local_irq_save(flags);
+
+ status = __raw_readl(MMUCR);
+ urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+ status &= ~MMUCR_URB;
+
+ /*
+ * Make sure we're not trying to unwire a TLB entry when none
+ * have been wired.
+ */
+ BUG_ON(urb++ == MMUCR_URB_NENTRIES);
+
+ urb = urb % MMUCR_URB_NENTRIES;
+
+ status |= (urb << MMUCR_URB_SHIFT);
+ __raw_writel(status, MMUCR);
+ ctrl_barrier();
+
+ local_irq_restore(flags);
+}
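As the comment in tlb_unwire_entry() spells out, wiring is strictly last-in, first-out. A hedged illustration of the required ordering (the addresses and PTEs here are hypothetical, purely to show the call sequence):

    tlb_wire_entry(vma, addr_a, pte_a);   /* wires entry N   */
    tlb_wire_entry(vma, addr_b, pte_b);   /* wires entry N+1 */

    /* ... */

    tlb_unwire_entry();                   /* must release N+1 first */
    tlb_unwire_entry();                   /* then N */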