author Michal Simek <monstr@monstr.eu> 2009-03-27 14:25:16 +0100
committer Michal Simek <monstr@monstr.eu> 2009-03-27 14:25:16 +0100
commit 8beb8503bfa305cd7d9efa590517a9c01e2f97b4 (patch)
tree 910b5b96454482b4ad8540c528456a0a53291c0b /arch/microblaze/kernel/cpu
parent 37069abf2973f51aa12aa9dcb86c7403c9828161 (diff)
microblaze_v8: cache support
Reviewed-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Stephen Neuendorffer <stephen.neuendorffer@xilinx.com>
Acked-by: John Linn <john.linn@xilinx.com>
Acked-by: John Williams <john.williams@petalogix.com>
Signed-off-by: Michal Simek <monstr@monstr.eu>
Diffstat (limited to 'arch/microblaze/kernel/cpu')
-rw-r--r-- arch/microblaze/kernel/cpu/cache.c | 258
1 file changed, 258 insertions(+), 0 deletions(-)
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
new file mode 100644
index 000000000000..be9fecca4f91
--- /dev/null
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -0,0 +1,258 @@
+/*
+ * Cache control for MicroBlaze cache memories
+ *
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/cache.h>
+#include <asm/cacheflush.h>
+#include <asm/cpuinfo.h>
+
+/* Exported functions */
+
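+/*
+ * Enabling or disabling a cache means setting or clearing the ICE
+ * (icache) or DCE (dcache) bit in the Machine Status Register.  When
+ * the CPU is built with the msrset/msrclr instructions this takes a
+ * single instruction; otherwise the MSR is read into r12, modified
+ * and written back.
+ */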
+void _enable_icache(void)
+{
+ if (cpuinfo.use_icache) {
+#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
+ __asm__ __volatile__ (" \
+ msrset r0, %0; \
+ nop; " \
+ : \
+ : "i" (MSR_ICE) \
+ : "memory");
+#else
+ __asm__ __volatile__ (" \
+ mfs r12, rmsr; \
+ nop; \
+ ori r12, r12, %0; \
+ mts rmsr, r12; \
+ nop; " \
+ : \
+ : "i" (MSR_ICE) \
+ : "memory", "r12");
+#endif
+ }
+}
+
+void _disable_icache(void)
+{
+ if (cpuinfo.use_icache) {
+#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
+ __asm__ __volatile__ (" \
+ msrclr r0, %0; \
+ nop; " \
+ : \
+ : "i" (MSR_ICE) \
+ : "memory");
+#else
+ __asm__ __volatile__ (" \
+ mfs r12, rmsr; \
+ nop; \
+ andi r12, r12, ~%0; \
+ mts rmsr, r12; \
+ nop; " \
+ : \
+ : "i" (MSR_ICE) \
+ : "memory", "r12");
+#endif
+ }
+}
+
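+/* wic invalidates the icache line that contains addr */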
+void _invalidate_icache(unsigned int addr)
+{
+ if (cpuinfo.use_icache) {
+ __asm__ __volatile__ (" \
+ wic %0, r0" \
+ : \
+ : "r" (addr));
+ }
+}
+
+void _enable_dcache(void)
+{
+ if (cpuinfo.use_dcache) {
+#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
+ __asm__ __volatile__ (" \
+ msrset r0, %0; \
+ nop; " \
+ : \
+ : "i" (MSR_DCE) \
+ : "memory");
+#else
+ __asm__ __volatile__ (" \
+ mfs r12, rmsr; \
+ nop; \
+ ori r12, r12, %0; \
+ mts rmsr, r12; \
+ nop; " \
+ : \
+ : "i" (MSR_DCE) \
+ : "memory", "r12");
+#endif
+ }
+}
+
+void _disable_dcache(void)
+{
+ if (cpuinfo.use_dcache) {
+#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
+ __asm__ __volatile__ (" \
+ msrclr r0, %0; \
+ nop; " \
+ : \
+ : "i" (MSR_DCE) \
+ : "memory");
+#else
+ __asm__ __volatile__ (" \
+ mfs r12, rmsr; \
+ nop; \
+ andi r12, r12, ~%0; \
+ mts rmsr, r12; \
+ nop; " \
+ : \
+ : "i" (MSR_DCE) \
+ : "memory", "r12");
+#endif
+ }
+}
+
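+/* wdc invalidates the dcache line that contains addr */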
+void _invalidate_dcache(unsigned int addr)
+{
+ if (cpuinfo.use_dcache)
+ __asm__ __volatile__ (" \
+ wdc %0, r0" \
+ : \
+ : "r" (addr));
+}
+
+void __invalidate_icache_all(void)
+{
+ unsigned int i;
+ unsigned long flags;
+
+ if (cpuinfo.use_icache) {
+ local_irq_save(flags);
+ __disable_icache();
+
+ /*
+  * Just loop through the cache size and invalidate; there is no need
+  * to add the CACHE_BASE address
+  */
+ for (i = 0; i < cpuinfo.icache_size;
+ i += cpuinfo.icache_line)
+ __invalidate_icache(i);
+
+ __enable_icache();
+ local_irq_restore(flags);
+ }
+}
+
+void __invalidate_icache_range(unsigned long start, unsigned long end)
+{
+ unsigned int i;
+ unsigned long flags;
+ unsigned int align;
+
+ if (cpuinfo.use_icache) {
+ /*
+ * No need to cover entire cache range,
+ * just cover cache footprint
+ */
+ end = min(start + cpuinfo.icache_size, end);
+ align = ~(cpuinfo.icache_line - 1);
+ start &= align; /* Make sure we are aligned */
+ /* Push end up to the next cache line */
+ end = ((end & align) + cpuinfo.icache_line);
+
+ local_irq_save(flags);
+ __disable_icache();
+
+ for (i = start; i < end; i += cpuinfo.icache_line)
+ __invalidate_icache(i);
+
+ __enable_icache();
+ local_irq_restore(flags);
+ }
+}
+
+void __invalidate_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ __invalidate_icache_all();
+}
+
+void __invalidate_icache_user_range(struct vm_area_struct *vma,
+ struct page *page, unsigned long adr,
+ int len)
+{
+ __invalidate_icache_all();
+}
+
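+/* The signal return trampoline is two 32-bit instructions, i.e. 8 bytes */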
+void __invalidate_cache_sigtramp(unsigned long addr)
+{
+ __invalidate_icache_range(addr, addr + 8);
+}
+
+void __invalidate_dcache_all(void)
+{
+ unsigned int i;
+ unsigned long flags;
+
+ if (cpuinfo.use_dcache) {
+ local_irq_save(flags);
+ __disable_dcache();
+
+ /*
+ * Just loop through cache size and invalidate,
+ * no need to add CACHE_BASE address
+ */
+ for (i = 0; i < cpuinfo.dcache_size;
+ i += cpuinfo.dcache_line)
+ __invalidate_dcache(i);
+
+ __enable_dcache();
+ local_irq_restore(flags);
+ }
+}
+
+void __invalidate_dcache_range(unsigned long start, unsigned long end)
+{
+ unsigned int i;
+ unsigned long flags;
+ unsigned int align;
+
+ if (cpuinfo.use_dcache) {
+ /*
+ * No need to cover entire cache range,
+ * just cover cache footprint
+ */
+ end = min(start + cpuinfo.dcache_size, end);
+ align = ~(cpuinfo.dcache_line - 1);
+ start &= align; /* Make sure we are aligned */
+ /* Push end up to the next cache line */
+ end = ((end & align) + cpuinfo.dcache_line);
+ local_irq_save(flags);
+ __disable_dcache();
+
+ for (i = start; i < end; i += cpuinfo.dcache_line)
+ __invalidate_dcache(i);
+
+ __enable_dcache();
+ local_irq_restore(flags);
+ }
+}
+
+void __invalidate_dcache_page(struct vm_area_struct *vma, struct page *page)
+{
+ __invalidate_dcache_all();
+}
+
+void __invalidate_dcache_user_range(struct vm_area_struct *vma,
+ struct page *page, unsigned long adr,
+ int len)
+{
+ __invalidate_dcache_all();
+}
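
For context, a minimal sketch of how a driver's receive path might use the
range invalidation exported above, after a device has written a buffer by
DMA; the function and the names my_dev_rx/buf/len are hypothetical and not
part of this patch:

#include <linux/types.h>
#include <asm/cacheflush.h>

/* Hypothetical: the device has just DMA'd len bytes into buf */
static void my_dev_rx(void *buf, size_t len)
{
	unsigned long start = (unsigned long)buf;

	/* Drop stale cached copies so the CPU sees what the device wrote */
	__invalidate_dcache_range(start, start + len);

	/* ... buf can now be read through the dcache ... */
}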