Diffstat (limited to 'arch/mips/mm/c-r4k.c')
-rw-r--r--  arch/mips/mm/c-r4k.c  61
1 file changed, 42 insertions(+), 19 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index fa7d8d3790bf..88cfaf81c958 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -17,7 +17,7 @@
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/bitops.h>
#include <asm/bcache.h>
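Note on the include swap above: it follows the tree-wide module.h cleanup, where a
file that is not itself modular and only needs the EXPORT_SYMBOL machinery includes
the much lighter <linux/export.h> instead. Illustrative pattern only; my_helper is a
hypothetical symbol, not code from this file:

#include <linux/export.h>	/* EXPORT_SYMBOL lives here, not in module.h */

int my_helper(void)		/* hypothetical symbol for illustration */
{
	return 0;
}
EXPORT_SYMBOL(my_helper);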
@@ -722,11 +722,13 @@ struct flush_icache_range_args {
unsigned long start;
unsigned long end;
unsigned int type;
+ bool user;
};
static inline void __local_r4k_flush_icache_range(unsigned long start,
unsigned long end,
- unsigned int type)
+ unsigned int type,
+ bool user)
{
if (!cpu_has_ic_fills_f_dc) {
if (type == R4K_INDEX ||
@@ -734,7 +736,10 @@ static inline void __local_r4k_flush_icache_range(unsigned long start,
r4k_blast_dcache();
} else {
R4600_HIT_CACHEOP_WAR_IMPL;
- protected_blast_dcache_range(start, end);
+ if (user)
+ protected_blast_dcache_range(start, end);
+ else
+ blast_dcache_range(start, end);
}
}
@@ -748,27 +753,25 @@ static inline void __local_r4k_flush_icache_range(unsigned long start,
break;
default:
- protected_blast_icache_range(start, end);
+ if (user)
+ protected_blast_icache_range(start, end);
+ else
+ blast_icache_range(start, end);
break;
}
}
-#ifdef CONFIG_EVA
- /*
- * Due to all possible segment mappings, there might cache aliases
- * caused by the bootloader being in non-EVA mode, and the CPU switching
- * to EVA during early kernel init. It's best to flush the scache
- * to avoid having secondary cores fetching stale data and lead to
- * kernel crashes.
- */
- bc_wback_inv(start, (end - start));
- __sync();
-#endif
}
static inline void local_r4k_flush_icache_range(unsigned long start,
unsigned long end)
{
- __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX);
+ __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, false);
+}
+
+static inline void local_r4k_flush_icache_user_range(unsigned long start,
+ unsigned long end)
+{
+ __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, true);
}
static inline void local_r4k_flush_icache_range_ipi(void *args)
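The hunks above carry the core of the change: __local_r4k_flush_icache_range() gains
a user flag so that user-space ranges keep the fault-tolerant protected_blast_*_range()
helpers, while kernel ranges can use the plain blast_*_range() variants, which skip
the exception-table fixup and are therefore only safe on addresses known to be mapped.
Below is a minimal stand-alone C sketch of that dispatch; the stub names mirror the
kernel's MIPS cache helpers but the bodies are just placeholder prints:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the kernel's MIPS cache primitives. */
static void protected_blast_dcache_range(unsigned long s, unsigned long e)
{
	printf("protected dcache flush %#lx-%#lx\n", s, e);
}

static void blast_dcache_range(unsigned long s, unsigned long e)
{
	printf("unprotected dcache flush %#lx-%#lx\n", s, e);
}

/*
 * user == true:  the range is a user mapping; the protected op survives
 *                a fault on an unmapped page instead of oopsing.
 * user == false: the range is a kernel mapping; the plain op is safe
 *                and avoids the fixup overhead.
 */
static void flush_dcache_span(unsigned long start, unsigned long end,
			      bool user)
{
	if (user)
		protected_blast_dcache_range(start, end);
	else
		blast_dcache_range(start, end);
}

int main(void)
{
	flush_dcache_span(0x00400000ul, 0x00401000ul, true);	/* user   */
	flush_dcache_span(0x80000000ul, 0x80001000ul, false);	/* kernel */
	return 0;
}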
@@ -777,11 +780,13 @@ static inline void local_r4k_flush_icache_range_ipi(void *args)
unsigned long start = fir_args->start;
unsigned long end = fir_args->end;
unsigned int type = fir_args->type;
+ bool user = fir_args->user;
- __local_r4k_flush_icache_range(start, end, type);
+ __local_r4k_flush_icache_range(start, end, type, user);
}
-static void r4k_flush_icache_range(unsigned long start, unsigned long end)
+static void __r4k_flush_icache_range(unsigned long start, unsigned long end,
+ bool user)
{
struct flush_icache_range_args args;
unsigned long size, cache_size;
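As the comment in the next hunk notes, indexed cache ops must run on every CPU, and
SMP cross-calls carry only a single void * payload; hence the parameters, now
including the new user flag, travel packed in struct flush_icache_range_args and are
unpacked in the IPI handler above. A self-contained sketch of that pack/unpack
pattern, with on_each_cpu() modelled by a trivial stand-in:

#include <stdbool.h>
#include <stdio.h>

/* Mirrors struct flush_icache_range_args: all the remote CPU needs. */
struct flush_args {
	unsigned long start;
	unsigned long end;
	unsigned int type;
	bool user;
};

/* Stand-in for on_each_cpu(): cross-call APIs take one void * only. */
static void on_each_cpu_sketch(void (*fn)(void *), void *info)
{
	fn(info);	/* the real helper runs fn on every online CPU */
}

/* IPI handler: unpack the single pointer back into typed arguments. */
static void flush_icache_range_ipi(void *info)
{
	struct flush_args *a = info;

	printf("flush %#lx-%#lx type=%u user=%d\n",
	       a->start, a->end, a->type, a->user);
}

int main(void)
{
	struct flush_args args = {
		.start = 0x1000, .end = 0x2000,
		.type = 3,	/* arbitrary stand-in for the R4K_* flags */
		.user = true,
	};

	on_each_cpu_sketch(flush_icache_range_ipi, &args);
	return 0;
}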
@@ -789,6 +794,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
args.start = start;
args.end = end;
args.type = R4K_HIT | R4K_INDEX;
+ args.user = user;
/*
* Indexed cache ops require an SMP call.
@@ -814,6 +820,16 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
instruction_hazard();
}
+static void r4k_flush_icache_range(unsigned long start, unsigned long end)
+{
+ return __r4k_flush_icache_range(start, end, false);
+}
+
+static void r4k_flush_icache_user_range(unsigned long start, unsigned long end)
+{
+ return __r4k_flush_icache_range(start, end, true);
+}
+
#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
@@ -1915,9 +1931,16 @@ void r4k_cache_init(void)
flush_data_cache_page = r4k_flush_data_cache_page;
flush_icache_range = r4k_flush_icache_range;
local_flush_icache_range = local_r4k_flush_icache_range;
+ __flush_icache_user_range = r4k_flush_icache_user_range;
+ __local_flush_icache_user_range = local_r4k_flush_icache_user_range;
#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
- if (coherentio) {
+# if defined(CONFIG_DMA_PERDEV_COHERENT)
+ if (0) {
+# else
+ if ((coherentio == IO_COHERENCE_ENABLED) ||
+ ((coherentio == IO_COHERENCE_DEFAULT) && hw_coherentio)) {
+# endif
_dma_cache_wback_inv = (void *)cache_noop;
_dma_cache_wback = (void *)cache_noop;
_dma_cache_inv = (void *)cache_noop;
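The final hunk replaces the old boolean coherentio test with a tri-state, so an
explicit command-line setting wins over the hardware default, and with
CONFIG_DMA_PERDEV_COHERENT the no-op fast path is compiled out entirely (the
"if (0)" branch), keeping the cache maintenance hooks installed for non-coherent
devices. A small sketch of the resulting decision; the enum names are taken from
the hunk, but their ordering here is assumed:

#include <stdbool.h>
#include <stdio.h>

/* Tri-state mirroring the hunk: default / forced on / forced off. */
enum io_coherence {
	IO_COHERENCE_DEFAULT,
	IO_COHERENCE_ENABLED,
	IO_COHERENCE_DISABLED,
};

/*
 * DMA is treated as coherent (cache ops become no-ops) only when
 * coherence is forced on, or left at the default while the hardware
 * reports itself coherent. An explicit "disabled" always wins.
 */
static bool dma_cache_ops_are_noops(enum io_coherence coherentio,
				    bool hw_coherentio)
{
	return coherentio == IO_COHERENCE_ENABLED ||
	       (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio);
}

int main(void)
{
	printf("%d\n", dma_cache_ops_are_noops(IO_COHERENCE_DEFAULT, true));	/* 1 */
	printf("%d\n", dma_cache_ops_are_noops(IO_COHERENCE_DISABLED, true));	/* 0 */
	printf("%d\n", dma_cache_ops_are_noops(IO_COHERENCE_ENABLED, false));	/* 1 */
	return 0;
}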