Diffstat (limited to 'arch/sh/mm')
 -rw-r--r--  arch/sh/mm/cache-sh4.c  | 87
 -rw-r--r--  arch/sh/mm/cache-sh5.c  |  4
 -rw-r--r--  arch/sh/mm/cache.c      | 70
 -rw-r--r--  arch/sh/mm/flush-sh4.c  | 13
 -rw-r--r--  arch/sh/mm/init.c       |  5
 5 files changed, 133 insertions(+), 46 deletions(-)
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index b5860535e61f..05cb04bc3940 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -26,13 +26,6 @@
#define MAX_DCACHE_PAGES 64 /* XXX: Tune for ways */
#define MAX_ICACHE_PAGES 32
-static void __flush_dcache_segment_1way(unsigned long start,
- unsigned long extent);
-static void __flush_dcache_segment_2way(unsigned long start,
- unsigned long extent);
-static void __flush_dcache_segment_4way(unsigned long start,
- unsigned long extent);
-
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
unsigned long exec_offset);
@@ -45,38 +38,12 @@ static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
(void (*)(unsigned long, unsigned long))0xdeadbeef;
/*
- * SH-4 has virtually indexed and physically tagged cache.
- */
-void __init sh4_cache_init(void)
-{
- printk("PVR=%08x CVR=%08x PRR=%08x\n",
- ctrl_inl(CCN_PVR),
- ctrl_inl(CCN_CVR),
- ctrl_inl(CCN_PRR));
-
- switch (boot_cpu_data.dcache.ways) {
- case 1:
- __flush_dcache_segment_fn = __flush_dcache_segment_1way;
- break;
- case 2:
- __flush_dcache_segment_fn = __flush_dcache_segment_2way;
- break;
- case 4:
- __flush_dcache_segment_fn = __flush_dcache_segment_4way;
- break;
- default:
- panic("unknown number of cache ways\n");
- break;
- }
-}
-
-/*
* Write back the range of D-cache, and purge the I-cache.
*
* Called from kernel/module.c:sys_init_module and routine for a.out format,
* signal handler code and kprobes code
*/
-void flush_icache_range(unsigned long start, unsigned long end)
+static void sh4_flush_icache_range(unsigned long start, unsigned long end)
{
int icacheaddr;
unsigned long flags, v;
@@ -137,7 +104,7 @@ static inline void flush_cache_4096(unsigned long start,
* Write back & invalidate the D-cache of the page.
* (To avoid "alias" issues)
*/
-void flush_dcache_page(struct page *page)
+static void sh4_flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
@@ -188,7 +155,7 @@ static inline void flush_dcache_all(void)
wmb();
}
-void flush_cache_all(void)
+static void sh4_flush_cache_all(void)
{
flush_dcache_all();
flush_icache_all();
@@ -280,7 +247,7 @@ loop_exit:
*
* Caller takes mm->mmap_sem.
*/
-void flush_cache_mm(struct mm_struct *mm)
+static void sh4_flush_cache_mm(struct mm_struct *mm)
{
if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
return;
@@ -320,8 +287,8 @@ void flush_cache_mm(struct mm_struct *mm)
* ADDR: Virtual Address (U0 address)
* PFN: Physical page number
*/
-void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
- unsigned long pfn)
+static void sh4_flush_cache_page(struct vm_area_struct *vma,
+ unsigned long address, unsigned long pfn)
{
unsigned long phys = pfn << PAGE_SHIFT;
unsigned int alias_mask;
@@ -368,8 +335,8 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
* Flushing the cache lines for U0 only isn't enough.
* We need to flush for P1 too, which may contain aliases.
*/
-void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
+static void sh4_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
{
if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
return;
@@ -668,3 +635,41 @@ static void __flush_dcache_segment_4way(unsigned long start,
a3 += linesz;
} while (a0 < a0e);
}
+
+extern void __weak sh4__flush_region_init(void);
+
+/*
+ * SH-4 has virtually indexed and physically tagged cache.
+ */
+void __init sh4_cache_init(void)
+{
+ printk("PVR=%08x CVR=%08x PRR=%08x\n",
+ ctrl_inl(CCN_PVR),
+ ctrl_inl(CCN_CVR),
+ ctrl_inl(CCN_PRR));
+
+ switch (boot_cpu_data.dcache.ways) {
+ case 1:
+ __flush_dcache_segment_fn = __flush_dcache_segment_1way;
+ break;
+ case 2:
+ __flush_dcache_segment_fn = __flush_dcache_segment_2way;
+ break;
+ case 4:
+ __flush_dcache_segment_fn = __flush_dcache_segment_4way;
+ break;
+ default:
+ panic("unknown number of cache ways\n");
+ break;
+ }
+
+ flush_icache_range = sh4_flush_icache_range;
+ flush_dcache_page = sh4_flush_dcache_page;
+ flush_cache_all = sh4_flush_cache_all;
+ flush_cache_mm = sh4_flush_cache_mm;
+ flush_cache_dup_mm = sh4_flush_cache_mm;
+ flush_cache_page = sh4_flush_cache_page;
+ flush_cache_range = sh4_flush_cache_range;
+
+ sh4__flush_region_init();
+}
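
The heart of the cache-sh4.c change: the flush routines lose their global linkage and are instead bound to generic function pointers once sh4_cache_init() knows what CPU it is running on. A minimal standalone sketch of that boot-time dispatch pattern follows; the names (cache_flush_fn, generic_flush, sh4_flush, cache_init) are invented for illustration, not the kernel's own API:

    #include <stdio.h>

    /* Illustrative stand-ins, not the kernel API. */
    static void generic_flush(unsigned long start, unsigned long end)
    {
            /* safe default: nothing to do */
    }

    static void sh4_flush(unsigned long start, unsigned long end)
    {
            printf("SH-4 flush %#lx..%#lx\n", start, end);
    }

    /* The indirection every caller now goes through. */
    static void (*cache_flush_fn)(unsigned long, unsigned long) = generic_flush;

    static void cache_init(int is_sh4)
    {
            /* bound once at boot, after CPU probing */
            if (is_sh4)
                    cache_flush_fn = sh4_flush;
    }

    int main(void)
    {
            cache_init(1);
            cache_flush_fn(0x1000, 0x2000);  /* dispatches via the pointer */
            return 0;
    }

The 0xdeadbeef initializer kept on __flush_dcache_segment_fn serves the same purpose as a NULL default: any call made before boot-time binding faults loudly rather than silently doing the wrong thing.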
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
index a50d23caf015..a8f5142dc2cf 100644
--- a/arch/sh/mm/cache-sh5.c
+++ b/arch/sh/mm/cache-sh5.c
@@ -20,6 +20,8 @@
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
+extern void __weak sh4__flush_region_init(void);
+
/* Wired TLB entry for the D-cache */
static unsigned long long dtlb_cache_slot;
@@ -27,6 +29,8 @@ void __init cpu_cache_init(void)
{
/* Reserve a slot for dcache colouring in the DTLB */
dtlb_cache_slot = sh64_get_wired_dtlb_entry();
+
+ sh4__flush_region_init();
}
void __init kmap_coherent_init(void)
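
cache-sh5.c picks up the shared SH-4 region ops through the __weak declaration of sh4__flush_region_init(). Under GCC/ELF semantics, a weak extern that no object file defines resolves to address zero; the diff calls it unconditionally, presumably because flush-sh4.c is always built alongside this file. Where that guarantee is absent, the usual idiom is to test the symbol first. A tiny sketch, with an invented hook name:

    /* Weak extern: links even with no definition, resolving to NULL. */
    extern void __attribute__((weak)) optional_hook(void);

    void late_init(void)
    {
            if (optional_hook)      /* may be absent; guard before calling */
                    optional_hook();
    }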
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index a31e5c46e7a6..da5bc6ac1b28 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -15,6 +15,62 @@
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
+void (*flush_cache_all)(void);
+void (*flush_cache_mm)(struct mm_struct *mm);
+void (*flush_cache_dup_mm)(struct mm_struct *mm);
+void (*flush_cache_page)(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn);
+void (*flush_cache_range)(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end);
+void (*flush_dcache_page)(struct page *page);
+void (*flush_icache_range)(unsigned long start, unsigned long end);
+void (*flush_icache_page)(struct vm_area_struct *vma,
+ struct page *page);
+void (*flush_cache_sigtramp)(unsigned long address);
+void (*__flush_wback_region)(void *start, int size);
+void (*__flush_purge_region)(void *start, int size);
+void (*__flush_invalidate_region)(void *start, int size);
+
+static inline void noop_flush_cache_all(void)
+{
+}
+
+static inline void noop_flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+static inline void noop_flush_cache_page(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn)
+{
+}
+
+static inline void noop_flush_cache_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+}
+
+static inline void noop_flush_dcache_page(struct page *page)
+{
+}
+
+static inline void noop_flush_icache_range(unsigned long start,
+ unsigned long end)
+{
+}
+
+static inline void noop_flush_icache_page(struct vm_area_struct *vma,
+ struct page *page)
+{
+}
+
+static inline void noop_flush_cache_sigtramp(unsigned long address)
+{
+}
+
+static inline void noop__flush_region(void *start, int size)
+{
+}
+
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, const void *src,
unsigned long len)
@@ -174,6 +230,20 @@ void __init cpu_cache_init(void)
compute_alias(&boot_cpu_data.dcache);
compute_alias(&boot_cpu_data.scache);
+ flush_cache_all = noop_flush_cache_all;
+ flush_cache_mm = noop_flush_cache_mm;
+ flush_cache_dup_mm = noop_flush_cache_mm;
+ flush_cache_page = noop_flush_cache_page;
+ flush_cache_range = noop_flush_cache_range;
+ flush_dcache_page = noop_flush_dcache_page;
+ flush_icache_range = noop_flush_icache_range;
+ flush_icache_page = noop_flush_icache_page;
+ flush_cache_sigtramp = noop_flush_cache_sigtramp;
+
+ __flush_wback_region = noop__flush_region;
+ __flush_purge_region = noop__flush_region;
+ __flush_invalidate_region = noop__flush_region;
+
if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
(boot_cpu_data.family == CPU_FAMILY_SH4A) ||
(boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
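
Every new pointer in cache.c is seeded with an explicit no-op before the family-specific code at the bottom of cpu_cache_init() gets a chance to override it, so no caller can ever dispatch through a NULL pointer. The same idiom in miniature, with an invented ops struct:

    struct cache_ops {
            void (*flush_all)(void);
            void (*flush_range)(unsigned long start, unsigned long end);
    };

    static void noop_flush_all(void) { }
    static void noop_flush_range(unsigned long start, unsigned long end) { }

    static struct cache_ops ops;

    void cache_ops_init(int is_sh4)
    {
            /* defaults first: no pointer is ever left NULL */
            ops.flush_all = noop_flush_all;
            ops.flush_range = noop_flush_range;

            if (is_sh4) {
                    /* a family-specific init would override entries here,
                     * exactly as sh4_cache_init() does in the diff above */
            }
    }

Relative to the old scheme of __weak default functions, this trades a direct call for one level of indirection, but lets a single image select its cache handling at boot.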
diff --git a/arch/sh/mm/flush-sh4.c b/arch/sh/mm/flush-sh4.c
index 1b6b6a12a99b..99c50dc7551e 100644
--- a/arch/sh/mm/flush-sh4.c
+++ b/arch/sh/mm/flush-sh4.c
@@ -8,7 +8,7 @@
* START: Virtual Address (U0, P1, or P3)
* SIZE: Size of the region.
*/
-void __weak __flush_wback_region(void *start, int size)
+static void sh4__flush_wback_region(void *start, int size)
{
reg_size_t aligned_start, v, cnt, end;
@@ -51,7 +51,7 @@ void __weak __flush_wback_region(void *start, int size)
* START: Virtual Address (U0, P1, or P3)
* SIZE: Size of the region.
*/
-void __weak __flush_purge_region(void *start, int size)
+static void sh4__flush_purge_region(void *start, int size)
{
reg_size_t aligned_start, v, cnt, end;
@@ -90,7 +90,7 @@ void __weak __flush_purge_region(void *start, int size)
/*
* No write back please
*/
-void __weak __flush_invalidate_region(void *start, int size)
+static void sh4__flush_invalidate_region(void *start, int size)
{
reg_size_t aligned_start, v, cnt, end;
@@ -126,3 +126,10 @@ void __weak __flush_invalidate_region(void *start, int size)
cnt--;
}
}
+
+void __init sh4__flush_region_init(void)
+{
+ __flush_wback_region = sh4__flush_wback_region;
+ __flush_invalidate_region = sh4__flush_invalidate_region;
+ __flush_purge_region = sh4__flush_purge_region;
+}
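
All three region routines renamed above share one shape: round the start down to a cache-line boundary, then walk the region one line at a time, issuing the appropriate SH-4 operation (ocbwb for write-back, ocbp for purge, ocbi for invalidate). A simplified sketch of that walk, assuming a 32-byte line size, with flush_one_line() standing in for the real instruction:

    #define L1_CACHE_BYTES  32      /* assumed SH-4 D-cache line size */

    static void flush_one_line(unsigned long addr)
    {
            /* real code issues e.g. "ocbwb @addr" (write back) here */
            (void)addr;
    }

    void flush_region(void *start, int size)
    {
            unsigned long v   = (unsigned long)start & ~(L1_CACHE_BYTES - 1);
            unsigned long end = ((unsigned long)start + size +
                                 L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);

            while (v < end) {
                    flush_one_line(v);
                    v += L1_CACHE_BYTES;
            }
    }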
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index cf0e9c5146b1..0a9b4d855bc9 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -210,6 +210,9 @@ void __init mem_init(void)
high_memory = node_high_memory;
}
+ /* Set this up early, so we can take care of the zero page */
+ cpu_cache_init();
+
/* clear the zero-page */
memset(empty_zero_page, 0, PAGE_SIZE);
__flush_wback_region(empty_zero_page, PAGE_SIZE);
@@ -230,8 +233,6 @@ void __init mem_init(void)
datasize >> 10,
initsize >> 10);
- cpu_cache_init();
-
/* Initialize the vDSO */
vsyscall_init();
}
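
The init.c hunk is the ordering fix the rest of the patch depends on: with the flush ops now behind function pointers, cpu_cache_init() has to run before mem_init() flushes the zero page, otherwise __flush_wback_region would be called while still unset. The dependency in miniature, names invented:

    #include <string.h>

    static void (*flush_region)(void *start, int size);  /* NULL until set up */

    static void noop_region(void *start, int size) { }

    static void cpu_cache_setup(void)
    {
            flush_region = noop_region;     /* or a CPU-specific backend */
    }

    void mem_setup(char *zero_page, int page_size)
    {
            cpu_cache_setup();                      /* must come first ... */

            memset(zero_page, 0, page_size);
            flush_region(zero_page, page_size);     /* ... or this jumps to NULL */
    }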