Diffstat (limited to 'arch')
-rw-r--r--  arch/sh/kernel/cpu/sh4/probe.c |  4
-rw-r--r--  arch/sh/mm/Kconfig             |  2
-rw-r--r--  arch/sh/mm/cache-sh4.c         | 12
-rw-r--r--  arch/sh/mm/cache-sh7705.c      |  9
-rw-r--r--  arch/sh/mm/pg-sh4.c            | 22
-rw-r--r--  arch/sh/mm/pg-sh7705.c         | 31
-rw-r--r--  arch/sh/mm/tlb-flush.c         | 55
-rw-r--r--  arch/sh/mm/tlb-sh3.c           | 63
-rw-r--r--  arch/sh/mm/tlb-sh4.c           | 68
9 files changed, 76 insertions, 190 deletions
diff --git a/arch/sh/kernel/cpu/sh4/probe.c b/arch/sh/kernel/cpu/sh4/probe.c
index 9031a22a2ce7..b26e2bc5894d 100644
--- a/arch/sh/kernel/cpu/sh4/probe.c
+++ b/arch/sh/kernel/cpu/sh4/probe.c
@@ -181,10 +181,6 @@ int __init detect_cpu_and_cache_system(void)
cpu_data->dcache.ways = 1;
#endif
-#ifdef CONFIG_CPU_HAS_PTEA
- cpu_data->flags |= CPU_HAS_PTEA;
-#endif
-
/*
* On anything that's not a direct-mapped cache, look to the CVR
* for I/D-cache specifics.
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index fddf6680ec4f..28b5102e1cdc 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -20,7 +20,7 @@ config CPU_SH4
bool
select CPU_HAS_INTEVT
select CPU_HAS_SR_RB
- select CPU_HAS_PTEA if !CPU_SUBTYPE_ST40
+ select CPU_HAS_PTEA if (!CPU_SUBTYPE_ST40 && !CPU_SH4A) || CPU_SHX2
config CPU_SH4A
bool
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index c6955157c989..72bb48773337 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -236,10 +236,20 @@ static inline void flush_cache_4096(unsigned long start,
/*
* Write back & invalidate the D-cache of the page.
* (To avoid "alias" issues)
+ *
+ * This uses a lazy write-back on UP, which is explicitly
+ * disabled on SMP.
*/
void flush_dcache_page(struct page *page)
{
- if (test_bit(PG_mapped, &page->flags)) {
+#ifndef CONFIG_SMP
+ struct address_space *mapping = page_mapping(page);
+
+ if (mapping && !mapping_mapped(mapping))
+ set_bit(PG_dcache_dirty, &page->flags);
+ else
+#endif
+ {
unsigned long phys = PHYSADDR(page_address(page));
unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
int i, n;
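
The new flush_dcache_page() above defers the write-back when the page has no user mappings, leaving PG_dcache_dirty set so the flush can be paid later, when the page is actually mapped (the update_mmu_cache() added to tlb-flush.c below). A minimal userspace model of that deferral, assuming nothing beyond what the hunk shows; struct page, the bit handling and the "flush" are stand-ins, not the kernel versions:

#include <stdbool.h>
#include <stdio.h>

#define PG_dcache_dirty 0

struct page {
	unsigned long flags;
	bool has_user_mappings;		/* stand-in for mapping_mapped() */
};

/* Stand-in for the real write-back + invalidate of the page's aliases. */
static void writeback_dcache(struct page *page)
{
	printf("write back D-cache for page %p\n", (void *)page);
	page->flags &= ~(1UL << PG_dcache_dirty);
}

/* Defer the flush when nobody maps the page into userspace. */
static void flush_dcache_page(struct page *page)
{
	if (!page->has_user_mappings)
		page->flags |= 1UL << PG_dcache_dirty;
	else
		writeback_dcache(page);
}

/* The deferred flush is paid when the page is next mapped
 * (what update_mmu_cache() does in the kernel patch). */
static void map_page_into_user(struct page *page)
{
	if (page->flags & (1UL << PG_dcache_dirty))
		writeback_dcache(page);
	page->has_user_mappings = true;
}

int main(void)
{
	struct page p = { 0, false };

	flush_dcache_page(&p);		/* only marks the page dirty */
	map_page_into_user(&p);		/* write-back happens here */
	return 0;
}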
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 045abdf078f5..2808b580d984 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -3,11 +3,11 @@
*
* Copyright (C) 1999, 2000 Niibe Yutaka
* Copyright (C) 2004 Alex Song
+ * Copyright (C) 2006 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
- *
*/
#include <linux/init.h>
#include <linux/mman.h>
@@ -51,7 +51,6 @@ static inline void cache_wback_all(void)
if ((data & v) == v)
ctrl_outl(data & ~v, addr);
-
}
addrstart += cpu_data->dcache.way_incr;
@@ -128,7 +127,11 @@ static void __flush_dcache_page(unsigned long phys)
*/
void flush_dcache_page(struct page *page)
{
- if (test_bit(PG_mapped, &page->flags))
+ struct address_space *mapping = page_mapping(page);
+
+ if (mapping && !mapping_mapped(mapping))
+ set_bit(PG_dcache_dirty, &page->flags);
+ else
__flush_dcache_page(PHYSADDR(page_address(page)));
}
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 3f98d2a4f936..cfc323551741 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -23,7 +23,6 @@ extern struct mutex p3map_mutex[];
*/
void clear_user_page(void *to, unsigned long address, struct page *page)
{
- __set_bit(PG_mapped, &page->flags);
if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
clear_page(to);
else {
@@ -59,7 +58,6 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
void copy_user_page(void *to, void *from, unsigned long address,
struct page *page)
{
- __set_bit(PG_mapped, &page->flags);
if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0)
copy_page(to, from);
else {
@@ -84,23 +82,3 @@ void copy_user_page(void *to, void *from, unsigned long address,
mutex_unlock(&p3map_mutex[(address & CACHE_ALIAS)>>12]);
}
}
-
-/*
- * For SH-4, we have our own implementation for ptep_get_and_clear
- */
-inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
- pte_t pte = *ptep;
-
- pte_clear(mm, addr, ptep);
- if (!pte_not_present(pte)) {
- unsigned long pfn = pte_pfn(pte);
- if (pfn_valid(pfn)) {
- struct page *page = pfn_to_page(pfn);
- struct address_space *mapping = page_mapping(page);
- if (!mapping || !mapping_writably_mapped(mapping))
- __clear_bit(PG_mapped, &page->flags);
- }
- }
- return pte;
-}
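
With PG_mapped gone, the arch-private ptep_get_and_clear() removed above has nothing left to do beyond the common read-then-clear, so sh presumably falls back to the generic definition. A sketch of that net effect in plain userspace C, with pte_t and pte_clear() as stand-ins rather than the kernel types:

#include <stdio.h>

typedef unsigned long pte_t;	/* stand-in, not the kernel pte_t */

static void pte_clear(pte_t *ptep)
{
	*ptep = 0;
}

/* Read the old entry, clear the slot in place, return the old value. */
static pte_t ptep_get_and_clear(pte_t *ptep)
{
	pte_t pte = *ptep;

	pte_clear(ptep);
	return pte;
}

int main(void)
{
	pte_t slot = 0x8f4001e3;	/* made-up entry */
	pte_t old = ptep_get_and_clear(&slot);

	printf("old=%#lx now=%#lx\n", old, slot);
	return 0;
}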
diff --git a/arch/sh/mm/pg-sh7705.c b/arch/sh/mm/pg-sh7705.c
index ff9ece986cbc..b052d0fee827 100644
--- a/arch/sh/mm/pg-sh7705.c
+++ b/arch/sh/mm/pg-sh7705.c
@@ -7,9 +7,7 @@
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
- *
*/
-
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
@@ -76,7 +74,6 @@ void clear_user_page(void *to, unsigned long address, struct page *pg)
{
struct page *page = virt_to_page(to);
- __set_bit(PG_mapped, &page->flags);
if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
clear_page(to);
__flush_wback_region(to, PAGE_SIZE);
@@ -95,12 +92,11 @@ void clear_user_page(void *to, unsigned long address, struct page *pg)
* @from: P1 address
* @address: U0 address to be mapped
*/
-void copy_user_page(void *to, void *from, unsigned long address, struct page *pg)
+void copy_user_page(void *to, void *from, unsigned long address,
+ struct page *pg)
{
struct page *page = virt_to_page(to);
-
- __set_bit(PG_mapped, &page->flags);
if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
copy_page(to, from);
__flush_wback_region(to, PAGE_SIZE);
@@ -112,26 +108,3 @@ void copy_user_page(void *to, void *from, unsigned long address, struct page *pg
__flush_wback_region(to, PAGE_SIZE);
}
}
-
-/*
- * For SH7705, we have our own implementation for ptep_get_and_clear
- * Copied from pg-sh4.c
- */
-inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
- pte_t pte = *ptep;
-
- pte_clear(mm, addr, ptep);
- if (!pte_not_present(pte)) {
- unsigned long pfn = pte_pfn(pte);
- if (pfn_valid(pfn)) {
- struct page *page = pfn_to_page(pfn);
- struct address_space *mapping = page_mapping(page);
- if (!mapping || !mapping_writably_mapped(mapping))
- __clear_bit(PG_mapped, &page->flags);
- }
- }
-
- return pte;
-}
-
diff --git a/arch/sh/mm/tlb-flush.c b/arch/sh/mm/tlb-flush.c
index 73ec7f6084fa..9347534aa894 100644
--- a/arch/sh/mm/tlb-flush.c
+++ b/arch/sh/mm/tlb-flush.c
@@ -2,15 +2,17 @@
* TLB flushing operations for SH with an MMU.
*
* Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2003 Paul Mundt
+ * Copyright (C) 2003 - 2006 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/mm.h>
+#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
@@ -132,3 +134,54 @@ void flush_tlb_all(void)
ctrl_barrier();
local_irq_restore(flags);
}
+
+void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t pte)
+{
+ unsigned long flags;
+ unsigned long pteval;
+ unsigned long vpn;
+ struct page *page;
+ unsigned long pfn = pte_pfn(pte);
+ struct address_space *mapping;
+
+ if (!pfn_valid(pfn))
+ return;
+
+ page = pfn_to_page(pfn);
+ mapping = page_mapping(page);
+ if (mapping) {
+ unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
+ int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+
+ if (dirty)
+ __flush_wback_region((void *)P1SEGADDR(phys),
+ PAGE_SIZE);
+ }
+
+ local_irq_save(flags);
+
+ /* Set PTEH register */
+ vpn = (address & MMU_VPN_MASK) | get_asid();
+ ctrl_outl(vpn, MMU_PTEH);
+
+ pteval = pte_val(pte);
+
+#ifdef CONFIG_CPU_HAS_PTEA
+ /* Set PTEA register */
+ /* TODO: make this look less hacky */
+ ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
+#endif
+
+ /* Set PTEL register */
+ pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
+#ifdef CONFIG_SH_WRITETHROUGH
+ pteval |= _PAGE_WT;
+#endif
+ /* conveniently, we want all the software flags to be 0 anyway */
+ ctrl_outl(pteval, MMU_PTEL);
+
+ /* Load the TLB */
+ asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
+ local_irq_restore(flags);
+}
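
The PTEA packing above folds a few of the top bits of the PTE together with its low bit into a single register value. Purely as a worked example of that bit arithmetic, on a made-up pteval (the hardware meaning of the fields is not modelled here):

#include <stdio.h>

int main(void)
{
	unsigned long pteval = 0xa00001e3;	/* hypothetical PTE value */
	unsigned long ptea = ((pteval >> 28) & 0xe) | (pteval & 0x1);

	/* 0xa00001e3 >> 28 = 0xa; 0xa & 0xe = 0xa; bit 0 of pteval is set,
	 * so the register value becomes 0xa | 0x1 = 0xb. */
	printf("PTEA = %#lx\n", ptea);
	return 0;
}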
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 46b09e26e082..16627069c536 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -8,69 +8,9 @@
*
* Released under the terms of the GNU GPL v2.0.
*/
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-
+#include <linux/io.h>
#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
-
-void update_mmu_cache(struct vm_area_struct * vma,
- unsigned long address, pte_t pte)
-{
- unsigned long flags;
- unsigned long pteval;
- unsigned long vpn;
-
- /* Ptrace may call this routine. */
- if (vma && current->active_mm != vma->vm_mm)
- return;
-
-#if defined(CONFIG_SH7705_CACHE_32KB)
- {
- struct page *page = pte_page(pte);
- unsigned long pfn = pte_pfn(pte);
-
- if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) {
- unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-
- __flush_wback_region((void *)P1SEGADDR(phys),
- PAGE_SIZE);
- __set_bit(PG_mapped, &page->flags);
- }
- }
-#endif
-
- local_irq_save(flags);
-
- /* Set PTEH register */
- vpn = (address & MMU_VPN_MASK) | get_asid();
- ctrl_outl(vpn, MMU_PTEH);
-
- pteval = pte_val(pte);
-
- /* Set PTEL register */
- pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
- /* conveniently, we want all the software flags to be 0 anyway */
- ctrl_outl(pteval, MMU_PTEL);
-
- /* Load the TLB */
- asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
- local_irq_restore(flags);
-}
void __flush_tlb_page(unsigned long asid, unsigned long page)
{
@@ -94,4 +34,3 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
for (i = 0; i < ways; i++)
ctrl_outl(data, addr + (i << 8));
}
-
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 812b2d567de2..758d8dec622b 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -8,74 +8,9 @@
*
* Released under the terms of the GNU GPL v2.0.
*/
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/interrupt.h>
-
+#include <linux/io.h>
#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
-#include <asm/cacheflush.h>
-
-void update_mmu_cache(struct vm_area_struct * vma,
- unsigned long address, pte_t pte)
-{
- unsigned long flags;
- unsigned long pteval;
- unsigned long vpn;
- struct page *page;
- unsigned long pfn;
-
- /* Ptrace may call this routine. */
- if (vma && current->active_mm != vma->vm_mm)
- return;
-
- pfn = pte_pfn(pte);
- if (pfn_valid(pfn)) {
- page = pfn_to_page(pfn);
- if (!test_bit(PG_mapped, &page->flags)) {
- unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
- __flush_wback_region((void *)P1SEGADDR(phys), PAGE_SIZE);
- __set_bit(PG_mapped, &page->flags);
- }
- }
-
- local_irq_save(flags);
-
- /* Set PTEH register */
- vpn = (address & MMU_VPN_MASK) | get_asid();
- ctrl_outl(vpn, MMU_PTEH);
-
- pteval = pte_val(pte);
-
- /* Set PTEA register */
- if (cpu_data->flags & CPU_HAS_PTEA)
- /* TODO: make this look less hacky */
- ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
-
- /* Set PTEL register */
- pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-#ifdef CONFIG_SH_WRITETHROUGH
- pteval |= _PAGE_WT;
-#endif
- /* conveniently, we want all the software flags to be 0 anyway */
- ctrl_outl(pteval, MMU_PTEL);
-
- /* Load the TLB */
- asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
- local_irq_restore(flags);
-}
void __flush_tlb_page(unsigned long asid, unsigned long page)
{
@@ -93,4 +28,3 @@ void __flush_tlb_page(unsigned long asid, unsigned long page)
ctrl_outl(data, addr);
back_to_P1();
}
-