Diffstat (limited to 'arch/arm/mm')
-rw-r--r--   arch/arm/mm/copypage-feroceon.c   23
-rw-r--r--   arch/arm/mm/copypage-v3.c         23
-rw-r--r--   arch/arm/mm/copypage-v4mc.c       21
-rw-r--r--   arch/arm/mm/copypage-v4wb.c       25
-rw-r--r--   arch/arm/mm/copypage-v4wt.c       23
-rw-r--r--   arch/arm/mm/copypage-v6.c         61
-rw-r--r--   arch/arm/mm/copypage-xsc3.c       24
-rw-r--r--   arch/arm/mm/copypage-xscale.c     19
-rw-r--r--   arch/arm/mm/proc-syms.c            2
9 files changed, 147 insertions(+), 74 deletions(-)
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index c8347670ab00..edd71686b8df 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -7,15 +7,14 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * This handles copy_user_page and clear_user_page on Feroceon
+ * This handles copy_user_highpage and clear_user_page on Feroceon
* more optimally than the generic implementations.
*/
#include <linux/init.h>
+#include <linux/highmem.h>
-#include <asm/page.h>
-
-void __attribute__((naked))
-feroceon_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+static void __attribute__((naked))
+feroceon_copy_user_page(void *kto, const void *kfrom)
{
asm("\
stmfd sp!, {r4-r9, lr} \n\
@@ -68,6 +67,18 @@ feroceon_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
: "I" (PAGE_SIZE));
}
+void feroceon_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr)
+{
+ void *kto, *kfrom;
+
+ kto = kmap_atomic(to, KM_USER0);
+ kfrom = kmap_atomic(from, KM_USER1);
+ feroceon_copy_user_page(kto, kfrom);
+ kunmap_atomic(kfrom, KM_USER1);
+ kunmap_atomic(kto, KM_USER0);
+}
+
void __attribute__((naked))
feroceon_clear_user_page(void *kaddr, unsigned long vaddr)
{
@@ -95,6 +106,6 @@ feroceon_clear_user_page(void *kaddr, unsigned long vaddr)
struct cpu_user_fns feroceon_user_fns __initdata = {
.cpu_clear_user_page = feroceon_clear_user_page,
- .cpu_copy_user_page = feroceon_copy_user_page,
+ .cpu_copy_user_highpage = feroceon_copy_user_highpage,
};
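The simple conversions in this series (feroceon here, and v3, v4wb, v4wt, xsc3 below) all share one shape: the kernel-virtual-address copier becomes static and drops its unused vaddr argument, and a new *_copy_user_highpage() wrapper maps both struct page arguments before calling it. A minimal standalone sketch of that wrapper pattern, assuming the kmap_atomic() API of this kernel generation (the example_* names are hypothetical):

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical kernel-VA copier, standing in for e.g. feroceon_copy_user_page(). */
static void example_copy_user_page(void *kto, const void *kfrom)
{
	memcpy(kto, kfrom, PAGE_SIZE);
}

/*
 * Wrapper pattern used by these conversions: map both (possibly
 * highmem) pages into kernel space, copy via kernel virtual
 * addresses, then unmap in the reverse order of mapping.
 */
static void example_copy_user_highpage(struct page *to, struct page *from,
		unsigned long vaddr)
{
	void *kto, *kfrom;

	kto = kmap_atomic(to, KM_USER0);
	kfrom = kmap_atomic(from, KM_USER1);
	example_copy_user_page(kto, kfrom);
	kunmap_atomic(kfrom, KM_USER1);
	kunmap_atomic(kto, KM_USER0);
}

Unmapping in the reverse order of mapping keeps the fixed KM_USER0/KM_USER1 atomic-kmap slots properly nested.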
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index 184911089e6c..52df8f04d3f7 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -8,16 +8,15 @@
* published by the Free Software Foundation.
*/
#include <linux/init.h>
-
-#include <asm/page.h>
+#include <linux/highmem.h>
/*
- * ARMv3 optimised copy_user_page
+ * ARMv3 optimised copy_user_highpage
*
* FIXME: do we need to handle cache stuff...
*/
-void __attribute__((naked))
-v3_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+static void __attribute__((naked))
+v3_copy_user_page(void *kto, const void *kfrom)
{
asm("\n\
stmfd sp!, {r4, lr} @ 2\n\
@@ -38,6 +37,18 @@ v3_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
: "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64));
}
+void v3_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr)
+{
+ void *kto, *kfrom;
+
+ kto = kmap_atomic(to, KM_USER0);
+ kfrom = kmap_atomic(from, KM_USER1);
+ v3_copy_user_page(kto, kfrom);
+ kunmap_atomic(kfrom, KM_USER1);
+ kunmap_atomic(kto, KM_USER0);
+}
+
/*
* ARMv3 optimised clear_user_page
*
@@ -65,5 +76,5 @@ void __attribute__((naked)) v3_clear_user_page(void *kaddr, unsigned long vaddr)
struct cpu_user_fns v3_user_fns __initdata = {
.cpu_clear_user_page = v3_clear_user_page,
- .cpu_copy_user_page = v3_copy_user_page,
+ .cpu_copy_user_highpage = v3_copy_user_highpage,
};
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 8d33e2549344..a7dc838fee76 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -15,8 +15,8 @@
*/
#include <linux/init.h>
#include <linux/mm.h>
+#include <linux/highmem.h>
-#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@@ -33,7 +33,7 @@
static DEFINE_SPINLOCK(minicache_lock);
/*
- * ARMv4 mini-dcache optimised copy_user_page
+ * ARMv4 mini-dcache optimised copy_user_highpage
*
* We flush the destination cache lines just before we write the data into the
* corresponding address. Since the Dcache is read-allocate, this removes the
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(minicache_lock);
*
* Note: We rely on all ARMv4 processors implementing the "invalidate D line"
* instruction. If your processor does not supply this, you have to write your
- * own copy_user_page that does the right thing.
+ * own copy_user_highpage that does the right thing.
*/
static void __attribute__((naked))
mc_copy_user_page(void *from, void *to)
@@ -68,21 +68,24 @@ mc_copy_user_page(void *from, void *to)
: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
}
-void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+void v4_mc_copy_user_highpage(struct page *from, struct page *to,
+ unsigned long vaddr)
{
- struct page *page = virt_to_page(kfrom);
+ void *kto = kmap_atomic(to, KM_USER1);
- if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
- __flush_dcache_page(page_mapping(page), page);
+ if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+ __flush_dcache_page(page_mapping(from), from);
spin_lock(&minicache_lock);
- set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
+ set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
flush_tlb_kernel_page(0xffff8000);
mc_copy_user_page((void *)0xffff8000, kto);
spin_unlock(&minicache_lock);
+
+ kunmap_atomic(kto, KM_USER1);
}
/*
@@ -113,5 +116,5 @@ v4_mc_clear_user_page(void *kaddr, unsigned long vaddr)
struct cpu_user_fns v4_mc_user_fns __initdata = {
.cpu_clear_user_page = v4_mc_clear_user_page,
- .cpu_copy_user_page = v4_mc_copy_user_page,
+ .cpu_copy_user_highpage = v4_mc_copy_user_highpage,
};
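Unlike the wrappers above, the minicache variants (v4mc here, xscale below) only kmap the destination: the source page is mapped by pfn into a fixed minicache-cacheable window, which works for highmem sources without a kmap. Reassembling the interleaved +/- lines gives the resulting function (a sketch; TOP_PTE, minicache_pgprot, minicache_lock and mc_copy_user_page() come from the surrounding file, not this hunk):

void v4_mc_copy_user_highpage(struct page *from, struct page *to,
	unsigned long vaddr)
{
	void *kto = kmap_atomic(to, KM_USER1);

	/* Write back any user-space-dirtied cache lines for the source page. */
	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	spin_lock(&minicache_lock);

	/* Map the source pfn at the dedicated minicache window ... */
	set_pte_ext(TOP_PTE(0xffff8000),
		    pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
	/* ... and kill any stale TLB entry for that address. */
	flush_tlb_kernel_page(0xffff8000);

	mc_copy_user_page((void *)0xffff8000, kto);

	spin_unlock(&minicache_lock);

	kunmap_atomic(kto, KM_USER1);
}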
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index 230210822961..186a68a794a9 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -8,11 +8,10 @@
* published by the Free Software Foundation.
*/
#include <linux/init.h>
-
-#include <asm/page.h>
+#include <linux/highmem.h>
/*
- * ARMv4 optimised copy_user_page
+ * ARMv4 optimised copy_user_highpage
*
* We flush the destination cache lines just before we write the data into the
* corresponding address. Since the Dcache is read-allocate, this removes the
@@ -21,10 +20,10 @@
*
* Note: We rely on all ARMv4 processors implementing the "invalidate D line"
* instruction. If your processor does not supply this, you have to write your
- * own copy_user_page that does the right thing.
+ * own copy_user_highpage that does the right thing.
*/
-void __attribute__((naked))
-v4wb_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+static void __attribute__((naked))
+v4wb_copy_user_page(void *kto, const void *kfrom)
{
asm("\
stmfd sp!, {r4, lr} @ 2\n\
@@ -48,6 +47,18 @@ v4wb_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
: "I" (PAGE_SIZE / 64));
}
+void v4wb_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr)
+{
+ void *kto, *kfrom;
+
+ kto = kmap_atomic(to, KM_USER0);
+ kfrom = kmap_atomic(from, KM_USER1);
+ v4wb_copy_user_page(kto, kfrom);
+ kunmap_atomic(kfrom, KM_USER1);
+ kunmap_atomic(kto, KM_USER0);
+}
+
/*
* ARMv4 optimised clear_user_page
*
@@ -79,5 +90,5 @@ v4wb_clear_user_page(void *kaddr, unsigned long vaddr)
struct cpu_user_fns v4wb_user_fns __initdata = {
.cpu_clear_user_page = v4wb_clear_user_page,
- .cpu_copy_user_page = v4wb_copy_user_page,
+ .cpu_copy_user_highpage = v4wb_copy_user_highpage,
};
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index d8ef39503ff0..86c2cfdbde03 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -11,18 +11,17 @@
* the only supported cache operation.
*/
#include <linux/init.h>
-
-#include <asm/page.h>
+#include <linux/highmem.h>
/*
- * ARMv4 optimised copy_user_page
+ * ARMv4 optimised copy_user_highpage
*
* Since we have writethrough caches, we don't have to worry about
* dirty data in the cache. However, we do have to ensure that
* subsequent reads are up to date.
*/
-void __attribute__((naked))
-v4wt_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+static void __attribute__((naked))
+v4wt_copy_user_page(void *kto, const void *kfrom)
{
asm("\
stmfd sp!, {r4, lr} @ 2\n\
@@ -44,6 +43,18 @@ v4wt_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
: "I" (PAGE_SIZE / 64));
}
+void v4wt_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr)
+{
+ void *kto, *kfrom;
+
+ kto = kmap_atomic(to, KM_USER0);
+ kfrom = kmap_atomic(from, KM_USER1);
+ v4wt_copy_user_page(kto, kfrom);
+ kunmap_atomic(kfrom, KM_USER1);
+ kunmap_atomic(kto, KM_USER0);
+}
+
/*
* ARMv4 optimised clear_user_page
*
@@ -73,5 +84,5 @@ v4wt_clear_user_page(void *kaddr, unsigned long vaddr)
struct cpu_user_fns v4wt_user_fns __initdata = {
.cpu_clear_user_page = v4wt_clear_user_page,
- .cpu_copy_user_page = v4wt_copy_user_page,
+ .cpu_copy_user_highpage = v4wt_copy_user_highpage,
};
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 0e21c0767580..2ea75d0f5048 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -10,8 +10,8 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
+#include <linux/highmem.h>
-#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
@@ -33,9 +33,16 @@ static DEFINE_SPINLOCK(v6_lock);
* Copy the user page. No aliasing to deal with so we can just
* attack the kernel's existing mapping of these pages.
*/
-static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void v6_copy_user_highpage_nonaliasing(struct page *to,
+ struct page *from, unsigned long vaddr)
{
+ void *kto, *kfrom;
+
+ kfrom = kmap_atomic(from, KM_USER0);
+ kto = kmap_atomic(to, KM_USER1);
copy_page(kto, kfrom);
+ kunmap_atomic(kto, KM_USER1);
+ kunmap_atomic(kfrom, KM_USER0);
}
/*
@@ -48,26 +55,32 @@ static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
}
/*
- * Copy the page, taking account of the cache colour.
+ * Discard data in the kernel mapping for the new page.
+ * FIXME: needs this MCRR to be supported.
*/
-static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void discard_old_kernel_data(void *kto)
{
- unsigned int offset = CACHE_COLOUR(vaddr);
- unsigned long from, to;
- struct page *page = virt_to_page(kfrom);
-
- if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
- __flush_dcache_page(page_mapping(page), page);
-
- /*
- * Discard data in the kernel mapping for the new page.
- * FIXME: needs this MCRR to be supported.
- */
__asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06"
:
: "r" (kto),
"r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
: "cc");
+}
+
+/*
+ * Copy the page, taking account of the cache colour.
+ */
+static void v6_copy_user_highpage_aliasing(struct page *to,
+ struct page *from, unsigned long vaddr)
+{
+ unsigned int offset = CACHE_COLOUR(vaddr);
+ unsigned long kfrom, kto;
+
+ if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+ __flush_dcache_page(page_mapping(from), from);
+
+ /* FIXME: not highmem safe */
+ discard_old_kernel_data(page_address(to));
/*
* Now copy the page using the same cache colour as the
@@ -75,16 +88,16 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo
*/
spin_lock(&v6_lock);
- set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL), 0);
- set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL), 0);
+ set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
+ set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
- from = from_address + (offset << PAGE_SHIFT);
- to = to_address + (offset << PAGE_SHIFT);
+ kfrom = from_address + (offset << PAGE_SHIFT);
+ kto = to_address + (offset << PAGE_SHIFT);
- flush_tlb_kernel_page(from);
- flush_tlb_kernel_page(to);
+ flush_tlb_kernel_page(kfrom);
+ flush_tlb_kernel_page(kto);
- copy_page((void *)to, (void *)from);
+ copy_page((void *)kto, (void *)kfrom);
spin_unlock(&v6_lock);
}
@@ -124,14 +137,14 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
struct cpu_user_fns v6_user_fns __initdata = {
.cpu_clear_user_page = v6_clear_user_page_nonaliasing,
- .cpu_copy_user_page = v6_copy_user_page_nonaliasing,
+ .cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing,
};
static int __init v6_userpage_init(void)
{
if (cache_is_vipt_aliasing()) {
cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
- cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing;
+ cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
}
return 0;
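Reassembling the +/- lines of the aliasing hunks above gives the resulting v6 function (a sketch; from_address, to_address, TOP_PTE and v6_lock come from the surrounding file). The point of the exercise: the temporary kernel mappings are placed at the same cache colour as the user address, so the copy leaves no aliased dirty lines behind:

static void v6_copy_user_highpage_aliasing(struct page *to,
	struct page *from, unsigned long vaddr)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long kfrom, kto;

	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(to));

	/*
	 * Copy via temporary kernel mappings that share the cache
	 * colour of the user mapping.
	 */
	spin_lock(&v6_lock);

	set_pte_ext(TOP_PTE(from_address) + offset,
		    pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
	set_pte_ext(TOP_PTE(to_address) + offset,
		    pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);

	kfrom = from_address + (offset << PAGE_SHIFT);
	kto = to_address + (offset << PAGE_SHIFT);

	flush_tlb_kernel_page(kfrom);
	flush_tlb_kernel_page(kto);

	copy_page((void *)kto, (void *)kfrom);

	spin_unlock(&v6_lock);
}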
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index 51ed502e5777..caa697ccd8db 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -11,8 +11,7 @@
* Author: Matt Gilbert (matthew.m.gilbert@intel.com)
*/
#include <linux/init.h>
-
-#include <asm/page.h>
+#include <linux/highmem.h>
/*
* General note:
@@ -21,18 +20,17 @@
*/
/*
- * XSC3 optimised copy_user_page
+ * XSC3 optimised copy_user_highpage
* r0 = destination
* r1 = source
- * r2 = virtual user address of ultimate destination page
*
* The source page may have some clean entries in the cache already, but we
* can safely ignore them - break_cow() will flush them out of the cache
* if we eventually end up using our copied page.
*
*/
-void __attribute__((naked))
-xsc3_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+static void __attribute__((naked))
+xsc3_mc_copy_user_page(void *kto, const void *kfrom)
{
asm("\
stmfd sp!, {r4, r5, lr} \n\
@@ -72,6 +70,18 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
: "I" (PAGE_SIZE / 64 - 1));
}
+void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr)
+{
+ void *kto, *kfrom;
+
+ kto = kmap_atomic(to, KM_USER0);
+ kfrom = kmap_atomic(from, KM_USER1);
+ xsc3_mc_copy_user_page(kto, kfrom);
+ kunmap_atomic(kfrom, KM_USER1);
+ kunmap_atomic(kto, KM_USER0);
+}
+
/*
* XScale optimised clear_user_page
* r0 = destination
@@ -98,5 +108,5 @@ xsc3_mc_clear_user_page(void *kaddr, unsigned long vaddr)
struct cpu_user_fns xsc3_mc_user_fns __initdata = {
.cpu_clear_user_page = xsc3_mc_clear_user_page,
- .cpu_copy_user_page = xsc3_mc_copy_user_page,
+ .cpu_copy_user_highpage = xsc3_mc_copy_user_highpage,
};
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index bad49331bbf9..01bafafce181 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -15,8 +15,8 @@
*/
#include <linux/init.h>
#include <linux/mm.h>
+#include <linux/highmem.h>
-#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
@@ -35,7 +35,7 @@
static DEFINE_SPINLOCK(minicache_lock);
/*
- * XScale mini-dcache optimised copy_user_page
+ * XScale mini-dcache optimised copy_user_highpage
*
* We flush the destination cache lines just before we write the data into the
* corresponding address. Since the Dcache is read-allocate, this removes the
@@ -90,21 +90,24 @@ mc_copy_user_page(void *from, void *to)
: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
}
-void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr)
{
- struct page *page = virt_to_page(kfrom);
+ void *kto = kmap_atomic(to, KM_USER1);
- if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
- __flush_dcache_page(page_mapping(page), page);
+ if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+ __flush_dcache_page(page_mapping(from), from);
spin_lock(&minicache_lock);
- set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
+ set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
flush_tlb_kernel_page(COPYPAGE_MINICACHE);
mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
spin_unlock(&minicache_lock);
+
+ kunmap_atomic(kto, KM_USER1);
}
/*
@@ -133,5 +136,5 @@ xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr)
struct cpu_user_fns xscale_mc_user_fns __initdata = {
.cpu_clear_user_page = xscale_mc_clear_user_page,
- .cpu_copy_user_page = xscale_mc_copy_user_page,
+ .cpu_copy_user_highpage = xscale_mc_copy_user_highpage,
};
diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c
index 2b5ba396e3a6..b9743e6416c4 100644
--- a/arch/arm/mm/proc-syms.c
+++ b/arch/arm/mm/proc-syms.c
@@ -34,7 +34,7 @@ EXPORT_SYMBOL(cpu_cache);
#ifdef CONFIG_MMU
#ifndef MULTI_USER
EXPORT_SYMBOL(__cpu_clear_user_page);
-EXPORT_SYMBOL(__cpu_copy_user_page);
+EXPORT_SYMBOL(__cpu_copy_user_highpage);
#else
EXPORT_SYMBOL(cpu_user);
#endif
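The one-line rename in proc-syms.c follows the method-table change; a hedged sketch of the dispatch structure, with the field types inferred from the .cpu_* initialisers above rather than quoted from the actual header:

struct cpu_user_fns {
	void (*cpu_clear_user_page)(void *p, unsigned long user);
	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
			unsigned long vaddr);
};

/*
 * With MULTI_USER, callers dispatch through the cpu_user table chosen
 * at boot (e.g. feroceon_user_fns); otherwise the single per-CPU
 * implementation is linked directly and exported, hence the
 * __cpu_copy_user_highpage symbol rename above.
 */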