author	Jeff Garzik <jgarzik@pobox.com>	2005-11-05 14:38:55 -0500
committer	Jeff Garzik <jgarzik@pobox.com>	2005-11-05 14:38:55 -0500
commit	328198acb7407301ddf6005c0fa1e04bd0c539c8 (patch)
tree	9936112bd195bfbaacc9a75f2ea7ff757a2c0546 /arch/arm/mm
parent	9e0cb06b17be7e562cbdaba2768649f025826dc6 (diff)
parent	fecb4a0c87c2bcaee1f3cf800126eef752a07ed3 (diff)
Merge branch 'master'
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/Kconfig	| 18
-rw-r--r--	arch/arm/mm/copypage-v6.c	| 16
-rw-r--r--	arch/arm/mm/init.c	|  9
-rw-r--r--	arch/arm/mm/mm-armv.c	|  8
4 files changed, 34 insertions, 17 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c54e04c995ee..e3c14d6b4328 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -120,8 +120,8 @@ config CPU_ARM925T
# ARM926T
config CPU_ARM926T
- bool "Support ARM926T processor" if ARCH_INTEGRATOR
- depends on ARCH_INTEGRATOR || ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX
+ bool "Support ARM926T processor"
+ depends on ARCH_INTEGRATOR || ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX || MACH_REALVIEW_EB
default y if ARCH_VERSATILE_PB || MACH_VERSATILE_AB || ARCH_OMAP730 || ARCH_OMAP16XX
select CPU_32v5
select CPU_ABRT_EV5TJ
@@ -242,7 +242,7 @@ config CPU_XSCALE
# ARMv6
config CPU_V6
bool "Support ARM V6 processor"
- depends on ARCH_INTEGRATOR
+ depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB
select CPU_32v6
select CPU_ABRT_EV6
select CPU_CACHE_V6
@@ -250,6 +250,18 @@ config CPU_V6
select CPU_COPY_V6
select CPU_TLB_V6
+# ARMv6k
+config CPU_32v6K
+ bool "Support ARM V6K processor extensions" if !SMP
+ depends on CPU_V6
+ default y if SMP
+ help
+ Say Y here if your ARMv6 processor supports the 'K' extension.
+ This enables the kernel to use some instructions not present
+ on previous processors, and as such a kernel built with this
+ option enabled will not boot on processors which do not support
+ these instructions.
+
# Figure out what processor architecture version we should be using.
# This defines the compiler instruction set which depends on the machine type.
config CPU_32v3
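The CPU_32v6K option added above gates the use of instructions that exist only in the ARMv6K extension: the byte, halfword and doubleword exclusive accesses (LDREXB/STREXB and friends), CLREX, and the WFE/SEV/YIELD hint instructions. The sketch below shows the kind of code such an option would guard; the function name and the placement of the CONFIG_CPU_32v6K #ifdef are illustrative assumptions, not code from this patch.

/*
 * Illustrative sketch only: LDREXB/STREXB are ARMv6K instructions, so a
 * kernel built with CONFIG_CPU_32v6K may emit them and will then fault
 * on a plain ARMv6 core.  The function name here is hypothetical.
 */
#ifdef CONFIG_CPU_32v6K
static inline unsigned char xchg_byte_v6k(unsigned char *p, unsigned char new)
{
	unsigned char old;
	unsigned int tmp;

	__asm__ __volatile__(
	"1:	ldrexb	%0, [%3]\n"		/* exclusive load of one byte   */
	"	strexb	%1, %2, [%3]\n"		/* exclusive store, tmp = fail  */
	"	teq	%1, #0\n"
	"	bne	1b"			/* retry until the store sticks */
		: "=&r" (old), "=&r" (tmp)
		: "r" (new), "r" (p)
		: "cc", "memory");

	return old;
}
#endif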
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 27d041574ea7..269ce6913ee9 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -22,9 +22,7 @@
#endif
#define from_address (0xffff8000)
-#define from_pgprot PAGE_KERNEL
#define to_address (0xffffc000)
-#define to_pgprot PAGE_KERNEL
#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
@@ -34,7 +32,7 @@ static DEFINE_SPINLOCK(v6_lock);
* Copy the user page. No aliasing to deal with so we can just
* attack the kernel's existing mapping of these pages.
*/
-void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
copy_page(kto, kfrom);
}
@@ -43,7 +41,7 @@ void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long v
* Clear the user page. No aliasing to deal with so we can just
* attack the kernel's existing mapping of this page.
*/
-void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
{
clear_page(kaddr);
}
@@ -51,7 +49,7 @@ void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
/*
* Copy the page, taking account of the cache colour.
*/
-void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
{
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long from, to;
@@ -72,8 +70,8 @@ void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vadd
*/
spin_lock(&v6_lock);
- set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, from_pgprot));
- set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, to_pgprot));
+ set_pte(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL));
+ set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL));
from = from_address + (offset << PAGE_SHIFT);
to = to_address + (offset << PAGE_SHIFT);
@@ -91,7 +89,7 @@ void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vadd
* so remap the kernel page into the same cache colour as the user
* page.
*/
-void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
{
unsigned int offset = CACHE_COLOUR(vaddr);
unsigned long to = to_address + (offset << PAGE_SHIFT);
@@ -112,7 +110,7 @@ void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
*/
spin_lock(&v6_lock);
- set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, to_pgprot));
+ set_pte(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL));
flush_tlb_kernel_page(to);
clear_page((void *)to);
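Both aliasing variants above rely on the same trick: give the kernel a temporary mapping whose virtual address has the same cache colour as the user-space mapping of the page, so the VIPT data cache never holds two differently-indexed aliases of it. The standalone sketch below restates the colour calculation and where the temporary mapping lands; the 16 KB aliasing boundary (SHMLBA = 4 pages) is the usual ARMv6 value and an assumption of this sketch, not something stated in the patch.

#include <stdio.h>

/* Illustrative constants: 4 KB pages and a 16 KB aliasing boundary give
 * four possible cache colours.  These values are assumptions for this
 * sketch, not taken from the patch itself. */
#define PAGE_SHIFT	12
#define SHMLBA		(4 * 4096)

/* Same idea as the kernel's CACHE_COLOUR(vaddr): which of the four 4 KB
 * "colours" within the 16 KB aliasing window this address falls in. */
static unsigned int cache_colour(unsigned long vaddr)
{
	return (vaddr & (SHMLBA - 1)) >> PAGE_SHIFT;
}

int main(void)
{
	unsigned long user_vaddr = 0x40003000UL;	/* arbitrary example address      */
	unsigned long to_window  = 0xffffc000UL;	/* fixed kernel window, per patch */
	unsigned int colour = cache_colour(user_vaddr);

	/* The temporary kernel mapping is placed at window + colour * PAGE_SIZE
	 * so it shares the cache colour of the user's mapping. */
	printf("colour %u -> kernel alias at 0x%lx\n",
	       colour, to_window + ((unsigned long)colour << PAGE_SHIFT));
	return 0;
}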
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index fd079ff1fc53..c168f322ef8c 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -486,10 +486,17 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
/*
* Ask the machine support to map in the statically mapped devices.
- * After this point, we can start to touch devices again.
*/
if (mdesc->map_io)
mdesc->map_io();
+
+ /*
+ * Finally flush the tlb again - this ensures that we're in a
+ * consistent state wrt the writebuffer if the writebuffer needs
+ * draining. After this point, we can start to touch devices
+ * again.
+ */
+ local_flush_tlb_all();
}
/*
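For context on the init.c change: mdesc->map_io is the per-machine hook that sets up the statically mapped devices, and the added local_flush_tlb_all() runs after it so the new page-table entries (and any pending write buffer contents) are visible before those devices are touched. Below is a hedged sketch of what a typical map_io hook of this era looks like; the board name, virtual address and physical address are hypothetical, only the struct map_desc / iotable_init() pattern comes from arch/arm.

/*
 * Hypothetical board-support sketch: one 1 MB static device mapping,
 * registered from the machine's map_io hook.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/memory.h>
#include <asm/sizes.h>
#include <asm/mach/map.h>

static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual = 0xf8000000,			/* hypothetical device VA */
		.pfn	 = __phys_to_pfn(0x10000000),	/* hypothetical device PA */
		.length	 = SZ_1M,
		.type	 = MT_DEVICE,
	},
};

static void __init example_map_io(void)
{
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}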
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
index 1221fdde1769..fb5b40289de2 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mm-armv.c
@@ -469,14 +469,14 @@ void __init create_mapping(struct map_desc *md)
if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
printk(KERN_WARNING "BUG: not creating mapping for "
- "0x%016llx at 0x%08lx in user region\n",
+ "0x%08llx at 0x%08lx in user region\n",
__pfn_to_phys((u64)md->pfn), md->virtual);
return;
}
if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
- printk(KERN_WARNING "BUG: mapping for 0x%016llx at 0x%08lx "
+ printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
"overlaps vmalloc space\n",
__pfn_to_phys((u64)md->pfn), md->virtual);
}
@@ -492,14 +492,14 @@ void __init create_mapping(struct map_desc *md)
if(md->pfn >= 0x100000) {
if(domain) {
printk(KERN_ERR "MM: invalid domain in supersection "
- "mapping for 0x%016llx at 0x%08lx\n",
+ "mapping for 0x%08llx at 0x%08lx\n",
__pfn_to_phys((u64)md->pfn), md->virtual);
return;
}
if((md->virtual | md->length | __pfn_to_phys(md->pfn))
& ~SUPERSECTION_MASK) {
printk(KERN_ERR "MM: cannot create mapping for "
- "0x%016llx at 0x%08lx invalid alignment\n",
+ "0x%08llx at 0x%08lx invalid alignment\n",
__pfn_to_phys((u64)md->pfn), md->virtual);
return;
}
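The mm-armv.c hunks shrink the printed physical address from %016llx to %08llx and, in the second hunk, validate supersection mappings (taken when md->pfn >= 0x100000, i.e. physical addresses at or above 4 GB): the domain must be zero and the virtual address, length and physical address must all be 16 MB aligned. The small standalone sketch below restates that alignment test; the SUPERSECTION_* constants mirror the usual ARM definitions and are assumptions of this sketch.

#include <stdio.h>

/* Illustrative constants matching the usual ARM definitions:
 * a supersection maps 16 MB, so everything must be 16 MB aligned. */
#define SUPERSECTION_SHIFT	24
#define SUPERSECTION_SIZE	(1UL << SUPERSECTION_SHIFT)
#define SUPERSECTION_MASK	(~(SUPERSECTION_SIZE - 1))

/* Same test as the patch: virtual address, length and physical address
 * must all have zero low 24 bits for a supersection mapping. */
static int supersection_ok(unsigned long virt, unsigned long len,
			   unsigned long long phys)
{
	return ((virt | len | phys) & ~SUPERSECTION_MASK) == 0;
}

int main(void)
{
	/* 1: VA, length and PA are all 16 MB aligned */
	printf("%d\n", supersection_ok(0xe0000000UL, 16UL << 20, 0x100000000ULL));
	/* 0: VA is only 1 MB aligned, so the mapping is rejected */
	printf("%d\n", supersection_ok(0xe0100000UL, 16UL << 20, 0x100000000ULL));
	return 0;
}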