Diffstat (limited to 'arch/ia64/mm')
-rw-r--r--  arch/ia64/mm/init.c     |  8
-rw-r--r--  arch/ia64/mm/ioremap.c  |  6
-rw-r--r--  arch/ia64/mm/tlb.c      | 12
3 files changed, 17 insertions, 9 deletions
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 2ef1151cde90..cafa8776a53d 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -109,6 +109,7 @@ lazy_mmu_prot_update (pte_t pte)
 {
 	unsigned long addr;
 	struct page *page;
+	unsigned long order;
 
 	if (!pte_exec(pte))
 		return;				/* not an executable page... */
@@ -119,7 +120,12 @@ lazy_mmu_prot_update (pte_t pte)
 	if (test_bit(PG_arch_1, &page->flags))
 		return;				/* i-cache is already coherent with d-cache */
 
-	flush_icache_range(addr, addr + PAGE_SIZE);
+	if (PageCompound(page)) {
+		order = (unsigned long) (page[1].lru.prev);
+		flush_icache_range(addr, addr + (1UL << order << PAGE_SHIFT));
+	}
+	else
+		flush_icache_range(addr, addr + PAGE_SIZE);
 	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
 }
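
The init.c hunk teaches lazy_mmu_prot_update() about huge pages: when the executable page is the head of a compound page, the i-cache flush now covers the whole allocation instead of just the first PAGE_SIZE bytes, so execution from the tail pages cannot hit stale i-cache lines. A minimal sketch of the resulting length calculation follows; the helper name is hypothetical, but the order really is read from page[1].lru.prev, which is where the page allocator of this era stashed the compound order (later kernels provide compound_order() for this).

/*
 * Sketch only -- hypothetical helper, not part of the patch: how many
 * bytes of i-cache need flushing for a possibly-compound page.
 */
static inline unsigned long icache_flush_bytes(struct page *page)
{
	if (PageCompound(page)) {
		unsigned long order = (unsigned long) page[1].lru.prev;

		return 1UL << order << PAGE_SHIFT;	/* whole huge page */
	}
	return PAGE_SIZE;				/* single base page */
}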
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 62328621f99c..643ccc6960ce 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -21,12 +21,12 @@ __ioremap (unsigned long offset, unsigned long size)
 void __iomem *
 ioremap (unsigned long offset, unsigned long size)
 {
-	if (efi_mem_attribute_range(offset, size, EFI_MEMORY_UC))
-		return __ioremap(offset, size);
-
 	if (efi_mem_attribute_range(offset, size, EFI_MEMORY_WB))
 		return phys_to_virt(offset);
 
+	if (efi_mem_attribute_range(offset, size, EFI_MEMORY_UC))
+		return __ioremap(offset, size);
+
 	/*
 	 * Someday this should check ACPI resources so we
 	 * can do the right thing for hot-plugged regions.
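
The ioremap.c hunk only swaps the order of the two EFI attribute checks: if firmware reports the range as write-back (WB) capable, ioremap() now hands back the cacheable identity mapping and falls back to the uncached region only when the range is UC-only. Checking WB first matters because a range may legitimately advertise both attributes, and preferring the cached mapping avoids needlessly slow uncached access to ordinary memory. A rough sketch of the post-patch priority (the function name is hypothetical, and the final fallback is an assumption since the hunk's trailing context is truncated):

/* Illustrative sketch of the decision order after the patch. */
void __iomem *ioremap_sketch(unsigned long offset, unsigned long size)
{
	/* Prefer a cacheable mapping when EFI says WB is permitted... */
	if (efi_mem_attribute_range(offset, size, EFI_MEMORY_WB))
		return (void __iomem *) phys_to_virt(offset);

	/* ...otherwise use the uncached identity region. */
	if (efi_mem_attribute_range(offset, size, EFI_MEMORY_UC))
		return __ioremap(offset, size);

	/* Attributes unknown: assume uncached is the safe fallback. */
	return __ioremap(offset, size);
}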
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 6a4eec9113e8..4dbbca0b5e9c 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -156,17 +156,19 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
 		nbits = purge.max_bits;
 	start &= ~((1UL << nbits) - 1);
 
-# ifdef CONFIG_SMP
-	platform_global_tlb_purge(mm, start, end, nbits);
-# else
 	preempt_disable();
+#ifdef CONFIG_SMP
+	if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) {
+		platform_global_tlb_purge(mm, start, end, nbits);
+		preempt_enable();
+		return;
+	}
+#endif
 	do {
 		ia64_ptcl(start, (nbits<<2));
 		start += (1UL << nbits);
 	} while (start < end);
 	preempt_enable();
-# endif
-
 	ia64_srlz_i();			/* srlz.i implies srlz.d */
 }
 EXPORT_SYMBOL(flush_tlb_range);
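
The tlb.c hunk lets flush_tlb_range() stay on the cheap, CPU-local ptc.l purge even on SMP kernels whenever the mm cannot have live translations anywhere else, i.e. it is this CPU's active_mm and cpu_vm_mask contains exactly one CPU. Preemption is disabled before the test so the task cannot migrate and invalidate that reasoning, and the comparatively expensive platform_global_tlb_purge() is taken only when another CPU might actually be caching entries for the mm. A commented sketch of the resulting control flow, using the same identifiers as the diff:

/* Sketch of the purge decision after the patch -- not a drop-in function. */
static void flush_range_sketch(struct mm_struct *mm, unsigned long start,
			       unsigned long end, unsigned long nbits)
{
	preempt_disable();			/* pin the task to this CPU */
#ifdef CONFIG_SMP
	/* Another CPU may hold TLB entries for mm: global purge required. */
	if (mm != current->active_mm || cpus_weight(mm->cpu_vm_mask) != 1) {
		platform_global_tlb_purge(mm, start, end, nbits);
		preempt_enable();
		return;
	}
#endif
	/* mm is only live here: ptc.l purges one 2^nbits-byte chunk at a time. */
	do {
		ia64_ptcl(start, (nbits << 2));
		start += (1UL << nbits);
	} while (start < end);
	preempt_enable();
	ia64_srlz_i();				/* srlz.i implies srlz.d */
}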