-rw-r--r--  arch/x86/include/asm/tlb.h       |   9
-rw-r--r--  arch/x86/include/asm/tlbflush.h  |  17
-rw-r--r--  arch/x86/mm/tlb.c                | 112
3 files changed, 68 insertions(+), 70 deletions(-)
diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h
index 829215fef9ee..4fef20773b8f 100644
--- a/arch/x86/include/asm/tlb.h
+++ b/arch/x86/include/asm/tlb.h
@@ -4,7 +4,14 @@
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
-#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#define tlb_flush(tlb)							\
+{									\
+	if (tlb->fullmm == 0)						\
+		flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL);	\
+	else								\
+		flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL);	\
+}
#include <asm-generic/tlb.h>
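The hunk above replaces the unconditional flush_tlb_mm() with a range-aware tlb_flush(): a partial teardown (tlb->fullmm == 0) passes along the start/end range accumulated by the unmapper, while a full-mm teardown asks for TLB_FLUSH_ALL. Below is a minimal userspace sketch of that branch; the mmu_gather_sketch struct and flush_range_sketch() are hypothetical stand-ins, not the kernel's real types.

#include <stdio.h>

#define TLB_FLUSH_ALL	(~0UL)

struct mmu_gather_sketch {
	int fullmm;			/* tearing down the whole address space? */
	unsigned long start, end;	/* range accumulated while unmapping */
};

/* Stand-in for flush_tlb_mm_range(): just report what would be flushed. */
static void flush_range_sketch(unsigned long start, unsigned long end)
{
	if (end == TLB_FLUSH_ALL)
		printf("full TLB flush\n");
	else
		printf("ranged flush: %#lx-%#lx\n", start, end);
}

/* Mirrors the branch in the new tlb_flush() macro. */
static void tlb_flush_sketch(struct mmu_gather_sketch *tlb)
{
	if (tlb->fullmm == 0)
		flush_range_sketch(tlb->start, tlb->end);
	else
		flush_range_sketch(0UL, TLB_FLUSH_ALL);
}

int main(void)
{
	struct mmu_gather_sketch partial = { 0, 0x400000UL, 0x402000UL };
	struct mmu_gather_sketch whole   = { 1, 0UL, 0UL };

	tlb_flush_sketch(&partial);	/* ranged flush: 0x400000-0x402000 */
	tlb_flush_sketch(&whole);	/* full TLB flush */
	return 0;
}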
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index 33608d96d68b..621b959e1dbf 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -105,6 +105,13 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 		__flush_tlb();
 }
+static inline void flush_tlb_mm_range(struct mm_struct *mm,
+	unsigned long start, unsigned long end, unsigned long vmflag)
+{
+	if (mm == current->active_mm)
+		__flush_tlb();
+}
+
 static inline void native_flush_tlb_others(const struct cpumask *cpumask,
 					   struct mm_struct *mm,
 					   unsigned long start,
@@ -122,12 +129,16 @@ static inline void reset_lazy_tlbstate(void)
#define local_flush_tlb() __flush_tlb()
+#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
+
+#define flush_tlb_range(vma, start, end)	\
+		flush_tlb_mm_range(vma->vm_mm, start, end, vma->vm_flags)
+
extern void flush_tlb_all(void);
extern void flush_tlb_current_task(void);
-extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
-extern void flush_tlb_range(struct vm_area_struct *vma,
-			   unsigned long start, unsigned long end);
+extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+				unsigned long end, unsigned long vmflag);
#define flush_tlb() flush_tlb_current_task()
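With the two wrapper macros above, the old flush_tlb_mm() and flush_tlb_range() entry points both funnel into the single flush_tlb_mm_range() implementation, flush_tlb_mm() degenerating to a full flush of 0UL..TLB_FLUSH_ALL. A small compile-and-run sketch of that funneling, using hypothetical stand-in types (mm_sketch, vma_sketch, flush_tlb_mm_range_sketch()), not the kernel's real definitions:

#include <stdio.h>

#define TLB_FLUSH_ALL	(~0UL)
#define VM_EXEC		0x4UL	/* same bit value as the kernel's VM_EXEC */

struct mm_sketch  { const char *name; };
struct vma_sketch { struct mm_sketch *vm_mm; unsigned long vm_flags; };

/* Stand-in for the real flush_tlb_mm_range(). */
static void flush_tlb_mm_range_sketch(struct mm_sketch *mm, unsigned long start,
				      unsigned long end, unsigned long vmflag)
{
	printf("%s: flush %#lx-%#lx (vmflag=%#lx)\n", mm->name, start, end, vmflag);
}

/* The two wrapper macros from the patch, applied to the sketch types. */
#define flush_tlb_mm(mm)	flush_tlb_mm_range_sketch(mm, 0UL, TLB_FLUSH_ALL, 0UL)
#define flush_tlb_range(vma, start, end) \
		flush_tlb_mm_range_sketch((vma)->vm_mm, start, end, (vma)->vm_flags)

int main(void)
{
	struct mm_sketch mm = { "mm" };
	struct vma_sketch vma = { &mm, VM_EXEC };

	flush_tlb_mm(&mm);				/* whole address space */
	flush_tlb_range(&vma, 0x1000UL, 0x3000UL);	/* just the unmapped range */
	return 0;
}

Passing vma->vm_flags through as vmflag is what lets the implementation pick the instruction-side or data-side TLB size, and detect hugetlb ranges, without needing the vma itself.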
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 5911f61e300e..481737def84a 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -301,23 +301,10 @@ void flush_tlb_current_task(void)
 	preempt_enable();
 }
-void flush_tlb_mm(struct mm_struct *mm)
-{
-	preempt_disable();
-
-	if (current->active_mm == mm) {
-		if (current->mm)
-			local_flush_tlb();
-		else
-			leave_mm(smp_processor_id());
-	}
-	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
-		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
-
-	preempt_enable();
-}
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+/*
+ * has_large_page() can detect a THP huge page, and also a HUGETLB page
+ * when THP is disabled, so tlb_flush can rely on it unconditionally.
+ */
 static inline unsigned long has_large_page(struct mm_struct *mm,
 				   unsigned long start, unsigned long end)
 {
@@ -339,68 +326,61 @@ static inline unsigned long has_large_page(struct mm_struct *mm,
 	}
 	return 0;
 }
-#else
-static inline unsigned long has_large_page(struct mm_struct *mm,
-				   unsigned long start, unsigned long end)
-{
-	return 0;
-}
-#endif
-void flush_tlb_range(struct vm_area_struct *vma,
-				   unsigned long start, unsigned long end)
-{
-	struct mm_struct *mm;
-	if (vma->vm_flags & VM_HUGETLB || tlb_flushall_shift == -1) {
-flush_all:
-		flush_tlb_mm(vma->vm_mm);
-		return;
-	}
+void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+				unsigned long end, unsigned long vmflag)
+{
+	unsigned long addr;
+	unsigned act_entries, tlb_entries = 0;
 	preempt_disable();
-	mm = vma->vm_mm;
-	if (current->active_mm == mm) {
-		if (current->mm) {
-			unsigned long addr, vmflag = vma->vm_flags;
-			unsigned act_entries, tlb_entries = 0;
+	if (current->active_mm != mm)
+		goto flush_all;
-			if (vmflag & VM_EXEC)
-				tlb_entries = tlb_lli_4k[ENTRIES];
-			else
-				tlb_entries = tlb_lld_4k[ENTRIES];
-
-			act_entries = tlb_entries > mm->total_vm ?
-					mm->total_vm : tlb_entries;
+	if (!current->mm) {
+		leave_mm(smp_processor_id());
+		goto flush_all;
+	}
-			if ((end - start) >> PAGE_SHIFT >
-					act_entries >> tlb_flushall_shift)
-				local_flush_tlb();
-			else {
-				if (has_large_page(mm, start, end)) {
-					preempt_enable();
-					goto flush_all;
-				}
-				for (addr = start; addr < end;
-						addr += PAGE_SIZE)
-					__flush_tlb_single(addr);
+	if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
+					|| (vmflag & VM_HUGETLB)) {
+		local_flush_tlb();
+		goto flush_all;
+	}
-				if (cpumask_any_but(mm_cpumask(mm),
-						smp_processor_id()) < nr_cpu_ids)
-					flush_tlb_others(mm_cpumask(mm), mm,
-						start, end);
-				preempt_enable();
-				return;
-			}
-		} else {
-			leave_mm(smp_processor_id());
+	/* On modern CPUs the last-level TLB is shared by data and instructions */
+	if (vmflag & VM_EXEC)
+		tlb_entries = tlb_lli_4k[ENTRIES];
+	else
+		tlb_entries = tlb_lld_4k[ENTRIES];
+	/* Assume all TLB entries are occupied by this task */
+	act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;
+
+	/* tlb_flushall_shift is the balance point; see the commit log for details */
+	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
+		local_flush_tlb();
+	else {
+		if (has_large_page(mm, start, end)) {
+			local_flush_tlb();
+			goto flush_all;
 		}
+		/* Flush the range one page at a time with 'invlpg' */
+		for (addr = start; addr < end; addr += PAGE_SIZE)
+			__flush_tlb_single(addr);
+
+		if (cpumask_any_but(mm_cpumask(mm),
+				smp_processor_id()) < nr_cpu_ids)
+			flush_tlb_others(mm_cpumask(mm), mm, start, end);
+		preempt_enable();
+		return;
 	}
+
+flush_all:
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
 		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
 	preempt_enable();
 }
-
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
 {
 	struct mm_struct *mm = vma->vm_mm;
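The core of the rewritten flush_tlb_mm_range() is the balance-point test: flush page by page with 'invlpg' only while the range, counted in pages, stays below act_entries >> tlb_flushall_shift; beyond that, one full flush is cheaper than the per-page invalidations. A standalone sketch of just that arithmetic follows; every constant in it is a made-up example, not a value read from a real CPU.

#include <stdio.h>

#define PAGE_SHIFT	12

int main(void)
{
	/* All values below are illustrative examples. */
	unsigned long start = 0x400000UL, end = 0x440000UL;	/* 64 pages */
	unsigned long total_vm = 4096;	/* pages mapped by the task */
	unsigned tlb_entries = 512;	/* last-level 4K dTLB entries */
	int tlb_flushall_shift = 2;	/* per-CPU-model tuning value */
	unsigned act_entries;

	/* The TLB cannot hold more of this task than the task has mapped. */
	act_entries = total_vm > tlb_entries ? tlb_entries : total_vm;

	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift)
		printf("full TLB flush (range too large to pay per-page cost)\n");
	else
		printf("per-page invlpg over %lu pages\n",
		       (end - start) >> PAGE_SHIFT);
	return 0;
}

With these example numbers, 64 pages is under the 512 >> 2 = 128 threshold, so the per-page path wins; a larger unmap would tip the test toward the full flush.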