author     Peter Zijlstra <peterz@infradead.org>  2017-08-11 14:29:01 +0200
committer  Ingo Molnar <mingo@kernel.org>         2017-08-11 14:35:29 +0200
commit     ccde85ba00cccd28436e19c5bf503165c55a04f3 (patch)
tree       e0342f48c0c742f18cb92d1b697bbcf30d1500f0 /mm
parent     040cca3ab2f6f8b8d26e0e4965abea2b9aa14818 (diff)
mm, locking: Fix up flush_tlb_pending() related merge in do_huge_pmd_numa_page()
Merge commit:

  040cca3ab2f6 ("Merge branch 'linus' into locking/core, to resolve conflicts")

overlooked the fact that do_huge_pmd_numa_page() now does two TLB flushes. Commit:

  8b1b436dd1cc ("mm, locking: Rework {set,clear,mm}_tlb_flush_pending()")

and commit:

  a9b802500ebb ("Revert "mm: numa: defer TLB flush for THP migration as long as possible"")

both moved the TLB flush around, but in slightly different ways; the end result is that what was one flush became two. Clean this up.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vineet Gupta <vgupta@synopsys.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
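For orientation before the diff, a condensed C sketch of the flow this patch leaves behind in do_huge_pmd_numa_page(): the single remaining flush is issued under the PTL, where the inc_tlb_flush_pending() store from a concurrent change_protection_range() is guaranteed to be visible, and only then is the lock dropped for migration. All identifiers are taken from the diff below; the surrounding structure and comments are a paraphrase, not a drop-in excerpt of the function.

    /* vmf->ptl (the PTL) is held at this point. */

    /*
     * If another thread has a TLB flush pending (e.g. from
     * change_protection_range()), flush here, while still under the
     * PTL.  The pending flush may or may not cover a huge page
     * mapping, hence the range variant.
     */
    if (mm_tlb_flush_pending(vma->vm_mm))
            flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);

    /* Only now drop the PTL and hand the page off for migration. */
    spin_unlock(vmf->ptl);

    migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
                    vmf->pmd, pmd, vmf->address, page, target_nid);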
Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c  22
1 file changed, 5 insertions(+), 17 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ce883459e246..08f6c1993832 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1410,7 +1410,6 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
int page_nid = -1, this_nid = numa_node_id();
int target_nid, last_cpupid = -1;
- bool need_flush = false;
bool page_locked;
bool migrated = false;
bool was_writable;
@@ -1497,22 +1496,18 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
}
/*
- * The page_table_lock above provides a memory barrier
- * with change_protection_range.
- */
- if (mm_tlb_flush_pending(vma->vm_mm))
- flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
-
- /*
* Since we took the NUMA fault, we must have observed the !accessible
* bit. Make sure all other CPUs agree with that, to avoid them
* modifying the page we're about to migrate.
*
* Must be done under PTL such that we'll observe the relevant
- * set_tlb_flush_pending().
+ * inc_tlb_flush_pending().
+ *
+ * We are not sure a pending tlb flush here is for a huge page
+ * mapping or not. Hence use the tlb range variant
*/
if (mm_tlb_flush_pending(vma->vm_mm))
- need_flush = true;
+ flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
/*
* Migrate the THP to the requested node, returns with page unlocked
@@ -1520,13 +1515,6 @@ int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
*/
spin_unlock(vmf->ptl);
- /*
- * We are not sure a pending tlb flush here is for a huge page
- * mapping or not. Hence use the tlb range variant
- */
- if (need_flush)
- flush_tlb_range(vma, haddr, haddr + HPAGE_PMD_SIZE);
-
migrated = migrate_misplaced_transhuge_page(vma->vm_mm, vma,
vmf->pmd, pmd, vmf->address, page, target_nid);
if (migrated) {