author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2016-04-06 05:57:19 -0400
---|---|---
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2016-04-06 05:57:19 -0400
commit | 1bb025f6db789ea0bb674eaed15ee843ef0b2e88 (patch) |
tree | cdbaa7f57ce541abb5b973803712a9f55307c355 /mm |
parent | f55532a0c0b8bb6148f4e07853b876ef73bc69ca (diff) |
parent | f75587b8ca69768c6cf8a38a0b61e68e1bea3d36 (diff) |
Merge tag 'extcon-fixes-for-4.6-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/chanwoo/extcon into char-misc-linus
Chanwoo writes:
Update extcon for v4.6-rc3
This fixes the following issue:
- In extcon-palmas.c, an external abort happens on wake-up from suspend
  on the BeagleBoard-X15 platform, so drop the IRQF_EARLY_RESUME flag.
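For context on that extcon-palmas change, a minimal sketch of its shape (not the verbatim driver source; the handler and label names are abbreviated from memory and should be treated as placeholders). IRQF_EARLY_RESUME makes the handler run in the early-resume phase, presumably before the Palmas device's parent bus has resumed, so the register access in the handler faults; dropping the flag defers the handler to the normal resume order:

	/* Sketch: request the ID IRQ without IRQF_EARLY_RESUME. */
	ret = devm_request_threaded_irq(palmas_usb->dev, palmas_usb->id_irq,
					NULL, palmas_id_irq_handler,
					IRQF_TRIGGER_FALLING |
					IRQF_TRIGGER_RISING |
					IRQF_ONESHOT, /* was: ... | IRQF_EARLY_RESUME */
					"palmas_usb_id", palmas_usb);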
Diffstat (limited to 'mm')
-rw-r--r-- | mm/kasan/kasan.c    |  2
-rw-r--r-- | mm/oom_kill.c       |  6
-rw-r--r-- | mm/page_isolation.c | 10
-rw-r--r-- | mm/rmap.c           | 28
4 files changed, 18 insertions, 28 deletions
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index acb3b6c4dd89..38f1dd79acdb 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -498,7 +498,7 @@ void kasan_slab_free(struct kmem_cache *cache, void *object)
 		struct kasan_alloc_meta *alloc_info =
 			get_alloc_info(cache, object);
 		alloc_info->state = KASAN_STATE_FREE;
-		set_track(&free_info->track);
+		set_track(&free_info->track, GFP_NOWAIT);
 	}
 #endif
 
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index b34d279a7ee6..86349586eacb 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -547,7 +547,11 @@ static int oom_reaper(void *unused)
 
 static void wake_oom_reaper(struct task_struct *tsk)
 {
-	if (!oom_reaper_th || tsk->oom_reaper_list)
+	if (!oom_reaper_th)
+		return;
+
+	/* tsk is already queued? */
+	if (tsk == oom_reaper_list || tsk->oom_reaper_list)
 		return;
 
 	get_task_struct(tsk);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 92c4c36501e7..c4f568206544 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -215,7 +215,7 @@ int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
  * all pages in [start_pfn...end_pfn) must be in the same zone.
  * zone->lock must be held before call this.
  *
- * Returns 1 if all pages in the range are isolated.
+ * Returns the last tested pfn.
  */
 static unsigned long
 __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn,
@@ -289,11 +289,11 @@ struct page *alloc_migrate_target(struct page *page, unsigned long private,
 	 * now as a simple work-around, we use the next node for destination.
	 */
 	if (PageHuge(page)) {
-		nodemask_t src = nodemask_of_node(page_to_nid(page));
-		nodemask_t dst;
-		nodes_complement(dst, src);
+		int node = next_online_node(page_to_nid(page));
+		if (node == MAX_NUMNODES)
+			node = first_online_node;
 		return alloc_huge_page_node(page_hstate(compound_head(page)),
-					next_node(page_to_nid(page), dst));
+					    node);
 	}
 
 	if (PageHighMem(page))
diff --git a/mm/rmap.c b/mm/rmap.c
index c399a0d41b31..395e314b7996 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -569,19 +569,6 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 }
 
 #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
-static void percpu_flush_tlb_batch_pages(void *data)
-{
-	/*
-	 * All TLB entries are flushed on the assumption that it is
-	 * cheaper to flush all TLBs and let them be refilled than
-	 * flushing individual PFNs. Note that we do not track mm's
-	 * to flush as that might simply be multiple full TLB flushes
-	 * for no gain.
-	 */
-	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
-	flush_tlb_local();
-}
-
 /*
  * Flush TLB entries for recently unmapped pages from remote CPUs. It is
  * important if a PTE was dirty when it was unmapped that it's flushed
@@ -598,15 +585,14 @@ void try_to_unmap_flush(void)
 
 	cpu = get_cpu();
 
-	trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, -1UL);
-
-	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask))
-		percpu_flush_tlb_batch_pages(&tlb_ubc->cpumask);
-
-	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids) {
-		smp_call_function_many(&tlb_ubc->cpumask,
-			percpu_flush_tlb_batch_pages, (void *)tlb_ubc, true);
+	if (cpumask_test_cpu(cpu, &tlb_ubc->cpumask)) {
+		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
+		local_flush_tlb();
+		trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL);
 	}
+
+	if (cpumask_any_but(&tlb_ubc->cpumask, cpu) < nr_cpu_ids)
+		flush_tlb_others(&tlb_ubc->cpumask, NULL, 0, TLB_FLUSH_ALL);
 	cpumask_clear(&tlb_ubc->cpumask);
 	tlb_ubc->flush_required = false;
 	tlb_ubc->writable = false;
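A note on the page_isolation.c hunk above: nodes_complement() fills the destination mask across all MAX_NUMNODES bits, so next_node() on that mask can hand back a node that was never online, which alloc_huge_page_node() is not prepared for. The replacement walks only online nodes and wraps explicitly. A minimal sketch of that idiom as a standalone helper (hypothetical name, not in the kernel):

	/*
	 * Pick the online node after 'nid', wrapping to the first online
	 * node when next_online_node() runs off the end of the node map
	 * (it returns MAX_NUMNODES in that case).
	 */
	static int next_migrate_node(int nid)
	{
		int node = next_online_node(nid);

		if (node == MAX_NUMNODES)
			node = first_online_node;
		return node;
	}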