Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c  5 ++---
-rw-r--r--  mm/percpu.c   3 ++-
-rw-r--r--  mm/slab.c     7 +------
3 files changed, 5 insertions(+), 10 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 5b4dd03130da..69568388c699 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -135,10 +135,9 @@ static int page_cache_tree_insert(struct address_space *mapping,
 		} else {
 			/* DAX can replace empty locked entry with a hole */
 			WARN_ON_ONCE(p !=
-				(void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
-					 RADIX_DAX_ENTRY_LOCK));
+				dax_radix_locked_entry(0, RADIX_DAX_EMPTY));
 			/* Wakeup waiters for exceptional entry lock */
-			dax_wake_mapping_entry_waiter(mapping, page->index,
+			dax_wake_mapping_entry_waiter(mapping, page->index, p,
 						      false);
 		}
 	}
diff --git a/mm/percpu.c b/mm/percpu.c
index f696385bcc44..0686f566d347 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -886,7 +886,8 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 
 	size = ALIGN(size, 2);
 
-	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
+	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
+		     !is_power_of_2(align))) {
 		WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
 		     size, align);
 		return NULL;
diff --git a/mm/slab.c b/mm/slab.c
index 87b29e76cafd..29bc6c0dedd0 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -552,12 +552,7 @@ static void start_cpu_timer(int cpu)
 {
 	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 
-	/*
-	 * When this gets called from do_initcalls via cpucache_init(),
-	 * init_workqueues() has already run, so keventd will be setup
-	 * at that time.
-	 */
-	if (keventd_up() && reap_work->work.func == NULL) {
+	if (reap_work->work.func == NULL) {
 		init_reap_node(cpu);
 		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
 		schedule_delayed_work_on(cpu, reap_work,
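For context on the mm/percpu.c hunk: the added !is_power_of_2(align) test makes pcpu_alloc() reject alignment values that are not powers of two up front, alongside the existing size and PAGE_SIZE checks. The following is a minimal userspace sketch of that validation, not kernel code; the PAGE_SIZE and PCPU_MIN_UNIT_SIZE values and the pcpu_alloc_args_valid() helper are stand-ins chosen for illustration.

/*
 * Hypothetical userspace illustration of the parameter checks pcpu_alloc()
 * performs after this change. Constants approximate the kernel's
 * PAGE_SIZE and PCPU_MIN_UNIT_SIZE; the helper name is invented here.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE          4096UL
#define PCPU_MIN_UNIT_SIZE (32UL * 1024)

static bool is_power_of_2(unsigned long n)
{
	/* Exactly one bit set: same test the kernel helper uses. */
	return n != 0 && (n & (n - 1)) == 0;
}

/* True when a (size, align) pair would pass pcpu_alloc()'s sanity checks. */
static bool pcpu_alloc_args_valid(size_t size, size_t align)
{
	return size && size <= PCPU_MIN_UNIT_SIZE &&
	       align <= PAGE_SIZE && is_power_of_2(align);
}

int main(void)
{
	printf("size=64 align=8  -> %d\n", pcpu_alloc_args_valid(64, 8));  /* 1: accepted */
	printf("size=64 align=24 -> %d\n", pcpu_alloc_args_valid(64, 24)); /* 0: not a power of two */
	return 0;
}

A 24-byte alignment satisfies the older checks (it is nonzero and below PAGE_SIZE) but is now rejected, which is the case the new condition is meant to catch.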