Diffstat (limited to 'mm/slab.c')
-rw-r--r-- | mm/slab.c | 11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 66e5d8032bae..a89633603b2d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1247,9 +1247,10 @@ void __init kmem_cache_init(void)
 	 * structures first. Without this, further allocations will bug.
 	 */
 	kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE] = create_kmalloc_cache(
-				kmalloc_info[INDEX_NODE].name,
-				kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS,
-				0, kmalloc_size(INDEX_NODE));
+				kmalloc_info[INDEX_NODE].name[KMALLOC_NORMAL],
+				kmalloc_info[INDEX_NODE].size,
+				ARCH_KMALLOC_FLAGS, 0,
+				kmalloc_info[INDEX_NODE].size);
 	slab_state = PARTIAL_NODE;
 	setup_kmalloc_cache_index_table();
 
@@ -1415,7 +1416,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 #if DEBUG
 static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
 {
-	if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
+	if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
 		(cachep->size % PAGE_SIZE) == 0)
 		return true;
 
@@ -2007,7 +2008,7 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
	 * to check size >= 256. It guarantees that all necessary small
	 * sized slab is initialized in current slab initialization sequence.
	 */
-	if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
+	if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) &&
 		size >= 256 && cachep->object_size > cache_line_size()) {
 		if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
 			size_t tmp_size = ALIGN(size, PAGE_SIZE);
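Note: the first hunk stops going through the kmalloc_size() helper and instead reads the precomputed size directly from the kmalloc_info table, whose name field is indexed per kmalloc type (hence the new [KMALLOC_NORMAL] subscript). A minimal sketch of the table shape this hunk assumes follows; the real declaration lives in mm/slab.h, so treat the exact layout here as an approximation:

/*
 * Sketch only: approximate shape of kmalloc_info as assumed by the
 * first hunk. NR_KMALLOC_TYPES and KMALLOC_NORMAL come from the
 * kmalloc type enum in the same header.
 */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];	/* one cache name per kmalloc type */
	unsigned int size;			/* object size for this table index */
} kmalloc_info[];

With both the name and the size available in the table, create_kmalloc_cache() no longer needs kmalloc_size() to recompute the size from the index.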
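The other two hunks replace debug_pagealloc_enabled() with debug_pagealloc_enabled_static(). The distinction is that the plain variant reads an early boot flag that is always safe, while the _static variant uses a jump-label static branch and may only be called once static keys are initialized, which holds on these slab paths. A simplified sketch of the two helpers, roughly as they appear in include/linux/mm.h in this era of the kernel (the exact bodies are an assumption for illustration):

/*
 * Simplified sketch of the two helpers (see include/linux/mm.h).
 * The _early flag can be read at any point during boot; the
 * static-branch variant is faster on hot paths but must not run
 * before static keys are set up.
 */
static inline bool debug_pagealloc_enabled(void)
{
	return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
		_debug_pagealloc_enabled_early;
}

static inline bool debug_pagealloc_enabled_static(void)
{
	if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
		return false;

	return static_branch_unlikely(&_debug_pagealloc_enabled);
}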