-rw-r--r--  include/linux/slub_def.h | 25
-rw-r--r--  mm/slub.c                | 26
2 files changed, 35 insertions(+), 16 deletions(-)
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 0764c829d967..a0ad37463d62 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -70,11 +70,8 @@ extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
*/
static inline int kmalloc_index(size_t size)
{
- /*
- * We should return 0 if size == 0 but we use the smallest object
- * here for SLAB legacy reasons.
- */
- WARN_ON_ONCE(size == 0);
+ if (!size)
+ return 0;
if (size > KMALLOC_MAX_SIZE)
return -1;
@@ -153,13 +150,25 @@ static inline struct kmem_cache *kmalloc_slab(size_t size)
#define SLUB_DMA 0
#endif
+
+/*
+ * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
+ *
+ * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
+ *
+ * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
+ * Both make kfree a no-op.
+ */
+#define ZERO_SIZE_PTR ((void *)16)
+
+
static inline void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size) && !(flags & SLUB_DMA)) {
struct kmem_cache *s = kmalloc_slab(size);
if (!s)
- return NULL;
+ return ZERO_SIZE_PTR;
return kmem_cache_alloc(s, flags);
} else
@@ -172,7 +181,7 @@ static inline void *kzalloc(size_t size, gfp_t flags)
struct kmem_cache *s = kmalloc_slab(size);
if (!s)
- return NULL;
+ return ZERO_SIZE_PTR;
return kmem_cache_zalloc(s, flags);
} else
@@ -188,7 +197,7 @@ static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
struct kmem_cache *s = kmalloc_slab(size);
if (!s)
- return NULL;
+ return ZERO_SIZE_PTR;
return kmem_cache_alloc_node(s, flags, node);
} else
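
The net effect of the slub_def.h side is that a zero-size kmalloc() now hands
back a small non-NULL token instead of NULL. Below is a minimal userspace
sketch of that convention; xmalloc() and xfree() are hypothetical stand-ins
for the kmalloc()/kfree() paths above, with malloc()/free() standing in for
the real allocator:

#include <stdio.h>
#include <stdlib.h>

#define ZERO_SIZE_PTR ((void *)16)

/* Hypothetical model of kmalloc(): zero-size requests get the token. */
static void *xmalloc(size_t size)
{
	if (!size)
		return ZERO_SIZE_PTR;	/* non-NULL, but backed by no storage */
	return malloc(size);
}

/* Hypothetical model of kfree(): NULL and ZERO_SIZE_PTR are both no-ops. */
static void xfree(void *p)
{
	if ((unsigned long)p <= (unsigned long)ZERO_SIZE_PTR)
		return;
	free(p);
}

int main(void)
{
	void *p = xmalloc(0);

	printf("zero-size alloc -> %p\n", p);	/* prints 0x10, not (nil) */
	xfree(p);				/* safe no-op */
	return 0;
}

Dereferencing the token still faults (address 16 is never mapped), which is
the "distinct access fault" property the comment block above describes.
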
diff --git a/mm/slub.c b/mm/slub.c
index 51663a3c3c24..c9ab68881b43 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2241,7 +2241,7 @@ void *__kmalloc(size_t size, gfp_t flags)
if (s)
return slab_alloc(s, flags, -1, __builtin_return_address(0));
- return NULL;
+ return ZERO_SIZE_PTR;
}
EXPORT_SYMBOL(__kmalloc);
@@ -2252,16 +2252,20 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
if (s)
return slab_alloc(s, flags, node, __builtin_return_address(0));
- return NULL;
+ return ZERO_SIZE_PTR;
}
EXPORT_SYMBOL(__kmalloc_node);
#endif
size_t ksize(const void *object)
{
- struct page *page = get_object_page(object);
+ struct page *page;
struct kmem_cache *s;
+ if (object == ZERO_SIZE_PTR)
+ return 0;
+
+ page = get_object_page(object);
BUG_ON(!page);
s = page->slab;
BUG_ON(!s);
@@ -2293,7 +2297,13 @@ void kfree(const void *x)
struct kmem_cache *s;
struct page *page;
- if (!x)
+ /*
+ * This has to be an unsigned comparison. According to Linus
+ * some gcc versions treat a pointer as a signed entity. Then
+ * this comparison would be true for all "negative" pointers
+ * (which would cover the whole upper half of the address space).
+ */
+ if ((unsigned long)x <= (unsigned long)ZERO_SIZE_PTR)
return;
page = virt_to_head_page(x);
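
To see why the comment insists on an unsigned comparison: on a 64-bit
machine, kernel pointers live in the upper half of the address space, so
reinterpreted as signed integers they are negative and would compare below
ZERO_SIZE_PTR. A small sketch, using a made-up upper-half address (the casts
through long are an assumption, valid on the usual flat-memory targets):

#include <stdio.h>

#define ZERO_SIZE_PTR ((void *)16)

int main(void)
{
	/* Made-up example address in the upper half of a 64-bit space,
	 * where kernel pointers typically live. */
	void *p = (void *)0xffff880012345678UL;

	/* Signed view: the pointer reads as "negative", so it slips under 16. */
	printf("signed:   %d\n", (long)p <= (long)ZERO_SIZE_PTR);

	/* Unsigned view: only addresses at or below 16 pass the test. */
	printf("unsigned: %d\n",
	       (unsigned long)p <= (unsigned long)ZERO_SIZE_PTR);
	return 0;
}

With the signed form, kfree() would silently ignore every valid kernel
pointer; the unsigned form only filters NULL and the token.
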
@@ -2398,12 +2408,12 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
void *ret;
size_t ks;
- if (unlikely(!p))
+ if (unlikely(!p || p == ZERO_SIZE_PTR))
return kmalloc(new_size, flags);
if (unlikely(!new_size)) {
kfree(p);
- return NULL;
+ return ZERO_SIZE_PTR;
}
ks = ksize(p);
@@ -2652,7 +2662,7 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
struct kmem_cache *s = get_slab(size, gfpflags);
if (!s)
- return NULL;
+ return ZERO_SIZE_PTR;
return slab_alloc(s, gfpflags, -1, caller);
}
@@ -2663,7 +2673,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
struct kmem_cache *s = get_slab(size, gfpflags);
if (!s)
- return NULL;
+ return ZERO_SIZE_PTR;
return slab_alloc(s, gfpflags, node, caller);
}
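
Taken together, the krealloc() hunks give ZERO_SIZE_PTR the same round-trip
behaviour as a real object. A sketch of the resulting contract, again as a
hypothetical userspace model (xrealloc() is a stand-in for the patched
krealloc()):

#include <stdio.h>
#include <stdlib.h>

#define ZERO_SIZE_PTR ((void *)16)

/* Hypothetical model of krealloc() after this patch. */
static void *xrealloc(void *p, size_t new_size)
{
	if (!p || p == ZERO_SIZE_PTR)		/* behaves like a fresh alloc */
		return new_size ? malloc(new_size) : ZERO_SIZE_PTR;
	if (!new_size) {			/* shrink to zero frees ... */
		free(p);
		return ZERO_SIZE_PTR;		/* ... and yields the token */
	}
	return realloc(p, new_size);
}

int main(void)
{
	void *p = xrealloc(NULL, 0);	/* -> ZERO_SIZE_PTR */
	p = xrealloc(p, 32);		/* -> real 32-byte object */
	p = xrealloc(p, 0);		/* -> freed, token again */
	printf("%s\n", p == ZERO_SIZE_PTR ? "token" : "object");
	return 0;
}

The ksize() hunk completes the picture: ksize(ZERO_SIZE_PTR) reports 0, so
callers that size-check before copying remain correct.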