path: root/mm
author    Linus Torvalds <torvalds@linux-foundation.org>    2015-03-12 18:46:19 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2015-03-12 18:46:19 -0700
commit    c202baf017aea0c860e53131bc55bb1af7177e76 (patch)
tree      f3b19d008aac14979e115542f4b689e6c99f33cd /mm
parent    ba68bc0115ebfc37f911db4e87bf5f7991f89698 (diff)
parent    7feee590bb18ffc42636975f74c2c3120ce1901c (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "13 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  memcg: disable hierarchy support if bound to the legacy cgroup hierarchy
  mm: reorder can_do_mlock to fix audit denial
  kasan, module: move MODULE_ALIGN macro into <linux/moduleloader.h>
  kasan, module, vmalloc: rework shadow allocation for modules
  fanotify: fix event filtering with FAN_ONDIR set
  mm/nommu.c: export symbol max_mapnr
  arch/c6x/include/asm/pgtable.h: define dummy pgprot_writecombine for !MMU
  nilfs2: fix deadlock of segment constructor during recovery
  mm: cma: fix CMA aligned offset calculation
  mm, hugetlb: close race when setting PageTail for gigantic pages
  mm, oom: do not fail __GFP_NOFAIL allocation if oom killer is disabled
  drivers/rtc/rtc-s3c.c: add .needs_src_clk to s3c6410 RTC data
  ocfs2: make append_dio an incompat feature
Diffstat (limited to 'mm')
-rw-r--r--  mm/cma.c          12
-rw-r--r--  mm/hugetlb.c       4
-rw-r--r--  mm/kasan/kasan.c  14
-rw-r--r--  mm/memcontrol.c    4
-rw-r--r--  mm/mlock.c         4
-rw-r--r--  mm/nommu.c         1
-rw-r--r--  mm/page_alloc.c    3
-rw-r--r--  mm/vmalloc.c       1
8 files changed, 30 insertions(+), 13 deletions(-)
diff --git a/mm/cma.c b/mm/cma.c
index 75016fd1de90..68ecb7a42983 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -64,15 +64,17 @@ static unsigned long cma_bitmap_aligned_mask(struct cma *cma, int align_order)
return (1UL << (align_order - cma->order_per_bit)) - 1;
}
+/*
+ * Find a PFN aligned to the specified order and return an offset represented in
+ * order_per_bits.
+ */
static unsigned long cma_bitmap_aligned_offset(struct cma *cma, int align_order)
{
- unsigned int alignment;
-
if (align_order <= cma->order_per_bit)
return 0;
- alignment = 1UL << (align_order - cma->order_per_bit);
- return ALIGN(cma->base_pfn, alignment) -
- (cma->base_pfn >> cma->order_per_bit);
+
+ return (ALIGN(cma->base_pfn, (1UL << align_order))
+ - cma->base_pfn) >> cma->order_per_bit;
}
static unsigned long cma_bitmap_maxno(struct cma *cma)
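Note on the fix above: the old code aligned base_pfn by an alignment expressed in
bitmap-bit units while base_pfn is in PFN units, mixing the two; the new code
aligns in PFN units first and only then converts to bitmap bits. A minimal
userspace model of the arithmetic (the ALIGN macro mirrors the kernel's; the
sample values are illustrative):

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* illustrative values: order_per_bit=1, align_order=12 */
	unsigned long base_pfn = 0x2f800;
	unsigned int order_per_bit = 1, align_order = 12;

	/* old formula: alignment is in bitmap-bit units, base_pfn in PFNs */
	unsigned long alignment = 1UL << (align_order - order_per_bit);
	unsigned long old = ALIGN(base_pfn, alignment)
			    - (base_pfn >> order_per_bit);

	/* new formula: align in PFN units, then shift into bitmap bits */
	unsigned long new = (ALIGN(base_pfn, 1UL << align_order) - base_pfn)
			    >> order_per_bit;

	printf("old=%#lx new=%#lx\n", old, new); /* old=0x17c00, new=0x400 */
	return 0;
}

With order_per_bit == 0 the two formulas agree, which is why the bug only
bites CMA regions configured with a nonzero order_per_bit.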
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0a9ac6c26832..c41b2a0ee273 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -917,7 +917,6 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
__SetPageHead(page);
__ClearPageReserved(page);
for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
- __SetPageTail(p);
/*
* For gigantic hugepages allocated through bootmem at
* boot, it's safer to be consistent with the not-gigantic
@@ -933,6 +932,9 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
__ClearPageReserved(p);
set_page_count(p, 0);
p->first_page = page;
+ /* Make sure p->first_page is always valid for PageTail() */
+ smp_wmb();
+ __SetPageTail(p);
}
}
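The barrier above orders the store to p->first_page before the store that sets
the tail flag, so a racing reader that observes PageTail() can safely
dereference first_page. A self-contained userspace model of the same publish
pattern using C11 atomics (names are mine, not kernel code; C11
release/acquire is a slightly stronger stand-in for the kernel's smp_wmb()
and its read-side pairing):

#include <stdatomic.h>
#include <stdio.h>

static int first_page;		/* stands in for p->first_page */
static atomic_int page_tail;	/* stands in for the tail flag */

static void publisher(void)	/* models prep_compound_gigantic_page() */
{
	first_page = 42;	/* p->first_page = page; */
	/* release ordering = the smp_wmb(): first_page is visible
	 * to other CPUs before the flag is */
	atomic_store_explicit(&page_tail, 1, memory_order_release);
}

static void reader(void)	/* models a PageTail()/first_page reader */
{
	if (atomic_load_explicit(&page_tail, memory_order_acquire))
		printf("head=%d\n", first_page);	/* guaranteed 42 */
}

int main(void)
{
	publisher();
	reader();
	return 0;
}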
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 78fee632a7ee..936d81661c47 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -29,6 +29,7 @@
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/vmalloc.h>
#include <linux/kasan.h>
#include "kasan.h"
@@ -414,12 +415,19 @@ int kasan_module_alloc(void *addr, size_t size)
GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
__builtin_return_address(0));
- return ret ? 0 : -ENOMEM;
+
+ if (ret) {
+ find_vm_area(addr)->flags |= VM_KASAN;
+ return 0;
+ }
+
+ return -ENOMEM;
}
-void kasan_module_free(void *addr)
+void kasan_free_shadow(const struct vm_struct *vm)
{
- vfree(kasan_mem_to_shadow(addr));
+ if (vm->flags & VM_KASAN)
+ vfree(kasan_mem_to_shadow(vm->addr));
}
static void register_global(struct kasan_global *global)
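For context on what kasan_mem_to_shadow() computes, and hence what the vfree()
above releases: KASAN maps every 8 bytes of address space to 1 shadow byte at
a fixed offset. A userspace model of that arithmetic (the 1:8 scale is KASAN's
documented ratio; the offset constant is the x86-64 value and is illustrative):

#include <stdint.h>
#include <stdio.h>

#define SHADOW_SCALE_SHIFT 3			/* 8 bytes -> 1 shadow byte */
#define SHADOW_OFFSET 0xdffffc0000000000UL	/* x86-64 value, illustrative */

static uintptr_t mem_to_shadow(uintptr_t addr)
{
	return (addr >> SHADOW_SCALE_SHIFT) + SHADOW_OFFSET;
}

int main(void)
{
	uintptr_t mod = 0xffffffffa0000000UL;	/* a module-range address */
	printf("shadow(%#lx) = %#lx\n",
	       (unsigned long)mod, (unsigned long)mem_to_shadow(mod));
	return 0;
}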
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9fe07692eaad..b34ef4a32a3b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5232,7 +5232,9 @@ static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
* on for the root memcg is enough.
*/
if (cgroup_on_dfl(root_css->cgroup))
- mem_cgroup_from_css(root_css)->use_hierarchy = true;
+ root_mem_cgroup->use_hierarchy = true;
+ else
+ root_mem_cgroup->use_hierarchy = false;
}
static u64 memory_current_read(struct cgroup_subsys_state *css,
diff --git a/mm/mlock.c b/mm/mlock.c
index 73cf0987088c..8a54cd214925 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -26,10 +26,10 @@
int can_do_mlock(void)
{
- if (capable(CAP_IPC_LOCK))
- return 1;
if (rlimit(RLIMIT_MEMLOCK) != 0)
return 1;
+ if (capable(CAP_IPC_LOCK))
+ return 1;
return 0;
}
EXPORT_SYMBOL(can_do_mlock);
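The reorder matters because capable() emits an audit record on every call,
even when the capability turns out not to be needed; checking the rlimit first
keeps the common unprivileged-but-permitted case quiet. A small userspace
check of that permitted path (illustrative, not kernel code):

#include <stdio.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
	static char buf[4096];
	struct rlimit rl;

	getrlimit(RLIMIT_MEMLOCK, &rl);
	printf("RLIMIT_MEMLOCK cur=%llu\n", (unsigned long long)rl.rlim_cur);

	/* with a nonzero RLIMIT_MEMLOCK this succeeds without
	 * CAP_IPC_LOCK -- the branch the reordered check now takes first */
	if (mlock(buf, sizeof(buf)) != 0)
		perror("mlock");
	else
		puts("mlock ok");
	return 0;
}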
diff --git a/mm/nommu.c b/mm/nommu.c
index 3e67e7538ecf..3fba2dc97c44 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -62,6 +62,7 @@ void *high_memory;
EXPORT_SYMBOL(high_memory);
struct page *mem_map;
unsigned long max_mapnr;
+EXPORT_SYMBOL(max_mapnr);
unsigned long highest_memmap_pfn;
struct percpu_counter vm_committed_as;
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
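The export lets modular code on !MMU kernels reference max_mapnr, e.g. to
bound pfn lookups. A hypothetical minimal module sketch, assuming the usual
extern declaration in <linux/mm.h>:

#include <linux/module.h>
#include <linux/mm.h>

static int __init maxmapnr_demo_init(void)
{
	/* this reference only links against a kernel with the export */
	pr_info("max_mapnr=%lu\n", max_mapnr);
	return 0;
}

static void __exit maxmapnr_demo_exit(void)
{
}

module_init(maxmapnr_demo_init);
module_exit(maxmapnr_demo_exit);
MODULE_LICENSE("GPL");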
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7abfa70cdc1a..40e29429e7b0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2373,7 +2373,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
goto out;
}
/* Exhausted what can be done so it's blamo time */
- if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false))
+ if (out_of_memory(ac->zonelist, gfp_mask, order, ac->nodemask, false)
+ || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL))
*did_some_progress = 1;
out:
oom_zonelist_unlock(ac->zonelist, gfp_mask);
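The WARN_ON_ONCE branch keeps the __GFP_NOFAIL contract alive after the OOM
killer has been disabled: the allocator claims progress and keeps looping
rather than returning NULL, which callers of this flag never check for. A
hedged caller-side sketch (the struct name is mine):

#include <linux/slab.h>

struct demo_rec {
	int payload;
};

static struct demo_rec *demo_alloc(void)
{
	/* __GFP_NOFAIL callers rely on never seeing NULL; the fix above
	 * preserves that even with the OOM killer disabled, trading a
	 * potential allocation loop for a guaranteed pointer */
	return kmalloc(sizeof(struct demo_rec), GFP_KERNEL | __GFP_NOFAIL);
}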
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 35b25e1340ca..49abccf29a29 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1418,6 +1418,7 @@ struct vm_struct *remove_vm_area(const void *addr)
spin_unlock(&vmap_area_lock);
vmap_debug_free_range(va->va_start, va->va_end);
+ kasan_free_shadow(vm);
free_unmap_vmap_area(va);
vm->size -= PAGE_SIZE;
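Taken together with the kasan.c change, ownership of the module shadow now
lives in the vmalloc area itself: kasan_module_alloc() tags the area with
VM_KASAN, and remove_vm_area() frees the shadow for tagged areas only. A tiny
userspace model of that flag-guarded teardown (names and layout are mine):

#include <stdlib.h>

#define VM_KASAN 0x1		/* models the new vm_struct flag */

struct varea {
	unsigned long flags;
	void *shadow;		/* stands in for the shadow mapping */
};

static void remove_varea(struct varea *area)
{
	/* mirrors remove_vm_area() -> kasan_free_shadow(): only areas
	 * tagged at allocation time own a shadow to free */
	if (area->flags & VM_KASAN)
		free(area->shadow);
	free(area);
}

int main(void)
{
	struct varea *a = malloc(sizeof(*a));

	a->flags = VM_KASAN;
	a->shadow = malloc(64);
	remove_varea(a);
	return 0;
}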