author    Arun KS <arunks@codeaurora.org>  2018-12-28 00:34:32 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-12-28 12:11:47 -0800
commit    476567e8735a0d06225f3873a86dfa0efd95f3a5 (patch)
tree      0e6724684a8ac631d8ae0fbbf53b123342279fcc
parent    ca79b0c211af63fa3276f0e3fd7dd9ada2439839 (diff)
mm: remove managed_page_count_lock spinlock
Now that totalram_pages and managed_pages are atomic variables, the managed_page_count spinlock is no longer needed. The lock only ever provided a weak consistency guarantee: it serialized the updates, but no reader actually cares whether all the values are updated in sync.

Link: http://lkml.kernel.org/r/1542090790-21750-5-git-send-email-arunks@codeaurora.org
Signed-off-by: Arun KS <arunks@codeaurora.org>
Reviewed-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: David Hildenbrand <david@redhat.com>
Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
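To make the reasoning concrete, the following is a minimal userspace sketch of the same pattern, using C11 atomics in place of the kernel's atomic_long_t API; the names (adjust_counts, and managed_pages/totalram_pages as plain globals) are illustrative stand-ins, not the kernel's implementation. Each per-counter add is atomic on its own, and since no reader requires the two counters to be mutually consistent at any instant, no surrounding lock is needed:

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-ins for zone->managed_pages and _totalram_pages. */
static atomic_long managed_pages;
static atomic_long totalram_pages;

/*
 * Mirrors the shape of adjust_managed_page_count() after this patch:
 * each update is individually atomic, and no lock is taken because
 * readers tolerate the two counters being momentarily out of sync --
 * the same weak guarantee the removed spinlock provided.
 */
static void adjust_counts(long count)
{
	atomic_fetch_add(&managed_pages, count);
	atomic_fetch_add(&totalram_pages, count);
}

int main(void)
{
	adjust_counts(128);	/* e.g. online 128 pages */
	adjust_counts(-16);	/* e.g. reserve 16 pages */
	printf("managed=%ld totalram=%ld\n",
	       atomic_load(&managed_pages),
	       atomic_load(&totalram_pages));
	return 0;
}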
-rw-r--r--  include/linux/mmzone.h  6
-rw-r--r--  mm/page_alloc.c         5
2 files changed, 0 insertions, 11 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a23e34e21178..bc0990c1f1c3 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -428,12 +428,6 @@ struct zone {
* Write access to present_pages at runtime should be protected by
* mem_hotplug_begin/end(). Any reader who can't tolerant drift of
* present_pages should get_online_mems() to get a stable value.
- *
- * Read access to managed_pages should be safe because it's unsigned
- * long. Write access to zone->managed_pages and totalram_pages are
- * protected by managed_page_count_lock at runtime. Idealy only
- * adjust_managed_page_count() should be used instead of directly
- * touching zone->managed_pages and totalram_pages.
*/
atomic_long_t managed_pages;
unsigned long spanned_pages;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index eb2027892ef9..6f3d2c7af84b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -122,9 +122,6 @@ nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
};
EXPORT_SYMBOL(node_states);
-/* Protect totalram_pages and zone->managed_pages */
-static DEFINE_SPINLOCK(managed_page_count_lock);
-
atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
@@ -7077,14 +7074,12 @@ early_param("movablecore", cmdline_parse_movablecore);
void adjust_managed_page_count(struct page *page, long count)
{
- spin_lock(&managed_page_count_lock);
atomic_long_add(count, &page_zone(page)->managed_pages);
totalram_pages_add(count);
#ifdef CONFIG_HIGHMEM
if (PageHighMem(page))
totalhigh_pages_add(count);
#endif
- spin_unlock(&managed_page_count_lock);
}
EXPORT_SYMBOL(adjust_managed_page_count);