author     Mel Gorman <mgorman@suse.de>       2013-10-07 11:29:37 +0100
committer  Ingo Molnar <mingo@kernel.org>     2013-10-09 14:48:18 +0200
commit     930aa174fcc8b0efaad102fd80f677b92f35eaa2 (patch)
tree       7746bbcf350f3ce305d9e55435f7a5e3c41b9c8e /kernel/sched
parent     04bb2f9475054298f0c67a89ca92cade42d3fe5e (diff)
sched/numa: Remove the numa_balancing_scan_period_reset sysctl
With scan rate adaptations based on whether the workload has properly
converged or not, there should be no need for the scan period reset
hammer. Get rid of it.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1381141781-10992-60-git-send-email-mgorman@suse.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
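For readers coming to this commit cold: the "scan rate adaptations" the
changelog refers to mean that the per-task NUMA scan period is already
raised while placement stays converged and lowered again when it
degrades, so a wholesale reset back to the minimum buys nothing. The
standalone C sketch below illustrates that feedback loop only; it is
not the kernel implementation. scan_period_adapt(), the converged flag
and the phases[] input are invented for the example, and only the
1000 ms / 60000 ms bounds mirror the min/max sysctls visible in the
diff below.

	#include <stdio.h>

	#define SCAN_MIN_MS 1000   /* cf. sysctl_numa_balancing_scan_period_min */
	#define SCAN_MAX_MS 60000  /* cf. sysctl_numa_balancing_scan_period_max */

	/* Hypothetical helper: grow the period while converged, shrink it
	 * when placement regresses, clamped to the sysctl bounds. */
	static unsigned int scan_period_adapt(unsigned int period_ms, int converged)
	{
		if (converged) {
			/* Pages are well placed: back off to save scan overhead. */
			period_ms *= 2;
			if (period_ms > SCAN_MAX_MS)
				period_ms = SCAN_MAX_MS;
		} else {
			/* New execution phase: speed scanning back up instead of
			 * waiting for a timed reset. */
			period_ms /= 2;
			if (period_ms < SCAN_MIN_MS)
				period_ms = SCAN_MIN_MS;
		}
		return period_ms;
	}

	int main(void)
	{
		unsigned int period = SCAN_MIN_MS;
		/* Simulated convergence signal: converge, then a phase change. */
		int phases[] = { 1, 1, 1, 1, 1, 0, 0, 1 };
		unsigned int i;

		for (i = 0; i < sizeof(phases) / sizeof(phases[0]); i++) {
			period = scan_period_adapt(period, phases[i]);
			printf("interval %u: scan period %u ms\n", i, period);
		}
		return 0;
	}

Built with a plain "cc" invocation, the sketch shows the period backing
off toward the maximum while converged and dropping again at the phase
change, which is the behaviour the removed numa_next_reset timer only
approximated.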
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c |  1 -
-rw-r--r--  kernel/sched/fair.c | 18 +-----------------
2 files changed, 1 insertion(+), 18 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8cfd51f62241..89c5ae836f66 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1721,7 +1721,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 #ifdef CONFIG_NUMA_BALANCING
 	if (p->mm && atomic_read(&p->mm->mm_users) == 1) {
 		p->mm->numa_next_scan = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
-		p->mm->numa_next_reset = jiffies + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
 		p->mm->numa_scan_seq = 0;
 	}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 66237ff8b01e..da6fa22be000 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -826,7 +826,6 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
  */
 unsigned int sysctl_numa_balancing_scan_period_min = 1000;
 unsigned int sysctl_numa_balancing_scan_period_max = 60000;
-unsigned int sysctl_numa_balancing_scan_period_reset = 60000;
 
 /* Portion of address space to scan in MB */
 unsigned int sysctl_numa_balancing_scan_size = 256;
@@ -1685,24 +1684,9 @@ void task_numa_work(struct callback_head *work)
 	if (p->flags & PF_EXITING)
 		return;
 
-	if (!mm->numa_next_reset || !mm->numa_next_scan) {
+	if (!mm->numa_next_scan) {
 		mm->numa_next_scan = now +
 			msecs_to_jiffies(sysctl_numa_balancing_scan_delay);
-		mm->numa_next_reset = now +
-			msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
-	}
-
-	/*
-	 * Reset the scan period if enough time has gone by. Objective is that
-	 * scanning will be reduced if pages are properly placed. As tasks
-	 * can enter different phases this needs to be re-examined. Lacking
-	 * proper tracking of reference behaviour, this blunt hammer is used.
-	 */
-	migrate = mm->numa_next_reset;
-	if (time_after(now, migrate)) {
-		p->numa_scan_period = task_scan_min(p);
-		next_scan = now + msecs_to_jiffies(sysctl_numa_balancing_scan_period_reset);
-		xchg(&mm->numa_next_reset, next_scan);
 	}
 
 	/*