 kernel/time/hrtimer.c       | 18 ++++++------------
 kernel/time/tick-internal.h |  6 ------
 kernel/time/timer.c         |  9 ++++++++-
 3 files changed, 14 insertions(+), 19 deletions(-)
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index e6a78ae103ca..1c68bf21f603 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -685,21 +685,24 @@ static void hrtimer_reprogram(struct hrtimer *timer,
/* Update the pointer to the next expiring timer */
cpu_base->next_timer = timer;
+ cpu_base->expires_next = expires;
/*
+ * If hres is not active, hardware does not have to be
+ * programmed yet.
+ *
* If a hang was detected in the last timer interrupt then we
* do not schedule a timer which is earlier than the expiry
* which we enforced in the hang detection. We want the system
* to make progress.
*/
- if (cpu_base->hang_detected)
+ if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
return;
/*
* Program the timer hardware. We enforce the expiry for
* events which are already in the past.
*/
- cpu_base->expires_next = expires;
tick_program_event(expires, 1);
}
@@ -936,16 +939,7 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
if (!leftmost)
goto unlock;
- if (!hrtimer_is_hres_active(timer)) {
- /*
- * Kick to reschedule the next tick to handle the new timer
- * on dynticks target.
- */
- if (is_timers_nohz_active())
- wake_up_nohz_cpu(new_base->cpu_base->cpu);
- } else {
- hrtimer_reprogram(timer, new_base);
- }
+ hrtimer_reprogram(timer, new_base);
unlock:
unlock_hrtimer_base(timer, &flags);
}
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index f690628e068c..e277284c2831 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -151,18 +151,12 @@ static inline void tick_nohz_init(void) { }
#ifdef CONFIG_NO_HZ_COMMON
extern unsigned long tick_nohz_active;
extern void timers_update_nohz(void);
-extern struct static_key_false timers_nohz_active;
-static inline bool is_timers_nohz_active(void)
-{
- return static_branch_likely(&timers_nohz_active);
-}
# ifdef CONFIG_SMP
extern struct static_key_false timers_migration_enabled;
# endif
#else /* CONFIG_NO_HZ_COMMON */
static inline void timers_update_nohz(void) { }
#define tick_nohz_active (0)
-static inline bool is_timers_nohz_active(void) { return false; }
#endif
DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index d530f72b32f9..48150ab42de9 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -210,7 +210,7 @@ static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
#ifdef CONFIG_NO_HZ_COMMON
-DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
+static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
static DEFINE_MUTEX(timer_keys_mutex);
static void timer_update_keys(struct work_struct *work);
@@ -258,6 +258,13 @@ int timer_migration_handler(struct ctl_table *table, int write,
mutex_unlock(&timer_keys_mutex);
return ret;
}
+
+static inline bool is_timers_nohz_active(void)
+{
+ return static_branch_unlikely(&timers_nohz_active);
+}
+#else
+static inline bool is_timers_nohz_active(void) { return false; }
#endif /* NO_HZ_COMMON */
static unsigned long round_jiffies_common(unsigned long j, int cpu,
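
Note on the pattern applied in the timer.c hunk above: the static key becomes file-local and callers branch on it only through a static inline helper, so the key itself no longer has to be exported via tick-internal.h. Below is a minimal kernel-style sketch of that idiom. It is not code from this patch, it is not buildable outside a kernel tree, and the identifiers (example_key, example_key_active, example_enable) are illustrative only; the jump-label API used (DEFINE_STATIC_KEY_FALSE, static_branch_unlikely, static_branch_enable) is the one the patch relies on.

	#include <linux/jump_label.h>

	/* The key is confined to this translation unit. */
	static DEFINE_STATIC_KEY_FALSE(example_key);

	/* Readers go through the helper; the branch site is patched at runtime. */
	static inline bool example_key_active(void)
	{
		return static_branch_unlikely(&example_key);
	}

	/* Writer side: enabling the key rewrites all branch sites to take the true path. */
	static void example_enable(void)
	{
		static_branch_enable(&example_key);
	}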