author	John Stultz <john.stultz@linaro.org>	2012-02-28 16:50:11 -0800
committer	John Stultz <john.stultz@linaro.org>	2012-11-13 14:08:23 -0500
commit	d6ad418763888f617ac5b4849823e4cd670df1dd (patch)
tree	92d75b05849f98e1a20eb8fa8bb8a26818707cd2 /kernel
parent	47c8c91b2d60006df22146effe79ac4cdafd9205 (diff)
download	blackbird-op-linux-d6ad418763888f617ac5b4849823e4cd670df1dd.tar.gz
	blackbird-op-linux-d6ad418763888f617ac5b4849823e4cd670df1dd.zip
time: Kill xtime_lock, replacing it with jiffies_lock
Now that timekeeping is protected by its own locks, rename xtime_lock to jiffies_lock to better describe what it protects.
CC: Thomas Gleixner <tglx@linutronix.de>
CC: Eric Dumazet <eric.dumazet@gmail.com>
CC: Richard Cochran <richardcochran@gmail.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
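For orientation, every hunk below follows the same seqlock reader/writer pairing around jiffies_64. A minimal sketch of that pattern, mirroring get_jiffies_64() and xtime_update() from the diff (snapshot_jiffies_64() and advance_jiffies() are illustrative names, not functions added by this patch, and the sketch assumes the extern declarations for jiffies_lock, jiffies_64 and do_timer() are in scope):

#include <linux/seqlock.h>
#include <linux/jiffies.h>

/* Reader: retry until the writer's sequence count is unchanged, i.e. no
 * jiffies update raced with the read of the 64-bit value. */
static u64 snapshot_jiffies_64(void)
{
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqbegin(&jiffies_lock);
		ret = jiffies_64;
	} while (read_seqretry(&jiffies_lock, seq));
	return ret;
}

/* Writer: the tick code serializes jiffies updates by taking the write
 * side of the same seqlock around do_timer(). */
static void advance_jiffies(unsigned long ticks)
{
	write_seqlock(&jiffies_lock);
	do_timer(ticks);
	write_sequnlock(&jiffies_lock);
}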
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/time/jiffies.c	6
-rw-r--r--	kernel/time/tick-common.c	8
-rw-r--r--	kernel/time/tick-internal.h	1
-rw-r--r--	kernel/time/tick-sched.c	22
-rw-r--r--	kernel/time/timekeeping.c	14
5 files changed, 22 insertions, 29 deletions
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 25f5b2699d37..7a925ba456fb 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -67,6 +67,8 @@ static struct clocksource clocksource_jiffies = {
.shift = JIFFIES_SHIFT,
};
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);
+
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
{
@@ -74,9 +76,9 @@ u64 get_jiffies_64(void)
u64 ret;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqbegin(&jiffies_lock);
ret = jiffies_64;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqretry(&jiffies_lock, seq));
return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index da6c9ecad4e4..b1600a6973f4 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -63,13 +63,13 @@ int tick_is_oneshot_available(void)
static void tick_periodic(int cpu)
{
if (tick_do_timer_cpu == cpu) {
- write_seqlock(&xtime_lock);
+ write_seqlock(&jiffies_lock);
/* Keep track of the next tick event */
tick_next_period = ktime_add(tick_next_period, tick_period);
do_timer(1);
- write_sequnlock(&xtime_lock);
+ write_sequnlock(&jiffies_lock);
}
update_process_times(user_mode(get_irq_regs()));
@@ -130,9 +130,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
ktime_t next;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqbegin(&jiffies_lock);
next = tick_next_period;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqretry(&jiffies_lock, seq));
clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 4e265b901fed..cf3e59ed6dc0 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -141,4 +141,3 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
#endif
extern void do_timer(unsigned long ticks);
-extern seqlock_t xtime_lock;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a40260885265..a678046c3e5e 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -31,7 +31,7 @@
static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
/*
- * The time, when the last jiffy update happened. Protected by xtime_lock.
+ * The time, when the last jiffy update happened. Protected by jiffies_lock.
*/
static ktime_t last_jiffies_update;
@@ -49,14 +49,14 @@ static void tick_do_update_jiffies64(ktime_t now)
ktime_t delta;
/*
- * Do a quick check without holding xtime_lock:
+ * Do a quick check without holding jiffies_lock:
*/
delta = ktime_sub(now, last_jiffies_update);
if (delta.tv64 < tick_period.tv64)
return;
- /* Reevalute with xtime_lock held */
- write_seqlock(&xtime_lock);
+ /* Reevalute with jiffies_lock held */
+ write_seqlock(&jiffies_lock);
delta = ktime_sub(now, last_jiffies_update);
if (delta.tv64 >= tick_period.tv64) {
@@ -79,7 +79,7 @@ static void tick_do_update_jiffies64(ktime_t now)
/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
}
- write_sequnlock(&xtime_lock);
+ write_sequnlock(&jiffies_lock);
}
/*
@@ -89,12 +89,12 @@ static ktime_t tick_init_jiffy_update(void)
{
ktime_t period;
- write_seqlock(&xtime_lock);
+ write_seqlock(&jiffies_lock);
/* Did we start the jiffies update yet ? */
if (last_jiffies_update.tv64 == 0)
last_jiffies_update = tick_next_period;
period = last_jiffies_update;
- write_sequnlock(&xtime_lock);
+ write_sequnlock(&jiffies_lock);
return period;
}
@@ -282,11 +282,11 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
/* Read jiffies and the time when jiffies were updated last */
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqbegin(&jiffies_lock);
last_update = last_jiffies_update;
last_jiffies = jiffies;
time_delta = timekeeping_max_deferment();
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqretry(&jiffies_lock, seq));
if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
arch_needs_cpu(cpu)) {
@@ -658,7 +658,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
* concurrency: This happens only when the cpu in charge went
* into a long sleep. If two cpus happen to assign themself to
* this duty, then the jiffies update is still serialized by
- * xtime_lock.
+ * jiffies_lock.
*/
if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
tick_do_timer_cpu = cpu;
@@ -810,7 +810,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
* concurrency: This happens only when the cpu in charge went
* into a long sleep. If two cpus happen to assign themself to
* this duty, then the jiffies update is still serialized by
- * xtime_lock.
+ * jiffies_lock.
*/
if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
tick_do_timer_cpu = cpu;
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e424970bb562..4c7de02eacdc 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -25,12 +25,6 @@
static struct timekeeper timekeeper;
-/*
- * This read-write spinlock protects us from races in SMP while
- * playing with xtime.
- */
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
-
/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
@@ -1299,9 +1293,7 @@ struct timespec get_monotonic_coarse(void)
}
/*
- * The 64-bit jiffies value is not atomic - you MUST NOT read it
- * without sampling the sequence number in xtime_lock.
- * jiffies is defined in the linker script...
+ * Must hold jiffies_lock
*/
void do_timer(unsigned long ticks)
{
@@ -1389,7 +1381,7 @@ EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
*/
void xtime_update(unsigned long ticks)
{
- write_seqlock(&xtime_lock);
+ write_seqlock(&jiffies_lock);
do_timer(ticks);
- write_sequnlock(&xtime_lock);
+ write_sequnlock(&jiffies_lock);
}
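Code outside the timekeeping core typically does not take jiffies_lock itself; on 32-bit builds the exported get_jiffies_64() above already wraps the read side. A hypothetical caller (ticks_since() is illustrative only, not part of this patch):

#include <linux/jiffies.h>

/* Hypothetical helper: elapsed ticks since 'start', safe against torn
 * 64-bit reads on 32-bit architectures via get_jiffies_64(). */
static u64 ticks_since(u64 start)
{
	return get_jiffies_64() - start;
}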