Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile            |   1
-rw-r--r--  kernel/cpu.c               |  10
-rw-r--r--  kernel/cpuset.c            |  26
-rw-r--r--  kernel/exit.c              |   7
-rw-r--r--  kernel/fork.c              |  15
-rw-r--r--  kernel/kprobes.c           |  58
-rw-r--r--  kernel/mutex-debug.c       |  12
-rw-r--r--  kernel/mutex-debug.h       |  25
-rw-r--r--  kernel/mutex.c             |  21
-rw-r--r--  kernel/mutex.h             |   6
-rw-r--r--  kernel/ptrace.c            |  23
-rw-r--r--  kernel/sched.c             |   2
-rw-r--r--  kernel/signal.c            |  35
-rw-r--r--  kernel/time.c              |   2
-rw-r--r--  kernel/time/Makefile       |   1
-rw-r--r--  kernel/time/clocksource.c  | 349
-rw-r--r--  kernel/time/jiffies.c      |  73
-rw-r--r--  kernel/timer.c             | 396
18 files changed, 938 insertions, 124 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index a31276e190f5..752bd7d383af 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -10,6 +10,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
hrtimer.o
+obj-y += time/
obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
obj-$(CONFIG_FUTEX) += futex.o
ifeq ($(CONFIG_COMPAT),y)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index fe2b8d0bfe4c..03dcd981846a 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -13,10 +13,10 @@
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
/* This protects CPUs going up and down... */
-static DECLARE_MUTEX(cpucontrol);
+static DEFINE_MUTEX(cpucontrol);
static BLOCKING_NOTIFIER_HEAD(cpu_chain);
@@ -30,9 +30,9 @@ static int __lock_cpu_hotplug(int interruptible)
if (lock_cpu_hotplug_owner != current) {
if (interruptible)
- ret = down_interruptible(&cpucontrol);
+ ret = mutex_lock_interruptible(&cpucontrol);
else
- down(&cpucontrol);
+ mutex_lock(&cpucontrol);
}
/*
@@ -56,7 +56,7 @@ void unlock_cpu_hotplug(void)
{
if (--lock_cpu_hotplug_depth == 0) {
lock_cpu_hotplug_owner = NULL;
- up(&cpucontrol);
+ mutex_unlock(&cpucontrol);
}
}
EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b602f73fb38d..1535af3a912d 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2442,31 +2442,43 @@ void __cpuset_memory_pressure_bump(void)
*/
static int proc_cpuset_show(struct seq_file *m, void *v)
{
+ struct pid *pid;
struct task_struct *tsk;
char *buf;
- int retval = 0;
+ int retval;
+ retval = -ENOMEM;
buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!buf)
- return -ENOMEM;
+ goto out;
+
+ retval = -ESRCH;
+ pid = m->private;
+ tsk = get_pid_task(pid, PIDTYPE_PID);
+ if (!tsk)
+ goto out_free;
- tsk = m->private;
+ retval = -EINVAL;
mutex_lock(&manage_mutex);
+
retval = cpuset_path(tsk->cpuset, buf, PAGE_SIZE);
if (retval < 0)
- goto out;
+ goto out_unlock;
seq_puts(m, buf);
seq_putc(m, '\n');
-out:
+out_unlock:
mutex_unlock(&manage_mutex);
+ put_task_struct(tsk);
+out_free:
kfree(buf);
+out:
return retval;
}
static int cpuset_open(struct inode *inode, struct file *file)
{
- struct task_struct *tsk = PROC_I(inode)->task;
- return single_open(file, proc_cpuset_show, tsk);
+ struct pid *pid = PROC_I(inode)->pid;
+ return single_open(file, proc_cpuset_show, pid);
}
struct file_operations proc_cpuset_operations = {
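The proc_cpuset_show() change above stops caching a task_struct pointer in the seq_file private data and instead stores a struct pid, resolving it to a task on every read and holding a task reference across the lookup. A minimal sketch of that lookup pattern with the cpuset specifics stripped out (the function name and error handling below are illustrative, not taken from the patch; assumes <linux/pid.h> and <linux/sched.h>):

static int show_example(struct seq_file *m, void *v)
{
        struct pid *pid = m->private;           /* stashed at open() time */
        struct task_struct *tsk;

        tsk = get_pid_task(pid, PIDTYPE_PID);   /* takes a reference on the task */
        if (!tsk)
                return -ESRCH;                  /* task has already exited */

        /* ... it is now safe to dereference tsk ... */

        put_task_struct(tsk);                   /* drop the reference */
        return 0;
}

Storing a struct pid instead of a task_struct means the open /proc file no longer pins the task for its whole lifetime; the task is only pinned for the duration of each read.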
diff --git a/kernel/exit.c b/kernel/exit.c
index e76bd02e930e..304ef637be6c 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -137,12 +137,8 @@ void release_task(struct task_struct * p)
{
int zap_leader;
task_t *leader;
- struct dentry *proc_dentry;
-
repeat:
atomic_dec(&p->user->processes);
- spin_lock(&p->proc_lock);
- proc_dentry = proc_pid_unhash(p);
write_lock_irq(&tasklist_lock);
ptrace_unlink(p);
BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
@@ -171,8 +167,7 @@ repeat:
sched_exit(p);
write_unlock_irq(&tasklist_lock);
- spin_unlock(&p->proc_lock);
- proc_pid_flush(proc_dentry);
+ proc_flush_task(p);
release_thread(p);
call_rcu(&p->rcu, delayed_put_task_struct);
diff --git a/kernel/fork.c b/kernel/fork.c
index dfd10cb370c3..9b4e54ef0225 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -993,13 +993,10 @@ static task_t *copy_process(unsigned long clone_flags,
if (put_user(p->pid, parent_tidptr))
goto bad_fork_cleanup;
- p->proc_dentry = NULL;
-
INIT_LIST_HEAD(&p->children);
INIT_LIST_HEAD(&p->sibling);
p->vfork_done = NULL;
spin_lock_init(&p->alloc_lock);
- spin_lock_init(&p->proc_lock);
clear_tsk_thread_flag(p, TIF_SIGPENDING);
init_sigpending(&p->pending);
@@ -1159,18 +1156,6 @@ static task_t *copy_process(unsigned long clone_flags,
}
if (clone_flags & CLONE_THREAD) {
- /*
- * Important: if an exit-all has been started then
- * do not create this new thread - the whole thread
- * group is supposed to exit anyway.
- */
- if (current->signal->flags & SIGNAL_GROUP_EXIT) {
- spin_unlock(&current->sighand->siglock);
- write_unlock_irq(&tasklist_lock);
- retval = -EAGAIN;
- goto bad_fork_cleanup_namespace;
- }
-
p->group_leader = current->group_leader;
list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1fbf466a29aa..64aab081153b 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -47,11 +47,17 @@
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
+static atomic_t kprobe_count;
DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
+static struct notifier_block kprobe_page_fault_nb = {
+ .notifier_call = kprobe_exceptions_notify,
+ .priority = 0x7fffffff /* we need to notified first */
+};
+
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
* kprobe->ainsn.insn points to the copy of the instruction to be
@@ -368,16 +374,15 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
*/
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
- struct kprobe *kp;
-
if (p->break_handler) {
- list_for_each_entry_rcu(kp, &old_p->list, list) {
- if (kp->break_handler)
- return -EEXIST;
- }
+ if (old_p->break_handler)
+ return -EEXIST;
list_add_tail_rcu(&p->list, &old_p->list);
+ old_p->break_handler = aggr_break_handler;
} else
list_add_rcu(&p->list, &old_p->list);
+ if (p->post_handler && !old_p->post_handler)
+ old_p->post_handler = aggr_post_handler;
return 0;
}
@@ -390,9 +395,11 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
copy_kprobe(p, ap);
ap->addr = p->addr;
ap->pre_handler = aggr_pre_handler;
- ap->post_handler = aggr_post_handler;
ap->fault_handler = aggr_fault_handler;
- ap->break_handler = aggr_break_handler;
+ if (p->post_handler)
+ ap->post_handler = aggr_post_handler;
+ if (p->break_handler)
+ ap->break_handler = aggr_break_handler;
INIT_LIST_HEAD(&ap->list);
list_add_rcu(&p->list, &ap->list);
@@ -464,6 +471,8 @@ static int __kprobes __register_kprobe(struct kprobe *p,
old_p = get_kprobe(p->addr);
if (old_p) {
ret = register_aggr_kprobe(old_p, p);
+ if (!ret)
+ atomic_inc(&kprobe_count);
goto out;
}
@@ -474,6 +483,10 @@ static int __kprobes __register_kprobe(struct kprobe *p,
hlist_add_head_rcu(&p->hlist,
&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
+ if (atomic_add_return(1, &kprobe_count) == \
+ (ARCH_INACTIVE_KPROBE_COUNT + 1))
+ register_page_fault_notifier(&kprobe_page_fault_nb);
+
arch_arm_kprobe(p);
out:
@@ -536,14 +549,40 @@ valid_p:
kfree(old_p);
}
arch_remove_kprobe(p);
+ } else {
+ mutex_lock(&kprobe_mutex);
+ if (p->break_handler)
+ old_p->break_handler = NULL;
+ if (p->post_handler){
+ list_for_each_entry_rcu(list_p, &old_p->list, list){
+ if (list_p->post_handler){
+ cleanup_p = 2;
+ break;
+ }
+ }
+ if (cleanup_p == 0)
+ old_p->post_handler = NULL;
+ }
+ mutex_unlock(&kprobe_mutex);
}
+
+ /* Call unregister_page_fault_notifier()
+ * if no probes are active
+ */
+ mutex_lock(&kprobe_mutex);
+ if (atomic_add_return(-1, &kprobe_count) == \
+ ARCH_INACTIVE_KPROBE_COUNT)
+ unregister_page_fault_notifier(&kprobe_page_fault_nb);
+ mutex_unlock(&kprobe_mutex);
+ return;
}
static struct notifier_block kprobe_exceptions_nb = {
.notifier_call = kprobe_exceptions_notify,
- .priority = 0x7fffffff /* we need to notified first */
+ .priority = 0x7fffffff /* we need to be notified first */
};
+
int __kprobes register_jprobe(struct jprobe *jp)
{
/* Todo: Verify probepoint is a function entry point */
@@ -652,6 +691,7 @@ static int __init init_kprobes(void)
INIT_HLIST_HEAD(&kprobe_table[i]);
INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
}
+ atomic_set(&kprobe_count, 0);
err = arch_init_kprobes();
if (!err)
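The kprobes changes above keep a count of registered probes so the page-fault notifier is only installed while it is actually needed: kprobe_count is incremented on each successful registration and decremented on unregistration, and the notifier is added or removed when the count crosses ARCH_INACTIVE_KPROBE_COUNT (probes the architecture code keeps registered internally). Reduced to its core, the register-on-first / unregister-on-last pattern looks roughly like this (the helper names are illustrative; the patch folds the logic into __register_kprobe() and unregister_kprobe(), and compares against ARCH_INACTIVE_KPROBE_COUNT rather than zero):

static atomic_t active_count = ATOMIC_INIT(0);

static void first_user_arrived(struct notifier_block *nb)
{
        /* first active user: start receiving page-fault notifications */
        if (atomic_add_return(1, &active_count) == 1)
                register_page_fault_notifier(nb);
}

static void last_user_gone(struct notifier_block *nb)
{
        /* last active user gone: stop receiving notifications */
        if (atomic_add_return(-1, &active_count) == 0)
                unregister_page_fault_notifier(nb);
}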
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index f4913c376950..036b6285b15c 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -153,13 +153,13 @@ next:
continue;
count++;
cursor = curr->next;
- debug_spin_lock_restore(&debug_mutex_lock, flags);
+ debug_spin_unlock_restore(&debug_mutex_lock, flags);
printk("\n#%03d: ", count);
printk_lock(lock, filter ? 0 : 1);
goto next;
}
- debug_spin_lock_restore(&debug_mutex_lock, flags);
+ debug_spin_unlock_restore(&debug_mutex_lock, flags);
printk("\n");
}
@@ -316,7 +316,7 @@ void mutex_debug_check_no_locks_held(struct task_struct *task)
continue;
list_del_init(curr);
DEBUG_OFF();
- debug_spin_lock_restore(&debug_mutex_lock, flags);
+ debug_spin_unlock_restore(&debug_mutex_lock, flags);
printk("BUG: %s/%d, lock held at task exit time!\n",
task->comm, task->pid);
@@ -325,7 +325,7 @@ void mutex_debug_check_no_locks_held(struct task_struct *task)
printk("exiting task is not even the owner??\n");
return;
}
- debug_spin_lock_restore(&debug_mutex_lock, flags);
+ debug_spin_unlock_restore(&debug_mutex_lock, flags);
}
/*
@@ -352,7 +352,7 @@ void mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
continue;
list_del_init(curr);
DEBUG_OFF();
- debug_spin_lock_restore(&debug_mutex_lock, flags);
+ debug_spin_unlock_restore(&debug_mutex_lock, flags);
printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
current->comm, current->pid, lock, from, to);
@@ -362,7 +362,7 @@ void mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
printk("freeing task is not even the owner??\n");
return;
}
- debug_spin_lock_restore(&debug_mutex_lock, flags);
+ debug_spin_unlock_restore(&debug_mutex_lock, flags);
}
/*
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index fd384050acb1..a5196c36a5fd 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -46,21 +46,6 @@ extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
extern void debug_mutex_unlock(struct mutex *lock);
extern void debug_mutex_init(struct mutex *lock, const char *name);
-#define debug_spin_lock(lock) \
- do { \
- local_irq_disable(); \
- if (debug_mutex_on) \
- spin_lock(lock); \
- } while (0)
-
-#define debug_spin_unlock(lock) \
- do { \
- if (debug_mutex_on) \
- spin_unlock(lock); \
- local_irq_enable(); \
- preempt_check_resched(); \
- } while (0)
-
#define debug_spin_lock_save(lock, flags) \
do { \
local_irq_save(flags); \
@@ -68,7 +53,7 @@ extern void debug_mutex_init(struct mutex *lock, const char *name);
spin_lock(lock); \
} while (0)
-#define debug_spin_lock_restore(lock, flags) \
+#define debug_spin_unlock_restore(lock, flags) \
do { \
if (debug_mutex_on) \
spin_unlock(lock); \
@@ -76,20 +61,20 @@ extern void debug_mutex_init(struct mutex *lock, const char *name);
preempt_check_resched(); \
} while (0)
-#define spin_lock_mutex(lock) \
+#define spin_lock_mutex(lock, flags) \
do { \
struct mutex *l = container_of(lock, struct mutex, wait_lock); \
\
DEBUG_WARN_ON(in_interrupt()); \
- debug_spin_lock(&debug_mutex_lock); \
+ debug_spin_lock_save(&debug_mutex_lock, flags); \
spin_lock(lock); \
DEBUG_WARN_ON(l->magic != l); \
} while (0)
-#define spin_unlock_mutex(lock) \
+#define spin_unlock_mutex(lock, flags) \
do { \
spin_unlock(lock); \
- debug_spin_unlock(&debug_mutex_lock); \
+ debug_spin_unlock_restore(&debug_mutex_lock, flags); \
} while (0)
#define DEBUG_OFF() \
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 5449b210d9ed..7043db21bbce 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -125,10 +125,11 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
struct task_struct *task = current;
struct mutex_waiter waiter;
unsigned int old_val;
+ unsigned long flags;
debug_mutex_init_waiter(&waiter);
- spin_lock_mutex(&lock->wait_lock);
+ spin_lock_mutex(&lock->wait_lock, flags);
debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);
@@ -157,7 +158,7 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
if (unlikely(state == TASK_INTERRUPTIBLE &&
signal_pending(task))) {
mutex_remove_waiter(lock, &waiter, task->thread_info);
- spin_unlock_mutex(&lock->wait_lock);
+ spin_unlock_mutex(&lock->wait_lock, flags);
debug_mutex_free_waiter(&waiter);
return -EINTR;
@@ -165,9 +166,9 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
__set_task_state(task, state);
/* didnt get the lock, go to sleep: */
- spin_unlock_mutex(&lock->wait_lock);
+ spin_unlock_mutex(&lock->wait_lock, flags);
schedule();
- spin_lock_mutex(&lock->wait_lock);
+ spin_lock_mutex(&lock->wait_lock, flags);
}
/* got the lock - rejoice! */
@@ -178,7 +179,7 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
if (likely(list_empty(&lock->wait_list)))
atomic_set(&lock->count, 0);
- spin_unlock_mutex(&lock->wait_lock);
+ spin_unlock_mutex(&lock->wait_lock, flags);
debug_mutex_free_waiter(&waiter);
@@ -203,10 +204,11 @@ static fastcall noinline void
__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
+ unsigned long flags;
DEBUG_WARN_ON(lock->owner != current_thread_info());
- spin_lock_mutex(&lock->wait_lock);
+ spin_lock_mutex(&lock->wait_lock, flags);
/*
* some architectures leave the lock unlocked in the fastpath failure
@@ -231,7 +233,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
debug_mutex_clear_owner(lock);
- spin_unlock_mutex(&lock->wait_lock);
+ spin_unlock_mutex(&lock->wait_lock, flags);
}
/*
@@ -276,9 +278,10 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
struct mutex *lock = container_of(lock_count, struct mutex, count);
+ unsigned long flags;
int prev;
- spin_lock_mutex(&lock->wait_lock);
+ spin_lock_mutex(&lock->wait_lock, flags);
prev = atomic_xchg(&lock->count, -1);
if (likely(prev == 1))
@@ -287,7 +290,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
if (likely(list_empty(&lock->wait_list)))
atomic_set(&lock->count, 0);
- spin_unlock_mutex(&lock->wait_lock);
+ spin_unlock_mutex(&lock->wait_lock, flags);
return prev == 1;
}
diff --git a/kernel/mutex.h b/kernel/mutex.h
index 00fe84e7b672..069189947257 100644
--- a/kernel/mutex.h
+++ b/kernel/mutex.h
@@ -9,8 +9,10 @@
* !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
*/
-#define spin_lock_mutex(lock) spin_lock(lock)
-#define spin_unlock_mutex(lock) spin_unlock(lock)
+#define spin_lock_mutex(lock, flags) \
+ do { spin_lock(lock); (void)(flags); } while (0)
+#define spin_unlock_mutex(lock, flags) \
+ do { spin_unlock(lock); (void)(flags); } while (0)
#define mutex_remove_waiter(lock, waiter, ti) \
__list_del((waiter)->list.prev, (waiter)->list.next)
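Both the debug and non-debug spin_lock_mutex()/spin_unlock_mutex() helpers now take a flags argument. In the debug case that lets the wait_lock be taken with an irqsave/irqrestore pair instead of a bare local_irq_disable()/local_irq_enable(), so whatever interrupt state existed before the lock is restored exactly; the non-debug case simply evaluates and ignores flags. The underlying kernel idiom being adopted (lock and variable names illustrative):

unsigned long flags;

spin_lock_irqsave(&my_lock, flags);       /* save current IRQ state, then disable IRQs */
/* ... critical section ... */
spin_unlock_irqrestore(&my_lock, flags);  /* restore the previously saved IRQ state */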
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 921c22ad16e4..335c5b932e14 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -120,8 +120,18 @@ int ptrace_check_attach(struct task_struct *child, int kill)
static int may_attach(struct task_struct *task)
{
- if (!task->mm)
- return -EPERM;
+ /* May we inspect the given task?
+ * This check is used both for attaching with ptrace
+ * and for allowing access to sensitive information in /proc.
+ *
+ * ptrace_attach denies several cases that /proc allows
+ * because setting up the necessary parent/child relationship
+ * or halting the specified task is impossible.
+ */
+ int dumpable = 0;
+ /* Don't let security modules deny introspection */
+ if (task == current)
+ return 0;
if (((current->uid != task->euid) ||
(current->uid != task->suid) ||
(current->uid != task->uid) ||
@@ -130,7 +140,9 @@ static int may_attach(struct task_struct *task)
(current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
return -EPERM;
smp_rmb();
- if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
+ if (task->mm)
+ dumpable = task->mm->dumpable;
+ if (!dumpable && !capable(CAP_SYS_PTRACE))
return -EPERM;
return security_ptrace(current, task);
@@ -176,6 +188,8 @@ repeat:
goto repeat;
}
+ if (!task->mm)
+ goto bad;
/* the same process cannot be attached many times */
if (task->ptrace & PT_PTRACED)
goto bad;
@@ -200,7 +214,7 @@ out:
return retval;
}
-void __ptrace_detach(struct task_struct *child, unsigned int data)
+static inline void __ptrace_detach(struct task_struct *child, unsigned int data)
{
child->exit_code = data;
/* .. re-parent .. */
@@ -219,6 +233,7 @@ int ptrace_detach(struct task_struct *child, unsigned int data)
ptrace_disable(child);
write_lock_irq(&tasklist_lock);
+ /* protect against de_thread()->release_task() */
if (child->ptrace)
__ptrace_detach(child, data);
write_unlock_irq(&tasklist_lock);
diff --git a/kernel/sched.c b/kernel/sched.c
index 7d1027a4dd21..a856040c200a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4252,7 +4252,7 @@ long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
if (retval)
goto out_unlock;
- jiffies_to_timespec(p->policy & SCHED_FIFO ?
+ jiffies_to_timespec(p->policy == SCHED_FIFO ?
0 : task_timeslice(p), &t);
read_unlock(&tasklist_lock);
retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
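The sched.c one-liner turns a bitwise test into an equality test. Scheduling policies are small integers (SCHED_NORMAL 0, SCHED_FIFO 1, SCHED_RR 2, SCHED_BATCH 3), so the old expression p->policy & SCHED_FIFO is also true for SCHED_BATCH (3 & 1 == 1), making sys_sched_rr_get_interval() report a zero timeslice for SCHED_BATCH tasks; with ==, only SCHED_FIFO, which genuinely has no timeslice, gets the zero.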
diff --git a/kernel/signal.c b/kernel/signal.c
index 1b3c921737e2..52adf53929f6 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1531,6 +1531,35 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
spin_unlock_irqrestore(&sighand->siglock, flags);
}
+static inline int may_ptrace_stop(void)
+{
+ if (!likely(current->ptrace & PT_PTRACED))
+ return 0;
+
+ if (unlikely(current->parent == current->real_parent &&
+ (current->ptrace & PT_ATTACHED)))
+ return 0;
+
+ if (unlikely(current->signal == current->parent->signal) &&
+ unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
+ return 0;
+
+ /*
+ * Are we in the middle of do_coredump?
+ * If so and our tracer is also part of the coredump stopping
+ * is a deadlock situation, and pointless because our tracer
+ * is dead so don't allow us to stop.
+ * If SIGKILL was already sent before the caller unlocked
+ * ->siglock we must see ->core_waiters != 0. Otherwise it
+ * is safe to enter schedule().
+ */
+ if (unlikely(current->mm->core_waiters) &&
+ unlikely(current->mm == current->parent->mm))
+ return 0;
+
+ return 1;
+}
+
/*
* This must be called with current->sighand->siglock held.
*
@@ -1559,11 +1588,7 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
spin_unlock_irq(&current->sighand->siglock);
try_to_freeze();
read_lock(&tasklist_lock);
- if (likely(current->ptrace & PT_PTRACED) &&
- likely(current->parent != current->real_parent ||
- !(current->ptrace & PT_ATTACHED)) &&
- (likely(current->parent->signal != current->signal) ||
- !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) {
+ if (may_ptrace_stop()) {
do_notify_parent_cldstop(current, CLD_TRAPPED);
read_unlock(&tasklist_lock);
schedule();
diff --git a/kernel/time.c b/kernel/time.c
index b00ddc71cedb..5bd489747643 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -523,6 +523,7 @@ EXPORT_SYMBOL(do_gettimeofday);
#else
+#ifndef CONFIG_GENERIC_TIME
/*
* Simulate gettimeofday using do_gettimeofday which only allows a timeval
* and therefore only yields usec accuracy
@@ -537,6 +538,7 @@ void getnstimeofday(struct timespec *tv)
}
EXPORT_SYMBOL_GPL(getnstimeofday);
#endif
+#endif
/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
* Assumes input in normal date format, i.e. 1980-12-31 23:59:59
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
new file mode 100644
index 000000000000..e1dfd8e86cce
--- /dev/null
+++ b/kernel/time/Makefile
@@ -0,0 +1 @@
+obj-y += clocksource.o jiffies.o
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
new file mode 100644
index 000000000000..74eca5939bd9
--- /dev/null
+++ b/kernel/time/clocksource.c
@@ -0,0 +1,349 @@
+/*
+ * linux/kernel/time/clocksource.c
+ *
+ * This file contains the functions which manage clocksource drivers.
+ *
+ * Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * TODO WishList:
+ * o Allow clocksource drivers to be unregistered
+ * o get rid of clocksource_jiffies extern
+ */
+
+#include <linux/clocksource.h>
+#include <linux/sysdev.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+/* XXX - Would like a better way for initializing curr_clocksource */
+extern struct clocksource clocksource_jiffies;
+
+/*[Clocksource internal variables]---------
+ * curr_clocksource:
+ * currently selected clocksource. Initialized to clocksource_jiffies.
+ * next_clocksource:
+ * pending next selected clocksource.
+ * clocksource_list:
+ * linked list with the registered clocksources
+ * clocksource_lock:
+ * protects manipulations to curr_clocksource and next_clocksource
+ * and the clocksource_list
+ * override_name:
+ * Name of the user-specified clocksource.
+ */
+static struct clocksource *curr_clocksource = &clocksource_jiffies;
+static struct clocksource *next_clocksource;
+static LIST_HEAD(clocksource_list);
+static DEFINE_SPINLOCK(clocksource_lock);
+static char override_name[32];
+static int finished_booting;
+
+/* clocksource_done_booting - Called near the end of bootup
+ *
+ * Hack to avoid lots of clocksource churn at boot time
+ */
+static int __init clocksource_done_booting(void)
+{
+ finished_booting = 1;
+ return 0;
+}
+
+late_initcall(clocksource_done_booting);
+
+/**
+ * clocksource_get_next - Returns the selected clocksource
+ *
+ */
+struct clocksource *clocksource_get_next(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&clocksource_lock, flags);
+ if (next_clocksource && finished_booting) {
+ curr_clocksource = next_clocksource;
+ next_clocksource = NULL;
+ }
+ spin_unlock_irqrestore(&clocksource_lock, flags);
+
+ return curr_clocksource;
+}
+
+/**
+ * select_clocksource - Finds the best registered clocksource.
+ *
+ * Private function. Must hold clocksource_lock when called.
+ *
+ * Looks through the list of registered clocksources, returning
+ * the one with the highest rating value. If there is a clocksource
+ * name that matches the override string, it returns that clocksource.
+ */
+static struct clocksource *select_clocksource(void)
+{
+ struct clocksource *best = NULL;
+ struct list_head *tmp;
+
+ list_for_each(tmp, &clocksource_list) {
+ struct clocksource *src;
+
+ src = list_entry(tmp, struct clocksource, list);
+ if (!best)
+ best = src;
+
+ /* check for override: */
+ if (strlen(src->name) == strlen(override_name) &&
+ !strcmp(src->name, override_name)) {
+ best = src;
+ break;
+ }
+ /* pick the highest rating: */
+ if (src->rating > best->rating)
+ best = src;
+ }
+
+ return best;
+}
+
+/**
+ * is_registered_source - Checks if clocksource is registered
+ * @c: pointer to a clocksource
+ *
+ * Private helper function. Must hold clocksource_lock when called.
+ *
+ * Returns one if the clocksource is already registered, zero otherwise.
+ */
+static int is_registered_source(struct clocksource *c)
+{
+ int len = strlen(c->name);
+ struct list_head *tmp;
+
+ list_for_each(tmp, &clocksource_list) {
+ struct clocksource *src;
+
+ src = list_entry(tmp, struct clocksource, list);
+ if (strlen(src->name) == len && !strcmp(src->name, c->name))
+ return 1;
+ }
+
+ return 0;
+}
+
+/**
+ * clocksource_register - Used to install new clocksources
+ * @t: clocksource to be registered
+ *
+ * Returns -EBUSY if registration fails, zero otherwise.
+ */
+int clocksource_register(struct clocksource *c)
+{
+ int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&clocksource_lock, flags);
+ /* check if clocksource is already registered */
+ if (is_registered_source(c)) {
+ printk("register_clocksource: Cannot register %s. "
+ "Already registered!", c->name);
+ ret = -EBUSY;
+ } else {
+ /* register it */
+ list_add(&c->list, &clocksource_list);
+ /* scan the registered clocksources, and pick the best one */
+ next_clocksource = select_clocksource();
+ }
+ spin_unlock_irqrestore(&clocksource_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(clocksource_register);
+
+/**
+ * clocksource_reselect - Rescan list for next clocksource
+ *
+ * A quick helper function to be used if a clocksource changes its
+ * rating. Forces the clocksource list to be re-scanned for the best
+ * clocksource.
+ */
+void clocksource_reselect(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&clocksource_lock, flags);
+ next_clocksource = select_clocksource();
+ spin_unlock_irqrestore(&clocksource_lock, flags);
+}
+EXPORT_SYMBOL(clocksource_reselect);
+
+/**
+ * sysfs_show_current_clocksources - sysfs interface for current clocksource
+ * @dev: unused
+ * @buf: char buffer to be filled with clocksource list
+ *
+ * Provides sysfs interface for listing current clocksource.
+ */
+static ssize_t
+sysfs_show_current_clocksources(struct sys_device *dev, char *buf)
+{
+ char *curr = buf;
+
+ spin_lock_irq(&clocksource_lock);
+ curr += sprintf(curr, "%s ", curr_clocksource->name);
+ spin_unlock_irq(&clocksource_lock);
+
+ curr += sprintf(curr, "\n");
+
+ return curr - buf;
+}
+
+/**
+ * sysfs_override_clocksource - interface for manually overriding clocksource
+ * @dev: unused
+ * @buf: name of override clocksource
+ * @count: length of buffer
+ *
+ * Takes input from sysfs interface for manually overriding the default
+ * clocksource selction.
+ */
+static ssize_t sysfs_override_clocksource(struct sys_device *dev,
+ const char *buf, size_t count)
+{
+ size_t ret = count;
+ /* strings from sysfs write are not 0 terminated! */
+ if (count >= sizeof(override_name))
+ return -EINVAL;
+
+ /* strip of \n: */
+ if (buf[count-1] == '\n')
+ count--;
+ if (count < 1)
+ return -EINVAL;
+
+ spin_lock_irq(&clocksource_lock);
+
+ /* copy the name given: */
+ memcpy(override_name, buf, count);
+ override_name[count] = 0;
+
+ /* try to select it: */
+ next_clocksource = select_clocksource();
+
+ spin_unlock_irq(&clocksource_lock);
+
+ return ret;
+}
+
+/**
+ * sysfs_show_available_clocksources - sysfs interface for listing clocksource
+ * @dev: unused
+ * @buf: char buffer to be filled with clocksource list
+ *
+ * Provides sysfs interface for listing registered clocksources
+ */
+static ssize_t
+sysfs_show_available_clocksources(struct sys_device *dev, char *buf)
+{
+ struct list_head *tmp;
+ char *curr = buf;
+
+ spin_lock_irq(&clocksource_lock);
+ list_for_each(tmp, &clocksource_list) {
+ struct clocksource *src;
+
+ src = list_entry(tmp, struct clocksource, list);
+ curr += sprintf(curr, "%s ", src->name);
+ }
+ spin_unlock_irq(&clocksource_lock);
+
+ curr += sprintf(curr, "\n");
+
+ return curr - buf;
+}
+
+/*
+ * Sysfs setup bits:
+ */
+static SYSDEV_ATTR(current_clocksource, 0600, sysfs_show_current_clocksources,
+ sysfs_override_clocksource);
+
+static SYSDEV_ATTR(available_clocksource, 0600,
+ sysfs_show_available_clocksources, NULL);
+
+static struct sysdev_class clocksource_sysclass = {
+ set_kset_name("clocksource"),
+};
+
+static struct sys_device device_clocksource = {
+ .id = 0,
+ .cls = &clocksource_sysclass,
+};
+
+static int __init init_clocksource_sysfs(void)
+{
+ int error = sysdev_class_register(&clocksource_sysclass);
+
+ if (!error)
+ error = sysdev_register(&device_clocksource);
+ if (!error)
+ error = sysdev_create_file(
+ &device_clocksource,
+ &attr_current_clocksource);
+ if (!error)
+ error = sysdev_create_file(
+ &device_clocksource,
+ &attr_available_clocksource);
+ return error;
+}
+
+device_initcall(init_clocksource_sysfs);
+
+/**
+ * boot_override_clocksource - boot clock override
+ * @str: override name
+ *
+ * Takes a clocksource= boot argument and uses it
+ * as the clocksource override name.
+ */
+static int __init boot_override_clocksource(char* str)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&clocksource_lock, flags);
+ if (str)
+ strlcpy(override_name, str, sizeof(override_name));
+ spin_unlock_irqrestore(&clocksource_lock, flags);
+ return 1;
+}
+
+__setup("clocksource=", boot_override_clocksource);
+
+/**
+ * boot_override_clock - Compatibility layer for deprecated boot option
+ * @str: override name
+ *
+ * DEPRECATED! Takes a clock= boot argument and uses it
+ * as the clocksource override name
+ */
+static int __init boot_override_clock(char* str)
+{
+ if (!strcmp(str, "pmtmr")) {
+ printk("Warning: clock=pmtmr is deprecated. "
+ "Use clocksource=acpi_pm.\n");
+ return boot_override_clocksource("acpi_pm");
+ }
+ printk("Warning! clock= boot option is deprecated. "
+ "Use clocksource=xyz\n");
+ return boot_override_clocksource(str);
+}
+
+__setup("clock=", boot_override_clock);
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
new file mode 100644
index 000000000000..126bb30c4afe
--- /dev/null
+++ b/kernel/time/jiffies.c
@@ -0,0 +1,73 @@
+/***********************************************************************
+* linux/kernel/time/jiffies.c
+*
+* This file contains the jiffies based clocksource.
+*
+* Copyright (C) 2004, 2005 IBM, John Stultz (johnstul@us.ibm.com)
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+* This program is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+* GNU General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+*
+************************************************************************/
+#include <linux/clocksource.h>
+#include <linux/jiffies.h>
+#include <linux/init.h>
+
+/* The Jiffies based clocksource is the lowest common
+ * denominator clock source which should function on
+ * all systems. It has the same coarse resolution as
+ * the timer interrupt frequency HZ and it suffers
+ * inaccuracies caused by missed or lost timer
+ * interrupts and the inability for the timer
+ * interrupt hardware to accuratly tick at the
+ * requested HZ value. It is also not reccomended
+ * for "tick-less" systems.
+ */
+#define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/ACTHZ))
+
+/* Since jiffies uses a simple NSEC_PER_JIFFY multiplier
+ * conversion, the .shift value could be zero. However
+ * this would make NTP adjustments impossible as they are
+ * in units of 1/2^.shift. Thus we use JIFFIES_SHIFT to
+ * shift both the nominator and denominator the same
+ * amount, and give ntp adjustments in units of 1/2^8
+ *
+ * The value 8 is somewhat carefully chosen, as anything
+ * larger can result in overflows. NSEC_PER_JIFFY grows as
+ * HZ shrinks, so values greater then 8 overflow 32bits when
+ * HZ=100.
+ */
+#define JIFFIES_SHIFT 8
+
+static cycle_t jiffies_read(void)
+{
+ return (cycle_t) jiffies;
+}
+
+struct clocksource clocksource_jiffies = {
+ .name = "jiffies",
+ .rating = 0, /* lowest rating*/
+ .read = jiffies_read,
+ .mask = 0xffffffff, /*32bits*/
+ .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
+ .shift = JIFFIES_SHIFT,
+ .is_continuous = 0, /* tick based, not free running */
+};
+
+static int __init init_jiffies_clocksource(void)
+{
+ return clocksource_register(&clocksource_jiffies);
+}
+
+module_init(init_jiffies_clocksource);
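The JIFFIES_SHIFT comment above can be checked with a quick calculation. At HZ=100, NSEC_PER_JIFFY works out to roughly NSEC_PER_SEC/HZ = 10,000,000 ns (ACTHZ is HZ in 1/256 units, so the <<8 and that scaling cancel). The resulting .mult is 10,000,000 << 8 = 2,560,000,000, which still fits in 32 bits (max ~4,294,967,295), but a shift of 9 would give 5,120,000,000 and overflow; 8 is therefore the largest shift that stays safe at HZ=100.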
diff --git a/kernel/timer.c b/kernel/timer.c
index eb97371b87d8..5bb6b7976eec 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -597,7 +597,6 @@ long time_tolerance = MAXFREQ; /* frequency tolerance (ppm) */
long time_precision = 1; /* clock precision (us) */
long time_maxerror = NTP_PHASE_LIMIT; /* maximum error (us) */
long time_esterror = NTP_PHASE_LIMIT; /* estimated error (us) */
-static long time_phase; /* phase offset (scaled us) */
long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC;
/* frequency offset (scaled ppm)*/
static long time_adj; /* tick adjust (scaled 1 / HZ) */
@@ -747,27 +746,14 @@ static long adjtime_adjustment(void)
}
/* in the NTP reference this is called "hardclock()" */
-static void update_wall_time_one_tick(void)
+static void update_ntp_one_tick(void)
{
- long time_adjust_step, delta_nsec;
+ long time_adjust_step;
time_adjust_step = adjtime_adjustment();
if (time_adjust_step)
/* Reduce by this step the amount of time left */
time_adjust -= time_adjust_step;
- delta_nsec = tick_nsec + time_adjust_step * 1000;
- /*
- * Advance the phase, once it gets to one microsecond, then
- * advance the tick more.
- */
- time_phase += time_adj;
- if ((time_phase >= FINENSEC) || (time_phase <= -FINENSEC)) {
- long ltemp = shift_right(time_phase, (SHIFT_SCALE - 10));
- time_phase -= ltemp << (SHIFT_SCALE - 10);
- delta_nsec += ltemp;
- }
- xtime.tv_nsec += delta_nsec;
- time_interpolator_update(delta_nsec);
/* Changes by adjtime() do not take effect till next tick. */
if (time_next_adjust != 0) {
@@ -780,36 +766,378 @@ static void update_wall_time_one_tick(void)
* Return how long ticks are at the moment, that is, how much time
* update_wall_time_one_tick will add to xtime next time we call it
* (assuming no calls to do_adjtimex in the meantime).
- * The return value is in fixed-point nanoseconds with SHIFT_SCALE-10
- * bits to the right of the binary point.
+ * The return value is in fixed-point nanoseconds shifted by the
+ * specified number of bits to the right of the binary point.
* This function has no side-effects.
*/
u64 current_tick_length(void)
{
long delta_nsec;
+ u64 ret;
+ /* calculate the finest interval NTP will allow.
+ * ie: nanosecond value shifted by (SHIFT_SCALE - 10)
+ */
delta_nsec = tick_nsec + adjtime_adjustment() * 1000;
- return ((u64) delta_nsec << (SHIFT_SCALE - 10)) + time_adj;
+ ret = (u64)delta_nsec << TICK_LENGTH_SHIFT;
+ ret += (s64)time_adj << (TICK_LENGTH_SHIFT - (SHIFT_SCALE - 10));
+
+ return ret;
}
-/*
- * Using a loop looks inefficient, but "ticks" is
- * usually just one (we shouldn't be losing ticks,
- * we're doing this this way mainly for interrupt
- * latency reasons, not because we think we'll
- * have lots of lost timer ticks
+/* XXX - all of this timekeeping code should be later moved to time.c */
+#include <linux/clocksource.h>
+static struct clocksource *clock; /* pointer to current clocksource */
+
+#ifdef CONFIG_GENERIC_TIME
+/**
+ * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook
+ *
+ * private function, must hold xtime_lock lock when being
+ * called. Returns the number of nanoseconds since the
+ * last call to update_wall_time() (adjusted by NTP scaling)
+ */
+static inline s64 __get_nsec_offset(void)
+{
+ cycle_t cycle_now, cycle_delta;
+ s64 ns_offset;
+
+ /* read clocksource: */
+ cycle_now = clocksource_read(clock);
+
+ /* calculate the delta since the last update_wall_time: */
+ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+ /* convert to nanoseconds: */
+ ns_offset = cyc2ns(clock, cycle_delta);
+
+ return ns_offset;
+}
+
+/**
+ * __get_realtime_clock_ts - Returns the time of day in a timespec
+ * @ts: pointer to the timespec to be set
+ *
+ * Returns the time of day in a timespec. Used by
+ * do_gettimeofday() and get_realtime_clock_ts().
*/
-static void update_wall_time(unsigned long ticks)
+static inline void __get_realtime_clock_ts(struct timespec *ts)
{
+ unsigned long seq;
+ s64 nsecs;
+
+ do {
+ seq = read_seqbegin(&xtime_lock);
+
+ *ts = xtime;
+ nsecs = __get_nsec_offset();
+
+ } while (read_seqretry(&xtime_lock, seq));
+
+ timespec_add_ns(ts, nsecs);
+}
+
+/**
+ * getnstimeofday - Returns the time of day in a timespec
+ * @ts: pointer to the timespec to be set
+ *
+ * Returns the time of day in a timespec.
+ */
+void getnstimeofday(struct timespec *ts)
+{
+ __get_realtime_clock_ts(ts);
+}
+
+EXPORT_SYMBOL(getnstimeofday);
+
+/**
+ * do_gettimeofday - Returns the time of day in a timeval
+ * @tv: pointer to the timeval to be set
+ *
+ * NOTE: Users should be converted to using get_realtime_clock_ts()
+ */
+void do_gettimeofday(struct timeval *tv)
+{
+ struct timespec now;
+
+ __get_realtime_clock_ts(&now);
+ tv->tv_sec = now.tv_sec;
+ tv->tv_usec = now.tv_nsec/1000;
+}
+
+EXPORT_SYMBOL(do_gettimeofday);
+/**
+ * do_settimeofday - Sets the time of day
+ * @tv: pointer to the timespec variable containing the new time
+ *
+ * Sets the time of day to the new time and update NTP and notify hrtimers
+ */
+int do_settimeofday(struct timespec *tv)
+{
+ unsigned long flags;
+ time_t wtm_sec, sec = tv->tv_sec;
+ long wtm_nsec, nsec = tv->tv_nsec;
+
+ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
+ return -EINVAL;
+
+ write_seqlock_irqsave(&xtime_lock, flags);
+
+ nsec -= __get_nsec_offset();
+
+ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
+ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+
+ set_normalized_timespec(&xtime, sec, nsec);
+ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+
+ ntp_clear();
+
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+
+ /* signal hrtimers about time change */
+ clock_was_set();
+
+ return 0;
+}
+
+EXPORT_SYMBOL(do_settimeofday);
+
+/**
+ * change_clocksource - Swaps clocksources if a new one is available
+ *
+ * Accumulates current time interval and initializes new clocksource
+ */
+static int change_clocksource(void)
+{
+ struct clocksource *new;
+ cycle_t now;
+ u64 nsec;
+ new = clocksource_get_next();
+ if (clock != new) {
+ now = clocksource_read(new);
+ nsec = __get_nsec_offset();
+ timespec_add_ns(&xtime, nsec);
+
+ clock = new;
+ clock->cycle_last = now;
+ printk(KERN_INFO "Time: %s clocksource has been installed.\n",
+ clock->name);
+ return 1;
+ } else if (clock->update_callback) {
+ return clock->update_callback();
+ }
+ return 0;
+}
+#else
+#define change_clocksource() (0)
+#endif
+
+/**
+ * timeofday_is_continuous - check to see if timekeeping is free running
+ */
+int timekeeping_is_continuous(void)
+{
+ unsigned long seq;
+ int ret;
+
do {
- ticks--;
- update_wall_time_one_tick();
- if (xtime.tv_nsec >= 1000000000) {
- xtime.tv_nsec -= 1000000000;
+ seq = read_seqbegin(&xtime_lock);
+
+ ret = clock->is_continuous;
+
+ } while (read_seqretry(&xtime_lock, seq));
+
+ return ret;
+}
+
+/*
+ * timekeeping_init - Initializes the clocksource and common timekeeping values
+ */
+void __init timekeeping_init(void)
+{
+ unsigned long flags;
+
+ write_seqlock_irqsave(&xtime_lock, flags);
+ clock = clocksource_get_next();
+ clocksource_calculate_interval(clock, tick_nsec);
+ clock->cycle_last = clocksource_read(clock);
+ ntp_clear();
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+}
+
+
+/*
+ * timekeeping_resume - Resumes the generic timekeeping subsystem.
+ * @dev: unused
+ *
+ * This is for the generic clocksource timekeeping.
+ * xtime/wall_to_monotonic/jiffies/wall_jiffies/etc are
+ * still managed by arch specific suspend/resume code.
+ */
+static int timekeeping_resume(struct sys_device *dev)
+{
+ unsigned long flags;
+
+ write_seqlock_irqsave(&xtime_lock, flags);
+ /* restart the last cycle value */
+ clock->cycle_last = clocksource_read(clock);
+ write_sequnlock_irqrestore(&xtime_lock, flags);
+ return 0;
+}
+
+/* sysfs resume/suspend bits for timekeeping */
+static struct sysdev_class timekeeping_sysclass = {
+ .resume = timekeeping_resume,
+ set_kset_name("timekeeping"),
+};
+
+static struct sys_device device_timer = {
+ .id = 0,
+ .cls = &timekeeping_sysclass,
+};
+
+static int __init timekeeping_init_device(void)
+{
+ int error = sysdev_class_register(&timekeeping_sysclass);
+ if (!error)
+ error = sysdev_register(&device_timer);
+ return error;
+}
+
+device_initcall(timekeeping_init_device);
+
+/*
+ * If the error is already larger, we look ahead another tick,
+ * to compensate for late or lost adjustments.
+ */
+static __always_inline int clocksource_bigadjust(int sign, s64 error, s64 *interval, s64 *offset)
+{
+ int adj;
+
+ /*
+ * As soon as the machine is synchronized to the external time
+ * source this should be the common case.
+ */
+ error >>= 2;
+ if (likely(sign > 0 ? error <= *interval : error >= *interval))
+ return sign;
+
+ /*
+ * An extra look ahead dampens the effect of the current error,
+ * which can grow quite large with continously late updates, as
+ * it would dominate the adjustment value and can lead to
+ * oscillation.
+ */
+ error += current_tick_length() >> (TICK_LENGTH_SHIFT - clock->shift + 1);
+ error -= clock->xtime_interval >> 1;
+
+ adj = 0;
+ while (1) {
+ error >>= 1;
+ if (sign > 0 ? error <= *interval : error >= *interval)
+ break;
+ adj++;
+ }
+
+ /*
+ * Add the current adjustments to the error and take the offset
+ * into account, the latter can cause the error to be hardly
+ * reduced at the next tick. Check the error again if there's
+ * room for another adjustment, thus further reducing the error
+ * which otherwise had to be corrected at the next update.
+ */
+ error = (error << 1) - *interval + *offset;
+ if (sign > 0 ? error > *interval : error < *interval)
+ adj++;
+
+ *interval <<= adj;
+ *offset <<= adj;
+ return sign << adj;
+}
+
+/*
+ * Adjust the multiplier to reduce the error value,
+ * this is optimized for the most common adjustments of -1,0,1,
+ * for other values we can do a bit more work.
+ */
+static void clocksource_adjust(struct clocksource *clock, s64 offset)
+{
+ s64 error, interval = clock->cycle_interval;
+ int adj;
+
+ error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
+ if (error > interval) {
+ adj = clocksource_bigadjust(1, error, &interval, &offset);
+ } else if (error < -interval) {
+ interval = -interval;
+ offset = -offset;
+ adj = clocksource_bigadjust(-1, error, &interval, &offset);
+ } else
+ return;
+
+ clock->mult += adj;
+ clock->xtime_interval += interval;
+ clock->xtime_nsec -= offset;
+ clock->error -= (interval - offset) << (TICK_LENGTH_SHIFT - clock->shift);
+}
+
+/*
+ * update_wall_time - Uses the current clocksource to increment the wall time
+ *
+ * Called from the timer interrupt, must hold a write on xtime_lock.
+ */
+static void update_wall_time(void)
+{
+ cycle_t offset;
+
+ clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
+
+#ifdef CONFIG_GENERIC_TIME
+ offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
+#else
+ offset = clock->cycle_interval;
+#endif
+
+ /* normally this loop will run just once, however in the
+ * case of lost or late ticks, it will accumulate correctly.
+ */
+ while (offset >= clock->cycle_interval) {
+ /* accumulate one interval */
+ clock->xtime_nsec += clock->xtime_interval;
+ clock->cycle_last += clock->cycle_interval;
+ offset -= clock->cycle_interval;
+
+ if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
+ clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
xtime.tv_sec++;
second_overflow();
}
- } while (ticks);
+
+ /* interpolator bits */
+ time_interpolator_update(clock->xtime_interval
+ >> clock->shift);
+ /* increment the NTP state machine */
+ update_ntp_one_tick();
+
+ /* accumulate error between NTP and clock interval */
+ clock->error += current_tick_length();
+ clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
+ }
+
+ /* correct the clock when NTP error is too big */
+ clocksource_adjust(clock, offset);
+
+ /* store full nanoseconds into xtime */
+ xtime.tv_nsec = clock->xtime_nsec >> clock->shift;
+ clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
+
+ /* check to see if there is a new clocksource to use */
+ if (change_clocksource()) {
+ clock->error = 0;
+ clock->xtime_nsec = 0;
+ clocksource_calculate_interval(clock, tick_nsec);
+ }
}
/*
@@ -915,10 +1243,8 @@ static inline void update_times(void)
unsigned long ticks;
ticks = jiffies - wall_jiffies;
- if (ticks) {
- wall_jiffies += ticks;
- update_wall_time(ticks);
- }
+ wall_jiffies += ticks;
+ update_wall_time();
calc_load(ticks);
}
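The rewritten update_wall_time() above accumulates whole clocksource intervals in shifted-nanosecond fixed point: clock->xtime_nsec holds nanoseconds << clock->shift so sub-nanosecond remainders survive between ticks, and the leftover NTP error is fed back through clocksource_adjust(). The cycle-to-nanosecond conversion it relies on (performed by the cyc2ns() helper this code assumes from <linux/clocksource.h>) is, in essence:

/* nanoseconds for a given number of counter cycles:
 * ns = (cycles * mult) >> shift, done in 64 bits to avoid overflow */
static inline s64 cycles_to_ns(struct clocksource *cs, cycle_t cycles)
{
        u64 ret = (u64)cycles;

        ret *= cs->mult;
        ret >>= cs->shift;
        return (s64)ret;
}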