author		Andi Kleen <ak@suse.de>		2007-10-15 17:00:14 +0200
committer	Ingo Molnar <mingo@elte.hu>	2007-10-15 17:00:14 +0200
commit		8cbbe86dfcfd68ad69916164bdc838d9e09adca8 (patch)
tree		32e949b7a0fac548d7668f01f5d53b727c885502
parent		3a5c359a58c39801d838c508f127bdb228af28b0 (diff)
sched: cleanup: refactor common code of sleep_on / wait_for_completion
These functions were largely cut'n'pasted from one another. Move the common code into shared helpers instead. The advantage is about 1k less code on x86-64 and 91 lines of code removed.

This adds one function call to the non-timeout version of these functions; I don't expect that to be measurable.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
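To make the shape of the cleanup easier to see before reading the hunk, here is a minimal userspace analogue built on pthreads. It is a sketch, not kernel code: the completion struct, wait_common(), and wait_for_completion_deadline() below are invented for illustration, but the structure mirrors the patch, with several cut'n'pasted wait loops collapsing into one parameterized helper behind one-line wrappers. Build with: gcc -pthread sketch.c

/*
 * Illustrative userspace analogue of the refactor -- NOT kernel code.
 * A "completion" is rebuilt on pthreads; all names here are invented
 * for this sketch.  The point is the shape of the cleanup: one common
 * helper (wait_common) parameterized by an optional deadline, with the
 * public variants reduced to one-line wrappers, mirroring
 * do_wait_for_common()/wait_for_common() in the diff below.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	unsigned int    done;
};

#define COMPLETION_INITIALIZER \
	{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 }

/* Common slow path: every public wait variant funnels through here. */
static int wait_common(struct completion *x, const struct timespec *deadline)
{
	int err = 0;

	pthread_mutex_lock(&x->lock);
	while (!x->done && !err) {
		if (deadline)
			err = pthread_cond_timedwait(&x->cond, &x->lock,
						     deadline);
		else
			err = pthread_cond_wait(&x->cond, &x->lock);
	}
	if (x->done)
		x->done--;	/* consume one event, like the kernel's x->done-- */
	pthread_mutex_unlock(&x->lock);
	return err;		/* 0 on completion, ETIMEDOUT on timeout */
}

/* The cut'n'pasted bodies collapse into trivial wrappers. */
static int wait_for_completion(struct completion *x)
{
	return wait_common(x, NULL);
}

static int wait_for_completion_deadline(struct completion *x,
					const struct timespec *deadline)
{
	return wait_common(x, deadline);
}

static void complete(struct completion *x)
{
	pthread_mutex_lock(&x->lock);
	x->done++;
	pthread_cond_signal(&x->cond);
	pthread_mutex_unlock(&x->lock);
}

static struct completion done = COMPLETION_INITIALIZER;

static void *worker(void *unused)
{
	sleep(1);		/* pretend to do some work */
	complete(&done);
	return NULL;
}

int main(void)
{
	struct timespec deadline;
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	wait_for_completion(&done);	/* blocks until worker calls complete() */
	puts("completed");

	/* A timed wait now runs into its deadline: nobody completes again. */
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 1;
	if (wait_for_completion_deadline(&done, &deadline) == ETIMEDOUT)
		puts("timed out, as expected");

	pthread_join(t, NULL);
	return 0;
}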
-rw-r--r--	kernel/sched.c	188
1 file changed, 49 insertions(+), 139 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 4c15b1726196..db88b5655aca 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3697,206 +3697,116 @@ void fastcall complete_all(struct completion *x)
 }
 EXPORT_SYMBOL(complete_all);
 
-void fastcall __sched wait_for_completion(struct completion *x)
+static inline long __sched
+do_wait_for_common(struct completion *x, long timeout, int state)
 {
-	might_sleep();
-
-	spin_lock_irq(&x->wait.lock);
 	if (!x->done) {
 		DECLARE_WAITQUEUE(wait, current);
 
 		wait.flags |= WQ_FLAG_EXCLUSIVE;
 		__add_wait_queue_tail(&x->wait, &wait);
 		do {
-			__set_current_state(TASK_UNINTERRUPTIBLE);
-			spin_unlock_irq(&x->wait.lock);
-			schedule();
-			spin_lock_irq(&x->wait.lock);
-		} while (!x->done);
-		__remove_wait_queue(&x->wait, &wait);
-	}
-	x->done--;
-	spin_unlock_irq(&x->wait.lock);
-}
-EXPORT_SYMBOL(wait_for_completion);
-
-unsigned long fastcall __sched
-wait_for_completion_timeout(struct completion *x, unsigned long timeout)
-{
-	might_sleep();
-
-	spin_lock_irq(&x->wait.lock);
-	if (!x->done) {
-		DECLARE_WAITQUEUE(wait, current);
-
-		wait.flags |= WQ_FLAG_EXCLUSIVE;
-		__add_wait_queue_tail(&x->wait, &wait);
-		do {
-			__set_current_state(TASK_UNINTERRUPTIBLE);
+			if (state == TASK_INTERRUPTIBLE &&
+			    signal_pending(current)) {
+				__remove_wait_queue(&x->wait, &wait);
+				return -ERESTARTSYS;
+			}
+			__set_current_state(state);
 			spin_unlock_irq(&x->wait.lock);
 			timeout = schedule_timeout(timeout);
 			spin_lock_irq(&x->wait.lock);
 			if (!timeout) {
 				__remove_wait_queue(&x->wait, &wait);
-				goto out;
+				return timeout;
 			}
 		} while (!x->done);
 		__remove_wait_queue(&x->wait, &wait);
 	}
 	x->done--;
-out:
-	spin_unlock_irq(&x->wait.lock);
 	return timeout;
 }
-EXPORT_SYMBOL(wait_for_completion_timeout);
 
-int fastcall __sched wait_for_completion_interruptible(struct completion *x)
+static long __sched
+wait_for_common(struct completion *x, long timeout, int state)
 {
-	int ret = 0;
-
 	might_sleep();
 
 	spin_lock_irq(&x->wait.lock);
-	if (!x->done) {
-		DECLARE_WAITQUEUE(wait, current);
-
-		wait.flags |= WQ_FLAG_EXCLUSIVE;
-		__add_wait_queue_tail(&x->wait, &wait);
-		do {
-			if (signal_pending(current)) {
-				ret = -ERESTARTSYS;
-				__remove_wait_queue(&x->wait, &wait);
-				goto out;
-			}
-			__set_current_state(TASK_INTERRUPTIBLE);
-			spin_unlock_irq(&x->wait.lock);
-			schedule();
-			spin_lock_irq(&x->wait.lock);
-		} while (!x->done);
-		__remove_wait_queue(&x->wait, &wait);
-	}
-	x->done--;
-out:
+	timeout = do_wait_for_common(x, timeout, state);
 	spin_unlock_irq(&x->wait.lock);
+	return timeout;
+}
 
-	return ret;
+void fastcall __sched wait_for_completion(struct completion *x)
+{
+	wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
 }
-EXPORT_SYMBOL(wait_for_completion_interruptible);
+EXPORT_SYMBOL(wait_for_completion);
 
 unsigned long fastcall __sched
-wait_for_completion_interruptible_timeout(struct completion *x,
-					  unsigned long timeout)
+wait_for_completion_timeout(struct completion *x, unsigned long timeout)
 {
-	might_sleep();
-
-	spin_lock_irq(&x->wait.lock);
-	if (!x->done) {
-		DECLARE_WAITQUEUE(wait, current);
-
-		wait.flags |= WQ_FLAG_EXCLUSIVE;
-		__add_wait_queue_tail(&x->wait, &wait);
-		do {
-			if (signal_pending(current)) {
-				timeout = -ERESTARTSYS;
-				__remove_wait_queue(&x->wait, &wait);
-				goto out;
-			}
-			__set_current_state(TASK_INTERRUPTIBLE);
-			spin_unlock_irq(&x->wait.lock);
-			timeout = schedule_timeout(timeout);
-			spin_lock_irq(&x->wait.lock);
-			if (!timeout) {
-				__remove_wait_queue(&x->wait, &wait);
-				goto out;
-			}
-		} while (!x->done);
-		__remove_wait_queue(&x->wait, &wait);
-	}
-	x->done--;
-out:
-	spin_unlock_irq(&x->wait.lock);
-	return timeout;
+	return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
 }
-EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
+EXPORT_SYMBOL(wait_for_completion_timeout);
 
-static inline void
-sleep_on_head(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
+int __sched wait_for_completion_interruptible(struct completion *x)
 {
-	spin_lock_irqsave(&q->lock, *flags);
-	__add_wait_queue(q, wait);
-	spin_unlock(&q->lock);
+	return wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
 }
+EXPORT_SYMBOL(wait_for_completion_interruptible);
 
-static inline void
-sleep_on_tail(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
+unsigned long fastcall __sched
+wait_for_completion_interruptible_timeout(struct completion *x,
+					  unsigned long timeout)
 {
-	spin_lock_irq(&q->lock);
-	__remove_wait_queue(q, wait);
-	spin_unlock_irqrestore(&q->lock, *flags);
+	return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
 }
+EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
 
-void __sched interruptible_sleep_on(wait_queue_head_t *q)
+static long __sched
+sleep_on_common(wait_queue_head_t *q, int state, long timeout)
 {
 	unsigned long flags;
 	wait_queue_t wait;
 
 	init_waitqueue_entry(&wait, current);
 
-	current->state = TASK_INTERRUPTIBLE;
+	__set_current_state(state);
 
-	sleep_on_head(q, &wait, &flags);
-	schedule();
-	sleep_on_tail(q, &wait, &flags);
+	spin_lock_irqsave(&q->lock, flags);
+	__add_wait_queue(q, &wait);
+	spin_unlock(&q->lock);
+	timeout = schedule_timeout(timeout);
+	spin_lock_irq(&q->lock);
+	__remove_wait_queue(q, &wait);
+	spin_unlock_irqrestore(&q->lock, flags);
+
+	return timeout;
+}
+
+void __sched interruptible_sleep_on(wait_queue_head_t *q)
+{
+	sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 }
 EXPORT_SYMBOL(interruptible_sleep_on);
 
 long __sched
 interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
-	unsigned long flags;
-	wait_queue_t wait;
-
-	init_waitqueue_entry(&wait, current);
-
-	current->state = TASK_INTERRUPTIBLE;
-
-	sleep_on_head(q, &wait, &flags);
-	timeout = schedule_timeout(timeout);
-	sleep_on_tail(q, &wait, &flags);
-
-	return timeout;
+	return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
 }
 EXPORT_SYMBOL(interruptible_sleep_on_timeout);
 
 void __sched sleep_on(wait_queue_head_t *q)
 {
-	unsigned long flags;
-	wait_queue_t wait;
-
-	init_waitqueue_entry(&wait, current);
-
-	current->state = TASK_UNINTERRUPTIBLE;
-
-	sleep_on_head(q, &wait, &flags);
-	schedule();
-	sleep_on_tail(q, &wait, &flags);
+	sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 }
 EXPORT_SYMBOL(sleep_on);
 
 long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
 {
-	unsigned long flags;
-	wait_queue_t wait;
-
-	init_waitqueue_entry(&wait, current);
-
-	current->state = TASK_UNINTERRUPTIBLE;
-
-	sleep_on_head(q, &wait, &flags);
-	timeout = schedule_timeout(timeout);
-	sleep_on_tail(q, &wait, &flags);
-
-	return timeout;
+	return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
 }
 EXPORT_SYMBOL(sleep_on_timeout);
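
Callers of the refactored functions are unchanged. As a hedged usage sketch (the driver context, my_done, and my_wait_for_device() are invented for illustration; only the completion calls themselves are the real API), the interruptible, timed variant reports all three outcomes that do_wait_for_common() now distinguishes:

/* Usage sketch -- an invented caller, not part of this patch.  It shows
 * the return convention the unified helper implements for the
 * interruptible, timed variant: <0 signal, 0 timeout, >0 jiffies left. */
#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_COMPLETION(my_done);	/* completed elsewhere, e.g. in an IRQ handler via complete(&my_done) */

static int my_wait_for_device(void)
{
	long left;

	left = wait_for_completion_interruptible_timeout(&my_done,
						msecs_to_jiffies(100));
	if (left == -ERESTARTSYS)
		return left;		/* signal_pending() fired in the helper */
	if (left == 0)
		return -ETIMEDOUT;	/* schedule_timeout() expired */
	return 0;			/* completed, with "left" jiffies to spare */
}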