author    | Frederic Weisbecker <fweisbec@gmail.com>      | 2011-10-07 18:22:09 +0200
committer | Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 2011-12-11 10:31:38 -0800
commit    | 98ad1cc14a5c4fd658f9d72c6ba5c86dfd3ce0d5 (patch)
tree      | 4b5d75e3c9595f341ece93a3ed018d49dad28bf5
parent    | e37e112de3ac64032df45c2db0dbe1e8f1af86b4 (diff)
x86: Call idle notifier after irq_enter()
Interrupts notify the idle exit state before calling irq_enter().
But the notifier code calls rcu_read_lock(), and this is not
allowed while RCU is in an extended quiescent state. We need
to wait for irq_enter() -> rcu_idle_exit() to be called before
doing so; otherwise this results in a grumpy RCU:
[ 0.099991] WARNING: at include/linux/rcupdate.h:194 __atomic_notifier_call_chain+0xd2/0x110()
[ 0.099991] Hardware name: AMD690VM-FMH
[ 0.099991] Modules linked in:
[ 0.099991] Pid: 0, comm: swapper Not tainted 3.0.0-rc6+ #255
[ 0.099991] Call Trace:
[ 0.099991] <IRQ> [<ffffffff81051c8a>] warn_slowpath_common+0x7a/0xb0
[ 0.099991] [<ffffffff81051cd5>] warn_slowpath_null+0x15/0x20
[ 0.099991] [<ffffffff817d6fa2>] __atomic_notifier_call_chain+0xd2/0x110
[ 0.099991] [<ffffffff817d6ff1>] atomic_notifier_call_chain+0x11/0x20
[ 0.099991] [<ffffffff81001873>] exit_idle+0x43/0x50
[ 0.099991] [<ffffffff81020439>] smp_apic_timer_interrupt+0x39/0xa0
[ 0.099991] [<ffffffff817da253>] apic_timer_interrupt+0x13/0x20
[ 0.099991] <EOI> [<ffffffff8100ae67>] ? default_idle+0xa7/0x350
[ 0.099991] [<ffffffff8100ae65>] ? default_idle+0xa5/0x350
[ 0.099991] [<ffffffff8100b19b>] amd_e400_idle+0x8b/0x110
[ 0.099991] [<ffffffff810cb01f>] ? rcu_enter_nohz+0x8f/0x160
[ 0.099991] [<ffffffff810019a0>] cpu_idle+0xb0/0x110
[ 0.099991] [<ffffffff817a7505>] rest_init+0xe5/0x140
[ 0.099991] [<ffffffff817a7468>] ? rest_init+0x48/0x140
[ 0.099991] [<ffffffff81cc5ca3>] start_kernel+0x3d1/0x3dc
[ 0.099991] [<ffffffff81cc5321>] x86_64_start_reservations+0x131/0x135
[ 0.099991] [<ffffffff81cc5412>] x86_64_start_kernel+0xed/0xf4
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Andy Henroid <andrew.d.henroid@intel.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
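
The fix is the same two-line swap in every affected handler: irq_enter() must run first, so that rcu_idle_exit() has pulled the CPU out of the RCU extended quiescent state before exit_idle() fires the idle-exit notifier chain (which takes rcu_read_lock()). A minimal sketch of the resulting handler shape, with a hypothetical handler name; the real handlers are in the diff below:

```c
#include <linux/hardirq.h>	/* irq_enter() / irq_exit() */
#include <linux/ptrace.h>	/* struct pt_regs */
#include <asm/idle.h>		/* exit_idle() */

/*
 * Sketch of the ordering this patch enforces in x86 interrupt
 * handlers; "smp_example_interrupt" is a hypothetical name used
 * only for illustration.
 */
asmlinkage void smp_example_interrupt(struct pt_regs *regs)
{
	irq_enter();	/* irq_enter() -> rcu_idle_exit(): RCU leaves the extended QS */
	exit_idle();	/* idle-exit notifiers may now use rcu_read_lock() safely */

	/* ... acknowledge and handle the interrupt ... */

	irq_exit();	/* RCU may re-enter the extended quiescent state */
}
```

Because irq_enter() already performs the RCU/idle bookkeeping, moving exit_idle() after it requires no other changes; the notifier chain itself is untouched.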
-rw-r--r-- | arch/x86/kernel/apic/apic.c              | 6 +++---
-rw-r--r-- | arch/x86/kernel/apic/io_apic.c           | 2 +-
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/therm_throt.c | 2 +-
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/threshold.c   | 2 +-
-rw-r--r-- | arch/x86/kernel/irq.c                    | 6 +++---
5 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index f98d84caf94c..2cd2d93643dc 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -876,8 +876,8 @@ void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
 	 * Besides, if we don't timer interrupts ignore the global
 	 * interrupt lock, which is the WrongThing (tm) to do.
 	 */
-	exit_idle();
 	irq_enter();
+	exit_idle();
 	local_apic_timer_interrupt();
 	irq_exit();
 
@@ -1809,8 +1809,8 @@ void smp_spurious_interrupt(struct pt_regs *regs)
 {
 	u32 v;
 
-	exit_idle();
 	irq_enter();
+	exit_idle();
 	/*
 	 * Check if this really is a spurious interrupt and ACK it
 	 * if it is a vectored one. Just in case...
@@ -1846,8 +1846,8 @@
 		"Illegal register address",	/* APIC Error Bit 7 */
 	};
 
-	exit_idle();
 	irq_enter();
+	exit_idle();
 	/* First tickle the hardware, only then report what went on. -- REW */
 	v0 = apic_read(APIC_ESR);
 	apic_write(APIC_ESR, 0);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 6d939d7847e2..898055585516 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2421,8 +2421,8 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 	unsigned vector, me;
 
 	ack_APIC_irq();
-	exit_idle();
 	irq_enter();
+	exit_idle();
 
 	me = smp_processor_id();
 	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 787e06c84ea6..ce215616d5b9 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -397,8 +397,8 @@ static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
 
 asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
 {
-	exit_idle();
 	irq_enter();
+	exit_idle();
 	inc_irq_stat(irq_thermal_count);
 	smp_thermal_vector();
 	irq_exit();
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
index d746df2909c9..aa578cadb940 100644
--- a/arch/x86/kernel/cpu/mcheck/threshold.c
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
@@ -19,8 +19,8 @@ void (*mce_threshold_vector)(void) = default_threshold_interrupt;
 
 asmlinkage void smp_threshold_interrupt(void)
 {
-	exit_idle();
 	irq_enter();
+	exit_idle();
 	inc_irq_stat(irq_threshold_count);
 	mce_threshold_vector();
 	irq_exit();
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 429e0c92924e..5d31e5bdbf85 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -181,8 +181,8 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
 	unsigned vector = ~regs->orig_ax;
 	unsigned irq;
 
-	exit_idle();
 	irq_enter();
+	exit_idle();
 
 	irq = __this_cpu_read(vector_irq[vector]);
 
@@ -209,10 +209,10 @@ void smp_x86_platform_ipi(struct pt_regs *regs)
 
 	ack_APIC_irq();
 
-	exit_idle();
-
 	irq_enter();
 
+	exit_idle();
+
 	inc_irq_stat(x86_platform_ipis);
 
 	if (x86_platform_ipi_callback)