Diffstat (limited to 'arch/ppc64/kernel/pSeries_setup.c')
-rw-r--r--	arch/ppc64/kernel/pSeries_setup.c	| 156
1 files changed, 156 insertions, 0 deletions
diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/ppc64/kernel/pSeries_setup.c
index 44d9af72d225..5bec956e44a0 100644
--- a/arch/ppc64/kernel/pSeries_setup.c
+++ b/arch/ppc64/kernel/pSeries_setup.c
@@ -19,6 +19,7 @@
#undef DEBUG
#include <linux/config.h>
+#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -82,6 +83,9 @@ int fwnmi_active; /* TRUE if an FWNMI handler is present */
extern void pSeries_system_reset_exception(struct pt_regs *regs);
extern int pSeries_machine_check_exception(struct pt_regs *regs);
+static int pseries_shared_idle(void);
+static int pseries_dedicated_idle(void);
+
static volatile void __iomem * chrp_int_ack_special;
struct mpic *pSeries_mpic;
@@ -229,6 +233,20 @@ static void __init pSeries_setup_arch(void)
	if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR)
		vpa_init(boot_cpuid);
+
+	/* Choose an idle loop */
+	if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR) {
+		if (get_paca()->lppaca.shared_proc) {
+			printk(KERN_INFO "Using shared processor idle loop\n");
+			ppc_md.idle_loop = pseries_shared_idle;
+		} else {
+			printk(KERN_INFO "Using dedicated idle loop\n");
+			ppc_md.idle_loop = pseries_dedicated_idle;
+		}
+	} else {
+		printk(KERN_INFO "Using default idle loop\n");
+		ppc_md.idle_loop = default_idle;
+	}
}
static int __init pSeries_init_panel(void)
@@ -418,6 +436,144 @@ static int __init pSeries_probe(int platform)
	return 1;
}
+DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
+
+static inline void dedicated_idle_sleep(unsigned int cpu)
+{
+	struct paca_struct *ppaca = &paca[cpu ^ 1];
+
+	/* Only sleep if the other thread is not idle */
+	if (!(ppaca->lppaca.idle)) {
+		local_irq_disable();
+
+		/*
+		 * We are about to sleep the thread and so wont be polling any
+		 * more.
+		 */
+		clear_thread_flag(TIF_POLLING_NRFLAG);
+
+		/*
+		 * SMT dynamic mode. Cede will result in this thread going
+		 * dormant, if the partner thread is still doing work. Thread
+		 * wakes up if partner goes idle, an interrupt is presented, or
+		 * a prod occurs. Returning from the cede enables external
+		 * interrupts.
+		 */
+		if (!need_resched())
+			cede_processor();
+		else
+			local_irq_enable();
+	} else {
+		/*
+		 * Give the HV an opportunity at the processor, since we are
+		 * not doing any work.
+		 */
+		poll_pending();
+	}
+}
+
+static int pseries_dedicated_idle(void)
+{
+	long oldval;
+	struct paca_struct *lpaca = get_paca();
+	unsigned int cpu = smp_processor_id();
+	unsigned long start_snooze;
+	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
+
+	while (1) {
+		/*
+		 * Indicate to the HV that we are idle. Now would be
+		 * a good time to find other work to dispatch.
+		 */
+		lpaca->lppaca.idle = 1;
+
+		oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+		if (!oldval) {
+			set_thread_flag(TIF_POLLING_NRFLAG);
+
+			start_snooze = __get_tb() +
+				*smt_snooze_delay * tb_ticks_per_usec;
+
+			while (!need_resched() && !cpu_is_offline(cpu)) {
+				ppc64_runlatch_off();
+
+				/*
+				 * Go into low thread priority and possibly
+				 * low power mode.
+				 */
+				HMT_low();
+				HMT_very_low();
+
+				if (*smt_snooze_delay != 0 &&
+				    __get_tb() > start_snooze) {
+					HMT_medium();
+					dedicated_idle_sleep(cpu);
+				}
+
+			}
+
+			HMT_medium();
+			clear_thread_flag(TIF_POLLING_NRFLAG);
+		} else {
+			set_need_resched();
+		}
+
+		lpaca->lppaca.idle = 0;
+		ppc64_runlatch_on();
+
+		schedule();
+
+		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
+			cpu_die();
+	}
+}
+
+static int pseries_shared_idle(void)
+{
+	struct paca_struct *lpaca = get_paca();
+	unsigned int cpu = smp_processor_id();
+
+	while (1) {
+		/*
+		 * Indicate to the HV that we are idle. Now would be
+		 * a good time to find other work to dispatch.
+		 */
+		lpaca->lppaca.idle = 1;
+
+		while (!need_resched() && !cpu_is_offline(cpu)) {
+			local_irq_disable();
+			ppc64_runlatch_off();
+
+			/*
+			 * Yield the processor to the hypervisor. We return if
+			 * an external interrupt occurs (which are driven prior
+			 * to returning here) or if a prod occurs from another
+			 * processor. When returning here, external interrupts
+			 * are enabled.
+			 *
+			 * Check need_resched() again with interrupts disabled
+			 * to avoid a race.
+			 */
+			if (!need_resched())
+				cede_processor();
+			else
+				local_irq_enable();
+
+			HMT_medium();
+		}
+
+		lpaca->lppaca.idle = 0;
+		ppc64_runlatch_on();
+
+		schedule();
+
+		if (cpu_is_offline(cpu) && system_state == SYSTEM_RUNNING)
+			cpu_die();
+	}
+
+	return 0;
+}
+
struct machdep_calls __initdata pSeries_md = {
	.probe			= pSeries_probe,
	.setup_arch		= pSeries_setup_arch,
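
Note: the idle_loop member that pSeries_setup_arch() now fills in is assumed to be invoked from the generic ppc64 idle entry point. The call site below is an illustrative sketch of that dispatch only, not part of the patch above.

/*
 * Illustrative sketch (assumption, not part of this diff): the generic
 * idle entry simply defers to whichever loop pSeries_setup_arch()
 * installed in ppc_md.idle_loop (shared, dedicated, or default_idle).
 */
int cpu_idle(void)
{
	return ppc_md.idle_loop();
}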