path: root/arch/ia64
author     Stephane Eranian <eranian@hpl.hp.com>    2005-04-11 13:45:00 -0700
committer  Tony Luck <tony.luck@intel.com>          2005-05-03 15:44:48 -0700
commit     8df5a500a3e97f7811cdce0f553ca1917ccd4220 (patch)
tree       cac08cc58a5e9fb846a9946a52026fcbe7718634 /arch/ia64
parent     012914dad25bd5cacf88af4429eecda62a06020d (diff)
[IA64] perfmon & PAL_HALT again
The pmu_active test is based on the value of PSR.up. THIS IS THE PROBLEM as it does not take into account the lazy restore logic, which is as follows (simplified):

context switch out:
        save PMDs
        clear psr.up
        release ownership

context switch in:
        if (ctx->last_cpu == smp_processor_id() && ctx->activation == cpu_activation) {
                set psr.up
                return
        }
        restore PMD
        restore PMC
        ctx->last_cpu   = smp_processor_id();
        ctx->activation = ++cpu_activation;
        set psr.up

The key here is that on context switch out we clear psr.up, and on context switch in we check whether nobody else has used the PMU on that processor since the last time we ran there. In that case, we assume the PMD/PMC are still ours and we simply reactivate.

The Caliper problem is that between the moment we context switch out and the moment we come back, nobody effectively used the PMU BUT the processor went idle. Normally this would have no impact, but PAL_HALT does alter the PMU registers. In default_idle(), the test on psr.up is not strong enough to cover this case and we go into PAL, which trashes the PMU registers. When we come back we falsely assume that this is our state, yet it is corrupted. Very nasty indeed.

To avoid the problem it is necessary to forbid going into PAL_HALT as soon as perfmon installs some valid state in the PMU registers. This happens when an application attaches a context to a thread or CPU. It is not enough to check the psr/dcr bits. Hence I propose the attached patch. It adds a callback in process.c to modify the condition to enter PAL on idle. Basically, entering PAL_HALT is now conditional on pal_halt=1 AND perfmon saying it is okay.

Signed-off-by: Tony Luck <tony.luck@intel.com>
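For illustration only, here is a minimal stand-alone C model of the lazy-restore check described above. The struct fields follow the simplified pseudo-code in the message, and cpu_activation stands in for the per-CPU activation count kept by perfmon; nothing below is taken from the patch itself.

/*
 * Stand-alone model (user space, not kernel code) of the lazy-restore
 * check described in the commit message.
 */
#include <stdio.h>

static unsigned long cpu_activation;            /* per CPU in the real code */

struct pmu_ctx {
        int           last_cpu;
        unsigned long activation;
};

/* context switch in: reuse the live PMU state if nobody else touched it */
static void ctxsw_in(struct pmu_ctx *ctx, int cpu)
{
        if (ctx->last_cpu == cpu && ctx->activation == cpu_activation) {
                /*
                 * Fast path: just "set psr.up", PMD/PMC assumed intact.
                 * PAL_HALT breaks exactly this assumption: it alters the
                 * PMU registers without bumping cpu_activation, so the
                 * check still succeeds on corrupted state.
                 */
                printf("fast path: reusing PMU state on cpu %d\n", cpu);
                return;
        }
        /* slow path: restore PMDs/PMCs and take ownership */
        ctx->last_cpu   = cpu;
        ctx->activation = ++cpu_activation;
        printf("slow path: restoring PMU state on cpu %d\n", cpu);
}

int main(void)
{
        struct pmu_ctx ctx = { .last_cpu = -1, .activation = 0 };

        ctxsw_in(&ctx, 0);      /* first switch in: slow path             */
        ctxsw_in(&ctx, 0);      /* nothing intervened: fast path is taken */
        return 0;
}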
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/kernel/perfmon.c  13
-rw-r--r--  arch/ia64/kernel/process.c  14
2 files changed, 24 insertions, 3 deletions
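As a rough sketch of the gating the patch below introduces, the following stand-alone C program models it end to end: perfmon turns PAL_HALT off while any session holds live PMU state and turns it back on once the last session is released, and the idle loop then picks safe_halt() or cpu_relax() accordingly. Only the names pal_halt, can_do_pal_halt and update_pal_halt_status() mirror the patch; the session counters, the reserve/unreserve helpers and the printf-based idle step are illustrative stand-ins.

/*
 * Stand-alone model of the PAL_HALT gating added by the patch below.
 */
#include <stdio.h>

static int pal_halt        = 1;   /* cleared by the "nohalt" boot option */
static int can_do_pal_halt = 1;   /* what default_idle() now tests       */

static void update_pal_halt_status(int status)
{
        /* PAL_HALT is allowed only if both conditions hold */
        can_do_pal_halt = pal_halt && status;
}

static int task_sessions, sys_sessions;          /* perfmon session counts */

static void reserve_session(int is_syswide)
{
        if (is_syswide) sys_sessions++; else task_sessions++;
        update_pal_halt_status(0);               /* live PMU state: no halt */
}

static void unreserve_session(int is_syswide)
{
        if (is_syswide) sys_sessions--; else task_sessions--;
        if (task_sessions == 0 && sys_sessions == 0)
                update_pal_halt_status(1);       /* nothing left to trash */
}

static void idle_step(void)
{
        /* one pass of the (simplified) default_idle() loop body */
        if (can_do_pal_halt)
                printf("safe_halt()  -> PAL_HALT may clobber the PMU\n");
        else
                printf("cpu_relax()  -> PMU contents stay intact\n");
}

int main(void)
{
        idle_step();              /* no sessions: halting is fine  */
        reserve_session(0);
        idle_step();              /* session active: must not halt */
        unreserve_session(0);
        idle_step();              /* last session gone: halt again */
        return 0;
}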
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 376fcbc3f8da..fd4f3be6e856 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1265,6 +1265,8 @@ out:
}
EXPORT_SYMBOL(pfm_unregister_buffer_fmt);
+extern void update_pal_halt_status(int);
+
static int
pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
{
@@ -1311,6 +1313,11 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
is_syswide,
cpu));
+ /*
+ * disable default_idle() to go to PAL_HALT
+ */
+ update_pal_halt_status(0);
+
UNLOCK_PFS(flags);
return 0;
@@ -1366,6 +1373,12 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)
is_syswide,
cpu));
+ /*
+ * if possible, enable default_idle() to go into PAL_HALT
+ */
+ if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0)
+ update_pal_halt_status(1);
+
UNLOCK_PFS(flags);
return 0;
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index c0140f4235e4..474d75f9de8a 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -173,7 +173,9 @@ do_notify_resume_user (sigset_t *oldset, struct sigscratch *scr, long in_syscall
ia64_do_signal(oldset, scr, in_syscall);
}
-static int pal_halt = 1;
+static int pal_halt        = 1;
+static int can_do_pal_halt = 1;
+
static int __init nohalt_setup(char * str)
{
pal_halt = 0;
@@ -181,16 +183,22 @@ static int __init nohalt_setup(char * str)
}
__setup("nohalt", nohalt_setup);
+void
+update_pal_halt_status(int status)
+{
+ can_do_pal_halt = pal_halt && status;
+}
+
/*
* We use this if we don't have any better idle routine..
*/
void
default_idle (void)
{
- unsigned long pmu_active = ia64_getreg(_IA64_REG_PSR) & (IA64_PSR_PP | IA64_PSR_UP);
+ int can_do_pal;
while (!need_resched())
- if (pal_halt && !pmu_active)
+ if (can_do_pal_halt)
safe_halt();
else
cpu_relax();