author: Carl Love <cel@us.ibm.com> 2008-12-01 16:18:34 -0800
committer: Robert Richter <robert.richter@amd.com> 2009-01-08 15:49:39 +0100
commit: 9b93418e7ee59dbc96d44cfde7f65f886e54dba9 (patch)
tree: 39f2e913f17b3a9dc50b6af39a32489a735ce3a6 /arch/powerpc/oprofile/cell
parent: 4a6908a3a050aacc9c3a2f36b276b46c0629ad91 (diff)
powerpc/oprofile: IBM CELL: cleanup and restructuring
This patch restructures and cleans up the code a bit to make it easier to add new functionality later. The patch makes no functional changes to the existing code.

Signed-off-by: Carl Love <carll@us.ibm.com>
Signed-off-by: Robert Richter <robert.richter@amd.com>
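The heart of the patch is a rename of the lock guarding the shared SPU sample array and of the cycle-profiling entry points. A minimal sketch of the guarded collection pass, using the new identifiers from the patch below (the loop structure is simplified and the event-buffer hand-off is elided):

#include <linux/spinlock.h>
#include <linux/cpumask.h>

/* The real collector is static in spu_profiler.c; declared here only
 * so the sketch is self-contained. It loads the shared sample array
 * for one CPU and returns the number of samples collected. */
int cell_spu_pc_collection(int cpu);

static DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck);
unsigned long oprof_spu_smpl_arry_lck_flags;

static void collect_spu_samples(void)
{
	int cpu, num_samples;

	for_each_online_cpu(cpu) {
		/* The sample array is shared, not per CPU: it must be
		 * loaded and then processed for a given CPU under the
		 * lock, with interrupts off. */
		spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
				  oprof_spu_smpl_arry_lck_flags);
		num_samples = cell_spu_pc_collection(cpu);
		if (num_samples == 0) {
			spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
					       oprof_spu_smpl_arry_lck_flags);
			continue;
		}
		/* ... copy num_samples entries into the SPU event buffer ... */
		spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
				       oprof_spu_smpl_arry_lck_flags);
	}
}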
Diffstat (limited to 'arch/powerpc/oprofile/cell')
-rw-r--r-- arch/powerpc/oprofile/cell/spu_profiler.c | 24
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
index dd499c3e9da7..8b1b9ccaff9f 100644
--- a/arch/powerpc/oprofile/cell/spu_profiler.c
+++ b/arch/powerpc/oprofile/cell/spu_profiler.c
@@ -31,8 +31,8 @@ static unsigned int profiling_interval;
#define SPU_PC_MASK 0xFFFF
-static DEFINE_SPINLOCK(sample_array_lock);
-unsigned long sample_array_lock_flags;
+static DEFINE_SPINLOCK(oprof_spu_smpl_arry_lck);
+unsigned long oprof_spu_smpl_arry_lck_flags;
void set_spu_profiling_frequency(unsigned int freq_khz, unsigned int cycles_reset)
{
@@ -145,13 +145,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
* sample array must be loaded and then processed for a given
* cpu. The sample array is not per cpu.
*/
- spin_lock_irqsave(&sample_array_lock,
- sample_array_lock_flags);
+ spin_lock_irqsave(&oprof_spu_smpl_arry_lck,
+ oprof_spu_smpl_arry_lck_flags);
num_samples = cell_spu_pc_collection(cpu);
if (num_samples == 0) {
- spin_unlock_irqrestore(&sample_array_lock,
- sample_array_lock_flags);
+ spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+ oprof_spu_smpl_arry_lck_flags);
continue;
}
@@ -162,8 +162,8 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
num_samples);
}
- spin_unlock_irqrestore(&sample_array_lock,
- sample_array_lock_flags);
+ spin_unlock_irqrestore(&oprof_spu_smpl_arry_lck,
+ oprof_spu_smpl_arry_lck_flags);
}
smp_wmb(); /* insure spu event buffer updates are written */
@@ -182,13 +182,13 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
static struct hrtimer timer;
/*
- * Entry point for SPU profiling.
+ * Entry point for SPU cycle profiling.
* NOTE: SPU profiling is done system-wide, not per-CPU.
*
* cycles_reset is the count value specified by the user when
* setting up OProfile to count SPU_CYCLES.
*/
-int start_spu_profiling(unsigned int cycles_reset)
+int start_spu_profiling_cycles(unsigned int cycles_reset)
{
ktime_t kt;
@@ -212,10 +212,10 @@ int start_spu_profiling(unsigned int cycles_reset)
return 0;
}
-void stop_spu_profiling(void)
+void stop_spu_profiling_cycles(void)
{
spu_prof_running = 0;
hrtimer_cancel(&timer);
kfree(samples);
- pr_debug("SPU_PROF: stop_spu_profiling issued\n");
+ pr_debug("SPU_PROF: stop_spu_profiling_cycles issued\n");
}
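For reference, a hypothetical caller of the renamed entry points (the wrapper names and the spu_cycle_reset variable below are illustrative; only start_spu_profiling_cycles() and stop_spu_profiling_cycles() come from the patch):

/* Prototypes as renamed by this patch. */
int start_spu_profiling_cycles(unsigned int cycles_reset);
void stop_spu_profiling_cycles(void);

/* Stands in for the count value the user configured for SPU_CYCLES. */
static unsigned int spu_cycle_reset;

static int example_start(void)
{
	/* Arms the system-wide hrtimer that drives SPU cycle sampling. */
	return start_spu_profiling_cycles(spu_cycle_reset);
}

static void example_stop(void)
{
	/* Clears spu_prof_running, cancels the hrtimer, frees samples. */
	stop_spu_profiling_cycles();
}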