Diffstat (limited to 'arch/arm/kernel/smp.c'):
 arch/arm/kernel/smp.c | 87 --------------------------------------------------
 1 file changed, 87 deletions(-)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c2b4f8f0be9a..3a98192a3118 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -41,7 +41,6 @@
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
-#include <asm/localtimer.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include <asm/mach/arch.h>
@@ -146,8 +145,6 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle)
}
#ifdef CONFIG_HOTPLUG_CPU
-static void percpu_timer_stop(void);
-
static int platform_cpu_kill(unsigned int cpu)
{
if (smp_ops.cpu_kill)
@@ -191,11 +188,6 @@ int __cpu_disable(void)
migrate_irqs();
/*
- * Stop the local timer for this CPU.
- */
- percpu_timer_stop();
-
- /*
* Flush user cache and TLB mappings, and then remove this CPU
* from the vm mask set of all processes.
*
@@ -316,8 +308,6 @@ static void smp_store_cpu_info(unsigned int cpuid)
store_cpu_topology(cpuid);
}
-static void percpu_timer_setup(void);
-
/*
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
@@ -372,11 +362,6 @@ asmlinkage void secondary_start_kernel(void)
set_cpu_online(cpu, true);
complete(&cpu_running);
- /*
- * Setup the percpu timer for this CPU.
- */
- percpu_timer_setup();
-
local_irq_enable();
local_fiq_enable();
@@ -423,12 +408,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
max_cpus = ncores;
if (ncores > 1 && max_cpus) {
/*
- * Enable the local timer or broadcast device for the
- * boot CPU, but only if we have more than one CPU.
- */
- percpu_timer_setup();
-
- /*
* Initialise the present map, which describes the set of CPUs
* actually populated at the present time. A platform should
* re-initialize the map in the platforms smp_prepare_cpus()
@@ -504,11 +483,6 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
return sum;
}
-/*
- * Timer (local or broadcast) support
- */
-static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
-
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
void tick_broadcast(const struct cpumask *mask)
{
@@ -516,67 +490,6 @@ void tick_broadcast(const struct cpumask *mask)
}
#endif
-static void broadcast_timer_set_mode(enum clock_event_mode mode,
- struct clock_event_device *evt)
-{
-}
-
-static void broadcast_timer_setup(struct clock_event_device *evt)
-{
- evt->name = "dummy_timer";
- evt->features = CLOCK_EVT_FEAT_ONESHOT |
- CLOCK_EVT_FEAT_PERIODIC |
- CLOCK_EVT_FEAT_DUMMY;
- evt->rating = 100;
- evt->mult = 1;
- evt->set_mode = broadcast_timer_set_mode;
-
- clockevents_register_device(evt);
-}
-
-static struct local_timer_ops *lt_ops;
-
-#ifdef CONFIG_LOCAL_TIMERS
-int local_timer_register(struct local_timer_ops *ops)
-{
- if (!is_smp() || !setup_max_cpus)
- return -ENXIO;
-
- if (lt_ops)
- return -EBUSY;
-
- lt_ops = ops;
- return 0;
-}
-#endif
-
-static void percpu_timer_setup(void)
-{
- unsigned int cpu = smp_processor_id();
- struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
-
- evt->cpumask = cpumask_of(cpu);
-
- if (!lt_ops || lt_ops->setup(evt))
- broadcast_timer_setup(evt);
-}
-
-#ifdef CONFIG_HOTPLUG_CPU
-/*
- * The generic clock events code purposely does not stop the local timer
- * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
- * manually here.
- */
-static void percpu_timer_stop(void)
-{
- unsigned int cpu = smp_processor_id();
- struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
-
- if (lt_ops)
- lt_ops->stop(evt);
-}
-#endif
-
static DEFINE_RAW_SPINLOCK(stop_lock);
/*
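
Note (not part of this patch): with the local timer API removed above, a per-CPU
timer driver no longer goes through local_timer_register(); it hands its
clock_event_device straight to the generic clockevents core. The sketch below is
purely illustrative — my_percpu_evt, my_timer_setup_this_cpu and the rate/rating
values are hypothetical names, not anything introduced by this diff — and assumes
clockevents_config_and_register() from <linux/clockchips.h>.

#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/smp.h>

/* Hypothetical per-CPU event device; a real driver defines its own. */
static DEFINE_PER_CPU(struct clock_event_device, my_percpu_evt);

/* Runs on the CPU whose timer is being brought up. */
static void my_timer_setup_this_cpu(u32 rate)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(my_percpu_evt, cpu);

	evt->name = "my_percpu_timer";
	evt->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC;
	evt->rating = 300;
	evt->cpumask = cpumask_of(cpu);
	/* A real driver also fills in .set_mode / .set_next_event here. */

	/* Let the clockevents core derive mult/shift and register evt. */
	clockevents_config_and_register(evt, rate, 0xf, 0x7fffffff);
}

The broadcast fallback that percpu_timer_setup() used to provide is likewise
handled by the generic dummy/broadcast clockevent machinery rather than by
arch/arm/kernel/smp.c.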