Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/dbell.c             2
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint.c     6
-rw-r--r--  arch/powerpc/kernel/iommu.c             2
-rw-r--r--  arch/powerpc/kernel/irq.c               4
-rw-r--r--  arch/powerpc/kernel/kgdb.c              2
-rw-r--r--  arch/powerpc/kernel/kprobes.c           6
-rw-r--r--  arch/powerpc/kernel/mce.c              24
-rw-r--r--  arch/powerpc/kernel/process.c          10
-rw-r--r--  arch/powerpc/kernel/smp.c               6
-rw-r--r--  arch/powerpc/kernel/sysfs.c             4
-rw-r--r--  arch/powerpc/kernel/time.c             22
-rw-r--r--  arch/powerpc/kernel/traps.c             8
12 files changed, 48 insertions(+), 48 deletions(-)
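
The hunks below all follow the same pattern: the deprecated __get_cpu_var() accessor is replaced by the this_cpu operations (this_cpu_ptr(), __this_cpu_read(), __this_cpu_write(), __this_cpu_inc(), __this_cpu_dec()), which act on the per-CPU variable directly instead of first forming the address of this CPU's copy. A minimal sketch of the idiom is shown here; the demo_stat variable and the two helpers are illustrative only and are not part of this patch:

#include <linux/percpu.h>

/* Hypothetical per-CPU counter, analogous to the irq_stat fields below. */
static DEFINE_PER_CPU(unsigned long, demo_stat);

static void demo_count_event(void)
{
	/*
	 * Old idiom: __get_cpu_var(demo_stat)++;
	 * New idiom: the this_cpu operation performs the per-CPU access itself,
	 * letting the architecture use a single per-CPU instruction where possible.
	 */
	__this_cpu_inc(demo_stat);
}

static unsigned long demo_read_stat(void)
{
	/*
	 * Where a pointer to this CPU's copy is genuinely needed,
	 * this_cpu_ptr(&demo_stat) replaces &__get_cpu_var(demo_stat);
	 * for a plain load, __this_cpu_read() is enough.
	 */
	return __this_cpu_read(demo_stat);
}
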
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index d55c76c571f3..f4217819cc31 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -41,7 +41,7 @@ void doorbell_exception(struct pt_regs *regs)
may_hard_irq_enable();
- __get_cpu_var(irq_stat).doorbell_irqs++;
+ __this_cpu_inc(irq_stat.doorbell_irqs);
smp_ipi_demux();
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 1f7d84e2e8b2..05e804cdecaa 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -63,7 +63,7 @@ int hw_breakpoint_slots(int type)
int arch_install_hw_breakpoint(struct perf_event *bp)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- struct perf_event **slot = &__get_cpu_var(bp_per_reg);
+ struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
*slot = bp;
@@ -88,7 +88,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
*/
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
- struct perf_event **slot = &__get_cpu_var(bp_per_reg);
+ struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
if (*slot != bp) {
WARN_ONCE(1, "Can't find the breakpoint");
@@ -226,7 +226,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
*/
rcu_read_lock();
- bp = __get_cpu_var(bp_per_reg);
+ bp = __this_cpu_read(bp_per_reg);
if (!bp)
goto out;
info = counter_arch_bp(bp);
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index a10642a0d861..71e60bfb89e2 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -208,7 +208,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
* We don't need to disable preemption here because any CPU can
* safely use any IOMMU pool.
*/
- pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1);
+ pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
if (largealloc)
pool = &(tbl->large_pool);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c14383575fe8..8d87bb162ecd 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -114,7 +114,7 @@ static inline notrace void set_soft_enabled(unsigned long enable)
static inline notrace int decrementer_check_overflow(void)
{
u64 now = get_tb_or_rtc();
- u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+ u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
return now >= *next_tb;
}
@@ -499,7 +499,7 @@ void __do_irq(struct pt_regs *regs)
/* And finally process it */
if (unlikely(irq == NO_IRQ))
- __get_cpu_var(irq_stat).spurious_irqs++;
+ __this_cpu_inc(irq_stat.spurious_irqs);
else
generic_handle_irq(irq);
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 8504657379f1..e77c3ccf8dcf 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -155,7 +155,7 @@ static int kgdb_singlestep(struct pt_regs *regs)
{
struct thread_info *thread_info, *exception_thread_info;
struct thread_info *backup_current_thread_info =
- &__get_cpu_var(kgdb_thread_info);
+ this_cpu_ptr(&kgdb_thread_info);
if (user_mode(regs))
return 0;
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 2f72af82513c..7c053f281406 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -119,7 +119,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}
@@ -127,7 +127,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(current_kprobe) = p;
+ __this_cpu_write(current_kprobe, p);
kcb->kprobe_saved_msr = regs->msr;
}
@@ -192,7 +192,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
ret = 1;
goto no_kprobe;
}
- p = __get_cpu_var(current_kprobe);
+ p = __this_cpu_read(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
goto ss_probe;
}
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index a7fd4cb78b78..15c99b649b04 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -73,8 +73,8 @@ void save_mce_event(struct pt_regs *regs, long handled,
uint64_t nip, uint64_t addr)
{
uint64_t srr1;
- int index = __get_cpu_var(mce_nest_count)++;
- struct machine_check_event *mce = &__get_cpu_var(mce_event[index]);
+ int index = __this_cpu_inc_return(mce_nest_count);
+ struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
/*
* Return if we don't have enough space to log mce event.
@@ -143,7 +143,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
*/
int get_mce_event(struct machine_check_event *mce, bool release)
{
- int index = __get_cpu_var(mce_nest_count) - 1;
+ int index = __this_cpu_read(mce_nest_count) - 1;
struct machine_check_event *mc_evt;
int ret = 0;
@@ -153,7 +153,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
/* Check if we have MCE info to process. */
if (index < MAX_MC_EVT) {
- mc_evt = &__get_cpu_var(mce_event[index]);
+ mc_evt = this_cpu_ptr(&mce_event[index]);
/* Copy the event structure and release the original */
if (mce)
*mce = *mc_evt;
@@ -163,7 +163,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
}
/* Decrement the count to free the slot. */
if (release)
- __get_cpu_var(mce_nest_count)--;
+ __this_cpu_dec(mce_nest_count);
return ret;
}
@@ -184,13 +184,13 @@ void machine_check_queue_event(void)
if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
return;
- index = __get_cpu_var(mce_queue_count)++;
+ index = __this_cpu_inc_return(mce_queue_count);
/* If queue is full, just return for now. */
if (index >= MAX_MC_EVT) {
- __get_cpu_var(mce_queue_count)--;
+ __this_cpu_dec(mce_queue_count);
return;
}
- __get_cpu_var(mce_event_queue[index]) = evt;
+ memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));
/* Queue irq work to process this event later. */
irq_work_queue(&mce_event_process_work);
@@ -208,11 +208,11 @@ static void machine_check_process_queued_event(struct irq_work *work)
* For now just print it to console.
* TODO: log this error event to FSP or nvram.
*/
- while (__get_cpu_var(mce_queue_count) > 0) {
- index = __get_cpu_var(mce_queue_count) - 1;
+ while (__this_cpu_read(mce_queue_count) > 0) {
+ index = __this_cpu_read(mce_queue_count) - 1;
machine_check_print_event_info(
- &__get_cpu_var(mce_event_queue[index]));
- __get_cpu_var(mce_queue_count)--;
+ this_cpu_ptr(&mce_event_queue[index]));
+ __this_cpu_dec(mce_queue_count);
}
}
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 923cd2daba89..91e132b495bf 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -499,7 +499,7 @@ static inline int set_dawr(struct arch_hw_breakpoint *brk)
void __set_breakpoint(struct arch_hw_breakpoint *brk)
{
- __get_cpu_var(current_brk) = *brk;
+ memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
if (cpu_has_feature(CPU_FTR_DAWR))
set_dawr(brk);
@@ -842,7 +842,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
* schedule DABR
*/
#ifndef CONFIG_HAVE_HW_BREAKPOINT
- if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
+ if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
__set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif
@@ -856,7 +856,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
* Collect processor utilization data per process
*/
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
- struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
+ struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
long unsigned start_tb, current_tb;
start_tb = old_thread->start_tb;
cu->current_tb = current_tb = mfspr(SPRN_PURR);
@@ -866,7 +866,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC_BOOK3S_64
- batch = &__get_cpu_var(ppc64_tlb_batch);
+ batch = this_cpu_ptr(&ppc64_tlb_batch);
if (batch->active) {
current_thread_info()->local_flags |= _TLF_LAZY_MMU;
if (batch->index)
@@ -889,7 +889,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
#ifdef CONFIG_PPC_BOOK3S_64
if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
- batch = &__get_cpu_var(ppc64_tlb_batch);
+ batch = this_cpu_ptr(&ppc64_tlb_batch);
batch->active = 1;
}
#endif /* CONFIG_PPC_BOOK3S_64 */
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 71e186d5f331..8b2d2dc8ef10 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -243,7 +243,7 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
irqreturn_t smp_ipi_demux(void)
{
- struct cpu_messages *info = &__get_cpu_var(ipi_message);
+ struct cpu_messages *info = this_cpu_ptr(&ipi_message);
unsigned int all;
mb(); /* order any irq clear */
@@ -442,9 +442,9 @@ void generic_mach_cpu_die(void)
idle_task_exit();
cpu = smp_processor_id();
printk(KERN_DEBUG "CPU%d offline\n", cpu);
- __get_cpu_var(cpu_state) = CPU_DEAD;
+ __this_cpu_write(cpu_state, CPU_DEAD);
smp_wmb();
- while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
+ while (__this_cpu_read(cpu_state) != CPU_UP_PREPARE)
cpu_relax();
}
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 67fd2fd2620a..fa1fd8a0c867 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -394,10 +394,10 @@ void ppc_enable_pmcs(void)
ppc_set_pmu_inuse(1);
/* Only need to enable them once */
- if (__get_cpu_var(pmcs_enabled))
+ if (__this_cpu_read(pmcs_enabled))
return;
- __get_cpu_var(pmcs_enabled) = 1;
+ __this_cpu_write(pmcs_enabled, 1);
if (ppc_md.enable_pmcs)
ppc_md.enable_pmcs();
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 7505599c2593..9f8ea617ff2c 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -458,9 +458,9 @@ static inline void clear_irq_work_pending(void)
DEFINE_PER_CPU(u8, irq_work_pending);
-#define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1
-#define test_irq_work_pending() __get_cpu_var(irq_work_pending)
-#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0
+#define set_irq_work_pending_flag() __this_cpu_write(irq_work_pending, 1)
+#define test_irq_work_pending() __this_cpu_read(irq_work_pending)
+#define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0)
#endif /* 32 vs 64 bit */
@@ -482,8 +482,8 @@ void arch_irq_work_raise(void)
static void __timer_interrupt(void)
{
struct pt_regs *regs = get_irq_regs();
- u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
- struct clock_event_device *evt = &__get_cpu_var(decrementers);
+ u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
+ struct clock_event_device *evt = this_cpu_ptr(&decrementers);
u64 now;
trace_timer_interrupt_entry(regs);
@@ -498,7 +498,7 @@ static void __timer_interrupt(void)
*next_tb = ~(u64)0;
if (evt->event_handler)
evt->event_handler(evt);
- __get_cpu_var(irq_stat).timer_irqs_event++;
+ __this_cpu_inc(irq_stat.timer_irqs_event);
} else {
now = *next_tb - now;
if (now <= DECREMENTER_MAX)
@@ -506,13 +506,13 @@ static void __timer_interrupt(void)
/* We may have raced with new irq work */
if (test_irq_work_pending())
set_dec(1);
- __get_cpu_var(irq_stat).timer_irqs_others++;
+ __this_cpu_inc(irq_stat.timer_irqs_others);
}
#ifdef CONFIG_PPC64
/* collect purr register values often, for accurate calculations */
if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
- struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
+ struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
cu->current_tb = mfspr(SPRN_PURR);
}
#endif
@@ -527,7 +527,7 @@ static void __timer_interrupt(void)
void timer_interrupt(struct pt_regs * regs)
{
struct pt_regs *old_regs;
- u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+ u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
/* Ensure a positive value is written to the decrementer, or else
* some CPUs will continue to take decrementer exceptions.
@@ -813,7 +813,7 @@ static void __init clocksource_init(void)
static int decrementer_set_next_event(unsigned long evt,
struct clock_event_device *dev)
{
- __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt;
+ __this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
set_dec(evt);
/* We may have raced with new irq work */
@@ -833,7 +833,7 @@ static void decrementer_set_mode(enum clock_event_mode mode,
/* Interrupt handler for the timer broadcast IPI */
void tick_broadcast_ipi_handler(void)
{
- u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
+ u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
*next_tb = get_tb_or_rtc();
__timer_interrupt();
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 0dc43f9932cf..e6595b72269b 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -295,7 +295,7 @@ long machine_check_early(struct pt_regs *regs)
{
long handled = 0;
- __get_cpu_var(irq_stat).mce_exceptions++;
+ __this_cpu_inc(irq_stat.mce_exceptions);
if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
handled = cur_cpu_spec->machine_check_early(regs);
@@ -304,7 +304,7 @@ long machine_check_early(struct pt_regs *regs)
long hmi_exception_realmode(struct pt_regs *regs)
{
- __get_cpu_var(irq_stat).hmi_exceptions++;
+ __this_cpu_inc(irq_stat.hmi_exceptions);
if (ppc_md.hmi_exception_early)
ppc_md.hmi_exception_early(regs);
@@ -700,7 +700,7 @@ void machine_check_exception(struct pt_regs *regs)
enum ctx_state prev_state = exception_enter();
int recover = 0;
- __get_cpu_var(irq_stat).mce_exceptions++;
+ __this_cpu_inc(irq_stat.mce_exceptions);
/* See if any machine dependent calls. In theory, we would want
* to call the CPU first, and call the ppc_md. one if the CPU
@@ -1519,7 +1519,7 @@ void vsx_unavailable_tm(struct pt_regs *regs)
void performance_monitor_exception(struct pt_regs *regs)
{
- __get_cpu_var(irq_stat).pmu_irqs++;
+ __this_cpu_inc(irq_stat.pmu_irqs);
perf_irq(regs);
}