-rw-r--r--  arch/alpha/kernel/perf_event.c | 16
-rw-r--r--  arch/alpha/kernel/time.c | 6
-rw-r--r--  arch/arm/kernel/smp_twd.c | 12
-rw-r--r--  arch/avr32/kernel/kprobes.c | 2
-rw-r--r--  arch/blackfin/include/asm/ipipe.h | 2
-rw-r--r--  arch/blackfin/kernel/perf_event.c | 10
-rw-r--r--  arch/blackfin/mach-common/ints-priority.c | 8
-rw-r--r--  arch/blackfin/mach-common/smp.c | 2
-rw-r--r--  arch/ia64/include/asm/hw_irq.h | 2
-rw-r--r--  arch/ia64/include/asm/sn/arch.h | 4
-rw-r--r--  arch/ia64/include/asm/sn/nodepda.h | 2
-rw-r--r--  arch/ia64/include/asm/switch_to.h | 2
-rw-r--r--  arch/ia64/include/asm/uv/uv_hub.h | 2
-rw-r--r--  arch/ia64/kernel/irq.c | 2
-rw-r--r--  arch/ia64/kernel/irq_ia64.c | 4
-rw-r--r--  arch/ia64/kernel/kprobes.c | 6
-rw-r--r--  arch/ia64/kernel/mca.c | 16
-rw-r--r--  arch/ia64/kernel/process.c | 6
-rw-r--r--  arch/ia64/kernel/traps.c | 2
-rw-r--r--  arch/ia64/sn/kernel/setup.c | 2
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn2_smp.c | 28
-rw-r--r--  arch/metag/kernel/perf/perf_event.c | 14
-rw-r--r--  arch/mips/cavium-octeon/octeon-irq.c | 30
-rw-r--r--  arch/mips/include/asm/fpu_emulator.h | 24
-rw-r--r--  arch/mips/kernel/kprobes.c | 6
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c | 14
-rw-r--r--  arch/mips/kernel/smp-bmips.c | 2
-rw-r--r--  arch/mips/loongson/loongson-3/smp.c | 6
-rw-r--r--  arch/powerpc/include/asm/cputime.h | 6
-rw-r--r--  arch/s390/include/asm/irq.h | 2
-rw-r--r--  arch/s390/include/asm/percpu.h | 16
-rw-r--r--  arch/s390/kernel/idle.c | 2
-rw-r--r--  arch/s390/kernel/kprobes.c | 8
-rw-r--r--  arch/s390/kernel/nmi.c | 10
-rw-r--r--  arch/s390/kernel/perf_cpum_cf.c | 22
-rw-r--r--  arch/s390/kernel/perf_cpum_sf.c | 16
-rw-r--r--  arch/s390/kernel/processor.c | 2
-rw-r--r--  arch/s390/kernel/time.c | 6
-rw-r--r--  arch/s390/oprofile/hwsampler.c | 2
-rw-r--r--  arch/sparc/include/asm/cpudata_32.h | 2
-rw-r--r--  arch/sparc/include/asm/cpudata_64.h | 2
-rw-r--r--  arch/sparc/kernel/kprobes.c | 6
-rw-r--r--  arch/sparc/kernel/leon_smp.c | 2
-rw-r--r--  arch/sparc/kernel/nmi.c | 16
-rw-r--r--  arch/sparc/kernel/pci_sun4v.c | 8
-rw-r--r--  arch/sparc/kernel/perf_event.c | 26
-rw-r--r--  arch/sparc/kernel/sun4d_smp.c | 2
-rw-r--r--  arch/sparc/kernel/time_64.c | 2
-rw-r--r--  arch/sparc/mm/tlb.c | 4
-rw-r--r--  arch/tile/include/asm/irqflags.h | 4
-rw-r--r--  arch/tile/include/asm/mmu_context.h | 6
-rw-r--r--  arch/tile/kernel/irq.c | 14
-rw-r--r--  arch/tile/kernel/messaging.c | 4
-rw-r--r--  arch/tile/kernel/perf_event.c | 12
-rw-r--r--  arch/tile/kernel/process.c | 2
-rw-r--r--  arch/tile/kernel/setup.c | 3
-rw-r--r--  arch/tile/kernel/single_step.c | 4
-rw-r--r--  arch/tile/kernel/smp.c | 2
-rw-r--r--  arch/tile/kernel/smpboot.c | 6
-rw-r--r--  arch/tile/kernel/time.c | 8
-rw-r--r--  arch/tile/mm/highmem.c | 2
-rw-r--r--  arch/tile/mm/init.c | 4
-rw-r--r--  arch/x86/include/asm/debugreg.h | 4
-rw-r--r--  arch/x86/include/asm/perf_event_p4.h | 2
-rw-r--r--  arch/x86/include/asm/uv/uv_hub.h | 12
-rw-r--r--  arch/x86/kernel/apb_timer.c | 4
-rw-r--r--  arch/x86/kernel/apic/apic.c | 4
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c | 2
-rw-r--r--  arch/x86/kernel/cpu/common.c | 6
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce-inject.c | 6
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 46
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel.c | 22
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 22
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd.c | 4
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel.c | 18
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_ds.c | 20
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_lbr.c | 12
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_rapl.c | 12
-rw-r--r--  arch/x86/kernel/cpu/perf_event_knc.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_p4.c | 6
-rw-r--r--  arch/x86/kernel/hw_breakpoint.c | 8
-rw-r--r--  arch/x86/kernel/irq_64.c | 6
-rw-r--r--  arch/x86/kernel/kvm.c | 22
-rw-r--r--  arch/x86/kvm/svm.c | 6
-rw-r--r--  arch/x86/kvm/vmx.c | 10
-rw-r--r--  arch/x86/kvm/x86.c | 2
-rw-r--r--  arch/x86/mm/kmemcheck/kmemcheck.c | 14
-rw-r--r--  arch/x86/oprofile/nmi_int.c | 8
-rw-r--r--  arch/x86/oprofile/op_model_p4.c | 2
-rw-r--r--  arch/x86/platform/uv/uv_nmi.c | 40
-rw-r--r--  arch/x86/platform/uv/uv_time.c | 2
-rw-r--r--  arch/x86/xen/enlighten.c | 4
-rw-r--r--  arch/x86/xen/multicalls.c | 8
-rw-r--r--  arch/x86/xen/spinlock.c | 2
-rw-r--r--  arch/x86/xen/time.c | 10
-rw-r--r--  drivers/char/random.c | 2
-rw-r--r--  drivers/clocksource/dummy_timer.c | 2
-rw-r--r--  drivers/clocksource/metag_generic.c | 2
-rw-r--r--  drivers/clocksource/qcom-timer.c | 2
-rw-r--r--  drivers/cpuidle/governors/ladder.c | 4
-rw-r--r--  drivers/cpuidle/governors/menu.c | 6
-rw-r--r--  drivers/irqchip/irq-gic.c | 10
-rw-r--r--  drivers/md/dm-stats.c | 2
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c | 22
-rw-r--r--  drivers/net/ethernet/tile/tilepro.c | 8
-rw-r--r--  drivers/oprofile/cpu_buffer.c | 10
-rw-r--r--  drivers/oprofile/timer_int.c | 2
-rw-r--r--  drivers/s390/cio/ccwreq.c | 2
-rw-r--r--  drivers/s390/cio/chsc_sch.c | 2
-rw-r--r--  drivers/s390/cio/cio.c | 6
-rw-r--r--  drivers/s390/cio/device_fsm.c | 4
-rw-r--r--  drivers/s390/cio/eadm_sch.c | 2
-rw-r--r--  fs/ext4/mballoc.c | 2
-rw-r--r--  include/linux/cpumask.h | 11
-rw-r--r--  include/linux/kernel_stat.h | 4
-rw-r--r--  include/linux/percpu-defs.h | 3
-rw-r--r--  include/net/netfilter/nf_conntrack.h | 2
-rw-r--r--  include/net/snmp.h | 6
-rw-r--r--  kernel/events/callchain.c | 4
-rw-r--r--  kernel/events/core.c | 24
-rw-r--r--  kernel/irq/chip.c | 2
-rw-r--r--  kernel/irq_work.c | 12
-rw-r--r--  kernel/printk/printk.c | 4
-rw-r--r--  kernel/sched/clock.c | 2
-rw-r--r--  kernel/sched/deadline.c | 2
-rw-r--r--  kernel/sched/fair.c | 2
-rw-r--r--  kernel/sched/rt.c | 2
-rw-r--r--  kernel/sched/sched.h | 4
-rw-r--r--  kernel/smp.c | 6
-rw-r--r--  kernel/softirq.c | 4
-rw-r--r--  kernel/taskstats.c | 2
-rw-r--r--  kernel/time/hrtimer.c | 22
-rw-r--r--  kernel/time/tick-broadcast.c | 2
-rw-r--r--  kernel/time/tick-common.c | 6
-rw-r--r--  kernel/time/tick-oneshot.c | 2
-rw-r--r--  kernel/time/tick-sched.c | 24
-rw-r--r--  kernel/time/timer.c | 2
-rw-r--r--  kernel/user-return-notifier.c | 4
-rw-r--r--  kernel/watchdog.c | 12
-rw-r--r--  net/core/dev.c | 14
-rw-r--r--  net/core/drop_monitor.c | 2
-rw-r--r--  net/core/skbuff.c | 2
-rw-r--r--  net/ipv4/route.c | 4
-rw-r--r--  net/ipv4/syncookies.c | 2
-rw-r--r--  net/ipv4/tcp.c | 2
-rw-r--r--  net/ipv4/tcp_output.c | 2
-rw-r--r--  net/ipv6/syncookies.c | 2
149 files changed, 560 insertions, 547 deletions
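
For orientation, the conversions in this series follow a small set of mechanical patterns: an lvalue use of __get_cpu_var() becomes __this_cpu_read()/__this_cpu_write()/__this_cpu_inc(), taking its address (or the address of __raw_get_cpu_var()) becomes this_cpu_ptr()/raw_cpu_ptr(), and the old __this_cpu_ptr() name is replaced by raw_cpu_ptr(). Below is a minimal kernel-style sketch of the before/after forms; the per-CPU variables are made up for illustration and are not part of the patch.

#include <linux/percpu.h>

struct demo_state {
	int pending;
};

static DEFINE_PER_CPU(unsigned long, demo_count);
static DEFINE_PER_CPU(struct demo_state, demo_state);

static void demo_percpu_accessors(void)
{
	struct demo_state *st;

	__this_cpu_inc(demo_count);		/* was: __get_cpu_var(demo_count)++;       */
	__this_cpu_write(demo_count, 0);	/* was: __get_cpu_var(demo_count) = 0;     */
	if (__this_cpu_read(demo_count))	/* was: if (__get_cpu_var(demo_count))     */
		__this_cpu_add(demo_count, 2);

	st = this_cpu_ptr(&demo_state);		/* was: st = &__get_cpu_var(demo_state);   */
	st->pending = 1;

	st = raw_cpu_ptr(&demo_state);		/* was: st = __this_cpu_ptr(&demo_state);  */
	st->pending = 0;
}
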
diff --git a/arch/alpha/kernel/perf_event.c b/arch/alpha/kernel/perf_event.c
index c52e7f0ee5f6..5c218aa3f3df 100644
--- a/arch/alpha/kernel/perf_event.c
+++ b/arch/alpha/kernel/perf_event.c
@@ -431,7 +431,7 @@ static void maybe_change_configuration(struct cpu_hw_events *cpuc)
*/
static int alpha_pmu_add(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int n0;
int ret;
@@ -483,7 +483,7 @@ static int alpha_pmu_add(struct perf_event *event, int flags)
*/
static void alpha_pmu_del(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
unsigned long irq_flags;
int j;
@@ -531,7 +531,7 @@ static void alpha_pmu_read(struct perf_event *event)
static void alpha_pmu_stop(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (!(hwc->state & PERF_HES_STOPPED)) {
cpuc->idx_mask &= ~(1UL<<hwc->idx);
@@ -551,7 +551,7 @@ static void alpha_pmu_stop(struct perf_event *event, int flags)
static void alpha_pmu_start(struct perf_event *event, int flags)
{
struct hw_perf_event *hwc = &event->hw;
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
return;
@@ -724,7 +724,7 @@ static int alpha_pmu_event_init(struct perf_event *event)
*/
static void alpha_pmu_enable(struct pmu *pmu)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (cpuc->enabled)
return;
@@ -750,7 +750,7 @@ static void alpha_pmu_enable(struct pmu *pmu)
static void alpha_pmu_disable(struct pmu *pmu)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (!cpuc->enabled)
return;
@@ -814,8 +814,8 @@ static void alpha_perf_event_irq_handler(unsigned long la_ptr,
struct hw_perf_event *hwc;
int idx, j;
- __get_cpu_var(irq_pmi_count)++;
- cpuc = &__get_cpu_var(cpu_hw_events);
+ __this_cpu_inc(irq_pmi_count);
+ cpuc = this_cpu_ptr(&cpu_hw_events);
/* Completely counting through the PMC's period to trigger a new PMC
* overflow interrupt while in this interrupt routine is utterly
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index ee39cee8064c..643a9dcdf093 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -56,9 +56,9 @@ unsigned long est_cycle_freq;
DEFINE_PER_CPU(u8, irq_work_pending);
-#define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1
-#define test_irq_work_pending() __get_cpu_var(irq_work_pending)
-#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0
+#define set_irq_work_pending_flag() __this_cpu_write(irq_work_pending, 1)
+#define test_irq_work_pending() __this_cpu_read(irq_work_pending)
+#define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0)
void arch_irq_work_raise(void)
{
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index dfc32130bc44..93090213c71c 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -92,7 +92,7 @@ static int twd_timer_ack(void)
static void twd_timer_stop(void)
{
- struct clock_event_device *clk = __this_cpu_ptr(twd_evt);
+ struct clock_event_device *clk = raw_cpu_ptr(twd_evt);
twd_set_mode(CLOCK_EVT_MODE_UNUSED, clk);
disable_percpu_irq(clk->irq);
@@ -108,7 +108,7 @@ static void twd_update_frequency(void *new_rate)
{
twd_timer_rate = *((unsigned long *) new_rate);
- clockevents_update_freq(__this_cpu_ptr(twd_evt), twd_timer_rate);
+ clockevents_update_freq(raw_cpu_ptr(twd_evt), twd_timer_rate);
}
static int twd_rate_change(struct notifier_block *nb,
@@ -134,7 +134,7 @@ static struct notifier_block twd_clk_nb = {
static int twd_clk_init(void)
{
- if (twd_evt && __this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
+ if (twd_evt && raw_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
return clk_notifier_register(twd_clk, &twd_clk_nb);
return 0;
@@ -153,7 +153,7 @@ static void twd_update_frequency(void *data)
{
twd_timer_rate = clk_get_rate(twd_clk);
- clockevents_update_freq(__this_cpu_ptr(twd_evt), twd_timer_rate);
+ clockevents_update_freq(raw_cpu_ptr(twd_evt), twd_timer_rate);
}
static int twd_cpufreq_transition(struct notifier_block *nb,
@@ -179,7 +179,7 @@ static struct notifier_block twd_cpufreq_nb = {
static int twd_cpufreq_init(void)
{
- if (twd_evt && __this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
+ if (twd_evt && raw_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
return cpufreq_register_notifier(&twd_cpufreq_nb,
CPUFREQ_TRANSITION_NOTIFIER);
@@ -269,7 +269,7 @@ static void twd_get_clock(struct device_node *np)
*/
static void twd_timer_setup(void)
{
- struct clock_event_device *clk = __this_cpu_ptr(twd_evt);
+ struct clock_event_device *clk = raw_cpu_ptr(twd_evt);
int cpu = smp_processor_id();
/*
diff --git a/arch/avr32/kernel/kprobes.c b/arch/avr32/kernel/kprobes.c
index f820e9f25520..a94ece4a72c8 100644
--- a/arch/avr32/kernel/kprobes.c
+++ b/arch/avr32/kernel/kprobes.c
@@ -104,7 +104,7 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
static void __kprobes set_current_kprobe(struct kprobe *p)
{
- __get_cpu_var(current_kprobe) = p;
+ __this_cpu_write(current_kprobe, p);
}
static int __kprobes kprobe_handler(struct pt_regs *regs)
diff --git a/arch/blackfin/include/asm/ipipe.h b/arch/blackfin/include/asm/ipipe.h
index 17b5e92e3bc6..fe1160fbff91 100644
--- a/arch/blackfin/include/asm/ipipe.h
+++ b/arch/blackfin/include/asm/ipipe.h
@@ -157,7 +157,7 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul)
}
#define __ipipe_do_root_xirq(ipd, irq) \
- ((ipd)->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)))
+ ((ipd)->irqs[irq].handler(irq, raw_cpu_ptr(&__ipipe_tick_regs)))
#define __ipipe_run_irqtail(irq) /* Must be a macro */ \
do { \
diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c
index ea2032013cc2..1e9c8b0bf486 100644
--- a/arch/blackfin/kernel/perf_event.c
+++ b/arch/blackfin/kernel/perf_event.c
@@ -300,7 +300,7 @@ again:
static void bfin_pmu_stop(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
@@ -318,7 +318,7 @@ static void bfin_pmu_stop(struct perf_event *event, int flags)
static void bfin_pmu_start(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
@@ -335,7 +335,7 @@ static void bfin_pmu_start(struct perf_event *event, int flags)
static void bfin_pmu_del(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
bfin_pmu_stop(event, PERF_EF_UPDATE);
__clear_bit(event->hw.idx, cpuc->used_mask);
@@ -345,7 +345,7 @@ static void bfin_pmu_del(struct perf_event *event, int flags)
static int bfin_pmu_add(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
int ret = -EAGAIN;
@@ -421,7 +421,7 @@ static int bfin_pmu_event_init(struct perf_event *event)
static void bfin_pmu_enable(struct pmu *pmu)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_event *event;
struct hw_perf_event *hwc;
int i;
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 694619365265..dd2af74aff80 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -1309,12 +1309,12 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
#endif
/* This is basically what we need from the register frame. */
- __raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
- __raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
+ __this_cpu_write(__ipipe_tick_regs.ipend, regs->ipend);
+ __this_cpu_write(__ipipe_tick_regs.pc, regs->pc);
if (this_domain != ipipe_root_domain)
- __raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
+ __this_cpu_and(__ipipe_tick_regs.ipend, ~0x10);
else
- __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
+ __this_cpu_or(__ipipe_tick_regs.ipend, 0x10);
}
/*
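
The ints-priority.c hunk above replaces field accesses through __raw_get_cpu_var() with the struct-member forms of the accessors. A rough sketch of that pattern follows; the names and layout are illustrative only, not the real __ipipe_tick_regs.

#include <linux/percpu.h>
#include <linux/types.h>

struct demo_tick_regs {
	unsigned long ipend;
	unsigned long pc;
};

static DEFINE_PER_CPU(struct demo_tick_regs, demo_tick_regs);

static void demo_record_tick(unsigned long ipend, unsigned long pc, bool root_domain)
{
	/* was: __raw_get_cpu_var(demo_tick_regs).ipend = ipend; etc. */
	__this_cpu_write(demo_tick_regs.ipend, ipend);
	__this_cpu_write(demo_tick_regs.pc, pc);
	if (root_domain)
		__this_cpu_or(demo_tick_regs.ipend, 0x10);
	else
		__this_cpu_and(demo_tick_regs.ipend, ~0x10UL);
}
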
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index ba6c30d8534d..8ad3e90cc8fc 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -146,7 +146,7 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
platform_clear_ipi(cpu, IRQ_SUPPLE_1);
smp_rmb();
- bfin_ipi_data = &__get_cpu_var(bfin_ipi);
+ bfin_ipi_data = this_cpu_ptr(&bfin_ipi);
while ((pending = atomic_xchg(&bfin_ipi_data->bits, 0)) != 0) {
msg = 0;
do {
diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h
index 029bab36cd91..668786e84af8 100644
--- a/arch/ia64/include/asm/hw_irq.h
+++ b/arch/ia64/include/asm/hw_irq.h
@@ -159,7 +159,7 @@ static inline ia64_vector __ia64_irq_to_vector(int irq)
static inline unsigned int
__ia64_local_vector_to_irq (ia64_vector vec)
{
- return __get_cpu_var(vector_irq)[vec];
+ return __this_cpu_read(vector_irq[vec]);
}
#endif
diff --git a/arch/ia64/include/asm/sn/arch.h b/arch/ia64/include/asm/sn/arch.h
index 7caa1f44cd95..31eb784866f8 100644
--- a/arch/ia64/include/asm/sn/arch.h
+++ b/arch/ia64/include/asm/sn/arch.h
@@ -57,7 +57,7 @@ struct sn_hub_info_s {
u16 nasid_bitmask;
};
DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
-#define sn_hub_info (&__get_cpu_var(__sn_hub_info))
+#define sn_hub_info this_cpu_ptr(&__sn_hub_info)
#define is_shub2() (sn_hub_info->shub2)
#define is_shub1() (sn_hub_info->shub2 == 0)
@@ -72,7 +72,7 @@ DECLARE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
* cpu.
*/
DECLARE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
-#define sn_cnodeid_to_nasid (&__get_cpu_var(__sn_cnodeid_to_nasid[0]))
+#define sn_cnodeid_to_nasid this_cpu_ptr(&__sn_cnodeid_to_nasid[0])
extern u8 sn_partition_id;
diff --git a/arch/ia64/include/asm/sn/nodepda.h b/arch/ia64/include/asm/sn/nodepda.h
index ee118b901de4..7c8b4710f071 100644
--- a/arch/ia64/include/asm/sn/nodepda.h
+++ b/arch/ia64/include/asm/sn/nodepda.h
@@ -70,7 +70,7 @@ typedef struct nodepda_s nodepda_t;
*/
DECLARE_PER_CPU(struct nodepda_s *, __sn_nodepda);
-#define sn_nodepda (__get_cpu_var(__sn_nodepda))
+#define sn_nodepda __this_cpu_read(__sn_nodepda)
#define NODEPDA(cnodeid) (sn_nodepda->pernode_pdaindr[cnodeid])
/*
diff --git a/arch/ia64/include/asm/switch_to.h b/arch/ia64/include/asm/switch_to.h
index d38c7ea5eea5..e8f3585e7e7a 100644
--- a/arch/ia64/include/asm/switch_to.h
+++ b/arch/ia64/include/asm/switch_to.h
@@ -32,7 +32,7 @@ extern void ia64_load_extra (struct task_struct *task);
#ifdef CONFIG_PERFMON
DECLARE_PER_CPU(unsigned long, pfm_syst_info);
-# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
+# define PERFMON_IS_SYSWIDE() (__this_cpu_read(pfm_syst_info) & 0x1)
#else
# define PERFMON_IS_SYSWIDE() (0)
#endif
diff --git a/arch/ia64/include/asm/uv/uv_hub.h b/arch/ia64/include/asm/uv/uv_hub.h
index 53e9dfacd073..2a88c7204e52 100644
--- a/arch/ia64/include/asm/uv/uv_hub.h
+++ b/arch/ia64/include/asm/uv/uv_hub.h
@@ -108,7 +108,7 @@ struct uv_hub_info_s {
unsigned char n_val;
};
DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
-#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
+#define uv_hub_info this_cpu_ptr(&__uv_hub_info)
#define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
/*
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index f2c418281130..812a1e6b3179 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -42,7 +42,7 @@ ia64_vector __ia64_irq_to_vector(int irq)
unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
{
- return __get_cpu_var(vector_irq)[vec];
+ return __this_cpu_read(vector_irq[vec]);
}
#endif
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 03ea78ed64a9..698d8fefde6c 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -330,7 +330,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
int irq;
struct irq_desc *desc;
struct irq_cfg *cfg;
- irq = __get_cpu_var(vector_irq)[vector];
+ irq = __this_cpu_read(vector_irq[vector]);
if (irq < 0)
continue;
@@ -344,7 +344,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
goto unlock;
spin_lock_irqsave(&vector_lock, flags);
- __get_cpu_var(vector_irq)[vector] = -1;
+ __this_cpu_write(vector_irq[vector], -1);
cpu_clear(me, vector_table[vector]);
spin_unlock_irqrestore(&vector_lock, flags);
cfg->move_cleanup_count--;
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 074fde49c9e6..c7c51445c3be 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -396,7 +396,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
unsigned int i;
i = atomic_read(&kcb->prev_kprobe_index);
- __get_cpu_var(current_kprobe) = kcb->prev_kprobe[i-1].kp;
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe[i-1].kp);
kcb->kprobe_status = kcb->prev_kprobe[i-1].status;
atomic_sub(1, &kcb->prev_kprobe_index);
}
@@ -404,7 +404,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
static void __kprobes set_current_kprobe(struct kprobe *p,
struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(current_kprobe) = p;
+ __this_cpu_write(current_kprobe, p);
}
static void kretprobe_trampoline(void)
@@ -823,7 +823,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
/*
* jprobe instrumented function just completed
*/
- p = __get_cpu_var(current_kprobe);
+ p = __this_cpu_read(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
goto ss_probe;
}
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index db7b36bb068b..8bfd36af46f8 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1341,7 +1341,7 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
ia64_mlogbuf_finish(1);
}
- if (__get_cpu_var(ia64_mca_tr_reload)) {
+ if (__this_cpu_read(ia64_mca_tr_reload)) {
mca_insert_tr(0x1); /*Reload dynamic itrs*/
mca_insert_tr(0x2); /*Reload dynamic itrs*/
}
@@ -1868,14 +1868,14 @@ ia64_mca_cpu_init(void *cpu_data)
"MCA", cpu);
format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack),
"INIT", cpu);
- __get_cpu_var(ia64_mca_data) = __per_cpu_mca[cpu] = __pa(data);
+ __this_cpu_write(ia64_mca_data, (__per_cpu_mca[cpu] = __pa(data)));
/*
* Stash away a copy of the PTE needed to map the per-CPU page.
* We may need it during MCA recovery.
*/
- __get_cpu_var(ia64_mca_per_cpu_pte) =
- pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));
+ __this_cpu_write(ia64_mca_per_cpu_pte,
+ pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL)));
/*
* Also, stash away a copy of the PAL address and the PTE
@@ -1884,10 +1884,10 @@ ia64_mca_cpu_init(void *cpu_data)
pal_vaddr = efi_get_pal_addr();
if (!pal_vaddr)
return;
- __get_cpu_var(ia64_mca_pal_base) =
- GRANULEROUNDDOWN((unsigned long) pal_vaddr);
- __get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
- PAGE_KERNEL));
+ __this_cpu_write(ia64_mca_pal_base,
+ GRANULEROUNDDOWN((unsigned long) pal_vaddr));
+ __this_cpu_write(ia64_mca_pal_pte, pte_val(mk_pte_phys(__pa(pal_vaddr),
+ PAGE_KERNEL)));
}
static void ia64_mca_cmc_vector_adjust(void *dummy)
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index deed6fa96bb0..b51514957620 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -215,7 +215,7 @@ static inline void play_dead(void)
unsigned int this_cpu = smp_processor_id();
/* Ack it */
- __get_cpu_var(cpu_state) = CPU_DEAD;
+ __this_cpu_write(cpu_state, CPU_DEAD);
max_xtp();
local_irq_disable();
@@ -273,7 +273,7 @@ ia64_save_extra (struct task_struct *task)
if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
pfm_save_regs(task);
- info = __get_cpu_var(pfm_syst_info);
+ info = __this_cpu_read(pfm_syst_info);
if (info & PFM_CPUINFO_SYST_WIDE)
pfm_syst_wide_update_task(task, info, 0);
#endif
@@ -293,7 +293,7 @@ ia64_load_extra (struct task_struct *task)
if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
pfm_load_regs(task);
- info = __get_cpu_var(pfm_syst_info);
+ info = __this_cpu_read(pfm_syst_info);
if (info & PFM_CPUINFO_SYST_WIDE)
pfm_syst_wide_update_task(task, info, 1);
#endif
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index d3636e67a98e..6f7d4a4dcf24 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -299,7 +299,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
if (!(current->thread.flags & IA64_THREAD_FPEMU_NOPRINT)) {
unsigned long count, current_jiffies = jiffies;
- struct fpu_swa_msg *cp = &__get_cpu_var(cpulast);
+ struct fpu_swa_msg *cp = this_cpu_ptr(&cpulast);
if (unlikely(current_jiffies > cp->time))
cp->count = 0;
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 36182c84363c..5f6b6b48c1d5 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -629,7 +629,7 @@ void sn_cpu_init(void)
cnode = nasid_to_cnodeid(nasid);
- sn_nodepda = nodepdaindr[cnode];
+ __this_cpu_write(__sn_nodepda, nodepdaindr[cnode]);
pda->led_address =
(typeof(pda->led_address)) (LED0 + (slice << LED_CPU_SHIFT));
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 68c845411624..f9c8d9fc5939 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -134,8 +134,8 @@ sn2_ipi_flush_all_tlb(struct mm_struct *mm)
itc = ia64_get_itc();
smp_flush_tlb_cpumask(*mm_cpumask(mm));
itc = ia64_get_itc() - itc;
- __get_cpu_var(ptcstats).shub_ipi_flushes_itc_clocks += itc;
- __get_cpu_var(ptcstats).shub_ipi_flushes++;
+ __this_cpu_add(ptcstats.shub_ipi_flushes_itc_clocks, itc);
+ __this_cpu_inc(ptcstats.shub_ipi_flushes);
}
/**
@@ -199,14 +199,14 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
start += (1UL << nbits);
} while (start < end);
ia64_srlz_i();
- __get_cpu_var(ptcstats).ptc_l++;
+ __this_cpu_inc(ptcstats.ptc_l);
preempt_enable();
return;
}
if (atomic_read(&mm->mm_users) == 1 && mymm) {
flush_tlb_mm(mm);
- __get_cpu_var(ptcstats).change_rid++;
+ __this_cpu_inc(ptcstats.change_rid);
preempt_enable();
return;
}
@@ -250,11 +250,11 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
spin_lock_irqsave(PTC_LOCK(shub1), flags);
itc2 = ia64_get_itc();
- __get_cpu_var(ptcstats).lock_itc_clocks += itc2 - itc;
- __get_cpu_var(ptcstats).shub_ptc_flushes++;
- __get_cpu_var(ptcstats).nodes_flushed += nix;
+ __this_cpu_add(ptcstats.lock_itc_clocks, itc2 - itc);
+ __this_cpu_inc(ptcstats.shub_ptc_flushes);
+ __this_cpu_add(ptcstats.nodes_flushed, nix);
if (!mymm)
- __get_cpu_var(ptcstats).shub_ptc_flushes_not_my_mm++;
+ __this_cpu_inc(ptcstats.shub_ptc_flushes_not_my_mm);
if (use_cpu_ptcga && !mymm) {
old_rr = ia64_get_rr(start);
@@ -299,9 +299,9 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
done:
itc2 = ia64_get_itc() - itc2;
- __get_cpu_var(ptcstats).shub_itc_clocks += itc2;
- if (itc2 > __get_cpu_var(ptcstats).shub_itc_clocks_max)
- __get_cpu_var(ptcstats).shub_itc_clocks_max = itc2;
+ __this_cpu_add(ptcstats.shub_itc_clocks, itc2);
+ if (itc2 > __this_cpu_read(ptcstats.shub_itc_clocks_max))
+ __this_cpu_write(ptcstats.shub_itc_clocks_max, itc2);
if (old_rr) {
ia64_set_rr(start, old_rr);
@@ -311,7 +311,7 @@ done:
spin_unlock_irqrestore(PTC_LOCK(shub1), flags);
if (flush_opt == 1 && deadlock) {
- __get_cpu_var(ptcstats).deadlocks++;
+ __this_cpu_inc(ptcstats.deadlocks);
sn2_ipi_flush_all_tlb(mm);
}
@@ -334,7 +334,7 @@ sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid,
short nasid, i;
unsigned long *piows, zeroval, n;
- __get_cpu_var(ptcstats).deadlocks++;
+ __this_cpu_inc(ptcstats.deadlocks);
piows = (unsigned long *) pda->pio_write_status_addr;
zeroval = pda->pio_write_status_val;
@@ -349,7 +349,7 @@ sn2_ptc_deadlock_recovery(short *nasids, short ib, short ie, int mynasid,
ptc1 = CHANGE_NASID(nasid, ptc1);
n = sn2_ptc_deadlock_recovery_core(ptc0, data0, ptc1, data1, piows, zeroval);
- __get_cpu_var(ptcstats).deadlocks2 += n;
+ __this_cpu_add(ptcstats.deadlocks2, n);
}
}
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index 02c08737f6aa..2478ec6d23c9 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -258,7 +258,7 @@ int metag_pmu_event_set_period(struct perf_event *event,
static void metag_pmu_start(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
@@ -306,7 +306,7 @@ static void metag_pmu_stop(struct perf_event *event, int flags)
static int metag_pmu_add(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = 0, ret = 0;
@@ -348,7 +348,7 @@ out:
static void metag_pmu_del(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
@@ -597,7 +597,7 @@ static int _hw_perf_event_init(struct perf_event *event)
static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
{
- struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *events = this_cpu_ptr(&cpu_hw_events);
unsigned int config = event->config;
unsigned int tmp = config & 0xf0;
unsigned long flags;
@@ -670,7 +670,7 @@ unlock:
static void metag_pmu_disable_counter(struct hw_perf_event *event, int idx)
{
- struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *events = this_cpu_ptr(&cpu_hw_events);
unsigned int tmp = 0;
unsigned long flags;
@@ -718,7 +718,7 @@ out:
static void metag_pmu_write_counter(int idx, u32 val)
{
- struct cpu_hw_events *events = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *events = this_cpu_ptr(&cpu_hw_events);
u32 tmp = 0;
unsigned long flags;
@@ -751,7 +751,7 @@ static int metag_pmu_event_map(int idx)
static irqreturn_t metag_pmu_counter_overflow(int irq, void *dev)
{
int idx = (int)dev;
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
struct perf_event *event = cpuhw->events[idx];
struct hw_perf_event *hwc = &event->hw;
struct pt_regs *regs = get_irq_regs();
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 1b82ac6921e0..741734049675 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -264,13 +264,13 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data)
unsigned long *pen;
unsigned long flags;
union octeon_ciu_chip_data cd;
- raw_spinlock_t *lock = &__get_cpu_var(octeon_irq_ciu_spinlock);
+ raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
cd.p = irq_data_get_irq_chip_data(data);
raw_spin_lock_irqsave(lock, flags);
if (cd.s.line == 0) {
- pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
+ pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
__set_bit(cd.s.bit, pen);
/*
* Must be visible to octeon_irq_ip{2,3}_ciu() before
@@ -279,7 +279,7 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data)
wmb();
cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
} else {
- pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
+ pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
__set_bit(cd.s.bit, pen);
/*
* Must be visible to octeon_irq_ip{2,3}_ciu() before
@@ -296,13 +296,13 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data)
unsigned long *pen;
unsigned long flags;
union octeon_ciu_chip_data cd;
- raw_spinlock_t *lock = &__get_cpu_var(octeon_irq_ciu_spinlock);
+ raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
cd.p = irq_data_get_irq_chip_data(data);
raw_spin_lock_irqsave(lock, flags);
if (cd.s.line == 0) {
- pen = &__get_cpu_var(octeon_irq_ciu0_en_mirror);
+ pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
__clear_bit(cd.s.bit, pen);
/*
* Must be visible to octeon_irq_ip{2,3}_ciu() before
@@ -311,7 +311,7 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data)
wmb();
cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
} else {
- pen = &__get_cpu_var(octeon_irq_ciu1_en_mirror);
+ pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
__clear_bit(cd.s.bit, pen);
/*
* Must be visible to octeon_irq_ip{2,3}_ciu() before
@@ -431,11 +431,11 @@ static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
if (cd.s.line == 0) {
int index = cvmx_get_core_num() * 2;
- set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror));
+ set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
} else {
int index = cvmx_get_core_num() * 2 + 1;
- set_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror));
+ set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
}
}
@@ -450,11 +450,11 @@ static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
if (cd.s.line == 0) {
int index = cvmx_get_core_num() * 2;
- clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu0_en_mirror));
+ clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
} else {
int index = cvmx_get_core_num() * 2 + 1;
- clear_bit(cd.s.bit, &__get_cpu_var(octeon_irq_ciu1_en_mirror));
+ clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
}
}
@@ -1063,7 +1063,7 @@ static void octeon_irq_ip2_ciu(void)
const unsigned long core_id = cvmx_get_core_num();
u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INTX_SUM0(core_id * 2));
- ciu_sum &= __get_cpu_var(octeon_irq_ciu0_en_mirror);
+ ciu_sum &= __this_cpu_read(octeon_irq_ciu0_en_mirror);
if (likely(ciu_sum)) {
int bit = fls64(ciu_sum) - 1;
int irq = octeon_irq_ciu_to_irq[0][bit];
@@ -1080,7 +1080,7 @@ static void octeon_irq_ip3_ciu(void)
{
u64 ciu_sum = cvmx_read_csr(CVMX_CIU_INT_SUM1);
- ciu_sum &= __get_cpu_var(octeon_irq_ciu1_en_mirror);
+ ciu_sum &= __this_cpu_read(octeon_irq_ciu1_en_mirror);
if (likely(ciu_sum)) {
int bit = fls64(ciu_sum) - 1;
int irq = octeon_irq_ciu_to_irq[1][bit];
@@ -1129,10 +1129,10 @@ static void octeon_irq_init_ciu_percpu(void)
int coreid = cvmx_get_core_num();
- __get_cpu_var(octeon_irq_ciu0_en_mirror) = 0;
- __get_cpu_var(octeon_irq_ciu1_en_mirror) = 0;
+ __this_cpu_write(octeon_irq_ciu0_en_mirror, 0);
+ __this_cpu_write(octeon_irq_ciu1_en_mirror, 0);
wmb();
- raw_spin_lock_init(&__get_cpu_var(octeon_irq_ciu_spinlock));
+ raw_spin_lock_init(this_cpu_ptr(&octeon_irq_ciu_spinlock));
/*
* Disable All CIU Interrupts. The ones we need will be
* enabled later. Read the SUM register so we know the write
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 0195745b4b1b..3ee347713307 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -33,17 +33,17 @@
#ifdef CONFIG_DEBUG_FS
struct mips_fpu_emulator_stats {
- local_t emulated;
- local_t loads;
- local_t stores;
- local_t cp1ops;
- local_t cp1xops;
- local_t errors;
- local_t ieee754_inexact;
- local_t ieee754_underflow;
- local_t ieee754_overflow;
- local_t ieee754_zerodiv;
- local_t ieee754_invalidop;
+ unsigned long emulated;
+ unsigned long loads;
+ unsigned long stores;
+ unsigned long cp1ops;
+ unsigned long cp1xops;
+ unsigned long errors;
+ unsigned long ieee754_inexact;
+ unsigned long ieee754_underflow;
+ unsigned long ieee754_overflow;
+ unsigned long ieee754_zerodiv;
+ unsigned long ieee754_invalidop;
};
DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
@@ -51,7 +51,7 @@ DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
#define MIPS_FPU_EMU_INC_STATS(M) \
do { \
preempt_disable(); \
- __local_inc(&__get_cpu_var(fpuemustats).M); \
+ __this_cpu_inc(fpuemustats.M); \
preempt_enable(); \
} while (0)
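
The fpu_emulator.h change above can drop the local_t wrappers because __this_cpu_inc() already expands to a safe per-CPU increment once preemption is disabled. A minimal sketch of the resulting pattern, with illustrative struct and macro names rather than the kernel's actual fpuemustats:

#include <linux/percpu.h>
#include <linux/preempt.h>

struct demo_emu_stats {
	unsigned long emulated;		/* plain counter, no local_t needed */
	unsigned long errors;
};

static DEFINE_PER_CPU(struct demo_emu_stats, demo_emu_stats);

#define DEMO_EMU_INC_STATS(M)				\
do {							\
	preempt_disable();				\
	__this_cpu_inc(demo_emu_stats.M);		\
	preempt_enable();				\
} while (0)

/* usage: DEMO_EMU_INC_STATS(emulated); */
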
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 1f8187ab0997..212f46f2014e 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -224,7 +224,7 @@ static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
@@ -234,7 +234,7 @@ static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(current_kprobe) = p;
+ __this_cpu_write(current_kprobe, p);
kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
kcb->kprobe_saved_epc = regs->cp0_epc;
}
@@ -385,7 +385,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
ret = 1;
goto no_kprobe;
}
- p = __get_cpu_var(current_kprobe);
+ p = __this_cpu_read(current_kprobe);
if (p->break_handler && p->break_handler(p, regs))
goto ss_probe;
}
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index b63f2482f288..a8f9cdc6f8b0 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -340,7 +340,7 @@ static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
@@ -360,7 +360,7 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
static void mipsxx_pmu_disable_event(int idx)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
unsigned long flags;
WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
@@ -460,7 +460,7 @@ static void mipspmu_stop(struct perf_event *event, int flags)
static int mipspmu_add(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx;
int err = 0;
@@ -496,7 +496,7 @@ out:
static void mipspmu_del(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
@@ -1275,7 +1275,7 @@ static int __hw_perf_event_init(struct perf_event *event)
static void pause_local_counters(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int ctr = mipspmu.num_counters;
unsigned long flags;
@@ -1291,7 +1291,7 @@ static void pause_local_counters(void)
static void resume_local_counters(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int ctr = mipspmu.num_counters;
do {
@@ -1302,7 +1302,7 @@ static void resume_local_counters(void)
static int mipsxx_pmu_handle_shared_irq(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_sample_data data;
unsigned int counters = mipspmu.num_counters;
u64 counter;
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index df9e2bd9b2c2..06bb5ed6d80a 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -346,7 +346,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
int action, cpu = irq - IPI0_IRQ;
spin_lock_irqsave(&ipi_lock, flags);
- action = __get_cpu_var(ipi_action_mask);
+ action = __this_cpu_read(ipi_action_mask);
per_cpu(ipi_action_mask, cpu) = 0;
clear_c0_cause(cpu ? C_SW1 : C_SW0);
spin_unlock_irqrestore(&ipi_lock, flags);
diff --git a/arch/mips/loongson/loongson-3/smp.c b/arch/mips/loongson/loongson-3/smp.c
index 74e827b4ec8f..d8c63af6c7cc 100644
--- a/arch/mips/loongson/loongson-3/smp.c
+++ b/arch/mips/loongson/loongson-3/smp.c
@@ -299,16 +299,16 @@ static void loongson3_init_secondary(void)
per_cpu(cpu_state, cpu) = CPU_ONLINE;
i = 0;
- __get_cpu_var(core0_c0count) = 0;
+ __this_cpu_write(core0_c0count, 0);
loongson3_send_ipi_single(0, SMP_ASK_C0COUNT);
- while (!__get_cpu_var(core0_c0count)) {
+ while (!__this_cpu_read(core0_c0count)) {
i++;
cpu_relax();
}
if (i > MAX_LOOPS)
i = MAX_LOOPS;
- initcount = __get_cpu_var(core0_c0count) + i;
+ initcount = __this_cpu_read(core0_c0count) + i;
write_c0_count(initcount);
}
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 6c840ceab820..e2452550bcb1 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -58,10 +58,10 @@ static inline unsigned long cputime_to_jiffies(const cputime_t ct)
static inline cputime_t cputime_to_scaled(const cputime_t ct)
{
if (cpu_has_feature(CPU_FTR_SPURR) &&
- __get_cpu_var(cputime_last_delta))
+ __this_cpu_read(cputime_last_delta))
return (__force u64) ct *
- __get_cpu_var(cputime_scaled_last_delta) /
- __get_cpu_var(cputime_last_delta);
+ __this_cpu_read(cputime_scaled_last_delta) /
+ __this_cpu_read(cputime_last_delta);
return ct;
}
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index e787cc1bff8f..b0d5f0a97a01 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -82,7 +82,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
static __always_inline void inc_irq_stat(enum interruption_class irq)
{
- __get_cpu_var(irq_stat).irqs[irq]++;
+ __this_cpu_inc(irq_stat.irqs[irq]);
}
struct ext_code {
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index fa91e0097458..933355e0d091 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -31,7 +31,7 @@
pcp_op_T__ old__, new__, prev__; \
pcp_op_T__ *ptr__; \
preempt_disable(); \
- ptr__ = __this_cpu_ptr(&(pcp)); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
prev__ = *ptr__; \
do { \
old__ = prev__; \
@@ -70,7 +70,7 @@
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
preempt_disable(); \
- ptr__ = __this_cpu_ptr(&(pcp)); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
if (__builtin_constant_p(val__) && \
((szcast)val__ > -129) && ((szcast)val__ < 128)) { \
asm volatile( \
@@ -97,7 +97,7 @@
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
preempt_disable(); \
- ptr__ = __this_cpu_ptr(&(pcp)); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
asm volatile( \
op " %[old__],%[val__],%[ptr__]\n" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
@@ -116,7 +116,7 @@
pcp_op_T__ val__ = (val); \
pcp_op_T__ old__, *ptr__; \
preempt_disable(); \
- ptr__ = __this_cpu_ptr(&(pcp)); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
asm volatile( \
op " %[old__],%[val__],%[ptr__]\n" \
: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__) \
@@ -138,7 +138,7 @@
pcp_op_T__ ret__; \
pcp_op_T__ *ptr__; \
preempt_disable(); \
- ptr__ = __this_cpu_ptr(&(pcp)); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
ret__ = cmpxchg(ptr__, oval, nval); \
preempt_enable(); \
ret__; \
@@ -154,7 +154,7 @@
typeof(pcp) *ptr__; \
typeof(pcp) ret__; \
preempt_disable(); \
- ptr__ = __this_cpu_ptr(&(pcp)); \
+ ptr__ = raw_cpu_ptr(&(pcp)); \
ret__ = xchg(ptr__, nval); \
preempt_enable(); \
ret__; \
@@ -173,8 +173,8 @@
typeof(pcp2) *p2__; \
int ret__; \
preempt_disable(); \
- p1__ = __this_cpu_ptr(&(pcp1)); \
- p2__ = __this_cpu_ptr(&(pcp2)); \
+ p1__ = raw_cpu_ptr(&(pcp1)); \
+ p2__ = raw_cpu_ptr(&(pcp2)); \
ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__); \
preempt_enable(); \
ret__; \
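
In the s390 per-CPU op macros above, the pointer is obtained with raw_cpu_ptr() (the renamed __this_cpu_ptr()); since preempt_disable() is already in effect on the preceding line, the cheaper unchecked lookup is sufficient. A simplified sketch of the shape of these macros, with illustrative names and a plain C body standing in for the inline asm:

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, demo_counter);

static unsigned long demo_add_return(unsigned long val)
{
	unsigned long *ptr;
	unsigned long new;

	preempt_disable();
	ptr = raw_cpu_ptr(&demo_counter);	/* preemption is off, no debug check needed */
	new = *ptr + val;
	*ptr = new;
	preempt_enable();
	return new;
}
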
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index c846aee7372f..7559f1beab29 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -21,7 +21,7 @@ static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
void __kprobes enabled_wait(void)
{
- struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
+ struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
unsigned long long idle_time;
unsigned long psw_mask;
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 27ae5433fe4d..014d4729b134 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -218,9 +218,9 @@ static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb,
*/
static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
{
- kcb->prev_kprobe.kp = __get_cpu_var(current_kprobe);
+ kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
kcb->prev_kprobe.status = kcb->kprobe_status;
- __get_cpu_var(current_kprobe) = p;
+ __this_cpu_write(current_kprobe, p);
}
/*
@@ -230,7 +230,7 @@ static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
*/
static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
}
@@ -311,7 +311,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
enable_singlestep(kcb, regs, (unsigned long) p->ainsn.insn);
return 1;
} else if (kprobe_running()) {
- p = __get_cpu_var(current_kprobe);
+ p = __this_cpu_read(current_kprobe);
if (p->break_handler && p->break_handler(p, regs)) {
/*
* Continuation after the jprobe completed and
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index db96b418160a..dd1c24ceda50 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -54,8 +54,12 @@ void s390_handle_mcck(void)
*/
local_irq_save(flags);
local_mcck_disable();
- mcck = __get_cpu_var(cpu_mcck);
- memset(&__get_cpu_var(cpu_mcck), 0, sizeof(struct mcck_struct));
+ /*
+ * Ummm... Does this make sense at all? Copying the percpu struct
+ * and then zapping it one statement later?
+ */
+ memcpy(&mcck, this_cpu_ptr(&cpu_mcck), sizeof(mcck));
+ memset(&mcck, 0, sizeof(struct mcck_struct));
clear_cpu_flag(CIF_MCCK_PENDING);
local_mcck_enable();
local_irq_restore(flags);
@@ -269,7 +273,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs)
nmi_enter();
inc_irq_stat(NMI_NMI);
mci = (struct mci *) &S390_lowcore.mcck_interruption_code;
- mcck = &__get_cpu_var(cpu_mcck);
+ mcck = this_cpu_ptr(&cpu_mcck);
umode = user_mode(regs);
if (mci->sd) {
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index d3194de7ae1e..56fdad479115 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -173,7 +173,7 @@ static int validate_ctr_auth(const struct hw_perf_event *hwc)
*/
static void cpumf_pmu_enable(struct pmu *pmu)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
int err;
if (cpuhw->flags & PMU_F_ENABLED)
@@ -196,7 +196,7 @@ static void cpumf_pmu_enable(struct pmu *pmu)
*/
static void cpumf_pmu_disable(struct pmu *pmu)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
int err;
u64 inactive;
@@ -230,7 +230,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
return;
inc_irq_stat(IRQEXT_CMC);
- cpuhw = &__get_cpu_var(cpu_hw_events);
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
/* Measurement alerts are shared and might happen when the PMU
* is not reserved. Ignore these alerts in this case. */
@@ -250,7 +250,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
#define PMC_RELEASE 1
static void setup_pmc_cpu(void *flags)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
switch (*((int *) flags)) {
case PMC_INIT:
@@ -475,7 +475,7 @@ static void cpumf_pmu_read(struct perf_event *event)
static void cpumf_pmu_start(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
@@ -506,7 +506,7 @@ static void cpumf_pmu_start(struct perf_event *event, int flags)
static void cpumf_pmu_stop(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
if (!(hwc->state & PERF_HES_STOPPED)) {
@@ -527,7 +527,7 @@ static void cpumf_pmu_stop(struct perf_event *event, int flags)
static int cpumf_pmu_add(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
/* Check authorization for the counter set to which this
* counter belongs.
@@ -551,7 +551,7 @@ static int cpumf_pmu_add(struct perf_event *event, int flags)
static void cpumf_pmu_del(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
cpumf_pmu_stop(event, PERF_EF_UPDATE);
@@ -575,7 +575,7 @@ static void cpumf_pmu_del(struct perf_event *event, int flags)
*/
static void cpumf_pmu_start_txn(struct pmu *pmu)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
perf_pmu_disable(pmu);
cpuhw->flags |= PERF_EVENT_TXN;
@@ -589,7 +589,7 @@ static void cpumf_pmu_start_txn(struct pmu *pmu)
*/
static void cpumf_pmu_cancel_txn(struct pmu *pmu)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
WARN_ON(cpuhw->tx_state != cpuhw->state);
@@ -604,7 +604,7 @@ static void cpumf_pmu_cancel_txn(struct pmu *pmu)
*/
static int cpumf_pmu_commit_txn(struct pmu *pmu)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
u64 state;
/* check if the updated state can be scheduled */
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index ea0c7b2ef030..08e761318c17 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -562,7 +562,7 @@ static DEFINE_MUTEX(pmc_reserve_mutex);
static void setup_pmc_cpu(void *flags)
{
int err;
- struct cpu_hw_sf *cpusf = &__get_cpu_var(cpu_hw_sf);
+ struct cpu_hw_sf *cpusf = this_cpu_ptr(&cpu_hw_sf);
err = 0;
switch (*((int *) flags)) {
@@ -849,7 +849,7 @@ static int cpumsf_pmu_event_init(struct perf_event *event)
static void cpumsf_pmu_enable(struct pmu *pmu)
{
- struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+ struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
struct hw_perf_event *hwc;
int err;
@@ -898,7 +898,7 @@ static void cpumsf_pmu_enable(struct pmu *pmu)
static void cpumsf_pmu_disable(struct pmu *pmu)
{
- struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+ struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
struct hws_lsctl_request_block inactive;
struct hws_qsi_info_block si;
int err;
@@ -1306,7 +1306,7 @@ static void cpumsf_pmu_read(struct perf_event *event)
*/
static void cpumsf_pmu_start(struct perf_event *event, int flags)
{
- struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+ struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
return;
@@ -1327,7 +1327,7 @@ static void cpumsf_pmu_start(struct perf_event *event, int flags)
*/
static void cpumsf_pmu_stop(struct perf_event *event, int flags)
{
- struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+ struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
if (event->hw.state & PERF_HES_STOPPED)
return;
@@ -1346,7 +1346,7 @@ static void cpumsf_pmu_stop(struct perf_event *event, int flags)
static int cpumsf_pmu_add(struct perf_event *event, int flags)
{
- struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+ struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
int err;
if (cpuhw->flags & PMU_F_IN_USE)
@@ -1397,7 +1397,7 @@ out:
static void cpumsf_pmu_del(struct perf_event *event, int flags)
{
- struct cpu_hw_sf *cpuhw = &__get_cpu_var(cpu_hw_sf);
+ struct cpu_hw_sf *cpuhw = this_cpu_ptr(&cpu_hw_sf);
perf_pmu_disable(event->pmu);
cpumsf_pmu_stop(event, PERF_EF_UPDATE);
@@ -1470,7 +1470,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
if (!(alert & CPU_MF_INT_SF_MASK))
return;
inc_irq_stat(IRQEXT_CMS);
- cpuhw = &__get_cpu_var(cpu_hw_sf);
+ cpuhw = this_cpu_ptr(&cpu_hw_sf);
/* Measurement alerts are shared and might happen when the PMU
* is not reserved. Ignore these alerts in this case. */
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index edefead3b43a..dbdd33ee0102 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -23,7 +23,7 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
*/
void cpu_init(void)
{
- struct cpuid *id = &__get_cpu_var(cpu_id);
+ struct cpuid *id = this_cpu_ptr(&cpu_id);
get_cpu_id(id);
atomic_inc(&init_mm.mm_count);
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 69e980de0f62..005d665fe4a5 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -92,7 +92,7 @@ void clock_comparator_work(void)
struct clock_event_device *cd;
S390_lowcore.clock_comparator = -1ULL;
- cd = &__get_cpu_var(comparators);
+ cd = this_cpu_ptr(&comparators);
cd->event_handler(cd);
}
@@ -373,7 +373,7 @@ EXPORT_SYMBOL(get_sync_clock);
*/
static void disable_sync_clock(void *dummy)
{
- atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word);
+ atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
/*
* Clear the in-sync bit 2^31. All get_sync_clock calls will
* fail until the sync bit is turned back on. In addition
@@ -390,7 +390,7 @@ static void disable_sync_clock(void *dummy)
*/
static void enable_sync_clock(void)
{
- atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word);
+ atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
atomic_set_mask(0x80000000, sw_ptr);
}
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index e53c6f268807..ff9b4eb34589 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -178,7 +178,7 @@ static int smp_ctl_qsi(int cpu)
static void hws_ext_handler(struct ext_code ext_code,
unsigned int param32, unsigned long param64)
{
- struct hws_cpu_buffer *cb = &__get_cpu_var(sampler_cpu_buffer);
+ struct hws_cpu_buffer *cb = this_cpu_ptr(&sampler_cpu_buffer);
if (!(param32 & CPU_MF_INT_SF_MASK))
return;
diff --git a/arch/sparc/include/asm/cpudata_32.h b/arch/sparc/include/asm/cpudata_32.h
index 0300d94c25b3..05f366379f53 100644
--- a/arch/sparc/include/asm/cpudata_32.h
+++ b/arch/sparc/include/asm/cpudata_32.h
@@ -26,6 +26,6 @@ typedef struct {
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
-#define local_cpu_data() __get_cpu_var(__cpu_data)
+#define local_cpu_data() (*this_cpu_ptr(&__cpu_data))
#endif /* _SPARC_CPUDATA_H */
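
local_cpu_data() above becomes (*this_cpu_ptr(&__cpu_data)) rather than __this_cpu_read() because callers use the macro as an lvalue and to reach individual struct members. A small illustrative sketch of why the dereferenced-pointer form is kept; the struct and macro names are made up:

#include <linux/percpu.h>

struct demo_cpuinfo {
	unsigned long udelay_val;
	unsigned long irq0_irqs;
};

static DEFINE_PER_CPU(struct demo_cpuinfo, demo_cpu_data);

#define demo_local_cpu_data()	(*this_cpu_ptr(&demo_cpu_data))

static void demo_use(void)
{
	/* usable as both rvalue and lvalue, unlike __this_cpu_read() */
	demo_local_cpu_data().udelay_val = 1000;
	if (demo_local_cpu_data().irq0_irqs)
		demo_local_cpu_data().irq0_irqs = 0;
}
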
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index 0e594076912c..a6e424d185d0 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -30,7 +30,7 @@ typedef struct {
DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
#define cpu_data(__cpu) per_cpu(__cpu_data, (__cpu))
-#define local_cpu_data() __get_cpu_var(__cpu_data)
+#define local_cpu_data() (*this_cpu_ptr(&__cpu_data))
#endif /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c
index 98d712843413..cd83be527586 100644
--- a/arch/sparc/kernel/kprobes.c
+++ b/arch/sparc/kernel/kprobes.c
@@ -83,7 +83,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc;
kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil;
@@ -92,7 +92,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(current_kprobe) = p;
+ __this_cpu_write(current_kprobe, p);
kcb->kprobe_orig_tnpc = regs->tnpc;
kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
}
@@ -155,7 +155,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
ret = 1;
goto no_kprobe;
}
- p = __get_cpu_var(current_kprobe);
+ p = __this_cpu_read(current_kprobe);
if (p->break_handler && p->break_handler(p, regs))
goto ss_probe;
}
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index 018ef11f57df..ea2bad306f93 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -343,7 +343,7 @@ static void leon_ipi_resched(int cpu)
void leonsmp_ipi_interrupt(void)
{
- struct leon_ipi_work *work = &__get_cpu_var(leon_ipi_work);
+ struct leon_ipi_work *work = this_cpu_ptr(&leon_ipi_work);
if (work->single) {
work->single = 0;
diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c
index 5b1151dcba13..a9973bb4a1b2 100644
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ -100,20 +100,20 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
sum = local_cpu_data().irq0_irqs;
- if (__get_cpu_var(nmi_touch)) {
- __get_cpu_var(nmi_touch) = 0;
+ if (__this_cpu_read(nmi_touch)) {
+ __this_cpu_write(nmi_touch, 0);
touched = 1;
}
- if (!touched && __get_cpu_var(last_irq_sum) == sum) {
+ if (!touched && __this_cpu_read(last_irq_sum) == sum) {
__this_cpu_inc(alert_counter);
if (__this_cpu_read(alert_counter) == 30 * nmi_hz)
die_nmi("BUG: NMI Watchdog detected LOCKUP",
regs, panic_on_timeout);
} else {
- __get_cpu_var(last_irq_sum) = sum;
+ __this_cpu_write(last_irq_sum, sum);
__this_cpu_write(alert_counter, 0);
}
- if (__get_cpu_var(wd_enabled)) {
+ if (__this_cpu_read(wd_enabled)) {
pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz));
pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable);
}
@@ -154,7 +154,7 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count)
void stop_nmi_watchdog(void *unused)
{
pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
- __get_cpu_var(wd_enabled) = 0;
+ __this_cpu_write(wd_enabled, 0);
atomic_dec(&nmi_active);
}
@@ -207,7 +207,7 @@ error:
void start_nmi_watchdog(void *unused)
{
- __get_cpu_var(wd_enabled) = 1;
+ __this_cpu_write(wd_enabled, 1);
atomic_inc(&nmi_active);
pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
@@ -218,7 +218,7 @@ void start_nmi_watchdog(void *unused)
static void nmi_adjust_hz_one(void *unused)
{
- if (!__get_cpu_var(wd_enabled))
+ if (!__this_cpu_read(wd_enabled))
return;
pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable);
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index d07f6b29aed8..49d33b178793 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -48,7 +48,7 @@ static int iommu_batch_initialized;
/* Interrupts must be disabled. */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
- struct iommu_batch *p = &__get_cpu_var(iommu_batch);
+ struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
p->dev = dev;
p->prot = prot;
@@ -94,7 +94,7 @@ static long iommu_batch_flush(struct iommu_batch *p)
static inline void iommu_batch_new_entry(unsigned long entry)
{
- struct iommu_batch *p = &__get_cpu_var(iommu_batch);
+ struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
if (p->entry + p->npages == entry)
return;
@@ -106,7 +106,7 @@ static inline void iommu_batch_new_entry(unsigned long entry)
/* Interrupts must be disabled. */
static inline long iommu_batch_add(u64 phys_page)
{
- struct iommu_batch *p = &__get_cpu_var(iommu_batch);
+ struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
BUG_ON(p->npages >= PGLIST_NENTS);
@@ -120,7 +120,7 @@ static inline long iommu_batch_add(u64 phys_page)
/* Interrupts must be disabled. */
static inline long iommu_batch_end(void)
{
- struct iommu_batch *p = &__get_cpu_var(iommu_batch);
+ struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
BUG_ON(p->npages >= PGLIST_NENTS);
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index c9759ad3f34a..46a5e4508752 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1013,7 +1013,7 @@ static void update_pcrs_for_enable(struct cpu_hw_events *cpuc)
static void sparc_pmu_enable(struct pmu *pmu)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int i;
if (cpuc->enabled)
@@ -1031,7 +1031,7 @@ static void sparc_pmu_enable(struct pmu *pmu)
static void sparc_pmu_disable(struct pmu *pmu)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int i;
if (!cpuc->enabled)
@@ -1065,7 +1065,7 @@ static int active_event_index(struct cpu_hw_events *cpuc,
static void sparc_pmu_start(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx = active_event_index(cpuc, event);
if (flags & PERF_EF_RELOAD) {
@@ -1080,7 +1080,7 @@ static void sparc_pmu_start(struct perf_event *event, int flags)
static void sparc_pmu_stop(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx = active_event_index(cpuc, event);
if (!(event->hw.state & PERF_HES_STOPPED)) {
@@ -1096,7 +1096,7 @@ static void sparc_pmu_stop(struct perf_event *event, int flags)
static void sparc_pmu_del(struct perf_event *event, int _flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
unsigned long flags;
int i;
@@ -1133,7 +1133,7 @@ static void sparc_pmu_del(struct perf_event *event, int _flags)
static void sparc_pmu_read(struct perf_event *event)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx = active_event_index(cpuc, event);
struct hw_perf_event *hwc = &event->hw;
@@ -1145,7 +1145,7 @@ static DEFINE_MUTEX(pmc_grab_mutex);
static void perf_stop_nmi_watchdog(void *unused)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int i;
stop_nmi_watchdog(NULL);
@@ -1356,7 +1356,7 @@ static int collect_events(struct perf_event *group, int max_count,
static int sparc_pmu_add(struct perf_event *event, int ef_flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int n0, ret = -EAGAIN;
unsigned long flags;
@@ -1498,7 +1498,7 @@ static int sparc_pmu_event_init(struct perf_event *event)
*/
static void sparc_pmu_start_txn(struct pmu *pmu)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
perf_pmu_disable(pmu);
cpuhw->group_flag |= PERF_EVENT_TXN;
@@ -1511,7 +1511,7 @@ static void sparc_pmu_start_txn(struct pmu *pmu)
*/
static void sparc_pmu_cancel_txn(struct pmu *pmu)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
cpuhw->group_flag &= ~PERF_EVENT_TXN;
perf_pmu_enable(pmu);
@@ -1524,13 +1524,13 @@ static void sparc_pmu_cancel_txn(struct pmu *pmu)
*/
static int sparc_pmu_commit_txn(struct pmu *pmu)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int n;
if (!sparc_pmu)
return -EINVAL;
- cpuc = &__get_cpu_var(cpu_hw_events);
+ cpuc = this_cpu_ptr(&cpu_hw_events);
n = cpuc->n_events;
if (check_excludes(cpuc->event, 0, n))
return -EINVAL;
@@ -1601,7 +1601,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
regs = args->regs;
- cpuc = &__get_cpu_var(cpu_hw_events);
+ cpuc = this_cpu_ptr(&cpu_hw_events);
/* If the PMU has the TOE IRQ enable bits, we need to do a
* dummy write to the %pcr to clear the overflow bits and thus
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index d5c319553fd0..9d98e5002a09 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -204,7 +204,7 @@ static void __init smp4d_ipi_init(void)
void sun4d_ipi_interrupt(void)
{
- struct sun4d_ipi_work *work = &__get_cpu_var(sun4d_ipi_work);
+ struct sun4d_ipi_work *work = this_cpu_ptr(&sun4d_ipi_work);
if (work->single) {
work->single = 0;
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 3fddf64c7fc6..59da0c3ea788 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -765,7 +765,7 @@ void setup_sparc64_timer(void)
: /* no outputs */
: "r" (pstate));
- sevt = &__get_cpu_var(sparc64_events);
+ sevt = this_cpu_ptr(&sparc64_events);
memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
sevt->cpumask = cpumask_of(smp_processor_id());
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index b89aba217e3b..9df2190c097e 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -52,14 +52,14 @@ out:
void arch_enter_lazy_mmu_mode(void)
{
- struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+ struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
tb->active = 1;
}
void arch_leave_lazy_mmu_mode(void)
{
- struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
+ struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);
if (tb->tlb_nr)
flush_tlb_pending();
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 71af5747874d..60d62a292fce 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -140,12 +140,12 @@ extern unsigned int debug_smp_processor_id(void);
/*
* Read the set of maskable interrupts.
- * We avoid the preemption warning here via __this_cpu_ptr since even
+ * We avoid the preemption warning here via raw_cpu_ptr since even
* if irqs are already enabled, it's harmless to read the wrong cpu's
* enabled mask.
*/
#define arch_local_irqs_enabled() \
- (*__this_cpu_ptr(&interrupts_enabled_mask))
+ (*raw_cpu_ptr(&interrupts_enabled_mask))
/* Re-enable all maskable interrupts. */
#define arch_local_irq_enable() \
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h
index 4734215e2ad4..f67753db1f78 100644
--- a/arch/tile/include/asm/mmu_context.h
+++ b/arch/tile/include/asm/mmu_context.h
@@ -84,7 +84,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t)
* clear any pending DMA interrupts.
*/
if (current->thread.tile_dma_state.enabled)
- install_page_table(mm->pgd, __get_cpu_var(current_asid));
+ install_page_table(mm->pgd, __this_cpu_read(current_asid));
#endif
}
@@ -96,12 +96,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
int cpu = smp_processor_id();
/* Pick new ASID. */
- int asid = __get_cpu_var(current_asid) + 1;
+ int asid = __this_cpu_read(current_asid) + 1;
if (asid > max_asid) {
asid = min_asid;
local_flush_tlb();
}
- __get_cpu_var(current_asid) = asid;
+ __this_cpu_write(current_asid, asid);
/* Clear cpu from the old mm, and set it in the new one. */
cpumask_clear_cpu(cpu, mm_cpumask(prev));
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 637f2ffaa5f5..ba85765e1436 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -73,7 +73,7 @@ static DEFINE_PER_CPU(int, irq_depth);
*/
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
- int depth = __get_cpu_var(irq_depth)++;
+ int depth = __this_cpu_inc_return(irq_depth);
unsigned long original_irqs;
unsigned long remaining_irqs;
struct pt_regs *old_regs;
@@ -120,7 +120,7 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
/* Count device irqs; Linux IPIs are counted elsewhere. */
if (irq != IRQ_RESCHEDULE)
- __get_cpu_var(irq_stat).irq_dev_intr_count++;
+ __this_cpu_inc(irq_stat.irq_dev_intr_count);
generic_handle_irq(irq);
}
@@ -130,10 +130,10 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
* including any that were reenabled during interrupt
* handling.
*/
- if (depth == 0)
- unmask_irqs(~__get_cpu_var(irq_disable_mask));
+ if (depth == 1)
+ unmask_irqs(~__this_cpu_read(irq_disable_mask));
- __get_cpu_var(irq_depth)--;
+ __this_cpu_dec(irq_depth);
/*
* Track time spent against the current process again and
@@ -151,7 +151,7 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
static void tile_irq_chip_enable(struct irq_data *d)
{
get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
- if (__get_cpu_var(irq_depth) == 0)
+ if (__this_cpu_read(irq_depth) == 0)
unmask_irqs(1UL << d->irq);
put_cpu_var(irq_disable_mask);
}
@@ -197,7 +197,7 @@ static void tile_irq_chip_ack(struct irq_data *d)
*/
static void tile_irq_chip_eoi(struct irq_data *d)
{
- if (!(__get_cpu_var(irq_disable_mask) & (1UL << d->irq)))
+ if (!(__this_cpu_read(irq_disable_mask) & (1UL << d->irq)))
unmask_irqs(1UL << d->irq);
}
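The tile_dev_intr() hunks above are the one place in this file where the conversion is not purely mechanical: __get_cpu_var(irq_depth)++ evaluated to the value before the increment, while __this_cpu_inc_return() returns the value after it, so the outermost-entry test changes from depth == 0 to depth == 1. A small user-space sketch of that off-by-one, using a plain int in place of the per-cpu counter:

/* Demo only: irq_depth_demo is an ordinary variable standing in for the per-cpu counter. */
#include <assert.h>

static int irq_depth_demo;

int main(void)
{
	/* old form: post-increment yields the pre-increment value (0 on the outermost entry) */
	int depth_old = irq_depth_demo++;
	irq_depth_demo--;

	/* new form: inc-and-return yields the post-increment value (1 on the outermost entry) */
	int depth_new = ++irq_depth_demo;
	irq_depth_demo--;

	assert(depth_old == 0 && depth_new == 1);      /* hence "depth == 0" becomes "depth == 1" */
	return 0;
}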
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 7867266f9716..ac950be1318e 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -28,7 +28,7 @@ static DEFINE_PER_CPU(HV_MsgState, msg_state);
void init_messaging(void)
{
/* Allocate storage for messages in kernel space */
- HV_MsgState *state = &__get_cpu_var(msg_state);
+ HV_MsgState *state = this_cpu_ptr(&msg_state);
int rc = hv_register_message_state(state);
if (rc != HV_OK)
panic("hv_register_message_state: error %d", rc);
@@ -96,7 +96,7 @@ void hv_message_intr(struct pt_regs *regs, int intnum)
struct hv_driver_cb *cb =
(struct hv_driver_cb *)him->intarg;
cb->callback(cb, him->intdata);
- __get_cpu_var(irq_stat).irq_hv_msg_count++;
+ __this_cpu_inc(irq_stat.irq_hv_msg_count);
}
}
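A recurring pattern in the tile hunks: __get_cpu_var(irq_stat).irq_hv_msg_count++ named the whole per-cpu struct and then bumped one field, while __this_cpu_inc(irq_stat.irq_hv_msg_count) names the field directly inside the accessor, so the increment becomes one accessor call. A user-space sketch of the equivalent effect, with demo names only:

/* Demo only: irq_stat_demo is ordinary storage standing in for the per-cpu irq_stat. */
struct irq_cpustat_demo {
	unsigned int irq_hv_msg_count;
	unsigned int irq_dev_intr_count;
};

static struct irq_cpustat_demo irq_stat_demo;

#define this_cpu_inc_demo(field) ((field)++)           /* stand-in for __this_cpu_inc() on one CPU */

int main(void)
{
	this_cpu_inc_demo(irq_stat_demo.irq_hv_msg_count);   /* field named inside the accessor call */
	return irq_stat_demo.irq_hv_msg_count == 1 ? 0 : 1;
}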
diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c
index 2bf6c9c135c1..bb509cee3b59 100644
--- a/arch/tile/kernel/perf_event.c
+++ b/arch/tile/kernel/perf_event.c
@@ -590,7 +590,7 @@ static int tile_event_set_period(struct perf_event *event)
*/
static void tile_pmu_stop(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
@@ -616,7 +616,7 @@ static void tile_pmu_stop(struct perf_event *event, int flags)
*/
static void tile_pmu_start(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx = event->hw.idx;
if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
@@ -650,7 +650,7 @@ static void tile_pmu_start(struct perf_event *event, int flags)
*/
static int tile_pmu_add(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc;
unsigned long mask;
int b, max_cnt;
@@ -706,7 +706,7 @@ static int tile_pmu_add(struct perf_event *event, int flags)
*/
static void tile_pmu_del(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int i;
/*
@@ -880,14 +880,14 @@ static struct pmu tilera_pmu = {
int tile_pmu_handle_irq(struct pt_regs *regs, int fault)
{
struct perf_sample_data data;
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_event *event;
struct hw_perf_event *hwc;
u64 val;
unsigned long status;
int bit;
- __get_cpu_var(perf_irqs)++;
+ __this_cpu_inc(perf_irqs);
if (!atomic_read(&tile_active_events))
return 0;
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 16ed58948757..0050cbc1d9de 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -64,7 +64,7 @@ early_param("idle", idle_setup);
void arch_cpu_idle(void)
{
- __get_cpu_var(irq_stat).idle_timestamp = jiffies;
+ __this_cpu_write(irq_stat.idle_timestamp, jiffies);
_cpu_idle();
}
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 112ababa9e55..b9736ded06f2 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1218,7 +1218,8 @@ static void __init validate_hv(void)
* various asid variables to their appropriate initial states.
*/
asid_range = hv_inquire_asid(0);
- __get_cpu_var(current_asid) = min_asid = asid_range.start;
+ min_asid = asid_range.start;
+ __this_cpu_write(current_asid, min_asid);
max_asid = asid_range.start + asid_range.size - 1;
if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index de07fa7d1315..6cb2ce31b5a2 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -740,7 +740,7 @@ static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
{
- unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+ unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
struct thread_info *info = (void *)current_thread_info();
int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
@@ -766,7 +766,7 @@ void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
void single_step_once(struct pt_regs *regs)
{
- unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+ unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
*ss_pc = regs->pc;
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 19eaa62d456a..d3c4ed780ce2 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -189,7 +189,7 @@ EXPORT_SYMBOL(flush_icache_range);
/* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
static irqreturn_t handle_reschedule_ipi(int irq, void *token)
{
- __get_cpu_var(irq_stat).irq_resched_count++;
+ __this_cpu_inc(irq_stat.irq_resched_count);
scheduler_ipi();
return IRQ_HANDLED;
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 732e9d138661..0d59a1b60c74 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -41,7 +41,7 @@ void __init smp_prepare_boot_cpu(void)
int cpu = smp_processor_id();
set_cpu_online(cpu, 1);
set_cpu_present(cpu, 1);
- __get_cpu_var(cpu_state) = CPU_ONLINE;
+ __this_cpu_write(cpu_state, CPU_ONLINE);
init_messaging();
}
@@ -158,7 +158,7 @@ static void start_secondary(void)
/* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */
/* Initialize the current asid for our first page table. */
- __get_cpu_var(current_asid) = min_asid;
+ __this_cpu_write(current_asid, min_asid);
/* Set up this thread as another owner of the init_mm */
atomic_inc(&init_mm.mm_count);
@@ -201,7 +201,7 @@ void online_secondary(void)
notify_cpu_starting(smp_processor_id());
set_cpu_online(smp_processor_id(), 1);
- __get_cpu_var(cpu_state) = CPU_ONLINE;
+ __this_cpu_write(cpu_state, CPU_ONLINE);
/* Set up tile-specific state for this cpu. */
setup_cpu(0);
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index c1b362277fb7..b854a1cd0079 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -162,7 +162,7 @@ static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = {
void setup_tile_timer(void)
{
- struct clock_event_device *evt = &__get_cpu_var(tile_timer);
+ struct clock_event_device *evt = this_cpu_ptr(&tile_timer);
/* Fill in fields that are speed-specific. */
clockevents_calc_mult_shift(evt, cycles_per_sec, TILE_MINSEC);
@@ -182,7 +182,7 @@ void setup_tile_timer(void)
void do_timer_interrupt(struct pt_regs *regs, int fault_num)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- struct clock_event_device *evt = &__get_cpu_var(tile_timer);
+ struct clock_event_device *evt = this_cpu_ptr(&tile_timer);
/*
* Mask the timer interrupt here, since we are a oneshot timer
@@ -194,7 +194,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
irq_enter();
/* Track interrupt count. */
- __get_cpu_var(irq_stat).irq_timer_count++;
+ __this_cpu_inc(irq_stat.irq_timer_count);
/* Call the generic timer handler */
evt->event_handler(evt);
@@ -235,7 +235,7 @@ cycles_t ns2cycles(unsigned long nsecs)
* We do not have to disable preemption here as each core has the same
* clock frequency.
*/
- struct clock_event_device *dev = &__raw_get_cpu_var(tile_timer);
+ struct clock_event_device *dev = raw_cpu_ptr(&tile_timer);
/*
* as in clocksource.h and x86's timer.h, we split the calculation
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 0dc218294770..6aa2f2625447 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -103,7 +103,7 @@ static void kmap_atomic_register(struct page *page, int type,
spin_lock(&amp_lock);
/* With interrupts disabled, now fill in the per-cpu info. */
- amp = &__get_cpu_var(amps).per_type[type];
+ amp = this_cpu_ptr(&amps.per_type[type]);
amp->page = page;
amp->cpu = smp_processor_id();
amp->va = va;
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index a092e393bd20..caa270165f86 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -593,14 +593,14 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
interrupt_mask_set_mask(-1ULL);
rc = flush_and_install_context(__pa(pgtables),
init_pgprot((unsigned long)pgtables),
- __get_cpu_var(current_asid),
+ __this_cpu_read(current_asid),
cpumask_bits(my_cpu_mask));
interrupt_mask_restore_mask(irqmask);
BUG_ON(rc != 0);
/* Copy the page table back to the normal swapper_pg_dir. */
memcpy(pgd_base, pgtables, sizeof(pgtables));
- __install_page_table(pgd_base, __get_cpu_var(current_asid),
+ __install_page_table(pgd_base, __this_cpu_read(current_asid),
swapper_pgprot);
/*
diff --git a/arch/x86/include/asm/debugreg.h b/arch/x86/include/asm/debugreg.h
index 4b528a970bd4..61fd18b83b6c 100644
--- a/arch/x86/include/asm/debugreg.h
+++ b/arch/x86/include/asm/debugreg.h
@@ -97,11 +97,11 @@ extern void hw_breakpoint_restore(void);
DECLARE_PER_CPU(int, debug_stack_usage);
static inline void debug_stack_usage_inc(void)
{
- __get_cpu_var(debug_stack_usage)++;
+ __this_cpu_inc(debug_stack_usage);
}
static inline void debug_stack_usage_dec(void)
{
- __get_cpu_var(debug_stack_usage)--;
+ __this_cpu_dec(debug_stack_usage);
}
int is_debug_stack(unsigned long addr);
void debug_stack_set_zero(void);
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index 85e13ccf15c4..d725382c2ae0 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -189,7 +189,7 @@ static inline int p4_ht_thread(int cpu)
{
#ifdef CONFIG_SMP
if (smp_num_siblings == 2)
- return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map));
+ return cpu != cpumask_first(this_cpu_cpumask_var_ptr(cpu_sibling_map));
#endif
return 0;
}
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index c63e925fd6b7..a00ad8f2a657 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -164,7 +164,7 @@ struct uv_hub_info_s {
};
DECLARE_PER_CPU(struct uv_hub_info_s, __uv_hub_info);
-#define uv_hub_info (&__get_cpu_var(__uv_hub_info))
+#define uv_hub_info this_cpu_ptr(&__uv_hub_info)
#define uv_cpu_hub_info(cpu) (&per_cpu(__uv_hub_info, cpu))
/*
@@ -601,16 +601,16 @@ struct uv_hub_nmi_s {
struct uv_cpu_nmi_s {
struct uv_hub_nmi_s *hub;
- atomic_t state;
- atomic_t pinging;
+ int state;
+ int pinging;
int queries;
int pings;
};
-DECLARE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi);
-#define uv_cpu_nmi (__get_cpu_var(__uv_cpu_nmi))
+DECLARE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
+
#define uv_hub_nmi (uv_cpu_nmi.hub)
-#define uv_cpu_nmi_per(cpu) (per_cpu(__uv_cpu_nmi, cpu))
+#define uv_cpu_nmi_per(cpu) (per_cpu(uv_cpu_nmi, cpu))
#define uv_hub_nmi_per(cpu) (uv_cpu_nmi_per(cpu).hub)
/* uv_cpu_nmi_states */
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index af5b08ab3b71..5972b108f15a 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -146,7 +146,7 @@ static inline int is_apbt_capable(void)
static int __init apbt_clockevent_register(void)
{
struct sfi_timer_table_entry *mtmr;
- struct apbt_dev *adev = &__get_cpu_var(cpu_apbt_dev);
+ struct apbt_dev *adev = this_cpu_ptr(&cpu_apbt_dev);
mtmr = sfi_get_mtmr(APBT_CLOCKEVENT0_NUM);
if (mtmr == NULL) {
@@ -200,7 +200,7 @@ void apbt_setup_secondary_clock(void)
if (!cpu)
return;
- adev = &__get_cpu_var(cpu_apbt_dev);
+ adev = this_cpu_ptr(&cpu_apbt_dev);
if (!adev->timer) {
adev->timer = dw_apb_clockevent_init(cpu, adev->name,
APBT_CLOCKEVENT_RATING, adev_virt_addr(adev),
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 67760275544b..00853b254ab0 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -561,7 +561,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
*/
static void setup_APIC_timer(void)
{
- struct clock_event_device *levt = &__get_cpu_var(lapic_events);
+ struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
if (this_cpu_has(X86_FEATURE_ARAT)) {
lapic_clockevent.features &= ~CLOCK_EVT_FEAT_C3STOP;
@@ -696,7 +696,7 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
static int __init calibrate_APIC_clock(void)
{
- struct clock_event_device *levt = &__get_cpu_var(lapic_events);
+ struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
void (*real_handler)(struct clock_event_device *dev);
unsigned long deltaj;
long delta, deltatsc;
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 6ce600f9bc78..e658f21681c8 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -42,7 +42,7 @@ __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
* We are to modify mask, so we need an own copy
* and be sure it's manipulated with irq off.
*/
- ipi_mask_ptr = __raw_get_cpu_var(ipi_mask);
+ ipi_mask_ptr = this_cpu_cpumask_var_ptr(ipi_mask);
cpumask_copy(ipi_mask_ptr, mask);
/*
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 3eff36f719fb..4b4f78c9ba19 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1200,9 +1200,9 @@ DEFINE_PER_CPU(int, debug_stack_usage);
int is_debug_stack(unsigned long addr)
{
- return __get_cpu_var(debug_stack_usage) ||
- (addr <= __get_cpu_var(debug_stack_addr) &&
- addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
+ return __this_cpu_read(debug_stack_usage) ||
+ (addr <= __this_cpu_read(debug_stack_addr) &&
+ addr > (__this_cpu_read(debug_stack_addr) - DEBUG_STKSZ));
}
NOKPROBE_SYMBOL(is_debug_stack);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
index 5ac2d1fb28bc..4cfba4371a71 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
@@ -83,7 +83,7 @@ static DEFINE_MUTEX(mce_inject_mutex);
static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
{
int cpu = smp_processor_id();
- struct mce *m = &__get_cpu_var(injectm);
+ struct mce *m = this_cpu_ptr(&injectm);
if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
return NMI_DONE;
cpumask_clear_cpu(cpu, mce_inject_cpumask);
@@ -97,7 +97,7 @@ static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
static void mce_irq_ipi(void *info)
{
int cpu = smp_processor_id();
- struct mce *m = &__get_cpu_var(injectm);
+ struct mce *m = this_cpu_ptr(&injectm);
if (cpumask_test_cpu(cpu, mce_inject_cpumask) &&
m->inject_flags & MCJ_EXCEPTION) {
@@ -109,7 +109,7 @@ static void mce_irq_ipi(void *info)
/* Inject mce on current CPU */
static int raise_local(void)
{
- struct mce *m = &__get_cpu_var(injectm);
+ struct mce *m = this_cpu_ptr(&injectm);
int context = MCJ_CTX(m->inject_flags);
int ret = 0;
int cpu = m->extcpu;
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index bd9ccda8087f..61a9668cebfd 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -400,7 +400,7 @@ static u64 mce_rdmsrl(u32 msr)
if (offset < 0)
return 0;
- return *(u64 *)((char *)&__get_cpu_var(injectm) + offset);
+ return *(u64 *)((char *)this_cpu_ptr(&injectm) + offset);
}
if (rdmsrl_safe(msr, &v)) {
@@ -422,7 +422,7 @@ static void mce_wrmsrl(u32 msr, u64 v)
int offset = msr_to_offset(msr);
if (offset >= 0)
- *(u64 *)((char *)&__get_cpu_var(injectm) + offset) = v;
+ *(u64 *)((char *)this_cpu_ptr(&injectm) + offset) = v;
return;
}
wrmsrl(msr, v);
@@ -478,7 +478,7 @@ static DEFINE_PER_CPU(struct mce_ring, mce_ring);
/* Runs with CPU affinity in workqueue */
static int mce_ring_empty(void)
{
- struct mce_ring *r = &__get_cpu_var(mce_ring);
+ struct mce_ring *r = this_cpu_ptr(&mce_ring);
return r->start == r->end;
}
@@ -490,7 +490,7 @@ static int mce_ring_get(unsigned long *pfn)
*pfn = 0;
get_cpu();
- r = &__get_cpu_var(mce_ring);
+ r = this_cpu_ptr(&mce_ring);
if (r->start == r->end)
goto out;
*pfn = r->ring[r->start];
@@ -504,7 +504,7 @@ out:
/* Always runs in MCE context with preempt off */
static int mce_ring_add(unsigned long pfn)
{
- struct mce_ring *r = &__get_cpu_var(mce_ring);
+ struct mce_ring *r = this_cpu_ptr(&mce_ring);
unsigned next;
next = (r->end + 1) % MCE_RING_SIZE;
@@ -526,7 +526,7 @@ int mce_available(struct cpuinfo_x86 *c)
static void mce_schedule_work(void)
{
if (!mce_ring_empty())
- schedule_work(&__get_cpu_var(mce_work));
+ schedule_work(this_cpu_ptr(&mce_work));
}
DEFINE_PER_CPU(struct irq_work, mce_irq_work);
@@ -551,7 +551,7 @@ static void mce_report_event(struct pt_regs *regs)
return;
}
- irq_work_queue(&__get_cpu_var(mce_irq_work));
+ irq_work_queue(this_cpu_ptr(&mce_irq_work));
}
/*
@@ -1045,7 +1045,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
mce_gather_info(&m, regs);
- final = &__get_cpu_var(mces_seen);
+ final = this_cpu_ptr(&mces_seen);
*final = m;
memset(valid_banks, 0, sizeof(valid_banks));
@@ -1278,22 +1278,22 @@ static unsigned long (*mce_adjust_timer)(unsigned long interval) =
static int cmc_error_seen(void)
{
- unsigned long *v = &__get_cpu_var(mce_polled_error);
+ unsigned long *v = this_cpu_ptr(&mce_polled_error);
return test_and_clear_bit(0, v);
}
static void mce_timer_fn(unsigned long data)
{
- struct timer_list *t = &__get_cpu_var(mce_timer);
+ struct timer_list *t = this_cpu_ptr(&mce_timer);
unsigned long iv;
int notify;
WARN_ON(smp_processor_id() != data);
- if (mce_available(__this_cpu_ptr(&cpu_info))) {
+ if (mce_available(this_cpu_ptr(&cpu_info))) {
machine_check_poll(MCP_TIMESTAMP,
- &__get_cpu_var(mce_poll_banks));
+ this_cpu_ptr(&mce_poll_banks));
mce_intel_cmci_poll();
}
@@ -1323,7 +1323,7 @@ static void mce_timer_fn(unsigned long data)
*/
void mce_timer_kick(unsigned long interval)
{
- struct timer_list *t = &__get_cpu_var(mce_timer);
+ struct timer_list *t = this_cpu_ptr(&mce_timer);
unsigned long when = jiffies + interval;
unsigned long iv = __this_cpu_read(mce_next_interval);
@@ -1659,7 +1659,7 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t)
static void __mcheck_cpu_init_timer(void)
{
- struct timer_list *t = &__get_cpu_var(mce_timer);
+ struct timer_list *t = this_cpu_ptr(&mce_timer);
unsigned int cpu = smp_processor_id();
setup_timer(t, mce_timer_fn, cpu);
@@ -1702,8 +1702,8 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
__mcheck_cpu_init_generic();
__mcheck_cpu_init_vendor(c);
__mcheck_cpu_init_timer();
- INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
- init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
+ INIT_WORK(this_cpu_ptr(&mce_work), mce_process_work);
+ init_irq_work(this_cpu_ptr(&mce_irq_work), &mce_irq_work_cb);
}
/*
@@ -1955,7 +1955,7 @@ static struct miscdevice mce_chrdev_device = {
static void __mce_disable_bank(void *arg)
{
int bank = *((int *)arg);
- __clear_bit(bank, __get_cpu_var(mce_poll_banks));
+ __clear_bit(bank, this_cpu_ptr(mce_poll_banks));
cmci_disable_bank(bank);
}
@@ -2065,7 +2065,7 @@ static void mce_syscore_shutdown(void)
static void mce_syscore_resume(void)
{
__mcheck_cpu_init_generic();
- __mcheck_cpu_init_vendor(__this_cpu_ptr(&cpu_info));
+ __mcheck_cpu_init_vendor(raw_cpu_ptr(&cpu_info));
}
static struct syscore_ops mce_syscore_ops = {
@@ -2080,7 +2080,7 @@ static struct syscore_ops mce_syscore_ops = {
static void mce_cpu_restart(void *data)
{
- if (!mce_available(__this_cpu_ptr(&cpu_info)))
+ if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
__mcheck_cpu_init_generic();
__mcheck_cpu_init_timer();
@@ -2096,14 +2096,14 @@ static void mce_restart(void)
/* Toggle features for corrected errors */
static void mce_disable_cmci(void *data)
{
- if (!mce_available(__this_cpu_ptr(&cpu_info)))
+ if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
cmci_clear();
}
static void mce_enable_ce(void *all)
{
- if (!mce_available(__this_cpu_ptr(&cpu_info)))
+ if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
cmci_reenable();
cmci_recheck();
@@ -2336,7 +2336,7 @@ static void mce_disable_cpu(void *h)
unsigned long action = *(unsigned long *)h;
int i;
- if (!mce_available(__this_cpu_ptr(&cpu_info)))
+ if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
if (!(action & CPU_TASKS_FROZEN))
@@ -2354,7 +2354,7 @@ static void mce_reenable_cpu(void *h)
unsigned long action = *(unsigned long *)h;
int i;
- if (!mce_available(__this_cpu_ptr(&cpu_info)))
+ if (!mce_available(raw_cpu_ptr(&cpu_info)))
return;
if (!(action & CPU_TASKS_FROZEN))
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 1e49f8f41276..5d4999f95aec 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -310,7 +310,7 @@ static void amd_threshold_interrupt(void)
* event.
*/
machine_check_poll(MCP_TIMESTAMP,
- &__get_cpu_var(mce_poll_banks));
+ this_cpu_ptr(&mce_poll_banks));
if (high & MASK_OVERFLOW_HI) {
rdmsrl(address, m.misc);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c
index 3bdb95ae8c43..b3c97bafc123 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c
@@ -86,7 +86,7 @@ void mce_intel_cmci_poll(void)
{
if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
return;
- machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+ machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
}
void mce_intel_hcpu_update(unsigned long cpu)
@@ -145,7 +145,7 @@ static void cmci_storm_disable_banks(void)
u64 val;
raw_spin_lock_irqsave(&cmci_discover_lock, flags);
- owned = __get_cpu_var(mce_banks_owned);
+ owned = this_cpu_ptr(mce_banks_owned);
for_each_set_bit(bank, owned, MAX_NR_BANKS) {
rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
val &= ~MCI_CTL2_CMCI_EN;
@@ -195,7 +195,7 @@ static void intel_threshold_interrupt(void)
{
if (cmci_storm_detect())
return;
- machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+ machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
mce_notify_irq();
}
@@ -206,7 +206,7 @@ static void intel_threshold_interrupt(void)
*/
static void cmci_discover(int banks)
{
- unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
+ unsigned long *owned = (void *)this_cpu_ptr(&mce_banks_owned);
unsigned long flags;
int i;
int bios_wrong_thresh = 0;
@@ -228,7 +228,7 @@ static void cmci_discover(int banks)
/* Already owned by someone else? */
if (val & MCI_CTL2_CMCI_EN) {
clear_bit(i, owned);
- __clear_bit(i, __get_cpu_var(mce_poll_banks));
+ __clear_bit(i, this_cpu_ptr(mce_poll_banks));
continue;
}
@@ -252,7 +252,7 @@ static void cmci_discover(int banks)
/* Did the enable bit stick? -- the bank supports CMCI */
if (val & MCI_CTL2_CMCI_EN) {
set_bit(i, owned);
- __clear_bit(i, __get_cpu_var(mce_poll_banks));
+ __clear_bit(i, this_cpu_ptr(mce_poll_banks));
/*
* We are able to set thresholds for some banks that
* had a threshold of 0. This means the BIOS has not
@@ -263,7 +263,7 @@ static void cmci_discover(int banks)
(val & MCI_CTL2_CMCI_THRESHOLD_MASK))
bios_wrong_thresh = 1;
} else {
- WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
+ WARN_ON(!test_bit(i, this_cpu_ptr(mce_poll_banks)));
}
}
raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
@@ -284,10 +284,10 @@ void cmci_recheck(void)
unsigned long flags;
int banks;
- if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
+ if (!mce_available(raw_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
return;
local_irq_save(flags);
- machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+ machine_check_poll(MCP_TIMESTAMP, this_cpu_ptr(&mce_banks_owned));
local_irq_restore(flags);
}
@@ -296,12 +296,12 @@ static void __cmci_disable_bank(int bank)
{
u64 val;
- if (!test_bit(bank, __get_cpu_var(mce_banks_owned)))
+ if (!test_bit(bank, this_cpu_ptr(mce_banks_owned)))
return;
rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
val &= ~MCI_CTL2_CMCI_EN;
wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
- __clear_bit(bank, __get_cpu_var(mce_banks_owned));
+ __clear_bit(bank, this_cpu_ptr(mce_banks_owned));
}
/*
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 16c73022306e..1b8299dd3d91 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -494,7 +494,7 @@ static int __x86_pmu_event_init(struct perf_event *event)
void x86_pmu_disable_all(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -512,7 +512,7 @@ void x86_pmu_disable_all(void)
static void x86_pmu_disable(struct pmu *pmu)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (!x86_pmu_initialized())
return;
@@ -529,7 +529,7 @@ static void x86_pmu_disable(struct pmu *pmu)
void x86_pmu_enable_all(int added)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -876,7 +876,7 @@ static void x86_pmu_start(struct perf_event *event, int flags);
static void x86_pmu_enable(struct pmu *pmu)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_event *event;
struct hw_perf_event *hwc;
int i, added = cpuc->n_added;
@@ -1030,7 +1030,7 @@ void x86_pmu_enable_event(struct perf_event *event)
*/
static int x86_pmu_add(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc;
int assign[X86_PMC_IDX_MAX];
int n, n0, ret;
@@ -1081,7 +1081,7 @@ out:
static void x86_pmu_start(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx = event->hw.idx;
if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
@@ -1160,7 +1160,7 @@ void perf_event_print_debug(void)
void x86_pmu_stop(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
@@ -1182,7 +1182,7 @@ void x86_pmu_stop(struct perf_event *event, int flags)
static void x86_pmu_del(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int i;
/*
@@ -1237,7 +1237,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs)
int idx, handled = 0;
u64 val;
- cpuc = &__get_cpu_var(cpu_hw_events);
+ cpuc = this_cpu_ptr(&cpu_hw_events);
/*
* Some chipsets need to unmask the LVTPC in a particular spot
@@ -1646,7 +1646,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
*/
static int x86_pmu_commit_txn(struct pmu *pmu)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int assign[X86_PMC_IDX_MAX];
int n, ret;
@@ -2005,7 +2005,7 @@ static unsigned long get_segment_base(unsigned int segment)
if (idx > GDT_ENTRIES)
return 0;
- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
+ desc = raw_cpu_ptr(gdt_page.gdt);
}
return get_desc_base(desc + idx);
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index beeb7cc07044..28926311aac1 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -699,7 +699,7 @@ __init int amd_pmu_init(void)
void amd_pmu_enable_virt(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
cpuc->perf_ctr_virt_mask = 0;
@@ -711,7 +711,7 @@ EXPORT_SYMBOL_GPL(amd_pmu_enable_virt);
void amd_pmu_disable_virt(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
/*
* We only mask out the Host-only bit so that host-only counting works
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 3851def5057c..a73947c53b65 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1174,7 +1174,7 @@ static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
static void intel_pmu_disable_all(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
@@ -1187,7 +1187,7 @@ static void intel_pmu_disable_all(void)
static void intel_pmu_enable_all(int added)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
intel_pmu_pebs_enable_all();
intel_pmu_lbr_enable_all();
@@ -1221,7 +1221,7 @@ static void intel_pmu_enable_all(int added)
*/
static void intel_pmu_nhm_workaround(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
static const unsigned long nhm_magic[4] = {
0x4300B5,
0x4300D2,
@@ -1320,7 +1320,7 @@ static inline bool event_is_checkpointed(struct perf_event *event)
static void intel_pmu_disable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
intel_pmu_disable_bts();
@@ -1384,7 +1384,7 @@ static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
static void intel_pmu_enable_event(struct perf_event *event)
{
struct hw_perf_event *hwc = &event->hw;
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (unlikely(hwc->idx == INTEL_PMC_IDX_FIXED_BTS)) {
if (!__this_cpu_read(cpu_hw_events.enabled))
@@ -1478,7 +1478,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
u64 status;
int handled;
- cpuc = &__get_cpu_var(cpu_hw_events);
+ cpuc = this_cpu_ptr(&cpu_hw_events);
/*
* No known reason to not always do late ACK,
@@ -1910,7 +1910,7 @@ EXPORT_SYMBOL_GPL(perf_guest_get_msrs);
static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
@@ -1931,7 +1931,7 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
int idx;
@@ -1965,7 +1965,7 @@ static void core_pmu_enable_event(struct perf_event *event)
static void core_pmu_enable_all(int added)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index b1553d05a5cb..46211bcc813e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -474,7 +474,7 @@ void intel_pmu_enable_bts(u64 config)
void intel_pmu_disable_bts(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
unsigned long debugctlmsr;
if (!cpuc->ds)
@@ -491,7 +491,7 @@ void intel_pmu_disable_bts(void)
int intel_pmu_drain_bts_buffer(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct debug_store *ds = cpuc->ds;
struct bts_record {
u64 from;
@@ -669,7 +669,7 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
void intel_pmu_pebs_enable(struct perf_event *event)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
@@ -684,7 +684,7 @@ void intel_pmu_pebs_enable(struct perf_event *event)
void intel_pmu_pebs_disable(struct perf_event *event)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
@@ -702,7 +702,7 @@ void intel_pmu_pebs_disable(struct perf_event *event)
void intel_pmu_pebs_enable_all(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (cpuc->pebs_enabled)
wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
@@ -710,7 +710,7 @@ void intel_pmu_pebs_enable_all(void)
void intel_pmu_pebs_disable_all(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (cpuc->pebs_enabled)
wrmsrl(MSR_IA32_PEBS_ENABLE, 0);
@@ -718,7 +718,7 @@ void intel_pmu_pebs_disable_all(void)
static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
unsigned long from = cpuc->lbr_entries[0].from;
unsigned long old_to, to = cpuc->lbr_entries[0].to;
unsigned long ip = regs->ip;
@@ -829,7 +829,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
* We cast to the biggest pebs_record but are careful not to
* unconditionally access the 'extra' entries.
*/
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct pebs_record_hsw *pebs = __pebs;
struct perf_sample_data data;
struct pt_regs regs;
@@ -916,7 +916,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct debug_store *ds = cpuc->ds;
struct perf_event *event = cpuc->events[0]; /* PMC0 only */
struct pebs_record_core *at, *top;
@@ -957,7 +957,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct debug_store *ds = cpuc->ds;
struct perf_event *event = NULL;
void *at, *top;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 4af10617de33..45fa730a5283 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -133,7 +133,7 @@ static void intel_pmu_lbr_filter(struct cpu_hw_events *cpuc);
static void __intel_pmu_lbr_enable(void)
{
u64 debugctl;
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (cpuc->lbr_sel)
wrmsrl(MSR_LBR_SELECT, cpuc->lbr_sel->config);
@@ -183,7 +183,7 @@ void intel_pmu_lbr_reset(void)
void intel_pmu_lbr_enable(struct perf_event *event)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (!x86_pmu.lbr_nr)
return;
@@ -203,7 +203,7 @@ void intel_pmu_lbr_enable(struct perf_event *event)
void intel_pmu_lbr_disable(struct perf_event *event)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (!x86_pmu.lbr_nr)
return;
@@ -220,7 +220,7 @@ void intel_pmu_lbr_disable(struct perf_event *event)
void intel_pmu_lbr_enable_all(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (cpuc->lbr_users)
__intel_pmu_lbr_enable();
@@ -228,7 +228,7 @@ void intel_pmu_lbr_enable_all(void)
void intel_pmu_lbr_disable_all(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (cpuc->lbr_users)
__intel_pmu_lbr_disable();
@@ -332,7 +332,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
void intel_pmu_lbr_read(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
if (!cpuc->lbr_users)
return;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 619f7699487a..d64f275fe274 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -135,7 +135,7 @@ static inline u64 rapl_scale(u64 v)
* or use ldexp(count, -32).
* Watts = Joules/Time delta
*/
- return v << (32 - __get_cpu_var(rapl_pmu)->hw_unit);
+ return v << (32 - __this_cpu_read(rapl_pmu->hw_unit));
}
static u64 rapl_event_update(struct perf_event *event)
@@ -187,7 +187,7 @@ static void rapl_stop_hrtimer(struct rapl_pmu *pmu)
static enum hrtimer_restart rapl_hrtimer_handle(struct hrtimer *hrtimer)
{
- struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+ struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
struct perf_event *event;
unsigned long flags;
@@ -234,7 +234,7 @@ static void __rapl_pmu_event_start(struct rapl_pmu *pmu,
static void rapl_pmu_event_start(struct perf_event *event, int mode)
{
- struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+ struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
unsigned long flags;
spin_lock_irqsave(&pmu->lock, flags);
@@ -244,7 +244,7 @@ static void rapl_pmu_event_start(struct perf_event *event, int mode)
static void rapl_pmu_event_stop(struct perf_event *event, int mode)
{
- struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+ struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
struct hw_perf_event *hwc = &event->hw;
unsigned long flags;
@@ -278,7 +278,7 @@ static void rapl_pmu_event_stop(struct perf_event *event, int mode)
static int rapl_pmu_event_add(struct perf_event *event, int mode)
{
- struct rapl_pmu *pmu = __get_cpu_var(rapl_pmu);
+ struct rapl_pmu *pmu = __this_cpu_read(rapl_pmu);
struct hw_perf_event *hwc = &event->hw;
unsigned long flags;
@@ -696,7 +696,7 @@ static int __init rapl_pmu_init(void)
return -1;
}
- pmu = __get_cpu_var(rapl_pmu);
+ pmu = __this_cpu_read(rapl_pmu);
pr_info("RAPL PMU detected, hw unit 2^-%d Joules,"
" API unit is 2^-32 Joules,"
diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
index 838fa8772c62..5b0c232d1ee6 100644
--- a/arch/x86/kernel/cpu/perf_event_knc.c
+++ b/arch/x86/kernel/cpu/perf_event_knc.c
@@ -217,7 +217,7 @@ static int knc_pmu_handle_irq(struct pt_regs *regs)
int bit, loops;
u64 status;
- cpuc = &__get_cpu_var(cpu_hw_events);
+ cpuc = this_cpu_ptr(&cpu_hw_events);
knc_pmu_disable_all();
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index 5d466b7d8609..f2e56783af3d 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -915,7 +915,7 @@ static inline void p4_pmu_disable_event(struct perf_event *event)
static void p4_pmu_disable_all(void)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -984,7 +984,7 @@ static void p4_pmu_enable_event(struct perf_event *event)
static void p4_pmu_enable_all(int added)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx;
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
@@ -1004,7 +1004,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
int idx, handled = 0;
u64 val;
- cpuc = &__get_cpu_var(cpu_hw_events);
+ cpuc = this_cpu_ptr(&cpu_hw_events);
for (idx = 0; idx < x86_pmu.num_counters; idx++) {
int overflow;
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index 5f9cf20cdb68..3d5fb509bdeb 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -108,7 +108,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
int i;
for (i = 0; i < HBP_NUM; i++) {
- struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+ struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
if (!*slot) {
*slot = bp;
@@ -122,7 +122,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
set_debugreg(info->address, i);
__this_cpu_write(cpu_debugreg[i], info->address);
- dr7 = &__get_cpu_var(cpu_dr7);
+ dr7 = this_cpu_ptr(&cpu_dr7);
*dr7 |= encode_dr7(i, info->len, info->type);
set_debugreg(*dr7, 7);
@@ -146,7 +146,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
int i;
for (i = 0; i < HBP_NUM; i++) {
- struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
+ struct perf_event **slot = this_cpu_ptr(&bp_per_reg[i]);
if (*slot == bp) {
*slot = NULL;
@@ -157,7 +157,7 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
return;
- dr7 = &__get_cpu_var(cpu_dr7);
+ dr7 = this_cpu_ptr(&cpu_dr7);
*dr7 &= ~__encode_dr7(i, info->len, info->type);
set_debugreg(*dr7, 7);
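In the hw_breakpoint.c hunks above, bp_per_reg is a per-cpu array and the new code takes the address of a single element, this_cpu_ptr(&bp_per_reg[i]), rather than the whole array. A hedged user-space sketch of that slot-scan shape, with a plain array standing in for the per-cpu storage:

/* Demo only: bp_per_reg_demo and this_cpu_ptr_demo are made-up stand-ins, not kernel APIs. */
#define HBP_NUM_DEMO 4

static void *bp_per_reg_demo[HBP_NUM_DEMO];

#define this_cpu_ptr_demo(ptr) (ptr)                   /* stand-in for this_cpu_ptr() */

static int install_demo(void *bp)
{
	int i;

	for (i = 0; i < HBP_NUM_DEMO; i++) {
		/* take the address of element i, then resolve it for the local CPU */
		void **slot = this_cpu_ptr_demo(&bp_per_reg_demo[i]);

		if (!*slot) {
			*slot = bp;                     /* claim the first free slot */
			return i;
		}
	}
	return -1;
}

int main(void)
{
	int dummy;

	return install_demo(&dummy) == 0 ? 0 : 1;
}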
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 4d1c746892eb..e4b503d5558c 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -52,13 +52,13 @@ static inline void stack_overflow_check(struct pt_regs *regs)
regs->sp <= curbase + THREAD_SIZE)
return;
- irq_stack_top = (u64)__get_cpu_var(irq_stack_union.irq_stack) +
+ irq_stack_top = (u64)this_cpu_ptr(irq_stack_union.irq_stack) +
STACK_TOP_MARGIN;
- irq_stack_bottom = (u64)__get_cpu_var(irq_stack_ptr);
+ irq_stack_bottom = (u64)__this_cpu_read(irq_stack_ptr);
if (regs->sp >= irq_stack_top && regs->sp <= irq_stack_bottom)
return;
- oist = &__get_cpu_var(orig_ist);
+ oist = this_cpu_ptr(&orig_ist);
estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN;
estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1];
if (regs->sp >= estack_top && regs->sp <= estack_bottom)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 95c3cb16af3e..f6945bef2cd1 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -244,9 +244,9 @@ u32 kvm_read_and_reset_pf_reason(void)
{
u32 reason = 0;
- if (__get_cpu_var(apf_reason).enabled) {
- reason = __get_cpu_var(apf_reason).reason;
- __get_cpu_var(apf_reason).reason = 0;
+ if (__this_cpu_read(apf_reason.enabled)) {
+ reason = __this_cpu_read(apf_reason.reason);
+ __this_cpu_write(apf_reason.reason, 0);
}
return reason;
@@ -319,7 +319,7 @@ static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
* there's no need for lock or memory barriers.
* An optimization barrier is implied in apic write.
*/
- if (__test_and_clear_bit(KVM_PV_EOI_BIT, &__get_cpu_var(kvm_apic_eoi)))
+ if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
return;
apic_write(APIC_EOI, APIC_EOI_ACK);
}
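A recurring split in these kvm.c hunks: where only the value matters (apf_reason.enabled, apf_reason.reason), the accessor becomes __this_cpu_read()/__this_cpu_write(); where a real pointer is required (the bit operation above, slow_virt_to_phys() in the kvm_guest_cpu_init hunk below), this_cpu_ptr(&var) stays. A small user-space sketch of that distinction, with demo stand-ins only:

/* Demo only: every identifier ending in _demo is made up; fake_virt_to_phys stands in for slow_virt_to_phys(). */
#include <stdint.h>
#include <stdio.h>

static struct { uint32_t reason; int enabled; } apf_reason_demo;

#define this_cpu_ptr_demo(ptr)      (ptr)
#define this_cpu_read_demo(var)     (var)
#define this_cpu_write_demo(var, x) ((var) = (x))

static uintptr_t fake_virt_to_phys(const void *p)
{
	return (uintptr_t)p;                           /* placeholder for a real address translation */
}

int main(void)
{
	/* value is enough: read and clear the recorded reason */
	uint32_t reason = this_cpu_read_demo(apf_reason_demo.reason);
	this_cpu_write_demo(apf_reason_demo.reason, 0);

	/* an address is required: registering the per-CPU buffer with the hypervisor */
	uintptr_t pa = fake_virt_to_phys(this_cpu_ptr_demo(&apf_reason_demo));

	printf("%u %lx\n", reason, (unsigned long)pa);
	return 0;
}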
@@ -330,13 +330,13 @@ void kvm_guest_cpu_init(void)
return;
if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
- u64 pa = slow_virt_to_phys(&__get_cpu_var(apf_reason));
+ u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));
#ifdef CONFIG_PREEMPT
pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
wrmsrl(MSR_KVM_ASYNC_PF_EN, pa | KVM_ASYNC_PF_ENABLED);
- __get_cpu_var(apf_reason).enabled = 1;
+ __this_cpu_write(apf_reason.enabled, 1);
printk(KERN_INFO"KVM setup async PF for cpu %d\n",
smp_processor_id());
}
@@ -345,8 +345,8 @@ void kvm_guest_cpu_init(void)
unsigned long pa;
/* Size alignment is implied but just to make it explicit. */
BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
- __get_cpu_var(kvm_apic_eoi) = 0;
- pa = slow_virt_to_phys(&__get_cpu_var(kvm_apic_eoi))
+ __this_cpu_write(kvm_apic_eoi, 0);
+ pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
| KVM_MSR_ENABLED;
wrmsrl(MSR_KVM_PV_EOI_EN, pa);
}
@@ -357,11 +357,11 @@ void kvm_guest_cpu_init(void)
static void kvm_pv_disable_apf(void)
{
- if (!__get_cpu_var(apf_reason).enabled)
+ if (!__this_cpu_read(apf_reason.enabled))
return;
wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
- __get_cpu_var(apf_reason).enabled = 0;
+ __this_cpu_write(apf_reason.enabled, 0);
printk(KERN_INFO"Unregister pv shared memory for cpu %d\n",
smp_processor_id());
@@ -724,7 +724,7 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
if (in_nmi())
return;
- w = &__get_cpu_var(klock_waiting);
+ w = this_cpu_ptr(&klock_waiting);
cpu = smp_processor_id();
start = spin_time_start();
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f7f6a4a157a6..65510f624dfe 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -670,7 +670,7 @@ static int svm_hardware_enable(void)
if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
- __get_cpu_var(current_tsc_ratio) = TSC_RATIO_DEFAULT;
+ __this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
}
@@ -1313,8 +1313,8 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
if (static_cpu_has(X86_FEATURE_TSCRATEMSR) &&
- svm->tsc_ratio != __get_cpu_var(current_tsc_ratio)) {
- __get_cpu_var(current_tsc_ratio) = svm->tsc_ratio;
+ svm->tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
+ __this_cpu_write(current_tsc_ratio, svm->tsc_ratio);
wrmsrl(MSR_AMD64_TSC_RATIO, svm->tsc_ratio);
}
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 04fa1b8298c8..d9dcfa27aa84 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1626,7 +1626,7 @@ static void reload_tss(void)
/*
* VT restores TR but not its size. Useless.
*/
- struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
+ struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
struct desc_struct *descs;
descs = (void *)gdt->address;
@@ -1672,7 +1672,7 @@ static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
static unsigned long segment_base(u16 selector)
{
- struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
+ struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
struct desc_struct *d;
unsigned long table_base;
unsigned long v;
@@ -1802,7 +1802,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
*/
if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded)
stts();
- load_gdt(&__get_cpu_var(host_gdt));
+ load_gdt(this_cpu_ptr(&host_gdt));
}
static void vmx_load_host_state(struct vcpu_vmx *vmx)
@@ -1832,7 +1832,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
if (vmx->loaded_vmcs->cpu != cpu) {
- struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
+ struct desc_ptr *gdt = this_cpu_ptr(&host_gdt);
unsigned long sysenter_esp;
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
@@ -2771,7 +2771,7 @@ static int hardware_enable(void)
ept_sync_global();
}
- native_store_gdt(&__get_cpu_var(host_gdt));
+ native_store_gdt(this_cpu_ptr(&host_gdt));
return 0;
}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5430e4b0af29..34c8f94331f8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1559,7 +1559,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
- this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
+ this_tsc_khz = __this_cpu_read(cpu_tsc_khz);
if (unlikely(this_tsc_khz == 0)) {
local_irq_restore(flags);
kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
index dd89a13f1051..b4f2e7e9e907 100644
--- a/arch/x86/mm/kmemcheck/kmemcheck.c
+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
@@ -140,7 +140,7 @@ static DEFINE_PER_CPU(struct kmemcheck_context, kmemcheck_context);
bool kmemcheck_active(struct pt_regs *regs)
{
- struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+ struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
return data->balance > 0;
}
@@ -148,7 +148,7 @@ bool kmemcheck_active(struct pt_regs *regs)
/* Save an address that needs to be shown/hidden */
static void kmemcheck_save_addr(unsigned long addr)
{
- struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+ struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
BUG_ON(data->n_addrs >= ARRAY_SIZE(data->addr));
data->addr[data->n_addrs++] = addr;
@@ -156,7 +156,7 @@ static void kmemcheck_save_addr(unsigned long addr)
static unsigned int kmemcheck_show_all(void)
{
- struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+ struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
unsigned int i;
unsigned int n;
@@ -169,7 +169,7 @@ static unsigned int kmemcheck_show_all(void)
static unsigned int kmemcheck_hide_all(void)
{
- struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+ struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
unsigned int i;
unsigned int n;
@@ -185,7 +185,7 @@ static unsigned int kmemcheck_hide_all(void)
*/
void kmemcheck_show(struct pt_regs *regs)
{
- struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+ struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
BUG_ON(!irqs_disabled());
@@ -226,7 +226,7 @@ void kmemcheck_show(struct pt_regs *regs)
*/
void kmemcheck_hide(struct pt_regs *regs)
{
- struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+ struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
int n;
BUG_ON(!irqs_disabled());
@@ -528,7 +528,7 @@ static void kmemcheck_access(struct pt_regs *regs,
const uint8_t *insn_primary;
unsigned int size;
- struct kmemcheck_context *data = &__get_cpu_var(kmemcheck_context);
+ struct kmemcheck_context *data = this_cpu_ptr(&kmemcheck_context);
/* Recursive fault -- ouch. */
if (data->busy) {
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index 379e8bd0deea..1d2e6392f5fa 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -64,11 +64,11 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model,
static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs)
{
if (ctr_running)
- model->check_ctrs(regs, &__get_cpu_var(cpu_msrs));
+ model->check_ctrs(regs, this_cpu_ptr(&cpu_msrs));
else if (!nmi_enabled)
return NMI_DONE;
else
- model->stop(&__get_cpu_var(cpu_msrs));
+ model->stop(this_cpu_ptr(&cpu_msrs));
return NMI_HANDLED;
}
@@ -91,7 +91,7 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs)
static void nmi_cpu_start(void *dummy)
{
- struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
+ struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);
if (!msrs->controls)
WARN_ON_ONCE(1);
else
@@ -111,7 +111,7 @@ static int nmi_start(void)
static void nmi_cpu_stop(void *dummy)
{
- struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs);
+ struct op_msrs const *msrs = this_cpu_ptr(&cpu_msrs);
if (!msrs->controls)
WARN_ON_ONCE(1);
else
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 98ab13058f89..ad1d91f475ab 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -372,7 +372,7 @@ static unsigned int get_stagger(void)
{
#ifdef CONFIG_SMP
int cpu = smp_processor_id();
- return cpu != cpumask_first(__get_cpu_var(cpu_sibling_map));
+ return cpu != cpumask_first(this_cpu_cpumask_var_ptr(cpu_sibling_map));
#endif
return 0;
}
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index c89c93320c12..c6b146e67116 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -63,8 +63,8 @@
static struct uv_hub_nmi_s **uv_hub_nmi_list;
-DEFINE_PER_CPU(struct uv_cpu_nmi_s, __uv_cpu_nmi);
-EXPORT_PER_CPU_SYMBOL_GPL(__uv_cpu_nmi);
+DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
+EXPORT_PER_CPU_SYMBOL_GPL(uv_cpu_nmi);
static unsigned long nmi_mmr;
static unsigned long nmi_mmr_clear;
@@ -215,7 +215,7 @@ static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
int nmi = 0;
local64_inc(&uv_nmi_count);
- uv_cpu_nmi.queries++;
+ this_cpu_inc(uv_cpu_nmi.queries);
do {
nmi = atomic_read(&hub_nmi->in_nmi);
@@ -293,7 +293,7 @@ static void uv_nmi_nr_cpus_ping(void)
int cpu;
for_each_cpu(cpu, uv_nmi_cpu_mask)
- atomic_set(&uv_cpu_nmi_per(cpu).pinging, 1);
+ uv_cpu_nmi_per(cpu).pinging = 1;
apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
}
@@ -304,8 +304,8 @@ static void uv_nmi_cleanup_mask(void)
int cpu;
for_each_cpu(cpu, uv_nmi_cpu_mask) {
- atomic_set(&uv_cpu_nmi_per(cpu).pinging, 0);
- atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_OUT);
+ uv_cpu_nmi_per(cpu).pinging = 0;
+ uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT;
cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
}
}
@@ -328,7 +328,7 @@ static int uv_nmi_wait_cpus(int first)
int loop_delay = uv_nmi_loop_delay;
for_each_cpu(j, uv_nmi_cpu_mask) {
- if (atomic_read(&uv_cpu_nmi_per(j).state)) {
+ if (uv_cpu_nmi_per(j).state) {
cpumask_clear_cpu(j, uv_nmi_cpu_mask);
if (++k >= n)
break;
@@ -359,7 +359,7 @@ static int uv_nmi_wait_cpus(int first)
static void uv_nmi_wait(int master)
{
/* indicate this cpu is in */
- atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_IN);
+ this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);
/* if not the first cpu in (the master), then we are a slave cpu */
if (!master)
@@ -419,7 +419,7 @@ static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
"UV:%sNMI process trace for CPU %d\n", dots, cpu);
show_regs(regs);
}
- atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
+ this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
}
/* Trigger a slave cpu to dump it's state */
@@ -427,20 +427,20 @@ static void uv_nmi_trigger_dump(int cpu)
{
int retry = uv_nmi_trigger_delay;
- if (atomic_read(&uv_cpu_nmi_per(cpu).state) != UV_NMI_STATE_IN)
+ if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN)
return;
- atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP);
+ uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP;
do {
cpu_relax();
udelay(10);
- if (atomic_read(&uv_cpu_nmi_per(cpu).state)
+ if (uv_cpu_nmi_per(cpu).state
!= UV_NMI_STATE_DUMP)
return;
} while (--retry > 0);
pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
- atomic_set(&uv_cpu_nmi_per(cpu).state, UV_NMI_STATE_DUMP_DONE);
+ uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
}
/* Wait until all cpus ready to exit */
@@ -488,7 +488,7 @@ static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
} else {
while (!atomic_read(&uv_nmi_slave_continue))
cpu_relax();
- while (atomic_read(&uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
+ while (this_cpu_read(uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
cpu_relax();
uv_nmi_dump_state_cpu(cpu, regs);
}
@@ -615,7 +615,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
local_irq_save(flags);
/* If not a UV System NMI, ignore */
- if (!atomic_read(&uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
+ if (!this_cpu_read(uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
local_irq_restore(flags);
return NMI_DONE;
}
@@ -639,7 +639,7 @@ int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
uv_call_kgdb_kdb(cpu, regs, master);
/* Clear per_cpu "in nmi" flag */
- atomic_set(&uv_cpu_nmi.state, UV_NMI_STATE_OUT);
+ this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT);
/* Clear MMR NMI flag on each hub */
uv_clear_nmi(cpu);
@@ -666,16 +666,16 @@ static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
{
int ret;
- uv_cpu_nmi.queries++;
- if (!atomic_read(&uv_cpu_nmi.pinging)) {
+ this_cpu_inc(uv_cpu_nmi.queries);
+ if (!this_cpu_read(uv_cpu_nmi.pinging)) {
local64_inc(&uv_nmi_ping_misses);
return NMI_DONE;
}
- uv_cpu_nmi.pings++;
+ this_cpu_inc(uv_cpu_nmi.pings);
local64_inc(&uv_nmi_ping_count);
ret = uv_handle_nmi(reason, regs);
- atomic_set(&uv_cpu_nmi.pinging, 0);
+ this_cpu_write(uv_cpu_nmi.pinging, 0);
return ret;
}
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 5c86786bbfd2..a244237f3cfa 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -365,7 +365,7 @@ __setup("uvrtcevt", uv_enable_evt_rtc);
static __init void uv_rtc_register_clockevents(struct work_struct *dummy)
{
- struct clock_event_device *ced = &__get_cpu_var(cpu_ced);
+ struct clock_event_device *ced = this_cpu_ptr(&cpu_ced);
*ced = clock_event_device_uv;
ced->cpumask = cpumask_of(smp_processor_id());
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index acb0effd8077..1a3f0445432a 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -821,7 +821,7 @@ static void xen_convert_trap_info(const struct desc_ptr *desc,
void xen_copy_trap_info(struct trap_info *traps)
{
- const struct desc_ptr *desc = &__get_cpu_var(idt_desc);
+ const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);
xen_convert_trap_info(desc, traps);
}
@@ -838,7 +838,7 @@ static void xen_load_idt(const struct desc_ptr *desc)
spin_lock(&lock);
- __get_cpu_var(idt_desc) = *desc;
+ memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));
xen_convert_trap_info(desc, traps);
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 0d82003e76ad..ea54a08d8301 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -54,7 +54,7 @@ DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags);
void xen_mc_flush(void)
{
- struct mc_buffer *b = &__get_cpu_var(mc_buffer);
+ struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
struct multicall_entry *mc;
int ret = 0;
unsigned long flags;
@@ -131,7 +131,7 @@ void xen_mc_flush(void)
struct multicall_space __xen_mc_entry(size_t args)
{
- struct mc_buffer *b = &__get_cpu_var(mc_buffer);
+ struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
struct multicall_space ret;
unsigned argidx = roundup(b->argidx, sizeof(u64));
@@ -162,7 +162,7 @@ struct multicall_space __xen_mc_entry(size_t args)
struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
{
- struct mc_buffer *b = &__get_cpu_var(mc_buffer);
+ struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
struct multicall_space ret = { NULL, NULL };
BUG_ON(preemptible());
@@ -192,7 +192,7 @@ out:
void xen_mc_callback(void (*fn)(void *), void *data)
{
- struct mc_buffer *b = &__get_cpu_var(mc_buffer);
+ struct mc_buffer *b = this_cpu_ptr(&mc_buffer);
struct callback *cb;
if (b->cbidx == MC_BATCH) {
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 0ba5f3b967f0..23b45eb9a89c 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -109,7 +109,7 @@ static bool xen_pvspin = true;
__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
int irq = __this_cpu_read(lock_kicker_irq);
- struct xen_lock_waiting *w = &__get_cpu_var(lock_waiting);
+ struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
int cpu = smp_processor_id();
u64 start;
unsigned long flags;
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 5718b0b58b60..a1d430b112b3 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -80,7 +80,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
BUG_ON(preemptible());
- state = &__get_cpu_var(xen_runstate);
+ state = this_cpu_ptr(&xen_runstate);
/*
* The runstate info is always updated by the hypervisor on
@@ -123,7 +123,7 @@ static void do_stolen_accounting(void)
WARN_ON(state.state != RUNSTATE_running);
- snap = &__get_cpu_var(xen_runstate_snapshot);
+ snap = this_cpu_ptr(&xen_runstate_snapshot);
/* work out how much time the VCPU has not been runn*ing* */
runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
@@ -158,7 +158,7 @@ cycle_t xen_clocksource_read(void)
cycle_t ret;
preempt_disable_notrace();
- src = &__get_cpu_var(xen_vcpu)->time;
+ src = this_cpu_ptr(&xen_vcpu->time);
ret = pvclock_clocksource_read(src);
preempt_enable_notrace();
return ret;
@@ -397,7 +397,7 @@ static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.
static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
- struct clock_event_device *evt = &__get_cpu_var(xen_clock_events).evt;
+ struct clock_event_device *evt = this_cpu_ptr(&xen_clock_events.evt);
irqreturn_t ret;
ret = IRQ_NONE;
@@ -460,7 +460,7 @@ void xen_setup_cpu_clockevents(void)
{
BUG_ON(preemptible());
- clockevents_register_device(&__get_cpu_var(xen_clock_events).evt);
+ clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
}
void xen_timer_resume(void)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index c18d41db83d8..82759cef9043 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -874,7 +874,7 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
void add_interrupt_randomness(int irq, int irq_flags)
{
struct entropy_store *r;
- struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
+ struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness);
struct pt_regs *regs = get_irq_regs();
unsigned long now = jiffies;
cycles_t cycles = random_get_entropy();
diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c
index ad3572541728..31990600fcff 100644
--- a/drivers/clocksource/dummy_timer.c
+++ b/drivers/clocksource/dummy_timer.c
@@ -28,7 +28,7 @@ static void dummy_timer_set_mode(enum clock_event_mode mode,
static void dummy_timer_setup(void)
{
int cpu = smp_processor_id();
- struct clock_event_device *evt = __this_cpu_ptr(&dummy_timer_evt);
+ struct clock_event_device *evt = raw_cpu_ptr(&dummy_timer_evt);
evt->name = "dummy_timer";
evt->features = CLOCK_EVT_FEAT_PERIODIC |
diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
index 9e4db41abe3c..b7384b853e5a 100644
--- a/drivers/clocksource/metag_generic.c
+++ b/drivers/clocksource/metag_generic.c
@@ -90,7 +90,7 @@ static struct clocksource clocksource_metag = {
static irqreturn_t metag_timer_interrupt(int irq, void *dummy)
{
- struct clock_event_device *evt = &__get_cpu_var(local_clockevent);
+ struct clock_event_device *evt = this_cpu_ptr(&local_clockevent);
evt->event_handler(evt);
diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c
index 8d115db1e651..098c542e5c53 100644
--- a/drivers/clocksource/qcom-timer.c
+++ b/drivers/clocksource/qcom-timer.c
@@ -219,7 +219,7 @@ static void __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq,
}
/* Immediately configure the timer on the boot CPU */
- msm_local_timer_setup(__this_cpu_ptr(msm_evt));
+ msm_local_timer_setup(raw_cpu_ptr(msm_evt));
}
err:
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 044ee0df5871..06b57c4c4d80 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -66,7 +66,7 @@ static inline void ladder_do_selection(struct ladder_device *ldev,
static int ladder_select_state(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{
- struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
+ struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
struct ladder_device_state *last_state;
int last_residency, last_idx = ldev->last_state_idx;
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
@@ -170,7 +170,7 @@ static int ladder_enable_device(struct cpuidle_driver *drv,
*/
static void ladder_reflect(struct cpuidle_device *dev, int index)
{
- struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
+ struct ladder_device *ldev = this_cpu_ptr(&ladder_devices);
if (index > 0)
ldev->last_state_idx = index;
}
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 34db2fb3ef1e..710a233b9b0d 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -289,7 +289,7 @@ again:
*/
static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
- struct menu_device *data = &__get_cpu_var(menu_devices);
+ struct menu_device *data = this_cpu_ptr(&menu_devices);
int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
int i;
unsigned int interactivity_req;
@@ -372,7 +372,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
*/
static void menu_reflect(struct cpuidle_device *dev, int index)
{
- struct menu_device *data = &__get_cpu_var(menu_devices);
+ struct menu_device *data = this_cpu_ptr(&menu_devices);
data->last_state_idx = index;
if (index >= 0)
data->needs_update = 1;
@@ -385,7 +385,7 @@ static void menu_reflect(struct cpuidle_device *dev, int index)
*/
static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
{
- struct menu_device *data = &__get_cpu_var(menu_devices);
+ struct menu_device *data = this_cpu_ptr(&menu_devices);
int last_idx = data->last_state_idx;
struct cpuidle_state *target = &drv->states[last_idx];
unsigned int measured_us;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index f0a4800a15b0..38493ff28fa5 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -102,7 +102,7 @@ static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
- return *__this_cpu_ptr(base->percpu_base);
+ return raw_cpu_read(*base->percpu_base);
}
static void __iomem *gic_get_common_base(union gic_base *base)
@@ -522,11 +522,11 @@ static void gic_cpu_save(unsigned int gic_nr)
if (!dist_base || !cpu_base)
return;
- ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+ ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
- ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+ ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
@@ -548,11 +548,11 @@ static void gic_cpu_restore(unsigned int gic_nr)
if (!dist_base || !cpu_base)
return;
- ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
+ ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
- ptr = __this_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
+ ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 28a90122a5a8..87f86c77b094 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -548,7 +548,7 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
* A race condition can at worst result in the merged flag being
* misrepresented, so we don't have to disable preemption here.
*/
- last = __this_cpu_ptr(stats->last);
+ last = raw_cpu_ptr(stats->last);
stats_aux->merged =
(bi_sector == (ACCESS_ONCE(last->last_sector) &&
((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 69557a26f749..049747f558c9 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -423,7 +423,7 @@ static void tile_net_pop_all_buffers(int instance, int stack)
/* Provide linux buffers to mPIPE. */
static void tile_net_provide_needed_buffers(void)
{
- struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
int instance, kind;
for (instance = 0; instance < NR_MPIPE_MAX &&
info->mpipe[instance].has_iqueue; instance++) {
@@ -551,7 +551,7 @@ static inline bool filter_packet(struct net_device *dev, void *buf)
static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
gxio_mpipe_idesc_t *idesc, unsigned long len)
{
- struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
struct tile_net_priv *priv = netdev_priv(dev);
int instance = priv->instance;
@@ -585,7 +585,7 @@ static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
/* Handle a packet. Return true if "processed", false if "filtered". */
static bool tile_net_handle_packet(int instance, gxio_mpipe_idesc_t *idesc)
{
- struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
struct mpipe_data *md = &mpipe_data[instance];
struct net_device *dev = md->tile_net_devs_for_channel[idesc->channel];
uint8_t l2_offset;
@@ -651,7 +651,7 @@ drop:
*/
static int tile_net_poll(struct napi_struct *napi, int budget)
{
- struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
unsigned int work = 0;
gxio_mpipe_idesc_t *idesc;
int instance, i, n;
@@ -700,7 +700,7 @@ done:
/* Handle an ingress interrupt from an instance on the current cpu. */
static irqreturn_t tile_net_handle_ingress_irq(int irq, void *id)
{
- struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
napi_schedule(&info->mpipe[(uint64_t)id].napi);
return IRQ_HANDLED;
}
@@ -763,7 +763,7 @@ static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t)
/* Make sure the egress timer is scheduled. */
static void tile_net_schedule_egress_timer(void)
{
- struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
if (!info->egress_timer_scheduled) {
hrtimer_start(&info->egress_timer,
@@ -780,7 +780,7 @@ static void tile_net_schedule_egress_timer(void)
*/
static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
{
- struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
unsigned long irqflags;
bool pending = false;
int i, instance;
@@ -1927,7 +1927,7 @@ static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
*/
static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
{
- struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
struct tile_net_priv *priv = netdev_priv(dev);
int channel = priv->echannel;
int instance = priv->instance;
@@ -1996,7 +1996,7 @@ static unsigned int tile_net_tx_frags(struct frag *frags,
/* Help the kernel transmit a packet. */
static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
{
- struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
struct tile_net_priv *priv = netdev_priv(dev);
int instance = priv->instance;
struct mpipe_data *md = &mpipe_data[instance];
@@ -2138,7 +2138,7 @@ static int tile_net_set_mac_address(struct net_device *dev, void *p)
static void tile_net_netpoll(struct net_device *dev)
{
int instance = mpipe_instance(dev);
- struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
struct mpipe_data *md = &mpipe_data[instance];
disable_percpu_irq(md->ingress_irq);
@@ -2237,7 +2237,7 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
/* Per-cpu module initialization. */
static void tile_net_init_module_percpu(void *unused)
{
- struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
+ struct tile_net_info *info = this_cpu_ptr(&per_cpu_info);
int my_cpu = smp_processor_id();
int instance;
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 88818d5054ab..fb12d31cfcf6 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -996,13 +996,13 @@ static void tile_net_register(void *dev_ptr)
PDEBUG("tile_net_register(queue_id %d)\n", queue_id);
if (!strcmp(dev->name, "xgbe0"))
- info = &__get_cpu_var(hv_xgbe0);
+ info = this_cpu_ptr(&hv_xgbe0);
else if (!strcmp(dev->name, "xgbe1"))
- info = &__get_cpu_var(hv_xgbe1);
+ info = this_cpu_ptr(&hv_xgbe1);
else if (!strcmp(dev->name, "gbe0"))
- info = &__get_cpu_var(hv_gbe0);
+ info = this_cpu_ptr(&hv_gbe0);
else if (!strcmp(dev->name, "gbe1"))
- info = &__get_cpu_var(hv_gbe1);
+ info = this_cpu_ptr(&hv_gbe1);
else
BUG();
diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c
index 8aa73fac6ad4..0581461c3a67 100644
--- a/drivers/oprofile/cpu_buffer.c
+++ b/drivers/oprofile/cpu_buffer.c
@@ -45,7 +45,7 @@ unsigned long oprofile_get_cpu_buffer_size(void)
void oprofile_cpu_buffer_inc_smpl_lost(void)
{
- struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
+ struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
cpu_buf->sample_lost_overflow++;
}
@@ -297,7 +297,7 @@ __oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
unsigned long event, int is_kernel,
struct task_struct *task)
{
- struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
+ struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
unsigned long backtrace = oprofile_backtrace_depth;
/*
@@ -357,7 +357,7 @@ oprofile_write_reserve(struct op_entry *entry, struct pt_regs * const regs,
{
struct op_sample *sample;
int is_kernel = !user_mode(regs);
- struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
+ struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
cpu_buf->sample_received++;
@@ -412,13 +412,13 @@ int oprofile_write_commit(struct op_entry *entry)
void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
{
- struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
+ struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
log_sample(cpu_buf, pc, 0, is_kernel, event, NULL);
}
void oprofile_add_trace(unsigned long pc)
{
- struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(op_cpu_buffer);
+ struct oprofile_cpu_buffer *cpu_buf = this_cpu_ptr(&op_cpu_buffer);
if (!cpu_buf->tracing)
return;
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 61be1d9c16c8..bdef916e5dda 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -32,7 +32,7 @@ static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *hrtimer)
static void __oprofile_hrtimer_start(void *unused)
{
- struct hrtimer *hrtimer = &__get_cpu_var(oprofile_hrtimer);
+ struct hrtimer *hrtimer = this_cpu_ptr(&oprofile_hrtimer);
if (!ctr_running)
return;
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 07676c22d514..79f59915f71b 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -252,7 +252,7 @@ static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
*/
void ccw_request_handler(struct ccw_device *cdev)
{
- struct irb *irb = &__get_cpu_var(cio_irb);
+ struct irb *irb = this_cpu_ptr(&cio_irb);
struct ccw_request *req = &cdev->private->req;
enum io_status status;
int rc = -EOPNOTSUPP;
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 3d22d2a4ce14..213159dec89e 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -58,7 +58,7 @@ static void chsc_subchannel_irq(struct subchannel *sch)
{
struct chsc_private *private = dev_get_drvdata(&sch->dev);
struct chsc_request *request = private->request;
- struct irb *irb = &__get_cpu_var(cio_irb);
+ struct irb *irb = this_cpu_ptr(&cio_irb);
CHSC_LOG(4, "irb");
CHSC_LOG_HEX(4, irb, sizeof(*irb));
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index d5a6f287d2fe..10eb738fc81a 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -563,7 +563,7 @@ static irqreturn_t do_cio_interrupt(int irq, void *dummy)
set_cpu_flag(CIF_NOHZ_DELAY);
tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
- irb = &__get_cpu_var(cio_irb);
+ irb = this_cpu_ptr(&cio_irb);
sch = (struct subchannel *)(unsigned long) tpi_info->intparm;
if (!sch) {
/* Clear pending interrupt condition. */
@@ -613,7 +613,7 @@ void cio_tsch(struct subchannel *sch)
struct irb *irb;
int irq_context;
- irb = &__get_cpu_var(cio_irb);
+ irb = this_cpu_ptr(&cio_irb);
/* Store interrupt response block to lowcore. */
if (tsch(sch->schid, irb) != 0)
/* Not status pending or not operational. */
@@ -751,7 +751,7 @@ __clear_io_subchannel_easy(struct subchannel_id schid)
struct tpi_info ti;
if (tpi(&ti)) {
- tsch(ti.schid, &__get_cpu_var(cio_irb));
+ tsch(ti.schid, this_cpu_ptr(&cio_irb));
if (schid_equal(&ti.schid, &schid))
return 0;
}
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 0bc902b3cd84..83da53c8e54c 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -739,7 +739,7 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
struct irb *irb;
int is_cmd;
- irb = &__get_cpu_var(cio_irb);
+ irb = this_cpu_ptr(&cio_irb);
is_cmd = !scsw_is_tm(&irb->scsw);
/* Check for unsolicited interrupt. */
if (!scsw_is_solicited(&irb->scsw)) {
@@ -805,7 +805,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
struct irb *irb;
- irb = &__get_cpu_var(cio_irb);
+ irb = this_cpu_ptr(&cio_irb);
/* Check for unsolicited interrupt. */
if (scsw_stctl(&irb->scsw) ==
(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index c4f7bf3e24c2..37f0834300ea 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -134,7 +134,7 @@ static void eadm_subchannel_irq(struct subchannel *sch)
{
struct eadm_private *private = get_eadm_private(sch);
struct eadm_scsw *scsw = &sch->schib.scsw.eadm;
- struct irb *irb = &__get_cpu_var(cio_irb);
+ struct irb *irb = this_cpu_ptr(&cio_irb);
int error = 0;
EADM_LOG(6, "irq");
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 8b0f9ef517d6..748c9136a60a 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4134,7 +4134,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
* per cpu locality group is to reduce the contention between block
* request from multiple CPUs.
*/
- ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);
+ ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups);
/* we're going to use group allocation */
ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 2997af6d2ccd..0a9a6da21e74 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -666,10 +666,19 @@ static inline size_t cpumask_size(void)
*
* This code makes NR_CPUS length memcopy and brings to a memory corruption.
* cpumask_copy() provide safe copy functionality.
+ *
+ * Note that there is another evil here: if you define a cpumask_var_t
+ * as a percpu variable, the way to obtain the address of the cpumask
+ * structure depends on the configuration, and so does the this_cpu_*
+ * operation that must be used. Please use this_cpu_cpumask_var_ptr()
+ * in those cases. Direct use of this_cpu_ptr() or this_cpu_read()
+ * will fail when the other cpumask_var_t implementation is configured.
*/
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;
+#define this_cpu_cpumask_var_ptr(x) this_cpu_read(x)
+
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
@@ -681,6 +690,8 @@ void free_bootmem_cpumask_var(cpumask_var_t mask);
#else
typedef struct cpumask cpumask_var_t[1];
+#define this_cpu_cpumask_var_ptr(x) this_cpu_ptr(x)
+
static inline bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
{
return true;
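Hedged aside, not part of the patch: the hunk above introduces the this_cpu_cpumask_var_ptr() helper that later hunks (deadline.c, fair.c, rt.c, op_model_p4.c) switch to, so a minimal usage sketch may help. The per-cpu variable example_mask and the function around it are invented for illustration; with CONFIG_CPUMASK_OFFSTACK the per-cpu masks would additionally need to be allocated with alloc_cpumask_var() during init, as the scheduler does for local_cpu_mask.

#include <linux/cpumask.h>
#include <linux/percpu.h>

/* Hypothetical per-cpu mask; real users include local_cpu_mask in rt.c. */
static DEFINE_PER_CPU(cpumask_var_t, example_mask);

static int pick_cpu_from_local_mask(void)
{
	/* Resolves correctly for both the off-stack and array configs. */
	struct cpumask *mask = this_cpu_cpumask_var_ptr(example_mask);

	return cpumask_first(mask);
}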
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index ecbc52f9ff77..8422b4ed6882 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -44,8 +44,8 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
DECLARE_PER_CPU(struct kernel_cpustat, kernel_cpustat);
/* Must have preemption disabled for this to be meaningful. */
-#define kstat_this_cpu (&__get_cpu_var(kstat))
-#define kcpustat_this_cpu (&__get_cpu_var(kernel_cpustat))
+#define kstat_this_cpu this_cpu_ptr(&kstat)
+#define kcpustat_this_cpu this_cpu_ptr(&kernel_cpustat)
#define kstat_cpu(cpu) per_cpu(kstat, cpu)
#define kcpustat_cpu(cpu) per_cpu(kernel_cpustat, cpu)
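A hedged usage sketch for the reworked kstat_this_cpu macro above; the helper local_irq_sum() and the irqs_sum read are invented purely to illustrate the "preemption disabled" requirement stated in the comment, and are not taken from the patch.

#include <linux/kernel_stat.h>
#include <linux/preempt.h>

/* Hypothetical helper: read this CPU's interrupt sum with preemption off. */
static unsigned long local_irq_sum(void)
{
	unsigned long sum;

	preempt_disable();
	sum = kstat_this_cpu->irqs_sum;
	preempt_enable();

	return sum;
}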
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index cfd56046ecec..420032d41d27 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -257,9 +257,6 @@ do { \
#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
-/* keep until we have removed all uses of __this_cpu_ptr */
-#define __this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
-
/*
* Must be an lvalue. Since @var must be a simple identifier,
* we force a syntax error here if it isn't.
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 37252f71a380..c8a7db605e03 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -242,7 +242,7 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
static inline struct nf_conn *nf_ct_untracked_get(void)
{
- return &__raw_get_cpu_var(nf_conntrack_untracked);
+ return raw_cpu_ptr(&nf_conntrack_untracked);
}
void nf_ct_untracked_status_or(unsigned long bits);
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 8fd2f498782e..35512ac6dcfb 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -164,7 +164,7 @@ struct linux_xfrm_mib {
#define SNMP_ADD_STATS64_BH(mib, field, addend) \
do { \
- __typeof__(*mib) *ptr = __this_cpu_ptr(mib); \
+ __typeof__(*mib) *ptr = raw_cpu_ptr(mib); \
u64_stats_update_begin(&ptr->syncp); \
ptr->mibs[field] += addend; \
u64_stats_update_end(&ptr->syncp); \
@@ -185,8 +185,8 @@ struct linux_xfrm_mib {
#define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \
do { \
- __typeof__(*mib) *ptr; \
- ptr = __this_cpu_ptr(mib); \
+ __typeof__(*mib) *ptr; \
+ ptr = raw_cpu_ptr((mib)); \
u64_stats_update_begin(&ptr->syncp); \
ptr->mibs[basefield##PKTS]++; \
ptr->mibs[basefield##OCTETS] += addend; \
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index f2a88de87a49..d659487254d5 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -137,7 +137,7 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx)
int cpu;
struct callchain_cpus_entries *entries;
- *rctx = get_recursion_context(__get_cpu_var(callchain_recursion));
+ *rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
if (*rctx == -1)
return NULL;
@@ -153,7 +153,7 @@ static struct perf_callchain_entry *get_callchain_entry(int *rctx)
static void
put_callchain_entry(int rctx)
{
- put_recursion_context(__get_cpu_var(callchain_recursion), rctx);
+ put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}
struct perf_callchain_entry *
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 094df8c0742d..1425d07018de 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -249,7 +249,7 @@ static void perf_duration_warn(struct irq_work *w)
u64 avg_local_sample_len;
u64 local_samples_len;
- local_samples_len = __get_cpu_var(running_sample_length);
+ local_samples_len = __this_cpu_read(running_sample_length);
avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
printk_ratelimited(KERN_WARNING
@@ -271,10 +271,10 @@ void perf_sample_event_took(u64 sample_len_ns)
return;
/* decay the counter by 1 average sample */
- local_samples_len = __get_cpu_var(running_sample_length);
+ local_samples_len = __this_cpu_read(running_sample_length);
local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
local_samples_len += sample_len_ns;
- __get_cpu_var(running_sample_length) = local_samples_len;
+ __this_cpu_write(running_sample_length, local_samples_len);
/*
* note: this will be biased artifically low until we have
@@ -882,7 +882,7 @@ static DEFINE_PER_CPU(struct list_head, rotation_list);
static void perf_pmu_rotate_start(struct pmu *pmu)
{
struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
- struct list_head *head = &__get_cpu_var(rotation_list);
+ struct list_head *head = this_cpu_ptr(&rotation_list);
WARN_ON(!irqs_disabled());
@@ -2462,7 +2462,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
* to check if we have to switch out PMU state.
* cgroup event are system-wide mode only
*/
- if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
+ if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
perf_cgroup_sched_out(task, next);
}
@@ -2705,11 +2705,11 @@ void __perf_event_task_sched_in(struct task_struct *prev,
* to check if we have to switch in PMU state.
* cgroup event are system-wide mode only
*/
- if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
+ if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
perf_cgroup_sched_in(prev, task);
/* check for system-wide branch_stack events */
- if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
+ if (atomic_read(this_cpu_ptr(&perf_branch_stack_events)))
perf_branch_stack_sched_in(prev, task);
}
@@ -2964,7 +2964,7 @@ bool perf_event_can_stop_tick(void)
void perf_event_task_tick(void)
{
- struct list_head *head = &__get_cpu_var(rotation_list);
+ struct list_head *head = this_cpu_ptr(&rotation_list);
struct perf_cpu_context *cpuctx, *tmp;
struct perf_event_context *ctx;
int throttled;
@@ -5833,7 +5833,7 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
struct perf_sample_data *data,
struct pt_regs *regs)
{
- struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+ struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
struct perf_event *event;
struct hlist_head *head;
@@ -5852,7 +5852,7 @@ end:
int perf_swevent_get_recursion_context(void)
{
- struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+ struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
return get_recursion_context(swhash->recursion);
}
@@ -5860,7 +5860,7 @@ EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
inline void perf_swevent_put_recursion_context(int rctx)
{
- struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+ struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
put_recursion_context(swhash->recursion, rctx);
}
@@ -5889,7 +5889,7 @@ static void perf_swevent_read(struct perf_event *event)
static int perf_swevent_add(struct perf_event *event, int flags)
{
- struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
+ struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
struct hw_perf_event *hwc = &event->hw;
struct hlist_head *head;
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 8fb52e9bddc1..e5202f00cabc 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -699,7 +699,7 @@ void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irqaction *action = desc->action;
- void *dev_id = __this_cpu_ptr(action->percpu_dev_id);
+ void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
irqreturn_t res;
kstat_incr_irqs_this_cpu(irq, desc);
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 385b85aded19..3ab9048483fa 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -95,11 +95,11 @@ bool irq_work_queue(struct irq_work *work)
/* If the work is "lazy", handle it from next tick if any */
if (work->flags & IRQ_WORK_LAZY) {
- if (llist_add(&work->llnode, &__get_cpu_var(lazy_list)) &&
+ if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
tick_nohz_tick_stopped())
arch_irq_work_raise();
} else {
- if (llist_add(&work->llnode, &__get_cpu_var(raised_list)))
+ if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
arch_irq_work_raise();
}
@@ -113,8 +113,8 @@ bool irq_work_needs_cpu(void)
{
struct llist_head *raised, *lazy;
- raised = &__get_cpu_var(raised_list);
- lazy = &__get_cpu_var(lazy_list);
+ raised = this_cpu_ptr(&raised_list);
+ lazy = this_cpu_ptr(&lazy_list);
if (llist_empty(raised) || arch_irq_work_has_interrupt())
if (llist_empty(lazy))
@@ -168,8 +168,8 @@ static void irq_work_run_list(struct llist_head *list)
*/
void irq_work_run(void)
{
- irq_work_run_list(&__get_cpu_var(raised_list));
- irq_work_run_list(&__get_cpu_var(lazy_list));
+ irq_work_run_list(this_cpu_ptr(&raised_list));
+ irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index e3962d63e368..ced2b84b1cb7 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -2622,7 +2622,7 @@ void wake_up_klogd(void)
preempt_disable();
if (waitqueue_active(&log_wait)) {
this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
- irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
+ irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
}
preempt_enable();
}
@@ -2638,7 +2638,7 @@ int printk_deferred(const char *fmt, ...)
va_end(args);
__this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
- irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
+ irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
preempt_enable();
return r;
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 3ef6451e972e..c27e4f8f4879 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -134,7 +134,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct sched_clock_data, sched_clock_data);
static inline struct sched_clock_data *this_scd(void)
{
- return &__get_cpu_var(sched_clock_data);
+ return this_cpu_ptr(&sched_clock_data);
}
static inline struct sched_clock_data *cpu_sdc(int cpu)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index abfaf3d9a29f..256e577faf1b 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1153,7 +1153,7 @@ static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
static int find_later_rq(struct task_struct *task)
{
struct sched_domain *sd;
- struct cpumask *later_mask = __get_cpu_var(local_cpu_mask_dl);
+ struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
int this_cpu = smp_processor_id();
int best_cpu, cpu = task_cpu(task);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b78280c59b46..0b069bf3e708 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6615,7 +6615,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
struct sched_group *group;
struct rq *busiest;
unsigned long flags;
- struct cpumask *cpus = __get_cpu_var(load_balance_mask);
+ struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
struct lb_env env = {
.sd = sd,
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 87ea5bf1b87f..d024e6ce30ba 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1525,7 +1525,7 @@ static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
static int find_lowest_rq(struct task_struct *task)
{
struct sched_domain *sd;
- struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
+ struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
int this_cpu = smp_processor_id();
int cpu = task_cpu(task);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6130251de280..24156c8434d1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -663,10 +663,10 @@ static inline int cpu_of(struct rq *rq)
DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
-#define this_rq() (&__get_cpu_var(runqueues))
+#define this_rq() this_cpu_ptr(&runqueues)
#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
-#define raw_rq() (&__raw_get_cpu_var(runqueues))
+#define raw_rq() raw_cpu_ptr(&runqueues)
static inline u64 rq_clock(struct rq *rq)
{
diff --git a/kernel/smp.c b/kernel/smp.c
index 9e0d0b289118..f38a1e692259 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -165,7 +165,7 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
if (!csd) {
csd = &csd_stack;
if (!wait)
- csd = &__get_cpu_var(csd_data);
+ csd = this_cpu_ptr(&csd_data);
}
csd_lock(csd);
@@ -230,7 +230,7 @@ static void flush_smp_call_function_queue(bool warn_cpu_offline)
WARN_ON(!irqs_disabled());
- head = &__get_cpu_var(call_single_queue);
+ head = this_cpu_ptr(&call_single_queue);
entry = llist_del_all(head);
entry = llist_reverse_order(entry);
@@ -420,7 +420,7 @@ void smp_call_function_many(const struct cpumask *mask,
return;
}
- cfd = &__get_cpu_var(cfd_data);
+ cfd = this_cpu_ptr(&cfd_data);
cpumask_and(cfd->cpumask, mask, cpu_online_mask);
cpumask_clear_cpu(this_cpu, cfd->cpumask);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 348ec763b104..0699add19164 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -485,7 +485,7 @@ static void tasklet_action(struct softirq_action *a)
local_irq_disable();
list = __this_cpu_read(tasklet_vec.head);
__this_cpu_write(tasklet_vec.head, NULL);
- __this_cpu_write(tasklet_vec.tail, &__get_cpu_var(tasklet_vec).head);
+ __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
local_irq_enable();
while (list) {
@@ -521,7 +521,7 @@ static void tasklet_hi_action(struct softirq_action *a)
local_irq_disable();
list = __this_cpu_read(tasklet_hi_vec.head);
__this_cpu_write(tasklet_hi_vec.head, NULL);
- __this_cpu_write(tasklet_hi_vec.tail, &__get_cpu_var(tasklet_hi_vec).head);
+ __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
local_irq_enable();
while (list) {
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index 13d2f7cd65db..b312fcc73024 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -638,7 +638,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
fill_tgid_exit(tsk);
}
- listeners = __this_cpu_ptr(&listener_array);
+ listeners = raw_cpu_ptr(&listener_array);
if (list_empty(&listeners->list))
return;
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index ab370ffffd53..37e50aadd471 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -558,7 +558,7 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
static int hrtimer_reprogram(struct hrtimer *timer,
struct hrtimer_clock_base *base)
{
- struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
int res;
@@ -629,7 +629,7 @@ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
*/
static void retrigger_next_event(void *arg)
{
- struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
+ struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
if (!hrtimer_hres_active())
return;
@@ -903,7 +903,7 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
*/
debug_deactivate(timer);
timer_stats_hrtimer_clear_start_info(timer);
- reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
+ reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
/*
* We must preserve the CALLBACK state flag here,
* otherwise we could move the timer base in
@@ -963,7 +963,7 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
* on dynticks target.
*/
wake_up_nohz_cpu(new_base->cpu_base->cpu);
- } else if (new_base->cpu_base == &__get_cpu_var(hrtimer_bases) &&
+ } else if (new_base->cpu_base == this_cpu_ptr(&hrtimer_bases) &&
hrtimer_reprogram(timer, new_base)) {
/*
* Only allow reprogramming if the new base is on this CPU.
@@ -1103,7 +1103,7 @@ EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
*/
ktime_t hrtimer_get_next_event(void)
{
- struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
struct hrtimer_clock_base *base = cpu_base->clock_base;
ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
unsigned long flags;
@@ -1144,7 +1144,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
memset(timer, 0, sizeof(struct hrtimer));
- cpu_base = &__raw_get_cpu_var(hrtimer_bases);
+ cpu_base = raw_cpu_ptr(&hrtimer_bases);
if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
clock_id = CLOCK_MONOTONIC;
@@ -1187,7 +1187,7 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
struct hrtimer_cpu_base *cpu_base;
int base = hrtimer_clockid_to_base(which_clock);
- cpu_base = &__raw_get_cpu_var(hrtimer_bases);
+ cpu_base = raw_cpu_ptr(&hrtimer_bases);
*tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);
return 0;
@@ -1242,7 +1242,7 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
*/
void hrtimer_interrupt(struct clock_event_device *dev)
{
- struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
ktime_t expires_next, now, entry_time, delta;
int i, retries = 0;
@@ -1376,7 +1376,7 @@ static void __hrtimer_peek_ahead_timers(void)
if (!hrtimer_hres_active())
return;
- td = &__get_cpu_var(tick_cpu_device);
+ td = this_cpu_ptr(&tick_cpu_device);
if (td && td->evtdev)
hrtimer_interrupt(td->evtdev);
}
@@ -1440,7 +1440,7 @@ void hrtimer_run_pending(void)
void hrtimer_run_queues(void)
{
struct timerqueue_node *node;
- struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
struct hrtimer_clock_base *base;
int index, gettime = 1;
@@ -1679,7 +1679,7 @@ static void migrate_hrtimers(int scpu)
local_irq_disable();
old_base = &per_cpu(hrtimer_bases, scpu);
- new_base = &__get_cpu_var(hrtimer_bases);
+ new_base = this_cpu_ptr(&hrtimer_bases);
/*
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 64c5990fd500..066f0ec05e48 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -554,7 +554,7 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
void tick_check_oneshot_broadcast_this_cpu(void)
{
if (cpumask_test_cpu(smp_processor_id(), tick_broadcast_oneshot_mask)) {
- struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
/*
* We might be in the middle of switching over from
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 052b4b53c3d6..7efeedf53ebd 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -224,7 +224,7 @@ static void tick_setup_device(struct tick_device *td,
void tick_install_replacement(struct clock_event_device *newdev)
{
- struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
int cpu = smp_processor_id();
clockevents_exchange_device(td->evtdev, newdev);
@@ -374,14 +374,14 @@ void tick_shutdown(unsigned int *cpup)
void tick_suspend(void)
{
- struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
clockevents_shutdown(td->evtdev);
}
void tick_resume(void)
{
- struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
int broadcast = tick_resume_broadcast();
clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 824109060a33..7ce740e78e1b 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -59,7 +59,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev,
*/
int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
{
- struct tick_device *td = &__get_cpu_var(tick_cpu_device);
+ struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
struct clock_event_device *dev = td->evtdev;
if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) ||
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a73efdf6f696..7b5741fc4110 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -205,7 +205,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now);
*/
void __tick_nohz_full_check(void)
{
- struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
if (tick_nohz_full_cpu(smp_processor_id())) {
if (ts->tick_stopped && !is_idle_task(current)) {
@@ -573,7 +573,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
ktime_t last_update, expires, ret = { .tv64 = 0 };
unsigned long rcu_delta_jiffies;
- struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
+ struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
u64 time_delta;
time_delta = timekeeping_max_deferment();
@@ -841,7 +841,7 @@ void tick_nohz_idle_enter(void)
local_irq_disable();
- ts = &__get_cpu_var(tick_cpu_sched);
+ ts = this_cpu_ptr(&tick_cpu_sched);
ts->inidle = 1;
__tick_nohz_idle_enter(ts);
@@ -859,7 +859,7 @@ EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);
*/
void tick_nohz_irq_exit(void)
{
- struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
if (ts->inidle)
__tick_nohz_idle_enter(ts);
@@ -874,7 +874,7 @@ void tick_nohz_irq_exit(void)
*/
ktime_t tick_nohz_get_sleep_length(void)
{
- struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
return ts->sleep_length;
}
@@ -952,7 +952,7 @@ static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
*/
void tick_nohz_idle_exit(void)
{
- struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
ktime_t now;
local_irq_disable();
@@ -987,7 +987,7 @@ static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
*/
static void tick_nohz_handler(struct clock_event_device *dev)
{
- struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
struct pt_regs *regs = get_irq_regs();
ktime_t now = ktime_get();
@@ -1011,7 +1011,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
*/
static void tick_nohz_switch_to_nohz(void)
{
- struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
ktime_t next;
if (!tick_nohz_enabled)
@@ -1073,7 +1073,7 @@ static void tick_nohz_kick_tick(struct tick_sched *ts, ktime_t now)
static inline void tick_nohz_irq_enter(void)
{
- struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
ktime_t now;
if (!ts->idle_active && !ts->tick_stopped)
@@ -1151,7 +1151,7 @@ early_param("skew_tick", skew_tick);
*/
void tick_setup_sched_timer(void)
{
- struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
ktime_t now = ktime_get();
/*
@@ -1220,7 +1220,7 @@ void tick_clock_notify(void)
*/
void tick_oneshot_notify(void)
{
- struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
set_bit(0, &ts->check_clocks);
}
@@ -1235,7 +1235,7 @@ void tick_oneshot_notify(void)
*/
int tick_check_oneshot_change(int allow_nohz)
{
- struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
+ struct tick_sched *ts = this_cpu_ptr(&tick_cpu_sched);
if (!test_and_clear_bit(0, &ts->check_clocks))
return 0;
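One tick-sched.c hunk differs from the rest: tick_nohz_stop_sched_tick() only loads a single member, so the patch uses __this_cpu_read(tick_cpu_device.evtdev) instead of forming a pointer, which lets architectures emit a single segment-relative load. A hedged sketch with made-up names:

#include <linux/percpu.h>

struct demo_sched {
	int tick_stopped;
};

static DEFINE_PER_CPU(struct demo_sched, demo_sched_state);

static int demo_tick_stopped(void)
{
	/* was: __get_cpu_var(demo_sched_state).tick_stopped */
	return __this_cpu_read(demo_sched_state.tick_stopped);
}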
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 9bbb8344ed3b..3260ffdb368f 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -655,7 +655,7 @@ static inline void debug_assert_init(struct timer_list *timer)
static void do_init_timer(struct timer_list *timer, unsigned int flags,
const char *name, struct lock_class_key *key)
{
- struct tvec_base *base = __raw_get_cpu_var(tvec_bases);
+ struct tvec_base *base = raw_cpu_read(tvec_bases);
timer->entry.next = NULL;
timer->base = (void *)((unsigned long)base | flags);
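In do_init_timer() the per-CPU variable tvec_bases is itself a pointer, so the old __raw_get_cpu_var() lvalue was only ever read; raw_cpu_read() expresses that value load without a preemption check, which the init path tolerates because any CPU's base is acceptable at that point. A hedged sketch with hypothetical names:

#include <linux/percpu.h>

struct demo_base {
	long dummy;
};

static DEFINE_PER_CPU(struct demo_base *, demo_bases);

static struct demo_base *demo_pick_base(void)
{
	/* was: __raw_get_cpu_var(demo_bases) */
	return raw_cpu_read(demo_bases);
}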
diff --git a/kernel/user-return-notifier.c b/kernel/user-return-notifier.c
index 394f70b17162..9586b670a5b2 100644
--- a/kernel/user-return-notifier.c
+++ b/kernel/user-return-notifier.c
@@ -14,7 +14,7 @@ static DEFINE_PER_CPU(struct hlist_head, return_notifier_list);
void user_return_notifier_register(struct user_return_notifier *urn)
{
set_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
- hlist_add_head(&urn->link, &__get_cpu_var(return_notifier_list));
+ hlist_add_head(&urn->link, this_cpu_ptr(&return_notifier_list));
}
EXPORT_SYMBOL_GPL(user_return_notifier_register);
@@ -25,7 +25,7 @@ EXPORT_SYMBOL_GPL(user_return_notifier_register);
void user_return_notifier_unregister(struct user_return_notifier *urn)
{
hlist_del(&urn->link);
- if (hlist_empty(&__get_cpu_var(return_notifier_list)))
+ if (hlist_empty(this_cpu_ptr(&return_notifier_list)))
clear_tsk_thread_flag(current, TIF_USER_RETURN_NOTIFY);
}
EXPORT_SYMBOL_GPL(user_return_notifier_unregister);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 49e9537f3673..70bf11815f84 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -208,7 +208,7 @@ void touch_nmi_watchdog(void)
* case we shouldn't have to worry about the watchdog
* going off.
*/
- __raw_get_cpu_var(watchdog_nmi_touch) = true;
+ raw_cpu_write(watchdog_nmi_touch, true);
touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -217,8 +217,8 @@ EXPORT_SYMBOL(touch_nmi_watchdog);
void touch_softlockup_watchdog_sync(void)
{
- __raw_get_cpu_var(softlockup_touch_sync) = true;
- __raw_get_cpu_var(watchdog_touch_ts) = 0;
+ __this_cpu_write(softlockup_touch_sync, true);
+ __this_cpu_write(watchdog_touch_ts, 0);
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR
@@ -425,7 +425,7 @@ static void watchdog_set_prio(unsigned int policy, unsigned int prio)
static void watchdog_enable(unsigned int cpu)
{
- struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+ struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
/* kick off the timer for the hardlockup detector */
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -445,7 +445,7 @@ static void watchdog_enable(unsigned int cpu)
static void watchdog_disable(unsigned int cpu)
{
- struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+ struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
watchdog_set_prio(SCHED_NORMAL, 0);
hrtimer_cancel(hrtimer);
@@ -585,7 +585,7 @@ static struct smp_hotplug_thread watchdog_threads = {
static void restart_watchdog_hrtimer(void *info)
{
- struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
+ struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
int ret;
/*
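The watchdog hunks show the write-side spellings: raw_cpu_write() replaces an assignment through __raw_get_cpu_var() in touch_nmi_watchdog(), where touching whichever CPU we happen to be on is acceptable and no preemption debug check is wanted, __this_cpu_write() keeps the debug-checked form for callers expected to be pinned, and raw_cpu_ptr() replaces &__raw_get_cpu_var() for the hrtimer. A hedged sketch, names invented:

#include <linux/percpu.h>
#include <linux/types.h>

static DEFINE_PER_CPU(bool, demo_touched);
static DEFINE_PER_CPU(unsigned long, demo_touch_ts);

static void demo_touch_watchdog(void)
{
	/* was: __raw_get_cpu_var(demo_touched) = true; any CPU is fine */
	raw_cpu_write(demo_touched, true);

	/* was: __raw_get_cpu_var(demo_touch_ts) = 0; caller is pinned */
	__this_cpu_write(demo_touch_ts, 0);
}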
diff --git a/net/core/dev.c b/net/core/dev.c
index 4699dcfdc4ab..6470716ddba4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2153,7 +2153,7 @@ static inline void __netif_reschedule(struct Qdisc *q)
unsigned long flags;
local_irq_save(flags);
- sd = &__get_cpu_var(softnet_data);
+ sd = this_cpu_ptr(&softnet_data);
q->next_sched = NULL;
*sd->output_queue_tailp = q;
sd->output_queue_tailp = &q->next_sched;
@@ -3233,7 +3233,7 @@ static void rps_trigger_softirq(void *data)
static int rps_ipi_queued(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
- struct softnet_data *mysd = &__get_cpu_var(softnet_data);
+ struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
if (sd != mysd) {
sd->rps_ipi_next = mysd->rps_ipi_list;
@@ -3260,7 +3260,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
if (qlen < (netdev_max_backlog >> 1))
return false;
- sd = &__get_cpu_var(softnet_data);
+ sd = this_cpu_ptr(&softnet_data);
rcu_read_lock();
fl = rcu_dereference(sd->flow_limit);
@@ -3407,7 +3407,7 @@ EXPORT_SYMBOL(netif_rx_ni);
static void net_tx_action(struct softirq_action *h)
{
- struct softnet_data *sd = &__get_cpu_var(softnet_data);
+ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
if (sd->completion_queue) {
struct sk_buff *clist;
@@ -3832,7 +3832,7 @@ EXPORT_SYMBOL(netif_receive_skb);
static void flush_backlog(void *arg)
{
struct net_device *dev = arg;
- struct softnet_data *sd = &__get_cpu_var(softnet_data);
+ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
struct sk_buff *skb, *tmp;
rps_lock(sd);
@@ -4379,7 +4379,7 @@ void __napi_schedule(struct napi_struct *n)
unsigned long flags;
local_irq_save(flags);
- ____napi_schedule(&__get_cpu_var(softnet_data), n);
+ ____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
}
EXPORT_SYMBOL(__napi_schedule);
@@ -4500,7 +4500,7 @@ EXPORT_SYMBOL(netif_napi_del);
static void net_rx_action(struct softirq_action *h)
{
- struct softnet_data *sd = &__get_cpu_var(softnet_data);
+ struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
void *have;
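All of the softnet_data accesses converted above run with interrupts disabled or from softirq context, so the task cannot migrate and this_cpu_ptr() is a drop-in replacement for &__get_cpu_var(). A hedged sketch of that enqueue-under-irq-off pattern, with made-up names:

#include <linux/irqflags.h>
#include <linux/list.h>
#include <linux/percpu.h>

struct demo_queue {
	struct list_head head;	/* assumed initialised elsewhere */
};

static DEFINE_PER_CPU(struct demo_queue, demo_queue);

static void demo_enqueue(struct list_head *item)
{
	struct demo_queue *q;
	unsigned long flags;

	local_irq_save(flags);		/* pins us to this CPU */
	/* was: q = &__get_cpu_var(demo_queue); */
	q = this_cpu_ptr(&demo_queue);
	list_add_tail(item, &q->head);
	local_irq_restore(flags);
}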
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 50f9a9db5792..252e155c837b 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -146,7 +146,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
unsigned long flags;
local_irq_save(flags);
- data = &__get_cpu_var(dm_cpu_data);
+ data = this_cpu_ptr(&dm_cpu_data);
spin_lock(&data->lock);
dskb = data->skb;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 829d013745ab..61059a05ec95 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -345,7 +345,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
unsigned long flags;
local_irq_save(flags);
- nc = &__get_cpu_var(netdev_alloc_cache);
+ nc = this_cpu_ptr(&netdev_alloc_cache);
if (unlikely(!nc->frag.page)) {
refill:
for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 793c0bb8c4fd..2d4ae469b471 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1311,7 +1311,7 @@ static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
if (rt_is_input_route(rt)) {
p = (struct rtable **)&nh->nh_rth_input;
} else {
- p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output);
+ p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output);
}
orig = *p;
@@ -1939,7 +1939,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
do_cache = false;
goto add;
}
- prth = __this_cpu_ptr(nh->nh_pcpu_rth_output);
+ prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
}
rth = rcu_dereference(*prth);
if (rt_cache_valid(rth)) {
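The two route.c hunks touch dynamically allocated per-CPU data (nh_pcpu_rth_output comes from alloc_percpu()); for that case the old accessor __this_cpu_ptr() is simply renamed to raw_cpu_ptr(). A hedged sketch under the assumption of a hypothetical cache:

#include <linux/errno.h>
#include <linux/percpu.h>

struct demo_rt {
	int refcnt;
};

static struct demo_rt __percpu *demo_rt_cache;

static int demo_rt_init(void)
{
	demo_rt_cache = alloc_percpu(struct demo_rt);
	return demo_rt_cache ? 0 : -ENOMEM;
}

static struct demo_rt *demo_rt_this_cpu(void)
{
	/* was: __this_cpu_ptr(demo_rt_cache) */
	return raw_cpu_ptr(demo_rt_cache);
}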
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 0431a8f3c8f4..af660030e3c7 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -40,7 +40,7 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
net_get_random_once(syncookie_secret, sizeof(syncookie_secret));
- tmp = __get_cpu_var(ipv4_cookie_scratch);
+ tmp = this_cpu_ptr(ipv4_cookie_scratch);
memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c]));
tmp[0] = (__force u32)saddr;
tmp[1] = (__force u32)daddr;
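The syncookie scratch buffer is a per-CPU array, so its name already decays to a pointer and no '&' appears in the converted form. A hedged sketch with hypothetical names:

#include <linux/percpu.h>
#include <linux/types.h>

static DEFINE_PER_CPU(u32 [8], demo_scratch);

static u32 *demo_scratch_ptr(void)
{
	/* was: __get_cpu_var(demo_scratch), relying on the same array decay */
	return this_cpu_ptr(demo_scratch);
}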
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 86023b9be47f..1bec4e76d88c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2941,7 +2941,7 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
local_bh_disable();
p = ACCESS_ONCE(tcp_md5sig_pool);
if (p)
- return __this_cpu_ptr(p);
+ return raw_cpu_ptr(p);
local_bh_enable();
return NULL;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8d4eac793700..becd98ce9a1c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -852,7 +852,7 @@ void tcp_wfree(struct sk_buff *skb)
/* queue this socket to tasklet queue */
local_irq_save(flags);
- tsq = &__get_cpu_var(tsq_tasklet);
+ tsq = this_cpu_ptr(&tsq_tasklet);
list_add(&tp->tsq_node, &tsq->head);
tasklet_schedule(&tsq->tasklet);
local_irq_restore(flags);
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 9a2838e93cc5..e25b633266c3 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -67,7 +67,7 @@ static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *dadd
net_get_random_once(syncookie6_secret, sizeof(syncookie6_secret));
- tmp = __get_cpu_var(ipv6_cookie_scratch);
+ tmp = this_cpu_ptr(ipv6_cookie_scratch);
/*
* we have 320 bits of information to hash, copy in the remaining
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index e8fdb172adbb..273b8bff6ba4 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -267,7 +267,7 @@ static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
unsigned long *flag;
preempt_disable();
- flag = &__get_cpu_var(clean_list_grace);
+ flag = this_cpu_ptr(&clean_list_grace);
set_bit(CLEAN_LIST_BUSY_BIT, flag);
ret = llist_del_first(&pool->clean_list);
if (ret)