author     Tejun Heo <tj@kernel.org>   2010-12-17 15:16:46 +0100
committer  Tejun Heo <tj@kernel.org>   2010-12-17 15:16:46 +0100
commit     275c8b93288ef0c2281e414e069ea8ed4bad03f7 (patch)
tree       df906ef901d7719d66944921c7436f5db718dc2f /arch/x86/include
parent     909ea96468096b07fbb41aaf69be060d92bd9271 (diff)
parent     8f1d97c79eb65de1d05799d6b81d79cd94169114 (diff)
Merge branch 'this_cpu_ops' into for-2.6.38
Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/percpu.h    43
-rw-r--r--  arch/x86/include/asm/pvclock.h    1
2 files changed, 44 insertions, 0 deletions
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index f899e01a8ac9..38f9e965ff96 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -177,6 +177,39 @@ do {							\
 	}						\
 } while (0)
 
+/*
+ * Add return operation
+ */
+#define percpu_add_return_op(var, val)			\
+({							\
+	typeof(var) paro_ret__ = val;			\
+	switch (sizeof(var)) {				\
+	case 1:						\
+		asm("xaddb %0, "__percpu_arg(1)		\
+			    : "+q" (paro_ret__), "+m" (var)	\
+			    : : "memory");		\
+		break;					\
+	case 2:						\
+		asm("xaddw %0, "__percpu_arg(1)		\
+			    : "+r" (paro_ret__), "+m" (var)	\
+			    : : "memory");		\
+		break;					\
+	case 4:						\
+		asm("xaddl %0, "__percpu_arg(1)		\
+			    : "+r" (paro_ret__), "+m" (var)	\
+			    : : "memory");		\
+		break;					\
+	case 8:						\
+		asm("xaddq %0, "__percpu_arg(1)		\
+			    : "+re" (paro_ret__), "+m" (var)	\
+			    : : "memory");		\
+		break;					\
+	default: __bad_percpu_size();			\
+	}						\
+	paro_ret__ += val;				\
+	paro_ret__;					\
+})
+
 #define percpu_from_op(op, var, constraint)		\
 ({							\
 	typeof(var) pfo_ret__;				\
@@ -300,6 +333,14 @@ do {							\
 #define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
 
+#ifndef CONFIG_M386
+#define __this_cpu_add_return_1(pcp, val) percpu_add_return_op(pcp, val)
+#define __this_cpu_add_return_2(pcp, val) percpu_add_return_op(pcp, val)
+#define __this_cpu_add_return_4(pcp, val) percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_4(pcp, val)	percpu_add_return_op(pcp, val)
+#endif
 /*
  * Per cpu atomic 64 bit operations are only available under 64 bit.
  * 32 bit must fall back to generic operations.
@@ -324,6 +365,8 @@ do {							\
 #define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 
+#define __this_cpu_add_return_8(pcp, val) percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 7f7e577a0e39..31d84acc1512 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -11,6 +11,7 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
 void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
 			    struct pvclock_vcpu_time_info *vcpu,
 			    struct timespec *ts);
+void pvclock_resume(void);
 
 /*
  * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
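
Note on the percpu.h hunk: percpu_add_return_op() is the x86 backing for the new this_cpu_add_return() operations. xadd stores old + val back into the per-cpu variable and leaves the old value in paro_ret__, so the trailing paro_ret__ += val produces the new value that the operation returns. The segment-relative __percpu_arg() addressing only exists inside the kernel, but the xadd pattern itself can be shown stand-alone. The sketch below is a hypothetical user-space add_return() helper (x86-64, GCC-style inline asm, not kernel code) performing the same old-value-plus-addend calculation on an ordinary long.

/*
 * Hypothetical user-space sketch, not kernel code: demonstrates the
 * xadd-based add-and-return pattern used by percpu_add_return_op(),
 * minus the segment-relative per-cpu addressing.
 */
#include <stdio.h>

static long add_return(long *var, long val)
{
	long old = val;

	/*
	 * xadd writes *var + old back to memory and leaves the previous
	 * value of *var in the register, so adding val once more yields
	 * the new value -- the same trick as paro_ret__ += val above.
	 */
	asm("xaddq %0, %1"
	    : "+r" (old), "+m" (*var)
	    : : "memory");

	return old + val;
}

int main(void)
{
	long counter = 40;

	printf("returned: %ld\n", add_return(&counter, 2));	/* 42 */
	printf("counter:  %ld\n", counter);			/* 42 */
	return 0;
}

As in the kernel macro there is no lock prefix, so the operation is only atomic with respect to the local CPU, matching the "not atomic against other CPUs" comment in the surrounding context. The 1/2/4-byte this_cpu_add_return_* mappings sit behind #ifndef CONFIG_M386 because xadd first appeared on the i486; 386-only builds keep the generic fallback, as does 32-bit code for the 8-byte variants, which are defined only in the 64-bit section.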