author     Daniel Borkmann <daniel@iogearbox.net>    2019-04-09 11:44:46 +0200
committer  Alexei Starovoitov <ast@kernel.org>       2019-04-11 14:45:50 -0700
commit  6b7a21140fca461c6d8d5c65a3746e7da50a409e (patch)
tree    460753960398fedf6a942e734c59e5b07d0c00f7 /tools/arch
parent  94c59aab42ced3af2140acf90b795b4d4f867012 (diff)
tools: add smp_* barrier variants to include infrastructure
Add definitions for smp_rmb(), smp_wmb(), and smp_mb() to the tools include infrastructure: this patch adds implementations for x86-64 and arm64, and keeps the existing fallback for other archs that do not have them implemented at this point. The x86-64 one uses a lock + add combination for smp_mb(), with the address below the red zone.

This is on top of 09d62154f613 ("tools, perf: add and use optimized ring_buffer_{read_head, write_tail} helpers"), which didn't touch the smp_* barrier implementations. Magnus recently rightfully reported, however, that the latter on x86-64 still wrongly fall back to sfence, lfence and mfence respectively, so fix that for applications under tools making use of these, to avoid such ugly surprises. The main header under tools (include/asm/barrier.h) will in that case not select the fallback implementation.

Reported-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
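For context, a minimal sketch (not part of the patch; the ring layout and function names are hypothetical) of how tools-side code might pair the smp_wmb()/smp_rmb() variants this change wires up, e.g. around a single-producer/single-consumer ring:

/*
 * Illustration only, assuming the tools include path so that
 * <asm/barrier.h> resolves to tools/include/asm/barrier.h.
 */
#include <asm/barrier.h>

#define RING_SIZE 256	/* power of two; size chosen arbitrarily for the sketch */

struct spsc_ring {
	volatile unsigned long head;	/* written by the producer only */
	volatile unsigned long tail;	/* written by the consumer only */
	void *slots[RING_SIZE];
};

/* Producer: fill the slot first, then publish it via head (no full check; sketch only). */
static inline void ring_produce(struct spsc_ring *r, void *entry)
{
	unsigned long head = r->head;

	r->slots[head & (RING_SIZE - 1)] = entry;
	smp_wmb();		/* order the slot store before the head update */
	r->head = head + 1;
}

/* Consumer: observe head first, then read the slot it covers. */
static inline void *ring_consume(struct spsc_ring *r)
{
	unsigned long tail = r->tail;
	void *entry;

	if (tail == r->head)
		return NULL;	/* empty */
	smp_rmb();		/* order the head read before the slot read */
	entry = r->slots[tail & (RING_SIZE - 1)];
	r->tail = tail + 1;
	return entry;
}

With this patch applied, the smp_wmb()/smp_rmb() calls above compile to plain compiler barriers on x86-64 instead of sfence/lfence, which is exactly the cost the fix is about.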
Diffstat (limited to 'tools/arch')
-rw-r--r--   tools/arch/arm64/include/asm/barrier.h | 10
-rw-r--r--   tools/arch/x86/include/asm/barrier.h   |  7
2 files changed, 15 insertions(+), 2 deletions(-)
diff --git a/tools/arch/arm64/include/asm/barrier.h b/tools/arch/arm64/include/asm/barrier.h
index 378c051fa177..3b9b41331c4f 100644
--- a/tools/arch/arm64/include/asm/barrier.h
+++ b/tools/arch/arm64/include/asm/barrier.h
@@ -14,6 +14,16 @@
#define wmb() asm volatile("dmb ishst" ::: "memory")
#define rmb() asm volatile("dmb ishld" ::: "memory")
+/*
+ * Kernel uses dmb variants on arm64 for smp_*() barriers. Pretty much the same
+ * implementation as above mb()/wmb()/rmb(), though for the latter kernel uses
+ * dsb. In any case, should above mb()/wmb()/rmb() change, make sure the below
+ * smp_*() don't.
+ */
+#define smp_mb() asm volatile("dmb ish" ::: "memory")
+#define smp_wmb() asm volatile("dmb ishst" ::: "memory")
+#define smp_rmb() asm volatile("dmb ishld" ::: "memory")
+
#define smp_store_release(p, v) \
do { \
union { typeof(*p) __val; char __c[1]; } __u = \
diff --git a/tools/arch/x86/include/asm/barrier.h b/tools/arch/x86/include/asm/barrier.h
index 58919868473c..0adf295dd5b6 100644
--- a/tools/arch/x86/include/asm/barrier.h
+++ b/tools/arch/x86/include/asm/barrier.h
@@ -21,9 +21,12 @@
#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
#elif defined(__x86_64__)
-#define mb() asm volatile("mfence":::"memory")
-#define rmb() asm volatile("lfence":::"memory")
+#define mb() asm volatile("mfence" ::: "memory")
+#define rmb() asm volatile("lfence" ::: "memory")
#define wmb() asm volatile("sfence" ::: "memory")
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#define smp_mb() asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")
#endif
#if defined(__x86_64__)
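As an aside on why smp_mb() above cannot simply be barrier() like smp_rmb()/smp_wmb(): x86-64's TSO model still allows a store to be reordered after a later load, so a full barrier is needed whenever a store must be globally visible before a subsequent load. A deliberately simplified, hypothetical Dekker-style sketch (illustration only, not from the patch):

#include <asm/barrier.h>

static volatile int want[2];	/* want[i] != 0: thread i intends to enter */

/* Try to enter a critical section; returns 1 on success, 0 to back off and retry. */
static int try_enter(int self)
{
	int other = 1 - self;

	want[self] = 1;
	smp_mb();	/* the store to want[self] must be visible before the
			 * load of want[other]; barrier() or smp_wmb() would
			 * not prevent this store-load reordering on x86-64 */
	if (want[other]) {
		want[self] = 0;		/* back off */
		return 0;
	}
	return 1;
}

The -132(%%rsp) offset in the new smp_mb() keeps the locked add below the 128-byte red zone defined by the x86-64 SysV ABI, so it does not clobber data that leaf functions may keep there while still acting as a full memory barrier.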