author    Heejin Ahn <aheejin@gmail.com>    2018-08-07 00:22:22 +0000
committer Heejin Ahn <aheejin@gmail.com>    2018-08-07 00:22:22 +0000
commit    e8653bb89a8e3159c920eb063cc28d1254474c66 (patch)
tree      037ee5da83389c8b5ead12feb8d02fc73ac514fb
parent    f66d0ce10b1aea10bfdd52e5de2e0729d3827a0c (diff)
[WebAssembly] Enable atomic expansion for unsupported atomicrmws
Summary:
Wasm does not have direct counterparts to some of LLVM IR's atomicrmw
instructions (min, max, umin, umax, and nand). This enables atomic
expansion using a cmpxchg instruction within a loop for those atomicrmw
instructions.

Reviewers: dschuff

Subscribers: sbc100, jgravelle-google, sunfish, llvm-commits

Differential Revision: https://reviews.llvm.org/D49440

llvm-svn: 339084
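For readers unfamiliar with the expansion, here is a minimal sketch, assuming
AtomicExpandPass's usual cmpxchg-loop lowering, of what an expanded
atomicrmw nand looks like in IR. Function and block names are illustrative,
not taken from this patch:

; Sketch: cmpxchg-loop expansion of 'atomicrmw nand i32* %p, i32 %v seq_cst'.
define i32 @nand_i32_expanded(i32* %p, i32 %v) {
entry:
  %init = load i32, i32* %p
  br label %atomicrmw.start

atomicrmw.start:
  ; Retry until the compare-exchange succeeds.
  %loaded = phi i32 [ %init, %entry ], [ %newloaded, %atomicrmw.start ]
  %tmp = and i32 %loaded, %v
  %new = xor i32 %tmp, -1                          ; nand(a, b) = ~(a & b)
  %pair = cmpxchg i32* %p, i32 %loaded, i32 %new seq_cst seq_cst
  %newloaded = extractvalue { i32, i1 } %pair, 0
  %success = extractvalue { i32, i1 } %pair, 1
  br i1 %success, label %atomicrmw.end, label %atomicrmw.start

atomicrmw.end:
  ret i32 %loaded                                  ; atomicrmw yields the old value
}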
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp |  17
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h   |   1
-rw-r--r--  llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td  |   9
-rw-r--r--  llvm/test/CodeGen/WebAssembly/atomic-rmw.ll             | 240
4 files changed, 263 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index aa3c0d03913..b4bd161583d 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -157,6 +157,23 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering(
setMaxAtomicSizeInBitsSupported(64);
}
+TargetLowering::AtomicExpansionKind
+WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+ // We have wasm instructions for these
+ switch (AI->getOperation()) {
+ case AtomicRMWInst::Add:
+ case AtomicRMWInst::Sub:
+ case AtomicRMWInst::And:
+ case AtomicRMWInst::Or:
+ case AtomicRMWInst::Xor:
+ case AtomicRMWInst::Xchg:
+ return AtomicExpansionKind::None;
+ default:
+ break;
+ }
+ return AtomicExpansionKind::CmpXChg;
+}
+
FastISel *WebAssemblyTargetLowering::createFastISel(
FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
return WebAssembly::createFastISel(FuncInfo, LibInfo);
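Returning AtomicExpansionKind::CmpXChg hands the remaining operations to
AtomicExpandPass before instruction selection. For the comparison-based
operations, the pass synthesizes the new value inside the loop with an
icmp/select pair; below is a minimal sketch for max (the predicates I list
for min/umax/umin are my recollection of the pass, so treat them as
assumptions):

; Sketch: the per-iteration value computation the expansion uses for 'max'.
; (min/umax/umin are assumed to use sle/ugt/ule analogously.)
define i32 @max_step(i32 %loaded, i32 %v) {
  %cmp = icmp sgt i32 %loaded, %v             ; signed compare for 'max'
  %new = select i1 %cmp, i32 %loaded, i32 %v  ; keep the larger value
  ret i32 %new
}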
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
index 8e22f90c1af..8b78b0e92a4 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
@@ -44,6 +44,7 @@ class WebAssemblyTargetLowering final : public TargetLowering {
/// right decision when generating code for different targets.
const WebAssemblySubtarget *Subtarget;
+ AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
const TargetLibraryInfo *LibInfo) const override;
bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td
index ba6800e005d..2c66efe09d8 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrAtomics.td
@@ -660,10 +660,11 @@ defm : BinRMWTruncExtPattern<
// Atomic ternary read-modify-writes
//===----------------------------------------------------------------------===//
-// TODO LLVM IR's cmpxchg instruction returns a pair of {loaded value,
-// success flag}. When we use a success flag or both values, we can't make use
-// of truncate/extend versions of instructions for now, which is suboptimal. Add
-// selection rules for those cases too.
+// TODO LLVM IR's cmpxchg instruction returns a pair of {loaded value, success
+// flag}. When we use the success flag or both values, we can't make use of i64
+// truncate/extend versions of instructions for now, which is suboptimal.
+// Consider adding a pass after instruction selection that optimizes this case
+// if it is frequent.
let Defs = [ARGUMENTS] in {
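The situation the TODO describes, as a sketch in IR (modeled on the existing
success-flag tests in atomic-rmw.ll, not new functionality): once the success
flag is consumed, both elements of the result pair are live, so the
truncating/extending cmpxchg patterns cannot be selected:

; Sketch: the success flag is used, so isel cannot fold this into a
; truncating/extending atomic cmpxchg form.
define i1 @cmpxchg_i8_success_flag(i8* %p, i8 %exp, i8 %new) {
  %pair = cmpxchg i8* %p, i8 %exp, i8 %new seq_cst seq_cst
  %succ = extractvalue { i8, i1 } %pair, 1
  ret i1 %succ
}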
diff --git a/llvm/test/CodeGen/WebAssembly/atomic-rmw.ll b/llvm/test/CodeGen/WebAssembly/atomic-rmw.ll
index 3f138a2c002..d1ee85d4a6e 100644
--- a/llvm/test/CodeGen/WebAssembly/atomic-rmw.ll
+++ b/llvm/test/CodeGen/WebAssembly/atomic-rmw.ll
@@ -85,6 +85,58 @@ define i1 @cmpxchg_i32_success(i32* %p, i32 %exp, i32 %new) {
ret i1 %succ
}
+; Unsupported instructions are expanded using cmpxchg with a loop.
+
+; CHECK-LABEL: nand_i32:
+; CHECK: loop
+; CHECK: i32.atomic.rmw.cmpxchg
+; CHECK: br_if 0
+; CHECK: end_loop
+define i32 @nand_i32(i32* %p, i32 %v) {
+ %old = atomicrmw nand i32* %p, i32 %v seq_cst
+ ret i32 %old
+}
+
+; CHECK-LABEL: max_i32:
+; CHECK: loop
+; CHECK: i32.atomic.rmw.cmpxchg
+; CHECK: br_if 0
+; CHECK: end_loop
+define i32 @max_i32(i32* %p, i32 %v) {
+ %old = atomicrmw max i32* %p, i32 %v seq_cst
+ ret i32 %old
+}
+
+; CHECK-LABEL: min_i32:
+; CHECK: loop
+; CHECK: i32.atomic.rmw.cmpxchg
+; CHECK: br_if 0
+; CHECK: end_loop
+define i32 @min_i32(i32* %p, i32 %v) {
+ %old = atomicrmw min i32* %p, i32 %v seq_cst
+ ret i32 %old
+}
+
+; CHECK-LABEL: umax_i32:
+; CHECK: loop
+; CHECK: i32.atomic.rmw.cmpxchg
+; CHECK: br_if 0
+; CHECK: end_loop
+define i32 @umax_i32(i32* %p, i32 %v) {
+ %old = atomicrmw umax i32* %p, i32 %v seq_cst
+ ret i32 %old
+}
+
+; CHECK-LABEL: umin_i32:
+; CHECK: loop
+; CHECK: i32.atomic.rmw.cmpxchg
+; CHECK: br_if 0
+; CHECK: end_loop
+define i32 @umin_i32(i32* %p, i32 %v) {
+ %old = atomicrmw umin i32* %p, i32 %v seq_cst
+ ret i32 %old
+}
+
;===----------------------------------------------------------------------------
; Atomic read-modify-writes: 64-bit
;===----------------------------------------------------------------------------
@@ -164,6 +216,58 @@ define i1 @cmpxchg_i64_success(i64* %p, i64 %exp, i64 %new) {
ret i1 %succ
}
+; Unsupported instructions are expanded using cmpxchg with a loop.
+
+; CHECK-LABEL: nand_i64:
+; CHECK: loop
+; CHECK: i64.atomic.rmw.cmpxchg
+; CHECK: br_if 0
+; CHECK: end_loop
+define i64 @nand_i64(i64* %p, i64 %v) {
+ %old = atomicrmw nand i64* %p, i64 %v seq_cst
+ ret i64 %old
+}
+
+; CHECK-LABEL: max_i64:
+; CHECK: loop
+; CHECK: i64.atomic.rmw.cmpxchg
+; CHECK: br_if 0
+; CHECK: end_loop
+define i64 @max_i64(i64* %p, i64 %v) {
+ %old = atomicrmw max i64* %p, i64 %v seq_cst
+ ret i64 %old
+}
+
+; CHECK-LABEL: min_i64:
+; CHECK: loop
+; CHECK: i64.atomic.rmw.cmpxchg
+; CHECK: br_if 0
+; CHECK: end_loop
+define i64 @min_i64(i64* %p, i64 %v) {
+ %old = atomicrmw min i64* %p, i64 %v seq_cst
+ ret i64 %old
+}
+
+; CHECK-LABEL: umax_i64:
+; CHECK: loop
+; CHECK: i64.atomic.rmw.cmpxchg
+; CHECK: br_if 0
+; CHECK: end_loop
+define i64 @umax_i64(i64* %p, i64 %v) {
+ %old = atomicrmw umax i64* %p, i64 %v seq_cst
+ ret i64 %old
+}
+
+; CHECK-LABEL: umin_i64:
+; CHECK: loop
+; CHECK: i64.atomic.rmw.cmpxchg
+; CHECK: br_if 0
+; CHECK: end_loop
+define i64 @umin_i64(i64* %p, i64 %v) {
+ %old = atomicrmw umin i64* %p, i64 %v seq_cst
+ ret i64 %old
+}
+
;===----------------------------------------------------------------------------
; Atomic truncating & sign-extending RMWs
;===----------------------------------------------------------------------------
@@ -627,6 +731,76 @@ define i64 @cmpxchg_sext_i32_i64(i32* %p, i64 %exp, i64 %new) {
ret i64 %e
}
+; Unsupported instructions are expanded using cmpxchg with a loop.
+; Here we use nand as an example.
+
+; nand
+
+; CHECK-LABEL: nand_sext_i8_i32:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK: loop
+; CHECK: i32.atomic.rmw8_u.cmpxchg
+; CHECK: i32.extend8_s
+define i32 @nand_sext_i8_i32(i8* %p, i32 %v) {
+ %t = trunc i32 %v to i8
+ %old = atomicrmw nand i8* %p, i8 %t seq_cst
+ %e = sext i8 %old to i32
+ ret i32 %e
+}
+
+; CHECK-LABEL: nand_sext_i16_i32:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK: loop
+; CHECK: i32.atomic.rmw16_u.cmpxchg
+; CHECK: i32.extend16_s
+define i32 @nand_sext_i16_i32(i16* %p, i32 %v) {
+ %t = trunc i32 %v to i16
+ %old = atomicrmw nand i16* %p, i16 %t seq_cst
+ %e = sext i16 %old to i32
+ ret i32 %e
+}
+
+; FIXME Currently this cannot make use of i64.atomic.rmw8_u.cmpxchg
+; CHECK-LABEL: nand_sext_i8_i64:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK: loop
+; CHECK: i32.atomic.rmw8_u.cmpxchg
+; CHECK: i64.extend_u/i32
+; CHECK: i64.extend8_s
+define i64 @nand_sext_i8_i64(i8* %p, i64 %v) {
+ %t = trunc i64 %v to i8
+ %old = atomicrmw nand i8* %p, i8 %t seq_cst
+ %e = sext i8 %old to i64
+ ret i64 %e
+}
+
+; FIXME Currently this cannot make use of i64.atomic.rmw16_u.cmpxchg
+; CHECK-LABEL: nand_sext_i16_i64:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK: loop
+; CHECK: i32.atomic.rmw16_u.cmpxchg
+; CHECK: i64.extend_u/i32
+; CHECK: i64.extend16_s
+define i64 @nand_sext_i16_i64(i16* %p, i64 %v) {
+ %t = trunc i64 %v to i16
+ %old = atomicrmw nand i16* %p, i16 %t seq_cst
+ %e = sext i16 %old to i64
+ ret i64 %e
+}
+
+; 32->64 sext rmw is expanded with a cmpxchg loop and selected as
+; i32.atomic.rmw.cmpxchg, i64.extend_s/i32
+; CHECK-LABEL: nand_sext_i32_i64:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK: loop
+; CHECK: i32.atomic.rmw.cmpxchg
+; CHECK: i64.extend_s/i32
+define i64 @nand_sext_i32_i64(i32* %p, i64 %v) {
+ %t = trunc i64 %v to i32
+ %old = atomicrmw nand i32* %p, i32 %t seq_cst
+ %e = sext i32 %old to i64
+ ret i64 %e
+}
+
;===----------------------------------------------------------------------------
; Atomic truncating & zero-extending RMWs
;===----------------------------------------------------------------------------
@@ -1039,3 +1213,69 @@ define i64 @cmpxchg_zext_i32_i64(i32* %p, i64 %exp, i64 %new) {
%e = zext i32 %old to i64
ret i64 %e
}
+
+; Unsupported instructions are expanded using cmpxchg with a loop.
+; Here we use nand as an example.
+
+; nand
+
+; CHECK-LABEL: nand_zext_i8_i32:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK: loop
+; CHECK: i32.atomic.rmw8_u.cmpxchg
+define i32 @nand_zext_i8_i32(i8* %p, i32 %v) {
+ %t = trunc i32 %v to i8
+ %old = atomicrmw nand i8* %p, i8 %t seq_cst
+ %e = zext i8 %old to i32
+ ret i32 %e
+}
+
+; CHECK-LABEL: nand_zext_i16_i32:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK: loop
+; CHECK: i32.atomic.rmw16_u.cmpxchg
+define i32 @nand_zext_i16_i32(i16* %p, i32 %v) {
+ %t = trunc i32 %v to i16
+ %old = atomicrmw nand i16* %p, i16 %t seq_cst
+ %e = zext i16 %old to i32
+ ret i32 %e
+}
+
+; FIXME Currently this cannot make use of i64.atomic.rmw8_u.cmpxchg
+; CHECK-LABEL: nand_zext_i8_i64:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK: loop
+; CHECK: i32.atomic.rmw8_u.cmpxchg
+; CHECK: i64.extend_u/i32
+define i64 @nand_zext_i8_i64(i8* %p, i64 %v) {
+ %t = trunc i64 %v to i8
+ %old = atomicrmw nand i8* %p, i8 %t seq_cst
+ %e = zext i8 %old to i64
+ ret i64 %e
+}
+
+; FIXME Currently this cannot make use of i64.atomic.rmw16_u.cmpxchg
+; CHECK-LABEL: nand_zext_i16_i64:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK: loop
+; CHECK: i32.atomic.rmw16_u.cmpxchg
+; CHECK: i64.extend_u/i32
+define i64 @nand_zext_i16_i64(i16* %p, i64 %v) {
+ %t = trunc i64 %v to i16
+ %old = atomicrmw nand i16* %p, i16 %t seq_cst
+ %e = zext i16 %old to i64
+ ret i64 %e
+}
+
+; FIXME Currently this cannot make use of i64.atomic.rmw32_u.cmpxchg
+; CHECK-LABEL: nand_zext_i32_i64:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK: loop
+; CHECK: i32.atomic.rmw.cmpxchg
+; CHECK: i64.extend_u/i32
+define i64 @nand_zext_i32_i64(i32* %p, i64 %v) {
+ %t = trunc i64 %v to i32
+ %old = atomicrmw nand i32* %p, i32 %t seq_cst
+ %e = zext i32 %old to i64
+ ret i64 %e
+}
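For reference, tests like this are driven by llc and FileCheck; the RUN
header is outside this diff's context, so the line below is an assumed
shape rather than a copy of the actual one:

; Assumed RUN line (the file's real header is not shown in this diff and
; likely carries additional options, e.g. disabling explicit locals):
; RUN: llc < %s -mtriple=wasm32-unknown-unknown -mattr=+atomics | FileCheck %s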