summaryrefslogtreecommitdiffstats
path: root/llvm/test/CodeGen/WebAssembly/load-ext-atomic.ll
diff options
context:
space:
mode:
authorDerek Schuff <dschuff@google.com>2017-10-05 21:18:42 +0000
committerDerek Schuff <dschuff@google.com>2017-10-05 21:18:42 +0000
commit885dc592973ceaef288958e838a9aa923646a3d1 (patch)
treef29eddde35422f3cd45a8dabbc6cd6105fd59b64 /llvm/test/CodeGen/WebAssembly/load-ext-atomic.ll
parent7ac2db6a489f3f8e588589a69164882df7973d34 (diff)
downloadbcm5719-llvm-885dc592973ceaef288958e838a9aa923646a3d1.tar.gz
bcm5719-llvm-885dc592973ceaef288958e838a9aa923646a3d1.zip
[WebAssembly] Add the rest of the atomic loads
Add extending loads and constant offset patterns. A bit more refactoring of the tablegen to make the patterns fairly nice and uniform between the regular and atomic loads. Differential Revision: https://reviews.llvm.org/D38523 llvm-svn: 315022
Diffstat (limited to 'llvm/test/CodeGen/WebAssembly/load-ext-atomic.ll')
-rw-r--r--llvm/test/CodeGen/WebAssembly/load-ext-atomic.ll102
1 file changed, 102 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/WebAssembly/load-ext-atomic.ll b/llvm/test/CodeGen/WebAssembly/load-ext-atomic.ll
new file mode 100644
index 00000000000..0c4552dc9af
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/load-ext-atomic.ll
@@ -0,0 +1,102 @@
+; RUN: llc < %s -mattr=+atomics -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s
+
+; Test that extending loads are assembled properly.
+; -mattr=+atomics enables the wasm atomics feature so atomic loads are
+; selected at all; the -disable-wasm-* flags keep the emitted code in the
+; simple virtual-register, explicit-return form the CHECK lines match.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown-wasm"
+
+; Atomic i8 load sign-extended to i32. Wasm has no sign-extending atomic
+; load, so we expect the zero-extending atomic load (load8_u) followed by
+; an explicit i32.extend8_s.
+; CHECK-LABEL: sext_i8_i32:
+; CHECK: i32.atomic.load8_u $push0=, 0($0){{$}}
+; CHECK-NEXT: i32.extend8_s $push1=, $pop0{{$}}
+; CHECK-NEXT: return $pop1{{$}}
+define i32 @sext_i8_i32(i8 *%p) {
+  %v = load atomic i8, i8* %p seq_cst, align 1
+  %e = sext i8 %v to i32
+  ret i32 %e
+}
+
+; Atomic i8 load zero-extended to i32. The atomic load8_u is already
+; zero-extending, so no separate extend instruction should be emitted.
+; CHECK-LABEL: zext_i8_i32:
+; CHECK: i32.atomic.load8_u $push0=, 0($0){{$}}
+; CHECK-NEXT: return $pop0{{$}}
+define i32 @zext_i8_i32(i8 *%p) {
+  %v = load atomic i8, i8* %p seq_cst, align 1
+  %e = zext i8 %v to i32
+  ret i32 %e
+}
+
+; Atomic i16 load sign-extended to i32: zero-extending atomic load16_u
+; plus an explicit i32.extend16_s (no sign-extending atomic load exists).
+; CHECK-LABEL: sext_i16_i32:
+; CHECK: i32.atomic.load16_u $push0=, 0($0){{$}}
+; CHECK-NEXT: i32.extend16_s $push1=, $pop0{{$}}
+; CHECK-NEXT: return $pop1{{$}}
+define i32 @sext_i16_i32(i16 *%p) {
+  %v = load atomic i16, i16* %p seq_cst, align 2
+  %e = sext i16 %v to i32
+  ret i32 %e
+}
+
+; Atomic i16 load zero-extended to i32: load16_u alone suffices.
+; CHECK-LABEL: zext_i16_i32:
+; CHECK: i32.atomic.load16_u $push0=, 0($0){{$}}
+; CHECK-NEXT: return $pop0{{$}}
+define i32 @zext_i16_i32(i16 *%p) {
+  %v = load atomic i16, i16* %p seq_cst, align 2
+  %e = zext i16 %v to i32
+  ret i32 %e
+}
+
+; Atomic i8 load sign-extended to i64: 64-bit zero-extending atomic load
+; followed by i64.extend8_s.
+; CHECK-LABEL: sext_i8_i64:
+; CHECK: i64.atomic.load8_u $push0=, 0($0){{$}}
+; CHECK: i64.extend8_s $push1=, $pop0{{$}}
+; CHECK-NEXT: return $pop1{{$}}
+define i64 @sext_i8_i64(i8 *%p) {
+  %v = load atomic i8, i8* %p seq_cst, align 1
+  %e = sext i8 %v to i64
+  ret i64 %e
+}
+
+; Atomic i8 load zero-extended to i64: i64.atomic.load8_u alone suffices.
+; CHECK-LABEL: zext_i8_i64:
+; CHECK: i64.atomic.load8_u $push0=, 0($0){{$}}
+; CHECK-NEXT: return $pop0{{$}}
+define i64 @zext_i8_i64(i8 *%p) {
+  %v = load atomic i8, i8* %p seq_cst, align 1
+  %e = zext i8 %v to i64
+  ret i64 %e
+}
+
+; Atomic i16 load sign-extended to i64: 64-bit zero-extending atomic load
+; followed by i64.extend16_s.
+; CHECK-LABEL: sext_i16_i64:
+; CHECK: i64.atomic.load16_u $push0=, 0($0){{$}}
+; CHECK: i64.extend16_s $push1=, $pop0{{$}}
+; CHECK-NEXT: return $pop1{{$}}
+define i64 @sext_i16_i64(i16 *%p) {
+  %v = load atomic i16, i16* %p seq_cst, align 2
+  %e = sext i16 %v to i64
+  ret i64 %e
+}
+
+; Atomic i16 load zero-extended to i64: i64.atomic.load16_u alone suffices.
+; CHECK-LABEL: zext_i16_i64:
+; CHECK: i64.atomic.load16_u $push0=, 0($0){{$}}
+; CHECK-NEXT: return $pop0{{$}}
+define i64 @zext_i16_i64(i16 *%p) {
+  %v = load atomic i16, i16* %p seq_cst, align 2
+  %e = zext i16 %v to i64
+  ret i64 %e
+}
+
+; Atomic i32 load sign-extended to i64: here a plain 32-bit atomic load is
+; used, then widened with i64.extend_s/i32 (no 64-bit sign-extending form).
+; CHECK-LABEL: sext_i32_i64:
+; CHECK: i32.atomic.load $push0=, 0($0){{$}}
+; CHECK: i64.extend_s/i32 $push1=, $pop0{{$}}
+; CHECK-NEXT: return $pop1{{$}}
+define i64 @sext_i32_i64(i32 *%p) {
+  %v = load atomic i32, i32* %p seq_cst, align 4
+  %e = sext i32 %v to i64
+  ret i64 %e
+}
+
+; Atomic i32 load zero-extended to i64: the zero-extending 64-bit atomic
+; load (load32_u) is selected directly, so no extend instruction follows.
+; CHECK-LABEL: zext_i32_i64:
+; CHECK: i64.atomic.load32_u $push0=, 0($0){{$}}
+; CHECK: return $pop0{{$}}
+define i64 @zext_i32_i64(i32 *%p) {
+  %v = load atomic i32, i32* %p seq_cst, align 4
+  %e = zext i32 %v to i64
+  ret i64 %e
+}
OpenPOWER on IntegriCloud