summaryrefslogtreecommitdiffstats
path: root/llvm/test/CodeGen/WebAssembly
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/test/CodeGen/WebAssembly')
-rw-r--r--llvm/test/CodeGen/WebAssembly/i32-load-store-alignment.ll210
-rw-r--r--llvm/test/CodeGen/WebAssembly/i64-load-store-alignment.ll323
-rw-r--r--llvm/test/CodeGen/WebAssembly/offset.ll25
-rw-r--r--llvm/test/CodeGen/WebAssembly/userstack.ll8
4 files changed, 556 insertions, 10 deletions
diff --git a/llvm/test/CodeGen/WebAssembly/i32-load-store-alignment.ll b/llvm/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
new file mode 100644
index 00000000000..89279fba1e5
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
@@ -0,0 +1,210 @@
+; RUN: llc < %s -asm-verbose=false | FileCheck %s
+
+; Test loads and stores with custom alignment values.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown"
+
+; CHECK-LABEL: ldi32_a1:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: i32.load $push[[NUM:[0-9]+]]=, 0($0):p2align=0{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i32 @ldi32_a1(i32 *%p) {
+ %v = load i32, i32* %p, align 1
+ ret i32 %v
+}
+
+; CHECK-LABEL: ldi32_a2:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: i32.load $push[[NUM:[0-9]+]]=, 0($0):p2align=1{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i32 @ldi32_a2(i32 *%p) {
+ %v = load i32, i32* %p, align 2
+ ret i32 %v
+}
+
+; 4 is the default alignment for i32 so no attribute is needed.
+
+; CHECK-LABEL: ldi32_a4:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: i32.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i32 @ldi32_a4(i32 *%p) {
+ %v = load i32, i32* %p, align 4
+ ret i32 %v
+}
+
+; The default alignment in LLVM is the same as the default alignment in wasm.
+
+; CHECK-LABEL: ldi32:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: i32.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i32 @ldi32(i32 *%p) {
+ %v = load i32, i32* %p
+ ret i32 %v
+}
+
+; CHECK-LABEL: ldi32_a8:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: i32.load $push[[NUM:[0-9]+]]=, 0($0):p2align=3{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i32 @ldi32_a8(i32 *%p) {
+ %v = load i32, i32* %p, align 8
+ ret i32 %v
+}
+
+; Extending loads.
+
+; CHECK-LABEL: ldi8_a1:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: i32.load8_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i8 @ldi8_a1(i8 *%p) {
+ %v = load i8, i8* %p, align 1
+ ret i8 %v
+}
+
+; CHECK-LABEL: ldi8_a2:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: i32.load8_u $push[[NUM:[0-9]+]]=, 0($0):p2align=1{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i8 @ldi8_a2(i8 *%p) {
+ %v = load i8, i8* %p, align 2
+ ret i8 %v
+}
+
+; CHECK-LABEL: ldi16_a1:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: i32.load16_u $push[[NUM:[0-9]+]]=, 0($0):p2align=0{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i16 @ldi16_a1(i16 *%p) {
+ %v = load i16, i16* %p, align 1
+ ret i16 %v
+}
+
+; CHECK-LABEL: ldi16_a2:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: i32.load16_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i16 @ldi16_a2(i16 *%p) {
+ %v = load i16, i16* %p, align 2
+ ret i16 %v
+}
+
+; CHECK-LABEL: ldi16_a4:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i32{{$}}
+; CHECK-NEXT: i32.load16_u $push[[NUM:[0-9]+]]=, 0($0):p2align=2{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i16 @ldi16_a4(i16 *%p) {
+ %v = load i16, i16* %p, align 4
+ ret i16 %v
+}
+
+; Stores.
+
+; CHECK-LABEL: sti32_a1:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK-NEXT: i32.store $discard=, 0($0):p2align=0, $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti32_a1(i32 *%p, i32 %v) {
+ store i32 %v, i32* %p, align 1
+ ret void
+}
+
+; CHECK-LABEL: sti32_a2:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK-NEXT: i32.store $discard=, 0($0):p2align=1, $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti32_a2(i32 *%p, i32 %v) {
+ store i32 %v, i32* %p, align 2
+ ret void
+}
+
+; 4 is the default alignment for i32 so no attribute is needed.
+
+; CHECK-LABEL: sti32_a4:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK-NEXT: i32.store $discard=, 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti32_a4(i32 *%p, i32 %v) {
+ store i32 %v, i32* %p, align 4
+ ret void
+}
+
+; The default alignment in LLVM is the same as the default alignment in wasm.
+
+; CHECK-LABEL: sti32:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK-NEXT: i32.store $discard=, 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti32(i32 *%p, i32 %v) {
+ store i32 %v, i32* %p
+ ret void
+}
+
+; CHECK-LABEL: sti32_a8:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK-NEXT: i32.store $discard=, 0($0):p2align=3, $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti32_a8(i32 *%p, i32 %v) {
+ store i32 %v, i32* %p, align 8
+ ret void
+}
+
+; Truncating stores.
+
+; CHECK-LABEL: sti8_a1:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK-NEXT: i32.store8 $discard=, 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti8_a1(i8 *%p, i8 %v) {
+ store i8 %v, i8* %p, align 1
+ ret void
+}
+
+; CHECK-LABEL: sti8_a2:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK-NEXT: i32.store8 $discard=, 0($0):p2align=1, $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti8_a2(i8 *%p, i8 %v) {
+ store i8 %v, i8* %p, align 2
+ ret void
+}
+
+; CHECK-LABEL: sti16_a1:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK-NEXT: i32.store16 $discard=, 0($0):p2align=0, $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti16_a1(i16 *%p, i16 %v) {
+ store i16 %v, i16* %p, align 1
+ ret void
+}
+
+; CHECK-LABEL: sti16_a2:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK-NEXT: i32.store16 $discard=, 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti16_a2(i16 *%p, i16 %v) {
+ store i16 %v, i16* %p, align 2
+ ret void
+}
+
+; CHECK-LABEL: sti16_a4:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK-NEXT: i32.store16 $discard=, 0($0):p2align=2, $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti16_a4(i16 *%p, i16 %v) {
+ store i16 %v, i16* %p, align 4
+ ret void
+}
diff --git a/llvm/test/CodeGen/WebAssembly/i64-load-store-alignment.ll b/llvm/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
new file mode 100644
index 00000000000..3290b9c7376
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
@@ -0,0 +1,323 @@
+; RUN: llc < %s -asm-verbose=false | FileCheck %s
+
+; Test loads and stores with custom alignment values.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown"
+
+; CHECK-LABEL: ldi64_a1:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($0):p2align=0{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @ldi64_a1(i64 *%p) {
+ %v = load i64, i64* %p, align 1
+ ret i64 %v
+}
+
+; CHECK-LABEL: ldi64_a2:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($0):p2align=1{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @ldi64_a2(i64 *%p) {
+ %v = load i64, i64* %p, align 2
+ ret i64 %v
+}
+
+; CHECK-LABEL: ldi64_a4:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($0):p2align=2{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @ldi64_a4(i64 *%p) {
+ %v = load i64, i64* %p, align 4
+ ret i64 %v
+}
+
+; 8 is the default alignment for i64 so no attribute is needed.
+
+; CHECK-LABEL: ldi64_a8:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @ldi64_a8(i64 *%p) {
+ %v = load i64, i64* %p, align 8
+ ret i64 %v
+}
+
+; The default alignment in LLVM is the same as the default alignment in wasm.
+
+; CHECK-LABEL: ldi64:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @ldi64(i64 *%p) {
+ %v = load i64, i64* %p
+ ret i64 %v
+}
+
+; CHECK-LABEL: ldi64_a16:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($0):p2align=4{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @ldi64_a16(i64 *%p) {
+ %v = load i64, i64* %p, align 16
+ ret i64 %v
+}
+
+; Extending loads.
+
+; CHECK-LABEL: ldi8_a1:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.load8_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @ldi8_a1(i8 *%p) {
+ %v = load i8, i8* %p, align 1
+ %w = zext i8 %v to i64
+ ret i64 %w
+}
+
+; CHECK-LABEL: ldi8_a2:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.load8_u $push[[NUM:[0-9]+]]=, 0($0):p2align=1{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @ldi8_a2(i8 *%p) {
+ %v = load i8, i8* %p, align 2
+ %w = zext i8 %v to i64
+ ret i64 %w
+}
+
+; CHECK-LABEL: ldi16_a1:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.load16_u $push[[NUM:[0-9]+]]=, 0($0):p2align=0{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @ldi16_a1(i16 *%p) {
+ %v = load i16, i16* %p, align 1
+ %w = zext i16 %v to i64
+ ret i64 %w
+}
+
+; CHECK-LABEL: ldi16_a2:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.load16_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @ldi16_a2(i16 *%p) {
+ %v = load i16, i16* %p, align 2
+ %w = zext i16 %v to i64
+ ret i64 %w
+}
+
+; CHECK-LABEL: ldi16_a4:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.load16_u $push[[NUM:[0-9]+]]=, 0($0):p2align=2{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @ldi16_a4(i16 *%p) {
+ %v = load i16, i16* %p, align 4
+ %w = zext i16 %v to i64
+ ret i64 %w
+}
+
+; CHECK-LABEL: ldi32_a1:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.load32_u $push[[NUM:[0-9]+]]=, 0($0):p2align=0{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @ldi32_a1(i32 *%p) {
+ %v = load i32, i32* %p, align 1
+ %w = zext i32 %v to i64
+ ret i64 %w
+}
+
+; CHECK-LABEL: ldi32_a2:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.load32_u $push[[NUM:[0-9]+]]=, 0($0):p2align=1{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @ldi32_a2(i32 *%p) {
+ %v = load i32, i32* %p, align 2
+ %w = zext i32 %v to i64
+ ret i64 %w
+}
+
+; CHECK-LABEL: ldi32_a4:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.load32_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @ldi32_a4(i32 *%p) {
+ %v = load i32, i32* %p, align 4
+ %w = zext i32 %v to i64
+ ret i64 %w
+}
+
+; CHECK-LABEL: ldi32_a8:
+; CHECK-NEXT: .param i32{{$}}
+; CHECK-NEXT: .result i64{{$}}
+; CHECK-NEXT: i64.load32_u $push[[NUM:[0-9]+]]=, 0($0):p2align=3{{$}}
+; CHECK-NEXT: return $pop[[NUM]]{{$}}
+define i64 @ldi32_a8(i32 *%p) {
+ %v = load i32, i32* %p, align 8
+ %w = zext i32 %v to i64
+ ret i64 %w
+}
+
+; Stores.
+
+; CHECK-LABEL: sti64_a1:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.store $discard=, 0($0):p2align=0, $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti64_a1(i64 *%p, i64 %v) {
+ store i64 %v, i64* %p, align 1
+ ret void
+}
+
+; CHECK-LABEL: sti64_a2:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.store $discard=, 0($0):p2align=1, $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti64_a2(i64 *%p, i64 %v) {
+ store i64 %v, i64* %p, align 2
+ ret void
+}
+
+; CHECK-LABEL: sti64_a4:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.store $discard=, 0($0):p2align=2, $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti64_a4(i64 *%p, i64 %v) {
+ store i64 %v, i64* %p, align 4
+ ret void
+}
+
+; 8 is the default alignment for i64 so no attribute is needed.
+
+; CHECK-LABEL: sti64_a8:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.store $discard=, 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti64_a8(i64 *%p, i64 %v) {
+ store i64 %v, i64* %p, align 8
+ ret void
+}
+
+; The default alignment in LLVM is the same as the default alignment in wasm.
+
+; CHECK-LABEL: sti64:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.store $discard=, 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti64(i64 *%p, i64 %v) {
+ store i64 %v, i64* %p
+ ret void
+}
+
+; CHECK-LABEL: sti64_a16:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.store $discard=, 0($0):p2align=4, $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti64_a16(i64 *%p, i64 %v) {
+ store i64 %v, i64* %p, align 16
+ ret void
+}
+
+; Truncating stores.
+
+; CHECK-LABEL: sti8_a1:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.store8 $discard=, 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti8_a1(i8 *%p, i64 %w) {
+ %v = trunc i64 %w to i8
+ store i8 %v, i8* %p, align 1
+ ret void
+}
+
+; CHECK-LABEL: sti8_a2:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.store8 $discard=, 0($0):p2align=1, $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti8_a2(i8 *%p, i64 %w) {
+ %v = trunc i64 %w to i8
+ store i8 %v, i8* %p, align 2
+ ret void
+}
+
+; CHECK-LABEL: sti16_a1:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.store16 $discard=, 0($0):p2align=0, $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti16_a1(i16 *%p, i64 %w) {
+ %v = trunc i64 %w to i16
+ store i16 %v, i16* %p, align 1
+ ret void
+}
+
+; CHECK-LABEL: sti16_a2:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.store16 $discard=, 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti16_a2(i16 *%p, i64 %w) {
+ %v = trunc i64 %w to i16
+ store i16 %v, i16* %p, align 2
+ ret void
+}
+
+; CHECK-LABEL: sti16_a4:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.store16 $discard=, 0($0):p2align=2, $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti16_a4(i16 *%p, i64 %w) {
+ %v = trunc i64 %w to i16
+ store i16 %v, i16* %p, align 4
+ ret void
+}
+
+; CHECK-LABEL: sti32_a1:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.store32 $discard=, 0($0):p2align=0, $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti32_a1(i32 *%p, i64 %w) {
+ %v = trunc i64 %w to i32
+ store i32 %v, i32* %p, align 1
+ ret void
+}
+
+; CHECK-LABEL: sti32_a2:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.store32 $discard=, 0($0):p2align=1, $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti32_a2(i32 *%p, i64 %w) {
+ %v = trunc i64 %w to i32
+ store i32 %v, i32* %p, align 2
+ ret void
+}
+
+; CHECK-LABEL: sti32_a4:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.store32 $discard=, 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti32_a4(i32 *%p, i64 %w) {
+ %v = trunc i64 %w to i32
+ store i32 %v, i32* %p, align 4
+ ret void
+}
+
+; CHECK-LABEL: sti32_a8:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.store32 $discard=, 0($0):p2align=3, $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti32_a8(i32 *%p, i64 %w) {
+ %v = trunc i64 %w to i32
+ store i32 %v, i32* %p, align 8
+ ret void
+}
diff --git a/llvm/test/CodeGen/WebAssembly/offset.ll b/llvm/test/CodeGen/WebAssembly/offset.ll
index ba0f72c75ed..9cf04c00822 100644
--- a/llvm/test/CodeGen/WebAssembly/offset.ll
+++ b/llvm/test/CodeGen/WebAssembly/offset.ll
@@ -372,14 +372,27 @@ define void @aggregate_load_store({i32,i32,i32,i32}* %p, {i32,i32,i32,i32}* %q)
ret void
}
-; Fold the offsets when lowering aggregate return values.
+; Fold the offsets when lowering aggregate return values. The stores get
+; merged into i64 stores.
; CHECK-LABEL: aggregate_return:
-; CHECK: i32.const $push0=, 0{{$}}
-; CHECK: i32.store $push1=, 12($0), $pop0{{$}}
-; CHECK: i32.store $push2=, 8($0), $pop1{{$}}
-; CHECK: i32.store $push3=, 4($0), $pop2{{$}}
-; CHECK: i32.store $discard=, 0($0), $pop3{{$}}
+; CHECK: i64.const $push0=, 0{{$}}
+; CHECK: i64.store $push1=, 8($0):p2align=2, $pop0{{$}}
+; CHECK: i64.store $discard=, 0($0):p2align=2, $pop1{{$}}
define {i32,i32,i32,i32} @aggregate_return() {
ret {i32,i32,i32,i32} zeroinitializer
}
+
+; Fold the offsets when lowering aggregate return values. The stores are not
+; merged.
+
+; CHECK-LABEL: aggregate_return_without_merge:
+; CHECK: i32.const $push0=, 0{{$}}
+; CHECK: i32.store8 $push1=, 14($0), $pop0{{$}}
+; CHECK: i32.store16 $push2=, 12($0), $pop1{{$}}
+; CHECK: i32.store $discard=, 8($0), $pop2{{$}}
+; CHECK: i64.const $push3=, 0{{$}}
+; CHECK: i64.store $discard=, 0($0), $pop3{{$}}
+define {i64,i32,i16,i8} @aggregate_return_without_merge() {
+ ret {i64,i32,i16,i8} zeroinitializer
+}
diff --git a/llvm/test/CodeGen/WebAssembly/userstack.ll b/llvm/test/CodeGen/WebAssembly/userstack.ll
index 60158ee9fec..222f996d1b3 100644
--- a/llvm/test/CodeGen/WebAssembly/userstack.ll
+++ b/llvm/test/CodeGen/WebAssembly/userstack.ll
@@ -57,7 +57,7 @@ define void @allocarray() {
; CHECK-NEXT: i32.store [[SP]]=, 0([[L2]]), [[SP]]
%r = alloca [5 x i32]
- ; CHECK-NEXT: i32.const $push[[L4:.+]]=, 4
+ ; CHECK-NEXT: i32.const $push[[L4:.+]]=, 12
; CHECK-NEXT: i32.const [[L5:.+]]=, 12
; CHECK-NEXT: i32.add [[L5]]=, [[SP]], [[L5]]
; CHECK-NEXT: i32.add $push[[L6:.+]]=, [[L5]], $pop[[L4]]
@@ -66,7 +66,7 @@ define void @allocarray() {
; CHECK-NEXT: i32.store $discard=, 0($pop3), $pop[[L10]]{{$}}
%p = getelementptr [5 x i32], [5 x i32]* %r, i32 0, i32 0
store i32 1, i32* %p
- %p2 = getelementptr [5 x i32], [5 x i32]* %r, i32 0, i32 1
+ %p2 = getelementptr [5 x i32], [5 x i32]* %r, i32 0, i32 3
store i32 1, i32* %p2
; CHECK-NEXT: i32.const [[L7:.+]]=, 32
@@ -89,8 +89,8 @@ define void @allocarray_inbounds() {
%p = getelementptr inbounds [5 x i32], [5 x i32]* %r, i32 0, i32 0
store i32 1, i32* %p
; This store should have both the GEP and the FI folded into it.
- ; CHECK-NEXT: i32.store {{.*}}=, 16([[SP]]), $pop
- %p2 = getelementptr inbounds [5 x i32], [5 x i32]* %r, i32 0, i32 1
+ ; CHECK-NEXT: i32.store {{.*}}=, 24([[SP]]), $pop
+ %p2 = getelementptr inbounds [5 x i32], [5 x i32]* %r, i32 0, i32 3
store i32 1, i32* %p2
; CHECK: i32.const [[L7:.+]]=, 32
; CHECK-NEXT: i32.add [[SP]]=, [[SP]], [[L7]]
OpenPOWER on IntegriCloud