Diffstat (limited to 'llvm/test/CodeGen')
4 files changed, 408 insertions, 1 deletion
diff --git a/llvm/test/CodeGen/WebAssembly/i32-load-store-alignment.ll b/llvm/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
index 63b37b87560..a4ce351dc26 100644
--- a/llvm/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
+++ b/llvm/test/CodeGen/WebAssembly/i32-load-store-alignment.ll
@@ -5,6 +5,8 @@
 target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
 target triple = "wasm32-unknown-unknown"
 
+; Loads.
+
 ; CHECK-LABEL: ldi32_a1:
 ; CHECK-NEXT: .param i32{{$}}
 ; CHECK-NEXT: .result i32{{$}}
@@ -236,3 +238,21 @@ define i32 @ldi32_atomic_a8(i32 *%p) {
   %v = load atomic i32, i32* %p seq_cst, align 8
   ret i32 %v
 }
+
+; CHECK-LABEL: sti32_atomic_a4:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK-NEXT: i32.atomic.store 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti32_atomic_a4(i32 *%p, i32 %v) {
+  store atomic i32 %v, i32* %p seq_cst, align 4
+  ret void
+}
+
+; CHECK-LABEL: sti32_atomic_a8:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK-NEXT: i32.atomic.store 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti32_atomic_a8(i32 *%p, i32 %v) {
+  store atomic i32 %v, i32* %p seq_cst, align 8
+  ret void
+}
diff --git a/llvm/test/CodeGen/WebAssembly/i64-load-store-alignment.ll b/llvm/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
index a2e651915ab..f3b8e15b602 100644
--- a/llvm/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
+++ b/llvm/test/CodeGen/WebAssembly/i64-load-store-alignment.ll
@@ -5,6 +5,8 @@
 target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
 target triple = "wasm32-unknown-unknown"
 
+; Loads.
+
 ; CHECK-LABEL: ldi64_a1:
 ; CHECK-NEXT: .param i32{{$}}
 ; CHECK-NEXT: .result i64{{$}}
@@ -325,6 +327,9 @@ define void @sti32_a8(i32 *%p, i64 %w) {
 }
 
 ; Atomics.
+; Wasm atomics have an alignment field, but it must always equal the type's
+; natural alignment.
+
 ; CHECK-LABEL: ldi64_atomic_a8:
 ; CHECK-NEXT: .param i32{{$}}
 ; CHECK-NEXT: .result i64{{$}}
@@ -336,7 +341,6 @@ define i64 @ldi64_atomic_a8(i64 *%p) {
 }
 
 ; 16 is greater than the default alignment so it is ignored.
-
 ; CHECK-LABEL: ldi64_atomic_a16:
 ; CHECK-NEXT: .param i32{{$}}
 ; CHECK-NEXT: .result i64{{$}}
@@ -346,3 +350,22 @@ define i64 @ldi64_atomic_a16(i64 *%p) {
   %v = load atomic i64, i64* %p seq_cst, align 16
   ret i64 %v
 }
+
+; CHECK-LABEL: sti64_atomic_a8:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.atomic.store 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti64_atomic_a8(i64 *%p, i64 %v) {
+  store atomic i64 %v, i64* %p seq_cst, align 8
+  ret void
+}
+
+; 16 is greater than the default alignment so it is ignored.
+; CHECK-LABEL: sti64_atomic_a16:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.atomic.store 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @sti64_atomic_a16(i64 *%p, i64 %v) {
+  store atomic i64 %v, i64* %p seq_cst, align 16
+  ret void
+}
diff --git a/llvm/test/CodeGen/WebAssembly/offset-atomics.ll b/llvm/test/CodeGen/WebAssembly/offset-atomics.ll
index bb1799ba267..98c53b23432 100644
--- a/llvm/test/CodeGen/WebAssembly/offset-atomics.ll
+++ b/llvm/test/CodeGen/WebAssembly/offset-atomics.ll
@@ -6,6 +6,8 @@
 target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
 target triple = "wasm32-unknown-unknown"
 
+; Basic load.
+
 ; CHECK-LABEL: load_i32_no_offset:
 ; CHECK: i32.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
@@ -154,6 +156,148 @@ define i32 @load_i32_with_folded_or_offset(i32 %x) {
   ret i32 %conv
 }
 
+; Same as above but with store.
+
+; CHECK-LABEL: store_i32_no_offset:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK-NEXT: i32.atomic.store 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @store_i32_no_offset(i32 *%p, i32 %v) {
+  store atomic i32 %v, i32* %p seq_cst, align 4
+  ret void
+}
+
+; Same as above but with store.
+
+; CHECK-LABEL: store_i32_with_folded_offset:
+; CHECK: i32.atomic.store 24($0), $pop0{{$}}
+define void @store_i32_with_folded_offset(i32* %p) {
+  %q = ptrtoint i32* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i32*
+  store atomic i32 0, i32* %s seq_cst, align 4
+  ret void
+}
+
+; Same as above but with store.
+
+; CHECK-LABEL: store_i32_with_folded_gep_offset:
+; CHECK: i32.atomic.store 24($0), $pop0{{$}}
+define void @store_i32_with_folded_gep_offset(i32* %p) {
+  %s = getelementptr inbounds i32, i32* %p, i32 6
+  store atomic i32 0, i32* %s seq_cst, align 4
+  ret void
+}
+
+; Same as above but with store.
+
+; CHECK-LABEL: store_i32_with_unfolded_gep_negative_offset:
+; CHECK: i32.const $push0=, -24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i32.atomic.store 0($pop1), $pop2{{$}}
+define void @store_i32_with_unfolded_gep_negative_offset(i32* %p) {
+  %s = getelementptr inbounds i32, i32* %p, i32 -6
+  store atomic i32 0, i32* %s seq_cst, align 4
+  ret void
+}
+
+; Same as above but with store.
+
+; CHECK-LABEL: store_i32_with_unfolded_offset:
+; CHECK: i32.const $push0=, 24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i32.atomic.store 0($pop1), $pop2{{$}}
+define void @store_i32_with_unfolded_offset(i32* %p) {
+  %q = ptrtoint i32* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i32*
+  store atomic i32 0, i32* %s seq_cst, align 4
+  ret void
+}
+
+; Same as above but with store.
+
+; CHECK-LABEL: store_i32_with_unfolded_gep_offset:
+; CHECK: i32.const $push0=, 24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i32.atomic.store 0($pop1), $pop2{{$}}
+define void @store_i32_with_unfolded_gep_offset(i32* %p) {
+  %s = getelementptr i32, i32* %p, i32 6
+  store atomic i32 0, i32* %s seq_cst, align 4
+  ret void
+}
+
+; Same as above but with store with i64.
+
+; CHECK-LABEL: store_i64_no_offset:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.atomic.store 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @store_i64_no_offset(i64 *%p, i64 %v) {
+  store atomic i64 %v, i64* %p seq_cst, align 8
+  ret void
+}
+
+; Same as above but with store with i64.
+
+; CHECK-LABEL: store_i64_with_folded_offset:
+; CHECK: i64.atomic.store 24($0), $pop0{{$}}
+define void @store_i64_with_folded_offset(i64* %p) {
+  %q = ptrtoint i64* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i64*
+  store atomic i64 0, i64* %s seq_cst, align 8
+  ret void
+}
+
+; Same as above but with store with i64.
+
+; CHECK-LABEL: store_i64_with_folded_gep_offset:
+; CHECK: i64.atomic.store 24($0), $pop0{{$}}
+define void @store_i64_with_folded_gep_offset(i64* %p) {
+  %s = getelementptr inbounds i64, i64* %p, i32 3
+  store atomic i64 0, i64* %s seq_cst, align 8
+  ret void
+}
+
+; Same as above but with store with i64.
+
+; CHECK-LABEL: store_i64_with_unfolded_gep_negative_offset:
+; CHECK: i32.const $push0=, -24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i64.atomic.store 0($pop1), $pop2{{$}}
+define void @store_i64_with_unfolded_gep_negative_offset(i64* %p) {
+  %s = getelementptr inbounds i64, i64* %p, i32 -3
+  store atomic i64 0, i64* %s seq_cst, align 8
+  ret void
+}
+
+; Same as above but with store with i64.
+
+; CHECK-LABEL: store_i64_with_unfolded_offset:
+; CHECK: i32.const $push0=, 24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i64.atomic.store 0($pop1), $pop2{{$}}
+define void @store_i64_with_unfolded_offset(i64* %p) {
+  %q = ptrtoint i64* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i64*
+  store atomic i64 0, i64* %s seq_cst, align 8
+  ret void
+}
+
+; Same as above but with store with i64.
+
+; CHECK-LABEL: store_i64_with_unfolded_gep_offset:
+; CHECK: i32.const $push0=, 24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i64.atomic.store 0($pop1), $pop2{{$}}
+define void @store_i64_with_unfolded_gep_offset(i64* %p) {
+  %s = getelementptr i64, i64* %p, i32 3
+  store atomic i64 0, i64* %s seq_cst, align 8
+  ret void
+}
+
 ; When loading from a fixed address, materialize a zero.
 
 ; CHECK-LABEL: load_i32_from_numeric_address
@@ -175,6 +319,25 @@ define i32 @load_i32_from_global_address() {
   ret i32 %t
 }
 
+; CHECK-LABEL: store_i32_to_numeric_address:
+; CHECK-NEXT: i32.const $push0=, 0{{$}}
+; CHECK-NEXT: i32.const $push1=, 0{{$}}
+; CHECK-NEXT: i32.atomic.store 42($pop0), $pop1{{$}}
+define void @store_i32_to_numeric_address() {
+  %s = inttoptr i32 42 to i32*
+  store atomic i32 0, i32* %s seq_cst, align 4
+  ret void
+}
+
+; CHECK-LABEL: store_i32_to_global_address:
+; CHECK: i32.const $push0=, 0{{$}}
+; CHECK: i32.const $push1=, 0{{$}}
+; CHECK: i32.atomic.store gv($pop0), $pop1{{$}}
+define void @store_i32_to_global_address() {
+  store atomic i32 0, i32* @gv seq_cst, align 4
+  ret void
+}
+
 ; Fold an offset into a sign-extending load.
 
 ; CHECK-LABEL: load_i8_s_with_folded_offset:
@@ -305,3 +468,158 @@ define i8 @ldi8_a1(i8 *%p) {
   %v = load atomic i8, i8* %p seq_cst, align 1
   ret i8 %v
 }
+
+; Fold an offset into a truncating store.
+
+; CHECK-LABEL: store_i8_with_folded_offset:
+; CHECK: i32.atomic.store8 24($0), $pop0{{$}}
+define void @store_i8_with_folded_offset(i8* %p) {
+  %q = ptrtoint i8* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i8*
+  store atomic i8 0, i8* %s seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: store_i16_with_folded_offset:
+; CHECK: i32.atomic.store16 24($0), $pop0{{$}}
+define void @store_i16_with_folded_offset(i16* %p) {
+  %q = ptrtoint i16* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i16*
+  store atomic i16 0, i16* %s seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: store_i8_i64_with_folded_offset:
+; CHECK: i64.atomic.store8 24($0), $1{{$}}
+define void @store_i8_i64_with_folded_offset(i8* %p, i64 %v) {
+  %q = ptrtoint i8* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i8*
+  %t = trunc i64 %v to i8
+  store atomic i8 %t, i8* %s seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: store_i16_i64_with_folded_offset:
+; CHECK: i64.atomic.store16 24($0), $1{{$}}
+define void @store_i16_i64_with_folded_offset(i16* %p, i64 %v) {
+  %q = ptrtoint i16* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i16*
+  %t = trunc i64 %v to i16
+  store atomic i16 %t, i16* %s seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: store_i32_i64_with_folded_offset:
+; CHECK: i64.atomic.store32 24($0), $1{{$}}
+define void @store_i32_i64_with_folded_offset(i32* %p, i64 %v) {
+  %q = ptrtoint i32* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i32*
+  %t = trunc i64 %v to i32
+  store atomic i32 %t, i32* %s seq_cst, align 4
+  ret void
+}
+
+; Fold a gep offset into a truncating store.
+
+; CHECK-LABEL: store_i8_with_folded_gep_offset:
+; CHECK: i32.atomic.store8 24($0), $pop0{{$}}
+define void @store_i8_with_folded_gep_offset(i8* %p) {
+  %s = getelementptr inbounds i8, i8* %p, i32 24
+  store atomic i8 0, i8* %s seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: store_i16_with_folded_gep_offset:
+; CHECK: i32.atomic.store16 48($0), $pop0{{$}}
+define void @store_i16_with_folded_gep_offset(i16* %p) {
+  %s = getelementptr inbounds i16, i16* %p, i32 24
+  store atomic i16 0, i16* %s seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: store_i8_i64_with_folded_gep_offset:
+; CHECK: i64.atomic.store8 24($0), $1{{$}}
+define void @store_i8_i64_with_folded_gep_offset(i8* %p, i64 %v) {
+  %s = getelementptr inbounds i8, i8* %p, i32 24
+  %t = trunc i64 %v to i8
+  store atomic i8 %t, i8* %s seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: store_i16_i64_with_folded_gep_offset:
+; CHECK: i64.atomic.store16 48($0), $1{{$}}
+define void @store_i16_i64_with_folded_gep_offset(i16* %p, i64 %v) {
+  %s = getelementptr inbounds i16, i16* %p, i32 24
+  %t = trunc i64 %v to i16
+  store atomic i16 %t, i16* %s seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: store_i32_i64_with_folded_gep_offset:
+; CHECK: i64.atomic.store32 96($0), $1{{$}}
+define void @store_i32_i64_with_folded_gep_offset(i32* %p, i64 %v) {
+  %s = getelementptr inbounds i32, i32* %p, i32 24
+  %t = trunc i64 %v to i32
+  store atomic i32 %t, i32* %s seq_cst, align 4
+  ret void
+}
+
+; Fold an or_is_add pattern based offset into a truncating store.
+
+; CHECK-LABEL: store_i8_with_folded_or_offset:
+; CHECK: i32.atomic.store8 2($pop{{[0-9]+}}), $pop{{[0-9]+}}{{$}}
+define void @store_i8_with_folded_or_offset(i32 %x) {
+  %and = and i32 %x, -4
+  %p = inttoptr i32 %and to i8*
+  %arrayidx = getelementptr inbounds i8, i8* %p, i32 2
+  store atomic i8 0, i8* %arrayidx seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: store_i16_with_folded_or_offset:
+; CHECK: i32.atomic.store16 4($pop{{[0-9]+}}), $pop{{[0-9]+}}{{$}}
+define void @store_i16_with_folded_or_offset(i32 %x) {
+  %and = and i32 %x, -4
+  %p = inttoptr i32 %and to i16*
+  %arrayidx = getelementptr inbounds i16, i16* %p, i32 2
+  store atomic i16 0, i16* %arrayidx seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: store_i8_i64_with_folded_or_offset:
+; CHECK: i64.atomic.store8 2($pop{{[0-9]+}}), $1{{$}}
+define void @store_i8_i64_with_folded_or_offset(i32 %x, i64 %v) {
+  %and = and i32 %x, -4
+  %p = inttoptr i32 %and to i8*
+  %arrayidx = getelementptr inbounds i8, i8* %p, i32 2
+  %t = trunc i64 %v to i8
+  store atomic i8 %t, i8* %arrayidx seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: store_i16_i64_with_folded_or_offset:
+; CHECK: i64.atomic.store16 4($pop{{[0-9]+}}), $1{{$}}
+define void @store_i16_i64_with_folded_or_offset(i32 %x, i64 %v) {
+  %and = and i32 %x, -4
+  %p = inttoptr i32 %and to i16*
+  %arrayidx = getelementptr inbounds i16, i16* %p, i32 2
+  %t = trunc i64 %v to i16
+  store atomic i16 %t, i16* %arrayidx seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: store_i32_i64_with_folded_or_offset:
+; CHECK: i64.atomic.store32 8($pop{{[0-9]+}}), $1{{$}}
+define void @store_i32_i64_with_folded_or_offset(i32 %x, i64 %v) {
+  %and = and i32 %x, -4
+  %p = inttoptr i32 %and to i32*
+  %arrayidx = getelementptr inbounds i32, i32* %p, i32 2
+  %t = trunc i64 %v to i32
+  store atomic i32 %t, i32* %arrayidx seq_cst, align 4
+  ret void
+}
diff --git a/llvm/test/CodeGen/WebAssembly/store-trunc-atomic.ll b/llvm/test/CodeGen/WebAssembly/store-trunc-atomic.ll
new file mode 100644
index 00000000000..39f9e1e5bea
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/store-trunc-atomic.ll
@@ -0,0 +1,46 @@
+; RUN: llc < %s -mattr=+atomics,+sign-ext -asm-verbose=false -disable-wasm-explicit-locals | FileCheck %s
+
+; Test that truncating stores are assembled properly.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown"
+
+; CHECK-LABEL: trunc_i8_i32:
+; CHECK: i32.atomic.store8 0($0), $1{{$}}
+define void @trunc_i8_i32(i8 *%p, i32 %v) {
+  %t = trunc i32 %v to i8
+  store atomic i8 %t, i8* %p seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: trunc_i16_i32:
+; CHECK: i32.atomic.store16 0($0), $1{{$}}
+define void @trunc_i16_i32(i16 *%p, i32 %v) {
+  %t = trunc i32 %v to i16
+  store atomic i16 %t, i16* %p seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: trunc_i8_i64:
+; CHECK: i64.atomic.store8 0($0), $1{{$}}
+define void @trunc_i8_i64(i8 *%p, i64 %v) {
+  %t = trunc i64 %v to i8
+  store atomic i8 %t, i8* %p seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: trunc_i16_i64:
+; CHECK: i64.atomic.store16 0($0), $1{{$}}
+define void @trunc_i16_i64(i16 *%p, i64 %v) {
+  %t = trunc i64 %v to i16
+  store atomic i16 %t, i16* %p seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: trunc_i32_i64:
+; CHECK: i64.atomic.store32 0($0), $1{{$}}
+define void @trunc_i32_i64(i32 *%p, i64 %v) {
+  %t = trunc i64 %v to i32
+  store atomic i32 %t, i32* %p seq_cst, align 4
+  ret void
+}
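
The RUN line in the new store-trunc-atomic.ll expands to the following standalone invocation; a minimal sketch, assuming llc and FileCheck come from an LLVM build with the WebAssembly target enabled and that the command is run from the top of the llvm-project checkout:

llc < llvm/test/CodeGen/WebAssembly/store-trunc-atomic.ll \
    -mattr=+atomics,+sign-ext -asm-verbose=false -disable-wasm-explicit-locals \
  | FileCheck llvm/test/CodeGen/WebAssembly/store-trunc-atomic.ll

FileCheck takes its CHECK patterns from the same file that llc compiles and reads the emitted assembly on stdin, so the test fails if any of the truncating atomic stores stops selecting to the single i32.atomic.store8/16 or i64.atomic.store8/16/32 instruction expected above.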