Diffstat (limited to 'llvm/test/CodeGen/WebAssembly/offset-atomics.ll')
-rw-r--r-- | llvm/test/CodeGen/WebAssembly/offset-atomics.ll | 318
1 files changed, 318 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/WebAssembly/offset-atomics.ll b/llvm/test/CodeGen/WebAssembly/offset-atomics.ll
index bb1799ba267..98c53b23432 100644
--- a/llvm/test/CodeGen/WebAssembly/offset-atomics.ll
+++ b/llvm/test/CodeGen/WebAssembly/offset-atomics.ll
@@ -6,6 +6,8 @@
 target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
 target triple = "wasm32-unknown-unknown"
 
+; Basic load.
+
 ; CHECK-LABEL: load_i32_no_offset:
 ; CHECK: i32.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
 ; CHECK-NEXT: return $pop[[NUM]]{{$}}
@@ -154,6 +156,148 @@ define i32 @load_i32_with_folded_or_offset(i32 %x) {
   ret i32 %conv
 }
 
+; Basic store.
+
+; CHECK-LABEL: store_i32_no_offset:
+; CHECK-NEXT: .param i32, i32{{$}}
+; CHECK-NEXT: i32.atomic.store 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @store_i32_no_offset(i32 *%p, i32 %v) {
+  store atomic i32 %v, i32* %p seq_cst, align 4
+  ret void
+}
+
+; With an nuw add, we can fold an offset into the store.
+
+; CHECK-LABEL: store_i32_with_folded_offset:
+; CHECK: i32.atomic.store 24($0), $pop0{{$}}
+define void @store_i32_with_folded_offset(i32* %p) {
+  %q = ptrtoint i32* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i32*
+  store atomic i32 0, i32* %s seq_cst, align 4
+  ret void
+}
+
+; With an inbounds gep, we can fold an offset into the store.
+
+; CHECK-LABEL: store_i32_with_folded_gep_offset:
+; CHECK: i32.atomic.store 24($0), $pop0{{$}}
+define void @store_i32_with_folded_gep_offset(i32* %p) {
+  %s = getelementptr inbounds i32, i32* %p, i32 6
+  store atomic i32 0, i32* %s seq_cst, align 4
+  ret void
+}
+
+; We can't fold a negative offset, even with an inbounds gep.
+
+; CHECK-LABEL: store_i32_with_unfolded_gep_negative_offset:
+; CHECK: i32.const $push0=, -24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i32.atomic.store 0($pop1), $pop2{{$}}
+define void @store_i32_with_unfolded_gep_negative_offset(i32* %p) {
+  %s = getelementptr inbounds i32, i32* %p, i32 -6
+  store atomic i32 0, i32* %s seq_cst, align 4
+  ret void
+}
+
+; Without nuw, and even with nsw, we can't fold an offset.
+
+; CHECK-LABEL: store_i32_with_unfolded_offset:
+; CHECK: i32.const $push0=, 24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i32.atomic.store 0($pop1), $pop2{{$}}
+define void @store_i32_with_unfolded_offset(i32* %p) {
+  %q = ptrtoint i32* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i32*
+  store atomic i32 0, i32* %s seq_cst, align 4
+  ret void
+}
+
+; Without inbounds, we can't fold a gep offset.
+
+; CHECK-LABEL: store_i32_with_unfolded_gep_offset:
+; CHECK: i32.const $push0=, 24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i32.atomic.store 0($pop1), $pop2{{$}}
+define void @store_i32_with_unfolded_gep_offset(i32* %p) {
+  %s = getelementptr i32, i32* %p, i32 6
+  store atomic i32 0, i32* %s seq_cst, align 4
+  ret void
+}
+
+; Basic store with an i64 value.
+
+; CHECK-LABEL: store_i64_no_offset:
+; CHECK-NEXT: .param i32, i64{{$}}
+; CHECK-NEXT: i64.atomic.store 0($0), $1{{$}}
+; CHECK-NEXT: return{{$}}
+define void @store_i64_no_offset(i64 *%p, i64 %v) {
+  store atomic i64 %v, i64* %p seq_cst, align 8
+  ret void
+}
+
+; With an nuw add, we can fold an offset into the i64 store.
+
+; CHECK-LABEL: store_i64_with_folded_offset:
+; CHECK: i64.atomic.store 24($0), $pop0{{$}}
+define void @store_i64_with_folded_offset(i64* %p) {
+  %q = ptrtoint i64* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i64*
+  store atomic i64 0, i64* %s seq_cst, align 8
+  ret void
+}
+
+; With an inbounds gep, we can fold an offset into the i64 store.
+
+; CHECK-LABEL: store_i64_with_folded_gep_offset:
+; CHECK: i64.atomic.store 24($0), $pop0{{$}}
+define void @store_i64_with_folded_gep_offset(i64* %p) {
+  %s = getelementptr inbounds i64, i64* %p, i32 3
+  store atomic i64 0, i64* %s seq_cst, align 8
+  ret void
+}
+
+; We can't fold a negative offset into the i64 store, even with an inbounds gep.
+
+; CHECK-LABEL: store_i64_with_unfolded_gep_negative_offset:
+; CHECK: i32.const $push0=, -24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i64.atomic.store 0($pop1), $pop2{{$}}
+define void @store_i64_with_unfolded_gep_negative_offset(i64* %p) {
+  %s = getelementptr inbounds i64, i64* %p, i32 -3
+  store atomic i64 0, i64* %s seq_cst, align 8
+  ret void
+}
+
+; Without nuw, and even with nsw, we can't fold an offset into the i64 store.
+
+; CHECK-LABEL: store_i64_with_unfolded_offset:
+; CHECK: i32.const $push0=, 24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i64.atomic.store 0($pop1), $pop2{{$}}
+define void @store_i64_with_unfolded_offset(i64* %p) {
+  %q = ptrtoint i64* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i64*
+  store atomic i64 0, i64* %s seq_cst, align 8
+  ret void
+}
+
+; Without inbounds, we can't fold a gep offset into the i64 store.
+
+; CHECK-LABEL: store_i64_with_unfolded_gep_offset:
+; CHECK: i32.const $push0=, 24{{$}}
+; CHECK: i32.add $push1=, $0, $pop0{{$}}
+; CHECK: i64.atomic.store 0($pop1), $pop2{{$}}
+define void @store_i64_with_unfolded_gep_offset(i64* %p) {
+  %s = getelementptr i64, i64* %p, i32 3
+  store atomic i64 0, i64* %s seq_cst, align 8
+  ret void
+}
+
 ; When loading from a fixed address, materialize a zero.
 
 ; CHECK-LABEL: load_i32_from_numeric_address
@@ -175,6 +319,25 @@ define i32 @load_i32_from_global_address() {
   ret i32 %t
 }
 
+; CHECK-LABEL: store_i32_to_numeric_address:
+; CHECK-NEXT: i32.const $push0=, 0{{$}}
+; CHECK-NEXT: i32.const $push1=, 0{{$}}
+; CHECK-NEXT: i32.atomic.store 42($pop0), $pop1{{$}}
+define void @store_i32_to_numeric_address() {
+  %s = inttoptr i32 42 to i32*
+  store atomic i32 0, i32* %s seq_cst, align 4
+  ret void
+}
+
+; CHECK-LABEL: store_i32_to_global_address:
+; CHECK: i32.const $push0=, 0{{$}}
+; CHECK: i32.const $push1=, 0{{$}}
+; CHECK: i32.atomic.store gv($pop0), $pop1{{$}}
+define void @store_i32_to_global_address() {
+  store atomic i32 0, i32* @gv seq_cst, align 4
+  ret void
+}
+
 ; Fold an offset into a sign-extending load.
 
 ; CHECK-LABEL: load_i8_s_with_folded_offset:
@@ -305,3 +468,158 @@ define i8 @ldi8_a1(i8 *%p) {
   %v = load atomic i8, i8* %p seq_cst, align 1
   ret i8 %v
 }
+
+; Fold an offset into a truncating store.
+
+; CHECK-LABEL: store_i8_with_folded_offset:
+; CHECK: i32.atomic.store8 24($0), $pop0{{$}}
+define void @store_i8_with_folded_offset(i8* %p) {
+  %q = ptrtoint i8* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i8*
+  store atomic i8 0, i8* %s seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: store_i16_with_folded_offset:
+; CHECK: i32.atomic.store16 24($0), $pop0{{$}}
+define void @store_i16_with_folded_offset(i16* %p) {
+  %q = ptrtoint i16* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i16*
+  store atomic i16 0, i16* %s seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: store_i8_i64_with_folded_offset:
+; CHECK: i64.atomic.store8 24($0), $1{{$}}
+define void @store_i8_i64_with_folded_offset(i8* %p, i64 %v) {
+  %q = ptrtoint i8* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i8*
+  %t = trunc i64 %v to i8
+  store atomic i8 %t, i8* %s seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: store_i16_i64_with_folded_offset:
+; CHECK: i64.atomic.store16 24($0), $1{{$}}
+define void @store_i16_i64_with_folded_offset(i16* %p, i64 %v) {
+  %q = ptrtoint i16* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i16*
+  %t = trunc i64 %v to i16
+  store atomic i16 %t, i16* %s seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: store_i32_i64_with_folded_offset:
+; CHECK: i64.atomic.store32 24($0), $1{{$}}
+define void @store_i32_i64_with_folded_offset(i32* %p, i64 %v) {
+  %q = ptrtoint i32* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i32*
+  %t = trunc i64 %v to i32
+  store atomic i32 %t, i32* %s seq_cst, align 4
+  ret void
+}
+
+; Fold a gep offset into a truncating store.
+
+; CHECK-LABEL: store_i8_with_folded_gep_offset:
+; CHECK: i32.atomic.store8 24($0), $pop0{{$}}
+define void @store_i8_with_folded_gep_offset(i8* %p) {
+  %s = getelementptr inbounds i8, i8* %p, i32 24
+  store atomic i8 0, i8* %s seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: store_i16_with_folded_gep_offset:
+; CHECK: i32.atomic.store16 48($0), $pop0{{$}}
+define void @store_i16_with_folded_gep_offset(i16* %p) {
+  %s = getelementptr inbounds i16, i16* %p, i32 24
+  store atomic i16 0, i16* %s seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: store_i8_i64_with_folded_gep_offset:
+; CHECK: i64.atomic.store8 24($0), $1{{$}}
+define void @store_i8_i64_with_folded_gep_offset(i8* %p, i64 %v) {
+  %s = getelementptr inbounds i8, i8* %p, i32 24
+  %t = trunc i64 %v to i8
+  store atomic i8 %t, i8* %s seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: store_i16_i64_with_folded_gep_offset:
+; CHECK: i64.atomic.store16 48($0), $1{{$}}
+define void @store_i16_i64_with_folded_gep_offset(i16* %p, i64 %v) {
+  %s = getelementptr inbounds i16, i16* %p, i32 24
+  %t = trunc i64 %v to i16
+  store atomic i16 %t, i16* %s seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: store_i32_i64_with_folded_gep_offset:
+; CHECK: i64.atomic.store32 96($0), $1{{$}}
+define void @store_i32_i64_with_folded_gep_offset(i32* %p, i64 %v) {
+  %s = getelementptr inbounds i32, i32* %p, i32 24
+  %t = trunc i64 %v to i32
+  store atomic i32 %t, i32* %s seq_cst, align 4
+  ret void
+}
+
+; Fold an or_is_add-pattern offset into a truncating store.
+
+; CHECK-LABEL: store_i8_with_folded_or_offset:
+; CHECK: i32.atomic.store8 2($pop{{[0-9]+}}), $pop{{[0-9]+}}{{$}}
+define void @store_i8_with_folded_or_offset(i32 %x) {
+  %and = and i32 %x, -4
+  %p = inttoptr i32 %and to i8*
+  %arrayidx = getelementptr inbounds i8, i8* %p, i32 2
+  store atomic i8 0, i8* %arrayidx seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: store_i16_with_folded_or_offset:
+; CHECK: i32.atomic.store16 4($pop{{[0-9]+}}), $pop{{[0-9]+}}{{$}}
+define void @store_i16_with_folded_or_offset(i32 %x) {
+  %and = and i32 %x, -4
+  %p = inttoptr i32 %and to i16*
+  %arrayidx = getelementptr inbounds i16, i16* %p, i32 2
+  store atomic i16 0, i16* %arrayidx seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: store_i8_i64_with_folded_or_offset:
+; CHECK: i64.atomic.store8 2($pop{{[0-9]+}}), $1{{$}}
+define void @store_i8_i64_with_folded_or_offset(i32 %x, i64 %v) {
+  %and = and i32 %x, -4
+  %p = inttoptr i32 %and to i8*
+  %arrayidx = getelementptr inbounds i8, i8* %p, i32 2
+  %t = trunc i64 %v to i8
+  store atomic i8 %t, i8* %arrayidx seq_cst, align 1
+  ret void
+}
+
+; CHECK-LABEL: store_i16_i64_with_folded_or_offset:
+; CHECK: i64.atomic.store16 4($pop{{[0-9]+}}), $1{{$}}
+define void @store_i16_i64_with_folded_or_offset(i32 %x, i64 %v) {
+  %and = and i32 %x, -4
+  %p = inttoptr i32 %and to i16*
+  %arrayidx = getelementptr inbounds i16, i16* %p, i32 2
+  %t = trunc i64 %v to i16
+  store atomic i16 %t, i16* %arrayidx seq_cst, align 2
+  ret void
+}
+
+; CHECK-LABEL: store_i32_i64_with_folded_or_offset:
+; CHECK: i64.atomic.store32 8($pop{{[0-9]+}}), $1{{$}}
+define void @store_i32_i64_with_folded_or_offset(i32 %x, i64 %v) {
+  %and = and i32 %x, -4
+  %p = inttoptr i32 %and to i32*
+  %arrayidx = getelementptr inbounds i32, i32* %p, i32 2
+  %t = trunc i64 %v to i32
+  store atomic i32 %t, i32* %arrayidx seq_cst, align 4
+  ret void
+}
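Why the or_is_add group folds: after %and = and i32 %x, -4, the two low bits of the address are known to be zero, so adding a small constant such as 2 cannot carry out of those bits, and an or of that constant computes the same address as an add. The backend's or_is_add matching treats such an or as an add, allowing the constant to land in the store's immediate offset field just as in the add-nuw tests. A minimal standalone sketch of the pattern (hypothetical function name, written with an explicit or rather than the gep the tests use):

; The low 2 bits of %and are zero, so %and | 2 == %and + 2, and the
; 2 can be folded into the atomic store's offset immediate.
define void @or_is_add_sketch(i32 %x) {
  %and = and i32 %x, -4
  %or = or i32 %and, 2
  %p = inttoptr i32 %or to i8*
  store atomic i8 0, i8* %p seq_cst, align 1
  ret void
}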