| author | Elena Demikhovsky <elena.demikhovsky@intel.com> | 2014-12-19 23:27:57 +0000 |
|---|---|---|
| committer | Elena Demikhovsky <elena.demikhovsky@intel.com> | 2014-12-19 23:27:57 +0000 |
| commit | fb73ca516b3639a85ebd5658411bb2a4437a8550 (patch) | |
| tree | 260d475faf8adf6e3047c1e6355a4db041983429 /llvm/test/CodeGen | |
| parent | dc103075249aeb0f1d187f8eedfe561634062c43 (diff) | |
Masked load and store codegen - fixed 128-bit vectors
The codegen failed on 128-bit vector types on AVX2.
I added the missing patterns in the .td files and added tests.
llvm-svn: 224647
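For context, the bug is easy to reproduce in isolation. Below is a minimal sketch in the style of the new tests; the function name, triple, and -mcpu value are illustrative assumptions, not part of the commit. Before this fix, a 128-bit masked load like this failed in instruction selection on AVX2; with the new patterns it should lower to vmaskmovps plus a blend, as the test7 checks in the diff assert.

```llvm
; Minimal reproducer sketch (assumed RUN line; the file's real RUN lines are
; not shown in this diff).
; RUN: llc -mtriple=x86_64-unknown-linux-gnu -mcpu=core-avx2 < %s

; A 128-bit masked load: lanes where %trigger is zero are loaded from %addr,
; the remaining lanes are taken from %dst. On AVX2 this should now select
; vmaskmovps followed by a blend instead of failing to select.
define <4 x float> @repro(<4 x i32> %trigger, i8* %addr, <4 x float> %dst) {
  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
  %res = call <4 x float> @llvm.masked.load.v4f32(i8* %addr, <4 x float> %dst, i32 4, <4 x i1> %mask)
  ret <4 x float> %res
}

declare <4 x float> @llvm.masked.load.v4f32(i8*, <4 x float>, i32, <4 x i1>)
```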
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r-- | llvm/test/CodeGen/X86/masked_memop.ll | 86
1 file changed, 78 insertions(+), 8 deletions(-)
```diff
diff --git a/llvm/test/CodeGen/X86/masked_memop.ll b/llvm/test/CodeGen/X86/masked_memop.ll
index 8cb2d63d5f6..0b88ec6fe8d 100644
--- a/llvm/test/CodeGen/X86/masked_memop.ll
+++ b/llvm/test/CodeGen/X86/masked_memop.ll
@@ -41,8 +41,8 @@ define void @test3(<16 x i32> %trigger, i8* %addr, <16 x i32> %val) {
 ; AVX512: vmovups (%rdi), %zmm{{.*{%k[1-7]}}}
 
 ; AVX2-LABEL: test4
-; AVX2: vpmaskmovd {{.*}}(%rdi)
-; AVX2: vpmaskmovd {{.*}}(%rdi)
+; AVX2: vmaskmovps {{.*}}(%rdi)
+; AVX2: vmaskmovps {{.*}}(%rdi)
 ; AVX2: blend
 define <16 x float> @test4(<16 x i32> %trigger, i8* %addr, <16 x float> %dst) {
   %mask = icmp eq <16 x i32> %trigger, zeroinitializer
@@ -54,9 +54,9 @@ define <16 x float> @test4(<16 x i32> %trigger, i8* %addr, <16 x float> %dst) {
 ; AVX512: vmovupd (%rdi), %zmm1 {%k1}
 
 ; AVX2-LABEL: test5
-; AVX2: vpmaskmovq
+; AVX2: vmaskmovpd
 ; AVX2: vblendvpd
-; AVX2: vpmaskmovq
+; AVX2: vmaskmovpd
 ; AVX2: vblendvpd
 define <8 x double> @test5(<8 x i32> %trigger, i8* %addr, <8 x double> %dst) {
   %mask = icmp eq <8 x i32> %trigger, zeroinitializer
@@ -64,10 +64,80 @@ define <8 x double> @test5(<8 x i32> %trigger, i8* %addr, <8 x double> %dst) {
   ret <8 x double> %res
 }
 
+; AVX2-LABEL: test6
+; AVX2: vmaskmovpd
+; AVX2: vblendvpd
+define <2 x double> @test6(<2 x i64> %trigger, i8* %addr, <2 x double> %dst) {
+  %mask = icmp eq <2 x i64> %trigger, zeroinitializer
+  %res = call <2 x double> @llvm.masked.load.v2f64(i8* %addr, <2 x double>%dst, i32 4, <2 x i1>%mask)
+  ret <2 x double> %res
+}
+
+; AVX2-LABEL: test7
+; AVX2: vmaskmovps {{.*}}(%rdi)
+; AVX2: blend
+define <4 x float> @test7(<4 x i32> %trigger, i8* %addr, <4 x float> %dst) {
+  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
+  %res = call <4 x float> @llvm.masked.load.v4f32(i8* %addr, <4 x float>%dst, i32 4, <4 x i1>%mask)
+  ret <4 x float> %res
+}
+
+; AVX2-LABEL: test8
+; AVX2: vpmaskmovd {{.*}}(%rdi)
+; AVX2: blend
+define <4 x i32> @test8(<4 x i32> %trigger, i8* %addr, <4 x i32> %dst) {
+  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
+  %res = call <4 x i32> @llvm.masked.load.v4i32(i8* %addr, <4 x i32>%dst, i32 4, <4 x i1>%mask)
+  ret <4 x i32> %res
+}
+
+; AVX2-LABEL: test9
+; AVX2: vpmaskmovd %xmm
+define void @test9(<4 x i32> %trigger, i8* %addr, <4 x i32> %val) {
+  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
+  call void @llvm.masked.store.v4i32(i8* %addr, <4 x i32>%val, i32 4, <4 x i1>%mask)
+  ret void
+}
+
+; AVX2-LABEL: test10
+; AVX2: vmaskmovpd (%rdi), %ymm
+; AVX2: blend
+define <4 x double> @test10(<4 x i32> %trigger, i8* %addr, <4 x double> %dst) {
+  %mask = icmp eq <4 x i32> %trigger, zeroinitializer
+  %res = call <4 x double> @llvm.masked.load.v4f64(i8* %addr, <4 x double>%dst, i32 4, <4 x i1>%mask)
+  ret <4 x double> %res
+}
+
+; AVX2-LABEL: test11
+; AVX2: vmaskmovps
+; AVX2: vblendvps
+define <8 x float> @test11(<8 x i32> %trigger, i8* %addr, <8 x float> %dst) {
+  %mask = icmp eq <8 x i32> %trigger, zeroinitializer
+  %res = call <8 x float> @llvm.masked.load.v8f32(i8* %addr, <8 x float>%dst, i32 4, <8 x i1>%mask)
+  ret <8 x float> %res
+}
+
+; AVX2-LABEL: test12
+; AVX2: vpmaskmovd %ymm
+define void @test12(<8 x i32> %trigger, i8* %addr, <8 x i32> %val) {
+  %mask = icmp eq <8 x i32> %trigger, zeroinitializer
+  call void @llvm.masked.store.v8i32(i8* %addr, <8 x i32>%val, i32 4, <8 x i1>%mask)
+  ret void
+}
+
 declare <16 x i32> @llvm.masked.load.v16i32(i8*, <16 x i32>, i32, <16 x i1>)
-declare void @llvm.masked.store.v16i32(i8*, <16 x i32>, i32, <16 x i1>)
-declare <16 x float> @llvm.masked.load.v16f32(i8*, <16 x float>, i32, <16 x i1>)
+declare <4 x i32> @llvm.masked.load.v4i32(i8*, <4 x i32>, i32, <4 x i1>)
+declare void @llvm.masked.store.v16i32(i8*, <16 x i32>, i32, <16 x i1>)
+declare void @llvm.masked.store.v8i32(i8*, <8 x i32>, i32, <8 x i1>)
+declare void @llvm.masked.store.v4i32(i8*, <4 x i32>, i32, <4 x i1>)
+declare <16 x float> @llvm.masked.load.v16f32(i8*, <16 x float>, i32, <16 x i1>)
+declare <8 x float> @llvm.masked.load.v8f32(i8*, <8 x float>, i32, <8 x i1>)
+declare <4 x float> @llvm.masked.load.v4f32(i8*, <4 x float>, i32, <4 x i1>)
 declare void @llvm.masked.store.v16f32(i8*, <16 x float>, i32, <16 x i1>)
-declare <8 x double> @llvm.masked.load.v8f64(i8*, <8 x double>, i32, <8 x i1>)
-declare void @llvm.masked.store.v8f64(i8*, <8 x double>, i32, <8 x i1>)
+declare <8 x double> @llvm.masked.load.v8f64(i8*, <8 x double>, i32, <8 x i1>)
+declare <4 x double> @llvm.masked.load.v4f64(i8*, <4 x double>, i32, <4 x i1>)
+declare <2 x double> @llvm.masked.load.v2f64(i8*, <2 x double>, i32, <2 x i1>)
+declare void @llvm.masked.store.v8f64(i8*, <8 x double>, i32, <8 x i1>)
+declare void @llvm.masked.store.v2f64(i8*, <2 x double>, i32, <2 x i1>)
+declare void @llvm.masked.store.v2i64(i8*, <2 x i64>, i32, <2 x i1>)
```
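Two notes on reading the diff. First, the updated checks for test4 and test5 replace the integer masked-move mnemonics (vpmaskmovd, vpmaskmovq) with the floating-point forms (vmaskmovps, vmaskmovpd), which are the instructions the backend should emit for float and double vectors. Second, the file's RUN lines sit outside these hunks; based on the AVX512 and AVX2 check prefixes, a sketch of what they presumably look like is below (the triple and -mcpu values are assumptions, not taken from the commit).

```llvm
; Assumed RUN lines matching the check prefixes; the real ones are not part of this hunk.
; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=knl < %s | FileCheck %s -check-prefix=AVX512
; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=core-avx2 < %s | FileCheck %s -check-prefix=AVX2
```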