-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp   |  8
-rw-r--r--  llvm/test/Transforms/InstCombine/x86-masked-memops.ll  | 82
2 files changed, 89 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 43b1bf0dc10..b8396eee844 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1663,7 +1663,13 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     break;
 
   case Intrinsic::x86_avx_maskload_ps:
-    // TODO: Add the other masked load variants.
+  case Intrinsic::x86_avx_maskload_pd:
+  case Intrinsic::x86_avx_maskload_ps_256:
+  case Intrinsic::x86_avx_maskload_pd_256:
+  case Intrinsic::x86_avx2_maskload_d:
+  case Intrinsic::x86_avx2_maskload_q:
+  case Intrinsic::x86_avx2_maskload_d_256:
+  case Intrinsic::x86_avx2_maskload_q_256:
     if (Instruction *I = simplifyX86MaskedLoad(*II, *this))
       return I;
     break;
diff --git a/llvm/test/Transforms/InstCombine/x86-masked-memops.ll b/llvm/test/Transforms/InstCombine/x86-masked-memops.ll
index eb136392aaf..970ee04209e 100644
--- a/llvm/test/Transforms/InstCombine/x86-masked-memops.ll
+++ b/llvm/test/Transforms/InstCombine/x86-masked-memops.ll
@@ -57,6 +57,83 @@ define <4 x float> @mload_one_one(i8* %f) {
 ; CHECK-NEXT:  ret <4 x float> %1
 }
 
+; Try doubles.
+
+define <2 x double> @mload_one_one_double(i8* %f) {
+  %ld = tail call <2 x double> @llvm.x86.avx.maskload.pd(i8* %f, <2 x i64> <i64 -1, i64 0>)
+  ret <2 x double> %ld
+
+; CHECK-LABEL: @mload_one_one_double(
+; CHECK-NEXT:  %castvec = bitcast i8* %f to <2 x double>*
+; CHECK-NEXT:  %1 = call <2 x double> @llvm.masked.load.v2f64(<2 x double>* %castvec, i32 1, <2 x i1> <i1 true, i1 false>, <2 x double> undef)
+; CHECK-NEXT:  ret <2 x double> %1
+}
+
+; Try 256-bit FP ops.
+
+define <8 x float> @mload_v8f32(i8* %f) {
+  %ld = tail call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %f, <8 x i32> <i32 0, i32 0, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 0>)
+  ret <8 x float> %ld
+
+; CHECK-LABEL: @mload_v8f32(
+; CHECK-NEXT:  %castvec = bitcast i8* %f to <8 x float>*
+; CHECK-NEXT:  %1 = call <8 x float> @llvm.masked.load.v8f32(<8 x float>* %castvec, i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x float> undef)
+; CHECK-NEXT:  ret <8 x float> %1
+}
+
+define <4 x double> @mload_v4f64(i8* %f) {
+  %ld = tail call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 0, i64 0>)
+  ret <4 x double> %ld
+
+; CHECK-LABEL: @mload_v4f64(
+; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x double>*
+; CHECK-NEXT:  %1 = call <4 x double> @llvm.masked.load.v4f64(<4 x double>* %castvec, i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x double> undef)
+; CHECK-NEXT:  ret <4 x double> %1
+}
+
+; Try the AVX2 variants.
+
+define <4 x i32> @mload_v4i32(i8* %f) {
+  %ld = tail call <4 x i32> @llvm.x86.avx2.maskload.d(i8* %f, <4 x i32> <i32 0, i32 0, i32 0, i32 -1>)
+  ret <4 x i32> %ld
+
+; CHECK-LABEL: @mload_v4i32(
+; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x i32>*
+; CHECK-NEXT:  %1 = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %castvec, i32 1, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x i32> undef)
+; CHECK-NEXT:  ret <4 x i32> %1
+}
+
+define <2 x i64> @mload_v2i64(i8* %f) {
+  %ld = tail call <2 x i64> @llvm.x86.avx2.maskload.q(i8* %f, <2 x i64> <i64 -1, i64 0>)
+  ret <2 x i64> %ld
+
+; CHECK-LABEL: @mload_v2i64(
+; CHECK-NEXT:  %castvec = bitcast i8* %f to <2 x i64>*
+; CHECK-NEXT:  %1 = call <2 x i64> @llvm.masked.load.v2i64(<2 x i64>* %castvec, i32 1, <2 x i1> <i1 true, i1 false>, <2 x i64> undef)
+; CHECK-NEXT:  ret <2 x i64> %1
+}
+
+define <8 x i32> @mload_v8i32(i8* %f) {
+  %ld = tail call <8 x i32> @llvm.x86.avx2.maskload.d.256(i8* %f, <8 x i32> <i32 0, i32 0, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 0>)
+  ret <8 x i32> %ld
+
+; CHECK-LABEL: @mload_v8i32(
+; CHECK-NEXT:  %castvec = bitcast i8* %f to <8 x i32>*
+; CHECK-NEXT:  %1 = call <8 x i32> @llvm.masked.load.v8i32(<8 x i32>* %castvec, i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x i32> undef)
+; CHECK-NEXT:  ret <8 x i32> %1
+}
+
+define <4 x i64> @mload_v4i64(i8* %f) {
+  %ld = tail call <4 x i64> @llvm.x86.avx2.maskload.q.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 0, i64 0>)
+  ret <4 x i64> %ld
+
+; CHECK-LABEL: @mload_v4i64(
+; CHECK-NEXT:  %castvec = bitcast i8* %f to <4 x i64>*
+; CHECK-NEXT:  %1 = call <4 x i64> @llvm.masked.load.v4i64(<4 x i64>* %castvec, i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i64> undef)
+; CHECK-NEXT:  ret <4 x i64> %1
+}
+
+
 ;; MASKED STORES
 
 ; If the mask isn't constant, do nothing.
@@ -195,6 +272,11 @@ declare <2 x double> @llvm.x86.avx.maskload.pd(i8*, <2 x i64>)
 declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8*, <8 x i32>)
 declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8*, <4 x i64>)
 
+declare <4 x i32> @llvm.x86.avx2.maskload.d(i8*, <4 x i32>)
+declare <2 x i64> @llvm.x86.avx2.maskload.q(i8*, <2 x i64>)
+declare <8 x i32> @llvm.x86.avx2.maskload.d.256(i8*, <8 x i32>)
+declare <4 x i64> @llvm.x86.avx2.maskload.q.256(i8*, <4 x i64>)
+
 declare void @llvm.x86.avx.maskstore.ps(i8*, <4 x i32>, <4 x float>)
 declare void @llvm.x86.avx.maskstore.pd(i8*, <2 x i64>, <2 x double>)
 declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>)
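
Background on the combine these new cases feed into: each x86 maskload intrinsic takes a vector mask whose per-lane sign bit decides whether that lane is loaded. When the mask is a compile-time constant, it can be translated lane by lane into the <N x i1> mask that the target-independent llvm.masked.load intrinsic expects, which is exactly the pattern the CHECK lines above verify. The helper below is a minimal, hypothetical C++ sketch of that translation, not the in-tree simplifyX86MaskedLoad; the name getBoolVecFromMask is invented for illustration.

// Hypothetical sketch (not LLVM's actual implementation): build the <N x i1>
// mask that llvm.masked.load expects from a constant x86 maskload mask, where
// the sign bit of each lane selects whether that lane is loaded.
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"

using namespace llvm;

// Returns a <N x i1> constant whose lane i is true iff lane i of Mask has its
// sign bit set; returns nullptr if the mask is not a usable constant, in which
// case the caller should leave the x86 intrinsic untouched.
static Constant *getBoolVecFromMask(Value *Mask) {
  auto *ConstMask = dyn_cast<Constant>(Mask);
  if (!ConstMask)
    return nullptr;

  unsigned NumElts = cast<VectorType>(ConstMask->getType())->getNumElements();
  Type *BoolTy = Type::getInt1Ty(ConstMask->getContext());

  SmallVector<Constant *, 8> BoolVec;
  for (unsigned I = 0; I != NumElts; ++I) {
    auto *Elt = dyn_cast_or_null<ConstantInt>(ConstMask->getAggregateElement(I));
    if (!Elt)
      return nullptr;
    // e.g. <i64 -1, i64 0> becomes <i1 true, i1 false>, as in the tests above.
    BoolVec.push_back(ConstantInt::get(BoolTy, Elt->getValue().isNegative()));
  }
  return ConstantVector::get(BoolVec);
}

Given such a boolean mask and the i8* pointer bitcast to the vector type, the original call can then be rewritten as a generic masked load (for example via IRBuilder's CreateMaskedLoad), which is what produces the %castvec plus @llvm.masked.load.* sequence checked in the test output.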