author     Andrea Di Biagio <Andrea_DiBiagio@sn.scee.net>  2014-04-26 01:03:22 +0000
committer  Andrea Di Biagio <Andrea_DiBiagio@sn.scee.net>  2014-04-26 01:03:22 +0000
commit     8cc9059ce8f3bd25fd61b0cf6936295c80518bf3 (patch)
tree       8014fcc78e10c00ac649be3033b6f00c2d1a31cf
parent     8d039e442036436c004d7b43333efe29d4386293 (diff)
[InstCombine][X86] Teach how to fold calls to SSE2/AVX2 packed logical shift right intrinsics.

A packed logical shift right with a shift count bigger than or equal to the
element size always produces a zero vector. In all other cases, it can be
safely replaced by a 'lshr' instruction.

llvm-svn: 207299
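A minimal before/after sketch of the fold in LLVM IR (the value names %v, %r,
and %r2 are hypothetical; the patch's own tests below exercise the real cases):

    ; Count smaller than the element size (32 bits for psrli.d): the
    ; intrinsic call can be replaced by a plain 'lshr' with a splat count.
    %r = tail call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %v, i32 3)
    ; ... becomes:
    %r = lshr <4 x i32> %v, <i32 3, i32 3, i32 3, i32 3>

    ; Count greater than or equal to the element size: the result is
    ; always zero, so the call folds away entirely.
    %r2 = tail call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %v, i32 32)
    ; ... all uses of %r2 are replaced with <4 x i32> zeroinitializer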
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp   |  50
-rw-r--r--  llvm/test/Transforms/InstCombine/vec_demanded_elts.ll  | 105
2 files changed, 144 insertions(+), 11 deletions(-)
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 17ada47d2be..df217f19acd 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -570,8 +570,21 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
case Intrinsic::x86_avx2_psll_w:
case Intrinsic::x86_avx2_pslli_d:
case Intrinsic::x86_avx2_pslli_q:
- case Intrinsic::x86_avx2_pslli_w: {
- // Simplify if count is constant. To 0 if > BitWidth, otherwise to shl.
+ case Intrinsic::x86_avx2_pslli_w:
+ case Intrinsic::x86_sse2_psrl_d:
+ case Intrinsic::x86_sse2_psrl_q:
+ case Intrinsic::x86_sse2_psrl_w:
+ case Intrinsic::x86_sse2_psrli_d:
+ case Intrinsic::x86_sse2_psrli_q:
+ case Intrinsic::x86_sse2_psrli_w:
+ case Intrinsic::x86_avx2_psrl_d:
+ case Intrinsic::x86_avx2_psrl_q:
+ case Intrinsic::x86_avx2_psrl_w:
+ case Intrinsic::x86_avx2_psrli_d:
+ case Intrinsic::x86_avx2_psrli_q:
+ case Intrinsic::x86_avx2_psrli_w: {
+ // Simplify if count is constant. To 0 if >= BitWidth,
+ // otherwise to shl/lshr.
auto CDV = dyn_cast<ConstantDataVector>(II->getArgOperand(1));
auto CInt = dyn_cast<ConstantInt>(II->getArgOperand(1));
if (!CDV && !CInt)
@@ -588,14 +601,33 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
VT->getElementType()->getPrimitiveSizeInBits() - 1)
return ReplaceInstUsesWith(
CI, ConstantAggregateZero::get(Vec->getType()));
- else {
- unsigned VWidth = VT->getNumElements();
- // Get a constant vector of the same type as the first operand.
- auto VTCI = ConstantInt::get(VT->getElementType(), Count->getZExtValue());
- return BinaryOperator::CreateShl(
- Vec, Builder->CreateVectorSplat(VWidth, VTCI));
+
+ bool isPackedShiftLeft = true;
+ switch (II->getIntrinsicID()) {
+ default : break;
+ case Intrinsic::x86_sse2_psrl_d:
+ case Intrinsic::x86_sse2_psrl_q:
+ case Intrinsic::x86_sse2_psrl_w:
+ case Intrinsic::x86_sse2_psrli_d:
+ case Intrinsic::x86_sse2_psrli_q:
+ case Intrinsic::x86_sse2_psrli_w:
+ case Intrinsic::x86_avx2_psrl_d:
+ case Intrinsic::x86_avx2_psrl_q:
+ case Intrinsic::x86_avx2_psrl_w:
+ case Intrinsic::x86_avx2_psrli_d:
+ case Intrinsic::x86_avx2_psrli_q:
+ case Intrinsic::x86_avx2_psrli_w: isPackedShiftLeft = false; break;
}
- break;
+
+ unsigned VWidth = VT->getNumElements();
+ // Get a constant vector of the same type as the first operand.
+ auto VTCI = ConstantInt::get(VT->getElementType(), Count->getZExtValue());
+ if (isPackedShiftLeft)
+ return BinaryOperator::CreateShl(Vec,
+ Builder->CreateVectorSplat(VWidth, VTCI));
+
+ return BinaryOperator::CreateLShr(Vec,
+ Builder->CreateVectorSplat(VWidth, VTCI));
}
case Intrinsic::x86_sse41_pmovsxbw:
diff --git a/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll b/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll
index 35ba09313d6..a3e978141ad 100644
--- a/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll
+++ b/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll
@@ -358,7 +358,6 @@ define <2 x i64> @test_sse2_1() nounwind readnone uwtable {
%15 = bitcast <4 x i32> %14 to <2 x i64>
%16 = tail call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %15, i32 %S)
ret <2 x i64> %16
-
; CHECK: test_sse2_1
; CHECK: ret <2 x i64> <i64 72058418680037440, i64 144117112246370624>
}
@@ -405,7 +404,6 @@ define <2 x i64> @test_sse2_0() nounwind readnone uwtable {
%15 = bitcast <4 x i32> %14 to <2 x i64>
%16 = tail call <2 x i64> @llvm.x86.sse2.pslli.q(<2 x i64> %15, i32 %S)
ret <2 x i64> %16
-
; CHECK: test_sse2_0
; CHECK: ret <2 x i64> zeroinitializer
}
@@ -432,6 +430,97 @@ define <4 x i64> @test_avx2_0() nounwind readnone uwtable {
; CHECK: test_avx2_0
; CHECK: ret <4 x i64> zeroinitializer
}
+define <2 x i64> @test_sse2_psrl_1() nounwind readnone uwtable {
+ %S = bitcast i32 1 to i32
+ %1 = zext i32 %S to i64
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 0, i32 1
+ %4 = bitcast <2 x i64> %3 to <8 x i16>
+ %5 = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> <i16 16, i16 32, i16 64, i16 128, i16 256, i16 512, i16 1024, i16 2048>, <8 x i16> %4)
+ %6 = bitcast <8 x i16> %5 to <4 x i32>
+ %7 = bitcast <2 x i64> %3 to <4 x i32>
+ %8 = tail call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %6, <4 x i32> %7)
+ %9 = bitcast <4 x i32> %8 to <2 x i64>
+ %10 = tail call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %9, <2 x i64> %3)
+ %11 = bitcast <2 x i64> %10 to <8 x i16>
+ %12 = tail call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> %11, i32 %S)
+ %13 = bitcast <8 x i16> %12 to <4 x i32>
+ %14 = tail call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %13, i32 %S)
+ %15 = bitcast <4 x i32> %14 to <2 x i64>
+ %16 = tail call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> %15, i32 %S)
+ ret <2 x i64> %16
+; CHECK: test_sse2_psrl_1
+; CHECK: ret <2 x i64> <i64 562954248421376, i64 9007267974742020>
+}
+
+define <4 x i64> @test_avx2_psrl_1() nounwind readnone uwtable {
+ %S = bitcast i32 1 to i32
+ %1 = zext i32 %S to i64
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 0, i32 1
+ %4 = bitcast <2 x i64> %3 to <8 x i16>
+ %5 = tail call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> <i16 1024, i16 0, i16 0, i16 0, i16 2048, i16 0, i16 0, i16 0, i16 4096, i16 0, i16 0, i16 0, i16 8192, i16 0, i16 0, i16 0>, <8 x i16> %4)
+ %6 = bitcast <16 x i16> %5 to <8 x i32>
+ %7 = bitcast <2 x i64> %3 to <4 x i32>
+ %8 = tail call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %6, <4 x i32> %7)
+ %9 = bitcast <8 x i32> %8 to <4 x i64>
+ %10 = tail call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %9, <2 x i64> %3)
+ %11 = bitcast <4 x i64> %10 to <16 x i16>
+ %12 = tail call <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16> %11, i32 %S)
+ %13 = bitcast <16 x i16> %12 to <8 x i32>
+ %14 = tail call <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32> %13, i32 %S)
+ %15 = bitcast <8 x i32> %14 to <4 x i64>
+ %16 = tail call <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> %15, i32 %S)
+ ret <4 x i64> %16
+; CHECK: test_avx2_psrl_1
+; CHECK: ret <4 x i64> <i64 16, i64 32, i64 64, i64 128>
+}
+
+define <2 x i64> @test_sse2_psrl_0() nounwind readnone uwtable {
+ %S = bitcast i32 128 to i32
+ %1 = zext i32 %S to i64
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 0, i32 1
+ %4 = bitcast <2 x i64> %3 to <8 x i16>
+ %5 = tail call <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16> <i16 32, i16 64, i16 128, i16 256, i16 512, i16 1024, i16 2048, i16 4096>, <8 x i16> %4)
+ %6 = bitcast <8 x i16> %5 to <4 x i32>
+ %7 = bitcast <2 x i64> %3 to <4 x i32>
+ %8 = tail call <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32> %6, <4 x i32> %7)
+ %9 = bitcast <4 x i32> %8 to <2 x i64>
+ %10 = tail call <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64> %9, <2 x i64> %3)
+ %11 = bitcast <2 x i64> %10 to <8 x i16>
+ %12 = tail call <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16> %11, i32 %S)
+ %13 = bitcast <8 x i16> %12 to <4 x i32>
+ %14 = tail call <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32> %13, i32 %S)
+ %15 = bitcast <4 x i32> %14 to <2 x i64>
+ %16 = tail call <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64> %15, i32 %S)
+ ret <2 x i64> %16
+; CHECK: test_sse2_psrl_0
+; CHECK: ret <2 x i64> zeroinitializer
+}
+
+define <4 x i64> @test_avx2_psrl_0() nounwind readnone uwtable {
+ %S = bitcast i32 128 to i32
+ %1 = zext i32 %S to i64
+ %2 = insertelement <2 x i64> undef, i64 %1, i32 0
+ %3 = insertelement <2 x i64> %2, i64 0, i32 1
+ %4 = bitcast <2 x i64> %3 to <8 x i16>
+ %5 = tail call <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16> <i16 1024, i16 0, i16 0, i16 0, i16 2048, i16 0, i16 0, i16 0, i16 4096, i16 0, i16 0, i16 0, i16 8192, i16 0, i16 0, i16 0>, <8 x i16> %4)
+ %6 = bitcast <16 x i16> %5 to <8 x i32>
+ %7 = bitcast <2 x i64> %3 to <4 x i32>
+ %8 = tail call <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32> %6, <4 x i32> %7)
+ %9 = bitcast <8 x i32> %8 to <4 x i64>
+ %10 = tail call <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64> %9, <2 x i64> %3)
+ %11 = bitcast <4 x i64> %10 to <16 x i16>
+ %12 = tail call <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16> %11, i32 %S)
+ %13 = bitcast <16 x i16> %12 to <8 x i32>
+ %14 = tail call <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32> %13, i32 %S)
+ %15 = bitcast <8 x i32> %14 to <4 x i64>
+ %16 = tail call <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64> %15, i32 %S)
+ ret <4 x i64> %16
+; CHECK: test_avx2_psrl_0
+; CHECK: ret <4 x i64> zeroinitializer
+}
declare <4 x i64> @llvm.x86.avx2.pslli.q(<4 x i64>, i32) #1
declare <8 x i32> @llvm.x86.avx2.pslli.d(<8 x i32>, i32) #1
@@ -445,5 +534,17 @@ declare <8 x i16> @llvm.x86.sse2.pslli.w(<8 x i16>, i32) #1
declare <2 x i64> @llvm.x86.sse2.psll.q(<2 x i64>, <2 x i64>) #1
declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) #1
declare <8 x i16> @llvm.x86.sse2.psll.w(<8 x i16>, <8 x i16>) #1
+declare <4 x i64> @llvm.x86.avx2.psrli.q(<4 x i64>, i32) #1
+declare <8 x i32> @llvm.x86.avx2.psrli.d(<8 x i32>, i32) #1
+declare <16 x i16> @llvm.x86.avx2.psrli.w(<16 x i16>, i32) #1
+declare <4 x i64> @llvm.x86.avx2.psrl.q(<4 x i64>, <2 x i64>) #1
+declare <8 x i32> @llvm.x86.avx2.psrl.d(<8 x i32>, <4 x i32>) #1
+declare <16 x i16> @llvm.x86.avx2.psrl.w(<16 x i16>, <8 x i16>) #1
+declare <2 x i64> @llvm.x86.sse2.psrli.q(<2 x i64>, i32) #1
+declare <4 x i32> @llvm.x86.sse2.psrli.d(<4 x i32>, i32) #1
+declare <8 x i16> @llvm.x86.sse2.psrli.w(<8 x i16>, i32) #1
+declare <2 x i64> @llvm.x86.sse2.psrl.q(<2 x i64>, <2 x i64>) #1
+declare <4 x i32> @llvm.x86.sse2.psrl.d(<4 x i32>, <4 x i32>) #1
+declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) #1
attributes #1 = { nounwind readnone }