summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp              |   3
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll | 101
2 files changed, 21 insertions(+), 83 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index a896f24c365..6319c9cc223 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -33225,7 +33225,8 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
}
break;
}
- case X86ISD::PSHUFB: {
+ case X86ISD::PSHUFB:
+ case X86ISD::VPERMILPV: {
// TODO - simplify other variable shuffle masks.
SDValue Mask = Op.getOperand(1);
APInt MaskUndef, MaskZero;
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
index 1afbc2adf75..d87729b123e 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
@@ -188,41 +188,10 @@ define <8 x float> @combine_vpermilvar_8f32_movshdup(<8 x float> %a0) {
ret <8 x float> %1
}
define <8 x float> @demandedelts_vpermilvar_8f32_movshdup(<8 x float> %a0, i32 %a1) {
-; X86-LABEL: demandedelts_vpermilvar_8f32_movshdup:
-; X86: # %bb.0:
-; X86-NEXT: vbroadcastss {{[0-9]+}}(%esp), %ymm1
-; X86-NEXT: vblendps {{.*#+}} ymm1 = mem[0,1,2,3,4,5,6],ymm1[7]
-; X86-NEXT: vpermilps %ymm1, %ymm0, %ymm0
-; X86-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,6]
-; X86-NEXT: retl
-;
-; X64-AVX1-LABEL: demandedelts_vpermilvar_8f32_movshdup:
-; X64-AVX1: # %bb.0:
-; X64-AVX1-NEXT: vmovd %edi, %xmm1
-; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; X64-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; X64-AVX1-NEXT: vblendps {{.*#+}} ymm1 = mem[0,1,2,3,4,5,6],ymm1[7]
-; X64-AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0
-; X64-AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,6]
-; X64-AVX1-NEXT: retq
-;
-; X64-AVX2-LABEL: demandedelts_vpermilvar_8f32_movshdup:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: vmovd %edi, %xmm1
-; X64-AVX2-NEXT: vpbroadcastd %xmm1, %ymm1
-; X64-AVX2-NEXT: vpblendd {{.*#+}} ymm1 = mem[0,1,2,3,4,5,6],ymm1[7]
-; X64-AVX2-NEXT: vpermilps %ymm1, %ymm0, %ymm0
-; X64-AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,6]
-; X64-AVX2-NEXT: retq
-;
-; X64-AVX512-LABEL: demandedelts_vpermilvar_8f32_movshdup:
-; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: vmovd %edi, %xmm1
-; X64-AVX512-NEXT: vpbroadcastd %xmm1, %ymm1
-; X64-AVX512-NEXT: vpblendd {{.*#+}} ymm1 = mem[0,1,2,3,4,5,6],ymm1[7]
-; X64-AVX512-NEXT: vpermilps %ymm1, %ymm0, %ymm0
-; X64-AVX512-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5,6,6]
-; X64-AVX512-NEXT: retq
+; CHECK-LABEL: demandedelts_vpermilvar_8f32_movshdup:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
+; CHECK-NEXT: ret{{[l|q]}}
%1 = insertelement <8 x i32> <i32 1, i32 1, i32 3, i32 3, i32 undef, i32 5, i32 7, i32 7>, i32 %a1, i32 7
%2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %1)
%3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 6>
@@ -238,55 +207,23 @@ define <8 x float> @combine_vpermilvar_8f32_movsldup(<8 x float> %a0) {
ret <8 x float> %1
}
define <8 x float> @demandedelts_vpermilvar_8f32_movsldup(<8 x float> %a0, i32 %a1) {
-; X86-AVX1-LABEL: demandedelts_vpermilvar_8f32_movsldup:
-; X86-AVX1: # %bb.0:
-; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = <u,0,2,2,4,4,6,6>
-; X86-AVX1-NEXT: vpinsrd $0, {{[0-9]+}}(%esp), %xmm1, %xmm1
-; X86-AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; X86-AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0
-; X86-AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,2,3,4,5,6,7]
-; X86-AVX1-NEXT: retl
-;
-; X86-AVX2-LABEL: demandedelts_vpermilvar_8f32_movsldup:
-; X86-AVX2: # %bb.0:
-; X86-AVX2-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X86-AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],mem[1,2,3,4,5,6,7]
-; X86-AVX2-NEXT: vpermilps %ymm1, %ymm0, %ymm0
-; X86-AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,2,3,4,5,6,7]
-; X86-AVX2-NEXT: retl
-;
-; X86-AVX512-LABEL: demandedelts_vpermilvar_8f32_movsldup:
-; X86-AVX512: # %bb.0:
-; X86-AVX512-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
-; X86-AVX512-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],mem[1,2,3,4,5,6,7]
-; X86-AVX512-NEXT: vpermilps %ymm1, %ymm0, %ymm0
-; X86-AVX512-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,2,3,4,5,6,7]
-; X86-AVX512-NEXT: retl
-;
-; X64-AVX1-LABEL: demandedelts_vpermilvar_8f32_movsldup:
-; X64-AVX1: # %bb.0:
-; X64-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = <u,0,2,2,4,4,6,6>
-; X64-AVX1-NEXT: vpinsrd $0, %edi, %xmm1, %xmm1
-; X64-AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
-; X64-AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0
-; X64-AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,2,3,4,5,6,7]
-; X64-AVX1-NEXT: retq
+; AVX1-LABEL: demandedelts_vpermilvar_8f32_movsldup:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovaps {{.*#+}} xmm1 = <u,0,2,2,4,4,6,6>
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],mem[4,5,6,7]
+; AVX1-NEXT: vpermilps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,2,3,4,5,6,7]
+; AVX1-NEXT: ret{{[l|q]}}
;
-; X64-AVX2-LABEL: demandedelts_vpermilvar_8f32_movsldup:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: vmovd %edi, %xmm1
-; X64-AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2,3,4,5,6,7]
-; X64-AVX2-NEXT: vpermilps %ymm1, %ymm0, %ymm0
-; X64-AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,2,3,4,5,6,7]
-; X64-AVX2-NEXT: retq
+; AVX2-LABEL: demandedelts_vpermilvar_8f32_movsldup:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
+; AVX2-NEXT: ret{{[l|q]}}
;
-; X64-AVX512-LABEL: demandedelts_vpermilvar_8f32_movsldup:
-; X64-AVX512: # %bb.0:
-; X64-AVX512-NEXT: vmovd %edi, %xmm1
-; X64-AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],mem[1,2,3,4,5,6,7]
-; X64-AVX512-NEXT: vpermilps %ymm1, %ymm0, %ymm0
-; X64-AVX512-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,1,2,3,4,5,6,7]
-; X64-AVX512-NEXT: retq
+; AVX512-LABEL: demandedelts_vpermilvar_8f32_movsldup:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6]
+; AVX512-NEXT: ret{{[l|q]}}
%1 = insertelement <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>, i32 %a1, i32 0
%2 = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %a0, <8 x i32> %1)
%3 = shufflevector <8 x float> %2, <8 x float> undef, <8 x i32> <i32 1, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
OpenPOWER on IntegriCloud