author     Simon Pilgrim <llvm-dev@redking.me.uk>   2019-04-11 15:29:15 +0000
committer  Simon Pilgrim <llvm-dev@redking.me.uk>   2019-04-11 15:29:15 +0000
commit     40b647ae8e5f68caa49a56883449e63975c206a6 (patch)
tree       6ab835e17f27d1b376e66dc4245846e2eb273931
parent     3742bb89f83f2fe5f2392da16f3638566622418a (diff)
[X86] SimplifyDemandedVectorElts - add X86ISD::VPERMV3 mask support
Completes SimplifyDemandedVectorElts's basic variable shuffle mask support, which should help D60512 + D60562
llvm-svn: 358186
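The shape this targets is visible in the updated test below: a two-source variable shuffle (X86ISD::VPERMV3, i.e. vpermi2pd) whose index vector has a single runtime lane, feeding a fixed shuffle that never reads that lane. A minimal IR sketch of that pattern, reconstructed from the X64 checks (the unmasked @llvm.x86.avx512.vpermi2var.pd.512 intrinsic and its operand order are assumptions here; the real test body may differ):

```llvm
declare <8 x double> @llvm.x86.avx512.vpermi2var.pd.512(<8 x double>, <8 x i64>, <8 x double>)

define <8 x double> @demanded_mask_demo(<8 x double> %x0, <8 x double> %x1, i64 %a2) {
  ; Index vector: lane 0 is a runtime value, lanes 1-7 are constants.
  %m = insertelement <8 x i64> <i64 undef, i64 2, i64 1, i64 3, i64 4, i64 6, i64 5, i64 7>, i64 %a2, i32 0
  ; Two-source shuffle, selected as X86ISD::VPERMV3 (vpermi2pd).
  %v = call <8 x double> @llvm.x86.avx512.vpermi2var.pd.512(<8 x double> %x0, <8 x i64> %m, <8 x double> %x1)
  ; The fixed shuffle never reads lane 0 of %v, so lane 0 of %m is not
  ; demanded either: with this patch SimplifyDemandedVectorElts can treat
  ; it as undef and drop the variable insert of %a2.
  %r = shufflevector <8 x double> %v, <8 x double> undef, <8 x i32> <i32 2, i32 3, i32 1, i32 1, i32 6, i32 7, i32 5, i32 5>
  ret <8 x double> %r
}
```

Once only constant lanes are left demanded, the whole index vector can be materialized from the constant pool instead of being built up at runtime.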
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp                     2
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll  6
2 files changed, 3 insertions, 5 deletions
```diff
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 60085493913..2258e0ce24e 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -33234,8 +33234,8 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
     break;
   }
   case X86ISD::PSHUFB:
+  case X86ISD::VPERMV3:
   case X86ISD::VPERMILPV: {
-    // TODO - simplify other variable shuffle masks.
     SDValue Mask = Op.getOperand(1);
     APInt MaskUndef, MaskZero;
     if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
index b180a517032..d796108aa5f 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
@@ -933,10 +933,8 @@ define <8 x double> @combine_vpermi2var_8f64_as_permpd(<8 x double> %x0, <8 x do
 ;
 ; X64-LABEL: combine_vpermi2var_8f64_as_permpd:
 ; X64: # %bb.0:
-; X64-NEXT: vmovdqa {{.*#+}} xmm2 = <u,2,1,3,4,6,5,7>
-; X64-NEXT: vpinsrq $0, %rdi, %xmm2, %xmm2
-; X64-NEXT: vmovdqa64 {{.*#+}} zmm3 = <u,2,1,3,4,6,5,7>
-; X64-NEXT: vinserti32x4 $0, %xmm2, %zmm3, %zmm2
+; X64-NEXT: vmovapd {{.*#+}} zmm2 = <u,2,1,3,4,6,5,7>
+; X64-NEXT: vinsertf32x4 $0, {{.*}}(%rip), %zmm2, %zmm2
 ; X64-NEXT: vpermi2pd %zmm1, %zmm0, %zmm2
 ; X64-NEXT: vpermpd {{.*#+}} zmm0 = zmm2[2,3,1,1,6,7,5,5]
 ; X64-NEXT: retq
```
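In the X64 checks this is exactly why the vpinsrq of %rdi disappears: the trailing vpermpd reads only lanes [2,3,1,1,6,7,5,5] of the vpermi2pd result, so lane 0 of the variable index vector is never demanded, and the runtime index insert can be replaced by a plain constant-pool load (the vinsertf32x4 from %rip), saving two instructions and a GPR-to-vector transfer.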