 llvm/lib/Target/X86/X86ISelLowering.cpp      |  15
 llvm/test/CodeGen/X86/avx512-calling-conv.ll |  29
 llvm/test/CodeGen/X86/v8i1-masks.ll          | 104
 3 files changed, 117 insertions(+), 31 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 5ac5d0348f8..22bdace3453 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -33047,10 +33047,8 @@ static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
   // The right side has to be a 'trunc' or a constant vector.
   bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE &&
                   N1.getOperand(0).getValueType() == VT;
-  ConstantSDNode *RHSConstSplat = nullptr;
-  if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
-    RHSConstSplat = RHSBV->getConstantSplatNode();
-  if (!RHSTrunc && !RHSConstSplat)
+  if (!RHSTrunc &&
+      !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()))
     return SDValue();
 
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
@@ -33060,13 +33058,10 @@ static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
 
   // Set N0 and N1 to hold the inputs to the new wide operation.
   N0 = N0->getOperand(0);
-  if (RHSConstSplat) {
-    N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT.getVectorElementType(),
-                     SDValue(RHSConstSplat, 0));
-    N1 = DAG.getSplatBuildVector(VT, DL, N1);
-  } else if (RHSTrunc) {
+  if (RHSTrunc)
     N1 = N1->getOperand(0);
-  }
+  else
+    N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1);
 
   // Generate the wide operation.
   SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, VT, N0, N1);
diff --git a/llvm/test/CodeGen/X86/avx512-calling-conv.ll b/llvm/test/CodeGen/X86/avx512-calling-conv.ll
index 248462d0de5..821c65bef06 100644
--- a/llvm/test/CodeGen/X86/avx512-calling-conv.ll
+++ b/llvm/test/CodeGen/X86/avx512-calling-conv.ll
@@ -228,14 +228,9 @@ define <8 x i1> @test7a(<8 x i32>%a, <8 x i32>%b) {
 ; KNL-NEXT:    vpmovdw %zmm0, %ymm0
 ; KNL-NEXT:    ## kill: def %xmm0 killed %xmm0 killed %ymm0
 ; KNL-NEXT:    callq _func8xi1
-; KNL-NEXT:    vpmovsxwq %xmm0, %zmm0
-; KNL-NEXT:    vpsllq $63, %zmm0, %zmm0
-; KNL-NEXT:    movb $85, %al
-; KNL-NEXT:    kmovw %eax, %k1
-; KNL-NEXT:    vptestmq %zmm0, %zmm0, %k1 {%k1}
-; KNL-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL-NEXT:    vpmovdw %zmm0, %ymm0
-; KNL-NEXT:    ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; KNL-NEXT:    vpsllw $15, %xmm0, %xmm0
+; KNL-NEXT:    vpsraw $15, %xmm0, %xmm0
 ; KNL-NEXT:    popq %rax
 ; KNL-NEXT:    retq
 ;
@@ -247,12 +242,9 @@ define <8 x i1> @test7a(<8 x i32>%a, <8 x i32>%b) {
 ; SKX-NEXT:    vpmovm2w %k0, %xmm0
 ; SKX-NEXT:    vzeroupper
 ; SKX-NEXT:    callq _func8xi1
+; SKX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
 ; SKX-NEXT:    vpsllw $15, %xmm0, %xmm0
-; SKX-NEXT:    vpmovw2m %xmm0, %k0
-; SKX-NEXT:    movb $85, %al
-; SKX-NEXT:    kmovd %eax, %k1
-; SKX-NEXT:    kandb %k1, %k0, %k0
-; SKX-NEXT:    vpmovm2w %k0, %xmm0
+; SKX-NEXT:    vpsraw $15, %xmm0, %xmm0
 ; SKX-NEXT:    popq %rax
 ; SKX-NEXT:    retq
 ;
@@ -264,14 +256,9 @@ define <8 x i1> @test7a(<8 x i32>%a, <8 x i32>%b) {
 ; KNL_X32-NEXT:    vpmovdw %zmm0, %ymm0
 ; KNL_X32-NEXT:    ## kill: def %xmm0 killed %xmm0 killed %ymm0
 ; KNL_X32-NEXT:    calll _func8xi1
-; KNL_X32-NEXT:    vpmovsxwq %xmm0, %zmm0
-; KNL_X32-NEXT:    vpsllq $63, %zmm0, %zmm0
-; KNL_X32-NEXT:    movb $85, %al
-; KNL_X32-NEXT:    kmovw %eax, %k1
-; KNL_X32-NEXT:    vptestmq %zmm0, %zmm0, %k1 {%k1}
-; KNL_X32-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; KNL_X32-NEXT:    vpmovdw %zmm0, %ymm0
-; KNL_X32-NEXT:    ## kill: def %xmm0 killed %xmm0 killed %ymm0
+; KNL_X32-NEXT:    vpand LCPI7_0, %xmm0, %xmm0
+; KNL_X32-NEXT:    vpsllw $15, %xmm0, %xmm0
+; KNL_X32-NEXT:    vpsraw $15, %xmm0, %xmm0
 ; KNL_X32-NEXT:    addl $12, %esp
 ; KNL_X32-NEXT:    retl
 %cmpRes = icmp sgt <8 x i32>%a, %b
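The X86ISelLowering.cpp hunks above relax WidenMaskArithmetic: previously the right-hand operand had to be either a truncate from the wide type or a splatted constant (fetched with getConstantSplatNode, zero-extended as a scalar, then re-splatted); with this change any BUILD_VECTOR of constants is accepted, and the whole vector is zero-extended to the wide type in one step. A minimal IR sketch of the newly covered shape follows; it mirrors the and_mask_constant test added to v8i1-masks.ll below, and the function name here is only illustrative, not part of this commit:

; An i1 mask ANDed with a non-splat constant vector and consumed as a wide zext.
; The old splat-only check made the combine bail out on this pattern.
define <8 x i32> @widen_nonsplat_mask(<8 x i32> %x) {
  %m = icmp eq <8 x i32> %x, zeroinitializer
  %mand = and <8 x i1> %m, <i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false>
  %r = zext <8 x i1> %mand to <8 x i32>
  ret <8 x i32> %r
}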
diff --git a/llvm/test/CodeGen/X86/v8i1-masks.ll b/llvm/test/CodeGen/X86/v8i1-masks.ll
index 5175850c734..e3755de3693 100644
--- a/llvm/test/CodeGen/X86/v8i1-masks.ll
+++ b/llvm/test/CodeGen/X86/v8i1-masks.ll
@@ -1,6 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=X32
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=X64
+; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=X32-AVX2
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=X64-AVX2
 
 define void @and_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
 ; X32-LABEL: and_masks:
@@ -31,6 +33,37 @@ define void @and_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwi
 ; X64-NEXT:    vmovaps %ymm0, (%rax)
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
+;
+; X32-AVX2-LABEL: and_masks:
+; X32-AVX2:       ## %bb.0:
+; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X32-AVX2-NEXT:    vmovups (%edx), %ymm0
+; X32-AVX2-NEXT:    vmovups (%ecx), %ymm1
+; X32-AVX2-NEXT:    vcmpltps %ymm0, %ymm1, %ymm1
+; X32-AVX2-NEXT:    vmovups (%eax), %ymm2
+; X32-AVX2-NEXT:    vcmpltps %ymm0, %ymm2, %ymm0
+; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1]
+; X32-AVX2-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; X32-AVX2-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT:    vmovaps %ymm0, (%eax)
+; X32-AVX2-NEXT:    vzeroupper
+; X32-AVX2-NEXT:    retl
+;
+; X64-AVX2-LABEL: and_masks:
+; X64-AVX2:       ## %bb.0:
+; X64-AVX2-NEXT:    vmovups (%rdi), %ymm0
+; X64-AVX2-NEXT:    vmovups (%rsi), %ymm1
+; X64-AVX2-NEXT:    vcmpltps %ymm0, %ymm1, %ymm1
+; X64-AVX2-NEXT:    vmovups (%rdx), %ymm2
+; X64-AVX2-NEXT:    vcmpltps %ymm0, %ymm2, %ymm0
+; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1]
+; X64-AVX2-NEXT:    vandps %ymm2, %ymm1, %ymm1
+; X64-AVX2-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; X64-AVX2-NEXT:    vmovaps %ymm0, (%rax)
+; X64-AVX2-NEXT:    vzeroupper
+; X64-AVX2-NEXT:    retq
 %v0 = load <8 x float>, <8 x float>* %a, align 16
 %v1 = load <8 x float>, <8 x float>* %b, align 16
 %m0 = fcmp olt <8 x float> %v1, %v0
@@ -62,6 +95,28 @@ define void @neg_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwi
 ; X64-NEXT:    vmovaps %ymm0, (%rax)
 ; X64-NEXT:    vzeroupper
 ; X64-NEXT:    retq
+;
+; X32-AVX2-LABEL: neg_masks:
+; X32-AVX2:       ## %bb.0:
+; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-AVX2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X32-AVX2-NEXT:    vmovups (%ecx), %ymm0
+; X32-AVX2-NEXT:    vcmpnltps (%eax), %ymm0, %ymm0
+; X32-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
+; X32-AVX2-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT:    vmovaps %ymm0, (%eax)
+; X32-AVX2-NEXT:    vzeroupper
+; X32-AVX2-NEXT:    retl
+;
+; X64-AVX2-LABEL: neg_masks:
+; X64-AVX2:       ## %bb.0:
+; X64-AVX2-NEXT:    vmovups (%rsi), %ymm0
+; X64-AVX2-NEXT:    vcmpnltps (%rdi), %ymm0, %ymm0
+; X64-AVX2-NEXT:    vbroadcastss {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
+; X64-AVX2-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; X64-AVX2-NEXT:    vmovaps %ymm0, (%rax)
+; X64-AVX2-NEXT:    vzeroupper
+; X64-AVX2-NEXT:    retq
 %v0 = load <8 x float>, <8 x float>* %a, align 16
 %v1 = load <8 x float>, <8 x float>* %b, align 16
 %m0 = fcmp olt <8 x float> %v1, %v0
@@ -71,3 +126,52 @@ define void @neg_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwi
 ret void
 }
 
+define <8 x i32> @and_mask_constant(<8 x i32> %v0, <8 x i32> %v1) {
+; X32-LABEL: and_mask_constant:
+; X32:       ## %bb.0:
+; X32-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X32-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; X32-NEXT:    vpcmpeqd %xmm2, %xmm1, %xmm1
+; X32-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
+; X32-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
+; X32-NEXT:    vpand LCPI2_0, %xmm0, %xmm0
+; X32-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X32-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; X32-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X32-NEXT:    retl
+;
+; X64-LABEL: and_mask_constant:
+; X64:       ## %bb.0:
+; X64-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; X64-NEXT:    vpxor %xmm2, %xmm2, %xmm2
+; X64-NEXT:    vpcmpeqd %xmm2, %xmm1, %xmm1
+; X64-NEXT:    vpcmpeqd %xmm2, %xmm0, %xmm0
+; X64-NEXT:    vpackssdw %xmm1, %xmm0, %xmm0
+; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT:    vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; X64-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; X64-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; X64-NEXT:    retq
+;
+; X32-AVX2-LABEL: and_mask_constant:
+; X32-AVX2:       ## %bb.0:
+; X32-AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X32-AVX2-NEXT:    vpcmpeqd %ymm1, %ymm0, %ymm0
+; X32-AVX2-NEXT:    vpand LCPI2_0, %ymm0, %ymm0
+; X32-AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
+; X32-AVX2-NEXT:    retl
+;
+; X64-AVX2-LABEL: and_mask_constant:
+; X64-AVX2:       ## %bb.0:
+; X64-AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
+; X64-AVX2-NEXT:    vpcmpeqd %ymm1, %ymm0, %ymm0
+; X64-AVX2-NEXT:    vpand {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
+; X64-AVX2-NEXT:    retq
+  %m = icmp eq <8 x i32> %v0, zeroinitializer
+  %mand = and <8 x i1> %m, <i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false>
+  %r = zext <8 x i1> %mand to <8 x i32>
+  ret <8 x i32> %r
+}

