author     Simon Pilgrim <llvm-dev@redking.me.uk>    2016-03-26 18:32:13 +0000
committer  Simon Pilgrim <llvm-dev@redking.me.uk>    2016-03-26 18:32:13 +0000
commit     dcdf85033c3d4ab854c51b4b42d6ab6ab0fac59a (patch)
tree       e217a1065f749e394b73292eb0732a6c30d68e5a /llvm
parent     a874d1a40d2410f3e0d5f28a47c1217c02770cb0 (diff)
[X86][AVX] Enabled SMUL_LOHI/UMUL_LOHI v8i32 vectors on AVX1 targets
Correct splitting of v8i32 vectors into v4i32 vectors to prevent scalarization

llvm-svn: 264517
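For background, SMUL_LOHI and UMUL_LOHI are the generic SelectionDAG nodes that return both halves of a widening multiply: for v8i32, each lane produces the low and high 32 bits of the full 64-bit product. A minimal scalar model of the per-lane semantics (illustrative only; the function names are mine, not LLVM API):

#include <cstdint>

// Per-lane model of UMUL_LOHI on i32: one multiply yields both the low
// and the high 32 bits of the full 64-bit product.
void umul_lohi32(uint32_t a, uint32_t b, uint32_t &lo, uint32_t &hi) {
  uint64_t p = (uint64_t)a * b;
  lo = (uint32_t)p;          // low half: the ordinary i32 multiply result
  hi = (uint32_t)(p >> 32);  // high half: what ISD::MULHU would return
}

// Signed variant, matching SMUL_LOHI / ISD::MULHS.
void smul_lohi32(int32_t a, int32_t b, int32_t &lo, int32_t &hi) {
  int64_t p = (int64_t)a * b;
  lo = (int32_t)p;
  hi = (int32_t)(p >> 32);
}

These nodes are what the vector division-by-constant lowering exercised by the tests below is built on.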
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp        |  21
-rw-r--r--  llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll  | 241
-rw-r--r--  llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll  | 217
3 files changed, 127 insertions(+), 352 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 7f1500a2c4e..a06faba204f 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1253,6 +1253,9 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
     setOperationAction(ISD::MUL, MVT::v16i16, Custom);
     setOperationAction(ISD::MUL, MVT::v32i8, Custom);

+    setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
+    setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
+
     setOperationAction(ISD::MULHU, MVT::v16i16, Custom);
     setOperationAction(ISD::MULHS, MVT::v16i16, Custom);
     setOperationAction(ISD::MULHU, MVT::v32i8, Custom);
@@ -19219,6 +19222,24 @@ static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget &Subtarget,
   MVT VT = Op0.getSimpleValueType();
   SDLoc dl(Op);

+  // Decompose 256-bit ops into smaller 128-bit ops.
+  if (VT.is256BitVector() && !Subtarget.hasInt256()) {
+    unsigned Opcode = Op.getOpcode();
+    unsigned NumElems = VT.getVectorNumElements();
+    MVT HalfVT = MVT::getVectorVT(VT.getScalarType(), NumElems / 2);
+    SDValue Lo0 = extract128BitVector(Op0, 0, DAG, dl);
+    SDValue Lo1 = extract128BitVector(Op1, 0, DAG, dl);
+    SDValue Hi0 = extract128BitVector(Op0, NumElems / 2, DAG, dl);
+    SDValue Hi1 = extract128BitVector(Op1, NumElems / 2, DAG, dl);
+    SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(HalfVT, HalfVT), Lo0, Lo1);
+    SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(HalfVT, HalfVT), Hi0, Hi1);
+    SDValue Ops[] = {
+      DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo.getValue(0), Hi.getValue(0)),
+      DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo.getValue(1), Hi.getValue(1))
+    };
+    return DAG.getMergeValues(Ops, dl);
+  }
+
   assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
          (VT == MVT::v8i32 && Subtarget.hasInt256()));
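The added block avoids scalarization on AVX1 by splitting the 256-bit MUL_LOHI into two 128-bit ops and concatenating the two result pairs. A rough standalone analogue of that decomposition, using plain arrays in place of vectors and modelling only the high-half result for brevity (hypothetical helper names, not the DAG API):

#include <array>
#include <cstdint>

using V4 = std::array<uint32_t, 4>;
using V8 = std::array<uint32_t, 8>;

// "Legal" 128-bit op: per-lane high half of the unsigned 32x32->64 product.
static V4 umulhi_v4(const V4 &a, const V4 &b) {
  V4 r{};
  for (int i = 0; i < 4; ++i)
    r[i] = (uint32_t)(((uint64_t)a[i] * b[i]) >> 32);
  return r;
}

// 256-bit op decomposed as in the patch: extract the low and high 128-bit
// halves, run the 128-bit op on each, then concatenate the results.
static V8 umulhi_v8(const V8 &a, const V8 &b) {
  V4 lo0 = {a[0], a[1], a[2], a[3]}, lo1 = {b[0], b[1], b[2], b[3]};
  V4 hi0 = {a[4], a[5], a[6], a[7]}, hi1 = {b[4], b[5], b[6], b[7]};
  V4 lo = umulhi_v4(lo0, lo1), hi = umulhi_v4(hi0, hi1);
  return {lo[0], lo[1], lo[2], lo[3], hi[0], hi[1], hi[2], hi[3]};
}

The in-tree code does the same thing at the DAG level, returning both the low and high results through getMergeValues so the existing v4i32 lowering handles each half.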
diff --git a/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll b/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll
index 3f81d2e7b8c..cfd2fc625a6 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-sdiv-256.ll
@@ -87,88 +87,30 @@ define <4 x i64> @test_div7_4i64(<4 x i64> %a) nounwind {
define <8 x i32> @test_div7_8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: test_div7_8i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpextrd $1, %xmm1, %eax
-; AVX1-NEXT: cltq
-; AVX1-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $31, %ecx
-; AVX1-NEXT: sarl $2, %eax
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: vmovd %xmm1, %ecx
-; AVX1-NEXT: movslq %ecx, %rcx
-; AVX1-NEXT: imulq $-1840700269, %rcx, %rdx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT: shrq $32, %rdx
-; AVX1-NEXT: addl %edx, %ecx
-; AVX1-NEXT: movl %ecx, %edx
-; AVX1-NEXT: shrl $31, %edx
-; AVX1-NEXT: sarl $2, %ecx
-; AVX1-NEXT: addl %edx, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm2
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $2, %xmm1, %eax
-; AVX1-NEXT: cltq
-; AVX1-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $31, %ecx
-; AVX1-NEXT: sarl $2, %eax
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $3, %xmm1, %eax
-; AVX1-NEXT: cltq
-; AVX1-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $31, %ecx
-; AVX1-NEXT: sarl $2, %eax
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm1
-; AVX1-NEXT: vpextrd $1, %xmm0, %eax
-; AVX1-NEXT: cltq
-; AVX1-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $31, %ecx
-; AVX1-NEXT: sarl $2, %eax
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: vmovd %xmm0, %ecx
-; AVX1-NEXT: movslq %ecx, %rcx
-; AVX1-NEXT: imulq $-1840700269, %rcx, %rdx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT: shrq $32, %rdx
-; AVX1-NEXT: addl %edx, %ecx
-; AVX1-NEXT: movl %ecx, %edx
-; AVX1-NEXT: shrl $31, %edx
-; AVX1-NEXT: sarl $2, %ecx
-; AVX1-NEXT: addl %edx, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm2
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $2, %xmm0, %eax
-; AVX1-NEXT: cltq
-; AVX1-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $31, %ecx
-; AVX1-NEXT: sarl $2, %eax
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $3, %xmm0, %eax
-; AVX1-NEXT: cltq
-; AVX1-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: movl %eax, %ecx
-; AVX1-NEXT: shrl $31, %ecx
-; AVX1-NEXT: sarl $2, %eax
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovaps {{.*#+}} ymm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
+; AVX1-NEXT: vpmuldq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpmuldq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT: vpaddd %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $31, %xmm2, %xmm3
+; AVX1-NEXT: vpsrad $2, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpmuldq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vpsrld $31, %xmm0, %xmm1
+; AVX1-NEXT: vpsrad $2, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_div7_8i32:
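The new AVX1 output above is the vector form of the usual signed magic-number division: a multiply-high by 0x92492493 (the 2454267027 splat), an add of the dividend, an arithmetic shift by 2, and an add of the sign bit. A scalar model of what one lane now computes; the constant comes from the test output, the function name is mine:

#include <cstdint>

// One lane of the AVX1 test_div7_8i32 sequence.
int32_t sdiv7(int32_t n) {
  int32_t hi = (int32_t)(((int64_t)n * (int32_t)0x92492493) >> 32); // vpmuldq + shuffle/blend
  int32_t t = hi + n;                                               // vpaddd
  return (t >> 2) + (int32_t)((uint32_t)t >> 31);                   // vpsrad $2, vpsrld $31, vpaddd
}

For example sdiv7(-9) evaluates to -1, matching C's truncating signed division.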
@@ -406,112 +348,35 @@ define <4 x i64> @test_rem7_4i64(<4 x i64> %a) nounwind {
define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: test_rem7_8i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpextrd $1, %xmm1, %eax
-; AVX1-NEXT: cltq
-; AVX1-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: addl %eax, %ecx
-; AVX1-NEXT: movl %ecx, %edx
-; AVX1-NEXT: shrl $31, %edx
-; AVX1-NEXT: sarl $2, %ecx
-; AVX1-NEXT: addl %edx, %ecx
-; AVX1-NEXT: leal (,%rcx,8), %edx
-; AVX1-NEXT: subl %ecx, %edx
-; AVX1-NEXT: subl %edx, %eax
-; AVX1-NEXT: vmovd %xmm1, %ecx
-; AVX1-NEXT: movslq %ecx, %rcx
-; AVX1-NEXT: imulq $-1840700269, %rcx, %rdx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT: shrq $32, %rdx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: movl %edx, %esi
-; AVX1-NEXT: shrl $31, %esi
-; AVX1-NEXT: sarl $2, %edx
-; AVX1-NEXT: addl %esi, %edx
-; AVX1-NEXT: leal (,%rdx,8), %esi
-; AVX1-NEXT: subl %edx, %esi
-; AVX1-NEXT: subl %esi, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm2
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $2, %xmm1, %eax
-; AVX1-NEXT: cltq
-; AVX1-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: addl %eax, %ecx
-; AVX1-NEXT: movl %ecx, %edx
-; AVX1-NEXT: shrl $31, %edx
-; AVX1-NEXT: sarl $2, %ecx
-; AVX1-NEXT: addl %edx, %ecx
-; AVX1-NEXT: leal (,%rcx,8), %edx
-; AVX1-NEXT: subl %ecx, %edx
-; AVX1-NEXT: subl %edx, %eax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $3, %xmm1, %eax
-; AVX1-NEXT: cltq
-; AVX1-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: addl %eax, %ecx
-; AVX1-NEXT: movl %ecx, %edx
-; AVX1-NEXT: shrl $31, %edx
-; AVX1-NEXT: sarl $2, %ecx
-; AVX1-NEXT: addl %edx, %ecx
-; AVX1-NEXT: leal (,%rcx,8), %edx
-; AVX1-NEXT: subl %ecx, %edx
-; AVX1-NEXT: subl %edx, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm1
-; AVX1-NEXT: vpextrd $1, %xmm0, %eax
-; AVX1-NEXT: cltq
-; AVX1-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: addl %eax, %ecx
-; AVX1-NEXT: movl %ecx, %edx
-; AVX1-NEXT: shrl $31, %edx
-; AVX1-NEXT: sarl $2, %ecx
-; AVX1-NEXT: addl %edx, %ecx
-; AVX1-NEXT: leal (,%rcx,8), %edx
-; AVX1-NEXT: subl %ecx, %edx
-; AVX1-NEXT: subl %edx, %eax
-; AVX1-NEXT: vmovd %xmm0, %ecx
-; AVX1-NEXT: movslq %ecx, %rcx
-; AVX1-NEXT: imulq $-1840700269, %rcx, %rdx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT: shrq $32, %rdx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: movl %edx, %esi
-; AVX1-NEXT: shrl $31, %esi
-; AVX1-NEXT: sarl $2, %edx
-; AVX1-NEXT: addl %esi, %edx
-; AVX1-NEXT: leal (,%rdx,8), %esi
-; AVX1-NEXT: subl %edx, %esi
-; AVX1-NEXT: subl %esi, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm2
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $2, %xmm0, %eax
-; AVX1-NEXT: cltq
-; AVX1-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: addl %eax, %ecx
-; AVX1-NEXT: movl %ecx, %edx
-; AVX1-NEXT: shrl $31, %edx
-; AVX1-NEXT: sarl $2, %ecx
-; AVX1-NEXT: addl %edx, %ecx
-; AVX1-NEXT: leal (,%rcx,8), %edx
-; AVX1-NEXT: subl %ecx, %edx
-; AVX1-NEXT: subl %edx, %eax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $3, %xmm0, %eax
-; AVX1-NEXT: cltq
-; AVX1-NEXT: imulq $-1840700269, %rax, %rcx # imm = 0xFFFFFFFF92492493
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: addl %eax, %ecx
-; AVX1-NEXT: movl %ecx, %edx
-; AVX1-NEXT: shrl $31, %edx
-; AVX1-NEXT: sarl $2, %ecx
-; AVX1-NEXT: addl %edx, %ecx
-; AVX1-NEXT: leal (,%rcx,8), %edx
-; AVX1-NEXT: subl %ecx, %edx
-; AVX1-NEXT: subl %edx, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovaps {{.*#+}} ymm1 = [2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027,2454267027]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
+; AVX1-NEXT: vpmuldq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpmuldq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT: vpaddd %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpsrld $31, %xmm2, %xmm3
+; AVX1-NEXT: vpsrad $2, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [7,7,7,7]
+; AVX1-NEXT: vpmulld %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsubd %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpmuldq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpmuldq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
+; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm1
+; AVX1-NEXT: vpsrld $31, %xmm1, %xmm4
+; AVX1-NEXT: vpsrad $2, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpmulld %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_rem7_8i32:
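test_rem7_8i32 computes the same quotient and then forms the remainder as n - 7*q, which is what the added vpmulld by <7,7,7,7> followed by vpsubd does per lane. A self-contained scalar sketch (illustrative name and layout, not in-tree code):

#include <cstdint>

// One lane of the AVX1 test_rem7_8i32 sequence: quotient as in test_div7_8i32,
// then n - 7*q via vpmulld {7,7,7,7} + vpsubd.
int32_t srem7(int32_t n) {
  int32_t hi = (int32_t)(((int64_t)n * (int32_t)0x92492493) >> 32);
  int32_t t = hi + n;
  int32_t q = (t >> 2) + (int32_t)((uint32_t)t >> 31);
  return n - 7 * q;   // truncated remainder, e.g. srem7(-9) == -2
}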
diff --git a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll
index 992be948127..a1d356a0e76 100644
--- a/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv-udiv-256.ll
@@ -95,72 +95,30 @@ define <4 x i64> @test_div7_4i64(<4 x i64> %a) nounwind {
define <8 x i32> @test_div7_8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: test_div7_8i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpextrd $1, %xmm1, %eax
-; AVX1-NEXT: imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: shrl $2, %eax
-; AVX1-NEXT: vmovd %xmm1, %ecx
-; AVX1-NEXT: imulq $613566757, %rcx, %rdx # imm = 0x24924925
-; AVX1-NEXT: shrq $32, %rdx
-; AVX1-NEXT: subl %edx, %ecx
-; AVX1-NEXT: shrl %ecx
-; AVX1-NEXT: addl %edx, %ecx
-; AVX1-NEXT: shrl $2, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm2
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $2, %xmm1, %eax
-; AVX1-NEXT: imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: shrl $2, %eax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $3, %xmm1, %eax
-; AVX1-NEXT: imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: shrl $2, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm1
-; AVX1-NEXT: vpextrd $1, %xmm0, %eax
-; AVX1-NEXT: imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: shrl $2, %eax
-; AVX1-NEXT: vmovd %xmm0, %ecx
-; AVX1-NEXT: imulq $613566757, %rcx, %rdx # imm = 0x24924925
-; AVX1-NEXT: shrq $32, %rdx
-; AVX1-NEXT: subl %edx, %ecx
-; AVX1-NEXT: shrl %ecx
-; AVX1-NEXT: addl %edx, %ecx
-; AVX1-NEXT: shrl $2, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm2
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $2, %xmm0, %eax
-; AVX1-NEXT: imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: shrl $2, %eax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $3, %xmm0, %eax
-; AVX1-NEXT: imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: shrl %eax
-; AVX1-NEXT: addl %ecx, %eax
-; AVX1-NEXT: shrl $2, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovdqa {{.*#+}} ymm1 = [613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpmuludq %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
+; AVX1-NEXT: vpsubd %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT: vpaddd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpsrld $2, %xmm2, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[1,1,3,3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpmuludq %xmm3, %xmm4, %xmm3
+; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $1, %xmm0, %xmm0
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_div7_8i32:
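For unsigned division by 7 the reciprocal does not fit in 32 bits, so the lowering uses the magic constant 0x24924925 (the 613566757 splat) plus a subtract/shift/add fix-up; the AVX1 code above is the per-lane vector form of it. A scalar model (constant taken from the test output, name mine):

#include <cstdint>

// One lane of the AVX1 unsigned test_div7_8i32 sequence.
uint32_t udiv7(uint32_t n) {
  uint32_t hi = (uint32_t)(((uint64_t)n * 0x24924925u) >> 32); // vpmuludq + shuffle/blend
  uint32_t t  = (n - hi) >> 1;                                 // vpsubd + vpsrld $1
  return (t + hi) >> 2;                                        // vpaddd + vpsrld $2
}

For example udiv7(0xFFFFFFFFu) evaluates to 613566756, the correct quotient.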
@@ -401,104 +359,35 @@ define <4 x i64> @test_rem7_4i64(<4 x i64> %a) nounwind {
define <8 x i32> @test_rem7_8i32(<8 x i32> %a) nounwind {
; AVX1-LABEL: test_rem7_8i32:
; AVX1: # BB#0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpextrd $1, %xmm1, %eax
-; AVX1-NEXT: imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: subl %ecx, %edx
-; AVX1-NEXT: shrl %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: shrl $2, %edx
-; AVX1-NEXT: leal (,%rdx,8), %ecx
-; AVX1-NEXT: subl %edx, %ecx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: vmovd %xmm1, %ecx
-; AVX1-NEXT: imulq $613566757, %rcx, %rdx # imm = 0x24924925
-; AVX1-NEXT: shrq $32, %rdx
-; AVX1-NEXT: movl %ecx, %esi
-; AVX1-NEXT: subl %edx, %esi
-; AVX1-NEXT: shrl %esi
-; AVX1-NEXT: addl %edx, %esi
-; AVX1-NEXT: shrl $2, %esi
-; AVX1-NEXT: leal (,%rsi,8), %edx
-; AVX1-NEXT: subl %esi, %edx
-; AVX1-NEXT: subl %edx, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm2
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $2, %xmm1, %eax
-; AVX1-NEXT: imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: subl %ecx, %edx
-; AVX1-NEXT: shrl %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: shrl $2, %edx
-; AVX1-NEXT: leal (,%rdx,8), %ecx
-; AVX1-NEXT: subl %edx, %ecx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $3, %xmm1, %eax
-; AVX1-NEXT: imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: subl %ecx, %edx
-; AVX1-NEXT: shrl %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: shrl $2, %edx
-; AVX1-NEXT: leal (,%rdx,8), %ecx
-; AVX1-NEXT: subl %edx, %ecx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm1
-; AVX1-NEXT: vpextrd $1, %xmm0, %eax
-; AVX1-NEXT: imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: subl %ecx, %edx
-; AVX1-NEXT: shrl %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: shrl $2, %edx
-; AVX1-NEXT: leal (,%rdx,8), %ecx
-; AVX1-NEXT: subl %edx, %ecx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: vmovd %xmm0, %ecx
-; AVX1-NEXT: imulq $613566757, %rcx, %rdx # imm = 0x24924925
-; AVX1-NEXT: shrq $32, %rdx
-; AVX1-NEXT: movl %ecx, %esi
-; AVX1-NEXT: subl %edx, %esi
-; AVX1-NEXT: shrl %esi
-; AVX1-NEXT: addl %edx, %esi
-; AVX1-NEXT: shrl $2, %esi
-; AVX1-NEXT: leal (,%rsi,8), %edx
-; AVX1-NEXT: subl %esi, %edx
-; AVX1-NEXT: subl %edx, %ecx
-; AVX1-NEXT: vmovd %ecx, %xmm2
-; AVX1-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $2, %xmm0, %eax
-; AVX1-NEXT: imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: subl %ecx, %edx
-; AVX1-NEXT: shrl %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: shrl $2, %edx
-; AVX1-NEXT: leal (,%rdx,8), %ecx
-; AVX1-NEXT: subl %edx, %ecx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX1-NEXT: vpextrd $3, %xmm0, %eax
-; AVX1-NEXT: imulq $613566757, %rax, %rcx # imm = 0x24924925
-; AVX1-NEXT: shrq $32, %rcx
-; AVX1-NEXT: movl %eax, %edx
-; AVX1-NEXT: subl %ecx, %edx
-; AVX1-NEXT: shrl %edx
-; AVX1-NEXT: addl %ecx, %edx
-; AVX1-NEXT: shrl $2, %edx
-; AVX1-NEXT: leal (,%rdx,8), %ecx
-; AVX1-NEXT: subl %edx, %ecx
-; AVX1-NEXT: subl %ecx, %eax
-; AVX1-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1-NEXT: vmovaps {{.*#+}} ymm1 = [613566757,613566757,613566757,613566757,613566757,613566757,613566757,613566757]
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[1,1,3,3]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm4[1,1,3,3]
+; AVX1-NEXT: vpmuludq %xmm3, %xmm5, %xmm3
+; AVX1-NEXT: vpmuludq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5],xmm3[6,7]
+; AVX1-NEXT: vpsubd %xmm2, %xmm4, %xmm3
+; AVX1-NEXT: vpsrld $1, %xmm3, %xmm3
+; AVX1-NEXT: vpaddd %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpsrld $2, %xmm2, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [7,7,7,7]
+; AVX1-NEXT: vpmulld %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsubd %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
+; AVX1-NEXT: vpmuludq %xmm4, %xmm5, %xmm4
+; AVX1-NEXT: vpmuludq %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
+; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm4
+; AVX1-NEXT: vpsrld $1, %xmm4, %xmm4
+; AVX1-NEXT: vpaddd %xmm1, %xmm4, %xmm1
+; AVX1-NEXT: vpsrld $2, %xmm1, %xmm1
+; AVX1-NEXT: vpmulld %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: test_rem7_8i32:
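The unsigned remainder mirrors the signed case: quotient as above, then n - 7*q via vpmulld and vpsubd. A small self-contained check of the model (illustrative only, not in-tree code):

#include <cassert>
#include <cstdint>

// One lane of the AVX1 unsigned test_rem7_8i32 sequence.
uint32_t urem7(uint32_t n) {
  uint32_t hi = (uint32_t)(((uint64_t)n * 0x24924925u) >> 32);
  uint32_t q = (((n - hi) >> 1) + hi) >> 2;
  return n - 7u * q;
}

int main() {
  assert(urem7(100u) == 2u);         // 100 = 14*7 + 2
  assert(urem7(0xFFFFFFFFu) == 3u);  // 4294967295 = 613566756*7 + 3
  return 0;
}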