Diffstat (limited to 'llvm/test')
 llvm/test/CodeGen/ARM/umulo-32.ll  | 21
 llvm/test/CodeGen/X86/mulo-pow2.ll | 62
 2 files changed, 21 insertions(+), 62 deletions(-)
diff --git a/llvm/test/CodeGen/ARM/umulo-32.ll b/llvm/test/CodeGen/ARM/umulo-32.ll
index 9b099c2a2b9..608788130a3 100644
--- a/llvm/test/CodeGen/ARM/umulo-32.ll
+++ b/llvm/test/CodeGen/ARM/umulo-32.ll
@@ -31,22 +31,23 @@ define i32 @test2(i32* %m_degree) ssp {
; CHECK-LABEL: test2:
; CHECK: @ %bb.0:
; CHECK-NEXT: push {r4, lr}
+; CHECK-NEXT: movs r1, #7
+; CHECK-NEXT: lsls r1, r1, #29
; CHECK-NEXT: ldr r0, [r0]
-; CHECK-NEXT: movs r2, #8
+; CHECK-NEXT: mov r2, r0
+; CHECK-NEXT: bics r2, r1
+; CHECK-NEXT: subs r1, r0, r2
+; CHECK-NEXT: subs r2, r1, #1
+; CHECK-NEXT: sbcs r1, r2
; CHECK-NEXT: movs r4, #0
-; CHECK-NEXT: mov r1, r4
-; CHECK-NEXT: mov r3, r4
-; CHECK-NEXT: bl __muldi3
; CHECK-NEXT: cmp r1, #0
-; CHECK-NEXT: beq .LBB1_2
+; CHECK-NEXT: bne .LBB1_2
; CHECK-NEXT: @ %bb.1:
-; CHECK-NEXT: movs r1, #1
+; CHECK-NEXT: lsls r0, r0, #3
+; CHECK-NEXT: b .LBB1_3
; CHECK-NEXT: .LBB1_2:
-; CHECK-NEXT: cmp r1, #0
-; CHECK-NEXT: beq .LBB1_4
-; CHECK-NEXT: @ %bb.3:
; CHECK-NEXT: mvns r0, r4
-; CHECK-NEXT: .LBB1_4:
+; CHECK-NEXT: .LBB1_3:
; CHECK-NEXT: bl _Znam
; CHECK-NEXT: mov r0, r4
; CHECK-NEXT: pop {r4, pc}
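The ARM change replaces the generic `__muldi3` libcall with an inline overflow check: an unsigned 32-bit multiply by 8 overflows exactly when any of the top three bits of the operand are set, so the backend masks them off (`bics` against 7 << 29), derives the overflow bit from the difference, and produces the product with a plain shift. A minimal LLVM IR sketch of the equivalent pattern (the function name is hypothetical, not part of the test):

; Overflow of x * 8 in i32 iff x >= 2^29, i.e. any bit above bit 28 is set.
define { i32, i1 } @umul8_check(i32 %x) {
  %low = and i32 %x, 536870911          ; keep the low 29 bits (2^29 - 1)
  %ov  = icmp ne i32 %low, %x           ; a masked-off high bit means overflow
  %res = shl i32 %x, 3                  ; x * 8
  %r0  = insertvalue { i32, i1 } undef, i32 %res, 0
  %r1  = insertvalue { i32, i1 } %r0, i1 %ov, 1
  ret { i32, i1 } %r1
}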
diff --git a/llvm/test/CodeGen/X86/mulo-pow2.ll b/llvm/test/CodeGen/X86/mulo-pow2.ll
index 691e81cefe7..9a7a5cf85ea 100644
--- a/llvm/test/CodeGen/X86/mulo-pow2.ll
+++ b/llvm/test/CodeGen/X86/mulo-pow2.ll
@@ -19,15 +19,6 @@ define <4 x i32> @umul_v4i32_0(<4 x i32> %a, <4 x i32> %b) nounwind {
define <4 x i32> @umul_v4i32_1(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX-LABEL: umul_v4i32_1:
; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1]
-; AVX-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
-; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%x = call { <4 x i32>, <4 x i1> } @llvm.umul.with.overflow.v4i32(<4 x i32> %a, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
%y = extractvalue { <4 x i32>, <4 x i1> } %x, 0
@@ -54,14 +45,8 @@ define <4 x i32> @umul_v4i32_2(<4 x i32> %a, <4 x i32> %b) nounwind {
define <4 x i32> @umul_v4i32_8(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX-LABEL: umul_v4i32_8:
; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8]
-; AVX-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
-; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm2
+; AVX-NEXT: vpcmpeqd %xmm0, %xmm2, %xmm2
; AVX-NEXT: vpslld $3, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
@@ -75,14 +60,8 @@ define <4 x i32> @umul_v4i32_8(<4 x i32> %a, <4 x i32> %b) nounwind {
define <4 x i32> @umul_v4i32_2pow31(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX-LABEL: umul_v4i32_2pow31:
; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
-; AVX-NEXT: vpmuludq %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vpmuludq %xmm3, %xmm0, %xmm3
-; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
-; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm2
+; AVX-NEXT: vpcmpeqd %xmm0, %xmm2, %xmm2
; AVX-NEXT: vpslld $31, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
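The unsigned X86 cases follow the same idea, vectorized: a multiply by 1 can never overflow, so the whole `vpmuludq`/shuffle sequence folds away, and for 8 and 2^31 the wide-multiply check is replaced by a `vpand` against a constant-pool mask, a `vpcmpeqd` to verify no high bits were discarded, and a plain `vpslld`. A sketch of the pattern the new AVX code implements, shown for the by-8 case (hypothetical function name):

; Lanes where (a & (2^29 - 1)) == a cannot overflow when shifted left by 3.
define <4 x i32> @umul8_select(<4 x i32> %a, <4 x i32> %b) {
  %low = and <4 x i32> %a, <i32 536870911, i32 536870911, i32 536870911, i32 536870911>
  %ok  = icmp eq <4 x i32> %low, %a                  ; true = no overflow in that lane
  %mul = shl <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3>
  %sel = select <4 x i1> %ok, <4 x i32> %mul, <4 x i32> %b
  ret <4 x i32> %sel
}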
@@ -108,15 +87,6 @@ define <4 x i32> @smul_v4i32_0(<4 x i32> %a, <4 x i32> %b) nounwind {
define <4 x i32> @smul_v4i32_1(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX-LABEL: smul_v4i32_1:
; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1]
-; AVX-NEXT: vpmuldq %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vpmuldq %xmm3, %xmm0, %xmm3
-; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
-; AVX-NEXT: vpsrad $31, %xmm0, %xmm3
-; AVX-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%x = call { <4 x i32>, <4 x i1> } @llvm.smul.with.overflow.v4i32(<4 x i32> %a, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
%y = extractvalue { <4 x i32>, <4 x i1> } %x, 0
@@ -148,16 +118,10 @@ define <4 x i32> @smul_v4i32_2(<4 x i32> %a, <4 x i32> %b) nounwind {
define <4 x i32> @smul_v4i32_8(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX-LABEL: smul_v4i32_8:
; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [8,8,8,8]
-; AVX-NEXT: vpmuldq %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vpmuldq %xmm3, %xmm0, %xmm3
-; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
-; AVX-NEXT: vpslld $3, %xmm0, %xmm0
-; AVX-NEXT: vpsrad $31, %xmm0, %xmm3
-; AVX-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpslld $3, %xmm0, %xmm2
+; AVX-NEXT: vpsrad $3, %xmm2, %xmm3
+; AVX-NEXT: vpcmpeqd %xmm0, %xmm3, %xmm0
+; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm1, %xmm0
; AVX-NEXT: retq
%x = call { <4 x i32>, <4 x i1> } @llvm.smul.with.overflow.v4i32(<4 x i32> %a, <4 x i32> <i32 8, i32 8, i32 8, i32 8>)
%y = extractvalue { <4 x i32>, <4 x i1> } %x, 0
@@ -169,15 +133,9 @@ define <4 x i32> @smul_v4i32_8(<4 x i32> %a, <4 x i32> %b) nounwind {
define <4 x i32> @smul_v4i32_2pow31(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX-LABEL: smul_v4i32_2pow31:
; AVX: # %bb.0:
-; AVX-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
-; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [2147483648,2147483648,2147483648,2147483648]
-; AVX-NEXT: vpmuldq %xmm3, %xmm2, %xmm2
-; AVX-NEXT: vpmuldq %xmm3, %xmm0, %xmm3
-; AVX-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
-; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3],xmm3[4,5],xmm2[6,7]
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm2
+; AVX-NEXT: vpcmpeqd %xmm0, %xmm2, %xmm2
; AVX-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX-NEXT: vpsrad $31, %xmm0, %xmm3
-; AVX-NEXT: vpcmpeqd %xmm3, %xmm2, %xmm2
; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%x = call { <4 x i32>, <4 x i1> } @llvm.smul.with.overflow.v4i32(<4 x i32> %a, <4 x i32> <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>)
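For the signed cases, multiply by 1 again folds away entirely, and multiply by 8 becomes a shift round trip: `x * 8` overflows in signed arithmetic exactly when an arithmetic shift right by 3 fails to recover `x`, which is what the new `vpslld`/`vpsrad`/`vpcmpeqd` sequence checks. A sketch of that pattern (hypothetical function name):

; Signed overflow of a * 8 iff (a << 3) >> 3 != a under an arithmetic shift.
define <4 x i32> @smul8_select(<4 x i32> %a, <4 x i32> %b) {
  %mul  = shl <4 x i32> %a, <i32 3, i32 3, i32 3, i32 3>
  %back = ashr <4 x i32> %mul, <i32 3, i32 3, i32 3, i32 3>
  %ok   = icmp eq <4 x i32> %back, %a                ; round trip intact = no overflow
  %sel  = select <4 x i1> %ok, <4 x i32> %mul, <4 x i32> %b
  ret <4 x i32> %sel
}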