summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author    Sanjay Patel <spatel@rotateright.com>  2015-10-03 20:52:55 +0000
committer Sanjay Patel <spatel@rotateright.com>  2015-10-03 20:52:55 +0000
commit   004ea240adaca661b27b75eeceeed38f29ebe34a (patch)
tree     dd273a2b4e52ab3601789f541e4b37ec12f6cd67
parent   acd4baefcaa8a0020814be41414f3fef3803aad2 (diff)
download bcm5719-llvm-004ea240adaca661b27b75eeceeed38f29ebe34a.tar.gz
         bcm5719-llvm-004ea240adaca661b27b75eeceeed38f29ebe34a.zip
add test cases that demonstrate bad behavior
These are based on PR25016 and likely caused by a bug in MachineCombiner's definition of improvesCriticalPathLen(). llvm-svn: 249249
-rw-r--r--  llvm/test/CodeGen/X86/machine-combiner.ll | 52
1 file changed, 52 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/X86/machine-combiner.ll b/llvm/test/CodeGen/X86/machine-combiner.ll
index b4340b34cc0..efcf51bba92 100644
--- a/llvm/test/CodeGen/X86/machine-combiner.ll
+++ b/llvm/test/CodeGen/X86/machine-combiner.ll
@@ -618,3 +618,55 @@ define <4 x double> @reassociate_maxs_v4f64(<4 x double> %x0, <4 x double> %x1,
ret <4 x double> %sel2
}
+; PR25016: https://llvm.org/bugs/show_bug.cgi?id=25016
+; Verify that reassociation is not happening needlessly or wrongly.
+
+declare double @bar()
+
+define double @reassociate_adds_from_calls() {
+; AVX-LABEL: reassociate_adds_from_calls:
+; AVX: callq bar
+; AVX-NEXT: vmovsd %xmm0, 16(%rsp)
+; AVX-NEXT: callq bar
+; AVX-NEXT: vmovsd %xmm0, 8(%rsp)
+; AVX-NEXT: callq bar
+; AVX-NEXT: vmovsd %xmm0, (%rsp)
+; AVX-NEXT: callq bar
+; AVX-NEXT: vmovsd (%rsp), %xmm1
+; AVX: vaddsd 8(%rsp), %xmm1, %xmm1
+; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vaddsd 16(%rsp), %xmm0, %xmm0
+
+ %x0 = call double @bar()
+ %x1 = call double @bar()
+ %x2 = call double @bar()
+ %x3 = call double @bar()
+ %t0 = fadd double %x0, %x1
+ %t1 = fadd double %t0, %x2
+ %t2 = fadd double %t1, %x3
+ ret double %t2
+}
+
+define double @already_reassociated() {
+; AVX-LABEL: already_reassociated:
+; AVX: callq bar
+; AVX-NEXT: vmovsd %xmm0, 16(%rsp)
+; AVX-NEXT: callq bar
+; AVX-NEXT: vmovsd %xmm0, 8(%rsp)
+; AVX-NEXT: callq bar
+; AVX-NEXT: vmovsd %xmm0, (%rsp)
+; AVX-NEXT: callq bar
+; AVX-NEXT: vaddsd (%rsp), %xmm0, %xmm0
+; AVX-NEXT: vaddsd 8(%rsp), %xmm0, %xmm0
+; AVX-NEXT: vaddsd 16(%rsp), %xmm0, %xmm0
+
+ %x0 = call double @bar()
+ %x1 = call double @bar()
+ %x2 = call double @bar()
+ %x3 = call double @bar()
+ %t0 = fadd double %x0, %x1
+ %t1 = fadd double %x2, %x3
+ %t2 = fadd double %t0, %t1
+ ret double %t2
+}
+
OpenPOWER on IntegriCloud