path: root/llvm/test/CodeGen/X86/machine-combiner.ll
author	Sanjay Patel <spatel@rotateright.com>	2015-08-21 18:06:49 +0000
committer	Sanjay Patel <spatel@rotateright.com>	2015-08-21 18:06:49 +0000
commit	cf942fa905d5ba47e90b91c248aacd9def24feb7 (patch)
tree	b8465a00f1072b6be1235fc9ccec0dc7fc15ebcd /llvm/test/CodeGen/X86/machine-combiner.ll
parent	2ac796d6ccaf2428a1a2f8a6b98a33e6635980d8 (diff)
[x86] enable machine combiner reassociations for 128-bit vector min/max
llvm-svn: 245715
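
The tests added below exercise this change: starting from the fcmp olt/ogt + select idiom that the x86 backend lowers to minps/maxps (and their pd counterparts), the machine combiner rewrites the serial chain min(min(add(x0, x1), x2), x3) into min(add(x0, x1), min(x2, x3)), so the add and the inner min no longer depend on each other. A minimal standalone IR sketch of that input pattern follows (the function name is illustrative; the checked-in tests in the diff below are the authoritative versions):

    ; Before reassociation, every instruction waits on the previous one:
    ;   t0   = x0 + x1
    ;   sel1 = min(x2, t0)     ; depends on the add
    ;   sel2 = min(x3, sel1)   ; depends on sel1
    ; After reassociation the combiner instead computes
    ;   t0 = x0 + x1  and  m = min(x2, x3)  independently,
    ; then a single final min(t0, m), shortening the critical path.
    define <4 x float> @min_chain(<4 x float> %x0, <4 x float> %x1,
                                  <4 x float> %x2, <4 x float> %x3) {
      %t0   = fadd <4 x float> %x0, %x1
      %cmp1 = fcmp olt <4 x float> %x2, %t0
      %sel1 = select <4 x i1> %cmp1, <4 x float> %x2, <4 x float> %t0
      %cmp2 = fcmp olt <4 x float> %x3, %sel1
      %sel2 = select <4 x i1> %cmp2, <4 x float> %x3, <4 x float> %sel1
      ret <4 x float> %sel2
    }

In the SSE CHECK lines this shows up as minps %xmm3, %xmm2 being independent of addps %xmm1, %xmm0, with one final minps combining the two results; the AVX lines check the equivalent three-operand form.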
Diffstat (limited to 'llvm/test/CodeGen/X86/machine-combiner.ll')
-rw-r--r--	llvm/test/CodeGen/X86/machine-combiner.ll	| 96
1 files changed, 96 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/machine-combiner.ll b/llvm/test/CodeGen/X86/machine-combiner.ll
index e9300b4c6e9..371747479e5 100644
--- a/llvm/test/CodeGen/X86/machine-combiner.ll
+++ b/llvm/test/CodeGen/X86/machine-combiner.ll
@@ -454,3 +454,99 @@ define double @reassociate_maxs_double(double %x0, double %x1, double %x2, doubl
ret double %sel2
}
+; Verify that SSE and AVX 128-bit vector single-precision minimum ops are reassociated.
+
+define <4 x float> @reassociate_mins_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
+; SSE-LABEL: reassociate_mins_v4f32:
+; SSE: # BB#0:
+; SSE-NEXT: addps %xmm1, %xmm0
+; SSE-NEXT: minps %xmm3, %xmm2
+; SSE-NEXT: minps %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: reassociate_mins_v4f32:
+; AVX: # BB#0:
+; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vminps %xmm3, %xmm2, %xmm1
+; AVX-NEXT: vminps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %t0 = fadd <4 x float> %x0, %x1
+ %cmp1 = fcmp olt <4 x float> %x2, %t0
+ %sel1 = select <4 x i1> %cmp1, <4 x float> %x2, <4 x float> %t0
+ %cmp2 = fcmp olt <4 x float> %x3, %sel1
+ %sel2 = select <4 x i1> %cmp2, <4 x float> %x3, <4 x float> %sel1
+ ret <4 x float> %sel2
+}
+
+; Verify that SSE and AVX 128-bit vector single-precision maximum ops are reassociated.
+
+define <4 x float> @reassociate_maxs_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
+; SSE-LABEL: reassociate_maxs_v4f32:
+; SSE: # BB#0:
+; SSE-NEXT: addps %xmm1, %xmm0
+; SSE-NEXT: maxps %xmm3, %xmm2
+; SSE-NEXT: maxps %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: reassociate_maxs_v4f32:
+; AVX: # BB#0:
+; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmaxps %xmm3, %xmm2, %xmm1
+; AVX-NEXT: vmaxps %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %t0 = fadd <4 x float> %x0, %x1
+ %cmp1 = fcmp ogt <4 x float> %x2, %t0
+ %sel1 = select <4 x i1> %cmp1, <4 x float> %x2, <4 x float> %t0
+ %cmp2 = fcmp ogt <4 x float> %x3, %sel1
+ %sel2 = select <4 x i1> %cmp2, <4 x float> %x3, <4 x float> %sel1
+ ret <4 x float> %sel2
+}
+
+; Verify that SSE and AVX 128-bit vector double-precision minimum ops are reassociated.
+
+define <2 x double> @reassociate_mins_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
+; SSE-LABEL: reassociate_mins_v2f64:
+; SSE: # BB#0:
+; SSE-NEXT: addpd %xmm1, %xmm0
+; SSE-NEXT: minpd %xmm3, %xmm2
+; SSE-NEXT: minpd %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: reassociate_mins_v2f64:
+; AVX: # BB#0:
+; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vminpd %xmm3, %xmm2, %xmm1
+; AVX-NEXT: vminpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %t0 = fadd <2 x double> %x0, %x1
+ %cmp1 = fcmp olt <2 x double> %x2, %t0
+ %sel1 = select <2 x i1> %cmp1, <2 x double> %x2, <2 x double> %t0
+ %cmp2 = fcmp olt <2 x double> %x3, %sel1
+ %sel2 = select <2 x i1> %cmp2, <2 x double> %x3, <2 x double> %sel1
+ ret <2 x double> %sel2
+}
+
+; Verify that SSE and AVX 128-bit vector double-precision maximum ops are reassociated.
+
+define <2 x double> @reassociate_maxs_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
+; SSE-LABEL: reassociate_maxs_v2f64:
+; SSE: # BB#0:
+; SSE-NEXT: addpd %xmm1, %xmm0
+; SSE-NEXT: maxpd %xmm3, %xmm2
+; SSE-NEXT: maxpd %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: reassociate_maxs_v2f64:
+; AVX: # BB#0:
+; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vmaxpd %xmm3, %xmm2, %xmm1
+; AVX-NEXT: vmaxpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %t0 = fadd <2 x double> %x0, %x1
+ %cmp1 = fcmp ogt <2 x double> %x2, %t0
+ %sel1 = select <2 x i1> %cmp1, <2 x double> %x2, <2 x double> %t0
+ %cmp2 = fcmp ogt <2 x double> %x3, %sel1
+ %sel2 = select <2 x i1> %cmp2, <2 x double> %x3, <2 x double> %sel1
+ ret <2 x double> %sel2
+}
+