author     Sanjay Patel <spatel@rotateright.com>    2015-08-21 21:04:21 +0000
committer  Sanjay Patel <spatel@rotateright.com>    2015-08-21 21:04:21 +0000
commit     f0bc07f7a51d53e96ea0c0ef408dbfe987a3fb3d (patch)
tree       a8146c845e5ca5390426fe09887423f7ac9ab2ec
parent     bb79b06f4e40d85d6c1b0ff2098e9aa5d6afce5d (diff)
[x86] enable machine combiner reassociations for 256-bit vector min/max
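This extends the existing 128-bit and scalar handling to the 256-bit (YMM) forms of the commutable vector min/max opcodes. Note that only the VMINC*/VMAXC* variants can be listed as associative and commutative: the plain x86 minps/maxps instructions are not commutative (they return the second source operand when either input is NaN or when the inputs are zeros of opposite sign), so the combiner may only reassociate the relaxed forms that instruction selection produces under the loosened FP semantics the tests enable.

For illustration, here is the shape of the improvement on the v8f32 min test added below. The "before" code is a sketch of the left-associated chain that would be emitted without reassociation; the "after" code is what the test now checks for:

  Before: each op waits on the previous result (critical path = 3 ops)
    vaddps %ymm1, %ymm0, %ymm0   # t0 = x0 + x1
    vminps %ymm0, %ymm2, %ymm0   # min(x2, t0)
    vminps %ymm0, %ymm3, %ymm0   # min(x3, min(x2, t0))

  After: the second min no longer depends on the add (critical path = 2 ops)
    vaddps %ymm1, %ymm0, %ymm0   # t0 = x0 + x1
    vminps %ymm3, %ymm2, %ymm1   # min(x2, x3), runs in parallel with the add
    vminps %ymm1, %ymm0, %ymm0   # combine the two halves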
llvm-svn: 245735
 llvm/lib/Target/X86/X86InstrInfo.cpp      |  4 ++++
 llvm/test/CodeGen/X86/machine-combiner.ll | 68 ++++++++++++++++++++++++
 2 files changed, 72 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 9d799d8e780..c3e862b09ed 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -6409,10 +6409,14 @@ static bool isAssociativeAndCommutative(const MachineInstr &Inst) {
case X86::MINCSSrr:
case X86::VMAXCPDrr:
case X86::VMAXCPSrr:
+ case X86::VMAXCPDYrr:
+ case X86::VMAXCPSYrr:
case X86::VMAXCSDrr:
case X86::VMAXCSSrr:
case X86::VMINCPDrr:
case X86::VMINCPSrr:
+ case X86::VMINCPDYrr:
+ case X86::VMINCPSYrr:
case X86::VMINCSDrr:
case X86::VMINCSSrr:
return true;
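For context, the VMINCPSYrr/VMAXCPSYrr opcodes added above are what the backend selects for the fcmp+select min/max idiom used in the tests below, assuming the relaxed-FP flags the test runs with. A minimal IR example mirroring the new test functions:

  %cmp = fcmp olt <8 x float> %a, %b
  %min = select <8 x i1> %cmp, <8 x float> %a, <8 x float> %b   ; lowers to vminps (VMINCPSYrr)

With these cases listed, the machine combiner is allowed to rebalance chains of the 256-bit instructions exactly as it already could for the 128-bit and scalar forms.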
diff --git a/llvm/test/CodeGen/X86/machine-combiner.ll b/llvm/test/CodeGen/X86/machine-combiner.ll
index 371747479e5..b4340b34cc0 100644
--- a/llvm/test/CodeGen/X86/machine-combiner.ll
+++ b/llvm/test/CodeGen/X86/machine-combiner.ll
@@ -550,3 +550,71 @@ define <2 x double> @reassociate_maxs_v2f64(<2 x double> %x0, <2 x double> %x1,
ret <2 x double> %sel2
}
+; Verify that AVX 256-bit vector single-precision minimum ops are reassociated.
+
+define <8 x float> @reassociate_mins_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
+; AVX-LABEL: reassociate_mins_v8f32:
+; AVX: # BB#0:
+; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vminps %ymm3, %ymm2, %ymm1
+; AVX-NEXT: vminps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %t0 = fadd <8 x float> %x0, %x1
+ %cmp1 = fcmp olt <8 x float> %x2, %t0
+ %sel1 = select <8 x i1> %cmp1, <8 x float> %x2, <8 x float> %t0
+ %cmp2 = fcmp olt <8 x float> %x3, %sel1
+ %sel2 = select <8 x i1> %cmp2, <8 x float> %x3, <8 x float> %sel1
+ ret <8 x float> %sel2
+}
+
+; Verify that AVX 256-bit vector single-precision maximum ops are reassociated.
+
+define <8 x float> @reassociate_maxs_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
+; AVX-LABEL: reassociate_maxs_v8f32:
+; AVX: # BB#0:
+; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vmaxps %ymm3, %ymm2, %ymm1
+; AVX-NEXT: vmaxps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %t0 = fadd <8 x float> %x0, %x1
+ %cmp1 = fcmp ogt <8 x float> %x2, %t0
+ %sel1 = select <8 x i1> %cmp1, <8 x float> %x2, <8 x float> %t0
+ %cmp2 = fcmp ogt <8 x float> %x3, %sel1
+ %sel2 = select <8 x i1> %cmp2, <8 x float> %x3, <8 x float> %sel1
+ ret <8 x float> %sel2
+}
+
+; Verify that AVX 256-bit vector double-precision minimum ops are reassociated.
+
+define <4 x double> @reassociate_mins_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
+; AVX-LABEL: reassociate_mins_v4f64:
+; AVX: # BB#0:
+; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vminpd %ymm3, %ymm2, %ymm1
+; AVX-NEXT: vminpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %t0 = fadd <4 x double> %x0, %x1
+ %cmp1 = fcmp olt <4 x double> %x2, %t0
+ %sel1 = select <4 x i1> %cmp1, <4 x double> %x2, <4 x double> %t0
+ %cmp2 = fcmp olt <4 x double> %x3, %sel1
+ %sel2 = select <4 x i1> %cmp2, <4 x double> %x3, <4 x double> %sel1
+ ret <4 x double> %sel2
+}
+
+; Verify that AVX 256-bit vector double-precision maximum ops are reassociated.
+
+define <4 x double> @reassociate_maxs_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
+; AVX-LABEL: reassociate_maxs_v4f64:
+; AVX: # BB#0:
+; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vmaxpd %ymm3, %ymm2, %ymm1
+; AVX-NEXT: vmaxpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+ %t0 = fadd <4 x double> %x0, %x1
+ %cmp1 = fcmp ogt <4 x double> %x2, %t0
+ %sel1 = select <4 x i1> %cmp1, <4 x double> %x2, <4 x double> %t0
+ %cmp2 = fcmp ogt <4 x double> %x3, %sel1
+ %sel2 = select <4 x i1> %cmp2, <4 x double> %x3, <4 x double> %sel1
+ ret <4 x double> %sel2
+}
+