author     Simon Pilgrim <llvm-dev@redking.me.uk>   2019-01-14 12:34:31 +0000
committer  Simon Pilgrim <llvm-dev@redking.me.uk>   2019-01-14 12:34:31 +0000
commit     cfa5f06dde800c9d9207199b3764405aa0afc57e (patch)
tree       5a52cbf5f207c71f943f80f53363799d3b7a0d46
parent     4c4c0377ca6c6d35fa60934f1624252c1e38f901 (diff)
[DAGCombiner] Enable add saturation constant folding
llvm-svn: 351060
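
For context, a saturating add clamps to the limits of the type instead of wrapping on overflow, so when both operands are constants the result is itself a compile-time constant and the node can be replaced outright. The snippet below is only an illustrative model of that arithmetic in plain C++ (the helper names sadd_sat32 and uadd_sat32 are invented for this sketch, they are not LLVM APIs); the checked values mirror the scalar i32 cases in the updated tests.

#include <cassert>
#include <cstdint>
#include <limits>

// Signed saturating add: compute in a wider type, clamp to [INT32_MIN, INT32_MAX].
static int32_t sadd_sat32(int32_t a, int32_t b) {
  int64_t sum = static_cast<int64_t>(a) + b;
  if (sum > std::numeric_limits<int32_t>::max())
    return std::numeric_limits<int32_t>::max();
  if (sum < std::numeric_limits<int32_t>::min())
    return std::numeric_limits<int32_t>::min();
  return static_cast<int32_t>(sum);
}

// Unsigned saturating add: on wrap-around, clamp to UINT32_MAX.
static uint32_t uadd_sat32(uint32_t a, uint32_t b) {
  uint32_t sum = a + b;
  return sum < a ? std::numeric_limits<uint32_t>::max() : sum;
}

int main() {
  // Matches combine_constfold_i32 in the two updated test files:
  // sadd.sat(INT32_MAX, 100) folds to INT32_MAX and
  // uadd.sat(UINT32_MAX, 100) folds to UINT32_MAX, so llc now emits a
  // single immediate move instead of the add/cmov sequence.
  assert(sadd_sat32(2147483647, 100) == 2147483647);
  assert(uadd_sat32(4294967295u, 100) == 4294967295u);
  return 0;
}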
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp  |  5
 llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp |  2
 llvm/test/CodeGen/X86/combine-add-ssat.ll      | 29
 llvm/test/CodeGen/X86/combine-add-usat.ll      | 25
 4 files changed, 41 insertions(+), 20 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 580a668100b..9a40774cff8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2198,8 +2198,9 @@ SDValue DAGCombiner::visitADDSAT(SDNode *N) {
// canonicalize constant to RHS
if (!DAG.isConstantIntBuildVectorOrConstantInt(N1))
return DAG.getNode(Opcode, DL, VT, N1, N0);
-
- // TODO Constant Folding
+ // fold (add_sat c1, c2) -> c3
+ return DAG.FoldConstantArithmetic(Opcode, DL, VT, N0.getNode(),
+ N1.getNode());
}
// fold (add_sat x, 0) -> x
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 23a7b3ece82..a0ee80c0dcd 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -4488,6 +4488,8 @@ static std::pair<APInt, bool> FoldValue(unsigned Opcode, const APInt &C1,
case ISD::SMAX: return std::make_pair(C1.sge(C2) ? C1 : C2, true);
case ISD::UMIN: return std::make_pair(C1.ule(C2) ? C1 : C2, true);
case ISD::UMAX: return std::make_pair(C1.uge(C2) ? C1 : C2, true);
+ case ISD::SADDSAT: return std::make_pair(C1.sadd_sat(C2), true);
+ case ISD::UADDSAT: return std::make_pair(C1.uadd_sat(C2), true);
case ISD::UDIV:
if (!C2.getBoolValue())
break;
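
The two new FoldValue cases delegate the per-element arithmetic to APInt::sadd_sat and APInt::uadd_sat, which clamp to the signed or unsigned range of the operand's bit width. Below is a minimal sketch of that delegation, written against the APInt API but outside SelectionDAG purely for illustration (the free function foldAddSat is made up here).

#include "llvm/ADT/APInt.h"
#include <utility>

using llvm::APInt;

// Illustrative stand-in for the new SADDSAT/UADDSAT cases: given two
// constants of the same bit width, return the folded constant together
// with the "handled" flag that FoldValue's callers expect.
static std::pair<APInt, bool> foldAddSat(bool IsSigned, const APInt &C1,
                                         const APInt &C2) {
  return std::make_pair(IsSigned ? C1.sadd_sat(C2) : C1.uadd_sat(C2), true);
}

With FoldValue able to handle the two opcodes, the DAGCombiner change above only needs to hand the node to FoldConstantArithmetic once both operands are known constants.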
diff --git a/llvm/test/CodeGen/X86/combine-add-ssat.ll b/llvm/test/CodeGen/X86/combine-add-ssat.ll
index 63fde10a325..74b43180fde 100644
--- a/llvm/test/CodeGen/X86/combine-add-ssat.ll
+++ b/llvm/test/CodeGen/X86/combine-add-ssat.ll
@@ -16,13 +16,6 @@ define i32 @combine_constfold_i32() {
; CHECK-LABEL: combine_constfold_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: movl $2147483647, %eax # imm = 0x7FFFFFFF
-; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: movl $2147483647, %edx # imm = 0x7FFFFFFF
-; CHECK-NEXT: addl $100, %edx
-; CHECK-NEXT: setns %cl
-; CHECK-NEXT: addl $2147483647, %ecx # imm = 0x7FFFFFFF
-; CHECK-NEXT: addl $100, %eax
-; CHECK-NEXT: cmovol %ecx, %eax
; CHECK-NEXT: retq
%res = call i32 @llvm.sadd.sat.i32(i32 2147483647, i32 100)
ret i32 %res
@@ -31,16 +24,30 @@ define i32 @combine_constfold_i32() {
define <8 x i16> @combine_constfold_v8i16() {
; SSE-LABEL: combine_constfold_v8i16:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,1,255,65535,65535,65281,1,1]
-; SSE-NEXT: paddsw {{.*}}(%rip), %xmm0
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,0,256,65534,0,65280,32768,0]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_constfold_v8i16:
; AVX: # %bb.0:
-; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,255,65535,65535,65281,1,1]
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,0,256,65534,0,65280,32768,0]
+; AVX-NEXT: retq
+ %res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> <i16 0, i16 1, i16 255, i16 65535, i16 -1, i16 -255, i16 -32760, i16 1>, <8 x i16> <i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535, i16 -10, i16 65535>)
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @combine_constfold_undef_v8i16() {
+; SSE-LABEL: combine_constfold_undef_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = <u,1,u,65535,65535,65281,32776,1>
+; SSE-NEXT: paddsw {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_constfold_undef_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = <u,1,u,65535,65535,65281,32776,1>
; AVX-NEXT: vpaddsw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
- %res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> <i16 0, i16 1, i16 255, i16 65535, i16 -1, i16 -255, i16 -65535, i16 1>, <8 x i16> <i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535>)
+ %res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> <i16 undef, i16 1, i16 undef, i16 65535, i16 -1, i16 -255, i16 -32760, i16 1>, <8 x i16> <i16 1, i16 undef, i16 undef, i16 65535, i16 1, i16 65535, i16 -10, i16 65535>)
ret <8 x i16> %res
}
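
As a sanity check on the new expected vector, each lane of combine_constfold_v8i16 can be recomputed by hand; the IR's i16 constants such as 65535 and 65281 are the same bit patterns as -1 and -255, and the sketch below uses those signed values. This plain C++ snippet (the helper sadd_sat16 is invented for the sketch) prints the folded constant 1 0 256 65534 0 65280 32768 0, where the seventh lane is -32760 + -10 saturating to the i16 minimum, i.e. 32768 as an unsigned 16-bit value.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Signed i16 saturating add: widen to i32, then clamp to [INT16_MIN, INT16_MAX].
static int16_t sadd_sat16(int16_t a, int16_t b) {
  int32_t sum = int32_t(a) + int32_t(b);
  return static_cast<int16_t>(
      std::clamp(sum, int32_t(INT16_MIN), int32_t(INT16_MAX)));
}

int main() {
  // Lanes of combine_constfold_v8i16, with the IR's i16 constants taken
  // as signed values (65535 == -1, 65281 == -255, and so on).
  const int16_t lhs[8] = {0, 1, 255, -1, -1, -255, -32760, 1};
  const int16_t rhs[8] = {1, -1, 1, -1, 1, -1, -10, -1};
  // Prints "1 0 256 65534 0 65280 32768 0", matching the new CHECK constant.
  for (int i = 0; i != 8; ++i)
    std::printf("%u ", static_cast<unsigned>(uint16_t(sadd_sat16(lhs[i], rhs[i]))));
  std::printf("\n");
  return 0;
}

The unsigned vector in combine-add-usat.ll below checks out the same way, with every overflowing lane clamping to 65535.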
diff --git a/llvm/test/CodeGen/X86/combine-add-usat.ll b/llvm/test/CodeGen/X86/combine-add-usat.ll
index d7ce71ef28e..b69bdd5596f 100644
--- a/llvm/test/CodeGen/X86/combine-add-usat.ll
+++ b/llvm/test/CodeGen/X86/combine-add-usat.ll
@@ -15,10 +15,7 @@ declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
define i32 @combine_constfold_i32() {
; CHECK-LABEL: combine_constfold_i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl $-1, %ecx
; CHECK-NEXT: movl $-1, %eax
-; CHECK-NEXT: addl $100, %eax
-; CHECK-NEXT: cmovbl %ecx, %eax
; CHECK-NEXT: retq
%res = call i32 @llvm.uadd.sat.i32(i32 4294967295, i32 100)
ret i32 %res
@@ -27,19 +24,33 @@ define i32 @combine_constfold_i32() {
define <8 x i16> @combine_constfold_v8i16() {
; SSE-LABEL: combine_constfold_v8i16:
; SSE: # %bb.0:
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,1,255,65535,65535,65281,1,1]
-; SSE-NEXT: paddusw {{.*}}(%rip), %xmm0
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,65535,256,65535,65535,65535,2,65535]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_constfold_v8i16:
; AVX: # %bb.0:
-; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = [0,1,255,65535,65535,65281,1,1]
-; AVX-NEXT: vpaddusw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vmovaps {{.*#+}} xmm0 = [1,65535,256,65535,65535,65535,2,65535]
; AVX-NEXT: retq
%res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> <i16 0, i16 1, i16 255, i16 65535, i16 -1, i16 -255, i16 -65535, i16 1>, <8 x i16> <i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535, i16 1, i16 65535>)
ret <8 x i16> %res
}
+define <8 x i16> @combine_constfold_undef_v8i16() {
+; SSE-LABEL: combine_constfold_undef_v8i16:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = <u,1,u,65535,65535,65281,1,1>
+; SSE-NEXT: paddusw {{.*}}(%rip), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: combine_constfold_undef_v8i16:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovdqa {{.*#+}} xmm0 = <u,1,u,65535,65535,65281,1,1>
+; AVX-NEXT: vpaddusw {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: retq
+ %res = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> <i16 undef, i16 1, i16 undef, i16 65535, i16 -1, i16 -255, i16 -65535, i16 1>, <8 x i16> <i16 1, i16 undef, i16 undef, i16 65535, i16 1, i16 65535, i16 1, i16 65535>)
+ ret <8 x i16> %res
+}
+
; fold (uadd_sat c, x) -> (add_ssat x, c)
define i32 @combine_constant_i32(i32 %a0) {
; CHECK-LABEL: combine_constant_i32: