| author | Evan Cheng <evan.cheng@apple.com> | 2010-01-18 21:38:44 +0000 |
|---|---|---|
| committer | Evan Cheng <evan.cheng@apple.com> | 2010-01-18 21:38:44 +0000 |
| commit | 88b65bc835444e4cac27fa8032948f7bb51781fa (patch) | |
| tree | 294ad7bf833fb1620b80aa4765c93a69c152508d /llvm | |
| parent | 5a52727ad089672ea447535aa627fa150e39c7ac (diff) | |
Canonicalize -1 - x to ~x.
Instcombine does this, but apparently there are situations where this pattern escapes the optimizer and/or is created by isel. Here is a case seen in JavaScriptCore:
%t1 = sub i32 0, %a
%t2 = add i32 %t1, -1
The DAG combiner pattern ((c1-A)+c2) -> (c1+c2)-A will fold it to -1 - %a.
llvm-svn: 93773
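
The rewrite is valid because of two's-complement arithmetic: -1 - x and ~x denote the same 32-bit value. Below is a small standalone C++ sketch, not part of this patch and using illustrative sample values, that walks the IR sequence from the commit message and checks that it equals both -1 - %a and ~%a.

```cpp
// Minimal sketch (assumption: illustrative sample values, not LLVM code).
// Checks the identity the combine relies on: with modulo-2^32 arithmetic,
// -1 - x == ~x, which is why (sub -1, x) can be emitted as (xor x, -1).
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t samples[] = {0u, 1u, 42u, 0x7FFFFFFFu, 0x80000000u, 0xFFFFFFFFu};
  for (uint32_t a : samples) {
    uint32_t t1 = 0u - a;            // %t1 = sub i32 0, %a
    uint32_t t2 = t1 + 0xFFFFFFFFu;  // %t2 = add i32 %t1, -1
    // ((c1-A)+c2) -> (c1+c2)-A with c1 = 0, c2 = -1 gives -1 - a ...
    assert(t2 == 0xFFFFFFFFu - a);
    // ... and -1 - a is exactly ~a, the xor-with-all-ones form this patch emits.
    assert(t2 == ~a);
  }
  return 0;
}
```

Because the xor form lowers to a single NOT on x86, the test added below checks for notl rather than a subtraction.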
Diffstat (limited to 'llvm')
| -rw-r--r-- | llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 3 |
| -rw-r--r-- | llvm/test/CodeGen/X86/xor.ll | 11 |
2 files changed, 14 insertions, 0 deletions
```diff
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 549527c1a3f..7fe908c7302 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -1176,6 +1176,9 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
   if (N1C)
     return DAG.getNode(ISD::ADD, N->getDebugLoc(), VT, N0,
                        DAG.getConstant(-N1C->getAPIntValue(), VT));
+  // Canonicalize (sub -1, x) -> ~x, i.e. (xor x, -1)
+  if (N0C && N0C->isAllOnesValue())
+    return DAG.getNode(ISD::XOR, N->getDebugLoc(), VT, N1, N0);
   // fold (A+B)-A -> B
   if (N0.getOpcode() == ISD::ADD && N0.getOperand(0) == N1)
     return N0.getOperand(1);
diff --git a/llvm/test/CodeGen/X86/xor.ll b/llvm/test/CodeGen/X86/xor.ll
index 7bd06bba4c3..9bfff8a06a8 100644
--- a/llvm/test/CodeGen/X86/xor.ll
+++ b/llvm/test/CodeGen/X86/xor.ll
@@ -131,3 +131,14 @@ bb12:
 ; X32: andl {{.*}}[[REG]]
 }
 
+define i32 @test8(i32 %a) nounwind {
+; rdar://7553032
+entry:
+  %t1 = sub i32 0, %a
+  %t2 = add i32 %t1, -1
+  ret i32 %t2
+; X64: test8:
+; X64: notl %eax
+; X32: test8:
+; X32: notl %eax
+}
```

