author    Chandler Carruth <chandlerc@gmail.com>    2017-08-21 08:45:19 +0000
committer Chandler Carruth <chandlerc@gmail.com>    2017-08-21 08:45:19 +0000
commit    63dd5e0ef6fdf9adbd15049e6c467b675c5e561a (patch)
tree      b8881806e5c0559871ffdd847d0bc0d36d078a77 /llvm/lib/Target/X86/X86ISelLowering.cpp
parent    b252ffd2cca9e9f3ed65b8912603abc1f006ac5a (diff)
[x86] Handle more cases where we can re-use an atomic operation's flags
rather than doing a separate comparison. This both saves an explicit comparison and avoids the use of `xadd`, which introduces register constraints and other challenges to the generated code.

The motivating case is from atomic reference counts where `1` is the sentinel rather than `0` for whatever reason. This can and should be lowered efficiently on x86 by just using a different flag; however, the x86 code only handled the `0` case.

There remain some further opportunities here that are currently hidden due to canonicalization. I've included test cases that show these and FIXMEs. However, I don't at the moment have any production use cases and they seem substantially harder to address.

Differential Revision: https://reviews.llvm.org/D36945

llvm-svn: 311317
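As a rough illustration (not part of the commit), the sentinel-of-`1` reference-count pattern the message describes might look like the following C++; the `RefCounted` type and `release` method are invented for the example:

#include <atomic>

// Hypothetical refcount whose "last owner" sentinel is 1 rather than 0.
struct RefCounted {
  std::atomic<long> Refs{1};

  // fetch_sub returns the *old* value, so `old == 1` is the pattern this
  // patch recognizes: the comparison value (1) is the negation of the
  // addend (-1), so the whole check can lower to `lock subq $1, (%rdi)`
  // followed by `sete`, with no separate `cmp` and no `xadd`.
  bool release() {
    return Refs.fetch_sub(1, std::memory_order_acq_rel) == 1;
  }
};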
Diffstat (limited to 'llvm/lib/Target/X86/X86ISelLowering.cpp')
-rw-r--r-- llvm/lib/Target/X86/X86ISelLowering.cpp | 36
1 file changed, 30 insertions(+), 6 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 8ecc035ec57..ce00a4a9665 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -30732,12 +30732,7 @@ static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
   if (!CmpLHS.hasOneUse())
     return SDValue();
 
-  auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
-  if (!CmpRHSC || CmpRHSC->getZExtValue() != 0)
-    return SDValue();
-
-  const unsigned Opc = CmpLHS.getOpcode();
-
+  unsigned Opc = CmpLHS.getOpcode();
   if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
     return SDValue();
 
@@ -30750,6 +30745,35 @@ static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
   if (Opc == ISD::ATOMIC_LOAD_SUB)
     Addend = -Addend;
 
+  auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
+  if (!CmpRHSC)
+    return SDValue();
+
+  APInt Comparison = CmpRHSC->getAPIntValue();
+
+  // If the addend is the negation of the comparison value, then we can do
+  // a full comparison by emitting the atomic arithmetic as a locked sub.
+  if (Comparison == -Addend) {
+    // The CC is fine, but we need to rewrite the LHS of the comparison as an
+    // atomic sub.
+    auto *AN = cast<AtomicSDNode>(CmpLHS.getNode());
+    auto AtomicSub = DAG.getAtomic(
+        ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpLHS.getValueType(),
+        /*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
+        /*RHS*/ DAG.getConstant(-Addend, SDLoc(CmpRHS), CmpRHS.getValueType()),
+        AN->getMemOperand());
+    auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG);
+    DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
+                                  DAG.getUNDEF(CmpLHS.getValueType()));
+    DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
+    return LockOp;
+  }
+
+  // We can handle comparisons with zero in a number of cases by manipulating
+  // the CC used.
+  if (!Comparison.isNullValue())
+    return SDValue();
+
   if (CC == X86::COND_S && Addend == 1)
     CC = X86::COND_LE;
   else if (CC == X86::COND_NS && Addend == 1)
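The flag-rewriting tail of this hunk (truncated here) maps condition codes computed against the old value onto the flags of the updated value. As a hedged sketch of the `COND_S` / `Addend == 1` case just above, with an invented helper name: since `old < 0` holds exactly when `old + 1 <= 0`, the combine can test the flags of the `lock add` itself with `setle` rather than recovering the old value through `xadd` and comparing it:

#include <atomic>

// Hypothetical helper: was the counter negative before the increment?
// `fetch_add(1) < 0` tests the sign (COND_S) of the *old* value; because
// old < 0 iff old + 1 <= 0, the combine retargets the condition to
// COND_LE on the flags produced by `lock addl $1, (%rdi)`.
bool wasNegative(std::atomic<int> &Counter) {
  return Counter.fetch_add(1, std::memory_order_seq_cst) < 0;
}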