author     Daniel Jasper <djasper@google.com>    2016-12-18 14:36:38 +0000
committer  Daniel Jasper <djasper@google.com>    2016-12-18 14:36:38 +0000
commit     373f9a6a0c95fcdb6ee99eea8450d3cb546921a7 (patch)
tree       3d4d4e88aedad439f5b934a532b9934a4f906dbb
parent     4b88a770efda1a69fd7b276231c9dd5014417a3d (diff)
Revert r289955 and r289962. This is causing lots of ASAN failures for us.
Not sure whether it causes an ASAN false positive, whether it actually leads to incorrect code, or whether it merely exposes existing bad code. Hans, I'll get you instructions to reproduce this.

llvm-svn: 290066
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp        32
-rw-r--r--  llvm/test/CodeGen/X86/atomic-eflags-reuse.ll    41
2 files changed, 10 insertions(+), 63 deletions(-)
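
For context, the change being reverted (r289955) generalized an existing EFLAGS-reuse peephole so that comparing the result of an atomic fetch-and-add against the negated addend, not just against zero, could be folded into the flags set by the LOCKed instruction. The snippet below is a minimal C++ sketch, not part of this patch, of the kind of source pattern the removed tests (test_sub_1_setcc_eq, test_add_5_setcc_ne) exercise; the function name and constants are illustrative only.

// Minimal sketch (not from this commit): a source pattern whose codegen the
// reverted combine changed.  With r289955, the comparison against the negated
// addend could reuse EFLAGS from `lock addq`/`lock subq`; after this revert,
// only comparisons against zero are folded.
#include <atomic>
#include <cstdint>
#include <cstdio>

static bool add_5_old_value_was_minus_5(std::atomic<int64_t> &v) {
  // Mirrors the removed test test_add_5_setcc_ne (with the condition
  // inverted): the fetched value equals -5 exactly when the post-add value
  // is 0, so the compare could be folded into the locked add's flags.
  return v.fetch_add(5, std::memory_order_seq_cst) == -5;
}

int main() {
  std::atomic<int64_t> v{-5};
  std::printf("%d\n", static_cast<int>(add_5_old_value_was_minus_5(v))); // prints 1
  return 0;
}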
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index e29c7cd6e3b..1470952f176 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -28985,19 +28985,11 @@ static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
-/// Combine brcond/cmov/setcc/.. based on comparing the result of
-/// atomic_load_add to use EFLAGS produced by the addition
-/// directly if possible. For example:
-///
-/// (setcc (cmp (atomic_load_add x, -C) C), COND_E)
-/// becomes:
-/// (setcc (LADD x, -C), COND_E)
-///
-/// and
+/// Combine:
/// (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
-/// becomes:
+/// to:
/// (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
-///
+/// i.e., reusing the EFLAGS produced by the LOCKed instruction.
/// Note that this is only legal for some op/cc combinations.
static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
SelectionDAG &DAG) {
@@ -29006,7 +28998,7 @@ static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
(Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
return SDValue();
- // This applies to variations of the common case:
+ // This only applies to variations of the common case:
// (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
// (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
// (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
@@ -29025,9 +29017,8 @@ static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
return SDValue();
auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
- if (!CmpRHSC)
+ if (!CmpRHSC || CmpRHSC->getZExtValue() != 0)
return SDValue();
- APInt Comparand = CmpRHSC->getAPIntValue();
const unsigned Opc = CmpLHS.getOpcode();
@@ -29043,19 +29034,16 @@ static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
if (Opc == ISD::ATOMIC_LOAD_SUB)
Addend = -Addend;
- if (Comparand == -Addend) {
- // No change to CC.
- } else if (CC == X86::COND_S && Comparand == 0 && Addend == 1) {
+ if (CC == X86::COND_S && Addend == 1)
CC = X86::COND_LE;
- } else if (CC == X86::COND_NS && Comparand == 0 && Addend == 1) {
+ else if (CC == X86::COND_NS && Addend == 1)
CC = X86::COND_G;
- } else if (CC == X86::COND_G && Comparand == 0 && Addend == -1) {
+ else if (CC == X86::COND_G && Addend == -1)
CC = X86::COND_GE;
- } else if (CC == X86::COND_LE && Comparand == 0 && Addend == -1) {
+ else if (CC == X86::COND_LE && Addend == -1)
CC = X86::COND_L;
- } else {
+ else
return SDValue();
- }
SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG);
DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
diff --git a/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll b/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll
index 80215639ae5..dc1814b55cd 100644
--- a/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll
+++ b/llvm/test/CodeGen/X86/atomic-eflags-reuse.ll
@@ -176,45 +176,4 @@ entry:
ret i8 %tmp2
}
-define i8 @test_sub_1_setcc_eq(i64* %p) #0 {
-; CHECK-LABEL: test_sub_1_setcc_eq:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: lock decq (%rdi)
-; CHECK-NEXT: sete %al
-; CHECK-NEXT: retq
-entry:
- %tmp0 = atomicrmw sub i64* %p, i64 1 seq_cst
- %tmp1 = icmp eq i64 %tmp0, 1
- %tmp2 = zext i1 %tmp1 to i8
- ret i8 %tmp2
-}
-
-define i8 @test_add_5_setcc_ne(i64* %p) #0 {
-; CHECK-LABEL: test_add_5_setcc_ne:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: lock addq $5, (%rdi)
-; CHECK-NEXT: setne %al
-; CHECK-NEXT: retq
-entry:
- %tmp0 = atomicrmw add i64* %p, i64 5 seq_cst
- %tmp1 = icmp ne i64 %tmp0, -5
- %tmp2 = zext i1 %tmp1 to i8
- ret i8 %tmp2
-}
-
-define i8 @test_add_5_setcc_ne_comparand_mismatch(i64* %p) #0 {
-; CHECK-LABEL: test_add_5_setcc_ne_comparand_mismatch:
-; CHECK: # BB#0: # %entry
-; CHECK-NEXT: movl $5, %eax
-; CHECK-NEXT: lock xaddq %rax, (%rdi)
-; CHECK-NEXT: testq %rax, %rax
-; CHECK-NEXT: setne %al
-; CHECK-NEXT: retq
-entry:
- %tmp0 = atomicrmw add i64* %p, i64 5 seq_cst
- %tmp1 = icmp ne i64 %tmp0, 0
- %tmp2 = zext i1 %tmp1 to i8
- ret i8 %tmp2
-}
-
attributes #0 = { nounwind }
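
After the revert, combineSetCCAtomicArith still handles the narrower pattern documented in the retained comment above: testing the sign of an atomic increment or decrement by one. The following C++ sketch is purely illustrative and not part of the patch.

// Minimal sketch (not from this commit): a sign test on an atomic increment
// that the backend can still fold after the revert, rewriting COND_S on a
// separate cmp into COND_LE on the flags produced by the locked add.
#include <atomic>
#include <cstdint>

bool old_value_was_negative(std::atomic<int64_t> &v) {
  // The pre-increment value is negative exactly when the post-increment
  // value is <= 0 at the flags level, which is the COND_S -> COND_LE
  // rewrite kept by this patch.
  return v.fetch_add(1, std::memory_order_seq_cst) < 0;
}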