diff options
| author | Craig Topper <craig.topper@intel.com> | 2019-05-15 04:15:46 +0000 |
|---|---|---|
| committer | Craig Topper <craig.topper@intel.com> | 2019-05-15 04:15:46 +0000 |
| commit | 384d46c0d5be4cece38d8f7448ff80e49c1ce3b8 (patch) | |
| tree | 7d862c460e54cabc2f449d1d17ee0c5864e9c18a | |
| parent | a23cc727d8155d390a04f4ccf989eda561c9fc06 (diff) | |
| download | bcm5719-llvm-384d46c0d5be4cece38d8f7448ff80e49c1ce3b8.tar.gz bcm5719-llvm-384d46c0d5be4cece38d8f7448ff80e49c1ce3b8.zip | |
[X86] Use OR32mi8Locked instead of LOCK_OR32mi8 in emitLockedStackOp.
They encode the same way, but OR32mi8Locked has hasUnmodeledSideEffects set,
which should be stronger than the mayLoad/mayStore on LOCK_OR32mi8. I think
this makes sense since we are using it as a fence.
This also seems to hide the operation from the speculative load hardening pass,
so I've reverted r360511.
llvm-svn: 360747
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 4 | ||||
| -rw-r--r-- | llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp | 4 | ||||
| -rw-r--r-- | llvm/test/CodeGen/X86/speculative-load-hardening.ll | 9 |
3 files changed, 5 insertions, 12 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index e3b97162020..c2cc3f33de0 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -25872,7 +25872,7 @@ static SDValue emitLockedStackOp(SelectionDAG &DAG, DAG.getRegister(0, MVT::i16), // Segment. Zero, Chain}; - SDNode *Res = DAG.getMachineNode(X86::LOCK_OR32mi8, DL, MVT::i32, + SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32, MVT::Other, Ops); return SDValue(Res, 1); } @@ -25887,7 +25887,7 @@ static SDValue emitLockedStackOp(SelectionDAG &DAG, Zero, Chain }; - SDNode *Res = DAG.getMachineNode(X86::LOCK_OR32mi8, DL, MVT::i32, + SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32, MVT::Other, Ops); return SDValue(Res, 1); } diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp index 7b043378819..02f07d88afc 100644 --- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp +++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp @@ -1719,11 +1719,9 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden( // If we have at least one (non-frame-index, non-RIP) register operand, // and neither operand is load-dependent, we need to check the load. - // Also handle explicit references to RSP as used by idempotent atomic - // or with 0. 
unsigned BaseReg = 0, IndexReg = 0; if (!BaseMO.isFI() && BaseMO.getReg() != X86::RIP && - BaseMO.getReg() != X86::RSP && BaseMO.getReg() != X86::NoRegister) + BaseMO.getReg() != X86::NoRegister) BaseReg = BaseMO.getReg(); if (IndexMO.getReg() != X86::NoRegister) IndexReg = IndexMO.getReg(); diff --git a/llvm/test/CodeGen/X86/speculative-load-hardening.ll b/llvm/test/CodeGen/X86/speculative-load-hardening.ll index f9f623570e0..158243ad972 100644 --- a/llvm/test/CodeGen/X86/speculative-load-hardening.ll +++ b/llvm/test/CodeGen/X86/speculative-load-hardening.ll @@ -1148,17 +1148,12 @@ entry: define void @idempotent_atomic(i32* %x) speculative_load_hardening { ; X64-LABEL: idempotent_atomic: ; X64: # %bb.0: -; X64-NEXT: movq %rsp, %rax -; X64-NEXT: movq $-1, %rcx -; X64-NEXT: sarq $63, %rax -; X64-NEXT: lock orl $0, -64(%rsp) -; X64-NEXT: shlq $47, %rax -; X64-NEXT: orq %rax, %rsp +; X64-NEXT: lock orl $0, -{{[0-9]+}}(%rsp) ; X64-NEXT: retq ; ; X64-LFENCE-LABEL: idempotent_atomic: ; X64-LFENCE: # %bb.0: -; X64-LFENCE-NEXT: lock orl $0, -64(%rsp) +; X64-LFENCE-NEXT: lock orl $0, -{{[0-9]+}}(%rsp) ; X64-LFENCE-NEXT: retq %tmp = atomicrmw or i32* %x, i32 0 seq_cst ret void |

