author    Craig Topper <craig.topper@intel.com>  2019-05-11 04:00:27 +0000
committer Craig Topper <craig.topper@intel.com>  2019-05-11 04:00:27 +0000
commit    bdef12df8d6f0cf7ddb0a626cef377cbcd6d8e30 (patch)
tree      6e9be8a9b5031e682164d6873ad3fdb1f51eb8ac
parent    d0124bd7624426e4b7bdcec96759e7fdae20f13e (diff)
[X86] Add a test case for idempotent atomic operations with speculative load hardening. Fix an additional issue found by the test.
This test covers the fix from r360475 as well.

llvm-svn: 360511
-rw-r--r--  llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp |  4
-rw-r--r--  llvm/test/CodeGen/X86/speculative-load-hardening.ll | 21
2 files changed, 24 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index 02f07d88afc..7b043378819 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -1719,9 +1719,11 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
       // If we have at least one (non-frame-index, non-RIP) register operand,
       // and neither operand is load-dependent, we need to check the load.
+      // Also handle explicit references to RSP as used by idempotent atomic
+      // or with 0.
       unsigned BaseReg = 0, IndexReg = 0;
       if (!BaseMO.isFI() && BaseMO.getReg() != X86::RIP &&
-          BaseMO.getReg() != X86::NoRegister)
+          BaseMO.getReg() != X86::RSP && BaseMO.getReg() != X86::NoRegister)
         BaseReg = BaseMO.getReg();
       if (IndexMO.getReg() != X86::NoRegister)
         IndexReg = IndexMO.getReg();
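
As background for this hunk: SLH only hardens loads whose address registers could carry attacker-controlled data along a misspeculated path. Frame-index and RIP-relative operands were already exempt, and this change adds RSP, which likewise can never hold a speculatively loaded value. A minimal sketch of that classification, using hypothetical types rather than the real MachineOperand API:

// Hypothetical stand-ins for the pass's operand queries; the real code
// inspects the BaseMO/IndexMO memory operands of a MachineInstr.
enum class Reg { NoRegister, RIP, RSP, RAX, RCX };

struct MemOperand {
  bool IsFrameIndex = false; // compiler-managed stack slot
  Reg R = Reg::NoRegister;
};

// A load needs hardening only if some address register could hold
// attacker-controlled (speculatively loaded) data. Frame indexes, RIP,
// and RSP never can, so a load addressed purely through them -- such as
// the `lock orl $0, (%rsp)` form of an idempotent atomic -- is skipped.
static bool needsLoadHardening(const MemOperand &Base,
                               const MemOperand &Index) {
  bool BaseUntrusted = !Base.IsFrameIndex && Base.R != Reg::RIP &&
                       Base.R != Reg::RSP && Base.R != Reg::NoRegister;
  bool IndexUntrusted = Index.R != Reg::NoRegister;
  return BaseUntrusted || IndexUntrusted;
}

With this exemption in place, the hardcoded RSP reference emitted for an idempotent atomic no longer reaches the code path that previously crashed.
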
diff --git a/llvm/test/CodeGen/X86/speculative-load-hardening.ll b/llvm/test/CodeGen/X86/speculative-load-hardening.ll
index 54cde2c124e..5599b88a791 100644
--- a/llvm/test/CodeGen/X86/speculative-load-hardening.ll
+++ b/llvm/test/CodeGen/X86/speculative-load-hardening.ll
@@ -1142,3 +1142,24 @@ entry:
   call void @sink(i32 %e7)
   ret void
 }
+
+; Make sure we don't crash on idempotent atomic operations which have a
+; hardcoded reference to RSP+offset.
+define void @idempotent_atomic(i32* %x) speculative_load_hardening {
+; X64-LABEL: idempotent_atomic:
+; X64: # %bb.0:
+; X64-NEXT: movq %rsp, %rax
+; X64-NEXT: movq $-1, %rcx
+; X64-NEXT: sarq $63, %rax
+; X64-NEXT: lock orl $0, (%rsp)
+; X64-NEXT: shlq $47, %rax
+; X64-NEXT: orq %rax, %rsp
+; X64-NEXT: retq
+;
+; X64-LFENCE-LABEL: idempotent_atomic:
+; X64-LFENCE: # %bb.0:
+; X64-LFENCE-NEXT: lock orl $0, (%rsp)
+; X64-LFENCE-NEXT: retq
+  %tmp = atomicrmw or i32* %x, i32 0 seq_cst
+  ret void
+}
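
The new test pins down where the RSP reference comes from: when an atomicrmw leaves memory unchanged and its result is unused, the x86 backend lowers it to a locked no-op on the stack (`lock orl $0, (%rsp)`), which is cheaper than an mfence but hands SLH an explicit RSP memory operand. A rough sketch of that idempotence test, with hypothetical names rather than the backend's actual interface:

#include <cstdint>

enum class RMWOp { Add, Sub, Or, Xor, And, Xchg };

// An atomicrmw is idempotent when it cannot change memory: or/xor/add/sub
// with 0, or `and` with all-ones. Only its ordering semantics matter, so
// (when the result is unused) it can be replaced by a fence-equivalent
// locked no-op on the stack.
static bool isIdempotentRMW(RMWOp Op, int64_t Imm) {
  switch (Op) {
  case RMWOp::Or:
  case RMWOp::Xor:
  case RMWOp::Add:
  case RMWOp::Sub:
    return Imm == 0;
  case RMWOp::And:
    return Imm == -1; // all bits set
  default:
    return false;
  }
}

This is why the test's `atomicrmw or i32* %x, i32 0 seq_cst` produces `lock orl $0, (%rsp)` in both CHECK prefixes above.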