author     Amara Emerson <aemerson@apple.com>    2019-12-11 14:42:16 -0800
committer  Amara Emerson <aemerson@apple.com>    2020-01-06 11:46:42 -0800
commit     df3f4e0d77e53193acd423d1b02d3fd3bf065bf7 (patch)
tree       0d5e44ee8297464ec2bdacccb013dcb2d5db813c
parent     e29a2e6be4e114b4233a2e0feedb74b2f34cf782 (diff)
[X86] Fix an 8 bit testb being selected when folding a volatile i32 load pattern.
Differential Revision: https://reviews.llvm.org/D71581
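
For context, the pattern at issue is a volatile i32 load whose value feeds a single-bit test; when the AND/TEST is folded into a memory operand, the access must not be narrowed to 8 bits. A minimal sketch of that IR shape follows (the function and argument names are illustrative; it mirrors the regression test added below):

; A volatile 32-bit load whose low bit is tested. Before this fix, instruction
; selection could fold the load into an 8-bit testb, shrinking the width of
; the volatile access.
define i1 @low_bit_set(i32* %mmio) {
  %v = load volatile i32, i32* %mmio, align 4
  %bit = and i32 %v, 1
  %set = icmp ne i32 %bit, 0
  ret i1 %set
}

The check added in X86ISelDAGToDAG.cpp below bails out of this folded-TEST selection whenever the load is not simple (volatile or atomic) and the TEST width does not match the load's width, so a full-width testl is emitted instead, as the new CHECK lines verify.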
-rw-r--r--  llvm/lib/Target/X86/X86ISelDAGToDAG.cpp              | 11
-rw-r--r--  llvm/test/CodeGen/X86/select-testb-volatile-load.ll  | 33
2 files changed, 44 insertions, 0 deletions
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 4e29597e941..3e2406e93f2 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -5146,6 +5146,17 @@ void X86DAGToDAGISel::Select(SDNode *Node) {
       MachineSDNode *NewNode;
       SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
       if (tryFoldLoad(Node, N0.getNode(), Reg, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
+        if (auto *LoadN = dyn_cast<LoadSDNode>(N0.getOperand(0).getNode())) {
+          if (!LoadN->isSimple()) {
+            unsigned NumVolBits = LoadN->getValueType(0).getSizeInBits();
+            if (MOpc == X86::TEST8mi && NumVolBits != 8)
+              break;
+            else if (MOpc == X86::TEST16mi && NumVolBits != 16)
+              break;
+            else if (MOpc == X86::TEST32mi && NumVolBits != 32)
+              break;
+          }
+        }
         SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Imm,
                           Reg.getOperand(0) };
         NewNode = CurDAG->getMachineNode(MOpc, dl, MVT::i32, MVT::Other, Ops);
diff --git a/llvm/test/CodeGen/X86/select-testb-volatile-load.ll b/llvm/test/CodeGen/X86/select-testb-volatile-load.ll
new file mode 100644
index 00000000000..69cfe17b486
--- /dev/null
+++ b/llvm/test/CodeGen/X86/select-testb-volatile-load.ll
@@ -0,0 +1,33 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=x86_64-apple-darwin < %s | FileCheck %s
+
+; This test checks that we don't try to narrow the volatile load by selecting
+; the pattern below into a testb instruction.
+
+define void @testb_volatile(i32 **%ptrptr) {
+; CHECK-LABEL: testb_volatile:
+; CHECK: ## %bb.0: ## %entry
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: testl $1, (%rax)
+; CHECK-NEXT: jne LBB0_1
+; CHECK-NEXT: ## %bb.2: ## %exit
+; CHECK-NEXT: movl $1, (%rax)
+; CHECK-NEXT: retq
+; CHECK-NEXT: LBB0_1: ## %bb2
+; CHECK-NEXT: movl $0, (%rax)
+; CHECK-NEXT: retq
+entry:
+ %ptr = load i32*, i32** %ptrptr, align 8
+ %vol_load = load volatile i32, i32* %ptr, align 4
+ %and = and i32 %vol_load, 1
+ %cmp = icmp eq i32 %and, 0
+ br i1 %cmp, label %exit, label %bb2
+
+bb2:
+ store i32 0, i32 *%ptr, align 4
+ ret void
+
+exit:
+ store i32 1, i32 *%ptr, align 4
+ ret void
+}