summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--llvm/lib/Target/X86/X86ISelDAGToDAG.cpp13
-rw-r--r--llvm/test/CodeGen/X86/pr37063.ll31
2 files changed, 43 insertions, 1 deletion
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 4ec7e4ec1f0..6b4a6993983 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -525,10 +525,21 @@ X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
// addl 4(%esp), %eax
// The former is 2 bytes shorter. In case where the increment is 1, then
// the saving can be 4 bytes (by using incl %eax).
- if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
+ if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1)) {
if (Imm->getAPIntValue().isSignedIntN(8))
return false;
+ // If this is a 64-bit AND with an immediate that fits in 32 bits,
+ // prefer using the smaller 'and' instruction over folding the load.
+ // This is needed to make sure immediates created by shrinkAndImmediate
+ // are always folded. Ideally we would narrow the load during DAG
+ // combine and get the best of both worlds.
+ if (U->getOpcode() == ISD::AND &&
+ Imm->getAPIntValue().getBitWidth() == 64 &&
+ Imm->getAPIntValue().isIntN(32))
+ return false;
+ }
+
// If the other operand is a TLS address, we should fold it instead.
// This produces
// movl %gs:0, %eax
diff --git a/llvm/test/CodeGen/X86/pr37063.ll b/llvm/test/CodeGen/X86/pr37063.ll
new file mode 100644
index 00000000000..cf5e1fa5670
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr37063.ll
@@ -0,0 +1,31 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+
+declare void @bar()
+
+define void @foo(i64*) {
+; CHECK-LABEL: foo:
+; CHECK: # %bb.0: # %start
+; CHECK-NEXT: movq (%rdi), %rax
+; CHECK-NEXT: andl $-2, %eax
+; CHECK-NEXT: cmpq $4, %rax
+; CHECK-NEXT: jne .LBB0_2
+; CHECK-NEXT: # %bb.1: # %bb1
+; CHECK-NEXT: retq
+; CHECK-NEXT: .LBB0_2: # %bb2.i
+; CHECK-NEXT: jmp bar # TAILCALL
+start:
+ %1 = load i64, i64* %0, align 8, !range !0
+ %2 = and i64 %1, 6
+ %3 = icmp eq i64 %2, 4
+ br i1 %3, label %bb1, label %bb2.i
+
+bb1: ; preds = %bb2.i, %start
+ ret void
+
+bb2.i: ; preds = %start
+ tail call fastcc void @bar()
+ br label %bb1
+}
+
+!0 = !{i64 0, i64 6}
OpenPOWER on IntegriCloud