-rw-r--r--  llvm/lib/CodeGen/BreakFalseDeps.cpp              | 11 +++++++++++
-rw-r--r--  llvm/test/CodeGen/ARM/a15-partial-update.ll      |  6 +++++-
-rw-r--r--  llvm/test/CodeGen/X86/sqrt-partial.ll            |  1 -
-rw-r--r--  llvm/test/CodeGen/X86/stack-folding-fp-avx1.ll   |  1 -
-rw-r--r--  llvm/test/CodeGen/X86/stack-folding-fp-sse42.ll  |  4 ----
5 files changed, 16 insertions(+), 7 deletions(-)
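For context: instructions such as sqrtsd and cvtsd2ss write only the low element of their destination register, so the register's old upper bits become an input and the CPU may stall on a dependence the program never asked for. BreakFalseDeps can often retarget the undef operand to a register that is already free, which costs nothing, but its fallback is to insert a zeroing instruction (xorps %xmm0, %xmm0 on x86, a vmov.f64 on ARM) ahead of the partial write. That extra instruction is pure size overhead, which this change avoids for functions marked minsize. A minimal IR sketch of the pattern, modeled on the minsize test in sqrt-partial.ll below (the test's exact body may differ):

declare double @llvm.sqrt.f64(double)

define double @minsize(double %x, double %y) minsize {
  %xx = fmul double %x, %x
  %yy = fmul double %y, %y
  %sum = fadd double %xx, %yy
  ; sqrtsd writes only the low 64 bits of xmm0; without minsize the pass
  ; would break the false dependence by inserting "xorps %xmm0, %xmm0".
  %r = call double @llvm.sqrt.f64(double %sum)
  ret double %r
}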
diff --git a/llvm/lib/CodeGen/BreakFalseDeps.cpp b/llvm/lib/CodeGen/BreakFalseDeps.cpp
index 382f76da321..9b6da64e043 100644
--- a/llvm/lib/CodeGen/BreakFalseDeps.cpp
+++ b/llvm/lib/CodeGen/BreakFalseDeps.cpp
@@ -177,6 +177,7 @@ void BreakFalseDeps::processDefs(MachineInstr *MI) {
   assert(!MI->isDebugInstr() && "Won't process debug values");

   // Break dependence on undef uses. Do this before updating LiveRegs below.
+  // This can remove a false dependence with no additional instructions.
   unsigned OpNum;
   unsigned Pref = TII->getUndefRegClearance(*MI, OpNum, TRI);
   if (Pref) {
@@ -188,6 +189,11 @@ void BreakFalseDeps::processDefs(MachineInstr *MI) {
       UndefReads.push_back(std::make_pair(MI, OpNum));
   }

+  // The code below allows the target to create a new instruction to break the
+  // dependence. That opposes the goal of minimizing size, so bail out now.
+  if (MF->getFunction().hasMinSize())
+    return;
+
   const MCInstrDesc &MCID = MI->getDesc();
   for (unsigned i = 0,
                 e = MI->isVariadic() ? MI->getNumOperands() : MCID.getNumDefs();
@@ -208,6 +214,11 @@ void BreakFalseDeps::processUndefReads(MachineBasicBlock *MBB) {
   if (UndefReads.empty())
     return;

+  // The code below allows the target to create a new instruction to break the
+  // dependence. That opposes the goal of minimizing size, so bail out now.
+  if (MF->getFunction().hasMinSize())
+    return;
+
   // Collect this block's live out register units.
   LiveRegSet.init(*TRI);
   // We do not need to care about pristine registers as they are just preserved
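Both early returns above key off the same IR-level attribute, reached through the MachineFunction's underlying Function. A hedged sketch of the resulting behavior on two hypothetical functions (the names are made up; only the attribute differs). In both, the pass may still retarget an undef operand to a different register, since that is free in size; only @fast can also receive a clearance instruction:

declare float @llvm.sqrt.f32(float)

; No attribute: the pass may still insert e.g. "xorps" before the sqrt.
define float @fast(float %x) {
  %r = call float @llvm.sqrt.f32(float %x)
  ret float %r
}

; minsize: hasMinSize() is true, so processDefs/processUndefReads bail out
; before any dependency-breaking instruction is created.
define float @small(float %x) minsize {
  %r = call float @llvm.sqrt.f32(float %x)
  ret float %r
}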
diff --git a/llvm/test/CodeGen/ARM/a15-partial-update.ll b/llvm/test/CodeGen/ARM/a15-partial-update.ll
index c20aaa2fa5d..a3971d6a034 100644
--- a/llvm/test/CodeGen/ARM/a15-partial-update.ll
+++ b/llvm/test/CodeGen/ARM/a15-partial-update.ll
@@ -56,6 +56,11 @@ ret:
   ret void
 }

+; If minimizing size, that overrides perf, so no extra vmov.f64 here.
+
+; TODO: This (and above) could use a splat load to remove the false
+; dependence with no extra instruction.
+
 define void @t2_minsize(<4 x i8> *%in, <4 x i8> *%out, i32 %n) minsize {
 ; CHECK-LABEL: t2_minsize:
 ; CHECK:       @ %bb.0: @ %entry
@@ -63,7 +68,6 @@ define void @t2_minsize(<4 x i8> *%in, <4 x i8> *%out, i32 %n) minsize {
 ; CHECK-NEXT:    add r1, r1, #4
 ; CHECK-NEXT:  .LBB2_1: @ %loop
 ; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    vmov.f64 d16, #5.000000e-01
 ; CHECK-NEXT:    vld1.32 {d16[0]}, [r0:32]
 ; CHECK-NEXT:    vmovl.u8 q8, d16
 ; CHECK-NEXT:    vuzp.8 d16, d18
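The TODO records a follow-up that would sidestep the trade-off entirely: a splat load writes every lane of the destination register, so there is no stale lane left to depend on and nothing extra to insert. A rough IR illustration of the two load shapes (function names are illustrative; actual instruction selection depends on the subtarget):

; Lane insert: selects something like "vld1.32 {d16[0]}", which leaves the
; other lanes of d16 untouched (the partial update this test is about).
define <2 x i32> @lane_load(i32* %p, <2 x i32> %v) {
  %s = load i32, i32* %p, align 4
  %r = insertelement <2 x i32> %v, i32 %s, i32 0
  ret <2 x i32> %r
}

; Splat load: can select "vld1.32 {d16[]}", which writes all lanes and so
; carries no false dependence on the register's previous contents.
define <2 x i32> @splat_load(i32* %p) {
  %s = load i32, i32* %p, align 4
  %v = insertelement <2 x i32> undef, i32 %s, i32 0
  %r = shufflevector <2 x i32> %v, <2 x i32> undef, <2 x i32> zeroinitializer
  ret <2 x i32> %r
}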
diff --git a/llvm/test/CodeGen/X86/sqrt-partial.ll b/llvm/test/CodeGen/X86/sqrt-partial.ll
index d017b5c3600..7ed68c10849 100644
--- a/llvm/test/CodeGen/X86/sqrt-partial.ll
+++ b/llvm/test/CodeGen/X86/sqrt-partial.ll
@@ -67,7 +67,6 @@ define double @minsize(double %x, double %y) minsize {
 ; SSE-NEXT:    mulsd %xmm0, %xmm0
 ; SSE-NEXT:    mulsd %xmm1, %xmm1
 ; SSE-NEXT:    addsd %xmm0, %xmm1
-; SSE-NEXT:    xorps %xmm0, %xmm0
 ; SSE-NEXT:    sqrtsd %xmm1, %xmm0
 ; SSE-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/stack-folding-fp-avx1.ll b/llvm/test/CodeGen/X86/stack-folding-fp-avx1.ll
index 3b69f2d9c52..6625cc4f07a 100644
--- a/llvm/test/CodeGen/X86/stack-folding-fp-avx1.ll
+++ b/llvm/test/CodeGen/X86/stack-folding-fp-avx1.ll
@@ -2876,7 +2876,6 @@ define double @stack_fold_roundsd_minsize(double %a0) minsize {
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    nop
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
 ; CHECK-NEXT:    vroundsd $9, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 8-byte Folded Reload
 ; CHECK-NEXT:    retq
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
diff --git a/llvm/test/CodeGen/X86/stack-folding-fp-sse42.ll b/llvm/test/CodeGen/X86/stack-folding-fp-sse42.ll
index 0c81f1ac73a..306ee310980 100644
--- a/llvm/test/CodeGen/X86/stack-folding-fp-sse42.ll
+++ b/llvm/test/CodeGen/X86/stack-folding-fp-sse42.ll
@@ -583,7 +583,6 @@ define float @stack_fold_cvtsd2ss(double %a0) minsize {
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    nop
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    xorps %xmm0, %xmm0
 ; CHECK-NEXT:    cvtsd2ss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 8-byte Folded Reload
 ; CHECK-NEXT:    retq
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
@@ -975,7 +974,6 @@ define double @stack_fold_cvtss2sd(float %a0) minsize {
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    nop
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    xorps %xmm0, %xmm0
 ; CHECK-NEXT:    cvtss2sd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
 ; CHECK-NEXT:    retq
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
@@ -2012,7 +2010,6 @@ define float @stack_fold_roundss(float %a0) minsize {
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    nop
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    xorps %xmm0, %xmm0
 ; CHECK-NEXT:    roundss $9, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
 ; CHECK-NEXT:    retq
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
@@ -2181,7 +2178,6 @@ define float @stack_fold_sqrtss(float %a0) minsize {
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    nop
 ; CHECK-NEXT:    #NO_APP
-; CHECK-NEXT:    xorps %xmm0, %xmm0
 ; CHECK-NEXT:    sqrtss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
 ; CHECK-NEXT:    retq
   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()