Diffstat (limited to 'llvm')
 llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp             |  15 ++-
 llvm/test/Transforms/LoopUnroll/AArch64/unroll-optsize.ll | 171 ++++++++++++++
 2 files changed, 184 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
index c113e4d3166..53015d25842 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -207,6 +207,7 @@ TargetTransformInfo::UnrollingPreferences llvm::gatherUnrollingPreferences(
   if (OptForSize) {
     UP.Threshold = UP.OptSizeThreshold;
     UP.PartialThreshold = UP.PartialOptSizeThreshold;
+    UP.MaxPercentThresholdBoost = 100;
   }
 
   // Apply any user values specified by cl::opt
@@ -993,6 +994,7 @@ static LoopUnrollResult tryToUnrollLoop(
   if (OnlyWhenForced && !(TM & TM_Enable))
     return LoopUnrollResult::Unmodified;
 
+  bool OptForSize = L->getHeader()->getParent()->hasOptSize();
   unsigned NumInlineCandidates;
   bool NotDuplicatable;
   bool Convergent;
@@ -1000,8 +1002,11 @@ static LoopUnrollResult tryToUnrollLoop(
       L, SE, TTI, BFI, PSI, OptLevel, ProvidedThreshold, ProvidedCount,
       ProvidedAllowPartial, ProvidedRuntime, ProvidedUpperBound,
       ProvidedAllowPeeling);
-  // Exit early if unrolling is disabled.
-  if (UP.Threshold == 0 && (!UP.Partial || UP.PartialThreshold == 0))
+
+  // Exit early if unrolling is disabled. For OptForSize, we pick the loop size
+  // as threshold later on.
+  if (UP.Threshold == 0 && (!UP.Partial || UP.PartialThreshold == 0) &&
+      !OptForSize)
     return LoopUnrollResult::Unmodified;
 
   SmallPtrSet<const Value *, 32> EphValues;
@@ -1016,6 +1021,12 @@ static LoopUnrollResult tryToUnrollLoop(
                       << " instructions.\n");
     return LoopUnrollResult::Unmodified;
   }
+
+  // When optimizing for size, use LoopSize as threshold, to (fully) unroll
+  // loops, if it does not increase code size.
+  if (OptForSize)
+    UP.Threshold = std::max(UP.Threshold, LoopSize);
+
   if (NumInlineCandidates != 0) {
     LLVM_DEBUG(dbgs() << "  Not unrolling loop with inlinable calls.\n");
     return LoopUnrollResult::Unmodified;
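
Net effect of the hunks above: when the enclosing function is marked optsize,
the pass caps the constant-folding threshold boost at 100% and, once the loop
size is known, raises the threshold to exactly that size, so full unrolling is
permitted precisely when it cannot grow the code. A minimal standalone sketch
of that selection logic, with simplified stand-ins for the
TargetTransformInfo::UnrollingPreferences fields (not the actual pass code,
and assuming the stock defaults):

// Sketch of the optsize threshold selection introduced by this patch.
#include <algorithm>

struct UnrollPrefs {
  unsigned Threshold = 150;                // Full-unroll size limit.
  unsigned OptSizeThreshold = 0;           // Limit used under optsize.
  unsigned MaxPercentThresholdBoost = 400; // Headroom for foldable loops.
};

void gatherPrefs(UnrollPrefs &UP, bool OptForSize) {
  if (OptForSize) {
    UP.Threshold = UP.OptSizeThreshold;
    UP.MaxPercentThresholdBoost = 100; // A 100% "boost" is no boost at all.
  }
}

// Called once ApproximateLoopSize() has produced LoopSize: allow full
// unrolling exactly when the unrolled body is no bigger than the loop is.
void pickThreshold(UnrollPrefs &UP, unsigned LoopSize, bool OptForSize) {
  if (OptForSize)
    UP.Threshold = std::max(UP.Threshold, LoopSize);
}

The boost cap matters because, for loops whose unrolled bodies are expected to
constant-fold, the unroller scales the threshold by up to
MaxPercentThresholdBoost/100; pinning it at 100 keeps the optsize bound exact.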
diff --git a/llvm/test/Transforms/LoopUnroll/AArch64/unroll-optsize.ll b/llvm/test/Transforms/LoopUnroll/AArch64/unroll-optsize.ll
new file mode 100644
index 00000000000..c0bf00b938a
--- /dev/null
+++ b/llvm/test/Transforms/LoopUnroll/AArch64/unroll-optsize.ll
@@ -0,0 +1,171 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -loop-unroll -mtriple=arm64-apple-iphoneos -S %s | FileCheck %s
+
+; Check that we unroll even with optsize, if the result is smaller, either
+; because we have single-iteration loops or because the bodies have constant
+; folding opportunities after fully unrolling.
+
+declare i32 @get()
+
+define void @fully_unrolled_single_iteration(i32* %src) #0 {
+; CHECK-LABEL: @fully_unrolled_single_iteration(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARR:%.*]] = alloca [4 x i32], align 4
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[V:%.*]] = load i32, i32* [[SRC:%.*]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
+; CHECK-NEXT: store i32 [[V]], i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
+; CHECK-NEXT: call void @use(i32* nonnull [[PTR]])
+; CHECK-NEXT: ret void
+;
+entry:
+ %arr = alloca [4 x i32], align 4
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %src.idx = getelementptr inbounds i32, i32* %src, i64 %indvars.iv
+ %v = load i32, i32* %src.idx
+ %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
+ store i32 %v, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv.next, 1
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ %ptr = bitcast [4 x i32]* %arr to i32*
+ call void @use(i32* nonnull %ptr)
+ ret void
+}
+
+
+define void @fully_unrolled_smaller() #0 {
+; CHECK-LABEL: @fully_unrolled_smaller(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARR:%.*]] = alloca [4 x i32], align 4
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
+; CHECK-NEXT: store i32 16, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 1
+; CHECK-NEXT: store i32 4104, i32* [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 2
+; CHECK-NEXT: store i32 1048592, i32* [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 3
+; CHECK-NEXT: store i32 268435480, i32* [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
+; CHECK-NEXT: call void @use(i32* nonnull [[PTR]])
+; CHECK-NEXT: ret void
+;
+entry:
+ %arr = alloca [4 x i32], align 4
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %indvars.iv.tr = trunc i64 %indvars.iv to i32
+ %shl.0 = shl i32 %indvars.iv.tr, 3
+ %shl.1 = shl i32 16, %shl.0
+ %or = or i32 %shl.1, %shl.0
+ %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
+ store i32 %or, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv, 3
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ %ptr = bitcast [4 x i32]* %arr to i32*
+ call void @use(i32* nonnull %ptr)
+ ret void
+}
+
+define void @fully_unrolled_smaller_Oz() #1 {
+; CHECK-LABEL: @fully_unrolled_smaller_Oz(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARR:%.*]] = alloca [4 x i32], align 4
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
+; CHECK-NEXT: store i32 16, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 1
+; CHECK-NEXT: store i32 4104, i32* [[ARRAYIDX_1]], align 4
+; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 2
+; CHECK-NEXT: store i32 1048592, i32* [[ARRAYIDX_2]], align 4
+; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 3
+; CHECK-NEXT: store i32 268435480, i32* [[ARRAYIDX_3]], align 4
+; CHECK-NEXT: [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
+; CHECK-NEXT: call void @use(i32* nonnull [[PTR]])
+; CHECK-NEXT: ret void
+;
+entry:
+ %arr = alloca [4 x i32], align 4
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %indvars.iv.tr = trunc i64 %indvars.iv to i32
+ %shl.0 = shl i32 %indvars.iv.tr, 3
+ %shl.1 = shl i32 16, %shl.0
+ %or = or i32 %shl.1, %shl.0
+ %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
+ store i32 %or, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv, 3
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ %ptr = bitcast [4 x i32]* %arr to i32*
+ call void @use(i32* nonnull %ptr)
+ ret void
+}
+
+
+define void @fully_unrolled_bigger() #0 {
+; CHECK-LABEL: @fully_unrolled_bigger(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ARR:%.*]] = alloca [4 x i32], align 4
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: [[INDVARS_IV_TR:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; CHECK-NEXT: [[SHL_0:%.*]] = shl i32 [[INDVARS_IV_TR]], 3
+; CHECK-NEXT: [[SHL_1:%.*]] = shl i32 16, [[SHL_0]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL_1]], [[SHL_0]]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 [[INDVARS_IV]]
+; CHECK-NEXT: store i32 [[OR]], i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV]], 6
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
+; CHECK: for.cond.cleanup:
+; CHECK-NEXT: [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
+; CHECK-NEXT: call void @use(i32* nonnull [[PTR]])
+; CHECK-NEXT: ret void
+;
+entry:
+ %arr = alloca [4 x i32], align 4
+ br label %for.body
+
+for.body: ; preds = %for.body, %entry
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+ %indvars.iv.tr = trunc i64 %indvars.iv to i32
+ %shl.0 = shl i32 %indvars.iv.tr, 3
+ %shl.1 = shl i32 16, %shl.0
+ %or = or i32 %shl.1, %shl.0
+ %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
+ store i32 %or, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond = icmp eq i64 %indvars.iv, 6
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup: ; preds = %for.body
+ %ptr = bitcast [4 x i32]* %arr to i32*
+ call void @use(i32* nonnull %ptr)
+ ret void
+}
+
+declare void @use(i32*)
+
+attributes #0 = { optsize }
+attributes #1 = { minsize optsize }
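
As a cross-check on the folded constants in @fully_unrolled_smaller and
@fully_unrolled_smaller_Oz above: on iteration i the body computes
(16 << (8*i)) | (8*i), which yields 16, 4104, 1048592 and 268435480 for
i = 0..3, matching the CHECK lines. A small standalone program (illustrative
only, not part of the test) verifying that arithmetic:

// Reproduces the constant folding the CHECK lines expect: for iteration i,
// the loop body computes (16 << (8*i)) | (8*i).
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t Expected[4] = {16, 4104, 1048592, 268435480};
  for (uint32_t I = 0; I < 4; ++I) {
    uint32_t Shl0 = I << 3;               // %shl.0 = shl i32 %indvars.iv.tr, 3
    uint32_t Shl1 = 16u << Shl0;          // %shl.1 = shl i32 16, %shl.0
    assert((Shl1 | Shl0) == Expected[I]); // %or = or i32 %shl.1, %shl.0
  }
  return 0;
}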