author    Michael Zolotukhin <mzolotukhin@apple.com>    2015-03-17 19:37:28 +0000
committer Michael Zolotukhin <mzolotukhin@apple.com>    2015-03-17 19:37:28 +0000
commit    c3d60efb1de810325a26d864f94417425066c76f (patch)
tree      118c3be67e7cf076aeb7d4fd807bf98a18fb6b6f
parent    5e1c9ae34f5a6ab69bae6a1d3b2a429f8a2ba4d9 (diff)
TTI: Honour cost model for estimating cost of vector-intrinsic and calls.
Review: http://reviews.llvm.org/D8096

llvm-svn: 232528
-rw-r--r--  llvm/include/llvm/CodeGen/BasicTTIImpl.h                  42
-rw-r--r--  llvm/test/Analysis/CostModel/X86/intrinsic-cost.ll         4
-rw-r--r--  llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.ll  28
3 files changed, 46 insertions(+), 28 deletions(-)
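
The substance of the change, in outline: rather than hard-coding the scalarized cost of a vector intrinsic (ScalarCalls + ScalarizationCost on the generic path, 10 * Cost * Num on the libcall path), BasicTTIImpl now asks the cost model itself for the price of one scalar call and charges ScalarCalls * ScalarCost + ScalarizationCost. A minimal standalone sketch of that formula follows; the struct and field names are illustrative, not the LLVM API:

    #include <algorithm>

    // Inputs a target's cost model would supply; names are hypothetical.
    struct ScalarizationInputs {
      unsigned RetLanes;     // lanes in the vector return type
      unsigned MaxArgLanes;  // lanes in the widest vector argument
      unsigned ScalarCost;   // cost model's price for one scalar call
      unsigned InsertCost;   // inserting every result lane into a vector
      unsigned ExtractCost;  // extracting every argument lane
    };

    // Mirrors the patched formula: N scalar calls plus the cost of moving
    // lanes in and out of vectors, instead of a fixed fudge factor.
    unsigned scalarizedIntrinsicCost(const ScalarizationInputs &In) {
      unsigned ScalarCalls = std::max(In.RetLanes, In.MaxArgLanes);
      if (ScalarCalls == 1)
        return 1; // a lone scalar call is assumed cheap, as in the patch
      unsigned ScalarizationCost = In.InsertCost + In.ExtractCost;
      return ScalarCalls * In.ScalarCost + ScalarizationCost;
    }
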
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index d75b5eb95d0..4b80c482492 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -528,18 +528,29 @@ public:
       // Assume that we need to scalarize this intrinsic.
       unsigned ScalarizationCost = 0;
       unsigned ScalarCalls = 1;
+      Type *ScalarRetTy = RetTy;
       if (RetTy->isVectorTy()) {
         ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
         ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
+        ScalarRetTy = RetTy->getScalarType();
       }
+      SmallVector<Type *, 4> ScalarTys;
       for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
-        if (Tys[i]->isVectorTy()) {
-          ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
-          ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
+        Type *Ty = Tys[i];
+        if (Ty->isVectorTy()) {
+          ScalarizationCost += getScalarizationOverhead(Ty, false, true);
+          ScalarCalls = std::max(ScalarCalls, Ty->getVectorNumElements());
+          Ty = Ty->getScalarType();
         }
+        ScalarTys.push_back(Ty);
       }
+      if (ScalarCalls == 1)
+        return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.
+
+      unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
+          IID, ScalarRetTy, ScalarTys);
 
-      return ScalarCalls + ScalarizationCost;
+      return ScalarCalls * ScalarCost + ScalarizationCost;
     }
     // Look for intrinsics that can be lowered directly or turned into a scalar
     // intrinsic call.
@@ -649,10 +660,25 @@ public:
     // this will emit a costly libcall, adding call overhead and spills. Make it
     // very expensive.
     if (RetTy->isVectorTy()) {
-      unsigned Num = RetTy->getVectorNumElements();
-      unsigned Cost = static_cast<T *>(this)->getIntrinsicInstrCost(
-          IID, RetTy->getScalarType(), Tys);
-      return 10 * Cost * Num;
+      unsigned ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
+      unsigned ScalarCalls = RetTy->getVectorNumElements();
+      SmallVector<Type *, 4> ScalarTys;
+      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
+        Type *Ty = Tys[i];
+        if (Ty->isVectorTy())
+          Ty = Ty->getScalarType();
+        ScalarTys.push_back(Ty);
+      }
+      unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
+          IID, RetTy->getScalarType(), ScalarTys);
+      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
+        if (Tys[i]->isVectorTy()) {
+          ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
+          ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
+        }
+      }
+
+      return ScalarCalls * ScalarCost + ScalarizationCost;
     }
 
     // This is going to be turned into a library call, make it expensive.
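
For orientation, a hedged sketch of how a client such as the vectorizer reaches the hook patched above, assuming the 2015-era TargetTransformInfo::getIntrinsicInstrCost(ID, RetTy, Tys) signature this diff targets; the function below is illustrative, not part of the change:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/Analysis/TargetTransformInfo.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Type.h"

    using namespace llvm;

    // Price a <4 x float> llvm.ceil call -- the query the cost-model test
    // below checks. TTI must come from a configured analysis pipeline;
    // this only shows the shape of the call.
    unsigned costOfV4F32Ceil(LLVMContext &Ctx, const TargetTransformInfo &TTI) {
      Type *V4F32 = VectorType::get(Type::getFloatTy(Ctx), 4);
      SmallVector<Type *, 1> ArgTys = {V4F32};
      return TTI.getIntrinsicInstrCost(Intrinsic::ceil, V4F32, ArgTys);
    }
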
diff --git a/llvm/test/Analysis/CostModel/X86/intrinsic-cost.ll b/llvm/test/Analysis/CostModel/X86/intrinsic-cost.ll
index cbe409d7f47..efc1263373e 100644
--- a/llvm/test/Analysis/CostModel/X86/intrinsic-cost.ll
+++ b/llvm/test/Analysis/CostModel/X86/intrinsic-cost.ll
@@ -22,7 +22,7 @@ for.end:                                        ; preds = %vector.body
   ret void
 
 ; CORE2: Printing analysis 'Cost Model Analysis' for function 'test1':
-; CORE2: Cost Model: Found an estimated cost of 400 for instruction:   %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.load)
+; CORE2: Cost Model: Found an estimated cost of 46 for instruction:   %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.load)
 
 ; COREI7: Printing analysis 'Cost Model Analysis' for function 'test1':
 ; COREI7: Cost Model: Found an estimated cost of 1 for instruction:   %2 = call <4 x float> @llvm.ceil.v4f32(<4 x float> %wide.load)
@@ -50,7 +50,7 @@ for.end:                                        ; preds = %vector.body
   ret void
 
 ; CORE2: Printing analysis 'Cost Model Analysis' for function 'test2':
-; CORE2: Cost Model: Found an estimated cost of 400 for instruction:   %2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.load)
+; CORE2: Cost Model: Found an estimated cost of 46 for instruction:   %2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.load)
 
 ; COREI7: Printing analysis 'Cost Model Analysis' for function 'test2':
 ; COREI7: Cost Model: Found an estimated cost of 1 for instruction:   %2 = call <4 x float> @llvm.nearbyint.v4f32(<4 x float> %wide.load)
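
Where the new CORE2 figure plausibly comes from, reading it against the patched code: scalarizing <4 x float> gives ScalarCalls = 4; the base implementation prices each scalar call at 10 because it lowers to a library call; 4 * 10 = 40 for the calls, leaving 6 as the insert/extract scalarization overhead, hence 46. The old path charged 10 * Cost * Num = 10 * 10 * 4 = 400. The 40/6 split is an inference from the code above, not stated in the commit.
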
diff --git a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.ll b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.ll
index 10307568b85..c2a0fed1fe6 100644
--- a/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.ll
+++ b/llvm/test/Transforms/LoopVectorize/X86/vect.omp.force.ll
@@ -15,9 +15,9 @@ target triple = "x86_64-apple-macosx10.8.0"
 ; The source code for the test:
 ;
 ; #include <math.h>
-; void foo(float* restrict A, float * restrict B, int size)
+; void foo(float* restrict A, float * restrict B)
 ; {
-;   for (int i = 0; i < size; ++i) A[i] = sinf(B[i]);
+;   for (int i = 0; i < 1000; i+=2) A[i] = sinf(B[i]);
 ; }
 ;
 
@@ -25,24 +25,20 @@ target triple = "x86_64-apple-macosx10.8.0"
 ; This loop will be vectorized, although the scalar cost is lower than any of vector costs, but vectorization is explicitly forced in metadata.
 ;
 
-define void @vectorized(float* noalias nocapture %A, float* noalias nocapture %B, i32 %size) {
+define void @vectorized(float* noalias nocapture %A, float* noalias nocapture %B) {
 entry:
-  %cmp6 = icmp sgt i32 %size, 0
-  br i1 %cmp6, label %for.body.preheader, label %for.end
-
-for.body.preheader:
   br label %for.body
 
 for.body:
-  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
   %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
   %0 = load float, float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !1
   %call = tail call float @llvm.sin.f32(float %0)
   %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
   store float %call, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !1
-  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
-  %exitcond = icmp eq i32 %lftr.wideiv, %size
+  %exitcond = icmp eq i32 %lftr.wideiv, 1000
   br i1 %exitcond, label %for.end.loopexit, label %for.body, !llvm.loop !1
 
 for.end.loopexit:
@@ -59,24 +55,20 @@ for.end:
 ; This method will not be vectorized, as scalar cost is lower than any of vector costs.
 ;
 
-define void @not_vectorized(float* noalias nocapture %A, float* noalias nocapture %B, i32 %size) {
+define void @not_vectorized(float* noalias nocapture %A, float* noalias nocapture %B) {
 entry:
-  %cmp6 = icmp sgt i32 %size, 0
-  br i1 %cmp6, label %for.body.preheader, label %for.end
-
-for.body.preheader:
   br label %for.body
 
 for.body:
-  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %for.body.preheader ]
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
   %arrayidx = getelementptr inbounds float, float* %B, i64 %indvars.iv
   %0 = load float, float* %arrayidx, align 4, !llvm.mem.parallel_loop_access !3
   %call = tail call float @llvm.sin.f32(float %0)
   %arrayidx2 = getelementptr inbounds float, float* %A, i64 %indvars.iv
   store float %call, float* %arrayidx2, align 4, !llvm.mem.parallel_loop_access !3
-  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 2
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32
-  %exitcond = icmp eq i32 %lftr.wideiv, %size
+  %exitcond = icmp eq i32 %lftr.wideiv, 1000
   br i1 %exitcond, label %for.end.loopexit, label %for.body, !llvm.loop !3
 
 for.end.loopexit:
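
The diff is truncated here; the tests' metadata definitions sit in the elided tail of the file. For reference, forced-vectorization loop metadata of the kind @vectorized depends on typically has this shape (a sketch, not the file's verbatim contents):

    ; Sketch only -- the test's actual !1/!3 definitions are elided above.
    !1 = !{!1, !2}                               ; loop ID for @vectorized
    !2 = !{!"llvm.loop.vectorize.enable", i1 1}  ; explicitly force vectorization
    !3 = !{!3}                                   ; bare loop ID: @not_vectorized is left to the cost model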