summaryrefslogtreecommitdiffstats
path: root/llvm/lib
diff options
context:
space:
mode:
authorMatt Arsenault <Matthew.Arsenault@amd.com>2016-02-02 19:18:53 +0000
committerMatt Arsenault <Matthew.Arsenault@amd.com>2016-02-02 19:18:53 +0000
commitad1348459fd824e45f62bb099521249b6a523fac (patch)
tree56188b86763ecb3bf3b2374c79caf0c398c388ae /llvm/lib
parent853a1fc6d9d433c9d68f6ecd04477bf120e0cb73 (diff)
downloadbcm5719-llvm-ad1348459fd824e45f62bb099521249b6a523fac.tar.gz
bcm5719-llvm-ad1348459fd824e45f62bb099521249b6a523fac.zip
AMDGPU: Whitelist handled intrinsics
We shouldn't crash on unhandled intrinsics. Also simplify the failure handling in the loop. llvm-svn: 259546
Diffstat (limited to 'llvm/lib')
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp44
1 file changed, 36 insertions(+), 8 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index cc09ced8f6e..e4f2d5e7e43 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -399,15 +399,37 @@ static bool tryPromoteAllocaToVector(AllocaInst *Alloca) {
return true;
}
+static bool isCallPromotable(CallInst *CI) {
+ // TODO: We might be able to handle some cases where the callee is a
+ // constantexpr bitcast of a function.
+ if (!CI->getCalledFunction())
+ return false;
+
+ IntrinsicInst *II = dyn_cast<IntrinsicInst>(CI);
+ if (!II)
+ return false;
+
+ switch (II->getIntrinsicID()) {
+ case Intrinsic::memcpy:
+ case Intrinsic::memset:
+ case Intrinsic::lifetime_start:
+ case Intrinsic::lifetime_end:
+ case Intrinsic::invariant_start:
+ case Intrinsic::invariant_end:
+ case Intrinsic::invariant_group_barrier:
+ return true;
+ default:
+ return false;
+ }
+}
+
static bool collectUsesWithPtrTypes(Value *Val, std::vector<Value*> &WorkList) {
- bool Success = true;
for (User *User : Val->users()) {
- if(std::find(WorkList.begin(), WorkList.end(), User) != WorkList.end())
+ if (std::find(WorkList.begin(), WorkList.end(), User) != WorkList.end())
continue;
+
if (CallInst *CI = dyn_cast<CallInst>(User)) {
- // TODO: We might be able to handle some cases where the callee is a
- // constantexpr bitcast of a function.
- if (!CI->getCalledFunction())
+ if (!isCallPromotable(CI))
return false;
WorkList.push_back(User);
@@ -429,10 +451,11 @@ static bool collectUsesWithPtrTypes(Value *Val, std::vector<Value*> &WorkList) {
continue;
WorkList.push_back(User);
-
- Success &= collectUsesWithPtrTypes(User, WorkList);
+ if (!collectUsesWithPtrTypes(User, WorkList))
+ return false;
}
- return Success;
+
+ return true;
}
void AMDGPUPromoteAlloca::visitAlloca(AllocaInst &I) {
@@ -521,6 +544,11 @@ void AMDGPUPromoteAlloca::visitAlloca(AllocaInst &I) {
IntrinsicInst *Intr = dyn_cast<IntrinsicInst>(Call);
if (!Intr) {
+ // FIXME: What is this for? It doesn't make sense to promote arbitrary
+ // function calls. If the call is to a defined function that can also be
+ // promoted, we should be able to do this once that function is also
+ // rewritten.
+
std::vector<Type*> ArgTypes;
for (unsigned ArgIdx = 0, ArgEnd = Call->getNumArgOperands();
ArgIdx != ArgEnd; ++ArgIdx) {
OpenPOWER on IntegriCloud