path: root/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
author    Erik Pilkington <erik.pilkington@gmail.com>  2019-05-31 22:41:36 +0000
committer Erik Pilkington <erik.pilkington@gmail.com>  2019-05-31 22:41:36 +0000
commit    abb2a93c5327ad21d057164539ee06d27f733b71 (patch)
tree      6f7d175e85496b68aa4ff78c7820360a728d8439 /llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
parent    5234921119f95138e2a0802369b7b092ddb7cc4a (diff)
[SimplifyLibCalls] Fold more fortified functions into non-fortified variants
When the object size argument is -1, no checking can be done, so calling the _chk variant is unnecessary. We already did this for a bunch of these functions.

rdar://50797197

Differential revision: https://reviews.llvm.org/D62358

llvm-svn: 362272
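A minimal source-level sketch of the case this fold targets, assuming glibc-style fortification (_FORTIFY_SOURCE); the function below is illustrative and not part of the patch:

// A minimal sketch, assuming glibc-style fortification; illustrative only.
#include <string.h>

void append(char *dst, const char *src) {
  // Under fortification, strcat expands to roughly:
  //   __strcat_chk(dst, src, __builtin_object_size(dst, ...));
  // When the compiler cannot determine dst's size, __builtin_object_size
  // evaluates to (size_t)-1. With an object size of -1 the check can never
  // fail, so this patch lets SimplifyLibCalls rewrite the __strcat_chk call
  // back into a plain strcat.
  strcat(dst, src);
}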
Diffstat (limited to 'llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp')
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp  147
1 file changed, 132 insertions, 15 deletions
diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index f5e3d76faa0..0c95f4ce485 100644
--- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -2825,12 +2825,23 @@ void LibCallSimplifier::eraseFromParent(Instruction *I) {
// Fortified Library Call Optimizations
//===----------------------------------------------------------------------===//
-bool FortifiedLibCallSimplifier::isFortifiedCallFoldable(CallInst *CI,
- unsigned ObjSizeOp,
- unsigned SizeOp,
- bool isString) {
- if (CI->getArgOperand(ObjSizeOp) == CI->getArgOperand(SizeOp))
+bool
+FortifiedLibCallSimplifier::isFortifiedCallFoldable(CallInst *CI,
+ unsigned ObjSizeOp,
+ Optional<unsigned> SizeOp,
+ Optional<unsigned> StrOp,
+ Optional<unsigned> FlagOp) {
+ // If this function takes a flag argument, the implementation may use it to
+ // perform extra checks. Don't fold into the non-checking variant.
+ if (FlagOp) {
+ ConstantInt *Flag = dyn_cast<ConstantInt>(CI->getArgOperand(*FlagOp));
+ if (!Flag || !Flag->isZero())
+ return false;
+ }
+
+ if (SizeOp && CI->getArgOperand(ObjSizeOp) == CI->getArgOperand(*SizeOp))
return true;
+
if (ConstantInt *ObjSizeCI =
dyn_cast<ConstantInt>(CI->getArgOperand(ObjSizeOp))) {
if (ObjSizeCI->isMinusOne())
@@ -2838,23 +2849,27 @@ bool FortifiedLibCallSimplifier::isFortifiedCallFoldable(CallInst *CI,
// If the object size wasn't -1 (unknown), bail out if we were asked to.
if (OnlyLowerUnknownSize)
return false;
- if (isString) {
- uint64_t Len = GetStringLength(CI->getArgOperand(SizeOp));
+ if (StrOp) {
+ uint64_t Len = GetStringLength(CI->getArgOperand(*StrOp));
// If the length is 0 we don't know how long it is and so we can't
// remove the check.
if (Len == 0)
return false;
return ObjSizeCI->getZExtValue() >= Len;
}
- if (ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getArgOperand(SizeOp)))
- return ObjSizeCI->getZExtValue() >= SizeCI->getZExtValue();
+
+ if (SizeOp) {
+ if (ConstantInt *SizeCI =
+ dyn_cast<ConstantInt>(CI->getArgOperand(*SizeOp)))
+ return ObjSizeCI->getZExtValue() >= SizeCI->getZExtValue();
+ }
}
return false;
}
Value *FortifiedLibCallSimplifier::optimizeMemCpyChk(CallInst *CI,
IRBuilder<> &B) {
- if (isFortifiedCallFoldable(CI, 3, 2, false)) {
+ if (isFortifiedCallFoldable(CI, 3, 2)) {
B.CreateMemCpy(CI->getArgOperand(0), 1, CI->getArgOperand(1), 1,
CI->getArgOperand(2));
return CI->getArgOperand(0);
@@ -2864,7 +2879,7 @@ Value *FortifiedLibCallSimplifier::optimizeMemCpyChk(CallInst *CI,
Value *FortifiedLibCallSimplifier::optimizeMemMoveChk(CallInst *CI,
IRBuilder<> &B) {
- if (isFortifiedCallFoldable(CI, 3, 2, false)) {
+ if (isFortifiedCallFoldable(CI, 3, 2)) {
B.CreateMemMove(CI->getArgOperand(0), 1, CI->getArgOperand(1), 1,
CI->getArgOperand(2));
return CI->getArgOperand(0);
@@ -2876,7 +2891,7 @@ Value *FortifiedLibCallSimplifier::optimizeMemSetChk(CallInst *CI,
IRBuilder<> &B) {
// TODO: Try foldMallocMemset() here.
- if (isFortifiedCallFoldable(CI, 3, 2, false)) {
+ if (isFortifiedCallFoldable(CI, 3, 2)) {
Value *Val = B.CreateIntCast(CI->getArgOperand(1), B.getInt8Ty(), false);
B.CreateMemSet(CI->getArgOperand(0), Val, CI->getArgOperand(2), 1);
return CI->getArgOperand(0);
@@ -2902,7 +2917,7 @@ Value *FortifiedLibCallSimplifier::optimizeStrpCpyChk(CallInst *CI,
// st[rp]cpy_chk call which may fail at runtime if the size is too long.
// TODO: It might be nice to get a maximum length out of the possible
// string lengths for varying.
- if (isFortifiedCallFoldable(CI, 2, 1, true)) {
+ if (isFortifiedCallFoldable(CI, 2, None, 1)) {
if (Func == LibFunc_strcpy_chk)
return emitStrCpy(Dst, Src, B, TLI);
else
@@ -2930,10 +2945,10 @@ Value *FortifiedLibCallSimplifier::optimizeStrpCpyChk(CallInst *CI,
Value *FortifiedLibCallSimplifier::optimizeStrpNCpyChk(CallInst *CI,
IRBuilder<> &B,
LibFunc Func) {
- if (isFortifiedCallFoldable(CI, 3, 2, false)) {
+ if (isFortifiedCallFoldable(CI, 3, 2)) {
if (Func == LibFunc_strncpy_chk)
return emitStrNCpy(CI->getArgOperand(0), CI->getArgOperand(1),
- CI->getArgOperand(2), B, TLI);
+ CI->getArgOperand(2), B, TLI);
else
return emitStpNCpy(CI->getArgOperand(0), CI->getArgOperand(1),
CI->getArgOperand(2), B, TLI);
@@ -2942,6 +2957,90 @@ Value *FortifiedLibCallSimplifier::optimizeStrpNCpyChk(CallInst *CI,
return nullptr;
}
+Value *FortifiedLibCallSimplifier::optimizeMemCCpyChk(CallInst *CI,
+ IRBuilder<> &B) {
+ if (isFortifiedCallFoldable(CI, 4, 3))
+ return emitMemCCpy(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), CI->getArgOperand(3), B, TLI);
+
+ return nullptr;
+}
+
+Value *FortifiedLibCallSimplifier::optimizeSNPrintfChk(CallInst *CI,
+ IRBuilder<> &B) {
+ if (isFortifiedCallFoldable(CI, 3, 1, None, 2)) {
+ SmallVector<Value *, 8> VariadicArgs(CI->arg_begin() + 5, CI->arg_end());
+ return emitSNPrintf(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(4), VariadicArgs, B, TLI);
+ }
+
+ return nullptr;
+}
+
+Value *FortifiedLibCallSimplifier::optimizeSPrintfChk(CallInst *CI,
+ IRBuilder<> &B) {
+ if (isFortifiedCallFoldable(CI, 2, None, None, 1)) {
+ SmallVector<Value *, 8> VariadicArgs(CI->arg_begin() + 4, CI->arg_end());
+ return emitSPrintf(CI->getArgOperand(0), CI->getArgOperand(3), VariadicArgs,
+ B, TLI);
+ }
+
+ return nullptr;
+}
+
+Value *FortifiedLibCallSimplifier::optimizeStrCatChk(CallInst *CI,
+ IRBuilder<> &B) {
+ if (isFortifiedCallFoldable(CI, 2))
+ return emitStrCat(CI->getArgOperand(0), CI->getArgOperand(1), B, TLI);
+
+ return nullptr;
+}
+
+Value *FortifiedLibCallSimplifier::optimizeStrLCat(CallInst *CI,
+ IRBuilder<> &B) {
+ if (isFortifiedCallFoldable(CI, 3))
+ return emitStrLCat(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), B, TLI);
+
+ return nullptr;
+}
+
+Value *FortifiedLibCallSimplifier::optimizeStrNCatChk(CallInst *CI,
+ IRBuilder<> &B) {
+ if (isFortifiedCallFoldable(CI, 3))
+ return emitStrNCat(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), B, TLI);
+
+ return nullptr;
+}
+
+Value *FortifiedLibCallSimplifier::optimizeStrLCpyChk(CallInst *CI,
+ IRBuilder<> &B) {
+ if (isFortifiedCallFoldable(CI, 3))
+ return emitStrLCpy(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(2), B, TLI);
+
+ return nullptr;
+}
+
+Value *FortifiedLibCallSimplifier::optimizeVSNPrintfChk(CallInst *CI,
+ IRBuilder<> &B) {
+ if (isFortifiedCallFoldable(CI, 3, 1, None, 2))
+ return emitVSNPrintf(CI->getArgOperand(0), CI->getArgOperand(1),
+ CI->getArgOperand(4), CI->getArgOperand(5), B, TLI);
+
+ return nullptr;
+}
+
+Value *FortifiedLibCallSimplifier::optimizeVSPrintfChk(CallInst *CI,
+ IRBuilder<> &B) {
+ if (isFortifiedCallFoldable(CI, 2, None, None, 1))
+ return emitVSPrintf(CI->getArgOperand(0), CI->getArgOperand(3),
+ CI->getArgOperand(4), B, TLI);
+
+ return nullptr;
+}
+
Value *FortifiedLibCallSimplifier::optimizeCall(CallInst *CI) {
// FIXME: We shouldn't be changing "nobuiltin" or TLI unavailable calls here.
// Some clang users checked for _chk libcall availability using:
@@ -2986,6 +3085,24 @@ Value *FortifiedLibCallSimplifier::optimizeCall(CallInst *CI) {
case LibFunc_stpncpy_chk:
case LibFunc_strncpy_chk:
return optimizeStrpNCpyChk(CI, Builder, Func);
+ case LibFunc_memccpy_chk:
+ return optimizeMemCCpyChk(CI, Builder);
+ case LibFunc_snprintf_chk:
+ return optimizeSNPrintfChk(CI, Builder);
+ case LibFunc_sprintf_chk:
+ return optimizeSPrintfChk(CI, Builder);
+ case LibFunc_strcat_chk:
+ return optimizeStrCatChk(CI, Builder);
+ case LibFunc_strlcat_chk:
+ return optimizeStrLCat(CI, Builder);
+ case LibFunc_strncat_chk:
+ return optimizeStrNCatChk(CI, Builder);
+ case LibFunc_strlcpy_chk:
+ return optimizeStrLCpyChk(CI, Builder);
+ case LibFunc_vsnprintf_chk:
+ return optimizeVSNPrintfChk(CI, Builder);
+ case LibFunc_vsprintf_chk:
+ return optimizeVSPrintfChk(CI, Builder);
default:
break;
}