diff options
author | Juneyoung Lee <aqjune@gmail.com> | 2020-01-15 01:11:20 +0900 |
---|---|---|
committer | Juneyoung Lee <aqjune@gmail.com> | 2020-01-15 03:20:53 +0900 |
commit | 3e32b7e12701de772b1cdf855b42253650a1e997 (patch) | |
tree | cfaa7f905c9dcbf2145a9e646ff929892f6ab608 /llvm | |
parent | 40c5bd4212a51216a489fdaaf59060921d677009 (diff) | |
download | bcm5719-llvm-3e32b7e12701de772b1cdf855b42253650a1e997.tar.gz bcm5719-llvm-3e32b7e12701de772b1cdf855b42253650a1e997.zip |
[InstCombine] Let combineLoadToNewType preserve ABI alignment of the load (PR44543)
Summary:
If alignment on `LoadInst` isn't specified, the load is assumed to be ABI-aligned.
And said alignment may be different for different types.
So if we change the load type, but don't pay extra attention to the alignment
(i.e. keep it unspecified), we may either overpromise (if the default alignment
of the new type is higher), or underpromise (if the default alignment
of the new type is smaller).
Thus, if no alignment is specified, we need to manually preserve the implied ABI alignment.
This addresses https://bugs.llvm.org/show_bug.cgi?id=44543 by making combineLoadToNewType preserve ABI alignment of the load.
Reviewers: spatel, lebedev.ri
Subscribers: hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D72710
Diffstat (limited to 'llvm')
-rw-r--r-- | llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp | 9 | ||||
-rw-r--r-- | llvm/test/Transforms/InstCombine/load-bitcast64.ll | 6 |
2 files changed, 11 insertions, 4 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp index dc1ccc4bafc..f2c2d1cdf5a 100644 --- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp +++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp @@ -462,8 +462,15 @@ static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewT NewPtr->getType()->getPointerAddressSpace() == AS)) NewPtr = IC.Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS)); + unsigned Align = LI.getAlignment(); + if (!Align) + // If old load did not have an explicit alignment specified, + // manually preserve the implied (ABI) alignment of the load. + // Else we may inadvertently incorrectly over-promise alignment. + Align = IC.getDataLayout().getABITypeAlignment(LI.getType()); + LoadInst *NewLoad = IC.Builder.CreateAlignedLoad( - NewTy, NewPtr, LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix); + NewTy, NewPtr, Align, LI.isVolatile(), LI.getName() + Suffix); NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID()); copyMetadataForLoad(*NewLoad, LI); return NewLoad; diff --git a/llvm/test/Transforms/InstCombine/load-bitcast64.ll b/llvm/test/Transforms/InstCombine/load-bitcast64.ll index 58bc923da69..270bd6f5f81 100644 --- a/llvm/test/Transforms/InstCombine/load-bitcast64.ll +++ b/llvm/test/Transforms/InstCombine/load-bitcast64.ll @@ -8,7 +8,7 @@ define i64* @test1(i8* %x) { ; CHECK-LABEL: @test1( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64** -; CHECK-NEXT: [[B1:%.*]] = load i64*, i64** [[TMP0]], align 8 +; CHECK-NEXT: [[B1:%.*]] = load i64*, i64** [[TMP0]], align 4 ; CHECK-NEXT: ret i64* [[B1]] ; entry: @@ -57,7 +57,7 @@ define i64 @test4(i8* %x) { ; CHECK-LABEL: @test4( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64* -; CHECK-NEXT: [[B1:%.*]] = load i64, i64* [[TMP0]], align 4 +; CHECK-NEXT: [[B1:%.*]] = load i64, i64* [[TMP0]], align 8 ; CHECK-NEXT: ret i64 [[B1]] ; entry: @@ -88,7 +88,7 @@ define i64 @test6(i8* %x) { ; CHECK-LABEL: @test6( ; CHECK-NEXT: entry: ; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[X:%.*]] to i64* -; CHECK-NEXT: [[B1:%.*]] = load i64, i64* [[TMP0]], align 4 +; CHECK-NEXT: [[B1:%.*]] = load i64, i64* [[TMP0]], align 8 ; CHECK-NEXT: ret i64 [[B1]] ; entry: |