diff options
| author | Chandler Carruth <chandlerc@gmail.com> | 2012-09-26 10:45:28 +0000 |
|---|---|---|
| committer | Chandler Carruth <chandlerc@gmail.com> | 2012-09-26 10:45:28 +0000 |
| commit | 3e4273dd0c1e6d2ae31945777e8af4ed1398f698 (patch) | |
| tree | 0e6efa098f919bbe453635190af45bc04b27846f /llvm | |
| parent | 871ba7249c104534e056db04516b006b5e3cfec1 (diff) | |
| download | bcm5719-llvm-3e4273dd0c1e6d2ae31945777e8af4ed1398f698.tar.gz bcm5719-llvm-3e4273dd0c1e6d2ae31945777e8af4ed1398f698.zip | |
When rewriting the pointer operand to a load or store which has
alignment guarantees attached, re-compute the alignment so that we
consider offsets which impact alignment.
llvm-svn: 164690
Diffstat (limited to 'llvm')
| -rw-r--r-- | llvm/lib/Transforms/Scalar/SROA.cpp | 6 | ||||
| -rw-r--r-- | llvm/test/Transforms/SROA/alignment.ll | 18 |
2 files changed, 24 insertions, 0 deletions
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index f1cb947011c..67246a5a1cc 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -1992,6 +1992,9 @@ private:
     Value *NewPtr = getAdjustedAllocaPtr(IRB,
                                          LI.getPointerOperand()->getType());
     LI.setOperand(0, NewPtr);
+    if (LI.getAlignment())
+      LI.setAlignment(MinAlign(NewAI.getAlignment(),
+                               BeginOffset - NewAllocaBeginOffset));
     DEBUG(dbgs() << "          to: " << LI << "\n");
 
     deleteIfTriviallyDead(OldOp);
@@ -2043,6 +2046,9 @@ private:
     Value *NewPtr = getAdjustedAllocaPtr(IRB,
                                          SI.getPointerOperand()->getType());
     SI.setOperand(1, NewPtr);
+    if (SI.getAlignment())
+      SI.setAlignment(MinAlign(NewAI.getAlignment(),
+                               BeginOffset - NewAllocaBeginOffset));
     DEBUG(dbgs() << "          to: " << SI << "\n");
 
     deleteIfTriviallyDead(OldOp);
diff --git a/llvm/test/Transforms/SROA/alignment.ll b/llvm/test/Transforms/SROA/alignment.ll
index 1223be362b6..953f5118ab5 100644
--- a/llvm/test/Transforms/SROA/alignment.ll
+++ b/llvm/test/Transforms/SROA/alignment.ll
@@ -28,6 +28,24 @@ entry:
   ret void
 }
 
+define void @test2() {
+; CHECK: @test2
+; CHECK: alloca i16, align 2
+; CHECK: load i8* %{{.*}}, align 1
+; CHECK: store i8 42, i8* %{{.*}}, align 1
+; CHECK: ret void
+
+entry:
+  %a = alloca { i8, i8, i8, i8 }, align 2
+  %gep1 = getelementptr { i8, i8, i8, i8 }* %a, i32 0, i32 1
+  %cast1 = bitcast i8* %gep1 to i16*
+  store volatile i16 0, i16* %cast1
+  %gep2 = getelementptr { i8, i8, i8, i8 }* %a, i32 0, i32 2
+  %result = load i8* %gep2, align 2
+  store i8 42, i8* %gep2, align 2
+  ret void
+}
+
 define void @PR13920(<2 x i64>* %a, i16* %b) {
 ; Test that alignments on memcpy intrinsics get propagated to loads and stores.
 ; CHECK: @PR13920

