author | Hal Finkel <hfinkel@anl.gov> | 2014-09-11 08:40:17 +0000 |
---|---|---|
committer | Hal Finkel <hfinkel@anl.gov> | 2014-09-11 08:40:17 +0000 |
commit | f83e1f7f6681e02b8f6d661ade76ddf10d55fd40 (patch) | |
tree | c4a38f33869b8d0767ef47da6b6f720a1d323fc6 | |
parent | fdfa8557c0c93ecc20df958addad5445c2e5580b (diff) | |
[AlignmentFromAssumptions] Don't crash just because the target is 32-bit
We used to crash when processing any relevant @llvm.assume on a 32-bit target,
because we would ask ScalarEvolution to subtract SCEV expressions of differing
types (i32 vs. i64). I've copied our 'simple.ll' test, but with the data layout
from arm-linux-gnueabihf, to get some meaningful test coverage here.
llvm-svn: 217574
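For context, a condensed reproducer (taken verbatim from the @foo case in the
new simple32.ll test added below): with this 32-bit data layout, the pointer
difference computed by the pass (DiffSCEV) ends up with type i32, while the
offset from the @llvm.assume mask (OffSCEV) has been sign-extended to i64, and
asking ScalarEvolution to subtract the two used to crash. With this patch, opt
instead annotates the load with the assumed 32-byte alignment:

; Condensed from llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll (added below).
; RUN: opt < %s -alignment-from-assumptions -S | FileCheck %s
target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"

define i32 @foo(i32* nocapture %a) nounwind uwtable readonly {
entry:
  %ptrint = ptrtoint i32* %a to i64
  %maskedptr = and i64 %ptrint, 31
  %maskcond = icmp eq i64 %maskedptr, 0
  tail call void @llvm.assume(i1 %maskcond)
  %0 = load i32* %a, align 4
  ret i32 %0
; CHECK-LABEL: @foo
; CHECK: load i32* {{[^,]+}}, align 32
}

declare void @llvm.assume(i1) nounwind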
-rw-r--r-- | llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp |   4
-rw-r--r-- | llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll | 215
2 files changed, 219 insertions, 0 deletions
diff --git a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
index 7ab16f13811..06c3dfdf637 100644
--- a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
+++ b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
@@ -141,6 +141,10 @@ static unsigned getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
   const SCEV *PtrSCEV = SE->getSCEV(Ptr);
   const SCEV *DiffSCEV = SE->getMinusSCEV(PtrSCEV, AASCEV);
 
+  // On 32-bit platforms, DiffSCEV might now have type i32 -- we've always
+  // sign-extended OffSCEV to i64, so make sure they agree again.
+  DiffSCEV = SE->getNoopOrSignExtend(DiffSCEV, OffSCEV->getType());
+
   // What we really want to know is the overall offset to the aligned
   // address. This address is displaced by the provided offset.
   DiffSCEV = SE->getMinusSCEV(DiffSCEV, OffSCEV);
diff --git a/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll b/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll
new file mode 100644
index 00000000000..166e7ef3893
--- /dev/null
+++ b/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll
@@ -0,0 +1,215 @@
+target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-n32-S64"
+; RUN: opt < %s -alignment-from-assumptions -S | FileCheck %s
+
+define i32 @foo(i32* nocapture %a) nounwind uwtable readonly {
+entry:
+  %ptrint = ptrtoint i32* %a to i64
+  %maskedptr = and i64 %ptrint, 31
+  %maskcond = icmp eq i64 %maskedptr, 0
+  tail call void @llvm.assume(i1 %maskcond)
+  %0 = load i32* %a, align 4
+  ret i32 %0
+
+; CHECK-LABEL: @foo
+; CHECK: load i32* {{[^,]+}}, align 32
+; CHECK: ret i32
+}
+
+define i32 @foo2(i32* nocapture %a) nounwind uwtable readonly {
+entry:
+  %ptrint = ptrtoint i32* %a to i64
+  %offsetptr = add i64 %ptrint, 24
+  %maskedptr = and i64 %offsetptr, 31
+  %maskcond = icmp eq i64 %maskedptr, 0
+  tail call void @llvm.assume(i1 %maskcond)
+  %arrayidx = getelementptr inbounds i32* %a, i64 2
+  %0 = load i32* %arrayidx, align 4
+  ret i32 %0
+
+; CHECK-LABEL: @foo2
+; CHECK: load i32* {{[^,]+}}, align 16
+; CHECK: ret i32
+}
+
+define i32 @foo2a(i32* nocapture %a) nounwind uwtable readonly {
+entry:
+  %ptrint = ptrtoint i32* %a to i64
+  %offsetptr = add i64 %ptrint, 28
+  %maskedptr = and i64 %offsetptr, 31
+  %maskcond = icmp eq i64 %maskedptr, 0
+  tail call void @llvm.assume(i1 %maskcond)
+  %arrayidx = getelementptr inbounds i32* %a, i64 -1
+  %0 = load i32* %arrayidx, align 4
+  ret i32 %0
+
+; CHECK-LABEL: @foo2a
+; CHECK: load i32* {{[^,]+}}, align 32
+; CHECK: ret i32
+}
+
+define i32 @goo(i32* nocapture %a) nounwind uwtable readonly {
+entry:
+  %ptrint = ptrtoint i32* %a to i64
+  %maskedptr = and i64 %ptrint, 31
+  %maskcond = icmp eq i64 %maskedptr, 0
+  tail call void @llvm.assume(i1 %maskcond)
+  %0 = load i32* %a, align 4
+  ret i32 %0
+
+; CHECK-LABEL: @goo
+; CHECK: load i32* {{[^,]+}}, align 32
+; CHECK: ret i32
+}
+
+define i32 @hoo(i32* nocapture %a) nounwind uwtable readonly {
+entry:
+  %ptrint = ptrtoint i32* %a to i64
+  %maskedptr = and i64 %ptrint, 31
+  %maskcond = icmp eq i64 %maskedptr, 0
+  tail call void @llvm.assume(i1 %maskcond)
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
+  %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+  %0 = load i32* %arrayidx, align 4
+  %add = add nsw i32 %0, %r.06
+  %indvars.iv.next = add i64 %indvars.iv, 8
+  %1 = trunc i64 %indvars.iv.next to i32
+  %cmp = icmp slt i32 %1, 2048
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:                                          ; preds = %for.body
+  %add.lcssa = phi i32 [ %add, %for.body ]
+  ret i32 %add.lcssa
+
+; CHECK-LABEL: @hoo
+; CHECK: load i32* %arrayidx, align 32
+; CHECK: ret i32 %add.lcssa
+}
+
+define i32 @joo(i32* nocapture %a) nounwind uwtable readonly {
+entry:
+  %ptrint = ptrtoint i32* %a to i64
+  %maskedptr = and i64 %ptrint, 31
+  %maskcond = icmp eq i64 %maskedptr, 0
+  tail call void @llvm.assume(i1 %maskcond)
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %indvars.iv = phi i64 [ 4, %entry ], [ %indvars.iv.next, %for.body ]
+  %r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
+  %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+  %0 = load i32* %arrayidx, align 4
+  %add = add nsw i32 %0, %r.06
+  %indvars.iv.next = add i64 %indvars.iv, 8
+  %1 = trunc i64 %indvars.iv.next to i32
+  %cmp = icmp slt i32 %1, 2048
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:                                          ; preds = %for.body
+  %add.lcssa = phi i32 [ %add, %for.body ]
+  ret i32 %add.lcssa
+
+; CHECK-LABEL: @joo
+; CHECK: load i32* %arrayidx, align 16
+; CHECK: ret i32 %add.lcssa
+}
+
+define i32 @koo(i32* nocapture %a) nounwind uwtable readonly {
+entry:
+  %ptrint = ptrtoint i32* %a to i64
+  %maskedptr = and i64 %ptrint, 31
+  %maskcond = icmp eq i64 %maskedptr, 0
+  tail call void @llvm.assume(i1 %maskcond)
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
+  %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+  %0 = load i32* %arrayidx, align 4
+  %add = add nsw i32 %0, %r.06
+  %indvars.iv.next = add i64 %indvars.iv, 4
+  %1 = trunc i64 %indvars.iv.next to i32
+  %cmp = icmp slt i32 %1, 2048
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:                                          ; preds = %for.body
+  %add.lcssa = phi i32 [ %add, %for.body ]
+  ret i32 %add.lcssa
+
+; CHECK-LABEL: @koo
+; CHECK: load i32* %arrayidx, align 16
+; CHECK: ret i32 %add.lcssa
+}
+
+define i32 @koo2(i32* nocapture %a) nounwind uwtable readonly {
+entry:
+  %ptrint = ptrtoint i32* %a to i64
+  %maskedptr = and i64 %ptrint, 31
+  %maskcond = icmp eq i64 %maskedptr, 0
+  tail call void @llvm.assume(i1 %maskcond)
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %indvars.iv = phi i64 [ -4, %entry ], [ %indvars.iv.next, %for.body ]
+  %r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]
+  %arrayidx = getelementptr inbounds i32* %a, i64 %indvars.iv
+  %0 = load i32* %arrayidx, align 4
+  %add = add nsw i32 %0, %r.06
+  %indvars.iv.next = add i64 %indvars.iv, 4
+  %1 = trunc i64 %indvars.iv.next to i32
+  %cmp = icmp slt i32 %1, 2048
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:                                          ; preds = %for.body
+  %add.lcssa = phi i32 [ %add, %for.body ]
+  ret i32 %add.lcssa
+
+; CHECK-LABEL: @koo2
+; CHECK: load i32* %arrayidx, align 16
+; CHECK: ret i32 %add.lcssa
+}
+
+define i32 @moo(i32* nocapture %a) nounwind uwtable {
+entry:
+  %ptrint = ptrtoint i32* %a to i64
+  %maskedptr = and i64 %ptrint, 31
+  %maskcond = icmp eq i64 %maskedptr, 0
+  tail call void @llvm.assume(i1 %maskcond)
+  %0 = bitcast i32* %a to i8*
+  tail call void @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 64, i32 4, i1 false)
+  ret i32 undef
+
+; CHECK-LABEL: @moo
+; CHECK: @llvm.memset.p0i8.i64(i8* %0, i8 0, i64 64, i32 32, i1 false)
+; CHECK: ret i32 undef
+}
+
+define i32 @moo2(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {
+entry:
+  %ptrint = ptrtoint i32* %a to i64
+  %maskedptr = and i64 %ptrint, 31
+  %maskcond = icmp eq i64 %maskedptr, 0
+  tail call void @llvm.assume(i1 %maskcond)
+  %ptrint1 = ptrtoint i32* %b to i64
+  %maskedptr3 = and i64 %ptrint1, 127
+  %maskcond4 = icmp eq i64 %maskedptr3, 0
+  tail call void @llvm.assume(i1 %maskcond4)
+  %0 = bitcast i32* %a to i8*
+  %1 = bitcast i32* %b to i8*
+  tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 64, i32 4, i1 false)
+  ret i32 undef
+
+; CHECK-LABEL: @moo2
+; CHECK: @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 64, i32 32, i1 false)
+; CHECK: ret i32 undef
+}
+
+declare void @llvm.assume(i1) nounwind
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) nounwind
+declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture, i64, i32, i1) nounwind