| author | Hal Finkel <hfinkel@anl.gov> | 2014-09-07 18:57:58 +0000 |
|---|---|---|
| committer | Hal Finkel <hfinkel@anl.gov> | 2014-09-07 18:57:58 +0000 |
| commit | 60db05896acea81b57a6678fb6166a9c35151571 (patch) | |
| tree | dde74426ebfce4a034f426d37473e0f920631dbf /llvm/test/Transforms | |
| parent | 88d5d9c2dadbd5d928ddd87625363447b2adb74f (diff) | |
Make use of @llvm.assume in ValueTracking (computeKnownBits, etc.)
This change, which allows @llvm.assume to be used from within computeKnownBits
(and other associated functions in ValueTracking), adds some optional
parameters to computeKnownBits and friends. These functions now optionally
take a "context" instruction pointer, an AssumptionTracker pointer, and a
DomTree pointer; most of the changes simply pass this new information along
when it is readily available in InstSimplify, InstCombine, etc.
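As a rough sketch of what that plumbing looks like (toy types and a hypothetical name, not LLVM's actual headers), the new state can ride along as trailing parameters that default to null, so existing callers keep compiling unchanged:

```cpp
// Minimal, self-contained sketch of the parameter plumbing described above.
// The types below are opaque stand-ins for the LLVM classes named in the
// commit message; the function name and signature are illustrative only.
#include <cstdint>

struct Value;
struct Instruction;
struct AssumptionTracker;
struct DominatorTree;

void computeKnownBitsSketch(const Value *V, uint64_t &KnownZero, uint64_t &KnownOne,
                            unsigned Depth = 0,
                            AssumptionTracker *AT = nullptr,    // new, optional
                            const Instruction *CxtI = nullptr,  // new, optional
                            const DominatorTree *DT = nullptr)  // new, optional
{
  // Real logic elided; the point is that callers such as InstSimplify and
  // InstCombine can forward AT/CxtI/DT whenever they happen to have them.
  (void)V; (void)Depth; (void)AT; (void)CxtI; (void)DT;
  KnownZero = KnownOne = 0;
}

int main() {
  uint64_t KZ = 0, KO = 0;
  computeKnownBitsSketch(nullptr, KZ, KO);                    // old-style caller
  computeKnownBitsSketch(nullptr, KZ, KO, 0, nullptr, nullptr, nullptr); // with context
}
```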
As explained below, the significant conceptual change is that the known
properties of a value may now depend on the control-flow location of the use:
we care whether the @llvm.assume dominates the use, because assumptions have
control-flow dependencies. This means that, when we ask whether bits of a
value are known, we might get different answers for different uses.
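A tiny self-contained illustration of that point (toy types, not LLVM code): given an assumption like assume((x & 7) == 1), a use dominated by the assume learns the low three bits of x, while a use the assume does not dominate learns nothing.

```cpp
// Toy model: the answer to "what do we know about x?" depends on whether the
// assume((x & 7) == 1) dominates the use being asked about.
#include <cstdint>
#include <cstdio>

struct KnownBits { uint8_t Zero = 0, One = 0; };  // 8-bit toy lattice

KnownBits knownBitsAt(bool AssumeDominatesUse) {
  KnownBits K;
  if (AssumeDominatesUse) {
    K.One  |= 0b001;  // inside the mask 0b111 the value must equal 0b001
    K.Zero |= 0b110;
  }
  return K;
}

int main() {
  KnownBits NotDominated = knownBitsAt(false);
  KnownBits Dominated    = knownBitsAt(true);
  std::printf("use not dominated by the assume: Zero=%x One=%x\n",
              (unsigned)NotDominated.Zero, (unsigned)NotDominated.One);
  std::printf("use dominated by the assume:     Zero=%x One=%x\n",
              (unsigned)Dominated.Zero, (unsigned)Dominated.One);
}
```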
The significant changes are all in ValueTracking, and there are two main ones.
First, as with the rest of the code, the new parameters need to be passed
around; to make this easier, I grouped them into a structure and made internal
static versions of the relevant functions that take this structure as a
parameter. Second, the new code does what you might expect: it looks for
@llvm.assume calls that make use (often indirectly) of the value we're trying
to learn something about, attempts to pattern-match that expression, and uses
the result if successful. Because the relevant @llvm.assume calls are found
through the AssumptionTracker, this lookup is not expensive.
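Sketched with hypothetical names (the actual ValueTracking internals may differ), the grouped state might look roughly like this, with the public entry point building the query object and forwarding to an internal static worker:

```cpp
// Illustrative only: a query object bundling the new state, including the set
// of assume calls already being used (to stop a query from recursing on the
// very assumption it came from).
#include <cstdint>
#include <set>

struct Value;
struct Instruction;
struct AssumptionTracker;
struct DominatorTree;

struct QuerySketch {
  AssumptionTracker *AT = nullptr;
  const Instruction *CxtI = nullptr;
  const DominatorTree *DT = nullptr;
  std::set<const Value *> ExcludedAssumes;  // assumes already on the query stack
};

static void computeKnownBitsImpl(const Value *V, uint64_t &KnownZero,
                                 uint64_t &KnownOne, unsigned Depth,
                                 const QuerySketch &Q) {
  // Real logic elided; the public computeKnownBits entry point would build a
  // QuerySketch from its optional parameters and forward everything here.
  (void)V; (void)Depth; (void)Q;
  KnownZero = KnownOne = 0;
}

int main() {
  QuerySketch Q;  // AT/CxtI/DT left null for this sketch
  uint64_t KZ = 0, KO = 0;
  computeKnownBitsImpl(nullptr, KZ, KO, /*Depth=*/0, Q);
}
```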
Part of the structure being passed around inside ValueTracking is a set of
already-considered @llvm.assume calls. This prevents a query that uses, for
example, assume(a == b) from recursing on itself. The context instruction and
DomTree parameters are used to find applicable assumptions: an assumption must
either dominate the context instruction or come after it deterministically.
For the latter, we handle only the case where the assumption and the context
instruction are in the same block, and we must exclude assumptions from being
used to simplify their own ephemeral values (those that contribute only to the
assumption); otherwise the assumption would prove its own feeding comparison
trivially true and would then be removed.
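A toy model of that validity rule (hypothetical names and a straight-line stand-in for the dominator tree, not the real implementation):

```cpp
// Sketch: an assumption may be used at a context instruction if it dominates
// the context, or if both sit in the same block (so the assume is reached
// whenever the context is), provided the context is not one of the
// assumption's own ephemeral values.
#include <set>
#include <string>

struct ToyInst {
  std::string Name;
  int Block;   // which basic block the instruction lives in
  int Index;   // position within that block
};

// Stand-in for a dominator-tree query in a straight-line toy CFG.
bool dominates(const ToyInst &A, const ToyInst &B) {
  return A.Block == B.Block && A.Index < B.Index;
}

bool isValidAssumeForContextSketch(const ToyInst &Assume, const ToyInst &Cxt,
                                   const std::set<std::string> &EphemeralOfAssume) {
  if (dominates(Assume, Cxt))
    return true;                                   // assume executes before the context
  if (Assume.Block == Cxt.Block)                   // assume comes later but is still
    return !EphemeralOfAssume.count(Cxt.Name);     // reached, unless the context only
  return false;                                    // feeds the assume itself
}

int main() {
  ToyInst Assume{"assume", /*Block=*/0, /*Index=*/3};
  ToyInst EarlierUse{"load", 0, 1};     // ordinary use earlier in the same block
  ToyInst EphemeralCmp{"cmp", 0, 2};    // feeds only the assume
  std::set<std::string> Ephemeral = {"cmp"};
  bool UseOK  = isValidAssumeForContextSketch(Assume, EarlierUse, Ephemeral);   // true
  bool SelfOK = isValidAssumeForContextSketch(Assume, EphemeralCmp, Ephemeral); // false
  return (UseOK && !SelfOK) ? 0 : 1;
}
```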
This commit adds the plumbing and the logic for a simple masked-bit propagation
(just enough to write a regression test). Future commits add more patterns
(and, correspondingly, more regression tests).
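For illustration, the masked-bit propagation boils down to the following arithmetic (a standalone sketch with assumed constants, not the actual ValueTracking code): from assume((v & Mask) == C), every bit covered by Mask is known, equal to the corresponding bit of C.

```cpp
// Sketch of the masked-bit propagation pattern this commit introduces.
#include <cassert>
#include <cstdint>
#include <cstdio>

struct Known { uint64_t Zero = 0, One = 0; };

Known knownBitsFromMaskedCompare(uint64_t Mask, uint64_t C) {
  assert((C & ~Mask) == 0 && "the comparison could never be true otherwise");
  Known K;
  K.One  = C;          // bits set in C are known one
  K.Zero = Mask & ~C;  // bits covered by the mask but clear in C are known zero
  return K;
}

int main() {
  // As in the tests below: assume((ptr & 31) == 0) means the low 5 bits are
  // known zero, i.e. the pointer is 32-byte aligned.
  Known K = knownBitsFromMaskedCompare(/*Mask=*/31, /*C=*/0);
  std::printf("KnownZero=0x%llx KnownOne=0x%llx\n",
              (unsigned long long)K.Zero, (unsigned long long)K.One);
}
```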
llvm-svn: 217342
Diffstat (limited to 'llvm/test/Transforms')
| -rw-r--r-- | llvm/test/Transforms/InstCombine/assume-loop-align.ll | 47 |
| -rw-r--r-- | llvm/test/Transforms/InstCombine/assume.ll | 136 |
2 files changed, 183 insertions, 0 deletions
```diff
diff --git a/llvm/test/Transforms/InstCombine/assume-loop-align.ll b/llvm/test/Transforms/InstCombine/assume-loop-align.ll
new file mode 100644
index 00000000000..19190de2cdd
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/assume-loop-align.ll
@@ -0,0 +1,47 @@
+; RUN: opt -domtree -instcombine -loops -S < %s | FileCheck %s
+; Note: The -loops above can be anything that requires the domtree, and is
+; necessary to work around a pass-manager bug.
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+; Function Attrs: nounwind uwtable
+define void @foo(i32* %a, i32* %b) #0 {
+entry:
+  %ptrint = ptrtoint i32* %a to i64
+  %maskedptr = and i64 %ptrint, 63
+  %maskcond = icmp eq i64 %maskedptr, 0
+  tail call void @llvm.assume(i1 %maskcond)
+  %ptrint1 = ptrtoint i32* %b to i64
+  %maskedptr2 = and i64 %ptrint1, 63
+  %maskcond3 = icmp eq i64 %maskedptr2, 0
+  tail call void @llvm.assume(i1 %maskcond3)
+  br label %for.body
+
+; CHECK-LABEL: @foo
+; CHECK: load i32* {{.*}} align 64
+; CHECK: store i32 {{.*}} align 64
+; CHECK: ret
+
+for.body:                                         ; preds = %entry, %for.body
+  %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
+  %arrayidx = getelementptr inbounds i32* %b, i64 %indvars.iv
+  %0 = load i32* %arrayidx, align 4
+  %add = add nsw i32 %0, 1
+  %arrayidx5 = getelementptr inbounds i32* %a, i64 %indvars.iv
+  store i32 %add, i32* %arrayidx5, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 16
+  %1 = trunc i64 %indvars.iv.next to i32
+  %cmp = icmp slt i32 %1, 1648
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:                                          ; preds = %for.body
+  ret void
+}
+
+; Function Attrs: nounwind
+declare void @llvm.assume(i1) #1
+
+attributes #0 = { nounwind uwtable }
+attributes #1 = { nounwind }
+
diff --git a/llvm/test/Transforms/InstCombine/assume.ll b/llvm/test/Transforms/InstCombine/assume.ll
index d65a8f7bc5e..286ca1e8d77 100644
--- a/llvm/test/Transforms/InstCombine/assume.ll
+++ b/llvm/test/Transforms/InstCombine/assume.ll
@@ -2,6 +2,44 @@
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
 
+; Function Attrs: nounwind uwtable
+define i32 @foo1(i32* %a) #0 {
+entry:
+  %0 = load i32* %a, align 4
+
+; Check that the alignment has been upgraded and that the assume has not
+; been removed:
+; CHECK-LABEL: @foo1
+; CHECK-DAG: load i32* %a, align 32
+; CHECK-DAG: call void @llvm.assume
+; CHECK: ret i32
+
+  %ptrint = ptrtoint i32* %a to i64
+  %maskedptr = and i64 %ptrint, 31
+  %maskcond = icmp eq i64 %maskedptr, 0
+  tail call void @llvm.assume(i1 %maskcond)
+
+  ret i32 %0
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @foo2(i32* %a) #0 {
+entry:
+; Same check as in @foo1, but make sure it works if the assume is first too.
+; CHECK-LABEL: @foo2
+; CHECK-DAG: load i32* %a, align 32
+; CHECK-DAG: call void @llvm.assume
+; CHECK: ret i32
+
+  %ptrint = ptrtoint i32* %a to i64
+  %maskedptr = and i64 %ptrint, 31
+  %maskcond = icmp eq i64 %maskedptr, 0
+  tail call void @llvm.assume(i1 %maskcond)
+
+  %0 = load i32* %a, align 4
+  ret i32 %0
+}
+
 ; Function Attrs: nounwind
 declare void @llvm.assume(i1) #1
 
@@ -38,6 +76,104 @@ entry:
   ret i32 5
 }
 
+define i32 @bar1(i32 %a) #0 {
+entry:
+  %and1 = and i32 %a, 3
+
+; CHECK-LABEL: @bar1
+; CHECK: call void @llvm.assume
+; CHECK: ret i32 1
+
+  %and = and i32 %a, 7
+  %cmp = icmp eq i32 %and, 1
+  tail call void @llvm.assume(i1 %cmp)
+
+  ret i32 %and1
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @bar2(i32 %a) #0 {
+entry:
+; CHECK-LABEL: @bar2
+; CHECK: call void @llvm.assume
+; CHECK: ret i32 1
+
+  %and = and i32 %a, 7
+  %cmp = icmp eq i32 %and, 1
+  tail call void @llvm.assume(i1 %cmp)
+
+  %and1 = and i32 %a, 3
+  ret i32 %and1
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @bar3(i32 %a, i1 %x, i1 %y) #0 {
+entry:
+  %and1 = and i32 %a, 3
+
+; Don't be fooled by other assumes around.
+; CHECK-LABEL: @bar3
+; CHECK: call void @llvm.assume
+; CHECK: ret i32 1
+
+  tail call void @llvm.assume(i1 %x)
+
+  %and = and i32 %a, 7
+  %cmp = icmp eq i32 %and, 1
+  tail call void @llvm.assume(i1 %cmp)
+
+  tail call void @llvm.assume(i1 %y)
+
+  ret i32 %and1
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @bar4(i32 %a, i32 %b) {
+entry:
+  %and1 = and i32 %b, 3
+
+; CHECK-LABEL: @bar4
+; CHECK: call void @llvm.assume
+; CHECK: call void @llvm.assume
+; CHECK: ret i32 1
+
+  %and = and i32 %a, 7
+  %cmp = icmp eq i32 %and, 1
+  tail call void @llvm.assume(i1 %cmp)
+
+  %cmp2 = icmp eq i32 %a, %b
+  tail call void @llvm.assume(i1 %cmp2)
+
+  ret i32 %and1
+}
+
+define i32 @icmp1(i32 %a) #0 {
+entry:
+  %cmp = icmp sgt i32 %a, 5
+  tail call void @llvm.assume(i1 %cmp)
+  %conv = zext i1 %cmp to i32
+  ret i32 %conv
+
+; CHECK-LABEL: @icmp1
+; CHECK: call void @llvm.assume
+; CHECK: ret i32 1
+
+}
+
+; Function Attrs: nounwind uwtable
+define i32 @icmp2(i32 %a) #0 {
+entry:
+  %cmp = icmp sgt i32 %a, 5
+  tail call void @llvm.assume(i1 %cmp)
+  %0 = zext i1 %cmp to i32
+  %lnot.ext = xor i32 %0, 1
+  ret i32 %lnot.ext
+
+; CHECK-LABEL: @icmp2
+; CHECK: call void @llvm.assume
+; CHECK: ret i32 0
+}
+
 attributes #0 = { nounwind uwtable }
 attributes #1 = { nounwind }
```

