diff options
| author | Anna Thomas <anna@azul.com> | 2016-06-23 20:22:22 +0000 |
|---|---|---|
| committer | Anna Thomas <anna@azul.com> | 2016-06-23 20:22:22 +0000 |
| commit | 31a0b2088f37821b061ea2780e7ad804749f3647 (patch) | |
| tree | ba63d9293ec388b43717dcdaa5c09e5a2579cff5 /llvm/test | |
| parent | fb780bfc35968bede8c23331a7505ac11ef2c1fb (diff) | |
| download | bcm5719-llvm-31a0b2088f37821b061ea2780e7ad804749f3647.tar.gz bcm5719-llvm-31a0b2088f37821b061ea2780e7ad804749f3647.zip | |
InstCombine rule to fold trunc when value available
Summary:
This instcombine rule folds away trunc operations when the truncated value is already available from a prior load or store.
This kind of code can be generated as a result of GVN widening the load, or it can appear directly in the source code.
Reviewers: reames, majnemer, sanjoy
Subscribers: llvm-commits
Differential Revision: http://reviews.llvm.org/D21246
llvm-svn: 273608
Diffstat (limited to 'llvm/test')
| -rw-r--r-- | llvm/test/Transforms/InstCombine/trunc.ll | 57 |
1 files changed, 57 insertions, 0 deletions
diff --git a/llvm/test/Transforms/InstCombine/trunc.ll b/llvm/test/Transforms/InstCombine/trunc.ll index 2019b3a6df4..63c25145f7a 100644 --- a/llvm/test/Transforms/InstCombine/trunc.ll +++ b/llvm/test/Transforms/InstCombine/trunc.ll @@ -181,3 +181,60 @@ bb1: bb2: unreachable } + +declare void @consume(i8) readonly +define i1 @trunc_load_store(i8* align 2 %a) { + store i8 0, i8 *%a, align 2 + %bca = bitcast i8* %a to i16* + %wide.load = load i16, i16* %bca, align 2 + %lowhalf.1 = trunc i16 %wide.load to i8 + call void @consume(i8 %lowhalf.1) + %cmp.2 = icmp ult i16 %wide.load, 256 + ret i1 %cmp.2 +; CHECK-LABEL: @trunc_load_store +; CHECK-NOT: trunc +; CHECK: call void @consume(i8 0) +} + + +; The trunc can be replaced with the load value. +define i1 @trunc_load_load(i8* align 2 %a) { + %pload = load i8, i8* %a, align 2 + %bca = bitcast i8* %a to i16* + %wide.load = load i16, i16* %bca, align 2 + %lowhalf = trunc i16 %wide.load to i8 + call void @consume(i8 %lowhalf) + call void @consume(i8 %pload) + %cmp.2 = icmp ult i16 %wide.load, 256 + ret i1 %cmp.2 +; CHECK-LABEL: @trunc_load_load +; CHECK-NOT: trunc +} + +; trunc should not be replaced since atomic load %wide.load has more than one use. +; different values can be seen by the uses of %wide.load in case of race. 
+define i1 @trunc_atomic_loads(i8* align 2 %a) { + %pload = load atomic i8, i8* %a unordered, align 2 + %bca = bitcast i8* %a to i16* + %wide.load = load atomic i16, i16* %bca unordered, align 2 + %lowhalf = trunc i16 %wide.load to i8 + call void @consume(i8 %lowhalf) + call void @consume(i8 %pload) + %cmp.2 = icmp ult i16 %wide.load, 256 + ret i1 %cmp.2 +; CHECK-LABEL: @trunc_atomic_loads +; CHECK: trunc +} + +; trunc cannot be replaced since store size is not trunc result size +define i1 @trunc_different_size_load(i16 * align 2 %a) { + store i16 0, i16 *%a, align 2 + %bca = bitcast i16* %a to i32* + %wide.load = load i32, i32* %bca, align 2 + %lowhalf = trunc i32 %wide.load to i8 + call void @consume(i8 %lowhalf) + %cmp.2 = icmp ult i32 %wide.load, 256 + ret i1 %cmp.2 +; CHECK-LABEL: @trunc_different_size_load +; CHECK: %lowhalf = trunc i32 %wide.load to i8 +} |

