diff options
| author | Amjad Aboud <amjad.aboud@intel.com> | 2018-01-24 12:42:42 +0000 |
|---|---|---|
| committer | Amjad Aboud <amjad.aboud@intel.com> | 2018-01-24 12:42:42 +0000 |
| commit | e4453233d78788989c4bf2ff927a9e67433fb63d (patch) | |
| tree | 852418eb4b403be8210679108562c8ddd6fb0033 /llvm/test | |
| parent | f26df4783132de2a534572a53847716a89d98339 (diff) | |
| download | bcm5719-llvm-e4453233d78788989c4bf2ff927a9e67433fb63d.tar.gz bcm5719-llvm-e4453233d78788989c4bf2ff927a9e67433fb63d.zip | |
[InstCombine] Introducing Aggressive Instruction Combine pass (-aggressive-instcombine).
Combine expression patterns to form expressions with fewer, simpler instructions.
This pass does not modify the CFG.
For example, this pass reduces the width of expressions post-dominated by a TruncInst
into a smaller width when applicable.
It differs from the instcombine pass in that it contains pattern optimizations that
require higher complexity than O(1); thus, it should run fewer times than the
instcombine pass.
Differential Revision: https://reviews.llvm.org/D38313
llvm-svn: 323321
Diffstat (limited to 'llvm/test')
| -rw-r--r-- | llvm/test/Other/new-pm-defaults.ll | 1 | ||||
| -rw-r--r-- | llvm/test/Other/new-pm-lto-defaults.ll | 6 | ||||
| -rw-r--r-- | llvm/test/Other/new-pm-thinlto-defaults.ll | 1 | ||||
| -rw-r--r-- | llvm/test/Transforms/AggressiveInstCombine/trunc_multi_uses.ll | 214 |
4 files changed, 220 insertions, 2 deletions
diff --git a/llvm/test/Other/new-pm-defaults.ll b/llvm/test/Other/new-pm-defaults.ll index 2be8fdeb06f..7305df147fa 100644 --- a/llvm/test/Other/new-pm-defaults.ll +++ b/llvm/test/Other/new-pm-defaults.ll @@ -126,6 +126,7 @@ ; CHECK-O-NEXT: Running analysis: LazyValueAnalysis ; CHECK-O-NEXT: Running pass: CorrelatedValuePropagationPass ; CHECK-O-NEXT: Running pass: SimplifyCFGPass +; CHECK-O3-NEXT: AggressiveInstCombinePass ; CHECK-O-NEXT: Running pass: InstCombinePass ; CHECK-O1-NEXT: Running pass: LibCallsShrinkWrapPass ; CHECK-O2-NEXT: Running pass: LibCallsShrinkWrapPass diff --git a/llvm/test/Other/new-pm-lto-defaults.ll b/llvm/test/Other/new-pm-lto-defaults.ll index 878198d1447..a2d14848117 100644 --- a/llvm/test/Other/new-pm-lto-defaults.ll +++ b/llvm/test/Other/new-pm-lto-defaults.ll @@ -10,7 +10,8 @@ ; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O2 ; RUN: opt -disable-verify -debug-pass-manager \ ; RUN: -passes='lto<O3>' -S %s 2>&1 \ -; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O2 +; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O2 \ +; RUN: --check-prefix=CHECK-O3 ; RUN: opt -disable-verify -debug-pass-manager \ ; RUN: -passes='lto<Os>' -S %s 2>&1 \ ; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O2 @@ -20,7 +21,7 @@ ; RUN: opt -disable-verify -debug-pass-manager \ ; RUN: -passes='lto<O3>' -S %s -passes-ep-peephole='no-op-function' 2>&1 \ ; RUN: | FileCheck %s --check-prefix=CHECK-O --check-prefix=CHECK-O2 \ -; RUN: --check-prefix=CHECK-EP-Peephole +; RUN: --check-prefix=CHECK-O3 --check-prefix=CHECK-EP-Peephole ; CHECK-O: Starting llvm::Module pass manager run. ; CHECK-O-NEXT: Running pass: PassManager<{{.*}}Module @@ -60,6 +61,7 @@ ; CHECK-O2-NEXT: Running pass: DeadArgumentEliminationPass ; CHECK-O2-NEXT: Running pass: ModuleToFunctionPassAdaptor<{{.*}}PassManager{{.*}}> ; CHECK-O2-NEXT: Starting llvm::Function pass manager run. 
+; CHECK-O3-NEXT: Running pass: AggressiveInstCombinePass ; CHECK-O2-NEXT: Running pass: InstCombinePass ; CHECK-EP-Peephole-NEXT: Running pass: NoOpFunctionPass ; CHECK-O2-NEXT: Finished llvm::Function pass manager run. diff --git a/llvm/test/Other/new-pm-thinlto-defaults.ll b/llvm/test/Other/new-pm-thinlto-defaults.ll index c40e46aee6e..9f4e4ae8276 100644 --- a/llvm/test/Other/new-pm-thinlto-defaults.ll +++ b/llvm/test/Other/new-pm-thinlto-defaults.ll @@ -111,6 +111,7 @@ ; CHECK-O-NEXT: Running analysis: LazyValueAnalysis ; CHECK-O-NEXT: Running pass: CorrelatedValuePropagationPass ; CHECK-O-NEXT: Running pass: SimplifyCFGPass +; CHECK-O3-NEXT: Running pass: AggressiveInstCombinePass ; CHECK-O-NEXT: Running pass: InstCombinePass ; CHECK-O1-NEXT: Running pass: LibCallsShrinkWrapPass ; CHECK-O2-NEXT: Running pass: LibCallsShrinkWrapPass diff --git a/llvm/test/Transforms/AggressiveInstCombine/trunc_multi_uses.ll b/llvm/test/Transforms/AggressiveInstCombine/trunc_multi_uses.ll new file mode 100644 index 00000000000..389f77d4c70 --- /dev/null +++ b/llvm/test/Transforms/AggressiveInstCombine/trunc_multi_uses.ll @@ -0,0 +1,214 @@ +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py +; RUN: opt < %s -aggressive-instcombine -S | FileCheck %s +; RUN: opt < %s -passes=aggressive-instcombine -S | FileCheck %s +target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64" + +; Aggressive Instcombine should be able to reduce width of these expressions. + +declare i32 @use32(i32) +declare i32 @use64(i64) +declare <2 x i32> @use32_vec(<2 x i32>) +declare <2 x i32> @use64_vec(<2 x i64>) + +;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; +;; These tests check cases where expression dag post-dominated by TruncInst +;; contains instruction, which has more than one usage. 
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; + +define void @multi_uses_add(i32 %X) { +; CHECK-LABEL: @multi_uses_add( +; CHECK-NEXT: [[A1:%.*]] = zext i32 [[X:%.*]] to i64 +; CHECK-NEXT: [[B1:%.*]] = add i32 [[X]], 15 +; CHECK-NEXT: [[C1:%.*]] = mul i32 [[B1]], [[B1]] +; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use32(i32 [[C1]]) +; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use64(i64 [[A1]]) +; CHECK-NEXT: ret void +; + %A1 = zext i32 %X to i64 + %B1 = add i64 %A1, 15 + %C1 = mul i64 %B1, %B1 + %T1 = trunc i64 %C1 to i32 + call i32 @use32(i32 %T1) + ; make sure zext have another use that is not post-dominated by the TruncInst. + call i32 @use64(i64 %A1) + ret void +} + +define void @multi_uses_or(i32 %X) { +; CHECK-LABEL: @multi_uses_or( +; CHECK-NEXT: [[A1:%.*]] = zext i32 [[X:%.*]] to i64 +; CHECK-NEXT: [[B1:%.*]] = or i32 [[X]], 15 +; CHECK-NEXT: [[C1:%.*]] = mul i32 [[B1]], [[B1]] +; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use32(i32 [[C1]]) +; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use64(i64 [[A1]]) +; CHECK-NEXT: ret void +; + %A1 = zext i32 %X to i64 + %B1 = or i64 %A1, 15 + %C1 = mul i64 %B1, %B1 + %T1 = trunc i64 %C1 to i32 + call i32 @use32(i32 %T1) + ; make sure zext have another use that is not post-dominated by the TruncInst. + call i32 @use64(i64 %A1) + ret void +} + +define void @multi_uses_xor(i32 %X) { +; CHECK-LABEL: @multi_uses_xor( +; CHECK-NEXT: [[A1:%.*]] = zext i32 [[X:%.*]] to i64 +; CHECK-NEXT: [[B1:%.*]] = xor i32 [[X]], 15 +; CHECK-NEXT: [[C1:%.*]] = mul i32 [[B1]], [[B1]] +; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use32(i32 [[C1]]) +; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use64(i64 [[A1]]) +; CHECK-NEXT: ret void +; + %A1 = zext i32 %X to i64 + %B1 = xor i64 %A1, 15 + %C1 = mul i64 %B1, %B1 + %T1 = trunc i64 %C1 to i32 + call i32 @use32(i32 %T1) + ; make sure zext have another use that is not post-dominated by the TruncInst. 
+ call i32 @use64(i64 %A1) + ret void +} + +define void @multi_uses_and(i32 %X) { +; CHECK-LABEL: @multi_uses_and( +; CHECK-NEXT: [[A1:%.*]] = zext i32 [[X:%.*]] to i64 +; CHECK-NEXT: [[B1:%.*]] = and i32 [[X]], 15 +; CHECK-NEXT: [[C1:%.*]] = mul i32 [[B1]], [[B1]] +; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use32(i32 [[C1]]) +; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use64(i64 [[A1]]) +; CHECK-NEXT: ret void +; + %A1 = zext i32 %X to i64 + %B1 = and i64 %A1, 15 + %C1 = mul i64 %B1, %B1 + %T1 = trunc i64 %C1 to i32 + call i32 @use32(i32 %T1) + ; make sure zext have another use that is not post-dominated by the TruncInst. + call i32 @use64(i64 %A1) + ret void +} + +define void @multi_uses_sub(i32 %X, i32 %Y) { +; CHECK-LABEL: @multi_uses_sub( +; CHECK-NEXT: [[A1:%.*]] = zext i32 [[X:%.*]] to i64 +; CHECK-NEXT: [[A2:%.*]] = zext i32 [[Y:%.*]] to i64 +; CHECK-NEXT: [[B1:%.*]] = sub i32 [[X]], [[Y]] +; CHECK-NEXT: [[C1:%.*]] = mul i32 [[B1]], [[B1]] +; CHECK-NEXT: [[TMP1:%.*]] = call i32 @use32(i32 [[C1]]) +; CHECK-NEXT: [[TMP2:%.*]] = call i32 @use64(i64 [[A1]]) +; CHECK-NEXT: [[TMP3:%.*]] = call i32 @use64(i64 [[A2]]) +; CHECK-NEXT: ret void +; + %A1 = zext i32 %X to i64 + %A2 = zext i32 %Y to i64 + %B1 = sub i64 %A1, %A2 + %C1 = mul i64 %B1, %B1 + %T1 = trunc i64 %C1 to i32 + call i32 @use32(i32 %T1) + ; make sure zext have another use that is not post-dominated by the TruncInst. 
+ call i32 @use64(i64 %A1) + call i32 @use64(i64 %A2) + ret void +} + +define void @multi_use_vec_add(<2 x i32> %X) { +; CHECK-LABEL: @multi_use_vec_add( +; CHECK-NEXT: [[A1:%.*]] = zext <2 x i32> [[X:%.*]] to <2 x i64> +; CHECK-NEXT: [[B1:%.*]] = add <2 x i32> [[X]], <i32 15, i32 15> +; CHECK-NEXT: [[C1:%.*]] = mul <2 x i32> [[B1]], [[B1]] +; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i32> @use32_vec(<2 x i32> [[C1]]) +; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i32> @use64_vec(<2 x i64> [[A1]]) +; CHECK-NEXT: ret void +; + %A1 = zext <2 x i32> %X to <2 x i64> + %B1 = add <2 x i64> %A1, <i64 15, i64 15> + %C1 = mul <2 x i64> %B1, %B1 + %T1 = trunc <2 x i64> %C1 to <2 x i32> + call <2 x i32> @use32_vec(<2 x i32> %T1) + ; make sure zext have another use that is not post-dominated by the TruncInst. + call <2 x i32> @use64_vec(<2 x i64> %A1) + ret void +} + +define void @multi_use_vec_or(<2 x i32> %X) { +; CHECK-LABEL: @multi_use_vec_or( +; CHECK-NEXT: [[A1:%.*]] = zext <2 x i32> [[X:%.*]] to <2 x i64> +; CHECK-NEXT: [[B1:%.*]] = or <2 x i32> [[X]], <i32 15, i32 15> +; CHECK-NEXT: [[C1:%.*]] = mul <2 x i32> [[B1]], [[B1]] +; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i32> @use32_vec(<2 x i32> [[C1]]) +; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i32> @use64_vec(<2 x i64> [[A1]]) +; CHECK-NEXT: ret void +; + %A1 = zext <2 x i32> %X to <2 x i64> + %B1 = or <2 x i64> %A1, <i64 15, i64 15> + %C1 = mul <2 x i64> %B1, %B1 + %T1 = trunc <2 x i64> %C1 to <2 x i32> + call <2 x i32> @use32_vec(<2 x i32> %T1) + ; make sure zext have another use that is not post-dominated by the TruncInst. 
+ call <2 x i32> @use64_vec(<2 x i64> %A1) + ret void +} + +define void @multi_use_vec_xor(<2 x i32> %X) { +; CHECK-LABEL: @multi_use_vec_xor( +; CHECK-NEXT: [[A1:%.*]] = zext <2 x i32> [[X:%.*]] to <2 x i64> +; CHECK-NEXT: [[B1:%.*]] = xor <2 x i32> [[X]], <i32 15, i32 15> +; CHECK-NEXT: [[C1:%.*]] = mul <2 x i32> [[B1]], [[B1]] +; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i32> @use32_vec(<2 x i32> [[C1]]) +; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i32> @use64_vec(<2 x i64> [[A1]]) +; CHECK-NEXT: ret void +; + %A1 = zext <2 x i32> %X to <2 x i64> + %B1 = xor <2 x i64> %A1, <i64 15, i64 15> + %C1 = mul <2 x i64> %B1, %B1 + %T1 = trunc <2 x i64> %C1 to <2 x i32> + call <2 x i32> @use32_vec(<2 x i32> %T1) + ; make sure zext have another use that is not post-dominated by the TruncInst. + call <2 x i32> @use64_vec(<2 x i64> %A1) + ret void +} + +define void @multi_use_vec_and(<2 x i32> %X) { +; CHECK-LABEL: @multi_use_vec_and( +; CHECK-NEXT: [[A1:%.*]] = zext <2 x i32> [[X:%.*]] to <2 x i64> +; CHECK-NEXT: [[B1:%.*]] = and <2 x i32> [[X]], <i32 15, i32 15> +; CHECK-NEXT: [[C1:%.*]] = mul <2 x i32> [[B1]], [[B1]] +; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i32> @use32_vec(<2 x i32> [[C1]]) +; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i32> @use64_vec(<2 x i64> [[A1]]) +; CHECK-NEXT: ret void +; + %A1 = zext <2 x i32> %X to <2 x i64> + %B1 = and <2 x i64> %A1, <i64 15, i64 15> + %C1 = mul <2 x i64> %B1, %B1 + %T1 = trunc <2 x i64> %C1 to <2 x i32> + call <2 x i32> @use32_vec(<2 x i32> %T1) + ; make sure zext have another use that is not post-dominated by the TruncInst. 
+ call <2 x i32> @use64_vec(<2 x i64> %A1) + ret void +} + +define void @multi_use_vec_sub(<2 x i32> %X, <2 x i32> %Y) { +; CHECK-LABEL: @multi_use_vec_sub( +; CHECK-NEXT: [[A1:%.*]] = zext <2 x i32> [[X:%.*]] to <2 x i64> +; CHECK-NEXT: [[A2:%.*]] = zext <2 x i32> [[Y:%.*]] to <2 x i64> +; CHECK-NEXT: [[B1:%.*]] = sub <2 x i32> [[X]], [[Y]] +; CHECK-NEXT: [[C1:%.*]] = mul <2 x i32> [[B1]], [[B1]] +; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i32> @use32_vec(<2 x i32> [[C1]]) +; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i32> @use64_vec(<2 x i64> [[A1]]) +; CHECK-NEXT: [[TMP3:%.*]] = call <2 x i32> @use64_vec(<2 x i64> [[A2]]) +; CHECK-NEXT: ret void +; + %A1 = zext <2 x i32> %X to <2 x i64> + %A2 = zext <2 x i32> %Y to <2 x i64> + %B1 = sub <2 x i64> %A1, %A2 + %C1 = mul <2 x i64> %B1, %B1 + %T1 = trunc <2 x i64> %C1 to <2 x i32> + call <2 x i32> @use32_vec(<2 x i32> %T1) + ; make sure zext have another use that is not post-dominated by the TruncInst. + call <2 x i32> @use64_vec(<2 x i64> %A1) + call <2 x i32> @use64_vec(<2 x i64> %A2) + ret void +} |

