author    | QingShan Zhang <qshanz@cn.ibm.com> | 2019-12-11 07:25:57 +0000
committer | QingShan Zhang <qshanz@cn.ibm.com> | 2019-12-11 07:25:57 +0000
commit    | f99297176cd9507393b69029406080de01ae41c7 (patch)
tree      | a2955232f6d349a3a5e16749825fd8cced61f242 /llvm/test/CodeGen/PowerPC
parent    | d4345636e678ccab8a87b09cdad9129e54c23100 (diff)
[PowerPC] Exploit the Vector Integer Average Instructions
PowerPC has instructions that implement the semantics of this piece of code:

vector int foo(vector int m, vector int n) {
  return (m + n + 1) >> 1;
}

This patch adds the matching rules to select them.
Differential Revision: https://reviews.llvm.org/D71002
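For context, here is a minimal C sketch (not part of the patch; the function names are illustrative) of how the two flavours of this idiom line up with the instruction family, written in the same vector-extension style as the snippet above. With unsigned elements the shift is logical (lshr in IR) and is expected to select the unsigned average (e.g. vavguw); with signed elements the shift is arithmetic (ashr) and is expected to select the signed average (e.g. vavgsw), mirroring the test_v4i32 and test_v4i32_sign cases in the diff below.

/* Sketch only, assuming an AltiVec-enabled build; avg_u32/avg_s32 are
 * illustrative names, not part of the patch. */
vector unsigned int avg_u32(vector unsigned int m, vector unsigned int n) {
  /* Unsigned elements: >> is a logical shift, i.e. the lshr pattern (vavguw). */
  return (m + n + 1) >> 1;
}

vector int avg_s32(vector int m, vector int n) {
  /* Signed elements: >> is an arithmetic shift, i.e. the ashr pattern (vavgsw). */
  return (m + n + 1) >> 1;
}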
Diffstat (limited to 'llvm/test/CodeGen/PowerPC')
-rw-r--r-- | llvm/test/CodeGen/PowerPC/vavg.ll | 195
1 file changed, 123 insertions, 72 deletions
diff --git a/llvm/test/CodeGen/PowerPC/vavg.ll b/llvm/test/CodeGen/PowerPC/vavg.ll
index ad6c1c844de..735b39da805 100644
--- a/llvm/test/CodeGen/PowerPC/vavg.ll
+++ b/llvm/test/CodeGen/PowerPC/vavg.ll
@@ -5,26 +5,17 @@
 define <8 x i16> @test_v8i16(<8 x i16> %m, <8 x i16> %n) {
 ; CHECK-P9-LABEL: test_v8i16:
 ; CHECK-P9: # %bb.0: # %entry
-; CHECK-P9-NEXT: xxlnor 34, 34, 34
-; CHECK-P9-NEXT: vspltish 4, 1
-; CHECK-P9-NEXT: vsubuhm 2, 3, 2
-; CHECK-P9-NEXT: vsrh 2, 2, 4
+; CHECK-P9-NEXT: vavguh 2, 3, 2
 ; CHECK-P9-NEXT: blr
 ;
 ; CHECK-P8-LABEL: test_v8i16:
 ; CHECK-P8: # %bb.0: # %entry
-; CHECK-P8-NEXT: xxlnor 34, 34, 34
-; CHECK-P8-NEXT: vspltish 4, 1
-; CHECK-P8-NEXT: vsubuhm 2, 3, 2
-; CHECK-P8-NEXT: vsrh 2, 2, 4
+; CHECK-P8-NEXT: vavguh 2, 3, 2
 ; CHECK-P8-NEXT: blr
 ;
 ; CHECK-P7-LABEL: test_v8i16:
 ; CHECK-P7: # %bb.0: # %entry
-; CHECK-P7-NEXT: xxlnor 34, 34, 34
-; CHECK-P7-NEXT: vspltish 4, 1
-; CHECK-P7-NEXT: vsubuhm 2, 3, 2
-; CHECK-P7-NEXT: vsrh 2, 2, 4
+; CHECK-P7-NEXT: vavguh 2, 3, 2
 ; CHECK-P7-NEXT: blr
 entry:
   %add = add <8 x i16> %m, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -36,26 +27,17 @@ entry:
 define <8 x i16> @test_v8i16_sign(<8 x i16> %m, <8 x i16> %n) {
 ; CHECK-P9-LABEL: test_v8i16_sign:
 ; CHECK-P9: # %bb.0: # %entry
-; CHECK-P9-NEXT: xxlnor 34, 34, 34
-; CHECK-P9-NEXT: vspltish 4, 1
-; CHECK-P9-NEXT: vsubuhm 2, 3, 2
-; CHECK-P9-NEXT: vsrah 2, 2, 4
+; CHECK-P9-NEXT: vavgsh 2, 3, 2
 ; CHECK-P9-NEXT: blr
 ;
 ; CHECK-P8-LABEL: test_v8i16_sign:
 ; CHECK-P8: # %bb.0: # %entry
-; CHECK-P8-NEXT: xxlnor 34, 34, 34
-; CHECK-P8-NEXT: vspltish 4, 1
-; CHECK-P8-NEXT: vsubuhm 2, 3, 2
-; CHECK-P8-NEXT: vsrah 2, 2, 4
+; CHECK-P8-NEXT: vavgsh 2, 3, 2
 ; CHECK-P8-NEXT: blr
 ;
 ; CHECK-P7-LABEL: test_v8i16_sign:
 ; CHECK-P7: # %bb.0: # %entry
-; CHECK-P7-NEXT: xxlnor 34, 34, 34
-; CHECK-P7-NEXT: vspltish 4, 1
-; CHECK-P7-NEXT: vsubuhm 2, 3, 2
-; CHECK-P7-NEXT: vsrah 2, 2, 4
+; CHECK-P7-NEXT: vavgsh 2, 3, 2
 ; CHECK-P7-NEXT: blr
 entry:
   %add = add <8 x i16> %m, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
@@ -67,26 +49,17 @@ entry:
 define <4 x i32> @test_v4i32(<4 x i32> %m, <4 x i32> %n) {
 ; CHECK-P9-LABEL: test_v4i32:
 ; CHECK-P9: # %bb.0: # %entry
-; CHECK-P9-NEXT: xxlnor 34, 34, 34
-; CHECK-P9-NEXT: vspltisw 4, 1
-; CHECK-P9-NEXT: vsubuwm 2, 3, 2
-; CHECK-P9-NEXT: vsrw 2, 2, 4
+; CHECK-P9-NEXT: vavguw 2, 3, 2
 ; CHECK-P9-NEXT: blr
 ;
 ; CHECK-P8-LABEL: test_v4i32:
 ; CHECK-P8: # %bb.0: # %entry
-; CHECK-P8-NEXT: xxlnor 34, 34, 34
-; CHECK-P8-NEXT: vspltisw 4, 1
-; CHECK-P8-NEXT: vsubuwm 2, 3, 2
-; CHECK-P8-NEXT: vsrw 2, 2, 4
+; CHECK-P8-NEXT: vavguw 2, 3, 2
 ; CHECK-P8-NEXT: blr
 ;
 ; CHECK-P7-LABEL: test_v4i32:
 ; CHECK-P7: # %bb.0: # %entry
-; CHECK-P7-NEXT: xxlnor 34, 34, 34
-; CHECK-P7-NEXT: vspltisw 4, 1
-; CHECK-P7-NEXT: vsubuwm 2, 3, 2
-; CHECK-P7-NEXT: vsrw 2, 2, 4
+; CHECK-P7-NEXT: vavguw 2, 3, 2
 ; CHECK-P7-NEXT: blr
 entry:
   %add = add <4 x i32> %m, <i32 1, i32 1, i32 1, i32 1>
@@ -98,26 +71,17 @@ entry:
 define <4 x i32> @test_v4i32_sign(<4 x i32> %m, <4 x i32> %n) {
 ; CHECK-P9-LABEL: test_v4i32_sign:
 ; CHECK-P9: # %bb.0: # %entry
-; CHECK-P9-NEXT: xxlnor 34, 34, 34
-; CHECK-P9-NEXT: vspltisw 4, 1
-; CHECK-P9-NEXT: vsubuwm 2, 3, 2
-; CHECK-P9-NEXT: vsraw 2, 2, 4
+; CHECK-P9-NEXT: vavgsw 2, 3, 2
 ; CHECK-P9-NEXT: blr
 ;
 ; CHECK-P8-LABEL: test_v4i32_sign:
 ; CHECK-P8: # %bb.0: # %entry
-; CHECK-P8-NEXT: xxlnor 34, 34, 34
-; CHECK-P8-NEXT: vspltisw 4, 1
-; CHECK-P8-NEXT: vsubuwm 2, 3, 2
-; CHECK-P8-NEXT: vsraw 2, 2, 4
+; CHECK-P8-NEXT: vavgsw 2, 3, 2
 ; CHECK-P8-NEXT: blr
 ;
 ; CHECK-P7-LABEL: test_v4i32_sign:
 ; CHECK-P7: # %bb.0: # %entry
-; CHECK-P7-NEXT: xxlnor 34, 34, 34
-; CHECK-P7-NEXT: vspltisw 4, 1
-; CHECK-P7-NEXT: vsubuwm 2, 3, 2
-; CHECK-P7-NEXT: vsraw 2, 2, 4
+; CHECK-P7-NEXT: vavgsw 2, 3, 2
 ; CHECK-P7-NEXT: blr
 entry:
   %add = add <4 x i32> %m, <i32 1, i32 1, i32 1, i32 1>
@@ -129,26 +93,17 @@ entry:
 define <16 x i8> @test_v16i8(<16 x i8> %m, <16 x i8> %n) {
 ; CHECK-P9-LABEL: test_v16i8:
 ; CHECK-P9: # %bb.0: # %entry
-; CHECK-P9-NEXT: xxlnor 34, 34, 34
-; CHECK-P9-NEXT: xxspltib 36, 1
-; CHECK-P9-NEXT: vsububm 2, 3, 2
-; CHECK-P9-NEXT: vsrb 2, 2, 4
+; CHECK-P9-NEXT: vavgub 2, 3, 2
 ; CHECK-P9-NEXT: blr
 ;
 ; CHECK-P8-LABEL: test_v16i8:
 ; CHECK-P8: # %bb.0: # %entry
-; CHECK-P8-NEXT: xxlnor 34, 34, 34
-; CHECK-P8-NEXT: vspltisb 4, 1
-; CHECK-P8-NEXT: vsububm 2, 3, 2
-; CHECK-P8-NEXT: vsrb 2, 2, 4
+; CHECK-P8-NEXT: vavgub 2, 3, 2
 ; CHECK-P8-NEXT: blr
 ;
 ; CHECK-P7-LABEL: test_v16i8:
 ; CHECK-P7: # %bb.0: # %entry
-; CHECK-P7-NEXT: xxlnor 34, 34, 34
-; CHECK-P7-NEXT: vspltisb 4, 1
-; CHECK-P7-NEXT: vsububm 2, 3, 2
-; CHECK-P7-NEXT: vsrb 2, 2, 4
+; CHECK-P7-NEXT: vavgub 2, 3, 2
 ; CHECK-P7-NEXT: blr
 entry:
   %add = add <16 x i8> %m, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -160,26 +115,17 @@ entry:
 define <16 x i8> @test_v16i8_sign(<16 x i8> %m, <16 x i8> %n) {
 ; CHECK-P9-LABEL: test_v16i8_sign:
 ; CHECK-P9: # %bb.0: # %entry
-; CHECK-P9-NEXT: xxlnor 34, 34, 34
-; CHECK-P9-NEXT: xxspltib 36, 1
-; CHECK-P9-NEXT: vsububm 2, 3, 2
-; CHECK-P9-NEXT: vsrab 2, 2, 4
+; CHECK-P9-NEXT: vavgsb 2, 3, 2
 ; CHECK-P9-NEXT: blr
 ;
 ; CHECK-P8-LABEL: test_v16i8_sign:
 ; CHECK-P8: # %bb.0: # %entry
-; CHECK-P8-NEXT: xxlnor 34, 34, 34
-; CHECK-P8-NEXT: vspltisb 4, 1
-; CHECK-P8-NEXT: vsububm 2, 3, 2
-; CHECK-P8-NEXT: vsrab 2, 2, 4
+; CHECK-P8-NEXT: vavgsb 2, 3, 2
 ; CHECK-P8-NEXT: blr
 ;
 ; CHECK-P7-LABEL: test_v16i8_sign:
 ; CHECK-P7: # %bb.0: # %entry
-; CHECK-P7-NEXT: xxlnor 34, 34, 34
-; CHECK-P7-NEXT: vspltisb 4, 1
-; CHECK-P7-NEXT: vsububm 2, 3, 2
-; CHECK-P7-NEXT: vsrab 2, 2, 4
+; CHECK-P7-NEXT: vavgsb 2, 3, 2
 ; CHECK-P7-NEXT: blr
 entry:
   %add = add <16 x i8> %m, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
@@ -187,3 +133,108 @@ entry:
   %shr = ashr <16 x i8> %add1, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
   ret <16 x i8> %shr
 }
+
+define <8 x i16> @test_v8i16_sign_negative(<8 x i16> %m, <8 x i16> %n) {
+; CHECK-P9-LABEL: test_v8i16_sign_negative:
+; CHECK-P9: # %bb.0: # %entry
+; CHECK-P9-NEXT: addis 3, 2, .LCPI6_0@toc@ha
+; CHECK-P9-NEXT: addi 3, 3, .LCPI6_0@toc@l
+; CHECK-P9-NEXT: vadduhm 2, 2, 3
+; CHECK-P9-NEXT: lxvx 35, 0, 3
+; CHECK-P9-NEXT: vadduhm 2, 2, 3
+; CHECK-P9-NEXT: vspltish 3, 1
+; CHECK-P9-NEXT: vsrah 2, 2, 3
+; CHECK-P9-NEXT: blr
+;
+; CHECK-P8-LABEL: test_v8i16_sign_negative:
+; CHECK-P8: # %bb.0: # %entry
+; CHECK-P8-NEXT: addis 3, 2, .LCPI6_0@toc@ha
+; CHECK-P8-NEXT: vadduhm 2, 2, 3
+; CHECK-P8-NEXT: vspltish 4, 1
+; CHECK-P8-NEXT: addi 3, 3, .LCPI6_0@toc@l
+; CHECK-P8-NEXT: lvx 3, 0, 3
+; CHECK-P8-NEXT: vadduhm 2, 2, 3
+; CHECK-P8-NEXT: vsrah 2, 2, 4
+; CHECK-P8-NEXT: blr
+;
+; CHECK-P7-LABEL: test_v8i16_sign_negative:
+; CHECK-P7: # %bb.0: # %entry
+; CHECK-P7-NEXT: addis 3, 2, .LCPI6_0@toc@ha
+; CHECK-P7-NEXT: vadduhm 2, 2, 3
+; CHECK-P7-NEXT: vspltish 4, 1
+; CHECK-P7-NEXT: addi 3, 3, .LCPI6_0@toc@l
+; CHECK-P7-NEXT: lvx 3, 0, 3
+; CHECK-P7-NEXT: vadduhm 2, 2, 3
+; CHECK-P7-NEXT: vsrah 2, 2, 4
+; CHECK-P7-NEXT: blr
+entry:
+  %add = add <8 x i16> %m, <i16 1, i16 1, i16 1, i16 -1, i16 1, i16 1, i16 1, i16 1>
+  %add1 = add <8 x i16> %add, %n
+  %shr = ashr <8 x i16> %add1, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+  ret <8 x i16> %shr
+}
+
+define <4 x i32> @test_v4i32_negative(<4 x i32> %m, <4 x i32> %n) {
+; CHECK-P9-LABEL: test_v4i32_negative:
+; CHECK-P9: # %bb.0: # %entry
+; CHECK-P9-NEXT: xxlnor 34, 34, 34
+; CHECK-P9-NEXT: vsubuwm 2, 3, 2
+; CHECK-P9-NEXT: vspltisw 3, 2
+; CHECK-P9-NEXT: vsrw 2, 2, 3
+; CHECK-P9-NEXT: blr
+;
+; CHECK-P8-LABEL: test_v4i32_negative:
+; CHECK-P8: # %bb.0: # %entry
+; CHECK-P8-NEXT: xxlnor 34, 34, 34
+; CHECK-P8-NEXT: vspltisw 4, 2
+; CHECK-P8-NEXT: vsubuwm 2, 3, 2
+; CHECK-P8-NEXT: vsrw 2, 2, 4
+; CHECK-P8-NEXT: blr
+;
+; CHECK-P7-LABEL: test_v4i32_negative:
+; CHECK-P7: # %bb.0: # %entry
+; CHECK-P7-NEXT: xxlnor 34, 34, 34
+; CHECK-P7-NEXT: vspltisw 4, 2
+; CHECK-P7-NEXT: vsubuwm 2, 3, 2
+; CHECK-P7-NEXT: vsrw 2, 2, 4
+; CHECK-P7-NEXT: blr
+entry:
+  %add = add <4 x i32> %m, <i32 1, i32 1, i32 1, i32 1>
+  %add1 = add <4 x i32> %add, %n
+  %shr = lshr <4 x i32> %add1, <i32 2, i32 2, i32 2, i32 2>
+  ret <4 x i32> %shr
+}
+
+define <4 x i32> @test_v4i32_sign_negative(<4 x i32> %m, <4 x i32> %n) {
+; CHECK-P9-LABEL: test_v4i32_sign_negative:
+; CHECK-P9: # %bb.0: # %entry
+; CHECK-P9-NEXT: vadduwm 2, 2, 3
+; CHECK-P9-NEXT: xxleqv 35, 35, 35
+; CHECK-P9-NEXT: vadduwm 2, 2, 3
+; CHECK-P9-NEXT: vspltisw 3, 1
+; CHECK-P9-NEXT: vsraw 2, 2, 3
+; CHECK-P9-NEXT: blr
+;
+; CHECK-P8-LABEL: test_v4i32_sign_negative:
+; CHECK-P8: # %bb.0: # %entry
+; CHECK-P8-NEXT: xxleqv 36, 36, 36
+; CHECK-P8-NEXT: vadduwm 2, 2, 3
+; CHECK-P8-NEXT: vspltisw 3, 1
+; CHECK-P8-NEXT: vadduwm 2, 2, 4
+; CHECK-P8-NEXT: vsraw 2, 2, 3
+; CHECK-P8-NEXT: blr
+;
+; CHECK-P7-LABEL: test_v4i32_sign_negative:
+; CHECK-P7: # %bb.0: # %entry
+; CHECK-P7-NEXT: vspltisb 4, -1
+; CHECK-P7-NEXT: vadduwm 2, 2, 3
+; CHECK-P7-NEXT: vspltisw 3, 1
+; CHECK-P7-NEXT: vadduwm 2, 2, 4
+; CHECK-P7-NEXT: vsraw 2, 2, 3
+; CHECK-P7-NEXT: blr
+entry:
+  %add = add <4 x i32> %m, <i32 -1, i32 -1, i32 -1, i32 -1>
+  %add1 = add <4 x i32> %add, %n
+  %shr = ashr <4 x i32> %add1, <i32 1, i32 1, i32 1, i32 1>
+  ret <4 x i32> %shr
+}
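As a usage note, the *_negative tests added above pin down shapes the new rule must leave alone. A hedged C sketch of one of them (mirroring test_v4i32_negative; the function name is illustrative, not from the patch): with a shift amount of 2 the expression computes (m + n + 1) / 4 rather than a rounded average, so it keeps the generic add/shift lowering instead of becoming vavguw.

/* Sketch only: not_an_average is an illustrative name. A shift by 2 does not
 * form the (m + n + 1) >> 1 average idiom, so no vavg* should be selected. */
vector unsigned int not_an_average(vector unsigned int m, vector unsigned int n) {
  return (m + n + 1) >> 2;
}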