diff options
| author | Sanjay Patel <spatel@rotateright.com> | 2017-04-26 20:26:46 +0000 |
|---|---|---|
| committer | Sanjay Patel <spatel@rotateright.com> | 2017-04-26 20:26:46 +0000 |
| commit | a0547c3d9f71ae721eab6fb9f2dd4bd2c8d75223 (patch) | |
| tree | 5aba469bc6d533368442262979c3fbdac0fb6c61 /llvm/test/CodeGen/ARM | |
| parent | 2a906e1b34c39280947a1c9afbf2605c081c4376 (diff) | |
| download | bcm5719-llvm-a0547c3d9f71ae721eab6fb9f2dd4bd2c8d75223.tar.gz bcm5719-llvm-a0547c3d9f71ae721eab6fb9f2dd4bd2c8d75223.zip | |
[DAGCombiner] add (sext i1 X), 1 --> zext (not i1 X)
Besides better codegen, the motivation is to be able to canonicalize this pattern
in IR (currently we don't) knowing that the backend is prepared for that.
This may also allow removing code for special constant cases in
DAGCombiner::foldSelectOfConstants() that was added in D30180.
Differential Revision: https://reviews.llvm.org/D31944
llvm-svn: 301457
Diffstat (limited to 'llvm/test/CodeGen/ARM')
| -rw-r--r-- | llvm/test/CodeGen/ARM/bool-ext-inc.ll | 23 |
1 file changed, 10 insertions(+), 13 deletions(-)
diff --git a/llvm/test/CodeGen/ARM/bool-ext-inc.ll b/llvm/test/CodeGen/ARM/bool-ext-inc.ll
index b91b9b25899..5f2ba8b109a 100644
--- a/llvm/test/CodeGen/ARM/bool-ext-inc.ll
+++ b/llvm/test/CodeGen/ARM/bool-ext-inc.ll
@@ -4,7 +4,7 @@
 define i32 @sext_inc(i1 zeroext %x) {
 ; CHECK-LABEL: sext_inc:
 ; CHECK: @ BB#0:
-; CHECK-NEXT: rsb r0, r0, #1
+; CHECK-NEXT: eor r0, r0, #1
 ; CHECK-NEXT: mov pc, lr
   %ext = sext i1 %x to i32
   %add = add i32 %ext, 1
@@ -14,14 +14,12 @@ define i32 @sext_inc(i1 zeroext %x) {
 define <4 x i32> @sext_inc_vec(<4 x i1> %x) {
 ; CHECK-LABEL: sext_inc_vec:
 ; CHECK: @ BB#0:
-; CHECK-NEXT: vmov d16, r0, r1
-; CHECK-NEXT: vmov.i32 q9, #0x1f
-; CHECK-NEXT: vmov.i32 q10, #0x1
+; CHECK-NEXT: vmov.i16 d16, #0x1
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vmov.i32 q9, #0x1
+; CHECK-NEXT: veor d16, d17, d16
 ; CHECK-NEXT: vmovl.u16 q8, d16
-; CHECK-NEXT: vneg.s32 q9, q9
-; CHECK-NEXT: vshl.i32 q8, q8, #31
-; CHECK-NEXT: vshl.s32 q8, q8, q9
-; CHECK-NEXT: vadd.i32 q8, q8, q10
+; CHECK-NEXT: vand q8, q8, q9
 ; CHECK-NEXT: vmov r0, r1, d16
 ; CHECK-NEXT: vmov r2, r3, d17
 ; CHECK-NEXT: mov pc, lr
@@ -38,8 +36,8 @@ define <4 x i32> @cmpgt_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK-NEXT: vmov.i32 q10, #0x1
 ; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
 ; CHECK-NEXT: vmov d18, r0, r1
-; CHECK-NEXT: vcgt.s32 q8, q9, q8
-; CHECK-NEXT: vadd.i32 q8, q8, q10
+; CHECK-NEXT: vcge.s32 q8, q8, q9
+; CHECK-NEXT: vand q8, q8, q10
 ; CHECK-NEXT: vmov r0, r1, d16
 ; CHECK-NEXT: vmov r2, r3, d17
 ; CHECK-NEXT: mov pc, lr
@@ -54,12 +52,11 @@ define <4 x i32> @cmpne_sext_inc_vec(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK: @ BB#0:
 ; CHECK-NEXT: mov r12, sp
 ; CHECK-NEXT: vmov d19, r2, r3
+; CHECK-NEXT: vmov.i32 q10, #0x1
 ; CHECK-NEXT: vld1.64 {d16, d17}, [r12]
 ; CHECK-NEXT: vmov d18, r0, r1
 ; CHECK-NEXT: vceq.i32 q8, q9, q8
-; CHECK-NEXT: vmov.i32 q9, #0x1
-; CHECK-NEXT: vmvn q8, q8
-; CHECK-NEXT: vadd.i32 q8, q8, q9
+; CHECK-NEXT: vand q8, q8, q10
 ; CHECK-NEXT: vmov r0, r1, d16
 ; CHECK-NEXT: vmov r2, r3, d17
 ; CHECK-NEXT: mov pc, lr

