diff options
| author | Sanjay Patel <spatel@rotateright.com> | 2018-07-25 21:38:30 +0000 |
|---|---|---|
| committer | Sanjay Patel <spatel@rotateright.com> | 2018-07-25 21:38:30 +0000 |
| commit | 215dcbf4db51bde695b993eff984cd605d796d58 (patch) | |
| tree | 29f10d5b9e2000355b7ae06b9854d55b831672f5 /llvm/test/CodeGen | |
| parent | 4f10a9d3a3a47cd60e5760ba4434a566054d9562 (diff) | |
| download | bcm5719-llvm-215dcbf4db51bde695b993eff984cd605d796d58.tar.gz bcm5719-llvm-215dcbf4db51bde695b993eff984cd605d796d58.zip | |
[SelectionDAG] try to convert funnel shift directly to rotate if legal
If the DAGCombiner's rotate matching was working as expected,
I don't think we'd see any test diffs here.
This sidesteps the issue of custom lowering for rotates raised in PR38243:
https://bugs.llvm.org/show_bug.cgi?id=38243
...by only dealing with legal operations.
llvm-svn: 337966
Diffstat (limited to 'llvm/test/CodeGen')
| -rw-r--r-- | llvm/test/CodeGen/AArch64/funnel-shift-rot.ll | 6 | ||||
| -rw-r--r-- | llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll | 4 | ||||
| -rw-r--r-- | llvm/test/CodeGen/X86/funnel-shift-rot.ll | 8 |
3 files changed, 6 insertions, 12 deletions
diff --git a/llvm/test/CodeGen/AArch64/funnel-shift-rot.ll b/llvm/test/CodeGen/AArch64/funnel-shift-rot.ll
index 0b3bc665bdf..af612eafd33 100644
--- a/llvm/test/CodeGen/AArch64/funnel-shift-rot.ll
+++ b/llvm/test/CodeGen/AArch64/funnel-shift-rot.ll
@@ -163,11 +163,7 @@ define i32 @rotr_i32(i32 %x, i32 %z) {
 define i64 @rotr_i64(i64 %x, i64 %z) {
 ; CHECK-LABEL: rotr_i64:
 ; CHECK: // %bb.0:
-; CHECK-NEXT: orr w9, wzr, #0x40
-; CHECK-NEXT: sub w9, w9, w1
-; CHECK-NEXT: lsr x8, x0, x1
-; CHECK-NEXT: lsl x9, x0, x9
-; CHECK-NEXT: orr x0, x9, x8
+; CHECK-NEXT: ror x0, x0, x1
 ; CHECK-NEXT: ret
 %f = call i64 @llvm.fshr.i64(i64 %x, i64 %x, i64 %z)
 ret i64 %f
diff --git a/llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll b/llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll
index b238504edc1..d93c9361b35 100644
--- a/llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll
+++ b/llvm/test/CodeGen/PowerPC/funnel-shift-rot.ll
@@ -55,7 +55,6 @@ define i16 @rotl_i16(i16 %x, i16 %z) {
 define i32 @rotl_i32(i32 %x, i32 %z) {
 ; CHECK-LABEL: rotl_i32:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: rlwinm 4, 4, 0, 27, 31
 ; CHECK-NEXT: rlwnm 3, 3, 4, 0, 31
 ; CHECK-NEXT: blr
 %f = call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %z)
@@ -65,8 +64,7 @@ define i32 @rotl_i32(i32 %x, i32 %z) {
 define i64 @rotl_i64(i64 %x, i64 %z) {
 ; CHECK-LABEL: rotl_i64:
 ; CHECK: # %bb.0:
-; CHECK-NEXT: rlwinm 4, 4, 0, 26, 31
-; CHECK-NEXT: rotld 3, 3, 4
+; CHECK-NEXT: rldcl 3, 3, 4, 0
 ; CHECK-NEXT: blr
 %f = call i64 @llvm.fshl.i64(i64 %x, i64 %x, i64 %z)
 ret i64 %f
diff --git a/llvm/test/CodeGen/X86/funnel-shift-rot.ll b/llvm/test/CodeGen/X86/funnel-shift-rot.ll
index 2cdea080eea..edbdd8d9b92 100644
--- a/llvm/test/CodeGen/X86/funnel-shift-rot.ll
+++ b/llvm/test/CodeGen/X86/funnel-shift-rot.ll
@@ -169,12 +169,12 @@ define i8 @rotr_i8_const_shift(i8 %x) nounwind {
 ; X32-SSE2-LABEL: rotr_i8_const_shift:
 ; X32-SSE2: # %bb.0:
 ; X32-SSE2-NEXT: movb {{[0-9]+}}(%esp), %al
-; X32-SSE2-NEXT: rolb $5, %al
+; X32-SSE2-NEXT: rorb $3, %al
 ; X32-SSE2-NEXT: retl
 ;
 ; X64-AVX2-LABEL: rotr_i8_const_shift:
 ; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: rolb $5, %dil
+; X64-AVX2-NEXT: rorb $3, %dil
 ; X64-AVX2-NEXT: movl %edi, %eax
 ; X64-AVX2-NEXT: retq
 %f = call i8 @llvm.fshr.i8(i8 %x, i8 %x, i8 3)
@@ -185,12 +185,12 @@ define i32 @rotr_i32_const_shift(i32 %x) nounwind {
 ; X32-SSE2-LABEL: rotr_i32_const_shift:
 ; X32-SSE2: # %bb.0:
 ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: roll $29, %eax
+; X32-SSE2-NEXT: rorl $3, %eax
 ; X32-SSE2-NEXT: retl
 ;
 ; X64-AVX2-LABEL: rotr_i32_const_shift:
 ; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: roll $29, %edi
+; X64-AVX2-NEXT: rorl $3, %edi
 ; X64-AVX2-NEXT: movl %edi, %eax
 ; X64-AVX2-NEXT: retq
 %f = call i32 @llvm.fshr.i32(i32 %x, i32 %x, i32 3)

