path: root/llvm/test/CodeGen/X86/rotate.ll
author     Dehao Chen <dehao@google.com>    2017-05-12 19:29:27 +0000
committer  Dehao Chen <dehao@google.com>    2017-05-12 19:29:27 +0000
commit     65dd23e273c5ef69e8ae843ee4fd4664a61d95e9 (patch)
tree       4b9e6e179fbbefa74060b0a71ce457f97b742a8c /llvm/test/CodeGen/X86/rotate.ll
parent     10c64e6aea87a75da6dff41a95ede1935282d71d (diff)
Add LiveRangeShrink pass to shrink live range within BB.
Summary:
The LiveRangeShrink pass moves an instruction so that it sits right after the definition of its operands, within the same BB, if the instruction and its operands all have more than one use. This pass is inexpensive and guarantees an optimal live range within a BB.

Reviewers: davidxl, wmi, hfinkel, MatzeB, andreadb

Reviewed By: MatzeB, andreadb

Subscribers: hiraditya, jyknight, sanjoy, skatkov, gberry, jholewinski, qcolombet, javed.absar, krytarowski, atrick, spatel, RKSimon, andreadb, MatzeB, mehdi_amini, mgorny, efriedma, davide, dberlin, llvm-commits

Differential Revision: https://reviews.llvm.org/D32563

llvm-svn: 302938
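The following is a minimal standalone C++ sketch of the block-local reordering idea the summary describes. It is an illustration only, not LLVM's actual LiveRangeShrink pass: the real pass operates on MachineInstrs and must respect memory and side-effect ordering, whereas the names used here (Inst, shrinkLiveRanges) and the toy instruction model are made up for the example.

// Toy sketch of the block-local reordering described above -- not LLVM's
// actual LiveRangeShrink pass or its MachineFunction API. An instruction is
// modeled as a destination name plus operand names; real machine code
// (memory ops, flags, multiple defs) needs much more care.
#include <iostream>
#include <string>
#include <vector>

struct Inst {
  std::string Dest;             // value defined by this instruction
  std::vector<std::string> Ops; // values read by this instruction
  std::string Text;             // printable form, for the demo only
};

// Hoist each instruction to the slot immediately after the latest in-block
// definition of any of its operands, shrinking those operands' live ranges.
static void shrinkLiveRanges(std::vector<Inst> &Block) {
  for (size_t I = 0; I < Block.size(); ++I) {
    size_t LastDef = 0;
    bool HasDef = false;
    for (size_t J = 0; J < I; ++J)
      for (const std::string &Op : Block[I].Ops)
        if (Block[J].Dest == Op) {
          LastDef = J;
          HasDef = true;
        }
    if (!HasDef || LastDef + 1 >= I)
      continue; // no in-block operand def, or already adjacent to it
    Inst Moved = Block[I];
    Block.erase(Block.begin() + I);
    Block.insert(Block.begin() + LastDef + 1, Moved);
  }
}

int main() {
  // 'c' only uses 'a' but is scheduled far from a's definition, so the
  // value 'a' stays live across the middle of the block.
  std::vector<Inst> Block = {
      {"a", {}, "a = load p"},
      {"b", {}, "b = load q"},
      {"x", {"b"}, "x = b + 1"},
      {"c", {"a"}, "c = a + 1"}, // hoisted to right after "a = load p"
  };
  shrinkLiveRanges(Block);
  for (const Inst &I : Block)
    std::cout << I.Text << "\n";
  return 0;
}

In the test diff below, the same kind of reordering is presumably what makes the two orl instructions in rotl64/rotr64 trade places, so each sits closer to the definition it consumes.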
Diffstat (limited to 'llvm/test/CodeGen/X86/rotate.ll')
-rw-r--r--   llvm/test/CodeGen/X86/rotate.ll | 16
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/llvm/test/CodeGen/X86/rotate.ll b/llvm/test/CodeGen/X86/rotate.ll
index 5d5150ad62d..4be3a4c2391 100644
--- a/llvm/test/CodeGen/X86/rotate.ll
+++ b/llvm/test/CodeGen/X86/rotate.ll
@@ -33,8 +33,8 @@ define i64 @rotl64(i64 %A, i8 %Amt) nounwind {
; 32-NEXT: movl %ebx, %esi
; 32-NEXT: xorl %ebx, %ebx
; 32-NEXT: .LBB0_4:
-; 32-NEXT: orl %esi, %eax
; 32-NEXT: orl %ebx, %edx
+; 32-NEXT: orl %esi, %eax
; 32-NEXT: popl %esi
; 32-NEXT: popl %edi
; 32-NEXT: popl %ebx
@@ -86,8 +86,8 @@ define i64 @rotr64(i64 %A, i8 %Amt) nounwind {
; 32-NEXT: movl %ebx, %esi
; 32-NEXT: xorl %ebx, %ebx
; 32-NEXT: .LBB1_4:
-; 32-NEXT: orl %ebx, %eax
; 32-NEXT: orl %esi, %edx
+; 32-NEXT: orl %ebx, %eax
; 32-NEXT: popl %esi
; 32-NEXT: popl %edi
; 32-NEXT: popl %ebx
@@ -546,7 +546,7 @@ define void @rotr1_64_mem(i64* %Aptr) nounwind {
; 32-LABEL: rotr1_64_mem:
; 32: # BB#0:
; 32-NEXT: pushl %esi
-; 32-NEXT: movl 8(%esp), %eax
+; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
; 32-NEXT: movl (%eax), %ecx
; 32-NEXT: movl 4(%eax), %edx
; 32-NEXT: movl %edx, %esi
@@ -555,11 +555,13 @@ define void @rotr1_64_mem(i64* %Aptr) nounwind {
; 32-NEXT: movl %ecx, 4(%eax)
; 32-NEXT: movl %esi, (%eax)
; 32-NEXT: popl %esi
-
+; 32-NEXT: retl
+;
; 64-LABEL: rotr1_64_mem:
; 64: # BB#0:
; 64-NEXT: rorq (%rdi)
; 64-NEXT: retq
+
%A = load i64, i64 *%Aptr
%B = shl i64 %A, 63
%C = lshr i64 %A, 1
@@ -571,7 +573,7 @@ define void @rotr1_64_mem(i64* %Aptr) nounwind {
define void @rotr1_32_mem(i32* %Aptr) nounwind {
; 32-LABEL: rotr1_32_mem:
; 32: # BB#0:
-; 32-NEXT: movl 4(%esp), %eax
+; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
; 32-NEXT: rorl (%eax)
; 32-NEXT: retl
;
@@ -590,7 +592,7 @@ define void @rotr1_32_mem(i32* %Aptr) nounwind {
define void @rotr1_16_mem(i16* %Aptr) nounwind {
; 32-LABEL: rotr1_16_mem:
; 32: # BB#0:
-; 32-NEXT: movl 4(%esp), %eax
+; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
; 32-NEXT: rorw (%eax)
; 32-NEXT: retl
;
@@ -609,7 +611,7 @@ define void @rotr1_16_mem(i16* %Aptr) nounwind {
define void @rotr1_8_mem(i8* %Aptr) nounwind {
; 32-LABEL: rotr1_8_mem:
; 32: # BB#0:
-; 32-NEXT: movl 4(%esp), %eax
+; 32-NEXT: movl {{[0-9]+}}(%esp), %eax
; 32-NEXT: rorb (%eax)
; 32-NEXT: retl
;