summaryrefslogtreecommitdiffstats
path: root/llvm/test/CodeGen/X86/and-encoding.ll
diff options
context:
space:
mode:
authorSimon Pilgrim <llvm-dev@redking.me.uk>2018-09-19 18:59:08 +0000
committerSimon Pilgrim <llvm-dev@redking.me.uk>2018-09-19 18:59:08 +0000
commit2d0f20cc043458c945e4959c5b130c07a7f5b8b5 (patch)
tree4c6c2685582012433738444bea2cce36c82c7b04 /llvm/test/CodeGen/X86/and-encoding.ll
parent894c39f770298e8972d3518c9b3531b59c819f56 (diff)
downloadbcm5719-llvm-2d0f20cc043458c945e4959c5b130c07a7f5b8b5.tar.gz
bcm5719-llvm-2d0f20cc043458c945e4959c5b130c07a7f5b8b5.zip
[X86] Handle COPYs of physregs better (regalloc hints)
Enable enableMultipleCopyHints() on X86. Original Patch by @jonpa: While enabling the mischeduler for SystemZ, it was discovered that for some reason a test needed one extra seemingly needless COPY (test/CodeGen/SystemZ/call-03.ll). The handling for that resulted in this patch, which improves the register coalescing by providing not just one copy hint, but a sorted list of copy hints. On SystemZ, this gives ~12500 fewer register moves on SPEC, as well as marginally less spilling. Instead of improving just the SystemZ backend, the improvement has been implemented in common code (calculateSpillWeightAndHint()). This gives a lot of test failures, but since this should be a general improvement I hope that the involved targets will help and review the test updates. Differential Revision: https://reviews.llvm.org/D38128 llvm-svn: 342578
Diffstat (limited to 'llvm/test/CodeGen/X86/and-encoding.ll')
-rw-r--r--llvm/test/CodeGen/X86/and-encoding.ll18
1 files changed, 9 insertions, 9 deletions
diff --git a/llvm/test/CodeGen/X86/and-encoding.ll b/llvm/test/CodeGen/X86/and-encoding.ll
index 51cdbd9f6e1..52fd9979300 100644
--- a/llvm/test/CodeGen/X86/and-encoding.ll
+++ b/llvm/test/CodeGen/X86/and-encoding.ll
@@ -46,9 +46,9 @@ define void @f3(i32 %x, i1 *%y) nounwind {
define i32 @lopped32_32to8(i32 %x) {
; CHECK-LABEL: lopped32_32to8:
; CHECK: # %bb.0:
-; CHECK-NEXT: shrl $4, %edi # encoding: [0xc1,0xef,0x04]
-; CHECK-NEXT: andl $-16, %edi # encoding: [0x83,0xe7,0xf0]
; CHECK-NEXT: movl %edi, %eax # encoding: [0x89,0xf8]
+; CHECK-NEXT: shrl $4, %eax # encoding: [0xc1,0xe8,0x04]
+; CHECK-NEXT: andl $-16, %eax # encoding: [0x83,0xe0,0xf0]
; CHECK-NEXT: retq # encoding: [0xc3]
%shr = lshr i32 %x, 4
%and = and i32 %shr, 268435440
@@ -60,9 +60,9 @@ define i32 @lopped32_32to8(i32 %x) {
define i64 @lopped64_32to8(i64 %x) {
; CHECK-LABEL: lopped64_32to8:
; CHECK: # %bb.0:
-; CHECK-NEXT: shrq $36, %rdi # encoding: [0x48,0xc1,0xef,0x24]
-; CHECK-NEXT: andl $-16, %edi # encoding: [0x83,0xe7,0xf0]
; CHECK-NEXT: movq %rdi, %rax # encoding: [0x48,0x89,0xf8]
+; CHECK-NEXT: shrq $36, %rax # encoding: [0x48,0xc1,0xe8,0x24]
+; CHECK-NEXT: andl $-16, %eax # encoding: [0x83,0xe0,0xf0]
; CHECK-NEXT: retq # encoding: [0xc3]
%shr = lshr i64 %x, 36
%and = and i64 %shr, 268435440
@@ -74,9 +74,9 @@ define i64 @lopped64_32to8(i64 %x) {
define i64 @lopped64_64to8(i64 %x) {
; CHECK-LABEL: lopped64_64to8:
; CHECK: # %bb.0:
-; CHECK-NEXT: shrq $4, %rdi # encoding: [0x48,0xc1,0xef,0x04]
-; CHECK-NEXT: andq $-16, %rdi # encoding: [0x48,0x83,0xe7,0xf0]
; CHECK-NEXT: movq %rdi, %rax # encoding: [0x48,0x89,0xf8]
+; CHECK-NEXT: shrq $4, %rax # encoding: [0x48,0xc1,0xe8,0x04]
+; CHECK-NEXT: andq $-16, %rax # encoding: [0x48,0x83,0xe0,0xf0]
; CHECK-NEXT: retq # encoding: [0xc3]
%shr = lshr i64 %x, 4
%and = and i64 %shr, 1152921504606846960
@@ -88,10 +88,10 @@ define i64 @lopped64_64to8(i64 %x) {
define i64 @lopped64_64to32(i64 %x) {
; CHECK-LABEL: lopped64_64to32:
; CHECK: # %bb.0:
-; CHECK-NEXT: shrq $4, %rdi # encoding: [0x48,0xc1,0xef,0x04]
-; CHECK-NEXT: andq $-983056, %rdi # encoding: [0x48,0x81,0xe7,0xf0,0xff,0xf0,0xff]
-; CHECK-NEXT: # imm = 0xFFF0FFF0
; CHECK-NEXT: movq %rdi, %rax # encoding: [0x48,0x89,0xf8]
+; CHECK-NEXT: shrq $4, %rax # encoding: [0x48,0xc1,0xe8,0x04]
+; CHECK-NEXT: andq $-983056, %rax # encoding: [0x48,0x25,0xf0,0xff,0xf0,0xff]
+; CHECK-NEXT: # imm = 0xFFF0FFF0
; CHECK-NEXT: retq # encoding: [0xc3]
%shr = lshr i64 %x, 4
%and = and i64 %shr, 1152921504605863920
OpenPOWER on IntegriCloud