author    Sanjay Patel <spatel@rotateright.com>  2016-08-13 18:42:14 +0000
committer Sanjay Patel <spatel@rotateright.com>  2016-08-13 18:42:14 +0000
commit    08c876673ef186627353655c2cf66c019b30f417 (patch)
tree      30a08d403ee6d9d7d4aa42b19f467c6eb3ed603f /llvm
parent    f24939b1f44f7acc6c747c631d4c79bf144002a7 (diff)
download  bcm5719-llvm-08c876673ef186627353655c2cf66c019b30f417.tar.gz
          bcm5719-llvm-08c876673ef186627353655c2cf66c019b30f417.zip
[x86] add tests to show missed 64-bit immediate merging
Tests are slightly modified versions of those written by Sunita Marathe in D23391.

llvm-svn: 278599
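For context on what "immediate merging" would buy here: every movq $-1, <mem> store and cmpq $-1 compare re-encodes the same sign-extended 32-bit immediate, so a constant with several users costs four extra bytes per use. Materializing the constant once in a register and switching its users to register forms is smaller when optimizing for size. The sketch below is illustrative only (register choice and ordering are assumptions, not llc output); the CHECK lines in the new test capture the current, unmerged codegen.

    # Current lowering (what the test's CHECK lines expect):
    # the -1 immediate is encoded separately in each user.
    movq    $-1, g1(%rip)
    cmpq    $-1, %rdx
    movq    $-1, %rax

    # Hypothetical merged lowering for code size: materialize -1 once,
    # then reuse the register so the immediate bytes appear only once.
    movq    $-1, %rax
    movq    %rax, g1(%rip)
    cmpq    %rax, %rdx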
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/test/CodeGen/X86/immediate_merging64.ll  |  54
1 file changed, 54 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/X86/immediate_merging64.ll b/llvm/test/CodeGen/X86/immediate_merging64.ll
new file mode 100644
index 00000000000..b0d66669ba8
--- /dev/null
+++ b/llvm/test/CodeGen/X86/immediate_merging64.ll
@@ -0,0 +1,54 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s
+
+; Check that multiple instances of 64-bit constants encodable as
+; 32-bit immediates are merged for code size savings.
+
+@g1 = common global i64 0, align 8
+@g2 = common global i64 0, align 8
+@g3 = common global i64 0, align 8
+@g4 = common global i64 0, align 8
+
+; Immediates with multiple users should not be pulled into instructions when
+; optimizing for code size.
+define void @imm_multiple_users(i64 %l1, i64 %l2, i64 %l3, i64 %l4) optsize {
+; CHECK-LABEL: imm_multiple_users:
+; CHECK: # BB#0:
+; CHECK-NEXT: movq $-1, {{.*}}(%rip)
+; CHECK-NEXT: cmpq $-1, %rdx
+; CHECK-NEXT: cmovneq %rsi, %rdi
+; CHECK-NEXT: movq %rdi, {{.*}}(%rip)
+; CHECK-NEXT: movq $-1, %rax
+; CHECK-NEXT: # kill: %CL<def> %CL<kill> %RCX<kill>
+; CHECK-NEXT: shlq %cl, %rax
+; CHECK-NEXT: movq %rax, {{.*}}(%rip)
+; CHECK-NEXT: movq $0, {{.*}}(%rip)
+; CHECK-NEXT: retq
+;
+ store i64 -1, i64* @g1, align 8
+ %cmp = icmp eq i64 %l3, -1
+ %sel = select i1 %cmp, i64 %l1, i64 %l2
+ store i64 %sel, i64* @g2, align 8
+ %and = and i64 %l4, 63
+ %shl = shl i64 -1, %and
+ store i64 %shl, i64* @g3, align 8
+ store i64 0, i64* @g4, align 8
+ ret void
+}
+
+declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1)
+
+; Inlined memsets requiring multiple same-sized stores should be lowered using
+; the register, rather than immediate, form of stores when optimizing for
+; code size.
+define void @memset_zero(i8* noalias nocapture %D) optsize {
+; CHECK-LABEL: memset_zero:
+; CHECK: # BB#0:
+; CHECK-NEXT: movq $0, 7(%rdi)
+; CHECK-NEXT: movq $0, (%rdi)
+; CHECK-NEXT: retq
+;
+ tail call void @llvm.memset.p0i8.i64(i8* %D, i8 0, i64 15, i32 1, i1 false)
+ ret void
+}
+
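The memset_zero test above shows the same issue for inlined memset expansion: the 15-byte memset is lowered to two overlapping 8-byte stores, and each movq $0, <mem> carries its own 32-bit zero immediate. A code-size-friendlier sequence would zero a register once and store it twice. A rough sketch of that alternative (register choice assumed, not produced by llc):

    # Current expansion, as checked above: each store encodes a 32-bit zero.
    movq    $0, 7(%rdi)
    movq    $0, (%rdi)
    retq

    # Hypothetical register form: one 2-byte zeroing idiom, then two
    # immediate-free stores.
    xorl    %eax, %eax          # clears all 64 bits of %rax
    movq    %rax, 7(%rdi)
    movq    %rax, (%rdi)
    retq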