summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorCraig Topper <craig.topper@intel.com>2019-03-06 07:36:38 +0000
committerCraig Topper <craig.topper@intel.com>2019-03-06 07:36:38 +0000
commitc0e01d29a467f8e1f75e3a351aef9d08617b45cd (patch)
tree13b9a98c9d6da59ea96c13f94aa7dbeb7ed8373e
parent97a1c4c340b8f97eb0065a22fd96ef11896ba333 (diff)
downloadbcm5719-llvm-c0e01d29a467f8e1f75e3a351aef9d08617b45cd.tar.gz
bcm5719-llvm-c0e01d29a467f8e1f75e3a351aef9d08617b45cd.zip
[X86] Enable the add with 128 -> sub with -128 encoding trick with X86ISD::ADD when the carry flag isn't used.
This allows us to use an 8-bit sign-extended immediate instead of a 16- or 32-bit immediate. Also do the same for 0x80000000 with 64-bit adds to avoid having to use a movabsq. llvm-svn: 355485
-rw-r--r--llvm/lib/Target/X86/X86InstrCompiler.td10
-rw-r--r--llvm/test/CodeGen/X86/add.ll153
-rw-r--r--llvm/test/CodeGen/X86/xaluo.ll5
3 files changed, 165 insertions, 3 deletions
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index ea7453d5b4b..e4f324e7380 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -1491,6 +1491,13 @@ def : Pat<(add GR64:$src1, 128),
def : Pat<(store (add (loadi64 addr:$dst), 128), addr:$dst),
(SUB64mi8 addr:$dst, -128)>;
+def : Pat<(X86add_flag_nocf GR16:$src1, 128),
+ (SUB16ri8 GR16:$src1, -128)>;
+def : Pat<(X86add_flag_nocf GR32:$src1, 128),
+ (SUB32ri8 GR32:$src1, -128)>;
+def : Pat<(X86add_flag_nocf GR64:$src1, 128),
+ (SUB64ri8 GR64:$src1, -128)>;
+
// The same trick applies for 32-bit immediate fields in 64-bit
// instructions.
def : Pat<(add GR64:$src1, 0x0000000080000000),
@@ -1498,6 +1505,9 @@ def : Pat<(add GR64:$src1, 0x0000000080000000),
def : Pat<(store (add (loadi64 addr:$dst), 0x0000000080000000), addr:$dst),
(SUB64mi32 addr:$dst, 0xffffffff80000000)>;
+def : Pat<(X86add_flag_nocf GR64:$src1, 0x0000000080000000),
+ (SUB64ri32 GR64:$src1, 0xffffffff80000000)>;
+
// To avoid needing to materialize an immediate in a register, use a 32-bit and
// with implicit zero-extension instead of a 64-bit and if the immediate has at
// least 32 bits of leading zeros. If in addition the last 32 bits can be
diff --git a/llvm/test/CodeGen/X86/add.ll b/llvm/test/CodeGen/X86/add.ll
index c54711a93a7..dcb34eda826 100644
--- a/llvm/test/CodeGen/X86/add.ll
+++ b/llvm/test/CodeGen/X86/add.ll
@@ -507,3 +507,156 @@ define i32 @add_to_sub(i32 %a, i32 %b) {
%r = add i32 %add, 1
ret i32 %r
}
+
+declare void @bar_i32(i32)
+declare void @bar_i64(i64)
+
+; Make sure we can use sub -128 for add 128 when the flags are used.
+define void @add_i32_128_flag(i32 %x) {
+; X32-LABEL: add_i32_128_flag:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movl $128, %eax
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: je .LBB17_2
+; X32-NEXT: # %bb.1: # %if.then
+; X32-NEXT: pushl %eax
+; X32-NEXT: .cfi_adjust_cfa_offset 4
+; X32-NEXT: calll bar_i32
+; X32-NEXT: addl $4, %esp
+; X32-NEXT: .cfi_adjust_cfa_offset -4
+; X32-NEXT: .LBB17_2: # %if.end
+; X32-NEXT: retl
+;
+; X64-LINUX-LABEL: add_i32_128_flag:
+; X64-LINUX: # %bb.0: # %entry
+; X64-LINUX-NEXT: subl $-128, %edi
+; X64-LINUX-NEXT: je .LBB17_1
+; X64-LINUX-NEXT: # %bb.2: # %if.then
+; X64-LINUX-NEXT: jmp bar_i32 # TAILCALL
+; X64-LINUX-NEXT: .LBB17_1: # %if.end
+; X64-LINUX-NEXT: retq
+;
+; X64-WIN32-LABEL: add_i32_128_flag:
+; X64-WIN32: # %bb.0: # %entry
+; X64-WIN32-NEXT: subl $-128, %ecx
+; X64-WIN32-NEXT: je .LBB17_1
+; X64-WIN32-NEXT: # %bb.2: # %if.then
+; X64-WIN32-NEXT: jmp bar_i32 # TAILCALL
+; X64-WIN32-NEXT: .LBB17_1: # %if.end
+; X64-WIN32-NEXT: retq
+entry:
+ %add = add i32 %x, 128
+ %tobool = icmp eq i32 %add, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then:
+ tail call void @bar_i32(i32 %add)
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; Make sure we can use sub -128 for add 128 when the flags are used.
+define void @add_i64_128_flag(i64 %x) {
+; X32-LABEL: add_i64_128_flag:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl $128, %eax
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl %eax, %edx
+; X32-NEXT: orl %ecx, %edx
+; X32-NEXT: je .LBB18_2
+; X32-NEXT: # %bb.1: # %if.then
+; X32-NEXT: pushl %ecx
+; X32-NEXT: .cfi_adjust_cfa_offset 4
+; X32-NEXT: pushl %eax
+; X32-NEXT: .cfi_adjust_cfa_offset 4
+; X32-NEXT: calll bar_i64
+; X32-NEXT: addl $8, %esp
+; X32-NEXT: .cfi_adjust_cfa_offset -8
+; X32-NEXT: .LBB18_2: # %if.end
+; X32-NEXT: retl
+;
+; X64-LINUX-LABEL: add_i64_128_flag:
+; X64-LINUX: # %bb.0: # %entry
+; X64-LINUX-NEXT: subq $-128, %rdi
+; X64-LINUX-NEXT: je .LBB18_1
+; X64-LINUX-NEXT: # %bb.2: # %if.then
+; X64-LINUX-NEXT: jmp bar_i64 # TAILCALL
+; X64-LINUX-NEXT: .LBB18_1: # %if.end
+; X64-LINUX-NEXT: retq
+;
+; X64-WIN32-LABEL: add_i64_128_flag:
+; X64-WIN32: # %bb.0: # %entry
+; X64-WIN32-NEXT: subq $-128, %rcx
+; X64-WIN32-NEXT: je .LBB18_1
+; X64-WIN32-NEXT: # %bb.2: # %if.then
+; X64-WIN32-NEXT: jmp bar_i64 # TAILCALL
+; X64-WIN32-NEXT: .LBB18_1: # %if.end
+; X64-WIN32-NEXT: retq
+entry:
+ %add = add i64 %x, 128
+ %tobool = icmp eq i64 %add, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then:
+ tail call void @bar_i64(i64 %add)
+ br label %if.end
+
+if.end:
+ ret void
+}
+
+; Make sure we can use sub -2147483648 for add 2147483648 when the flags are used.
+define void @add_i64_2147483648_flag(i64 %x) {
+; X32-LABEL: add_i64_2147483648_flag:
+; X32: # %bb.0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movl $-2147483648, %eax # imm = 0x80000000
+; X32-NEXT: addl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: adcl $0, %ecx
+; X32-NEXT: movl %eax, %edx
+; X32-NEXT: orl %ecx, %edx
+; X32-NEXT: je .LBB19_2
+; X32-NEXT: # %bb.1: # %if.then
+; X32-NEXT: pushl %ecx
+; X32-NEXT: .cfi_adjust_cfa_offset 4
+; X32-NEXT: pushl %eax
+; X32-NEXT: .cfi_adjust_cfa_offset 4
+; X32-NEXT: calll bar_i64
+; X32-NEXT: addl $8, %esp
+; X32-NEXT: .cfi_adjust_cfa_offset -8
+; X32-NEXT: .LBB19_2: # %if.end
+; X32-NEXT: retl
+;
+; X64-LINUX-LABEL: add_i64_2147483648_flag:
+; X64-LINUX: # %bb.0: # %entry
+; X64-LINUX-NEXT: subq $-2147483648, %rdi # imm = 0x80000000
+; X64-LINUX-NEXT: je .LBB19_1
+; X64-LINUX-NEXT: # %bb.2: # %if.then
+; X64-LINUX-NEXT: jmp bar_i64 # TAILCALL
+; X64-LINUX-NEXT: .LBB19_1: # %if.end
+; X64-LINUX-NEXT: retq
+;
+; X64-WIN32-LABEL: add_i64_2147483648_flag:
+; X64-WIN32: # %bb.0: # %entry
+; X64-WIN32-NEXT: subq $-2147483648, %rcx # imm = 0x80000000
+; X64-WIN32-NEXT: je .LBB19_1
+; X64-WIN32-NEXT: # %bb.2: # %if.then
+; X64-WIN32-NEXT: jmp bar_i64 # TAILCALL
+; X64-WIN32-NEXT: .LBB19_1: # %if.end
+; X64-WIN32-NEXT: retq
+entry:
+ %add = add i64 %x, 2147483648
+ %tobool = icmp eq i64 %add, 0
+ br i1 %tobool, label %if.end, label %if.then
+
+if.then:
+ tail call void @bar_i64(i64 %add)
+ br label %if.end
+
+if.end:
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/xaluo.ll b/llvm/test/CodeGen/X86/xaluo.ll
index 786c6178ec4..2cb664dafb6 100644
--- a/llvm/test/CodeGen/X86/xaluo.ll
+++ b/llvm/test/CodeGen/X86/xaluo.ll
@@ -293,10 +293,9 @@ define zeroext i1 @saddoi64imm4(i64 %v1, i64* %res) {
define zeroext i1 @saddoi64imm5(i64 %v1, i64* %res) {
; SDAG-LABEL: saddoi64imm5:
; SDAG: ## %bb.0:
-; SDAG-NEXT: movl $2147483648, %ecx ## imm = 0x80000000
-; SDAG-NEXT: addq %rdi, %rcx
+; SDAG-NEXT: subq $-2147483648, %rdi ## imm = 0x80000000
; SDAG-NEXT: seto %al
-; SDAG-NEXT: movq %rcx, (%rsi)
+; SDAG-NEXT: movq %rdi, (%rsi)
; SDAG-NEXT: retq
;
; FAST-LABEL: saddoi64imm5:
OpenPOWER on IntegriCloud