path: root/llvm/test/CodeGen/AArch64/addsub_ext.ll
author    Tim Northover <tnorthover@apple.com>    2014-04-14 12:50:50 +0000
committer Tim Northover <tnorthover@apple.com>    2014-04-14 12:50:50 +0000
commit    2f4830343676eb250431e83124500279ea156a26 (patch)
tree      66167633195d94e1dc31e519ca90edc02bf76fa7 /llvm/test/CodeGen/AArch64/addsub_ext.ll
parent    23b1f08282a379b8bcfbb1f4d5055445a822b7ce (diff)
ARM64: add support for AArch64's addsub_ext.ll
There was one definite issue in ARM64 (the off-by-1 check for whether a shift could be folded in) and one difference that is probably correct: ARM64 didn't fold nodes with multiple uses into the arithmetic operations unless optimising for code size.

llvm-svn: 206168
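For context (not part of the commit message), the tests in this file exercise folding a sign/zero extension into AArch64's add/sub (extended register) instruction forms. A minimal sketch of one such check, written in the same era's IR syntax with hypothetical names (example_i8rhs, %val8, %rhs32_sext), might look like:

define void @example_i8rhs() minsize {
; CHECK-LABEL: example_i8rhs:
  %val8 = load i8* @var8              ; narrow value to be extended
  %lhs32 = load i32* @var32
  %rhs32_sext = sext i8 %val8 to i32
  %res32_sext = add i32 %lhs32, %rhs32_sext
; The sign extension should fold into the add as an extended-register operand:
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, sxtb
  store volatile i32 %res32_sext, i32* @var32
  ret void
}

The minsize attribute added by this commit matters because, per the commit message, ARM64 only folds nodes with multiple uses into the arithmetic operation when optimising for code size, and the real tests reuse the loaded narrow value across several add/sub variants.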
Diffstat (limited to 'llvm/test/CodeGen/AArch64/addsub_ext.ll')
-rw-r--r--    llvm/test/CodeGen/AArch64/addsub_ext.ll    7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/addsub_ext.ll b/llvm/test/CodeGen/AArch64/addsub_ext.ll
index f0e11c65224..323cb839750 100644
--- a/llvm/test/CodeGen/AArch64/addsub_ext.ll
+++ b/llvm/test/CodeGen/AArch64/addsub_ext.ll
@@ -1,11 +1,12 @@
; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s
+; RUN: llc -verify-machineinstrs %s -o - -mtriple=arm64 | FileCheck %s
@var8 = global i8 0
@var16 = global i16 0
@var32 = global i32 0
@var64 = global i64 0
-define void @addsub_i8rhs() {
+define void @addsub_i8rhs() minsize {
; CHECK-LABEL: addsub_i8rhs:
%val8_tmp = load i8* @var8
%lhs32 = load i32* @var32
@@ -80,7 +81,7 @@ end:
ret void
}
-define void @addsub_i16rhs() {
+define void @addsub_i16rhs() minsize {
; CHECK-LABEL: addsub_i16rhs:
%val16_tmp = load i16* @var16
%lhs32 = load i32* @var32
@@ -158,7 +159,7 @@ end:
; N.b. we could probably check more here ("add w2, w3, w1, uxtw" for
; example), but the remaining instructions are probably not idiomatic
; in the face of "add/sub (shifted register)" so I don't intend to.
-define void @addsub_i32rhs() {
+define void @addsub_i32rhs() minsize {
; CHECK-LABEL: addsub_i32rhs:
%val32_tmp = load i32* @var32
%lhs64 = load i64* @var64