author    Juergen Ributzka <juergen@apple.com>    2014-08-27 21:38:33 +0000
committer Juergen Ributzka <juergen@apple.com>    2014-08-27 21:38:33 +0000
commit    3c1b2861524aa3b2f6009316f2bbe0beb23b21c0 (patch)
tree      d48bcf4469bbbeee013b316125c0352de04dd713
parent    9a45fac6f73f6c772fe96773ae3beab6f24c534c (diff)
[FastISel][AArch64] Fix simplify address when the address comes from a shift.
When the address comes directly from a shift instruction, the address computation cannot be folded into the memory instruction, because the zero register is not available as a base register. SimplifyAddress needs to emit the shift instruction and use its result as the base register.

llvm-svn: 216621
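For context, here is a minimal sketch of the kind of IR that exercises this path; it mirrors the load_shift_offreg_1 test added below. The shift result is used directly as the pointer, so there is no separate base register, and the zero register cannot stand in for one in the base position:

    ; The shifted value is the whole address: FastISel must materialize the
    ; shift (lsl) into a register and use that register as the base.
    define i32 @load_shift_offreg_1(i64 %a) {
      %1 = shl i64 %a, 2            ; address = %a << 2
      %2 = inttoptr i64 %1 to i32*  ; typed-pointer syntax, as in the 2014 tree
      %3 = load i32* %2             ; no separate base register is available
      ret i32 %3
    }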
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FastISel.cpp               4
-rw-r--r--  llvm/test/CodeGen/AArch64/fast-isel-addressing-modes.ll  21
2 files changed, 25 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index a6e7daa0d9b..2faa1398064 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -708,6 +708,10 @@ bool AArch64FastISel::SimplifyAddress(Address &Addr, MVT VT) {
       Addr.getOffsetReg())
     RegisterOffsetNeedsLowering = true;
 
+  // Cannot encode zero register as base.
+  if (Addr.isRegBase() && Addr.getOffsetReg() && !Addr.getReg())
+    RegisterOffsetNeedsLowering = true;
+
   // If this is a stack pointer and the offset needs to be simplified then put
   // the alloca address into a register, set the base type back to register and
   // continue. This should almost never happen.
diff --git a/llvm/test/CodeGen/AArch64/fast-isel-addressing-modes.ll b/llvm/test/CodeGen/AArch64/fast-isel-addressing-modes.ll
index 86ba400cff2..750e081d423 100644
--- a/llvm/test/CodeGen/AArch64/fast-isel-addressing-modes.ll
+++ b/llvm/test/CodeGen/AArch64/fast-isel-addressing-modes.ll
@@ -107,6 +107,16 @@ define void @store_breg_f64(double* %a) {
   ret void
 }
 
+; Load Immediate
+define i32 @load_immoff_1() {
+; CHECK-LABEL: load_immoff_1
+; CHECK:       orr {{w|x}}[[REG:[0-9]+]], {{wzr|xzr}}, #0x80
+; CHECK:       ldr {{w[0-9]+}}, {{\[}}x[[REG]]{{\]}}
+  %1 = inttoptr i64 128 to i32*
+  %2 = load i32* %1
+  ret i32 %2
+}
+
 ; Load / Store Base Register + Immediate Offset
 ; Max supported negative offset
 define i32 @load_breg_immoff_1(i64 %a) {
@@ -318,6 +328,17 @@ define i64 @load_breg_offreg_immoff_2(i64 %a, i64 %b) {
   ret i64 %4
 }
 
+; Load Scaled Register Offset
+define i32 @load_shift_offreg_1(i64 %a) {
+; CHECK-LABEL: load_shift_offreg_1
+; CHECK:       lsl [[REG:x[0-9]+]], x0, #2
+; CHECK:       ldr {{w[0-9]+}}, {{\[}}[[REG]]{{\]}}
+  %1 = shl i64 %a, 2
+  %2 = inttoptr i64 %1 to i32*
+  %3 = load i32* %2
+  ret i32 %3
+}
+
 ; Load Base Register + Scaled Register Offset
 define i32 @load_breg_shift_offreg_1(i64 %a, i64 %b) {
 ; CHECK-LABEL: load_breg_shift_offreg_1