author     Tim Northover <tnorthover@apple.com>  2014-06-17 11:31:42 +0000
committer  Tim Northover <tnorthover@apple.com>  2014-06-17 11:31:42 +0000
commit     d5531f72dc1d4c85997a13530fc783c8dfaab25c
tree       4317bae42ddce5cd3c18893c42ac95402db4f342
parent     5d97293e26f32a3cc404f2d6cbc009ba8ab33669
AArch64: estimate inline asm length during branch relaxation
To make sure branches are in range, we need to do a better job of estimating
the length of an inline assembly block than "it's probably 1 instruction, who'd
write asm with more than that?".
Fortunately there's already a (highly suspect, see how many ways you can think
of to break it!) callback for this purpose, which is used by the other targets.
rdar://problem/17277590
llvm-svn: 211095
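To give a feel for the kind of estimate the callback mentioned above makes, here is a minimal standalone C++ sketch, not LLVM's implementation: it splits the asm string into lines and assumes each non-blank line occupies at most one 4-byte AArch64 instruction. The real getInlineAsmLength() consults MCAsmInfo for the target's statement-separator and comment strings and its maximum instruction length; the hardcoded '\n' splitting and the 4-byte factor here are illustrative assumptions only.

#include <iostream>
#include <string>

// Crude upper bound on the code size of an inline asm blob: count non-blank
// lines and assume each is one maximally-sized (4-byte) instruction.
static unsigned estimateInlineAsmBytes(const std::string &Asm,
                                       unsigned MaxInstBytes = 4) {
  unsigned NumInsts = 0;
  std::string::size_type Pos = 0;
  while (Pos < Asm.size()) {
    std::string::size_type End = Asm.find('\n', Pos);
    if (End == std::string::npos)
      End = Asm.size();
    std::string Line = Asm.substr(Pos, End - Pos);
    // A line counts as an instruction if it has anything besides whitespace.
    if (Line.find_first_not_of(" \t") != std::string::npos)
      ++NumInsts;
    Pos = End + 1;
  }
  return NumInsts * MaxInstBytes;
}

int main() {
  // The asm block used in the test added by this commit: six NOPs.
  std::string Asm = "nop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop";
  std::cout << estimateInlineAsmBytes(Asm) << " bytes\n"; // prints "24 bytes"
  return 0;
}

Applied to the six-NOP asm block in the new test, an estimate of this kind yields 24 bytes rather than the old assumption of a single 4-byte instruction, which is what gives branch relaxation an honest picture of block sizes.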
-rw-r--r--  llvm/lib/Target/AArch64/AArch64InstrInfo.cpp    8
-rw-r--r--  llvm/test/CodeGen/AArch64/branch-relax-asm.ll  35
2 files changed, 42 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 8cec4a14807..cabf9ea8726 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -35,8 +35,14 @@ AArch64InstrInfo::AArch64InstrInfo(const AArch64Subtarget &STI)
 /// GetInstSize - Return the number of bytes of code the specified
 /// instruction may be. This returns the maximum number of bytes.
 unsigned AArch64InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
-  const MCInstrDesc &Desc = MI->getDesc();
+  const MachineBasicBlock &MBB = *MI->getParent();
+  const MachineFunction *MF = MBB.getParent();
+  const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
+
+  if (MI->getOpcode() == AArch64::INLINEASM)
+    return getInlineAsmLength(MI->getOperand(0).getSymbolName(), *MAI);
 
+  const MCInstrDesc &Desc = MI->getDesc();
   switch (Desc.getOpcode()) {
   default:
     // Anything not explicitly designated otherwise is a nomal 4-byte insn.
diff --git a/llvm/test/CodeGen/AArch64/branch-relax-asm.ll b/llvm/test/CodeGen/AArch64/branch-relax-asm.ll
new file mode 100644
index 00000000000..7409c84e618
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/branch-relax-asm.ll
@@ -0,0 +1,35 @@
+; RUN: llc -mtriple=aarch64-apple-ios7.0 -disable-block-placement -aarch64-tbz-offset-bits=4 -o - %s | FileCheck %s
+define i32 @test_asm_length(i32 %in) {
+; CHECK-LABEL: test_asm_length:
+
+  ; It would be more natural to use just one "tbnz %false" here, but if the
+  ; number of instructions in the asm is counted reasonably, that block is out
+  ; of the limited range we gave tbz. So branch relaxation has to invert the
+  ; condition.
+; CHECK: tbz w0, #0, [[TRUE:LBB[0-9]+_[0-9]+]]
+; CHECK: b [[FALSE:LBB[0-9]+_[0-9]+]]
+
+; CHECK: [[TRUE]]:
+; CHECK: orr w0, wzr, #0x4
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: nop
+; CHECK: ret
+
+; CHECK: [[FALSE]]:
+; CHECK: ret
+
+  %val = and i32 %in, 1
+  %tst = icmp eq i32 %val, 0
+  br i1 %tst, label %true, label %false
+
+true:
+  call void asm sideeffect "nop\0A\09nop\0A\09nop\0A\09nop\0A\09nop\0A\09nop", ""()
+  ret i32 4
+
+false:
+  ret i32 0
+}
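A rough sanity check of the test's numbers (my own arithmetic, under the assumption that -aarch64-tbz-offset-bits=4 makes the relaxation pass model tbz/tbnz with a signed 4-bit offset counted in 4-byte instruction units, i.e. a forward reach of only a handful of instructions): once the inline asm is counted as 6 instructions, the true block comes to roughly orr + 6 nops + ret = 8 instructions = 32 bytes, so a single "tbnz ... %false" at the top of the function could not reach past it. That is why the CHECK lines expect the inverted form: a short-range tbz to the adjacent true block plus an unconditional b, whose range is far larger, to the false block.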