author     Simon Pilgrim <llvm-dev@redking.me.uk>   2015-06-07 18:34:25 +0000
committer  Simon Pilgrim <llvm-dev@redking.me.uk>   2015-06-07 18:34:25 +0000
commit     3a7718038d1d10b9c961399d8ea9e1ef6fe3e68f (patch)
tree       9a349c9b65510900f1c10e3b49ca5d3c00b97337
parent     82f865277ea18313a1e60ac8bc8d349fd931c420 (diff)
[X86] Added BitScanForward/BitScanReverse memory folding + tests
llvm-svn: 239257
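
Each MemoryFoldTable1 entry added below maps the register-register form of an instruction (e.g. BSF32rr) to its register-memory form (BSF32rm), so a stack reload can be folded directly into the instruction instead of being emitted as a separate load. The following is a minimal standalone sketch of that table-lookup idea; the Opcode enum, MemoryFoldEntry struct, and lookupFold helper are hypothetical stand-ins for illustration, not the actual X86InstrInfo data structures or API.

#include <cstdint>
#include <cstdio>

// Sketch only: hypothetical opcode values standing in for X86::BSF32rr etc.
enum Opcode : uint16_t { BSF32rr, BSF32rm, BSR32rr, BSR32rm, CMP32rr, CMP32rm };

struct MemoryFoldEntry {
  uint16_t RegOp;  // register-register opcode
  uint16_t MemOp;  // equivalent opcode that reads one operand from memory
  uint16_t Flags;  // 0: no extra alignment/size requirements
};

static const MemoryFoldEntry FoldTable1[] = {
  { BSF32rr, BSF32rm, 0 },
  { BSR32rr, BSR32rm, 0 },
  { CMP32rr, CMP32rm, 0 },
};

// Return the memory form for a register-form opcode, or false if it cannot fold.
static bool lookupFold(uint16_t RegOp, uint16_t &MemOp) {
  for (const MemoryFoldEntry &E : FoldTable1)
    if (E.RegOp == RegOp) { MemOp = E.MemOp; return true; }
  return false;
}

int main() {
  uint16_t MemOp = 0;
  if (lookupFold(BSF32rr, MemOp))
    std::printf("BSF32rr folds to opcode %d (BSF32rm)\n", (int)MemOp);
  return 0;
}

With entries like these registered, the register allocator's spill/reload of an operand can be rewritten as the memory form of the instruction, which is exactly what the new test file checks for BSF/BSR.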
-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp           |  6
-rw-r--r--  llvm/test/CodeGen/X86/stack-folding-x86_64.ll  | 51
2 files changed, 57 insertions, 0 deletions
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 43decf7cdda..44068f9190b 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -433,6 +433,12 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
 }
 
 static const X86MemoryFoldTableEntry MemoryFoldTable1[] = {
+  { X86::BSF16rr, X86::BSF16rm, 0 },
+  { X86::BSF32rr, X86::BSF32rm, 0 },
+  { X86::BSF64rr, X86::BSF64rm, 0 },
+  { X86::BSR16rr, X86::BSR16rm, 0 },
+  { X86::BSR32rr, X86::BSR32rm, 0 },
+  { X86::BSR64rr, X86::BSR64rm, 0 },
   { X86::CMP16rr, X86::CMP16rm, 0 },
   { X86::CMP32rr, X86::CMP32rm, 0 },
   { X86::CMP64rr, X86::CMP64rm, 0 },
diff --git a/llvm/test/CodeGen/X86/stack-folding-x86_64.ll b/llvm/test/CodeGen/X86/stack-folding-x86_64.ll
new file mode 100644
index 00000000000..211227916a0
--- /dev/null
+++ b/llvm/test/CodeGen/X86/stack-folding-x86_64.ll
@@ -0,0 +1,51 @@
+; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mcpu=x86-64 < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+; Stack reload folding tests.
+;
+; By including a nop call with sideeffects we can force a partial register spill of the
+; relevant registers and check that the reload is correctly folded into the instruction.
+
+;TODO stack_fold_bsf_i16
+declare i16 @llvm.cttz.i16(i16, i1)
+
+define i32 @stack_fold_bsf_i32(i32 %a0) {
+  ;CHECK-LABEL: stack_fold_bsf_i32
+  ;CHECK: bsfl {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+  %2 = call i32 @llvm.cttz.i32(i32 %a0, i1 -1)
+  ret i32 %2
+}
+declare i32 @llvm.cttz.i32(i32, i1)
+
+define i64 @stack_fold_bsf_i64(i64 %a0) {
+  ;CHECK-LABEL: stack_fold_bsf_i64
+  ;CHECK: bsfq {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+  %2 = call i64 @llvm.cttz.i64(i64 %a0, i1 -1)
+  ret i64 %2
+}
+declare i64 @llvm.cttz.i64(i64, i1)
+
+;TODO stack_fold_bsr_i16
+declare i16 @llvm.ctlz.i16(i16, i1)
+
+define i32 @stack_fold_bsr_i32(i32 %a0) {
+  ;CHECK-LABEL: stack_fold_bsr_i32
+  ;CHECK: bsrl {{-?[0-9]*}}(%rsp), %eax {{.*#+}} 4-byte Folded Reload
+  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+  %2 = call i32 @llvm.ctlz.i32(i32 %a0, i1 -1)
+  ret i32 %2
+}
+declare i32 @llvm.ctlz.i32(i32, i1)
+
+define i64 @stack_fold_bsr_i64(i64 %a0) {
+  ;CHECK-LABEL: stack_fold_bsr_i64
+  ;CHECK: bsrq {{-?[0-9]*}}(%rsp), %rax {{.*#+}} 8-byte Folded Reload
+  %1 = tail call i64 asm sideeffect "nop", "=x,~{rax},~{rbx},~{rcx},~{rdx},~{rsi},~{rdi},~{rbp},~{r8},~{r9},~{r10},~{r11},~{r12},~{r13},~{r14},~{r15}"()
+  %2 = call i64 @llvm.ctlz.i64(i64 %a0, i1 -1)
+  ret i64 %2
+}
+declare i64 @llvm.ctlz.i64(i64, i1)