Diffstat (limited to 'llvm')
3 files changed, 70 insertions, 47 deletions
diff --git a/llvm/test/CodeGen/X86/x86-64-double-precision-shift-left.ll b/llvm/test/CodeGen/X86/x86-64-double-precision-shift-left.ll
index 7515c46f7ce..fcdf4b656e9 100644
--- a/llvm/test/CodeGen/X86/x86-64-double-precision-shift-left.ll
+++ b/llvm/test/CodeGen/X86/x86-64-double-precision-shift-left.ll
@@ -1,6 +1,8 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=bdver1 | FileCheck %s
+
 ; Verify that for the architectures that are known to have poor latency
-; double precision shift instructions we generate alternative sequence 
+; double precision shift instructions we generate alternative sequence
 ; of instructions with lower latencies instead of shld instruction.
 
 ;uint64_t lshift1(uint64_t a, uint64_t b)
@@ -8,11 +10,12 @@
 ;        return (a << 1) | (b >> 63);
 ;}
 
-; CHECK-LABEL: lshift1:
-; CHECK: shrq $63, %rsi
-; CHECK-NEXT: leaq (%rsi,%rdi,2), %rax
-
 define i64 @lshift1(i64 %a, i64 %b) nounwind readnone uwtable {
+; CHECK-LABEL: lshift1:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    shrq $63, %rsi
+; CHECK-NEXT:    leaq (%rsi,%rdi,2), %rax
+; CHECK-NEXT:    retq
 entry:
   %shl = shl i64 %a, 1
   %shr = lshr i64 %b, 63
@@ -25,11 +28,12 @@ entry:
 ;        return (a << 2) | (b >> 62);
 ;}
 
-; CHECK-LABEL: lshift2:
-; CHECK: shrq $62, %rsi
-; CHECK-NEXT: leaq (%rsi,%rdi,4), %rax
-
 define i64 @lshift2(i64 %a, i64 %b) nounwind readnone uwtable {
+; CHECK-LABEL: lshift2:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    shrq $62, %rsi
+; CHECK-NEXT:    leaq (%rsi,%rdi,4), %rax
+; CHECK-NEXT:    retq
 entry:
   %shl = shl i64 %a, 2
   %shr = lshr i64 %b, 62
@@ -42,12 +46,13 @@ entry:
 ;        return (a << 7) | (b >> 57);
 ;}
 
-; CHECK: lshift7:
-; CHECK: shlq $7, {{.*}}
-; CHECK-NEXT: shrq $57, {{.*}}
-; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}
-
 define i64 @lshift7(i64 %a, i64 %b) nounwind readnone uwtable {
+; CHECK-LABEL: lshift7:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    shlq $7, %rdi
+; CHECK-NEXT:    shrq $57, %rsi
+; CHECK-NEXT:    leaq (%rsi,%rdi), %rax
+; CHECK-NEXT:    retq
 entry:
   %shl = shl i64 %a, 7
   %shr = lshr i64 %b, 57
@@ -60,12 +65,13 @@ entry:
 ;        return (a << 63) | (b >> 1);
 ;}
 
-; CHECK: lshift63:
-; CHECK: shlq $63, {{.*}}
-; CHECK-NEXT: shrq {{.*}}
-; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}
-
 define i64 @lshift63(i64 %a, i64 %b) nounwind readnone uwtable {
+; CHECK-LABEL: lshift63:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    shlq $63, %rdi
+; CHECK-NEXT:    shrq %rsi
+; CHECK-NEXT:    leaq (%rsi,%rdi), %rax
+; CHECK-NEXT:    retq
 entry:
   %shl = shl i64 %a, 63
   %shr = lshr i64 %b, 1
diff --git a/llvm/test/CodeGen/X86/x86-64-double-precision-shift-right.ll b/llvm/test/CodeGen/X86/x86-64-double-precision-shift-right.ll
index 5e3f2294171..42df39f98c2 100644
--- a/llvm/test/CodeGen/X86/x86-64-double-precision-shift-right.ll
+++ b/llvm/test/CodeGen/X86/x86-64-double-precision-shift-right.ll
@@ -1,6 +1,8 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=bdver1 | FileCheck %s
+
 ; Verify that for the architectures that are known to have poor latency
-; double precision shift instructions we generate alternative sequence 
+; double precision shift instructions we generate alternative sequence
 ; of instructions with lower latencies instead of shrd instruction.
 
 ;uint64_t rshift1(uint64_t a, uint64_t b)
@@ -8,12 +10,13 @@
 ;        return (a >> 1) | (b << 63);
 ;}
 
-; CHECK: rshift1:
-; CHECK: shrq {{.*}}
-; CHECK-NEXT: shlq $63, {{.*}}
-; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}
-
 define i64 @rshift1(i64 %a, i64 %b) nounwind readnone uwtable {
+; CHECK-LABEL: rshift1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    shrq %rdi
+; CHECK-NEXT:    shlq $63, %rsi
+; CHECK-NEXT:    leaq (%rsi,%rdi), %rax
+; CHECK-NEXT:    retq
   %1 = lshr i64 %a, 1
   %2 = shl i64 %b, 63
   %3 = or i64 %2, %1
@@ -25,13 +28,13 @@ define i64 @rshift1(i64 %a, i64 %b) nounwind readnone uwtable {
 ;        return (a >> 2) | (b << 62);
 ;}
 
-; CHECK: rshift2:
-; CHECK: shrq $2, {{.*}}
-; CHECK-NEXT: shlq $62, {{.*}}
-; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}
-
-
 define i64 @rshift2(i64 %a, i64 %b) nounwind readnone uwtable {
+; CHECK-LABEL: rshift2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    shrq $2, %rdi
+; CHECK-NEXT:    shlq $62, %rsi
+; CHECK-NEXT:    leaq (%rsi,%rdi), %rax
+; CHECK-NEXT:    retq
   %1 = lshr i64 %a, 2
   %2 = shl i64 %b, 62
   %3 = or i64 %2, %1
@@ -43,13 +46,13 @@ define i64 @rshift2(i64 %a, i64 %b) nounwind readnone uwtable {
 ;        return (a >> 7) | (b << 57);
 ;}
 
-; CHECK: rshift7:
-; CHECK: shrq $7, {{.*}}
-; CHECK-NEXT: shlq $57, {{.*}}
-; CHECK-NEXT: leaq ({{.*}},{{.*}}), {{.*}}
-
-
 define i64 @rshift7(i64 %a, i64 %b) nounwind readnone uwtable {
+; CHECK-LABEL: rshift7:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    shrq $7, %rdi
+; CHECK-NEXT:    shlq $57, %rsi
+; CHECK-NEXT:    leaq (%rsi,%rdi), %rax
+; CHECK-NEXT:    retq
   %1 = lshr i64 %a, 7
   %2 = shl i64 %b, 57
   %3 = or i64 %2, %1
@@ -61,11 +64,12 @@ define i64 @rshift7(i64 %a, i64 %b) nounwind readnone uwtable {
 ;        return (a >> 63) | (b << 1);
 ;}
 
-; CHECK-LABEL: rshift63:
-; CHECK: shrq $63, %rdi
-; CHECK-NEXT: leaq (%rdi,%rsi,2), %rax
-
 define i64 @rshift63(i64 %a, i64 %b) nounwind readnone uwtable {
+; CHECK-LABEL: rshift63:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    shrq $63, %rdi
+; CHECK-NEXT:    leaq (%rdi,%rsi,2), %rax
+; CHECK-NEXT:    retq
   %1 = lshr i64 %a, 63
   %2 = shl i64 %b, 1
   %3 = or i64 %2, %1
diff --git a/llvm/test/CodeGen/X86/x86-64-double-shifts-Oz-Os-O2.ll b/llvm/test/CodeGen/X86/x86-64-double-shifts-Oz-Os-O2.ll
index 2fd98727421..15386a30328 100644
--- a/llvm/test/CodeGen/X86/x86-64-double-shifts-Oz-Os-O2.ll
+++ b/llvm/test/CodeGen/X86/x86-64-double-shifts-Oz-Os-O2.ll
@@ -1,8 +1,9 @@
-; RUN: llc < %s -mtriple=x86_64-- -mcpu=bdver1 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown -mcpu=bdver1 | FileCheck %s
 ; clang -Oz -c test1.cpp -emit-llvm -S -o
 ; Verify that we generate shld insruction when we are optimizing for size,
-; even for X86_64 processors that are known to have poor latency double 
+; even for X86_64 processors that are known to have poor latency double
 ; precision shift instructions.
 
 ; uint64_t lshift10(uint64_t a, uint64_t b)
 ; {
@@ -11,8 +12,12 @@
 
 ; Function Attrs: minsize nounwind readnone uwtable
 define i64 @_Z8lshift10mm(i64 %a, i64 %b) #0 {
+; CHECK-LABEL: _Z8lshift10mm:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    shldq $10, %rsi, %rdi
+; CHECK-NEXT:    movq %rdi, %rax
+; CHECK-NEXT:    retq
 entry:
-; CHECK: shldq $10
   %shl = shl i64 %a, 10
   %shr = lshr i64 %b, 54
   %or = or i64 %shr, %shl
@@ -33,8 +38,12 @@ attributes #0 = { minsize nounwind readnone uwtable "less-precise-fpmad"="false"
 
 ; Function Attrs: nounwind optsize readnone uwtable
 define i64 @_Z8lshift11mm(i64 %a, i64 %b) #1 {
+; CHECK-LABEL: _Z8lshift11mm:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    shldq $11, %rsi, %rdi
+; CHECK-NEXT:    movq %rdi, %rax
+; CHECK-NEXT:    retq
 entry:
-; CHECK: shldq $11
   %shl = shl i64 %a, 11
   %shr = lshr i64 %b, 53
   %or = or i64 %shr, %shl
@@ -54,9 +63,13 @@ attributes #1 = { nounwind optsize readnone uwtable "less-precise-fpmad"="false"
 
 ; Function Attrs: nounwind optsize readnone uwtable
 define i64 @_Z8lshift12mm(i64 %a, i64 %b) #2 {
+; CHECK-LABEL: _Z8lshift12mm:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    shlq $12, %rdi
+; CHECK-NEXT:    shrq $52, %rsi
+; CHECK-NEXT:    leaq (%rsi,%rdi), %rax
+; CHECK-NEXT:    retq
 entry:
-; CHECK: shlq $12
-; CHECK-NEXT: shrq $52
   %shl = shl i64 %a, 12
   %shr = lshr i64 %b, 52
   %or = or i64 %shr, %shl
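The NOTE lines added at the top of each file mark the assertions as autogenerated, so if the bdver1 lowering changes, the CHECK lines can be refreshed mechanically rather than edited by hand. A minimal sketch of that invocation, assuming an LLVM source checkout with a locally built llc at build/bin/llc (paths are illustrative, adjust to your tree):

    # Regenerate the FileCheck assertions for the three tests touched here;
    # --llc-binary points the script at the llc to run the RUN lines with.
    llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
        llvm/test/CodeGen/X86/x86-64-double-precision-shift-left.ll \
        llvm/test/CodeGen/X86/x86-64-double-precision-shift-right.ll \
        llvm/test/CodeGen/X86/x86-64-double-shifts-Oz-Os-O2.ll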