author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-02-15 14:09:35 +0000 |
---|---|---|
committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-02-15 14:09:35 +0000 |
commit | 766a659eb5ab50d6dd5d447efb939ba74321c20e (patch) | |
tree | cfa1669ee4c921f1d3a82cd7a68be87e4589e6ae /llvm | |
parent | a62170834d40f8c10b6b32ed6ac5202f98401bf4 (diff) | |
[X86] More thorough partial-register division checks
For when grep counts are just not enough...
llvm-svn: 260891
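The `grep movzbl | count 2` check this commit replaces only asserted how many times one instruction appeared; the autogenerated FileCheck lines below pin down the complete instruction sequence on both a 32-bit and a 64-bit triple. Per the NOTE line in the diff, the assertions come from utils/update_llc_test_checks.py. A minimal sketch of regenerating them, assuming a built llc at build/bin/llc (the --llc-binary flag spelling and paths are assumptions and may vary across LLVM revisions):

```sh
# Hypothetical invocation: regenerate the CHECK lines in-place.
# build/bin/llc is an assumed path; --llc-binary may be spelled
# differently in older revisions of the script.
python llvm/utils/update_llc_test_checks.py \
  --llc-binary=build/bin/llc \
  llvm/test/CodeGen/X86/anyext.ll
```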
Diffstat (limited to 'llvm')
-rw-r--r-- | llvm/test/CodeGen/X86/anyext.ll | 35 |
1 file changed, 34 insertions(+), 1 deletion(-)
diff --git a/llvm/test/CodeGen/X86/anyext.ll b/llvm/test/CodeGen/X86/anyext.ll
index 106fe83661b..0e0230f6be6 100644
--- a/llvm/test/CodeGen/X86/anyext.ll
+++ b/llvm/test/CodeGen/X86/anyext.ll
@@ -1,15 +1,48 @@
-; RUN: llc < %s -march=x86-64 | grep movzbl | count 2
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
 
 ; Use movzbl to avoid partial-register updates.
 define i32 @foo(i32 %p, i8 zeroext %x) nounwind {
+; X32-LABEL: foo:
+; X32: # BB#0:
+; X32-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: divb {{[0-9]+}}(%esp)
+; X32-NEXT: movzbl %al, %eax
+; X32-NEXT: andl $1, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: foo:
+; X64: # BB#0:
+; X64-NEXT: movzbl %dil, %eax
+; X64-NEXT: divb %sil
+; X64-NEXT: movzbl %al, %eax
+; X64-NEXT: andl $1, %eax
+; X64-NEXT: retq
   %q = trunc i32 %p to i8
   %r = udiv i8 %q, %x
   %s = zext i8 %r to i32
   %t = and i32 %s, 1
   ret i32 %t
 }
+
 define i32 @bar(i32 %p, i16 zeroext %x) nounwind {
+; X32-LABEL: bar:
+; X32: # BB#0:
+; X32-NEXT: movw {{[0-9]+}}(%esp), %ax
+; X32-NEXT: xorl %edx, %edx
+; X32-NEXT: divw {{[0-9]+}}(%esp)
+; X32-NEXT: andl $1, %eax
+; X32-NEXT: retl
+;
+; X64-LABEL: bar:
+; X64: # BB#0:
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: movw %di, %ax
+; X64-NEXT: divw %si
+; X64-NEXT: andl $1, %eax
+; X64-NEXT: retq
   %q = trunc i32 %p to i16
   %r = udiv i16 %q, %x
   %s = zext i16 %r to i32
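Background on the "partial-register updates" the test comment refers to (an illustrative sketch, not part of the commit): `divb` implicitly divides %ax by its operand, so the dividend has to be staged in %ax first. Writing only the 8-bit subregister leaves the upper bits of %eax stale and creates a false dependency on whatever defined %eax last; zero-extending with `movzbl` defines the whole register and breaks that dependency, which is the pattern the X64 checks lock in:

```asm
# Illustrative AT&T-syntax sketch (not from the commit): two ways to
# stage an 8-bit dividend before "divb %sil".

# (a) Partial write: only %al is defined, so the CPU must merge it
#     with the stale upper bits of %eax (partial-register dependency).
movb %dil, %al

# (b) Full write: movzbl defines all 32 bits of %eax, no merge needed.
#     This is the form the X64 checks above expect.
movzbl %dil, %eax       # %eax = zero-extended dividend
divb %sil               # %al = %ax / %sil, %ah = remainder
movzbl %al, %eax        # zero-extend the quotient for the i32 return
```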