From 766a659eb5ab50d6dd5d447efb939ba74321c20e Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Mon, 15 Feb 2016 14:09:35 +0000
Subject: [X86] More thorough partial-register division checks

For when grep counts are just not enough...

llvm-svn: 260891
---
 llvm/test/CodeGen/X86/anyext.ll | 35 ++++++++++++++++++++++++++++++++++-
 1 file changed, 34 insertions(+), 1 deletion(-)

(limited to 'llvm/test')

diff --git a/llvm/test/CodeGen/X86/anyext.ll b/llvm/test/CodeGen/X86/anyext.ll
index 106fe83661b..0e0230f6be6 100644
--- a/llvm/test/CodeGen/X86/anyext.ll
+++ b/llvm/test/CodeGen/X86/anyext.ll
@@ -1,15 +1,48 @@
-; RUN: llc < %s -march=x86-64 | grep movzbl | count 2
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
 
 ; Use movzbl to avoid partial-register updates.
 
 define i32 @foo(i32 %p, i8 zeroext %x) nounwind {
+; X32-LABEL: foo:
+; X32:       # BB#0:
+; X32-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    divb {{[0-9]+}}(%esp)
+; X32-NEXT:    movzbl %al, %eax
+; X32-NEXT:    andl $1, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: foo:
+; X64:       # BB#0:
+; X64-NEXT:    movzbl %dil, %eax
+; X64-NEXT:    divb %sil
+; X64-NEXT:    movzbl %al, %eax
+; X64-NEXT:    andl $1, %eax
+; X64-NEXT:    retq
   %q = trunc i32 %p to i8
   %r = udiv i8 %q, %x
   %s = zext i8 %r to i32
   %t = and i32 %s, 1
   ret i32 %t
 }
+
 define i32 @bar(i32 %p, i16 zeroext %x) nounwind {
+; X32-LABEL: bar:
+; X32:       # BB#0:
+; X32-NEXT:    movw {{[0-9]+}}(%esp), %ax
+; X32-NEXT:    xorl %edx, %edx
+; X32-NEXT:    divw {{[0-9]+}}(%esp)
+; X32-NEXT:    andl $1, %eax
+; X32-NEXT:    retl
+;
+; X64-LABEL: bar:
+; X64:       # BB#0:
+; X64-NEXT:    xorl %edx, %edx
+; X64-NEXT:    movw %di, %ax
+; X64-NEXT:    divw %si
+; X64-NEXT:    andl $1, %eax
+; X64-NEXT:    retq
   %q = trunc i32 %p to i16
   %r = udiv i16 %q, %x
   %s = zext i16 %r to i32
--
cgit v1.2.3