From 2341c22ec71aed773101eef6bc725df2047e5154 Mon Sep 17 00:00:00 2001
From: Reid Spencer
Date: Fri, 2 Feb 2007 02:16:23 +0000
Subject: Changes to support making the shift instructions be true
 BinaryOperators.

This feature is needed in order to support shifts of more than 255 bits
on large integer types. This changes the syntax for llvm assembly to make
shl, ashr and lshr instructions look like a binary operator:

  shl i32 %X, 1

instead of

  shl i32 %X, i8 1

Additionally, this should help a few passes perform additional optimizations.

llvm-svn: 33776
---
 llvm/test/CodeGen/ARM/bits.ll | 42 +++++++++++++++++++++---------------------
 1 file changed, 21 insertions(+), 21 deletions(-)

(limited to 'llvm/test/CodeGen/ARM/bits.ll')

diff --git a/llvm/test/CodeGen/ARM/bits.ll b/llvm/test/CodeGen/ARM/bits.ll
index c5052e5bacb..7a0a08c301f 100644
--- a/llvm/test/CodeGen/ARM/bits.ll
+++ b/llvm/test/CodeGen/ARM/bits.ll
@@ -1,36 +1,36 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep and | wc -l | grep 1 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep orr | wc -l | grep 1 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep eor | wc -l | grep 1 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep mov.*lsl | wc -l | grep 1 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep mov.*asr | wc -l | grep 1
+; RUN: llvm-as < %s | llc -march=arm &&
+; RUN: llvm-as < %s | llc -march=arm | grep and | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -march=arm | grep orr | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -march=arm | grep eor | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -march=arm | grep mov.*lsl | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -march=arm | grep mov.*asr | wc -l | grep 1
 
-int %f1(int %a, int %b) {
+define i32 @f1(i32 %a, i32 %b) {
 entry:
-	%tmp2 = and int %b, %a		; [#uses=1]
-	ret int %tmp2
+	%tmp2 = and i32 %b, %a		; [#uses=1]
+	ret i32 %tmp2
 }
 
-int %f2(int %a, int %b) {
+define i32 @f2(i32 %a, i32 %b) {
 entry:
-	%tmp2 = or int %b, %a		; [#uses=1]
-	ret int %tmp2
+	%tmp2 = or i32 %b, %a		; [#uses=1]
+	ret i32 %tmp2
 }
 
-int %f3(int %a, int %b) {
+define i32 @f3(i32 %a, i32 %b) {
 entry:
-	%tmp2 = xor int %b, %a		; [#uses=1]
-	ret int %tmp2
+	%tmp2 = xor i32 %b, %a		; [#uses=1]
+	ret i32 %tmp2
 }
 
-int %f4(int %a, ubyte %b) {
+define i32 @f4(i32 %a, i32 %b) {
 entry:
-	%tmp3 = shl int %a, ubyte %b		; [#uses=1]
-	ret int %tmp3
+	%tmp3 = shl i32 %a, %b		; [#uses=1]
+	ret i32 %tmp3
 }
 
-int %f5(int %a, ubyte %b) {
+define i32 @f5(i32 %a, i32 %b) {
 entry:
-	%tmp3 = shr int %a, ubyte %b		; [#uses=1]
-	ret int %tmp3
+	%tmp3 = ashr i32 %a, %b		; [#uses=1]
+	ret i32 %tmp3
 }
--
cgit v1.2.3