| field | value |
|---|---|
| author | Reid Spencer <rspencer@reidspencer.com>, 2007-02-02 02:16:23 +0000 |
| committer | Reid Spencer <rspencer@reidspencer.com>, 2007-02-02 02:16:23 +0000 |
| commit | 2341c22ec71aed773101eef6bc725df2047e5154 |
| tree | 453d820479bbe83769e54e01abc702ac8ef71312 /llvm/test/CodeGen/ARM |
| parent | 48b094d9ddbca690da41f5711d8e1fcb46c50e05 |
| download | bcm5719-llvm-2341c22ec71aed773101eef6bc725df2047e5154.tar.gz, bcm5719-llvm-2341c22ec71aed773101eef6bc725df2047e5154.zip |
Changes to make the shift instructions true BinaryOperators.
This is needed to support shifts of more than 255 bits on large integer
types: the shift amount was previously an i8 operand, which cannot express
an amount greater than 255. The llvm assembly syntax changes so that shl,
ashr and lshr read like any other binary operator, with the shift amount
taking the type of the value being shifted:
    shl i32 %X, 1
instead of
    shl i32 %X, i8 1
This should also let a few passes perform further optimizations.
llvm-svn: 33776
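
To make the new form concrete, here is a minimal sketch on a wide integer
type (the function is hypothetical, not part of this commit): because the
shift amount is now carried in the value's own type, an amount of 256 is
representable, which the old i8 operand could not encode.

    define i512 @high_half(i512 %x) {
      ; amount 256 > 255, so it was inexpressible as an i8 shift operand
      %hi = lshr i512 %x, 256
      ret i512 %hi
    }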
Diffstat (limited to 'llvm/test/CodeGen/ARM')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll | 12 |
| -rw-r--r-- | llvm/test/CodeGen/ARM/2007-01-31-RegInfoAssert.ll | 2 |
| -rw-r--r-- | llvm/test/CodeGen/ARM/bits.ll | 42 |
| -rw-r--r-- | llvm/test/CodeGen/ARM/long_shift.ll | 16 |
| -rw-r--r-- | llvm/test/CodeGen/ARM/sxt_rot.ll | 8 |
| -rw-r--r-- | llvm/test/CodeGen/ARM/uxt_rot.ll | 4 |
6 files changed, 42 insertions, 42 deletions
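
Every hunk below applies the same mechanical rewrite: constant shift
amounts drop their explicit i8 type, and variable shift amounts are
retyped to match the value being shifted. Condensed to a hypothetical
one-instruction before/after (the pattern of @f1 in long_shift.ll):

    ; before: the amount carries its own i8 type
    %a = shl i64 %x, i8 %y
    ; after: the amount shares the i64 type of %x
    %a = shl i64 %x, %y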
diff --git a/llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll b/llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
index c69798b0b31..3661c4c06d6 100644
--- a/llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
+++ b/llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
@@ -19,13 +19,13 @@ cond_next489:  ; preds = %cond_false, %bb471
     %tmp502 = load i32* null  ; <i32> [#uses=1]
     %tmp542 = getelementptr [6 x [4 x [4 x i32]]]* @quant_coef, i32 0, i32 0, i32 %i.8, i32 %j.7  ; <i32*> [#uses=1]
     %tmp543 = load i32* %tmp542  ; <i32> [#uses=1]
-    %tmp548 = ashr i32 0, i8 0  ; <i32> [#uses=3]
+    %tmp548 = ashr i32 0, 0  ; <i32> [#uses=3]
     %tmp561 = sub i32 0, %tmp496  ; <i32> [#uses=3]
     %abscond563 = icmp sgt i32 %tmp561, -1  ; <i1> [#uses=1]
     %abs564 = select i1 %abscond563, i32 %tmp561, i32 0  ; <i32> [#uses=1]
     %tmp572 = mul i32 %abs564, %tmp543  ; <i32> [#uses=1]
     %tmp574 = add i32 %tmp572, 0  ; <i32> [#uses=1]
-    %tmp576 = ashr i32 %tmp574, i8 0  ; <i32> [#uses=7]
+    %tmp576 = ashr i32 %tmp574, 0  ; <i32> [#uses=7]
     %tmp579 = icmp eq i32 %tmp548, %tmp576  ; <i1> [#uses=1]
     br i1 %tmp579, label %bb712, label %cond_next589

@@ -40,8 +40,8 @@ cond_next589:  ; preds = %cond_next489
     %tmp642 = call fastcc i32 @sign( i32 %tmp576, i32 %tmp561 )  ; <i32> [#uses=1]
     %tmp650 = mul i32 %tmp606, %tmp642  ; <i32> [#uses=1]
     %tmp656 = mul i32 %tmp650, %tmp612  ; <i32> [#uses=1]
-    %tmp658 = shl i32 %tmp656, i8 0  ; <i32> [#uses=1]
-    %tmp659 = ashr i32 %tmp658, i8 6  ; <i32> [#uses=1]
+    %tmp658 = shl i32 %tmp656, 0  ; <i32> [#uses=1]
+    %tmp659 = ashr i32 %tmp658, 6  ; <i32> [#uses=1]
     %tmp660 = sub i32 0, %tmp659  ; <i32> [#uses=1]
     %tmp666 = sub i32 %tmp660, %tmp496  ; <i32> [#uses=1]
     %tmp667 = sitofp i32 %tmp666 to double  ; <double> [#uses=2]
@@ -85,8 +85,8 @@ cond_true740:  ; preds = %bb737
     %tmp786 = load i32* %tmp785  ; <i32> [#uses=1]
     %tmp781 = mul i32 %tmp780, %tmp761  ; <i32> [#uses=1]
     %tmp787 = mul i32 %tmp781, %tmp786  ; <i32> [#uses=1]
-    %tmp789 = shl i32 %tmp787, i8 0  ; <i32> [#uses=1]
-    %tmp790 = ashr i32 %tmp789, i8 6  ; <i32> [#uses=1]
+    %tmp789 = shl i32 %tmp787, 0  ; <i32> [#uses=1]
+    %tmp790 = ashr i32 %tmp789, 6  ; <i32> [#uses=1]
     br label %cond_next791

 cond_next791:  ; preds = %cond_true740, %bb737
diff --git a/llvm/test/CodeGen/ARM/2007-01-31-RegInfoAssert.ll b/llvm/test/CodeGen/ARM/2007-01-31-RegInfoAssert.ll
index a3ccf6e2d63..a5fdf3ba226 100644
--- a/llvm/test/CodeGen/ARM/2007-01-31-RegInfoAssert.ll
+++ b/llvm/test/CodeGen/ARM/2007-01-31-RegInfoAssert.ll
@@ -7,7 +7,7 @@ define void @f1() {
     %D = alloca %struct.rtx_def, align 1
     %tmp1 = bitcast %struct.rtx_def* %D to i32*
     %tmp7 = load i32* %tmp1
-    %tmp14 = lshr i32 %tmp7, i8 1
+    %tmp14 = lshr i32 %tmp7, 1
     %tmp1415 = and i32 %tmp14, 1
     call void (i32, ...)* @printf( i32 undef, i32 0, i32 %tmp1415 )
     ret void
diff --git a/llvm/test/CodeGen/ARM/bits.ll b/llvm/test/CodeGen/ARM/bits.ll
index c5052e5bacb..7a0a08c301f 100644
--- a/llvm/test/CodeGen/ARM/bits.ll
+++ b/llvm/test/CodeGen/ARM/bits.ll
@@ -1,36 +1,36 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep and | wc -l | grep 1 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep orr | wc -l | grep 1 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep eor | wc -l | grep 1 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep mov.*lsl | wc -l | grep 1 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep mov.*asr | wc -l | grep 1
+; RUN: llvm-as < %s | llc -march=arm &&
+; RUN: llvm-as < %s | llc -march=arm | grep and | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -march=arm | grep orr | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -march=arm | grep eor | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -march=arm | grep mov.*lsl | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -march=arm | grep mov.*asr | wc -l | grep 1

-int %f1(int %a, int %b) {
+define i32 @f1(i32 %a, i32 %b) {
 entry:
-    %tmp2 = and int %b, %a  ; <int> [#uses=1]
-    ret int %tmp2
+    %tmp2 = and i32 %b, %a  ; <i32> [#uses=1]
+    ret i32 %tmp2
 }

-int %f2(int %a, int %b) {
+define i32 @f2(i32 %a, i32 %b) {
 entry:
-    %tmp2 = or int %b, %a  ; <int> [#uses=1]
-    ret int %tmp2
+    %tmp2 = or i32 %b, %a  ; <i32> [#uses=1]
+    ret i32 %tmp2
 }

-int %f3(int %a, int %b) {
+define i32 @f3(i32 %a, i32 %b) {
 entry:
-    %tmp2 = xor int %b, %a  ; <int> [#uses=1]
-    ret int %tmp2
+    %tmp2 = xor i32 %b, %a  ; <i32> [#uses=1]
+    ret i32 %tmp2
 }

-int %f4(int %a, ubyte %b) {
+define i32 @f4(i32 %a, i32 %b) {
 entry:
-    %tmp3 = shl int %a, ubyte %b  ; <int> [#uses=1]
-    ret int %tmp3
+    %tmp3 = shl i32 %a, %b  ; <i32> [#uses=1]
+    ret i32 %tmp3
 }

-int %f5(int %a, ubyte %b) {
+define i32 @f5(i32 %a, i32 %b) {
 entry:
-    %tmp3 = shr int %a, ubyte %b  ; <int> [#uses=1]
-    ret int %tmp3
+    %tmp3 = ashr i32 %a, %b  ; <i32> [#uses=1]
+    ret i32 %tmp3
 }
diff --git a/llvm/test/CodeGen/ARM/long_shift.ll b/llvm/test/CodeGen/ARM/long_shift.ll
index 09b3d1e1647..515cb178a60 100644
--- a/llvm/test/CodeGen/ARM/long_shift.ll
+++ b/llvm/test/CodeGen/ARM/long_shift.ll
@@ -5,27 +5,27 @@
 ; RUN: llvm-as < %s | llc -march=arm | grep __lshrdi3 &&
 ; RUN: llvm-as < %s | llc -march=arm -enable-thumb

-define i64 @f00(i64 %A, i64 %B) {
+define i64 @f0(i64 %A, i64 %B) {
     %tmp = bitcast i64 %A to i64
-    %tmp2 = lshr i64 %B, i8 1
+    %tmp2 = lshr i64 %B, 1
     %tmp3 = sub i64 %tmp, %tmp2
     ret i64 %tmp3
 }

-define i32 @f1(i64 %x, i8 %y) {
-    %a = shl i64 %x, i8 %y
+define i32 @f1(i64 %x, i64 %y) {
+    %a = shl i64 %x, %y
     %b = trunc i64 %a to i32
     ret i32 %b
 }

-define i32 @f2(i64 %x, i8 %y) {
-    %a = ashr i64 %x, i8 %y
+define i32 @f2(i64 %x, i64 %y) {
+    %a = ashr i64 %x, %y
     %b = trunc i64 %a to i32
     ret i32 %b
 }

-define i32 @f3(i64 %x, i8 %y) {
-    %a = lshr i64 %x, i8 %y
+define i32 @f3(i64 %x, i64 %y) {
+    %a = lshr i64 %x, %y
     %b = trunc i64 %a to i32
     ret i32 %b
 }
diff --git a/llvm/test/CodeGen/ARM/sxt_rot.ll b/llvm/test/CodeGen/ARM/sxt_rot.ll
index 3f1483b433a..f865ac131ae 100644
--- a/llvm/test/CodeGen/ARM/sxt_rot.ll
+++ b/llvm/test/CodeGen/ARM/sxt_rot.ll
@@ -4,16 +4,16 @@
 ; RUN: llvm-as < %s | llc -march=arm -mattr=+v6 | grep "sxtab" | wc -l | grep 1

 define i8 @test1(i32 %A) sext {
-    %B = lshr i32 %A, i8 8
-    %C = shl i32 %A, i8 24
+    %B = lshr i32 %A, 8
+    %C = shl i32 %A, 24
     %D = or i32 %B, %C
     %E = trunc i32 %D to i8
     ret i8 %E
 }

 define i32 @test2(i32 %A, i32 %X) sext {
-    %B = lshr i32 %A, i8 8
-    %C = shl i32 %A, i8 24
+    %B = lshr i32 %A, 8
+    %C = shl i32 %A, 24
     %D = or i32 %B, %C
     %E = trunc i32 %D to i8
     %F = sext i8 %E to i32
diff --git a/llvm/test/CodeGen/ARM/uxt_rot.ll b/llvm/test/CodeGen/ARM/uxt_rot.ll
index 0c7516ff716..d03ca736d4e 100644
--- a/llvm/test/CodeGen/ARM/uxt_rot.ll
+++ b/llvm/test/CodeGen/ARM/uxt_rot.ll
@@ -17,8 +17,8 @@ define i32 @test2(i32 %A.u, i32 %B.u) zext {
 }

 define i32 @test3(i32 %A.u) zext {
-    %B.u = lshr i32 %A.u, i8 8
-    %C.u = shl i32 %A.u, i8 24
+    %B.u = lshr i32 %A.u, 8
+    %C.u = shl i32 %A.u, 24
     %D.u = or i32 %B.u, %C.u
     %E.u = trunc i32 %D.u to i16
     %F.u = zext i16 %E.u to i32

