Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/Assembler/2003-05-21-MalformedShiftCrash.llx |  4
-rw-r--r--  llvm/test/Assembler/2007-02-01-UpgradeShift.ll | 18
-rw-r--r--  llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll | 12
-rw-r--r--  llvm/test/CodeGen/ARM/2007-01-31-RegInfoAssert.ll |  2
-rw-r--r--  llvm/test/CodeGen/ARM/bits.ll | 42
-rw-r--r--  llvm/test/CodeGen/ARM/long_shift.ll | 16
-rw-r--r--  llvm/test/CodeGen/ARM/sxt_rot.ll |  8
-rw-r--r--  llvm/test/CodeGen/ARM/uxt_rot.ll |  4
-rw-r--r--  llvm/test/CodeGen/Alpha/add.ll | 32
-rw-r--r--  llvm/test/CodeGen/PowerPC/and-elim.ll |  4
-rw-r--r--  llvm/test/CodeGen/PowerPC/and_sext.ll |  4
-rw-r--r--  llvm/test/CodeGen/PowerPC/rlwinm2.ll | 43
-rw-r--r--  llvm/test/CodeGen/PowerPC/rotl.ll | 67
-rw-r--r--  llvm/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll | 22
-rw-r--r--  llvm/test/CodeGen/X86/trunc-to-bool.ll |  3
-rw-r--r--  llvm/test/Integer/BitBit.ll |  9
-rw-r--r--  llvm/test/Integer/a1.ll |  8
-rw-r--r--  llvm/test/Integer/a15.ll | 12
-rw-r--r--  llvm/test/Integer/a17.ll | 12
-rw-r--r--  llvm/test/Integer/a31.ll | 12
-rw-r--r--  llvm/test/Integer/a33.ll | 12
-rw-r--r--  llvm/test/Integer/a63.ll | 12
-rw-r--r--  llvm/test/Integer/a7.ll | 14
-rw-r--r--  llvm/test/Integer/a9.ll | 12
-rw-r--r--  llvm/test/Integer/testarith_bt.ll |  6
-rw-r--r--  llvm/test/Integer/testlogical_new_bt.ll |  6
-rw-r--r--  llvm/test/Transforms/InstCombine/add.ll |  6
-rw-r--r--  llvm/test/Transforms/InstCombine/shift-simplify.ll | 33
-rw-r--r--  llvm/test/Transforms/InstCombine/shift-sra.ll |  3
-rw-r--r--  llvm/test/Transforms/InstCombine/signext.ll |  3
-rw-r--r--  llvm/test/Transforms/Reassociate/shifttest.ll |  2
31 files changed, 224 insertions, 219 deletions
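Every hunk below follows one pattern: shift instructions (and shift constant expressions) used to take their amount as a separate 8-bit operand (ubyte in the old assembly syntax, i8 in the new), and after this change the amount must have the same type as the value being shifted. A minimal before/after sketch (illustrative only; %X and %n are placeholder values, not copied from any single file below):

; Old form: the shift amount is an independent i8/ubyte operand.
%a = shl i32 %X, i8 2
; New form: the amount carries the same type as the shifted value.
%b = shl i32 %X, 2
; A variable amount of a narrower type now needs an explicit extension,
; as in the BitBit.ll hunk below (%n is a placeholder i8 value):
%amt = zext i8 %n to i32
%c = lshr i32 %X, %amt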
diff --git a/llvm/test/Assembler/2003-05-21-MalformedShiftCrash.llx b/llvm/test/Assembler/2003-05-21-MalformedShiftCrash.llx
index d1618dae969..ef3445b13fe 100644
--- a/llvm/test/Assembler/2003-05-21-MalformedShiftCrash.llx
+++ b/llvm/test/Assembler/2003-05-21-MalformedShiftCrash.llx
@@ -1,4 +1,4 @@
; Found by inspection of the code
-; RUN: llvm-as 2>&1 < %s > /dev/null | grep "Shift constant expression"
+; RUN: llvm-as 2>&1 < %s > /dev/null | grep "Logical operator requires integral"
-global i32 ashr (float 1.0, i8 2)
+global i32 ashr (float 1.0, float 2.0)
diff --git a/llvm/test/Assembler/2007-02-01-UpgradeShift.ll b/llvm/test/Assembler/2007-02-01-UpgradeShift.ll
new file mode 100644
index 00000000000..393124788f9
--- /dev/null
+++ b/llvm/test/Assembler/2007-02-01-UpgradeShift.ll
@@ -0,0 +1,18 @@
+; Test that upgrading shift instructions and constant expressions works
+; correctly.
+; RUN: llvm-upgrade < %s | grep 'ashr i32 .X, 2' &&
+; RUN: llvm-upgrade < %s | grep 'lshr i32 .X, 2' &&
+; RUN: llvm-upgrade < %s | grep 'shl i32 .X, 2' &&
+; RUN: llvm-upgrade < %s | grep 'ashr i32 .X, 6' &&
+; RUN: llvm-upgrade < %s | grep 'lshr i32 .X, 1' &&
+; RUN: llvm-upgrade < %s | grep 'shl i32 .X, 1'
+
+void %test(int %X) {
+ %A = ashr int %X, ubyte 2
+ %B = lshr int %X, ubyte 2
+ %C = shl int %X, ubyte 2
+ %D = ashr int %X, ubyte trunc ( int shl (int 3, ubyte 1) to ubyte )
+ %E = lshr int %X, ubyte trunc ( int ashr (int 3, ubyte 1) to ubyte )
+ %F = shl int %X, ubyte trunc ( int lshr (int 3, ubyte 1) to ubyte )
+ ret void
+}
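For reference, the six greps in the RUN lines above pin down what llvm-upgrade should produce for this body once the inner shift constant expressions are folded (an assumed sketch of the upgraded output, not part of the test file):

%A = ashr i32 %X, 2
%B = lshr i32 %X, 2
%C = shl i32 %X, 2
%D = ashr i32 %X, 6  ; shl (i32 3, 1) folds to 6
%E = lshr i32 %X, 1  ; ashr (i32 3, 1) folds to 1
%F = shl i32 %X, 1   ; lshr (i32 3, 1) folds to 1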
diff --git a/llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll b/llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
index c69798b0b31..3661c4c06d6 100644
--- a/llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
+++ b/llvm/test/CodeGen/ARM/2007-01-19-InfiniteLoop.ll
@@ -19,13 +19,13 @@ cond_next489: ; preds = %cond_false, %bb471
%tmp502 = load i32* null ; <i32> [#uses=1]
%tmp542 = getelementptr [6 x [4 x [4 x i32]]]* @quant_coef, i32 0, i32 0, i32 %i.8, i32 %j.7 ; <i32*> [#uses=1]
%tmp543 = load i32* %tmp542 ; <i32> [#uses=1]
- %tmp548 = ashr i32 0, i8 0 ; <i32> [#uses=3]
+ %tmp548 = ashr i32 0, 0 ; <i32> [#uses=3]
%tmp561 = sub i32 0, %tmp496 ; <i32> [#uses=3]
%abscond563 = icmp sgt i32 %tmp561, -1 ; <i1> [#uses=1]
%abs564 = select i1 %abscond563, i32 %tmp561, i32 0 ; <i32> [#uses=1]
%tmp572 = mul i32 %abs564, %tmp543 ; <i32> [#uses=1]
%tmp574 = add i32 %tmp572, 0 ; <i32> [#uses=1]
- %tmp576 = ashr i32 %tmp574, i8 0 ; <i32> [#uses=7]
+ %tmp576 = ashr i32 %tmp574, 0 ; <i32> [#uses=7]
%tmp579 = icmp eq i32 %tmp548, %tmp576 ; <i1> [#uses=1]
br i1 %tmp579, label %bb712, label %cond_next589
@@ -40,8 +40,8 @@ cond_next589: ; preds = %cond_next489
%tmp642 = call fastcc i32 @sign( i32 %tmp576, i32 %tmp561 ) ; <i32> [#uses=1]
%tmp650 = mul i32 %tmp606, %tmp642 ; <i32> [#uses=1]
%tmp656 = mul i32 %tmp650, %tmp612 ; <i32> [#uses=1]
- %tmp658 = shl i32 %tmp656, i8 0 ; <i32> [#uses=1]
- %tmp659 = ashr i32 %tmp658, i8 6 ; <i32> [#uses=1]
+ %tmp658 = shl i32 %tmp656, 0 ; <i32> [#uses=1]
+ %tmp659 = ashr i32 %tmp658, 6 ; <i32> [#uses=1]
%tmp660 = sub i32 0, %tmp659 ; <i32> [#uses=1]
%tmp666 = sub i32 %tmp660, %tmp496 ; <i32> [#uses=1]
%tmp667 = sitofp i32 %tmp666 to double ; <double> [#uses=2]
@@ -85,8 +85,8 @@ cond_true740: ; preds = %bb737
%tmp786 = load i32* %tmp785 ; <i32> [#uses=1]
%tmp781 = mul i32 %tmp780, %tmp761 ; <i32> [#uses=1]
%tmp787 = mul i32 %tmp781, %tmp786 ; <i32> [#uses=1]
- %tmp789 = shl i32 %tmp787, i8 0 ; <i32> [#uses=1]
- %tmp790 = ashr i32 %tmp789, i8 6 ; <i32> [#uses=1]
+ %tmp789 = shl i32 %tmp787, 0 ; <i32> [#uses=1]
+ %tmp790 = ashr i32 %tmp789, 6 ; <i32> [#uses=1]
br label %cond_next791
cond_next791: ; preds = %cond_true740, %bb737
diff --git a/llvm/test/CodeGen/ARM/2007-01-31-RegInfoAssert.ll b/llvm/test/CodeGen/ARM/2007-01-31-RegInfoAssert.ll
index a3ccf6e2d63..a5fdf3ba226 100644
--- a/llvm/test/CodeGen/ARM/2007-01-31-RegInfoAssert.ll
+++ b/llvm/test/CodeGen/ARM/2007-01-31-RegInfoAssert.ll
@@ -7,7 +7,7 @@ define void @f1() {
%D = alloca %struct.rtx_def, align 1
%tmp1 = bitcast %struct.rtx_def* %D to i32*
%tmp7 = load i32* %tmp1
- %tmp14 = lshr i32 %tmp7, i8 1
+ %tmp14 = lshr i32 %tmp7, 1
%tmp1415 = and i32 %tmp14, 1
call void (i32, ...)* @printf( i32 undef, i32 0, i32 %tmp1415 )
ret void
diff --git a/llvm/test/CodeGen/ARM/bits.ll b/llvm/test/CodeGen/ARM/bits.ll
index c5052e5bacb..7a0a08c301f 100644
--- a/llvm/test/CodeGen/ARM/bits.ll
+++ b/llvm/test/CodeGen/ARM/bits.ll
@@ -1,36 +1,36 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep and | wc -l | grep 1 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep orr | wc -l | grep 1 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep eor | wc -l | grep 1 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep mov.*lsl | wc -l | grep 1 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=arm | grep mov.*asr | wc -l | grep 1
+; RUN: llvm-as < %s | llc -march=arm &&
+; RUN: llvm-as < %s | llc -march=arm | grep and | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -march=arm | grep orr | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -march=arm | grep eor | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -march=arm | grep mov.*lsl | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -march=arm | grep mov.*asr | wc -l | grep 1
-int %f1(int %a, int %b) {
+define i32 @f1(i32 %a, i32 %b) {
entry:
- %tmp2 = and int %b, %a ; <int> [#uses=1]
- ret int %tmp2
+ %tmp2 = and i32 %b, %a ; <i32> [#uses=1]
+ ret i32 %tmp2
}
-int %f2(int %a, int %b) {
+define i32 @f2(i32 %a, i32 %b) {
entry:
- %tmp2 = or int %b, %a ; <int> [#uses=1]
- ret int %tmp2
+ %tmp2 = or i32 %b, %a ; <i32> [#uses=1]
+ ret i32 %tmp2
}
-int %f3(int %a, int %b) {
+define i32 @f3(i32 %a, i32 %b) {
entry:
- %tmp2 = xor int %b, %a ; <int> [#uses=1]
- ret int %tmp2
+ %tmp2 = xor i32 %b, %a ; <i32> [#uses=1]
+ ret i32 %tmp2
}
-int %f4(int %a, ubyte %b) {
+define i32 @f4(i32 %a, i32 %b) {
entry:
- %tmp3 = shl int %a, ubyte %b ; <int> [#uses=1]
- ret int %tmp3
+ %tmp3 = shl i32 %a, %b ; <i32> [#uses=1]
+ ret i32 %tmp3
}
-int %f5(int %a, ubyte %b) {
+define i32 @f5(i32 %a, i32 %b) {
entry:
- %tmp3 = shr int %a, ubyte %b ; <int> [#uses=1]
- ret int %tmp3
+ %tmp3 = ashr i32 %a, %b ; <i32> [#uses=1]
+ ret i32 %tmp3
}
diff --git a/llvm/test/CodeGen/ARM/long_shift.ll b/llvm/test/CodeGen/ARM/long_shift.ll
index 09b3d1e1647..515cb178a60 100644
--- a/llvm/test/CodeGen/ARM/long_shift.ll
+++ b/llvm/test/CodeGen/ARM/long_shift.ll
@@ -5,27 +5,27 @@
; RUN: llvm-as < %s | llc -march=arm | grep __lshrdi3 &&
; RUN: llvm-as < %s | llc -march=arm -enable-thumb
-define i64 @f00(i64 %A, i64 %B) {
+define i64 @f0(i64 %A, i64 %B) {
%tmp = bitcast i64 %A to i64
- %tmp2 = lshr i64 %B, i8 1
+ %tmp2 = lshr i64 %B, 1
%tmp3 = sub i64 %tmp, %tmp2
ret i64 %tmp3
}
-define i32 @f1(i64 %x, i8 %y) {
- %a = shl i64 %x, i8 %y
+define i32 @f1(i64 %x, i64 %y) {
+ %a = shl i64 %x, %y
%b = trunc i64 %a to i32
ret i32 %b
}
-define i32 @f2(i64 %x, i8 %y) {
- %a = ashr i64 %x, i8 %y
+define i32 @f2(i64 %x, i64 %y) {
+ %a = ashr i64 %x, %y
%b = trunc i64 %a to i32
ret i32 %b
}
-define i32 @f3(i64 %x, i8 %y) {
- %a = lshr i64 %x, i8 %y
+define i32 @f3(i64 %x, i64 %y) {
+ %a = lshr i64 %x, %y
%b = trunc i64 %a to i32
ret i32 %b
}
diff --git a/llvm/test/CodeGen/ARM/sxt_rot.ll b/llvm/test/CodeGen/ARM/sxt_rot.ll
index 3f1483b433a..f865ac131ae 100644
--- a/llvm/test/CodeGen/ARM/sxt_rot.ll
+++ b/llvm/test/CodeGen/ARM/sxt_rot.ll
@@ -4,16 +4,16 @@
; RUN: llvm-as < %s | llc -march=arm -mattr=+v6 | grep "sxtab" | wc -l | grep 1
define i8 @test1(i32 %A) sext {
- %B = lshr i32 %A, i8 8
- %C = shl i32 %A, i8 24
+ %B = lshr i32 %A, 8
+ %C = shl i32 %A, 24
%D = or i32 %B, %C
%E = trunc i32 %D to i8
ret i8 %E
}
define i32 @test2(i32 %A, i32 %X) sext {
- %B = lshr i32 %A, i8 8
- %C = shl i32 %A, i8 24
+ %B = lshr i32 %A, 8
+ %C = shl i32 %A, 24
%D = or i32 %B, %C
%E = trunc i32 %D to i8
%F = sext i8 %E to i32
diff --git a/llvm/test/CodeGen/ARM/uxt_rot.ll b/llvm/test/CodeGen/ARM/uxt_rot.ll
index 0c7516ff716..d03ca736d4e 100644
--- a/llvm/test/CodeGen/ARM/uxt_rot.ll
+++ b/llvm/test/CodeGen/ARM/uxt_rot.ll
@@ -17,8 +17,8 @@ define i32 @test2(i32 %A.u, i32 %B.u) zext {
}
define i32 @test3(i32 %A.u) zext {
- %B.u = lshr i32 %A.u, i8 8
- %C.u = shl i32 %A.u, i8 24
+ %B.u = lshr i32 %A.u, 8
+ %C.u = shl i32 %A.u, 24
%D.u = or i32 %B.u, %C.u
%E.u = trunc i32 %D.u to i16
%F.u = zext i16 %E.u to i32
diff --git a/llvm/test/CodeGen/Alpha/add.ll b/llvm/test/CodeGen/Alpha/add.ll
index 7d1bd0c3a48..7fbfd1235d8 100644
--- a/llvm/test/CodeGen/Alpha/add.ll
+++ b/llvm/test/CodeGen/Alpha/add.ll
@@ -69,112 +69,112 @@ entry:
define i32 @a4l(i32 sext %x.s, i32 sext %y.s) sext {
entry:
- %tmp.1.s = shl i32 %y.s, i8 2 ; <i32> [#uses=1]
+ %tmp.1.s = shl i32 %y.s, 2 ; <i32> [#uses=1]
%tmp.3.s = add i32 %tmp.1.s, %x.s ; <i32> [#uses=1]
ret i32 %tmp.3.s
}
define i32 @a8l(i32 sext %x.s, i32 sext %y.s) sext {
entry:
- %tmp.1.s = shl i32 %y.s, i8 3 ; <i32> [#uses=1]
+ %tmp.1.s = shl i32 %y.s, 3 ; <i32> [#uses=1]
%tmp.3.s = add i32 %tmp.1.s, %x.s ; <i32> [#uses=1]
ret i32 %tmp.3.s
}
define i64 @a4q(i64 %x.s, i64 %y.s) {
entry:
- %tmp.1.s = shl i64 %y.s, i8 2 ; <i64> [#uses=1]
+ %tmp.1.s = shl i64 %y.s, 2 ; <i64> [#uses=1]
%tmp.3.s = add i64 %tmp.1.s, %x.s ; <i64> [#uses=1]
ret i64 %tmp.3.s
}
define i64 @a8q(i64 %x.s, i64 %y.s) {
entry:
- %tmp.1.s = shl i64 %y.s, i8 3 ; <i64> [#uses=1]
+ %tmp.1.s = shl i64 %y.s, 3 ; <i64> [#uses=1]
%tmp.3.s = add i64 %tmp.1.s, %x.s ; <i64> [#uses=1]
ret i64 %tmp.3.s
}
define i32 @a4li(i32 sext %y.s) sext {
entry:
- %tmp.1.s = shl i32 %y.s, i8 2 ; <i32> [#uses=1]
+ %tmp.1.s = shl i32 %y.s, 2 ; <i32> [#uses=1]
%tmp.3.s = add i32 100, %tmp.1.s ; <i32> [#uses=1]
ret i32 %tmp.3.s
}
define i32 @a8li(i32 sext %y.s) sext {
entry:
- %tmp.1.s = shl i32 %y.s, i8 3 ; <i32> [#uses=1]
+ %tmp.1.s = shl i32 %y.s, 3 ; <i32> [#uses=1]
%tmp.3.s = add i32 100, %tmp.1.s ; <i32> [#uses=1]
ret i32 %tmp.3.s
}
define i64 @a4qi(i64 %y.s) {
entry:
- %tmp.1.s = shl i64 %y.s, i8 2 ; <i64> [#uses=1]
+ %tmp.1.s = shl i64 %y.s, 2 ; <i64> [#uses=1]
%tmp.3.s = add i64 100, %tmp.1.s ; <i64> [#uses=1]
ret i64 %tmp.3.s
}
define i64 @a8qi(i64 %y.s) {
entry:
- %tmp.1.s = shl i64 %y.s, i8 3 ; <i64> [#uses=1]
+ %tmp.1.s = shl i64 %y.s, 3 ; <i64> [#uses=1]
%tmp.3.s = add i64 100, %tmp.1.s ; <i64> [#uses=1]
ret i64 %tmp.3.s
}
define i32 @s4l(i32 sext %x.s, i32 sext %y.s) sext {
entry:
- %tmp.1.s = shl i32 %y.s, i8 2 ; <i32> [#uses=1]
+ %tmp.1.s = shl i32 %y.s, 2 ; <i32> [#uses=1]
%tmp.3.s = sub i32 %tmp.1.s, %x.s ; <i32> [#uses=1]
ret i32 %tmp.3.s
}
define i32 @s8l(i32 sext %x.s, i32 sext %y.s) sext {
entry:
- %tmp.1.s = shl i32 %y.s, i8 3 ; <i32> [#uses=1]
+ %tmp.1.s = shl i32 %y.s, 3 ; <i32> [#uses=1]
%tmp.3.s = sub i32 %tmp.1.s, %x.s ; <i32> [#uses=1]
ret i32 %tmp.3.s
}
define i64 @s4q(i64 %x.s, i64 %y.s) {
entry:
- %tmp.1.s = shl i64 %y.s, i8 2 ; <i64> [#uses=1]
+ %tmp.1.s = shl i64 %y.s, 2 ; <i64> [#uses=1]
%tmp.3.s = sub i64 %tmp.1.s, %x.s ; <i64> [#uses=1]
ret i64 %tmp.3.s
}
define i64 @s8q(i64 %x.s, i64 %y.s) {
entry:
- %tmp.1.s = shl i64 %y.s, i8 3 ; <i64> [#uses=1]
+ %tmp.1.s = shl i64 %y.s, 3 ; <i64> [#uses=1]
%tmp.3.s = sub i64 %tmp.1.s, %x.s ; <i64> [#uses=1]
ret i64 %tmp.3.s
}
define i32 @s4li(i32 sext %y.s) sext {
entry:
- %tmp.1.s = shl i32 %y.s, i8 2 ; <i32> [#uses=1]
+ %tmp.1.s = shl i32 %y.s, 2 ; <i32> [#uses=1]
%tmp.3.s = sub i32 %tmp.1.s, 100 ; <i32> [#uses=1]
ret i32 %tmp.3.s
}
define i32 @s8li(i32 sext %y.s) sext {
entry:
- %tmp.1.s = shl i32 %y.s, i8 3 ; <i32> [#uses=1]
+ %tmp.1.s = shl i32 %y.s, 3 ; <i32> [#uses=1]
%tmp.3.s = sub i32 %tmp.1.s, 100 ; <i32> [#uses=1]
ret i32 %tmp.3.s
}
define i64 @s4qi(i64 %y.s) {
entry:
- %tmp.1.s = shl i64 %y.s, i8 2 ; <i64> [#uses=1]
+ %tmp.1.s = shl i64 %y.s, 2 ; <i64> [#uses=1]
%tmp.3.s = sub i64 %tmp.1.s, 100 ; <i64> [#uses=1]
ret i64 %tmp.3.s
}
define i64 @s8qi(i64 %y.s) {
entry:
- %tmp.1.s = shl i64 %y.s, i8 3 ; <i64> [#uses=1]
+ %tmp.1.s = shl i64 %y.s, 3 ; <i64> [#uses=1]
%tmp.3.s = sub i64 %tmp.1.s, 100 ; <i64> [#uses=1]
ret i64 %tmp.3.s
}
diff --git a/llvm/test/CodeGen/PowerPC/and-elim.ll b/llvm/test/CodeGen/PowerPC/and-elim.ll
index b7fe9d2fb26..958f1552513 100644
--- a/llvm/test/CodeGen/PowerPC/and-elim.ll
+++ b/llvm/test/CodeGen/PowerPC/and-elim.ll
@@ -3,7 +3,7 @@
define void @test(i8* %P) {
%W = load i8* %P
- %X = shl i8 %W, i8 1
+ %X = shl i8 %W, 1
%Y = add i8 %X, 2
%Z = and i8 %Y, 254 ; dead and
store i8 %Z, i8* %P
@@ -12,7 +12,7 @@ define void @test(i8* %P) {
define i16 @test2(i16 zext %crc) zext {
; No and's should be needed for the i16s here.
- %tmp.1 = lshr i16 %crc, i8 1
+ %tmp.1 = lshr i16 %crc, 1
%tmp.7 = xor i16 %tmp.1, 40961
ret i16 %tmp.7
}
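The claim in test2's comment follows from value ranges rather than from anything rewritten in this diff; roughly (an annotation, not test content):

; %crc arrives zext'd from i16, so its upper bits are already zero.
; lshr by 1 keeps them zero, and 40961 (0xA001) fits in 16 bits, so the
; xor cannot set them either -- codegen needs no masking 'and'.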
diff --git a/llvm/test/CodeGen/PowerPC/and_sext.ll b/llvm/test/CodeGen/PowerPC/and_sext.ll
index 4c3fd14cbd1..84037e75907 100644
--- a/llvm/test/CodeGen/PowerPC/and_sext.ll
+++ b/llvm/test/CodeGen/PowerPC/and_sext.ll
@@ -14,7 +14,7 @@ define i16 @test2(i16 sext %X, i16 sext %x) sext {
%tmp = sext i16 %X to i32
%tmp1 = sext i16 %x to i32
%tmp2 = add i32 %tmp, %tmp1
- %tmp4 = ashr i32 %tmp2, i8 1
+ %tmp4 = ashr i32 %tmp2, 1
%tmp5 = trunc i32 %tmp4 to i16
%tmp45 = sext i16 %tmp5 to i32
%retval = trunc i32 %tmp45 to i16
@@ -22,7 +22,7 @@ define i16 @test2(i16 sext %X, i16 sext %x) sext {
}
define i16 @test3(i32 zext %X) sext {
- %tmp1 = lshr i32 %X, i8 16
+ %tmp1 = lshr i32 %X, 16
%tmp2 = trunc i32 %tmp1 to i16
ret i16 %tmp2
}
diff --git a/llvm/test/CodeGen/PowerPC/rlwinm2.ll b/llvm/test/CodeGen/PowerPC/rlwinm2.ll
index 70ad636e3bd..e55da87591a 100644
--- a/llvm/test/CodeGen/PowerPC/rlwinm2.ll
+++ b/llvm/test/CodeGen/PowerPC/rlwinm2.ll
@@ -1,30 +1,27 @@
; All of these ands and shifts should be folded into rlw[i]nm instructions
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | not grep and &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | not grep srawi &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | not grep srwi &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | not grep slwi &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | grep rlwnm | wc -l | grep 1 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | grep rlwinm | wc -l | grep 1
+; RUN: llvm-as < %s | llc -march=ppc32 | not grep and &&
+; RUN: llvm-as < %s | llc -march=ppc32 | not grep srawi &&
+; RUN: llvm-as < %s | llc -march=ppc32 | not grep srwi &&
+; RUN: llvm-as < %s | llc -march=ppc32 | not grep slwi &&
+; RUN: llvm-as < %s | llc -march=ppc32 | grep rlwnm | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -march=ppc32 | grep rlwinm | wc -l | grep 1
-
-implementation ; Functions:
-
-uint %test1(uint %X, int %Y) {
+define i32 @test1(i32 %X, i32 %Y) {
entry:
- %tmp = cast int %Y to ubyte ; <ubyte> [#uses=2]
- %tmp1 = shl uint %X, ubyte %tmp ; <uint> [#uses=1]
- %tmp2 = sub ubyte 32, %tmp ; <ubyte> [#uses=1]
- %tmp3 = shr uint %X, ubyte %tmp2 ; <uint> [#uses=1]
- %tmp4 = or uint %tmp1, %tmp3 ; <uint> [#uses=1]
- %tmp6 = and uint %tmp4, 127 ; <uint> [#uses=1]
- ret uint %tmp6
+ %tmp = trunc i32 %Y to i8 ; <i8> [#uses=2]
+ %tmp1 = shl i32 %X, %Y ; <i32> [#uses=1]
+ %tmp2 = sub i32 32, %Y ; <i8> [#uses=1]
+ %tmp3 = lshr i32 %X, %tmp2 ; <i32> [#uses=1]
+ %tmp4 = or i32 %tmp1, %tmp3 ; <i32> [#uses=1]
+ %tmp6 = and i32 %tmp4, 127 ; <i32> [#uses=1]
+ ret i32 %tmp6
}
-uint %test2(uint %X) {
+define i32 @test2(i32 %X) {
entry:
- %tmp1 = shr uint %X, ubyte 27 ; <uint> [#uses=1]
- %tmp2 = shl uint %X, ubyte 5 ; <uint> [#uses=1]
- %tmp2.masked = and uint %tmp2, 96 ; <uint> [#uses=1]
- %tmp5 = or uint %tmp1, %tmp2.masked ; <uint> [#uses=1]
- ret uint %tmp5
+ %tmp1 = lshr i32 %X, 27 ; <i32> [#uses=1]
+ %tmp2 = shl i32 %X, 5 ; <i32> [#uses=1]
+ %tmp2.masked = and i32 %tmp2, 96 ; <i32> [#uses=1]
+ %tmp5 = or i32 %tmp1, %tmp2.masked ; <i32> [#uses=1]
+ ret i32 %tmp5
}
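The head comment promises that these shift/and combinations fold into rotate-and-mask instructions, and test2 is the clearest instance: the or of the two shifted copies of %X is a rotate, and the surviving and supplies the mask that rlwinm applies in the same instruction. A sketch of the equivalence (an annotation, assuming 32-bit operands):

; (X >>u 27) | ((X << 5) & 96) == rotl(X, 5) & 127
; the lshr contributes the low five bits (mask 31) and the masked shl the
; next two (mask 96); 31 | 96 == 127, so the whole expression is one
; rotate-left-by-5 plus an and -- the rotate-and-mask shape rlwinm encodes.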
diff --git a/llvm/test/CodeGen/PowerPC/rotl.ll b/llvm/test/CodeGen/PowerPC/rotl.ll
index aeb59aab556..a000ec0b318 100644
--- a/llvm/test/CodeGen/PowerPC/rotl.ll
+++ b/llvm/test/CodeGen/PowerPC/rotl.ll
@@ -1,53 +1,38 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | not grep or &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | \
-; RUN: grep rlwnm | wc -l | grep 2 &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 | \
-; RUN: grep rlwinm | wc -l | grep 2
+; RUN: llvm-as < %s | llc -march=ppc32 | not grep or &&
+; RUN: llvm-as < %s | llc -march=ppc32 | grep rlwnm | wc -l | grep 2 &&
+; RUN: llvm-as < %s | llc -march=ppc32 | grep rlwinm | wc -l | grep 2
-implementation ; Functions:
-
-int %rotlw(uint %x, int %sh) {
+define i32 @rotlw(i32 %x, i32 %sh) {
entry:
- %tmp.3 = cast int %sh to ubyte ; <ubyte> [#uses=1]
- %x = cast uint %x to int ; <int> [#uses=1]
- %tmp.7 = sub int 32, %sh ; <int> [#uses=1]
- %tmp.9 = cast int %tmp.7 to ubyte ; <ubyte> [#uses=1]
- %tmp.10 = shr uint %x, ubyte %tmp.9 ; <uint> [#uses=1]
- %tmp.4 = shl int %x, ubyte %tmp.3 ; <int> [#uses=1]
- %tmp.10 = cast uint %tmp.10 to int ; <int> [#uses=1]
- %tmp.12 = or int %tmp.10, %tmp.4 ; <int> [#uses=1]
- ret int %tmp.12
+ %tmp.7 = sub i32 32, %sh ; <i32> [#uses=1]
+ %tmp.10 = lshr i32 %x, %tmp.7 ; <i32> [#uses=2]
+ %tmp.4 = shl i32 %x, %sh ; <i32> [#uses=1]
+ %tmp.12 = or i32 %tmp.10, %tmp.4 ; <i32> [#uses=1]
+ ret i32 %tmp.12
}
-int %rotrw(uint %x, int %sh) {
+define i32 @rotrw(i32 %x, i32 %sh) {
entry:
- %tmp.3 = cast int %sh to ubyte ; <ubyte> [#uses=1]
- %tmp.4 = shr uint %x, ubyte %tmp.3 ; <uint> [#uses=1]
- %tmp.7 = sub int 32, %sh ; <int> [#uses=1]
- %tmp.9 = cast int %tmp.7 to ubyte ; <ubyte> [#uses=1]
- %x = cast uint %x to int ; <int> [#uses=1]
- %tmp.4 = cast uint %tmp.4 to int ; <int> [#uses=1]
- %tmp.10 = shl int %x, ubyte %tmp.9 ; <int> [#uses=1]
- %tmp.12 = or int %tmp.4, %tmp.10 ; <int> [#uses=1]
- ret int %tmp.12
+ %tmp.3 = trunc i32 %sh to i8 ; <i8> [#uses=1]
+ %tmp.4 = lshr i32 %x, %sh ; <i32> [#uses=2]
+ %tmp.7 = sub i32 32, %sh ; <i32> [#uses=1]
+ %tmp.10 = shl i32 %x, %tmp.7 ; <i32> [#uses=1]
+ %tmp.12 = or i32 %tmp.4, %tmp.10 ; <i32> [#uses=1]
+ ret i32 %tmp.12
}
-int %rotlwi(uint %x) {
+define i32 @rotlwi(i32 %x) {
entry:
- %x = cast uint %x to int ; <int> [#uses=1]
- %tmp.7 = shr uint %x, ubyte 27 ; <uint> [#uses=1]
- %tmp.3 = shl int %x, ubyte 5 ; <int> [#uses=1]
- %tmp.7 = cast uint %tmp.7 to int ; <int> [#uses=1]
- %tmp.9 = or int %tmp.3, %tmp.7 ; <int> [#uses=1]
- ret int %tmp.9
+ %tmp.7 = lshr i32 %x, 27 ; <i32> [#uses=2]
+ %tmp.3 = shl i32 %x, 5 ; <i32> [#uses=1]
+ %tmp.9 = or i32 %tmp.3, %tmp.7 ; <i32> [#uses=1]
+ ret i32 %tmp.9
}
-int %rotrwi(uint %x) {
+define i32 @rotrwi(i32 %x) {
entry:
- %tmp.3 = shr uint %x, ubyte 5 ; <uint> [#uses=1]
- %x = cast uint %x to int ; <int> [#uses=1]
- %tmp.3 = cast uint %tmp.3 to int ; <int> [#uses=1]
- %tmp.7 = shl int %x, ubyte 27 ; <int> [#uses=1]
- %tmp.9 = or int %tmp.3, %tmp.7 ; <int> [#uses=1]
- ret int %tmp.9
+ %tmp.3 = lshr i32 %x, 5 ; <i32> [#uses=2]
+ %tmp.7 = shl i32 %x, 27 ; <i32> [#uses=1]
+ %tmp.9 = or i32 %tmp.3, %tmp.7 ; <i32> [#uses=1]
+ ret i32 %tmp.9
}
diff --git a/llvm/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll b/llvm/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll
index 54c249c8bef..8825e346ef2 100644
--- a/llvm/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll
+++ b/llvm/test/CodeGen/X86/2007-01-13-StackPtrIndex.ll
@@ -29,19 +29,19 @@ b:
%r22 = select i1 %r20, i64 1, i64 %r19h
%r23 = mul i64 %r22, 0
%r23a = trunc i64 %r23 to i32
- %r24 = shl i32 %r23a, i8 0
+ %r24 = shl i32 %r23a, 0
%r25 = add i32 %r24, 0
%ras2 = alloca i8, i32 %r25, align 16
%r28 = getelementptr i8* %ras2, i32 0
- %r38 = shl i64 %r12, i8 0
+ %r38 = shl i64 %r12, 0
%s2013 = add i64 %r38, 0
%c22012 = getelementptr i8* %ras2, i64 %s2013
- %r42 = shl i64 %r12, i8 0
+ %r42 = shl i64 %r12, 0
%s2011 = add i64 %r42, 16
%c22010 = getelementptr i8* %ras2, i64 %s2011
%r50 = add i64 %r16, 0
%r51 = icmp slt i64 %r50, 0
- %r50sh = shl i64 %r50, i8 0
+ %r50sh = shl i64 %r50, 0
%r50j = add i64 %r50sh, 0
%r54 = select i1 %r51, i64 0, i64 %r50j
%r56 = mul i64 %r54, %r12
@@ -69,7 +69,7 @@ a25b140q:
br label %a25b140
a25b:
%w1989 = phi i64 [ 0, %b63 ], [ %v1990, %a25b ]
- %e642 = shl i64 %w1989, i8 0
+ %e642 = shl i64 %w1989, 0
%r129 = add i64 %e642, 0
%r132 = add i64 %e642, 0
%r134 = icmp slt i64 %r132, 0
@@ -112,7 +112,7 @@ a30b294q:
br label %a30b294
a30b:
%w = phi i64 [ 0, %b179 ], [ %v, %a30b ]
- %b2 = shl i64 %w, i8 0
+ %b2 = shl i64 %w, 0
%r283 = add i64 %b2, 0
%r286 = add i64 %b2, 0
%r288 = icmp slt i64 %r286, 0
@@ -152,7 +152,7 @@ b377:
br i1 %r462, label %a35b465, label %b463
a35b:
%w1865 = phi i64 [ 0, %b341 ], [ %v1866, %a35b ]
- %e785 = shl i64 %w1865, i8 0
+ %e785 = shl i64 %w1865, 0
%b1877 = mul i64 %w1865, 0
%s795 = add i64 %b1877, 0
%r399 = add float %r354, 0.000000e+00
@@ -196,7 +196,7 @@ b565:
br i1 %r711, label %a45b714, label %b712
a45b:
%w1852 = phi i64 [ 0, %b535 ], [ %v1853, %a45b ]
- %e945 = shl i64 %w1852, i8 0
+ %e945 = shl i64 %w1852, 0
%r609 = add i64 %r562, 0
%r703 = add i64 %e945, 0
%r706 = add i64 %e945, 0
@@ -261,7 +261,7 @@ b858:
%w1891 = phi i64 [ 0, %b820 ], [ %v1892, %b1016 ]
%s1193 = phi i64 [ 0, %b820 ], [ %r1068, %b1016 ]
%b1894 = mul i64 %r834, 0
- %b1896 = shl i64 %r823, i8 0
+ %b1896 = shl i64 %r823, 0
%b1902 = mul i64 %w1891, 0
%s1173 = add i64 %b1902, 0
%r859 = add i64 %r856, 0
@@ -285,7 +285,7 @@ a53b1019q:
br label %a53b1019
a53b:
%w1881 = phi i64 [ 0, %b858 ], [ %v1882, %a53b ]
- %e1205 = shl i64 %w1881, i8 0
+ %e1205 = shl i64 %w1881, 0
%r1007 = add i64 %e1205, 0
%r1010 = add i64 %e1205, 0
%r1012 = icmp slt i64 %r1010, 0
@@ -365,7 +365,7 @@ a63b:
%b1907 = mul i64 %r1101, 0
%b1929 = mul i64 %w1904, 0
%s1395 = add i64 %b1929, 0
- %e1365 = shl i64 %w1904, i8 0
+ %e1365 = shl i64 %w1904, 0
%r1163 = add i64 %r1090, 0
%r1167 = add i64 %s1375, 0
%r1191 = add i64 %r1163, 0
diff --git a/llvm/test/CodeGen/X86/trunc-to-bool.ll b/llvm/test/CodeGen/X86/trunc-to-bool.ll
index 8486bbd4286..667fc2f3d3c 100644
--- a/llvm/test/CodeGen/X86/trunc-to-bool.ll
+++ b/llvm/test/CodeGen/X86/trunc-to-bool.ll
@@ -12,8 +12,7 @@ define i1 @test1(i32 %X) zext {
define i1 @test2(i32 %val, i32 %mask) {
entry:
- %maski8 = trunc i32 %mask to i8
- %shifted = ashr i32 %val, i8 %maski8
+ %shifted = ashr i32 %val, %mask
%anded = and i32 %shifted, 1
%trunced = trunc i32 %anded to i1
br i1 %trunced, label %ret_true, label %ret_false
diff --git a/llvm/test/Integer/BitBit.ll b/llvm/test/Integer/BitBit.ll
index 2b01c447d9e..7c17326e198 100644
--- a/llvm/test/Integer/BitBit.ll
+++ b/llvm/test/Integer/BitBit.ll
@@ -14,11 +14,12 @@ begin
%t3 = sext i31 %i to i33
%t4 = or i33 %t3, %j
%t5 = xor i31 %t2, 7
- %t6 = shl i31 %i, i8 2
+ %t6 = shl i31 %i, 2
%t7 = trunc i31 %i to i8
- %t8 = shl i8 %t7, i8 3
- %t9 = lshr i33 %j, i8 31
- %t10 = ashr i33 %j, i8 %t7
+ %t8 = shl i8 %t7, 3
+ %t9 = lshr i33 %j, 31
+ %t7z = zext i8 %t7 to i33
+ %t10 = ashr i33 %j, %t7z
ret void
end
diff --git a/llvm/test/Integer/a1.ll b/llvm/test/Integer/a1.ll
index 1e7934402e6..cfd69f06aaa 100644
--- a/llvm/test/Integer/a1.ll
+++ b/llvm/test/Integer/a1.ll
@@ -10,10 +10,10 @@
@f = constant i1 sub(i1 1 , i1 -1)
@g = constant i1 sub(i1 1 , i1 1)
-@h = constant i1 shl(i1 1 , i8 1)
-@i = constant i1 shl(i1 1 , i8 0)
-@j = constant i1 lshr(i1 1, i8 1)
-@m = constant i1 ashr(i1 1, i8 1)
+@h = constant i1 shl(i1 1 , i1 1)
+@i = constant i1 shl(i1 1 , i1 0)
+@j = constant i1 lshr(i1 1, i1 1)
+@m = constant i1 ashr(i1 1, i1 1)
@n = constant i1 mul(i1 -1, i1 1)
@o = constant i1 sdiv(i1 -1, i1 1)
diff --git a/llvm/test/Integer/a15.ll b/llvm/test/Integer/a15.ll
index d3cc319b427..4e4908cbbbf 100644
--- a/llvm/test/Integer/a15.ll
+++ b/llvm/test/Integer/a15.ll
@@ -10,12 +10,12 @@
@f = constant i15 sub(i15 0 , i15 32767)
@g = constant i15 sub(i15 2 , i15 32767)
-@h = constant i15 shl(i15 1 , i8 15)
-@i = constant i15 shl(i15 1 , i8 14)
-@j = constant i15 lshr(i15 32767 , i8 14)
-@k = constant i15 lshr(i15 32767 , i8 15)
-@l = constant i15 ashr(i15 32767 , i8 14)
-@m = constant i15 ashr(i15 32767 , i8 15)
+@h = constant i15 shl(i15 1 , i15 15)
+@i = constant i15 shl(i15 1 , i15 14)
+@j = constant i15 lshr(i15 32767 , i15 14)
+@k = constant i15 lshr(i15 32767 , i15 15)
+@l = constant i15 ashr(i15 32767 , i15 14)
+@m = constant i15 ashr(i15 32767 , i15 15)
@n = constant i15 mul(i15 32767, i15 2)
@q = constant i15 mul(i15 -16383,i15 -3)
diff --git a/llvm/test/Integer/a17.ll b/llvm/test/Integer/a17.ll
index 82760786cd2..a83b2000d86 100644
--- a/llvm/test/Integer/a17.ll
+++ b/llvm/test/Integer/a17.ll
@@ -10,12 +10,12 @@
@f = constant i17 sub(i17 0 , i17 131071)
@g = constant i17 sub(i17 2 , i17 131071)
-@h = constant i17 shl(i17 1 , i8 17)
-@i = constant i17 shl(i17 1 , i8 16)
-@j = constant i17 lshr(i17 131071 , i8 16)
-@k = constant i17 lshr(i17 131071 , i8 17)
-@l = constant i17 ashr(i17 131071 , i8 16)
-@m = constant i17 ashr(i17 131071 , i8 17)
+@h = constant i17 shl(i17 1 , i17 17)
+@i = constant i17 shl(i17 1 , i17 16)
+@j = constant i17 lshr(i17 131071 , i17 16)
+@k = constant i17 lshr(i17 131071 , i17 17)
+@l = constant i17 ashr(i17 131071 , i17 16)
+@m = constant i17 ashr(i17 131071 , i17 17)
@n = constant i17 mul(i17 131071, i17 2)
@q = constant i17 sdiv(i17 -1, i17 65535)
diff --git a/llvm/test/Integer/a31.ll b/llvm/test/Integer/a31.ll
index fa3774b4fb8..57c6191bdd5 100644
--- a/llvm/test/Integer/a31.ll
+++ b/llvm/test/Integer/a31.ll
@@ -10,12 +10,12 @@
@f = constant i31 sub(i31 0 , i31 2147483647)
@g = constant i31 sub(i31 2 , i31 2147483647)
-@h = constant i31 shl(i31 1 , i8 31)
-@i = constant i31 shl(i31 1 , i8 30)
-@j = constant i31 lshr(i31 2147483647 , i8 30)
-@k = constant i31 lshr(i31 2147483647 , i8 31)
-@l = constant i31 ashr(i31 2147483647 , i8 30)
-@m = constant i31 ashr(i31 2147483647 , i8 31)
+@h = constant i31 shl(i31 1 , i31 31)
+@i = constant i31 shl(i31 1 , i31 30)
+@j = constant i31 lshr(i31 2147483647 , i31 30)
+@k = constant i31 lshr(i31 2147483647 , i31 31)
+@l = constant i31 ashr(i31 2147483647 , i31 30)
+@m = constant i31 ashr(i31 2147483647 , i31 31)
@n = constant i31 mul(i31 2147483647, i31 2)
@q = constant i31 sdiv(i31 -1, i31 1073741823)
diff --git a/llvm/test/Integer/a33.ll b/llvm/test/Integer/a33.ll
index cf14274acf4..8ef992b1f4a 100644
--- a/llvm/test/Integer/a33.ll
+++ b/llvm/test/Integer/a33.ll
@@ -10,12 +10,12 @@
@f = constant i33 sub(i33 0 , i33 8589934591)
@g = constant i33 sub(i33 2 , i33 8589934591)
-@h = constant i33 shl(i33 1 , i8 33)
-@i = constant i33 shl(i33 1 , i8 32)
-@j = constant i33 lshr(i33 8589934591 , i8 32)
-@k = constant i33 lshr(i33 8589934591 , i8 33)
-@l = constant i33 ashr(i33 8589934591 , i8 32)
-@m = constant i33 ashr(i33 8589934591 , i8 33)
+@h = constant i33 shl(i33 1 , i33 33)
+@i = constant i33 shl(i33 1 , i33 32)
+@j = constant i33 lshr(i33 8589934591 , i33 32)
+@k = constant i33 lshr(i33 8589934591 , i33 33)
+@l = constant i33 ashr(i33 8589934591 , i33 32)
+@m = constant i33 ashr(i33 8589934591 , i33 33)
@n = constant i33 mul(i33 8589934591, i33 2)
@q = constant i33 sdiv(i33 -1, i33 4294967295)
diff --git a/llvm/test/Integer/a63.ll b/llvm/test/Integer/a63.ll
index ec348ff204b..6dadaf79e0b 100644
--- a/llvm/test/Integer/a63.ll
+++ b/llvm/test/Integer/a63.ll
@@ -10,12 +10,12 @@
@f = constant i63 sub(i63 0 , i63 9223372036854775807)
@g = constant i63 sub(i63 2 , i63 9223372036854775807)
-@h = constant i63 shl(i63 1 , i8 63)
-@i = constant i63 shl(i63 1 , i8 62)
-@j = constant i63 lshr(i63 9223372036854775807 , i8 62)
-@k = constant i63 lshr(i63 9223372036854775807 , i8 63)
-@l = constant i63 ashr(i63 9223372036854775807 , i8 62)
-@m = constant i63 ashr(i63 9223372036854775807 , i8 63)
+@h = constant i63 shl(i63 1 , i63 63)
+@i = constant i63 shl(i63 1 , i63 62)
+@j = constant i63 lshr(i63 9223372036854775807 , i63 62)
+@k = constant i63 lshr(i63 9223372036854775807 , i63 63)
+@l = constant i63 ashr(i63 9223372036854775807 , i63 62)
+@m = constant i63 ashr(i63 9223372036854775807 , i63 63)
@n = constant i63 mul(i63 9223372036854775807, i63 2)
@q = constant i63 sdiv(i63 -1, i63 4611686018427387903)
diff --git a/llvm/test/Integer/a7.ll b/llvm/test/Integer/a7.ll
index 91904cc3ec2..294db188457 100644
--- a/llvm/test/Integer/a7.ll
+++ b/llvm/test/Integer/a7.ll
@@ -13,13 +13,13 @@
@r = constant i7 sub(i7 -3, i7 120)
@s = constant i7 sub(i7 -3, i7 -8)
-@h = constant i7 shl(i7 1 , i8 7)
-@i = constant i7 shl(i7 1 , i8 6)
-@j = constant i7 lshr(i7 127 , i8 6)
-@k = constant i7 lshr(i7 127 , i8 7)
-@l = constant i7 ashr(i7 127 , i8 6)
-@m = constant i7 ashr(i7 127 , i8 7)
-@m2= constant i7 ashr(i7 -1 , i8 3)
+@h = constant i7 shl(i7 1 , i7 7)
+@i = constant i7 shl(i7 1 , i7 6)
+@j = constant i7 lshr(i7 127 , i7 6)
+@k = constant i7 lshr(i7 127 , i7 7)
+@l = constant i7 ashr(i7 127 , i7 6)
+@m = constant i7 ashr(i7 127 , i7 7)
+@m2= constant i7 ashr(i7 -1 , i7 3)
@n = constant i7 mul(i7 127, i7 2)
@t = constant i7 mul(i7 -63, i7 -2)
diff --git a/llvm/test/Integer/a9.ll b/llvm/test/Integer/a9.ll
index 8e4cd9a8db3..990c4459fce 100644
--- a/llvm/test/Integer/a9.ll
+++ b/llvm/test/Integer/a9.ll
@@ -10,12 +10,12 @@
@f = constant i9 sub(i9 0 , i9 511)
@g = constant i9 sub(i9 2 , i9 511)
-@h = constant i9 shl(i9 1 , i8 9)
-@i = constant i9 shl(i9 1 , i8 8)
-@j = constant i9 lshr(i9 511 , i8 8)
-@k = constant i9 lshr(i9 511 , i8 9)
-@l = constant i9 ashr(i9 511 , i8 8)
-@m = constant i9 ashr(i9 511 , i8 9)
+@h = constant i9 shl(i9 1 , i9 9)
+@i = constant i9 shl(i9 1 , i9 8)
+@j = constant i9 lshr(i9 511 , i9 8)
+@k = constant i9 lshr(i9 511 , i9 9)
+@l = constant i9 ashr(i9 511 , i9 8)
+@m = constant i9 ashr(i9 511 , i9 9)
@n = constant i9 mul(i9 511, i9 2)
@q = constant i9 sdiv(i9 511, i9 2)
diff --git a/llvm/test/Integer/testarith_bt.ll b/llvm/test/Integer/testarith_bt.ll
index 2e0ec189822..f962e393d36 100644
--- a/llvm/test/Integer/testarith_bt.ll
+++ b/llvm/test/Integer/testarith_bt.ll
@@ -13,9 +13,9 @@ begin
%t5 = sdiv i31 %t1, %t2
%t6 = urem i31 %t1, %t2
%t7 = srem i31 %t1, %t2
- %t8 = shl i31 %t1, i8 9
- %t9 = lshr i31 %t1, i8 9
- %t10= ashr i31 %t1, i8 9
+ %t8 = shl i31 %t1, 9
+ %t9 = lshr i31 %t1, 9
+ %t10= ashr i31 %t1, 9
%f1 = sitofp i31 %t1 to float
%f2 = fdiv float 4.0, %f1
ret i31 %t3
diff --git a/llvm/test/Integer/testlogical_new_bt.ll b/llvm/test/Integer/testlogical_new_bt.ll
index 41f5d0cac32..349b4ab95ff 100644
--- a/llvm/test/Integer/testlogical_new_bt.ll
+++ b/llvm/test/Integer/testlogical_new_bt.ll
@@ -9,8 +9,8 @@ begin
%t1 = xor i31 %i0, %j0
%t2 = or i31 %i0, %j0
%t3 = and i31 %t1, %t2
- %t4 = shl i31 %i0, i8 2
- %t5 = ashr i31 %i0, i8 2
- %t6 = lshr i31 %j0, i8 22
+ %t4 = shl i31 %i0, 2
+ %t5 = ashr i31 %i0, 2
+ %t6 = lshr i31 %j0, 22
ret i31 %t3
end
diff --git a/llvm/test/Transforms/InstCombine/add.ll b/llvm/test/Transforms/InstCombine/add.ll
index aa12a99bdc0..4e5dbb38532 100644
--- a/llvm/test/Transforms/InstCombine/add.ll
+++ b/llvm/test/Transforms/InstCombine/add.ll
@@ -1,7 +1,8 @@
; This test makes sure that add instructions are properly eliminated.
; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output &&
-; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | grep -v OK | not grep add
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep -v OK | not grep add
implementation
@@ -46,7 +47,8 @@ int %test7(int %A) {
ret int %C
}
-int %test8(int %A, int %B) { ; (A & C1)+(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
+; (A & C1)+(B & C2) -> (A & C1)|(B & C2) iff C1&C2 == 0
+int %test8(int %A, int %B) {
%A1 = and int %A, 7
%B1 = and int %B, 128
%C = add int %A1, %B1
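The comment above test8 states the rule being exercised; with these constants it is easy to check by hand (an annotation, not test content):

; %A1 = %A & 7   -> only bits 0-2 can be set
; %B1 = %B & 128 -> only bit 7 can be set
; 7 & 128 == 0, so no bit position ever receives two ones, the add can
; never carry, and instcombine may emit: %C = or int %A1, %B1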
diff --git a/llvm/test/Transforms/InstCombine/shift-simplify.ll b/llvm/test/Transforms/InstCombine/shift-simplify.ll
index ce19384d165..1049b0c01ab 100644
--- a/llvm/test/Transforms/InstCombine/shift-simplify.ll
+++ b/llvm/test/Transforms/InstCombine/shift-simplify.ll
@@ -1,22 +1,23 @@
-; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | egrep 'shl|lshr|ashr' | wc -l | grep 3
+; RUN: llvm-as < %s | opt -instcombine | llvm-dis | \
+; RUN: egrep 'shl|lshr|ashr' | wc -l | grep 3
-int %test0(int %A, int %B, ubyte %C) {
- %X = shl int %A, ubyte %C
- %Y = shl int %B, ubyte %C
- %Z = and int %X, %Y
- ret int %Z
+define i32 @test0(i32 %A, i32 %B, i32 %C) {
+ %X = shl i32 %A, %C
+ %Y = shl i32 %B, %C
+ %Z = and i32 %X, %Y
+ ret i32 %Z
}
-int %test1(int %A, int %B, ubyte %C) {
- %X = lshr int %A, ubyte %C
- %Y = lshr int %B, ubyte %C
- %Z = or int %X, %Y
- ret int %Z
+define i32 @test1(i32 %A, i32 %B, i32 %C) {
+ %X = lshr i32 %A, %C
+ %Y = lshr i32 %B, %C
+ %Z = or i32 %X, %Y
+ ret i32 %Z
}
-int %test2(int %A, int %B, ubyte %C) {
- %X = ashr int %A, ubyte %C
- %Y = ashr int %B, ubyte %C
- %Z = xor int %X, %Y
- ret int %Z
+define i32 @test2(i32 %A, i32 %B, i32 %C) {
+ %X = ashr i32 %A, %C
+ %Y = ashr i32 %B, %C
+ %Z = xor i32 %X, %Y
+ ret i32 %Z
}
diff --git a/llvm/test/Transforms/InstCombine/shift-sra.ll b/llvm/test/Transforms/InstCombine/shift-sra.ll
index a887d61196a..80a2ac9cae0 100644
--- a/llvm/test/Transforms/InstCombine/shift-sra.ll
+++ b/llvm/test/Transforms/InstCombine/shift-sra.ll
@@ -1,5 +1,6 @@
; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output &&
-; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | grep 'lshr i32' | wc -l | grep 2 &&
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: grep 'lshr i32' | wc -l | grep 2 &&
; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | not grep ashr
int %test1(int %X, ubyte %A) {
diff --git a/llvm/test/Transforms/InstCombine/signext.ll b/llvm/test/Transforms/InstCombine/signext.ll
index 7dcb466ad64..88f7b02c196 100644
--- a/llvm/test/Transforms/InstCombine/signext.ll
+++ b/llvm/test/Transforms/InstCombine/signext.ll
@@ -1,5 +1,6 @@
; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine -disable-output &&
-; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | not grep '(and\|xor\|add\|shl\|shr)'
+; RUN: llvm-upgrade < %s | llvm-as | opt -instcombine | llvm-dis | \
+; RUN: not grep '(and\|xor\|add\|shl\|shr)'
int %test1(int %x) {
%tmp.1 = and int %x, 65535 ; <int> [#uses=1]
diff --git a/llvm/test/Transforms/Reassociate/shifttest.ll b/llvm/test/Transforms/Reassociate/shifttest.ll
index 0bfe0ec2df6..18c6e4e3205 100644
--- a/llvm/test/Transforms/Reassociate/shifttest.ll
+++ b/llvm/test/Transforms/Reassociate/shifttest.ll
@@ -1,6 +1,6 @@
; With shl->mul reassociation, we can see that this is (shl A, 9) * A
;
-; RUN: llvm-upgrade < %s | llvm-as | opt -reassociate -instcombine | llvm-dis | grep 'shl .*, i8 9'
+; RUN: llvm-upgrade < %s | llvm-as | opt -reassociate -instcombine | llvm-dis | grep 'shl .*, 9'
int %test(int %A, int %B) {
%X = shl int %A, ubyte 5
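The head comment carries the whole argument; assuming the (truncated) body multiplies this shift by a second shift of %A totalling nine bits, the derivation is (an assumed annotation, not file content):

; (A << 5) * (A << 4) == (A * 32) * (A * 16)
;                     == (A * A) * 512
;                     == (shl A, 9) * A     ; since 512 == 1 << 9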