author     David Blaikie <dblaikie@gmail.com>    2015-02-27 21:17:42 +0000
committer  David Blaikie <dblaikie@gmail.com>    2015-02-27 21:17:42 +0000
commit     a79ac14fa68297f9888bc70a10df5ed9b8864e38 (patch)
tree       8d8217a8928e3ee599bdde405e2e178b3a55b645 /llvm/test/Transforms/LoopUnroll
parent     83687fb9e654c9d0086e7f6b728c26fa0b729e71 (diff)
download   bcm5719-llvm-a79ac14fa68297f9888bc70a10df5ed9b8864e38.tar.gz
           bcm5719-llvm-a79ac14fa68297f9888bc70a10df5ed9b8864e38.zip
[opaque pointer type] Add textual IR support for explicit type parameter to load instruction
Essentially the same as the GEP change in r230786.

A similar migration script can be used to update test cases, though a few more test case improvements/changes were required this time around: (r229269-r229278)

import fileinput
import sys
import re
pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")
for line in sys.stdin:
  sys.stdout.write(re.sub(pat, r"\1, \2\3*\4", line))

Reviewers: rafael, dexonsmith, grosser

Differential Revision: http://reviews.llvm.org/D7649

llvm-svn: 230794
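For illustration only (not part of the original commit message): a minimal sketch of applying the migration regex above to a single pre-change line taken from the diff below. The standalone string input and the print call are assumptions for demonstration; the actual migration piped whole test files through stdin as shown in the script.

    import re

    pat = re.compile(r"((?:=|:|^)\s*load (?:atomic )?(?:volatile )?(.*?))(| addrspace\(\d+\) *)\*($| *(?:%|@|null|undef|blockaddress|getelementptr|addrspacecast|bitcast|inttoptr|\[\[[a-zA-Z]|\{\{).*$)")

    # A sample line using the old implicit-type load syntax.
    old = "  %0 = load i32* %arrayidx, align 4"

    # The substitution re-emits the pointee type as an explicit first operand.
    new = re.sub(pat, r"\1, \2\3*\4", old)
    print(new)  # "  %0 = load i32, i32* %arrayidx, align 4"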
Diffstat (limited to 'llvm/test/Transforms/LoopUnroll')
-rw-r--r--  llvm/test/Transforms/LoopUnroll/2011-08-08-PhiUpdate.ll        |  4
-rw-r--r--  llvm/test/Transforms/LoopUnroll/2011-08-09-IVSimplify.ll       |  2
-rw-r--r--  llvm/test/Transforms/LoopUnroll/2011-10-01-NoopTrunc.ll        |  2
-rw-r--r--  llvm/test/Transforms/LoopUnroll/PowerPC/a2-unrolling.ll        |  2
-rw-r--r--  llvm/test/Transforms/LoopUnroll/X86/partial.ll                 | 12
-rw-r--r--  llvm/test/Transforms/LoopUnroll/ephemeral.ll                   |  2
-rw-r--r--  llvm/test/Transforms/LoopUnroll/full-unroll-heuristics.ll      |  4
-rw-r--r--  llvm/test/Transforms/LoopUnroll/runtime-loop.ll                |  6
-rw-r--r--  llvm/test/Transforms/LoopUnroll/runtime-loop1.ll               |  2
-rw-r--r--  llvm/test/Transforms/LoopUnroll/runtime-loop2.ll               |  2
-rw-r--r--  llvm/test/Transforms/LoopUnroll/runtime-loop3.ll               |  2
-rw-r--r--  llvm/test/Transforms/LoopUnroll/scevunroll.ll                  | 10
-rw-r--r--  llvm/test/Transforms/LoopUnroll/shifted-tripcount.ll           |  4
-rw-r--r--  llvm/test/Transforms/LoopUnroll/unroll-pragmas-disabled.ll     | 10
-rw-r--r--  llvm/test/Transforms/LoopUnroll/unroll-pragmas.ll              | 18
15 files changed, 41 insertions, 41 deletions
diff --git a/llvm/test/Transforms/LoopUnroll/2011-08-08-PhiUpdate.ll b/llvm/test/Transforms/LoopUnroll/2011-08-08-PhiUpdate.ll
index c5a7becdd3c..a87b16a28b7 100644
--- a/llvm/test/Transforms/LoopUnroll/2011-08-08-PhiUpdate.ll
+++ b/llvm/test/Transforms/LoopUnroll/2011-08-08-PhiUpdate.ll
@@ -47,7 +47,7 @@ bb: ; preds = %bb.nph, %bb1
%indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %bb1 ] ; <i64> [#uses=2]
%s.01 = phi i32 [ 0, %bb.nph ], [ %2, %bb1 ] ; <i32> [#uses=1]
%scevgep = getelementptr i32, i32* %p, i64 %indvar ; <i32*> [#uses=1]
- %1 = load i32* %scevgep, align 1 ; <i32> [#uses=1]
+ %1 = load i32, i32* %scevgep, align 1 ; <i32> [#uses=1]
%2 = add nsw i32 %1, %s.01 ; <i32> [#uses=2]
br label %bb1
@@ -84,7 +84,7 @@ do.body: ; preds = %do.cond, %if.end
br i1 %cond2, label %exit, label %do.cond
exit: ; preds = %do.body
- %tmp7.i = load i32* undef, align 8
+ %tmp7.i = load i32, i32* undef, align 8
br i1 undef, label %do.cond, label %land.lhs.true
land.lhs.true: ; preds = %exit
diff --git a/llvm/test/Transforms/LoopUnroll/2011-08-09-IVSimplify.ll b/llvm/test/Transforms/LoopUnroll/2011-08-09-IVSimplify.ll
index 2e85d0dd026..0b484095650 100644
--- a/llvm/test/Transforms/LoopUnroll/2011-08-09-IVSimplify.ll
+++ b/llvm/test/Transforms/LoopUnroll/2011-08-09-IVSimplify.ll
@@ -29,7 +29,7 @@ while.body:
%rem = and i32 %bit_addr.addr.01, 31
%shl = shl i32 1, %rem
%arrayidx = getelementptr inbounds i32, i32* %bitmap, i32 %shr
- %tmp6 = load i32* %arrayidx, align 4
+ %tmp6 = load i32, i32* %arrayidx, align 4
%xor = xor i32 %tmp6, %shl
store i32 %xor, i32* %arrayidx, align 4
%inc = add i32 %bit_addr.addr.01, 1
diff --git a/llvm/test/Transforms/LoopUnroll/2011-10-01-NoopTrunc.ll b/llvm/test/Transforms/LoopUnroll/2011-10-01-NoopTrunc.ll
index c77832d8976..5f9eec72ba5 100644
--- a/llvm/test/Transforms/LoopUnroll/2011-10-01-NoopTrunc.ll
+++ b/llvm/test/Transforms/LoopUnroll/2011-10-01-NoopTrunc.ll
@@ -22,7 +22,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.02 = phi i32 [ 0, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds i8, i8* %arr, i64 %indvars.iv
- %0 = load i8* %arrayidx, align 1
+ %0 = load i8, i8* %arrayidx, align 1
%conv = sext i8 %0 to i32
%add = add nsw i32 %conv, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
diff --git a/llvm/test/Transforms/LoopUnroll/PowerPC/a2-unrolling.ll b/llvm/test/Transforms/LoopUnroll/PowerPC/a2-unrolling.ll
index 23290236e22..e9aa1acd5fe 100644
--- a/llvm/test/Transforms/LoopUnroll/PowerPC/a2-unrolling.ll
+++ b/llvm/test/Transforms/LoopUnroll/PowerPC/a2-unrolling.ll
@@ -28,7 +28,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
diff --git a/llvm/test/Transforms/LoopUnroll/X86/partial.ll b/llvm/test/Transforms/LoopUnroll/X86/partial.ll
index bb8a04396f1..4566f792deb 100644
--- a/llvm/test/Transforms/LoopUnroll/X86/partial.ll
+++ b/llvm/test/Transforms/LoopUnroll/X86/partial.ll
@@ -11,11 +11,11 @@ vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%0 = getelementptr inbounds double, double* %b, i64 %index
%1 = bitcast double* %0 to <2 x double>*
- %wide.load = load <2 x double>* %1, align 8
+ %wide.load = load <2 x double>, <2 x double>* %1, align 8
%.sum9 = or i64 %index, 2
%2 = getelementptr double, double* %b, i64 %.sum9
%3 = bitcast double* %2 to <2 x double>*
- %wide.load8 = load <2 x double>* %3, align 8
+ %wide.load8 = load <2 x double>, <2 x double>* %3, align 8
%4 = fadd <2 x double> %wide.load, <double 1.000000e+00, double 1.000000e+00>
%5 = fadd <2 x double> %wide.load8, <double 1.000000e+00, double 1.000000e+00>
%6 = getelementptr inbounds double, double* %a, i64 %index
@@ -47,7 +47,7 @@ vector.body: ; preds = %vector.body, %entry
%index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
%v0 = getelementptr inbounds double, double* %b, i64 %index
%v1 = bitcast double* %v0 to <2 x double>*
- %wide.load = load <2 x double>* %v1, align 8
+ %wide.load = load <2 x double>, <2 x double>* %v1, align 8
%v4 = fadd <2 x double> %wide.load, <double 1.000000e+00, double 1.000000e+00>
%v5 = fmul <2 x double> %v4, <double 8.000000e+00, double 8.000000e+00>
%v6 = getelementptr inbounds double, double* %a, i64 %index
@@ -85,17 +85,17 @@ for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%reduction.026 = phi i16 [ %add14, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i16, i16* %arr, i64 %indvars.iv
- %0 = load i16* %arrayidx, align 2
+ %0 = load i16, i16* %arrayidx, align 2
%add = add i16 %0, %reduction.026
%sext = mul i64 %indvars.iv, 12884901888
%idxprom3 = ashr exact i64 %sext, 32
%arrayidx4 = getelementptr inbounds i16, i16* %arr, i64 %idxprom3
- %1 = load i16* %arrayidx4, align 2
+ %1 = load i16, i16* %arrayidx4, align 2
%add7 = add i16 %add, %1
%sext28 = mul i64 %indvars.iv, 21474836480
%idxprom10 = ashr exact i64 %sext28, 32
%arrayidx11 = getelementptr inbounds i16, i16* %arr, i64 %idxprom10
- %2 = load i16* %arrayidx11, align 2
+ %2 = load i16, i16* %arrayidx11, align 2
%add14 = add i16 %add7, %2
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
diff --git a/llvm/test/Transforms/LoopUnroll/ephemeral.ll b/llvm/test/Transforms/LoopUnroll/ephemeral.ll
index 4190520bd2f..d16eba7a964 100644
--- a/llvm/test/Transforms/LoopUnroll/ephemeral.ll
+++ b/llvm/test/Transforms/LoopUnroll/ephemeral.ll
@@ -13,7 +13,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.01 = phi i32 [ 0, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
; This loop will be completely unrolled, even with these extra instructions,
; but only because they're ephemeral (and, thus, free).
diff --git a/llvm/test/Transforms/LoopUnroll/full-unroll-heuristics.ll b/llvm/test/Transforms/LoopUnroll/full-unroll-heuristics.ll
index 458828f17c8..a9104adeb97 100644
--- a/llvm/test/Transforms/LoopUnroll/full-unroll-heuristics.ll
+++ b/llvm/test/Transforms/LoopUnroll/full-unroll-heuristics.ll
@@ -47,9 +47,9 @@ loop: ; preds = %loop, %entry
%iv = phi i64 [ 0, %entry ], [ %inc, %loop ]
%r = phi i32 [ 0, %entry ], [ %add, %loop ]
%arrayidx = getelementptr inbounds i32, i32* %src, i64 %iv
- %src_element = load i32* %arrayidx, align 4
+ %src_element = load i32, i32* %arrayidx, align 4
%array_const_idx = getelementptr inbounds [9 x i32], [9 x i32]* @known_constant, i64 0, i64 %iv
- %const_array_element = load i32* %array_const_idx, align 4
+ %const_array_element = load i32, i32* %array_const_idx, align 4
%mul = mul nsw i32 %src_element, %const_array_element
%add = add nsw i32 %mul, %r
%inc = add nuw nsw i64 %iv, 1
diff --git a/llvm/test/Transforms/LoopUnroll/runtime-loop.ll b/llvm/test/Transforms/LoopUnroll/runtime-loop.ll
index ff63d54e16a..3bec939178a 100644
--- a/llvm/test/Transforms/LoopUnroll/runtime-loop.ll
+++ b/llvm/test/Transforms/LoopUnroll/runtime-loop.ll
@@ -21,7 +21,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
@@ -48,7 +48,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%sum.01 = phi i32 [ 0, %entry ], [ %add, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %sum.01
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
@@ -97,7 +97,7 @@ for.body: ; preds = %for.body, %entry
%len.addr.04 = phi i32 [ %sub, %for.body ], [ %len, %entry ]
%res.03 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%incdec.ptr = getelementptr inbounds i16, i16* %p.addr.05, i64 1
- %0 = load i16* %p.addr.05, align 2
+ %0 = load i16, i16* %p.addr.05, align 2
%conv = zext i16 %0 to i32
%add = add i32 %conv, %res.03
%sub = add nsw i32 %len.addr.04, -2
diff --git a/llvm/test/Transforms/LoopUnroll/runtime-loop1.ll b/llvm/test/Transforms/LoopUnroll/runtime-loop1.ll
index e2fc012b871..7684e394290 100644
--- a/llvm/test/Transforms/LoopUnroll/runtime-loop1.ll
+++ b/llvm/test/Transforms/LoopUnroll/runtime-loop1.ll
@@ -17,7 +17,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
diff --git a/llvm/test/Transforms/LoopUnroll/runtime-loop2.ll b/llvm/test/Transforms/LoopUnroll/runtime-loop2.ll
index 7dc466b934e..7c6bb969055 100644
--- a/llvm/test/Transforms/LoopUnroll/runtime-loop2.ll
+++ b/llvm/test/Transforms/LoopUnroll/runtime-loop2.ll
@@ -17,7 +17,7 @@ for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%sum.02 = phi i32 [ %add, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add = add nsw i32 %0, %sum.02
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
diff --git a/llvm/test/Transforms/LoopUnroll/runtime-loop3.ll b/llvm/test/Transforms/LoopUnroll/runtime-loop3.ll
index 2bbea1b22c3..fd13ebfa0b8 100644
--- a/llvm/test/Transforms/LoopUnroll/runtime-loop3.ll
+++ b/llvm/test/Transforms/LoopUnroll/runtime-loop3.ll
@@ -24,7 +24,7 @@ for.body3: ; preds = %for.cond1.preheader
%sum.19 = phi i32 [ %add4, %for.body3 ], [ %sum.012, %for.cond1.preheader ]
%0 = add nsw i64 %indvars.iv, %indvars.iv16
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %0
- %1 = load i32* %arrayidx, align 4
+ %1 = load i32, i32* %arrayidx, align 4
%add4 = add nsw i32 %1, %sum.19
%indvars.iv.next = add i64 %indvars.iv, 1
%lftr.wideiv = trunc i64 %indvars.iv.next to i32
diff --git a/llvm/test/Transforms/LoopUnroll/scevunroll.ll b/llvm/test/Transforms/LoopUnroll/scevunroll.ll
index e018878aac2..a5c9a6efacf 100644
--- a/llvm/test/Transforms/LoopUnroll/scevunroll.ll
+++ b/llvm/test/Transforms/LoopUnroll/scevunroll.ll
@@ -20,7 +20,7 @@ while.body:
%sum = phi i32 [ 0, %entry ], [ %sum.next, %while.body ]
%iv.next = add i64 %iv, -1
%adr = getelementptr inbounds i32, i32* %base, i64 %iv.next
- %tmp = load i32* %adr, align 8
+ %tmp = load i32, i32* %adr, align 8
%sum.next = add i32 %sum, %tmp
%iv.narrow = trunc i64 %iv.next to i32
%cmp.i65 = icmp sgt i32 %iv.narrow, 0
@@ -47,7 +47,7 @@ loop:
%iv = phi i64 [ 0, %entry ], [ %inc, %tail ]
%s = phi i64 [ 0, %entry ], [ %s.next, %tail ]
%adr = getelementptr i64, i64* %base, i64 %iv
- %val = load i64* %adr
+ %val = load i64, i64* %adr
%s.next = add i64 %s, %val
%inc = add i64 %iv, 1
%cmp = icmp ne i64 %inc, 4
@@ -68,7 +68,7 @@ exit2:
;
; CHECK-LABEL: @multiExit(
; CHECK: getelementptr i32, i32* %base, i32 10
-; CHECK-NEXT: load i32*
+; CHECK-NEXT: load i32, i32*
; CHECK: br i1 false, label %l2.10, label %exit1
; CHECK: l2.10:
; CHECK-NOT: br
@@ -82,7 +82,7 @@ l1:
%inc1 = add i32 %iv1, 1
%inc2 = add i32 %iv2, 1
%adr = getelementptr i32, i32* %base, i32 %iv1
- %val = load i32* %adr
+ %val = load i32, i32* %adr
%cmp1 = icmp slt i32 %iv1, 5
br i1 %cmp1, label %l2, label %exit1
l2:
@@ -113,7 +113,7 @@ l1:
%inc1 = add i32 %iv1, 1
%inc2 = add i32 %iv2, 1
%adr = getelementptr i32, i32* %base, i32 %iv1
- %val = load i32* %adr
+ %val = load i32, i32* %adr
%cmp1 = icmp slt i32 %iv1, 5
br i1 %cmp1, label %l2, label %exit1
l2:
diff --git a/llvm/test/Transforms/LoopUnroll/shifted-tripcount.ll b/llvm/test/Transforms/LoopUnroll/shifted-tripcount.ll
index fb3d857b374..4c216983af5 100644
--- a/llvm/test/Transforms/LoopUnroll/shifted-tripcount.ll
+++ b/llvm/test/Transforms/LoopUnroll/shifted-tripcount.ll
@@ -16,8 +16,8 @@ for.body: ; preds = %entry, %for.body
%arrayidx7 = getelementptr double, double* %p, i64 %i.013 ; <double*> [#uses=2]
%tmp16 = add i64 %i.013, 1 ; <i64> [#uses=3]
%arrayidx = getelementptr double, double* %p, i64 %tmp16 ; <double*> [#uses=1]
- %tmp4 = load double* %arrayidx ; <double> [#uses=1]
- %tmp8 = load double* %arrayidx7 ; <double> [#uses=1]
+ %tmp4 = load double, double* %arrayidx ; <double> [#uses=1]
+ %tmp8 = load double, double* %arrayidx7 ; <double> [#uses=1]
%mul9 = fmul double %tmp8, %tmp4 ; <double> [#uses=1]
store double %mul9, double* %arrayidx7
%exitcond = icmp eq i64 %tmp16, %mul10 ; <i1> [#uses=1]
diff --git a/llvm/test/Transforms/LoopUnroll/unroll-pragmas-disabled.ll b/llvm/test/Transforms/LoopUnroll/unroll-pragmas-disabled.ll
index e185ddd51fb..dc812fb4065 100644
--- a/llvm/test/Transforms/LoopUnroll/unroll-pragmas-disabled.ll
+++ b/llvm/test/Transforms/LoopUnroll/unroll-pragmas-disabled.ll
@@ -20,7 +20,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -52,7 +52,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -79,7 +79,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -111,7 +111,7 @@ entry:
for.body3: ; preds = %for.body3, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body3 ]
%arrayidx = getelementptr inbounds i32, i32* %List, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%add4 = add nsw i32 %0, 10
store i32 %add4, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -125,7 +125,7 @@ for.body3.1: ; preds = %for.body3.1.prehead
%indvars.iv.1 = phi i64 [ %1, %for.body3.1 ], [ 0, %for.body3.1.preheader ]
%1 = add nsw i64 %indvars.iv.1, 1
%arrayidx.1 = getelementptr inbounds i32, i32* %List, i64 %1
- %2 = load i32* %arrayidx.1, align 4
+ %2 = load i32, i32* %arrayidx.1, align 4
%add4.1 = add nsw i32 %2, 10
store i32 %add4.1, i32* %arrayidx.1, align 4
%exitcond.1 = icmp eq i64 %1, 4
diff --git a/llvm/test/Transforms/LoopUnroll/unroll-pragmas.ll b/llvm/test/Transforms/LoopUnroll/unroll-pragmas.ll
index 3840f0b02f2..1354181becd 100644
--- a/llvm/test/Transforms/LoopUnroll/unroll-pragmas.ll
+++ b/llvm/test/Transforms/LoopUnroll/unroll-pragmas.ll
@@ -20,7 +20,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -44,7 +44,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -72,7 +72,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -95,7 +95,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -125,7 +125,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -154,7 +154,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -191,7 +191,7 @@ entry:
for.body: ; preds = %entry, %for.body
%indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -218,7 +218,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
@@ -246,7 +246,7 @@ entry:
for.body: ; preds = %for.body, %entry
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
- %0 = load i32* %arrayidx, align 4
+ %0 = load i32, i32* %arrayidx, align 4
%inc = add nsw i32 %0, 1
store i32 %inc, i32* %arrayidx, align 4
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1