diff options
Diffstat (limited to 'llvm/test/Transforms/LoopUnroll/X86/partial.ll')
-rw-r--r-- | llvm/test/Transforms/LoopUnroll/X86/partial.ll | 12 |
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/llvm/test/Transforms/LoopUnroll/X86/partial.ll b/llvm/test/Transforms/LoopUnroll/X86/partial.ll
index bb8a04396f1..4566f792deb 100644
--- a/llvm/test/Transforms/LoopUnroll/X86/partial.ll
+++ b/llvm/test/Transforms/LoopUnroll/X86/partial.ll
@@ -11,11 +11,11 @@ vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %0 = getelementptr inbounds double, double* %b, i64 %index
   %1 = bitcast double* %0 to <2 x double>*
-  %wide.load = load <2 x double>* %1, align 8
+  %wide.load = load <2 x double>, <2 x double>* %1, align 8
   %.sum9 = or i64 %index, 2
   %2 = getelementptr double, double* %b, i64 %.sum9
   %3 = bitcast double* %2 to <2 x double>*
-  %wide.load8 = load <2 x double>* %3, align 8
+  %wide.load8 = load <2 x double>, <2 x double>* %3, align 8
   %4 = fadd <2 x double> %wide.load, <double 1.000000e+00, double 1.000000e+00>
   %5 = fadd <2 x double> %wide.load8, <double 1.000000e+00, double 1.000000e+00>
   %6 = getelementptr inbounds double, double* %a, i64 %index
@@ -47,7 +47,7 @@ vector.body:                                      ; preds = %vector.body, %entry
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
   %v0 = getelementptr inbounds double, double* %b, i64 %index
   %v1 = bitcast double* %v0 to <2 x double>*
-  %wide.load = load <2 x double>* %v1, align 8
+  %wide.load = load <2 x double>, <2 x double>* %v1, align 8
   %v4 = fadd <2 x double> %wide.load, <double 1.000000e+00, double 1.000000e+00>
   %v5 = fmul <2 x double> %v4, <double 8.000000e+00, double 8.000000e+00>
   %v6 = getelementptr inbounds double, double* %a, i64 %index
@@ -85,17 +85,17 @@ for.body:                                         ; preds = %entry, %for.body
   %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
   %reduction.026 = phi i16 [ %add14, %for.body ], [ 0, %entry ]
   %arrayidx = getelementptr inbounds i16, i16* %arr, i64 %indvars.iv
-  %0 = load i16* %arrayidx, align 2
+  %0 = load i16, i16* %arrayidx, align 2
   %add = add i16 %0, %reduction.026
   %sext = mul i64 %indvars.iv, 12884901888
   %idxprom3 = ashr exact i64 %sext, 32
   %arrayidx4 = getelementptr inbounds i16, i16* %arr, i64 %idxprom3
-  %1 = load i16* %arrayidx4, align 2
+  %1 = load i16, i16* %arrayidx4, align 2
   %add7 = add i16 %add, %1
   %sext28 = mul i64 %indvars.iv, 21474836480
   %idxprom10 = ashr exact i64 %sext28, 32
   %arrayidx11 = getelementptr inbounds i16, i16* %arr, i64 %idxprom10
-  %2 = load i16* %arrayidx11, align 2
+  %2 = load i16, i16* %arrayidx11, align 2
   %add14 = add i16 %add7, %2
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %lftr.wideiv = trunc i64 %indvars.iv.next to i32