Diffstat (limited to 'llvm/test/Transforms/SLPVectorizer/X86/consecutive-access.ll')
-rw-r--r-- | llvm/test/Transforms/SLPVectorizer/X86/consecutive-access.ll | 40
1 file changed, 20 insertions, 20 deletions
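
The change below is a mechanical update of every load instruction in this test to LLVM's explicit-type load syntax, where the loaded type is written out before the pointer operand instead of being inferred from the pointer type. A representative before/after pair, taken verbatim from the first hunk:

  %0 = load double* %arrayidx, align 8          ; old form: result type implied by double*
  %0 = load double, double* %arrayidx, align 8  ; new form: loaded type stated explicitly

Nothing else in the file changes; the getelementptr instructions already carry their explicit source element type and appear only as context lines.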
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/consecutive-access.ll b/llvm/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
index ab7380af3b5..1ad4d694d34 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/consecutive-access.ll
@@ -21,25 +21,25 @@ entry:
   %mul = mul nsw i32 %u, 3
   %idxprom = sext i32 %mul to i64
   %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
-  %0 = load double* %arrayidx, align 8
+  %0 = load double, double* %arrayidx, align 8
   %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
-  %1 = load double* %arrayidx4, align 8
+  %1 = load double, double* %arrayidx4, align 8
   %add5 = fadd double %0, %1
   store double %add5, double* %arrayidx, align 8
   %add11 = add nsw i32 %mul, 1
   %idxprom12 = sext i32 %add11 to i64
   %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
-  %2 = load double* %arrayidx13, align 8
+  %2 = load double, double* %arrayidx13, align 8
   %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
-  %3 = load double* %arrayidx17, align 8
+  %3 = load double, double* %arrayidx17, align 8
   %add18 = fadd double %2, %3
   store double %add18, double* %arrayidx13, align 8
   %add24 = add nsw i32 %mul, 2
   %idxprom25 = sext i32 %add24 to i64
   %arrayidx26 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom25
-  %4 = load double* %arrayidx26, align 8
+  %4 = load double, double* %arrayidx26, align 8
   %arrayidx30 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom25
-  %5 = load double* %arrayidx30, align 8
+  %5 = load double, double* %arrayidx30, align 8
   %add31 = fadd double %4, %5
   store double %add31, double* %arrayidx26, align 8
   ret void
@@ -58,17 +58,17 @@ entry:
   %mul = mul nsw i32 %u, 2
   %idxprom = sext i32 %mul to i64
   %arrayidx = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom
-  %0 = load double* %arrayidx, align 8
+  %0 = load double, double* %arrayidx, align 8
   %arrayidx4 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom
-  %1 = load double* %arrayidx4, align 8
+  %1 = load double, double* %arrayidx4, align 8
   %add5 = fadd double %0, %1
   store double %add5, double* %arrayidx, align 8
   %add11 = add nsw i32 %mul, 1
   %idxprom12 = sext i32 %add11 to i64
   %arrayidx13 = getelementptr inbounds [2000 x double], [2000 x double]* @A, i32 0, i64 %idxprom12
-  %2 = load double* %arrayidx13, align 8
+  %2 = load double, double* %arrayidx13, align 8
   %arrayidx17 = getelementptr inbounds [2000 x double], [2000 x double]* @B, i32 0, i64 %idxprom12
-  %3 = load double* %arrayidx17, align 8
+  %3 = load double, double* %arrayidx17, align 8
   %add18 = fadd double %2, %3
   store double %add18, double* %arrayidx13, align 8
   ret void
@@ -85,33 +85,33 @@ entry:
   %mul = mul nsw i32 %u, 4
   %idxprom = sext i32 %mul to i64
   %arrayidx = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom
-  %0 = load float* %arrayidx, align 4
+  %0 = load float, float* %arrayidx, align 4
   %arrayidx4 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom
-  %1 = load float* %arrayidx4, align 4
+  %1 = load float, float* %arrayidx4, align 4
   %add5 = fadd float %0, %1
   store float %add5, float* %arrayidx, align 4
   %add11 = add nsw i32 %mul, 1
   %idxprom12 = sext i32 %add11 to i64
   %arrayidx13 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom12
-  %2 = load float* %arrayidx13, align 4
+  %2 = load float, float* %arrayidx13, align 4
   %arrayidx17 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom12
-  %3 = load float* %arrayidx17, align 4
+  %3 = load float, float* %arrayidx17, align 4
   %add18 = fadd float %2, %3
   store float %add18, float* %arrayidx13, align 4
   %add24 = add nsw i32 %mul, 2
   %idxprom25 = sext i32 %add24 to i64
   %arrayidx26 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom25
-  %4 = load float* %arrayidx26, align 4
+  %4 = load float, float* %arrayidx26, align 4
   %arrayidx30 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom25
-  %5 = load float* %arrayidx30, align 4
+  %5 = load float, float* %arrayidx30, align 4
   %add31 = fadd float %4, %5
   store float %add31, float* %arrayidx26, align 4
   %add37 = add nsw i32 %mul, 3
   %idxprom38 = sext i32 %add37 to i64
   %arrayidx39 = getelementptr inbounds [2000 x float], [2000 x float]* @C, i32 0, i64 %idxprom38
-  %6 = load float* %arrayidx39, align 4
+  %6 = load float, float* %arrayidx39, align 4
   %arrayidx43 = getelementptr inbounds [2000 x float], [2000 x float]* @D, i32 0, i64 %idxprom38
-  %7 = load float* %arrayidx43, align 4
+  %7 = load float, float* %arrayidx43, align 4
   %add44 = fadd float %6, %7
   store float %add44, float* %arrayidx39, align 4
   ret void
@@ -143,12 +143,12 @@ for.body: ; preds = %for.body.lr.ph, %fo
   %mul = mul nsw i32 %0, 2
   %idxprom = sext i32 %mul to i64
   %arrayidx = getelementptr inbounds double, double* %A, i64 %idxprom
-  %2 = load double* %arrayidx, align 8
+  %2 = load double, double* %arrayidx, align 8
   %mul1 = fmul double 7.000000e+00, %2
   %add = add nsw i32 %mul, 1
   %idxprom3 = sext i32 %add to i64
   %arrayidx4 = getelementptr inbounds double, double* %A, i64 %idxprom3
-  %3 = load double* %arrayidx4, align 8
+  %3 = load double, double* %arrayidx4, align 8
   %mul5 = fmul double 7.000000e+00, %3
   %add6 = fadd double %mul1, %mul5
   %add7 = fadd double %1, %add6