author | Sanjay Patel <spatel@rotateright.com> | 2018-02-08 15:32:28 +0000
committer | Sanjay Patel <spatel@rotateright.com> | 2018-02-08 15:32:28 +0000
commit | 574fb73c8961c711bd7614f205a8f8e4ef486c7b (patch)
tree | ecd849272e2c256d13ccb14e54ea97b05a50ccfa
parent | 124392f038a0e4dc9452e447c9d188bdab339733 (diff)
[SLPVectorizer] auto-generate complete checks; NFC
llvm-svn: 324616
-rw-r--r-- | llvm/test/Transforms/SLPVectorizer/X86/simplebb.ll | 70
1 file changed, 52 insertions(+), 18 deletions(-)
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/simplebb.ll b/llvm/test/Transforms/SLPVectorizer/X86/simplebb.ll
index a5d9ad9685c..33ca0029c48 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/simplebb.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/simplebb.ll
@@ -1,14 +1,21 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
 
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
 target triple = "x86_64-apple-macosx10.8.0"
 
 ; Simple 3-pair chain with loads and stores
-; CHECK: test1
-; CHECK: store <2 x double>
-; CHECK: ret
 define void @test1(double* %a, double* %b, double* %c) {
-entry:
+; CHECK-LABEL: @test1(
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
+; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x double>, <2 x double>* [[TMP3]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = fmul <2 x double> [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[C:%.*]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 8
+; CHECK-NEXT:    ret void
+;
   %i0 = load double, double* %a, align 8
   %i1 = load double, double* %b, align 8
   %mul = fmul double %i0, %i1
@@ -24,11 +31,18 @@ entry:
 }
 
 ; Simple 3-pair chain with loads and stores, obfuscated with bitcasts
-; CHECK: test2
-; CHECK: store <2 x double>
-; CHECK: ret
 define void @test2(double* %a, double* %b, i8* %e) {
-entry:
+; CHECK-LABEL: @test2(
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
+; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
+; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x double>, <2 x double>* [[TMP3]], align 8
+; CHECK-NEXT:    [[TMP5:%.*]] = fmul <2 x double> [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    [[C:%.*]] = bitcast i8* [[E:%.*]] to double*
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[C]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 8
+; CHECK-NEXT:    ret void
+;
   %i0 = load double, double* %a, align 8
   %i1 = load double, double* %b, align 8
   %mul = fmul double %i0, %i1
@@ -46,12 +60,23 @@ entry:
 }
 
 ; Don't vectorize volatile loads.
-; CHECK: test_volatile_load
-; CHECK-NOT: load <2 x double>
-; CHECK: store <2 x double>
-; CHECK: ret
 define void @test_volatile_load(double* %a, double* %b, double* %c) {
-entry:
+; CHECK-LABEL: @test_volatile_load(
+; CHECK-NEXT:    [[I0:%.*]] = load volatile double, double* [[A:%.*]], align 8
+; CHECK-NEXT:    [[I1:%.*]] = load volatile double, double* [[B:%.*]], align 8
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[A]], i64 1
+; CHECK-NEXT:    [[I3:%.*]] = load double, double* [[ARRAYIDX3]], align 8
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds double, double* [[B]], i64 1
+; CHECK-NEXT:    [[I4:%.*]] = load double, double* [[ARRAYIDX4]], align 8
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> undef, double [[I0]], i32 0
+; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> [[TMP1]], double [[I3]], i32 1
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x double> undef, double [[I1]], i32 0
+; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x double> [[TMP3]], double [[I4]], i32 1
+; CHECK-NEXT:    [[TMP5:%.*]] = fmul <2 x double> [[TMP2]], [[TMP4]]
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[C:%.*]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 8
+; CHECK-NEXT:    ret void
+;
   %i0 = load volatile double, double* %a, align 8
   %i1 = load volatile double, double* %b, align 8
   %mul = fmul double %i0, %i1
@@ -67,11 +92,21 @@ entry:
 }
 
 ; Don't vectorize volatile stores.
-; CHECK: test_volatile_store
-; CHECK-NOT: store <2 x double>
-; CHECK: ret
 define void @test_volatile_store(double* %a, double* %b, double* %c) {
-entry:
+; CHECK-LABEL: @test_volatile_store(
+; CHECK-NEXT:    [[I0:%.*]] = load double, double* [[A:%.*]], align 8
+; CHECK-NEXT:    [[I1:%.*]] = load double, double* [[B:%.*]], align 8
+; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[I0]], [[I1]]
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[A]], i64 1
+; CHECK-NEXT:    [[I3:%.*]] = load double, double* [[ARRAYIDX3]], align 8
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds double, double* [[B]], i64 1
+; CHECK-NEXT:    [[I4:%.*]] = load double, double* [[ARRAYIDX4]], align 8
+; CHECK-NEXT:    [[MUL5:%.*]] = fmul double [[I3]], [[I4]]
+; CHECK-NEXT:    store volatile double [[MUL]], double* [[C:%.*]], align 8
+; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[C]], i64 1
+; CHECK-NEXT:    store volatile double [[MUL5]], double* [[ARRAYIDX5]], align 8
+; CHECK-NEXT:    ret void
+;
   %i0 = load double, double* %a, align 8
   %i1 = load double, double* %b, align 8
   %mul = fmul double %i0, %i1
@@ -86,4 +121,3 @@ entry:
   ret void
 }
 
-
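
The NOTE line added at the top of the test records that the assertions were generated by utils/update_test_checks.py rather than written by hand: the script re-runs the opt command from the test's RUN line and converts the resulting IR into the CHECK-LABEL/CHECK-NEXT blocks shown above, which is why the old hand-written CHECK lines are dropped wholesale. A minimal sketch of the regeneration command, assuming an in-tree checkout with opt built at build/bin/opt (the build path is an assumption, not part of this commit):

    # Regenerate the FileCheck assertions for this test in place
    # (build/bin/opt is an assumed path to a locally built opt).
    llvm/utils/update_test_checks.py --opt-binary=build/bin/opt \
        llvm/test/Transforms/SLPVectorizer/X86/simplebb.ll

Because the checks are mechanically derived from opt's current output, the test now captures the vectorizer's complete result for each function, while the change itself remains NFC.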