summaryrefslogtreecommitdiffstats
path: root/llvm/test/Transforms/SLPVectorizer/ARM
diff options
context:
space:
mode:
authorEric Christopher <echristo@gmail.com>2019-04-17 02:12:23 +0000
committerEric Christopher <echristo@gmail.com>2019-04-17 02:12:23 +0000
commita86343512845c9c1fdbac865fea88aa5fce7142a (patch)
tree666fc6353de19ad8b00e56b67edd33f24104e4a7 /llvm/test/Transforms/SLPVectorizer/ARM
parent7f8ca6e3679b3af951cb7a4b1377edfaa3244b93 (diff)
downloadbcm5719-llvm-a86343512845c9c1fdbac865fea88aa5fce7142a.tar.gz
bcm5719-llvm-a86343512845c9c1fdbac865fea88aa5fce7142a.zip
Temporarily Revert "Add basic loop fusion pass."
As it's causing some bot failures (and per request from kbarton). This reverts commit r358543/ab70da07286e618016e78247e4a24fcb84077fda. llvm-svn: 358546
Diffstat (limited to 'llvm/test/Transforms/SLPVectorizer/ARM')
-rw-r--r--llvm/test/Transforms/SLPVectorizer/ARM/extract-insert.ll31
-rw-r--r--llvm/test/Transforms/SLPVectorizer/ARM/lit.local.cfg2
-rw-r--r--llvm/test/Transforms/SLPVectorizer/ARM/memory.ll28
-rw-r--r--llvm/test/Transforms/SLPVectorizer/ARM/sroa.ll87
4 files changed, 0 insertions, 148 deletions
diff --git a/llvm/test/Transforms/SLPVectorizer/ARM/extract-insert.ll b/llvm/test/Transforms/SLPVectorizer/ARM/extract-insert.ll
deleted file mode 100644
index 5998801705b..00000000000
--- a/llvm/test/Transforms/SLPVectorizer/ARM/extract-insert.ll
+++ /dev/null
@@ -1,31 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -slp-vectorizer -S -mtriple=thumb7 -mcpu=swift | FileCheck %s
-
-define <4 x i32> @PR13837(<4 x float> %in) {
-; CHECK-LABEL: @PR13837(
-; CHECK-NEXT: [[TMP1:%.*]] = fptosi <4 x float> [[IN:%.*]] to <4 x i32>
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <4 x i32> [[TMP1]], i32 0
-; CHECK-NEXT: [[V0:%.*]] = insertelement <4 x i32> undef, i32 [[TMP2]], i32 0
-; CHECK-NEXT: [[TMP3:%.*]] = extractelement <4 x i32> [[TMP1]], i32 1
-; CHECK-NEXT: [[V1:%.*]] = insertelement <4 x i32> [[V0]], i32 [[TMP3]], i32 1
-; CHECK-NEXT: [[TMP4:%.*]] = extractelement <4 x i32> [[TMP1]], i32 2
-; CHECK-NEXT: [[V2:%.*]] = insertelement <4 x i32> [[V1]], i32 [[TMP4]], i32 2
-; CHECK-NEXT: [[TMP5:%.*]] = extractelement <4 x i32> [[TMP1]], i32 3
-; CHECK-NEXT: [[V3:%.*]] = insertelement <4 x i32> [[V2]], i32 [[TMP5]], i32 3
-; CHECK-NEXT: ret <4 x i32> [[V3]]
-;
- %t0 = extractelement <4 x float> %in, i64 0
- %t1 = extractelement <4 x float> %in, i64 1
- %t2 = extractelement <4 x float> %in, i64 2
- %t3 = extractelement <4 x float> %in, i64 3
- %c0 = fptosi float %t0 to i32
- %c1 = fptosi float %t1 to i32
- %c2 = fptosi float %t2 to i32
- %c3 = fptosi float %t3 to i32
- %v0 = insertelement <4 x i32> undef, i32 %c0, i32 0
- %v1 = insertelement <4 x i32> %v0, i32 %c1, i32 1
- %v2 = insertelement <4 x i32> %v1, i32 %c2, i32 2
- %v3 = insertelement <4 x i32> %v2, i32 %c3, i32 3
- ret <4 x i32> %v3
-}
-
diff --git a/llvm/test/Transforms/SLPVectorizer/ARM/lit.local.cfg b/llvm/test/Transforms/SLPVectorizer/ARM/lit.local.cfg
deleted file mode 100644
index 236e1d34416..00000000000
--- a/llvm/test/Transforms/SLPVectorizer/ARM/lit.local.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-if not 'ARM' in config.root.targets:
- config.unsupported = True
diff --git a/llvm/test/Transforms/SLPVectorizer/ARM/memory.ll b/llvm/test/Transforms/SLPVectorizer/ARM/memory.ll
deleted file mode 100644
index 70e87033207..00000000000
--- a/llvm/test/Transforms/SLPVectorizer/ARM/memory.ll
+++ /dev/null
@@ -1,28 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -basicaa -slp-vectorizer -S -mtriple=thumbv7-apple-ios3.0.0 -mcpu=swift | FileCheck %s
-
-target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
-
-; On swift, unaligned <2 x double> stores need 4 uops, and it is therefore
-; cheaper to do this scalar.
-
-define void @expensive_double_store(double* noalias %dst, double* noalias %src, i64 %count) {
-; CHECK-LABEL: @expensive_double_store(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load double, double* [[SRC:%.*]], align 8
-; CHECK-NEXT: store double [[TMP0]], double* [[DST:%.*]], align 8
-; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds double, double* [[SRC]], i64 1
-; CHECK-NEXT: [[TMP1:%.*]] = load double, double* [[ARRAYIDX2]], align 8
-; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[DST]], i64 1
-; CHECK-NEXT: store double [[TMP1]], double* [[ARRAYIDX3]], align 8
-; CHECK-NEXT: ret void
-;
-entry:
- %0 = load double, double* %src, align 8
- store double %0, double* %dst, align 8
- %arrayidx2 = getelementptr inbounds double, double* %src, i64 1
- %1 = load double, double* %arrayidx2, align 8
- %arrayidx3 = getelementptr inbounds double, double* %dst, i64 1
- store double %1, double* %arrayidx3, align 8
- ret void
-}
diff --git a/llvm/test/Transforms/SLPVectorizer/ARM/sroa.ll b/llvm/test/Transforms/SLPVectorizer/ARM/sroa.ll
deleted file mode 100644
index c43e8f10a59..00000000000
--- a/llvm/test/Transforms/SLPVectorizer/ARM/sroa.ll
+++ /dev/null
@@ -1,87 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mcpu=swift -mtriple=thumbv7-apple-ios -basicaa -slp-vectorizer < %s | FileCheck %s
-
-target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32"
-
-%class.Complex = type { double, double }
-
-; Code like this is the result of SROA. Make sure we don't vectorize this
-; because the scalar versions of the shl/or are handled by the
-; backend and disappear, while the vectorized code stays.
-
-define void @SROAed(%class.Complex* noalias nocapture sret %agg.result, [4 x i32] %a.coerce, [4 x i32] %b.coerce) {
-; CHECK-LABEL: @SROAed(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i32] [[A_COERCE:%.*]], 0
-; CHECK-NEXT: [[A_SROA_0_0_INSERT_EXT:%.*]] = zext i32 [[A_COERCE_FCA_0_EXTRACT]] to i64
-; CHECK-NEXT: [[A_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i32] [[A_COERCE]], 1
-; CHECK-NEXT: [[A_SROA_0_4_INSERT_EXT:%.*]] = zext i32 [[A_COERCE_FCA_1_EXTRACT]] to i64
-; CHECK-NEXT: [[A_SROA_0_4_INSERT_SHIFT:%.*]] = shl nuw i64 [[A_SROA_0_4_INSERT_EXT]], 32
-; CHECK-NEXT: [[A_SROA_0_4_INSERT_INSERT:%.*]] = or i64 [[A_SROA_0_4_INSERT_SHIFT]], [[A_SROA_0_0_INSERT_EXT]]
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i64 [[A_SROA_0_4_INSERT_INSERT]] to double
-; CHECK-NEXT: [[A_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i32] [[A_COERCE]], 2
-; CHECK-NEXT: [[A_SROA_3_8_INSERT_EXT:%.*]] = zext i32 [[A_COERCE_FCA_2_EXTRACT]] to i64
-; CHECK-NEXT: [[A_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i32] [[A_COERCE]], 3
-; CHECK-NEXT: [[A_SROA_3_12_INSERT_EXT:%.*]] = zext i32 [[A_COERCE_FCA_3_EXTRACT]] to i64
-; CHECK-NEXT: [[A_SROA_3_12_INSERT_SHIFT:%.*]] = shl nuw i64 [[A_SROA_3_12_INSERT_EXT]], 32
-; CHECK-NEXT: [[A_SROA_3_12_INSERT_INSERT:%.*]] = or i64 [[A_SROA_3_12_INSERT_SHIFT]], [[A_SROA_3_8_INSERT_EXT]]
-; CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[A_SROA_3_12_INSERT_INSERT]] to double
-; CHECK-NEXT: [[B_COERCE_FCA_0_EXTRACT:%.*]] = extractvalue [4 x i32] [[B_COERCE:%.*]], 0
-; CHECK-NEXT: [[B_SROA_0_0_INSERT_EXT:%.*]] = zext i32 [[B_COERCE_FCA_0_EXTRACT]] to i64
-; CHECK-NEXT: [[B_COERCE_FCA_1_EXTRACT:%.*]] = extractvalue [4 x i32] [[B_COERCE]], 1
-; CHECK-NEXT: [[B_SROA_0_4_INSERT_EXT:%.*]] = zext i32 [[B_COERCE_FCA_1_EXTRACT]] to i64
-; CHECK-NEXT: [[B_SROA_0_4_INSERT_SHIFT:%.*]] = shl nuw i64 [[B_SROA_0_4_INSERT_EXT]], 32
-; CHECK-NEXT: [[B_SROA_0_4_INSERT_INSERT:%.*]] = or i64 [[B_SROA_0_4_INSERT_SHIFT]], [[B_SROA_0_0_INSERT_EXT]]
-; CHECK-NEXT: [[TMP2:%.*]] = bitcast i64 [[B_SROA_0_4_INSERT_INSERT]] to double
-; CHECK-NEXT: [[B_COERCE_FCA_2_EXTRACT:%.*]] = extractvalue [4 x i32] [[B_COERCE]], 2
-; CHECK-NEXT: [[B_SROA_3_8_INSERT_EXT:%.*]] = zext i32 [[B_COERCE_FCA_2_EXTRACT]] to i64
-; CHECK-NEXT: [[B_COERCE_FCA_3_EXTRACT:%.*]] = extractvalue [4 x i32] [[B_COERCE]], 3
-; CHECK-NEXT: [[B_SROA_3_12_INSERT_EXT:%.*]] = zext i32 [[B_COERCE_FCA_3_EXTRACT]] to i64
-; CHECK-NEXT: [[B_SROA_3_12_INSERT_SHIFT:%.*]] = shl nuw i64 [[B_SROA_3_12_INSERT_EXT]], 32
-; CHECK-NEXT: [[B_SROA_3_12_INSERT_INSERT:%.*]] = or i64 [[B_SROA_3_12_INSERT_SHIFT]], [[B_SROA_3_8_INSERT_EXT]]
-; CHECK-NEXT: [[TMP3:%.*]] = bitcast i64 [[B_SROA_3_12_INSERT_INSERT]] to double
-; CHECK-NEXT: [[ADD:%.*]] = fadd double [[TMP0]], [[TMP2]]
-; CHECK-NEXT: [[ADD3:%.*]] = fadd double [[TMP1]], [[TMP3]]
-; CHECK-NEXT: [[RE_I_I:%.*]] = getelementptr inbounds [[CLASS_COMPLEX:%.*]], %class.Complex* [[AGG_RESULT:%.*]], i32 0, i32 0
-; CHECK-NEXT: store double [[ADD]], double* [[RE_I_I]], align 4
-; CHECK-NEXT: [[IM_I_I:%.*]] = getelementptr inbounds [[CLASS_COMPLEX]], %class.Complex* [[AGG_RESULT]], i32 0, i32 1
-; CHECK-NEXT: store double [[ADD3]], double* [[IM_I_I]], align 4
-; CHECK-NEXT: ret void
-;
-entry:
- %a.coerce.fca.0.extract = extractvalue [4 x i32] %a.coerce, 0
- %a.sroa.0.0.insert.ext = zext i32 %a.coerce.fca.0.extract to i64
- %a.coerce.fca.1.extract = extractvalue [4 x i32] %a.coerce, 1
- %a.sroa.0.4.insert.ext = zext i32 %a.coerce.fca.1.extract to i64
- %a.sroa.0.4.insert.shift = shl nuw i64 %a.sroa.0.4.insert.ext, 32
- %a.sroa.0.4.insert.insert = or i64 %a.sroa.0.4.insert.shift, %a.sroa.0.0.insert.ext
- %0 = bitcast i64 %a.sroa.0.4.insert.insert to double
- %a.coerce.fca.2.extract = extractvalue [4 x i32] %a.coerce, 2
- %a.sroa.3.8.insert.ext = zext i32 %a.coerce.fca.2.extract to i64
- %a.coerce.fca.3.extract = extractvalue [4 x i32] %a.coerce, 3
- %a.sroa.3.12.insert.ext = zext i32 %a.coerce.fca.3.extract to i64
- %a.sroa.3.12.insert.shift = shl nuw i64 %a.sroa.3.12.insert.ext, 32
- %a.sroa.3.12.insert.insert = or i64 %a.sroa.3.12.insert.shift, %a.sroa.3.8.insert.ext
- %1 = bitcast i64 %a.sroa.3.12.insert.insert to double
- %b.coerce.fca.0.extract = extractvalue [4 x i32] %b.coerce, 0
- %b.sroa.0.0.insert.ext = zext i32 %b.coerce.fca.0.extract to i64
- %b.coerce.fca.1.extract = extractvalue [4 x i32] %b.coerce, 1
- %b.sroa.0.4.insert.ext = zext i32 %b.coerce.fca.1.extract to i64
- %b.sroa.0.4.insert.shift = shl nuw i64 %b.sroa.0.4.insert.ext, 32
- %b.sroa.0.4.insert.insert = or i64 %b.sroa.0.4.insert.shift, %b.sroa.0.0.insert.ext
- %2 = bitcast i64 %b.sroa.0.4.insert.insert to double
- %b.coerce.fca.2.extract = extractvalue [4 x i32] %b.coerce, 2
- %b.sroa.3.8.insert.ext = zext i32 %b.coerce.fca.2.extract to i64
- %b.coerce.fca.3.extract = extractvalue [4 x i32] %b.coerce, 3
- %b.sroa.3.12.insert.ext = zext i32 %b.coerce.fca.3.extract to i64
- %b.sroa.3.12.insert.shift = shl nuw i64 %b.sroa.3.12.insert.ext, 32
- %b.sroa.3.12.insert.insert = or i64 %b.sroa.3.12.insert.shift, %b.sroa.3.8.insert.ext
- %3 = bitcast i64 %b.sroa.3.12.insert.insert to double
- %add = fadd double %0, %2
- %add3 = fadd double %1, %3
- %re.i.i = getelementptr inbounds %class.Complex, %class.Complex* %agg.result, i32 0, i32 0
- store double %add, double* %re.i.i, align 4
- %im.i.i = getelementptr inbounds %class.Complex, %class.Complex* %agg.result, i32 0, i32 1
- store double %add3, double* %im.i.i, align 4
- ret void
-}
OpenPOWER on IntegriCloud