author    Fedor Sergeev <fedor.sergeev@azul.com>    2018-11-26 10:17:27 +0000
committer Fedor Sergeev <fedor.sergeev@azul.com>    2018-11-26 10:17:27 +0000
commit    8cd9d1b5cebe8a694089ac983038966a2fe6a516 (patch)
tree      0627872d1fcb453f0793656b68509333d84a24ac /llvm/test/Transforms
parent    0e0cd5be403a83de607f88e469a4de06051ac39c (diff)
Revert "[TTI] Reduction costs only need to include a single extract element cost"
This reverts commit r346970. It was causing PR39774, a crash in slp-vectorizer
on a rather simple loop with just a bunch of 'and's in the body.

llvm-svn: 347541
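For context, the crash shape PR39774 describes is an accumulator loop that folds
successive loads with 'and'. The IR below is a minimal hypothetical sketch of that
shape, not the actual reproducer from the bug report; the function name and the
trip count of 8 are illustrative assumptions.

; Hypothetical sketch (assumed shape, not the PR39774 reproducer): an
; 8-iteration loop that 'and's each loaded value into an accumulator,
; i.e. the kind of horizontal reduction whose cost the reverted change
; mis-modelled.
define i32 @and_reduction(i32* nocapture readonly %p) {
entry:
  br label %for.body

for.body:
  %i = phi i64 [ 0, %entry ], [ %i.next, %for.body ]
  %acc = phi i32 [ -1, %entry ], [ %acc.next, %for.body ]
  %arrayidx = getelementptr inbounds i32, i32* %p, i64 %i
  %0 = load i32, i32* %arrayidx, align 4
  %acc.next = and i32 %acc, %0
  %i.next = add nuw nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, 8
  br i1 %exitcond, label %for.end, label %for.body

for.end:
  ret i32 %acc.next
}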
Diffstat (limited to 'llvm/test/Transforms')
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/horizontal-minmax.ll   | 135
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/reduction_unrolled.ll  | 255
-rw-r--r--  llvm/test/Transforms/SLPVectorizer/X86/remark_horcost.ll      |  25
3 files changed, 282 insertions, 133 deletions
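The first hunk below replaces the test's single CHECK prefix with per-subtarget
SSE/AVX/AVX2/SKX prefixes, since the restored cost model makes the vectorization
decision differ by subtarget. The RUN lines that drive those prefixes sit above
the quoted hunk; a sketch of their typical shape follows (the exact -mcpu values
are assumptions, not quoted from the file):

; RUN: opt < %s -mtriple=x86_64-unknown-linux -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: opt < %s -mtriple=x86_64-unknown-linux -mcpu=corei7-avx -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX
; RUN: opt < %s -mtriple=x86_64-unknown-linux -mcpu=core-avx2 -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,AVX2
; RUN: opt < %s -mtriple=x86_64-unknown-linux -mcpu=skylake-avx512 -slp-vectorizer -S | FileCheck %s --check-prefixes=CHECK,SKX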
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/horizontal-minmax.ll b/llvm/test/Transforms/SLPVectorizer/X86/horizontal-minmax.ll
index 84bc85f8e98..41825a67d0c 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/horizontal-minmax.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/horizontal-minmax.ll
@@ -10,33 +10,114 @@
@var = global i32 zeroinitializer, align 8
define i32 @maxi8(i32) {
-; CHECK-LABEL: @maxi8(
-; CHECK-NEXT: [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([32 x i32]* @arr to <8 x i32>*), align 16
-; CHECK-NEXT: [[TMP3:%.*]] = icmp sgt i32 undef, undef
-; CHECK-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 undef, i32 undef
-; CHECK-NEXT: [[TMP5:%.*]] = icmp sgt i32 [[TMP4]], undef
-; CHECK-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP4]], i32 undef
-; CHECK-NEXT: [[TMP7:%.*]] = icmp sgt i32 [[TMP6]], undef
-; CHECK-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 [[TMP6]], i32 undef
-; CHECK-NEXT: [[TMP9:%.*]] = icmp sgt i32 [[TMP8]], undef
-; CHECK-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 undef
-; CHECK-NEXT: [[TMP11:%.*]] = icmp sgt i32 [[TMP10]], undef
-; CHECK-NEXT: [[TMP12:%.*]] = select i1 [[TMP11]], i32 [[TMP10]], i32 undef
-; CHECK-NEXT: [[TMP13:%.*]] = icmp sgt i32 [[TMP12]], undef
-; CHECK-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP12]], i32 undef
-; CHECK-NEXT: [[TMP15:%.*]] = icmp sgt i32 [[TMP14]], undef
-; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP2]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT: [[RDX_MINMAX_CMP:%.*]] = icmp sgt <8 x i32> [[TMP2]], [[RDX_SHUF]]
-; CHECK-NEXT: [[RDX_MINMAX_SELECT:%.*]] = select <8 x i1> [[RDX_MINMAX_CMP]], <8 x i32> [[TMP2]], <8 x i32> [[RDX_SHUF]]
-; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[RDX_MINMAX_SELECT]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT: [[RDX_MINMAX_CMP2:%.*]] = icmp sgt <8 x i32> [[RDX_MINMAX_SELECT]], [[RDX_SHUF1]]
-; CHECK-NEXT: [[RDX_MINMAX_SELECT3:%.*]] = select <8 x i1> [[RDX_MINMAX_CMP2]], <8 x i32> [[RDX_MINMAX_SELECT]], <8 x i32> [[RDX_SHUF1]]
-; CHECK-NEXT: [[RDX_SHUF4:%.*]] = shufflevector <8 x i32> [[RDX_MINMAX_SELECT3]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT: [[RDX_MINMAX_CMP5:%.*]] = icmp sgt <8 x i32> [[RDX_MINMAX_SELECT3]], [[RDX_SHUF4]]
-; CHECK-NEXT: [[RDX_MINMAX_SELECT6:%.*]] = select <8 x i1> [[RDX_MINMAX_CMP5]], <8 x i32> [[RDX_MINMAX_SELECT3]], <8 x i32> [[RDX_SHUF4]]
-; CHECK-NEXT: [[TMP16:%.*]] = extractelement <8 x i32> [[RDX_MINMAX_SELECT6]], i32 0
-; CHECK-NEXT: [[TMP17:%.*]] = select i1 [[TMP15]], i32 [[TMP14]], i32 undef
-; CHECK-NEXT: ret i32 [[TMP16]]
+; SSE-LABEL: @maxi8(
+; SSE-NEXT: [[TMP2:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 0), align 16
+; SSE-NEXT: [[TMP3:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 1), align 4
+; SSE-NEXT: [[TMP4:%.*]] = icmp sgt i32 [[TMP2]], [[TMP3]]
+; SSE-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i32 [[TMP2]], i32 [[TMP3]]
+; SSE-NEXT: [[TMP6:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 2), align 8
+; SSE-NEXT: [[TMP7:%.*]] = icmp sgt i32 [[TMP5]], [[TMP6]]
+; SSE-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 [[TMP5]], i32 [[TMP6]]
+; SSE-NEXT: [[TMP9:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 3), align 4
+; SSE-NEXT: [[TMP10:%.*]] = icmp sgt i32 [[TMP8]], [[TMP9]]
+; SSE-NEXT: [[TMP11:%.*]] = select i1 [[TMP10]], i32 [[TMP8]], i32 [[TMP9]]
+; SSE-NEXT: [[TMP12:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 4), align 16
+; SSE-NEXT: [[TMP13:%.*]] = icmp sgt i32 [[TMP11]], [[TMP12]]
+; SSE-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP11]], i32 [[TMP12]]
+; SSE-NEXT: [[TMP15:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 5), align 4
+; SSE-NEXT: [[TMP16:%.*]] = icmp sgt i32 [[TMP14]], [[TMP15]]
+; SSE-NEXT: [[TMP17:%.*]] = select i1 [[TMP16]], i32 [[TMP14]], i32 [[TMP15]]
+; SSE-NEXT: [[TMP18:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 6), align 8
+; SSE-NEXT: [[TMP19:%.*]] = icmp sgt i32 [[TMP17]], [[TMP18]]
+; SSE-NEXT: [[TMP20:%.*]] = select i1 [[TMP19]], i32 [[TMP17]], i32 [[TMP18]]
+; SSE-NEXT: [[TMP21:%.*]] = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 7), align 4
+; SSE-NEXT: [[TMP22:%.*]] = icmp sgt i32 [[TMP20]], [[TMP21]]
+; SSE-NEXT: [[TMP23:%.*]] = select i1 [[TMP22]], i32 [[TMP20]], i32 [[TMP21]]
+; SSE-NEXT: ret i32 [[TMP23]]
+;
+; AVX-LABEL: @maxi8(
+; AVX-NEXT: [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([32 x i32]* @arr to <8 x i32>*), align 16
+; AVX-NEXT: [[TMP3:%.*]] = icmp sgt i32 undef, undef
+; AVX-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 undef, i32 undef
+; AVX-NEXT: [[TMP5:%.*]] = icmp sgt i32 [[TMP4]], undef
+; AVX-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP4]], i32 undef
+; AVX-NEXT: [[TMP7:%.*]] = icmp sgt i32 [[TMP6]], undef
+; AVX-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 [[TMP6]], i32 undef
+; AVX-NEXT: [[TMP9:%.*]] = icmp sgt i32 [[TMP8]], undef
+; AVX-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 undef
+; AVX-NEXT: [[TMP11:%.*]] = icmp sgt i32 [[TMP10]], undef
+; AVX-NEXT: [[TMP12:%.*]] = select i1 [[TMP11]], i32 [[TMP10]], i32 undef
+; AVX-NEXT: [[TMP13:%.*]] = icmp sgt i32 [[TMP12]], undef
+; AVX-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP12]], i32 undef
+; AVX-NEXT: [[TMP15:%.*]] = icmp sgt i32 [[TMP14]], undef
+; AVX-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP2]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; AVX-NEXT: [[RDX_MINMAX_CMP:%.*]] = icmp sgt <8 x i32> [[TMP2]], [[RDX_SHUF]]
+; AVX-NEXT: [[RDX_MINMAX_SELECT:%.*]] = select <8 x i1> [[RDX_MINMAX_CMP]], <8 x i32> [[TMP2]], <8 x i32> [[RDX_SHUF]]
+; AVX-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[RDX_MINMAX_SELECT]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; AVX-NEXT: [[RDX_MINMAX_CMP2:%.*]] = icmp sgt <8 x i32> [[RDX_MINMAX_SELECT]], [[RDX_SHUF1]]
+; AVX-NEXT: [[RDX_MINMAX_SELECT3:%.*]] = select <8 x i1> [[RDX_MINMAX_CMP2]], <8 x i32> [[RDX_MINMAX_SELECT]], <8 x i32> [[RDX_SHUF1]]
+; AVX-NEXT: [[RDX_SHUF4:%.*]] = shufflevector <8 x i32> [[RDX_MINMAX_SELECT3]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; AVX-NEXT: [[RDX_MINMAX_CMP5:%.*]] = icmp sgt <8 x i32> [[RDX_MINMAX_SELECT3]], [[RDX_SHUF4]]
+; AVX-NEXT: [[RDX_MINMAX_SELECT6:%.*]] = select <8 x i1> [[RDX_MINMAX_CMP5]], <8 x i32> [[RDX_MINMAX_SELECT3]], <8 x i32> [[RDX_SHUF4]]
+; AVX-NEXT: [[TMP16:%.*]] = extractelement <8 x i32> [[RDX_MINMAX_SELECT6]], i32 0
+; AVX-NEXT: [[TMP17:%.*]] = select i1 [[TMP15]], i32 [[TMP14]], i32 undef
+; AVX-NEXT: ret i32 [[TMP16]]
+;
+; AVX2-LABEL: @maxi8(
+; AVX2-NEXT: [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([32 x i32]* @arr to <8 x i32>*), align 16
+; AVX2-NEXT: [[TMP3:%.*]] = icmp sgt i32 undef, undef
+; AVX2-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 undef, i32 undef
+; AVX2-NEXT: [[TMP5:%.*]] = icmp sgt i32 [[TMP4]], undef
+; AVX2-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP4]], i32 undef
+; AVX2-NEXT: [[TMP7:%.*]] = icmp sgt i32 [[TMP6]], undef
+; AVX2-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 [[TMP6]], i32 undef
+; AVX2-NEXT: [[TMP9:%.*]] = icmp sgt i32 [[TMP8]], undef
+; AVX2-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 undef
+; AVX2-NEXT: [[TMP11:%.*]] = icmp sgt i32 [[TMP10]], undef
+; AVX2-NEXT: [[TMP12:%.*]] = select i1 [[TMP11]], i32 [[TMP10]], i32 undef
+; AVX2-NEXT: [[TMP13:%.*]] = icmp sgt i32 [[TMP12]], undef
+; AVX2-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP12]], i32 undef
+; AVX2-NEXT: [[TMP15:%.*]] = icmp sgt i32 [[TMP14]], undef
+; AVX2-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP2]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; AVX2-NEXT: [[RDX_MINMAX_CMP:%.*]] = icmp sgt <8 x i32> [[TMP2]], [[RDX_SHUF]]
+; AVX2-NEXT: [[RDX_MINMAX_SELECT:%.*]] = select <8 x i1> [[RDX_MINMAX_CMP]], <8 x i32> [[TMP2]], <8 x i32> [[RDX_SHUF]]
+; AVX2-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[RDX_MINMAX_SELECT]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; AVX2-NEXT: [[RDX_MINMAX_CMP2:%.*]] = icmp sgt <8 x i32> [[RDX_MINMAX_SELECT]], [[RDX_SHUF1]]
+; AVX2-NEXT: [[RDX_MINMAX_SELECT3:%.*]] = select <8 x i1> [[RDX_MINMAX_CMP2]], <8 x i32> [[RDX_MINMAX_SELECT]], <8 x i32> [[RDX_SHUF1]]
+; AVX2-NEXT: [[RDX_SHUF4:%.*]] = shufflevector <8 x i32> [[RDX_MINMAX_SELECT3]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; AVX2-NEXT: [[RDX_MINMAX_CMP5:%.*]] = icmp sgt <8 x i32> [[RDX_MINMAX_SELECT3]], [[RDX_SHUF4]]
+; AVX2-NEXT: [[RDX_MINMAX_SELECT6:%.*]] = select <8 x i1> [[RDX_MINMAX_CMP5]], <8 x i32> [[RDX_MINMAX_SELECT3]], <8 x i32> [[RDX_SHUF4]]
+; AVX2-NEXT: [[TMP16:%.*]] = extractelement <8 x i32> [[RDX_MINMAX_SELECT6]], i32 0
+; AVX2-NEXT: [[TMP17:%.*]] = select i1 [[TMP15]], i32 [[TMP14]], i32 undef
+; AVX2-NEXT: ret i32 [[TMP16]]
+;
+; SKX-LABEL: @maxi8(
+; SKX-NEXT: [[TMP2:%.*]] = load <8 x i32>, <8 x i32>* bitcast ([32 x i32]* @arr to <8 x i32>*), align 16
+; SKX-NEXT: [[TMP3:%.*]] = icmp sgt i32 undef, undef
+; SKX-NEXT: [[TMP4:%.*]] = select i1 [[TMP3]], i32 undef, i32 undef
+; SKX-NEXT: [[TMP5:%.*]] = icmp sgt i32 [[TMP4]], undef
+; SKX-NEXT: [[TMP6:%.*]] = select i1 [[TMP5]], i32 [[TMP4]], i32 undef
+; SKX-NEXT: [[TMP7:%.*]] = icmp sgt i32 [[TMP6]], undef
+; SKX-NEXT: [[TMP8:%.*]] = select i1 [[TMP7]], i32 [[TMP6]], i32 undef
+; SKX-NEXT: [[TMP9:%.*]] = icmp sgt i32 [[TMP8]], undef
+; SKX-NEXT: [[TMP10:%.*]] = select i1 [[TMP9]], i32 [[TMP8]], i32 undef
+; SKX-NEXT: [[TMP11:%.*]] = icmp sgt i32 [[TMP10]], undef
+; SKX-NEXT: [[TMP12:%.*]] = select i1 [[TMP11]], i32 [[TMP10]], i32 undef
+; SKX-NEXT: [[TMP13:%.*]] = icmp sgt i32 [[TMP12]], undef
+; SKX-NEXT: [[TMP14:%.*]] = select i1 [[TMP13]], i32 [[TMP12]], i32 undef
+; SKX-NEXT: [[TMP15:%.*]] = icmp sgt i32 [[TMP14]], undef
+; SKX-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP2]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; SKX-NEXT: [[RDX_MINMAX_CMP:%.*]] = icmp sgt <8 x i32> [[TMP2]], [[RDX_SHUF]]
+; SKX-NEXT: [[RDX_MINMAX_SELECT:%.*]] = select <8 x i1> [[RDX_MINMAX_CMP]], <8 x i32> [[TMP2]], <8 x i32> [[RDX_SHUF]]
+; SKX-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[RDX_MINMAX_SELECT]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; SKX-NEXT: [[RDX_MINMAX_CMP2:%.*]] = icmp sgt <8 x i32> [[RDX_MINMAX_SELECT]], [[RDX_SHUF1]]
+; SKX-NEXT: [[RDX_MINMAX_SELECT3:%.*]] = select <8 x i1> [[RDX_MINMAX_CMP2]], <8 x i32> [[RDX_MINMAX_SELECT]], <8 x i32> [[RDX_SHUF1]]
+; SKX-NEXT: [[RDX_SHUF4:%.*]] = shufflevector <8 x i32> [[RDX_MINMAX_SELECT3]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; SKX-NEXT: [[RDX_MINMAX_CMP5:%.*]] = icmp sgt <8 x i32> [[RDX_MINMAX_SELECT3]], [[RDX_SHUF4]]
+; SKX-NEXT: [[RDX_MINMAX_SELECT6:%.*]] = select <8 x i1> [[RDX_MINMAX_CMP5]], <8 x i32> [[RDX_MINMAX_SELECT3]], <8 x i32> [[RDX_SHUF4]]
+; SKX-NEXT: [[TMP16:%.*]] = extractelement <8 x i32> [[RDX_MINMAX_SELECT6]], i32 0
+; SKX-NEXT: [[TMP17:%.*]] = select i1 [[TMP15]], i32 [[TMP14]], i32 undef
+; SKX-NEXT: ret i32 [[TMP16]]
;
%2 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 0), align 16
%3 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 1), align 4
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/reduction_unrolled.ll b/llvm/test/Transforms/SLPVectorizer/X86/reduction_unrolled.ll
index d40515fb709..10fe6d1ceee 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/reduction_unrolled.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/reduction_unrolled.ll
@@ -12,8 +12,8 @@
; Vector cost is 5, Scalar cost is 7
; AVX: Adding cost -2 for reduction that starts with %7 = load i32, i32* %arrayidx.7, align 4 (It is a splitting reduction)
-; Vector cost is 7, Scalar cost is 7
-; SSE: Adding cost 0 for reduction that starts with %7 = load i32, i32* %arrayidx.7, align 4 (It is a splitting reduction)
+; Vector cost is 11, Scalar cost is 7
+; SSE: Adding cost 3 for reduction that starts with %7 = load i32, i32* %arrayidx.7, align 4 (It is a splitting reduction)
define i32 @test_add(i32* nocapture readonly %p) {
; CHECK-LABEL: @test_add(
; CHECK-NEXT: entry:
@@ -136,32 +136,58 @@ entry:
; }
define i32 @test_and(i32* nocapture readonly %p) {
-; CHECK-LABEL: @test_and(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 2
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 3
-; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 4
-; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 5
-; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 6
-; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[P]] to <8 x i32>*
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* [[TMP0]], align 4
-; CHECK-NEXT: [[MUL_18:%.*]] = and i32 undef, undef
-; CHECK-NEXT: [[MUL_29:%.*]] = and i32 undef, [[MUL_18]]
-; CHECK-NEXT: [[MUL_310:%.*]] = and i32 undef, [[MUL_29]]
-; CHECK-NEXT: [[MUL_411:%.*]] = and i32 undef, [[MUL_310]]
-; CHECK-NEXT: [[MUL_512:%.*]] = and i32 undef, [[MUL_411]]
-; CHECK-NEXT: [[MUL_613:%.*]] = and i32 undef, [[MUL_512]]
-; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT: [[BIN_RDX:%.*]] = and <8 x i32> [[TMP1]], [[RDX_SHUF]]
-; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT: [[BIN_RDX2:%.*]] = and <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
-; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT: [[BIN_RDX4:%.*]] = and <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0
-; CHECK-NEXT: [[MUL_714:%.*]] = and i32 undef, [[MUL_613]]
-; CHECK-NEXT: ret i32 [[TMP2]]
+; AVX-LABEL: @test_and(
+; AVX-NEXT: entry:
+; AVX-NEXT: [[TMP0:%.*]] = load i32, i32* [[P:%.*]], align 4
+; AVX-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
+; AVX-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4
+; AVX-NEXT: [[MUL_18:%.*]] = and i32 [[TMP1]], [[TMP0]]
+; AVX-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 2
+; AVX-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX_2]], align 4
+; AVX-NEXT: [[MUL_29:%.*]] = and i32 [[TMP2]], [[MUL_18]]
+; AVX-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 3
+; AVX-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX_3]], align 4
+; AVX-NEXT: [[MUL_310:%.*]] = and i32 [[TMP3]], [[MUL_29]]
+; AVX-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 4
+; AVX-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX_4]], align 4
+; AVX-NEXT: [[MUL_411:%.*]] = and i32 [[TMP4]], [[MUL_310]]
+; AVX-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 5
+; AVX-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX_5]], align 4
+; AVX-NEXT: [[MUL_512:%.*]] = and i32 [[TMP5]], [[MUL_411]]
+; AVX-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 6
+; AVX-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX_6]], align 4
+; AVX-NEXT: [[MUL_613:%.*]] = and i32 [[TMP6]], [[MUL_512]]
+; AVX-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
+; AVX-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX_7]], align 4
+; AVX-NEXT: [[MUL_714:%.*]] = and i32 [[TMP7]], [[MUL_613]]
+; AVX-NEXT: ret i32 [[MUL_714]]
+;
+; SSE-LABEL: @test_and(
+; SSE-NEXT: entry:
+; SSE-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
+; SSE-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 2
+; SSE-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 3
+; SSE-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 4
+; SSE-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 5
+; SSE-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 6
+; SSE-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
+; SSE-NEXT: [[TMP0:%.*]] = bitcast i32* [[P]] to <8 x i32>*
+; SSE-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* [[TMP0]], align 4
+; SSE-NEXT: [[MUL_18:%.*]] = and i32 undef, undef
+; SSE-NEXT: [[MUL_29:%.*]] = and i32 undef, [[MUL_18]]
+; SSE-NEXT: [[MUL_310:%.*]] = and i32 undef, [[MUL_29]]
+; SSE-NEXT: [[MUL_411:%.*]] = and i32 undef, [[MUL_310]]
+; SSE-NEXT: [[MUL_512:%.*]] = and i32 undef, [[MUL_411]]
+; SSE-NEXT: [[MUL_613:%.*]] = and i32 undef, [[MUL_512]]
+; SSE-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; SSE-NEXT: [[BIN_RDX:%.*]] = and <8 x i32> [[TMP1]], [[RDX_SHUF]]
+; SSE-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; SSE-NEXT: [[BIN_RDX2:%.*]] = and <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; SSE-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; SSE-NEXT: [[BIN_RDX4:%.*]] = and <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
+; SSE-NEXT: [[TMP2:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0
+; SSE-NEXT: [[MUL_714:%.*]] = and i32 undef, [[MUL_613]]
+; SSE-NEXT: ret i32 [[TMP2]]
;
entry:
%0 = load i32, i32* %p, align 4
@@ -197,32 +223,58 @@ entry:
; }
define i32 @test_or(i32* nocapture readonly %p) {
-; CHECK-LABEL: @test_or(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 2
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 3
-; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 4
-; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 5
-; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 6
-; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[P]] to <8 x i32>*
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* [[TMP0]], align 4
-; CHECK-NEXT: [[MUL_18:%.*]] = or i32 undef, undef
-; CHECK-NEXT: [[MUL_29:%.*]] = or i32 undef, [[MUL_18]]
-; CHECK-NEXT: [[MUL_310:%.*]] = or i32 undef, [[MUL_29]]
-; CHECK-NEXT: [[MUL_411:%.*]] = or i32 undef, [[MUL_310]]
-; CHECK-NEXT: [[MUL_512:%.*]] = or i32 undef, [[MUL_411]]
-; CHECK-NEXT: [[MUL_613:%.*]] = or i32 undef, [[MUL_512]]
-; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT: [[BIN_RDX:%.*]] = or <8 x i32> [[TMP1]], [[RDX_SHUF]]
-; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT: [[BIN_RDX2:%.*]] = or <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
-; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT: [[BIN_RDX4:%.*]] = or <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0
-; CHECK-NEXT: [[MUL_714:%.*]] = or i32 undef, [[MUL_613]]
-; CHECK-NEXT: ret i32 [[TMP2]]
+; AVX-LABEL: @test_or(
+; AVX-NEXT: entry:
+; AVX-NEXT: [[TMP0:%.*]] = load i32, i32* [[P:%.*]], align 4
+; AVX-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
+; AVX-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4
+; AVX-NEXT: [[MUL_18:%.*]] = or i32 [[TMP1]], [[TMP0]]
+; AVX-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 2
+; AVX-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX_2]], align 4
+; AVX-NEXT: [[MUL_29:%.*]] = or i32 [[TMP2]], [[MUL_18]]
+; AVX-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 3
+; AVX-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX_3]], align 4
+; AVX-NEXT: [[MUL_310:%.*]] = or i32 [[TMP3]], [[MUL_29]]
+; AVX-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 4
+; AVX-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX_4]], align 4
+; AVX-NEXT: [[MUL_411:%.*]] = or i32 [[TMP4]], [[MUL_310]]
+; AVX-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 5
+; AVX-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX_5]], align 4
+; AVX-NEXT: [[MUL_512:%.*]] = or i32 [[TMP5]], [[MUL_411]]
+; AVX-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 6
+; AVX-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX_6]], align 4
+; AVX-NEXT: [[MUL_613:%.*]] = or i32 [[TMP6]], [[MUL_512]]
+; AVX-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
+; AVX-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX_7]], align 4
+; AVX-NEXT: [[MUL_714:%.*]] = or i32 [[TMP7]], [[MUL_613]]
+; AVX-NEXT: ret i32 [[MUL_714]]
+;
+; SSE-LABEL: @test_or(
+; SSE-NEXT: entry:
+; SSE-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
+; SSE-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 2
+; SSE-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 3
+; SSE-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 4
+; SSE-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 5
+; SSE-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 6
+; SSE-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
+; SSE-NEXT: [[TMP0:%.*]] = bitcast i32* [[P]] to <8 x i32>*
+; SSE-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* [[TMP0]], align 4
+; SSE-NEXT: [[MUL_18:%.*]] = or i32 undef, undef
+; SSE-NEXT: [[MUL_29:%.*]] = or i32 undef, [[MUL_18]]
+; SSE-NEXT: [[MUL_310:%.*]] = or i32 undef, [[MUL_29]]
+; SSE-NEXT: [[MUL_411:%.*]] = or i32 undef, [[MUL_310]]
+; SSE-NEXT: [[MUL_512:%.*]] = or i32 undef, [[MUL_411]]
+; SSE-NEXT: [[MUL_613:%.*]] = or i32 undef, [[MUL_512]]
+; SSE-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; SSE-NEXT: [[BIN_RDX:%.*]] = or <8 x i32> [[TMP1]], [[RDX_SHUF]]
+; SSE-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; SSE-NEXT: [[BIN_RDX2:%.*]] = or <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; SSE-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; SSE-NEXT: [[BIN_RDX4:%.*]] = or <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
+; SSE-NEXT: [[TMP2:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0
+; SSE-NEXT: [[MUL_714:%.*]] = or i32 undef, [[MUL_613]]
+; SSE-NEXT: ret i32 [[TMP2]]
;
entry:
%0 = load i32, i32* %p, align 4
@@ -258,32 +310,58 @@ entry:
; }
define i32 @test_xor(i32* nocapture readonly %p) {
-; CHECK-LABEL: @test_xor(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
-; CHECK-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 2
-; CHECK-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 3
-; CHECK-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 4
-; CHECK-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 5
-; CHECK-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 6
-; CHECK-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[P]] to <8 x i32>*
-; CHECK-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* [[TMP0]], align 4
-; CHECK-NEXT: [[MUL_18:%.*]] = xor i32 undef, undef
-; CHECK-NEXT: [[MUL_29:%.*]] = xor i32 undef, [[MUL_18]]
-; CHECK-NEXT: [[MUL_310:%.*]] = xor i32 undef, [[MUL_29]]
-; CHECK-NEXT: [[MUL_411:%.*]] = xor i32 undef, [[MUL_310]]
-; CHECK-NEXT: [[MUL_512:%.*]] = xor i32 undef, [[MUL_411]]
-; CHECK-NEXT: [[MUL_613:%.*]] = xor i32 undef, [[MUL_512]]
-; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT: [[BIN_RDX:%.*]] = xor <8 x i32> [[TMP1]], [[RDX_SHUF]]
-; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT: [[BIN_RDX2:%.*]] = xor <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
-; CHECK-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT: [[BIN_RDX4:%.*]] = xor <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
-; CHECK-NEXT: [[TMP2:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0
-; CHECK-NEXT: [[MUL_714:%.*]] = xor i32 undef, [[MUL_613]]
-; CHECK-NEXT: ret i32 [[TMP2]]
+; AVX-LABEL: @test_xor(
+; AVX-NEXT: entry:
+; AVX-NEXT: [[TMP0:%.*]] = load i32, i32* [[P:%.*]], align 4
+; AVX-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 1
+; AVX-NEXT: [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4
+; AVX-NEXT: [[MUL_18:%.*]] = xor i32 [[TMP1]], [[TMP0]]
+; AVX-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 2
+; AVX-NEXT: [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX_2]], align 4
+; AVX-NEXT: [[MUL_29:%.*]] = xor i32 [[TMP2]], [[MUL_18]]
+; AVX-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 3
+; AVX-NEXT: [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX_3]], align 4
+; AVX-NEXT: [[MUL_310:%.*]] = xor i32 [[TMP3]], [[MUL_29]]
+; AVX-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 4
+; AVX-NEXT: [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX_4]], align 4
+; AVX-NEXT: [[MUL_411:%.*]] = xor i32 [[TMP4]], [[MUL_310]]
+; AVX-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 5
+; AVX-NEXT: [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX_5]], align 4
+; AVX-NEXT: [[MUL_512:%.*]] = xor i32 [[TMP5]], [[MUL_411]]
+; AVX-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 6
+; AVX-NEXT: [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX_6]], align 4
+; AVX-NEXT: [[MUL_613:%.*]] = xor i32 [[TMP6]], [[MUL_512]]
+; AVX-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
+; AVX-NEXT: [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX_7]], align 4
+; AVX-NEXT: [[MUL_714:%.*]] = xor i32 [[TMP7]], [[MUL_613]]
+; AVX-NEXT: ret i32 [[MUL_714]]
+;
+; SSE-LABEL: @test_xor(
+; SSE-NEXT: entry:
+; SSE-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[P:%.*]], i64 1
+; SSE-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 2
+; SSE-NEXT: [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 3
+; SSE-NEXT: [[ARRAYIDX_4:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 4
+; SSE-NEXT: [[ARRAYIDX_5:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 5
+; SSE-NEXT: [[ARRAYIDX_6:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 6
+; SSE-NEXT: [[ARRAYIDX_7:%.*]] = getelementptr inbounds i32, i32* [[P]], i64 7
+; SSE-NEXT: [[TMP0:%.*]] = bitcast i32* [[P]] to <8 x i32>*
+; SSE-NEXT: [[TMP1:%.*]] = load <8 x i32>, <8 x i32>* [[TMP0]], align 4
+; SSE-NEXT: [[MUL_18:%.*]] = xor i32 undef, undef
+; SSE-NEXT: [[MUL_29:%.*]] = xor i32 undef, [[MUL_18]]
+; SSE-NEXT: [[MUL_310:%.*]] = xor i32 undef, [[MUL_29]]
+; SSE-NEXT: [[MUL_411:%.*]] = xor i32 undef, [[MUL_310]]
+; SSE-NEXT: [[MUL_512:%.*]] = xor i32 undef, [[MUL_411]]
+; SSE-NEXT: [[MUL_613:%.*]] = xor i32 undef, [[MUL_512]]
+; SSE-NEXT: [[RDX_SHUF:%.*]] = shufflevector <8 x i32> [[TMP1]], <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+; SSE-NEXT: [[BIN_RDX:%.*]] = xor <8 x i32> [[TMP1]], [[RDX_SHUF]]
+; SSE-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <8 x i32> [[BIN_RDX]], <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; SSE-NEXT: [[BIN_RDX2:%.*]] = xor <8 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
+; SSE-NEXT: [[RDX_SHUF3:%.*]] = shufflevector <8 x i32> [[BIN_RDX2]], <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; SSE-NEXT: [[BIN_RDX4:%.*]] = xor <8 x i32> [[BIN_RDX2]], [[RDX_SHUF3]]
+; SSE-NEXT: [[TMP2:%.*]] = extractelement <8 x i32> [[BIN_RDX4]], i32 0
+; SSE-NEXT: [[MUL_714:%.*]] = xor i32 undef, [[MUL_613]]
+; SSE-NEXT: ret i32 [[TMP2]]
;
entry:
%0 = load i32, i32* %p, align 4
@@ -322,15 +400,14 @@ define i32 @PR37731(<4 x i32>* noalias nocapture dereferenceable(16) %self) unna
; CHECK-NEXT: [[TMP5:%.*]] = shl <4 x i32> [[TMP4]], <i32 18, i32 2, i32 7, i32 13>
; CHECK-NEXT: [[TMP6:%.*]] = xor <4 x i32> [[TMP3]], [[TMP5]]
; CHECK-NEXT: store <4 x i32> [[TMP6]], <4 x i32>* [[SELF]], align 16
-; CHECK-NEXT: [[TMP7:%.*]] = xor i32 undef, undef
-; CHECK-NEXT: [[TMP8:%.*]] = xor i32 [[TMP7]], undef
-; CHECK-NEXT: [[RDX_SHUF:%.*]] = shufflevector <4 x i32> [[TMP6]], <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
-; CHECK-NEXT: [[BIN_RDX:%.*]] = xor <4 x i32> [[TMP6]], [[RDX_SHUF]]
-; CHECK-NEXT: [[RDX_SHUF1:%.*]] = shufflevector <4 x i32> [[BIN_RDX]], <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
-; CHECK-NEXT: [[BIN_RDX2:%.*]] = xor <4 x i32> [[BIN_RDX]], [[RDX_SHUF1]]
-; CHECK-NEXT: [[TMP9:%.*]] = extractelement <4 x i32> [[BIN_RDX2]], i32 0
-; CHECK-NEXT: [[TMP10:%.*]] = xor i32 [[TMP8]], undef
-; CHECK-NEXT: ret i32 [[TMP9]]
+; CHECK-NEXT: [[TMP7:%.*]] = extractelement <4 x i32> [[TMP6]], i32 0
+; CHECK-NEXT: [[TMP8:%.*]] = extractelement <4 x i32> [[TMP6]], i32 1
+; CHECK-NEXT: [[TMP9:%.*]] = xor i32 [[TMP7]], [[TMP8]]
+; CHECK-NEXT: [[TMP10:%.*]] = extractelement <4 x i32> [[TMP6]], i32 2
+; CHECK-NEXT: [[TMP11:%.*]] = xor i32 [[TMP9]], [[TMP10]]
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <4 x i32> [[TMP6]], i32 3
+; CHECK-NEXT: [[TMP13:%.*]] = xor i32 [[TMP11]], [[TMP12]]
+; CHECK-NEXT: ret i32 [[TMP13]]
;
entry:
%0 = load <4 x i32>, <4 x i32>* %self, align 16
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/remark_horcost.ll b/llvm/test/Transforms/SLPVectorizer/X86/remark_horcost.ll
index 96c8d7f3b68..e108c4960c0 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/remark_horcost.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/remark_horcost.ll
@@ -56,28 +56,19 @@ for.body: ; preds = %for.body, %entry
%add52 = add nsw i32 %add38, %add45
; CHECK: add nsw <{{[0-9]+}} x i32>
- ; CHECK: add nsw <{{[0-9]+}} x i32>
+ ; CHECK-NOT: add nsw <{{[0-9]+}} x i32>
- ; YAML: --- !Passed
+ ; YAML: --- !Missed
; YAML-NEXT: Pass: slp-vectorizer
- ; YAML-NEXT: Name: StoresVectorized
+ ; YAML-NEXT: Name: HorSLPNotBeneficial
; YAML-NEXT: Function: foo
; YAML-NEXT: Args:
- ; YAML-NEXT: - String: 'Stores SLP vectorized with cost '
- ; YAML-NEXT: - Cost: '-8'
- ; YAML-NEXT: - String: ' and with tree size '
- ; YAML-NEXT: - TreeSize: '4'
+ ; YAML-NEXT: - String: Vectorizing horizontal reduction is possible
+ ; YAML-NEXT: - String: 'but not beneficial with cost '
+ ; YAML-NEXT: - Cost: '1'
+ ; YAML-NEXT: - String: ' and threshold '
+ ; YAML-NEXT: - Threshold: '0'
- ; YAML: --- !Passed
- ; YAML-NEXT: Pass: slp-vectorizer
- ; YAML-NEXT: Name: VectorizedHorizontalReduction
- ; YAML-NEXT: Function: foo
- ; YAML-NEXT: Args:
- ; YAML-NEXT: - String: 'Vectorized horizontal reduction with cost '
- ; YAML-NEXT: - Cost: '-2'
- ; YAML-NEXT: - String: ' and with tree size '
- ; YAML-NEXT: - TreeSize: '1'
-
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%exitcond = icmp eq i64 %indvars.iv.next, 8
br i1 %exitcond, label %for.end, label %for.body