author    Sanjay Patel <spatel@rotateright.com>    2017-06-11 21:18:58 +0000
committer Sanjay Patel <spatel@rotateright.com>    2017-06-11 21:18:58 +0000
commit    dcbfbb11d985caa30fcd782ac13267d41058f256 (patch)
tree      98bfcf2872d8f0ac5753153207fb38e85a6b31de
parent    7ed6cd32ea702971e38a28b865727a016b727c3f (diff)
[x86] use vperm2f128 rather than vinsertf128 when there's a chance to fold a 32-byte load
I was looking closer at the x86 test diffs in D33866, and the first change seems like it shouldn't happen in the first place. So this patch will resolve that.

Using Agner's tables and AMD docs, vperm2f128 and vinsertf128 have identical timing for any given CPU model, so we should be able to interchange those without affecting perf. But as we can see in some of the diffs here, using vperm2f128 allows load folding, so we should take that opportunity to reduce code size and register pressure.

A secondary advantage is making AVX1 and AVX2 codegen more similar. Given that vperm2f128 was introduced with AVX1, we should be selecting it in all of the same situations that we would with AVX2. If there's some reason that an AVX1 CPU would not want to use this instruction, that should be fixed up in a later pass.

Differential Revision: https://reviews.llvm.org/D33938

llvm-svn: 305171
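For reference, here is a minimal IR sketch of the pattern this patch targets, mirroring the shuffle_v8f32_01230123_mem test updated below (the function name is illustrative): a 32-byte load whose low 128-bit lane is duplicated by a shufflevector. With this change, both AVX1 and AVX2 are expected to fold the load and emit a single vperm2f128 with a memory operand instead of a separate vmovaps plus vinsertf128.

; A 256-bit load feeding a lane-duplicating shuffle; after this patch the
; expected codegen is: vperm2f128 {{.*#+}} ymm0 = mem[0,1,0,1]
define <8 x float> @splat_low_lane(<8 x float>* %p) {
  %v = load <8 x float>, <8 x float>* %p
  %s = shufflevector <8 x float> %v, <8 x float> undef,
                     <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3>
  ret <8 x float> %s
}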
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp       22
-rw-r--r--  llvm/test/CodeGen/X86/avx-vperm2x128.ll       20
-rw-r--r--  llvm/test/CodeGen/X86/x86-interleaved-access.ll  44
3 files changed, 36 insertions, 50 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 831e9bdab0e..f56b1e68cc9 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -12007,18 +12007,22 @@ static SDValue lowerV2X128VectorShuffle(const SDLoc &DL, MVT VT, SDValue V1,
// subvector.
bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1});
if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5})) {
- // With AVX2 we should use VPERMQ/VPERMPD to allow memory folding.
+ // With AVX2, use VPERMQ/VPERMPD to allow memory folding.
if (Subtarget.hasAVX2() && V2.isUndef())
return SDValue();
- MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
- VT.getVectorNumElements() / 2);
- SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
- DAG.getIntPtrConstant(0, DL));
- SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
- OnlyUsesV1 ? V1 : V2,
- DAG.getIntPtrConstant(0, DL));
- return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
+ // With AVX1, use vperm2f128 (below) to allow load folding. Otherwise,
+ // this will likely become vinsertf128 which can't fold a 256-bit memop.
+ if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) {
+ MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
+ VT.getVectorNumElements() / 2);
+ SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
+ DAG.getIntPtrConstant(0, DL));
+ SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
+ OnlyUsesV1 ? V1 : V2,
+ DAG.getIntPtrConstant(0, DL));
+ return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
+ }
}
}
diff --git a/llvm/test/CodeGen/X86/avx-vperm2x128.ll b/llvm/test/CodeGen/X86/avx-vperm2x128.ll
index f4a77c370db..9a21f4b5cab 100644
--- a/llvm/test/CodeGen/X86/avx-vperm2x128.ll
+++ b/llvm/test/CodeGen/X86/avx-vperm2x128.ll
@@ -50,16 +50,10 @@ entry:
}
define <8 x float> @shuffle_v8f32_01230123_mem(<8 x float>* %pa, <8 x float>* %pb) nounwind uwtable readnone ssp {
-; AVX1-LABEL: shuffle_v8f32_01230123_mem:
-; AVX1: ## BB#0: ## %entry
-; AVX1-NEXT: vmovaps (%rdi), %ymm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: shuffle_v8f32_01230123_mem:
-; AVX2: ## BB#0: ## %entry
-; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[0,1,0,1]
-; AVX2-NEXT: retq
+; ALL-LABEL: shuffle_v8f32_01230123_mem:
+; ALL: ## BB#0: ## %entry
+; ALL-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[0,1,0,1]
+; ALL-NEXT: retq
entry:
%a = load <8 x float>, <8 x float>* %pa
%b = load <8 x float>, <8 x float>* %pb
@@ -195,17 +189,15 @@ define <16 x i16> @shuffle_v16i16_4501_mem(<16 x i16>* %a, <16 x i16>* %b) nounw
; AVX1-LABEL: shuffle_v16i16_4501_mem:
; AVX1: ## BB#0: ## %entry
; AVX1-NEXT: vmovdqa (%rdi), %ymm0
-; AVX1-NEXT: vmovaps (%rsi), %ymm1
; AVX1-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[0,1],ymm0[0,1]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v16i16_4501_mem:
; AVX2: ## BB#0: ## %entry
; AVX2-NEXT: vmovdqa (%rdi), %ymm0
-; AVX2-NEXT: vmovdqa (%rsi), %ymm1
; AVX2-NEXT: vpaddw {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = mem[0,1],ymm0[0,1]
; AVX2-NEXT: retq
entry:
%c = load <16 x i16>, <16 x i16>* %a
diff --git a/llvm/test/CodeGen/X86/x86-interleaved-access.ll b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
index 74214aa1b8b..ec8bce1b43c 100644
--- a/llvm/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
@@ -9,8 +9,8 @@ define <4 x double> @load_factorf64_4(<16 x double>* %ptr) {
; AVX-NEXT: vmovupd 32(%rdi), %ymm1
; AVX-NEXT: vmovupd 64(%rdi), %ymm2
; AVX-NEXT: vmovupd 96(%rdi), %ymm3
-; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
-; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
; AVX-NEXT: vhaddpd %ymm5, %ymm4, %ymm4
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
@@ -37,8 +37,8 @@ define <4 x double> @load_factorf64_2(<16 x double>* %ptr) {
; AVX-NEXT: vmovupd 32(%rdi), %ymm1
; AVX-NEXT: vmovupd 64(%rdi), %ymm2
; AVX-NEXT: vmovupd 96(%rdi), %ymm3
-; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
-; AVX-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
; AVX-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
@@ -53,25 +53,15 @@ define <4 x double> @load_factorf64_2(<16 x double>* %ptr) {
}
define <4 x double> @load_factorf64_1(<16 x double>* %ptr) {
-; AVX1-LABEL: load_factorf64_1:
-; AVX1: # BB#0:
-; AVX1-NEXT: vmovups (%rdi), %ymm0
-; AVX1-NEXT: vmovups 32(%rdi), %ymm1
-; AVX1-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm0
-; AVX1-NEXT: vinsertf128 $1, 96(%rdi), %ymm1, %ymm1
-; AVX1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX1-NEXT: vmulpd %ymm0, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: load_factorf64_1:
-; AVX2: # BB#0:
-; AVX2-NEXT: vmovupd (%rdi), %ymm0
-; AVX2-NEXT: vmovupd 32(%rdi), %ymm1
-; AVX2-NEXT: vinsertf128 $1, 64(%rdi), %ymm0, %ymm0
-; AVX2-NEXT: vinsertf128 $1, 96(%rdi), %ymm1, %ymm1
-; AVX2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
-; AVX2-NEXT: vmulpd %ymm0, %ymm0, %ymm0
-; AVX2-NEXT: retq
+; AVX-LABEL: load_factorf64_1:
+; AVX: # BB#0:
+; AVX-NEXT: vmovupd (%rdi), %ymm0
+; AVX-NEXT: vmovupd 32(%rdi), %ymm1
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[0,1],mem[0,1]
+; AVX-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[0,1],mem[0,1]
+; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
+; AVX-NEXT: vmulpd %ymm0, %ymm0, %ymm0
+; AVX-NEXT: retq
%wide.vec = load <16 x double>, <16 x double>* %ptr, align 16
%strided.v0 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
%strided.v3 = shufflevector <16 x double> %wide.vec, <16 x double> undef, <4 x i32> <i32 0, i32 4, i32 8, i32 12>
@@ -86,8 +76,8 @@ define <4 x i64> @load_factori64_4(<16 x i64>* %ptr) {
; AVX1-NEXT: vmovupd 32(%rdi), %ymm1
; AVX1-NEXT: vmovupd 64(%rdi), %ymm2
; AVX1-NEXT: vmovupd 96(%rdi), %ymm3
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm4
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm5
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
+; AVX1-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
@@ -113,8 +103,8 @@ define <4 x i64> @load_factori64_4(<16 x i64>* %ptr) {
; AVX2-NEXT: vmovdqu 32(%rdi), %ymm1
; AVX2-NEXT: vmovdqu 64(%rdi), %ymm2
; AVX2-NEXT: vmovdqu 96(%rdi), %ymm3
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm4
-; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm1, %ymm5
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm0[0,1],ymm2[0,1]
+; AVX2-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm1[0,1],ymm3[0,1]
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm2[2,3]
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm3[2,3]
; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm2 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]