author     Craig Topper <craig.topper@intel.com>  2018-11-19 00:33:16 +0000
committer  Craig Topper <craig.topper@intel.com>  2018-11-19 00:33:16 +0000
commit     053f1eea96eaa8a0a4bb034274fa485655323d32 (patch)
tree       627689304bdd17e3c1e2f0fcf0dc3eb0910f72f5
parent     a7b204b44f8a8bd0d6adbb1a459fc5ec29d99d8d (diff)
[X86] Remove most of the SEXTLOAD Custom setLoadExtAction calls under -x86-experimental-vector-widening-legalization.
Leave just the v4i8->v4i64 and v8i8->v8i64 cases, but only enable them on pre-SSE4.1 targets when 64-bit mode is enabled. In those cases we end up creating sext loads that get scalarized into code that looks better than what we get from loading into a vector register and doing a multi-step sign extend using unpacks and shifts.
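
To make the surviving pattern concrete, the affected tests below exercise IR of roughly this shape (a sketch modeled on the load_sext_4i8_to_4i64 test in vector-sext-widen.ll; value names are illustrative):

    define <4 x i64> @load_sext_4i8_to_4i64(<4 x i8>* %ptr) {
    entry:
      ; A narrow vector load feeding a sign extend. With this patch, DAG combine
      ; may still turn this pair into a single sextload, but only for the
      ; v4i8->v4i64 and v8i8->v8i64 cases on pre-SSE4.1 targets in 64-bit mode.
      %bytes = load <4 x i8>, <4 x i8>* %ptr
      %ext = sext <4 x i8> %bytes to <4 x i64>
      ret <4 x i64> %ext
    }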
llvm-svn: 347180
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp      |  11
-rw-r--r--  llvm/test/CodeGen/X86/vec_int_to_fp-widen.ll |  38
-rw-r--r--  llvm/test/CodeGen/X86/vector-sext-widen.ll   | 280
3 files changed, 118 insertions, 211 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 8c3d0d7a42d..a8859be9fb1 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -866,14 +866,11 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
       setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
     }
 
-    if (ExperimentalVectorWideningLegalization) {
-      // Explicitly code the list so we don't use narrow result types.
-      setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Custom);
-      setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Custom);
+    if (ExperimentalVectorWideningLegalization &&
+        !Subtarget.hasSSE41() && Subtarget.is64Bit()) {
+      // This lets DAG combine create sextloads that get split and scalarized.
+      // TODO: Does this make sense? What about v2i8->v2i64?
       setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Custom);
-      setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Custom);
-      setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8, Custom);
-      setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Custom);
       setLoadExtAction(ISD::SEXTLOAD, MVT::v8i64, MVT::v8i8, Custom);
     }
 
diff --git a/llvm/test/CodeGen/X86/vec_int_to_fp-widen.ll b/llvm/test/CodeGen/X86/vec_int_to_fp-widen.ll
index 03b2a3db49a..7441ce58027 100644
--- a/llvm/test/CodeGen/X86/vec_int_to_fp-widen.ll
+++ b/llvm/test/CodeGen/X86/vec_int_to_fp-widen.ll
@@ -4282,13 +4282,12 @@ define <8 x float> @sitofp_load_8i32_to_8f32(<8 x i32> *%a) {
 define <8 x float> @sitofp_load_8i16_to_8f32(<8 x i16> *%a) {
 ; SSE2-LABEL: sitofp_load_8i16_to_8f32:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: psrad $16, %xmm1
-; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT: movdqa (%rdi), %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSE2-NEXT: psrad $16, %xmm0
 ; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm1
 ; SSE2-NEXT: cvtdq2ps %xmm1, %xmm1
 ; SSE2-NEXT: retq
 ;
@@ -4327,15 +4326,13 @@ define <8 x float> @sitofp_load_8i16_to_8f32(<8 x i16> *%a) {
 define <8 x float> @sitofp_load_8i8_to_8f32(<8 x i8> *%a) {
 ; SSE2-LABEL: sitofp_load_8i8_to_8f32:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE2-NEXT: psrad $24, %xmm1
-; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSE2-NEXT: psrad $24, %xmm0
 ; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $24, %xmm1
 ; SSE2-NEXT: cvtdq2ps %xmm1, %xmm1
 ; SSE2-NEXT: retq
 ;
@@ -4349,10 +4346,8 @@ define <8 x float> @sitofp_load_8i8_to_8f32(<8 x i8> *%a) {
 ;
 ; AVX1-LABEL: sitofp_load_8i8_to_8f32:
 ; AVX1: # %bb.0:
-; AVX1-NEXT: vpmovsxbw (%rdi), %xmm0
-; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
+; AVX1-NEXT: vpmovsxbd 4(%rdi), %xmm0
+; AVX1-NEXT: vpmovsxbd (%rdi), %xmm1
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
 ; AVX1-NEXT: retq
@@ -5439,14 +5434,13 @@ define <8 x float> @uitofp_load_8i8_to_8f32(<8 x i8> *%a) {
 define void @aggregate_sitofp_8i16_to_8f32(%Arguments* nocapture readonly %a0) {
 ; SSE2-LABEL: aggregate_sitofp_8i16_to_8f32:
 ; SSE2: # %bb.0:
-; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
-; SSE2-NEXT: psrad $16, %xmm1
 ; SSE2-NEXT: movq 24(%rdi), %rax
+; SSE2-NEXT: movdqu 8(%rdi), %xmm0
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT: psrad $16, %xmm1
 ; SSE2-NEXT: cvtdq2ps %xmm1, %xmm1
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm0
 ; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
 ; SSE2-NEXT: movaps %xmm0, 16(%rax)
 ; SSE2-NEXT: movaps %xmm1, (%rax)
diff --git a/llvm/test/CodeGen/X86/vector-sext-widen.ll b/llvm/test/CodeGen/X86/vector-sext-widen.ll
index dc7db144c73..6d245a223ea 100644
--- a/llvm/test/CodeGen/X86/vector-sext-widen.ll
+++ b/llvm/test/CodeGen/X86/vector-sext-widen.ll
@@ -1958,10 +1958,8 @@ define <4 x i64> @load_sext_4i8_to_4i64(<4 x i8> *%ptr) {
 ;
 ; AVX1-LABEL: load_sext_4i8_to_4i64:
 ; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxbd (%rdi), %xmm0
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT: vpmovsxbq 2(%rdi), %xmm0
+; AVX1-NEXT: vpmovsxbq (%rdi), %xmm1
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: retq
 ;
@@ -1978,28 +1976,15 @@ define <4 x i64> @load_sext_4i8_to_4i64(<4 x i8> *%ptr) {
 ; X32-SSE2-LABEL: load_sext_4i8_to_4i64:
 ; X32-SSE2: # %bb.0: # %entry
 ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movsbl 1(%eax), %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm1
-; X32-SSE2-NEXT: sarl $31, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm0
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-SSE2-NEXT: movsbl (%eax), %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm0
-; X32-SSE2-NEXT: sarl $31, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm2
+; X32-SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X32-SSE2-NEXT: psrad $24, %xmm1
+; X32-SSE2-NEXT: movdqa %xmm1, %xmm2
+; X32-SSE2-NEXT: psrad $31, %xmm2
+; X32-SSE2-NEXT: movdqa %xmm1, %xmm0
 ; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-SSE2-NEXT: movsbl 3(%eax), %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm2
-; X32-SSE2-NEXT: sarl $31, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm1
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X32-SSE2-NEXT: movsbl 2(%eax), %eax
-; X32-SSE2-NEXT: movd %eax, %xmm1
-; X32-SSE2-NEXT: sarl $31, %eax
-; X32-SSE2-NEXT: movd %eax, %xmm3
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; X32-SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X32-SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; X32-SSE2-NEXT: retl
 ;
 ; X32-SSE41-LABEL: load_sext_4i8_to_4i64:
@@ -2040,9 +2025,7 @@ define <2 x i64> @load_sext_4i8_to_4i64_extract(<4 x i8> *%ptr) {
 ;
 ; AVX1-LABEL: load_sext_4i8_to_4i64_extract:
 ; AVX1: # %bb.0:
-; AVX1-NEXT: vpmovsxbd (%rdi), %xmm0
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT: vpmovsxbq 2(%rdi), %xmm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: load_sext_4i8_to_4i64_extract:
@@ -2062,17 +2045,13 @@ define <2 x i64> @load_sext_4i8_to_4i64_extract(<4 x i8> *%ptr) {
 ; X32-SSE2-LABEL: load_sext_4i8_to_4i64_extract:
 ; X32-SSE2: # %bb.0:
 ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movsbl 3(%eax), %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm1
-; X32-SSE2-NEXT: sarl $31, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm0
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-SSE2-NEXT: movsbl 2(%eax), %eax
-; X32-SSE2-NEXT: movd %eax, %xmm0
-; X32-SSE2-NEXT: sarl $31, %eax
-; X32-SSE2-NEXT: movd %eax, %xmm2
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X32-SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; X32-SSE2-NEXT: psrad $24, %xmm0
+; X32-SSE2-NEXT: movdqa %xmm0, %xmm1
+; X32-SSE2-NEXT: psrad $31, %xmm1
+; X32-SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; X32-SSE2-NEXT: retl
 ;
 ; X32-SSE41-LABEL: load_sext_4i8_to_4i64_extract:
@@ -2482,15 +2461,11 @@ define <8 x i64> @load_sext_8i8_to_8i64(<8 x i8> *%ptr) {
 ;
 ; AVX1-LABEL: load_sext_8i8_to_8i64:
 ; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxbd (%rdi), %xmm0
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; AVX1-NEXT: vpmovsxbd 4(%rdi), %xmm1
-; AVX1-NEXT: vpmovsxdq %xmm1, %xmm2
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
-; AVX1-NEXT: vpmovsxdq %xmm1, %xmm1
+; AVX1-NEXT: vpmovsxbq 6(%rdi), %xmm1
+; AVX1-NEXT: vpmovsxbq 4(%rdi), %xmm2
+; AVX1-NEXT: vpmovsxbq 2(%rdi), %xmm0
+; AVX1-NEXT: vpmovsxbq (%rdi), %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
 ; AVX1-NEXT: retq
 ;
@@ -2508,50 +2483,22 @@ define <8 x i64> @load_sext_8i8_to_8i64(<8 x i8> *%ptr) {
 ; X32-SSE2-LABEL: load_sext_8i8_to_8i64:
 ; X32-SSE2: # %bb.0: # %entry
 ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movsbl 1(%eax), %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm1
-; X32-SSE2-NEXT: sarl $31, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm0
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-SSE2-NEXT: movsbl (%eax), %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm0
-; X32-SSE2-NEXT: sarl $31, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm2
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-SSE2-NEXT: movsbl 3(%eax), %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm2
-; X32-SSE2-NEXT: sarl $31, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm1
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X32-SSE2-NEXT: movsbl 2(%eax), %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm1
-; X32-SSE2-NEXT: sarl $31, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm3
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; X32-SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X32-SSE2-NEXT: movsbl 5(%eax), %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm3
-; X32-SSE2-NEXT: sarl $31, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm2
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
-; X32-SSE2-NEXT: movsbl 4(%eax), %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm2
-; X32-SSE2-NEXT: sarl $31, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm4
+; X32-SSE2-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
+; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; X32-SSE2-NEXT: psrad $24, %xmm1
+; X32-SSE2-NEXT: movdqa %xmm1, %xmm3
+; X32-SSE2-NEXT: psrad $31, %xmm3
+; X32-SSE2-NEXT: movdqa %xmm1, %xmm0
+; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
+; X32-SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
+; X32-SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
+; X32-SSE2-NEXT: psrad $24, %xmm3
+; X32-SSE2-NEXT: movdqa %xmm3, %xmm4
+; X32-SSE2-NEXT: psrad $31, %xmm4
+; X32-SSE2-NEXT: movdqa %xmm3, %xmm2
 ; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; X32-SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; X32-SSE2-NEXT: movsbl 7(%eax), %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm4
-; X32-SSE2-NEXT: sarl $31, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm3
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; X32-SSE2-NEXT: movsbl 6(%eax), %eax
-; X32-SSE2-NEXT: movd %eax, %xmm3
-; X32-SSE2-NEXT: sarl $31, %eax
-; X32-SSE2-NEXT: movd %eax, %xmm5
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; X32-SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; X32-SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm4[2],xmm3[3],xmm4[3]
 ; X32-SSE2-NEXT: retl
 ;
 ; X32-SSE41-LABEL: load_sext_8i8_to_8i64:
@@ -2885,25 +2832,21 @@ entry:
 define <8 x i32> @load_sext_8i8_to_8i32(<8 x i8> *%ptr) {
 ; SSE2-LABEL: load_sext_8i8_to_8i32:
 ; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSE2-NEXT: psrad $24, %xmm0
-; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-NEXT: psrad $24, %xmm0
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
 ; SSE2-NEXT: psrad $24, %xmm1
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: load_sext_8i8_to_8i32:
 ; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; SSSE3-NEXT: psrad $24, %xmm0
-; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSSE3-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
 ; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSSE3-NEXT: psrad $24, %xmm0
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
 ; SSSE3-NEXT: psrad $24, %xmm1
 ; SSSE3-NEXT: retq
 ;
@@ -2915,10 +2858,8 @@ define <8 x i32> @load_sext_8i8_to_8i32(<8 x i8> *%ptr) {
 ;
 ; AVX1-LABEL: load_sext_8i8_to_8i32:
 ; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxbw (%rdi), %xmm0
-; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
+; AVX1-NEXT: vpmovsxbd 4(%rdi), %xmm0
+; AVX1-NEXT: vpmovsxbd (%rdi), %xmm1
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: retq
 ;
@@ -2935,13 +2876,11 @@ define <8 x i32> @load_sext_8i8_to_8i32(<8 x i8> *%ptr) {
 ; X32-SSE2-LABEL: load_sext_8i8_to_8i32:
 ; X32-SSE2: # %bb.0: # %entry
 ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
-; X32-SSE2-NEXT: psrad $24, %xmm0
-; X32-SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
 ; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
+; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X32-SSE2-NEXT: psrad $24, %xmm0
+; X32-SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
 ; X32-SSE2-NEXT: psrad $24, %xmm1
 ; X32-SSE2-NEXT: retl
 ;
@@ -5230,21 +5169,21 @@ entry:
 define <16 x i16> @load_sext_16i8_to_16i16(<16 x i8> *%ptr) {
 ; SSE2-LABEL: load_sext_16i8_to_16i16:
 ; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: movdqa (%rdi), %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; SSE2-NEXT: psraw $8, %xmm0
-; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; SSE2-NEXT: psraw $8, %xmm1
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: load_sext_16i8_to_16i16:
 ; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSSE3-NEXT: movdqa (%rdi), %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; SSSE3-NEXT: psraw $8, %xmm0
-; SSSE3-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
-; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; SSSE3-NEXT: psraw $8, %xmm1
 ; SSSE3-NEXT: retq
 ;
@@ -5274,11 +5213,11 @@ define <16 x i16> @load_sext_16i8_to_16i16(<16 x i8> *%ptr) {
 ; X32-SSE2-LABEL: load_sext_16i8_to_16i16:
 ; X32-SSE2: # %bb.0: # %entry
 ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; X32-SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE2-NEXT: movdqa (%eax), %xmm1
+; X32-SSE2-NEXT: movdqa %xmm1, %xmm0
+; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
 ; X32-SSE2-NEXT: psraw $8, %xmm0
-; X32-SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; X32-SSE2-NEXT: psraw $8, %xmm1
 ; X32-SSE2-NEXT: retl
 ;
@@ -5394,30 +5333,26 @@ entry:
 define <4 x i64> @load_sext_4i16_to_4i64(<4 x i16> *%ptr) {
 ; SSE2-LABEL: load_sext_4i16_to_4i64:
 ; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movswq 2(%rdi), %rax
-; SSE2-NEXT: movq %rax, %xmm1
-; SSE2-NEXT: movswq (%rdi), %rax
-; SSE2-NEXT: movq %rax, %xmm0
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT: movswq 6(%rdi), %rax
-; SSE2-NEXT: movq %rax, %xmm2
-; SSE2-NEXT: movswq 4(%rdi), %rax
-; SSE2-NEXT: movq %rax, %xmm1
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT: psrad $16, %xmm1
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrad $31, %xmm2
+; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: load_sext_4i16_to_4i64:
 ; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movswq 2(%rdi), %rax
-; SSSE3-NEXT: movq %rax, %xmm1
-; SSSE3-NEXT: movswq (%rdi), %rax
-; SSSE3-NEXT: movq %rax, %xmm0
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSSE3-NEXT: movswq 6(%rdi), %rax
-; SSSE3-NEXT: movq %rax, %xmm2
-; SSSE3-NEXT: movswq 4(%rdi), %rax
-; SSSE3-NEXT: movq %rax, %xmm1
-; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSSE3-NEXT: psrad $16, %xmm1
+; SSSE3-NEXT: movdqa %xmm1, %xmm2
+; SSSE3-NEXT: psrad $31, %xmm2
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; SSSE3-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; SSSE3-NEXT: retq
 ;
 ; SSE41-LABEL: load_sext_4i16_to_4i64:
@@ -5428,10 +5363,8 @@ define <4 x i64> @load_sext_4i16_to_4i64(<4 x i16> *%ptr) {
 ;
 ; AVX1-LABEL: load_sext_4i16_to_4i64:
 ; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vpmovsxwd (%rdi), %xmm0
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
-; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
+; AVX1-NEXT: vpmovsxwq 4(%rdi), %xmm0
+; AVX1-NEXT: vpmovsxwq (%rdi), %xmm1
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT: retq
 ;
@@ -5448,28 +5381,14 @@ define <4 x i64> @load_sext_4i16_to_4i64(<4 x i16> *%ptr) {
 ; X32-SSE2-LABEL: load_sext_4i16_to_4i64:
 ; X32-SSE2: # %bb.0: # %entry
 ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movswl 2(%eax), %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm1
-; X32-SSE2-NEXT: sarl $31, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm0
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; X32-SSE2-NEXT: movswl (%eax), %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm0
-; X32-SSE2-NEXT: sarl $31, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm2
+; X32-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; X32-SSE2-NEXT: psrad $16, %xmm1
+; X32-SSE2-NEXT: movdqa %xmm1, %xmm2
+; X32-SSE2-NEXT: psrad $31, %xmm2
+; X32-SSE2-NEXT: movdqa %xmm1, %xmm0
 ; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; X32-SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; X32-SSE2-NEXT: movswl 6(%eax), %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm2
-; X32-SSE2-NEXT: sarl $31, %ecx
-; X32-SSE2-NEXT: movd %ecx, %xmm1
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; X32-SSE2-NEXT: movswl 4(%eax), %eax
-; X32-SSE2-NEXT: movd %eax, %xmm1
-; X32-SSE2-NEXT: sarl $31, %eax
-; X32-SSE2-NEXT: movd %eax, %xmm3
-; X32-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; X32-SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; X32-SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
 ; X32-SSE2-NEXT: retl
 ;
 ; X32-SSE41-LABEL: load_sext_4i16_to_4i64:
@@ -5487,21 +5406,19 @@ entry:
 define <8 x i32> @load_sext_8i16_to_8i32(<8 x i16> *%ptr) {
 ; SSE2-LABEL: load_sext_8i16_to_8i32:
 ; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSE2-NEXT: movdqa (%rdi), %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSE2-NEXT: psrad $16, %xmm0
-; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
 ; SSE2-NEXT: psrad $16, %xmm1
 ; SSE2-NEXT: retq
 ;
 ; SSSE3-LABEL: load_sext_8i16_to_8i32:
 ; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; SSSE3-NEXT: movdqa (%rdi), %xmm1
+; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; SSSE3-NEXT: psrad $16, %xmm0
-; SSSE3-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
-; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
+; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
 ; SSSE3-NEXT: psrad $16, %xmm1
 ; SSSE3-NEXT: retq
 ;
@@ -5531,11 +5448,10 @@ define <8 x i32> @load_sext_8i16_to_8i32(<8 x i16> *%ptr) {
 ; X32-SSE2-LABEL: load_sext_8i16_to_8i32:
 ; X32-SSE2: # %bb.0: # %entry
 ; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
-; X32-SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
+; X32-SSE2-NEXT: movdqa (%eax), %xmm1
+; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
 ; X32-SSE2-NEXT: psrad $16, %xmm0
-; X32-SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
+; X32-SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
 ; X32-SSE2-NEXT: psrad $16, %xmm1
 ; X32-SSE2-NEXT: retl
 ;