-rw-r--r-- | llvm/test/CodeGen/X86/pmulh.ll | 126
1 files changed, 83 insertions, 43 deletions
diff --git a/llvm/test/CodeGen/X86/pmulh.ll b/llvm/test/CodeGen/X86/pmulh.ll
index 9fe8a66b98d..f86b71ff8cb 100644
--- a/llvm/test/CodeGen/X86/pmulh.ll
+++ b/llvm/test/CodeGen/X86/pmulh.ll
@@ -1,32 +1,53 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=SSE --check-prefix=SSE2 --check-prefix=SSE2-PROMOTE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=SSE --check-prefix=SSE2 --check-prefix=SSE2-WIDEN
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41 --check-prefix=SSE41-PROMOTE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 -x86-experimental-vector-widening-legalization | FileCheck %s --check-prefix=SSE --check-prefix=SSE41 --check-prefix=SSE41-WIDEN
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512F
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512BW
 
 define <4 x i16> @mulhuw_v4i16(<4 x i16> %a, <4 x i16> %b) {
-; SSE2-LABEL: mulhuw_v4i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: pmulhuw %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT: retq
+; SSE2-PROMOTE-LABEL: mulhuw_v4i16:
+; SSE2-PROMOTE: # %bb.0:
+; SSE2-PROMOTE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE2-PROMOTE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
+; SSE2-PROMOTE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-PROMOTE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE2-PROMOTE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; SSE2-PROMOTE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-PROMOTE-NEXT: pmulhuw %xmm1, %xmm0
+; SSE2-PROMOTE-NEXT: pxor %xmm1, %xmm1
+; SSE2-PROMOTE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-PROMOTE-NEXT: retq
 ;
-; SSE41-LABEL: mulhuw_v4i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
-; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
-; SSE41-NEXT: pmulld %xmm1, %xmm0
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: retq
+; SSE2-WIDEN-LABEL: mulhuw_v4i16:
+; SSE2-WIDEN: # %bb.0:
+; SSE2-WIDEN-NEXT: pmulhuw %xmm1, %xmm0
+; SSE2-WIDEN-NEXT: pxor %xmm1, %xmm1
+; SSE2-WIDEN-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-WIDEN-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE2-WIDEN-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; SSE2-WIDEN-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-WIDEN-NEXT: retq
+;
+; SSE41-PROMOTE-LABEL: mulhuw_v4i16:
+; SSE41-PROMOTE: # %bb.0:
+; SSE41-PROMOTE-NEXT: pxor %xmm2, %xmm2
+; SSE41-PROMOTE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3],xmm0[4],xmm2[5],xmm0[6],xmm2[7]
+; SSE41-PROMOTE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
+; SSE41-PROMOTE-NEXT: pmulld %xmm1, %xmm0
+; SSE41-PROMOTE-NEXT: psrld $16, %xmm0
+; SSE41-PROMOTE-NEXT: retq
+;
+; SSE41-WIDEN-LABEL: mulhuw_v4i16:
+; SSE41-WIDEN: # %bb.0:
+; SSE41-WIDEN-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
+; SSE41-WIDEN-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
+; SSE41-WIDEN-NEXT: pmulld %xmm2, %xmm0
+; SSE41-WIDEN-NEXT: psrld $16, %xmm0
+; SSE41-WIDEN-NEXT: packusdw %xmm0, %xmm0
+; SSE41-WIDEN-NEXT: retq
 ;
 ; AVX-LABEL: mulhuw_v4i16:
 ; AVX: # %bb.0:
@@ -45,28 +66,47 @@ define <4 x i16> @mulhuw_v4i16(<4 x i16> %a, <4 x i16> %b) {
 }
 
 define <4 x i16> @mulhw_v4i16(<4 x i16> %a, <4 x i16> %b) {
-; SSE2-LABEL: mulhw_v4i16:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT: pmulhw %xmm1, %xmm0
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
-; SSE2-NEXT: retq
+; SSE2-PROMOTE-LABEL: mulhw_v4i16:
+; SSE2-PROMOTE: # %bb.0:
+; SSE2-PROMOTE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE2-PROMOTE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
+; SSE2-PROMOTE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-PROMOTE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE2-PROMOTE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; SSE2-PROMOTE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-PROMOTE-NEXT: pmulhw %xmm1, %xmm0
+; SSE2-PROMOTE-NEXT: pxor %xmm1, %xmm1
+; SSE2-PROMOTE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-PROMOTE-NEXT: retq
+;
+; SSE2-WIDEN-LABEL: mulhw_v4i16:
+; SSE2-WIDEN: # %bb.0:
+; SSE2-WIDEN-NEXT: pmulhw %xmm1, %xmm0
+; SSE2-WIDEN-NEXT: pxor %xmm1, %xmm1
+; SSE2-WIDEN-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; SSE2-WIDEN-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE2-WIDEN-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; SSE2-WIDEN-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-WIDEN-NEXT: retq
+;
+; SSE41-PROMOTE-LABEL: mulhw_v4i16:
+; SSE41-PROMOTE: # %bb.0:
+; SSE41-PROMOTE-NEXT: pslld $16, %xmm0
+; SSE41-PROMOTE-NEXT: psrad $16, %xmm0
+; SSE41-PROMOTE-NEXT: pslld $16, %xmm1
+; SSE41-PROMOTE-NEXT: psrad $16, %xmm1
+; SSE41-PROMOTE-NEXT: pmulld %xmm1, %xmm0
+; SSE41-PROMOTE-NEXT: psrld $16, %xmm0
+; SSE41-PROMOTE-NEXT: retq
 ;
-; SSE41-LABEL: mulhw_v4i16:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pslld $16, %xmm0
-; SSE41-NEXT: psrad $16, %xmm0
-; SSE41-NEXT: pslld $16, %xmm1
-; SSE41-NEXT: psrad $16, %xmm1
-; SSE41-NEXT: pmulld %xmm1, %xmm0
-; SSE41-NEXT: psrld $16, %xmm0
-; SSE41-NEXT: retq
+; SSE41-WIDEN-LABEL: mulhw_v4i16:
+; SSE41-WIDEN: # %bb.0:
+; SSE41-WIDEN-NEXT: pmovsxwd %xmm0, %xmm2
+; SSE41-WIDEN-NEXT: pmovsxwd %xmm1, %xmm0
+; SSE41-WIDEN-NEXT: pmulld %xmm2, %xmm0
+; SSE41-WIDEN-NEXT: psrld $16, %xmm0
+; SSE41-WIDEN-NEXT: packusdw %xmm0, %xmm0
+; SSE41-WIDEN-NEXT: retq
 ;
 ; AVX-LABEL: mulhw_v4i16:
 ; AVX: # %bb.0:
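Both functions exercise the same idiom: extend the i16 lanes, multiply, take the high 16 bits of each product, and truncate back. The IR bodies are elided in this excerpt; below is a minimal sketch of the pattern such a test typically contains, assuming the standard mulh idiom. The function name and value names are illustrative, not copied from the file.

; Hypothetical sketch of the unsigned pattern (pmulhuw); the signed
; variant (pmulhw) is identical except that it uses sext instead of zext.
define <4 x i16> @mulhuw_v4i16_sketch(<4 x i16> %a, <4 x i16> %b) {
  ; Zero-extend both operands so the full 16x16->32 product fits.
  %a32 = zext <4 x i16> %a to <4 x i32>
  %b32 = zext <4 x i16> %b to <4 x i32>
  %mul = mul <4 x i32> %a32, %b32
  ; Keep the high 16 bits of each 32-bit product...
  %hi = lshr <4 x i32> %mul, <i32 16, i32 16, i32 16, i32 16>
  ; ...and truncate back to the original element width.
  %res = trunc <4 x i32> %hi to <4 x i16>
  ret <4 x i16> %res
}

As the SSE2-WIDEN and SSE41-WIDEN checks above show, widening legalization keeps the <4 x i16> operands in a 128-bit register, so this pattern can select pmulhuw/pmulhw (or a pmovzx/pmovsx + pmulld sequence) directly, while the default promotion path first legalizes the operands through <4 x i32>, producing the extra shuffle and blend traffic in the PROMOTE check lines.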