 llvm/test/CodeGen/X86/vector-idiv.ll            |   2
 llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll |  10
 llvm/test/CodeGen/X86/vector-trunc.ll           | 256
 3 files changed, 141 insertions(+), 127 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vector-idiv.ll b/llvm/test/CodeGen/X86/vector-idiv.ll
index 27fbd94ed78..a98ed322136 100644
--- a/llvm/test/CodeGen/X86/vector-idiv.ll
+++ b/llvm/test/CodeGen/X86/vector-idiv.ll
@@ -887,7 +887,7 @@ define <8 x i32> @test9(<8 x i32> %a) {
 ; SSE41-NEXT:    movdqa %xmm1, %xmm2
 ; SSE41-NEXT:    movdqa %xmm0, %xmm3
 ; SSE41-NEXT:    movdqa {{.*#+}} xmm1 = [2454267027,2454267027,2454267027,2454267027]
-; SSE41-NEXT:   # kill: XMM0<def> XMM3<kill>
+; SSE41-NEXT:    # kill: XMM0<def> XMM3<kill>
 ; SSE41-NEXT:    pmuldq %xmm1, %xmm0
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[1,1,3,3]
 ; SSE41-NEXT:    pshufd {{.*#+}} xmm5 = xmm3[1,1,3,3]
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll b/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll
index caec89baa96..e2fbde55b65 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-256-v16.ll
@@ -1352,10 +1352,10 @@ define <16 x i16> @shuffle_v16i16_00_16_01_17_02_18_03_19_04_20_05_21_06_22_07_2
 define <16 x i16> @shuffle_v16i16_zz_zz_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_24(<16 x i16> %a) {
 ; AVX1-LABEL: shuffle_v16i16_zz_zz_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_24:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    vpslldq	{{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
+; AVX1-NEXT:    vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vpslldq	{{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
-; AVX1-NEXT:    vinsertf128	$1, %xmm0, %ymm1, %ymm0
+; AVX1-NEXT:    vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1]
+; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    retq
 ;
 ; AVX2-LABEL: shuffle_v16i16_zz_zz_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_24:
@@ -1369,9 +1369,9 @@ define <16 x i16> @shuffle_v16i16_zz_zz_zz_zz_zz_zz_zz_16_zz_zz_zz_zz_zz_zz_zz_2
 define <16 x i16> @shuffle_v16i16_17_18_19_20_21_22_23_zz_25_26_27_28_29_30_31_zz(<16 x i16> %a) {
 ; AVX1-LABEL: shuffle_v16i16_17_18_19_20_21_22_23_zz_25_26_27_28_29_30_31_zz:
 ; AVX1:       # BB#0:
-; AVX1-NEXT:    vpsrldq	{{.*#+}} xmm1 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; AVX1-NEXT:    vpsrldq {{.*#+}} xmm1 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
 ; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT:    vpsrldq	{{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; AVX1-NEXT:    vpsrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    retq
 ;
diff --git a/llvm/test/CodeGen/X86/vector-trunc.ll b/llvm/test/CodeGen/X86/vector-trunc.ll
index c80a332484b..2850b0ad270 100644
--- a/llvm/test/CodeGen/X86/vector-trunc.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc.ll
@@ -4,15 +4,16 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
 
 define <4 x i32> @trunc2x2i64(<2 x i64> %a, <2 x i64> %b) {
-; SSE-LABEL:  trunc2x2i64:
-; SSE:        # BB#0: # %entry
-; SSE-NEXT:   shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; SSE-NEXT:   retq
-
-; AVX-LABEL:  trunc2x2i64:
-; AVX:        # BB#0: # %entry
-; AVX-NEXT:   vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
-; AVX-NEXT:   retq
+; SSE-LABEL: trunc2x2i64:
+; SSE:       # BB#0: # %entry
+; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: trunc2x2i64:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX-NEXT:    retq
+
 entry:
   %0 = trunc <2 x i64> %a to <2 x i32>
@@ -22,17 +23,18 @@ entry:
 }
 
 define i64 @trunc2i64(<2 x i64> %inval) {
-; SSE-LABEL:  trunc2i64:
-; SSE:        # BB#0: # %entry
-; SSE-NEXT:   pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE-NEXT:   movd %xmm0, %rax
-; SSE-NEXT:   retq
-
-; AVX-LABEL:  trunc2i64:
-; AVX:        # BB#0: # %entry
-; AVX-NEXT:   vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX-NEXT:   vmovq %xmm0, %rax
-; AVX-NEXT:   retq
+; SSE-LABEL: trunc2i64:
+; SSE:       # BB#0: # %entry
+; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE-NEXT:    movd %xmm0, %rax
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: trunc2i64:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX-NEXT:    vmovq %xmm0, %rax
+; AVX-NEXT:    retq
+
 entry:
   %0 = trunc <2 x i64> %inval to <2 x i32>
@@ -41,40 +43,43 @@ entry:
 }
 
 define <8 x i16> @trunc2x4i32(<4 x i32> %a, <4 x i32> %b) {
-; SSE2-LABEL:  trunc2x4i32:
-; SSE2:        # BB#0: # %entry
-; SSE2-NEXT:   pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
-; SSE2-NEXT:   pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; SSE2-NEXT:   pshufd  {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT:   pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; SSE2-NEXT:   pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; SSE2-NEXT:   pshufd  {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT:   punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE2-NEXT:   retq
-
-; SSSE3-LABEL:  trunc2x4i32:
-; SSSE3:        # BB#0: # %entry
-; SSSE3-NEXT:   movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; SSSE3-NEXT:   pshufb %xmm2, %xmm1
-; SSSE3-NEXT:   pshufb %xmm2, %xmm0
-; SSSE3-NEXT:   punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSSE3-NEXT:   retq
-
-; SSE41-LABEL:  trunc2x4i32:
-; SSE41:        # BB#0: # %entry
-; SSE41-NEXT:   movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; SSE41-NEXT:   pshufb %xmm2, %xmm1
-; SSE41-NEXT:   pshufb %xmm2, %xmm0
-; SSE41-NEXT:   punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; SSE41-NEXT:   retq
-
-; AVX-LABEL:  trunc2x4i32:
-; AVX:        # BB#0: # %entry
-; AVX-NEXT:   vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; AVX-NEXT:   vpshufb %xmm2, %xmm1, %xmm1
-; AVX-NEXT:   vpshufb %xmm2, %xmm0, %xmm0
-; AVX-NEXT:   vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
-; AVX-NEXT:   retq
+; SSE2-LABEL: trunc2x4i32:
+; SSE2:       # BB#0: # %entry
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: trunc2x4i32:
+; SSSE3:       # BB#0: # %entry
+; SSSE3-NEXT:    movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT:    pshufb %xmm2, %xmm1
+; SSSE3-NEXT:    pshufb %xmm2, %xmm0
+; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: trunc2x4i32:
+; SSE41:       # BB#0: # %entry
+; SSE41-NEXT:    movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE41-NEXT:    pshufb %xmm2, %xmm1
+; SSE41-NEXT:    pshufb %xmm2, %xmm0
+; SSE41-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: trunc2x4i32:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
+; AVX-NEXT:    vpshufb %xmm2, %xmm0, %xmm0
+; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX-NEXT:    retq
+
+
+
 entry:
   %0 = trunc <4 x i32> %a to <4 x i16>
@@ -85,31 +90,34 @@ entry:
 
 ; PR15524 http://llvm.org/bugs/show_bug.cgi?id=15524
 define i64 @trunc4i32(<4 x i32> %inval) {
-; SSE2-LABEL:  trunc4i32:
-; SSE2:        # BB#0: # %entry
-; SSE2-NEXT:   pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
-; SSE2-NEXT:   pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
-; SSE2-NEXT:   pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; SSE2-NEXT:   movd %xmm0, %rax
-; SSE2-NEXT:   retq
-
+; SSE2-LABEL: trunc4i32:
+; SSE2:       # BB#0: # %entry
+; SSE2-NEXT:    pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
+; SSE2-NEXT:    pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT:    movd %xmm0, %rax
+; SSE2-NEXT:    retq
+;
 ; SSSE3-LABEL: trunc4i32:
 ; SSSE3:       # BB#0: # %entry
-; SSSE3-NEXT:  pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; SSSE3-NEXT:  movd %xmm0, %rax
-; SSSE3-NEXT:  retq
-
+; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSSE3-NEXT:    movd %xmm0, %rax
+; SSSE3-NEXT:    retq
+;
 ; SSE41-LABEL: trunc4i32:
 ; SSE41:       # BB#0: # %entry
-; SSE41-NEXT:  pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; SSE41-NEXT:  movd %xmm0, %rax
-; SSE41-NEXT:  retq
-
-; AVX-LABEL:  trunc4i32:
-; AVX:        # BB#0: # %entry
-; AVX-NEXT:   vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
-; AVX-NEXT:   vmovq %xmm0, %rax
-; AVX-NEXT:   retq
+; SSE41-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE41-NEXT:    movd %xmm0, %rax
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: trunc4i32:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; AVX-NEXT:    vmovq %xmm0, %rax
+; AVX-NEXT:    retq
+
+
+
 entry:
   %0 = trunc <4 x i32> %inval to <4 x i16>
@@ -118,34 +126,37 @@ entry:
 }
 
 define <16 x i8> @trunc2x8i16(<8 x i16> %a, <8 x i16> %b) {
-; SSE2-LABEL:  trunc2x8i16:
-; SSE2:        # BB#0: # %entry
-; SSE2-NEXT:   movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; SSE2-NEXT:   pand %xmm2, %xmm1
-; SSE2-NEXT:   pand %xmm2, %xmm0
-; SSE2-NEXT:   packuswb %xmm1, %xmm0
-; SSE2-NEXT:   retq
-
-; SSSE3-LABEL:  trunc2x8i16:
-; SSSE3:        # BB#0: # %entry
-; SSSE3-NEXT:   pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,2,4,6,8,10,12,14]
-; SSSE3-NEXT:   pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
-; SSSE3-NEXT:   por %xmm1, %xmm0
-; SSSE3-NEXT:   retq
-
-; SSE41-LABEL:  trunc2x8i16:
-; SSE41:        # BB#0: # %entry
-; SSE41-NEXT:   pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,2,4,6,8,10,12,14]
-; SSE41-NEXT:   pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
-; SSE41-NEXT:   por %xmm1, %xmm0
-; SSE41-NEXT:   retq
-
-; AVX-LABEL:  trunc2x8i16:
-; AVX:        # BB#0: # %entry
-; AVX-NEXT:   vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,2,4,6,8,10,12,14]
-; AVX-NEXT:   vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
-; AVX-NEXT:   vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT:   retq
+; SSE2-LABEL: trunc2x8i16:
+; SSE2:       # BB#0: # %entry
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT:    pand %xmm2, %xmm1
+; SSE2-NEXT:    pand %xmm2, %xmm0
+; SSE2-NEXT:    packuswb %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: trunc2x8i16:
+; SSSE3:       # BB#0: # %entry
+; SSSE3-NEXT:    pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,2,4,6,8,10,12,14]
+; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; SSSE3-NEXT:    por %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: trunc2x8i16:
+; SSE41:       # BB#0: # %entry
+; SSE41-NEXT:    pshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,2,4,6,8,10,12,14]
+; SSE41-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; SSE41-NEXT:    por %xmm1, %xmm0
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: trunc2x8i16:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vpshufb {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,2,4,6,8,10,12,14]
+; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14],zero,zero,zero,zero,zero,zero,zero,zero
+; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT:    retq
+
+
+
 entry:
   %0 = trunc <8 x i16> %a to <8 x i8>
@@ -156,30 +167,33 @@ entry:
 
 ; PR15524 http://llvm.org/bugs/show_bug.cgi?id=15524
 define i64 @trunc8i16(<8 x i16> %inval) {
-; SSE2-LABEL:  trunc8i16:
-; SSE2:        # BB#0: # %entry
-; SSE2-NEXT:   pand .LCP{{.*}}(%rip), %xmm0
-; SSE2-NEXT:   packuswb %xmm0, %xmm0
-; SSE2-NEXT:   movd %xmm0, %rax
-; SSE2-NEXT:   retq
-
+; SSE2-LABEL: trunc8i16:
+; SSE2:       # BB#0: # %entry
+; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    packuswb %xmm0, %xmm0
+; SSE2-NEXT:    movd %xmm0, %rax
+; SSE2-NEXT:    retq
+;
 ; SSSE3-LABEL: trunc8i16:
 ; SSSE3:       # BB#0: # %entry
-; SSSE3-NEXT:  pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
-; SSSE3-NEXT:  movd %xmm0, %rax
-; SSSE3-NEXT:  retq
-
+; SSSE3-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; SSSE3-NEXT:    movd %xmm0, %rax
+; SSSE3-NEXT:    retq
+;
 ; SSE41-LABEL: trunc8i16:
 ; SSE41:       # BB#0: # %entry
-; SSE41-NEXT:  pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
-; SSE41-NEXT:  movd %xmm0, %rax
-; SSE41-NEXT:  retq
-
-; AVX-LABEL:  trunc8i16:
-; AVX:        # BB#0: # %entry
-; AVX-NEXT:   vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
-; AVX-NEXT:   vmovq %xmm0, %rax
-; AVX-NEXT:   retq
+; SSE41-NEXT:    pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; SSE41-NEXT:    movd %xmm0, %rax
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: trunc8i16:
+; AVX:       # BB#0: # %entry
+; AVX-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
+; AVX-NEXT:    vmovq %xmm0, %rax
+; AVX-NEXT:    retq
+
+
+
 entry:
   %0 = trunc <8 x i16> %inval to <8 x i8>