diff options
author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-06-07 11:22:52 +0000 |
---|---|---|
committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-06-07 11:22:52 +0000 |
commit | 0e29d8d81faa33c683412cd203c208e2b434682a (patch) | |
tree | ca567fd7c6c80f8403cf58f0401a5e565f3b79ef | |
parent | 0d6b01761c1db42ae13c2bf91ffdea4b6ebd9f69 (diff) | |
download | bcm5719-llvm-0e29d8d81faa33c683412cd203c208e2b434682a.tar.gz bcm5719-llvm-0e29d8d81faa33c683412cd203c208e2b434682a.zip |
[X86][SSE] Add extra trunc(shl) test cases
The existing trunc_shl_17_v8i16_v8i32 test case should (but doesn't) fold to zero, I've added 2 new test cases:
- trunc_shl_16_v8i16_v8i32 which folds to zero (this is actually testing the target faux shuffle combine)
- trunc_shl_15_v8i16_v8i32 which should perform the full shl + truncate
llvm-svn: 334188
-rw-r--r-- | llvm/test/CodeGen/X86/reduce-trunc-shl.ll | 46 |
1 files changed, 43 insertions, 3 deletions
diff --git a/llvm/test/CodeGen/X86/reduce-trunc-shl.ll b/llvm/test/CodeGen/X86/reduce-trunc-shl.ll
index f237c44102c..b44cadb26d0 100644
--- a/llvm/test/CodeGen/X86/reduce-trunc-shl.ll
+++ b/llvm/test/CodeGen/X86/reduce-trunc-shl.ll
@@ -26,8 +26,48 @@ define void @trunc_shl_7_v4i32_v4i64(<4 x i32> addrspace(1)* %out, <4 x i64> add
   ret void
 }
 
-define <8 x i16> @trunc_shl_v8i16_v8i32(<8 x i32> %a) {
-; SSE2-LABEL: trunc_shl_v8i16_v8i32:
+define <8 x i16> @trunc_shl_15_v8i16_v8i32(<8 x i32> %a) {
+; SSE2-LABEL: trunc_shl_15_v8i16_v8i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pslld $16, %xmm1
+; SSE2-NEXT:    psrad $16, %xmm1
+; SSE2-NEXT:    pslld $16, %xmm0
+; SSE2-NEXT:    psrad $16, %xmm0
+; SSE2-NEXT:    packssdw %xmm1, %xmm0
+; SSE2-NEXT:    psllw $15, %xmm0
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: trunc_shl_15_v8i16_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT:    vpsllw $15, %xmm0, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+  %shl = shl <8 x i32> %a, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+  %conv = trunc <8 x i32> %shl to <8 x i16>
+  ret <8 x i16> %conv
+}
+
+define <8 x i16> @trunc_shl_16_v8i16_v8i32(<8 x i32> %a) {
+; SSE2-LABEL: trunc_shl_16_v8i16_v8i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    xorps %xmm0, %xmm0
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: trunc_shl_16_v8i16_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[28,29]
+; AVX2-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+  %shl = shl <8 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+  %conv = trunc <8 x i32> %shl to <8 x i16>
+  ret <8 x i16> %conv
+}
+
+define <8 x i16> @trunc_shl_17_v8i16_v8i32(<8 x i32> %a) {
+; SSE2-LABEL: trunc_shl_17_v8i16_v8i32:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pslld $17, %xmm0
 ; SSE2-NEXT:    pslld $17, %xmm1
@@ -38,7 +78,7 @@ define <8 x i16> @trunc_shl_v8i16_v8i32(<8 x i32> %a) {
 ; SSE2-NEXT:    packssdw %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
-; AVX2-LABEL: trunc_shl_v8i16_v8i32:
+; AVX2-LABEL: trunc_shl_17_v8i16_v8i32:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpslld $17, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]