author     Sanjay Patel <spatel@rotateright.com>   2019-12-12 18:19:57 -0500
committer  Sanjay Patel <spatel@rotateright.com>   2019-12-13 08:37:06 -0500
commit     dc9e6ba90bebe72f846e76fcc3f2c5145df24613 (patch)
tree       87c6fb55e40d89b3f556f035140c41f5aea7a7db
parent     99581fd4c8e12f5eca38e7cfc5992508a9bfe383 (diff)
[x86] add tests for shift-trunc-shift; NFC
More coverage for a possible generic transform.
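The pattern under test is a logical shift right in a wide type, a truncate, then another logical shift right in the narrow type; the vector test below is the same shape applied elementwise. A minimal sketch of how a generic fold might rewrite the scalar case is shown here, assuming the transform would combine the two shift amounts in the wide type; the function name sh_trunc_sh_folded and the exact canonical form are illustrative assumptions, not part of this NFC patch.

define i16 @sh_trunc_sh_folded(i64 %x) {
  ; hypothetical folded form: one wide shift by the summed amount (24 + 12 = 36),
  ; then truncate and mask to the 16 - 12 = 4 bits that can be nonzero
  %s = lshr i64 %x, 36
  %t = trunc i64 %s to i16
  %r = and i16 %t, 15
  ret i16 %r
}

This is consistent with the X32 output already checked in the scalar test, which loads the high dword, shifts it right by 4 (36 - 32), and masks with 15.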
-rw-r--r--  llvm/test/CodeGen/X86/shift-amount-mod.ll       | 22
-rw-r--r--  llvm/test/CodeGen/X86/vector-shift-lshr-256.ll  | 77
2 files changed, 99 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/shift-amount-mod.ll b/llvm/test/CodeGen/X86/shift-amount-mod.ll
index 70fa32027c7..bccb3607c98 100644
--- a/llvm/test/CodeGen/X86/shift-amount-mod.ll
+++ b/llvm/test/CodeGen/X86/shift-amount-mod.ll
@@ -1552,3 +1552,25 @@ define i64 @reg64_lshr_by_masked_negated_unfolded_add_b(i64 %val, i64 %a, i64 %b
%shifted = lshr i64 %val, %negaaddbitwidthaddb
ret i64 %shifted
}
+
+define i16 @sh_trunc_sh(i64 %x) {
+; X32-LABEL: sh_trunc_sh:
+; X32: # %bb.0:
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: shrl $4, %eax
+; X32-NEXT: andl $15, %eax
+; X32-NEXT: # kill: def $ax killed $ax killed $eax
+; X32-NEXT: retl
+;
+; X64-LABEL: sh_trunc_sh:
+; X64: # %bb.0:
+; X64-NEXT: shrq $24, %rdi
+; X64-NEXT: movzwl %di, %eax
+; X64-NEXT: shrl $12, %eax
+; X64-NEXT: # kill: def $ax killed $ax killed $eax
+; X64-NEXT: retq
+ %s = lshr i64 %x, 24
+ %t = trunc i64 %s to i16
+ %r = lshr i16 %t, 12
+ ret i16 %r
+}
diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
index 698a45fad4d..c448921db7d 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
@@ -1394,3 +1394,80 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
%shift = lshr <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <32 x i8> %shift
}
+
+define <4 x i32> @sh_trunc_sh_vec(<4 x i64> %x) {
+; AVX1-LABEL: sh_trunc_sh_vec:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vpsrlq $24, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlq $24, %xmm0, %xmm0
+; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX1-NEXT: vpsrld $12, %xmm0, %xmm0
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: sh_trunc_sh_vec:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpsrlq $24, %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX2-NEXT: vpsrld $12, %xmm0, %xmm0
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: sh_trunc_sh_vec:
+; XOPAVX1: # %bb.0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; XOPAVX1-NEXT: vpperm {{.*#+}} xmm0 = xmm0[3,4,5,6,11,12,13,14],xmm1[3,4,5,6,11,12,13,14]
+; XOPAVX1-NEXT: vpsrld $12, %xmm0, %xmm0
+; XOPAVX1-NEXT: vzeroupper
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: sh_trunc_sh_vec:
+; XOPAVX2: # %bb.0:
+; XOPAVX2-NEXT: vpsrlq $24, %ymm0, %ymm0
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; XOPAVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; XOPAVX2-NEXT: vpsrld $12, %xmm0, %xmm0
+; XOPAVX2-NEXT: vzeroupper
+; XOPAVX2-NEXT: retq
+;
+; AVX512-LABEL: sh_trunc_sh_vec:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vpsrlq $24, %ymm0, %ymm0
+; AVX512-NEXT: vpmovqd %zmm0, %ymm0
+; AVX512-NEXT: vpsrld $12, %xmm0, %xmm0
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+;
+; AVX512VL-LABEL: sh_trunc_sh_vec:
+; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpsrlq $24, %ymm0, %ymm0
+; AVX512VL-NEXT: vpmovqd %ymm0, %xmm0
+; AVX512VL-NEXT: vpsrld $12, %xmm0, %xmm0
+; AVX512VL-NEXT: vzeroupper
+; AVX512VL-NEXT: retq
+;
+; X32-AVX1-LABEL: sh_trunc_sh_vec:
+; X32-AVX1: # %bb.0:
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; X32-AVX1-NEXT: vpsrlq $24, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpsrlq $24, %xmm0, %xmm0
+; X32-AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; X32-AVX1-NEXT: vpsrld $12, %xmm0, %xmm0
+; X32-AVX1-NEXT: vzeroupper
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: sh_trunc_sh_vec:
+; X32-AVX2: # %bb.0:
+; X32-AVX2-NEXT: vpsrlq $24, %ymm0, %ymm0
+; X32-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X32-AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; X32-AVX2-NEXT: vpsrld $12, %xmm0, %xmm0
+; X32-AVX2-NEXT: vzeroupper
+; X32-AVX2-NEXT: retl
+ %s = lshr <4 x i64> %x, <i64 24, i64 24, i64 24, i64 24>
+ %t = trunc <4 x i64> %s to <4 x i32>
+ %r = lshr <4 x i32> %t, <i32 12, i32 12, i32 12, i32 12>
+ ret <4 x i32> %r
+}