author     Craig Topper <craig.topper@intel.com>  2018-01-07 06:24:27 +0000
committer  Craig Topper <craig.topper@intel.com>  2018-01-07 06:24:27 +0000
commit     89293a2a94434137dbdfbd883e5c1f01e2be02d2 (patch)
tree       1e9aaddbd0beb3a6db2aeeb97f3e794353635240 /llvm/test
parent     a124ab10ef91c1856c9dbd0b07417edac871d46f (diff)
download   bcm5719-llvm-89293a2a94434137dbdfbd883e5c1f01e2be02d2.tar.gz
           bcm5719-llvm-89293a2a94434137dbdfbd883e5c1f01e2be02d2.zip
[X86] Add 128 and 256-bit VPOPCNTD/Q instructions to load folding tables.
llvm-svn: 321953
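
Note: the diffstat below is limited to llvm/test, so only the new tests are shown; the functional change is the addition of the 128- and 256-bit VPOPCNTD/Q register-to-memory entries in the X86 memory-fold tables (X86InstrInfo.cpp at the time of this commit). The following is a minimal compilable sketch of what such entries and their lookup look like. The X86MemoryFoldTableEntry field layout and the opcode naming convention match that era of X86InstrInfo.cpp, but the enum stub, the flag values, and the lookup helper are illustrative assumptions, not the committed code.

#include <algorithm>
#include <cstdint>

// Stand-in for the TableGen-generated opcode enum (normally emitted into
// X86GenInstrInfo.inc); the names follow the AVX512_VPOPCNTDQ rr/rm
// convention, but the numeric values here are purely illustrative.
namespace X86 {
enum : uint16_t {
  VPOPCNTDZ128rr, VPOPCNTDZ128rm,
  VPOPCNTDZ256rr, VPOPCNTDZ256rm,
  VPOPCNTQZ128rr, VPOPCNTQZ128rm,
  VPOPCNTQZ256rr, VPOPCNTQZ256rm,
};
} // namespace X86

// Maps a register-form opcode to the memory-form opcode that the stack-slot
// folder may rewrite it into when the source operand has been spilled.
struct X86MemoryFoldTableEntry {
  uint16_t RegOp;  // reg-reg instruction
  uint16_t MemOp;  // equivalent reg-mem instruction
  uint16_t Flags;  // alignment/special-case flags (none needed here)
};

// Table for instructions whose single source operand can be folded. The
// commit title covers only the 128/256-bit forms; the zmm tests below
// exercise 512-bit entries that were presumably already present.
static const X86MemoryFoldTableEntry MemoryFoldTable1[] = {
    // ... existing entries, kept sorted by RegOp in this sketch ...
    {X86::VPOPCNTDZ128rr, X86::VPOPCNTDZ128rm, 0},
    {X86::VPOPCNTDZ256rr, X86::VPOPCNTDZ256rm, 0},
    {X86::VPOPCNTQZ128rr, X86::VPOPCNTQZ128rm, 0},
    {X86::VPOPCNTQZ256rr, X86::VPOPCNTQZ256rm, 0},
};

// Hypothetical lookup helper: given a register-form opcode, find the entry
// describing its foldable memory form (nullptr if the fold is unsupported).
const X86MemoryFoldTableEntry *lookupUnaryFoldEntry(uint16_t RegOp) {
  const auto *End = std::end(MemoryFoldTable1);
  const auto *It = std::lower_bound(
      std::begin(MemoryFoldTable1), End, RegOp,
      [](const X86MemoryFoldTableEntry &E, uint16_t Op) { return E.RegOp < Op; });
  return (It != End && It->RegOp == RegOp) ? It : nullptr;
}

Each new test below forces this fold to fire: an inline-asm "nop" marked sideeffect clobbers xmm1 through xmm31 plus flags, so no vector register other than the asm result survives the call and %a0 must be spilled; the CHECK line then requires that the reload of %a0 be folded directly into vpopcntd/vpopcntq, which is what the "Folded Reload" assembly comment matched by FileCheck indicates.
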
Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/X86/stack-folding-int-avx512.ll    20
-rw-r--r--  llvm/test/CodeGen/X86/stack-folding-int-avx512vl.ll  38
2 files changed, 56 insertions(+), 2 deletions(-)
diff --git a/llvm/test/CodeGen/X86/stack-folding-int-avx512.ll b/llvm/test/CodeGen/X86/stack-folding-int-avx512.ll
index 6bde51286dc..284021ac504 100644
--- a/llvm/test/CodeGen/X86/stack-folding-int-avx512.ll
+++ b/llvm/test/CodeGen/X86/stack-folding-int-avx512.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vbmi,+avx512cd < %s | FileCheck %s
+; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512dq,+avx512vbmi,+avx512cd,+avx512vpopcntdq < %s | FileCheck %s
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-unknown"
@@ -1214,6 +1214,24 @@ define <8 x i64> @stack_fold_pmovzxwq_maskz_zmm(<8 x i16> %a0, i8 %mask) {
  ret <8 x i64> %4
 }
 
+define <16 x i32> @stack_fold_vpopcntd(<16 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_vpopcntd
+ ;CHECK: vpopcntd {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <16 x i32> @llvm.ctpop.v16i32(<16 x i32> %a0)
+ ret <16 x i32> %2
+}
+declare <16 x i32> @llvm.ctpop.v16i32(<16 x i32>) nounwind readonly
+
+define <8 x i64> @stack_fold_vpopcntq(<8 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_vpopcntq
+ ;CHECK: vpopcntq {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i64> @llvm.ctpop.v8i64(<8 x i64> %a0)
+ ret <8 x i64> %2
+}
+declare <8 x i64> @llvm.ctpop.v8i64(<8 x i64>) nounwind readnone
+
 define <8 x i64> @stack_fold_psadbw(<64 x i8> %a0, <64 x i8> %a1) {
  ;CHECK-LABEL: stack_fold_psadbw
  ;CHECK: vpsadbw {{-?[0-9]*}}(%rsp), {{%zmm[0-9][0-9]*}}, {{%zmm[0-9][0-9]*}} {{.*#+}} 64-byte Folded Reload
diff --git a/llvm/test/CodeGen/X86/stack-folding-int-avx512vl.ll b/llvm/test/CodeGen/X86/stack-folding-int-avx512vl.ll
index a55288fee51..4bfad5154ef 100644
--- a/llvm/test/CodeGen/X86/stack-folding-int-avx512vl.ll
+++ b/llvm/test/CodeGen/X86/stack-folding-int-avx512vl.ll
@@ -1,4 +1,4 @@
-; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw,+avx512dq,+avx512vbmi,+avx512cd < %s | FileCheck %s
+; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw,+avx512dq,+avx512vbmi,+avx512cd,+avx512vpopcntdq < %s | FileCheck %s
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-unknown"
@@ -1620,6 +1620,42 @@ define <4 x i64> @stack_fold_pmuludq_ymm_maskz(<8 x i32> %a0, <8 x i32> %a1, i8
  ret <4 x i64> %5
 }
 
+define <4 x i32> @stack_fold_vpopcntd(<4 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_vpopcntd
+ ;CHECK: vpopcntd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %a0)
+ ret <4 x i32> %2
+}
+declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>) nounwind readonly
+
+define <8 x i32> @stack_fold_vpopcntd_ymm(<8 x i32> %a0) {
+ ;CHECK-LABEL: stack_fold_vpopcntd_ymm
+ ;CHECK: vpopcntd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %a0)
+ ret <8 x i32> %2
+}
+declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>) nounwind readonly
+
+define <2 x i64> @stack_fold_vpopcntq(<2 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_vpopcntq
+ ;CHECK: vpopcntq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a0)
+ ret <2 x i64> %2
+}
+declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) nounwind readnone
+
+define <4 x i64> @stack_fold_vpopcntq_ymm(<4 x i64> %a0) {
+ ;CHECK-LABEL: stack_fold_vpopcntq_ymm
+ ;CHECK: vpopcntq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm1},~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+ %2 = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %a0)
+ ret <4 x i64> %2
+}
+declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>) nounwind readnone
+
 define <2 x i64> @stack_fold_psadbw(<16 x i8> %a0, <16 x i8> %a1) {
  ;CHECK-LABEL: stack_fold_psadbw
  ;CHECK: vpsadbw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload