| author | Craig Topper <craig.topper@gmail.com> | 2016-09-03 04:37:50 +0000 |
|---|---|---|
| committer | Craig Topper <craig.topper@gmail.com> | 2016-09-03 04:37:50 +0000 |
| commit | 892ce56901c54bd3f62c8dd9e1dd7e0992bed90a (patch) | |
| tree | 854f78673bc7206af1d8f4fdbe984e2ea6033d20 /llvm/test | |
| parent | 55fbe483d66c1a390b3e7ec9115393e9510722ae (diff) | |
| download | bcm5719-llvm-892ce56901c54bd3f62c8dd9e1dd7e0992bed90a.tar.gz bcm5719-llvm-892ce56901c54bd3f62c8dd9e1dd7e0992bed90a.zip | |
[AVX-512] Add EVEX encoded VPCMPEQ and VPCMPGT to the load folding tables.
llvm-svn: 280581
Diffstat (limited to 'llvm/test')
| -rw-r--r-- | llvm/test/CodeGen/X86/stack-folding-int-avx512vl.ll | 48 |
1 files changed, 48 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/stack-folding-int-avx512vl.ll b/llvm/test/CodeGen/X86/stack-folding-int-avx512vl.ll
new file mode 100644
index 00000000000..f92f9593f13
--- /dev/null
+++ b/llvm/test/CodeGen/X86/stack-folding-int-avx512vl.ll
@@ -0,0 +1,48 @@
+; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+; Stack reload folding tests.
+;
+; By including a nop call with sideeffects we can force a partial register spill of the
+; relevant registers and check that the reload is correctly folded into the instruction.
+
+define i16 @stack_fold_pcmpeqb(<16 x i8> %a0, <16 x i8> %a1) {
+  ;CHECK-LABEL: stack_fold_pcmpeqb
+  ;CHECK: vpcmpeqb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = icmp eq <16 x i8> %a0, %a1
+  %3 = bitcast <16 x i1> %2 to i16
+  ret i16 %3
+}
+
+define i8 @stack_fold_pcmpeqd(<4 x i32> %a0, <4 x i32> %a1) {
+  ;CHECK-LABEL: stack_fold_pcmpeqd
+  ;CHECK: vpcmpeqd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = icmp eq <4 x i32> %a0, %a1
+  %3 = shufflevector <4 x i1> %2, <4 x i1> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %4 = bitcast <8 x i1> %3 to i8
+  ret i8 %4
+}
+
+define i8 @stack_fold_pcmpeqq(<2 x i64> %a0, <2 x i64> %a1) {
+  ;CHECK-LABEL: stack_fold_pcmpeqq
+  ;CHECK: vpcmpeqq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = icmp eq <2 x i64> %a0, %a1
+  %3 = shufflevector <2 x i1> %2, <2 x i1> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+  %4 = bitcast <8 x i1> %3 to i8
+  ret i8 %4
+}
+
+define i8 @stack_fold_pcmpeqw(<8 x i16> %a0, <8 x i16> %a1) {
+  ;CHECK-LABEL: stack_fold_pcmpeqw
+  ;CHECK: vpcmpeqw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = icmp eq <8 x i16> %a0, %a1
+  %3 = bitcast <8 x i1> %2 to i8
+  ret i8 %3
+}
+
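The commit title also mentions VPCMPGT, but the new test file above only exercises the VPCMPEQ forms. As an illustration only (not part of this commit), a signed-greater-than test following the same pattern could look like the sketch below; the function name `stack_fold_pcmpgtd` and its CHECK lines are hypothetical and simply mirror the `stack_fold_pcmpeqd` test.

```llvm
; HYPOTHETICAL sketch, not part of this commit: a VPCMPGT stack-folding test
; written in the same style as the stack_fold_pcmpeq* tests above.
define i8 @stack_fold_pcmpgtd(<4 x i32> %a0, <4 x i32> %a1) {
  ;CHECK-LABEL: stack_fold_pcmpgtd
  ;CHECK: vpcmpgtd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 16-byte Folded Reload
  ; The sideeffect nop clobbers xmm2-xmm31, forcing a spill and reload of a
  ; compare operand, per the comment at the top of the test file.
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  ; Signed greater-than compare; with AVX-512VL this should select the EVEX
  ; VPCMPGTD, whose memory operand the folding tables can now fold from the
  ; stack slot.
  %2 = icmp sgt <4 x i32> %a0, %a1
  ; Widen the <4 x i1> mask to <8 x i1> so it can be bitcast to i8, exactly as
  ; stack_fold_pcmpeqd does above.
  %3 = shufflevector <4 x i1> %2, <4 x i1> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %4 = bitcast <8 x i1> %3 to i8
  ret i8 %4
}
```

The sketch reuses the same inline-asm clobber list and FileCheck reload pattern as the committed tests, changing only the compare predicate to `sgt` to target the VPCMPGT encoding named in the commit message.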

