author    Craig Topper <craig.topper@gmail.com>  2016-07-22 05:00:39 +0000
committer Craig Topper <craig.topper@gmail.com>  2016-07-22 05:00:39 +0000
commit    0b90756b0a250c5ced59170fdfae3c810599ec7f (patch)
tree      b7e708903cb430d8b998b7723642e21cc9ede494 /llvm/test
parent    ab13b33dedb60ecd8f19afb0d9d519a4cd15be16 (diff)
[AVX512] Add load folding for some AVX512VL logic and arithmetic instructions.
llvm-svn: 276391
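
The tests added here all follow the stack-folding pattern this file already uses: an inline asm "nop" clobbers xmm2-xmm15, forcing one of the vector arguments to be spilled across the call, and a FileCheck line requires the reload to be folded into the memory operand of the instruction under test. Below is a minimal sketch of that pattern for reference; the function name is illustrative, and the RUN-line flags in the comment are an assumption (the file's real RUN line is outside the hunks shown here).

; Sketch only -- assumed driver flags, not part of this commit:
;   llc -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl < %s | FileCheck %s

define <2 x double> @stack_fold_example(<2 x double> %a0, <2 x double> %a1) {
  ;CHECK-LABEL: stack_fold_example
  ;CHECK: vaddpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
  ; Clobbering xmm2-xmm15 across the asm forces a partial register spill of one
  ; vector argument; the CHECK above then requires the reload to be folded into
  ; the memory operand of vaddpd rather than reloaded into a register first.
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  %2 = fadd <2 x double> %a0, %a1
  ret <2 x double> %2
}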
Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll | 220
1 file changed, 220 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll b/llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
index d79babfc38c..5dec0b8fa11 100644
--- a/llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
+++ b/llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
@@ -8,6 +8,38 @@ target triple = "x86_64-unknown-unknown"
; By including a nop call with side effects we can force a partial register spill of the
; relevant registers and check that the reload is correctly folded into the instruction.
+define <2 x double> @stack_fold_addpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_addpd
+ ;CHECK: vaddpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fadd <2 x double> %a0, %a1
+ ret <2 x double> %2
+}
+
+define <4 x double> @stack_fold_addpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_addpd_ymm
+ ;CHECK: vaddpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fadd <4 x double> %a0, %a1
+ ret <4 x double> %2
+}
+
+define <4 x float> @stack_fold_addps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_addps
+ ;CHECK: vaddps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fadd <4 x float> %a0, %a1
+ ret <4 x float> %2
+}
+
+define <8 x float> @stack_fold_addps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_addps_ymm
+ ;CHECK: vaddps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fadd <8 x float> %a0, %a1
+ ret <8 x float> %2
+}
+
define double @stack_fold_addsd(double %a0, double %a1) {
;CHECK-LABEL: stack_fold_addsd
;CHECK: vaddsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
@@ -42,6 +74,58 @@ define <4 x float> @stack_fold_addss_int(<4 x float> %a0, <4 x float> %a1) {
}
declare <4 x float> @llvm.x86.sse.add.ss(<4 x float>, <4 x float>) nounwind readnone
+define <2 x double> @stack_fold_andpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_andpd
+ ;CHECK: vpandq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <2 x double> %a0 to <2 x i64>
+ %3 = bitcast <2 x double> %a1 to <2 x i64>
+ %4 = and <2 x i64> %2, %3
+ %5 = bitcast <2 x i64> %4 to <2 x double>
+ ; fadd forces execution domain
+ %6 = fadd <2 x double> %5, <double 0x0, double 0x0>
+ ret <2 x double> %6
+}
+
+define <4 x double> @stack_fold_andpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_andpd_ymm
+ ;CHECK: vpandq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <4 x double> %a0 to <4 x i64>
+ %3 = bitcast <4 x double> %a1 to <4 x i64>
+ %4 = and <4 x i64> %2, %3
+ %5 = bitcast <4 x i64> %4 to <4 x double>
+ ; fadd forces execution domain
+ %6 = fadd <4 x double> %5, <double 0x0, double 0x0, double 0x0, double 0x0>
+ ret <4 x double> %6
+}
+
+define <4 x float> @stack_fold_andps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_andps
+ ;CHECK: vpandd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <4 x float> %a0 to <4 x i32>
+ %3 = bitcast <4 x float> %a1 to <4 x i32>
+ %4 = and <4 x i32> %2, %3
+ %5 = bitcast <4 x i32> %4 to <4 x float>
+ ; fadd forces execution domain
+ %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+ ret <4 x float> %6
+}
+
+define <8 x float> @stack_fold_andps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_andps_ymm
+ ;CHECK: vpandd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <8 x float> %a0 to <8 x i32>
+ %3 = bitcast <8 x float> %a1 to <8 x i32>
+ %4 = and <8 x i32> %2, %3
+ %5 = bitcast <8 x i32> %4 to <8 x float>
+ ; fadd forces execution domain
+ %6 = fadd <8 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0>
+ ret <8 x float> %6
+}
+
define <2 x double> @stack_fold_divsd_int(<2 x double> %a0, <2 x double> %a1) {
;CHECK-LABEL: stack_fold_divsd_int
;CHECK: vdivsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
@@ -112,6 +196,90 @@ define <4 x float> @stack_fold_mulss_int(<4 x float> %a0, <4 x float> %a1) {
}
declare <4 x float> @llvm.x86.sse.mul.ss(<4 x float>, <4 x float>) nounwind readnone
+define <2 x double> @stack_fold_orpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_orpd
+ ;CHECK: vporq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <2 x double> %a0 to <2 x i64>
+ %3 = bitcast <2 x double> %a1 to <2 x i64>
+ %4 = or <2 x i64> %2, %3
+ %5 = bitcast <2 x i64> %4 to <2 x double>
+ ; fadd forces execution domain
+ %6 = fadd <2 x double> %5, <double 0x0, double 0x0>
+ ret <2 x double> %6
+}
+
+define <4 x double> @stack_fold_orpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_orpd_ymm
+ ;CHECK: vporq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <4 x double> %a0 to <4 x i64>
+ %3 = bitcast <4 x double> %a1 to <4 x i64>
+ %4 = or <4 x i64> %2, %3
+ %5 = bitcast <4 x i64> %4 to <4 x double>
+ ; fadd forces execution domain
+ %6 = fadd <4 x double> %5, <double 0x0, double 0x0, double 0x0, double 0x0>
+ ret <4 x double> %6
+}
+
+define <4 x float> @stack_fold_orps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_orps
+ ;CHECK: vpord {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <4 x float> %a0 to <4 x i32>
+ %3 = bitcast <4 x float> %a1 to <4 x i32>
+ %4 = or <4 x i32> %2, %3
+ %5 = bitcast <4 x i32> %4 to <4 x float>
+ ; fadd forces execution domain
+ %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+ ret <4 x float> %6
+}
+
+define <8 x float> @stack_fold_orps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_orps_ymm
+ ;CHECK: vpord {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <8 x float> %a0 to <8 x i32>
+ %3 = bitcast <8 x float> %a1 to <8 x i32>
+ %4 = or <8 x i32> %2, %3
+ %5 = bitcast <8 x i32> %4 to <8 x float>
+ ; fadd forces execution domain
+ %6 = fadd <8 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0>
+ ret <8 x float> %6
+}
+
+define <2 x double> @stack_fold_subpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_subpd
+ ;CHECK: vsubpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fsub <2 x double> %a0, %a1
+ ret <2 x double> %2
+}
+
+define <4 x double> @stack_fold_subpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_subpd_ymm
+ ;CHECK: vsubpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fsub <4 x double> %a0, %a1
+ ret <4 x double> %2
+}
+
+define <4 x float> @stack_fold_subps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_subps
+ ;CHECK: vsubps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fsub <4 x float> %a0, %a1
+ ret <4 x float> %2
+}
+
+define <8 x float> @stack_fold_subps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_subps_ymm
+ ;CHECK: vsubps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = fsub <8 x float> %a0, %a1
+ ret <8 x float> %2
+}
+
define double @stack_fold_subsd(double %a0, double %a1) {
;CHECK-LABEL: stack_fold_subsd
;CHECK: vsubsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
@@ -145,3 +313,55 @@ define <4 x float> @stack_fold_subss_int(<4 x float> %a0, <4 x float> %a1) {
ret <4 x float> %2
}
declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_xorpd(<2 x double> %a0, <2 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_xorpd
+ ;CHECK: vpxorq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <2 x double> %a0 to <2 x i64>
+ %3 = bitcast <2 x double> %a1 to <2 x i64>
+ %4 = xor <2 x i64> %2, %3
+ %5 = bitcast <2 x i64> %4 to <2 x double>
+ ; fadd forces execution domain
+ %6 = fadd <2 x double> %5, <double 0x0, double 0x0>
+ ret <2 x double> %6
+}
+
+define <4 x double> @stack_fold_xorpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+ ;CHECK-LABEL: stack_fold_xorpd_ymm
+ ;CHECK: vpxorq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <4 x double> %a0 to <4 x i64>
+ %3 = bitcast <4 x double> %a1 to <4 x i64>
+ %4 = xor <4 x i64> %2, %3
+ %5 = bitcast <4 x i64> %4 to <4 x double>
+ ; fadd forces execution domain
+ %6 = fadd <4 x double> %5, <double 0x0, double 0x0, double 0x0, double 0x0>
+ ret <4 x double> %6
+}
+
+define <4 x float> @stack_fold_xorps(<4 x float> %a0, <4 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_xorps
+ ;CHECK: vpxord {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <4 x float> %a0 to <4 x i32>
+ %3 = bitcast <4 x float> %a1 to <4 x i32>
+ %4 = xor <4 x i32> %2, %3
+ %5 = bitcast <4 x i32> %4 to <4 x float>
+ ; fadd forces execution domain
+ %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+ ret <4 x float> %6
+}
+
+define <8 x float> @stack_fold_xorps_ymm(<8 x float> %a0, <8 x float> %a1) {
+ ;CHECK-LABEL: stack_fold_xorps_ymm
+ ;CHECK: vpxord {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+ %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+ %2 = bitcast <8 x float> %a0 to <8 x i32>
+ %3 = bitcast <8 x float> %a1 to <8 x i32>
+ %4 = xor <8 x i32> %2, %3
+ %5 = bitcast <8 x i32> %4 to <8 x float>
+ ; fadd forces execution domain
+ %6 = fadd <8 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0>
+ ret <8 x float> %6
+}