-rw-r--r--  llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll | 56
1 file changed, 56 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll b/llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
index 738f786f584..891682047a8 100644
--- a/llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
+++ b/llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
@@ -78,6 +78,62 @@ define <4 x float> @stack_fold_addss_int(<4 x float> %a0, <4 x float> %a1) {
   ret <4 x float> %5
 }
 
+define <2 x double> @stack_fold_andnpd(<2 x double> %a0, <2 x double> %a1) {
+  ;CHECK-LABEL: stack_fold_andnpd
+  ;CHECK: vpandnq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = bitcast <2 x double> %a0 to <2 x i64>
+  %3 = bitcast <2 x double> %a1 to <2 x i64>
+  %4 = xor <2 x i64> %2, <i64 -1, i64 -1>
+  %5 = and <2 x i64> %4, %3
+  %6 = bitcast <2 x i64> %5 to <2 x double>
+  ; fadd forces execution domain
+  %7 = fadd <2 x double> %6, <double 0x0, double 0x0>
+  ret <2 x double> %7
+}
+
+define <4 x double> @stack_fold_andnpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+  ;CHECK-LABEL: stack_fold_andnpd_ymm
+  ;CHECK: vpandnq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = bitcast <4 x double> %a0 to <4 x i64>
+  %3 = bitcast <4 x double> %a1 to <4 x i64>
+  %4 = xor <4 x i64> %2, <i64 -1, i64 -1, i64 -1, i64 -1>
+  %5 = and <4 x i64> %4, %3
+  %6 = bitcast <4 x i64> %5 to <4 x double>
+  ; fadd forces execution domain
+  %7 = fadd <4 x double> %6, <double 0x0, double 0x0, double 0x0, double 0x0>
+  ret <4 x double> %7
+}
+
+define <4 x float> @stack_fold_andnps(<4 x float> %a0, <4 x float> %a1) {
+  ;CHECK-LABEL: stack_fold_andnps
+  ;CHECK: vpandnq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = bitcast <4 x float> %a0 to <2 x i64>
+  %3 = bitcast <4 x float> %a1 to <2 x i64>
+  %4 = xor <2 x i64> %2, <i64 -1, i64 -1>
+  %5 = and <2 x i64> %4, %3
+  %6 = bitcast <2 x i64> %5 to <4 x float>
+  ; fadd forces execution domain
+  %7 = fadd <4 x float> %6, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %7
+}
+
+define <8 x float> @stack_fold_andnps_ymm(<8 x float> %a0, <8 x float> %a1) {
+  ;CHECK-LABEL: stack_fold_andnps_ymm
+  ;CHECK: vpandnq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = bitcast <8 x float> %a0 to <4 x i64>
+  %3 = bitcast <8 x float> %a1 to <4 x i64>
+  %4 = xor <4 x i64> %2, <i64 -1, i64 -1, i64 -1, i64 -1>
+  %5 = and <4 x i64> %4, %3
+  %6 = bitcast <4 x i64> %5 to <8 x float>
+  ; fadd forces execution domain
+  %7 = fadd <8 x float> %6, <float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <8 x float> %7
+}
+
 define <2 x double> @stack_fold_andpd(<2 x double> %a0, <2 x double> %a1) {
   ;CHECK-LABEL: stack_fold_andpd
   ;CHECK: vpandq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
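
The new tests follow the same stack-folding pattern used throughout this file: the inline asm "nop" clobbers xmm2-xmm15 (and flags), which forces a spill of one of the vector arguments to the stack across the asm; the CHECK line then verifies that the reload is folded directly into the logic instruction as a 16- or 32-byte memory operand, and the trailing fadd forces the result into a floating-point execution domain (as the inline comment notes). A minimal annotated sketch of that pattern is below; the function name is hypothetical and not part of the commit, and the comments are an interpretation of how the test works rather than text from the source.

define <2 x double> @stack_fold_example(<2 x double> %a0, <2 x double> %a1) {
  ;CHECK-LABEL: stack_fold_example
  ; Expect the reload to be folded into the instruction as a 16-byte memory operand.
  ;CHECK: vpandnq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
  ; Clobbering xmm2-xmm15 forces a spill/reload of an argument around the asm.
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
  ; ANDN is written as (NOT %a0) AND %a1 on the bitcast integer vectors.
  %2 = bitcast <2 x double> %a0 to <2 x i64>
  %3 = bitcast <2 x double> %a1 to <2 x i64>
  %4 = xor <2 x i64> %2, <i64 -1, i64 -1>
  %5 = and <2 x i64> %4, %3
  %6 = bitcast <2 x i64> %5 to <2 x double>
  ; fadd forces execution domain
  %7 = fadd <2 x double> %6, <double 0x0, double 0x0>
  ret <2 x double> %7
}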

