-rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp                |  36
-rw-r--r--  llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll  | 220
2 files changed, 256 insertions, 0 deletions
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 052191a3bff..ea6a28d7d75 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -1744,6 +1744,42 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VADDPDZ256rr, X86::VADDPDZ256rm, 0 },
     { X86::VADDPSZ128rr, X86::VADDPSZ128rm, 0 },
     { X86::VADDPSZ256rr, X86::VADDPSZ256rm, 0 },
+    { X86::VANDPDZ128rr, X86::VANDPDZ128rm, 0 },
+    { X86::VANDPDZ256rr, X86::VANDPDZ256rm, 0 },
+    { X86::VANDPSZ128rr, X86::VANDPSZ128rm, 0 },
+    { X86::VANDPSZ256rr, X86::VANDPSZ256rm, 0 },
+    { X86::VANDNPDZ128rr, X86::VANDNPDZ128rm, 0 },
+    { X86::VANDNPDZ256rr, X86::VANDNPDZ256rm, 0 },
+    { X86::VANDNPSZ128rr, X86::VANDNPSZ128rm, 0 },
+    { X86::VANDNPSZ256rr, X86::VANDNPSZ256rm, 0 },
+    { X86::VORPDZ128rr, X86::VORPDZ128rm, 0 },
+    { X86::VORPDZ256rr, X86::VORPDZ256rm, 0 },
+    { X86::VORPSZ128rr, X86::VORPSZ128rm, 0 },
+    { X86::VORPSZ256rr, X86::VORPSZ256rm, 0 },
+    { X86::VPANDDZ128rr, X86::VPANDDZ128rm, 0 },
+    { X86::VPANDDZ256rr, X86::VPANDDZ256rm, 0 },
+    { X86::VPANDQZ128rr, X86::VPANDQZ128rm, 0 },
+    { X86::VPANDQZ256rr, X86::VPANDQZ256rm, 0 },
+    { X86::VPANDNDZ128rr, X86::VPANDNDZ128rm, 0 },
+    { X86::VPANDNDZ256rr, X86::VPANDNDZ256rm, 0 },
+    { X86::VPANDNQZ128rr, X86::VPANDNQZ128rm, 0 },
+    { X86::VPANDNQZ256rr, X86::VPANDNQZ256rm, 0 },
+    { X86::VPORDZ128rr, X86::VPORDZ128rm, 0 },
+    { X86::VPORDZ256rr, X86::VPORDZ256rm, 0 },
+    { X86::VPORQZ128rr, X86::VPORQZ128rm, 0 },
+    { X86::VPORQZ256rr, X86::VPORQZ256rm, 0 },
+    { X86::VPXORDZ128rr, X86::VPXORDZ128rm, 0 },
+    { X86::VPXORDZ256rr, X86::VPXORDZ256rm, 0 },
+    { X86::VPXORQZ128rr, X86::VPXORQZ128rm, 0 },
+    { X86::VPXORQZ256rr, X86::VPXORQZ256rm, 0 },
+    { X86::VSUBPDZ128rr, X86::VSUBPDZ128rm, 0 },
+    { X86::VSUBPDZ256rr, X86::VSUBPDZ256rm, 0 },
+    { X86::VSUBPSZ128rr, X86::VSUBPSZ128rm, 0 },
+    { X86::VSUBPSZ256rr, X86::VSUBPSZ256rm, 0 },
+    { X86::VXORPDZ128rr, X86::VXORPDZ128rm, 0 },
+    { X86::VXORPDZ256rr, X86::VXORPDZ256rm, 0 },
+    { X86::VXORPSZ128rr, X86::VXORPSZ128rm, 0 },
+    { X86::VXORPSZ256rr, X86::VXORPSZ256rm, 0 },
 
     // AES foldable instructions
     { X86::AESDECLASTrr, X86::AESDECLASTrm, TB_ALIGN_16 },
diff --git a/llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll b/llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
index d79babfc38c..5dec0b8fa11 100644
--- a/llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
+++ b/llvm/test/CodeGen/X86/stack-folding-fp-avx512vl.ll
@@ -8,6 +8,38 @@ target triple = "x86_64-unknown-unknown"
 ; By including a nop call with sideeffects we can force a partial register spill of the
 ; relevant registers and check that the reload is correctly folded into the instruction.
 
+define <2 x double> @stack_fold_addpd(<2 x double> %a0, <2 x double> %a1) {
+  ;CHECK-LABEL: stack_fold_addpd
+  ;CHECK: vaddpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = fadd <2 x double> %a0, %a1
+  ret <2 x double> %2
+}
+
+define <4 x double> @stack_fold_addpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+  ;CHECK-LABEL: stack_fold_addpd_ymm
+  ;CHECK: vaddpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = fadd <4 x double> %a0, %a1
+  ret <4 x double> %2
+}
+
+define <4 x float> @stack_fold_addps(<4 x float> %a0, <4 x float> %a1) {
+  ;CHECK-LABEL: stack_fold_addps
+  ;CHECK: vaddps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = fadd <4 x float> %a0, %a1
+  ret <4 x float> %2
+}
+
+define <8 x float> @stack_fold_addps_ymm(<8 x float> %a0, <8 x float> %a1) {
+  ;CHECK-LABEL: stack_fold_addps_ymm
+  ;CHECK: vaddps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = fadd <8 x float> %a0, %a1
+  ret <8 x float> %2
+}
+
 define double @stack_fold_addsd(double %a0, double %a1) {
   ;CHECK-LABEL: stack_fold_addsd
   ;CHECK: vaddsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
@@ -42,6 +74,58 @@ define <4 x float> @stack_fold_addss_int(<4 x float> %a0, <4 x float> %a1) {
 }
 declare <4 x float> @llvm.x86.sse.add.ss(<4 x float>, <4 x float>) nounwind readnone
 
+define <2 x double> @stack_fold_andpd(<2 x double> %a0, <2 x double> %a1) {
+  ;CHECK-LABEL: stack_fold_andpd
+  ;CHECK: vpandq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = bitcast <2 x double> %a0 to <2 x i64>
+  %3 = bitcast <2 x double> %a1 to <2 x i64>
+  %4 = and <2 x i64> %2, %3
+  %5 = bitcast <2 x i64> %4 to <2 x double>
+  ; fadd forces execution domain
+  %6 = fadd <2 x double> %5, <double 0x0, double 0x0>
+  ret <2 x double> %6
+}
+
+define <4 x double> @stack_fold_andpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+  ;CHECK-LABEL: stack_fold_andpd_ymm
+  ;CHECK: vpandq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = bitcast <4 x double> %a0 to <4 x i64>
+  %3 = bitcast <4 x double> %a1 to <4 x i64>
+  %4 = and <4 x i64> %2, %3
+  %5 = bitcast <4 x i64> %4 to <4 x double>
+  ; fadd forces execution domain
+  %6 = fadd <4 x double> %5, <double 0x0, double 0x0, double 0x0, double 0x0>
+  ret <4 x double> %6
+}
+
+define <4 x float> @stack_fold_andps(<4 x float> %a0, <4 x float> %a1) {
+  ;CHECK-LABEL: stack_fold_andps
+  ;CHECK: vpandd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = bitcast <4 x float> %a0 to <4 x i32>
+  %3 = bitcast <4 x float> %a1 to <4 x i32>
+  %4 = and <4 x i32> %2, %3
+  %5 = bitcast <4 x i32> %4 to <4 x float>
+  ; fadd forces execution domain
+  %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %6
+}
+
+define <8 x float> @stack_fold_andps_ymm(<8 x float> %a0, <8 x float> %a1) {
+  ;CHECK-LABEL: stack_fold_andps_ymm
+  ;CHECK: vpandd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = bitcast <8 x float> %a0 to <8 x i32>
+  %3 = bitcast <8 x float> %a1 to <8 x i32>
+  %4 = and <8 x i32> %2, %3
+  %5 = bitcast <8 x i32> %4 to <8 x float>
+  ; fadd forces execution domain
+  %6 = fadd <8 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <8 x float> %6
+}
+
 define <2 x double> @stack_fold_divsd_int(<2 x double> %a0, <2 x double> %a1) {
   ;CHECK-LABEL: stack_fold_divsd_int
   ;CHECK: vdivsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
@@ -112,6 +196,90 @@ define <4 x float> @stack_fold_mulss_int(<4 x float> %a0, <4 x float> %a1) {
 }
 declare <4 x float> @llvm.x86.sse.mul.ss(<4 x float>, <4 x float>) nounwind readnone
 
+define <2 x double> @stack_fold_orpd(<2 x double> %a0, <2 x double> %a1) {
+  ;CHECK-LABEL: stack_fold_orpd
+  ;CHECK: vporq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = bitcast <2 x double> %a0 to <2 x i64>
+  %3 = bitcast <2 x double> %a1 to <2 x i64>
+  %4 = or <2 x i64> %2, %3
+  %5 = bitcast <2 x i64> %4 to <2 x double>
+  ; fadd forces execution domain
+  %6 = fadd <2 x double> %5, <double 0x0, double 0x0>
+  ret <2 x double> %6
+}
+
+define <4 x double> @stack_fold_orpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+  ;CHECK-LABEL: stack_fold_orpd_ymm
+  ;CHECK: vporq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = bitcast <4 x double> %a0 to <4 x i64>
+  %3 = bitcast <4 x double> %a1 to <4 x i64>
+  %4 = or <4 x i64> %2, %3
+  %5 = bitcast <4 x i64> %4 to <4 x double>
+  ; fadd forces execution domain
+  %6 = fadd <4 x double> %5, <double 0x0, double 0x0, double 0x0, double 0x0>
+  ret <4 x double> %6
+}
+
+define <4 x float> @stack_fold_orps(<4 x float> %a0, <4 x float> %a1) {
+  ;CHECK-LABEL: stack_fold_orps
+  ;CHECK: vpord {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = bitcast <4 x float> %a0 to <4 x i32>
+  %3 = bitcast <4 x float> %a1 to <4 x i32>
+  %4 = or <4 x i32> %2, %3
+  %5 = bitcast <4 x i32> %4 to <4 x float>
+  ; fadd forces execution domain
+  %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %6
+}
+
+define <8 x float> @stack_fold_orps_ymm(<8 x float> %a0, <8 x float> %a1) {
+  ;CHECK-LABEL: stack_fold_orps_ymm
+  ;CHECK: vpord {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = bitcast <8 x float> %a0 to <8 x i32>
+  %3 = bitcast <8 x float> %a1 to <8 x i32>
+  %4 = or <8 x i32> %2, %3
+  %5 = bitcast <8 x i32> %4 to <8 x float>
+  ; fadd forces execution domain
+  %6 = fadd <8 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <8 x float> %6
+}
+
+define <2 x double> @stack_fold_subpd(<2 x double> %a0, <2 x double> %a1) {
+  ;CHECK-LABEL: stack_fold_subpd
+  ;CHECK: vsubpd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = fsub <2 x double> %a0, %a1
+  ret <2 x double> %2
+}
+
+define <4 x double> @stack_fold_subpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+  ;CHECK-LABEL: stack_fold_subpd_ymm
+  ;CHECK: vsubpd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = fsub <4 x double> %a0, %a1
+  ret <4 x double> %2
+}
+
+define <4 x float> @stack_fold_subps(<4 x float> %a0, <4 x float> %a1) {
+  ;CHECK-LABEL: stack_fold_subps
+  ;CHECK: vsubps {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = fsub <4 x float> %a0, %a1
+  ret <4 x float> %2
+}
+
+define <8 x float> @stack_fold_subps_ymm(<8 x float> %a0, <8 x float> %a1) {
+  ;CHECK-LABEL: stack_fold_subps_ymm
+  ;CHECK: vsubps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = fsub <8 x float> %a0, %a1
+  ret <8 x float> %2
+}
+
 define double @stack_fold_subsd(double %a0, double %a1) {
   ;CHECK-LABEL: stack_fold_subsd
   ;CHECK: vsubsd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 8-byte Folded Reload
@@ -145,3 +313,55 @@ define <4 x float> @stack_fold_subss_int(<4 x float> %a0, <4 x float> %a1) {
   ret <4 x float> %2
 }
 declare <4 x float> @llvm.x86.sse.sub.ss(<4 x float>, <4 x float>) nounwind readnone
+
+define <2 x double> @stack_fold_xorpd(<2 x double> %a0, <2 x double> %a1) {
+  ;CHECK-LABEL: stack_fold_xorpd
+  ;CHECK: vpxorq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = bitcast <2 x double> %a0 to <2 x i64>
+  %3 = bitcast <2 x double> %a1 to <2 x i64>
+  %4 = xor <2 x i64> %2, %3
+  %5 = bitcast <2 x i64> %4 to <2 x double>
+  ; fadd forces execution domain
+  %6 = fadd <2 x double> %5, <double 0x0, double 0x0>
+  ret <2 x double> %6
+}
+
+define <4 x double> @stack_fold_xorpd_ymm(<4 x double> %a0, <4 x double> %a1) {
+  ;CHECK-LABEL: stack_fold_xorpd_ymm
+  ;CHECK: vpxorq {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = bitcast <4 x double> %a0 to <4 x i64>
+  %3 = bitcast <4 x double> %a1 to <4 x i64>
+  %4 = xor <4 x i64> %2, %3
+  %5 = bitcast <4 x i64> %4 to <4 x double>
+  ; fadd forces execution domain
+  %6 = fadd <4 x double> %5, <double 0x0, double 0x0, double 0x0, double 0x0>
+  ret <4 x double> %6
+}
+
+define <4 x float> @stack_fold_xorps(<4 x float> %a0, <4 x float> %a1) {
+  ;CHECK-LABEL: stack_fold_xorps
+  ;CHECK: vpxord {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%xmm[0-9][0-9]*}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = bitcast <4 x float> %a0 to <4 x i32>
+  %3 = bitcast <4 x float> %a1 to <4 x i32>
+  %4 = xor <4 x i32> %2, %3
+  %5 = bitcast <4 x i32> %4 to <4 x float>
+  ; fadd forces execution domain
+  %6 = fadd <4 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <4 x float> %6
+}
+
+define <8 x float> @stack_fold_xorps_ymm(<8 x float> %a0, <8 x float> %a1) {
+  ;CHECK-LABEL: stack_fold_xorps_ymm
+  ;CHECK: vpxord {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
+  %2 = bitcast <8 x float> %a0 to <8 x i32>
+  %3 = bitcast <8 x float> %a1 to <8 x i32>
+  %4 = xor <8 x i32> %2, %3
+  %5 = bitcast <8 x i32> %4 to <8 x float>
+  ; fadd forces execution domain
+  %6 = fadd <8 x float> %5, <float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0, float 0x0>
+  ret <8 x float> %6
+}
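
Note on how the new table entries take effect: each added row pairs a register-register opcode with its register-memory twin, so that when one source operand has been spilled the reload can be folded into the instruction itself, which is what the "Folded Reload" CHECK lines above verify. The following is a minimal, self-contained C++ sketch of that idea only; the Opcode enum, FoldEntry struct, and foldedOpcode helper are illustrative stand-ins, not LLVM's real X86InstrInfo types or folding-table API.

// Sketch of a two-operand memory-folding table: reg-reg opcode -> reg-mem
// opcode, mirroring the structure of the entries added above.
#include <cstdint>
#include <iostream>
#include <optional>
#include <unordered_map>

// Hypothetical opcode identifiers standing in for X86::VANDPDZ128rr etc.
enum class Opcode : std::uint16_t {
  VANDPDZ128rr, VANDPDZ128rm,   // 128-bit AND, reg-reg / reg-mem forms
  VXORPSZ256rr, VXORPSZ256rm,   // 256-bit XOR, reg-reg / reg-mem forms
};

// One table row: register form, memory form (the flags/alignment field that
// entries such as the AES ones carry via TB_ALIGN_16 is omitted here).
struct FoldEntry {
  Opcode RegForm;
  Opcode MemForm;
};

static const FoldEntry FoldTable2[] = {
    {Opcode::VANDPDZ128rr, Opcode::VANDPDZ128rm},
    {Opcode::VXORPSZ256rr, Opcode::VXORPSZ256rm},
};

// If the given reg-reg opcode has a memory form, return it; otherwise the
// caller must keep the explicit reload before the instruction.
std::optional<Opcode> foldedOpcode(Opcode RegOp) {
  static const std::unordered_map<Opcode, Opcode> Map = [] {
    std::unordered_map<Opcode, Opcode> M;
    for (const FoldEntry &E : FoldTable2)
      M.emplace(E.RegForm, E.MemForm);
    return M;
  }();
  auto It = Map.find(RegOp);
  if (It == Map.end())
    return std::nullopt;
  return It->second;
}

int main() {
  // A spilled second operand of VANDPDZ128rr can be folded: the stack slot is
  // addressed directly by VANDPDZ128rm instead of being reloaded first.
  std::cout << (foldedOpcode(Opcode::VANDPDZ128rr) ? "foldable\n"
                                                   : "not foldable\n");
}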