 llvm/lib/Target/X86/X86InstrInfo.cpp                  | 21
 llvm/test/CodeGen/X86/avx512-insert-extract.ll        |  4
 llvm/test/CodeGen/X86/avx512-select.ll                |  4
 llvm/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll | 24
 llvm/test/CodeGen/X86/fast-isel-select-sse.ll         | 48
 llvm/test/CodeGen/X86/fmaxnum.ll                      |  8
 llvm/test/CodeGen/X86/fminnum.ll                      |  8
 llvm/test/CodeGen/X86/gpr-to-mask.ll                  |  6
 llvm/test/CodeGen/X86/pr38803.ll                      |  2
 llvm/test/CodeGen/X86/scalar-fp-to-i64.ll             |  8
 llvm/test/CodeGen/X86/sqrt-fastmath.ll                |  4
 11 files changed, 73 insertions(+), 64 deletions(-)
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 83a8be8e48a..7fba03c6425 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -4358,7 +4358,10 @@ unsigned X86InstrInfo::getPartialRegUpdateClearance(
 
 // Return true for any instruction the copies the high bits of the first source
 // operand into the unused high bits of the destination operand.
-static bool hasUndefRegUpdate(unsigned Opcode, bool ForLoadFold = false) {
+static bool hasUndefRegUpdate(unsigned Opcode, unsigned &OpNum,
+                              bool ForLoadFold = false) {
+  // Set the OpNum parameter to the first source operand.
+  OpNum = 1;
   switch (Opcode) {
   case X86::VCVTSI2SSrr:
   case X86::VCVTSI2SSrm:
@@ -4517,6 +4520,14 @@ static bool hasUndefRegUpdate(unsigned Opcode, bool ForLoadFold = false) {
   case X86::VSQRTSDZm:
   case X86::VSQRTSDZm_Int:
     return true;
+  case X86::VMOVSSZrrk:
+  case X86::VMOVSDZrrk:
+    OpNum = 3;
+    return true;
+  case X86::VMOVSSZrrkz:
+  case X86::VMOVSDZrrkz:
+    OpNum = 2;
+    return true;
   }
 
   return false;
@@ -4539,12 +4550,9 @@ static bool hasUndefRegUpdate(unsigned Opcode, bool ForLoadFold = false) {
 unsigned
 X86InstrInfo::getUndefRegClearance(const MachineInstr &MI, unsigned &OpNum,
                                    const TargetRegisterInfo *TRI) const {
-  if (!hasUndefRegUpdate(MI.getOpcode()))
+  if (!hasUndefRegUpdate(MI.getOpcode(), OpNum))
     return 0;
 
-  // Set the OpNum parameter to the first source operand.
-  OpNum = 1;
-
   const MachineOperand &MO = MI.getOperand(OpNum);
   if (MO.isUndef() && Register::isPhysicalRegister(MO.getReg())) {
     return UndefRegClearance;
@@ -4788,7 +4796,8 @@ MachineInstr *X86InstrInfo::foldMemoryOperandCustom(
 
 static bool shouldPreventUndefRegUpdateMemFold(MachineFunction &MF,
                                                MachineInstr &MI) {
-  if (!hasUndefRegUpdate(MI.getOpcode(), /*ForLoadFold*/true) ||
+  unsigned Ignored;
+  if (!hasUndefRegUpdate(MI.getOpcode(), Ignored, /*ForLoadFold*/true) ||
       !MI.getOperand(1).isReg())
     return false;
 
diff --git a/llvm/test/CodeGen/X86/avx512-insert-extract.ll b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
index 37f16489df4..d37220222ce 100644
--- a/llvm/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
@@ -2295,7 +2295,7 @@ define void @test_concat_v2i1(<2 x half>* %arg, <2 x half>* %arg1, <2 x half>* %
 ; KNL-NEXT: movswl 2(%rsi), %eax
 ; KNL-NEXT: vmovd %eax, %xmm1
 ; KNL-NEXT: vcvtph2ps %xmm1, %xmm1
-; KNL-NEXT: vmovss %xmm1, %xmm0, %xmm1 {%k2} {z}
+; KNL-NEXT: vmovss %xmm1, %xmm1, %xmm1 {%k2} {z}
 ; KNL-NEXT: vmovss %xmm0, %xmm0, %xmm0 {%k1} {z}
 ; KNL-NEXT: vcvtps2ph $4, %xmm0, %xmm0
 ; KNL-NEXT: vmovd %xmm0, %eax
@@ -2343,7 +2343,7 @@ define void @test_concat_v2i1(<2 x half>* %arg, <2 x half>* %arg1, <2 x half>* %
 ; SKX-NEXT: movswl 2(%rsi), %eax
 ; SKX-NEXT: vmovd %eax, %xmm1
 ; SKX-NEXT: vcvtph2ps %xmm1, %xmm1
-; SKX-NEXT: vmovss %xmm1, %xmm0, %xmm1 {%k2} {z}
+; SKX-NEXT: vmovss %xmm1, %xmm1, %xmm1 {%k2} {z}
 ; SKX-NEXT: vmovss %xmm0, %xmm0, %xmm0 {%k1} {z}
 ; SKX-NEXT: vcvtps2ph $4, %xmm0, %xmm0
 ; SKX-NEXT: vmovd %xmm0, %eax
diff --git a/llvm/test/CodeGen/X86/avx512-select.ll b/llvm/test/CodeGen/X86/avx512-select.ll
index 31484c16dd5..cd4a70c4f73 100644
--- a/llvm/test/CodeGen/X86/avx512-select.ll
+++ b/llvm/test/CodeGen/X86/avx512-select.ll
@@ -74,7 +74,7 @@ define float @select02(float %a, float %b, float %c, float %eps) {
 ; X64-LABEL: select02:
 ; X64: # %bb.0:
 ; X64-NEXT: vcmpless %xmm0, %xmm3, %k1
-; X64-NEXT: vmovss %xmm2, %xmm0, %xmm1 {%k1}
+; X64-NEXT: vmovss %xmm2, %xmm1, %xmm1 {%k1}
 ; X64-NEXT: vmovaps %xmm1, %xmm0
 ; X64-NEXT: retq
 %cmp = fcmp oge float %a, %eps
@@ -96,7 +96,7 @@ define double @select03(double %a, double %b, double %c, double %eps) {
 ; X64-LABEL: select03:
 ; X64: # %bb.0:
 ; X64-NEXT: vcmplesd %xmm0, %xmm3, %k1
-; X64-NEXT: vmovsd %xmm2, %xmm0, %xmm1 {%k1}
+; X64-NEXT: vmovsd %xmm2, %xmm1, %xmm1 {%k1}
 ; X64-NEXT: vmovapd %xmm1, %xmm0
 ; X64-NEXT: retq
 %cmp = fcmp oge double %a, %eps
diff --git a/llvm/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll b/llvm/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll
index aab74bbdece..354c695656a 100644
--- a/llvm/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-select-pseudo-cmov.ll
@@ -27,7 +27,7 @@ define float @select_fcmp_one_f32(float %a, float %b, float %c, float %d) {
 ; AVX512-LABEL: select_fcmp_one_f32:
 ; AVX512: ## %bb.0:
 ; AVX512-NEXT: vcmpneq_oqss %xmm1, %xmm0, %k1
-; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovaps %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp one float %a, %b
@@ -55,7 +55,7 @@ define double @select_fcmp_one_f64(double %a, double %b, double %c, double %d) {
 ; AVX512-LABEL: select_fcmp_one_f64:
 ; AVX512: ## %bb.0:
 ; AVX512-NEXT: vcmpneq_oqsd %xmm1, %xmm0, %k1
-; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovsd %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovapd %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp one double %a, %b
@@ -87,7 +87,7 @@ define float @select_icmp_eq_f32(i64 %a, i64 %b, float %c, float %d) {
 ; AVX512-ISEL-NEXT: cmpq %rsi, %rdi
 ; AVX512-ISEL-NEXT: sete %al
 ; AVX512-ISEL-NEXT: kmovd %eax, %k1
-; AVX512-ISEL-NEXT: vmovss %xmm0, %xmm0, %xmm1 {%k1}
+; AVX512-ISEL-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
 ; AVX512-ISEL-NEXT: vmovaps %xmm1, %xmm0
 ; AVX512-ISEL-NEXT: retq
 ;
@@ -128,7 +128,7 @@ define float @select_icmp_ne_f32(i64 %a, i64 %b, float %c, float %d) {
 ; AVX512-ISEL-NEXT: cmpq %rsi, %rdi
 ; AVX512-ISEL-NEXT: setne %al
 ; AVX512-ISEL-NEXT: kmovd %eax, %k1
-; AVX512-ISEL-NEXT: vmovss %xmm0, %xmm0, %xmm1 {%k1}
+; AVX512-ISEL-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
 ; AVX512-ISEL-NEXT: vmovaps %xmm1, %xmm0
 ; AVX512-ISEL-NEXT: retq
 ;
@@ -169,7 +169,7 @@ define float @select_icmp_ugt_f32(i64 %a, i64 %b, float %c, float %d) {
 ; AVX512-ISEL-NEXT: cmpq %rsi, %rdi
 ; AVX512-ISEL-NEXT: seta %al
 ; AVX512-ISEL-NEXT: kmovd %eax, %k1
-; AVX512-ISEL-NEXT: vmovss %xmm0, %xmm0, %xmm1 {%k1}
+; AVX512-ISEL-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
 ; AVX512-ISEL-NEXT: vmovaps %xmm1, %xmm0
 ; AVX512-ISEL-NEXT: retq
 ;
@@ -210,7 +210,7 @@ define float @select_icmp_uge_f32(i64 %a, i64 %b, float %c, float %d) {
 ; AVX512-ISEL-NEXT: cmpq %rsi, %rdi
 ; AVX512-ISEL-NEXT: setae %al
 ; AVX512-ISEL-NEXT: kmovd %eax, %k1
-; AVX512-ISEL-NEXT: vmovss %xmm0, %xmm0, %xmm1 {%k1}
+; AVX512-ISEL-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
 ; AVX512-ISEL-NEXT: vmovaps %xmm1, %xmm0
 ; AVX512-ISEL-NEXT: retq
 ;
@@ -251,7 +251,7 @@ define float @select_icmp_ult_f32(i64 %a, i64 %b, float %c, float %d) {
 ; AVX512-ISEL-NEXT: cmpq %rsi, %rdi
 ; AVX512-ISEL-NEXT: setb %al
 ; AVX512-ISEL-NEXT: kmovd %eax, %k1
-; AVX512-ISEL-NEXT: vmovss %xmm0, %xmm0, %xmm1 {%k1}
+; AVX512-ISEL-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
 ; AVX512-ISEL-NEXT: vmovaps %xmm1, %xmm0
 ; AVX512-ISEL-NEXT: retq
 ;
@@ -292,7 +292,7 @@ define float @select_icmp_ule_f32(i64 %a, i64 %b, float %c, float %d) {
 ; AVX512-ISEL-NEXT: cmpq %rsi, %rdi
 ; AVX512-ISEL-NEXT: setbe %al
 ; AVX512-ISEL-NEXT: kmovd %eax, %k1
-; AVX512-ISEL-NEXT: vmovss %xmm0, %xmm0, %xmm1 {%k1}
+; AVX512-ISEL-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
 ; AVX512-ISEL-NEXT: vmovaps %xmm1, %xmm0
 ; AVX512-ISEL-NEXT: retq
 ;
@@ -333,7 +333,7 @@ define float @select_icmp_sgt_f32(i64 %a, i64 %b, float %c, float %d) {
 ; AVX512-ISEL-NEXT: cmpq %rsi, %rdi
 ; AVX512-ISEL-NEXT: setg %al
 ; AVX512-ISEL-NEXT: kmovd %eax, %k1
-; AVX512-ISEL-NEXT: vmovss %xmm0, %xmm0, %xmm1 {%k1}
+; AVX512-ISEL-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
 ; AVX512-ISEL-NEXT: vmovaps %xmm1, %xmm0
 ; AVX512-ISEL-NEXT: retq
 ;
@@ -374,7 +374,7 @@ define float @select_icmp_sge_f32(i64 %a, i64 %b, float %c, float %d) {
 ; AVX512-ISEL-NEXT: cmpq %rsi, %rdi
 ; AVX512-ISEL-NEXT: setge %al
 ; AVX512-ISEL-NEXT: kmovd %eax, %k1
-; AVX512-ISEL-NEXT: vmovss %xmm0, %xmm0, %xmm1 {%k1}
+; AVX512-ISEL-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
 ; AVX512-ISEL-NEXT: vmovaps %xmm1, %xmm0
 ; AVX512-ISEL-NEXT: retq
 ;
@@ -415,7 +415,7 @@ define float @select_icmp_slt_f32(i64 %a, i64 %b, float %c, float %d) {
 ; AVX512-ISEL-NEXT: cmpq %rsi, %rdi
 ; AVX512-ISEL-NEXT: setl %al
 ; AVX512-ISEL-NEXT: kmovd %eax, %k1
-; AVX512-ISEL-NEXT: vmovss %xmm0, %xmm0, %xmm1 {%k1}
+; AVX512-ISEL-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
 ; AVX512-ISEL-NEXT: vmovaps %xmm1, %xmm0
 ; AVX512-ISEL-NEXT: retq
 ;
@@ -456,7 +456,7 @@ define float @select_icmp_sle_f32(i64 %a, i64 %b, float %c, float %d) {
 ; AVX512-ISEL-NEXT: cmpq %rsi, %rdi
 ; AVX512-ISEL-NEXT: setle %al
 ; AVX512-ISEL-NEXT: kmovd %eax, %k1
-; AVX512-ISEL-NEXT: vmovss %xmm0, %xmm0, %xmm1 {%k1}
+; AVX512-ISEL-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
 ; AVX512-ISEL-NEXT: vmovaps %xmm1, %xmm0
 ; AVX512-ISEL-NEXT: retq
 ;
diff --git a/llvm/test/CodeGen/X86/fast-isel-select-sse.ll b/llvm/test/CodeGen/X86/fast-isel-select-sse.ll
index e91b925a38e..54ef4fdc114 100644
--- a/llvm/test/CodeGen/X86/fast-isel-select-sse.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-select-sse.ll
@@ -26,7 +26,7 @@ define float @select_fcmp_oeq_f32(float %a, float %b, float %c, float %d) {
 ; AVX512-LABEL: select_fcmp_oeq_f32:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpeqss %xmm1, %xmm0, %k1
-; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovaps %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp oeq float %a, %b
@@ -52,7 +52,7 @@ define double @select_fcmp_oeq_f64(double %a, double %b, double %c, double %d) {
 ; AVX512-LABEL: select_fcmp_oeq_f64:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpeqsd %xmm1, %xmm0, %k1
-; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovsd %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovapd %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp oeq double %a, %b
@@ -79,7 +79,7 @@ define float @select_fcmp_ogt_f32(float %a, float %b, float %c, float %d) {
 ; AVX512-LABEL: select_fcmp_ogt_f32:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpltss %xmm0, %xmm1, %k1
-; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovaps %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp ogt float %a, %b
@@ -106,7 +106,7 @@ define double @select_fcmp_ogt_f64(double %a, double %b, double %c, double %d) {
 ; AVX512-LABEL: select_fcmp_ogt_f64:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpltsd %xmm0, %xmm1, %k1
-; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovsd %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovapd %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp ogt double %a, %b
@@ -133,7 +133,7 @@ define float @select_fcmp_oge_f32(float %a, float %b, float %c, float %d) {
 ; AVX512-LABEL: select_fcmp_oge_f32:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpless %xmm0, %xmm1, %k1
-; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovaps %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp oge float %a, %b
@@ -160,7 +160,7 @@ define double @select_fcmp_oge_f64(double %a, double %b, double %c, double %d) {
 ; AVX512-LABEL: select_fcmp_oge_f64:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmplesd %xmm0, %xmm1, %k1
-; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovsd %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovapd %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp oge double %a, %b
@@ -186,7 +186,7 @@ define float @select_fcmp_olt_f32(float %a, float %b, float %c, float %d) {
 ; AVX512-LABEL: select_fcmp_olt_f32:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpltss %xmm1, %xmm0, %k1
-; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovaps %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp olt float %a, %b
@@ -212,7 +212,7 @@ define double @select_fcmp_olt_f64(double %a, double %b, double %c, double %d) {
 ; AVX512-LABEL: select_fcmp_olt_f64:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpltsd %xmm1, %xmm0, %k1
-; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovsd %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovapd %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp olt double %a, %b
@@ -238,7 +238,7 @@ define float @select_fcmp_ole_f32(float %a, float %b, float %c, float %d) {
 ; AVX512-LABEL: select_fcmp_ole_f32:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpless %xmm1, %xmm0, %k1
-; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovaps %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp ole float %a, %b
@@ -264,7 +264,7 @@ define double @select_fcmp_ole_f64(double %a, double %b, double %c, double %d) {
 ; AVX512-LABEL: select_fcmp_ole_f64:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmplesd %xmm1, %xmm0, %k1
-; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovsd %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovapd %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp ole double %a, %b
@@ -290,7 +290,7 @@ define float @select_fcmp_ord_f32(float %a, float %b, float %c, float %d) {
 ; AVX512-LABEL: select_fcmp_ord_f32:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpordss %xmm1, %xmm0, %k1
-; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovaps %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp ord float %a, %b
@@ -316,7 +316,7 @@ define double @select_fcmp_ord_f64(double %a, double %b, double %c, double %d) {
 ; AVX512-LABEL: select_fcmp_ord_f64:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpordsd %xmm1, %xmm0, %k1
-; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovsd %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovapd %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp ord double %a, %b
@@ -342,7 +342,7 @@ define float @select_fcmp_uno_f32(float %a, float %b, float %c, float %d) {
 ; AVX512-LABEL: select_fcmp_uno_f32:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpunordss %xmm1, %xmm0, %k1
-; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovaps %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp uno float %a, %b
@@ -368,7 +368,7 @@ define double @select_fcmp_uno_f64(double %a, double %b, double %c, double %d) {
 ; AVX512-LABEL: select_fcmp_uno_f64:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpunordsd %xmm1, %xmm0, %k1
-; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovsd %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovapd %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp uno double %a, %b
@@ -394,7 +394,7 @@ define float @select_fcmp_ugt_f32(float %a, float %b, float %c, float %d) {
 ; AVX512-LABEL: select_fcmp_ugt_f32:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpnless %xmm1, %xmm0, %k1
-; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovaps %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp ugt float %a, %b
@@ -420,7 +420,7 @@ define double @select_fcmp_ugt_f64(double %a, double %b, double %c, double %d) {
 ; AVX512-LABEL: select_fcmp_ugt_f64:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpnlesd %xmm1, %xmm0, %k1
-; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovsd %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovapd %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp ugt double %a, %b
@@ -446,7 +446,7 @@ define float @select_fcmp_uge_f32(float %a, float %b, float %c, float %d) {
 ; AVX512-LABEL: select_fcmp_uge_f32:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpnltss %xmm1, %xmm0, %k1
-; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovaps %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp uge float %a, %b
@@ -472,7 +472,7 @@ define double @select_fcmp_uge_f64(double %a, double %b, double %c, double %d) {
 ; AVX512-LABEL: select_fcmp_uge_f64:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpnltsd %xmm1, %xmm0, %k1
-; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovsd %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovapd %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp uge double %a, %b
@@ -499,7 +499,7 @@ define float @select_fcmp_ult_f32(float %a, float %b, float %c, float %d) {
 ; AVX512-LABEL: select_fcmp_ult_f32:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpnless %xmm0, %xmm1, %k1
-; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovaps %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp ult float %a, %b
@@ -526,7 +526,7 @@ define double @select_fcmp_ult_f64(double %a, double %b, double %c, double %d) {
 ; AVX512-LABEL: select_fcmp_ult_f64:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpnlesd %xmm0, %xmm1, %k1
-; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovsd %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovapd %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp ult double %a, %b
@@ -553,7 +553,7 @@ define float @select_fcmp_ule_f32(float %a, float %b, float %c, float %d) {
 ; AVX512-LABEL: select_fcmp_ule_f32:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpnltss %xmm0, %xmm1, %k1
-; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovaps %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp ule float %a, %b
@@ -580,7 +580,7 @@ define double @select_fcmp_ule_f64(double %a, double %b, double %c, double %d) {
 ; AVX512-LABEL: select_fcmp_ule_f64:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpnltsd %xmm0, %xmm1, %k1
-; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovsd %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovapd %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp ule double %a, %b
@@ -606,7 +606,7 @@ define float @select_fcmp_une_f32(float %a, float %b, float %c, float %d) {
 ; AVX512-LABEL: select_fcmp_une_f32:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpneqss %xmm1, %xmm0, %k1
-; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovss %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovaps %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp une float %a, %b
@@ -632,7 +632,7 @@ define double @select_fcmp_une_f64(double %a, double %b, double %c, double %d) {
 ; AVX512-LABEL: select_fcmp_une_f64:
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vcmpneqsd %xmm1, %xmm0, %k1
-; AVX512-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512-NEXT: vmovsd %xmm2, %xmm3, %xmm3 {%k1}
 ; AVX512-NEXT: vmovapd %xmm3, %xmm0
 ; AVX512-NEXT: retq
 %1 = fcmp une double %a, %b
diff --git a/llvm/test/CodeGen/X86/fmaxnum.ll b/llvm/test/CodeGen/X86/fmaxnum.ll
index e308412f7ca..23de2667a3e 100644
--- a/llvm/test/CodeGen/X86/fmaxnum.ll
+++ b/llvm/test/CodeGen/X86/fmaxnum.ll
@@ -44,7 +44,7 @@ define float @test_fmaxf(float %x, float %y) {
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vmaxss %xmm0, %xmm1, %xmm2
 ; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
-; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm2 {%k1}
+; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
 ; AVX512-NEXT: vmovaps %xmm2, %xmm0
 ; AVX512-NEXT: retq
 %z = call float @fmaxf(float %x, float %y) readnone
@@ -85,7 +85,7 @@ define double @test_fmax(double %x, double %y) {
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vmaxsd %xmm0, %xmm1, %xmm2
 ; AVX512-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
-; AVX512-NEXT: vmovsd %xmm1, %xmm0, %xmm2 {%k1}
+; AVX512-NEXT: vmovsd %xmm1, %xmm2, %xmm2 {%k1}
 ; AVX512-NEXT: vmovapd %xmm2, %xmm0
 ; AVX512-NEXT: retq
 %z = call double @fmax(double %x, double %y) readnone
@@ -133,7 +133,7 @@ define float @test_intrinsic_fmaxf(float %x, float %y) {
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vmaxss %xmm0, %xmm1, %xmm2
 ; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
-; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm2 {%k1}
+; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
 ; AVX512-NEXT: vmovaps %xmm2, %xmm0
 ; AVX512-NEXT: retq
 %z = call float @llvm.maxnum.f32(float %x, float %y) readnone
@@ -164,7 +164,7 @@ define double @test_intrinsic_fmax(double %x, double %y) {
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vmaxsd %xmm0, %xmm1, %xmm2
 ; AVX512-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
-; AVX512-NEXT: vmovsd %xmm1, %xmm0, %xmm2 {%k1}
+; AVX512-NEXT: vmovsd %xmm1, %xmm2, %xmm2 {%k1}
 ; AVX512-NEXT: vmovapd %xmm2, %xmm0
 ; AVX512-NEXT: retq
 %z = call double @llvm.maxnum.f64(double %x, double %y) readnone
diff --git a/llvm/test/CodeGen/X86/fminnum.ll b/llvm/test/CodeGen/X86/fminnum.ll
index 33accf2e49c..1667a5cd0a7 100644
--- a/llvm/test/CodeGen/X86/fminnum.ll
+++ b/llvm/test/CodeGen/X86/fminnum.ll
@@ -44,7 +44,7 @@ define float @test_fminf(float %x, float %y) {
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vminss %xmm0, %xmm1, %xmm2
 ; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
-; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm2 {%k1}
+; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
 ; AVX512-NEXT: vmovaps %xmm2, %xmm0
 ; AVX512-NEXT: retq
 %z = call float @fminf(float %x, float %y) readnone
@@ -85,7 +85,7 @@ define double @test_fmin(double %x, double %y) {
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vminsd %xmm0, %xmm1, %xmm2
 ; AVX512-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
-; AVX512-NEXT: vmovsd %xmm1, %xmm0, %xmm2 {%k1}
+; AVX512-NEXT: vmovsd %xmm1, %xmm2, %xmm2 {%k1}
 ; AVX512-NEXT: vmovapd %xmm2, %xmm0
 ; AVX512-NEXT: retq
 %z = call double @fmin(double %x, double %y) readnone
@@ -133,7 +133,7 @@ define float @test_intrinsic_fminf(float %x, float %y) {
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vminss %xmm0, %xmm1, %xmm2
 ; AVX512-NEXT: vcmpunordss %xmm0, %xmm0, %k1
-; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm2 {%k1}
+; AVX512-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
 ; AVX512-NEXT: vmovaps %xmm2, %xmm0
 ; AVX512-NEXT: retq
 %z = call float @llvm.minnum.f32(float %x, float %y) readnone
@@ -164,7 +164,7 @@ define double @test_intrinsic_fmin(double %x, double %y) {
 ; AVX512: # %bb.0:
 ; AVX512-NEXT: vminsd %xmm0, %xmm1, %xmm2
 ; AVX512-NEXT: vcmpunordsd %xmm0, %xmm0, %k1
-; AVX512-NEXT: vmovsd %xmm1, %xmm0, %xmm2 {%k1}
+; AVX512-NEXT: vmovsd %xmm1, %xmm2, %xmm2 {%k1}
 ; AVX512-NEXT: vmovapd %xmm2, %xmm0
 ; AVX512-NEXT: retq
 %z = call double @llvm.minnum.f64(double %x, double %y) readnone
diff --git a/llvm/test/CodeGen/X86/gpr-to-mask.ll b/llvm/test/CodeGen/X86/gpr-to-mask.ll
index ff237799783..8b6a6074024 100644
--- a/llvm/test/CodeGen/X86/gpr-to-mask.ll
+++ b/llvm/test/CodeGen/X86/gpr-to-mask.ll
@@ -13,7 +13,7 @@ define void @test_fcmp_storefloat(i1 %cond, float* %fptr, float %f1, float %f2,
 ; X86-64-NEXT: .LBB0_2: # %else
 ; X86-64-NEXT: vcmpeqss %xmm5, %xmm4, %k1
 ; X86-64-NEXT: .LBB0_3: # %exit
-; X86-64-NEXT: vmovss %xmm0, %xmm0, %xmm1 {%k1}
+; X86-64-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
 ; X86-64-NEXT: vmovss %xmm1, (%rsi)
 ; X86-64-NEXT: retq
 ;
@@ -112,7 +112,7 @@ define void @test_load_add(i1 %cond, float* %fptr, i1* %iptr1, i1* %iptr2, float
 ; X86-64-NEXT: movb (%rcx), %al
 ; X86-64-NEXT: .LBB2_3: # %exit
 ; X86-64-NEXT: kmovd %eax, %k1
-; X86-64-NEXT: vmovss %xmm0, %xmm0, %xmm1 {%k1}
+; X86-64-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
 ; X86-64-NEXT: vmovss %xmm1, (%rsi)
 ; X86-64-NEXT: retq
 ;
@@ -167,7 +167,7 @@ define void @test_load_i1(i1 %cond, float* %fptr, i1* %iptr1, i1* %iptr2, float
 ; X86-64-NEXT: .LBB3_2: # %else
 ; X86-64-NEXT: kmovb (%rcx), %k1
 ; X86-64-NEXT: .LBB3_3: # %exit
-; X86-64-NEXT: vmovss %xmm0, %xmm0, %xmm1 {%k1}
+; X86-64-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
 ; X86-64-NEXT: vmovss %xmm1, (%rsi)
 ; X86-64-NEXT: retq
 ;
diff --git a/llvm/test/CodeGen/X86/pr38803.ll b/llvm/test/CodeGen/X86/pr38803.ll
index 37213424d77..a2fc19e0cde 100644
--- a/llvm/test/CodeGen/X86/pr38803.ll
+++ b/llvm/test/CodeGen/X86/pr38803.ll
@@ -20,7 +20,7 @@ define float @_Z3fn2v() {
 ; CHECK-NEXT: vcvtsi2ssl {{.*}}(%rip), %xmm1, %xmm1
 ; CHECK-NEXT: kmovd %eax, %k1
 ; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; CHECK-NEXT: vmovss %xmm2, %xmm0, %xmm1 {%k1}
+; CHECK-NEXT: vmovss %xmm2, %xmm1, %xmm1 {%k1}
 ; CHECK-NEXT: vmovss %xmm1, {{.*}}(%rip)
 ; CHECK-NEXT: .LBB0_2: # %if.end
 ; CHECK-NEXT: popq %rax
diff --git a/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll b/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll
index 7dc44f182cb..158bab5f2f1 100644
--- a/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll
+++ b/llvm/test/CodeGen/X86/scalar-fp-to-i64.ll
@@ -90,7 +90,7 @@ define i64 @f_to_u64(float %a) nounwind {
 ; AVX512F_32_WIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX512F_32_WIN-NEXT: vcmpltss %xmm1, %xmm0, %k1
 ; AVX512F_32_WIN-NEXT: vsubss %xmm1, %xmm0, %xmm2
-; AVX512F_32_WIN-NEXT: vmovss %xmm0, %xmm0, %xmm2 {%k1}
+; AVX512F_32_WIN-NEXT: vmovss %xmm0, %xmm2, %xmm2 {%k1}
 ; AVX512F_32_WIN-NEXT: vmovss %xmm2, (%esp)
 ; AVX512F_32_WIN-NEXT: flds (%esp)
 ; AVX512F_32_WIN-NEXT: fisttpll (%esp)
@@ -111,7 +111,7 @@ define i64 @f_to_u64(float %a) nounwind {
 ; AVX512F_32_LIN-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX512F_32_LIN-NEXT: vcmpltss %xmm1, %xmm0, %k1
 ; AVX512F_32_LIN-NEXT: vsubss %xmm1, %xmm0, %xmm2
-; AVX512F_32_LIN-NEXT: vmovss %xmm0, %xmm0, %xmm2 {%k1}
+; AVX512F_32_LIN-NEXT: vmovss %xmm0, %xmm2, %xmm2 {%k1}
 ; AVX512F_32_LIN-NEXT: vmovss %xmm2, (%esp)
 ; AVX512F_32_LIN-NEXT: flds (%esp)
 ; AVX512F_32_LIN-NEXT: fisttpll (%esp)
@@ -580,7 +580,7 @@ define i64 @d_to_u64(double %a) nounwind {
 ; AVX512F_32_WIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
 ; AVX512F_32_WIN-NEXT: vcmpltsd %xmm1, %xmm0, %k1
 ; AVX512F_32_WIN-NEXT: vsubsd %xmm1, %xmm0, %xmm2
-; AVX512F_32_WIN-NEXT: vmovsd %xmm0, %xmm0, %xmm2 {%k1}
+; AVX512F_32_WIN-NEXT: vmovsd %xmm0, %xmm2, %xmm2 {%k1}
 ; AVX512F_32_WIN-NEXT: vmovsd %xmm2, (%esp)
 ; AVX512F_32_WIN-NEXT: fldl (%esp)
 ; AVX512F_32_WIN-NEXT: fisttpll (%esp)
@@ -601,7 +601,7 @@ define i64 @d_to_u64(double %a) nounwind {
 ; AVX512F_32_LIN-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
 ; AVX512F_32_LIN-NEXT: vcmpltsd %xmm1, %xmm0, %k1
 ; AVX512F_32_LIN-NEXT: vsubsd %xmm1, %xmm0, %xmm2
-; AVX512F_32_LIN-NEXT: vmovsd %xmm0, %xmm0, %xmm2 {%k1}
+; AVX512F_32_LIN-NEXT: vmovsd %xmm0, %xmm2, %xmm2 {%k1}
 ; AVX512F_32_LIN-NEXT: vmovsd %xmm2, (%esp)
 ; AVX512F_32_LIN-NEXT: fldl (%esp)
 ; AVX512F_32_LIN-NEXT: fisttpll (%esp)
diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath.ll b/llvm/test/CodeGen/X86/sqrt-fastmath.ll
index 6e0273d513f..6aad4e8f69e 100644
--- a/llvm/test/CodeGen/X86/sqrt-fastmath.ll
+++ b/llvm/test/CodeGen/X86/sqrt-fastmath.ll
@@ -94,7 +94,7 @@ define float @finite_f32_estimate(float %f) #1 {
 ; AVX512-NEXT: vmulss %xmm1, %xmm2, %xmm1
 ; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
 ; AVX512-NEXT: vcmpeqss %xmm2, %xmm0, %k1
-; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm1 {%k1}
+; AVX512-NEXT: vmovss %xmm2, %xmm1, %xmm1 {%k1}
 ; AVX512-NEXT: vmovaps %xmm1, %xmm0
 ; AVX512-NEXT: retq
 %call = tail call float @__sqrtf_finite(float %f) #2
@@ -165,7 +165,7 @@ define float @sqrtf_check_denorms(float %x) #3 {
 ; AVX512-NEXT: vandps %xmm2, %xmm0, %xmm0
 ; AVX512-NEXT: vcmpltss {{.*}}(%rip), %xmm0, %k1
 ; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
-; AVX512-NEXT: vmovss %xmm0, %xmm0, %xmm1 {%k1}
+; AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm1 {%k1}
 ; AVX512-NEXT: vmovaps %xmm1, %xmm0
 ; AVX512-NEXT: retq
 %call = tail call float @__sqrtf_finite(float %x) #2
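
For readers skimming the diff: the only non-mechanical piece is the OpNum mapping that hasUndefRegUpdate now reports for the masked scalar moves. The sketch below is illustrative only and is not part of the patch; the enum is a hypothetical stand-in for the real X86:: opcode values, and only the returned indices (3, 2, and the previous default of 1) are taken from the change above.

```cpp
// Hypothetical stand-in opcodes; the in-tree code switches on the real
// X86::VMOVSSZrrk / VMOVSDZrrk / VMOVSSZrrkz / VMOVSDZrrkz enumerators.
enum Opcode { VMOVSSZrrk, VMOVSDZrrk, VMOVSSZrrkz, VMOVSDZrrkz, OtherUndefUpdatingOp };

// Mirrors the OpNum values the patch sets: operand 3 for the merge-masked
// moves, operand 2 for the zero-masked forms, operand 1 for everything else.
static unsigned undefUpdateOperandIndex(Opcode Opc) {
  switch (Opc) {
  case VMOVSSZrrk:
  case VMOVSDZrrk:
    return 3;
  case VMOVSSZrrkz:
  case VMOVSDZrrkz:
    return 2;
  default:
    return 1;
  }
}
```

With the operand index reported per opcode, getUndefRegClearance can flag the correct undef source of these masked moves, which is why the test updates above retarget the second assembly operand of vmovss/vmovsd from an unrelated register (for example %xmm0) to the destination register itself.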