| author | Craig Topper <craig.topper@intel.com> | 2018-04-18 22:07:53 +0000 |
|---|---|---|
| committer | Craig Topper <craig.topper@intel.com> | 2018-04-18 22:07:53 +0000 |
| commit | ebf52e80c1d0d35686438828322aab5aec2c7bbe | |
| tree | 15dacfe9f1ac0447e3e9013836b60c6884705dd3 /llvm/lib/Target | |
| parent | 02f7841e4a8c3a5d1a0b2819c3394a85c279625b | |
[X86] Correct the Defs, Uses, hasSideEffects, mayLoad, mayStore for XCHG and XADD instructions.
I don't think we emit any of these from codegen, except for using XCHG16ar as a 2-byte NOP.
llvm-svn: 330298
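
For context: XCHG16ar with AX as its operand assembles to `66 90`, the canonical 2-byte NOP, which is why `EmitNop` in X86MCInstLower.cpp uses it. The sketch below shows how that emission looks once XCHG16ar carries an explicit tied destination operand, as this patch makes it do. It is illustrative only; the wrapper function name is hypothetical, and the include path is the in-tree one used by the X86 target.

```cpp
// Sketch only: mirrors the EmitNop change in this patch. After this change
// XCHG16ar has an explicit (tied) destination operand, so the 2-byte NOP
// ("xchg %ax, %ax", encoding 66 90) must be built with AX added twice.
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "MCTargetDesc/X86MCTargetDesc.h" // in-tree path for X86::XCHG16ar, X86::AX

using namespace llvm;

// Hypothetical helper; the real logic lives inside EmitNop in X86MCInstLower.cpp.
static void emitTwoByteNop(MCStreamer &OS, const MCSubtargetInfo &STI) {
  // One addReg for the tied destination, one for the source register.
  OS.EmitInstruction(
      MCInstBuilder(X86::XCHG16ar).addReg(X86::AX).addReg(X86::AX), STI);
}
```

Without the second `addReg`, the instruction would be missing its tied destination operand, which is exactly what the X86MCInstLower.cpp hunk below addresses.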
Diffstat (limited to 'llvm/lib/Target')
| File | Lines changed |
|---|---|
| llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h | 4 |
| llvm/lib/Target/X86/X86InstrInfo.td | 81 |
| llvm/lib/Target/X86/X86MCInstLower.cpp | 2 |
3 files changed, 52 insertions, 35 deletions
```diff
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index b71a79ae412..065356baabc 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -670,6 +670,10 @@ namespace X86II {
         return 1;
       return 0;
     case 2:
+      // XCHG/XADD have two destinations and two sources.
+      if (NumOps >= 4 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0 &&
+          Desc.getOperandConstraint(3, MCOI::TIED_TO) == 1)
+        return 2;
       // Check for gather. AVX-512 has the second tied operand early. AVX2
       // has it as the last op.
       if (NumOps == 9 && Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0 &&
diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td
index 0943e10c219..3ad15450ce5 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/llvm/lib/Target/X86/X86InstrInfo.td
@@ -1934,56 +1934,69 @@ defm XCHG : ATOMIC_SWAP<0x86, 0x87, "xchg", "atomic_swap">;
 
 // Swap between registers.
 let SchedRW = [WriteALU] in {
-let Constraints = "$val = $dst" in {
-def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst), (ins GR8:$val, GR8:$src),
-                "xchg{b}\t{$val, $src|$src, $val}", []>;
-def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst), (ins GR16:$val, GR16:$src),
-                 "xchg{w}\t{$val, $src|$src, $val}", []>,
+let Constraints = "$src1 = $dst1, $src2 = $dst2", hasSideEffects = 0 in {
+def XCHG8rr : I<0x86, MRMSrcReg, (outs GR8:$dst1, GR8:$dst2),
+                (ins GR8:$src1, GR8:$src2),
+                "xchg{b}\t{$src2, $src1|$src1, $src2}", []>;
+def XCHG16rr : I<0x87, MRMSrcReg, (outs GR16:$dst1, GR16:$dst2),
+                 (ins GR16:$src1, GR16:$src2),
+                 "xchg{w}\t{$src2, $src1|$src1, $src2}", []>,
                  OpSize16;
-def XCHG32rr : I<0x87, MRMSrcReg, (outs GR32:$dst), (ins GR32:$val, GR32:$src),
-                 "xchg{l}\t{$val, $src|$src, $val}", []>,
+def XCHG32rr : I<0x87, MRMSrcReg, (outs GR32:$dst1, GR32:$dst2),
+                 (ins GR32:$src1, GR32:$src2),
+                 "xchg{l}\t{$src2, $src1|$src1, $src2}", []>,
                  OpSize32;
-def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst), (ins GR64:$val,GR64:$src),
-                  "xchg{q}\t{$val, $src|$src, $val}", []>;
+def XCHG64rr : RI<0x87, MRMSrcReg, (outs GR64:$dst1, GR64:$dst2),
+                  (ins GR64:$src1 ,GR64:$src2),
+                  "xchg{q}\t{$src2, $src1|$src1, $src2}", []>;
 }
 
 // Swap between EAX and other registers.
+let Constraints = "$src = $dst", hasSideEffects = 0 in {
 let Uses = [AX], Defs = [AX] in
-def XCHG16ar : I<0x90, AddRegFrm, (outs), (ins GR16:$src),
+def XCHG16ar : I<0x90, AddRegFrm, (outs GR16:$dst), (ins GR16:$src),
                   "xchg{w}\t{$src, %ax|ax, $src}", []>, OpSize16;
 let Uses = [EAX], Defs = [EAX] in
-def XCHG32ar : I<0x90, AddRegFrm, (outs), (ins GR32:$src),
-                 "xchg{l}\t{$src, %eax|eax, $src}", []>,
-                 OpSize32;
+def XCHG32ar : I<0x90, AddRegFrm, (outs GR32:$dst), (ins GR32:$src),
+                 "xchg{l}\t{$src, %eax|eax, $src}", []>, OpSize32;
 let Uses = [RAX], Defs = [RAX] in
-def XCHG64ar : RI<0x90, AddRegFrm, (outs), (ins GR64:$src),
+def XCHG64ar : RI<0x90, AddRegFrm, (outs GR64:$dst), (ins GR64:$src),
                   "xchg{q}\t{$src, %rax|rax, $src}", []>;
+}
 } // SchedRW
 
-let SchedRW = [WriteALU] in {
-def XADD8rr : I<0xC0, MRMDestReg, (outs GR8:$dst), (ins GR8:$src),
-                "xadd{b}\t{$src, $dst|$dst, $src}", []>, TB;
-def XADD16rr : I<0xC1, MRMDestReg, (outs GR16:$dst), (ins GR16:$src),
-                 "xadd{w}\t{$src, $dst|$dst, $src}", []>, TB,
-                 OpSize16;
-def XADD32rr : I<0xC1, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
-                 "xadd{l}\t{$src, $dst|$dst, $src}", []>, TB,
-                 OpSize32;
-def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
-                  "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB;
+let hasSideEffects = 0, Constraints = "$src1 = $dst1, $src2 = $dst2",
+    Defs = [EFLAGS], SchedRW = [WriteALU] in {
+def XADD8rr : I<0xC0, MRMDestReg, (outs GR8:$dst1, GR8:$dst2),
+                (ins GR8:$src1, GR8:$src2),
+                "xadd{b}\t{$src2, $src1|$src1, $src2}", []>, TB;
+def XADD16rr : I<0xC1, MRMDestReg, (outs GR16:$dst1, GR16:$dst2),
+                 (ins GR16:$src1, GR16:$src2),
+                 "xadd{w}\t{$src2, $src1|$src1, $src2}", []>, TB, OpSize16;
+def XADD32rr : I<0xC1, MRMDestReg, (outs GR32:$dst1, GR32:$dst2),
+                 (ins GR32:$src1, GR32:$src2),
+                 "xadd{l}\t{$src2, $src1|$src1, $src2}", []>, TB, OpSize32;
+def XADD64rr : RI<0xC1, MRMDestReg, (outs GR64:$dst1, GR64:$dst2),
+                  (ins GR64:$src1, GR64:$src2),
+                  "xadd{q}\t{$src2, $src1|$src1, $src2}", []>, TB;
 } // SchedRW
 
-let mayLoad = 1, mayStore = 1, SchedRW = [WriteALULd, WriteRMW] in {
-def XADD8rm : I<0xC0, MRMDestMem, (outs), (ins i8mem:$dst, GR8:$src),
-                "xadd{b}\t{$src, $dst|$dst, $src}", []>, TB;
-def XADD16rm : I<0xC1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
-                 "xadd{w}\t{$src, $dst|$dst, $src}", []>, TB,
+let mayLoad = 1, mayStore = 1, hasSideEffects = 0, Constraints = "$val = $dst",
+    Defs = [EFLAGS], SchedRW = [WriteALULd, WriteRMW] in {
+def XADD8rm : I<0xC0, MRMSrcMem, (outs GR8:$dst),
+                (ins GR8:$val, i8mem:$ptr),
+                "xadd{b}\t{$val, $ptr|$ptr, $val}", []>, TB;
+def XADD16rm : I<0xC1, MRMSrcMem, (outs GR16:$dst),
+                 (ins GR16:$val, i16mem:$ptr),
+                 "xadd{w}\t{$val, $ptr|$ptr, $val}", []>, TB,
                  OpSize16;
-def XADD32rm : I<0xC1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
-                 "xadd{l}\t{$src, $dst|$dst, $src}", []>, TB,
+def XADD32rm : I<0xC1, MRMSrcMem, (outs GR32:$dst),
+                 (ins GR32:$val, i32mem:$ptr),
+                 "xadd{l}\t{$val, $ptr|$ptr, $val}", []>, TB,
                  OpSize32;
-def XADD64rm : RI<0xC1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
-                  "xadd{q}\t{$src, $dst|$dst, $src}", []>, TB;
+def XADD64rm : RI<0xC1, MRMSrcMem, (outs GR64:$dst),
+                  (ins GR64:$val, i64mem:$ptr),
+                  "xadd{q}\t{$val, $ptr|$ptr, $val}", []>, TB;
 }
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
index b139f9c92ee..9b0f2a64cf7 100644
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -1146,7 +1146,7 @@ static unsigned EmitNop(MCStreamer &OS, unsigned NumBytes, bool Is64Bit,
     OS.EmitInstruction(MCInstBuilder(Opc), STI);
     break;
   case X86::XCHG16ar:
-    OS.EmitInstruction(MCInstBuilder(Opc).addReg(X86::AX), STI);
+    OS.EmitInstruction(MCInstBuilder(Opc).addReg(X86::AX).addReg(X86::AX), STI);
     break;
   case X86::NOOPL:
   case X86::NOOPW:
```
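
The X86BaseInfo.h hunk teaches the operand-bias logic to recognise the new two-destination form: with `Constraints = "$src1 = $dst1, $src2 = $dst2"`, operands 2 and 3 (the sources) are tied to operands 0 and 1 (the destinations), so two operands must be skipped before the remaining operands are scanned. Below is a self-contained sketch of that tied-operand test using only the generic MC layer; the helper name is illustrative and not part of LLVM.

```cpp
// Illustrative only: reproduces the tied-operand check this patch adds for
// the two-destination XCHG*rr/XADD*rr register forms.
#include "llvm/MC/MCInstrDesc.h"

using namespace llvm;

// Returns true when the descriptor has the shape (outs d1, d2) (ins s1, s2)
// with s1 tied to d1 and s2 tied to d2 -- i.e. operand 2 is TIED_TO operand 0
// and operand 3 is TIED_TO operand 1.
static bool hasTwoTiedDestSourcePairs(const MCInstrDesc &Desc) {
  unsigned NumOps = Desc.getNumOperands();
  return NumOps >= 4 &&
         Desc.getOperandConstraint(2, MCOI::TIED_TO) == 0 &&
         Desc.getOperandConstraint(3, MCOI::TIED_TO) == 1;
}
```

When a descriptor matches this shape, the patched code returns an operand bias of 2, the same value already returned for the AVX-512 gather special case in the same `case 2:` block.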

