author      Eric Astor <epastor@google.com>    2020-01-09 14:01:55 -0500
committer   Eric Astor <epastor@google.com>    2020-01-09 14:55:03 -0500
commit      1c545f6dbcbb3ada2dfef2c6afbc1ca8939135cb (patch)
tree        10b597f4379943fee8e5d36e64ac774e0ce81504 /llvm/lib/Target
parent      3408940f736955402b7676e3b8bab6906cc82637 (diff)
[ms] [X86] Use "P" modifier on all branch-target operands in inline X86 assembly.
Summary:
Extend D71677 to apply to all branch-target operands, rather than special-casing call instructions.
Also add a regression test for llvm.org/PR44272, since this finishes fixing it.
Reviewers: thakis, rnk
Reviewed By: thakis
Subscribers: merge_guards_bot, hiraditya, cfe-commits, llvm-commits
Tags: #clang, #llvm
Differential Revision: https://reviews.llvm.org/D72417
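
To make the scope of the change concrete, below is a minimal sketch (not taken from this commit or its tests) of the case D71677 already handled: a direct call inside an MS-style __asm block. Its branch-target operand is the kind of operand the integrated assembler rewrites with the "P" modifier so the target is emitted as a plain pc-relative symbol. The sketch assumes a 32-bit x86 target and a compiler that accepts MS-style inline assembly (for example clang-cl); the function name "helper" is invented for illustration.

// Hypothetical example; the names below are not from the patch.
extern "C" void helper();      // assumed external function, illustrative only

void caller() {
  __asm {
    call helper   ; the call's branch-target operand gets the "P" modifier
  }
}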
Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--   llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp    9
-rw-r--r--   llvm/lib/Target/X86/AsmParser/X86Operand.h         3
-rw-r--r--   llvm/lib/Target/X86/X86InstrControl.td            23
-rw-r--r--   llvm/lib/Target/X86/X86InstrInfo.td               46
4 files changed, 31 insertions, 50 deletions
diff --git a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index 27c6a5f9142..69299ae9b00 100644
--- a/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -2924,15 +2924,6 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
     }
   }
 
-  // Mark the operands of a call instruction. These need to be handled
-  // differently when referenced in MS-style inline assembly.
-  if (Name.startswith("call") || Name.startswith("lcall")) {
-    for (size_t i = 1; i < Operands.size(); ++i) {
-      X86Operand &Op = static_cast<X86Operand &>(*Operands[i]);
-      Op.setCallOperand(true);
-    }
-  }
-
   if (Flags)
     Operands.push_back(X86Operand::CreatePrefix(Flags, NameLoc, NameLoc));
   return false;
diff --git a/llvm/lib/Target/X86/AsmParser/X86Operand.h b/llvm/lib/Target/X86/AsmParser/X86Operand.h
index 36b8bc4e65f..d831a63b04e 100644
--- a/llvm/lib/Target/X86/AsmParser/X86Operand.h
+++ b/llvm/lib/Target/X86/AsmParser/X86Operand.h
@@ -284,9 +284,6 @@ struct X86Operand final : public MCParsedAsmOperand {
 
   bool needAddressOf() const override { return AddressOf; }
 
-  bool isCallOperand() const override { return CallOperand; }
-  void setCallOperand(bool IsCallOperand) { CallOperand = IsCallOperand; }
-
   bool isMem() const override { return Kind == Memory; }
   bool isMemUnsized() const {
     return Kind == Memory && Mem.Size == 0;
diff --git a/llvm/lib/Target/X86/X86InstrControl.td b/llvm/lib/Target/X86/X86InstrControl.td
index e1e6eea5988..32faeb1a86f 100644
--- a/llvm/lib/Target/X86/X86InstrControl.td
+++ b/llvm/lib/Target/X86/X86InstrControl.td
@@ -220,12 +220,12 @@ let isCall = 1 in
   // registers are added manually.
   let Uses = [ESP, SSP] in {
     def CALLpcrel32 : Ii32PCRel<0xE8, RawFrm,
-                                (outs), (ins i32imm_pcrel:$dst),
+                                (outs), (ins i32imm_brtarget:$dst),
                                 "call{l}\t$dst", []>, OpSize32,
                                 Requires<[Not64BitMode]>, Sched<[WriteJump]>;
     let hasSideEffects = 0 in
       def CALLpcrel16 : Ii16PCRel<0xE8, RawFrm,
-                                  (outs), (ins i16imm_pcrel:$dst),
+                                  (outs), (ins i16imm_brtarget:$dst),
                                   "call{w}\t$dst", []>, OpSize16,
                                   Sched<[WriteJump]>;
     def CALL16r : I<0xFF, MRM2r, (outs), (ins GR16:$dst),
@@ -285,7 +285,7 @@ let isCall = 1 in
 // Tail call stuff.
 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
     isCodeGenOnly = 1, Uses = [ESP, SSP] in {
-  def TCRETURNdi : PseudoI<(outs), (ins i32imm_pcrel:$dst, i32imm:$offset),
+  def TCRETURNdi : PseudoI<(outs), (ins i32imm_brtarget:$dst, i32imm:$offset),
                            []>, Sched<[WriteJump]>, NotMemoryFoldable;
   def TCRETURNri : PseudoI<(outs), (ins ptr_rc_tailcall:$dst, i32imm:$offset),
                            []>, Sched<[WriteJump]>, NotMemoryFoldable;
@@ -293,7 +293,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
   def TCRETURNmi : PseudoI<(outs), (ins i32mem_TC:$dst, i32imm:$offset),
                            []>, Sched<[WriteJumpLd]>;
 
-  def TAILJMPd : PseudoI<(outs), (ins i32imm_pcrel:$dst),
+  def TAILJMPd : PseudoI<(outs), (ins i32imm_brtarget:$dst),
                          []>, Sched<[WriteJump]>;
 
   def TAILJMPr : PseudoI<(outs), (ins ptr_rc_tailcall:$dst),
@@ -309,10 +309,11 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBranch = 1,
     isCodeGenOnly = 1, SchedRW = [WriteJump] in
   let Uses = [ESP, EFLAGS, SSP] in {
   def TCRETURNdicc : PseudoI<(outs),
-                     (ins i32imm_pcrel:$dst, i32imm:$offset, i32imm:$cond), []>;
+                     (ins i32imm_brtarget:$dst, i32imm:$offset, i32imm:$cond),
+                     []>;
 
   // This gets substituted to a conditional jump instruction in MC lowering.
-  def TAILJMPd_CC : PseudoI<(outs), (ins i32imm_pcrel:$dst, i32imm:$cond), []>;
+  def TAILJMPd_CC : PseudoI<(outs), (ins i32imm_brtarget:$dst, i32imm:$cond), []>;
 }
 
@@ -328,7 +329,7 @@ let isCall = 1, Uses = [RSP, SSP], SchedRW = [WriteJump] in {
   // that the offset between an arbitrary immediate and the call will fit in
   // the 32-bit pcrel field that we have.
   def CALL64pcrel32 : Ii32PCRel<0xE8, RawFrm,
-                                (outs), (ins i64i32imm_pcrel:$dst),
+                                (outs), (ins i64i32imm_brtarget:$dst),
                                 "call{q}\t$dst", []>, OpSize32,
                                 Requires<[In64BitMode]>;
   def CALL64r : I<0xFF, MRM2r, (outs), (ins GR64:$dst),
@@ -357,7 +358,7 @@ let isCall = 1, Uses = [RSP, SSP], SchedRW = [WriteJump] in {
 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
     isCodeGenOnly = 1, Uses = [RSP, SSP] in {
   def TCRETURNdi64   : PseudoI<(outs),
-                               (ins i64i32imm_pcrel:$dst, i32imm:$offset),
+                               (ins i64i32imm_brtarget:$dst, i32imm:$offset),
                                []>, Sched<[WriteJump]>;
   def TCRETURNri64   : PseudoI<(outs),
                                (ins ptr_rc_tailcall:$dst, i32imm:$offset),
@@ -367,7 +368,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1,
                                (ins i64mem_TC:$dst, i32imm:$offset),
                                []>, Sched<[WriteJumpLd]>, NotMemoryFoldable;
 
-  def TAILJMPd64 : PseudoI<(outs), (ins i64i32imm_pcrel:$dst),
+  def TAILJMPd64 : PseudoI<(outs), (ins i64i32imm_brtarget:$dst),
                            []>, Sched<[WriteJump]>;
 
   def TAILJMPr64 : PseudoI<(outs), (ins ptr_rc_tailcall:$dst),
@@ -415,10 +416,10 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBranch = 1,
     isCodeGenOnly = 1, SchedRW = [WriteJump] in
   let Uses = [RSP, EFLAGS, SSP] in {
   def TCRETURNdi64cc : PseudoI<(outs),
-                               (ins i64i32imm_pcrel:$dst, i32imm:$offset,
+                               (ins i64i32imm_brtarget:$dst, i32imm:$offset,
                                 i32imm:$cond), []>;
 
   // This gets substituted to a conditional jump instruction in MC lowering.
   def TAILJMPd64_CC : PseudoI<(outs),
-                              (ins i64i32imm_pcrel:$dst, i32imm:$cond), []>;
+                              (ins i64i32imm_brtarget:$dst, i32imm:$cond), []>;
 }
diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td
index 8d7f7c8c787..ca5425e8b89 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/llvm/lib/Target/X86/X86InstrInfo.td
@@ -454,18 +454,6 @@ def i64mem_TC : Operand<i64> {
   let OperandType = "OPERAND_MEMORY";
 }
 
-let OperandType = "OPERAND_PCREL",
-    ParserMatchClass = X86AbsMemAsmOperand,
-    PrintMethod = "printPCRelImm" in {
-def i32imm_pcrel : Operand<i32>;
-def i16imm_pcrel : Operand<i16>;
-
-// Branch targets have OtherVT type and print as pc-relative values.
-def brtarget : Operand<OtherVT>;
-def brtarget8 : Operand<OtherVT>;
-
-}
-
 // Special parser to detect 16-bit mode to select 16-bit displacement.
 def X86AbsMem16AsmOperand : AsmOperandClass {
   let Name = "AbsMem16";
@@ -473,14 +461,26 @@ def X86AbsMem16AsmOperand : AsmOperandClass {
   let SuperClasses = [X86AbsMemAsmOperand];
 }
 
-// Branch targets have OtherVT type and print as pc-relative values.
-let OperandType = "OPERAND_PCREL",
-    PrintMethod = "printPCRelImm" in {
-let ParserMatchClass = X86AbsMem16AsmOperand in
-  def brtarget16 : Operand<OtherVT>;
-let ParserMatchClass = X86AbsMemAsmOperand in
-  def brtarget32 : Operand<OtherVT>;
+// Branch targets print as pc-relative values.
+class BranchTargetOperand<ValueType ty> : Operand<ty> {
+  let OperandType = "OPERAND_PCREL";
+  let PrintMethod = "printPCRelImm";
+  let ParserMatchClass = X86AbsMemAsmOperand;
+}
+
+def i32imm_brtarget : BranchTargetOperand<i32>;
+def i16imm_brtarget : BranchTargetOperand<i16>;
+
+// 64-bits but only 32 bits are significant, and those bits are treated as being
+// pc relative.
+def i64i32imm_brtarget : BranchTargetOperand<i64>;
+
+def brtarget : BranchTargetOperand<OtherVT>;
+def brtarget8 : BranchTargetOperand<OtherVT>;
+def brtarget16 : BranchTargetOperand<OtherVT> {
+  let ParserMatchClass = X86AbsMem16AsmOperand;
 }
+def brtarget32 : BranchTargetOperand<OtherVT>;
 
 let RenderMethod = "addSrcIdxOperands" in {
   def X86SrcIdx8Operand : AsmOperandClass {
@@ -756,14 +756,6 @@ def i64u8imm : Operand<i64> {
   let OperandType = "OPERAND_IMMEDIATE";
 }
 
-// 64-bits but only 32 bits are significant, and those bits are treated as being
-// pc relative.
-def i64i32imm_pcrel : Operand<i64> {
-  let PrintMethod = "printPCRelImm";
-  let ParserMatchClass = X86AbsMemAsmOperand;
-  let OperandType = "OPERAND_PCREL";
-}
-
 def lea64_32mem : Operand<i32> {
   let PrintMethod = "printanymem";
   let MIOperandInfo = (ops GR64, i8imm, GR64_NOSP, i32imm, SEGMENT_REG);
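
For contrast with the call-only handling removed above, here is a minimal sketch of a non-call branch target, the kind of operand this patch now also covers (the behavior tracked in llvm.org/PR44272). It is not the regression test added by the commit, and the names are invented; it again assumes a 32-bit x86 target and a compiler with MS-style inline assembly support, such as clang-cl.

// Hypothetical example; not the PR44272 regression test.
extern "C" void target_fn();   // assumed external function, illustrative only

void jumper() {
  __asm {
    jmp target_fn   ; a jump's branch-target operand is now rewritten the same way
  }
}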