diff options
| author | Javed Absar <javed.absar@arm.com> | 2019-04-23 09:39:58 +0000 | 
|---|---|---|
| committer | Javed Absar <javed.absar@arm.com> | 2019-04-23 09:39:58 +0000 | 
| commit | 1cdc3dbc58923f66758b9b718175db2e41e80b0d (patch) | |
| tree | 2e28a6b8f7c64fc31aac66a713cbf8bfb5bb1b3d /llvm/lib | |
| parent | 2619f399f99573609be11c608f5f20f1dab595f0 (diff) | |
| download | bcm5719-llvm-1cdc3dbc58923f66758b9b718175db2e41e80b0d.tar.gz bcm5719-llvm-1cdc3dbc58923f66758b9b718175db2e41e80b0d.zip | |
[AArch64] Add support for MTE intrinsics
This patch provides intrinsics support for Memory Tagging Extension (MTE),
which was introduced with the Armv8.5-a architecture.
The intrinsics are described in detail in the
ACLE Q1 2019 documentation: https://developer.arm.com/docs/101028/latest
Reviewed by: David Spickett
Differential Revision: https://reviews.llvm.org/D60486
llvm-svn: 358963
Diffstat (limited to 'llvm/lib')
| -rw-r--r-- | llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp | 63 | ||||
| -rw-r--r-- | llvm/lib/Target/AArch64/AArch64InstrFormats.td | 5 | ||||
| -rw-r--r-- | llvm/lib/Target/AArch64/AArch64InstrInfo.cpp | 16 | ||||
| -rw-r--r-- | llvm/lib/Target/AArch64/AArch64InstrInfo.td | 16 | 
4 files changed, 78 insertions, 22 deletions
| diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp index 8c794b9a3d4..23d8adcde39 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -91,6 +91,12 @@ public:    bool SelectAddrModeIndexed7S128(SDValue N, SDValue &Base, SDValue &OffImm) {      return SelectAddrModeIndexed7S(N, 16, Base, OffImm);    } +  bool SelectAddrModeIndexedS9S128(SDValue N, SDValue &Base, SDValue &OffImm) { +    return SelectAddrModeIndexedBitWidth(N, true, 9, 16, Base, OffImm); +  } +  bool SelectAddrModeIndexedU6S128(SDValue N, SDValue &Base, SDValue &OffImm) { +    return SelectAddrModeIndexedBitWidth(N, false, 6, 16, Base, OffImm); +  }    bool SelectAddrModeIndexed8(SDValue N, SDValue &Base, SDValue &OffImm) {      return SelectAddrModeIndexed(N, 1, Base, OffImm);    } @@ -179,7 +185,12 @@ private:    bool SelectShiftedRegister(SDValue N, bool AllowROR, SDValue &Reg,                               SDValue &Shift);    bool SelectAddrModeIndexed7S(SDValue N, unsigned Size, SDValue &Base, -                               SDValue &OffImm); +                               SDValue &OffImm) { +    return SelectAddrModeIndexedBitWidth(N, true, 7, Size, Base, OffImm); +  } +  bool SelectAddrModeIndexedBitWidth(SDValue N, bool IsSignedImm, unsigned BW, +                                     unsigned Size, SDValue &Base, +                                     SDValue &OffImm);    bool SelectAddrModeIndexed(SDValue N, unsigned Size, SDValue &Base,                               SDValue &OffImm);    bool SelectAddrModeUnscaled(SDValue N, unsigned Size, SDValue &Base, @@ -675,12 +686,13 @@ static bool isWorthFoldingADDlow(SDValue N) {    return true;  } -/// SelectAddrModeIndexed7S - Select a "register plus scaled signed 7-bit +/// SelectAddrModeIndexedBitWidth - Select a "register plus scaled (un)signed BW-bit  /// immediate" address.  
The "Size" argument is the size in bytes of the memory  /// reference, which determines the scale. -bool AArch64DAGToDAGISel::SelectAddrModeIndexed7S(SDValue N, unsigned Size, -                                                  SDValue &Base, -                                                  SDValue &OffImm) { +bool AArch64DAGToDAGISel::SelectAddrModeIndexedBitWidth(SDValue N, bool IsSignedImm, +                                                        unsigned BW, unsigned Size, +                                                        SDValue &Base, +                                                        SDValue &OffImm) {    SDLoc dl(N);    const DataLayout &DL = CurDAG->getDataLayout();    const TargetLowering *TLI = getTargetLowering(); @@ -693,24 +705,41 @@ bool AArch64DAGToDAGISel::SelectAddrModeIndexed7S(SDValue N, unsigned Size,    // As opposed to the (12-bit) Indexed addressing mode below, the 7-bit signed    // selected here doesn't support labels/immediates, only base+offset. 
-    if (CurDAG->isBaseWithConstantOffset(N)) {      if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N.getOperand(1))) { -      int64_t RHSC = RHS->getSExtValue(); -      unsigned Scale = Log2_32(Size); -      if ((RHSC & (Size - 1)) == 0 && RHSC >= -(0x40 << Scale) && -          RHSC < (0x40 << Scale)) { -        Base = N.getOperand(0); -        if (Base.getOpcode() == ISD::FrameIndex) { -          int FI = cast<FrameIndexSDNode>(Base)->getIndex(); -          Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL)); +      if (IsSignedImm) { +        int64_t RHSC = RHS->getSExtValue(); +        unsigned Scale = Log2_32(Size); +        int64_t Range = 0x1 << (BW-1); + +        if ((RHSC & (Size - 1)) == 0 && RHSC >= -(Range << Scale) && +            RHSC < (Range << Scale)) { +          Base = N.getOperand(0); +          if (Base.getOpcode() == ISD::FrameIndex) { +            int FI = cast<FrameIndexSDNode>(Base)->getIndex(); +            Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL)); +          } +          OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64); +          return true; +        } +      } else { +        // unsigned Immediate +        uint64_t RHSC = RHS->getZExtValue(); +        unsigned Scale = Log2_32(Size); +        uint64_t Range = 0x1 << BW; + +        if ((RHSC & (Size - 1)) == 0 && RHSC < (Range << Scale)) { +          Base = N.getOperand(0); +          if (Base.getOpcode() == ISD::FrameIndex) { +            int FI = cast<FrameIndexSDNode>(Base)->getIndex(); +            Base = CurDAG->getTargetFrameIndex(FI, TLI->getPointerTy(DL)); +          } +          OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64); +          return true;          } -        OffImm = CurDAG->getTargetConstant(RHSC >> Scale, dl, MVT::i64); -        return true;        }      }    } -    // Base only. The address will be materialized into a register before    // the memory is accessed.    
//    add x0, Xbase, #offset diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td index 09bed434ba1..74fa5ef713d 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td +++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td @@ -355,6 +355,9 @@ def am_indexed7s32  : ComplexPattern<i64, 2, "SelectAddrModeIndexed7S32", []>;  def am_indexed7s64  : ComplexPattern<i64, 2, "SelectAddrModeIndexed7S64", []>;  def am_indexed7s128 : ComplexPattern<i64, 2, "SelectAddrModeIndexed7S128", []>; +def am_indexedu6s128 : ComplexPattern<i64, 2, "SelectAddrModeIndexedU6S128", []>; +def am_indexeds9s128 : ComplexPattern<i64, 2, "SelectAddrModeIndexedS9S128", []>; +  // uimm5sN predicate - True if the immediate is a multiple of N in the range  // [0 * N, 32 * N].  def UImm5s2Operand : UImmScaledMemoryIndexed<5, 2>; @@ -2339,7 +2342,7 @@ class AddSubG<bit isSub, string asm_inst, SDPatternOperator OpNode>  }  class SUBP<bit setsFlags, string asm_instr, SDPatternOperator OpNode> -      : BaseTwoOperand<0b0000, GPR64, asm_instr, null_frag, GPR64sp, GPR64sp> { +      : BaseTwoOperand<0b0000, GPR64, asm_instr, OpNode, GPR64sp, GPR64sp> {    let Inst{31} = 1;    let Inst{29} = setsFlags;  } diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp index 2819c50c8aa..0b5c7bb811d 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp @@ -1768,7 +1768,11 @@ unsigned AArch64InstrInfo::getLoadStoreImmIdx(unsigned Opc) {    case AArch64::LDNPSi:    case AArch64::STNPWi:    case AArch64::STNPSi: +  case AArch64::LDG:      return 3; +  case AArch64::ADDG: +  case AArch64::STGOffset: +    return 2;    }  } @@ -2143,6 +2147,18 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, unsigned &Scale,      MinOffset = 0;      MaxOffset = 4095;      break; +  case AArch64::ADDG: +    Scale = 16; +    Width = 0; +    MinOffset = 0; +    MaxOffset = 63; 
+    break; +  case AArch64::LDG: +  case AArch64::STGOffset: +    Scale = Width = 16; +    MinOffset = -256; +    MaxOffset = 255; +    break;    }    return true; diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td index 7dad458d80e..fdf882c89ad 100644 --- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td +++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td @@ -1233,11 +1233,11 @@ defm : STOPregister<"stumin","LDUMIN">;// STUMINx  // v8.5 Memory Tagging Extension  let Predicates = [HasMTE] in { -def IRG   : BaseTwoOperand<0b0100, GPR64sp, "irg", null_frag, GPR64sp, GPR64>, +def IRG   : BaseTwoOperand<0b0100, GPR64sp, "irg", int_aarch64_irg, GPR64sp, GPR64>,              Sched<[]>{    let Inst{31} = 1;  } -def GMI   : BaseTwoOperand<0b0101, GPR64, "gmi", null_frag, GPR64sp>, Sched<[]>{ +def GMI   : BaseTwoOperand<0b0101, GPR64, "gmi", int_aarch64_gmi, GPR64sp>, Sched<[]>{    let Inst{31} = 1;    let isNotDuplicable = 1;  } @@ -1246,7 +1246,7 @@ def SUBG  : AddSubG<1, "subg", null_frag>;  def : InstAlias<"irg $dst, $src", (IRG GPR64sp:$dst, GPR64sp:$src, XZR), 1>; -def SUBP : SUBP<0, "subp", null_frag>, Sched<[]>; +def SUBP : SUBP<0, "subp", int_aarch64_subp>, Sched<[]>;  def SUBPS : SUBP<1, "subps", null_frag>, Sched<[]>{    let Defs = [NZCV];  } @@ -1254,13 +1254,18 @@ def SUBPS : SUBP<1, "subps", null_frag>, Sched<[]>{  def : InstAlias<"cmpp $lhs, $rhs", (SUBPS XZR, GPR64sp:$lhs, GPR64sp:$rhs), 0>;  def LDG : MemTagLoad<"ldg", "\t$Rt, [$Rn, $offset]">; + +def : Pat<(int_aarch64_addg (am_indexedu6s128 GPR64sp:$Rn, uimm6s16:$imm6), imm0_15:$imm4), +          (ADDG GPR64sp:$Rn, imm0_63:$imm6, imm0_15:$imm4)>; +def : Pat<(int_aarch64_ldg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn,  simm9s16:$offset)), +          (LDG GPR64:$Rt, GPR64sp:$Rn,  simm9s16:$offset)>; +  def : InstAlias<"ldg $Rt, [$Rn]", (LDG GPR64:$Rt, GPR64sp:$Rn, 0), 1>;  def LDGM : MemTagVector<1, "ldgm", "\t$Rt, [$Rn]",                     (outs GPR64:$Rt), 
(ins GPR64sp:$Rn)>;  def STGM : MemTagVector<0, "stgm", "\t$Rt, [$Rn]",                     (outs), (ins GPR64:$Rt, GPR64sp:$Rn)>; -  def STZGM : MemTagVector<0, "stzgm", "\t$Rt, [$Rn]",                     (outs), (ins GPR64:$Rt, GPR64sp:$Rn)> {    let Inst{23} = 0; @@ -1275,6 +1280,9 @@ defm STGP     : StorePairOffset <0b01, 0, GPR64z, simm7s16, "stgp">;  def  STGPpre  : StorePairPreIdx <0b01, 0, GPR64z, simm7s16, "stgp">;  def  STGPpost : StorePairPostIdx<0b01, 0, GPR64z, simm7s16, "stgp">; +def : Pat<(int_aarch64_stg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn, simm9s16:$offset)), +          (STGOffset GPR64:$Rt, GPR64sp:$Rn,  simm9s16:$offset)>; +  } // Predicates = [HasMTE]  //===----------------------------------------------------------------------===// | 

