summaryrefslogtreecommitdiffstats
path: root/llvm/lib/Target/AArch64
diff options
context:
space:
mode:
authorLuke Geeson <luke.geeson@arm.com>2018-06-27 09:20:13 +0000
committerLuke Geeson <luke.geeson@arm.com>2018-06-27 09:20:13 +0000
commit68cb233c0f856bb1570abde4b458ce1cb350974b (patch)
treeb2faabacb6f713f86ba5333601ae736c12ffc738 /llvm/lib/Target/AArch64
parenta582419ac7ed6f65c3da05392f3b06e71b626758 (diff)
downloadbcm5719-llvm-68cb233c0f856bb1570abde4b458ce1cb350974b.tar.gz
bcm5719-llvm-68cb233c0f856bb1570abde4b458ce1cb350974b.zip
[AArch64] Remove Duplicate FP16 Patterns with same encoding, match on existing patterns
llvm-svn: 335715
Diffstat (limited to 'llvm/lib/Target/AArch64')
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstrFormats.td21
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstrInfo.td51
-rw-r--r--llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp13
-rw-r--r--llvm/lib/Target/AArch64/AArch64RegisterInfo.h4
4 files changed, 54 insertions, 35 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
index be845e13d47..d71dc90b7a8 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -7923,27 +7923,6 @@ class BaseSIMDScalarShiftTied<bit U, bits<5> opc, bits<7> fixed_imm,
multiclass SIMDFPScalarRShift<bit U, bits<5> opc, string asm> {
let Predicates = [HasNEON, HasFullFP16] in {
- def HSr : BaseSIMDScalarShift<U, opc, {0,0,1,?,?,?,?},
- FPR16, FPR32, vecshiftR16, asm, []> {
- let Inst{19-16} = imm{3-0};
- let Inst{23-22} = 0b11;
- }
- def SHr : BaseSIMDScalarShift<U, opc, {0,0,1,?,?,?,?},
- FPR32, FPR16, vecshiftR32, asm, []> {
- let Inst{19-16} = imm{3-0};
- let Inst{22-21} = 0b01;
- }
- def HDr : BaseSIMDScalarShift<U, opc, {?,?,?,?,?,?,?},
- FPR16, FPR64, vecshiftR32, asm, []> {
- let Inst{21-16} = imm{5-0};
- let Inst{23-22} = 0b11;
- }
- def DHr : BaseSIMDScalarShift<U, opc, {1,1,1,?,?,?,?},
- FPR64, FPR16, vecshiftR64, asm, []> {
- let Inst{21-16} = imm{5-0};
- let Inst{23-22} = 0b01;
- let Inst{31} = 1;
- }
def h : BaseSIMDScalarShift<U, opc, {0,0,1,?,?,?,?},
FPR16, FPR16, vecshiftR16, asm, []> {
let Inst{19-16} = imm{3-0};
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 3327df5a470..aeae6aebf00 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -4958,16 +4958,6 @@ def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxs (v1f64 FPR64:$Rn),
def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxu (v1f64 FPR64:$Rn),
vecshiftR64:$imm)),
(FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
-def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
- (FCVTZSHDr (i64 FPR64:$Rn), vecshiftR32:$imm)>;
-def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxu FPR16:$Rn, vecshiftR32:$imm)),
- (FCVTZUSHr FPR16:$Rn, vecshiftR32:$imm)>;
-def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxs FPR16:$Rn, vecshiftR32:$imm)),
- (FCVTZSSHr FPR16:$Rn, vecshiftR32:$imm)>;
-def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR64:$imm)),
- (FCVTZSDHr (f16 FPR16:$Rn), vecshiftR64:$imm)>;
-def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR16:$imm)),
- (UCVTFHSr FPR32:$Rn, vecshiftR16:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR32:$imm),
(UCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(f64 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
@@ -4975,10 +4965,6 @@ def : Pat<(f64 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
def : Pat<(v1f64 (int_aarch64_neon_vcvtfxs2fp (v1i64 FPR64:$Rn),
vecshiftR64:$imm)),
(SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
-def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 FPR32:$Rn), vecshiftR16:$imm)),
- (SCVTFHSr FPR32:$Rn, vecshiftR16:$imm)>;
-def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR16:$imm)),
- (SCVTFHSr FPR32:$Rn, vecshiftR16:$imm)>;
def : Pat<(f64 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
(SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1f64 (int_aarch64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
@@ -4987,6 +4973,43 @@ def : Pat<(v1f64 (int_aarch64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
def : Pat<(int_aarch64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR32:$imm),
(SCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
+// Patterns for FP16 intrinsics - a reg copy to/from the FPR side is required, as i16 is not a supported type.
+
+def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 (sext_inreg FPR32:$Rn, i16)), vecshiftR16:$imm)),
+ (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
+def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 FPR32:$Rn), vecshiftR16:$imm)),
+ (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
+def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
+ (SCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
+def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp
+ (and FPR32:$Rn, (i32 65535)),
+ vecshiftR16:$imm)),
+ (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
+def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR16:$imm)),
+ (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
+def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
+ (UCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
+def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR32:$imm)),
+ (i32 (INSERT_SUBREG
+ (i32 (IMPLICIT_DEF)),
+ (FCVTZSh FPR16:$Rn, vecshiftR32:$imm),
+ hsub))>;
+def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR64:$imm)),
+ (i64 (INSERT_SUBREG
+ (i64 (IMPLICIT_DEF)),
+ (FCVTZSh FPR16:$Rn, vecshiftR64:$imm),
+ hsub))>;
+def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR32:$imm)),
+ (i32 (INSERT_SUBREG
+ (i32 (IMPLICIT_DEF)),
+ (FCVTZUh FPR16:$Rn, vecshiftR32:$imm),
+ hsub))>;
+def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR64:$imm)),
+ (i64 (INSERT_SUBREG
+ (i64 (IMPLICIT_DEF)),
+ (FCVTZUh FPR16:$Rn, vecshiftR64:$imm),
+ hsub))>;
+
defm SHL : SIMDScalarLShiftD< 0, 0b01010, "shl", AArch64vshl>;
defm SLI : SIMDScalarLShiftDTied<1, 0b01010, "sli">;
defm SQRSHRN : SIMDScalarRShiftBHS< 0, 0b10011, "sqrshrn",
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
index 9779f41804d..a7c2c1b8125 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.cpp
@@ -72,6 +72,19 @@ const MCPhysReg *AArch64RegisterInfo::getCalleeSavedRegsViaCopy(
return nullptr;
}
+const TargetRegisterClass *
+AArch64RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
+ unsigned Idx) const {
+  // Edge case for the GPR/FPR register classes: with the hsub sub-register
+  // index, map the all-GPR classes to the corresponding FPR classes.
+ if (RC == &AArch64::GPR32allRegClass && Idx == AArch64::hsub)
+ return &AArch64::FPR32RegClass;
+ else if (RC == &AArch64::GPR64allRegClass && Idx == AArch64::hsub)
+ return &AArch64::FPR64RegClass;
+
+ // Forward to TableGen's default version.
+ return AArch64GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
+}
+
const uint32_t *
AArch64RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
CallingConv::ID CC) const {
diff --git a/llvm/lib/Target/AArch64/AArch64RegisterInfo.h b/llvm/lib/Target/AArch64/AArch64RegisterInfo.h
index 799d8c81af0..57000d37090 100644
--- a/llvm/lib/Target/AArch64/AArch64RegisterInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64RegisterInfo.h
@@ -46,6 +46,10 @@ public:
return 5;
}
+ const TargetRegisterClass *
+ getSubClassWithSubReg(const TargetRegisterClass *RC,
+ unsigned Idx) const override;
+
// Calls involved in thread-local variable lookup save more registers than
// normal calls, so they need a different mask to represent this.
const uint32_t *getTLSCallPreservedMask() const;
OpenPOWER on IntegriCloud