summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorCraig Topper <craig.topper@gmail.com>2016-08-01 04:29:13 +0000
committerCraig Topper <craig.topper@gmail.com>2016-08-01 04:29:13 +0000
commit394617631474d0dea5b6ada812e68fd37a3e138c (patch)
treefaa65218dac6429f3f83d681b5f7ebbe435fda5a
parentda50eec26d8474a6ae19e9052ff1da7b7c2049e5 (diff)
downloadbcm5719-llvm-394617631474d0dea5b6ada812e68fd37a3e138c.tar.gz
bcm5719-llvm-394617631474d0dea5b6ada812e68fd37a3e138c.zip
[AVX-512] Use FR32X/FR64X/VR128X/VR256X register classes in addRegisterClass if AVX512(for FR32X/FR64) or VLX(for VR128X/VR256) is supported. This is a minimal requirement to be able to allocate all 32 registers.
llvm-svn: 277319
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.cpp45
1 file changed, 30 insertions(+), 15 deletions(-)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 23f2ee98eae..4cf2639a481 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -484,8 +484,10 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
if (!Subtarget.useSoftFloat() && X86ScalarSSEf64) {
// f32 and f64 use SSE.
// Set up the FP register classes.
- addRegisterClass(MVT::f32, &X86::FR32RegClass);
- addRegisterClass(MVT::f64, &X86::FR64RegClass);
+ addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
+ : &X86::FR32RegClass);
+ addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
+ : &X86::FR64RegClass);
for (auto VT : { MVT::f32, MVT::f64 }) {
// Use ANDPD to simulate FABS.
@@ -514,7 +516,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
} else if (UseX87 && X86ScalarSSEf32) {
// Use SSE for f32, x87 for f64.
// Set up the FP register classes.
- addRegisterClass(MVT::f32, &X86::FR32RegClass);
+ addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
+ : &X86::FR32RegClass);
addRegisterClass(MVT::f64, &X86::RFP64RegClass);
// Use ANDPS to simulate FABS.
@@ -717,7 +720,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
}
if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
- addRegisterClass(MVT::v4f32, &X86::VR128RegClass);
+ addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
+ : &X86::VR128RegClass);
setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
setOperationAction(ISD::FABS, MVT::v4f32, Custom);
@@ -730,14 +734,19 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
}
if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
- addRegisterClass(MVT::v2f64, &X86::VR128RegClass);
+ addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
+ : &X86::VR128RegClass);
// FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
// registers cannot be used even for integer operations.
- addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
- addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
- addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
- addRegisterClass(MVT::v2i64, &X86::VR128RegClass);
+ addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
+ : &X86::VR128RegClass);
+ addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
+ : &X86::VR128RegClass);
+ addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
+ : &X86::VR128RegClass);
+ addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
+ : &X86::VR128RegClass);
setOperationAction(ISD::MUL, MVT::v16i8, Custom);
setOperationAction(ISD::MUL, MVT::v4i32, Custom);
@@ -945,12 +954,18 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
if (!Subtarget.useSoftFloat() && Subtarget.hasFp256()) {
bool HasInt256 = Subtarget.hasInt256();
- addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
- addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
- addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
- addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
- addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
- addRegisterClass(MVT::v4f64, &X86::VR256RegClass);
+ addRegisterClass(MVT::v32i8, Subtarget.hasVLX() ? &X86::VR256XRegClass
+ : &X86::VR256RegClass);
+ addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
+ : &X86::VR256RegClass);
+ addRegisterClass(MVT::v8i32, Subtarget.hasVLX() ? &X86::VR256XRegClass
+ : &X86::VR256RegClass);
+ addRegisterClass(MVT::v8f32, Subtarget.hasVLX() ? &X86::VR256XRegClass
+ : &X86::VR256RegClass);
+ addRegisterClass(MVT::v4i64, Subtarget.hasVLX() ? &X86::VR256XRegClass
+ : &X86::VR256RegClass);
+ addRegisterClass(MVT::v4f64, Subtarget.hasVLX() ? &X86::VR256XRegClass
+ : &X86::VR256RegClass);
for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
setOperationAction(ISD::FFLOOR, VT, Legal);
OpenPOWER on IntegriCloud