author     Florian Hahn <florian.hahn@arm.com>   2017-05-23 09:33:34 +0000
committer  Florian Hahn <florian.hahn@arm.com>   2017-05-23 09:33:34 +0000
commit     abb4218b988f6051cfc466a402b12dd91fb27c43 (patch)
tree       6096567314b53fd92a9124d6a1e28154955a210a
parent     617be6e47596306a03b22f718dab00d986ac74a2 (diff)
[AArch64] Make instruction fusion more aggressive.
Summary:
This patch makes instruction fusion more aggressive by:

* adding artificial edges between the successors of FirstSU and SecondSU,
  similar to BaseMemOpClusterMutation::clusterNeighboringMemOps;
* updating PostGenericScheduler::tryCandidate to keep clusters together,
  similar to GenericScheduler::tryCandidate.

This change increases the number of AES instruction pairs generated on
Cortex-A57 and Cortex-A72. It does not change the generated code for most
benchmarks or general code, but we have seen improvements on kernels using
AESE/AESMC and AESD/AESIMC.

Reviewers: evandro, kristof.beyls, t.p.northover, silviu.baranga, atrick, rengolin, MatzeB

Reviewed By: evandro

Subscribers: aemerson, rengolin, MatzeB, javed.absar, llvm-commits

Differential Revision: https://reviews.llvm.org/D33230

llvm-svn: 303618
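A minimal, self-contained sketch of the first point, using toy types rather than LLVM's real ScheduleDAG API (ToySUnit, addArtificialEdge and fusePair are hypothetical names used only for illustration): once a pair FirstSU/SecondSU is fused, every other successor of FirstSU also receives an ordering edge from SecondSU, so the scheduler cannot place an unrelated instruction between the two fused instructions.

// toy_fusion_sketch.cpp -- illustrative only, not LLVM code.
#include <cstdio>
#include <vector>

struct ToySUnit {
  int Id;
  std::vector<ToySUnit *> Succs; // successor nodes (toy stand-in for SDep edges)
};

// Record an "artificial" ordering edge From -> To, meaning To must be
// scheduled after From (toy analogue of
// DAG->addEdge(SuccSU, SDep(&SecondSU, SDep::Artificial))).
static void addArtificialEdge(ToySUnit &From, ToySUnit &To) {
  From.Succs.push_back(&To);
}

// Copy FirstSU's other successors onto SecondSU, mirroring the shape of the
// loop added to scheduleAdjacentImpl: afterwards nothing that depends on
// FirstSU can be scheduled between the fused pair.
static void fusePair(ToySUnit &FirstSU, ToySUnit &SecondSU) {
  for (ToySUnit *Succ : FirstSU.Succs) {
    if (!Succ || Succ == &SecondSU)
      continue;
    addArtificialEdge(SecondSU, *Succ);
    std::printf("  copy succ SU(%d) onto SU(%d)\n", Succ->Id, SecondSU.Id);
  }
}

int main() {
  // Toy DAG: SU0 = AESE, SU1 = AESMC (the fusion pair), SU2 = an unrelated
  // user of SU0's result that could otherwise slip between the pair.
  ToySUnit SU0{0, {}}, SU1{1, {}}, SU2{2, {}};
  SU0.Succs = {&SU1, &SU2};
  fusePair(SU0, SU1); // after this, SU2 also depends on SU1
  return 0;
}

In the actual patch the edge copying lives in scheduleAdjacentImpl (AArch64MacroFusion.cpp) and uses SDep::Artificial edges, while the second point is handled in PostGenericScheduler::tryCandidate, which prefers the next cluster successor so the post-RA scheduler issues the fused pair back to back.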
-rw-r--r--  llvm/lib/CodeGen/MachineScheduler.cpp               6
-rw-r--r--  llvm/lib/Target/AArch64/AArch64MacroFusion.cpp     13
-rw-r--r--  llvm/lib/Target/AArch64/AArch64TargetMachine.cpp    2
-rw-r--r--  llvm/test/CodeGen/AArch64/misched-fusion-aes.ll    145
4 files changed, 91 insertions, 75 deletions
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index 41e161f71e5..72b7ad47e09 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -3233,6 +3233,12 @@ void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
return;
+ // Keep clustered nodes together.
+ if (tryGreater(TryCand.SU == DAG->getNextClusterSucc(),
+ Cand.SU == DAG->getNextClusterSucc(),
+ TryCand, Cand, Cluster))
+ return;
+
// Avoid critical resource consumption and balance the schedule.
if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
TryCand, Cand, ResourceReduce))
diff --git a/llvm/lib/Target/AArch64/AArch64MacroFusion.cpp b/llvm/lib/Target/AArch64/AArch64MacroFusion.cpp
index a6926a6700e..3b71d529db5 100644
--- a/llvm/lib/Target/AArch64/AArch64MacroFusion.cpp
+++ b/llvm/lib/Target/AArch64/AArch64MacroFusion.cpp
@@ -232,6 +232,19 @@ static bool scheduleAdjacentImpl(ScheduleDAGMI *DAG, SUnit &AnchorSU) {
dbgs() << DAG->TII->getName(FirstMI->getOpcode()) << " - " <<
DAG->TII->getName(SecondMI->getOpcode()) << '\n'; );
+ if (&SecondSU != &DAG->ExitSU)
+ // Make instructions dependent on FirstSU also dependent on SecondSU to
+ // prevent them from being scheduled between FirstSU and SecondSU.
+ for (SUnit::const_succ_iterator
+ SI = FirstSU.Succs.begin(), SE = FirstSU.Succs.end();
+ SI != SE; ++SI) {
+ if (!SI->getSUnit() || SI->getSUnit() == &SecondSU)
+ continue;
+ DEBUG(dbgs() << " Copy Succ ";
+ SI->getSUnit()->print(dbgs(), DAG); dbgs() << '\n';);
+ DAG->addEdge(SI->getSUnit(), SDep(&SecondSU, SDep::Artificial));
+ }
+
++NumFused;
return true;
}
diff --git a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
index 132f192f2a9..82b1ff7b1fa 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp
@@ -277,7 +277,7 @@ public:
ScheduleDAGInstrs *
createPostMachineScheduler(MachineSchedContext *C) const override {
const AArch64Subtarget &ST = C->MF->getSubtarget<AArch64Subtarget>();
- if (ST.hasFuseLiterals()) {
+ if (ST.hasFuseAES() || ST.hasFuseLiterals()) {
// Run the Macro Fusion after RA again since literals are expanded from
// pseudos then (v. addPreSched2()).
ScheduleDAGMI *DAG = createGenericSchedPostRA(C);
diff --git a/llvm/test/CodeGen/AArch64/misched-fusion-aes.ll b/llvm/test/CodeGen/AArch64/misched-fusion-aes.ll
index 4c682e594e6..1d878721257 100644
--- a/llvm/test/CodeGen/AArch64/misched-fusion-aes.ll
+++ b/llvm/test/CodeGen/AArch64/misched-fusion-aes.ll
@@ -1,5 +1,5 @@
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a57 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKA57
-; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a72 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKA72
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a57 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKA57A72
+; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=cortex-a72 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKA57A72
; RUN: llc %s -o - -mtriple=aarch64-unknown -mcpu=exynos-m1 | FileCheck %s --check-prefix=CHECK --check-prefix=CHECKM1
declare <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %d, <16 x i8> %k)
@@ -72,55 +72,40 @@ define void @aesea(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d,
ret void
; CHECK-LABEL: aesea:
-; CHECKA57: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA57: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA57: aese [[VC:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA57-NEXT: aesmc {{v[0-7].16b}}, [[VC]]
-; CHECKA57: aesmc {{v[0-7].16b}}, [[VA]]
-; CHECKA57: aese [[VD:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA57-NEXT: aesmc {{v[0-7].16b}}, [[VD]]
-; CHECKA57: aesmc {{v[0-7].16b}}, [[VB]]
-; CHECKA57: aese [[VE:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA57-NEXT: aesmc {{v[0-7].16b}}, [[VE]]
-; CHECKA57: aese [[VF:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA57-NEXT: aesmc {{v[0-7].16b}}, [[VF]]
-; CHECKA57: aese [[VG:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA57-NEXT: aesmc {{v[0-7].16b}}, [[VG]]
-; CHECKA57: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA57-NEXT: aesmc {{v[0-7].16b}}, [[VH]]
-; CHECKA72: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VA]]
-; CHECKA72: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VB]]
-; CHECKA72: aese [[VC:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VC]]
-; CHECKA72: aese [[VD:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VD]]
-; CHECKA72: aese [[VE:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VE]]
-; CHECKA72: aese [[VF:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VF]]
-; CHECKA72: aese [[VG:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VG]]
-; CHECKA72: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA72-NEXT: aesmc {{v[0-7].16b}}, [[VH]]
+; CHECKA57A72: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57A72-NEXT: aesmc {{v[0-7].16b}}, [[VA]]
+; CHECKA57A72: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57A72-NEXT: aesmc {{v[0-7].16b}}, [[VB]]
+; CHECKA57A72: aese [[VC:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57A72-NEXT: aesmc {{v[0-7].16b}}, [[VC]]
+; CHECKA57A72: aese [[VD:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57A72-NEXT: aesmc {{v[0-7].16b}}, [[VD]]
+; CHECKA57A72: aese [[VE:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57A72-NEXT: aesmc {{v[0-7].16b}}, [[VE]]
+; CHECKA57A72: aese [[VF:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57A72-NEXT: aesmc {{v[0-7].16b}}, [[VF]]
+; CHECKA57A72: aese [[VG:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57A72-NEXT: aesmc {{v[0-7].16b}}, [[VG]]
+; CHECKA57A72: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57A72-NEXT: aesmc {{v[0-7].16b}}, [[VH]]
+
; CHECKM1: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1: aesmc {{v[0-7].16b}}, [[VA]]
+; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VA]]
+; CHECKM1: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
; CHECKM1: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VB]]
; CHECKM1: aese {{v[0-7].16b}}, {{v[0-7].16b}}
; CHECKM1: aese [[VC:v[0-7].16b]], {{v[0-7].16b}}
; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VC]]
; CHECKM1: aese [[VD:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1: aesmc {{v[0-7].16b}}, [[VD]]
+; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VD]]
+; CHECKM1: aesmc {{v[0-7].16b}}, [[VH]]
; CHECKM1: aese [[VE:v[0-7].16b]], {{v[0-7].16b}}
; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VE]]
; CHECKM1: aese [[VF:v[0-7].16b]], {{v[0-7].16b}}
; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VF]]
; CHECKM1: aese [[VG:v[0-7].16b]], {{v[0-7].16b}}
; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VG]]
-; CHECKM1: aese [[VH:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesmc {{v[0-7].16b}}, [[VH]]
}
define void @aesda(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d, <16 x i8> %e) {
@@ -188,53 +173,65 @@ define void @aesda(<16 x i8>* %a0, <16 x i8>* %b0, <16 x i8>* %c0, <16 x i8> %d,
ret void
; CHECK-LABEL: aesda:
-; CHECKA57: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA57: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA57: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA57-NEXT: aesimc {{v[0-7].16b}}, [[VC]]
-; CHECKA57: aesimc {{v[0-7].16b}}, [[VA]]
-; CHECKA57: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA57-NEXT: aesimc {{v[0-7].16b}}, [[VD]]
-; CHECKA57: aesimc {{v[0-7].16b}}, [[VB]]
-; CHECKA57: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA57-NEXT: aesimc {{v[0-7].16b}}, [[VE]]
-; CHECKA57: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA57-NEXT: aesimc {{v[0-7].16b}}, [[VF]]
-; CHECKA57: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA57-NEXT: aesimc {{v[0-7].16b}}, [[VG]]
-; CHECKA57: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA57-NEXT: aesimc {{v[0-7].16b}}, [[VH]]
-; CHECKA72: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VA]]
-; CHECKA72: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VB]]
-; CHECKA72: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VC]]
-; CHECKA72: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VD]]
-; CHECKA72: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VE]]
-; CHECKA72: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VF]]
-; CHECKA72: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VG]]
-; CHECKA72: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKA72-NEXT: aesimc {{v[0-7].16b}}, [[VH]]
+; CHECKA57A72: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57A72-NEXT: aesimc {{v[0-7].16b}}, [[VA]]
+; CHECKA57A72: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57A72-NEXT: aesimc {{v[0-7].16b}}, [[VB]]
+; CHECKA57A72: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57A72-NEXT: aesimc {{v[0-7].16b}}, [[VC]]
+; CHECKA57A72: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57A72-NEXT: aesimc {{v[0-7].16b}}, [[VD]]
+; CHECKA57A72: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57A72-NEXT: aesimc {{v[0-7].16b}}, [[VE]]
+; CHECKA57A72: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57A72-NEXT: aesimc {{v[0-7].16b}}, [[VF]]
+; CHECKA57A72: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57A72-NEXT: aesimc {{v[0-7].16b}}, [[VG]]
+; CHECKA57A72: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
+; CHECKA57A72-NEXT: aesimc {{v[0-7].16b}}, [[VH]]
+
; CHECKM1: aesd [[VA:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1: aesimc {{v[0-7].16b}}, [[VA]]
+; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VA]]
+; CHECKM1: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
; CHECKM1: aesd [[VB:v[0-7].16b]], {{v[0-7].16b}}
; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VB]]
; CHECKM1: aesd {{v[0-7].16b}}, {{v[0-7].16b}}
; CHECKM1: aesd [[VC:v[0-7].16b]], {{v[0-7].16b}}
; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VC]]
; CHECKM1: aesd [[VD:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1: aesimc {{v[0-7].16b}}, [[VD]]
+; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VD]]
+; CHECKM1: aesimc {{v[0-7].16b}}, [[VH]]
; CHECKM1: aesd [[VE:v[0-7].16b]], {{v[0-7].16b}}
; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VE]]
; CHECKM1: aesd [[VF:v[0-7].16b]], {{v[0-7].16b}}
; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VF]]
; CHECKM1: aesd [[VG:v[0-7].16b]], {{v[0-7].16b}}
; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VG]]
-; CHECKM1: aesd [[VH:v[0-7].16b]], {{v[0-7].16b}}
-; CHECKM1-NEXT: aesimc {{v[0-7].16b}}, [[VH]]
+}
+
+define void @aes_load_store(<16 x i8> *%p1, <16 x i8> *%p2 , <16 x i8> *%p3) {
+entry:
+ %x1 = alloca <16 x i8>, align 16
+ %x2 = alloca <16 x i8>, align 16
+ %x3 = alloca <16 x i8>, align 16
+ %x4 = alloca <16 x i8>, align 16
+ %x5 = alloca <16 x i8>, align 16
+ %in1 = load <16 x i8>, <16 x i8>* %p1, align 16
+ store <16 x i8> %in1, <16 x i8>* %x1, align 16
+ %aese1 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in1) #2
+ store <16 x i8> %aese1, <16 x i8>* %x2, align 16
+ %in2 = load <16 x i8>, <16 x i8>* %p2, align 16
+ %aesmc1= call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese1) #2
+ store <16 x i8> %aesmc1, <16 x i8>* %x3, align 16
+ %aese2 = call <16 x i8> @llvm.aarch64.crypto.aese(<16 x i8> %in1, <16 x i8> %in2) #2
+ store <16 x i8> %aese2, <16 x i8>* %x4, align 16
+ %aesmc2= call <16 x i8> @llvm.aarch64.crypto.aesmc(<16 x i8> %aese2) #2
+ store <16 x i8> %aesmc2, <16 x i8>* %x5, align 16
+ ret void
+
+; CHECK-LABEL: aes_load_store:
+; CHECK: aese [[VA:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc {{v[0-7].16b}}, [[VA]]
+; CHECK: aese [[VB:v[0-7].16b]], {{v[0-7].16b}}
+; CHECK-NEXT: aesmc {{v[0-7].16b}}, [[VB]]
}