author     Cong Hou <congh@google.com>    2015-08-05 22:01:20 +0000
committer  Cong Hou <congh@google.com>    2015-08-05 22:01:20 +0000
commit     36e7e52aa4f8d79c898d74f93711c4a0c78e253f (patch)
tree       2adf8ae761c69bf9271220f2371bf51410f463f5 /llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp
parent     758f3f542af978ec48745a01c998ccbbc7a7a077 (diff)
Record whether the weights on out-edges from a MBB are normalized.
1. Create a utility function normalizeEdgeWeights() in MachineBranchProbabilityInfo that normalizes a list of edge weights so that their sum fits in uint32_t (a rough sketch of this step follows after the commit message).
2. Provide an interface in MachineBasicBlock to normalize its successors' weights.
3. Add a flag in MachineBasicBlock that tracks whether its successors' weights are normalized.
4. Provide an overload of getSumForBlock() that accepts a non-const pointer to a MBB so that it can force normalization of that MBB's successors' weights.
5. Update several uses of getSumForBlock() by eliminating the weight scale that was previously needed.

Differential Revision: http://reviews.llvm.org/D11442

llvm-svn: 244154
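The normalizeEdgeWeights() utility from item 1 is declared elsewhere and does not appear in this file's hunk. The following is a minimal, standalone sketch of the kind of scaling it performs, inferred from the description above and from the Scale handling kept in the const getSumForBlock() overload; the function name, container type, and rounding details here are assumptions, not the in-tree implementation.

#include <cstdint>
#include <limits>
#include <numeric>
#include <vector>

// Scale a list of edge weights down so that their sum fits in uint32_t.
// Returns the scale that was applied (1 when no scaling was needed).
static uint32_t normalizeEdgeWeightsSketch(std::vector<uint32_t> &Weights) {
  // Accumulate in 64 bits so the intermediate sum cannot overflow for any
  // realistic number of successors.
  uint64_t Sum =
      std::accumulate(Weights.begin(), Weights.end(), static_cast<uint64_t>(0));
  if (Sum <= std::numeric_limits<uint32_t>::max())
    return 1;

  // Pick the smallest integer scale that brings the sum back into range and
  // divide every weight by it, mirroring the scaling that the old
  // getSumForBlock() applied inline.
  uint32_t Scale =
      static_cast<uint32_t>(Sum / std::numeric_limits<uint32_t>::max()) + 1;
  for (uint32_t &W : Weights)
    W /= Scale;
  return Scale;
}

Returning the applied scale is what lets the const overload in the diff below keep reporting a meaningful Scale to its callers without mutating the block.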
Diffstat (limited to 'llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp')
-rw-r--r--  llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp | 51
1 file changed, 25 insertions(+), 26 deletions(-)
diff --git a/llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp b/llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp
index 6fbc2be7048..fe03d4d0b5f 100644
--- a/llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp
+++ b/llvm/lib/CodeGen/MachineBranchProbabilityInfo.cpp
@@ -28,36 +28,35 @@ char MachineBranchProbabilityInfo::ID = 0;
void MachineBranchProbabilityInfo::anchor() { }
-uint32_t MachineBranchProbabilityInfo::
-getSumForBlock(const MachineBasicBlock *MBB, uint32_t &Scale) const {
- // First we compute the sum with 64-bits of precision, ensuring that cannot
- // overflow by bounding the number of weights considered. Hopefully no one
- // actually needs 2^32 successors.
- assert(MBB->succ_size() < UINT32_MAX);
- uint64_t Sum = 0;
- Scale = 1;
+uint32_t
+MachineBranchProbabilityInfo::getSumForBlock(MachineBasicBlock *MBB) const {
+ // Normalize the weights of MBB's all successors so that the sum is guaranteed
+ // to be no greater than UINT32_MAX.
+ MBB->normalizeSuccWeights();
+
+ SmallVector<uint32_t, 8> Weights;
for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
- E = MBB->succ_end(); I != E; ++I) {
- uint32_t Weight = getEdgeWeight(MBB, I);
- Sum += Weight;
- }
+ E = MBB->succ_end();
+ I != E; ++I)
+ Weights.push_back(getEdgeWeight(MBB, I));
- // If the computed sum fits in 32-bits, we're done.
- if (Sum <= UINT32_MAX)
- return Sum;
+ return std::accumulate(Weights.begin(), Weights.end(), 0u);
+}
- // Otherwise, compute the scale necessary to cause the weights to fit, and
- // re-sum with that scale applied.
- assert((Sum / UINT32_MAX) < UINT32_MAX);
- Scale = (Sum / UINT32_MAX) + 1;
- Sum = 0;
+uint32_t
+MachineBranchProbabilityInfo::getSumForBlock(const MachineBasicBlock *MBB,
+ uint32_t &Scale) const {
+ SmallVector<uint32_t, 8> Weights;
for (MachineBasicBlock::const_succ_iterator I = MBB->succ_begin(),
- E = MBB->succ_end(); I != E; ++I) {
- uint32_t Weight = getEdgeWeight(MBB, I);
- Sum += Weight / Scale;
- }
- assert(Sum <= UINT32_MAX);
- return Sum;
+ E = MBB->succ_end();
+ I != E; ++I)
+ Weights.push_back(getEdgeWeight(MBB, I));
+
+ if (MBB->areSuccWeightsNormalized())
+ Scale = 1;
+ else
+ Scale = MachineBranchProbabilityInfo::normalizeEdgeWeights(Weights);
+ return std::accumulate(Weights.begin(), Weights.end(), 0u);
}
uint32_t MachineBranchProbabilityInfo::
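For orientation, here is a hedged sketch of how callers could use the two getSumForBlock() overloads after this patch. The wrapper function exampleSums and its parameters are hypothetical, and it assumes the overloads are accessible to the calling code, as the existing Scale-taking overload already was.

// Illustrative only; builds against the LLVM tree of this era, not standalone.
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineBranchProbabilityInfo.h"
using namespace llvm;

static void exampleSums(const MachineBranchProbabilityInfo &MBPI,
                        MachineBasicBlock *MBB,
                        const MachineBasicBlock *ConstMBB) {
  // Non-const overload: normalizes MBB's successor weights in place first, so
  // the returned sum is already guaranteed to fit in uint32_t.
  uint32_t Sum = MBPI.getSumForBlock(MBB);

  // Const overload: the block cannot be modified, so the scale needed to keep
  // the sum within uint32_t is reported through the out-parameter instead.
  uint32_t Scale;
  uint32_t ScaledSum = MBPI.getSumForBlock(ConstMBB, Scale);

  (void)Sum;
  (void)ScaledSum;
  (void)Scale;
}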