 llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp | 45
 llvm/test/Transforms/InstCombine/2010-08-19-StoreNarrowing.ll  | 21
 2 files changed, 0 insertions, 66 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 2f3d8b2b3e3..b68fbc2db5c 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -14,13 +14,11 @@
 #include "InstCombine.h"
 #include "llvm/IntrinsicInst.h"
 #include "llvm/Analysis/Loads.h"
-#include "llvm/Support/PatternMatch.h"
 #include "llvm/Target/TargetData.h"
 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include "llvm/ADT/Statistic.h"
 using namespace llvm;
-using namespace PatternMatch;
 
 STATISTIC(NumDeadStore, "Number of dead stores eliminated");
 
@@ -475,49 +473,6 @@ Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
 
   if (SI.isVolatile()) return 0;  // Don't hack volatile stores.
 
-  // Attempt to narrow sequences where we load a wide value, perform bitmasks
-  // that only affect the low bits of it, and then store it back.  This
-  // typically arises from bitfield initializers in C++.
-  ConstantInt *CI1 =0, *CI2 = 0;
-  Value *Ld = 0;
-  if (getTargetData() &&
-      match(SI.getValueOperand(),
-            m_And(m_Or(m_Value(Ld), m_ConstantInt(CI1)), m_ConstantInt(CI2))) &&
-      isa<LoadInst>(Ld) &&
-      equivalentAddressValues(cast<LoadInst>(Ld)->getPointerOperand(), Ptr)) {
-    APInt OrMask = CI1->getValue();
-    APInt AndMask = CI2->getValue();
-
-    // Compute the prefix of the value that is unmodified by the bitmasking.
-    unsigned LeadingAndOnes = AndMask.countLeadingOnes();
-    unsigned LeadingOrZeros = OrMask.countLeadingZeros();
-    unsigned Prefix = std::min(LeadingAndOnes, LeadingOrZeros);
-    uint64_t NewWidth = AndMask.getBitWidth() - Prefix;
-    if (!isPowerOf2_64(NewWidth)) NewWidth = NextPowerOf2(NewWidth);
-
-    // If we can find a power-of-2 prefix (and if the values we're working with
-    // are themselves POT widths), then we can narrow the store.  We rely on
-    // later iterations of instcombine to propagate the demanded bits to narrow
-    // the other computations in the chain.
-    if (NewWidth < AndMask.getBitWidth() &&
-        isPowerOf2_64(AndMask.getBitWidth())) {
-      const Type *NewType = IntegerType::get(Ptr->getContext(), NewWidth);
-      const Type *NewPtrType = PointerType::getUnqual(NewType);
-
-      Value *NewVal = Builder->CreateTrunc(SI.getValueOperand(), NewType);
-      Value *NewPtr = Builder->CreateBitCast(Ptr, NewPtrType);
-
-      // On big endian targets, we need to offset from the original pointer
-      // in order to store to the low-bit suffix.
-      if (getTargetData()->isBigEndian()) {
-        uint64_t GEPOffset = (AndMask.getBitWidth() - NewWidth) / 8;
-        NewPtr = Builder->CreateConstGEP1_64(NewPtr, GEPOffset);
-      }
-
-      return new StoreInst(NewVal, NewPtr);
-    }
-  }
-
   // store X, null    -> turns into 'unreachable' in SimplifyCFG
   if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
     if (!isa<UndefValue>(Val)) {
diff --git a/llvm/test/Transforms/InstCombine/2010-08-19-StoreNarrowing.ll b/llvm/test/Transforms/InstCombine/2010-08-19-StoreNarrowing.ll
index d4d528943fe..e69de29bb2d 100644
--- a/llvm/test/Transforms/InstCombine/2010-08-19-StoreNarrowing.ll
+++ b/llvm/test/Transforms/InstCombine/2010-08-19-StoreNarrowing.ll
@@ -1,21 +0,0 @@
-; RUN: opt -S -instcombine %s | not grep and
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-target triple = "x86_64-apple-darwin10.0.0"
-
-%class.A = type { i8, [3 x i8] }
-
-define void @_ZN1AC2Ev(%class.A* %this) nounwind ssp align 2 {
-entry:
-  %0 = bitcast %class.A* %this to i32*            ; <i32*> [#uses=5]
-  %1 = load i32* %0, align 4                      ; <i32> [#uses=1]
-  %2 = and i32 %1, -8                             ; <i32> [#uses=2]
-  store i32 %2, i32* %0, align 4
-  %3 = and i32 %2, -57                            ; <i32> [#uses=1]
-  %4 = or i32 %3, 8                               ; <i32> [#uses=2]
-  store i32 %4, i32* %0, align 4
-  %5 = and i32 %4, -65                            ; <i32> [#uses=2]
-  store i32 %5, i32* %0, align 4
-  %6 = and i32 %5, -129                           ; <i32> [#uses=1]
-  store i32 %6, i32* %0, align 4
-  ret void
-}
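For reference, the code being removed rewrote wide stores whose value was only modified in its low bits. Below is a minimal sketch of the before/after IR, assuming a little-endian target; the function and value names (@set_low_byte, %old, %set, %new, %new.tr, %p8) are illustrative and not taken from the commit.

; Input of the kind the removed code matched: only the low byte of *%p changes.
define void @set_low_byte(i32* %p) nounwind {
entry:
  %old = load i32* %p, align 4
  %set = or i32 %old, 10            ; OrMask  = 0x0000000A, 28 leading zeros
  %new = and i32 %set, -241         ; AndMask = 0xFFFFFF0F, 24 leading ones
  store i32 %new, i32* %p, align 4  ; 24-bit untouched prefix -> NewWidth = 8
  ret void
}

; Roughly what the removed code emitted in place of the i32 store
; (on big-endian targets it also offset the new pointer by (32-8)/8 = 3 bytes):
  %new.tr = trunc i32 %new to i8
  %p8 = bitcast i32* %p to i8*
  store i8 %new.tr, i8* %p8

Later instcombine iterations were then expected to shrink the remaining load/or/and chain via the narrowed demanded bits, which is why the deleted test checks that no `and` survives.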

