Diffstat (limited to 'clang/lib')
-rw-r--r--  clang/lib/CodeGen/CGBlocks.cpp      |   2
-rw-r--r--  clang/lib/CodeGen/CGExpr.cpp        | 361
-rw-r--r--  clang/lib/CodeGen/CGExprComplex.cpp | 131
-rw-r--r--  clang/lib/CodeGen/CGExprScalar.cpp  | 367
4 files changed, 427 insertions, 434 deletions
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp index d180820ab4f..0c98c40866a 100644 --- a/clang/lib/CodeGen/CGBlocks.cpp +++ b/clang/lib/CodeGen/CGBlocks.cpp @@ -459,7 +459,7 @@ llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) { llvm::Value *BlockLiteral = LoadBlockStruct(); llvm::Value *V = Builder.CreateGEP(BlockLiteral, llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), - offset), + offset), "block.literal"); if (E->isByRef()) { bool needsCopyDispose = BlockRequiresCopying(E->getType()); diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp index d39e10f1f07..4a04bd3f200 100644 --- a/clang/lib/CodeGen/CGExpr.cpp +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -46,9 +46,9 @@ llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) { /// EmitAnyExpr - Emit code to compute the specified expression which can have /// any type. The result is returned as an RValue struct. If this is an -/// aggregate expression, the aggloc/agglocvolatile arguments indicate where -/// the result should be returned. -RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc, +/// aggregate expression, the aggloc/agglocvolatile arguments indicate where the +/// result should be returned. +RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc, bool IsAggLocVolatile, bool IgnoreResult, bool IsInitializer) { if (!hasAggregateLLVMType(E->getType())) @@ -56,23 +56,22 @@ RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc, else if (E->getType()->isAnyComplexType()) return RValue::getComplex(EmitComplexExpr(E, false, false, IgnoreResult, IgnoreResult)); - + EmitAggExpr(E, AggLoc, IsAggLocVolatile, IgnoreResult, IsInitializer); return RValue::getAggregate(AggLoc, IsAggLocVolatile); } -/// EmitAnyExprToTemp - Similary to EmitAnyExpr(), however, the result -/// will always be accessible even if no aggregate location is -/// provided. -RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E, +/// EmitAnyExprToTemp - Similary to EmitAnyExpr(), however, the result will +/// always be accessible even if no aggregate location is provided. +RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E, bool IsAggLocVolatile, bool IsInitializer) { llvm::Value *AggLoc = 0; - - if (hasAggregateLLVMType(E->getType()) && + + if (hasAggregateLLVMType(E->getType()) && !E->getType()->isAnyComplexType()) AggLoc = CreateTempAlloca(ConvertType(E->getType()), "agg.tmp"); - return EmitAnyExpr(E, AggLoc, IsAggLocVolatile, /*IgnoreResult=*/false, + return EmitAnyExpr(E, AggLoc, IsAggLocVolatile, /*IgnoreResult=*/false, IsInitializer); } @@ -92,15 +91,15 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E, // if B inherits from A. Val = EmitAnyExprToTemp(E, /*IsAggLocVolatile=*/false, IsInitializer); - + if (IsInitializer) { // We might have to destroy the temporary variable. if (const RecordType *RT = E->getType()->getAs<RecordType>()) { if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) { if (!ClassDecl->hasTrivialDestructor()) { - const CXXDestructorDecl *Dtor = + const CXXDestructorDecl *Dtor = ClassDecl->getDestructor(getContext()); - + CleanupScope scope(*this); EmitCXXDestructorCall(Dtor, Dtor_Complete, Val.getAggregateAddr()); } @@ -113,7 +112,7 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E, Val = RValue::get(Val.getAggregateAddr()); } else { // Create a temporary variable that we can bind the reference to. 
- llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()), + llvm::Value *Temp = CreateTempAlloca(ConvertTypeForMem(E->getType()), "reftmp"); if (Val.isScalar()) EmitStoreOfScalar(Val.getScalarVal(), Temp, false, E->getType()); @@ -126,13 +125,13 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E, } -/// getAccessedFieldNo - Given an encoded value and a result number, return -/// the input field number being accessed. -unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx, +/// getAccessedFieldNo - Given an encoded value and a result number, return the +/// input field number being accessed. +unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts) { if (isa<llvm::ConstantAggregateZero>(Elts)) return 0; - + return cast<llvm::ConstantInt>(Elts->getOperand(Idx))->getZExtValue(); } @@ -175,32 +174,31 @@ LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E, /// EmitLValue - Emit code to compute a designator that specifies the location /// of the expression. /// -/// This can return one of two things: a simple address or a bitfield -/// reference. In either case, the LLVM Value* in the LValue structure is -/// guaranteed to be an LLVM pointer type. +/// This can return one of two things: a simple address or a bitfield reference. +/// In either case, the LLVM Value* in the LValue structure is guaranteed to be +/// an LLVM pointer type. /// -/// If this returns a bitfield reference, nothing about the pointee type of -/// the LLVM value is known: For example, it may not be a pointer to an -/// integer. +/// If this returns a bitfield reference, nothing about the pointee type of the +/// LLVM value is known: For example, it may not be a pointer to an integer. /// -/// If this returns a normal address, and if the lvalue's C type is fixed -/// size, this method guarantees that the returned pointer type will point to -/// an LLVM type of the same size of the lvalue's type. If the lvalue has a -/// variable length type, this is not possible. +/// If this returns a normal address, and if the lvalue's C type is fixed size, +/// this method guarantees that the returned pointer type will point to an LLVM +/// type of the same size of the lvalue's type. If the lvalue has a variable +/// length type, this is not possible. 
/// LValue CodeGenFunction::EmitLValue(const Expr *E) { switch (E->getStmtClass()) { default: return EmitUnsupportedLValue(E, "l-value expression"); - case Expr::BinaryOperatorClass: + case Expr::BinaryOperatorClass: return EmitBinaryOperatorLValue(cast<BinaryOperator>(E)); - case Expr::CallExprClass: + case Expr::CallExprClass: case Expr::CXXMemberCallExprClass: case Expr::CXXOperatorCallExprClass: return EmitCallExprLValue(cast<CallExpr>(E)); case Expr::VAArgExprClass: return EmitVAArgExprLValue(cast<VAArgExpr>(E)); - case Expr::DeclRefExprClass: + case Expr::DeclRefExprClass: case Expr::QualifiedDeclRefExprClass: return EmitDeclRefLValue(cast<DeclRefExpr>(E)); case Expr::ParenExprClass:return EmitLValue(cast<ParenExpr>(E)->getSubExpr()); @@ -211,7 +209,7 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) { case Expr::ObjCEncodeExprClass: return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E)); - case Expr::BlockDeclRefExprClass: + case Expr::BlockDeclRefExprClass: return EmitBlockDeclRefLValue(cast<BlockDeclRefExpr>(E)); case Expr::CXXConditionDeclExprClass: @@ -224,7 +222,7 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) { case Expr::ObjCMessageExprClass: return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E)); - case Expr::ObjCIvarRefExprClass: + case Expr::ObjCIvarRefExprClass: return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E)); case Expr::ObjCPropertyRefExprClass: return EmitObjCPropertyRefLValue(cast<ObjCPropertyRefExpr>(E)); @@ -235,13 +233,13 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) { case Expr::StmtExprClass: return EmitStmtExprLValue(cast<StmtExpr>(E)); - case Expr::UnaryOperatorClass: + case Expr::UnaryOperatorClass: return EmitUnaryOpLValue(cast<UnaryOperator>(E)); case Expr::ArraySubscriptExprClass: return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E)); case Expr::ExtVectorElementExprClass: return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E)); - case Expr::MemberExprClass: + case Expr::MemberExprClass: return EmitMemberExpr(cast<MemberExpr>(E)); case Expr::CompoundLiteralExprClass: return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E)); @@ -268,52 +266,52 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile, if (Ty->isBooleanType()) if (V->getType() != llvm::Type::getInt1Ty(VMContext)) V = Builder.CreateTrunc(V, llvm::Type::getInt1Ty(VMContext), "tobool"); - + return V; } void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr, bool Volatile, QualType Ty) { - + if (Ty->isBooleanType()) { // Bool can have different representation in memory than in registers. const llvm::Type *SrcTy = Value->getType(); const llvm::PointerType *DstPtr = cast<llvm::PointerType>(Addr->getType()); if (DstPtr->getElementType() != SrcTy) { - const llvm::Type *MemTy = + const llvm::Type *MemTy = llvm::PointerType::get(SrcTy, DstPtr->getAddressSpace()); Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp"); } } - Builder.CreateStore(Value, Addr, Volatile); + Builder.CreateStore(Value, Addr, Volatile); } -/// EmitLoadOfLValue - Given an expression that represents a value lvalue, -/// this method emits the address of the lvalue, then loads the result as an -/// rvalue, returning the rvalue. +/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this +/// method emits the address of the lvalue, then loads the result as an rvalue, +/// returning the rvalue. RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) { if (LV.isObjCWeak()) { - // load of a __weak object. 
+ // load of a __weak object. llvm::Value *AddrWeakObj = LV.getAddress(); - llvm::Value *read_weak = CGM.getObjCRuntime().EmitObjCWeakRead(*this, + llvm::Value *read_weak = CGM.getObjCRuntime().EmitObjCWeakRead(*this, AddrWeakObj); return RValue::get(read_weak); } - + if (LV.isSimple()) { llvm::Value *Ptr = LV.getAddress(); const llvm::Type *EltTy = cast<llvm::PointerType>(Ptr->getType())->getElementType(); - + // Simple scalar l-value. if (EltTy->isSingleValueType()) - return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(), + return RValue::get(EmitLoadOfScalar(Ptr, LV.isVolatileQualified(), ExprType)); - + assert(ExprType->isFunctionType() && "Unknown scalar value"); return RValue::get(Ptr); } - + if (LV.isVectorElt()) { llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(), LV.isVolatileQualified(), "tmp"); @@ -342,58 +340,58 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV, unsigned BitfieldSize = LV.getBitfieldSize(); llvm::Value *Ptr = LV.getBitfieldAddr(); - const llvm::Type *EltTy = + const llvm::Type *EltTy = cast<llvm::PointerType>(Ptr->getType())->getElementType(); unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy); - // In some cases the bitfield may straddle two memory locations. - // Currently we load the entire bitfield, then do the magic to - // sign-extend it if necessary. This results in somewhat more code - // than necessary for the common case (one load), since two shifts - // accomplish both the masking and sign extension. + // In some cases the bitfield may straddle two memory locations. Currently we + // load the entire bitfield, then do the magic to sign-extend it if + // necessary. This results in somewhat more code than necessary for the common + // case (one load), since two shifts accomplish both the masking and sign + // extension. unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit); llvm::Value *Val = Builder.CreateLoad(Ptr, LV.isVolatileQualified(), "tmp"); - + // Shift to proper location. if (StartBit) - Val = Builder.CreateLShr(Val, llvm::ConstantInt::get(EltTy, StartBit), + Val = Builder.CreateLShr(Val, llvm::ConstantInt::get(EltTy, StartBit), "bf.lo"); - + // Mask off unused bits. - llvm::Constant *LowMask = llvm::ConstantInt::get(VMContext, + llvm::Constant *LowMask = llvm::ConstantInt::get(VMContext, llvm::APInt::getLowBitsSet(EltTySize, LowBits)); Val = Builder.CreateAnd(Val, LowMask, "bf.lo.cleared"); - + // Fetch the high bits if necessary. if (LowBits < BitfieldSize) { unsigned HighBits = BitfieldSize - LowBits; llvm::Value *HighPtr = Builder.CreateGEP(Ptr, llvm::ConstantInt::get( - llvm::Type::getInt32Ty(VMContext), 1), "bf.ptr.hi"); - llvm::Value *HighVal = Builder.CreateLoad(HighPtr, + llvm::Type::getInt32Ty(VMContext), 1), "bf.ptr.hi"); + llvm::Value *HighVal = Builder.CreateLoad(HighPtr, LV.isVolatileQualified(), "tmp"); - + // Mask off unused bits. llvm::Constant *HighMask = llvm::ConstantInt::get(VMContext, llvm::APInt::getLowBitsSet(EltTySize, HighBits)); HighVal = Builder.CreateAnd(HighVal, HighMask, "bf.lo.cleared"); // Shift to proper location and or in to bitfield value. - HighVal = Builder.CreateShl(HighVal, + HighVal = Builder.CreateShl(HighVal, llvm::ConstantInt::get(EltTy, LowBits)); Val = Builder.CreateOr(Val, HighVal, "bf.val"); } // Sign extend if necessary. 
if (LV.isBitfieldSigned()) { - llvm::Value *ExtraBits = llvm::ConstantInt::get(EltTy, + llvm::Value *ExtraBits = llvm::ConstantInt::get(EltTy, EltTySize - BitfieldSize); - Val = Builder.CreateAShr(Builder.CreateShl(Val, ExtraBits), + Val = Builder.CreateAShr(Builder.CreateShl(Val, ExtraBits), ExtraBits, "bf.val.sext"); } - // The bitfield type and the normal type differ when the storage sizes - // differ (currently just _Bool). + // The bitfield type and the normal type differ when the storage sizes differ + // (currently just _Bool). Val = Builder.CreateIntCast(Val, ConvertType(ExprType), false, "tmp"); return RValue::get(Val); @@ -415,11 +413,11 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV, QualType ExprType) { llvm::Value *Vec = Builder.CreateLoad(LV.getExtVectorAddr(), LV.isVolatileQualified(), "tmp"); - + const llvm::Constant *Elts = LV.getExtVectorElts(); - - // If the result of the expression is a non-vector type, we must be - // extracting a single element. Just codegen as an extractelement. + + // If the result of the expression is a non-vector type, we must be extracting + // a single element. Just codegen as an extractelement. const VectorType *ExprVT = ExprType->getAsVectorType(); if (!ExprVT) { unsigned InIdx = getAccessedFieldNo(0, Elts); @@ -430,14 +428,14 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV, // Always use shuffle vector to try to retain the original program structure unsigned NumResultElts = ExprVT->getNumElements(); - + llvm::SmallVector<llvm::Constant*, 4> Mask; for (unsigned i = 0; i != NumResultElts; ++i) { unsigned InIdx = getAccessedFieldNo(i, Elts); Mask.push_back(llvm::ConstantInt::get( llvm::Type::getInt32Ty(VMContext), InIdx)); } - + llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size()); Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()), @@ -450,7 +448,7 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV, /// EmitStoreThroughLValue - Store the specified rvalue into the specified /// lvalue, where both are guaranteed to the have the same type, and that type /// is 'Ty'. -void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, +void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, QualType Ty) { if (!Dst.isSimple()) { if (Dst.isVectorElt()) { @@ -462,7 +460,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, Builder.CreateStore(Vec, Dst.getVectorAddr(),Dst.isVolatileQualified()); return; } - + // If this is an update of extended vector elements, insert them as // appropriate. if (Dst.isExtVectorElt()) @@ -479,21 +477,21 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, assert(0 && "Unknown LValue type"); } - + if (Dst.isObjCWeak() && !Dst.isNonGC()) { - // load of a __weak object. + // load of a __weak object. llvm::Value *LvalueDst = Dst.getAddress(); llvm::Value *src = Src.getScalarVal(); CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst); return; } - + if (Dst.isObjCStrong() && !Dst.isNonGC()) { - // load of a __strong object. + // load of a __strong object. llvm::Value *LvalueDst = Dst.getAddress(); llvm::Value *src = Src.getScalarVal(); #if 0 - // FIXME. We cannot positively determine if we have an 'ivar' assignment, + // FIXME: We cannot positively determine if we have an 'ivar' assignment, // object assignment or an unknown assignment. For now, generate call to // objc_assign_strongCast assignment which is a safe, but consevative // assumption. 
@@ -508,25 +506,25 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst); return; } - + assert(Src.isScalar() && "Can't emit an agg store with this method"); EmitStoreOfScalar(Src.getScalarVal(), Dst.getAddress(), Dst.isVolatileQualified(), Ty); } void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, - QualType Ty, + QualType Ty, llvm::Value **Result) { unsigned StartBit = Dst.getBitfieldStartBit(); unsigned BitfieldSize = Dst.getBitfieldSize(); llvm::Value *Ptr = Dst.getBitfieldAddr(); - const llvm::Type *EltTy = + const llvm::Type *EltTy = cast<llvm::PointerType>(Ptr->getType())->getElementType(); unsigned EltTySize = CGM.getTargetData().getTypeSizeInBits(EltTy); - // Get the new value, cast to the appropriate type and masked to - // exactly the size of the bit-field. + // Get the new value, cast to the appropriate type and masked to exactly the + // size of the bit-field. llvm::Value *SrcVal = Src.getScalarVal(); llvm::Value *NewVal = Builder.CreateIntCast(SrcVal, EltTy, false, "tmp"); llvm::Constant *Mask = llvm::ConstantInt::get(VMContext, @@ -545,34 +543,33 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, unsigned SrcTySize = CGM.getTargetData().getTypeSizeInBits(SrcTy); llvm::Value *ExtraBits = llvm::ConstantInt::get(SrcTy, SrcTySize - BitfieldSize); - SrcTrunc = Builder.CreateAShr(Builder.CreateShl(SrcTrunc, ExtraBits), + SrcTrunc = Builder.CreateAShr(Builder.CreateShl(SrcTrunc, ExtraBits), ExtraBits, "bf.reload.sext"); } *Result = SrcTrunc; } - // In some cases the bitfield may straddle two memory locations. - // Emit the low part first and check to see if the high needs to be - // done. + // In some cases the bitfield may straddle two memory locations. Emit the low + // part first and check to see if the high needs to be done. unsigned LowBits = std::min(BitfieldSize, EltTySize - StartBit); llvm::Value *LowVal = Builder.CreateLoad(Ptr, Dst.isVolatileQualified(), "bf.prev.low"); // Compute the mask for zero-ing the low part of this bitfield. - llvm::Constant *InvMask = + llvm::Constant *InvMask = llvm::ConstantInt::get(VMContext, ~llvm::APInt::getBitsSet(EltTySize, StartBit, StartBit + LowBits)); - + // Compute the new low part as // LowVal = (LowVal & InvMask) | (NewVal << StartBit), // with the shift of NewVal implicitly stripping the high bits. - llvm::Value *NewLowVal = - Builder.CreateShl(NewVal, llvm::ConstantInt::get(EltTy, StartBit), - "bf.value.lo"); + llvm::Value *NewLowVal = + Builder.CreateShl(NewVal, llvm::ConstantInt::get(EltTy, StartBit), + "bf.value.lo"); LowVal = Builder.CreateAnd(LowVal, InvMask, "bf.prev.lo.cleared"); LowVal = Builder.CreateOr(LowVal, NewLowVal, "bf.new.lo"); - + // Write back. Builder.CreateStore(LowVal, Ptr, Dst.isVolatileQualified()); @@ -580,26 +577,26 @@ void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, if (LowBits < BitfieldSize) { unsigned HighBits = BitfieldSize - LowBits; llvm::Value *HighPtr = Builder.CreateGEP(Ptr, llvm::ConstantInt::get( - llvm::Type::getInt32Ty(VMContext), 1), "bf.ptr.hi"); - llvm::Value *HighVal = Builder.CreateLoad(HighPtr, + llvm::Type::getInt32Ty(VMContext), 1), "bf.ptr.hi"); + llvm::Value *HighVal = Builder.CreateLoad(HighPtr, Dst.isVolatileQualified(), "bf.prev.hi"); - + // Compute the mask for zero-ing the high part of this bitfield. 
- llvm::Constant *InvMask = - llvm::ConstantInt::get(VMContext, ~llvm::APInt::getLowBitsSet(EltTySize, + llvm::Constant *InvMask = + llvm::ConstantInt::get(VMContext, ~llvm::APInt::getLowBitsSet(EltTySize, HighBits)); - + // Compute the new high part as // HighVal = (HighVal & InvMask) | (NewVal lshr LowBits), // where the high bits of NewVal have already been cleared and the // shift stripping the low bits. - llvm::Value *NewHighVal = - Builder.CreateLShr(NewVal, llvm::ConstantInt::get(EltTy, LowBits), - "bf.value.high"); + llvm::Value *NewHighVal = + Builder.CreateLShr(NewVal, llvm::ConstantInt::get(EltTy, LowBits), + "bf.value.high"); HighVal = Builder.CreateAnd(HighVal, InvMask, "bf.prev.hi.cleared"); HighVal = Builder.CreateOr(HighVal, NewHighVal, "bf.new.hi"); - + // Write back. Builder.CreateStore(HighVal, HighPtr, Dst.isVolatileQualified()); } @@ -625,24 +622,24 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, llvm::Value *Vec = Builder.CreateLoad(Dst.getExtVectorAddr(), Dst.isVolatileQualified(), "tmp"); const llvm::Constant *Elts = Dst.getExtVectorElts(); - + llvm::Value *SrcVal = Src.getScalarVal(); - + if (const VectorType *VTy = Ty->getAsVectorType()) { unsigned NumSrcElts = VTy->getNumElements(); unsigned NumDstElts = cast<llvm::VectorType>(Vec->getType())->getNumElements(); if (NumDstElts == NumSrcElts) { - // Use shuffle vector is the src and destination are the same number - // of elements and restore the vector mask since it is on the side - // it will be stored. + // Use shuffle vector is the src and destination are the same number of + // elements and restore the vector mask since it is on the side it will be + // stored. llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts); for (unsigned i = 0; i != NumSrcElts; ++i) { unsigned InIdx = getAccessedFieldNo(i, Elts); Mask[InIdx] = llvm::ConstantInt::get( llvm::Type::getInt32Ty(VMContext), i); } - + llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size()); Vec = Builder.CreateShuffleVector(SrcVal, llvm::UndefValue::get(Vec->getType()), @@ -662,7 +659,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, llvm::Type::getInt32Ty(VMContext))); llvm::Value *ExtMaskV = llvm::ConstantVector::get(&ExtMask[0], ExtMask.size()); - llvm::Value *ExtSrcVal = + llvm::Value *ExtSrcVal = Builder.CreateShuffleVector(SrcVal, llvm::UndefValue::get(SrcVal->getType()), ExtMaskV, "tmp"); @@ -691,17 +688,17 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src, llvm::Type::getInt32Ty(VMContext), InIdx); Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp"); } - + Builder.CreateStore(Vec, Dst.getExtVectorAddr(), Dst.isVolatileQualified()); } LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { const VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()); - + if (VD && (VD->isBlockVarDecl() || isa<ParmVarDecl>(VD) || isa<ImplicitParamDecl>(VD))) { LValue LV; - bool NonGCable = VD->hasLocalStorage() && + bool NonGCable = VD->hasLocalStorage() && !VD->hasAttr<BlocksAttr>(); if (VD->hasExternalStorage()) { llvm::Value *V = CGM.GetAddrOfGlobalVar(VD); @@ -778,7 +775,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { } LValue CodeGenFunction::EmitBlockDeclRefLValue(const BlockDeclRefExpr *E) { - return LValue::MakeAddr(GetAddrOfBlockDecl(E), + return LValue::MakeAddr(GetAddrOfBlockDecl(E), E->getType().getCVRQualifiers(), getContext().getObjCGCAttrKind(E->getType()), E->getType().getAddressSpace()); @@ -788,7 +785,7 @@ LValue 
CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { // __extension__ doesn't affect lvalue-ness. if (E->getOpcode() == UnaryOperator::Extension) return EmitLValue(E->getSubExpr()); - + QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType()); switch (E->getOpcode()) { default: assert(0 && "Unknown unary operator lvalue!"); @@ -796,9 +793,9 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { { QualType T = E->getSubExpr()->getType()->getPointeeType(); assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type"); - + LValue LV = LValue::MakeAddr(EmitScalarExpr(E->getSubExpr()), - T.getCVRQualifiers(), + T.getCVRQualifiers(), getContext().getObjCGCAttrKind(T), ExprTy.getAddressSpace()); // We should not generate __weak write barrier on indirect reference @@ -854,12 +851,12 @@ LValue CodeGenFunction::EmitPredefinedFunctionName(unsigned Type) { CurCodeDecl); GlobalVarName += FunctionName; - llvm::Constant *C = + llvm::Constant *C = CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str()); return LValue::MakeAddr(C, 0); } -LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) { +LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) { switch (E->getIdentType()) { default: return EmitUnsupportedLValue(E, "predefined expression"); @@ -882,15 +879,15 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) { // Emit the vector as an lvalue to get its address. LValue LHS = EmitLValue(E->getBase()); assert(LHS.isSimple() && "Can only subscript lvalue vectors here!"); - Idx = Builder.CreateIntCast(Idx, + Idx = Builder.CreateIntCast(Idx, llvm::Type::getInt32Ty(VMContext), IdxSigned, "vidx"); return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType().getCVRQualifiers()); } - + // The base must be a pointer, which is not an aggregate. Emit it. llvm::Value *Base = EmitScalarExpr(E->getBase()); - + // Extend or truncate the index type to 32 or 64-bits. unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth(); if (IdxBitwidth != LLVMPointerWidth) @@ -898,28 +895,28 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) { llvm::IntegerType::get(VMContext, LLVMPointerWidth), IdxSigned, "idxprom"); - // We know that the pointer points to a type of the correct size, - // unless the size is a VLA or Objective-C interface. + // We know that the pointer points to a type of the correct size, unless the + // size is a VLA or Objective-C interface. 
llvm::Value *Address = 0; - if (const VariableArrayType *VAT = + if (const VariableArrayType *VAT = getContext().getAsVariableArrayType(E->getType())) { llvm::Value *VLASize = GetVLASize(VAT); - + Idx = Builder.CreateMul(Idx, VLASize); - + QualType BaseType = getContext().getBaseElementType(VAT); - + uint64_t BaseTypeSize = getContext().getTypeSize(BaseType) / 8; Idx = Builder.CreateUDiv(Idx, - llvm::ConstantInt::get(Idx->getType(), + llvm::ConstantInt::get(Idx->getType(), BaseTypeSize)); Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx"); - } else if (const ObjCInterfaceType *OIT = + } else if (const ObjCInterfaceType *OIT = dyn_cast<ObjCInterfaceType>(E->getType())) { - llvm::Value *InterfaceSize = + llvm::Value *InterfaceSize = llvm::ConstantInt::get(Idx->getType(), getContext().getTypeSize(OIT) / 8); - + Idx = Builder.CreateMul(Idx, InterfaceSize); llvm::Type *i8PTy = @@ -930,11 +927,11 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) { } else { Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx"); } - + QualType T = E->getBase()->getType()->getPointeeType(); - assert(!T.isNull() && + assert(!T.isNull() && "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type"); - + LValue LV = LValue::MakeAddr(Address, T.getCVRQualifiers(), getContext().getObjCGCAttrKind(T), @@ -945,11 +942,11 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) { return LV; } -static +static llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext, llvm::SmallVector<unsigned, 4> &Elts) { llvm::SmallVector<llvm::Constant *, 4> CElts; - + for (unsigned i = 0, e = Elts.size(); i != e; ++i) CElts.push_back(llvm::ConstantInt::get( llvm::Type::getInt32Ty(VMContext), Elts[i])); @@ -1011,7 +1008,7 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. if (E->isArrow()) { BaseValue = EmitScalarExpr(BaseExpr); - const PointerType *PTy = + const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>(); if (PTy->getPointeeType()->isUnionType()) isUnion = true; @@ -1055,7 +1052,7 @@ LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue, // FIXME: CodeGenTypes should expose a method to get the appropriate type for // FieldTy (the appropriate type is ABI-dependent). - const llvm::Type *FieldTy = + const llvm::Type *FieldTy = CGM.getTypes().ConvertTypeForMem(Field->getType()); const llvm::PointerType *BaseTy = cast<llvm::PointerType>(BaseValue->getType()); @@ -1063,11 +1060,11 @@ LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue, BaseValue = Builder.CreateBitCast(BaseValue, llvm::PointerType::get(FieldTy, AS), "tmp"); - - llvm::Value *Idx = + + llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Info.FieldNo); llvm::Value *V = Builder.CreateGEP(BaseValue, Idx, "tmp"); - + return LValue::MakeBitfield(V, Info.Start, Info.Size, Field->getType()->isSignedIntegerType(), Field->getType().getCVRQualifiers()|CVRQualifiers); @@ -1080,19 +1077,19 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue, { if (Field->isBitField()) return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers); - + unsigned idx = CGM.getTypes().getLLVMFieldNo(Field); llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp"); // Match union field type. 
if (isUnion) { - const llvm::Type *FieldTy = + const llvm::Type *FieldTy = CGM.getTypes().ConvertTypeForMem(Field->getType()); - const llvm::PointerType * BaseTy = + const llvm::PointerType * BaseTy = cast<llvm::PointerType>(BaseValue->getType()); unsigned AS = BaseTy->getAddressSpace(); - V = Builder.CreateBitCast(V, - llvm::PointerType::get(FieldTy, AS), + V = Builder.CreateBitCast(V, + llvm::PointerType::get(FieldTy, AS), "tmp"); } if (Field->getType()->isReferenceType()) @@ -1110,8 +1107,8 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue, } else if (Ty->isObjCObjectPointerType()) attr = QualType::Strong; } - LValue LV = - LValue::MakeAddr(V, + LValue LV = + LValue::MakeAddr(V, Field->getType().getCVRQualifiers()|CVRQualifiers, attr, Field->getType().getAddressSpace()); @@ -1143,7 +1140,7 @@ LValue CodeGenFunction::EmitConditionalOperator(const ConditionalOperator* E) { return EmitUnsupportedLValue(E, "conditional operator"); // ?: here should be an aggregate. - assert((hasAggregateLLVMType(E->getType()) && + assert((hasAggregateLLVMType(E->getType()) && !E->getType()->isAnyComplexType()) && "Unexpected conditional operator!"); @@ -1153,7 +1150,7 @@ LValue CodeGenFunction::EmitConditionalOperator(const ConditionalOperator* E) { return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(), getContext().getObjCGCAttrKind(E->getType()), E->getType().getAddressSpace()); - + } /// EmitCastLValue - Casts are never lvalues. If a cast is needed by the code @@ -1163,15 +1160,15 @@ LValue CodeGenFunction::EmitConditionalOperator(const ConditionalOperator* E) { /// noop aggregate casts, and cast from scalar to union. LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { if (E->getCastKind() == CastExpr::CK_UserDefinedConversion) { - if (const CXXFunctionalCastExpr *CXXFExpr = + if (const CXXFunctionalCastExpr *CXXFExpr = dyn_cast<CXXFunctionalCastExpr>(E)) return LValue::MakeAddr( EmitCXXFunctionalCastExpr(CXXFExpr).getScalarVal(), 0); - assert(isa<CStyleCastExpr>(E) && + assert(isa<CStyleCastExpr>(E) && "EmitCastLValue - Expected CStyleCastExpr"); return EmitLValue(E->getSubExpr()); } - + // If this is an aggregate-to-aggregate cast, just use the input's address as // the lvalue. if (E->getCastKind() == CastExpr::CK_NoOp) @@ -1186,11 +1183,11 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) { // Otherwise, we must have a cast from scalar to union. assert(E->getCastKind() == CastExpr::CK_ToUnion && "Expected scalar-to-union cast"); - + // Casts are only lvalues when the source and destination types are the same. 
llvm::Value *Temp = CreateTempAlloca(ConvertType(E->getType())); EmitAnyExpr(E->getSubExpr(), Temp, false); - + return LValue::MakeAddr(Temp, E->getType().getCVRQualifiers(), getContext().getObjCGCAttrKind(E->getType()), E->getType().getAddressSpace()); @@ -1208,7 +1205,7 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E) { if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E)) return EmitCXXMemberCallExpr(CE); - + const Decl *TargetDecl = 0; if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) { if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) { @@ -1222,17 +1219,17 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E) { if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E)) if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl)) return EmitCXXOperatorMemberCallExpr(CE, MD); - + if (isa<CXXPseudoDestructorExpr>(E->getCallee())) { // C++ [expr.pseudo]p1: - // The result shall only be used as the operand for the function call + // The result shall only be used as the operand for the function call // operator (), and the result of such a call has type void. The only // effect is the evaluation of the postfix-expression before the dot or // arrow. EmitScalarExpr(E->getCallee()); return RValue::get(0); } - + llvm::Value *Callee = EmitScalarExpr(E->getCallee()); return EmitCall(Callee, E->getCallee()->getType(), E->arg_begin(), E->arg_end(), TargetDecl); @@ -1244,7 +1241,7 @@ LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) { EmitAnyExpr(E->getLHS()); return EmitLValue(E->getRHS()); } - + // Can only get l-value for binary operator expressions which are a // simple assignment of aggregate type. if (E->getOpcode() != BinaryOperator::Assign) @@ -1265,12 +1262,12 @@ LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) { assert(E->getCallReturnType()->isReferenceType() && "Can't have a scalar return unless the return type is a " "reference type!"); - - return LValue::MakeAddr(RV.getScalarVal(), E->getType().getCVRQualifiers(), + + return LValue::MakeAddr(RV.getScalarVal(), E->getType().getCVRQualifiers(), getContext().getObjCGCAttrKind(E->getType()), E->getType().getAddressSpace()); } - + return LValue::MakeAddr(RV.getAggregateAddr(), E->getType().getCVRQualifiers(), getContext().getObjCGCAttrKind(E->getType()), @@ -1301,9 +1298,9 @@ LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) { LValue CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) { LValue LV = EmitLValue(E->getSubExpr()); - + PushCXXTemporary(E->getTemporary(), LV.getAddress()); - + return LV; } @@ -1351,18 +1348,18 @@ LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) { return EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(), CVRQualifiers); } -LValue +LValue CodeGenFunction::EmitObjCPropertyRefLValue(const ObjCPropertyRefExpr *E) { - // This is a special l-value that just issues sends when we load or - // store through it. + // This is a special l-value that just issues sends when we load or store + // through it. return LValue::MakePropertyRef(E, E->getType().getCVRQualifiers()); } -LValue +LValue CodeGenFunction::EmitObjCKVCRefLValue( const ObjCImplicitSetterGetterRefExpr *E) { - // This is a special l-value that just issues sends when we load or - // store through it. + // This is a special l-value that just issues sends when we load or store + // through it. 
return LValue::MakeKVCRef(E, E->getType().getCVRQualifiers()); } @@ -1372,7 +1369,7 @@ CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) { } LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { - + // Can only get l-value for message expression returning aggregate type RValue RV = EmitAnyExprToTemp(E); // FIXME: can this be volatile? @@ -1383,13 +1380,13 @@ LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) { } -RValue CodeGenFunction::EmitCall(llvm::Value *Callee, QualType CalleeType, +RValue CodeGenFunction::EmitCall(llvm::Value *Callee, QualType CalleeType, CallExpr::const_arg_iterator ArgBeg, CallExpr::const_arg_iterator ArgEnd, const Decl *TargetDecl) { - // Get the actual function type. The callee type will always be a - // pointer to function type or a block pointer type. - assert(CalleeType->isFunctionPointerType() && + // Get the actual function type. The callee type will always be a pointer to + // function type or a block pointer type. + assert(CalleeType->isFunctionPointerType() && "Call must have function pointer type!"); QualType FnType = CalleeType->getAs<PointerType>()->getPointeeType(); @@ -1398,6 +1395,6 @@ RValue CodeGenFunction::EmitCall(llvm::Value *Callee, QualType CalleeType, CallArgList Args; EmitCallArgs(Args, FnType->getAsFunctionProtoType(), ArgBeg, ArgEnd); - return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args), + return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args), Callee, Args, TargetDecl); } diff --git a/clang/lib/CodeGen/CGExprComplex.cpp b/clang/lib/CodeGen/CGExprComplex.cpp index f9cb607611d..4fcfe06cd72 100644 --- a/clang/lib/CodeGen/CGExprComplex.cpp +++ b/clang/lib/CodeGen/CGExprComplex.cpp @@ -46,7 +46,7 @@ public: IgnoreRealAssign(irn), IgnoreImagAssign(iin) { } - + //===--------------------------------------------------------------------===// // Utilities //===--------------------------------------------------------------------===// @@ -82,23 +82,23 @@ public: if (LV.isPropertyRef()) return CGF.EmitObjCPropertyGet(LV.getPropertyRefExpr()).getComplexVal(); - + assert(LV.isKVCRef() && "Unknown LValue type!"); return CGF.EmitObjCPropertyGet(LV.getKVCRefExpr()).getComplexVal(); } - + /// EmitLoadOfComplex - Given a pointer to a complex value, emit code to load /// the real and imaginary pieces. ComplexPairTy EmitLoadOfComplex(llvm::Value *SrcPtr, bool isVolatile); - + /// EmitStoreOfComplex - Store the specified real/imag parts into the /// specified value pointer. void EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *ResPtr, bool isVol); - + /// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType. ComplexPairTy EmitComplexToComplexCast(ComplexPairTy Val, QualType SrcType, QualType DestType); - + //===--------------------------------------------------------------------===// // Visitor Methods //===--------------------------------------------------------------------===// @@ -111,10 +111,10 @@ public: ComplexPairTy VisitExpr(Expr *S); ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());} ComplexPairTy VisitImaginaryLiteral(const ImaginaryLiteral *IL); - + // l-values. 
ComplexPairTy VisitDeclRefExpr(const Expr *E) { return EmitLoadOfLValue(E); } - ComplexPairTy VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { + ComplexPairTy VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { return EmitLoadOfLValue(E); } ComplexPairTy VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) { @@ -131,7 +131,7 @@ public: ComplexPairTy VisitMemberExpr(const Expr *E) { return EmitLoadOfLValue(E); } // FIXME: CompoundLiteralExpr - + ComplexPairTy EmitCast(Expr *Op, QualType DestTy); ComplexPairTy VisitImplicitCastExpr(ImplicitCastExpr *E) { // Unlike for scalars, we don't have to worry about function->ptr demotion @@ -182,24 +182,23 @@ public: ComplexPairTy VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) { assert(E->getType()->isAnyComplexType() && "Expected complex type!"); QualType Elem = E->getType()->getAsComplexType()->getElementType(); - llvm::Constant *Null = - llvm::Constant::getNullValue(CGF.ConvertType(Elem)); + llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem)); return ComplexPairTy(Null, Null); } ComplexPairTy VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) { assert(E->getType()->isAnyComplexType() && "Expected complex type!"); QualType Elem = E->getType()->getAsComplexType()->getElementType(); - llvm::Constant *Null = + llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem)); return ComplexPairTy(Null, Null); } - + struct BinOpInfo { ComplexPairTy LHS; ComplexPairTy RHS; QualType Ty; // Computation Type. - }; - + }; + BinOpInfo EmitBinOps(const BinaryOperator *E); ComplexPairTy EmitCompoundAssign(const CompoundAssignOperator *E, ComplexPairTy (ComplexExprEmitter::*Func) @@ -209,7 +208,7 @@ public: ComplexPairTy EmitBinSub(const BinOpInfo &Op); ComplexPairTy EmitBinMul(const BinOpInfo &Op); ComplexPairTy EmitBinDiv(const BinOpInfo &Op); - + ComplexPairTy VisitBinMul(const BinaryOperator *E) { return EmitBinMul(EmitBinOps(E)); } @@ -222,7 +221,7 @@ public: ComplexPairTy VisitBinDiv(const BinaryOperator *E) { return EmitBinDiv(EmitBinOps(E)); } - + // Compound assignments. ComplexPairTy VisitBinAddAssign(const CompoundAssignOperator *E) { return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinAdd); @@ -236,7 +235,7 @@ public: ComplexPairTy VisitBinDivAssign(const CompoundAssignOperator *E) { return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinDiv); } - + // GCC rejects rem/and/or/xor for integer complex. // Logical and/or always return int, never complex. @@ -244,7 +243,7 @@ public: ComplexPairTy VisitBinAssign (const BinaryOperator *E); ComplexPairTy VisitBinComma (const BinaryOperator *E); - + ComplexPairTy VisitConditionalOperator(const ConditionalOperator *CO); ComplexPairTy VisitChooseExpr(ChooseExpr *CE); @@ -264,7 +263,7 @@ ComplexPairTy ComplexExprEmitter::EmitLoadOfComplex(llvm::Value *SrcPtr, bool isVolatile) { llvm::SmallString<64> Name(SrcPtr->getName().begin(), SrcPtr->getName().end()); - + llvm::Value *Real=0, *Imag=0; if (!IgnoreReal) { @@ -279,10 +278,10 @@ ComplexPairTy ComplexExprEmitter::EmitLoadOfComplex(llvm::Value *SrcPtr, Name.str().str().c_str()); Name.resize(Name.size()-4); // .real -> .imagp } - + if (!IgnoreImag) { Name += "imagp"; - + // FIXME: Clean this up once builder takes Twine/StringRef. 
llvm::Value *ImagPtr = Builder.CreateStructGEP(SrcPtr, 1, Name.str().str().c_str()); @@ -300,7 +299,7 @@ void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *Ptr, bool isVolatile) { llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, "real"); llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, "imag"); - + Builder.CreateStore(Val.first, RealPtr, isVolatile); Builder.CreateStore(Val.second, ImagPtr, isVolatile); } @@ -313,7 +312,7 @@ void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *Ptr, ComplexPairTy ComplexExprEmitter::VisitExpr(Expr *E) { CGF.ErrorUnsupported(E, "complex expression"); - const llvm::Type *EltTy = + const llvm::Type *EltTy = CGF.ConvertType(E->getType()->getAsComplexType()->getElementType()); llvm::Value *U = llvm::UndefValue::get(EltTy); return ComplexPairTy(U, U); @@ -358,7 +357,7 @@ ComplexPairTy ComplexExprEmitter::EmitCast(Expr *Op, QualType DestTy) { // Two cases here: cast from (complex to complex) and (scalar to complex). if (Op->getType()->isAnyComplexType()) return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy); - + // C99 6.3.1.7: When a value of real type is converted to a complex type, the // real part of the complex result value is determined by the rules of // conversion to the corresponding real type and the imaginary part of the @@ -368,7 +367,7 @@ ComplexPairTy ComplexExprEmitter::EmitCast(Expr *Op, QualType DestTy) { // Convert the input element to the element type of the complex. DestTy = DestTy->getAsComplexType()->getElementType(); Elt = CGF.EmitScalarConversion(Elt, Op->getType(), DestTy); - + // Return (realval, 0). return ComplexPairTy(Elt, llvm::Constant::getNullValue(Elt->getType())); } @@ -378,12 +377,12 @@ ComplexPairTy ComplexExprEmitter::VisitPrePostIncDec(const UnaryOperator *E, LValue LV = CGF.EmitLValue(E->getSubExpr()); ComplexPairTy InVal = EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified()); - + llvm::Value *NextVal; if (isa<llvm::IntegerType>(InVal.first->getType())) { uint64_t AmountVal = isInc ? 1 : -1; NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true); - + // Add the inc/dec to the real part. NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec"); } else { @@ -392,16 +391,16 @@ ComplexPairTy ComplexExprEmitter::VisitPrePostIncDec(const UnaryOperator *E, if (!isInc) FVal.changeSign(); NextVal = llvm::ConstantFP::get(CGF.getLLVMContext(), FVal); - + // Add the inc/dec to the real part. NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec"); } - + ComplexPairTy IncVal(NextVal, InVal.second); - + // Store the updated result through the lvalue. EmitStoreOfComplex(IncVal, LV.getAddress(), LV.isVolatileQualified()); - + // If this is a postinc, return the value read from memory, otherwise use the // updated value. return isPre ? 
IncVal : InVal; @@ -413,7 +412,7 @@ ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) { TestAndClearIgnoreRealAssign(); TestAndClearIgnoreImagAssign(); ComplexPairTy Op = Visit(E->getSubExpr()); - + llvm::Value *ResR, *ResI; if (Op.first->getType()->isFloatingPoint()) { ResR = Builder.CreateFNeg(Op.first, "neg.r"); @@ -437,13 +436,13 @@ ComplexPairTy ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *E) { ResI = Builder.CreateFNeg(Op.second, "conj.i"); else ResI = Builder.CreateNeg(Op.second, "conj.i"); - + return ComplexPairTy(Op.first, ResI); } ComplexPairTy ComplexExprEmitter::EmitBinAdd(const BinOpInfo &Op) { llvm::Value *ResR, *ResI; - + if (Op.LHS.first->getType()->isFloatingPoint()) { ResR = Builder.CreateFAdd(Op.LHS.first, Op.RHS.first, "add.r"); ResI = Builder.CreateFAdd(Op.LHS.second, Op.RHS.second, "add.i"); @@ -470,12 +469,12 @@ ComplexPairTy ComplexExprEmitter::EmitBinSub(const BinOpInfo &Op) { ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) { using llvm::Value; Value *ResR, *ResI; - + if (Op.LHS.first->getType()->isFloatingPoint()) { Value *ResRl = Builder.CreateFMul(Op.LHS.first, Op.RHS.first, "mul.rl"); Value *ResRr = Builder.CreateFMul(Op.LHS.second, Op.RHS.second,"mul.rr"); ResR = Builder.CreateFSub(ResRl, ResRr, "mul.r"); - + Value *ResIl = Builder.CreateFMul(Op.LHS.second, Op.RHS.first, "mul.il"); Value *ResIr = Builder.CreateFMul(Op.LHS.first, Op.RHS.second, "mul.ir"); ResI = Builder.CreateFAdd(ResIl, ResIr, "mul.i"); @@ -483,7 +482,7 @@ ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) { Value *ResRl = Builder.CreateMul(Op.LHS.first, Op.RHS.first, "mul.rl"); Value *ResRr = Builder.CreateMul(Op.LHS.second, Op.RHS.second,"mul.rr"); ResR = Builder.CreateSub(ResRl, ResRr, "mul.r"); - + Value *ResIl = Builder.CreateMul(Op.LHS.second, Op.RHS.first, "mul.il"); Value *ResIr = Builder.CreateMul(Op.LHS.first, Op.RHS.second, "mul.ir"); ResI = Builder.CreateAdd(ResIl, ResIr, "mul.i"); @@ -494,7 +493,7 @@ ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) { ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) { llvm::Value *LHSr = Op.LHS.first, *LHSi = Op.LHS.second; llvm::Value *RHSr = Op.RHS.first, *RHSi = Op.RHS.second; - + llvm::Value *DSTr, *DSTi; if (Op.LHS.first->getType()->isFloatingPoint()) { @@ -502,15 +501,15 @@ ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) { llvm::Value *Tmp1 = Builder.CreateFMul(LHSr, RHSr, "tmp"); // a*c llvm::Value *Tmp2 = Builder.CreateFMul(LHSi, RHSi, "tmp"); // b*d llvm::Value *Tmp3 = Builder.CreateFAdd(Tmp1, Tmp2, "tmp"); // ac+bd - + llvm::Value *Tmp4 = Builder.CreateFMul(RHSr, RHSr, "tmp"); // c*c llvm::Value *Tmp5 = Builder.CreateFMul(RHSi, RHSi, "tmp"); // d*d llvm::Value *Tmp6 = Builder.CreateFAdd(Tmp4, Tmp5, "tmp"); // cc+dd - + llvm::Value *Tmp7 = Builder.CreateFMul(LHSi, RHSr, "tmp"); // b*c llvm::Value *Tmp8 = Builder.CreateFMul(LHSr, RHSi, "tmp"); // a*d llvm::Value *Tmp9 = Builder.CreateFSub(Tmp7, Tmp8, "tmp"); // bc-ad - + DSTr = Builder.CreateFDiv(Tmp3, Tmp6, "tmp"); DSTi = Builder.CreateFDiv(Tmp9, Tmp6, "tmp"); } else { @@ -518,15 +517,15 @@ ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) { llvm::Value *Tmp1 = Builder.CreateMul(LHSr, RHSr, "tmp"); // a*c llvm::Value *Tmp2 = Builder.CreateMul(LHSi, RHSi, "tmp"); // b*d llvm::Value *Tmp3 = Builder.CreateAdd(Tmp1, Tmp2, "tmp"); // ac+bd - + llvm::Value *Tmp4 = Builder.CreateMul(RHSr, RHSr, "tmp"); // c*c llvm::Value *Tmp5 = Builder.CreateMul(RHSi, RHSi, 
"tmp"); // d*d llvm::Value *Tmp6 = Builder.CreateAdd(Tmp4, Tmp5, "tmp"); // cc+dd - + llvm::Value *Tmp7 = Builder.CreateMul(LHSi, RHSr, "tmp"); // b*c llvm::Value *Tmp8 = Builder.CreateMul(LHSr, RHSi, "tmp"); // a*d llvm::Value *Tmp9 = Builder.CreateSub(Tmp7, Tmp8, "tmp"); // bc-ad - + if (Op.Ty->getAsComplexType()->getElementType()->isUnsignedIntegerType()) { DSTr = Builder.CreateUDiv(Tmp3, Tmp6, "tmp"); DSTi = Builder.CreateUDiv(Tmp9, Tmp6, "tmp"); @@ -535,11 +534,11 @@ ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) { DSTi = Builder.CreateSDiv(Tmp9, Tmp6, "tmp"); } } - + return ComplexPairTy(DSTr, DSTi); } -ComplexExprEmitter::BinOpInfo +ComplexExprEmitter::BinOpInfo ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) { TestAndClearIgnoreReal(); TestAndClearIgnoreImag(); @@ -564,27 +563,27 @@ EmitCompoundAssign(const CompoundAssignOperator *E, QualType LHSTy = E->getLHS()->getType(), RHSTy = E->getRHS()->getType(); BinOpInfo OpInfo; - + // Load the RHS and LHS operands. // __block variables need to have the rhs evaluated first, plus this should // improve codegen a little. It is possible for the RHS to be complex or // scalar. OpInfo.Ty = E->getComputationResultType(); OpInfo.RHS = EmitCast(E->getRHS(), OpInfo.Ty); - + LValue LHSLV = CGF.EmitLValue(E->getLHS()); // We know the LHS is a complex lvalue. - OpInfo.LHS=EmitLoadOfComplex(LHSLV.getAddress(),LHSLV.isVolatileQualified()); + OpInfo.LHS=EmitLoadOfComplex(LHSLV.getAddress(), LHSLV.isVolatileQualified()); OpInfo.LHS=EmitComplexToComplexCast(OpInfo.LHS, LHSTy, OpInfo.Ty); - + // Expand the binary operator. ComplexPairTy Result = (this->*Func)(OpInfo); - + // Truncate the result back to the LHS type. Result = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy); - + // Store the result value into the LHS lvalue. EmitStoreOfComplex(Result, LHSLV.getAddress(), LHSLV.isVolatileQualified()); // And now return the LHS @@ -608,7 +607,7 @@ ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) { // Compute the address to store into. LValue LHS = CGF.EmitLValue(E->getLHS()); - + // Store into it, if simple. if (LHS.isSimple()) { EmitStoreOfComplex(Val, LHS.getAddress(), LHS.isVolatileQualified()); @@ -620,7 +619,7 @@ ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) { IgnoreImagAssign = ignimag; return EmitLoadOfComplex(LHS.getAddress(), LHS.isVolatileQualified()); } - + // Otherwise we must have a property setter (no complex vector/bitfields). if (LHS.isPropertyRef()) CGF.EmitObjCPropertySet(LHS.getPropertyRefExpr(), RValue::getComplex(Val)); @@ -651,27 +650,27 @@ VisitConditionalOperator(const ConditionalOperator *E) { llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true"); llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false"); llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end"); - + llvm::Value *Cond = CGF.EvaluateExprAsBool(E->getCond()); Builder.CreateCondBr(Cond, LHSBlock, RHSBlock); - + CGF.EmitBlock(LHSBlock); - + // Handle the GNU extension for missing LHS. assert(E->getLHS() && "Must have LHS for complex value"); ComplexPairTy LHS = Visit(E->getLHS()); LHSBlock = Builder.GetInsertBlock(); CGF.EmitBranch(ContBlock); - + CGF.EmitBlock(RHSBlock); - + ComplexPairTy RHS = Visit(E->getRHS()); RHSBlock = Builder.GetInsertBlock(); CGF.EmitBranch(ContBlock); - + CGF.EmitBlock(ContBlock); - + // Create a PHI node for the real part. 
llvm::PHINode *RealPN = Builder.CreatePHI(LHS.first->getType(), "cond.r"); RealPN->reserveOperandSpace(2); @@ -683,7 +682,7 @@ VisitConditionalOperator(const ConditionalOperator *E) { ImagPN->reserveOperandSpace(2); ImagPN->addIncoming(LHS.second, LHSBlock); ImagPN->addIncoming(RHS.second, RHSBlock); - + return ComplexPairTy(RealPN, ImagPN); } @@ -714,7 +713,7 @@ ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) { if (!ArgPtr) { CGF.ErrorUnsupported(E, "complex va_arg expression"); - const llvm::Type *EltTy = + const llvm::Type *EltTy = CGF.ConvertType(E->getType()->getAsComplexType()->getElementType()); llvm::Value *U = llvm::UndefValue::get(EltTy); return ComplexPairTy(U, U); @@ -734,7 +733,7 @@ ComplexPairTy CodeGenFunction::EmitComplexExpr(const Expr *E, bool IgnoreReal, bool IgnoreImag, bool IgnoreRealAssign, bool IgnoreImagAssign) { assert(E && E->getType()->isAnyComplexType() && "Invalid complex expression to emit"); - + return ComplexExprEmitter(*this, IgnoreReal, IgnoreImag, IgnoreRealAssign, IgnoreImagAssign) .Visit(const_cast<Expr*>(E)); @@ -760,7 +759,7 @@ void CodeGenFunction::StoreComplexToAddr(ComplexPairTy V, } /// LoadComplexFromAddr - Load a complex number from the specified address. -ComplexPairTy CodeGenFunction::LoadComplexFromAddr(llvm::Value *SrcAddr, +ComplexPairTy CodeGenFunction::LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile) { return ComplexExprEmitter(*this).EmitLoadOfComplex(SrcAddr, SrcIsVolatile); } diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp index 8732dc91309..3dc95902af7 100644 --- a/clang/lib/CodeGen/CGExprScalar.cpp +++ b/clang/lib/CodeGen/CGExprScalar.cpp @@ -53,10 +53,10 @@ class VISIBILITY_HIDDEN ScalarExprEmitter public: ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false) - : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira), + : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira), VMContext(cgf.getLLVMContext()) { } - + //===--------------------------------------------------------------------===// // Utilities //===--------------------------------------------------------------------===// @@ -73,25 +73,25 @@ public: Value *EmitLoadOfLValue(LValue LV, QualType T) { return CGF.EmitLoadOfLValue(LV, T).getScalarVal(); } - + /// EmitLoadOfLValue - Given an expression with complex type that represents a /// value l-value, this method emits the address of the l-value, then loads /// and returns the result. Value *EmitLoadOfLValue(const Expr *E) { return EmitLoadOfLValue(EmitLValue(E), E->getType()); } - + /// EmitConversionToBool - Convert the specified expression value to a /// boolean (i1) truth value. This is equivalent to "Val != 0". Value *EmitConversionToBool(Value *Src, QualType DstTy); - + /// EmitScalarConversion - Emit a conversion from the specified type to the /// specified destination type, both of which are LLVM scalar types. Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy); /// EmitComplexToScalarConversion - Emit a conversion from the specified - /// complex type to the specified destination type, where the destination - /// type is an LLVM scalar type. + /// complex type to the specified destination type, where the destination type + /// is an LLVM scalar type. 
Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy); @@ -133,26 +133,26 @@ public: } Value *VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E); Value *VisitAddrLabelExpr(const AddrLabelExpr *E) { - llvm::Value *V = + llvm::Value *V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), CGF.GetIDForAddrOfLabel(E->getLabel())); - + return Builder.CreateIntToPtr(V, ConvertType(E->getType())); } - + // l-values. Value *VisitDeclRefExpr(DeclRefExpr *E) { if (const EnumConstantDecl *EC = dyn_cast<EnumConstantDecl>(E->getDecl())) return llvm::ConstantInt::get(VMContext, EC->getInitVal()); return EmitLoadOfLValue(E); } - Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) { - return CGF.EmitObjCSelectorExpr(E); + Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) { + return CGF.EmitObjCSelectorExpr(E); } - Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) { - return CGF.EmitObjCProtocolExpr(E); + Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) { + return CGF.EmitObjCProtocolExpr(E); } - Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { + Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) { return EmitLoadOfLValue(E); } Value *VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) { @@ -177,7 +177,7 @@ public: Value *VisitObjCEncodeExpr(const ObjCEncodeExpr *E) { return EmitLValue(E).getAddress(); } - + Value *VisitPredefinedExpr(Expr *E) { return EmitLValue(E).getAddress(); } Value *VisitInitListExpr(InitListExpr *E) { @@ -185,24 +185,24 @@ public: (void)Ignore; assert (Ignore == false && "init list ignored"); unsigned NumInitElements = E->getNumInits(); - + if (E->hadArrayRangeDesignator()) { CGF.ErrorUnsupported(E, "GNU array range designator extension"); } - const llvm::VectorType *VType = + const llvm::VectorType *VType = dyn_cast<llvm::VectorType>(ConvertType(E->getType())); - + // We have a scalar in braces. Just use the first element. - if (!VType) + if (!VType) return Visit(E->getInit(0)); - + unsigned NumVectorElements = VType->getNumElements(); const llvm::Type *ElementType = VType->getElementType(); // Emit individual vector element stores. llvm::Value *V = llvm::UndefValue::get(VType); - + // Emit initializers unsigned i; for (i = 0; i < NumInitElements; ++i) { @@ -211,7 +211,7 @@ public: llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), i); V = Builder.CreateInsertElement(V, NewV, Idx); } - + // Emit remaining default initializers for (/* Do not initialize i*/; i < NumVectorElements; ++i) { Value *Idx = @@ -219,22 +219,22 @@ public: llvm::Value *NewV = llvm::Constant::getNullValue(ElementType); V = Builder.CreateInsertElement(V, NewV, Idx); } - + return V; } - + Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) { return llvm::Constant::getNullValue(ConvertType(E->getType())); } Value *VisitCastExpr(const CastExpr *E) { if (E->getCastKind() == CastExpr::CK_UserDefinedConversion) { - if (const CXXFunctionalCastExpr *CXXFExpr = + if (const CXXFunctionalCastExpr *CXXFExpr = dyn_cast<CXXFunctionalCastExpr>(E)) return CGF.EmitCXXFunctionalCastExpr(CXXFExpr).getScalarVal(); - assert(isa<CStyleCastExpr>(E) && + assert(isa<CStyleCastExpr>(E) && "VisitCastExpr - missing CStyleCastExpr"); } - + // Make sure to evaluate VLA bounds now so that we have them for later. 
if (E->getType()->isVariablyModifiedType()) CGF.EmitVLASize(E->getType()); @@ -246,14 +246,14 @@ public: Value *VisitCallExpr(const CallExpr *E) { if (E->getCallReturnType()->isReferenceType()) return EmitLoadOfLValue(E); - + return CGF.EmitCallExpr(E).getScalarVal(); } Value *VisitStmtExpr(const StmtExpr *E); Value *VisitBlockDeclRefExpr(const BlockDeclRefExpr *E); - + // Unary Operators. Value *VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre); Value *VisitUnaryPostDec(const UnaryOperator *E) { @@ -286,15 +286,15 @@ public: return Visit(E->getSubExpr()); } Value *VisitUnaryOffsetOf(const UnaryOperator *E); - + // C++ Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) { return Visit(DAE->getExpr()); } Value *VisitCXXThisExpr(CXXThisExpr *TE) { return CGF.LoadCXXThis(); - } - + } + Value *VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) { return CGF.EmitCXXExprWithTemporaries(E).getScalarVal(); } @@ -305,17 +305,17 @@ public: CGF.EmitCXXDeleteExpr(E); return 0; } - + Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) { // C++ [expr.pseudo]p1: - // The result shall only be used as the operand for the function call + // The result shall only be used as the operand for the function call // operator (), and the result of such a call has type void. The only // effect is the evaluation of the postfix-expression before the dot or // arrow. CGF.EmitScalarExpr(E->getBase()); return 0; } - + // Binary Operators. Value *EmitMul(const BinOpInfo &Ops) { if (CGF.getContext().getLangOptions().OverflowChecking @@ -382,7 +382,7 @@ public: VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ); VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE); #undef VISITCOMP - + Value *VisitBinAssign (const BinaryOperator *E); Value *VisitBinLAnd (const BinaryOperator *E); @@ -408,24 +408,24 @@ public: /// boolean (i1) truth value. This is equivalent to "Val != 0". Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) { assert(SrcType->isCanonical() && "EmitScalarConversion strips typedefs"); - + if (SrcType->isRealFloatingType()) { // Compare against 0.0 for fp scalars. llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType()); return Builder.CreateFCmpUNE(Src, Zero, "tobool"); } - + if (SrcType->isMemberPointerType()) { // FIXME: This is ABI specific. - + // Compare against -1. llvm::Value *NegativeOne = llvm::Constant::getAllOnesValue(Src->getType()); return Builder.CreateICmpNE(Src, NegativeOne, "tobool"); } - + assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) && "Unknown scalar type to convert"); - + // Because of the type rules of C, we often end up computing a logical value, // then zero extending it to int, then wanting it as a logical value again. // Optimize this common case. @@ -441,7 +441,7 @@ Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) { return Result; } } - + // Compare against an integer or pointer null. llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType()); return Builder.CreateICmpNE(Src, Zero, "tobool"); @@ -454,32 +454,31 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, SrcType = CGF.getContext().getCanonicalType(SrcType); DstType = CGF.getContext().getCanonicalType(DstType); if (SrcType == DstType) return Src; - + if (DstType->isVoidType()) return 0; - + llvm::LLVMContext &VMContext = CGF.getLLVMContext(); // Handle conversions to bool first, they are special: comparisons against 0. 
if (DstType->isBooleanType()) return EmitConversionToBool(Src, SrcType); - + const llvm::Type *DstTy = ConvertType(DstType); // Ignore conversions like int -> uint. if (Src->getType() == DstTy) return Src; - // Handle pointer conversions next: pointers can only be converted - // to/from other pointers and integers. Check for pointer types in - // terms of LLVM, as some native types (like Obj-C id) may map to a - // pointer type. + // Handle pointer conversions next: pointers can only be converted to/from + // other pointers and integers. Check for pointer types in terms of LLVM, as + // some native types (like Obj-C id) may map to a pointer type. if (isa<llvm::PointerType>(DstTy)) { // The source value may be an integer, or a pointer. if (isa<llvm::PointerType>(Src->getType())) { // Some heavy lifting for derived to base conversion. - if (const CXXRecordDecl *ClassDecl = + if (const CXXRecordDecl *ClassDecl = SrcType->getCXXRecordDeclForPointerType()) - if (const CXXRecordDecl *BaseClassDecl = + if (const CXXRecordDecl *BaseClassDecl = DstType->getCXXRecordDeclForPointerType()) Src = CGF.AddressCXXOfBaseClass(Src, ClassDecl, BaseClassDecl); return Builder.CreateBitCast(Src, DstTy, "conv"); @@ -487,7 +486,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?"); // First, convert to the correct width so that we control the kind of // extension. - const llvm::Type *MiddleTy = + const llvm::Type *MiddleTy = llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth); bool InputSigned = SrcType->isSignedIntegerType(); llvm::Value* IntResult = @@ -495,13 +494,13 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, // Then, cast to pointer. return Builder.CreateIntToPtr(IntResult, DstTy, "conv"); } - + if (isa<llvm::PointerType>(Src->getType())) { // Must be an ptr to int cast. assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?"); return Builder.CreatePtrToInt(Src, DstTy, "conv"); } - + // A scalar can be splatted to an extended vector of the same element type if (DstType->isExtVectorType() && !SrcType->isVectorType()) { // Cast the scalar to element type @@ -520,7 +519,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, for (unsigned i = 0; i < NumElements; i++) Args.push_back(llvm::ConstantInt::get( llvm::Type::getInt32Ty(VMContext), 0)); - + llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements); llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat"); return Yay; @@ -530,7 +529,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, if (isa<llvm::VectorType>(Src->getType()) || isa<llvm::VectorType>(DstTy)) return Builder.CreateBitCast(Src, DstTy, "conv"); - + // Finally, we have the arithmetic types: real int/float. 
if (isa<llvm::IntegerType>(Src->getType())) { bool InputSigned = SrcType->isSignedIntegerType(); @@ -541,7 +540,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, else return Builder.CreateUIToFP(Src, DstTy, "conv"); } - + assert(Src->getType()->isFloatingPoint() && "Unknown real conversion"); if (isa<llvm::IntegerType>(DstTy)) { if (DstType->isSignedIntegerType()) @@ -557,15 +556,15 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, return Builder.CreateFPExt(Src, DstTy, "conv"); } -/// EmitComplexToScalarConversion - Emit a conversion from the specified -/// complex type to the specified destination type, where the destination -/// type is an LLVM scalar type. +/// EmitComplexToScalarConversion - Emit a conversion from the specified complex +/// type to the specified destination type, where the destination type is an +/// LLVM scalar type. Value *ScalarExprEmitter:: EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src, QualType SrcTy, QualType DstTy) { // Get the source element type. SrcTy = SrcTy->getAsComplexType()->getElementType(); - + // Handle conversions to bool first, they are special: comparisons against 0. if (DstTy->isBooleanType()) { // Complex != 0 -> (Real != 0) | (Imag != 0) @@ -573,11 +572,11 @@ EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src, Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy); return Builder.CreateOr(Src.first, Src.second, "tobool"); } - + // C99 6.3.1.7p2: "When a value of complex type is converted to a real type, // the imaginary part of the complex value is discarded and the value of the // real part is converted according to the conversion rules for the - // corresponding real type. + // corresponding real type. return EmitScalarConversion(Src.first, SrcTy, DstTy); } @@ -613,14 +612,14 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) { // so we can't get it as an lvalue. if (!E->getBase()->getType()->isVectorType()) return EmitLoadOfLValue(E); - + // Handle the vector case. The base must be a vector, the index must be an // integer value. Value *Base = Visit(E->getBase()); Value *Idx = Visit(E->getIdx()); bool IdxSigned = E->getIdx()->getType()->isSignedIntegerType(); Idx = Builder.CreateIntCast(Idx, - llvm::Type::getInt32Ty(CGF.getLLVMContext()), + llvm::Type::getInt32Ty(CGF.getLLVMContext()), IdxSigned, "vecidxcast"); return Builder.CreateExtractElement(Base, Idx, "vecext"); @@ -633,7 +632,7 @@ Value *ScalarExprEmitter::EmitCastExpr(const Expr *E, QualType DestTy, CastExpr::CastKind Kind) { if (!DestTy->isVoidType()) TestAndClearIgnoreResultAssign(); - + switch (Kind) { default: break; @@ -644,7 +643,7 @@ Value *ScalarExprEmitter::EmitCastExpr(const Expr *E, QualType DestTy, case CastExpr::CK_ArrayToPointerDecay: { assert(E->getType()->isArrayType() && "Array to pointer decay must have array source type!"); - + Value *V = EmitLValue(E).getAddress(); // Bitfields can't be arrays. // Note that VLA pointers are always decayed, so we don't need to do @@ -656,7 +655,7 @@ Value *ScalarExprEmitter::EmitCastExpr(const Expr *E, QualType DestTy, "Expected pointer to array"); V = Builder.CreateStructGEP(V, 0, "arraydecay"); } - + // The resultant pointer type can be implicitly casted to other pointer // types as well (e.g. void*) and can be implicitly converted to integer. 
const llvm::Type *DestLTy = ConvertType(DestTy); @@ -669,20 +668,20 @@ Value *ScalarExprEmitter::EmitCastExpr(const Expr *E, QualType DestTy, } } return V; - } + } case CastExpr::CK_NullToMemberPointer: return CGF.CGM.EmitNullConstant(DestTy); } - + // Handle cases where the source is an non-complex type. - + if (!CGF.hasAggregateLLVMType(E->getType())) { Value *Src = Visit(const_cast<Expr*>(E)); // Use EmitScalarConversion to perform the conversion. return EmitScalarConversion(Src, E->getType(), DestTy); } - + if (E->getType()->isAnyComplexType()) { // Handle cases where the source is a complex type. bool IgnoreImag = true; @@ -727,7 +726,7 @@ Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E, Value *InVal = CGF.EmitLoadOfLValue(LV, ValTy).getScalarVal(); llvm::LLVMContext &VMContext = CGF.getLLVMContext(); - + int AmountVal = isInc ? 1 : -1; if (ValTy->isPointerType() && @@ -737,26 +736,26 @@ Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E, } Value *NextVal; - if (const llvm::PointerType *PT = + if (const llvm::PointerType *PT = dyn_cast<llvm::PointerType>(InVal->getType())) { llvm::Constant *Inc = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), AmountVal); if (!isa<llvm::FunctionType>(PT->getElementType())) { QualType PTEE = ValTy->getPointeeType(); - if (const ObjCInterfaceType *OIT = + if (const ObjCInterfaceType *OIT = dyn_cast<ObjCInterfaceType>(PTEE)) { // Handle interface types, which are not represented with a concrete type. int size = CGF.getContext().getTypeSize(OIT) / 8; if (!isInc) size = -size; Inc = llvm::ConstantInt::get(Inc->getType(), size); - const llvm::Type *i8Ty = + const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); InVal = Builder.CreateBitCast(InVal, i8Ty); NextVal = Builder.CreateGEP(InVal, Inc, "add.ptr"); llvm::Value *lhs = LV.getAddress(); lhs = Builder.CreateBitCast(lhs, llvm::PointerType::getUnqual(i8Ty)); - LV = LValue::MakeAddr(lhs, ValTy.getCVRQualifiers(), + LV = LValue::MakeAddr(lhs, ValTy.getCVRQualifiers(), CGF.getContext().getObjCGCAttrKind(ValTy)); } else NextVal = Builder.CreateInBoundsGEP(InVal, Inc, "ptrincdec"); @@ -785,11 +784,11 @@ Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E, } else { // Add the inc/dec to the real part. if (InVal->getType() == llvm::Type::getFloatTy(VMContext)) - NextVal = - llvm::ConstantFP::get(VMContext, + NextVal = + llvm::ConstantFP::get(VMContext, llvm::APFloat(static_cast<float>(AmountVal))); else if (InVal->getType() == llvm::Type::getDoubleTy(VMContext)) - NextVal = + NextVal = llvm::ConstantFP::get(VMContext, llvm::APFloat(static_cast<double>(AmountVal))); else { @@ -801,7 +800,7 @@ Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E, } NextVal = Builder.CreateFAdd(InVal, NextVal, isInc ? "inc" : "dec"); } - + // Store the updated result through the lvalue. if (LV.isBitfield()) CGF.EmitStoreThroughBitfieldLValue(RValue::get(NextVal), LV, ValTy, @@ -832,12 +831,12 @@ Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) { Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { // Compare operand to zero. Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr()); - + // Invert value. // TODO: Could dynamically modify easy computations here. For example, if // the operand is an icmp ne, turn into icmp eq. BoolVal = Builder.CreateNot(BoolVal, "lnot"); - + // ZExt result to the expr type. 
return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext"); } @@ -848,7 +847,7 @@ Value * ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) { QualType TypeToSize = E->getTypeOfArgument(); if (E->isSizeOf()) { - if (const VariableArrayType *VAT = + if (const VariableArrayType *VAT = CGF.getContext().getAsVariableArrayType(TypeToSize)) { if (E->isArgumentType()) { // sizeof(type) - make sure to emit the VLA size. @@ -858,13 +857,13 @@ ScalarExprEmitter::VisitSizeOfAlignOfExpr(const SizeOfAlignOfExpr *E) { // VLA, it is evaluated. CGF.EmitAnyExpr(E->getArgumentExpr()); } - + return CGF.GetVLASize(VAT); } } - // If this isn't sizeof(vla), the result must be constant; use the - // constant folding logic so we don't have to duplicate it here. + // If this isn't sizeof(vla), the result must be constant; use the constant + // folding logic so we don't have to duplicate it here. Expr::EvalResult Result; E->Evaluate(Result, CGF.getContext()); return llvm::ConstantInt::get(VMContext, Result.Val.getInt()); @@ -880,7 +879,7 @@ Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) { Expr *Op = E->getSubExpr(); if (Op->getType()->isAnyComplexType()) return CGF.EmitComplexExpr(Op, true, false, true, false).second; - + // __imag on a scalar returns zero. Emit the subexpr to ensure side // effects are evaluated, but not the actual value. if (E->isLvalue(CGF.getContext()) == Expr::LV_Valid) @@ -919,10 +918,10 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E, BinOpInfo OpInfo; if (E->getComputationResultType()->isAnyComplexType()) { - // This needs to go through the complex expression emitter, but - // it's a tad complicated to do that... I'm leaving it out for now. - // (Note that we do actually need the imaginary part of the RHS for - // multiplication and division.) + // This needs to go through the complex expression emitter, but it's a tad + // complicated to do that... I'm leaving it out for now. (Note that we do + // actually need the imaginary part of the RHS for multiplication and + // division.) CGF.ErrorUnsupported(E, "complex compound assignment"); return llvm::UndefValue::get(CGF.ConvertType(E->getType())); } @@ -937,17 +936,17 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E, OpInfo.LHS = EmitLoadOfLValue(LHSLV, LHSTy); OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, E->getComputationLHSType()); - + // Expand the binary operator. Value *Result = (this->*Func)(OpInfo); - + // Convert the result back to the LHS type. Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy); - // Store the result value into the LHS lvalue. Bit-fields are - // handled specially because the result is altered by the store, - // i.e., [C99 6.5.16p1] 'An assignment expression has the value of - // the left operand after the assignment...'. + // Store the result value into the LHS lvalue. Bit-fields are handled + // specially because the result is altered by the store, i.e., [C99 6.5.16p1] + // 'An assignment expression has the value of the left operand after the + // assignment...'. 
if (LHSLV.isBitfield()) { if (!LHSLV.isVolatileQualified()) { CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, LHSTy, @@ -1029,7 +1028,7 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) { Builder.SetInsertPoint(overflowBB); // Handler is: - // long long *__overflow_handler)(long long a, long long b, char op, + // long long *__overflow_handler)(long long a, long long b, char op, // char width) std::vector<const llvm::Type*> handerArgTypes; handerArgTypes.push_back(llvm::Type::getInt64Ty(VMContext)); @@ -1047,13 +1046,13 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) { Builder.CreateSExt(Ops.LHS, llvm::Type::getInt64Ty(VMContext)), Builder.CreateSExt(Ops.RHS, llvm::Type::getInt64Ty(VMContext)), llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), OpID), - llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), + llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), cast<llvm::IntegerType>(opTy)->getBitWidth())); handlerResult = Builder.CreateTrunc(handlerResult, opTy); Builder.CreateBr(continueBB); - + // Set up the continuation Builder.SetInsertPoint(continueBB); // Get the correct result @@ -1070,7 +1069,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) { if (CGF.getContext().getLangOptions().OverflowChecking && Ops.Ty->isSignedIntegerType()) return EmitOverflowCheckedBinOp(Ops); - + if (Ops.LHS->getType()->isFPOrFPVector()) return Builder.CreateFAdd(Ops.LHS, Ops.RHS, "add"); @@ -1089,7 +1088,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) { Value *Ptr, *Idx; Expr *IdxExp; const PointerType *PT = Ops.E->getLHS()->getType()->getAs<PointerType>(); - const ObjCObjectPointerType *OPT = + const ObjCObjectPointerType *OPT = Ops.E->getLHS()->getType()->getAsObjCObjectPointerType(); if (PT || OPT) { Ptr = Ops.LHS; @@ -1116,10 +1115,9 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) { Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext"); } const QualType ElementType = PT ? PT->getPointeeType() : OPT->getPointeeType(); - // Handle interface types, which are not represented with a concrete - // type. + // Handle interface types, which are not represented with a concrete type. if (const ObjCInterfaceType *OIT = dyn_cast<ObjCInterfaceType>(ElementType)) { - llvm::Value *InterfaceSize = + llvm::Value *InterfaceSize = llvm::ConstantInt::get(Idx->getType(), CGF.getContext().getTypeSize(OIT) / 8); Idx = Builder.CreateMul(Idx, InterfaceSize); @@ -1128,19 +1126,19 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) { Value *Casted = Builder.CreateBitCast(Ptr, i8Ty); Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr"); return Builder.CreateBitCast(Res, Ptr->getType()); - } + } - // Explicitly handle GNU void* and function pointer arithmetic - // extensions. The GNU void* casts amount to no-ops since our void* - // type is i8*, but this is future proof. + // Explicitly handle GNU void* and function pointer arithmetic extensions. The + // GNU void* casts amount to no-ops since our void* type is i8*, but this is + // future proof. 
if (ElementType->isVoidType() || ElementType->isFunctionType()) { const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); Value *Casted = Builder.CreateBitCast(Ptr, i8Ty); Value *Res = Builder.CreateGEP(Casted, Idx, "add.ptr"); return Builder.CreateBitCast(Res, Ptr->getType()); - } - + } + return Builder.CreateInBoundsGEP(Ptr, Idx, "add.ptr"); } @@ -1182,38 +1180,37 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) { } Idx = Builder.CreateNeg(Idx, "sub.ptr.neg"); - // Handle interface types, which are not represented with a concrete - // type. - if (const ObjCInterfaceType *OIT = + // Handle interface types, which are not represented with a concrete type. + if (const ObjCInterfaceType *OIT = dyn_cast<ObjCInterfaceType>(LHSElementType)) { - llvm::Value *InterfaceSize = + llvm::Value *InterfaceSize = llvm::ConstantInt::get(Idx->getType(), CGF.getContext().getTypeSize(OIT) / 8); Idx = Builder.CreateMul(Idx, InterfaceSize); - const llvm::Type *i8Ty = + const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty); Value *Res = Builder.CreateGEP(LHSCasted, Idx, "add.ptr"); return Builder.CreateBitCast(Res, Ops.LHS->getType()); - } + } // Explicitly handle GNU void* and function pointer arithmetic - // extensions. The GNU void* casts amount to no-ops since our - // void* type is i8*, but this is future proof. + // extensions. The GNU void* casts amount to no-ops since our void* type is + // i8*, but this is future proof. if (LHSElementType->isVoidType() || LHSElementType->isFunctionType()) { const llvm::Type *i8Ty = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext)); Value *LHSCasted = Builder.CreateBitCast(Ops.LHS, i8Ty); Value *Res = Builder.CreateGEP(LHSCasted, Idx, "sub.ptr"); return Builder.CreateBitCast(Res, Ops.LHS->getType()); - } - + } + return Builder.CreateInBoundsGEP(Ops.LHS, Idx, "sub.ptr"); } else { // pointer - pointer Value *LHS = Ops.LHS; Value *RHS = Ops.RHS; - + uint64_t ElementSize; // Handle GCC extension for pointer arithmetic on void* and function pointer @@ -1223,19 +1220,19 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) { } else { ElementSize = CGF.getContext().getTypeSize(LHSElementType) / 8; } - + const llvm::Type *ResultType = ConvertType(Ops.Ty); LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast"); RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast"); Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub"); - + // Optimize out the shift for element size of 1. if (ElementSize == 1) return BytesBetween; // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since - // pointer difference in C is only defined in the case where both - // operands are pointing to elements of an array. + // pointer difference in C is only defined in the case where both operands + // are pointing to elements of an array. 
Value *BytesPerElt = llvm::ConstantInt::get(ResultType, ElementSize); return Builder.CreateExactSDiv(BytesBetween, BytesPerElt, "sub.ptr.div"); } @@ -1247,7 +1244,7 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) { Value *RHS = Ops.RHS; if (Ops.LHS->getType() != RHS->getType()) RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); - + return Builder.CreateShl(Ops.LHS, RHS, "shl"); } @@ -1257,7 +1254,7 @@ Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) { Value *RHS = Ops.RHS; if (Ops.LHS->getType() != RHS->getType()) RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); - + if (Ops.Ty->isUnsignedIntegerType()) return Builder.CreateLShr(Ops.LHS, RHS, "shr"); return Builder.CreateAShr(Ops.LHS, RHS, "shr"); @@ -1271,7 +1268,7 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc, if (!LHSTy->isAnyComplexType()) { Value *LHS = Visit(E->getLHS()); Value *RHS = Visit(E->getRHS()); - + if (LHS->getType()->isFPOrFPVector()) { Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc, LHS, RHS, "cmp"); @@ -1288,14 +1285,14 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc, // vector integer type and return it (don't convert to bool). if (LHSTy->isVectorType()) return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext"); - + } else { // Complex Comparison: can only be an equality comparison. CodeGenFunction::ComplexPairTy LHS = CGF.EmitComplexExpr(E->getLHS()); CodeGenFunction::ComplexPairTy RHS = CGF.EmitComplexExpr(E->getRHS()); - + QualType CETy = LHSTy->getAsComplexType()->getElementType(); - + Value *ResultR, *ResultI; if (CETy->isRealFloatingType()) { ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc, @@ -1310,7 +1307,7 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc, ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc, LHS.second, RHS.second, "cmp.i"); } - + if (E->getOpcode() == BinaryOperator::EQ) { Result = Builder.CreateAnd(ResultR, ResultI, "and.ri"); } else { @@ -1330,7 +1327,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { // improve codegen just a little. Value *RHS = Visit(E->getRHS()); LValue LHS = EmitLValue(E->getLHS()); - + // Store the value into the LHS. Bit-fields are handled specially // because the result is altered by the store, i.e., [C99 6.5.16p1] // 'An assignment expression has the value of the left operand after @@ -1358,12 +1355,12 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { // ZExt result to int. return Builder.CreateZExt(RHSCond, CGF.LLVMIntTy, "land.ext"); } - + // 0 && RHS: If it is safe, just elide the RHS, and return 0. if (!CGF.ContainsLabel(E->getRHS())) return llvm::Constant::getNullValue(CGF.LLVMIntTy); } - + llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end"); llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs"); @@ -1379,12 +1376,12 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock); PI != PE; ++PI) PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI); - + CGF.PushConditionalTempDestruction(); CGF.EmitBlock(RHSBlock); Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); CGF.PopConditionalTempDestruction(); - + // Reaquire the RHS block, as there may be subblocks inserted. 
RHSBlock = Builder.GetInsertBlock(); @@ -1392,7 +1389,7 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) { // into the phi node for the edge with the value of RHSCond. CGF.EmitBlock(ContBlock); PN->addIncoming(RHSCond, RHSBlock); - + // ZExt result to int. return Builder.CreateZExt(PN, CGF.LLVMIntTy, "land.ext"); } @@ -1406,15 +1403,15 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) { // ZExt result to int. return Builder.CreateZExt(RHSCond, CGF.LLVMIntTy, "lor.ext"); } - + // 1 || RHS: If it is safe, just elide the RHS, and return 1. if (!CGF.ContainsLabel(E->getRHS())) return llvm::ConstantInt::get(CGF.LLVMIntTy, 1); } - + llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end"); llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs"); - + // Branch on the LHS first. If it is true, go to the success (cont) block. CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock); @@ -1433,17 +1430,17 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) { // Emit the RHS condition as a bool value. CGF.EmitBlock(RHSBlock); Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS()); - + CGF.PopConditionalTempDestruction(); - + // Reaquire the RHS block, as there may be subblocks inserted. RHSBlock = Builder.GetInsertBlock(); - + // Emit an unconditional branch from this block to ContBlock. Insert an entry // into the phi node for the edge with the value of RHSCond. CGF.EmitBlock(ContBlock); PN->addIncoming(RHSCond, RHSBlock); - + // ZExt result to int. return Builder.CreateZExt(PN, CGF.LLVMIntTy, "lor.ext"); } @@ -1465,19 +1462,19 @@ Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) { static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E) { if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) return isCheapEnoughToEvaluateUnconditionally(PE->getSubExpr()); - + // TODO: Allow anything we can constant fold to an integer or fp constant. if (isa<IntegerLiteral>(E) || isa<CharacterLiteral>(E) || isa<FloatingLiteral>(E)) return true; - + // Non-volatile automatic variables too, to get "cond ? X : Y" where // X and Y are local variables. if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) if (VD->hasLocalStorage() && !VD->getType().isVolatileQualified()) return true; - + return false; } @@ -1491,7 +1488,7 @@ VisitConditionalOperator(const ConditionalOperator *E) { Expr *Live = E->getLHS(), *Dead = E->getRHS(); if (Cond == -1) std::swap(Live, Dead); - + // If the dead side doesn't have labels we need, and if the Live side isn't // the gnu missing ?: extension (which we could handle, but don't bother // to), just emit the Live part. @@ -1499,8 +1496,8 @@ VisitConditionalOperator(const ConditionalOperator *E) { Live) // Live part isn't missing. return Visit(Live); } - - + + // If this is a really simple expression (like x ? 4 : 5), emit this as a // select instead of as control flow. We can only do this if it is cheap and // safe to evaluate the LHS and RHS unconditionally. @@ -1511,15 +1508,15 @@ VisitConditionalOperator(const ConditionalOperator *E) { llvm::Value *RHS = Visit(E->getRHS()); return Builder.CreateSelect(CondV, LHS, RHS, "cond"); } - - + + llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true"); llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false"); llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end"); Value *CondVal = 0; - // If we don't have the GNU missing condition extension, emit a branch on - // bool the normal way. 
+ // If we don't have the GNU missing condition extension, emit a branch on bool + // the normal way. if (E->getLHS()) { // Otherwise, just use EmitBranchOnBoolExpr to get small and simple code for // the branch on bool. @@ -1529,7 +1526,7 @@ VisitConditionalOperator(const ConditionalOperator *E) { // convert it to bool the hard way. We do this explicitly because we need // the unconverted value for the missing middle value of the ?:. CondVal = CGF.EmitScalarExpr(E->getCond()); - + // In some cases, EmitScalarConversion will delete the "CondVal" expression // if there are no extra uses (an optimization). Inhibit this by making an // extra dead use, because we're going to add a use of CondVal later. We @@ -1537,7 +1534,7 @@ VisitConditionalOperator(const ConditionalOperator *E) { // away. This leaves dead code, but the ?: extension isn't common. new llvm::BitCastInst(CondVal, CondVal->getType(), "dummy?:holder", Builder.GetInsertBlock()); - + Value *CondBoolVal = CGF.EmitScalarConversion(CondVal, E->getCond()->getType(), CGF.getContext().BoolTy); @@ -1546,33 +1543,33 @@ VisitConditionalOperator(const ConditionalOperator *E) { CGF.PushConditionalTempDestruction(); CGF.EmitBlock(LHSBlock); - + // Handle the GNU extension for missing LHS. Value *LHS; if (E->getLHS()) LHS = Visit(E->getLHS()); else // Perform promotions, to handle cases like "short ?: int" LHS = EmitScalarConversion(CondVal, E->getCond()->getType(), E->getType()); - + CGF.PopConditionalTempDestruction(); LHSBlock = Builder.GetInsertBlock(); CGF.EmitBranch(ContBlock); - + CGF.PushConditionalTempDestruction(); CGF.EmitBlock(RHSBlock); - + Value *RHS = Visit(E->getRHS()); CGF.PopConditionalTempDestruction(); RHSBlock = Builder.GetInsertBlock(); CGF.EmitBranch(ContBlock); - + CGF.EmitBlock(ContBlock); - + if (!LHS || !RHS) { assert(E->getType()->isVoidType() && "Non-void value should have a value"); return 0; } - + // Create a PHI node for the real part. llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), "cond"); PN->reserveOperandSpace(2); @@ -1590,7 +1587,7 @@ Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) { llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType()); // If EmitVAArg fails, we fall back to the LLVM instruction. - if (!ArgPtr) + if (!ArgPtr) return Builder.CreateVAArg(ArgValue, ConvertType(VE->getType())); // FIXME Volatility. @@ -1605,12 +1602,12 @@ Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *BE) { // Entry Point into this File //===----------------------------------------------------------------------===// -/// EmitScalarExpr - Emit the computation of the specified expression of -/// scalar type, ignoring the result. +/// EmitScalarExpr - Emit the computation of the specified expression of scalar +/// type, ignoring the result. Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) { assert(E && !hasAggregateLLVMType(E->getType()) && "Invalid scalar expression to emit"); - + return ScalarExprEmitter(*this, IgnoreResultAssign) .Visit(const_cast<Expr*>(E)); } @@ -1624,9 +1621,9 @@ Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy, return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy); } -/// EmitComplexToScalarConversion - Emit a conversion from the specified -/// complex type to the specified destination type, where the destination -/// type is an LLVM scalar type. 
+/// EmitComplexToScalarConversion - Emit a conversion from the specified complex
+/// type to the specified destination type, where the destination type is an
+/// LLVM scalar type.
 Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
 QualType SrcTy,
 QualType DstTy) {
@@ -1639,40 +1636,40 @@ Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
 Value *CodeGenFunction::EmitShuffleVector(Value* V1, Value *V2, ...) {
 assert(V1->getType() == V2->getType() &&
 "Vector operands must be of the same type");
- unsigned NumElements = 
+ unsigned NumElements =
 cast<llvm::VectorType>(V1->getType())->getNumElements();
- 
+
 va_list va;
 va_start(va, V2);
- 
+
 llvm::SmallVector<llvm::Constant*, 16> Args;
 for (unsigned i = 0; i < NumElements; i++) {
 int n = va_arg(va, int);
- assert(n >= 0 && n < (int)NumElements * 2 && 
+ assert(n >= 0 && n < (int)NumElements * 2 &&
 "Vector shuffle index out of bounds!");
 Args.push_back(llvm::ConstantInt::get(
 llvm::Type::getInt32Ty(VMContext), n));
 }
- 
+
 const char *Name = va_arg(va, const char *);
 va_end(va);
- 
+
 llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
- 
+
 return Builder.CreateShuffleVector(V1, V2, Mask, Name);
 }
 
-llvm::Value *CodeGenFunction::EmitVector(llvm::Value * const *Vals, 
+llvm::Value *CodeGenFunction::EmitVector(llvm::Value * const *Vals,
 unsigned NumVals, bool isSplat) {
 llvm::Value *Vec = llvm::UndefValue::get(llvm::VectorType::get(Vals[0]->getType(), NumVals));
- 
+
 for (unsigned i = 0, e = NumVals; i != e; ++i) {
 llvm::Value *Val = isSplat ? Vals[0] : Vals[i];
 llvm::Value *Idx = llvm::ConstantInt::get(
 llvm::Type::getInt32Ty(VMContext), i);
 Vec = Builder.CreateInsertElement(Vec, Val, Idx, "tmp");
 }
- 
- return Vec; 
+
+ return Vec;
 }
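A note on the conversion rule referenced in the EmitComplexToScalarConversion comments above: complex to bool is (Real != 0) | (Imag != 0), and complex to a real type discards the imaginary part and converts the real part (C99 6.3.1.7p2). The standalone C++ sketch below illustrates that rule only; it is not clang code, and ComplexPair and the helper names are hypothetical.

// Standalone sketch of the conversion rule described in the comments above.
// Not the clang API; ComplexPair, complexToBool and complexToReal are hypothetical.
#include <cassert>

struct ComplexPair { double real, imag; };

// Complex != 0  ->  (Real != 0) | (Imag != 0)
static bool complexToBool(ComplexPair v) {
  return v.real != 0.0 || v.imag != 0.0;
}

// C99 6.3.1.7p2: discard the imaginary part, convert the real part.
static double complexToReal(ComplexPair v) {
  return v.real;
}

int main() {
  assert(complexToBool(ComplexPair{0.0, 2.0}));   // nonzero imaginary part alone is "true"
  assert(!complexToBool(ComplexPair{0.0, 0.0}));
  assert(complexToReal(ComplexPair{3.0, 4.0}) == 3.0);
  return 0;
}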