Diffstat (limited to 'clang/lib/CodeGen')
-rw-r--r-- | clang/lib/CodeGen/CGBuiltin.cpp       |  486
-rw-r--r-- | clang/lib/CodeGen/CGDecl.cpp          |  163
-rw-r--r-- | clang/lib/CodeGen/CGExpr.cpp          |  615
-rw-r--r-- | clang/lib/CodeGen/CGExprAgg.cpp       |  337
-rw-r--r-- | clang/lib/CodeGen/CGExprComplex.cpp   |  542
-rw-r--r-- | clang/lib/CodeGen/CGExprConstant.cpp  |  627
-rw-r--r-- | clang/lib/CodeGen/CGExprScalar.cpp    | 1185
-rw-r--r-- | clang/lib/CodeGen/CGObjC.cpp          |   25
-rw-r--r-- | clang/lib/CodeGen/CGObjCGNU.cpp       |   97
-rw-r--r-- | clang/lib/CodeGen/CGObjCRuntime.h     |   47
-rw-r--r-- | clang/lib/CodeGen/CGStmt.cpp          |  776
-rw-r--r-- | clang/lib/CodeGen/CodeGenFunction.cpp |  182
-rw-r--r-- | clang/lib/CodeGen/CodeGenFunction.h   |  486
-rw-r--r-- | clang/lib/CodeGen/CodeGenModule.cpp   |  509
-rw-r--r-- | clang/lib/CodeGen/CodeGenModule.h     |  129
-rw-r--r-- | clang/lib/CodeGen/CodeGenTypes.cpp    |  580
-rw-r--r-- | clang/lib/CodeGen/CodeGenTypes.h      |  165
-rw-r--r-- | clang/lib/CodeGen/Makefile            |   23
-rw-r--r-- | clang/lib/CodeGen/ModuleBuilder.cpp   |  104
19 files changed, 7078 insertions, 0 deletions
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp new file mode 100644 index 00000000000..83c5e60475c --- /dev/null +++ b/clang/lib/CodeGen/CGBuiltin.cpp @@ -0,0 +1,486 @@ +//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Builtin calls as LLVM code. +// +//===----------------------------------------------------------------------===// + +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "clang/Basic/TargetInfo.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Builtins.h" +#include "clang/AST/Expr.h" +#include "clang/AST/TargetBuiltins.h" +#include "llvm/Constants.h" +#include "llvm/Function.h" +#include "llvm/Intrinsics.h" +using namespace clang; +using namespace CodeGen; +using namespace llvm; + +RValue CodeGenFunction::EmitBuiltinExpr(unsigned BuiltinID, const CallExpr *E) { + switch (BuiltinID) { + default: { + if (getContext().BuiltinInfo.isLibFunction(BuiltinID)) + return EmitCallExpr(CGM.getBuiltinLibFunction(BuiltinID), + E->getCallee()->getType(), E->arg_begin(), + E->getNumArgs()); + + // See if we have a target specific intrinsic. + Intrinsic::ID IntrinsicID; + const char *TargetPrefix = Target.getTargetPrefix(); + const char *BuiltinName = getContext().BuiltinInfo.GetName(BuiltinID); +#define GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN +#include "llvm/Intrinsics.gen" +#undef GET_LLVM_INTRINSIC_FOR_GCC_BUILTIN + + if (IntrinsicID != Intrinsic::not_intrinsic) { + SmallVector<Value*, 16> Args; + + Function *F = CGM.getIntrinsic(IntrinsicID); + const llvm::FunctionType *FTy = F->getFunctionType(); + + for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) { + Value *ArgValue = EmitScalarExpr(E->getArg(i)); + + // If the intrinsic arg type is different from the builtin arg type + // we need to do a bit cast. + const llvm::Type *PTy = FTy->getParamType(i); + if (PTy != ArgValue->getType()) { + assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) && + "Must be able to losslessly bit cast to param"); + ArgValue = Builder.CreateBitCast(ArgValue, PTy); + } + + Args.push_back(ArgValue); + } + + Value *V = Builder.CreateCall(F, &Args[0], &Args[0] + Args.size()); + QualType BuiltinRetType = E->getType(); + + const llvm::Type *RetTy = llvm::Type::VoidTy; + if (!BuiltinRetType->isVoidType()) RetTy = ConvertType(BuiltinRetType); + + if (RetTy != V->getType()) { + assert(V->getType()->canLosslesslyBitCastTo(RetTy) && + "Must be able to losslessly bit cast result type"); + V = Builder.CreateBitCast(V, RetTy); + } + + return RValue::get(V); + } + + // See if we have a target specific builtin that needs to be lowered. + Value *V = 0; + + if (strcmp(TargetPrefix, "x86") == 0) + V = EmitX86BuiltinExpr(BuiltinID, E); + else if (strcmp(TargetPrefix, "ppc") == 0) + V = EmitPPCBuiltinExpr(BuiltinID, E); + + if (V) + return RValue::get(V); + + WarnUnsupported(E, "builtin function"); + + // Unknown builtin, for now just dump it out and return undef. 
+ if (hasAggregateLLVMType(E->getType())) + return RValue::getAggregate(CreateTempAlloca(ConvertType(E->getType()))); + return RValue::get(UndefValue::get(ConvertType(E->getType()))); + } + case Builtin::BI__builtin___CFStringMakeConstantString: { + const Expr *Arg = E->getArg(0); + + while (1) { + if (const ParenExpr *PE = dyn_cast<ParenExpr>(Arg)) + Arg = PE->getSubExpr(); + else if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Arg)) + Arg = CE->getSubExpr(); + else + break; + } + + const StringLiteral *Literal = cast<StringLiteral>(Arg); + std::string S(Literal->getStrData(), Literal->getByteLength()); + + return RValue::get(CGM.GetAddrOfConstantCFString(S)); + } + case Builtin::BI__builtin_va_start: + case Builtin::BI__builtin_va_end: { + Value *ArgValue = EmitScalarExpr(E->getArg(0)); + const llvm::Type *DestType = + llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + if (ArgValue->getType() != DestType) + ArgValue = Builder.CreateBitCast(ArgValue, DestType, + ArgValue->getNameStart()); + + Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_start) ? + Intrinsic::vastart : Intrinsic::vaend; + return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue)); + } + case Builtin::BI__builtin_va_copy: { + // FIXME: This does not yet handle architectures where va_list is a struct. + Value *DstPtr = EmitScalarExpr(E->getArg(0)); + Value *SrcValue = EmitScalarExpr(E->getArg(1)); + + Value *SrcPtr = CreateTempAlloca(SrcValue->getType(), "dst_ptr"); + + // FIXME: Volatile + Builder.CreateStore(SrcValue, SrcPtr, false); + + const llvm::Type *Type = + llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + + DstPtr = Builder.CreateBitCast(DstPtr, Type); + SrcPtr = Builder.CreateBitCast(SrcPtr, Type); + Value *Args[] = { DstPtr, SrcPtr }; + return RValue::get(Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy), + &Args[0], &Args[2])); + } + case Builtin::BI__builtin_classify_type: { + APSInt Result(32); + if (!E->isBuiltinClassifyType(Result)) + assert(0 && "Expr not __builtin_classify_type!"); + return RValue::get(ConstantInt::get(Result)); + } + case Builtin::BI__builtin_constant_p: { + APSInt Result(32); + // FIXME: Analyze the parameter and check if it is a constant. 
+ Result = 0; + return RValue::get(ConstantInt::get(Result)); + } + case Builtin::BI__builtin_abs: { + Value *ArgValue = EmitScalarExpr(E->getArg(0)); + + llvm::BinaryOperator *NegOp = + Builder.CreateNeg(ArgValue, (ArgValue->getName() + "neg").c_str()); + Value *CmpResult = + Builder.CreateICmpSGE(ArgValue, NegOp->getOperand(0), "abscond"); + Value *Result = + Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs"); + + return RValue::get(Result); + } + case Builtin::BI__builtin_ctz: + case Builtin::BI__builtin_ctzl: + case Builtin::BI__builtin_ctzll: { + Value *ArgValue = EmitScalarExpr(E->getArg(0)); + + const llvm::Type *ArgType = ArgValue->getType(); + Value *F = CGM.getIntrinsic(Intrinsic::cttz, &ArgType, 1); + + const llvm::Type *ResultType = ConvertType(E->getType()); + Value *Result = Builder.CreateCall(F, ArgValue, "tmp"); + if (Result->getType() != ResultType) + Result = Builder.CreateIntCast(Result, ResultType, "cast"); + return RValue::get(Result); + } + case Builtin::BI__builtin_expect: + return RValue::get(EmitScalarExpr(E->getArg(0))); + case Builtin::BI__builtin_bswap32: + case Builtin::BI__builtin_bswap64: { + Value *ArgValue = EmitScalarExpr(E->getArg(0)); + const llvm::Type *ArgType = ArgValue->getType(); + Value *F = CGM.getIntrinsic(Intrinsic::bswap, &ArgType, 1); + return RValue::get(Builder.CreateCall(F, ArgValue, "tmp")); + } + case Builtin::BI__builtin_inff: { + APFloat f(APFloat::IEEEsingle, APFloat::fcInfinity, false); + return RValue::get(ConstantFP::get(llvm::Type::FloatTy, f)); + } + case Builtin::BI__builtin_huge_val: + case Builtin::BI__builtin_inf: + // FIXME: mapping long double onto double. + case Builtin::BI__builtin_infl: { + APFloat f(APFloat::IEEEdouble, APFloat::fcInfinity, false); + return RValue::get(ConstantFP::get(llvm::Type::DoubleTy, f)); + } + case Builtin::BI__builtin_isgreater: + case Builtin::BI__builtin_isgreaterequal: + case Builtin::BI__builtin_isless: + case Builtin::BI__builtin_islessequal: + case Builtin::BI__builtin_islessgreater: + case Builtin::BI__builtin_isunordered: { + // Ordered comparisons: we know the arguments to these are matching scalar + // floating point values. + Value *LHS = EmitScalarExpr(E->getArg(0)); + Value *RHS = EmitScalarExpr(E->getArg(1)); + + switch (BuiltinID) { + default: assert(0 && "Unknown ordered comparison"); + case Builtin::BI__builtin_isgreater: + LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp"); + break; + case Builtin::BI__builtin_isgreaterequal: + LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp"); + break; + case Builtin::BI__builtin_isless: + LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp"); + break; + case Builtin::BI__builtin_islessequal: + LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp"); + break; + case Builtin::BI__builtin_islessgreater: + LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp"); + break; + case Builtin::BI__builtin_isunordered: + LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp"); + break; + } + // ZExt bool to int type. 
+    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType()),
+                                          "tmp"));
+  }
+  case Builtin::BI__builtin_alloca:
+    return RValue::get(Builder.CreateAlloca(llvm::Type::Int8Ty,
+                                            EmitScalarExpr(E->getArg(0)),
+                                            "tmp"));
+  }
+  return RValue::get(0);
+}
+
+Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
+                                           const CallExpr *E) {
+
+  llvm::SmallVector<Value*, 4> Ops;
+
+  for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+    Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+  switch (BuiltinID) {
+  default: return 0;
+  case X86::BI__builtin_ia32_mulps:
+    return Builder.CreateMul(Ops[0], Ops[1], "mulps");
+  case X86::BI__builtin_ia32_pand:
+    return Builder.CreateAnd(Ops[0], Ops[1], "pand");
+  case X86::BI__builtin_ia32_por:
+    return Builder.CreateOr(Ops[0], Ops[1], "por");
+  case X86::BI__builtin_ia32_pxor:
+    return Builder.CreateXor(Ops[0], Ops[1], "pxor");
+  case X86::BI__builtin_ia32_pandn: {
+    Ops[0] = Builder.CreateNot(Ops[0], "tmp");
+    return Builder.CreateAnd(Ops[0], Ops[1], "pandn");
+  }
+  case X86::BI__builtin_ia32_paddb:
+  case X86::BI__builtin_ia32_paddd:
+  case X86::BI__builtin_ia32_paddq:
+  case X86::BI__builtin_ia32_paddw:
+  case X86::BI__builtin_ia32_addps:
+    return Builder.CreateAdd(Ops[0], Ops[1], "add");
+  case X86::BI__builtin_ia32_psubb:
+  case X86::BI__builtin_ia32_psubd:
+  case X86::BI__builtin_ia32_psubq:
+  case X86::BI__builtin_ia32_psubw:
+  case X86::BI__builtin_ia32_subps:
+    return Builder.CreateSub(Ops[0], Ops[1], "sub");
+  case X86::BI__builtin_ia32_divps:
+    return Builder.CreateFDiv(Ops[0], Ops[1], "divps");
+  case X86::BI__builtin_ia32_pmullw:
+    return Builder.CreateMul(Ops[0], Ops[1], "pmul");
+  case X86::BI__builtin_ia32_punpckhbw:
+    return EmitShuffleVector(Ops[0], Ops[1], 4, 12, 5, 13, 6, 14, 7, 15,
+                             "punpckhbw");
+  case X86::BI__builtin_ia32_punpckhwd:
+    return EmitShuffleVector(Ops[0], Ops[1], 2, 6, 3, 7, "punpckhwd");
+  case X86::BI__builtin_ia32_punpckhdq:
+    return EmitShuffleVector(Ops[0], Ops[1], 1, 3, "punpckhdq");
+  case X86::BI__builtin_ia32_punpcklbw:
+    return EmitShuffleVector(Ops[0], Ops[1], 0, 8, 1, 9, 2, 10, 3, 11,
+                             "punpcklbw");
+  case X86::BI__builtin_ia32_punpcklwd:
+    return EmitShuffleVector(Ops[0], Ops[1], 0, 4, 1, 5, "punpcklwd");
+  case X86::BI__builtin_ia32_punpckldq:
+    return EmitShuffleVector(Ops[0], Ops[1], 0, 2, "punpckldq");
+  case X86::BI__builtin_ia32_pslldi:
+  case X86::BI__builtin_ia32_psllqi:
+  case X86::BI__builtin_ia32_psllwi:
+  case X86::BI__builtin_ia32_psradi:
+  case X86::BI__builtin_ia32_psrawi:
+  case X86::BI__builtin_ia32_psrldi:
+  case X86::BI__builtin_ia32_psrlqi:
+  case X86::BI__builtin_ia32_psrlwi: {
+    Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::Int64Ty, "zext");
+    const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::Int64Ty, 1);
+    Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
+    const char *name = 0;
+    Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+    switch (BuiltinID) {
+    default: assert(0 && "Unsupported shift intrinsic!");
+    case X86::BI__builtin_ia32_pslldi:
+      name = "pslldi";
+      ID = Intrinsic::x86_mmx_psll_d;
+      break;
+    case X86::BI__builtin_ia32_psllqi:
+      name = "psllqi";
+      ID = Intrinsic::x86_mmx_psll_q;
+      break;
+    case X86::BI__builtin_ia32_psllwi:
+      name = "psllwi";
+      ID = Intrinsic::x86_mmx_psll_w;
+      break;
+    case X86::BI__builtin_ia32_psradi:
+      name = "psradi";
+      ID = Intrinsic::x86_mmx_psra_d;
+      break;
+    case X86::BI__builtin_ia32_psrawi:
+      name = "psrawi";
+      ID = Intrinsic::x86_mmx_psra_w;
+      break;
+    case X86::BI__builtin_ia32_psrldi:
+      name = "psrldi";
+      ID = Intrinsic::x86_mmx_psrl_d;
+      break;
+    case X86::BI__builtin_ia32_psrlqi:
+      name = "psrlqi";
+      ID = Intrinsic::x86_mmx_psrl_q;
+      break;
+    case X86::BI__builtin_ia32_psrlwi:
+      name = "psrlwi";
+      ID = Intrinsic::x86_mmx_psrl_w;
+      break;
+    }
+    llvm::Function *F = CGM.getIntrinsic(ID);
+    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+  }
+  case X86::BI__builtin_ia32_pshufd: {
+    unsigned i = cast<ConstantInt>(Ops[1])->getZExtValue();
+    return EmitShuffleVector(Ops[0], Ops[0],
+                             i & 0x3, (i & 0xc) >> 2,
+                             (i & 0x30) >> 4, (i & 0xc0) >> 6,
+                             "pshufd");
+  }
+  case X86::BI__builtin_ia32_vec_init_v4hi:
+  case X86::BI__builtin_ia32_vec_init_v8qi:
+  case X86::BI__builtin_ia32_vec_init_v2si:
+    return EmitVector(&Ops[0], Ops.size());
+  case X86::BI__builtin_ia32_vec_ext_v2si:
+    return Builder.CreateExtractElement(Ops[0], Ops[1], "result");
+  case X86::BI__builtin_ia32_cmpordss:
+  case X86::BI__builtin_ia32_cmpunordss:
+  case X86::BI__builtin_ia32_cmpeqss:
+  case X86::BI__builtin_ia32_cmpltss:
+  case X86::BI__builtin_ia32_cmpless:
+  case X86::BI__builtin_ia32_cmpneqss:
+  case X86::BI__builtin_ia32_cmpnltss:
+  case X86::BI__builtin_ia32_cmpnless: {
+    unsigned i = 0;
+    const char *name = 0;
+    switch (BuiltinID) {
+    default: assert(0 && "Unknown compare builtin!");
+    case X86::BI__builtin_ia32_cmpeqss:
+      i = 0;
+      name = "cmpeqss";
+      break;
+    case X86::BI__builtin_ia32_cmpltss:
+      i = 1;
+      name = "cmpltss";
+      break;
+    case X86::BI__builtin_ia32_cmpless:
+      i = 2;
+      name = "cmpless";
+      break;
+    case X86::BI__builtin_ia32_cmpunordss:
+      i = 3;
+      name = "cmpunordss";
+      break;
+    case X86::BI__builtin_ia32_cmpneqss:
+      i = 4;
+      name = "cmpneqss";
+      break;
+    case X86::BI__builtin_ia32_cmpnltss:
+      i = 5;
+      name = "cmpnltss";
+      break;
+    case X86::BI__builtin_ia32_cmpnless:
+      i = 6;
+      name = "cmpnless";
+      break;
+    case X86::BI__builtin_ia32_cmpordss:
+      i = 7;
+      name = "cmpordss";
+      break;
+    }
+
+    Ops.push_back(llvm::ConstantInt::get(llvm::Type::Int8Ty, i));
+    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ss);
+    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+  }
+  case X86::BI__builtin_ia32_cmpordps:
+  case X86::BI__builtin_ia32_cmpunordps:
+  case X86::BI__builtin_ia32_cmpeqps:
+  case X86::BI__builtin_ia32_cmpltps:
+  case X86::BI__builtin_ia32_cmpleps:
+  case X86::BI__builtin_ia32_cmpneqps:
+  case X86::BI__builtin_ia32_cmpngtps:
+  case X86::BI__builtin_ia32_cmpnltps:
+  case X86::BI__builtin_ia32_cmpgtps:
+  case X86::BI__builtin_ia32_cmpgeps:
+  case X86::BI__builtin_ia32_cmpngeps:
+  case X86::BI__builtin_ia32_cmpnleps: {
+    unsigned i = 0;
+    const char *name = 0;
+    bool ShouldSwap = false;
+    switch (BuiltinID) {
+    default: assert(0 && "Unknown compare builtin!");
+    case X86::BI__builtin_ia32_cmpeqps:    i = 0; name = "cmpeqps"; break;
+    case X86::BI__builtin_ia32_cmpltps:    i = 1; name = "cmpltps"; break;
+    case X86::BI__builtin_ia32_cmpleps:    i = 2; name = "cmpleps"; break;
+    case X86::BI__builtin_ia32_cmpunordps: i = 3; name = "cmpunordps"; break;
+    case X86::BI__builtin_ia32_cmpneqps:   i = 4; name = "cmpneqps"; break;
+    case X86::BI__builtin_ia32_cmpnltps:   i = 5; name = "cmpnltps"; break;
+    case X86::BI__builtin_ia32_cmpnleps:   i = 6; name = "cmpnleps"; break;
+    case X86::BI__builtin_ia32_cmpordps:   i = 7; name = "cmpordps"; break;
+    case X86::BI__builtin_ia32_cmpgtps:
+      ShouldSwap = true;
+      i = 1;
+      name = "cmpgtps";
+      break;
+    case X86::BI__builtin_ia32_cmpgeps:
+      i = 2;
+      name = "cmpgeps";
+      ShouldSwap = true;
+      break;
+    case X86::BI__builtin_ia32_cmpngtps:
+      i = 5;
+      name = "cmpngtps";
+      ShouldSwap = true;
+      break;
+    case X86::BI__builtin_ia32_cmpngeps:
+      i = 6;
+      name = "cmpngeps";
+      ShouldSwap = true;
+      break;
+    }
+
+    if (ShouldSwap)
+      std::swap(Ops[0], Ops[1]);
+
+    Ops.push_back(llvm::ConstantInt::get(llvm::Type::Int8Ty, i));
+    llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse_cmp_ps);
+    return Builder.CreateCall(F, &Ops[0], &Ops[0] + Ops.size(), name);
+  }
+  case X86::BI__builtin_ia32_movss:
+    return EmitShuffleVector(Ops[0], Ops[1], 4, 1, 2, 3, "movss");
+  case X86::BI__builtin_ia32_shufps:
+    unsigned i = cast<ConstantInt>(Ops[2])->getZExtValue();
+    return EmitShuffleVector(Ops[0], Ops[1],
+                             i & 0x3, (i & 0xc) >> 2,
+                             ((i & 0x30) >> 4) + 4,
+                             ((i & 0xc0) >> 6) + 4, "shufps");
+  }
+}
+
+Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
+                                           const CallExpr *E) {
+  switch (BuiltinID) {
+  default: return 0;
+  }
+}
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
new file mode 100644
index 00000000000..c80cecc76ee
--- /dev/null
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -0,0 +1,163 @@
+//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Decl nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/AST.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Type.h"
+using namespace clang;
+using namespace CodeGen;
+
+
+void CodeGenFunction::EmitDecl(const Decl &D) {
+  switch (D.getKind()) {
+  default: assert(0 && "Unknown decl kind!");
+  case Decl::FileVar:
+    assert(0 && "Should not see file-scope variables inside a function!");
+  case Decl::ParmVar:
+    assert(0 && "Parmdecls should not be in declstmts!");
+  case Decl::Typedef:    // typedef int X;
+  case Decl::Function:   // void X();
+  case Decl::Struct:     // struct X;
+  case Decl::Union:      // union X;
+  case Decl::Class:      // class X;
+  case Decl::Enum:       // enum X;
+    // None of these decls require codegen support.
+    return;
+
+  case Decl::BlockVar:
+    return EmitBlockVarDecl(cast<BlockVarDecl>(D));
+  case Decl::EnumConstant:
+    return EmitEnumConstantDecl(cast<EnumConstantDecl>(D));
+  }
+}
+
+void CodeGenFunction::EmitEnumConstantDecl(const EnumConstantDecl &D) {
+  assert(0 && "FIXME: Enum constant decls not implemented yet!");
+}
+
+/// EmitBlockVarDecl - This method handles emission of any variable declaration
+/// inside a function, including static vars etc.
+void CodeGenFunction::EmitBlockVarDecl(const BlockVarDecl &D) {
+  switch (D.getStorageClass()) {
+  case VarDecl::Static:
+    return EmitStaticBlockVarDecl(D);
+  case VarDecl::Extern:
+    // Don't emit it now, allow it to be emitted lazily on its first use.
+ return; + default: + assert((D.getStorageClass() == VarDecl::None || + D.getStorageClass() == VarDecl::Auto || + D.getStorageClass() == VarDecl::Register) && + "Unknown storage class"); + return EmitLocalBlockVarDecl(D); + } +} + +void CodeGenFunction::EmitStaticBlockVarDecl(const BlockVarDecl &D) { + QualType Ty = D.getCanonicalType(); + assert(Ty->isConstantSizeType() && "VLAs can't be static"); + + llvm::Value *&DMEntry = LocalDeclMap[&D]; + assert(DMEntry == 0 && "Decl already exists in localdeclmap!"); + + const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty); + llvm::Constant *Init = 0; + if (D.getInit() == 0) { + Init = llvm::Constant::getNullValue(LTy); + } else { + Init = CGM.EmitConstantExpr(D.getInit(), this); + } + + assert(Init && "Unable to create initialiser for static decl"); + + std::string ContextName; + if (CurFuncDecl) + ContextName = CurFuncDecl->getName(); + else + assert(0 && "Unknown context for block var decl"); // FIXME Handle objc. + + DMEntry = + new llvm::GlobalVariable(LTy, false, + llvm::GlobalValue::InternalLinkage, + Init, ContextName + "." + D.getName(), + &CGM.getModule(), 0, + Ty.getAddressSpace()); + +} + +/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a +/// variable declaration with auto, register, or no storage class specifier. +/// These turn into simple stack objects. +void CodeGenFunction::EmitLocalBlockVarDecl(const BlockVarDecl &D) { + QualType Ty = D.getCanonicalType(); + + llvm::Value *DeclPtr; + if (Ty->isConstantSizeType()) { + // A normal fixed sized variable becomes an alloca in the entry block. + const llvm::Type *LTy = ConvertType(Ty); + // TODO: Alignment + DeclPtr = CreateTempAlloca(LTy, D.getName()); + } else { + // TODO: Create a dynamic alloca. + assert(0 && "FIXME: Local VLAs not implemented yet"); + } + + llvm::Value *&DMEntry = LocalDeclMap[&D]; + assert(DMEntry == 0 && "Decl already exists in localdeclmap!"); + DMEntry = DeclPtr; + + // If this local has an initializer, emit it now. + if (const Expr *Init = D.getInit()) { + if (!hasAggregateLLVMType(Init->getType())) { + llvm::Value *V = EmitScalarExpr(Init); + Builder.CreateStore(V, DeclPtr, D.getType().isVolatileQualified()); + } else if (Init->getType()->isComplexType()) { + EmitComplexExprIntoAddr(Init, DeclPtr, D.getType().isVolatileQualified()); + } else { + EmitAggExpr(Init, DeclPtr, D.getType().isVolatileQualified()); + } + } +} + +/// Emit an alloca for the specified parameter and set up LocalDeclMap. +void CodeGenFunction::EmitParmDecl(const ParmVarDecl &D, llvm::Value *Arg) { + QualType Ty = D.getCanonicalType(); + + llvm::Value *DeclPtr; + if (!Ty->isConstantSizeType()) { + // Variable sized values always are passed by-reference. + DeclPtr = Arg; + } else { + // A fixed sized first class variable becomes an alloca in the entry block. + const llvm::Type *LTy = ConvertType(Ty); + if (LTy->isFirstClassType()) { + // TODO: Alignment + DeclPtr = new llvm::AllocaInst(LTy, 0, std::string(D.getName())+".addr", + AllocaInsertPt); + + // Store the initial value into the alloca. + Builder.CreateStore(Arg, DeclPtr); + } else { + // Otherwise, if this is an aggregate, just use the input pointer. 
+ DeclPtr = Arg; + } + Arg->setName(D.getName()); + } + + llvm::Value *&DMEntry = LocalDeclMap[&D]; + assert(DMEntry == 0 && "Decl already exists in localdeclmap!"); + DMEntry = DeclPtr; +} + diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp new file mode 100644 index 00000000000..932a5c5da8b --- /dev/null +++ b/clang/lib/CodeGen/CGExpr.cpp @@ -0,0 +1,615 @@ +//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Expr nodes as LLVM code. +// +//===----------------------------------------------------------------------===// + +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "clang/AST/AST.h" +#include "llvm/Constants.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Function.h" +#include "llvm/GlobalVariable.h" +#include "llvm/Support/MathExtras.h" +using namespace clang; +using namespace CodeGen; + +//===--------------------------------------------------------------------===// +// Miscellaneous Helper Methods +//===--------------------------------------------------------------------===// + +/// CreateTempAlloca - This creates a alloca and inserts it into the entry +/// block. +llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty, + const char *Name) { + return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt); +} + +/// EvaluateExprAsBool - Perform the usual unary conversions on the specified +/// expression and compare the result against zero, returning an Int1Ty value. +llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) { + QualType BoolTy = getContext().BoolTy; + if (!E->getType()->isComplexType()) + return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy); + + return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy); +} + +/// EmitAnyExpr - Emit code to compute the specified expression which can have +/// any type. The result is returned as an RValue struct. If this is an +/// aggregate expression, the aggloc/agglocvolatile arguments indicate where +/// the result should be returned. +RValue CodeGenFunction::EmitAnyExpr(const Expr *E, llvm::Value *AggLoc, + bool isAggLocVolatile) { + if (!hasAggregateLLVMType(E->getType())) + return RValue::get(EmitScalarExpr(E)); + else if (E->getType()->isComplexType()) + return RValue::getComplex(EmitComplexExpr(E)); + + EmitAggExpr(E, AggLoc, isAggLocVolatile); + return RValue::getAggregate(AggLoc); +} + + +//===----------------------------------------------------------------------===// +// LValue Expression Emission +//===----------------------------------------------------------------------===// + +/// EmitLValue - Emit code to compute a designator that specifies the location +/// of the expression. +/// +/// This can return one of two things: a simple address or a bitfield +/// reference. In either case, the LLVM Value* in the LValue structure is +/// guaranteed to be an LLVM pointer type. +/// +/// If this returns a bitfield reference, nothing about the pointee type of +/// the LLVM value is known: For example, it may not be a pointer to an +/// integer. 
+/// +/// If this returns a normal address, and if the lvalue's C type is fixed +/// size, this method guarantees that the returned pointer type will point to +/// an LLVM type of the same size of the lvalue's type. If the lvalue has a +/// variable length type, this is not possible. +/// +LValue CodeGenFunction::EmitLValue(const Expr *E) { + switch (E->getStmtClass()) { + default: { + WarnUnsupported(E, "l-value expression"); + llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType())); + return LValue::MakeAddr(llvm::UndefValue::get(Ty)); + } + + case Expr::CallExprClass: return EmitCallExprLValue(cast<CallExpr>(E)); + case Expr::DeclRefExprClass: return EmitDeclRefLValue(cast<DeclRefExpr>(E)); + case Expr::ParenExprClass:return EmitLValue(cast<ParenExpr>(E)->getSubExpr()); + case Expr::PreDefinedExprClass: + return EmitPreDefinedLValue(cast<PreDefinedExpr>(E)); + case Expr::StringLiteralClass: + return EmitStringLiteralLValue(cast<StringLiteral>(E)); + + case Expr::UnaryOperatorClass: + return EmitUnaryOpLValue(cast<UnaryOperator>(E)); + case Expr::ArraySubscriptExprClass: + return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E)); + case Expr::OCUVectorElementExprClass: + return EmitOCUVectorElementExpr(cast<OCUVectorElementExpr>(E)); + case Expr::MemberExprClass: return EmitMemberExpr(cast<MemberExpr>(E)); + } +} + +/// EmitLoadOfLValue - Given an expression that represents a value lvalue, +/// this method emits the address of the lvalue, then loads the result as an +/// rvalue, returning the rvalue. +RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, QualType ExprType) { + if (LV.isSimple()) { + llvm::Value *Ptr = LV.getAddress(); + const llvm::Type *EltTy = + cast<llvm::PointerType>(Ptr->getType())->getElementType(); + + // Simple scalar l-value. + if (EltTy->isFirstClassType()) { + llvm::Value *V = Builder.CreateLoad(Ptr, "tmp"); + + // Bool can have different representation in memory than in registers. + if (ExprType->isBooleanType()) { + if (V->getType() != llvm::Type::Int1Ty) + V = Builder.CreateTrunc(V, llvm::Type::Int1Ty, "tobool"); + } + + return RValue::get(V); + } + + assert(ExprType->isFunctionType() && "Unknown scalar value"); + return RValue::get(Ptr); + } + + if (LV.isVectorElt()) { + llvm::Value *Vec = Builder.CreateLoad(LV.getVectorAddr(), "tmp"); + return RValue::get(Builder.CreateExtractElement(Vec, LV.getVectorIdx(), + "vecext")); + } + + // If this is a reference to a subset of the elements of a vector, either + // shuffle the input or extract/insert them as appropriate. + if (LV.isOCUVectorElt()) + return EmitLoadOfOCUElementLValue(LV, ExprType); + + if (LV.isBitfield()) + return EmitLoadOfBitfieldLValue(LV, ExprType); + + assert(0 && "Unknown LValue type!"); + //an invalid RValue, but the assert will + //ensure that this point is never reached + return RValue(); +} + +RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV, + QualType ExprType) { + llvm::Value *Ptr = LV.getBitfieldAddr(); + const llvm::Type *EltTy = + cast<llvm::PointerType>(Ptr->getType())->getElementType(); + unsigned EltTySize = EltTy->getPrimitiveSizeInBits(); + unsigned short BitfieldSize = LV.getBitfieldSize(); + unsigned short EndBit = LV.getBitfieldStartBit() + BitfieldSize; + + llvm::Value *V = Builder.CreateLoad(Ptr, "tmp"); + + llvm::Value *ShAmt = llvm::ConstantInt::get(EltTy, EltTySize - EndBit); + V = Builder.CreateShl(V, ShAmt, "tmp"); + + ShAmt = llvm::ConstantInt::get(EltTy, EltTySize - BitfieldSize); + V = LV.isBitfieldSigned() ? 
+ Builder.CreateAShr(V, ShAmt, "tmp") : + Builder.CreateLShr(V, ShAmt, "tmp"); + return RValue::get(V); +} + +// If this is a reference to a subset of the elements of a vector, either +// shuffle the input or extract/insert them as appropriate. +RValue CodeGenFunction::EmitLoadOfOCUElementLValue(LValue LV, + QualType ExprType) { + llvm::Value *Vec = Builder.CreateLoad(LV.getOCUVectorAddr(), "tmp"); + + unsigned EncFields = LV.getOCUVectorElts(); + + // If the result of the expression is a non-vector type, we must be + // extracting a single element. Just codegen as an extractelement. + const VectorType *ExprVT = ExprType->getAsVectorType(); + if (!ExprVT) { + unsigned InIdx = OCUVectorElementExpr::getAccessedFieldNo(0, EncFields); + llvm::Value *Elt = llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx); + return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp")); + } + + // If the source and destination have the same number of elements, use a + // vector shuffle instead of insert/extracts. + unsigned NumResultElts = ExprVT->getNumElements(); + unsigned NumSourceElts = + cast<llvm::VectorType>(Vec->getType())->getNumElements(); + + if (NumResultElts == NumSourceElts) { + llvm::SmallVector<llvm::Constant*, 4> Mask; + for (unsigned i = 0; i != NumResultElts; ++i) { + unsigned InIdx = OCUVectorElementExpr::getAccessedFieldNo(i, EncFields); + Mask.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx)); + } + + llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size()); + Vec = Builder.CreateShuffleVector(Vec, + llvm::UndefValue::get(Vec->getType()), + MaskV, "tmp"); + return RValue::get(Vec); + } + + // Start out with an undef of the result type. + llvm::Value *Result = llvm::UndefValue::get(ConvertType(ExprType)); + + // Extract/Insert each element of the result. + for (unsigned i = 0; i != NumResultElts; ++i) { + unsigned InIdx = OCUVectorElementExpr::getAccessedFieldNo(i, EncFields); + llvm::Value *Elt = llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx); + Elt = Builder.CreateExtractElement(Vec, Elt, "tmp"); + + llvm::Value *OutIdx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i); + Result = Builder.CreateInsertElement(Result, Elt, OutIdx, "tmp"); + } + + return RValue::get(Result); +} + + + +/// EmitStoreThroughLValue - Store the specified rvalue into the specified +/// lvalue, where both are guaranteed to the have the same type, and that type +/// is 'Ty'. +void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, + QualType Ty) { + if (!Dst.isSimple()) { + if (Dst.isVectorElt()) { + // Read/modify/write the vector, inserting the new element. + // FIXME: Volatility. + llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddr(), "tmp"); + Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(), + Dst.getVectorIdx(), "vecins"); + Builder.CreateStore(Vec, Dst.getVectorAddr()); + return; + } + + // If this is an update of elements of a vector, insert them as appropriate. + if (Dst.isOCUVectorElt()) + return EmitStoreThroughOCUComponentLValue(Src, Dst, Ty); + + if (Dst.isBitfield()) + return EmitStoreThroughBitfieldLValue(Src, Dst, Ty); + + assert(0 && "Unknown LValue type"); + } + + llvm::Value *DstAddr = Dst.getAddress(); + assert(Src.isScalar() && "Can't emit an agg store with this method"); + // FIXME: Handle volatility etc. 
+ const llvm::Type *SrcTy = Src.getScalarVal()->getType(); + const llvm::PointerType *DstPtr = cast<llvm::PointerType>(DstAddr->getType()); + const llvm::Type *AddrTy = DstPtr->getElementType(); + unsigned AS = DstPtr->getAddressSpace(); + + if (AddrTy != SrcTy) + DstAddr = Builder.CreateBitCast(DstAddr, + llvm::PointerType::get(SrcTy, AS), + "storetmp"); + Builder.CreateStore(Src.getScalarVal(), DstAddr); +} + +void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, + QualType Ty) { + unsigned short StartBit = Dst.getBitfieldStartBit(); + unsigned short BitfieldSize = Dst.getBitfieldSize(); + llvm::Value *Ptr = Dst.getBitfieldAddr(); + const llvm::Type *EltTy = + cast<llvm::PointerType>(Ptr->getType())->getElementType(); + unsigned EltTySize = EltTy->getPrimitiveSizeInBits(); + + llvm::Value *NewVal = Src.getScalarVal(); + llvm::Value *OldVal = Builder.CreateLoad(Ptr, "tmp"); + + llvm::Value *ShAmt = llvm::ConstantInt::get(EltTy, StartBit); + NewVal = Builder.CreateShl(NewVal, ShAmt, "tmp"); + + llvm::Constant *Mask = llvm::ConstantInt::get( + llvm::APInt::getBitsSet(EltTySize, StartBit, + StartBit + BitfieldSize)); + + // Mask out any bits that shouldn't be set in the result. + NewVal = Builder.CreateAnd(NewVal, Mask, "tmp"); + + // Next, mask out the bits this bit-field should include from the old value. + Mask = llvm::ConstantExpr::getNot(Mask); + OldVal = Builder.CreateAnd(OldVal, Mask, "tmp"); + + // Finally, merge the two together and store it. + NewVal = Builder.CreateOr(OldVal, NewVal, "tmp"); + + Builder.CreateStore(NewVal, Ptr); +} + +void CodeGenFunction::EmitStoreThroughOCUComponentLValue(RValue Src, LValue Dst, + QualType Ty) { + // This access turns into a read/modify/write of the vector. Load the input + // value now. + llvm::Value *Vec = Builder.CreateLoad(Dst.getOCUVectorAddr(), "tmp"); + // FIXME: Volatility. + unsigned EncFields = Dst.getOCUVectorElts(); + + llvm::Value *SrcVal = Src.getScalarVal(); + + if (const VectorType *VTy = Ty->getAsVectorType()) { + unsigned NumSrcElts = VTy->getNumElements(); + + // Extract/Insert each element. + for (unsigned i = 0; i != NumSrcElts; ++i) { + llvm::Value *Elt = llvm::ConstantInt::get(llvm::Type::Int32Ty, i); + Elt = Builder.CreateExtractElement(SrcVal, Elt, "tmp"); + + unsigned Idx = OCUVectorElementExpr::getAccessedFieldNo(i, EncFields); + llvm::Value *OutIdx = llvm::ConstantInt::get(llvm::Type::Int32Ty, Idx); + Vec = Builder.CreateInsertElement(Vec, Elt, OutIdx, "tmp"); + } + } else { + // If the Src is a scalar (not a vector) it must be updating one element. 
+ unsigned InIdx = OCUVectorElementExpr::getAccessedFieldNo(0, EncFields); + llvm::Value *Elt = llvm::ConstantInt::get(llvm::Type::Int32Ty, InIdx); + Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp"); + } + + Builder.CreateStore(Vec, Dst.getOCUVectorAddr()); +} + + +LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) { + const ValueDecl *D = E->getDecl(); + if (isa<BlockVarDecl>(D) || isa<ParmVarDecl>(D)) { + const VarDecl *VD = cast<VarDecl>(D); + if (VD->getStorageClass() == VarDecl::Extern) + return LValue::MakeAddr(CGM.GetAddrOfGlobalVar(VD, false)); + else { + llvm::Value *V = LocalDeclMap[D]; + assert(V && "BlockVarDecl not entered in LocalDeclMap?"); + return LValue::MakeAddr(V); + } + } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) { + return LValue::MakeAddr(CGM.GetAddrOfFunctionDecl(FD, false)); + } else if (const FileVarDecl *FVD = dyn_cast<FileVarDecl>(D)) { + return LValue::MakeAddr(CGM.GetAddrOfGlobalVar(FVD, false)); + } + assert(0 && "Unimp declref"); + //an invalid LValue, but the assert will + //ensure that this point is never reached. + return LValue(); +} + +LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) { + // __extension__ doesn't affect lvalue-ness. + if (E->getOpcode() == UnaryOperator::Extension) + return EmitLValue(E->getSubExpr()); + + switch (E->getOpcode()) { + default: assert(0 && "Unknown unary operator lvalue!"); + case UnaryOperator::Deref: + return LValue::MakeAddr(EmitScalarExpr(E->getSubExpr())); + case UnaryOperator::Real: + case UnaryOperator::Imag: + LValue LV = EmitLValue(E->getSubExpr()); + + llvm::Constant *Zero = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0); + llvm::Constant *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, + E->getOpcode() == UnaryOperator::Imag); + llvm::Value *Ops[] = {Zero, Idx}; + return LValue::MakeAddr(Builder.CreateGEP(LV.getAddress(), Ops, Ops+2, + "idx")); + } +} + +LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) { + assert(!E->isWide() && "FIXME: Wide strings not supported yet!"); + const char *StrData = E->getStrData(); + unsigned Len = E->getByteLength(); + std::string StringLiteral(StrData, StrData+Len); + return LValue::MakeAddr(CGM.GetAddrOfConstantString(StringLiteral)); +} + +LValue CodeGenFunction::EmitPreDefinedLValue(const PreDefinedExpr *E) { + std::string FunctionName(CurFuncDecl->getName()); + std::string GlobalVarName; + + switch (E->getIdentType()) { + default: + assert(0 && "unknown pre-defined ident type"); + case PreDefinedExpr::Func: + GlobalVarName = "__func__."; + break; + case PreDefinedExpr::Function: + GlobalVarName = "__FUNCTION__."; + break; + case PreDefinedExpr::PrettyFunction: + // FIXME:: Demangle C++ method names + GlobalVarName = "__PRETTY_FUNCTION__."; + break; + } + + GlobalVarName += CurFuncDecl->getName(); + + // FIXME: Can cache/reuse these within the module. + llvm::Constant *C=llvm::ConstantArray::get(FunctionName); + + // Create a global variable for this. + C = new llvm::GlobalVariable(C->getType(), true, + llvm::GlobalValue::InternalLinkage, + C, GlobalVarName, CurFn->getParent()); + return LValue::MakeAddr(C); +} + +LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) { + // The index must always be an integer, which is not an aggregate. Emit it. + llvm::Value *Idx = EmitScalarExpr(E->getIdx()); + + // If the base is a vector type, then we are forming a vector element lvalue + // with this subscript. 
+ if (E->getLHS()->getType()->isVectorType()) { + // Emit the vector as an lvalue to get its address. + LValue LHS = EmitLValue(E->getLHS()); + assert(LHS.isSimple() && "Can only subscript lvalue vectors here!"); + // FIXME: This should properly sign/zero/extend or truncate Idx to i32. + return LValue::MakeVectorElt(LHS.getAddress(), Idx); + } + + // The base must be a pointer, which is not an aggregate. Emit it. + llvm::Value *Base = EmitScalarExpr(E->getBase()); + + // Extend or truncate the index type to 32 or 64-bits. + QualType IdxTy = E->getIdx()->getType(); + bool IdxSigned = IdxTy->isSignedIntegerType(); + unsigned IdxBitwidth = cast<llvm::IntegerType>(Idx->getType())->getBitWidth(); + if (IdxBitwidth != LLVMPointerWidth) + Idx = Builder.CreateIntCast(Idx, llvm::IntegerType::get(LLVMPointerWidth), + IdxSigned, "idxprom"); + + // We know that the pointer points to a type of the correct size, unless the + // size is a VLA. + if (!E->getType()->isConstantSizeType()) + assert(0 && "VLA idx not implemented"); + return LValue::MakeAddr(Builder.CreateGEP(Base, Idx, "arrayidx")); +} + +LValue CodeGenFunction:: +EmitOCUVectorElementExpr(const OCUVectorElementExpr *E) { + // Emit the base vector as an l-value. + LValue Base = EmitLValue(E->getBase()); + assert(Base.isSimple() && "Can only subscript lvalue vectors here!"); + + return LValue::MakeOCUVectorElt(Base.getAddress(), + E->getEncodedElementAccess()); +} + +LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) { + bool isUnion = false; + Expr *BaseExpr = E->getBase(); + llvm::Value *BaseValue = NULL; + + // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar. + if (E->isArrow()) { + BaseValue = EmitScalarExpr(BaseExpr); + const PointerType *PTy = + cast<PointerType>(BaseExpr->getType().getCanonicalType()); + if (PTy->getPointeeType()->isUnionType()) + isUnion = true; + } + else { + LValue BaseLV = EmitLValue(BaseExpr); + // FIXME: this isn't right for bitfields. + BaseValue = BaseLV.getAddress(); + if (BaseExpr->getType()->isUnionType()) + isUnion = true; + } + + FieldDecl *Field = E->getMemberDecl(); + return EmitLValueForField(BaseValue, Field, isUnion); +} + +LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue, + FieldDecl* Field, + bool isUnion) +{ + llvm::Value *V; + unsigned idx = CGM.getTypes().getLLVMFieldNo(Field); + + if (Field->isBitField()) { + const llvm::Type * FieldTy = ConvertType(Field->getType()); + const llvm::PointerType * BaseTy = + cast<llvm::PointerType>(BaseValue->getType()); + unsigned AS = BaseTy->getAddressSpace(); + BaseValue = Builder.CreateBitCast(BaseValue, + llvm::PointerType::get(FieldTy, AS), + "tmp"); + V = Builder.CreateGEP(BaseValue, + llvm::ConstantInt::get(llvm::Type::Int32Ty, idx), + "tmp"); + } else { + llvm::Value *Idxs[2] = { llvm::Constant::getNullValue(llvm::Type::Int32Ty), + llvm::ConstantInt::get(llvm::Type::Int32Ty, idx) }; + V = Builder.CreateGEP(BaseValue,Idxs, Idxs + 2, "tmp"); + } + // Match union field type. 
+ if (isUnion) { + const llvm::Type * FieldTy = ConvertType(Field->getType()); + const llvm::PointerType * BaseTy = + cast<llvm::PointerType>(BaseValue->getType()); + if (FieldTy != BaseTy->getElementType()) { + unsigned AS = BaseTy->getAddressSpace(); + V = Builder.CreateBitCast(V, + llvm::PointerType::get(FieldTy, AS), + "tmp"); + } + } + + if (Field->isBitField()) { + CodeGenTypes::BitFieldInfo bitFieldInfo = + CGM.getTypes().getBitFieldInfo(Field); + return LValue::MakeBitfield(V, bitFieldInfo.Begin, bitFieldInfo.Size, + Field->getType()->isSignedIntegerType()); + } else + return LValue::MakeAddr(V); +} + +//===--------------------------------------------------------------------===// +// Expression Emission +//===--------------------------------------------------------------------===// + + +RValue CodeGenFunction::EmitCallExpr(const CallExpr *E) { + if (const ImplicitCastExpr *IcExpr = + dyn_cast<const ImplicitCastExpr>(E->getCallee())) + if (const DeclRefExpr *DRExpr = + dyn_cast<const DeclRefExpr>(IcExpr->getSubExpr())) + if (const FunctionDecl *FDecl = + dyn_cast<const FunctionDecl>(DRExpr->getDecl())) + if (unsigned builtinID = FDecl->getIdentifier()->getBuiltinID()) + return EmitBuiltinExpr(builtinID, E); + + llvm::Value *Callee = EmitScalarExpr(E->getCallee()); + return EmitCallExpr(Callee, E->getCallee()->getType(), + E->arg_begin(), E->getNumArgs()); +} + +RValue CodeGenFunction::EmitCallExpr(Expr *FnExpr, Expr *const *Args, + unsigned NumArgs) { + llvm::Value *Callee = EmitScalarExpr(FnExpr); + return EmitCallExpr(Callee, FnExpr->getType(), Args, NumArgs); +} + +LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) { + // Can only get l-value for call expression returning aggregate type + RValue RV = EmitCallExpr(E); + return LValue::MakeAddr(RV.getAggregateAddr()); +} + +RValue CodeGenFunction::EmitCallExpr(llvm::Value *Callee, QualType FnType, + Expr *const *ArgExprs, unsigned NumArgs) { + // The callee type will always be a pointer to function type, get the function + // type. + FnType = cast<PointerType>(FnType.getCanonicalType())->getPointeeType(); + QualType ResultType = cast<FunctionType>(FnType)->getResultType(); + + llvm::SmallVector<llvm::Value*, 16> Args; + + // Handle struct-return functions by passing a pointer to the location that + // we would like to return into. + if (hasAggregateLLVMType(ResultType)) { + // Create a temporary alloca to hold the result of the call. :( + Args.push_back(CreateTempAlloca(ConvertType(ResultType))); + // FIXME: set the stret attribute on the argument. + } + + for (unsigned i = 0, e = NumArgs; i != e; ++i) { + QualType ArgTy = ArgExprs[i]->getType(); + + if (!hasAggregateLLVMType(ArgTy)) { + // Scalar argument is passed by-value. + Args.push_back(EmitScalarExpr(ArgExprs[i])); + } else if (ArgTy->isComplexType()) { + // Make a temporary alloca to pass the argument. 
+ llvm::Value *DestMem = CreateTempAlloca(ConvertType(ArgTy)); + EmitComplexExprIntoAddr(ArgExprs[i], DestMem, false); + Args.push_back(DestMem); + } else { + llvm::Value *DestMem = CreateTempAlloca(ConvertType(ArgTy)); + EmitAggExpr(ArgExprs[i], DestMem, false); + Args.push_back(DestMem); + } + } + + llvm::CallInst *CI = Builder.CreateCall(Callee,&Args[0],&Args[0]+Args.size()); + if (const llvm::Function *F = dyn_cast<llvm::Function>(Callee)) + CI->setCallingConv(F->getCallingConv()); + if (CI->getType() != llvm::Type::VoidTy) + CI->setName("call"); + else if (ResultType->isComplexType()) + return RValue::getComplex(LoadComplexFromAddr(Args[0], false)); + else if (hasAggregateLLVMType(ResultType)) + // Struct return. + return RValue::getAggregate(Args[0]); + else { + // void return. + assert(ResultType->isVoidType() && "Should only have a void expr here"); + CI = 0; + } + + return RValue::get(CI); +} diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp new file mode 100644 index 00000000000..325ac2109eb --- /dev/null +++ b/clang/lib/CodeGen/CGExprAgg.cpp @@ -0,0 +1,337 @@ +//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Aggregate Expr nodes as LLVM code. +// +//===----------------------------------------------------------------------===// + +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "clang/AST/AST.h" +#include "llvm/Constants.h" +#include "llvm/Function.h" +#include "llvm/GlobalVariable.h" +#include "llvm/Support/Compiler.h" +using namespace clang; +using namespace CodeGen; + +//===----------------------------------------------------------------------===// +// Aggregate Expression Emitter +//===----------------------------------------------------------------------===// + +namespace { +class VISIBILITY_HIDDEN AggExprEmitter : public StmtVisitor<AggExprEmitter> { + CodeGenFunction &CGF; + llvm::LLVMFoldingBuilder &Builder; + llvm::Value *DestPtr; + bool VolatileDest; +public: + AggExprEmitter(CodeGenFunction &cgf, llvm::Value *destPtr, bool volatileDest) + : CGF(cgf), Builder(CGF.Builder), + DestPtr(destPtr), VolatileDest(volatileDest) { + } + + //===--------------------------------------------------------------------===// + // Utilities + //===--------------------------------------------------------------------===// + + /// EmitAggLoadOfLValue - Given an expression with aggregate type that + /// represents a value lvalue, this method emits the address of the lvalue, + /// then loads the result into DestPtr. + void EmitAggLoadOfLValue(const Expr *E); + + void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr, + QualType EltTy); + + void EmitAggregateClear(llvm::Value *DestPtr, QualType Ty); + + void EmitNonConstInit(InitListExpr *E); + + //===--------------------------------------------------------------------===// + // Visitor Methods + //===--------------------------------------------------------------------===// + + void VisitStmt(Stmt *S) { + CGF.WarnUnsupported(S, "aggregate expression"); + } + void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); } + + // l-values. 
+ void VisitDeclRefExpr(DeclRefExpr *DRE) { EmitAggLoadOfLValue(DRE); } + void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); } + void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); } + void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); } + + void VisitArraySubscriptExpr(ArraySubscriptExpr *E) { + EmitAggLoadOfLValue(E); + } + + // Operators. + // case Expr::UnaryOperatorClass: + // case Expr::CastExprClass: + void VisitImplicitCastExpr(ImplicitCastExpr *E); + void VisitCallExpr(const CallExpr *E); + void VisitStmtExpr(const StmtExpr *E); + void VisitBinaryOperator(const BinaryOperator *BO); + void VisitBinAssign(const BinaryOperator *E); + void VisitOverloadExpr(const OverloadExpr *E); + + + void VisitConditionalOperator(const ConditionalOperator *CO); + void VisitInitListExpr(InitListExpr *E); + // case Expr::ChooseExprClass: + +}; +} // end anonymous namespace. + +//===----------------------------------------------------------------------===// +// Utilities +//===----------------------------------------------------------------------===// + +void AggExprEmitter::EmitAggregateClear(llvm::Value *DestPtr, QualType Ty) { + assert(!Ty->isComplexType() && "Shouldn't happen for complex"); + + // Aggregate assignment turns into llvm.memset. + const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + if (DestPtr->getType() != BP) + DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp"); + + // Get size and alignment info for this aggregate. + std::pair<uint64_t, unsigned> TypeInfo = CGF.getContext().getTypeInfo(Ty); + + // FIXME: Handle variable sized types. + const llvm::Type *IntPtr = llvm::IntegerType::get(CGF.LLVMPointerWidth); + + llvm::Value *MemSetOps[4] = { + DestPtr, + llvm::ConstantInt::getNullValue(llvm::Type::Int8Ty), + // TypeInfo.first describes size in bits. + llvm::ConstantInt::get(IntPtr, TypeInfo.first/8), + llvm::ConstantInt::get(llvm::Type::Int32Ty, TypeInfo.second/8) + }; + + Builder.CreateCall(CGF.CGM.getMemSetFn(), MemSetOps, MemSetOps+4); +} + +void AggExprEmitter::EmitAggregateCopy(llvm::Value *DestPtr, + llvm::Value *SrcPtr, QualType Ty) { + assert(!Ty->isComplexType() && "Shouldn't happen for complex"); + + // Aggregate assignment turns into llvm.memcpy. + const llvm::Type *BP = llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + if (DestPtr->getType() != BP) + DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp"); + if (SrcPtr->getType() != BP) + SrcPtr = Builder.CreateBitCast(SrcPtr, BP, "tmp"); + + // Get size and alignment info for this aggregate. + std::pair<uint64_t, unsigned> TypeInfo = CGF.getContext().getTypeInfo(Ty); + + // FIXME: Handle variable sized types. + const llvm::Type *IntPtr = llvm::IntegerType::get(CGF.LLVMPointerWidth); + + llvm::Value *MemCpyOps[4] = { + DestPtr, SrcPtr, + // TypeInfo.first describes size in bits. + llvm::ConstantInt::get(IntPtr, TypeInfo.first/8), + llvm::ConstantInt::get(llvm::Type::Int32Ty, TypeInfo.second/8) + }; + + Builder.CreateCall(CGF.CGM.getMemCpyFn(), MemCpyOps, MemCpyOps+4); +} + + +/// EmitAggLoadOfLValue - Given an expression with aggregate type that +/// represents a value lvalue, this method emits the address of the lvalue, +/// then loads the result into DestPtr. +void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) { + LValue LV = CGF.EmitLValue(E); + assert(LV.isSimple() && "Can't have aggregate bitfield, vector, etc"); + llvm::Value *SrcPtr = LV.getAddress(); + + // If the result is ignored, don't copy from the value. 
+ if (DestPtr == 0) + // FIXME: If the source is volatile, we must read from it. + return; + + EmitAggregateCopy(DestPtr, SrcPtr, E->getType()); +} + +//===----------------------------------------------------------------------===// +// Visitor Methods +//===----------------------------------------------------------------------===// + +void AggExprEmitter::VisitImplicitCastExpr(ImplicitCastExpr *E) +{ + QualType STy = E->getSubExpr()->getType().getCanonicalType(); + QualType Ty = E->getType().getCanonicalType(); + + assert(CGF.getContext().typesAreCompatible( + STy.getUnqualifiedType(), Ty.getUnqualifiedType()) + && "Implicit cast types must be compatible"); + + Visit(E->getSubExpr()); +} + +void AggExprEmitter::VisitCallExpr(const CallExpr *E) +{ + RValue RV = CGF.EmitCallExpr(E); + assert(RV.isAggregate() && "Return value must be aggregate value!"); + + // If the result is ignored, don't copy from the value. + if (DestPtr == 0) + // FIXME: If the source is volatile, we must read from it. + return; + + EmitAggregateCopy(DestPtr, RV.getAggregateAddr(), E->getType()); +} + +void AggExprEmitter::VisitOverloadExpr(const OverloadExpr *E) +{ + RValue RV = CGF.EmitCallExpr(E->getFn(), E->arg_begin(), + E->getNumArgs(CGF.getContext())); + assert(RV.isAggregate() && "Return value must be aggregate value!"); + + // If the result is ignored, don't copy from the value. + if (DestPtr == 0) + // FIXME: If the source is volatile, we must read from it. + return; + + EmitAggregateCopy(DestPtr, RV.getAggregateAddr(), E->getType()); +} + +void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) { + CGF.EmitCompoundStmt(*E->getSubStmt(), true, DestPtr, VolatileDest); +} + +void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) { + CGF.WarnUnsupported(E, "aggregate binary expression"); +} + +void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) { + // For an assignment to work, the value on the right has + // to be compatible with the value on the left. + assert(CGF.getContext().typesAreCompatible( + E->getLHS()->getType().getUnqualifiedType(), + E->getRHS()->getType().getUnqualifiedType()) + && "Invalid assignment"); + LValue LHS = CGF.EmitLValue(E->getLHS()); + + // Codegen the RHS so that it stores directly into the LHS. + CGF.EmitAggExpr(E->getRHS(), LHS.getAddress(), false /*FIXME: VOLATILE LHS*/); + + if (DestPtr == 0) + return; + + // If the result of the assignment is used, copy the RHS there also. + EmitAggregateCopy(DestPtr, LHS.getAddress(), E->getType()); +} + +void AggExprEmitter::VisitConditionalOperator(const ConditionalOperator *E) { + llvm::BasicBlock *LHSBlock = new llvm::BasicBlock("cond.?"); + llvm::BasicBlock *RHSBlock = new llvm::BasicBlock("cond.:"); + llvm::BasicBlock *ContBlock = new llvm::BasicBlock("cond.cont"); + + llvm::Value *Cond = CGF.EvaluateExprAsBool(E->getCond()); + Builder.CreateCondBr(Cond, LHSBlock, RHSBlock); + + CGF.EmitBlock(LHSBlock); + + // Handle the GNU extension for missing LHS. 
+ assert(E->getLHS() && "Must have LHS for aggregate value"); + + Visit(E->getLHS()); + Builder.CreateBr(ContBlock); + LHSBlock = Builder.GetInsertBlock(); + + CGF.EmitBlock(RHSBlock); + + Visit(E->getRHS()); + Builder.CreateBr(ContBlock); + RHSBlock = Builder.GetInsertBlock(); + + CGF.EmitBlock(ContBlock); +} + +void AggExprEmitter::EmitNonConstInit(InitListExpr *E) { + + const llvm::PointerType *APType = + cast<llvm::PointerType>(DestPtr->getType()); + const llvm::Type *DestType = APType->getElementType(); + + if (const llvm::ArrayType *AType = dyn_cast<llvm::ArrayType>(DestType)) { + unsigned NumInitElements = E->getNumInits(); + + llvm::Value *Idxs[] = { + llvm::Constant::getNullValue(llvm::Type::Int32Ty), + NULL + }; + llvm::Value *NextVal = NULL; + unsigned i; + for (i = 0; i != NumInitElements; ++i) { + Idxs[1] = llvm::ConstantInt::get(llvm::Type::Int32Ty, i); + NextVal = Builder.CreateGEP(DestPtr, Idxs, Idxs + 2,".array"); + Expr *Init = E->getInit(i); + if (isa<InitListExpr>(Init)) + CGF.EmitAggExpr(Init, NextVal, VolatileDest); + else + Builder.CreateStore(CGF.EmitScalarExpr(Init), NextVal); + } + + // Emit remaining default initializers + unsigned NumArrayElements = AType->getNumElements(); + QualType QType = E->getInit(0)->getType(); + const llvm::Type *EType = AType->getElementType(); + for (/*Do not initialize i*/; i < NumArrayElements; ++i) { + Idxs[1] = llvm::ConstantInt::get(llvm::Type::Int32Ty, i); + NextVal = Builder.CreateGEP(DestPtr, Idxs, Idxs + 2,".array"); + if (EType->isFirstClassType()) + Builder.CreateStore(llvm::Constant::getNullValue(EType), NextVal); + else + EmitAggregateClear(NextVal, QType); + } + } else + assert(false && "Invalid initializer"); +} + +void AggExprEmitter::VisitInitListExpr(InitListExpr *E) { + + if (E->isConstantExpr(CGF.CGM.getContext(), NULL)) { + llvm::Constant *V = CGF.CGM.EmitConstantExpr(E); + // Create global value to hold this array. + V = new llvm::GlobalVariable(V->getType(), true, + llvm::GlobalValue::InternalLinkage, + V, ".array", + &CGF.CGM.getModule()); + + EmitAggregateCopy(DestPtr, V , E->getType()); + return; + } else { + if (!E->getType()->isArrayType()) { + CGF.WarnUnsupported(E, "aggregate init-list expression"); + return; + } + EmitNonConstInit(E); + } +} + +//===----------------------------------------------------------------------===// +// Entry Points into this File +//===----------------------------------------------------------------------===// + +/// EmitAggExpr - Emit the computation of the specified expression of +/// aggregate type. The result is computed into DestPtr. Note that if +/// DestPtr is null, the value of the aggregate expression is not needed. +void CodeGenFunction::EmitAggExpr(const Expr *E, llvm::Value *DestPtr, + bool VolatileDest) { + assert(E && hasAggregateLLVMType(E->getType()) && + "Invalid aggregate expression to emit"); + + AggExprEmitter(*this, DestPtr, VolatileDest).Visit(const_cast<Expr*>(E)); +} diff --git a/clang/lib/CodeGen/CGExprComplex.cpp b/clang/lib/CodeGen/CGExprComplex.cpp new file mode 100644 index 00000000000..b1de93570d9 --- /dev/null +++ b/clang/lib/CodeGen/CGExprComplex.cpp @@ -0,0 +1,542 @@ +//===--- CGExprComplex.cpp - Emit LLVM Code for Complex Exprs -------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Expr nodes with complex types as LLVM code. +// +//===----------------------------------------------------------------------===// + +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "clang/AST/AST.h" +#include "llvm/Constants.h" +#include "llvm/Function.h" +#include "llvm/ADT/SmallString.h" +#include "llvm/Support/Compiler.h" +using namespace clang; +using namespace CodeGen; + +//===----------------------------------------------------------------------===// +// Complex Expression Emitter +//===----------------------------------------------------------------------===// + +typedef CodeGenFunction::ComplexPairTy ComplexPairTy; + +namespace { +class VISIBILITY_HIDDEN ComplexExprEmitter + : public StmtVisitor<ComplexExprEmitter, ComplexPairTy> { + CodeGenFunction &CGF; + llvm::LLVMFoldingBuilder &Builder; +public: + ComplexExprEmitter(CodeGenFunction &cgf) : CGF(cgf), Builder(CGF.Builder) { + } + + + //===--------------------------------------------------------------------===// + // Utilities + //===--------------------------------------------------------------------===// + + /// EmitLoadOfLValue - Given an expression with complex type that represents a + /// value l-value, this method emits the address of the l-value, then loads + /// and returns the result. + ComplexPairTy EmitLoadOfLValue(const Expr *E) { + LValue LV = CGF.EmitLValue(E); + // FIXME: Volatile + return EmitLoadOfComplex(LV.getAddress(), false); + } + + /// EmitLoadOfComplex - Given a pointer to a complex value, emit code to load + /// the real and imaginary pieces. + ComplexPairTy EmitLoadOfComplex(llvm::Value *SrcPtr, bool isVolatile); + + /// EmitStoreOfComplex - Store the specified real/imag parts into the + /// specified value pointer. + void EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *ResPtr, bool isVol); + + /// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType. + ComplexPairTy EmitComplexToComplexCast(ComplexPairTy Val, QualType SrcType, + QualType DestType); + + //===--------------------------------------------------------------------===// + // Visitor Methods + //===--------------------------------------------------------------------===// + + ComplexPairTy VisitStmt(Stmt *S) { + S->dump(CGF.getContext().getSourceManager()); + assert(0 && "Stmt can't have complex result type!"); + return ComplexPairTy(); + } + ComplexPairTy VisitExpr(Expr *S); + ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());} + ComplexPairTy VisitImaginaryLiteral(const ImaginaryLiteral *IL); + + // l-values. + ComplexPairTy VisitDeclRefExpr(const Expr *E) { return EmitLoadOfLValue(E); } + ComplexPairTy VisitArraySubscriptExpr(Expr *E) { return EmitLoadOfLValue(E); } + ComplexPairTy VisitMemberExpr(const Expr *E) { return EmitLoadOfLValue(E); } + + // FIXME: CompoundLiteralExpr + + ComplexPairTy EmitCast(Expr *Op, QualType DestTy); + ComplexPairTy VisitImplicitCastExpr(ImplicitCastExpr *E) { + // Unlike for scalars, we don't have to worry about function->ptr demotion + // here. + return EmitCast(E->getSubExpr(), E->getType()); + } + ComplexPairTy VisitCastExpr(CastExpr *E) { + return EmitCast(E->getSubExpr(), E->getType()); + } + ComplexPairTy VisitCallExpr(const CallExpr *E); + ComplexPairTy VisitStmtExpr(const StmtExpr *E); + ComplexPairTy VisitOverloadExpr(const OverloadExpr *OE); + + // Operators. 
+ ComplexPairTy VisitPrePostIncDec(const UnaryOperator *E, + bool isInc, bool isPre); + ComplexPairTy VisitUnaryPostDec(const UnaryOperator *E) { + return VisitPrePostIncDec(E, false, false); + } + ComplexPairTy VisitUnaryPostInc(const UnaryOperator *E) { + return VisitPrePostIncDec(E, true, false); + } + ComplexPairTy VisitUnaryPreDec(const UnaryOperator *E) { + return VisitPrePostIncDec(E, false, true); + } + ComplexPairTy VisitUnaryPreInc(const UnaryOperator *E) { + return VisitPrePostIncDec(E, true, true); + } + ComplexPairTy VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); } + ComplexPairTy VisitUnaryPlus (const UnaryOperator *E) { + return Visit(E->getSubExpr()); + } + ComplexPairTy VisitUnaryMinus (const UnaryOperator *E); + ComplexPairTy VisitUnaryNot (const UnaryOperator *E); + // LNot,SizeOf,AlignOf,Real,Imag never return complex. + ComplexPairTy VisitUnaryExtension(const UnaryOperator *E) { + return Visit(E->getSubExpr()); + } + + struct BinOpInfo { + ComplexPairTy LHS; + ComplexPairTy RHS; + QualType Ty; // Computation Type. + }; + + BinOpInfo EmitBinOps(const BinaryOperator *E); + ComplexPairTy EmitCompoundAssign(const CompoundAssignOperator *E, + ComplexPairTy (ComplexExprEmitter::*Func) + (const BinOpInfo &)); + + ComplexPairTy EmitBinAdd(const BinOpInfo &Op); + ComplexPairTy EmitBinSub(const BinOpInfo &Op); + ComplexPairTy EmitBinMul(const BinOpInfo &Op); + ComplexPairTy EmitBinDiv(const BinOpInfo &Op); + + ComplexPairTy VisitBinMul(const BinaryOperator *E) { + return EmitBinMul(EmitBinOps(E)); + } + ComplexPairTy VisitBinAdd(const BinaryOperator *E) { + return EmitBinAdd(EmitBinOps(E)); + } + ComplexPairTy VisitBinSub(const BinaryOperator *E) { + return EmitBinSub(EmitBinOps(E)); + } + ComplexPairTy VisitBinDiv(const BinaryOperator *E) { + return EmitBinDiv(EmitBinOps(E)); + } + + // Compound assignments. + ComplexPairTy VisitBinAddAssign(const CompoundAssignOperator *E) { + return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinAdd); + } + ComplexPairTy VisitBinSubAssign(const CompoundAssignOperator *E) { + return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinSub); + } + ComplexPairTy VisitBinMulAssign(const CompoundAssignOperator *E) { + return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinMul); + } + ComplexPairTy VisitBinDivAssign(const CompoundAssignOperator *E) { + return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinDiv); + } + + // GCC rejects rem/and/or/xor for integer complex. + // Logical and/or always return int, never complex. + + // No comparisons produce a complex result. + ComplexPairTy VisitBinAssign (const BinaryOperator *E); + ComplexPairTy VisitBinComma (const BinaryOperator *E); + + + ComplexPairTy VisitConditionalOperator(const ConditionalOperator *CO); + ComplexPairTy VisitChooseExpr(ChooseExpr *CE); +}; +} // end anonymous namespace. + +//===----------------------------------------------------------------------===// +// Utilities +//===----------------------------------------------------------------------===// + +/// EmitLoadOfComplex - Given an RValue reference for a complex, emit code to +/// load the real and imaginary pieces, returning them as Real/Imag. 
+ComplexPairTy ComplexExprEmitter::EmitLoadOfComplex(llvm::Value *SrcPtr,
+ bool isVolatile) {
+ llvm::Constant *Zero = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
+ llvm::Constant *One = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1);
+
+ llvm::SmallString<64> Name(SrcPtr->getNameStart(),
+ SrcPtr->getNameStart()+SrcPtr->getNameLen());
+
+ Name += ".realp";
+ llvm::Value *Ops[] = {Zero, Zero};
+ llvm::Value *RealPtr = Builder.CreateGEP(SrcPtr, Ops, Ops+2, Name.c_str());
+
+ Name.pop_back(); // .realp -> .real
+ llvm::Value *Real = Builder.CreateLoad(RealPtr, isVolatile, Name.c_str());
+
+ Name.resize(Name.size()-4); // .real -> .imagp
+ Name += "imagp";
+
+ Ops[1] = One; // Ops = { Zero, One }
+ llvm::Value *ImagPtr = Builder.CreateGEP(SrcPtr, Ops, Ops+2, Name.c_str());
+
+ Name.pop_back(); // .imagp -> .imag
+ llvm::Value *Imag = Builder.CreateLoad(ImagPtr, isVolatile, Name.c_str());
+ return ComplexPairTy(Real, Imag);
+}
+
+/// EmitStoreOfComplex - Store the specified real/imag parts into the
+/// specified value pointer.
+void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *Ptr,
+ bool isVolatile) {
+ llvm::Constant *Zero = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0);
+ llvm::Constant *One = llvm::ConstantInt::get(llvm::Type::Int32Ty, 1);
+
+ llvm::Value *Ops[] = {Zero, Zero};
+ llvm::Value *RealPtr = Builder.CreateGEP(Ptr, Ops, Ops+2, "real");
+
+ Ops[1] = One; // Ops = { Zero, One }
+ llvm::Value *ImagPtr = Builder.CreateGEP(Ptr, Ops, Ops+2, "imag");
+
+ Builder.CreateStore(Val.first, RealPtr, isVolatile);
+ Builder.CreateStore(Val.second, ImagPtr, isVolatile);
+}
+
+
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+ComplexPairTy ComplexExprEmitter::VisitExpr(Expr *E) {
+ CGF.WarnUnsupported(E, "complex expression");
+ const llvm::Type *EltTy =
+ CGF.ConvertType(E->getType()->getAsComplexType()->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return ComplexPairTy(U, U);
+}
+
+ComplexPairTy ComplexExprEmitter::
+VisitImaginaryLiteral(const ImaginaryLiteral *IL) {
+ llvm::Value *Imag = CGF.EmitScalarExpr(IL->getSubExpr());
+ return ComplexPairTy(llvm::Constant::getNullValue(Imag->getType()), Imag);
+}
+
+
+ComplexPairTy ComplexExprEmitter::VisitCallExpr(const CallExpr *E) {
+ return CGF.EmitCallExpr(E).getComplexVal();
+}
+
+ComplexPairTy ComplexExprEmitter::VisitOverloadExpr(const OverloadExpr *E) {
+ return CGF.EmitCallExpr(E->getFn(), E->arg_begin(),
+ E->getNumArgs(CGF.getContext())).getComplexVal();
+}
+
+ComplexPairTy ComplexExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ return CGF.EmitCompoundStmt(*E->getSubStmt(), true).getComplexVal();
+}
+
+/// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
+ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
+ QualType SrcType,
+ QualType DestType) {
+ // Get the src/dest element type.
+ SrcType = cast<ComplexType>(SrcType.getCanonicalType())->getElementType();
+ DestType = cast<ComplexType>(DestType.getCanonicalType())->getElementType();
+
+ // C99 6.3.1.6: When a value of complex type is converted to another
+ // complex type, both the real and imaginary parts follow the conversion
+ // rules for the corresponding real types.
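An illustrative aside, not part of the patch: EmitLoadOfComplex/EmitStoreOfComplex above model a _Complex value as a two-element aggregate and address its halves with {0, 0} and {0, 1} GEPs, and the cast code that follows converts the two halves independently per the C99 rule just quoted. A stand-alone sketch of the equivalent source-level layout and access pattern (all names here are illustrative only):

    // Sketch only: how the emitter views a _Complex double in memory.
    struct ComplexDouble { double real, imag; }; // element 0 = real, element 1 = imag

    static ComplexDouble loadComplex(const ComplexDouble *p) {
      ComplexDouble v;
      v.real = p->real;  // corresponds to the {0, 0} GEP (".realp") plus load
      v.imag = p->imag;  // corresponds to the {0, 1} GEP (".imagp") plus load
      return v;
    }

    static void storeComplex(ComplexDouble *p, ComplexDouble v) {
      p->real = v.real;  // {0, 0} GEP plus store
      p->imag = v.imag;  // {0, 1} GEP plus store
    }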
+ Val.first = CGF.EmitScalarConversion(Val.first, SrcType, DestType); + Val.second = CGF.EmitScalarConversion(Val.second, SrcType, DestType); + return Val; +} + +ComplexPairTy ComplexExprEmitter::EmitCast(Expr *Op, QualType DestTy) { + // Two cases here: cast from (complex to complex) and (scalar to complex). + if (Op->getType()->isComplexType()) + return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy); + + // C99 6.3.1.7: When a value of real type is converted to a complex type, the + // real part of the complex result value is determined by the rules of + // conversion to the corresponding real type and the imaginary part of the + // complex result value is a positive zero or an unsigned zero. + llvm::Value *Elt = CGF.EmitScalarExpr(Op); + + // Convert the input element to the element type of the complex. + DestTy = cast<ComplexType>(DestTy.getCanonicalType())->getElementType(); + Elt = CGF.EmitScalarConversion(Elt, Op->getType(), DestTy); + + // Return (realval, 0). + return ComplexPairTy(Elt, llvm::Constant::getNullValue(Elt->getType())); +} + +ComplexPairTy ComplexExprEmitter::VisitPrePostIncDec(const UnaryOperator *E, + bool isInc, bool isPre) { + LValue LV = CGF.EmitLValue(E->getSubExpr()); + // FIXME: Handle volatile! + ComplexPairTy InVal = EmitLoadOfComplex(LV.getAddress(), false); + + uint64_t AmountVal = isInc ? 1 : -1; + + llvm::Value *NextVal; + if (isa<llvm::IntegerType>(InVal.first->getType())) + NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal); + else if (InVal.first->getType() == llvm::Type::FloatTy) + // FIXME: Handle long double. + NextVal = + llvm::ConstantFP::get(InVal.first->getType(), + llvm::APFloat(static_cast<float>(AmountVal))); + else { + // FIXME: Handle long double. + assert(InVal.first->getType() == llvm::Type::DoubleTy); + NextVal = + llvm::ConstantFP::get(InVal.first->getType(), + llvm::APFloat(static_cast<double>(AmountVal))); + } + + // Add the inc/dec to the real part. + NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec"); + + ComplexPairTy IncVal(NextVal, InVal.second); + + // Store the updated result through the lvalue. + EmitStoreOfComplex(IncVal, LV.getAddress(), false); /* FIXME: Volatile */ + + // If this is a postinc, return the value read from memory, otherwise use the + // updated value. + return isPre ? 
IncVal : InVal; +} + +ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) { + ComplexPairTy Op = Visit(E->getSubExpr()); + llvm::Value *ResR = Builder.CreateNeg(Op.first, "neg.r"); + llvm::Value *ResI = Builder.CreateNeg(Op.second, "neg.i"); + return ComplexPairTy(ResR, ResI); +} + +ComplexPairTy ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *E) { + // ~(a+ib) = a + i*-b + ComplexPairTy Op = Visit(E->getSubExpr()); + llvm::Value *ResI = Builder.CreateNeg(Op.second, "conj.i"); + return ComplexPairTy(Op.first, ResI); +} + +ComplexPairTy ComplexExprEmitter::EmitBinAdd(const BinOpInfo &Op) { + llvm::Value *ResR = Builder.CreateAdd(Op.LHS.first, Op.RHS.first, "add.r"); + llvm::Value *ResI = Builder.CreateAdd(Op.LHS.second, Op.RHS.second, "add.i"); + return ComplexPairTy(ResR, ResI); +} + +ComplexPairTy ComplexExprEmitter::EmitBinSub(const BinOpInfo &Op) { + llvm::Value *ResR = Builder.CreateSub(Op.LHS.first, Op.RHS.first, "sub.r"); + llvm::Value *ResI = Builder.CreateSub(Op.LHS.second, Op.RHS.second, "sub.i"); + return ComplexPairTy(ResR, ResI); +} + + +ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) { + llvm::Value *ResRl = Builder.CreateMul(Op.LHS.first, Op.RHS.first, "mul.rl"); + llvm::Value *ResRr = Builder.CreateMul(Op.LHS.second, Op.RHS.second,"mul.rr"); + llvm::Value *ResR = Builder.CreateSub(ResRl, ResRr, "mul.r"); + + llvm::Value *ResIl = Builder.CreateMul(Op.LHS.second, Op.RHS.first, "mul.il"); + llvm::Value *ResIr = Builder.CreateMul(Op.LHS.first, Op.RHS.second, "mul.ir"); + llvm::Value *ResI = Builder.CreateAdd(ResIl, ResIr, "mul.i"); + return ComplexPairTy(ResR, ResI); +} + +ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) { + llvm::Value *LHSr = Op.LHS.first, *LHSi = Op.LHS.second; + llvm::Value *RHSr = Op.RHS.first, *RHSi = Op.RHS.second; + + // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd)) + llvm::Value *Tmp1 = Builder.CreateMul(LHSr, RHSr, "tmp"); // a*c + llvm::Value *Tmp2 = Builder.CreateMul(LHSi, RHSi, "tmp"); // b*d + llvm::Value *Tmp3 = Builder.CreateAdd(Tmp1, Tmp2, "tmp"); // ac+bd + + llvm::Value *Tmp4 = Builder.CreateMul(RHSr, RHSr, "tmp"); // c*c + llvm::Value *Tmp5 = Builder.CreateMul(RHSi, RHSi, "tmp"); // d*d + llvm::Value *Tmp6 = Builder.CreateAdd(Tmp4, Tmp5, "tmp"); // cc+dd + + llvm::Value *Tmp7 = Builder.CreateMul(LHSi, RHSr, "tmp"); // b*c + llvm::Value *Tmp8 = Builder.CreateMul(LHSr, RHSi, "tmp"); // a*d + llvm::Value *Tmp9 = Builder.CreateSub(Tmp7, Tmp8, "tmp"); // bc-ad + + llvm::Value *DSTr, *DSTi; + if (Tmp3->getType()->isFloatingPoint()) { + DSTr = Builder.CreateFDiv(Tmp3, Tmp6, "tmp"); + DSTi = Builder.CreateFDiv(Tmp9, Tmp6, "tmp"); + } else { + if (Op.Ty->getAsComplexType()->getElementType()->isUnsignedIntegerType()) { + DSTr = Builder.CreateUDiv(Tmp3, Tmp6, "tmp"); + DSTi = Builder.CreateUDiv(Tmp9, Tmp6, "tmp"); + } else { + DSTr = Builder.CreateSDiv(Tmp3, Tmp6, "tmp"); + DSTi = Builder.CreateSDiv(Tmp9, Tmp6, "tmp"); + } + } + + return ComplexPairTy(DSTr, DSTi); +} + +ComplexExprEmitter::BinOpInfo +ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) { + BinOpInfo Ops; + Ops.LHS = Visit(E->getLHS()); + Ops.RHS = Visit(E->getRHS()); + Ops.Ty = E->getType(); + return Ops; +} + + +// Compound assignments. +ComplexPairTy ComplexExprEmitter:: +EmitCompoundAssign(const CompoundAssignOperator *E, + ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){ + QualType LHSTy = E->getLHS()->getType(), RHSTy = E->getRHS()->getType(); + + // Load the LHS and RHS operands. 
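An illustrative aside, not part of the patch: EmitBinMul and EmitBinDiv above expand the textbook formulas term by term, with no scaling against overflow or underflow. The same arithmetic written out on plain doubles, so the Tmp1..Tmp9 sequence is easier to follow (names are illustrative only):

    // Sketch only: scalar analogues of the complex mul/div expansions.
    struct Cplx { double r, i; };

    static Cplx mulCplx(Cplx a, Cplx b) {
      Cplx res;
      res.r = a.r * b.r - a.i * b.i; // mul.rl - mul.rr
      res.i = a.i * b.r + a.r * b.i; // mul.il + mul.ir
      return res;
    }

    static Cplx divCplx(Cplx a, Cplx b) {
      double denom = b.r * b.r + b.i * b.i;    // Tmp6 = cc + dd
      Cplx res;
      res.r = (a.r * b.r + a.i * b.i) / denom; // Tmp3 / Tmp6
      res.i = (a.i * b.r - a.r * b.i) / denom; // Tmp9 / Tmp6
      return res;
    }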
+ LValue LHSLV = CGF.EmitLValue(E->getLHS()); + + BinOpInfo OpInfo; + OpInfo.Ty = E->getComputationType(); + + // We know the LHS is a complex lvalue. + OpInfo.LHS = EmitLoadOfComplex(LHSLV.getAddress(), false);// FIXME: Volatile. + OpInfo.LHS = EmitComplexToComplexCast(OpInfo.LHS, LHSTy, OpInfo.Ty); + + // It is possible for the RHS to be complex or scalar. + OpInfo.RHS = EmitCast(E->getRHS(), OpInfo.Ty); + + // Expand the binary operator. + ComplexPairTy Result = (this->*Func)(OpInfo); + + // Truncate the result back to the LHS type. + Result = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy); + + // Store the result value into the LHS lvalue. + EmitStoreOfComplex(Result, LHSLV.getAddress(), false); // FIXME: VOLATILE + return Result; +} + +ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) { + assert(E->getLHS()->getType().getCanonicalType() == + E->getRHS()->getType().getCanonicalType() && "Invalid assignment"); + // Emit the RHS. + ComplexPairTy Val = Visit(E->getRHS()); + + // Compute the address to store into. + LValue LHS = CGF.EmitLValue(E->getLHS()); + + // Store into it. + // FIXME: Volatility! + EmitStoreOfComplex(Val, LHS.getAddress(), false); + return Val; +} + +ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) { + CGF.EmitStmt(E->getLHS()); + return Visit(E->getRHS()); +} + +ComplexPairTy ComplexExprEmitter:: +VisitConditionalOperator(const ConditionalOperator *E) { + llvm::BasicBlock *LHSBlock = new llvm::BasicBlock("cond.?"); + llvm::BasicBlock *RHSBlock = new llvm::BasicBlock("cond.:"); + llvm::BasicBlock *ContBlock = new llvm::BasicBlock("cond.cont"); + + llvm::Value *Cond = CGF.EvaluateExprAsBool(E->getCond()); + Builder.CreateCondBr(Cond, LHSBlock, RHSBlock); + + CGF.EmitBlock(LHSBlock); + + // Handle the GNU extension for missing LHS. + assert(E->getLHS() && "Must have LHS for complex value"); + + ComplexPairTy LHS = Visit(E->getLHS()); + Builder.CreateBr(ContBlock); + LHSBlock = Builder.GetInsertBlock(); + + CGF.EmitBlock(RHSBlock); + + ComplexPairTy RHS = Visit(E->getRHS()); + Builder.CreateBr(ContBlock); + RHSBlock = Builder.GetInsertBlock(); + + CGF.EmitBlock(ContBlock); + + // Create a PHI node for the real part. + llvm::PHINode *RealPN = Builder.CreatePHI(LHS.first->getType(), "cond.r"); + RealPN->reserveOperandSpace(2); + RealPN->addIncoming(LHS.first, LHSBlock); + RealPN->addIncoming(RHS.first, RHSBlock); + + // Create a PHI node for the imaginary part. + llvm::PHINode *ImagPN = Builder.CreatePHI(LHS.first->getType(), "cond.i"); + ImagPN->reserveOperandSpace(2); + ImagPN->addIncoming(LHS.second, LHSBlock); + ImagPN->addIncoming(RHS.second, RHSBlock); + + return ComplexPairTy(RealPN, ImagPN); +} + +ComplexPairTy ComplexExprEmitter::VisitChooseExpr(ChooseExpr *E) { + // Emit the LHS or RHS as appropriate. + return Visit(E->isConditionTrue(CGF.getContext()) ? E->getLHS() :E->getRHS()); +} + +//===----------------------------------------------------------------------===// +// Entry Point into this File +//===----------------------------------------------------------------------===// + +/// EmitComplexExpr - Emit the computation of the specified expression of +/// complex type, ignoring the result. 
+ComplexPairTy CodeGenFunction::EmitComplexExpr(const Expr *E) { + assert(E && E->getType()->isComplexType() && + "Invalid complex expression to emit"); + + return ComplexExprEmitter(*this).Visit(const_cast<Expr*>(E)); +} + +/// EmitComplexExprIntoAddr - Emit the computation of the specified expression +/// of complex type, storing into the specified Value*. +void CodeGenFunction::EmitComplexExprIntoAddr(const Expr *E, + llvm::Value *DestAddr, + bool DestIsVolatile) { + assert(E && E->getType()->isComplexType() && + "Invalid complex expression to emit"); + ComplexExprEmitter Emitter(*this); + ComplexPairTy Val = Emitter.Visit(const_cast<Expr*>(E)); + Emitter.EmitStoreOfComplex(Val, DestAddr, DestIsVolatile); +} + +/// LoadComplexFromAddr - Load a complex number from the specified address. +ComplexPairTy CodeGenFunction::LoadComplexFromAddr(llvm::Value *SrcAddr, + bool SrcIsVolatile) { + return ComplexExprEmitter(*this).EmitLoadOfComplex(SrcAddr, SrcIsVolatile); +} diff --git a/clang/lib/CodeGen/CGExprConstant.cpp b/clang/lib/CodeGen/CGExprConstant.cpp new file mode 100644 index 00000000000..e2405b88f37 --- /dev/null +++ b/clang/lib/CodeGen/CGExprConstant.cpp @@ -0,0 +1,627 @@ +//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Constant Expr nodes as LLVM code. +// +//===----------------------------------------------------------------------===// + +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "clang/AST/AST.h" +#include "llvm/Constants.h" +#include "llvm/Function.h" +#include "llvm/GlobalVariable.h" +#include "llvm/Support/Compiler.h" +using namespace clang; +using namespace CodeGen; + +namespace { +class VISIBILITY_HIDDEN ConstExprEmitter : + public StmtVisitor<ConstExprEmitter, llvm::Constant*> { + CodeGenModule &CGM; + CodeGenFunction *CGF; +public: + ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf) + : CGM(cgm), CGF(cgf) { + } + + //===--------------------------------------------------------------------===// + // Visitor Methods + //===--------------------------------------------------------------------===// + + llvm::Constant *VisitStmt(Stmt *S) { + CGM.WarnUnsupported(S, "constant expression"); + QualType T = cast<Expr>(S)->getType(); + return llvm::UndefValue::get(CGM.getTypes().ConvertType(T)); + } + + llvm::Constant *VisitParenExpr(ParenExpr *PE) { + return Visit(PE->getSubExpr()); + } + + // Leaves + llvm::Constant *VisitIntegerLiteral(const IntegerLiteral *E) { + return llvm::ConstantInt::get(E->getValue()); + } + llvm::Constant *VisitFloatingLiteral(const FloatingLiteral *E) { + return llvm::ConstantFP::get(ConvertType(E->getType()), E->getValue()); + } + llvm::Constant *VisitCharacterLiteral(const CharacterLiteral *E) { + return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue()); + } + llvm::Constant *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { + return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue()); + } + + llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { + return Visit(E->getInitializer()); + } + + llvm::Constant *VisitCastExpr(const CastExpr* E) { + llvm::Constant *C = Visit(E->getSubExpr()); + + return EmitConversion(C, E->getSubExpr()->getType(), E->getType()); + } + + 
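An illustrative aside, not part of the patch: the Emit*Initialization helpers that follow (like EmitNonConstInit in CGExprAgg.cpp above) fill whatever the braces do not cover with zero values, and the struct path also zero-fills LLVM padding fields that have no source-level counterpart. At the source level this is the usual C rule:

    /* Sketch only. */
    int a[5] = { 1, 2 };     /* emitted as { 1, 2, 0, 0, 0 }     */
    struct P { int x, y, z; };
    struct P p = { 7 };      /* p.y and p.z are zero-initialized */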
 llvm::Constant *EmitArrayInitialization(InitListExpr *ILE,
+ const llvm::ArrayType *AType) {
+ std::vector<llvm::Constant*> Elts;
+ unsigned NumInitElements = ILE->getNumInits();
+ // FIXME: Check for wide strings
+ if (NumInitElements > 0 && isa<StringLiteral>(ILE->getInit(0)) &&
+ ILE->getType()->getAsArrayType()->getElementType()->isCharType())
+ return Visit(ILE->getInit(0));
+ const llvm::Type *ElemTy = AType->getElementType();
+ unsigned NumElements = AType->getNumElements();
+
+ // Initialising an array requires us to automatically
+ // initialise any elements that have not been initialised explicitly.
+ unsigned NumInitableElts = std::min(NumInitElements, NumElements);
+
+ // Copy initializer elements.
+ unsigned i = 0;
+ for (; i < NumInitableElts; ++i) {
+
+ llvm::Constant *C = Visit(ILE->getInit(i));
+ // FIXME: Remove this when sema of initializers is finished (and the code
+ // above).
+ if (C == 0 && ILE->getInit(i)->getType()->isVoidType()) {
+ if (ILE->getType()->isVoidType()) return 0;
+ return llvm::UndefValue::get(AType);
+ }
+ assert (C && "Failed to create initializer expression");
+ Elts.push_back(C);
+ }
+
+ // Initialize remaining array elements.
+ for (; i < NumElements; ++i)
+ Elts.push_back(llvm::Constant::getNullValue(ElemTy));
+
+ return llvm::ConstantArray::get(AType, Elts);
+ }
+
+ llvm::Constant *EmitStructInitialization(InitListExpr *ILE,
+ const llvm::StructType *SType) {
+
+ TagDecl *TD = ILE->getType()->getAsRecordType()->getDecl();
+ std::vector<llvm::Constant*> Elts;
+ const CGRecordLayout *CGR = CGM.getTypes().getCGRecordLayout(TD);
+ unsigned NumInitElements = ILE->getNumInits();
+ unsigned NumElements = SType->getNumElements();
+
+ // Initialising a structure requires us to automatically
+ // initialise any elements that have not been initialised explicitly.
+ unsigned NumInitableElts = std::min(NumInitElements, NumElements);
+
+ // Copy initializer elements. Skip padding fields.
+ unsigned EltNo = 0; // Element no in ILE
+ unsigned FieldNo = 0; // Field no in SType
+ while (EltNo < NumInitableElts) {
+
+ // Zero initialize padding field.
+ if (CGR->isPaddingField(FieldNo)) {
+ const llvm::Type *FieldTy = SType->getElementType(FieldNo);
+ Elts.push_back(llvm::Constant::getNullValue(FieldTy));
+ FieldNo++;
+ continue;
+ }
+
+ llvm::Constant *C = Visit(ILE->getInit(EltNo));
+ // FIXME: Remove this when sema of initializers is finished (and the code
+ // above).
+ if (C == 0 && ILE->getInit(EltNo)->getType()->isVoidType()) {
+ if (ILE->getType()->isVoidType()) return 0;
+ return llvm::UndefValue::get(SType);
+ }
+ assert (C && "Failed to create initializer expression");
+ Elts.push_back(C);
+ EltNo++;
+ FieldNo++;
+ }
+
+ // Initialize remaining structure elements.
+ for (unsigned i = Elts.size(); i < NumElements; ++i) {
+ const llvm::Type *FieldTy = SType->getElementType(i);
+ Elts.push_back(llvm::Constant::getNullValue(FieldTy));
+ }
+
+ return llvm::ConstantStruct::get(SType, Elts);
+ }
+
+ llvm::Constant *EmitVectorInitialization(InitListExpr *ILE,
+ const llvm::VectorType *VType) {
+
+ std::vector<llvm::Constant*> Elts;
+ unsigned NumInitElements = ILE->getNumInits();
+ unsigned NumElements = VType->getNumElements();
+
+ assert (NumInitElements == NumElements
+ && "Insufficient vector init elements");
+ // Copy initializer elements.
+ unsigned i = 0;
+ for (; i < NumElements; ++i) {
+
+ llvm::Constant *C = Visit(ILE->getInit(i));
+ // FIXME: Remove this when sema of initializers is finished (and the code
+ // above).
+ if (C == 0 && ILE->getInit(i)->getType()->isVoidType()) { + if (ILE->getType()->isVoidType()) return 0; + return llvm::UndefValue::get(VType); + } + assert (C && "Failed to create initializer expression"); + Elts.push_back(C); + } + + return llvm::ConstantVector::get(VType, Elts); + } + + llvm::Constant *VisitInitListExpr(InitListExpr *ILE) { + const llvm::CompositeType *CType = + dyn_cast<llvm::CompositeType>(ConvertType(ILE->getType())); + + if (!CType) { + // We have a scalar in braces. Just use the first element. + return Visit(ILE->getInit(0)); + } + + if (const llvm::ArrayType *AType = dyn_cast<llvm::ArrayType>(CType)) + return EmitArrayInitialization(ILE, AType); + + if (const llvm::StructType *SType = dyn_cast<llvm::StructType>(CType)) + return EmitStructInitialization(ILE, SType); + + if (const llvm::VectorType *VType = dyn_cast<llvm::VectorType>(CType)) + return EmitVectorInitialization(ILE, VType); + + // Make sure we have an array at this point + assert(0 && "Unable to handle InitListExpr"); + // Get rid of control reaches end of void function warning. + // Not reached. + return 0; + } + + llvm::Constant *VisitImplicitCastExpr(ImplicitCastExpr *ICExpr) { + Expr* SExpr = ICExpr->getSubExpr(); + QualType SType = SExpr->getType(); + llvm::Constant *C; // the intermediate expression + QualType T; // the type of the intermediate expression + if (SType->isArrayType()) { + // Arrays decay to a pointer to the first element + // VLAs would require special handling, but they can't occur here + C = EmitLValue(SExpr); + llvm::Constant *Idx0 = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0); + llvm::Constant *Ops[] = {Idx0, Idx0}; + C = llvm::ConstantExpr::getGetElementPtr(C, Ops, 2); + + QualType ElemType = SType->getAsArrayType()->getElementType(); + T = CGM.getContext().getPointerType(ElemType); + } else if (SType->isFunctionType()) { + // Function types decay to a pointer to the function + C = EmitLValue(SExpr); + T = CGM.getContext().getPointerType(SType); + } else { + C = Visit(SExpr); + T = SType; + } + + // Perform the conversion; note that an implicit cast can both promote + // and convert an array/function + return EmitConversion(C, T, ICExpr->getType()); + } + + llvm::Constant *VisitStringLiteral(StringLiteral *E) { + const char *StrData = E->getStrData(); + unsigned Len = E->getByteLength(); + assert(!E->getType()->isPointerType() && "Strings are always arrays"); + + // Otherwise this must be a string initializing an array in a static + // initializer. Don't emit it as the address of the string, emit the string + // data itself as an inline array. + const ConstantArrayType *CAT = E->getType()->getAsConstantArrayType(); + assert(CAT && "String isn't pointer or array!"); + + std::string Str(StrData, StrData + Len); + // Null terminate the string before potentially truncating it. + // FIXME: What about wchar_t strings? + Str.push_back(0); + + uint64_t RealLen = CAT->getSize().getZExtValue(); + // String or grow the initializer to the required size. 
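An illustrative aside, not part of the patch: the resize on the next line makes the emitted constant exactly as long as the declared array, NUL-padding when the array is larger than the literal and trimming (including the implicit terminator, which C permits for an exactly-sized array) when it is smaller. For example:

    /* Sketch only. */
    char buf[8] = "hi"; /* emitted as 'h', 'i' followed by six NUL bytes */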
+ if (RealLen != Str.size()) + Str.resize(RealLen); + + return llvm::ConstantArray::get(Str, false); + } + + llvm::Constant *VisitDeclRefExpr(DeclRefExpr *E) { + const ValueDecl *Decl = E->getDecl(); + if (const EnumConstantDecl *EC = dyn_cast<EnumConstantDecl>(Decl)) + return llvm::ConstantInt::get(EC->getInitVal()); + assert(0 && "Unsupported decl ref type!"); + return 0; + } + + llvm::Constant *VisitSizeOfAlignOfTypeExpr(const SizeOfAlignOfTypeExpr *E) { + return EmitSizeAlignOf(E->getArgumentType(), E->getType(), E->isSizeOf()); + } + + // Unary operators + llvm::Constant *VisitUnaryPlus(const UnaryOperator *E) { + return Visit(E->getSubExpr()); + } + llvm::Constant *VisitUnaryMinus(const UnaryOperator *E) { + return llvm::ConstantExpr::getNeg(Visit(E->getSubExpr())); + } + llvm::Constant *VisitUnaryNot(const UnaryOperator *E) { + return llvm::ConstantExpr::getNot(Visit(E->getSubExpr())); + } + llvm::Constant *VisitUnaryLNot(const UnaryOperator *E) { + llvm::Constant *SubExpr = Visit(E->getSubExpr()); + + if (E->getSubExpr()->getType()->isRealFloatingType()) { + // Compare against 0.0 for fp scalars. + llvm::Constant *Zero = llvm::Constant::getNullValue(SubExpr->getType()); + SubExpr = llvm::ConstantExpr::getFCmp(llvm::FCmpInst::FCMP_UEQ, SubExpr, + Zero); + } else { + assert((E->getSubExpr()->getType()->isIntegerType() || + E->getSubExpr()->getType()->isPointerType()) && + "Unknown scalar type to convert"); + // Compare against an integer or pointer null. + llvm::Constant *Zero = llvm::Constant::getNullValue(SubExpr->getType()); + SubExpr = llvm::ConstantExpr::getICmp(llvm::ICmpInst::ICMP_EQ, SubExpr, + Zero); + } + + return llvm::ConstantExpr::getZExt(SubExpr, ConvertType(E->getType())); + } + llvm::Constant *VisitUnarySizeOf(const UnaryOperator *E) { + return EmitSizeAlignOf(E->getSubExpr()->getType(), E->getType(), true); + } + llvm::Constant *VisitUnaryAlignOf(const UnaryOperator *E) { + return EmitSizeAlignOf(E->getSubExpr()->getType(), E->getType(), false); + } + llvm::Constant *VisitUnaryAddrOf(const UnaryOperator *E) { + return EmitLValue(E->getSubExpr()); + } + llvm::Constant *VisitUnaryOffsetOf(const UnaryOperator *E) { + int64_t Val = E->evaluateOffsetOf(CGM.getContext()); + + assert(E->getType()->isIntegerType() && "Result type must be an integer!"); + + uint32_t ResultWidth = + static_cast<uint32_t>(CGM.getContext().getTypeSize(E->getType())); + return llvm::ConstantInt::get(llvm::APInt(ResultWidth, Val)); + } + + // Binary operators + llvm::Constant *VisitBinOr(const BinaryOperator *E) { + llvm::Constant *LHS = Visit(E->getLHS()); + llvm::Constant *RHS = Visit(E->getRHS()); + + return llvm::ConstantExpr::getOr(LHS, RHS); + } + llvm::Constant *VisitBinSub(const BinaryOperator *E) { + llvm::Constant *LHS = Visit(E->getLHS()); + llvm::Constant *RHS = Visit(E->getRHS()); + + if (!isa<llvm::PointerType>(RHS->getType())) { + // pointer - int + if (isa<llvm::PointerType>(LHS->getType())) { + llvm::Constant *Idx = llvm::ConstantExpr::getNeg(RHS); + + return llvm::ConstantExpr::getGetElementPtr(LHS, &Idx, 1); + } + + // int - int + return llvm::ConstantExpr::getSub(LHS, RHS); + } + + assert(0 && "Unhandled bin sub case!"); + return 0; + } + + llvm::Constant *VisitBinShl(const BinaryOperator *E) { + llvm::Constant *LHS = Visit(E->getLHS()); + llvm::Constant *RHS = Visit(E->getRHS()); + + // LLVM requires the LHS and RHS to be the same type: promote or truncate the + // RHS to the same size as the LHS. 
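An illustrative aside, not part of the patch: C allows the two shift operands to have different widths, while LLVM's shl requires matching operand types, hence the integer cast just below. For instance:

    /* Sketch only. */
    static const int k = 1 << 3L; /* int LHS, long RHS: the RHS is cast to the LHS width before the shl */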
+ if (LHS->getType() != RHS->getType()) + RHS = llvm::ConstantExpr::getIntegerCast(RHS, LHS->getType(), false); + + return llvm::ConstantExpr::getShl(LHS, RHS); + } + + llvm::Constant *VisitBinMul(const BinaryOperator *E) { + llvm::Constant *LHS = Visit(E->getLHS()); + llvm::Constant *RHS = Visit(E->getRHS()); + + return llvm::ConstantExpr::getMul(LHS, RHS); + } + + llvm::Constant *VisitBinDiv(const BinaryOperator *E) { + llvm::Constant *LHS = Visit(E->getLHS()); + llvm::Constant *RHS = Visit(E->getRHS()); + + if (LHS->getType()->isFPOrFPVector()) + return llvm::ConstantExpr::getFDiv(LHS, RHS); + else if (E->getType()->isUnsignedIntegerType()) + return llvm::ConstantExpr::getUDiv(LHS, RHS); + else + return llvm::ConstantExpr::getSDiv(LHS, RHS); + } + + llvm::Constant *VisitBinAdd(const BinaryOperator *E) { + llvm::Constant *LHS = Visit(E->getLHS()); + llvm::Constant *RHS = Visit(E->getRHS()); + + if (!E->getType()->isPointerType()) + return llvm::ConstantExpr::getAdd(LHS, RHS); + + llvm::Constant *Ptr, *Idx; + if (isa<llvm::PointerType>(LHS->getType())) { // pointer + int + Ptr = LHS; + Idx = RHS; + } else { // int + pointer + Ptr = RHS; + Idx = LHS; + } + + return llvm::ConstantExpr::getGetElementPtr(Ptr, &Idx, 1); + } + + llvm::Constant *VisitBinAnd(const BinaryOperator *E) { + llvm::Constant *LHS = Visit(E->getLHS()); + llvm::Constant *RHS = Visit(E->getRHS()); + + return llvm::ConstantExpr::getAnd(LHS, RHS); + } + + // Utility methods + const llvm::Type *ConvertType(QualType T) { + return CGM.getTypes().ConvertType(T); + } + + llvm::Constant *EmitConversionToBool(llvm::Constant *Src, QualType SrcType) { + assert(SrcType->isCanonical() && "EmitConversion strips typedefs"); + + if (SrcType->isRealFloatingType()) { + // Compare against 0.0 for fp scalars. + llvm::Constant *Zero = llvm::Constant::getNullValue(Src->getType()); + return llvm::ConstantExpr::getFCmp(llvm::FCmpInst::FCMP_UNE, Src, Zero); + } + + assert((SrcType->isIntegerType() || SrcType->isPointerType()) && + "Unknown scalar type to convert"); + + // Compare against an integer or pointer null. + llvm::Constant *Zero = llvm::Constant::getNullValue(Src->getType()); + return llvm::ConstantExpr::getICmp(llvm::ICmpInst::ICMP_NE, Src, Zero); + } + + llvm::Constant *EmitConversion(llvm::Constant *Src, QualType SrcType, + QualType DstType) { + SrcType = SrcType.getCanonicalType(); + DstType = DstType.getCanonicalType(); + if (SrcType == DstType) return Src; + + // Handle conversions to bool first, they are special: comparisons against 0. + if (DstType->isBooleanType()) + return EmitConversionToBool(Src, SrcType); + + const llvm::Type *DstTy = ConvertType(DstType); + + // Ignore conversions like int -> uint. + if (Src->getType() == DstTy) + return Src; + + // Handle pointer conversions next: pointers can only be converted to/from + // other pointers and integers. + if (isa<PointerType>(DstType)) { + // The source value may be an integer, or a pointer. + if (isa<llvm::PointerType>(Src->getType())) + return llvm::ConstantExpr::getBitCast(Src, DstTy); + assert(SrcType->isIntegerType() &&"Not ptr->ptr or int->ptr conversion?"); + return llvm::ConstantExpr::getIntToPtr(Src, DstTy); + } + + if (isa<PointerType>(SrcType)) { + // Must be an ptr to int cast. 
+ assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?"); + return llvm::ConstantExpr::getPtrToInt(Src, DstTy); + } + + // A scalar source can be splatted to a vector of the same element type + if (isa<llvm::VectorType>(DstTy) && !isa<VectorType>(SrcType)) { + const llvm::VectorType *VT = cast<llvm::VectorType>(DstTy); + assert((VT->getElementType() == Src->getType()) && + "Vector element type must match scalar type to splat."); + unsigned NumElements = DstType->getAsVectorType()->getNumElements(); + llvm::SmallVector<llvm::Constant*, 16> Elements; + for (unsigned i = 0; i < NumElements; i++) + Elements.push_back(Src); + + return llvm::ConstantVector::get(&Elements[0], NumElements); + } + + if (isa<llvm::VectorType>(Src->getType()) || + isa<llvm::VectorType>(DstTy)) { + return llvm::ConstantExpr::getBitCast(Src, DstTy); + } + + // Finally, we have the arithmetic types: real int/float. + if (isa<llvm::IntegerType>(Src->getType())) { + bool InputSigned = SrcType->isSignedIntegerType(); + if (isa<llvm::IntegerType>(DstTy)) + return llvm::ConstantExpr::getIntegerCast(Src, DstTy, InputSigned); + else if (InputSigned) + return llvm::ConstantExpr::getSIToFP(Src, DstTy); + else + return llvm::ConstantExpr::getUIToFP(Src, DstTy); + } + + assert(Src->getType()->isFloatingPoint() && "Unknown real conversion"); + if (isa<llvm::IntegerType>(DstTy)) { + if (DstType->isSignedIntegerType()) + return llvm::ConstantExpr::getFPToSI(Src, DstTy); + else + return llvm::ConstantExpr::getFPToUI(Src, DstTy); + } + + assert(DstTy->isFloatingPoint() && "Unknown real conversion"); + if (DstTy->getTypeID() < Src->getType()->getTypeID()) + return llvm::ConstantExpr::getFPTrunc(Src, DstTy); + else + return llvm::ConstantExpr::getFPExtend(Src, DstTy); + } + + llvm::Constant *EmitSizeAlignOf(QualType TypeToSize, + QualType RetType, bool isSizeOf) { + std::pair<uint64_t, unsigned> Info = + CGM.getContext().getTypeInfo(TypeToSize); + + uint64_t Val = isSizeOf ? Info.first : Info.second; + Val /= 8; // Return size in bytes, not bits. + + assert(RetType->isIntegerType() && "Result type must be an integer!"); + + uint32_t ResultWidth = + static_cast<uint32_t>(CGM.getContext().getTypeSize(RetType)); + return llvm::ConstantInt::get(llvm::APInt(ResultWidth, Val)); + } + + llvm::Constant *EmitLValue(Expr *E) { + switch (E->getStmtClass()) { + default: break; + case Expr::ParenExprClass: + // Elide parenthesis + return EmitLValue(cast<ParenExpr>(E)->getSubExpr()); + case Expr::CompoundLiteralExprClass: { + // Note that due to the nature of compound literals, this is guaranteed + // to be the only use of the variable, so we just generate it here. 
+ CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E); + llvm::Constant* C = Visit(CLE->getInitializer()); + C = new llvm::GlobalVariable(C->getType(),E->getType().isConstQualified(), + llvm::GlobalValue::InternalLinkage, + C, ".compoundliteral", &CGM.getModule()); + return C; + } + case Expr::DeclRefExprClass: { + ValueDecl *Decl = cast<DeclRefExpr>(E)->getDecl(); + if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl)) + return CGM.GetAddrOfFunctionDecl(FD, false); + if (const FileVarDecl* VD = dyn_cast<FileVarDecl>(Decl)) + return CGM.GetAddrOfGlobalVar(VD, false); + if (const BlockVarDecl* BVD = dyn_cast<BlockVarDecl>(Decl)) { + assert(CGF && "Can't access static local vars without CGF"); + return CGF->GetAddrOfStaticLocalVar(BVD); + } + break; + } + case Expr::MemberExprClass: { + MemberExpr* ME = cast<MemberExpr>(E); + llvm::Constant *Base; + if (ME->isArrow()) + Base = Visit(ME->getBase()); + else + Base = EmitLValue(ME->getBase()); + + unsigned FieldNumber = CGM.getTypes().getLLVMFieldNo(ME->getMemberDecl()); + llvm::Constant *Zero = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0); + llvm::Constant *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, + FieldNumber); + llvm::Value *Ops[] = {Zero, Idx}; + return llvm::ConstantExpr::getGetElementPtr(Base, Ops, 2); + } + case Expr::ArraySubscriptExprClass: { + ArraySubscriptExpr* ASExpr = cast<ArraySubscriptExpr>(E); + llvm::Constant *Base = Visit(ASExpr->getBase()); + llvm::Constant *Index = Visit(ASExpr->getIdx()); + assert(!ASExpr->getBase()->getType()->isVectorType() && + "Taking the address of a vector component is illegal!"); + return llvm::ConstantExpr::getGetElementPtr(Base, &Index, 1); + } + case Expr::StringLiteralClass: { + StringLiteral *String = cast<StringLiteral>(E); + assert(!String->isWide() && "Cannot codegen wide strings yet"); + const char *StrData = String->getStrData(); + unsigned Len = String->getByteLength(); + + return CGM.GetAddrOfConstantString(std::string(StrData, StrData + Len)); + } + case Expr::UnaryOperatorClass: { + UnaryOperator *Exp = cast<UnaryOperator>(E); + switch (Exp->getOpcode()) { + default: break; + case UnaryOperator::Extension: + // Extension is just a wrapper for expressions + return EmitLValue(Exp->getSubExpr()); + case UnaryOperator::Real: + case UnaryOperator::Imag: { + // The address of __real or __imag is just a GEP off the address + // of the internal expression + llvm::Constant* C = EmitLValue(Exp->getSubExpr()); + llvm::Constant *Zero = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0); + llvm::Constant *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, + Exp->getOpcode() == UnaryOperator::Imag); + llvm::Value *Ops[] = {Zero, Idx}; + return llvm::ConstantExpr::getGetElementPtr(C, Ops, 2); + } + case UnaryOperator::Deref: + // The address of a deref is just the value of the expression + return Visit(Exp->getSubExpr()); + } + break; + } + } + CGM.WarnUnsupported(E, "constant l-value expression"); + llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType())); + return llvm::UndefValue::get(Ty); + } + +}; + +} // end anonymous namespace. 
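An illustrative aside, not part of the patch: EmitConstantExpr, defined next, first tries the AST's integer-constant-expression evaluator and only falls back to the ConstExprEmitter visitor above for everything else. Typical file-scope initializers that exercise the two paths (C99 syntax, illustrative only):

    /* Sketch only. */
    static int  a = 4 * 1024;        /* folded by the integer fast path          */
    static int *p = &a;              /* address-of: handled by EmitLValue above  */
    static int *q = (int[]){ 1, 2 }; /* compound literal: emitted as an internal
                                        ".compoundliteral" global                */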
+ + +llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E, + CodeGenFunction *CGF) +{ + QualType type = E->getType().getCanonicalType(); + + if (type->isIntegerType()) { + llvm::APSInt Value(static_cast<uint32_t>(Context.getTypeSize(type))); + if (E->isIntegerConstantExpr(Value, Context)) { + return llvm::ConstantInt::get(Value); + } + } + + return ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E)); +} diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp new file mode 100644 index 00000000000..892712a0d4c --- /dev/null +++ b/clang/lib/CodeGen/CGExprScalar.cpp @@ -0,0 +1,1185 @@ +//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Expr nodes with scalar LLVM types as LLVM code. +// +//===----------------------------------------------------------------------===// + +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "clang/AST/AST.h" +#include "llvm/Constants.h" +#include "llvm/Function.h" +#include "llvm/GlobalVariable.h" +#include "llvm/Intrinsics.h" +#include "llvm/Support/Compiler.h" +#include <cstdarg> + +using namespace clang; +using namespace CodeGen; +using llvm::Value; + +//===----------------------------------------------------------------------===// +// Scalar Expression Emitter +//===----------------------------------------------------------------------===// + +struct BinOpInfo { + Value *LHS; + Value *RHS; + QualType Ty; // Computation Type. + const BinaryOperator *E; +}; + +namespace { +class VISIBILITY_HIDDEN ScalarExprEmitter + : public StmtVisitor<ScalarExprEmitter, Value*> { + CodeGenFunction &CGF; + llvm::LLVMFoldingBuilder &Builder; + CGObjCRuntime *Runtime; + + +public: + + ScalarExprEmitter(CodeGenFunction &cgf) : CGF(cgf), + Builder(CGF.Builder), + Runtime(CGF.CGM.getObjCRuntime()) { + } + + + //===--------------------------------------------------------------------===// + // Utilities + //===--------------------------------------------------------------------===// + + const llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); } + LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); } + + Value *EmitLoadOfLValue(LValue LV, QualType T) { + return CGF.EmitLoadOfLValue(LV, T).getScalarVal(); + } + + /// EmitLoadOfLValue - Given an expression with complex type that represents a + /// value l-value, this method emits the address of the l-value, then loads + /// and returns the result. + Value *EmitLoadOfLValue(const Expr *E) { + // FIXME: Volatile + return EmitLoadOfLValue(EmitLValue(E), E->getType()); + } + + /// EmitConversionToBool - Convert the specified expression value to a + /// boolean (i1) truth value. This is equivalent to "Val != 0". + Value *EmitConversionToBool(Value *Src, QualType DstTy); + + /// EmitScalarConversion - Emit a conversion from the specified type to the + /// specified destination type, both of which are LLVM scalar types. + Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy); + + /// EmitComplexToScalarConversion - Emit a conversion from the specified + /// complex type to the specified destination type, where the destination + /// type is an LLVM scalar type. 
+ Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src, + QualType SrcTy, QualType DstTy); + + //===--------------------------------------------------------------------===// + // Visitor Methods + //===--------------------------------------------------------------------===// + + Value *VisitStmt(Stmt *S) { + S->dump(CGF.getContext().getSourceManager()); + assert(0 && "Stmt can't have complex result type!"); + return 0; + } + Value *VisitExpr(Expr *S); + Value *VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr()); } + + // Leaves. + Value *VisitIntegerLiteral(const IntegerLiteral *E) { + return llvm::ConstantInt::get(E->getValue()); + } + Value *VisitFloatingLiteral(const FloatingLiteral *E) { + return llvm::ConstantFP::get(ConvertType(E->getType()), E->getValue()); + } + Value *VisitCharacterLiteral(const CharacterLiteral *E) { + return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue()); + } + Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) { + return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue()); + } + Value *VisitTypesCompatibleExpr(const TypesCompatibleExpr *E) { + return llvm::ConstantInt::get(ConvertType(E->getType()), + CGF.getContext().typesAreCompatible( + E->getArgType1(), E->getArgType2())); + } + Value *VisitSizeOfAlignOfTypeExpr(const SizeOfAlignOfTypeExpr *E) { + return EmitSizeAlignOf(E->getArgumentType(), E->getType(), E->isSizeOf()); + } + + // l-values. + Value *VisitDeclRefExpr(DeclRefExpr *E) { + if (const EnumConstantDecl *EC = dyn_cast<EnumConstantDecl>(E->getDecl())) + return llvm::ConstantInt::get(EC->getInitVal()); + return EmitLoadOfLValue(E); + } + Value *VisitObjCMessageExpr(ObjCMessageExpr *E); + Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E); + Value *VisitMemberExpr(Expr *E) { return EmitLoadOfLValue(E); } + Value *VisitOCUVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); } + Value *VisitStringLiteral(Expr *E) { return EmitLValue(E).getAddress(); } + Value *VisitPreDefinedExpr(Expr *E) { return EmitLValue(E).getAddress(); } + + Value *VisitInitListExpr(InitListExpr *E) { + unsigned NumInitElements = E->getNumInits(); + + const llvm::VectorType *VType = + dyn_cast<llvm::VectorType>(ConvertType(E->getType())); + + // We have a scalar in braces. Just use the first element. + if (!VType) + return Visit(E->getInit(0)); + + unsigned NumVectorElements = VType->getNumElements(); + const llvm::Type *ElementType = VType->getElementType(); + + // Emit individual vector element stores. 
+ llvm::Value *V = llvm::UndefValue::get(VType); + + // Emit initializers + unsigned i; + for (i = 0; i < NumInitElements; ++i) { + Value *NewV = Visit(E->getInit(i)); + Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i); + V = Builder.CreateInsertElement(V, NewV, Idx); + } + + // Emit remaining default initializers + for (/* Do not initialize i*/; i < NumVectorElements; ++i) { + Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i); + llvm::Value *NewV = llvm::Constant::getNullValue(ElementType); + V = Builder.CreateInsertElement(V, NewV, Idx); + } + + return V; + } + + Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) { + return Visit(E->getInitializer()); + } + + Value *VisitImplicitCastExpr(const ImplicitCastExpr *E); + Value *VisitCastExpr(const CastExpr *E) { + return EmitCastExpr(E->getSubExpr(), E->getType()); + } + Value *EmitCastExpr(const Expr *E, QualType T); + + Value *VisitCallExpr(const CallExpr *E) { + return CGF.EmitCallExpr(E).getScalarVal(); + } + + Value *VisitStmtExpr(const StmtExpr *E); + + // Unary Operators. + Value *VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre); + Value *VisitUnaryPostDec(const UnaryOperator *E) { + return VisitPrePostIncDec(E, false, false); + } + Value *VisitUnaryPostInc(const UnaryOperator *E) { + return VisitPrePostIncDec(E, true, false); + } + Value *VisitUnaryPreDec(const UnaryOperator *E) { + return VisitPrePostIncDec(E, false, true); + } + Value *VisitUnaryPreInc(const UnaryOperator *E) { + return VisitPrePostIncDec(E, true, true); + } + Value *VisitUnaryAddrOf(const UnaryOperator *E) { + return EmitLValue(E->getSubExpr()).getAddress(); + } + Value *VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); } + Value *VisitUnaryPlus(const UnaryOperator *E) { + return Visit(E->getSubExpr()); + } + Value *VisitUnaryMinus (const UnaryOperator *E); + Value *VisitUnaryNot (const UnaryOperator *E); + Value *VisitUnaryLNot (const UnaryOperator *E); + Value *VisitUnarySizeOf (const UnaryOperator *E) { + return EmitSizeAlignOf(E->getSubExpr()->getType(), E->getType(), true); + } + Value *VisitUnaryAlignOf (const UnaryOperator *E) { + return EmitSizeAlignOf(E->getSubExpr()->getType(), E->getType(), false); + } + Value *EmitSizeAlignOf(QualType TypeToSize, QualType RetType, + bool isSizeOf); + Value *VisitUnaryReal (const UnaryOperator *E); + Value *VisitUnaryImag (const UnaryOperator *E); + Value *VisitUnaryExtension(const UnaryOperator *E) { + return Visit(E->getSubExpr()); + } + Value *VisitUnaryOffsetOf(const UnaryOperator *E); + + // Binary Operators. + Value *EmitMul(const BinOpInfo &Ops) { + return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul"); + } + Value *EmitDiv(const BinOpInfo &Ops); + Value *EmitRem(const BinOpInfo &Ops); + Value *EmitAdd(const BinOpInfo &Ops); + Value *EmitSub(const BinOpInfo &Ops); + Value *EmitShl(const BinOpInfo &Ops); + Value *EmitShr(const BinOpInfo &Ops); + Value *EmitAnd(const BinOpInfo &Ops) { + return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and"); + } + Value *EmitXor(const BinOpInfo &Ops) { + return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor"); + } + Value *EmitOr (const BinOpInfo &Ops) { + return Builder.CreateOr(Ops.LHS, Ops.RHS, "or"); + } + + BinOpInfo EmitBinOps(const BinaryOperator *E); + Value *EmitCompoundAssign(const CompoundAssignOperator *E, + Value *(ScalarExprEmitter::*F)(const BinOpInfo &)); + + // Binary operators and binary compound assignment operators. 
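An illustrative aside, not part of the patch: the HANDLEBINOP X-macro defined just below stamps out one visitor pair per operator; for example, HANDLEBINOP(Add) expands to:

    Value *VisitBinAdd(const BinaryOperator *E) {
      return EmitAdd(EmitBinOps(E));
    }
    Value *VisitBinAddAssign(const CompoundAssignOperator *E) {
      return EmitCompoundAssign(E, &ScalarExprEmitter::EmitAdd);
    }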
+#define HANDLEBINOP(OP) \ + Value *VisitBin ## OP(const BinaryOperator *E) { \ + return Emit ## OP(EmitBinOps(E)); \ + } \ + Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \ + return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \ + } + HANDLEBINOP(Mul); + HANDLEBINOP(Div); + HANDLEBINOP(Rem); + HANDLEBINOP(Add); + // (Sub) - Sub is handled specially below for ptr-ptr subtract. + HANDLEBINOP(Shl); + HANDLEBINOP(Shr); + HANDLEBINOP(And); + HANDLEBINOP(Xor); + HANDLEBINOP(Or); +#undef HANDLEBINOP + Value *VisitBinSub(const BinaryOperator *E); + Value *VisitBinSubAssign(const CompoundAssignOperator *E) { + return EmitCompoundAssign(E, &ScalarExprEmitter::EmitSub); + } + + // Comparisons. + Value *EmitCompare(const BinaryOperator *E, unsigned UICmpOpc, + unsigned SICmpOpc, unsigned FCmpOpc); +#define VISITCOMP(CODE, UI, SI, FP) \ + Value *VisitBin##CODE(const BinaryOperator *E) { \ + return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \ + llvm::FCmpInst::FP); } + VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT); + VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT); + VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE); + VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE); + VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ); + VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE); +#undef VISITCOMP + + Value *VisitBinAssign (const BinaryOperator *E); + + Value *VisitBinLAnd (const BinaryOperator *E); + Value *VisitBinLOr (const BinaryOperator *E); + Value *VisitBinComma (const BinaryOperator *E); + + // Other Operators. + Value *VisitConditionalOperator(const ConditionalOperator *CO); + Value *VisitChooseExpr(ChooseExpr *CE); + Value *VisitOverloadExpr(OverloadExpr *OE); + Value *VisitVAArgExpr(VAArgExpr *VE); + Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) { + return CGF.EmitObjCStringLiteral(E); + } + Value *VisitObjCEncodeExpr(const ObjCEncodeExpr *E); +}; +} // end anonymous namespace. + +//===----------------------------------------------------------------------===// +// Utilities +//===----------------------------------------------------------------------===// + +/// EmitConversionToBool - Convert the specified expression value to a +/// boolean (i1) truth value. This is equivalent to "Val != 0". +Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) { + assert(SrcType->isCanonical() && "EmitScalarConversion strips typedefs"); + + if (SrcType->isRealFloatingType()) { + // Compare against 0.0 for fp scalars. + llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType()); + return Builder.CreateFCmpUNE(Src, Zero, "tobool"); + } + + assert((SrcType->isIntegerType() || SrcType->isPointerType()) && + "Unknown scalar type to convert"); + + // Because of the type rules of C, we often end up computing a logical value, + // then zero extending it to int, then wanting it as a logical value again. + // Optimize this common case. + if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(Src)) { + if (ZI->getOperand(0)->getType() == llvm::Type::Int1Ty) { + Value *Result = ZI->getOperand(0); + // If there aren't any more uses, zap the instruction to save space. + // Note that there can be more uses, for example if this + // is the result of an assignment. + if (ZI->use_empty()) + ZI->eraseFromParent(); + return Result; + } + } + + // Compare against an integer or pointer null. 
+ llvm::Value *Zero = llvm::Constant::getNullValue(Src->getType()); + return Builder.CreateICmpNE(Src, Zero, "tobool"); +} + +/// EmitScalarConversion - Emit a conversion from the specified type to the +/// specified destination type, both of which are LLVM scalar types. +Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType, + QualType DstType) { + SrcType = SrcType.getCanonicalType(); + DstType = DstType.getCanonicalType(); + if (SrcType == DstType) return Src; + + if (DstType->isVoidType()) return 0; + + // Handle conversions to bool first, they are special: comparisons against 0. + if (DstType->isBooleanType()) + return EmitConversionToBool(Src, SrcType); + + const llvm::Type *DstTy = ConvertType(DstType); + + // Ignore conversions like int -> uint. + if (Src->getType() == DstTy) + return Src; + + // Handle pointer conversions next: pointers can only be converted to/from + // other pointers and integers. + if (isa<PointerType>(DstType)) { + // The source value may be an integer, or a pointer. + if (isa<llvm::PointerType>(Src->getType())) + return Builder.CreateBitCast(Src, DstTy, "conv"); + assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?"); + return Builder.CreateIntToPtr(Src, DstTy, "conv"); + } + + if (isa<PointerType>(SrcType)) { + // Must be an ptr to int cast. + assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?"); + return Builder.CreatePtrToInt(Src, DstTy, "conv"); + } + + // A scalar source can be splatted to an OCU vector of the same element type + if (DstType->isOCUVectorType() && !isa<VectorType>(SrcType) && + cast<llvm::VectorType>(DstTy)->getElementType() == Src->getType()) + return CGF.EmitVector(&Src, DstType->getAsVectorType()->getNumElements(), + true); + + // Allow bitcast from vector to integer/fp of the same size. + if (isa<llvm::VectorType>(Src->getType()) || + isa<llvm::VectorType>(DstTy)) + return Builder.CreateBitCast(Src, DstTy, "conv"); + + // Finally, we have the arithmetic types: real int/float. + if (isa<llvm::IntegerType>(Src->getType())) { + bool InputSigned = SrcType->isSignedIntegerType(); + if (isa<llvm::IntegerType>(DstTy)) + return Builder.CreateIntCast(Src, DstTy, InputSigned, "conv"); + else if (InputSigned) + return Builder.CreateSIToFP(Src, DstTy, "conv"); + else + return Builder.CreateUIToFP(Src, DstTy, "conv"); + } + + assert(Src->getType()->isFloatingPoint() && "Unknown real conversion"); + if (isa<llvm::IntegerType>(DstTy)) { + if (DstType->isSignedIntegerType()) + return Builder.CreateFPToSI(Src, DstTy, "conv"); + else + return Builder.CreateFPToUI(Src, DstTy, "conv"); + } + + assert(DstTy->isFloatingPoint() && "Unknown real conversion"); + if (DstTy->getTypeID() < Src->getType()->getTypeID()) + return Builder.CreateFPTrunc(Src, DstTy, "conv"); + else + return Builder.CreateFPExt(Src, DstTy, "conv"); +} + +/// EmitComplexToScalarConversion - Emit a conversion from the specified +/// complex type to the specified destination type, where the destination +/// type is an LLVM scalar type. +Value *ScalarExprEmitter:: +EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src, + QualType SrcTy, QualType DstTy) { + // Get the source element type. + SrcTy = cast<ComplexType>(SrcTy.getCanonicalType())->getElementType(); + + // Handle conversions to bool first, they are special: comparisons against 0. 
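An illustrative aside, not part of the patch: for a complex operand the truth test emitted just below is component-wise, true when either half is nonzero. A scalar sketch of the same logic (names illustrative only):

    // Sketch only: mirrors the two compares feeding the CreateOr.
    static bool complexToBool(double re, double im) {
      return (re != 0.0) | (im != 0.0);
    }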
+ if (DstTy->isBooleanType()) { + // Complex != 0 -> (Real != 0) | (Imag != 0) + Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy); + Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy); + return Builder.CreateOr(Src.first, Src.second, "tobool"); + } + + // C99 6.3.1.7p2: "When a value of complex type is converted to a real type, + // the imaginary part of the complex value is discarded and the value of the + // real part is converted according to the conversion rules for the + // corresponding real type. + return EmitScalarConversion(Src.first, SrcTy, DstTy); +} + + +//===----------------------------------------------------------------------===// +// Visitor Methods +//===----------------------------------------------------------------------===// + +Value *ScalarExprEmitter::VisitExpr(Expr *E) { + CGF.WarnUnsupported(E, "scalar expression"); + if (E->getType()->isVoidType()) + return 0; + return llvm::UndefValue::get(CGF.ConvertType(E->getType())); +} + +Value *ScalarExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) { + // Only the lookup mechanism and first two arguments of the method + // implementation vary between runtimes. We can get the receiver and + // arguments in generic code. + + // Find the receiver + llvm::Value * Receiver = CGF.EmitScalarExpr(E->getReceiver()); + + // Process the arguments + unsigned int ArgC = E->getNumArgs(); + llvm::SmallVector<llvm::Value*, 16> Args; + for(unsigned i=0 ; i<ArgC ; i++) { + Expr *ArgExpr = E->getArg(i); + QualType ArgTy = ArgExpr->getType(); + if (!CGF.hasAggregateLLVMType(ArgTy)) { + // Scalar argument is passed by-value. + Args.push_back(CGF.EmitScalarExpr(ArgExpr)); + } else if (ArgTy->isComplexType()) { + // Make a temporary alloca to pass the argument. + llvm::Value *DestMem = CGF.CreateTempAlloca(ConvertType(ArgTy)); + CGF.EmitComplexExprIntoAddr(ArgExpr, DestMem, false); + Args.push_back(DestMem); + } else { + llvm::Value *DestMem = CGF.CreateTempAlloca(ConvertType(ArgTy)); + CGF.EmitAggExpr(ArgExpr, DestMem, false); + Args.push_back(DestMem); + } + } + + // Get the selector string + std::string SelStr = E->getSelector().getName(); + llvm::Constant *Selector = CGF.CGM.GetAddrOfConstantString(SelStr); + ConvertType(E->getType()); + return Runtime->generateMessageSend(Builder, + ConvertType(E->getType()), + Receiver, + Selector, + &Args[0], + Args.size()); +} + +Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) { + // Emit subscript expressions in rvalue context's. For most cases, this just + // loads the lvalue formed by the subscript expr. However, we have to be + // careful, because the base of a vector subscript is occasionally an rvalue, + // so we can't get it as an lvalue. + if (!E->getBase()->getType()->isVectorType()) + return EmitLoadOfLValue(E); + + // Handle the vector case. The base must be a vector, the index must be an + // integer value. + Value *Base = Visit(E->getBase()); + Value *Idx = Visit(E->getIdx()); + + // FIXME: Convert Idx to i32 type. + return Builder.CreateExtractElement(Base, Idx, "vecext"); +} + +/// VisitImplicitCastExpr - Implicit casts are the same as normal casts, but +/// also handle things like function to pointer-to-function decay, and array to +/// pointer decay. +Value *ScalarExprEmitter::VisitImplicitCastExpr(const ImplicitCastExpr *E) { + const Expr *Op = E->getSubExpr(); + + // If this is due to array->pointer conversion, emit the array expression as + // an l-value. 
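+  // For example, given "int a[10];", using 'a' where an 'int *' is expected
+  // emits a zero-index GEP over the array storage, roughly:
+  //   %arraydecay = getelementptr [10 x i32]* %a, i32 0, i32 0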
+ if (Op->getType()->isArrayType()) { + // FIXME: For now we assume that all source arrays map to LLVM arrays. This + // will not true when we add support for VLAs. + Value *V = EmitLValue(Op).getAddress(); // Bitfields can't be arrays. + + assert(isa<llvm::PointerType>(V->getType()) && + isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType()) + ->getElementType()) && + "Doesn't support VLAs yet!"); + llvm::Constant *Idx0 = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0); + + llvm::Value *Ops[] = {Idx0, Idx0}; + V = Builder.CreateGEP(V, Ops, Ops+2, "arraydecay"); + + // The resultant pointer type can be implicitly casted to other pointer + // types as well, for example void*. + const llvm::Type *DestPTy = ConvertType(E->getType()); + assert(isa<llvm::PointerType>(DestPTy) && + "Only expect implicit cast to pointer"); + if (V->getType() != DestPTy) + V = Builder.CreateBitCast(V, DestPTy, "ptrconv"); + return V; + + } else if (E->getType()->isReferenceType()) { + assert(cast<ReferenceType>(E->getType().getCanonicalType())-> + getReferenceeType() == + Op->getType().getCanonicalType() && "Incompatible types!"); + + return EmitLValue(Op).getAddress(); + } + + return EmitCastExpr(Op, E->getType()); +} + + +// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts +// have to handle a more broad range of conversions than explicit casts, as they +// handle things like function to ptr-to-function decay etc. +Value *ScalarExprEmitter::EmitCastExpr(const Expr *E, QualType DestTy) { + // Handle cases where the source is an non-complex type. + + if (!CGF.hasAggregateLLVMType(E->getType())) { + Value *Src = Visit(const_cast<Expr*>(E)); + + // Use EmitScalarConversion to perform the conversion. + return EmitScalarConversion(Src, E->getType(), DestTy); + } + + if (E->getType()->isComplexType()) { + // Handle cases where the source is a complex type. + return EmitComplexToScalarConversion(CGF.EmitComplexExpr(E), E->getType(), + DestTy); + } + + // Okay, this is a cast from an aggregate. It must be a cast to void. Just + // evaluate the result and return. + CGF.EmitAggExpr(E, 0, false); + return 0; +} + +Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) { + return CGF.EmitCompoundStmt(*E->getSubStmt(), true).getScalarVal(); +} + + +//===----------------------------------------------------------------------===// +// Unary Operators +//===----------------------------------------------------------------------===// + +Value *ScalarExprEmitter::VisitPrePostIncDec(const UnaryOperator *E, + bool isInc, bool isPre) { + LValue LV = EmitLValue(E->getSubExpr()); + // FIXME: Handle volatile! + Value *InVal = CGF.EmitLoadOfLValue(LV, // false + E->getSubExpr()->getType()).getScalarVal(); + + int AmountVal = isInc ? 1 : -1; + + Value *NextVal; + if (isa<llvm::PointerType>(InVal->getType())) { + // FIXME: This isn't right for VLAs. + NextVal = llvm::ConstantInt::get(llvm::Type::Int32Ty, AmountVal); + NextVal = Builder.CreateGEP(InVal, NextVal); + } else { + // Add the inc/dec to the real part. + if (isa<llvm::IntegerType>(InVal->getType())) + NextVal = llvm::ConstantInt::get(InVal->getType(), AmountVal); + else if (InVal->getType() == llvm::Type::FloatTy) + // FIXME: Handle long double. + NextVal = + llvm::ConstantFP::get(InVal->getType(), + llvm::APFloat(static_cast<float>(AmountVal))); + else { + // FIXME: Handle long double. 
+ assert(InVal->getType() == llvm::Type::DoubleTy); + NextVal = + llvm::ConstantFP::get(InVal->getType(), + llvm::APFloat(static_cast<double>(AmountVal))); + } + NextVal = Builder.CreateAdd(InVal, NextVal, isInc ? "inc" : "dec"); + } + + // Store the updated result through the lvalue. + CGF.EmitStoreThroughLValue(RValue::get(NextVal), LV, + E->getSubExpr()->getType()); + + // If this is a postinc, return the value read from memory, otherwise use the + // updated value. + return isPre ? NextVal : InVal; +} + + +Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) { + Value *Op = Visit(E->getSubExpr()); + return Builder.CreateNeg(Op, "neg"); +} + +Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) { + Value *Op = Visit(E->getSubExpr()); + return Builder.CreateNot(Op, "neg"); +} + +Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) { + // Compare operand to zero. + Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr()); + + // Invert value. + // TODO: Could dynamically modify easy computations here. For example, if + // the operand is an icmp ne, turn into icmp eq. + BoolVal = Builder.CreateNot(BoolVal, "lnot"); + + // ZExt result to int. + return Builder.CreateZExt(BoolVal, CGF.LLVMIntTy, "lnot.ext"); +} + +/// EmitSizeAlignOf - Return the size or alignment of the 'TypeToSize' type as +/// an integer (RetType). +Value *ScalarExprEmitter::EmitSizeAlignOf(QualType TypeToSize, + QualType RetType,bool isSizeOf){ + assert(RetType->isIntegerType() && "Result type must be an integer!"); + uint32_t ResultWidth = + static_cast<uint32_t>(CGF.getContext().getTypeSize(RetType)); + + // sizeof(void) and __alignof__(void) = 1 as a gcc extension. + if (TypeToSize->isVoidType()) + return llvm::ConstantInt::get(llvm::APInt(ResultWidth, 1)); + + /// FIXME: This doesn't handle VLAs yet! + std::pair<uint64_t, unsigned> Info = CGF.getContext().getTypeInfo(TypeToSize); + + uint64_t Val = isSizeOf ? Info.first : Info.second; + Val /= 8; // Return size in bytes, not bits. + + return llvm::ConstantInt::get(llvm::APInt(ResultWidth, Val)); +} + +Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) { + Expr *Op = E->getSubExpr(); + if (Op->getType()->isComplexType()) + return CGF.EmitComplexExpr(Op).first; + return Visit(Op); +} +Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) { + Expr *Op = E->getSubExpr(); + if (Op->getType()->isComplexType()) + return CGF.EmitComplexExpr(Op).second; + + // __imag on a scalar returns zero. Emit it the subexpr to ensure side + // effects are evaluated. 
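+  // For example, "__imag__ f()" where f() returns a plain double still emits
+  // the call for its side effects; the expression itself is just 0.0.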
+ CGF.EmitScalarExpr(Op); + return llvm::Constant::getNullValue(ConvertType(E->getType())); +} + +Value *ScalarExprEmitter::VisitUnaryOffsetOf(const UnaryOperator *E) +{ + int64_t Val = E->evaluateOffsetOf(CGF.getContext()); + + assert(E->getType()->isIntegerType() && "Result type must be an integer!"); + + uint32_t ResultWidth = + static_cast<uint32_t>(CGF.getContext().getTypeSize(E->getType())); + return llvm::ConstantInt::get(llvm::APInt(ResultWidth, Val)); +} + +//===----------------------------------------------------------------------===// +// Binary Operators +//===----------------------------------------------------------------------===// + +BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) { + BinOpInfo Result; + Result.LHS = Visit(E->getLHS()); + Result.RHS = Visit(E->getRHS()); + Result.Ty = E->getType(); + Result.E = E; + return Result; +} + +Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E, + Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) { + QualType LHSTy = E->getLHS()->getType(), RHSTy = E->getRHS()->getType(); + + BinOpInfo OpInfo; + + // Load the LHS and RHS operands. + LValue LHSLV = EmitLValue(E->getLHS()); + OpInfo.LHS = EmitLoadOfLValue(LHSLV, LHSTy); + + // Determine the computation type. If the RHS is complex, then this is one of + // the add/sub/mul/div operators. All of these operators can be computed in + // with just their real component even though the computation domain really is + // complex. + QualType ComputeType = E->getComputationType(); + + // If the computation type is complex, then the RHS is complex. Emit the RHS. + if (const ComplexType *CT = ComputeType->getAsComplexType()) { + ComputeType = CT->getElementType(); + + // Emit the RHS, only keeping the real component. + OpInfo.RHS = CGF.EmitComplexExpr(E->getRHS()).first; + RHSTy = RHSTy->getAsComplexType()->getElementType(); + } else { + // Otherwise the RHS is a simple scalar value. + OpInfo.RHS = Visit(E->getRHS()); + } + + // Convert the LHS/RHS values to the computation type. + OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy, ComputeType); + + // Do not merge types for -= or += where the LHS is a pointer. + if (!(E->getOpcode() == BinaryOperator::SubAssign || + E->getOpcode() == BinaryOperator::AddAssign) || + !E->getLHS()->getType()->isPointerType()) { + OpInfo.RHS = EmitScalarConversion(OpInfo.RHS, RHSTy, ComputeType); + } + OpInfo.Ty = ComputeType; + OpInfo.E = E; + + // Expand the binary operator. + Value *Result = (this->*Func)(OpInfo); + + // Truncate the result back to the LHS type. + Result = EmitScalarConversion(Result, ComputeType, LHSTy); + + // Store the result value into the LHS lvalue. + CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV, E->getType()); + + return Result; +} + + +Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) { + if (Ops.LHS->getType()->isFPOrFPVector()) + return Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div"); + else if (Ops.Ty->isUnsignedIntegerType()) + return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div"); + else + return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div"); +} + +Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) { + // Rem in C can't be a floating point type: C99 6.5.5p2. 
+ if (Ops.Ty->isUnsignedIntegerType()) + return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem"); + else + return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem"); +} + + +Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) { + if (!Ops.Ty->isPointerType()) + return Builder.CreateAdd(Ops.LHS, Ops.RHS, "add"); + + // FIXME: What about a pointer to a VLA? + Value *Ptr, *Idx; + Expr *IdxExp; + if (isa<llvm::PointerType>(Ops.LHS->getType())) { // pointer + int + Ptr = Ops.LHS; + Idx = Ops.RHS; + IdxExp = Ops.E->getRHS(); + } else { // int + pointer + Ptr = Ops.RHS; + Idx = Ops.LHS; + IdxExp = Ops.E->getLHS(); + } + + unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth(); + if (Width < CGF.LLVMPointerWidth) { + // Zero or sign extend the pointer value based on whether the index is + // signed or not. + const llvm::Type *IdxType = llvm::IntegerType::get(CGF.LLVMPointerWidth); + if (IdxExp->getType().getCanonicalType()->isSignedIntegerType()) + Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext"); + else + Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext"); + } + + return Builder.CreateGEP(Ptr, Idx, "add.ptr"); +} + +Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) { + if (!isa<llvm::PointerType>(Ops.LHS->getType())) + return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub"); + + // pointer - int + assert(!isa<llvm::PointerType>(Ops.RHS->getType()) && + "ptr-ptr shouldn't get here"); + // FIXME: The pointer could point to a VLA. + Value *Idx = Builder.CreateNeg(Ops.RHS, "sub.ptr.neg"); + + unsigned Width = cast<llvm::IntegerType>(Idx->getType())->getBitWidth(); + if (Width < CGF.LLVMPointerWidth) { + // Zero or sign extend the pointer value based on whether the index is + // signed or not. + const llvm::Type *IdxType = llvm::IntegerType::get(CGF.LLVMPointerWidth); + if (Ops.E->getRHS()->getType().getCanonicalType()->isSignedIntegerType()) + Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext"); + else + Idx = Builder.CreateZExt(Idx, IdxType, "idx.ext"); + } + + return Builder.CreateGEP(Ops.LHS, Idx, "sub.ptr"); +} + +Value *ScalarExprEmitter::VisitBinSub(const BinaryOperator *E) { + // "X - Y" is different from "X -= Y" in one case: when Y is a pointer. In + // the compound assignment case it is invalid, so just handle it here. + if (!E->getRHS()->getType()->isPointerType()) + return EmitSub(EmitBinOps(E)); + + // pointer - pointer + Value *LHS = Visit(E->getLHS()); + Value *RHS = Visit(E->getRHS()); + + const QualType LHSType = E->getLHS()->getType().getCanonicalType(); + const QualType LHSElementType = cast<PointerType>(LHSType)->getPointeeType(); + uint64_t ElementSize = CGF.getContext().getTypeSize(LHSElementType) / 8; + + const llvm::Type *ResultType = ConvertType(E->getType()); + LHS = Builder.CreatePtrToInt(LHS, ResultType, "sub.ptr.lhs.cast"); + RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast"); + Value *BytesBetween = Builder.CreateSub(LHS, RHS, "sub.ptr.sub"); + + // HACK: LLVM doesn't have an divide instruction that 'knows' there is no + // remainder. As such, we handle common power-of-two cases here to generate + // better code. + if (llvm::isPowerOf2_64(ElementSize)) { + Value *ShAmt = + llvm::ConstantInt::get(ResultType, llvm::Log2_64(ElementSize)); + return Builder.CreateAShr(BytesBetween, ShAmt, "sub.ptr.shr"); + } + + // Otherwise, do a full sdiv. 
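+  // For example, with "struct S { char c[12]; } *p, *q;" the expression
+  // "p - q" divides the byte distance by the 12-byte element size, roughly:
+  //   %sub.ptr.div = sdiv i32 %sub.ptr.sub, 12
+  // while "int *p, *q" takes the power-of-two path above and shifts right by 2.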
+ Value *BytesPerElt = llvm::ConstantInt::get(ResultType, ElementSize); + return Builder.CreateSDiv(BytesBetween, BytesPerElt, "sub.ptr.div"); +} + + +Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) { + // LLVM requires the LHS and RHS to be the same type: promote or truncate the + // RHS to the same size as the LHS. + Value *RHS = Ops.RHS; + if (Ops.LHS->getType() != RHS->getType()) + RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); + + return Builder.CreateShl(Ops.LHS, RHS, "shl"); +} + +Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) { + // LLVM requires the LHS and RHS to be the same type: promote or truncate the + // RHS to the same size as the LHS. + Value *RHS = Ops.RHS; + if (Ops.LHS->getType() != RHS->getType()) + RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom"); + + if (Ops.Ty->isUnsignedIntegerType()) + return Builder.CreateLShr(Ops.LHS, RHS, "shr"); + return Builder.CreateAShr(Ops.LHS, RHS, "shr"); +} + +Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc, + unsigned SICmpOpc, unsigned FCmpOpc) { + Value *Result; + QualType LHSTy = E->getLHS()->getType(); + if (!LHSTy->isComplexType()) { + Value *LHS = Visit(E->getLHS()); + Value *RHS = Visit(E->getRHS()); + + if (LHS->getType()->isFloatingPoint()) { + Result = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc, + LHS, RHS, "cmp"); + } else if (LHSTy->isUnsignedIntegerType()) { + Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc, + LHS, RHS, "cmp"); + } else { + // Signed integers and pointers. + Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)SICmpOpc, + LHS, RHS, "cmp"); + } + } else { + // Complex Comparison: can only be an equality comparison. + CodeGenFunction::ComplexPairTy LHS = CGF.EmitComplexExpr(E->getLHS()); + CodeGenFunction::ComplexPairTy RHS = CGF.EmitComplexExpr(E->getRHS()); + + QualType CETy = + cast<ComplexType>(LHSTy.getCanonicalType())->getElementType(); + + Value *ResultR, *ResultI; + if (CETy->isRealFloatingType()) { + ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc, + LHS.first, RHS.first, "cmp.r"); + ResultI = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc, + LHS.second, RHS.second, "cmp.i"); + } else { + // Complex comparisons can only be equality comparisons. As such, signed + // and unsigned opcodes are the same. + ResultR = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc, + LHS.first, RHS.first, "cmp.r"); + ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc, + LHS.second, RHS.second, "cmp.i"); + } + + if (E->getOpcode() == BinaryOperator::EQ) { + Result = Builder.CreateAnd(ResultR, ResultI, "and.ri"); + } else { + assert(E->getOpcode() == BinaryOperator::NE && + "Complex comparison other than == or != ?"); + Result = Builder.CreateOr(ResultR, ResultI, "or.ri"); + } + } + + // ZExt result to int. + return Builder.CreateZExt(Result, CGF.LLVMIntTy, "cmp.ext"); +} + +Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) { + LValue LHS = EmitLValue(E->getLHS()); + Value *RHS = Visit(E->getRHS()); + + // Store the value into the LHS. + // FIXME: Volatility! + CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS, E->getType()); + + // Return the RHS. 
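+  // Returning the RHS is what makes chained assignments such as "a = b = 0"
+  // work: the value stored into 'b' is also the value of the expression,
+  // which is then stored into 'a'.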
+  return RHS;
+}
+
+Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
+  Value *LHSCond = CGF.EvaluateExprAsBool(E->getLHS());
+
+  llvm::BasicBlock *ContBlock = new llvm::BasicBlock("land_cont");
+  llvm::BasicBlock *RHSBlock = new llvm::BasicBlock("land_rhs");
+
+  llvm::BasicBlock *OrigBlock = Builder.GetInsertBlock();
+  Builder.CreateCondBr(LHSCond, RHSBlock, ContBlock);
+
+  CGF.EmitBlock(RHSBlock);
+  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+
+  // Reacquire the RHS block, as there may be subblocks inserted.
+  RHSBlock = Builder.GetInsertBlock();
+  CGF.EmitBlock(ContBlock);
+
+  // Create a PHI node.  If we just evaluated the LHS condition, the result is
+  // false.  If we evaluated both, the result is the RHS condition.
+  llvm::PHINode *PN = Builder.CreatePHI(llvm::Type::Int1Ty, "land");
+  PN->reserveOperandSpace(2);
+  PN->addIncoming(llvm::ConstantInt::getFalse(), OrigBlock);
+  PN->addIncoming(RHSCond, RHSBlock);
+
+  // ZExt result to int.
+  return Builder.CreateZExt(PN, CGF.LLVMIntTy, "land.ext");
+}
+
+Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
+  Value *LHSCond = CGF.EvaluateExprAsBool(E->getLHS());
+
+  llvm::BasicBlock *ContBlock = new llvm::BasicBlock("lor_cont");
+  llvm::BasicBlock *RHSBlock = new llvm::BasicBlock("lor_rhs");
+
+  llvm::BasicBlock *OrigBlock = Builder.GetInsertBlock();
+  Builder.CreateCondBr(LHSCond, ContBlock, RHSBlock);
+
+  CGF.EmitBlock(RHSBlock);
+  Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+
+  // Reacquire the RHS block, as there may be subblocks inserted.
+  RHSBlock = Builder.GetInsertBlock();
+  CGF.EmitBlock(ContBlock);
+
+  // Create a PHI node.  If we just evaluated the LHS condition, the result is
+  // true.  If we evaluated both, the result is the RHS condition.
+  llvm::PHINode *PN = Builder.CreatePHI(llvm::Type::Int1Ty, "lor");
+  PN->reserveOperandSpace(2);
+  PN->addIncoming(llvm::ConstantInt::getTrue(), OrigBlock);
+  PN->addIncoming(RHSCond, RHSBlock);
+
+  // ZExt result to int.
+  return Builder.CreateZExt(PN, CGF.LLVMIntTy, "lor.ext");
+}
+
+Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
+  CGF.EmitStmt(E->getLHS());
+  return Visit(E->getRHS());
+}
+
+//===----------------------------------------------------------------------===//
+//                             Other Operators
+//===----------------------------------------------------------------------===//
+
+Value *ScalarExprEmitter::
+VisitConditionalOperator(const ConditionalOperator *E) {
+  llvm::BasicBlock *LHSBlock = new llvm::BasicBlock("cond.?");
+  llvm::BasicBlock *RHSBlock = new llvm::BasicBlock("cond.:");
+  llvm::BasicBlock *ContBlock = new llvm::BasicBlock("cond.cont");
+
+  // Evaluate the conditional, then convert it to bool.  We do this explicitly
+  // because we need the unconverted value if this is a GNU ?: expression with
+  // missing middle value.
+  Value *CondVal = CGF.EmitScalarExpr(E->getCond());
+  Value *CondBoolVal = CGF.EmitScalarConversion(CondVal,
+                                                E->getCond()->getType(),
+                                                CGF.getContext().BoolTy);
+  Builder.CreateCondBr(CondBoolVal, LHSBlock, RHSBlock);
+
+  CGF.EmitBlock(LHSBlock);
+
+  // Handle the GNU extension for missing LHS.
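+  // For example, in "x ?: y" there is no explicit LHS expression; the
+  // condition value 'x' computed above is reused (after conversion to the
+  // result type) as the value of the true arm.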
+  Value *LHS;
+  if (E->getLHS())
+    LHS = Visit(E->getLHS());
+  else    // Perform promotions, to handle cases like "short ?: int"
+    LHS = EmitScalarConversion(CondVal, E->getCond()->getType(), E->getType());
+
+  Builder.CreateBr(ContBlock);
+  LHSBlock = Builder.GetInsertBlock();
+
+  CGF.EmitBlock(RHSBlock);
+
+  Value *RHS = Visit(E->getRHS());
+  Builder.CreateBr(ContBlock);
+  RHSBlock = Builder.GetInsertBlock();
+
+  CGF.EmitBlock(ContBlock);
+
+  if (!LHS) {
+    assert(E->getType()->isVoidType() && "Non-void value should have a value");
+    return 0;
+  }
+
+  // Create a PHI node to merge the value from the two arms of the conditional.
+  llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), "cond");
+  PN->reserveOperandSpace(2);
+  PN->addIncoming(LHS, LHSBlock);
+  PN->addIncoming(RHS, RHSBlock);
+  return PN;
+}
+
+Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
+  // Emit the LHS or RHS as appropriate.
+  return
+    Visit(E->isConditionTrue(CGF.getContext()) ? E->getLHS() : E->getRHS());
+}
+
+Value *ScalarExprEmitter::VisitOverloadExpr(OverloadExpr *E) {
+  return CGF.EmitCallExpr(E->getFn(), E->arg_begin(),
+                          E->getNumArgs(CGF.getContext())).getScalarVal();
+}
+
+Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
+  llvm::Value *ArgValue = EmitLValue(VE->getSubExpr()).getAddress();
+
+  llvm::Value *V = Builder.CreateVAArg(ArgValue, ConvertType(VE->getType()));
+  return V;
+}
+
+Value *ScalarExprEmitter::VisitObjCEncodeExpr(const ObjCEncodeExpr *E) {
+  std::string str;
+  llvm::SmallVector<const RecordType *, 8> EncodingRecordTypes;
+  CGF.getContext().getObjCEncodingForType(E->getEncodedType(), str,
+                                          EncodingRecordTypes);
+
+  llvm::Constant *C = llvm::ConstantArray::get(str);
+  C = new llvm::GlobalVariable(C->getType(), true,
+                               llvm::GlobalValue::InternalLinkage,
+                               C, ".str", &CGF.CGM.getModule());
+  llvm::Constant *Zero = llvm::Constant::getNullValue(llvm::Type::Int32Ty);
+  llvm::Constant *Zeros[] = { Zero, Zero };
+  C = llvm::ConstantExpr::getGetElementPtr(C, Zeros, 2);
+
+  return C;
+}
+
+//===----------------------------------------------------------------------===//
+//                         Entry Point into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitScalarExpr - Emit the computation of the specified expression of scalar
+/// type, returning the result as an LLVM scalar value.
+Value *CodeGenFunction::EmitScalarExpr(const Expr *E) {
+  assert(E && !hasAggregateLLVMType(E->getType()) &&
+         "Invalid scalar expression to emit");
+
+  return ScalarExprEmitter(*this).Visit(const_cast<Expr*>(E));
+}
+
+/// EmitScalarConversion - Emit a conversion from the specified type to the
+/// specified destination type, both of which are LLVM scalar types.
+Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
+                                             QualType DstTy) {
+  assert(!hasAggregateLLVMType(SrcTy) && !hasAggregateLLVMType(DstTy) &&
+         "Invalid scalar expression to emit");
+  return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy);
+}
+
+/// EmitComplexToScalarConversion - Emit a conversion from the specified
+/// complex type to the specified destination type, where the destination
+/// type is an LLVM scalar type.
+Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
+                                                      QualType SrcTy,
+                                                      QualType DstTy) {
+  assert(SrcTy->isComplexType() && !hasAggregateLLVMType(DstTy) &&
+         "Invalid complex -> scalar conversion");
+  return ScalarExprEmitter(*this).EmitComplexToScalarConversion(Src, SrcTy,
+                                                                DstTy);
+}
+
+Value *CodeGenFunction::EmitShuffleVector(Value* V1, Value *V2, ...)
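+// Illustrative use (hypothetical caller): interleave the low elements of two
+// <4 x float> values; the NumElements variadic arguments are shuffle indices
+// and the final argument names the result:
+//   Value *V = CGF.EmitShuffleVector(A, B, 0, 4, 1, 5, "interleave");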
{ + assert(V1->getType() == V2->getType() && + "Vector operands must be of the same type"); + + unsigned NumElements = + cast<llvm::VectorType>(V1->getType())->getNumElements(); + + va_list va; + va_start(va, V2); + + llvm::SmallVector<llvm::Constant*, 16> Args; + + for (unsigned i = 0; i < NumElements; i++) { + int n = va_arg(va, int); + + assert(n >= 0 && n < (int)NumElements * 2 && + "Vector shuffle index out of bounds!"); + + Args.push_back(llvm::ConstantInt::get(llvm::Type::Int32Ty, n)); + } + + const char *Name = va_arg(va, const char *); + va_end(va); + + llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements); + + return Builder.CreateShuffleVector(V1, V2, Mask, Name); +} + +llvm::Value *CodeGenFunction::EmitVector(llvm::Value * const *Vals, + unsigned NumVals, bool isSplat) +{ + llvm::Value *Vec + = llvm::UndefValue::get(llvm::VectorType::get(Vals[0]->getType(), NumVals)); + + for (unsigned i = 0, e = NumVals ; i != e; ++i) { + llvm::Value *Val = isSplat ? Vals[0] : Vals[i]; + llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::Int32Ty, i); + Vec = Builder.CreateInsertElement(Vec, Val, Idx, "tmp"); + } + + return Vec; +} diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp new file mode 100644 index 00000000000..33419a3d51d --- /dev/null +++ b/clang/lib/CodeGen/CGObjC.cpp @@ -0,0 +1,25 @@ +//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Objective-C code as LLVM code. +// +//===----------------------------------------------------------------------===// + +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "clang/AST/Expr.h" +#include "llvm/Constant.h" +using namespace clang; +using namespace CodeGen; + +llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E){ + std::string S(E->getString()->getStrData(), E->getString()->getByteLength()); + return CGM.GetAddrOfConstantCFString(S); +} + diff --git a/clang/lib/CodeGen/CGObjCGNU.cpp b/clang/lib/CodeGen/CGObjCGNU.cpp new file mode 100644 index 00000000000..f0d6f554d55 --- /dev/null +++ b/clang/lib/CodeGen/CGObjCGNU.cpp @@ -0,0 +1,97 @@ +//===------- CGObjCGNU.cpp - Emit LLVM Code from ASTs for a Module --------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This provides Objective-C code generation targetting the GNU runtime. 
+// +//===----------------------------------------------------------------------===// + +#include "CGObjCRuntime.h" +#include "llvm/Module.h" +#include "llvm/Support/Compiler.h" +#include "llvm/Support/LLVMBuilder.h" +#include "llvm/ADT/SmallVector.h" + +using namespace clang::CodeGen; +using namespace clang; + +CGObjCRuntime::~CGObjCRuntime() {} + +namespace { +class CGObjCGNU : public CGObjCRuntime { +private: + llvm::Module &TheModule; +public: + CGObjCGNU(llvm::Module &M) : TheModule(M) {}; + virtual llvm::Value *generateMessageSend(llvm::LLVMFoldingBuilder &Builder, + const llvm::Type *ReturnTy, + llvm::Value *Receiver, + llvm::Constant *Selector, + llvm::Value** ArgV, + unsigned ArgC); +}; +} // end anonymous namespace + +// Generate code for a message send expression on the GNU runtime. +// BIG FAT WARNING: Much of this code will need factoring out later. +// FIXME: This currently only handles id returns. Other return types +// need some explicit casting. +llvm::Value *CGObjCGNU::generateMessageSend(llvm::LLVMFoldingBuilder &Builder, + const llvm::Type *ReturnTy, + llvm::Value *Receiver, + llvm::Constant *Selector, + llvm::Value** ArgV, + unsigned ArgC) { + // Get the selector Type. + const llvm::Type *PtrToInt8Ty = + llvm::PointerType::getUnqual(llvm::Type::Int8Ty); + std::vector<const llvm::Type*> Str2(2, PtrToInt8Ty); + const llvm::Type *SelStructTy = llvm::StructType::get(Str2); + const llvm::Type *SelTy = llvm::PointerType::getUnqual(SelStructTy); + + // Look up the selector. + // If we haven't got the selector lookup function, look it up now. + // TODO: Factor this out and use it to implement @selector() too. + llvm::Constant *SelFunction = + TheModule.getOrInsertFunction("sel_get_uid", SelTy, PtrToInt8Ty, NULL); + // FIXME: Selectors should be statically cached, not looked up on every call. + + // TODO: Pull this out into the caller. + llvm::Constant *Idx0 = llvm::ConstantInt::get(llvm::Type::Int32Ty, 0); + llvm::Constant *Ops[] = {Idx0, Idx0}; + llvm::Value *SelStr = llvm::ConstantExpr::getGetElementPtr(Selector, Ops, 2); + llvm::Value *cmd = Builder.CreateCall(SelFunction, &SelStr, &SelStr+1); + + // Look up the method implementation. + std::vector<const llvm::Type*> impArgTypes; + impArgTypes.push_back(Receiver->getType()); + impArgTypes.push_back(SelTy); + + // Avoid an explicit cast on the IMP by getting a version that has the right + // return type. + llvm::FunctionType *impType = llvm::FunctionType::get(ReturnTy, impArgTypes, + true); + + llvm::Constant *lookupFunction = + TheModule.getOrInsertFunction("objc_msg_lookup", + llvm::PointerType::get(impType, 0), + Receiver->getType(), SelTy, NULL); + llvm::SmallVector<llvm::Value*, 16> lookupArgs; + lookupArgs.push_back(Receiver); + lookupArgs.push_back(cmd); + llvm::Value *imp = Builder.CreateCall(lookupFunction, + lookupArgs.begin(), lookupArgs.end()); + + // Call the method. + lookupArgs.insert(lookupArgs.end(), ArgV, ArgV+ArgC); + return Builder.CreateCall(imp, lookupArgs.begin(), lookupArgs.end()); +} + +CGObjCRuntime * clang::CodeGen::CreateObjCRuntime(llvm::Module &M) { + return new CGObjCGNU(M); +} diff --git a/clang/lib/CodeGen/CGObjCRuntime.h b/clang/lib/CodeGen/CGObjCRuntime.h new file mode 100644 index 00000000000..d8fc60f99c0 --- /dev/null +++ b/clang/lib/CodeGen/CGObjCRuntime.h @@ -0,0 +1,47 @@ +//===----- CGObjCRuntime.h - Emit LLVM Code from ASTs for a Module --------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. 
See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This provides an abstract class for Objective-C code generation. Concrete +// subclasses of this implement code generation for specific Objective-C +// runtime libraries. +// +//===----------------------------------------------------------------------===// + +#ifndef CLANG_CODEGEN_OBCJRUNTIME_H +#define CLANG_CODEGEN_OBCJRUNTIME_H + +namespace llvm { + class LLVMFoldingBuilder; + class Constant; + class Type; + class Value; + class Module; +} + +namespace clang { +namespace CodeGen { + +// Implements runtime-specific code generation functions +class CGObjCRuntime { +public: + virtual ~CGObjCRuntime(); + + // Generate an Objective-C message send operation + virtual llvm::Value *generateMessageSend(llvm::LLVMFoldingBuilder &Builder, + const llvm::Type *ReturnTy, + llvm::Value *Receiver, + llvm::Constant *Selector, + llvm::Value** ArgV, + unsigned ArgC) = 0; +}; + +CGObjCRuntime *CreateObjCRuntime(llvm::Module &M); +} +} +#endif diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp new file mode 100644 index 00000000000..5fdc61b46f8 --- /dev/null +++ b/clang/lib/CodeGen/CGStmt.cpp @@ -0,0 +1,776 @@ +//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This contains code to emit Stmt nodes as LLVM code. +// +//===----------------------------------------------------------------------===// + +#include "CodeGenFunction.h" +#include "clang/AST/AST.h" +#include "clang/Basic/TargetInfo.h" +#include "llvm/Constants.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Function.h" +#include "llvm/InlineAsm.h" +#include "llvm/ADT/StringExtras.h" +using namespace clang; +using namespace CodeGen; + +//===----------------------------------------------------------------------===// +// Statement Emission +//===----------------------------------------------------------------------===// + +void CodeGenFunction::EmitStmt(const Stmt *S) { + assert(S && "Null statement?"); + + switch (S->getStmtClass()) { + default: + // Must be an expression in a stmt context. Emit the value (to get + // side-effects) and ignore the result. 
+ if (const Expr *E = dyn_cast<Expr>(S)) { + if (!hasAggregateLLVMType(E->getType())) + EmitScalarExpr(E); + else if (E->getType()->isComplexType()) + EmitComplexExpr(E); + else + EmitAggExpr(E, 0, false); + } else { + WarnUnsupported(S, "statement"); + } + break; + case Stmt::NullStmtClass: break; + case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break; + case Stmt::LabelStmtClass: EmitLabelStmt(cast<LabelStmt>(*S)); break; + case Stmt::GotoStmtClass: EmitGotoStmt(cast<GotoStmt>(*S)); break; + + case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break; + case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S)); break; + case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S)); break; + case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S)); break; + + case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break; + case Stmt::DeclStmtClass: EmitDeclStmt(cast<DeclStmt>(*S)); break; + + case Stmt::BreakStmtClass: EmitBreakStmt(); break; + case Stmt::ContinueStmtClass: EmitContinueStmt(); break; + case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break; + case Stmt::DefaultStmtClass: EmitDefaultStmt(cast<DefaultStmt>(*S)); break; + case Stmt::CaseStmtClass: EmitCaseStmt(cast<CaseStmt>(*S)); break; + case Stmt::AsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break; + } +} + +/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true, +/// this captures the expression result of the last sub-statement and returns it +/// (for use by the statement expression extension). +RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast, + llvm::Value *AggLoc, bool isAggVol) { + // FIXME: handle vla's etc. + if (S.body_empty() || !isa<Expr>(S.body_back())) GetLast = false; + + for (CompoundStmt::const_body_iterator I = S.body_begin(), + E = S.body_end()-GetLast; I != E; ++I) + EmitStmt(*I); + + + if (!GetLast) + return RValue::get(0); + + return EmitAnyExpr(cast<Expr>(S.body_back()), AggLoc); +} + +void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB) { + // Emit a branch from this block to the next one if this was a real block. If + // this was just a fall-through block after a terminator, don't emit it. + llvm::BasicBlock *LastBB = Builder.GetInsertBlock(); + + if (LastBB->getTerminator()) { + // If the previous block is already terminated, don't touch it. + } else if (LastBB->empty() && LastBB->getValueName() == 0) { + // If the last block was an empty placeholder, remove it now. + // TODO: cache and reuse these. + Builder.GetInsertBlock()->eraseFromParent(); + } else { + // Otherwise, create a fall-through branch. + Builder.CreateBr(BB); + } + CurFn->getBasicBlockList().push_back(BB); + Builder.SetInsertPoint(BB); +} + +void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) { + llvm::BasicBlock *NextBB = getBasicBlockForLabel(&S); + + EmitBlock(NextBB); + EmitStmt(S.getSubStmt()); +} + +void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) { + Builder.CreateBr(getBasicBlockForLabel(S.getLabel())); + + // Emit a block after the branch so that dead code after a goto has some place + // to go. + Builder.SetInsertPoint(new llvm::BasicBlock("", CurFn)); +} + +void CodeGenFunction::EmitIfStmt(const IfStmt &S) { + // C99 6.8.4.1: The first substatement is executed if the expression compares + // unequal to 0. The condition must be a scalar type. 
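+  // For example, "if (c) t(); else e();" is laid out as three blocks, with a
+  // conditional branch of the form
+  //   br i1 %tobool, label %ifthen, label %ifelse
+  // and both arms branching on to the %ifend continuation block.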
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond()); + + llvm::BasicBlock *ContBlock = new llvm::BasicBlock("ifend"); + llvm::BasicBlock *ThenBlock = new llvm::BasicBlock("ifthen"); + llvm::BasicBlock *ElseBlock = ContBlock; + + if (S.getElse()) + ElseBlock = new llvm::BasicBlock("ifelse"); + + // Insert the conditional branch. + Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock); + + // Emit the 'then' code. + EmitBlock(ThenBlock); + EmitStmt(S.getThen()); + llvm::BasicBlock *BB = Builder.GetInsertBlock(); + if (isDummyBlock(BB)) { + BB->eraseFromParent(); + Builder.SetInsertPoint(ThenBlock); + } + else + Builder.CreateBr(ContBlock); + + // Emit the 'else' code if present. + if (const Stmt *Else = S.getElse()) { + EmitBlock(ElseBlock); + EmitStmt(Else); + llvm::BasicBlock *BB = Builder.GetInsertBlock(); + if (isDummyBlock(BB)) { + BB->eraseFromParent(); + Builder.SetInsertPoint(ElseBlock); + } + else + Builder.CreateBr(ContBlock); + } + + // Emit the continuation block for code after the if. + EmitBlock(ContBlock); +} + +void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) { + // Emit the header for the loop, insert it, which will create an uncond br to + // it. + llvm::BasicBlock *LoopHeader = new llvm::BasicBlock("whilecond"); + EmitBlock(LoopHeader); + + // Evaluate the conditional in the while header. C99 6.8.5.1: The evaluation + // of the controlling expression takes place before each execution of the loop + // body. + llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond()); + + // while(1) is common, avoid extra exit blocks. Be sure + // to correctly handle break/continue though. + bool EmitBoolCondBranch = true; + if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal)) + if (C->isOne()) + EmitBoolCondBranch = false; + + // Create an exit block for when the condition fails, create a block for the + // body of the loop. + llvm::BasicBlock *ExitBlock = new llvm::BasicBlock("whileexit"); + llvm::BasicBlock *LoopBody = new llvm::BasicBlock("whilebody"); + + // As long as the condition is true, go to the loop body. + if (EmitBoolCondBranch) + Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock); + + // Store the blocks to use for break and continue. + BreakContinueStack.push_back(BreakContinue(ExitBlock, LoopHeader)); + + // Emit the loop body. + EmitBlock(LoopBody); + EmitStmt(S.getBody()); + + BreakContinueStack.pop_back(); + + // Cycle to the condition. + Builder.CreateBr(LoopHeader); + + // Emit the exit block. + EmitBlock(ExitBlock); + + // If LoopHeader is a simple forwarding block then eliminate it. + if (!EmitBoolCondBranch + && &LoopHeader->front() == LoopHeader->getTerminator()) { + LoopHeader->replaceAllUsesWith(LoopBody); + LoopHeader->getTerminator()->eraseFromParent(); + LoopHeader->eraseFromParent(); + } +} + +void CodeGenFunction::EmitDoStmt(const DoStmt &S) { + // Emit the body for the loop, insert it, which will create an uncond br to + // it. + llvm::BasicBlock *LoopBody = new llvm::BasicBlock("dobody"); + llvm::BasicBlock *AfterDo = new llvm::BasicBlock("afterdo"); + EmitBlock(LoopBody); + + llvm::BasicBlock *DoCond = new llvm::BasicBlock("docond"); + + // Store the blocks to use for break and continue. + BreakContinueStack.push_back(BreakContinue(AfterDo, DoCond)); + + // Emit the body of the loop into the block. + EmitStmt(S.getBody()); + + BreakContinueStack.pop_back(); + + EmitBlock(DoCond); + + // C99 6.8.5.2: "The evaluation of the controlling expression takes place + // after each execution of the loop body." 
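+  // For the common macro pattern "do { ... } while (0)" the controlling
+  // expression folds to constant zero, so no conditional branch is emitted
+  // and the "docond" block is removed again as a simple forwarding block.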
+ + // Evaluate the conditional in the while header. + // C99 6.8.5p2/p4: The first substatement is executed if the expression + // compares unequal to 0. The condition must be a scalar type. + llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond()); + + // "do {} while (0)" is common in macros, avoid extra blocks. Be sure + // to correctly handle break/continue though. + bool EmitBoolCondBranch = true; + if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal)) + if (C->isZero()) + EmitBoolCondBranch = false; + + // As long as the condition is true, iterate the loop. + if (EmitBoolCondBranch) + Builder.CreateCondBr(BoolCondVal, LoopBody, AfterDo); + + // Emit the exit block. + EmitBlock(AfterDo); + + // If DoCond is a simple forwarding block then eliminate it. + if (!EmitBoolCondBranch && &DoCond->front() == DoCond->getTerminator()) { + DoCond->replaceAllUsesWith(AfterDo); + DoCond->getTerminator()->eraseFromParent(); + DoCond->eraseFromParent(); + } +} + +void CodeGenFunction::EmitForStmt(const ForStmt &S) { + // FIXME: What do we do if the increment (f.e.) contains a stmt expression, + // which contains a continue/break? + // TODO: We could keep track of whether the loop body contains any + // break/continue statements and not create unnecessary blocks (like + // "afterfor" for a condless loop) if it doesn't. + + // Evaluate the first part before the loop. + if (S.getInit()) + EmitStmt(S.getInit()); + + // Start the loop with a block that tests the condition. + llvm::BasicBlock *CondBlock = new llvm::BasicBlock("forcond"); + llvm::BasicBlock *AfterFor = new llvm::BasicBlock("afterfor"); + + EmitBlock(CondBlock); + + // Evaluate the condition if present. If not, treat it as a non-zero-constant + // according to 6.8.5.3p2, aka, true. + if (S.getCond()) { + // C99 6.8.5p2/p4: The first substatement is executed if the expression + // compares unequal to 0. The condition must be a scalar type. + llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond()); + + // As long as the condition is true, iterate the loop. + llvm::BasicBlock *ForBody = new llvm::BasicBlock("forbody"); + Builder.CreateCondBr(BoolCondVal, ForBody, AfterFor); + EmitBlock(ForBody); + } else { + // Treat it as a non-zero constant. Don't even create a new block for the + // body, just fall into it. + } + + // If the for loop doesn't have an increment we can just use the + // condition as the continue block. + llvm::BasicBlock *ContinueBlock; + if (S.getInc()) + ContinueBlock = new llvm::BasicBlock("forinc"); + else + ContinueBlock = CondBlock; + + // Store the blocks to use for break and continue. + BreakContinueStack.push_back(BreakContinue(AfterFor, ContinueBlock)); + + // If the condition is true, execute the body of the for stmt. + EmitStmt(S.getBody()); + + BreakContinueStack.pop_back(); + + if (S.getInc()) + EmitBlock(ContinueBlock); + + // If there is an increment, emit it next. + if (S.getInc()) + EmitStmt(S.getInc()); + + // Finally, branch back up to the condition for the next iteration. + Builder.CreateBr(CondBlock); + + // Emit the fall-through block. + EmitBlock(AfterFor); +} + +/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand +/// if the function returns void, or may be missing one if the function returns +/// non-void. Fun stuff :). +void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) { + // Emit the result value, even if unused, to evalute the side effects. 
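+  // For example, GCC accepts "void f(void) { return g(); }" as well as a bare
+  // "return;" inside a function that returns a value; both forms are handled
+  // below.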
+ const Expr *RV = S.getRetValue(); + + QualType FnRetTy = CurFuncDecl->getType().getCanonicalType(); + FnRetTy = cast<FunctionType>(FnRetTy)->getResultType(); + + if (FnRetTy->isVoidType()) { + // If the function returns void, emit ret void. + Builder.CreateRetVoid(); + } else if (RV == 0) { + // Handle "return;" in a function that returns a value. + const llvm::Type *RetTy = CurFn->getFunctionType()->getReturnType(); + if (RetTy == llvm::Type::VoidTy) + Builder.CreateRetVoid(); // struct return etc. + else + Builder.CreateRet(llvm::UndefValue::get(RetTy)); + } else if (!hasAggregateLLVMType(RV->getType())) { + Builder.CreateRet(EmitScalarExpr(RV)); + } else if (RV->getType()->isComplexType()) { + llvm::Value *SRetPtr = CurFn->arg_begin(); + EmitComplexExprIntoAddr(RV, SRetPtr, false); + } else { + llvm::Value *SRetPtr = CurFn->arg_begin(); + EmitAggExpr(RV, SRetPtr, false); + } + + // Emit a block after the branch so that dead code after a return has some + // place to go. + EmitBlock(new llvm::BasicBlock()); +} + +void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) { + for (const ScopedDecl *Decl = S.getDecl(); Decl; + Decl = Decl->getNextDeclarator()) + EmitDecl(*Decl); +} + +void CodeGenFunction::EmitBreakStmt() { + assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!"); + + llvm::BasicBlock *Block = BreakContinueStack.back().BreakBlock; + Builder.CreateBr(Block); + EmitBlock(new llvm::BasicBlock()); +} + +void CodeGenFunction::EmitContinueStmt() { + assert(!BreakContinueStack.empty() && "continue stmt not in a loop!"); + + llvm::BasicBlock *Block = BreakContinueStack.back().ContinueBlock; + Builder.CreateBr(Block); + EmitBlock(new llvm::BasicBlock()); +} + +/// EmitCaseStmtRange - If case statement range is not too big then +/// add multiple cases to switch instruction, one for each value within +/// the range. If range is too big then emit "if" condition check. +void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) { + assert (S.getRHS() && "Unexpected RHS value in CaseStmt"); + + const Expr *L = S.getLHS(); + const Expr *R = S.getRHS(); + llvm::ConstantInt *LV = cast<llvm::ConstantInt>(EmitScalarExpr(L)); + llvm::ConstantInt *RV = cast<llvm::ConstantInt>(EmitScalarExpr(R)); + llvm::APInt LHS = LV->getValue(); + const llvm::APInt &RHS = RV->getValue(); + + llvm::APInt Range = RHS - LHS; + if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) { + // Range is small enough to add multiple switch instruction cases. + StartBlock("sw.bb"); + llvm::BasicBlock *CaseDest = Builder.GetInsertBlock(); + SwitchInsn->addCase(LV, CaseDest); + LHS++; + while (LHS != RHS) { + SwitchInsn->addCase(llvm::ConstantInt::get(LHS), CaseDest); + LHS++; + } + SwitchInsn->addCase(RV, CaseDest); + EmitStmt(S.getSubStmt()); + return; + } + + // The range is too big. Emit "if" condition. + llvm::BasicBlock *FalseDest = NULL; + llvm::BasicBlock *CaseDest = new llvm::BasicBlock("sw.bb"); + + // If we have already seen one case statement range for this switch + // instruction then piggy-back otherwise use default block as false + // destination. + if (CaseRangeBlock) + FalseDest = CaseRangeBlock; + else + FalseDest = SwitchInsn->getDefaultDest(); + + // Start new block to hold case statement range check instructions. + StartBlock("case.range"); + CaseRangeBlock = Builder.GetInsertBlock(); + + // Emit range check. 
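+  // The range test uses the usual unsigned wrap-around trick: for a range
+  // like "case 100 ... 1000:" the emitted condition is, roughly,
+  //   (SwitchCond - 100) <=u 900
+  // so a single subtract and unsigned compare covers both bounds.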
+ llvm::Value *Diff = + Builder.CreateSub(SwitchInsn->getCondition(), LV, "tmp"); + llvm::Value *Cond = + Builder.CreateICmpULE(Diff, llvm::ConstantInt::get(Range), "tmp"); + Builder.CreateCondBr(Cond, CaseDest, FalseDest); + + // Now emit case statement body. + EmitBlock(CaseDest); + EmitStmt(S.getSubStmt()); +} + +void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) { + if (S.getRHS()) { + EmitCaseStmtRange(S); + return; + } + + StartBlock("sw.bb"); + llvm::BasicBlock *CaseDest = Builder.GetInsertBlock(); + llvm::APSInt CaseVal(32); + S.getLHS()->isIntegerConstantExpr(CaseVal, getContext()); + llvm::ConstantInt *LV = llvm::ConstantInt::get(CaseVal); + SwitchInsn->addCase(LV, CaseDest); + EmitStmt(S.getSubStmt()); +} + +void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) { + StartBlock("sw.default"); + // Current insert block is the default destination. + SwitchInsn->setSuccessor(0, Builder.GetInsertBlock()); + EmitStmt(S.getSubStmt()); +} + +void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) { + llvm::Value *CondV = EmitScalarExpr(S.getCond()); + + // Handle nested switch statements. + llvm::SwitchInst *SavedSwitchInsn = SwitchInsn; + llvm::BasicBlock *SavedCRBlock = CaseRangeBlock; + CaseRangeBlock = NULL; + + // Create basic block to hold stuff that comes after switch statement. + // Initially use it to hold DefaultStmt. + llvm::BasicBlock *NextBlock = new llvm::BasicBlock("after.sw"); + SwitchInsn = Builder.CreateSwitch(CondV, NextBlock); + + // All break statements jump to NextBlock. If BreakContinueStack is non empty + // then reuse last ContinueBlock. + llvm::BasicBlock *ContinueBlock = NULL; + if (!BreakContinueStack.empty()) + ContinueBlock = BreakContinueStack.back().ContinueBlock; + BreakContinueStack.push_back(BreakContinue(NextBlock, ContinueBlock)); + + // Emit switch body. + EmitStmt(S.getBody()); + BreakContinueStack.pop_back(); + + // If one or more case statement range is seen then use CaseRangeBlock + // as the default block. False edge of CaseRangeBlock will lead to + // original default block. + if (CaseRangeBlock) + SwitchInsn->setSuccessor(0, CaseRangeBlock); + + // Prune insert block if it is dummy. + llvm::BasicBlock *BB = Builder.GetInsertBlock(); + if (isDummyBlock(BB)) + BB->eraseFromParent(); + else // Otherwise, branch to continuation. + Builder.CreateBr(NextBlock); + + // Place NextBlock as the new insert point. + CurFn->getBasicBlockList().push_back(NextBlock); + Builder.SetInsertPoint(NextBlock); + SwitchInsn = SavedSwitchInsn; + CaseRangeBlock = SavedCRBlock; +} + +static inline std::string ConvertAsmString(const char *Start, + unsigned NumOperands, + bool IsSimple) +{ + static unsigned AsmCounter = 0; + + AsmCounter++; + + std::string Result; + if (IsSimple) { + while (*Start) { + switch (*Start) { + default: + Result += *Start; + break; + case '$': + Result += "$$"; + break; + } + + Start++; + } + + return Result; + } + + while (*Start) { + switch (*Start) { + default: + Result += *Start; + break; + case '$': + Result += "$$"; + break; + case '%': + // Escaped character + Start++; + if (!*Start) { + // FIXME: This should be caught during Sema. + assert(0 && "Trailing '%' in asm string."); + } + + char EscapedChar = *Start; + if (EscapedChar == '%') { + // Escaped percentage sign. + Result += '%'; + } + else if (EscapedChar == '=') { + // Generate an unique ID. 
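+        // For example, a label written as "foo%=:" in the asm body expands to
+        // something like "foo4:", so each expansion of the asm statement gets
+        // its own distinct label.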
+ Result += llvm::utostr(AsmCounter); + } else if (isdigit(EscapedChar)) { + // %n - Assembler operand n + char *End; + + unsigned long n = strtoul(Start, &End, 10); + if (Start == End) { + // FIXME: This should be caught during Sema. + assert(0 && "Missing operand!"); + } else if (n >= NumOperands) { + // FIXME: This should be caught during Sema. + assert(0 && "Operand number out of range!"); + } + + Result += '$' + llvm::utostr(n); + Start = End - 1; + } else if (isalpha(EscapedChar)) { + char *End; + + unsigned long n = strtoul(Start + 1, &End, 10); + if (Start == End) { + // FIXME: This should be caught during Sema. + assert(0 && "Missing operand!"); + } else if (n >= NumOperands) { + // FIXME: This should be caught during Sema. + assert(0 && "Operand number out of range!"); + } + + Result += "${" + llvm::utostr(n) + ':' + EscapedChar + '}'; + Start = End - 1; + } else { + assert(0 && "Unhandled asm escaped character!"); + } + } + Start++; + } + + return Result; +} + +static std::string SimplifyConstraint(const char* Constraint, + TargetInfo &Target) { + std::string Result; + + while (*Constraint) { + switch (*Constraint) { + default: + Result += Target.convertConstraint(*Constraint); + break; + // Ignore these + case '*': + case '?': + case '!': + break; + case 'g': + Result += "imr"; + break; + } + + Constraint++; + } + + return Result; +} + +void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) { + std::string AsmString = + ConvertAsmString(std::string(S.getAsmString()->getStrData(), + S.getAsmString()->getByteLength()).c_str(), + S.getNumOutputs() + S.getNumInputs(), S.isSimple()); + + std::string Constraints; + + llvm::Value *ResultAddr = 0; + const llvm::Type *ResultType = llvm::Type::VoidTy; + + std::vector<const llvm::Type*> ArgTypes; + std::vector<llvm::Value*> Args; + + // Keep track of inout constraints. + std::string InOutConstraints; + std::vector<llvm::Value*> InOutArgs; + std::vector<const llvm::Type*> InOutArgTypes; + + for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) { + std::string OutputConstraint(S.getOutputConstraint(i)->getStrData(), + S.getOutputConstraint(i)->getByteLength()); + + TargetInfo::ConstraintInfo Info; + bool result = Target.validateOutputConstraint(OutputConstraint.c_str(), + Info); + assert(result && "Failed to parse output constraint"); + + // Simplify the output constraint. + OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, Target); + + LValue Dest = EmitLValue(S.getOutputExpr(i)); + const llvm::Type *DestValueType = + cast<llvm::PointerType>(Dest.getAddress()->getType())->getElementType(); + + // If the first output operand is not a memory dest, we'll + // make it the return value. + if (i == 0 && !(Info & TargetInfo::CI_AllowsMemory) && + DestValueType->isFirstClassType()) { + ResultAddr = Dest.getAddress(); + ResultType = DestValueType; + Constraints += "=" + OutputConstraint; + } else { + ArgTypes.push_back(Dest.getAddress()->getType()); + Args.push_back(Dest.getAddress()); + if (i != 0) + Constraints += ','; + Constraints += "=*"; + Constraints += OutputConstraint; + } + + if (Info & TargetInfo::CI_ReadWrite) { + // FIXME: This code should be shared with the code that handles inputs. 
+ InOutConstraints += ','; + + const Expr *InputExpr = S.getOutputExpr(i); + llvm::Value *Arg; + if ((Info & TargetInfo::CI_AllowsRegister) || + !(Info & TargetInfo::CI_AllowsMemory)) { + if (ConvertType(InputExpr->getType())->isFirstClassType()) { + Arg = EmitScalarExpr(InputExpr); + } else { + assert(0 && "FIXME: Implement passing non first class types as inputs"); + } + } else { + LValue Dest = EmitLValue(InputExpr); + Arg = Dest.getAddress(); + InOutConstraints += '*'; + } + + InOutArgTypes.push_back(Arg->getType()); + InOutArgs.push_back(Arg); + InOutConstraints += OutputConstraint; + } + } + + unsigned NumConstraints = S.getNumOutputs() + S.getNumInputs(); + + for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) { + const Expr *InputExpr = S.getInputExpr(i); + + std::string InputConstraint(S.getInputConstraint(i)->getStrData(), + S.getInputConstraint(i)->getByteLength()); + + TargetInfo::ConstraintInfo Info; + bool result = Target.validateInputConstraint(InputConstraint.c_str(), + NumConstraints, + Info); + assert(result && "Failed to parse input constraint"); + + if (i != 0 || S.getNumOutputs() > 0) + Constraints += ','; + + // Simplify the input constraint. + InputConstraint = SimplifyConstraint(InputConstraint.c_str(), Target); + + llvm::Value *Arg; + + if ((Info & TargetInfo::CI_AllowsRegister) || + !(Info & TargetInfo::CI_AllowsMemory)) { + if (ConvertType(InputExpr->getType())->isFirstClassType()) { + Arg = EmitScalarExpr(InputExpr); + } else { + assert(0 && "FIXME: Implement passing non first class types as inputs"); + } + } else { + LValue Dest = EmitLValue(InputExpr); + Arg = Dest.getAddress(); + Constraints += '*'; + } + + ArgTypes.push_back(Arg->getType()); + Args.push_back(Arg); + Constraints += InputConstraint; + } + + // Append the "input" part of inout constraints last. + for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) { + ArgTypes.push_back(InOutArgTypes[i]); + Args.push_back(InOutArgs[i]); + } + Constraints += InOutConstraints; + + // Clobbers + for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) { + std::string Clobber(S.getClobber(i)->getStrData(), + S.getClobber(i)->getByteLength()); + + Clobber = Target.getNormalizedGCCRegisterName(Clobber.c_str()); + + if (i != 0 || NumConstraints != 0) + Constraints += ','; + + Constraints += "~{"; + Constraints += Clobber; + Constraints += '}'; + } + + // Add machine specific clobbers + if (const char *C = Target.getClobbers()) { + if (!Constraints.empty()) + Constraints += ','; + Constraints += C; + } + + const llvm::FunctionType *FTy = + llvm::FunctionType::get(ResultType, ArgTypes, false); + + llvm::InlineAsm *IA = + llvm::InlineAsm::get(FTy, AsmString, Constraints, + S.isVolatile() || S.getNumOutputs() == 0); + llvm::Value *Result = Builder.CreateCall(IA, Args.begin(), Args.end(), ""); + if (ResultAddr) + Builder.CreateStore(Result, ResultAddr); +} diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp new file mode 100644 index 00000000000..49f23f27c34 --- /dev/null +++ b/clang/lib/CodeGen/CodeGenFunction.cpp @@ -0,0 +1,182 @@ +//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This coordinates the per-function state used while generating code. 
+// +//===----------------------------------------------------------------------===// + +#include "CodeGenFunction.h" +#include "CodeGenModule.h" +#include "clang/Basic/TargetInfo.h" +#include "clang/AST/AST.h" +#include "llvm/CallingConv.h" +#include "llvm/Constants.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Function.h" +#include "llvm/Analysis/Verifier.h" +#include "llvm/Support/CFG.h" +using namespace clang; +using namespace CodeGen; + +CodeGenFunction::CodeGenFunction(CodeGenModule &cgm) + : CGM(cgm), Target(CGM.getContext().Target), SwitchInsn(NULL), + CaseRangeBlock(NULL) {} + +ASTContext &CodeGenFunction::getContext() const { + return CGM.getContext(); +} + + +llvm::BasicBlock *CodeGenFunction::getBasicBlockForLabel(const LabelStmt *S) { + llvm::BasicBlock *&BB = LabelMap[S]; + if (BB) return BB; + + // Create, but don't insert, the new block. + return BB = new llvm::BasicBlock(S->getName()); +} + +llvm::Constant * +CodeGenFunction::GetAddrOfStaticLocalVar(const BlockVarDecl *BVD) { + return cast<llvm::Constant>(LocalDeclMap[BVD]); +} + +const llvm::Type *CodeGenFunction::ConvertType(QualType T) { + return CGM.getTypes().ConvertType(T); +} + +bool CodeGenFunction::hasAggregateLLVMType(QualType T) { + return !T->isRealType() && !T->isPointerType() && !T->isReferenceType() && + !T->isVoidType() && !T->isVectorType() && !T->isFunctionType(); +} + + +void CodeGenFunction::GenerateCode(const FunctionDecl *FD) { + LLVMIntTy = ConvertType(getContext().IntTy); + LLVMPointerWidth = static_cast<unsigned>( + getContext().getTypeSize(getContext().getPointerType(getContext().VoidTy))); + + CurFuncDecl = FD; + CurFn = cast<llvm::Function>(CGM.GetAddrOfFunctionDecl(FD, true)); + assert(CurFn->isDeclaration() && "Function already has body?"); + + // TODO: Set up linkage and many other things. Note, this is a simple + // approximation of what we really want. + if (FD->getAttr<DLLImportAttr>()) + CurFn->setLinkage(llvm::Function::DLLImportLinkage); + else if (FD->getAttr<DLLExportAttr>()) + CurFn->setLinkage(llvm::Function::DLLExportLinkage); + else if (FD->getAttr<WeakAttr>() || FD->isInline()) + CurFn->setLinkage(llvm::Function::WeakLinkage); + else if (FD->getStorageClass() == FunctionDecl::Static) + CurFn->setLinkage(llvm::Function::InternalLinkage); + + if (FD->getAttr<FastCallAttr>()) + CurFn->setCallingConv(llvm::CallingConv::Fast); + + if (const VisibilityAttr *attr = FD->getAttr<VisibilityAttr>()) + CurFn->setVisibility(attr->getVisibility()); + // FIXME: else handle -fvisibility + + + unsigned FuncAttrs = 0; + if (FD->getAttr<NoThrowAttr>()) + FuncAttrs |= llvm::ParamAttr::NoUnwind; + if (FD->getAttr<NoReturnAttr>()) + FuncAttrs |= llvm::ParamAttr::NoReturn; + + if (FuncAttrs) { + llvm::ParamAttrsWithIndex PAWI = + llvm::ParamAttrsWithIndex::get(0, FuncAttrs); + CurFn->setParamAttrs(llvm::PAListPtr::get(&PAWI, 1)); + } + + llvm::BasicBlock *EntryBB = new llvm::BasicBlock("entry", CurFn); + + // Create a marker to make it easy to insert allocas into the entryblock + // later. Don't create this with the builder, because we don't want it + // folded. + llvm::Value *Undef = llvm::UndefValue::get(llvm::Type::Int32Ty); + AllocaInsertPt = new llvm::BitCastInst(Undef, llvm::Type::Int32Ty, "allocapt", + EntryBB); + + Builder.SetInsertPoint(EntryBB); + + // Emit allocs for param decls. Give the LLVM Argument nodes names. + llvm::Function::arg_iterator AI = CurFn->arg_begin(); + + // Name the struct return argument. 
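+ // (When the result is an aggregate, it is returned through an implicit
+ // pointer argument that comes first, so that argument is named and skipped
+ // before the user-visible parameters below.)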
+ if (hasAggregateLLVMType(FD->getResultType())) { + AI->setName("agg.result"); + ++AI; + } + + for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i, ++AI) { + assert(AI != CurFn->arg_end() && "Argument mismatch!"); + EmitParmDecl(*FD->getParamDecl(i), AI); + } + + // Emit the function body. + EmitStmt(FD->getBody()); + + // Emit a return for code that falls off the end. If insert point + // is a dummy block with no predecessors then remove the block itself. + llvm::BasicBlock *BB = Builder.GetInsertBlock(); + if (isDummyBlock(BB)) + BB->eraseFromParent(); + else { + // FIXME: if this is C++ main, this should return 0. + if (CurFn->getReturnType() == llvm::Type::VoidTy) + Builder.CreateRetVoid(); + else + Builder.CreateRet(llvm::UndefValue::get(CurFn->getReturnType())); + } + assert(BreakContinueStack.empty() && + "mismatched push/pop in break/continue stack!"); + + // Remove the AllocaInsertPt instruction, which is just a convenience for us. + AllocaInsertPt->eraseFromParent(); + AllocaInsertPt = 0; + + // Verify that the function is well formed. + assert(!verifyFunction(*CurFn)); +} + +/// isDummyBlock - Return true if BB is an empty basic block +/// with no predecessors. +bool CodeGenFunction::isDummyBlock(const llvm::BasicBlock *BB) { + if (BB->empty() && pred_begin(BB) == pred_end(BB)) + return true; + return false; +} + +/// StartBlock - Start new block named N. If insert block is a dummy block +/// then reuse it. +void CodeGenFunction::StartBlock(const char *N) { + llvm::BasicBlock *BB = Builder.GetInsertBlock(); + if (!isDummyBlock(BB)) + EmitBlock(new llvm::BasicBlock(N)); + else + BB->setName(N); +} + +/// getCGRecordLayout - Return record layout info. +const CGRecordLayout *CodeGenFunction::getCGRecordLayout(CodeGenTypes &CGT, + QualType Ty) { + const RecordType *RTy = Ty->getAsRecordType(); + assert (RTy && "Unexpected type. RecordType expected here."); + + return CGT.getCGRecordLayout(RTy->getDecl()); +} + +/// WarnUnsupported - Print out a warning that codegen doesn't support the +/// specified stmt yet. +void CodeGenFunction::WarnUnsupported(const Stmt *S, const char *Type) { + CGM.WarnUnsupported(S, Type); +} + diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h new file mode 100644 index 00000000000..509e8296d20 --- /dev/null +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -0,0 +1,486 @@ +//===--- CodeGenFunction.h - Per-Function state for LLVM CodeGen ----------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This is the internal per-function state used for llvm translation. 
+// +//===----------------------------------------------------------------------===// + +#ifndef CLANG_CODEGEN_CODEGENFUNCTION_H +#define CLANG_CODEGEN_CODEGENFUNCTION_H + +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/Support/LLVMBuilder.h" +#include <vector> + +namespace llvm { + class Module; +} + +namespace clang { + class ASTContext; + class Decl; + class FunctionDecl; + class TargetInfo; + class QualType; + class FunctionTypeProto; + + class Stmt; + class CompoundStmt; + class LabelStmt; + class GotoStmt; + class IfStmt; + class WhileStmt; + class DoStmt; + class ForStmt; + class ReturnStmt; + class DeclStmt; + class CaseStmt; + class DefaultStmt; + class SwitchStmt; + class AsmStmt; + + class Expr; + class DeclRefExpr; + class StringLiteral; + class IntegerLiteral; + class FloatingLiteral; + class CharacterLiteral; + class TypesCompatibleExpr; + + class ImplicitCastExpr; + class CastExpr; + class CallExpr; + class UnaryOperator; + class BinaryOperator; + class CompoundAssignOperator; + class ArraySubscriptExpr; + class OCUVectorElementExpr; + class ConditionalOperator; + class ChooseExpr; + class PreDefinedExpr; + class ObjCStringLiteral; + class MemberExpr; + + class BlockVarDecl; + class EnumConstantDecl; + class ParmVarDecl; + class FieldDecl; +namespace CodeGen { + class CodeGenModule; + class CodeGenTypes; + class CGRecordLayout; + +/// RValue - This trivial value class is used to represent the result of an +/// expression that is evaluated. It can be one of three things: either a +/// simple LLVM SSA value, a pair of SSA values for complex numbers, or the +/// address of an aggregate value in memory. +class RValue { + llvm::Value *V1, *V2; + // TODO: Encode this into the low bit of pointer for more efficient + // return-by-value. + enum { Scalar, Complex, Aggregate } Flavor; + + // FIXME: Aggregate rvalues need to retain information about whether they are + // volatile or not. +public: + + bool isScalar() const { return Flavor == Scalar; } + bool isComplex() const { return Flavor == Complex; } + bool isAggregate() const { return Flavor == Aggregate; } + + /// getScalar() - Return the Value* of this scalar value. + llvm::Value *getScalarVal() const { + assert(isScalar() && "Not a scalar!"); + return V1; + } + + /// getComplexVal - Return the real/imag components of this complex value. + /// + std::pair<llvm::Value *, llvm::Value *> getComplexVal() const { + return std::pair<llvm::Value *, llvm::Value *>(V1, V2); + } + + /// getAggregateAddr() - Return the Value* of the address of the aggregate. + llvm::Value *getAggregateAddr() const { + assert(isAggregate() && "Not an aggregate!"); + return V1; + } + + static RValue get(llvm::Value *V) { + RValue ER; + ER.V1 = V; + ER.Flavor = Scalar; + return ER; + } + static RValue getComplex(llvm::Value *V1, llvm::Value *V2) { + RValue ER; + ER.V1 = V1; + ER.V2 = V2; + ER.Flavor = Complex; + return ER; + } + static RValue getComplex(const std::pair<llvm::Value *, llvm::Value *> &C) { + RValue ER; + ER.V1 = C.first; + ER.V2 = C.second; + ER.Flavor = Complex; + return ER; + } + static RValue getAggregate(llvm::Value *V) { + RValue ER; + ER.V1 = V; + ER.Flavor = Aggregate; + return ER; + } +}; + + +/// LValue - This represents an lvalue references. Because C/C++ allow +/// bitfields, this is not a simple LLVM pointer, it may be a pointer plus a +/// bitrange. +class LValue { + // FIXME: Volatility. Restrict? + // alignment? + + enum { + Simple, // This is a normal l-value, use getAddress(). 
+ VectorElt, // This is a vector element l-value (V[i]), use getVector* + BitField, // This is a bitfield l-value, use getBitfield*. + OCUVectorElt // This is an ocu vector subset, use getOCUVectorComp + } LVType; + + llvm::Value *V; + + union { + llvm::Value *VectorIdx; // Index into a vector subscript: V[i] + unsigned VectorElts; // Encoded OCUVector element subset: V.xyx + struct { + unsigned short StartBit; + unsigned short Size; + bool IsSigned; + } BitfieldData; // BitField start bit and size + }; +public: + bool isSimple() const { return LVType == Simple; } + bool isVectorElt() const { return LVType == VectorElt; } + bool isBitfield() const { return LVType == BitField; } + bool isOCUVectorElt() const { return LVType == OCUVectorElt; } + + // simple lvalue + llvm::Value *getAddress() const { assert(isSimple()); return V; } + // vector elt lvalue + llvm::Value *getVectorAddr() const { assert(isVectorElt()); return V; } + llvm::Value *getVectorIdx() const { assert(isVectorElt()); return VectorIdx; } + // ocu vector elements. + llvm::Value *getOCUVectorAddr() const { assert(isOCUVectorElt()); return V; } + unsigned getOCUVectorElts() const { + assert(isOCUVectorElt()); + return VectorElts; + } + // bitfield lvalue + llvm::Value *getBitfieldAddr() const { assert(isBitfield()); return V; } + unsigned short getBitfieldStartBit() const { + assert(isBitfield()); + return BitfieldData.StartBit; + } + unsigned short getBitfieldSize() const { + assert(isBitfield()); + return BitfieldData.Size; + } + bool isBitfieldSigned() const { + assert(isBitfield()); + return BitfieldData.IsSigned; + } + + static LValue MakeAddr(llvm::Value *V) { + LValue R; + R.LVType = Simple; + R.V = V; + return R; + } + + static LValue MakeVectorElt(llvm::Value *Vec, llvm::Value *Idx) { + LValue R; + R.LVType = VectorElt; + R.V = Vec; + R.VectorIdx = Idx; + return R; + } + + static LValue MakeOCUVectorElt(llvm::Value *Vec, unsigned Elements) { + LValue R; + R.LVType = OCUVectorElt; + R.V = Vec; + R.VectorElts = Elements; + return R; + } + + static LValue MakeBitfield(llvm::Value *V, unsigned short StartBit, + unsigned short Size, bool IsSigned) { + LValue R; + R.LVType = BitField; + R.V = V; + R.BitfieldData.StartBit = StartBit; + R.BitfieldData.Size = Size; + R.BitfieldData.IsSigned = IsSigned; + return R; + } +}; + +/// CodeGenFunction - This class organizes the per-function state that is used +/// while generating LLVM code. +class CodeGenFunction { +public: + CodeGenModule &CGM; // Per-module state. + TargetInfo &Target; + + typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy; + llvm::LLVMFoldingBuilder Builder; + + const FunctionDecl *CurFuncDecl; + llvm::Function *CurFn; + + /// AllocaInsertPoint - This is an instruction in the entry block before which + /// we prefer to insert allocas. + llvm::Instruction *AllocaInsertPt; + + const llvm::Type *LLVMIntTy; + uint32_t LLVMPointerWidth; + +private: + /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C + /// decls. + llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap; + + /// LabelMap - This keeps track of the LLVM basic block for each C label. + llvm::DenseMap<const LabelStmt*, llvm::BasicBlock*> LabelMap; + + // BreakContinueStack - This keeps track of where break and continue + // statements should jump to. 
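+ // (Loop and switch emission are expected to push an entry here when the
+ // construct is entered and pop it when it ends; break/continue statements
+ // then branch to the innermost entry.)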
+ struct BreakContinue {
+ BreakContinue(llvm::BasicBlock *bb, llvm::BasicBlock *cb)
+ : BreakBlock(bb), ContinueBlock(cb) {}
+
+ llvm::BasicBlock *BreakBlock;
+ llvm::BasicBlock *ContinueBlock;
+ };
+ llvm::SmallVector<BreakContinue, 8> BreakContinueStack;
+
+ /// SwitchInsn - This is the nearest enclosing switch instruction. It is null
+ /// if the current context is not inside a switch.
+ llvm::SwitchInst *SwitchInsn;
+
+ /// CaseRangeBlock - This block holds the condition check for the most recent
+ /// case-range statement in the current switch instruction.
+ llvm::BasicBlock *CaseRangeBlock;
+
+public:
+ CodeGenFunction(CodeGenModule &cgm);
+
+ ASTContext &getContext() const;
+
+ void GenerateCode(const FunctionDecl *FD);
+
+ const llvm::Type *ConvertType(QualType T);
+
+ /// hasAggregateLLVMType - Return true if the specified AST type will map into
+ /// an aggregate LLVM type or is void.
+ static bool hasAggregateLLVMType(QualType T);
+
+ /// getBasicBlockForLabel - Return the LLVM basic block that the specified
+ /// label maps to.
+ llvm::BasicBlock *getBasicBlockForLabel(const LabelStmt *S);
+
+ void EmitBlock(llvm::BasicBlock *BB);
+
+ /// WarnUnsupported - Print out a warning that codegen doesn't support the
+ /// specified stmt yet.
+ void WarnUnsupported(const Stmt *S, const char *Type);
+
+ //===--------------------------------------------------------------------===//
+ // Helpers
+ //===--------------------------------------------------------------------===//
+
+ /// CreateTempAlloca - This creates an alloca and inserts it into the entry
+ /// block.
+ llvm::AllocaInst *CreateTempAlloca(const llvm::Type *Ty,
+ const char *Name = "tmp");
+
+ /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
+ /// expression and compare the result against zero, returning an Int1Ty value.
+ llvm::Value *EvaluateExprAsBool(const Expr *E);
+
+ /// EmitAnyExpr - Emit code to compute the specified expression, which can have
+ /// any type. The result is returned as an RValue struct. If this is an
+ /// aggregate expression, the AggLoc/isAggLocVolatile arguments indicate where
+ /// the result should be returned.
+ RValue EmitAnyExpr(const Expr *E, llvm::Value *AggLoc = 0,
+ bool isAggLocVolatile = false);
+
+ /// isDummyBlock - Return true if BB is an empty basic block
+ /// with no predecessors.
+ static bool isDummyBlock(const llvm::BasicBlock *BB);
+
+ /// StartBlock - Start a new block named N. If the current insert block is a
+ /// dummy block, reuse it instead.
+ void StartBlock(const char *N);
+
+ /// getCGRecordLayout - Return record layout info.
+ const CGRecordLayout *getCGRecordLayout(CodeGenTypes &CGT, QualType RTy);
+
+ /// GetAddrOfStaticLocalVar - Return the address of a static local variable.
+ llvm::Constant *GetAddrOfStaticLocalVar(const BlockVarDecl *BVD); + //===--------------------------------------------------------------------===// + // Declaration Emission + //===--------------------------------------------------------------------===// + + void EmitDecl(const Decl &D); + void EmitEnumConstantDecl(const EnumConstantDecl &D); + void EmitBlockVarDecl(const BlockVarDecl &D); + void EmitLocalBlockVarDecl(const BlockVarDecl &D); + void EmitStaticBlockVarDecl(const BlockVarDecl &D); + void EmitParmDecl(const ParmVarDecl &D, llvm::Value *Arg); + + //===--------------------------------------------------------------------===// + // Statement Emission + //===--------------------------------------------------------------------===// + + void EmitStmt(const Stmt *S); + RValue EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false, + llvm::Value *AggLoc = 0, bool isAggVol = false); + void EmitLabelStmt(const LabelStmt &S); + void EmitGotoStmt(const GotoStmt &S); + void EmitIfStmt(const IfStmt &S); + void EmitWhileStmt(const WhileStmt &S); + void EmitDoStmt(const DoStmt &S); + void EmitForStmt(const ForStmt &S); + void EmitReturnStmt(const ReturnStmt &S); + void EmitDeclStmt(const DeclStmt &S); + void EmitBreakStmt(); + void EmitContinueStmt(); + void EmitSwitchStmt(const SwitchStmt &S); + void EmitDefaultStmt(const DefaultStmt &S); + void EmitCaseStmt(const CaseStmt &S); + void EmitCaseStmtRange(const CaseStmt &S); + void EmitAsmStmt(const AsmStmt &S); + + //===--------------------------------------------------------------------===// + // LValue Expression Emission + //===--------------------------------------------------------------------===// + + /// EmitLValue - Emit code to compute a designator that specifies the location + /// of the expression. + /// + /// This can return one of two things: a simple address or a bitfield + /// reference. In either case, the LLVM Value* in the LValue structure is + /// guaranteed to be an LLVM pointer type. + /// + /// If this returns a bitfield reference, nothing about the pointee type of + /// the LLVM value is known: For example, it may not be a pointer to an + /// integer. + /// + /// If this returns a normal address, and if the lvalue's C type is fixed + /// size, this method guarantees that the returned pointer type will point to + /// an LLVM type of the same size of the lvalue's type. If the lvalue has a + /// variable length type, this is not possible. + /// + LValue EmitLValue(const Expr *E); + + /// EmitLoadOfLValue - Given an expression that represents a value lvalue, + /// this method emits the address of the lvalue, then loads the result as an + /// rvalue, returning the rvalue. + RValue EmitLoadOfLValue(LValue V, QualType LVType); + RValue EmitLoadOfOCUElementLValue(LValue V, QualType LVType); + RValue EmitLoadOfBitfieldLValue(LValue LV, QualType ExprType); + + + /// EmitStoreThroughLValue - Store the specified rvalue into the specified + /// lvalue, where both are guaranteed to the have the same type, and that type + /// is 'Ty'. 
+ void EmitStoreThroughLValue(RValue Src, LValue Dst, QualType Ty); + void EmitStoreThroughOCUComponentLValue(RValue Src, LValue Dst, QualType Ty); + void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst, QualType Ty); + + // Note: only availabe for agg return types + LValue EmitCallExprLValue(const CallExpr *E); + + LValue EmitDeclRefLValue(const DeclRefExpr *E); + LValue EmitStringLiteralLValue(const StringLiteral *E); + LValue EmitPreDefinedLValue(const PreDefinedExpr *E); + LValue EmitUnaryOpLValue(const UnaryOperator *E); + LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E); + LValue EmitOCUVectorElementExpr(const OCUVectorElementExpr *E); + LValue EmitMemberExpr(const MemberExpr *E); + + LValue EmitLValueForField(llvm::Value* Base, FieldDecl* Field, + bool isUnion); + + //===--------------------------------------------------------------------===// + // Scalar Expression Emission + //===--------------------------------------------------------------------===// + + RValue EmitCallExpr(const CallExpr *E); + RValue EmitCallExpr(Expr *FnExpr, Expr *const *Args, unsigned NumArgs); + RValue EmitCallExpr(llvm::Value *Callee, QualType FnType, + Expr *const *Args, unsigned NumArgs); + RValue EmitBuiltinExpr(unsigned BuiltinID, const CallExpr *E); + + llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E); + llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E); + + llvm::Value *EmitShuffleVector(llvm::Value* V1, llvm::Value *V2, ...); + llvm::Value *EmitVector(llvm::Value * const *Vals, unsigned NumVals, + bool isSplat = false); + + llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E); + + //===--------------------------------------------------------------------===// + // Expression Emission + //===--------------------------------------------------------------------===// + + // Expressions are broken into three classes: scalar, complex, aggregate. + + /// EmitScalarExpr - Emit the computation of the specified expression of + /// LLVM scalar type, returning the result. + llvm::Value *EmitScalarExpr(const Expr *E); + + /// EmitScalarConversion - Emit a conversion from the specified type to the + /// specified destination type, both of which are LLVM scalar types. + llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy, + QualType DstTy); + + /// EmitComplexToScalarConversion - Emit a conversion from the specified + /// complex type to the specified destination type, where the destination + /// type is an LLVM scalar type. + llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy, + QualType DstTy); + + + /// EmitAggExpr - Emit the computation of the specified expression of + /// aggregate type. The result is computed into DestPtr. Note that if + /// DestPtr is null, the value of the aggregate expression is not needed. + void EmitAggExpr(const Expr *E, llvm::Value *DestPtr, bool VolatileDest); + + /// EmitComplexExpr - Emit the computation of the specified expression of + /// complex type, returning the result. + ComplexPairTy EmitComplexExpr(const Expr *E); + + /// EmitComplexExprIntoAddr - Emit the computation of the specified expression + /// of complex type, storing into the specified Value*. + void EmitComplexExprIntoAddr(const Expr *E, llvm::Value *DestAddr, + bool DestIsVolatile); + /// LoadComplexFromAddr - Load a complex number from the specified address. 
+ ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile); +}; +} // end namespace CodeGen +} // end namespace clang + +#endif diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp new file mode 100644 index 00000000000..43f399a61f2 --- /dev/null +++ b/clang/lib/CodeGen/CodeGenModule.cpp @@ -0,0 +1,509 @@ +//===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This coordinates the per-module state used while generating code. +// +//===----------------------------------------------------------------------===// + +#include "CodeGenModule.h" +#include "CodeGenFunction.h" +#include "clang/AST/ASTContext.h" +#include "clang/AST/Decl.h" +#include "clang/Basic/Diagnostic.h" +#include "clang/Basic/LangOptions.h" +#include "clang/Basic/TargetInfo.h" +#include "llvm/CallingConv.h" +#include "llvm/Constants.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Module.h" +#include "llvm/Intrinsics.h" +#include <algorithm> +using namespace clang; +using namespace CodeGen; + + +CodeGenModule::CodeGenModule(ASTContext &C, const LangOptions &LO, + llvm::Module &M, const llvm::TargetData &TD, + Diagnostic &diags) + : Context(C), Features(LO), TheModule(M), TheTargetData(TD), Diags(diags), + Types(C, M, TD), MemCpyFn(0), MemSetFn(0), CFConstantStringClassRef(0) { + //TODO: Make this selectable at runtime + Runtime = CreateObjCRuntime(M); +} + +CodeGenModule::~CodeGenModule() { + EmitGlobalCtors(); + delete Runtime; +} + +/// WarnUnsupported - Print out a warning that codegen doesn't support the +/// specified stmt yet. +void CodeGenModule::WarnUnsupported(const Stmt *S, const char *Type) { + unsigned DiagID = getDiags().getCustomDiagID(Diagnostic::Warning, + "cannot codegen this %0 yet"); + SourceRange Range = S->getSourceRange(); + std::string Msg = Type; + getDiags().Report(Context.getFullLoc(S->getLocStart()), DiagID, + &Msg, 1, &Range, 1); +} + +/// WarnUnsupported - Print out a warning that codegen doesn't support the +/// specified decl yet. +void CodeGenModule::WarnUnsupported(const Decl *D, const char *Type) { + unsigned DiagID = getDiags().getCustomDiagID(Diagnostic::Warning, + "cannot codegen this %0 yet"); + std::string Msg = Type; + getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID, + &Msg, 1); +} + +/// AddGlobalCtor - Add a function to the list that will be called before +/// main() runs. +void CodeGenModule::AddGlobalCtor(llvm::Function * Ctor) { + // TODO: Type coercion of void()* types. 
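+ // The constructor is only recorded here; EmitGlobalCtors(), called from the
+ // CodeGenModule destructor, builds the llvm.global_ctors array from this list.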
+ GlobalCtors.push_back(Ctor); +} + +void CodeGenModule::EmitGlobalCtors() { + // Get the type of @llvm.global_ctors + std::vector<const llvm::Type*> CtorFields; + CtorFields.push_back(llvm::IntegerType::get(32)); + // Constructor function type + std::vector<const llvm::Type*> VoidArgs; + llvm::FunctionType* CtorFuncTy = llvm::FunctionType::get( + llvm::Type::VoidTy, + VoidArgs, + false); + // i32, function type pair + CtorFields.push_back(llvm::PointerType::getUnqual(CtorFuncTy)); + llvm::StructType* CtorStructTy = llvm::StructType::get(CtorFields, false); + // Array of fields + llvm::ArrayType* GlobalCtorsTy = llvm::ArrayType::get(CtorStructTy, + GlobalCtors.size()); + + const std::string GlobalCtorsVar = std::string("llvm.global_ctors"); + // Define the global variable + llvm::GlobalVariable *GlobalCtorsVal = new llvm::GlobalVariable( + GlobalCtorsTy, + false, + llvm::GlobalValue::AppendingLinkage, + (llvm::Constant*)0, + GlobalCtorsVar, + &TheModule); + + // Populate the array + std::vector<llvm::Constant*> CtorValues; + llvm::Constant *MagicNumber = llvm::ConstantInt::get(llvm::IntegerType::Int32Ty, + 65535, + false); + for (std::vector<llvm::Constant*>::iterator I = GlobalCtors.begin(), + E = GlobalCtors.end(); I != E; ++I) { + std::vector<llvm::Constant*> StructValues; + StructValues.push_back(MagicNumber); + StructValues.push_back(*I); + + llvm::Constant* CtorEntry = llvm::ConstantStruct::get(CtorStructTy, StructValues); + CtorValues.push_back(CtorEntry); + } + llvm::Constant* CtorArray = llvm::ConstantArray::get(GlobalCtorsTy, CtorValues); + GlobalCtorsVal->setInitializer(CtorArray); + +} + +/// ReplaceMapValuesWith - This is a really slow and bad function that +/// searches for any entries in GlobalDeclMap that point to OldVal, changing +/// them to point to NewVal. This is badbadbad, FIXME! +void CodeGenModule::ReplaceMapValuesWith(llvm::Constant *OldVal, + llvm::Constant *NewVal) { + for (llvm::DenseMap<const Decl*, llvm::Constant*>::iterator + I = GlobalDeclMap.begin(), E = GlobalDeclMap.end(); I != E; ++I) + if (I->second == OldVal) I->second = NewVal; +} + + +llvm::Constant *CodeGenModule::GetAddrOfFunctionDecl(const FunctionDecl *D, + bool isDefinition) { + // See if it is already in the map. If so, just return it. + llvm::Constant *&Entry = GlobalDeclMap[D]; + if (Entry) return Entry; + + const llvm::Type *Ty = getTypes().ConvertType(D->getType()); + + // Check to see if the function already exists. + llvm::Function *F = getModule().getFunction(D->getName()); + const llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty); + + // If it doesn't already exist, just create and return an entry. + if (F == 0) { + // FIXME: param attributes for sext/zext etc. + F = new llvm::Function(FTy, llvm::Function::ExternalLinkage, D->getName(), + &getModule()); + + // Set the appropriate calling convention for the Function. + if (D->getAttr<FastCallAttr>()) + F->setCallingConv(llvm::CallingConv::Fast); + return Entry = F; + } + + // If the pointer type matches, just return it. + llvm::Type *PFTy = llvm::PointerType::getUnqual(Ty); + if (PFTy == F->getType()) return Entry = F; + + // If this isn't a definition, just return it casted to the right type. + if (!isDefinition) + return Entry = llvm::ConstantExpr::getBitCast(F, PFTy); + + // Otherwise, we have a definition after a prototype with the wrong type. 
+ // F is the Function* for the one with the wrong type, we must make a new + // Function* and update everything that used F (a declaration) with the new + // Function* (which will be a definition). + // + // This happens if there is a prototype for a function (e.g. "int f()") and + // then a definition of a different type (e.g. "int f(int x)"). Start by + // making a new function of the correct type, RAUW, then steal the name. + llvm::Function *NewFn = new llvm::Function(FTy, + llvm::Function::ExternalLinkage, + "", &getModule()); + NewFn->takeName(F); + + // Replace uses of F with the Function we will endow with a body. + llvm::Constant *NewPtrForOldDecl = + llvm::ConstantExpr::getBitCast(NewFn, F->getType()); + F->replaceAllUsesWith(NewPtrForOldDecl); + + // FIXME: Update the globaldeclmap for the previous decl of this name. We + // really want a way to walk all of these, but we don't have it yet. This + // is incredibly slow! + ReplaceMapValuesWith(F, NewPtrForOldDecl); + + // Ok, delete the old function now, which is dead. + assert(F->isDeclaration() && "Shouldn't replace non-declaration"); + F->eraseFromParent(); + + // Return the new function which has the right type. + return Entry = NewFn; +} + +static bool IsZeroElementArray(const llvm::Type *Ty) { + if (const llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(Ty)) + return ATy->getNumElements() == 0; + return false; +} + +llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D, + bool isDefinition) { + assert(D->hasGlobalStorage() && "Not a global variable"); + + // See if it is already in the map. + llvm::Constant *&Entry = GlobalDeclMap[D]; + if (Entry) return Entry; + + QualType ASTTy = D->getType(); + const llvm::Type *Ty = getTypes().ConvertTypeForMem(ASTTy); + + // Check to see if the global already exists. + llvm::GlobalVariable *GV = getModule().getGlobalVariable(D->getName(), true); + + // If it doesn't already exist, just create and return an entry. + if (GV == 0) { + return Entry = new llvm::GlobalVariable(Ty, false, + llvm::GlobalValue::ExternalLinkage, + 0, D->getName(), &getModule(), 0, + ASTTy.getAddressSpace()); + } + + // If the pointer type matches, just return it. + llvm::Type *PTy = llvm::PointerType::getUnqual(Ty); + if (PTy == GV->getType()) return Entry = GV; + + // If this isn't a definition, just return it casted to the right type. + if (!isDefinition) + return Entry = llvm::ConstantExpr::getBitCast(GV, PTy); + + + // Otherwise, we have a definition after a prototype with the wrong type. + // GV is the GlobalVariable* for the one with the wrong type, we must make a + /// new GlobalVariable* and update everything that used GV (a declaration) + // with the new GlobalVariable* (which will be a definition). + // + // This happens if there is a prototype for a global (e.g. "extern int x[];") + // and then a definition of a different type (e.g. "int x[10];"). Start by + // making a new global of the correct type, RAUW, then steal the name. + llvm::GlobalVariable *NewGV = + new llvm::GlobalVariable(Ty, false, llvm::GlobalValue::ExternalLinkage, + 0, D->getName(), &getModule(), 0, + ASTTy.getAddressSpace()); + NewGV->takeName(GV); + + // Replace uses of GV with the globalvalue we will endow with a body. + llvm::Constant *NewPtrForOldDecl = + llvm::ConstantExpr::getBitCast(NewGV, GV->getType()); + GV->replaceAllUsesWith(NewPtrForOldDecl); + + // FIXME: Update the globaldeclmap for the previous decl of this name. We + // really want a way to walk all of these, but we don't have it yet. 
This + // is incredibly slow! + ReplaceMapValuesWith(GV, NewPtrForOldDecl); + + // Verify that GV was a declaration or something like x[] which turns into + // [0 x type]. + assert((GV->isDeclaration() || + IsZeroElementArray(GV->getType()->getElementType())) && + "Shouldn't replace non-declaration"); + + // Ok, delete the old global now, which is dead. + GV->eraseFromParent(); + + // Return the new global which has the right type. + return Entry = NewGV; +} + + +void CodeGenModule::EmitFunction(const FunctionDecl *FD) { + // If this is not a prototype, emit the body. + if (FD->getBody()) + CodeGenFunction(*this).GenerateCode(FD); +} + +llvm::Constant *CodeGenModule::EmitGlobalInit(const Expr *Expr) { + return EmitConstantExpr(Expr); +} + +void CodeGenModule::EmitGlobalVar(const FileVarDecl *D) { + // If this is just a forward declaration of the variable, don't emit it now, + // allow it to be emitted lazily on its first use. + if (D->getStorageClass() == VarDecl::Extern && D->getInit() == 0) + return; + + // Get the global, forcing it to be a direct reference. + llvm::GlobalVariable *GV = + cast<llvm::GlobalVariable>(GetAddrOfGlobalVar(D, true)); + + // Convert the initializer, or use zero if appropriate. + llvm::Constant *Init = 0; + if (D->getInit() == 0) { + Init = llvm::Constant::getNullValue(GV->getType()->getElementType()); + } else if (D->getType()->isIntegerType()) { + llvm::APSInt Value(static_cast<uint32_t>( + getContext().getTypeSize(D->getInit()->getType()))); + if (D->getInit()->isIntegerConstantExpr(Value, Context)) + Init = llvm::ConstantInt::get(Value); + } + + if (!Init) + Init = EmitGlobalInit(D->getInit()); + + assert(GV->getType()->getElementType() == Init->getType() && + "Initializer codegen type mismatch!"); + GV->setInitializer(Init); + + if (const VisibilityAttr *attr = D->getAttr<VisibilityAttr>()) + GV->setVisibility(attr->getVisibility()); + // FIXME: else handle -fvisibility + + // Set the llvm linkage type as appropriate. + if (D->getAttr<DLLImportAttr>()) + GV->setLinkage(llvm::Function::DLLImportLinkage); + else if (D->getAttr<DLLExportAttr>()) + GV->setLinkage(llvm::Function::DLLExportLinkage); + else if (D->getAttr<WeakAttr>()) { + GV->setLinkage(llvm::GlobalVariable::WeakLinkage); + + } else { + // FIXME: This isn't right. This should handle common linkage and other + // stuff. + switch (D->getStorageClass()) { + case VarDecl::Auto: + case VarDecl::Register: + assert(0 && "Can't have auto or register globals"); + case VarDecl::None: + if (!D->getInit()) + GV->setLinkage(llvm::GlobalVariable::WeakLinkage); + break; + case VarDecl::Extern: + case VarDecl::PrivateExtern: + // todo: common + break; + case VarDecl::Static: + GV->setLinkage(llvm::GlobalVariable::InternalLinkage); + break; + } + } +} + +/// EmitGlobalVarDeclarator - Emit all the global vars attached to the specified +/// declarator chain. +void CodeGenModule::EmitGlobalVarDeclarator(const FileVarDecl *D) { + for (; D; D = cast_or_null<FileVarDecl>(D->getNextDeclarator())) + EmitGlobalVar(D); +} + +void CodeGenModule::UpdateCompletedType(const TagDecl *TD) { + // Make sure that this type is translated. + Types.UpdateCompletedType(TD); +} + + +/// getBuiltinLibFunction +llvm::Function *CodeGenModule::getBuiltinLibFunction(unsigned BuiltinID) { + if (BuiltinID > BuiltinFunctions.size()) + BuiltinFunctions.resize(BuiltinID); + + // Cache looked up functions. Since builtin id #0 is invalid we don't reserve + // a slot for it. 
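+ // Builtin ID N is therefore cached at index N-1 of BuiltinFunctions.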
+ assert(BuiltinID && "Invalid Builtin ID"); + llvm::Function *&FunctionSlot = BuiltinFunctions[BuiltinID-1]; + if (FunctionSlot) + return FunctionSlot; + + assert(Context.BuiltinInfo.isLibFunction(BuiltinID) && "isn't a lib fn"); + + // Get the name, skip over the __builtin_ prefix. + const char *Name = Context.BuiltinInfo.GetName(BuiltinID)+10; + + // Get the type for the builtin. + QualType Type = Context.BuiltinInfo.GetBuiltinType(BuiltinID, Context); + const llvm::FunctionType *Ty = + cast<llvm::FunctionType>(getTypes().ConvertType(Type)); + + // FIXME: This has a serious problem with code like this: + // void abs() {} + // ... __builtin_abs(x); + // The two versions of abs will collide. The fix is for the builtin to win, + // and for the existing one to be turned into a constantexpr cast of the + // builtin. In the case where the existing one is a static function, it + // should just be renamed. + if (llvm::Function *Existing = getModule().getFunction(Name)) { + if (Existing->getFunctionType() == Ty && Existing->hasExternalLinkage()) + return FunctionSlot = Existing; + assert(Existing == 0 && "FIXME: Name collision"); + } + + // FIXME: param attributes for sext/zext etc. + return FunctionSlot = new llvm::Function(Ty, llvm::Function::ExternalLinkage, + Name, &getModule()); +} + +llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,const llvm::Type **Tys, + unsigned NumTys) { + return llvm::Intrinsic::getDeclaration(&getModule(), + (llvm::Intrinsic::ID)IID, Tys, NumTys); +} + +llvm::Function *CodeGenModule::getMemCpyFn() { + if (MemCpyFn) return MemCpyFn; + llvm::Intrinsic::ID IID; + switch (Context.Target.getPointerWidth(0)) { + default: assert(0 && "Unknown ptr width"); + case 32: IID = llvm::Intrinsic::memcpy_i32; break; + case 64: IID = llvm::Intrinsic::memcpy_i64; break; + } + return MemCpyFn = getIntrinsic(IID); +} + +llvm::Function *CodeGenModule::getMemSetFn() { + if (MemSetFn) return MemSetFn; + llvm::Intrinsic::ID IID; + switch (Context.Target.getPointerWidth(0)) { + default: assert(0 && "Unknown ptr width"); + case 32: IID = llvm::Intrinsic::memset_i32; break; + case 64: IID = llvm::Intrinsic::memset_i64; break; + } + return MemSetFn = getIntrinsic(IID); +} + +llvm::Constant *CodeGenModule:: +GetAddrOfConstantCFString(const std::string &str) { + llvm::StringMapEntry<llvm::Constant *> &Entry = + CFConstantStringMap.GetOrCreateValue(&str[0], &str[str.length()]); + + if (Entry.getValue()) + return Entry.getValue(); + + std::vector<llvm::Constant*> Fields; + + if (!CFConstantStringClassRef) { + const llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy); + Ty = llvm::ArrayType::get(Ty, 0); + + CFConstantStringClassRef = + new llvm::GlobalVariable(Ty, false, + llvm::GlobalVariable::ExternalLinkage, 0, + "__CFConstantStringClassReference", + &getModule()); + } + + // Class pointer. + llvm::Constant *Zero = llvm::Constant::getNullValue(llvm::Type::Int32Ty); + llvm::Constant *Zeros[] = { Zero, Zero }; + llvm::Constant *C = + llvm::ConstantExpr::getGetElementPtr(CFConstantStringClassRef, Zeros, 2); + Fields.push_back(C); + + // Flags. + const llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy); + Fields.push_back(llvm::ConstantInt::get(Ty, 1992)); + + // String pointer. + C = llvm::ConstantArray::get(str); + C = new llvm::GlobalVariable(C->getType(), true, + llvm::GlobalValue::InternalLinkage, + C, ".str", &getModule()); + + C = llvm::ConstantExpr::getGetElementPtr(C, Zeros, 2); + Fields.push_back(C); + + // String length. 
+ Ty = getTypes().ConvertType(getContext().LongTy); + Fields.push_back(llvm::ConstantInt::get(Ty, str.length())); + + // The struct. + Ty = getTypes().ConvertType(getContext().getCFConstantStringType()); + C = llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Fields); + llvm::GlobalVariable *GV = + new llvm::GlobalVariable(C->getType(), true, + llvm::GlobalVariable::InternalLinkage, + C, "", &getModule()); + GV->setSection("__DATA,__cfstring"); + Entry.setValue(GV); + return GV; +} + +/// GenerateWritableString -- Creates storage for a string literal. +static llvm::Constant *GenerateStringLiteral(const std::string &str, + bool constant, + CodeGenModule &CGM) { + // Create Constant for this string literal + llvm::Constant *C=llvm::ConstantArray::get(str); + + // Create a global variable for this string + C = new llvm::GlobalVariable(C->getType(), constant, + llvm::GlobalValue::InternalLinkage, + C, ".str", &CGM.getModule()); + return C; +} + +/// CodeGenModule::GetAddrOfConstantString -- returns a pointer to the character +/// array containing the literal. The result is pointer to array type. +llvm::Constant *CodeGenModule::GetAddrOfConstantString(const std::string &str) { + // Don't share any string literals if writable-strings is turned on. + if (Features.WritableStrings) + return GenerateStringLiteral(str, false, *this); + + llvm::StringMapEntry<llvm::Constant *> &Entry = + ConstantStringMap.GetOrCreateValue(&str[0], &str[str.length()]); + + if (Entry.getValue()) + return Entry.getValue(); + + // Create a global variable for this. + llvm::Constant *C = GenerateStringLiteral(str, true, *this); + Entry.setValue(C); + return C; +} diff --git a/clang/lib/CodeGen/CodeGenModule.h b/clang/lib/CodeGen/CodeGenModule.h new file mode 100644 index 00000000000..cbea09fd3ec --- /dev/null +++ b/clang/lib/CodeGen/CodeGenModule.h @@ -0,0 +1,129 @@ +//===--- CodeGenModule.h - Per-Module state for LLVM CodeGen --------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This is the internal per-translation-unit state used for llvm translation. +// +//===----------------------------------------------------------------------===// + +#ifndef CLANG_CODEGEN_CODEGENMODULE_H +#define CLANG_CODEGEN_CODEGENMODULE_H + +#include "CodeGenTypes.h" +#include "CGObjCRuntime.h" +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/StringMap.h" + +namespace llvm { + class Module; + class Constant; + class Function; + class GlobalVariable; + class TargetData; +} + +namespace clang { + class ASTContext; + class FunctionDecl; + class Decl; + class Expr; + class Stmt; + class ValueDecl; + class VarDecl; + class TypeDecl; + class FileVarDecl; + struct LangOptions; + class Diagnostic; + +namespace CodeGen { + + class CodeGenFunction; + +/// CodeGenModule - This class organizes the cross-module state that is used +/// while generating LLVM code. 
+class CodeGenModule { + ASTContext &Context; + const LangOptions &Features; + llvm::Module &TheModule; + const llvm::TargetData &TheTargetData; + Diagnostic &Diags; + CodeGenTypes Types; + CGObjCRuntime *Runtime; + + llvm::Function *MemCpyFn; + llvm::Function *MemSetFn; + llvm::DenseMap<const Decl*, llvm::Constant*> GlobalDeclMap; + std::vector<llvm::Constant*> GlobalCtors; + + llvm::StringMap<llvm::Constant*> CFConstantStringMap; + llvm::StringMap<llvm::Constant*> ConstantStringMap; + llvm::Constant *CFConstantStringClassRef; + + std::vector<llvm::Function *> BuiltinFunctions; +public: + CodeGenModule(ASTContext &C, const LangOptions &Features, llvm::Module &M, + const llvm::TargetData &TD, Diagnostic &Diags); + ~CodeGenModule(); + + CGObjCRuntime *getObjCRuntime() { return Runtime; } + ASTContext &getContext() const { return Context; } + const LangOptions &getLangOptions() const { return Features; } + llvm::Module &getModule() const { return TheModule; } + CodeGenTypes &getTypes() { return Types; } + Diagnostic &getDiags() const { return Diags; } + const llvm::TargetData &getTargetData() const { return TheTargetData; } + + llvm::Constant *GetAddrOfFunctionDecl(const FunctionDecl *D, + bool isDefinition); + llvm::Constant *GetAddrOfGlobalVar(const VarDecl *D, bool isDefinition); + + + /// getBuiltinLibFunction - Given a builtin id for a function like + /// "__builtin_fabsf", return a Function* for "fabsf". + /// + llvm::Function *getBuiltinLibFunction(unsigned BuiltinID); + llvm::Constant *GetAddrOfConstantCFString(const std::string& str); + + /// GetAddrOfConstantString -- returns a pointer to the character + /// array containing the literal. The result is pointer to array type. + llvm::Constant *GetAddrOfConstantString(const std::string& str); + llvm::Function *getMemCpyFn(); + llvm::Function *getMemSetFn(); + llvm::Function *getIntrinsic(unsigned IID, const llvm::Type **Tys = 0, + unsigned NumTys = 0); + + void AddGlobalCtor(llvm::Function * Ctor); + void EmitGlobalCtors(void); + + void EmitFunction(const FunctionDecl *FD); + void EmitGlobalVar(const FileVarDecl *D); + void EmitGlobalVarDeclarator(const FileVarDecl *D); + void UpdateCompletedType(const TagDecl *D); + llvm::Constant *EmitGlobalInit(const Expr *E); + llvm::Constant *EmitConstantExpr(const Expr *E, CodeGenFunction *CGF = 0); + + /// WarnUnsupported - Print out a warning that codegen doesn't support the + /// specified stmt yet. + + void WarnUnsupported(const Stmt *S, const char *Type); + + /// WarnUnsupported - Print out a warning that codegen doesn't support the + /// specified decl yet. + void WarnUnsupported(const Decl *D, const char *Type); + +private: + /// ReplaceMapValuesWith - This is a really slow and bad function that + /// searches for any entries in GlobalDeclMap that point to OldVal, changing + /// them to point to NewVal. This is badbadbad, FIXME! + void ReplaceMapValuesWith(llvm::Constant *OldVal, llvm::Constant *NewVal); + +}; +} // end namespace CodeGen +} // end namespace clang + +#endif diff --git a/clang/lib/CodeGen/CodeGenTypes.cpp b/clang/lib/CodeGen/CodeGenTypes.cpp new file mode 100644 index 00000000000..9a669e87056 --- /dev/null +++ b/clang/lib/CodeGen/CodeGenTypes.cpp @@ -0,0 +1,580 @@ +//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +// +// This is the code that handles AST -> LLVM type lowering. +// +//===----------------------------------------------------------------------===// + +#include "CodeGenTypes.h" +#include "clang/Basic/TargetInfo.h" +#include "clang/AST/AST.h" +#include "llvm/DerivedTypes.h" +#include "llvm/Module.h" +#include "llvm/Target/TargetData.h" + +using namespace clang; +using namespace CodeGen; + +namespace { + /// RecordOrganizer - This helper class, used by CGRecordLayout, layouts + /// structs and unions. It manages transient information used during layout. + /// FIXME : Handle field aligments. Handle packed structs. + class RecordOrganizer { + public: + explicit RecordOrganizer(CodeGenTypes &Types) : + CGT(Types), STy(NULL), llvmFieldNo(0), Cursor(0), + llvmSize(0) {} + + /// addField - Add new field. + void addField(const FieldDecl *FD); + + /// addLLVMField - Add llvm struct field that corresponds to llvm type Ty. + /// Increment field count. + void addLLVMField(const llvm::Type *Ty, bool isPaddingField = false); + + /// addPaddingFields - Current cursor is not suitable place to add next + /// field. Add required padding fields. + void addPaddingFields(unsigned WaterMark); + + /// layoutStructFields - Do the actual work and lay out all fields. Create + /// corresponding llvm struct type. This should be invoked only after + /// all fields are added. + void layoutStructFields(const ASTRecordLayout &RL); + + /// layoutUnionFields - Do the actual work and lay out all fields. Create + /// corresponding llvm struct type. This should be invoked only after + /// all fields are added. + void layoutUnionFields(); + + /// getLLVMType - Return associated llvm struct type. This may be NULL + /// if fields are not laid out. + llvm::Type *getLLVMType() const { + return STy; + } + + /// placeBitField - Find a place for FD, which is a bit-field. + void placeBitField(const FieldDecl *FD); + + llvm::SmallSet<unsigned, 8> &getPaddingFields() { + return PaddingFields; + } + + private: + CodeGenTypes &CGT; + llvm::Type *STy; + unsigned llvmFieldNo; + uint64_t Cursor; + uint64_t llvmSize; + llvm::SmallVector<const FieldDecl *, 8> FieldDecls; + std::vector<const llvm::Type*> LLVMFields; + llvm::SmallSet<unsigned, 8> PaddingFields; + }; +} + +CodeGenTypes::CodeGenTypes(ASTContext &Ctx, llvm::Module& M, + const llvm::TargetData &TD) + : Context(Ctx), Target(Ctx.Target), TheModule(M), TheTargetData(TD) { +} + +CodeGenTypes::~CodeGenTypes() { + for(llvm::DenseMap<const TagDecl *, CGRecordLayout *>::iterator + I = CGRecordLayouts.begin(), E = CGRecordLayouts.end(); + I != E; ++I) + delete I->second; + CGRecordLayouts.clear(); +} + +/// ConvertType - Convert the specified type to its LLVM form. +const llvm::Type *CodeGenTypes::ConvertType(QualType T) { + // See if type is already cached. + llvm::DenseMap<Type *, llvm::PATypeHolder>::iterator + I = TypeCache.find(T.getCanonicalType().getTypePtr()); + // If type is found in map and this is not a definition for a opaque + // place holder type then use it. Otherwise, convert type T. + if (I != TypeCache.end()) + return I->second.get(); + + const llvm::Type *ResultType = ConvertNewType(T); + TypeCache.insert(std::make_pair(T.getCanonicalType().getTypePtr(), + llvm::PATypeHolder(ResultType))); + return ResultType; +} + +/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from +/// ConvertType in that it is used to convert to the memory representation for +/// a type. 
For example, the scalar representation for _Bool is i1, but the +/// memory representation is usually i8 or i32, depending on the target. +const llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T) { + const llvm::Type *R = ConvertType(T); + + // If this is a non-bool type, don't map it. + if (R != llvm::Type::Int1Ty) + return R; + + // Otherwise, return an integer of the target-specified size. + return llvm::IntegerType::get((unsigned)Context.getTypeSize(T)); + +} + +/// UpdateCompletedType - When we find the full definition for a TagDecl, +/// replace the 'opaque' type we previously made for it if applicable. +void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) { + llvm::DenseMap<const TagDecl*, llvm::PATypeHolder>::iterator TDTI = + TagDeclTypes.find(TD); + if (TDTI == TagDeclTypes.end()) return; + + // Remember the opaque LLVM type for this tagdecl. + llvm::PATypeHolder OpaqueHolder = TDTI->second; + assert(isa<llvm::OpaqueType>(OpaqueHolder.get()) && + "Updating compilation of an already non-opaque type?"); + + // Remove it from TagDeclTypes so that it will be regenerated. + TagDeclTypes.erase(TDTI); + + // Generate the new type. + const llvm::Type *NT = ConvertTagDeclType(TD); + + // Refine the old opaque type to its new definition. + cast<llvm::OpaqueType>(OpaqueHolder.get())->refineAbstractTypeTo(NT); +} + + + +const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) { + const clang::Type &Ty = *T.getCanonicalType(); + + switch (Ty.getTypeClass()) { + case Type::TypeName: // typedef isn't canonical. + case Type::TypeOfExp: // typeof isn't canonical. + case Type::TypeOfTyp: // typeof isn't canonical. + assert(0 && "Non-canonical type, shouldn't happen"); + case Type::Builtin: { + switch (cast<BuiltinType>(Ty).getKind()) { + case BuiltinType::Void: + // LLVM void type can only be used as the result of a function call. Just + // map to the same as char. + return llvm::IntegerType::get(8); + + case BuiltinType::Bool: + // Note that we always return bool as i1 for use as a scalar type. + return llvm::Type::Int1Ty; + + case BuiltinType::Char_S: + case BuiltinType::Char_U: + case BuiltinType::SChar: + case BuiltinType::UChar: + case BuiltinType::Short: + case BuiltinType::UShort: + case BuiltinType::Int: + case BuiltinType::UInt: + case BuiltinType::Long: + case BuiltinType::ULong: + case BuiltinType::LongLong: + case BuiltinType::ULongLong: + return llvm::IntegerType::get( + static_cast<unsigned>(Context.getTypeSize(T))); + + case BuiltinType::Float: return llvm::Type::FloatTy; + case BuiltinType::Double: return llvm::Type::DoubleTy; + case BuiltinType::LongDouble: + // FIXME: mapping long double onto double. 
+ return llvm::Type::DoubleTy; + } + break; + } + case Type::Complex: { + std::vector<const llvm::Type*> Elts; + Elts.push_back(ConvertType(cast<ComplexType>(Ty).getElementType())); + Elts.push_back(Elts[0]); + return llvm::StructType::get(Elts); + } + case Type::Pointer: { + const PointerType &P = cast<PointerType>(Ty); + QualType ETy = P.getPointeeType(); + return llvm::PointerType::get(ConvertType(ETy), ETy.getAddressSpace()); + } + case Type::Reference: { + const ReferenceType &R = cast<ReferenceType>(Ty); + return llvm::PointerType::getUnqual(ConvertType(R.getReferenceeType())); + } + + case Type::VariableArray: { + const VariableArrayType &A = cast<VariableArrayType>(Ty); + assert(A.getIndexTypeQualifier() == 0 && + "FIXME: We only handle trivial array types so far!"); + // VLAs resolve to the innermost element type; this matches + // the return of alloca, and there isn't any obviously better choice. + return ConvertType(A.getElementType()); + } + case Type::IncompleteArray: { + const IncompleteArrayType &A = cast<IncompleteArrayType>(Ty); + assert(A.getIndexTypeQualifier() == 0 && + "FIXME: We only handle trivial array types so far!"); + // int X[] -> [0 x int] + return llvm::ArrayType::get(ConvertType(A.getElementType()), 0); + } + case Type::ConstantArray: { + const ConstantArrayType &A = cast<ConstantArrayType>(Ty); + const llvm::Type *EltTy = ConvertType(A.getElementType()); + return llvm::ArrayType::get(EltTy, A.getSize().getZExtValue()); + } + case Type::OCUVector: + case Type::Vector: { + const VectorType &VT = cast<VectorType>(Ty); + return llvm::VectorType::get(ConvertType(VT.getElementType()), + VT.getNumElements()); + } + case Type::FunctionNoProto: + case Type::FunctionProto: { + const FunctionType &FP = cast<FunctionType>(Ty); + const llvm::Type *ResultType; + + if (FP.getResultType()->isVoidType()) + ResultType = llvm::Type::VoidTy; // Result of function uses llvm void. + else + ResultType = ConvertType(FP.getResultType()); + + // FIXME: Convert argument types. + bool isVarArg; + std::vector<const llvm::Type*> ArgTys; + + // Struct return passes the struct byref. + if (!ResultType->isFirstClassType() && ResultType != llvm::Type::VoidTy) { + ArgTys.push_back(llvm::PointerType::get(ResultType, + FP.getResultType().getAddressSpace())); + ResultType = llvm::Type::VoidTy; + } + + if (const FunctionTypeProto *FTP = dyn_cast<FunctionTypeProto>(&FP)) { + DecodeArgumentTypes(*FTP, ArgTys); + isVarArg = FTP->isVariadic(); + } else { + isVarArg = true; + } + + return llvm::FunctionType::get(ResultType, ArgTys, isVarArg); + } + + case Type::ASQual: + return ConvertType(QualType(cast<ASQualType>(Ty).getBaseType(), 0)); + + case Type::ObjCInterface: + assert(0 && "FIXME: add missing functionality here"); + break; + + case Type::ObjCQualifiedInterface: + assert(0 && "FIXME: add missing functionality here"); + break; + + case Type::ObjCQualifiedId: + assert(0 && "FIXME: add missing functionality here"); + break; + + case Type::Tagged: { + const TagDecl *TD = cast<TagType>(Ty).getDecl(); + const llvm::Type *Res = ConvertTagDeclType(TD); + + std::string TypeName(TD->getKindName()); + TypeName += '.'; + + // Name the codegen type after the typedef name + // if there is no tag type name available + if (TD->getIdentifier()) + TypeName += TD->getName(); + else if (const TypedefType *TdT = dyn_cast<TypedefType>(T)) + TypeName += TdT->getDecl()->getName(); + else + TypeName += "anon"; + + TheModule.addTypeName(TypeName, Res); + return Res; + } + } + + // FIXME: implement. 
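+ // Types not handled above fall through to an opaque LLVM type so that
+ // codegen can keep going.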
+ return llvm::OpaqueType::get(); +} + +void CodeGenTypes::DecodeArgumentTypes(const FunctionTypeProto &FTP, + std::vector<const llvm::Type*> &ArgTys) { + for (unsigned i = 0, e = FTP.getNumArgs(); i != e; ++i) { + const llvm::Type *Ty = ConvertType(FTP.getArgType(i)); + if (Ty->isFirstClassType()) + ArgTys.push_back(Ty); + else + // byval arguments are always on the stack, which is addr space #0. + ArgTys.push_back(llvm::PointerType::getUnqual(Ty)); + } +} + +/// ConvertTagDeclType - Lay out a tagged decl type like struct or union or +/// enum. +const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) { + llvm::DenseMap<const TagDecl*, llvm::PATypeHolder>::iterator TDTI = + TagDeclTypes.find(TD); + + // If we've already compiled this tag type, use the previous definition. + if (TDTI != TagDeclTypes.end()) + return TDTI->second; + + // If this is still a forward definition, just define an opaque type to use + // for this tagged decl. + if (!TD->isDefinition()) { + llvm::Type *ResultType = llvm::OpaqueType::get(); + TagDeclTypes.insert(std::make_pair(TD, ResultType)); + return ResultType; + } + + // Okay, this is a definition of a type. Compile the implementation now. + + if (TD->getKind() == Decl::Enum) { + // Don't bother storing enums in TagDeclTypes. + return ConvertType(cast<EnumDecl>(TD)->getIntegerType()); + } + + // This decl could well be recursive. In this case, insert an opaque + // definition of this type, which the recursive uses will get. We will then + // refine this opaque version later. + + // Create new OpaqueType now for later use in case this is a recursive + // type. This will later be refined to the actual type. + llvm::PATypeHolder ResultHolder = llvm::OpaqueType::get(); + TagDeclTypes.insert(std::make_pair(TD, ResultHolder)); + + const llvm::Type *ResultType; + const RecordDecl *RD = cast<const RecordDecl>(TD); + if (TD->getKind() == Decl::Struct || TD->getKind() == Decl::Class) { + // Layout fields. + RecordOrganizer RO(*this); + for (unsigned i = 0, e = RD->getNumMembers(); i != e; ++i) + RO.addField(RD->getMember(i)); + + RO.layoutStructFields(Context.getASTRecordLayout(RD)); + + // Get llvm::StructType. + CGRecordLayouts[TD] = new CGRecordLayout(RO.getLLVMType(), + RO.getPaddingFields()); + ResultType = RO.getLLVMType(); + + } else if (TD->getKind() == Decl::Union) { + // Just use the largest element of the union, breaking ties with the + // highest aligned member. + if (RD->getNumMembers() != 0) { + RecordOrganizer RO(*this); + for (unsigned i = 0, e = RD->getNumMembers(); i != e; ++i) + RO.addField(RD->getMember(i)); + + RO.layoutUnionFields(); + + // Get llvm::StructType. + CGRecordLayouts[TD] = new CGRecordLayout(RO.getLLVMType(), + RO.getPaddingFields()); + ResultType = RO.getLLVMType(); + } else { + ResultType = llvm::StructType::get(std::vector<const llvm::Type*>()); + } + } else { + assert(0 && "FIXME: Unknown tag decl kind!"); + } + + // Refine our Opaque type to ResultType. This can invalidate ResultType, so + // make sure to read the result out of the holder. + cast<llvm::OpaqueType>(ResultHolder.get()) + ->refineAbstractTypeTo(ResultType); + + return ResultHolder.get(); +} + +/// getLLVMFieldNo - Return llvm::StructType element number +/// that corresponds to the field FD. 
+unsigned CodeGenTypes::getLLVMFieldNo(const FieldDecl *FD) {
+  llvm::DenseMap<const FieldDecl *, unsigned>::iterator
+    I = FieldInfo.find(FD);
+  assert (I != FieldInfo.end() && "Unable to find field info");
+  return I->second;
+}
+
+/// addFieldInfo - Assign field number to field FD.
+void CodeGenTypes::addFieldInfo(const FieldDecl *FD, unsigned No) {
+  FieldInfo[FD] = No;
+}
+
+/// getBitFieldInfo - Return the BitFieldInfo that corresponds to the field FD.
+CodeGenTypes::BitFieldInfo CodeGenTypes::getBitFieldInfo(const FieldDecl *FD) {
+  llvm::DenseMap<const FieldDecl *, BitFieldInfo>::iterator
+    I = BitFields.find(FD);
+  assert (I != BitFields.end() && "Unable to find bitfield info");
+  return I->second;
+}
+
+/// addBitFieldInfo - Assign a start bit and a size to field FD.
+void CodeGenTypes::addBitFieldInfo(const FieldDecl *FD, unsigned Begin,
+                                   unsigned Size) {
+  BitFields.insert(std::make_pair(FD, BitFieldInfo(Begin, Size)));
+}
+
+/// getCGRecordLayout - Return record layout info for the given TagDecl.
+const CGRecordLayout *
+CodeGenTypes::getCGRecordLayout(const TagDecl *TD) const {
+  llvm::DenseMap<const TagDecl*, CGRecordLayout *>::iterator I
+    = CGRecordLayouts.find(TD);
+  assert (I != CGRecordLayouts.end()
+          && "Unable to find record layout information for type");
+  return I->second;
+}
+
+/// addField - Add new field.
+void RecordOrganizer::addField(const FieldDecl *FD) {
+  assert (!STy && "Record fields are already laid out");
+  FieldDecls.push_back(FD);
+}
+
+/// layoutStructFields - Do the actual work and lay out all fields. Create
+/// corresponding llvm struct type. This should be invoked only after
+/// all fields are added.
+/// FIXME : At the moment assume
+///    - one to one mapping between AST FieldDecls and
+///      llvm::StructType elements.
+///    - Ignore bit fields
+///    - Ignore field alignments
+///    - Ignore packed structs
+void RecordOrganizer::layoutStructFields(const ASTRecordLayout &RL) {
+  // FIXME : Use SmallVector
+  llvmSize = 0;
+  llvmFieldNo = 0;
+  Cursor = 0;
+  LLVMFields.clear();
+
+  for (llvm::SmallVector<const FieldDecl *, 8>::iterator I = FieldDecls.begin(),
+       E = FieldDecls.end(); I != E; ++I) {
+    const FieldDecl *FD = *I;
+
+    if (FD->isBitField())
+      placeBitField(FD);
+    else {
+      const llvm::Type *Ty = CGT.ConvertType(FD->getType());
+      addLLVMField(Ty);
+      CGT.addFieldInfo(FD, llvmFieldNo - 1);
+      Cursor = llvmSize;
+    }
+  }
+
+  unsigned StructAlign = RL.getAlignment();
+  if (llvmSize % StructAlign) {
+    unsigned StructPadding = StructAlign - (llvmSize % StructAlign);
+    addPaddingFields(llvmSize + StructPadding);
+  }
+
+  STy = llvm::StructType::get(LLVMFields);
+}
+
+/// addPaddingFields - The current cursor is not a suitable place to add the
+/// next field. Add the required padding fields.
+void RecordOrganizer::addPaddingFields(unsigned WaterMark) {
+  assert(WaterMark >= llvmSize && "Invalid padding Field");
+  unsigned RequiredBits = WaterMark - llvmSize;
+  unsigned RequiredBytes = (RequiredBits + 7) / 8;
+  for (unsigned i = 0; i != RequiredBytes; ++i)
+    addLLVMField(llvm::Type::Int8Ty, true);
+}
+
+/// addLLVMField - Add llvm struct field that corresponds to llvm type Ty.
+/// Increment field count.
+void RecordOrganizer::addLLVMField(const llvm::Type *Ty, bool isPaddingField) {
+
+  unsigned AlignmentInBits = CGT.getTargetData().getABITypeAlignment(Ty) * 8;
+  if (llvmSize % AlignmentInBits) {
+    // At the moment, insert padding fields even if target specific llvm
+    // type alignment enforces implicit padding fields for FD. Later on,
+    // optimize llvm fields by removing implicit padding fields and
+    // combining consecutive padding fields.
+    unsigned Padding = AlignmentInBits - (llvmSize % AlignmentInBits);
+    addPaddingFields(llvmSize + Padding);
+  }
+
+  unsigned TySize = CGT.getTargetData().getABITypeSizeInBits(Ty);
+  llvmSize += TySize;
+  if (isPaddingField)
+    PaddingFields.insert(llvmFieldNo);
+  LLVMFields.push_back(Ty);
+  ++llvmFieldNo;
+}
+
+/// layoutUnionFields - Do the actual work and lay out all fields. Create
+/// corresponding llvm struct type. This should be invoked only after
+/// all fields are added.
+void RecordOrganizer::layoutUnionFields() {
+
+  unsigned PrimaryEltNo = 0;
+  std::pair<uint64_t, unsigned> PrimaryElt =
+    CGT.getContext().getTypeInfo(FieldDecls[0]->getType());
+  CGT.addFieldInfo(FieldDecls[0], 0);
+
+  unsigned Size = FieldDecls.size();
+  for(unsigned i = 1; i != Size; ++i) {
+    const FieldDecl *FD = FieldDecls[i];
+    assert (!FD->isBitField() && "Bit fields are not yet supported");
+    std::pair<uint64_t, unsigned> EltInfo =
+      CGT.getContext().getTypeInfo(FD->getType());
+
+    // Use the largest element, breaking ties with the highest aligned member.
+    if (EltInfo.first > PrimaryElt.first ||
+        (EltInfo.first == PrimaryElt.first &&
+         EltInfo.second > PrimaryElt.second)) {
+      PrimaryElt = EltInfo;
+      PrimaryEltNo = i;
+    }
+
+    // In a union, each field gets the first slot.
+    CGT.addFieldInfo(FD, 0);
+  }
+
+  std::vector<const llvm::Type*> Fields;
+  const llvm::Type *Ty = CGT.ConvertType(FieldDecls[PrimaryEltNo]->getType());
+  Fields.push_back(Ty);
+  STy = llvm::StructType::get(Fields);
+}
+
+/// placeBitField - Find a place for FD, which is a bit-field.
+/// This function searches for the last aligned field. If the bit-field fits in
+/// it, it is reused. Otherwise, the bit-field is placed in a new field.
+void RecordOrganizer::placeBitField(const FieldDecl *FD) {
+
+  assert (FD->isBitField() && "FD is not a bit-field");
+  Expr *BitWidth = FD->getBitWidth();
+  llvm::APSInt FieldSize(32);
+  bool isBitField =
+    BitWidth->isIntegerConstantExpr(FieldSize, CGT.getContext());
+  assert (isBitField && "Invalid BitField size expression");
+  uint64_t BitFieldSize = FieldSize.getZExtValue();
+
+  const llvm::Type *Ty = CGT.ConvertType(FD->getType());
+  uint64_t TySize = CGT.getTargetData().getABITypeSizeInBits(Ty);
+
+  unsigned Idx = Cursor / TySize;
+  unsigned BitsLeft = TySize - (Cursor % TySize);
+
+  if (BitsLeft >= BitFieldSize) {
+    // The bitfield fits in the last aligned field.
+    // This is : struct { char a; int CurrentField:10;};
+    // where 'CurrentField' shares the first field with 'a'.
+    CGT.addFieldInfo(FD, Idx);
+    CGT.addBitFieldInfo(FD, TySize - BitsLeft, BitFieldSize);
+    Cursor += BitFieldSize;
+  } else {
+    // Place the bitfield in a new LLVM field.
+    // This is : struct { char a; short CurrentField:10;};
+    // where 'CurrentField' needs a new llvm field.
+    CGT.addFieldInfo(FD, Idx + 1);
+    CGT.addBitFieldInfo(FD, 0, BitFieldSize);
+    Cursor = (Idx + 1) * TySize + BitFieldSize;
+  }
+  if (Cursor > llvmSize)
+    addPaddingFields(Cursor);
+}
diff --git a/clang/lib/CodeGen/CodeGenTypes.h b/clang/lib/CodeGen/CodeGenTypes.h
new file mode 100644
index 00000000000..08a2467106a
--- /dev/null
+++ b/clang/lib/CodeGen/CodeGenTypes.h
@@ -0,0 +1,165 @@
+//===--- CodeGenTypes.h - Type translation for LLVM CodeGen -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that handles AST -> LLVM type lowering.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENTYPES_H
+#define CLANG_CODEGEN_CODEGENTYPES_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include <vector>
+
+namespace llvm {
+  class Module;
+  class Type;
+  class PATypeHolder;
+  class TargetData;
+}
+
+namespace clang {
+  class ASTContext;
+  class TagDecl;
+  class TargetInfo;
+  class QualType;
+  class Type;
+  class FunctionTypeProto;
+  class FieldDecl;
+  class RecordDecl;
+
+namespace CodeGen {
+  class CodeGenTypes;
+
+  /// CGRecordLayout - This class handles struct and union layout info while
+  /// lowering AST types to LLVM types.
+  class CGRecordLayout {
+    CGRecordLayout(); // DO NOT IMPLEMENT
+  public:
+    CGRecordLayout(llvm::Type *T, llvm::SmallSet<unsigned, 8> &PF)
+      : STy(T), PaddingFields(PF) {
+      // FIXME : Collect info about fields that require adjustments
+      // (i.e. fields that do not directly map to llvm struct fields).
+    }
+
+    /// getLLVMType - Return the llvm type associated with this record.
+    llvm::Type *getLLVMType() const {
+      return STy;
+    }
+
+    bool isPaddingField(unsigned No) const {
+      return PaddingFields.count(No) != 0;
+    }
+
+    unsigned getNumPaddingFields() {
+      return PaddingFields.size();
+    }
+
+  private:
+    llvm::Type *STy;
+    llvm::SmallSet<unsigned, 8> PaddingFields;
+  };
+
+/// CodeGenTypes - This class organizes the cross-module state that is used
+/// while lowering AST types to LLVM types.
+class CodeGenTypes {
+  ASTContext &Context;
+  TargetInfo &Target;
+  llvm::Module& TheModule;
+  const llvm::TargetData& TheTargetData;
+
+  llvm::DenseMap<const TagDecl*, llvm::PATypeHolder> TagDeclTypes;
+
+  /// CGRecordLayouts - This maps a TagDecl to its corresponding record
+  /// layout info.
+  /// FIXME : If CGRecordLayout is less than 16 bytes then inline it in
+  /// the map.
+  llvm::DenseMap<const TagDecl*, CGRecordLayout *> CGRecordLayouts;
+
+  /// FieldInfo - This maps a struct field to its corresponding llvm struct
+  /// type field number. This info is populated by the record organizer.
+  llvm::DenseMap<const FieldDecl *, unsigned> FieldInfo;
+
+public:
+  class BitFieldInfo {
+  public:
+    explicit BitFieldInfo(unsigned short B, unsigned short S)
+      : Begin(B), Size(S) {}
+
+    unsigned short Begin;
+    unsigned short Size;
+  };
+
+private:
+  llvm::DenseMap<const FieldDecl *, BitFieldInfo> BitFields;
+
+  /// TypeCache - This map keeps a cache of llvm::Types (through PATypeHolder),
+  /// mapping each clang::Type to its corresponding llvm::Type.
+  /// llvm::PATypeHolder is used instead of llvm::Type because it allows us to
+  /// bypass potential dangling type pointers due to type refinement on the
+  /// llvm side.
+  llvm::DenseMap<Type *, llvm::PATypeHolder> TypeCache;
+
+  /// ConvertNewType - Convert type T into a llvm::Type. Do not use this
+  /// method directly because it does not do any type caching; it is available
+  /// only to ConvertType(). ConvertType() is the preferred interface for
+  /// converting type T into a llvm::Type.
+  const llvm::Type *ConvertNewType(QualType T);
+public:
+  CodeGenTypes(ASTContext &Ctx, llvm::Module &M, const llvm::TargetData &TD);
+  ~CodeGenTypes();
+
+  const llvm::TargetData &getTargetData() const { return TheTargetData; }
+  TargetInfo &getTarget() const { return Target; }
+  ASTContext &getContext() const { return Context; }
+
+  /// ConvertType - Convert type T into a llvm::Type.
+  const llvm::Type *ConvertType(QualType T);
+
+  /// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
+  /// ConvertType in that it is used to convert to the memory representation
+  /// for a type. For example, the scalar representation for _Bool is i1, but
+  /// the memory representation is usually i8 or i32, depending on the target.
+  const llvm::Type *ConvertTypeForMem(QualType T);
+
+
+  const CGRecordLayout *getCGRecordLayout(const TagDecl*) const;
+
+  /// getLLVMFieldNo - Return llvm::StructType element number
+  /// that corresponds to the field FD.
+  unsigned getLLVMFieldNo(const FieldDecl *FD);
+
+
+  /// UpdateCompletedType - When we find the full definition for a TagDecl,
+  /// replace the 'opaque' type we previously made for it if applicable.
+  void UpdateCompletedType(const TagDecl *TD);
+
+public: // These are internal details of CGT that shouldn't be used externally.
+  void DecodeArgumentTypes(const FunctionTypeProto &FTP,
+                           std::vector<const llvm::Type*> &ArgTys);
+
+  /// addFieldInfo - Assign field number to field FD.
+  void addFieldInfo(const FieldDecl *FD, unsigned No);
+
+  /// addBitFieldInfo - Assign a start bit and a size to field FD.
+  void addBitFieldInfo(const FieldDecl *FD, unsigned Begin, unsigned Size);
+
+  /// getBitFieldInfo - Return the BitFieldInfo that corresponds to the field
+  /// FD.
+  BitFieldInfo getBitFieldInfo(const FieldDecl *FD);
+
+  /// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
+  /// enum.
+  const llvm::Type *ConvertTagDeclType(const TagDecl *TD);
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/clang/lib/CodeGen/Makefile b/clang/lib/CodeGen/Makefile
new file mode 100644
index 00000000000..4d7828ef67a
--- /dev/null
+++ b/clang/lib/CodeGen/Makefile
@@ -0,0 +1,23 @@
+##===- clang/lib/CodeGen/Makefile --------------------------*- Makefile -*-===##
+#
+# The LLVM Compiler Infrastructure
+#
+# This file is distributed under the University of Illinois Open Source
+# License. See LICENSE.TXT for details.
+#
+##===----------------------------------------------------------------------===##
+#
+# This implements the AST -> LLVM code generation library for the
+# C-Language front-end.
+#
+##===----------------------------------------------------------------------===##
+
+LEVEL = ../../../..
+LIBRARYNAME := clangCodeGen
+BUILD_ARCHIVE = 1
+CXXFLAGS = -fno-rtti
+
+CPPFLAGS += -I$(PROJ_SRC_DIR)/../../include
+
+include $(LEVEL)/Makefile.common
+
diff --git a/clang/lib/CodeGen/ModuleBuilder.cpp b/clang/lib/CodeGen/ModuleBuilder.cpp
new file mode 100644
index 00000000000..06467488a5e
--- /dev/null
+++ b/clang/lib/CodeGen/ModuleBuilder.cpp
@@ -0,0 +1,104 @@
+//===--- ModuleBuilder.cpp - Emit LLVM Code from ASTs ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This builds an AST and converts it to LLVM Code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// LLVM Emitter
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace {
+  class CodeGenerator : public ASTConsumer {
+    Diagnostic &Diags;
+    const llvm::TargetData *TD;
+    ASTContext *Ctx;
+    const LangOptions &Features;
+  protected:
+    llvm::Module *&M;
+    CodeGen::CodeGenModule *Builder;
+  public:
+    CodeGenerator(Diagnostic &diags, const LangOptions &LO,
+                  llvm::Module *&DestModule)
+      : Diags(diags), Features(LO), M(DestModule) {}
+
+    ~CodeGenerator() {
+      delete Builder;
+    }
+
+    virtual void Initialize(ASTContext &Context) {
+      Ctx = &Context;
+
+      M->setTargetTriple(Ctx->Target.getTargetTriple());
+      M->setDataLayout(Ctx->Target.getTargetDescription());
+      TD = new llvm::TargetData(Ctx->Target.getTargetDescription());
+      Builder = new CodeGen::CodeGenModule(Context, Features, *M, *TD, Diags);
+    }
+
+    virtual void HandleTopLevelDecl(Decl *D) {
+      // If an error occurred, stop code generation, but continue parsing and
+      // semantic analysis (to ensure all warnings and errors are emitted).
+      if (Diags.hasErrorOccurred())
+        return;
+
+      if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+        Builder->EmitFunction(FD);
+      } else if (FileVarDecl *FVD = dyn_cast<FileVarDecl>(D)) {
+        Builder->EmitGlobalVarDeclarator(FVD);
+      } else if (LinkageSpecDecl *LSD = dyn_cast<LinkageSpecDecl>(D)) {
+        if (LSD->getLanguage() == LinkageSpecDecl::lang_cxx)
+          Builder->WarnUnsupported(LSD, "linkage spec");
+        // FIXME: implement C++ linkage, C linkage works mostly by C
+        // language reuse already.
+      } else if (FileScopeAsmDecl *AD = dyn_cast<FileScopeAsmDecl>(D)) {
+        std::string AsmString(AD->getAsmString()->getStrData(),
+                              AD->getAsmString()->getByteLength());
+
+        const std::string &S = Builder->getModule().getModuleInlineAsm();
+        if (S.empty())
+          Builder->getModule().setModuleInlineAsm(AsmString);
+        else
+          Builder->getModule().setModuleInlineAsm(S + '\n' + AsmString);
+      } else {
+        assert(isa<TypeDecl>(D) && "Unknown top level decl");
+        // TODO: handle debug info?
+      }
+    }
+
+    /// HandleTagDeclDefinition - This callback is invoked each time a TagDecl
+    /// (e.g. struct, union, enum, class) is completed. This allows the client
+    /// to hack on the type, which can occur at any point in the file (because
+    /// these can be defined in declspecs).
+    virtual void HandleTagDeclDefinition(TagDecl *D) {
+      Builder->UpdateCompletedType(D);
+    }
+
+  };
+}
+
+ASTConsumer *clang::CreateLLVMCodeGen(Diagnostic &Diags,
+                                      const LangOptions &Features,
+                                      llvm::Module *&DestModule) {
+  return new CodeGenerator(Diags, Features, DestModule);
+}
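As an aside, the cursor arithmetic in RecordOrganizer::placeBitField can be exercised in isolation. The sketch below is not part of the patch; the names PackedField and packBitFields are invented for illustration, and a fixed 32-bit storage unit stands in for the converted LLVM type whose size the real code queries through TargetData. It mirrors only the reuse-or-new-unit decision: a bit-field is packed into the unit the cursor currently points into when enough bits remain, and otherwise starts at bit 0 of the next unit.

// Standalone illustration only -- not part of the patch above. A fixed
// 32-bit unit is assumed instead of the bit-field's converted LLVM type.
#include <cstdint>
#include <iostream>
#include <vector>

struct PackedField {
  unsigned Unit;   // storage unit the bit-field lands in
  unsigned Begin;  // first bit within that unit
  unsigned Size;   // width in bits
};

std::vector<PackedField> packBitFields(const std::vector<unsigned> &Widths,
                                       unsigned UnitBits = 32) {
  std::vector<PackedField> Result;
  uint64_t Cursor = 0;  // running bit offset, like RecordOrganizer::Cursor
  for (unsigned W : Widths) {
    unsigned Idx = Cursor / UnitBits;
    unsigned BitsLeft = UnitBits - (Cursor % UnitBits);
    if (BitsLeft >= W) {
      // The field fits in the unit the cursor currently points into.
      Result.push_back({Idx, UnitBits - BitsLeft, W});
      Cursor += W;
    } else {
      // Not enough bits left: the field starts a new unit at bit 0.
      Result.push_back({Idx + 1, 0u, W});
      Cursor = uint64_t(Idx + 1) * UnitBits + W;
    }
  }
  return Result;
}

int main() {
  // Roughly corresponds to: struct { int a:10; int b:10; int c:20; };
  for (const PackedField &F : packBitFields({10, 10, 20}))
    std::cout << "unit " << F.Unit << ", begin " << F.Begin
              << ", size " << F.Size << "\n";
  return 0;
}

With a 32-bit unit, the widths 10, 10, 20 land at (unit 0, begin 0), (unit 0, begin 10), and (unit 1, begin 0), matching the "fits in the last aligned field" and "needs a new llvm field" cases commented in placeBitField.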