path: root/clang/lib/CodeGen/CGCall.cpp
Commit message | Author | Date | Files | Lines
...
* Pass the LLVM IR version of argument types down into computeInfo. | Chris Lattner | 2010-06-29 | 1 | -1/+9

It is somewhat annoying to do this at this level, but it avoids having
ABIInfo depend on CodeGenTypes for a hint. Nothing is using this yet,
so no functionality change.

llvm-svn: 107111

* add IR names to coerced arguments. | Chris Lattner | 2010-06-29 | 1 | -0/+3

llvm-svn: 107105

* make the argument passing stuff in the FCA case smarter still, by | Chris Lattner | 2010-06-29 | 1 | -21/+46

avoiding making the FCA at all when the types exactly line up. For
example, before we made:

%struct.DeclGroup = type { i64, i64 }

define i64 @_Z3foo9DeclGroup(i64, i64) nounwind {
entry:
  %D = alloca %struct.DeclGroup, align 8 ; <%struct.DeclGroup*> [#uses=3]
  %2 = insertvalue %struct.DeclGroup undef, i64 %0, 0 ; <%struct.DeclGroup> [#uses=1]
  %3 = insertvalue %struct.DeclGroup %2, i64 %1, 1 ; <%struct.DeclGroup> [#uses=1]
  store %struct.DeclGroup %3, %struct.DeclGroup* %D
  %tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
  %tmp1 = load i64* %tmp ; <i64> [#uses=1]
  %tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i64*> [#uses=1]
  %tmp3 = load i64* %tmp2 ; <i64> [#uses=1]
  %add = add nsw i64 %tmp1, %tmp3 ; <i64> [#uses=1]
  ret i64 %add
}

... which has the pointless insertvalue, which fastisel hates. Now we
make:

%struct.DeclGroup = type { i64, i64 }

define i64 @_Z3foo9DeclGroup(i64, i64) nounwind {
entry:
  %D = alloca %struct.DeclGroup, align 8 ; <%struct.DeclGroup*> [#uses=4]
  %2 = getelementptr %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
  store i64 %0, i64* %2
  %3 = getelementptr %struct.DeclGroup* %D, i32 0, i32 1 ; <i64*> [#uses=1]
  store i64 %1, i64* %3
  %tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
  %tmp1 = load i64* %tmp ; <i64> [#uses=1]
  %tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i64*> [#uses=1]
  %tmp3 = load i64* %tmp2 ; <i64> [#uses=1]
  %add = add nsw i64 %tmp1, %tmp3 ; <i64> [#uses=1]
  ret i64 %add
}

This only kicks in when x86-64 abi lowering decides it likes us.

llvm-svn: 107104

* Change CGCall to handle the "coerce" case where the coerce-to type | Chris Lattner | 2010-06-28 | 1 | -11/+60

is a FCA to pass each of the elements as individual scalars. This
produces code fast isel is less likely to reject and is easier on the
optimizers. For example, before we would compile:

struct DeclGroup { long NumDecls; char * Y; };
char * foo(DeclGroup D) {
  return D.NumDecls+D.Y;
}

to:

%struct.DeclGroup = type { i64, i64 }

define i64 @_Z3foo9DeclGroup(%struct.DeclGroup) nounwind {
entry:
  %D = alloca %struct.DeclGroup, align 8 ; <%struct.DeclGroup*> [#uses=3]
  store %struct.DeclGroup %0, %struct.DeclGroup* %D, align 1
  %tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
  %tmp1 = load i64* %tmp ; <i64> [#uses=1]
  %tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i64*> [#uses=1]
  %tmp3 = load i64* %tmp2 ; <i64> [#uses=1]
  %add = add nsw i64 %tmp1, %tmp3 ; <i64> [#uses=1]
  ret i64 %add
}

Now we get:

%0 = type { i64, i64 }
%struct.DeclGroup = type { i64, i8* }

define i8* @_Z3foo9DeclGroup(i64, i64) nounwind {
entry:
  %D = alloca %struct.DeclGroup, align 8 ; <%struct.DeclGroup*> [#uses=3]
  %2 = insertvalue %0 undef, i64 %0, 0 ; <%0> [#uses=1]
  %3 = insertvalue %0 %2, i64 %1, 1 ; <%0> [#uses=1]
  %4 = bitcast %struct.DeclGroup* %D to %0* ; <%0*> [#uses=1]
  store %0 %3, %0* %4, align 1
  %tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
  %tmp1 = load i64* %tmp ; <i64> [#uses=1]
  %tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i8**> [#uses=1]
  %tmp3 = load i8** %tmp2 ; <i8*> [#uses=1]
  %add.ptr = getelementptr inbounds i8* %tmp3, i64 %tmp1 ; <i8*> [#uses=1]
  ret i8* %add.ptr
}

Elimination of the FCA inside the function is still-to-come.

llvm-svn: 107099

* make the trivial forms of CreateCoerced{Load|Store} trivial. | Chris Lattner | 2010-06-28 | 1 | -3/+12

llvm-svn: 107091

* finally get around to doing a significant cleanup to irgen: | Chris Lattner | 2010-06-27 | 1 | -4/+2

have CGF create and make accessible standard int32, int64 and intptr
types. This fixes a ton of 80 column violations introduced by
LLVMContextification and cleans up stuff a lot.

llvm-svn: 106977

* If coercing something from int or pointer type to int or pointer type | Chris Lattner | 2010-06-27 | 1 | -0/+49

(potentially after unwrapping it from a struct) do it without going
through memory. We now compile:

struct DeclGroup { unsigned NumDecls; };
int foo(DeclGroup D) {
  return D.NumDecls;
}

into:

%struct.DeclGroup = type { i32 }

define i32 @_Z3foo9DeclGroup(i64) nounwind ssp noredzone {
entry:
  %D = alloca %struct.DeclGroup, align 4 ; <%struct.DeclGroup*> [#uses=2]
  %coerce.dive = getelementptr %struct.DeclGroup* %D, i32 0, i32 0 ; <i32*> [#uses=1]
  %coerce.val.ii = trunc i64 %0 to i32 ; <i32> [#uses=1]
  store i32 %coerce.val.ii, i32* %coerce.dive
  %tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i32*> [#uses=1]
  %tmp1 = load i32* %tmp ; <i32> [#uses=1]
  ret i32 %tmp1
}

instead of:

%struct.DeclGroup = type { i32 }

define i32 @_Z3foo9DeclGroup(i64) nounwind ssp noredzone {
entry:
  %D = alloca %struct.DeclGroup, align 4 ; <%struct.DeclGroup*> [#uses=2]
  %tmp = alloca i64 ; <i64*> [#uses=2]
  %coerce.dive = getelementptr %struct.DeclGroup* %D, i32 0, i32 0 ; <i32*> [#uses=1]
  store i64 %0, i64* %tmp
  %1 = bitcast i64* %tmp to i32* ; <i32*> [#uses=1]
  %2 = load i32* %1, align 1 ; <i32> [#uses=1]
  store i32 %2, i32* %coerce.dive
  %tmp1 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i32*> [#uses=1]
  %tmp2 = load i32* %tmp1 ; <i32> [#uses=1]
  ret i32 %tmp2
}

... which is quite a bit less terrifying.

llvm-svn: 106975

* Same patch as the previous on the store side. Before we compiled this: | Chris Lattner | 2010-06-27 | 1 | -7/+13

struct DeclGroup { unsigned NumDecls; };
int foo(DeclGroup D) {
  return D.NumDecls;
}

to:

%struct.DeclGroup = type { i32 }

define i32 @_Z3foo9DeclGroup(i64) nounwind ssp noredzone {
entry:
  %D = alloca %struct.DeclGroup, align 4 ; <%struct.DeclGroup*> [#uses=2]
  %tmp = alloca i64 ; <i64*> [#uses=2]
  store i64 %0, i64* %tmp
  %1 = bitcast i64* %tmp to %struct.DeclGroup* ; <%struct.DeclGroup*> [#uses=1]
  %2 = load %struct.DeclGroup* %1, align 1 ; <%struct.DeclGroup> [#uses=1]
  store %struct.DeclGroup %2, %struct.DeclGroup* %D
  %tmp1 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i32*> [#uses=1]
  %tmp2 = load i32* %tmp1 ; <i32> [#uses=1]
  ret i32 %tmp2
}

which caused fast isel bailouts due to the FCA load/store of %2. Now
we generate this just blissful code:

%struct.DeclGroup = type { i32 }

define i32 @_Z3foo9DeclGroup(i64) nounwind ssp noredzone {
entry:
  %D = alloca %struct.DeclGroup, align 4 ; <%struct.DeclGroup*> [#uses=2]
  %tmp = alloca i64 ; <i64*> [#uses=2]
  %coerce.dive = getelementptr %struct.DeclGroup* %D, i32 0, i32 0 ; <i32*> [#uses=1]
  store i64 %0, i64* %tmp
  %1 = bitcast i64* %tmp to i32* ; <i32*> [#uses=1]
  %2 = load i32* %1, align 1 ; <i32> [#uses=1]
  store i32 %2, i32* %coerce.dive
  %tmp1 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i32*> [#uses=1]
  %tmp2 = load i32* %tmp1 ; <i32> [#uses=1]
  ret i32 %tmp2
}

This avoids fastisel bailing out and is groundwork for a future patch.
This reduces bailouts on CGStmt.ll to 911 from 935.

llvm-svn: 106974

* improve CreateCoercedLoad a bit to generate slightly less awful | Chris Lattner | 2010-06-27 | 1 | -1/+42

IR when handling X86-64 by-value struct stuff. For example, we used to
compile this:

struct DeclGroup { unsigned NumDecls; };
int foo(DeclGroup D);
void bar(DeclGroup *D) {
  foo(*D);
}

into:

define void @_Z3barP9DeclGroup(%struct.DeclGroup* %D) ssp nounwind {
entry:
  %D.addr = alloca %struct.DeclGroup*, align 8 ; <%struct.DeclGroup**> [#uses=2]
  %agg.tmp = alloca %struct.DeclGroup, align 4 ; <%struct.DeclGroup*> [#uses=2]
  %tmp3 = alloca i64 ; <i64*> [#uses=2]
  store %struct.DeclGroup* %D, %struct.DeclGroup** %D.addr
  %tmp = load %struct.DeclGroup** %D.addr ; <%struct.DeclGroup*> [#uses=1]
  %tmp1 = bitcast %struct.DeclGroup* %agg.tmp to i8* ; <i8*> [#uses=1]
  %tmp2 = bitcast %struct.DeclGroup* %tmp to i8* ; <i8*> [#uses=1]
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 4, i32 4, i1 false)
  %0 = bitcast i64* %tmp3 to %struct.DeclGroup* ; <%struct.DeclGroup*> [#uses=1]
  %1 = load %struct.DeclGroup* %agg.tmp ; <%struct.DeclGroup> [#uses=1]
  store %struct.DeclGroup %1, %struct.DeclGroup* %0, align 1
  %2 = load i64* %tmp3 ; <i64> [#uses=1]
  call void @_Z3foo9DeclGroup(i64 %2)
  ret void
}

which would cause fastisel to bail out due to the first class
aggregate load %1. With this patch we now compile it into the (still
awful):

define void @_Z3barP9DeclGroup(%struct.DeclGroup* %D) nounwind ssp noredzone {
entry:
  %D.addr = alloca %struct.DeclGroup*, align 8 ; <%struct.DeclGroup**> [#uses=2]
  %agg.tmp = alloca %struct.DeclGroup, align 4 ; <%struct.DeclGroup*> [#uses=2]
  %tmp3 = alloca i64 ; <i64*> [#uses=2]
  store %struct.DeclGroup* %D, %struct.DeclGroup** %D.addr
  %tmp = load %struct.DeclGroup** %D.addr ; <%struct.DeclGroup*> [#uses=1]
  %tmp1 = bitcast %struct.DeclGroup* %agg.tmp to i8* ; <i8*> [#uses=1]
  %tmp2 = bitcast %struct.DeclGroup* %tmp to i8* ; <i8*> [#uses=1]
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 4, i32 4, i1 false)
  %coerce.dive = getelementptr %struct.DeclGroup* %agg.tmp, i32 0, i32 0 ; <i32*> [#uses=1]
  %0 = bitcast i64* %tmp3 to i32* ; <i32*> [#uses=1]
  %1 = load i32* %coerce.dive ; <i32> [#uses=1]
  store i32 %1, i32* %0, align 1
  %2 = load i64* %tmp3 ; <i64> [#uses=1]
  %call = call i32 @_Z3foo9DeclGroup(i64 %2) noredzone ; <i32> [#uses=0]
  ret void
}

which doesn't bail out. On CGStmt.ll, this reduces fastisel bail outs
from 958 to 935, and is the precursor of better things to come.

llvm-svn: 106973

* Change IR generation for return (in the simple case) to avoid doing silly | Chris Lattner | 2010-06-27 | 1 | -18/+37

load/store nonsense in the epilog. For example, for:

int foo(int X) {
  int A[100];
  return A[X];
}

we used to generate:

  %arrayidx = getelementptr inbounds [100 x i32]* %A, i32 0, i64 %idxprom ; <i32*> [#uses=1]
  %tmp1 = load i32* %arrayidx ; <i32> [#uses=1]
  store i32 %tmp1, i32* %retval
  %0 = load i32* %retval ; <i32> [#uses=1]
  ret i32 %0
}

which codegen'd to this code:

_foo: ## @foo
## BB#0: ## %entry
  subq $408, %rsp ## imm = 0x198
  movl %edi, 400(%rsp)
  movl 400(%rsp), %edi
  movslq %edi, %rax
  movl (%rsp,%rax,4), %edi
  movl %edi, 404(%rsp)
  movl 404(%rsp), %eax
  addq $408, %rsp ## imm = 0x198
  ret

Now we generate:

  %arrayidx = getelementptr inbounds [100 x i32]* %A, i32 0, i64 %idxprom ; <i32*> [#uses=1]
  %tmp1 = load i32* %arrayidx ; <i32> [#uses=1]
  ret i32 %tmp1
}

and:

_foo: ## @foo
## BB#0: ## %entry
  subq $408, %rsp ## imm = 0x198
  movl %edi, 404(%rsp)
  movl 404(%rsp), %edi
  movslq %edi, %rax
  movl (%rsp,%rax,4), %eax
  addq $408, %rsp ## imm = 0x198
  ret

This actually does matter, cutting out 2000 lines of IR from CGStmt.ll
for example. Another interesting effect is that altivec.h functions
which are dead now get dce'd by the inliner. Hence all the changes to
builtins-ppc-altivec.c to ensure the calls aren't dead.

llvm-svn: 106970

* reduce indentation | Chris Lattner | 2010-06-26 | 1 | -34/+35

llvm-svn: 106967

* Change EmitReferenceBindingToExpr to take a decl instead of a boolean. | Anders Carlsson | 2010-06-26 | 1 | -1/+1

llvm-svn: 106949

* Move CodeGenOptions.h *back* into Frontend. This should have been done when the | Chandler Carruth | 2010-06-15 | 1 | -1/+1

dependency edge was reversed such that CodeGen depends on Frontend.

llvm-svn: 106065

* Fix for PR7040: Don't try to compute the LLVM type for a function where it | Eli Friedman | 2010-05-30 | 1 | -17/+1

isn't possible to compute. This patch is mostly refactoring; the key
change is the addition of the code starting with the comment, "Check
whether the function has a computable LLVM signature." The solution
here is essentially the same as the way the vtable code handles such
functions.

llvm-svn: 105151

* Correctly pass aggregates by reference when emitting thunks. | John McCall | 2010-05-26 | 1 | -0/+30

llvm-svn: 104778

* Add support for Microsoft's __thiscall, from Steven Watanabe! | Douglas Gregor | 2010-05-18 | 1 | -0/+4

llvm-svn: 104026

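A minimal sketch of what this enables (editor's illustration, not part
of the commit; the type and function names are invented, and the
convention is x86-32 specific). Under thiscall the first integer
argument travels in ECX instead of on the stack:

struct Point { int x, y; };

// GNU attribute spelling of the same convention:
int __attribute__((thiscall)) get_x(Point *self) {
  return self->x;  // 'self' arrives in ECX under thiscall
}
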
* As per Chris' request, return the Instruction from EmitCall and add the metadata in the caller. | David Chisnall | 2010-05-02 | 1 | -4/+3

llvm-svn: 102862

* Tweaked EmitCall() to permit the caller to provide some metadata to attach to the call site. | David Chisnall | 2010-05-01 | 1 | -1/+6

Used this in CGObjCGNU to attach metadata about message sends to
permit speculative inlining.

llvm-svn: 102833

* don't slap noalias attribute on stret result arguments. | Chris Lattner | 2010-04-20 | 1 | -2/+1

This mirrors Dan's patch for llvm-gcc in r97989, and fixes the
miscompilation in PR6525. There is some contention over whether this
is the right thing to do, but it is the conservative answer and
demonstrably fixes a miscompilation.

llvm-svn: 101877

* Vtable -> VTable renames across the board. | Anders Carlsson | 2010-04-17 | 1 | -1/+1

llvm-svn: 101666

* Remember the regparm attribute in FunctionType::ExtInfo. | Rafael Espindola | 2010-03-30 | 1 | -11/+6

Fixes PR3782.

llvm-svn: 99940

* the big refactoring bits of PR3782. | Rafael Espindola | 2010-03-30 | 1 | -15/+14

This introduces FunctionType::ExtInfo to hold the calling convention
and the noreturn attribute. The next patch will extend it to include
the regparm attribute and fix the bug.

llvm-svn: 99920

* When mapping restrict to noalias, look for 'restrict' on the parameter variable | John McCall | 2010-03-27 | 1 | -2/+6

instead of the canonical parameter type (which has correctly dropped
all such direct qualifiers). Fixes PR6695.

llvm-svn: 99688

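A sketch of the pattern involved (editor's illustration, not from the
commit; the function name is invented). The restrict qualifier lives
on the parameter variable's declared type, while the canonical
function type drops it, so the noalias check must consult the
ParmVarDecl:

// Each pointer parameter should receive the 'noalias' IR attribute
// (using the __restrict extension spelling accepted in C++):
int sum(int *__restrict a, int *__restrict b) {
  return *a + *b;
}
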
* Use the power of types to track down another canonicalization bug in | John McCall | 2010-02-26 | 1 | -36/+43

the ABI-computation interface. Fixes <rdar://problem/7691046>.

llvm-svn: 97197

* Canonicalize parameter and return types before computing ABI info. Eliminates | John McCall | 2010-02-24 | 1 | -60/+71

a common source of oddities and, in theory, removes some redundant ABI
computations. Also fixes a miscompile I introduced yesterday by
refactoring some code and causing a slightly different code path to be
taken that didn't perform *parameter* type canonicalization, just
normal type canonicalization; this in turn caused a bit of ABI code to
misfire because it was looking for 'double' or 'float' but received
'const float'.

llvm-svn: 97030

* Perform two more constructor/destructor code-size optimizations: | John McCall | 2010-02-23 | 1 | -0/+12

1) emit base destructors as aliases to their unique base class
   destructors under some careful conditions. This is enabled for the
   same targets that can support complete-to-base aliases, i.e. not
   darwin.
2) Emit non-variadic complete constructors for classes with no virtual
   bases as calls to the base constructor. This is enabled on all
   targets and in theory can trigger in situations that the alias
   optimization can't (mostly involving virtual bases, mostly not yet
   supported).

These are bundled together because I didn't think it worthwhile to
split them, not because they really need to be.

llvm-svn: 96842

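A hedged illustration of the kind of hierarchy affected (editor's
example; the class names are invented). With no virtual bases, the
complete and base variants of B's special members do identical work,
which is what makes the call/alias emission strategies valid:

struct A { ~A(); };
struct B : A {
  B(int n);  // no virtual bases: the complete ("C1") constructor can
             // be emitted as a call to the base ("C2") constructor
  // B's implicit destructor only destroys the A subobject, so its
  // base-object destructor is a candidate for the alias optimization.
};
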
* IRgen: Add CreateMemTemp, for creating a temporary memory object for a particular type, and flood fill. | Daniel Dunbar | 2010-02-09 | 1 | -11/+10

- CreateMemTemp sets the alignment on the alloca correctly, which
  fixes a great many places in IRgen where we were doing the wrong
  thing.
- This fixes many many more places than the test case, but my feeling
  is we need to audit alignment systematically so I'm not inclined to
  try hard to test the individual fixes in this patch. If this bothers
  you, patches welcome!

PR6240.

llvm-svn: 95648

* Use the correct function info for constructors when applying function attributes. | Anders Carlsson | 2010-02-06 | 1 | -0/+13

Fixes PR6245.

llvm-svn: 95474

* Standardize the parsing of function type attributes in a way that | John McCall | 2010-02-05 | 1 | -29/+47

follows (as conservatively as possible) gcc's current behavior:
attributes written on return types that don't apply there are applied
to the function instead, etc. Only parse CC attributes as type
attributes, not as decl attributes; don't accept noreturn as a decl
attribute on ValueDecls, either (it still needs to apply to other
decls, like blocks). Consistently consume CC/noreturn information
throughout codegen; enforce this by removing their default values in
CodeGenTypes::getFunctionInfo().

llvm-svn: 95436

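A short sketch of the behavior described (editor's example; the
declarations are invented, and stdcall is x86-32 specific). The
attribute appears syntactically on the return type, but since a
calling convention cannot apply to 'int' it is applied to the function
instead, matching gcc:

__attribute__((stdcall)) int callback(int code);        // applies to the
                                                        // function type
__attribute__((noreturn)) void fatal(const char *msg);  // likewise consumed
                                                        // as function-type info
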
* Revert the new reference binding code; I came up with a way simpler solution for the reference binding bug that is preventing self-hosting. | Anders Carlsson | 2010-02-03 | 1 | -4/+1

llvm-svn: 95223

* Start creating CXXBindReferenceExpr nodes when binding complex types to references. | Anders Carlsson | 2010-01-31 | 1 | -0/+3

llvm-svn: 94964

* Simplify EmitLValueForField - we can get whether the field is part of a union or not from the FieldDecl (through its DeclContext). | Anders Carlsson | 2010-01-29 | 1 | -2/+2

llvm-svn: 94798

* Fill in the return value slot in CGExprAgg::VisitCallExpr. This takes us halfway towards fixing PR5824. | Anders Carlsson | 2009-12-24 | 1 | -15/+32

llvm-svn: 92142

* Pass ReturnValueSlot to EmitCall. No functionality change yet. | Anders Carlsson | 2009-12-24 | 1 | -0/+1

llvm-svn: 92138

* implement PR5274: mark 'restrict' parameters as noalias | Nuno Lopes | 2009-12-07 | 1 | -0/+3

llvm-svn: 90778

* Make functions returning a struct indirectly evaluate the returned struct | Eli Friedman | 2009-12-04 | 1 | -1/+1

directly into the sret pointer. This is an optimization in C, but is
required for correctness in C++ for classes with a non-trivial copy
constructor.

llvm-svn: 90526

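A hedged illustration of the correctness point (editor's example; the
class and function names are invented). If the result were built in a
temporary and then copied into the sret slot, the non-trivial copy
constructor would run an observable extra time:

struct S {
  S();
  S(const S &);  // non-trivial copy constructor
  int data[10];
};
S make();        // returned indirectly through an sret pointer
S s = make();    // must be constructed directly into s's storage
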
* Add VTT parameter to base ctors/dtors with virtual bases. (They aren't used yet). | Anders Carlsson | 2009-11-25 | 1 | -0/+36

llvm-svn: 89835

* It is common for vtables to contain pointers to functions that have either incomplete return types or incomplete argument types. | Anders Carlsson | 2009-11-24 | 1 | -0/+26

Handle this by returning the llvm::OpaqueType for those cases, which
CodeGenModule::GetOrCreateLLVMFunction knows about, and treats as
being an "incomplete function".

llvm-svn: 89736

* The ssp and sspreq function attributes should only be applied to function definitions, not declarations or calls. | Anders Carlsson | 2009-11-16 | 1 | -5/+0

llvm-svn: 88915

* Move CompileOptions -> CodeGenOptions, and sink it into the CodeGen library. | Chandler Carruth | 2009-11-12 | 1 | -4/+4

This resolves the layering violation where CodeGen depended on
Frontend.

llvm-svn: 86998

* Set OptimizeForSize LLVM function attribute with -Os. | Daniel Dunbar | 2009-10-27 | 1 | -0/+2

llvm-svn: 85278

* Twinify CodeGenFunction::CreateTempAlloca | Daniel Dunbar | 2009-10-19 | 1 | -3/+2

llvm-svn: 84456

* Use new predicates for some type equality tests. | Benjamin Kramer | 2009-10-05 | 1 | -2/+1

llvm-svn: 83303

* Implement code generation of member function pointer calls. Fixes PR5121. | Anders Carlsson | 2009-10-03 | 1 | -0/+15

llvm-svn: 83271

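For reference, the construct this commit adds codegen for (editor's
example; the names are invented) is a call through a pointer to member
function:

struct Widget {
  int size() const { return 42; }
};

int callThrough(Widget &w, int (Widget::*fn)() const) {
  return (w.*fn)();  // member function pointer call
}
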
* Refactor the representation of qualifiers to bring ExtQualType out of the | John McCall | 2009-09-24 | 1 | -1/+1

Type hierarchy. Demote 'volatile' to extended-qualifier status. Audit
our use of qualifiers and fix a few places that weren't dealing with
qualifiers quite right; many more remain.

llvm-svn: 82705

* Change all the Type::getAsFoo() methods to specializations of Type::getAs(). | John McCall | 2009-09-21 | 1 | -2/+2

Several of the existing methods were identical to their respective
specializations, and so have been removed entirely. Several more
'leaf' optimizations were introduced. The getAsFoo() methods which
imposed extra conditions, like getAsObjCInterfacePointerType(), have
been left in place.

llvm-svn: 82501

* x86-64 ABI: If a type is a C++ record with either a non-trivial destructor or a non-trivial copy constructor, it should be passed in a pointer. | Anders Carlsson | 2009-09-16 | 1 | -1/+3

Daniel, plz review.

llvm-svn: 82050

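A minimal sketch of the rule (editor's example; the names are
invented). Because Handle is not trivially copyable, the x86-64 C++
ABI passes it indirectly (the caller materializes a temporary and
passes its address) rather than in registers:

struct Handle {
  Handle(const Handle &);  // non-trivial copy constructor
  ~Handle();               // non-trivial destructor
  int fd;
};
void take(Handle h);  // 'h' is passed as a pointer to a temporary
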
* Change CodeGenModule::ConstructTypeAttributes to return the calling convention | Daniel Dunbar | 2009-09-12 | 1 | -6/+9

to use, and allow the ABI implementation to override the calling
convention.

llvm-svn: 81593

* Set the calling convention based on the CGFunctionInfo. | Daniel Dunbar | 2009-09-11 | 1 | -3/+3

llvm-svn: 81582

* Add CallingConvention argument to CGFunctionInfo. | Daniel Dunbar | 2009-09-11 | 1 | -16/+49

- Currently unused.

llvm-svn: 81581