path: root/clang/test/CodeGen
Commit message [Author, Date, Files, Lines -removed/+added]
...
* Fix a warning on a test. [John McCall, 2010-08-03, 1 file, -0/+1]
  llvm-svn: 110165
* Do a very simple pass over every function we emit to infer whether we can mark it nounwind, based on whether it contains any non-nounwind calls. [John McCall, 2010-08-03, 1 file, -4/+14]
  <rdar://problem/8087431>
  llvm-svn: 110163
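  A minimal C sketch of the inference described above (hypothetical function names, not from the commit): a function whose calls are all nounwind can itself be marked nounwind.

    int leaf(int x) { return x + 1; }      /* calls nothing: trivially nounwind */
    int caller(int x) { return leaf(x); }  /* contains only nounwind calls, so it
                                              can be inferred nounwind as well */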
* Support x86 AVX 256-bit instruction built-ins. [Bruno Cardoso Lopes, 2010-08-03, 1 file, -2/+112]
  Right now we support all of them, but as soon as we properly codegen the simple vector operations, remove the unnecessary built-ins/intrinsics from clang and llvm. Also add tests for the new built-ins.
  llvm-svn: 110096
* Only run the jump-checker if there's a branch-protected scope *and* there's a switch or goto somewhere in the function. [John McCall, 2010-08-01, 1 file, -2/+7]
  Indirect gotos trigger the jump-checker regardless, because the conditions there are slightly more elaborate and it's too marginal a case to be worth optimizing. Turns off the jump-checker in a lot of cases in C++. A sketch of the kind of code involved follows below.
  rdar://problem/7702918
  llvm-svn: 109962
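  A minimal sketch of a jump the checker diagnoses (hypothetical example, not the committed test): a goto into the scope of a variable-length array, which is a branch-protected scope.

    void f(int n) {
      goto skip;      /* error: this jump bypasses the VLA's initialization */
      {
        int vla[n];   /* branch-protected scope starts here */
      skip:
        vla[0] = 0;
      }
    }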
* There is no reason for this test to invoke 'llc'. [Daniel Dunbar, 2010-07-30, 1 file, -1/+1]
  llvm-svn: 109847
* fix rdar://8251384, another case where we could access beyond the end of a struct. [Chris Lattner, 2010-07-29, 1 file, -0/+10]
  This improves the case when the struct being passed contains 3 floats, either due to a struct or array of 3 things. Before we'd generate this IR for the testcase:

    define float @bar(double %X.coerce0, double %X.coerce1) nounwind {
    entry:
      %X = alloca %struct.foof, align 8                             ; <%struct.foof*> [#uses=2]
      %0 = bitcast %struct.foof* %X to %1*                          ; <%1*> [#uses=2]
      %1 = getelementptr %1* %0, i32 0, i32 0                       ; <double*> [#uses=1]
      store double %X.coerce0, double* %1
      %2 = getelementptr %1* %0, i32 0, i32 1                       ; <double*> [#uses=1]
      store double %X.coerce1, double* %2
      %tmp = getelementptr inbounds %struct.foof* %X, i32 0, i32 2  ; <float*> [#uses=1]
      %tmp1 = load float* %tmp                                      ; <float> [#uses=1]
      ret float %tmp1
    }

  which compiled (with optimization) to:

    _bar:                   ## @bar
    ## BB#0:                ## %entry
      movd  %xmm1, %rax
      movd  %eax, %xmm0
      ret

  Now we produce:

    define float @bar(double %X.coerce0, float %X.coerce1) nounwind {
    entry:
      %X = alloca %struct.foof, align 8                             ; <%struct.foof*> [#uses=2]
      %0 = bitcast %struct.foof* %X to %0*                          ; <%0*> [#uses=2]
      %1 = getelementptr %0* %0, i32 0, i32 0                       ; <double*> [#uses=1]
      store double %X.coerce0, double* %1
      %2 = getelementptr %0* %0, i32 0, i32 1                       ; <float*> [#uses=1]
      store float %X.coerce1, float* %2
      %tmp = getelementptr inbounds %struct.foof* %X, i32 0, i32 2  ; <float*> [#uses=1]
      %tmp1 = load float* %tmp                                      ; <float> [#uses=1]
      ret float %tmp1
    }

  and:

    _bar:                   ## @bar
    ## BB#0:                ## %entry
      movaps %xmm1, %xmm0
      ret

  llvm-svn: 109776
* handle a case that Eli pointed out, where we could access off the end of a function. [Chris Lattner, 2010-07-29, 1 file, -0/+6]
  rdar://8249586
  llvm-svn: 109762
* in release mode, IRBuilder doesn't add names to instructions; this will hopefully fix the osuosl clang-i686-darwin10 builder. [Chris Lattner, 2010-07-29, 1 file, -2/+2]
  llvm-svn: 109760
* This is a little bit far, but optimize cases like: [Chris Lattner, 2010-07-29, 1 file, -0/+10]

    struct a {
      struct c {
        double x;
        int y;
      } x[1];
    };
    void foo(struct a A) {
    }

  into:

    define void @foo(double %A.coerce0, i32 %A.coerce1) nounwind {
    entry:
      %A = alloca %struct.a, align 8                  ; <%struct.a*> [#uses=1]
      %0 = bitcast %struct.a* %A to %struct.c*        ; <%struct.c*> [#uses=2]
      %1 = getelementptr %struct.c* %0, i32 0, i32 0  ; <double*> [#uses=1]
      store double %A.coerce0, double* %1
      %2 = getelementptr %struct.c* %0, i32 0, i32 1  ; <i32*> [#uses=1]
      store i32 %A.coerce1, i32* %2

  instead of:

    define void @foo(double %A.coerce0, i64 %A.coerce1) nounwind {
    entry:
      %A = alloca %struct.a, align 8                  ; <%struct.a*> [#uses=1]
      %0 = bitcast %struct.a* %A to %0*               ; <%0*> [#uses=2]
      %1 = getelementptr %0* %0, i32 0, i32 0         ; <double*> [#uses=1]
      store double %A.coerce0, double* %1
      %2 = getelementptr %0* %0, i32 0, i32 1         ; <i64*> [#uses=1]
      store i64 %A.coerce1, i64* %2

  I only do this now because I never want to look at this code again :)
  llvm-svn: 109738
* implement a todo: pass an eight-byte that consists of a small integer + padding as that small integer. [Chris Lattner, 2010-07-29, 1 file, -3/+13]
  On code like:

    struct c { double x; int y; };
    void bar(struct c C) {
    }

  This means that we compile to:

    define void @bar(double %C.coerce0, i32 %C.coerce1) nounwind {
    entry:
      %C = alloca %struct.c, align 8                  ; <%struct.c*> [#uses=2]
      %0 = getelementptr %struct.c* %C, i32 0, i32 0  ; <double*> [#uses=1]
      store double %C.coerce0, double* %0
      %1 = getelementptr %struct.c* %C, i32 0, i32 1  ; <i32*> [#uses=1]
      store i32 %C.coerce1, i32* %1

  instead of:

    define void @bar(double %C.coerce0, i64 %C.coerce1) nounwind {
    entry:
      %C = alloca %struct.c, align 8                  ; <%struct.c*> [#uses=3]
      %0 = bitcast %struct.c* %C to %0*               ; <%0*> [#uses=2]
      %1 = getelementptr %0* %0, i32 0, i32 0         ; <double*> [#uses=1]
      store double %C.coerce0, double* %1
      %2 = getelementptr %0* %0, i32 0, i32 1         ; <i64*> [#uses=1]
      store i64 %C.coerce1, i64* %2

  which gives SRoA heartburn.
  This implements rdar://5711709, a nice low number :)
  llvm-svn: 109737
* Kill off the 'coerce' ABI passing form. Now 'direct' and 'extend' always have a "coerce to" type which often matches the default lowering of the Clang type to its LLVM IR type, but the coerce case can be handled by making them not be the same. [Chris Lattner, 2010-07-29, 1 file, -2/+8]
  This simplifies things and fixes issues where X86-64 abi lowering would return coerce after making preferred types exactly match up. This caused us to compile:

    typedef float v4f32 __attribute__((__vector_size__(16)));
    v4f32 foo(v4f32 X) {
      return X+X;
    }

  into this code at -O0:

    define <4 x float> @foo(<4 x float> %X.coerce) nounwind {
    entry:
      %retval = alloca <4 x float>, align 16          ; <<4 x float>*> [#uses=2]
      %coerce = alloca <4 x float>, align 16          ; <<4 x float>*> [#uses=2]
      %X.addr = alloca <4 x float>, align 16          ; <<4 x float>*> [#uses=3]
      store <4 x float> %X.coerce, <4 x float>* %coerce
      %X = load <4 x float>* %coerce                  ; <<4 x float>> [#uses=1]
      store <4 x float> %X, <4 x float>* %X.addr
      %tmp = load <4 x float>* %X.addr                ; <<4 x float>> [#uses=1]
      %tmp1 = load <4 x float>* %X.addr               ; <<4 x float>> [#uses=1]
      %add = fadd <4 x float> %tmp, %tmp1             ; <<4 x float>> [#uses=1]
      store <4 x float> %add, <4 x float>* %retval
      %0 = load <4 x float>* %retval                  ; <<4 x float>> [#uses=1]
      ret <4 x float> %0
    }

  Now we get:

    define <4 x float> @foo(<4 x float> %X) nounwind {
    entry:
      %X.addr = alloca <4 x float>, align 16          ; <<4 x float>*> [#uses=3]
      store <4 x float> %X, <4 x float>* %X.addr
      %tmp = load <4 x float>* %X.addr                ; <<4 x float>> [#uses=1]
      %tmp1 = load <4 x float>* %X.addr               ; <<4 x float>> [#uses=1]
      %add = fadd <4 x float> %tmp, %tmp1             ; <<4 x float>> [#uses=1]
      ret <4 x float> %add
    }

  This implements rdar://8248065
  llvm-svn: 109733
* ignore structs that wrap vectors in IR, the abstraction shouldn't add penalty. [Chris Lattner, 2010-07-29, 1 file, -0/+10]
  Before we'd compile the example into something like:

    %coerce.dive2 = getelementptr %struct.v4f32wrapper* %retval, i32 0, i32 0 ; <<4 x float>*> [#uses=1]
    %1 = bitcast <4 x float>* %coerce.dive2 to <2 x double>*                  ; <<2 x double>*> [#uses=1]
    %2 = load <2 x double>* %1, align 1                                       ; <<2 x double>> [#uses=1]
    ret <2 x double> %2

  Now we produce:

    %coerce.dive2 = getelementptr %struct.v4f32wrapper* %retval, i32 0, i32 0 ; <<4 x float>*> [#uses=1]
    %0 = load <4 x float>* %coerce.dive2, align 1                             ; <<4 x float>> [#uses=1]
    ret <4 x float> %0

  llvm-svn: 109732
* move the 'pretty 16-byte vector' inferring code up to be shared with return values, improving stuff that returns __m128 etc. [Chris Lattner, 2010-07-29, 1 file, -1/+1]
  llvm-svn: 109731
* now that we have CGT around, we can start using preferred types for return values too. [Chris Lattner, 2010-07-29, 1 file, -0/+8]
  Instead of compiling something like:

    struct foo { int *X; float *Y; };
    struct foo test(struct foo *P) { return *P; }

  to:

    %1 = type { i64, i64 }

    define %1 @test(%struct.foo* %P) nounwind {
    entry:
      %retval = alloca %struct.foo, align 8           ; <%struct.foo*> [#uses=2]
      %P.addr = alloca %struct.foo*, align 8          ; <%struct.foo**> [#uses=2]
      store %struct.foo* %P, %struct.foo** %P.addr
      %tmp = load %struct.foo** %P.addr               ; <%struct.foo*> [#uses=1]
      %tmp1 = bitcast %struct.foo* %retval to i8*     ; <i8*> [#uses=1]
      %tmp2 = bitcast %struct.foo* %tmp to i8*        ; <i8*> [#uses=1]
      call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 16, i32 8, i1 false)
      %0 = bitcast %struct.foo* %retval to %1*        ; <%1*> [#uses=1]
      %1 = load %1* %0, align 1                       ; <%1> [#uses=1]
      ret %1 %1
    }

  We now get the result more type safe, with:

    define %struct.foo @test(%struct.foo* %P) nounwind {
    entry:
      %retval = alloca %struct.foo, align 8           ; <%struct.foo*> [#uses=2]
      %P.addr = alloca %struct.foo*, align 8          ; <%struct.foo**> [#uses=2]
      store %struct.foo* %P, %struct.foo** %P.addr
      %tmp = load %struct.foo** %P.addr               ; <%struct.foo*> [#uses=1]
      %tmp1 = bitcast %struct.foo* %retval to i8*     ; <i8*> [#uses=1]
      %tmp2 = bitcast %struct.foo* %tmp to i8*        ; <i8*> [#uses=1]
      call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 16, i32 8, i1 false)
      %0 = load %struct.foo* %retval                  ; <%struct.foo> [#uses=1]
      ret %struct.foo %0
    }

  That memcpy is completely terrible, but I don't know how to fix it.
  llvm-svn: 109729
* pass argument vectors in a type that corresponds to the user type if possible. [Chris Lattner, 2010-07-28, 1 file, -0/+8]
  This improves the example to pass <4 x float> instead of <2 x double>, but we still get awful code, and still don't get the return value right.
  llvm-svn: 109700
* use Get8ByteTypeAtOffset for the return value path as well, so we don't get errors similar to PR7714 on the return path. [Chris Lattner, 2010-07-28, 1 file, -0/+7]
  llvm-svn: 109689
* fix PR7714 by not referencing off the end of a struct when passed by value in the x86-64 ABI. [Chris Lattner, 2010-07-28, 1 file, -0/+14]
  This also improves codegen. Some refactoring of this code is needed.
  llvm-svn: 109681
* Fix flags in global block descriptor when block returns structs. [Fariborz Jahanian, 2010-07-28, 1 file, -2/+2]
  Fixes radar 8241648. Executable test added to llvm test suite.
  llvm-svn: 109620
* 2nd argument of __builtin_expect must be evaluated if it has side effects, to match gcc's behaviour. [Fariborz Jahanian, 2010-07-26, 1 file, -0/+11]
  Addresses radar 8172109.
  llvm-svn: 109467
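  A minimal sketch of the behaviour (hypothetical example, not the committed test): the hint argument's side effects must still occur.

    int x;
    int side_effect(void);
    int f(void) {
      /* the second argument is only a hint about x's expected value, but
         the call to side_effect() must still be emitted and executed */
      return __builtin_expect(x, side_effect());
    }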
* Switch some random local-decl cleanups over to using lazy cleanups. [John McCall, 2010-07-21, 1 file, -1/+1]
  Turn on the block-release unwind cleanup: we're never going to test it if we don't turn it on.
  llvm-svn: 108992
* Fix a goof in my previous patch -- not all of the builtins return a value, some have fixed return types. [Chandler Carruth, 2010-07-18, 1 file, -3/+8]
  llvm-svn: 108657
* Improve the representation of the atomic builtins in a few ways. [Chandler Carruth, 2010-07-18, 1 file, -1/+5]
  First, we make their call expressions synthetically have the "deduced" types based on their first argument. We only insert conversions in the AST for arguments whose values require conversion to match the value type expected. This keeps PR7600 closed by maintaining the return type, but avoids assertions due to unexpected implicit casts making the type unsigned (test case added from Daniel).
  The magic is moved into the codegen for the atomic builtin, which inserts the casts as needed at the IR level to raise the type to an integer suitable for the LLVM intrinsic. This shouldn't cause any real change in functionality, but now we can make the builtin be more truly polymorphic.
  llvm-svn: 108638
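  A short sketch of the polymorphism in question (hypothetical example, not the committed test): the same builtin name deduces its operand and result type from the first argument.

    int i;
    long l;
    long f(void) {
      int a = __sync_fetch_and_add(&i, 1);   /* deduced to operate on int */
      long b = __sync_fetch_and_add(&l, 1);  /* same builtin, deduced long */
      return a + b;
    }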
* Fix for PR3800: make sure not to evaluate the expression for a read-write asm operand twice. [Eli Friedman, 2010-07-16, 1 file, -0/+19]
  llvm-svn: 108489
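  A minimal sketch of the pattern (hypothetical example, not the committed test): with a "+" (read-write) constraint the operand expression is both input and output, but must be evaluated only once.

    int *get_slot(void);   /* imagine this has side effects */
    void f(void) {
      /* *get_slot() is read and written by the asm, yet get_slot()
         must be called exactly once */
      __asm__ volatile("incl %0" : "+m"(*get_slot()));
    }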
* Builtins/ARM: __clear_cache doesn't seem to have a consistent prototype; declare the builtin as void __clear_cache(...) to work around this, which appears to match what GCC does. [Daniel Dunbar, 2010-07-16, 1 file, -1/+1]
  llvm-svn: 108487
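  A hedged usage sketch (hypothetical example): the typical use is flushing the instruction cache over a range of freshly written code, and the (...) prototype tolerates the pointer-type variations seen across headers.

    char buf[64];
    void flush(void) {
      /* __clear_cache is predeclared by the compiler; argument types
         vary between implementations, hence the (...) declaration */
      __clear_cache(buf, buf + sizeof buf);
    }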
* IRgen: Support user defined attributes on block runtime functions. [Daniel Dunbar, 2010-07-16, 1 file, -0/+20]
  - The issue here is that /usr/include/Blocks.h wants to define some of the block runtime globals as weak, depending on the target. This doesn't work in Clang because we aren't using the AST decl for these globals.
  - The fix is a pretty gross hack which just watches all the decls for the specific blocks globals we need to know about; if we see one we use it, otherwise we use the hand coded type.
  In time, I would like to clean this up by changing IRgen to ask Sema/AST for the decl, which would then be lazily loaded from the builtin table if necessary. This could be used in a whole host of places in IRgen and would get rid of a lot of grotty hand coding of LLVM IR; however, we need some extra Sema support for this, as well as support for builtin global variables.
  llvm-svn: 108482
* Improve test case. Thanks, Eli. [Douglas Gregor, 2010-07-15, 1 file, -0/+1]
  llvm-svn: 108470
* Don't suppress the emission of available_externally functions marked with the always_inline attribute. [Douglas Gregor, 2010-07-15, 1 file, -0/+14]
  Thanks to Howard for the tip.
  llvm-svn: 108469
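  A hedged sketch of the pattern (hypothetical example, not the committed test; assumes C99 inline semantics, -std=c99): a plain inline definition lowers to available_externally linkage in IR, and when it is also always_inline its body must still be emitted so it can be inlined even at -O0.

    inline __attribute__((always_inline))
    int twice(int x) { return 2 * x; }   /* available_externally in IR */

    int use(int x) { return twice(x); }  /* needs the body kept around */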
* When forming a function call or message send expression, be sure to strip cv-qualifiers from the expression's type when the language calls for it: in C, that's all the time, while C++ only does it for non-class types. [Douglas Gregor, 2010-07-13, 1 file, -3/+3]
  Centralized the computation of the call expression type in QualType::getCallResultType() and some helper functions in other nodes (FunctionDecl, ObjCMethodDecl, FunctionType), and updated all relevant callers of getResultType() to getCallResultType().
  Fixes PR7598 and PR7463, along with a bunch of getResultType() call sites that weren't stripping references off the result type (nothing stripped cv-qualifiers properly before this change).
  llvm-svn: 108234
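  An illustrative C case (hypothetical example, not the committed test):

    const int f(void);     /* declared with a const-qualified result */
    void g(void) {
      /* in C, the call expression f() has type int, not const int:
         the qualifier is stripped from the call's type */
      __typeof__(f()) x = 0;
      x = 1;               /* OK: x is a plain, assignable int */
    }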
* Reinstate the optimization suppressing available_externally functions at -O0. [Douglas Gregor, 2010-07-13, 3 files, -5/+17]
  The only change from the previous patch is that we don't try to generate virtual method thunks for an available_externally function.
  llvm-svn: 108230
* Speculatively revert r108156; it appears to be breaking self-host. [Douglas Gregor, 2010-07-12, 3 files, -17/+5]
  llvm-svn: 108194
* Do not generate LLVM IR for available_externally function bodies at -O0, since we won't be using the definitions for anything anyway. [Douglas Gregor, 2010-07-12, 3 files, -5/+17]
  For lib/System/Path.o when built in Debug+Asserts mode, this leads to a 4% improvement in compile time (and suppresses 440 function bodies).
  <rdar://problem/7987644>
  llvm-svn: 108156
* fix PR7280 by making the warning on code like this: [Chris Lattner, 2010-07-11, 1 file, -1/+1]

    int test1() {
      return;
    }

  default to an error.
  llvm-svn: 108108
* allow this to pass on 32-bit hosts. [Chris Lattner, 2010-07-08, 1 file, -2/+2]
  llvm-svn: 107845
* fix the clang side of PR7437: EmitAggregateCopy was not producing a memcpy with the right address spaces, because two places in it cast the arguments to i8*, one of which didn't preserve the address space. [Chris Lattner, 2010-07-08, 1 file, -0/+12]
  There is also an optimizer bug here.
  llvm-svn: 107842
* filecheckize this test. [Chris Lattner, 2010-07-08, 1 file, -5/+17]
  llvm-svn: 107841
* Don't consider casted non-global pointers to be evaluatable. [John McCall, 2010-07-07, 1 file, -0/+6]
  Fixes rdar://problem/8154689
  llvm-svn: 107755
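  A minimal sketch of the distinction (hypothetical example, not the committed test): the address of a global survives constant evaluation through a cast, but the address of a local must not.

    int g;
    long ok = (long)&g;                /* global: still folds to a constant */

    void f(void) {
      int local;
      static long bad = (long)&local;  /* non-global pointer cast: not
                                          evaluatable, so rejected */
    }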
* in the "coerce" case, the ABI handling code ends up making theChris Lattner2010-07-051-1/+9
| | | | | | | | | alloca for an argument. Make sure the argument gets the proper decl alignment, which may be different than the type alignment. This fixes PR7567 llvm-svn: 107627
* fix PR7564, a case where the bitfield struct init code wasn't handling array padding elements right. [Chris Lattner, 2010-07-05, 1 file, -0/+8]
  llvm-svn: 107621
* fix rdar://8147692 - yet another crash due to my ABI work. [Chris Lattner, 2010-07-01, 1 file, -0/+15]
  llvm-svn: 107387
* Driver/IRgen: Add support for -momit-leaf-frame-pointer. [Daniel Dunbar, 2010-07-01, 1 file, -0/+29]
  llvm-svn: 107367
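  For context (hypothetical example, not the committed test): a leaf function is one that makes no calls, so with -momit-leaf-frame-pointer its frame-pointer setup can be dropped while non-leaf functions keep theirs.

    int leaf(int x) { return x * x; }     /* calls nothing: frame pointer
                                             can be omitted */
    int outer(int x) { return leaf(x); }  /* makes a call: keeps its frame
                                             pointer */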
* Reapply: [Chris Lattner, 2010-06-30, 1 file, -0/+13]
  r107173, "fix PR7519: after thrashing around and remembering how all this stuff"
  r107216, "fix PR7523, which was caused by the ABI code calling ConvertType instead"
  This includes a fix to make ConvertTypeForMem handle the "recursive" case, and call it as such when lowering function types which have an indirect result.
  llvm-svn: 107310
* Revert r107173, "fix PR7519: after thrashing around and remembering how all this stuff", it broke bootstrap. [Daniel Dunbar, 2010-06-30, 1 file, -13/+0]
  llvm-svn: 107232
* IRgen: Assignment to Objective-C properties shouldn't reload the value (which would trigger an extra method call). [Daniel Dunbar, 2010-06-29, 2 files, -1/+49]
  - While in the area, I also changed Clang to not emit an unnecessary load from 'x' in cases like 'y = (x = 1)'.
  llvm-svn: 107210
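  A minimal sketch of the second point in plain C (hypothetical example, not the committed test): the value of the inner assignment is reused directly.

    int x, y;
    void f(void) {
      /* the value stored into x is forwarded to y; clang no longer
         emits a separate load of x to produce it */
      y = (x = 1);
    }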
* tests: Fix test to not depend on instruction names. [Daniel Dunbar, 2010-06-29, 1 file, -2/+3]
  llvm-svn: 107186
* fix PR7519: after thrashing around and remembering how all this stuff works, the fix is quite simple: just make sure to call ConvertTypeRecursive when the function type being lowered is in the midst of ConvertType. [Chris Lattner, 2010-06-29, 1 file, -0/+13]
  llvm-svn: 107173
* Change X86_64ABIInfo to have ASTContext and TargetData ivars, to avoid passing ASTContext down through all the methods it has. [Chris Lattner, 2010-06-29, 1 file, -0/+10]
  When classifying an argument, or argument piece, as INTEGER, check to see if we have a pointer at exactly the same offset in the preferred type. If so, use that pointer type instead of i64. This allows us to compile a function taking a StringRef into something like this:

    define i8* @foo(i64 %D.coerce0, i8* %D.coerce1) nounwind ssp {
    entry:
      %D = alloca %struct.DeclGroup, align 8                             ; <%struct.DeclGroup*> [#uses=4]
      %0 = getelementptr %struct.DeclGroup* %D, i32 0, i32 0             ; <i64*> [#uses=1]
      store i64 %D.coerce0, i64* %0
      %1 = getelementptr %struct.DeclGroup* %D, i32 0, i32 1             ; <i8**> [#uses=1]
      store i8* %D.coerce1, i8** %1
      %tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0  ; <i64*> [#uses=1]
      %tmp1 = load i64* %tmp                                             ; <i64> [#uses=1]
      %tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i8**> [#uses=1]
      %tmp3 = load i8** %tmp2                                            ; <i8*> [#uses=1]
      %add.ptr = getelementptr inbounds i8* %tmp3, i64 %tmp1             ; <i8*> [#uses=1]
      ret i8* %add.ptr
    }

  instead of this:

    define i8* @foo(i64 %D.coerce0, i64 %D.coerce1) nounwind ssp {
    entry:
      %D = alloca %struct.DeclGroup, align 8                             ; <%struct.DeclGroup*> [#uses=3]
      %0 = insertvalue %0 undef, i64 %D.coerce0, 0                       ; <%0> [#uses=1]
      %1 = insertvalue %0 %0, i64 %D.coerce1, 1                          ; <%0> [#uses=1]
      %2 = bitcast %struct.DeclGroup* %D to %0*                          ; <%0*> [#uses=1]
      store %0 %1, %0* %2, align 1
      %tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0  ; <i64*> [#uses=1]
      %tmp1 = load i64* %tmp                                             ; <i64> [#uses=1]
      %tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i8**> [#uses=1]
      %tmp3 = load i8** %tmp2                                            ; <i8*> [#uses=1]
      %add.ptr = getelementptr inbounds i8* %tmp3, i64 %tmp1             ; <i8*> [#uses=1]
      ret i8* %add.ptr
    }

  This implements rdar://7375902 - [codegen quality] clang x86-64 ABI lowering code punishing StringRef
  llvm-svn: 107123
* add IR names to coerced arguments. [Chris Lattner, 2010-06-29, 1 file, -4/+4]
  llvm-svn: 107105
* Change CGCall to handle the "coerce" case where the coerce-to type is a FCA, by passing each of the elements as individual scalars. [Chris Lattner, 2010-06-28, 1 file, -1/+1]
  This produces code fast isel is less likely to reject and is easier on the optimizers. For example, before we would compile:

    struct DeclGroup { long NumDecls; char * Y; };
    char * foo(DeclGroup D) {
      return D.NumDecls+D.Y;
    }

  to:

    %struct.DeclGroup = type { i64, i64 }

    define i64 @_Z3foo9DeclGroup(%struct.DeclGroup) nounwind {
    entry:
      %D = alloca %struct.DeclGroup, align 8                             ; <%struct.DeclGroup*> [#uses=3]
      store %struct.DeclGroup %0, %struct.DeclGroup* %D, align 1
      %tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0  ; <i64*> [#uses=1]
      %tmp1 = load i64* %tmp                                             ; <i64> [#uses=1]
      %tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i64*> [#uses=1]
      %tmp3 = load i64* %tmp2                                            ; <i64> [#uses=1]
      %add = add nsw i64 %tmp1, %tmp3                                    ; <i64> [#uses=1]
      ret i64 %add
    }

  Now we get:

    %0 = type { i64, i64 }
    %struct.DeclGroup = type { i64, i8* }

    define i8* @_Z3foo9DeclGroup(i64, i64) nounwind {
    entry:
      %D = alloca %struct.DeclGroup, align 8                             ; <%struct.DeclGroup*> [#uses=3]
      %2 = insertvalue %0 undef, i64 %0, 0                               ; <%0> [#uses=1]
      %3 = insertvalue %0 %2, i64 %1, 1                                  ; <%0> [#uses=1]
      %4 = bitcast %struct.DeclGroup* %D to %0*                          ; <%0*> [#uses=1]
      store %0 %3, %0* %4, align 1
      %tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0  ; <i64*> [#uses=1]
      %tmp1 = load i64* %tmp                                             ; <i64> [#uses=1]
      %tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i8**> [#uses=1]
      %tmp3 = load i8** %tmp2                                            ; <i8*> [#uses=1]
      %add.ptr = getelementptr inbounds i8* %tmp3, i64 %tmp1             ; <i8*> [#uses=1]
      ret i8* %add.ptr
    }

  Elimination of the FCA inside the function is still-to-come.
  llvm-svn: 107099
* X86-64: pass/return structs of float/int as float/i32 instead of double/i64, to make the code generated for the ABI cleaner. [Chris Lattner, 2010-06-28, 1 file, -3/+4]
  Passing in the low part of a double is the same as passing in a float. For example, we now compile:

    struct DeclGroup { float NumDecls; };
    float foo(DeclGroup D);
    void bar(DeclGroup *D) {
      foo(*D);
    }

  into:

    %struct.DeclGroup = type { float }

    define void @_Z3barP9DeclGroup(%struct.DeclGroup* %D) nounwind {
    entry:
      %D.addr = alloca %struct.DeclGroup*, align 8                            ; <%struct.DeclGroup**> [#uses=2]
      %agg.tmp = alloca %struct.DeclGroup, align 4                            ; <%struct.DeclGroup*> [#uses=2]
      store %struct.DeclGroup* %D, %struct.DeclGroup** %D.addr
      %tmp = load %struct.DeclGroup** %D.addr                                 ; <%struct.DeclGroup*> [#uses=1]
      %tmp1 = bitcast %struct.DeclGroup* %agg.tmp to i8*                      ; <i8*> [#uses=1]
      %tmp2 = bitcast %struct.DeclGroup* %tmp to i8*                          ; <i8*> [#uses=1]
      call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 4, i32 4, i1 false)
      %coerce.dive = getelementptr %struct.DeclGroup* %agg.tmp, i32 0, i32 0  ; <float*> [#uses=1]
      %0 = load float* %coerce.dive, align 1                                  ; <float> [#uses=1]
      %call = call float @_Z3foo9DeclGroup(float %0)                          ; <float> [#uses=0]
      ret void
    }

  instead of:

    %struct.DeclGroup = type { float }

    define void @_Z3barP9DeclGroup(%struct.DeclGroup* %D) nounwind {
    entry:
      %D.addr = alloca %struct.DeclGroup*, align 8                            ; <%struct.DeclGroup**> [#uses=2]
      %agg.tmp = alloca %struct.DeclGroup, align 4                            ; <%struct.DeclGroup*> [#uses=2]
      %tmp3 = alloca double                                                   ; <double*> [#uses=2]
      store %struct.DeclGroup* %D, %struct.DeclGroup** %D.addr
      %tmp = load %struct.DeclGroup** %D.addr                                 ; <%struct.DeclGroup*> [#uses=1]
      %tmp1 = bitcast %struct.DeclGroup* %agg.tmp to i8*                      ; <i8*> [#uses=1]
      %tmp2 = bitcast %struct.DeclGroup* %tmp to i8*                          ; <i8*> [#uses=1]
      call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 4, i32 4, i1 false)
      %coerce.dive = getelementptr %struct.DeclGroup* %agg.tmp, i32 0, i32 0  ; <float*> [#uses=1]
      %0 = bitcast double* %tmp3 to float*                                    ; <float*> [#uses=1]
      %1 = load float* %coerce.dive                                           ; <float> [#uses=1]
      store float %1, float* %0, align 1
      %2 = load double* %tmp3                                                 ; <double> [#uses=1]
      %call = call float @_Z3foo9DeclGroup(double %2)                         ; <float> [#uses=0]
      ret void
    }

  which is this machine code (at -O0):

    __Z3barP9DeclGroup:
      subq  $24, %rsp
      movq  %rdi, 16(%rsp)
      movq  16(%rsp), %rdi
      leaq  8(%rsp), %rax
      movl  (%rdi), %ecx
      movl  %ecx, (%rax)
      movss 8(%rsp), %xmm0
      callq __Z3foo9DeclGroup
      addq  $24, %rsp
      ret

  vs this:

    __Z3barP9DeclGroup:
      subq  $24, %rsp
      movq  %rdi, 16(%rsp)
      movq  16(%rsp), %rdi
      leaq  8(%rsp), %rax
      movl  (%rdi), %ecx
      movl  %ecx, (%rax)
      movss 8(%rsp), %xmm0
      movss %xmm0, (%rsp)
      movsd (%rsp), %xmm0
      callq __Z3foo9DeclGroup
      addq  $24, %rsp
      ret

  At -O3, it is the difference between this now:

    __Z3barP9DeclGroup:
      movss (%rdi), %xmm0
      jmp   __Z3foo9DeclGroup  # TAILCALL

  vs this before:

    __Z3barP9DeclGroup:
      movl  (%rdi), %eax
      movd  %rax, %xmm0
      jmp   __Z3foo9DeclGroup  # TAILCALL

  llvm-svn: 107048
* Have __func__ and siblings point to the block's implementation function name. [Fariborz Jahanian, 2010-06-28, 1 file, -0/+18]
  Fixes radar 7860965.
  llvm-svn: 107044
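  A short sketch of the behaviour (hypothetical example, not the committed test; requires -fblocks and the blocks runtime):

    #include <stdio.h>
    void f(void) {
      void (^b)(void) = ^{
        /* with this change, __func__ names the block's generated
           implementation function rather than the enclosing f */
        printf("%s\n", __func__);
      };
      b();
    }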