path: root/clang/lib/CodeGen/TargetInfo.cpp
...
* 1 x ulonglong needs to be classified as INTEGER, just like 1 x longlong,
  Chris Lattner, 2010-08-26 (1 file, -1/+2)
  this fixes a miscompilation on the included testcase, rdar://8359248
  llvm-svn: 112201
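  For illustration, not part of the commit: a minimal C sketch of the case this
  classification covers.  Both wrapper structs below should classify as INTEGER
  under the x86-64 ABI, so the payload is expected to travel in a
  general-purpose register rather than in memory; the struct and function
  names are made up.

    /* Hypothetical example: 1 x ulonglong vs. 1 x longlong wrappers. */
    struct U { unsigned long long x; };
    struct S { long long x; };

    unsigned long long take_u(struct U u) { return u.x; } /* INTEGER eightbyte */
    long long          take_s(struct S s) { return s.x; } /* same classification */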
* tame an assertion, fixing rdar://8357396
  Chris Lattner, 2010-08-26 (1 file, -1/+1)
  llvm-svn: 112174
* Finally pass "two floats in a 64-bit unit" as a <2 x float> instead of
  Chris Lattner, 2010-08-25 (1 file, -6/+2)
  as a double in the x86-64 ABI.  This allows us to generate much better code
  for certain things, e.g.:

    _Complex float f32(_Complex float A, _Complex float B) { return A+B; }

  Used to compile into (look at the integer silliness!):

    _f32:                          ## @f32
    ## BB#0:                       ## %entry
        movd    %xmm1, %rax
        movd    %eax, %xmm1
        movd    %xmm0, %rcx
        movd    %ecx, %xmm0
        addss   %xmm1, %xmm0
        movd    %xmm0, %edx
        shrq    $32, %rax
        movd    %eax, %xmm0
        shrq    $32, %rcx
        movd    %ecx, %xmm1
        addss   %xmm0, %xmm1
        movd    %xmm1, %eax
        shlq    $32, %rax
        addq    %rdx, %rax
        movd    %rax, %xmm0
        ret

  Now we get:

    _f32:                          ## @f32
        movdqa   %xmm0, %xmm2
        addss    %xmm1, %xmm2
        pshufd   $16, %xmm2, %xmm2
        pshufd   $1, %xmm1, %xmm1
        pshufd   $1, %xmm0, %xmm0
        addss    %xmm1, %xmm0
        pshufd   $16, %xmm0, %xmm1
        movdqa   %xmm2, %xmm0
        unpcklps %xmm1, %xmm0
        ret

  and compile stuff like:

    extern float _Complex ccoshf( float _Complex ) ;
    float _Complex ccosf ( float _Complex z ) {
      float _Complex iz;
      (__real__ iz) = -(__imag__ z);
      (__imag__ iz) = (__real__ z);
      return ccoshf(iz);
    }

  into:

    _ccosf:                        ## @ccosf
    ## BB#0:                       ## %entry
        pshufd   $1, %xmm0, %xmm1
        xorps    LCPI4_0(%rip), %xmm1
        unpcklps %xmm0, %xmm1
        movaps   %xmm1, %xmm0
        jmp      _ccoshf           ## TAILCALL

  instead of:

    _ccosf:                        ## @ccosf
    ## BB#0:                       ## %entry
        movd    %xmm0, %rax
        movq    %rax, %rcx
        shlq    $32, %rcx
        shrq    $32, %rax
        xorl    $-2147483648, %eax ## imm = 0xFFFFFFFF80000000
        addq    %rcx, %rax
        movd    %rax, %xmm0
        jmp     _ccoshf            ## TAILCALL

  There is still "stuff to be done" here for the struct case, but this resolves
  rdar://6379669 - [x86-64 ABI] Pass and return _Complex float / double
  efficiently
  llvm-svn: 112111
* Fix horrible white space errors.
  Michael J. Spencer, 2010-08-25 (1 file, -69/+69)
  llvm-svn: 112067
* Experiment with using first-class aggregates to represent member function
  John McCall, 2010-08-22 (1 file, -11/+16)
  pointers.  I find the resulting code to be substantially cleaner, and it
  makes it very easy to use the same APIs for data member pointers (which I
  have conscientiously avoided here), and it avoids a plethora of potential
  inefficiencies due to excessive memory copying, but we'll have to see if it
  actually works.
  llvm-svn: 111776
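  For illustration, not part of the commit: the kind of C++ value whose IR
  representation this experiment changes.  Under the Itanium C++ ABI a
  pointer-to-member-function is a two-word pair (function pointer or vtable
  offset, plus a this-adjustment), and the commit tries carrying that pair as a
  first-class aggregate.  The names below are made up.

    // Hypothetical example: a member function pointer passed by value.
    struct Widget {
      int scale(int x) const { return 2 * x; }
    };
    typedef int (Widget::*Method)(int) const;

    int invoke(const Widget &w, Method m, int arg) {
      return (w.*m)(arg);   // call through the member-pointer pair
    }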
* fix PR5179 and correctly fix PR5831 to not miscompile.
  Chris Lattner, 2010-07-30 (1 file, -10/+32)
  The X86-64 ABI code didn't handle the case when a struct would get classified
  and turn up as "NoClass INTEGER" for example.  This is perfectly possible
  when the first slot is all padding (e.g. due to empty base classes).  In this
  situation, the first 8-byte doesn't take a register at all, only the second
  8-byte does.

  This fixes this by enhancing the x86-64 abi stuff to allow and handle this
  case, reverts the broken fix for PR5831, and enhances the target independent
  stuff to be able to handle an argument value in registers being accessed at
  an offset from the memory value.

  This is the last x86-64 calling convention related miscompile that I'm aware
  of.
  llvm-svn: 109848
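  For illustration, not part of the commit and not the original PR testcase: a
  hypothetical layout in the spirit of the "NoClass INTEGER" case, where the
  first eightbyte holds no data at all and only the second eightbyte should
  land in a register.

    // Hypothetical C++ layout: bytes 0..7 contain only empty-class elements.
    struct Empty { };
    struct Padded {
      Empty pad[8];   // first eightbyte: no data members
      long  value;    // second eightbyte: INTEGER
    };

    long get(Padded p) { return p.value; }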
* move the last hunk of getCoerceResult into the place
  Chris Lattner, 2010-07-29 (1 file, -32/+27)
  that needs it and remove getCoerceResult.
  llvm-svn: 109807
* now that direct and coerce are merged, getCoerceResult gets simpler.
  Chris Lattner, 2010-07-29 (1 file, -5/+0)
  llvm-svn: 109805
* now that GetSSETypeAtOffset handles passing SSE class values as
  Chris Lattner, 2010-07-29 (1 file, -14/+0)
  float, the special case hack in getCoerceResult can go away.
  llvm-svn: 109804
* Implement the clang-side of detection for when to pass as
  Chris Lattner, 2010-07-29 (1 file, -3/+39)
  <2 x float> instead of double.  This works but can't be turned on until I
  teach codegen to pass <2 x float> as one XMM register instead of two.
  llvm-svn: 109790
* Look at me, I can count!
  Chris Lattner, 2010-07-29 (1 file, -1/+1)
  llvm-svn: 109786
* fix rdar://8251384, another case where we could access beyond the
  Chris Lattner, 2010-07-29 (1 file, -21/+28)
  end of a struct.  This improves the case when the struct being passed
  contains 3 floats, either due to a struct or array of 3 things.  Before we'd
  generate this IR for the testcase:

    define float @bar(double %X.coerce0, double %X.coerce1) nounwind {
    entry:
      %X = alloca %struct.foof, align 8               ; <%struct.foof*> [#uses=2]
      %0 = bitcast %struct.foof* %X to %1*            ; <%1*> [#uses=2]
      %1 = getelementptr %1* %0, i32 0, i32 0         ; <double*> [#uses=1]
      store double %X.coerce0, double* %1
      %2 = getelementptr %1* %0, i32 0, i32 1         ; <double*> [#uses=1]
      store double %X.coerce1, double* %2
      %tmp = getelementptr inbounds %struct.foof* %X, i32 0, i32 2 ; <float*> [#uses=1]
      %tmp1 = load float* %tmp                        ; <float> [#uses=1]
      ret float %tmp1
    }

  which compiled (with optimization) to:

    _bar:                          ## @bar
    ## BB#0:                       ## %entry
        movd    %xmm1, %rax
        movd    %eax, %xmm0
        ret

  Now we produce:

    define float @bar(double %X.coerce0, float %X.coerce1) nounwind {
    entry:
      %X = alloca %struct.foof, align 8               ; <%struct.foof*> [#uses=2]
      %0 = bitcast %struct.foof* %X to %0*            ; <%0*> [#uses=2]
      %1 = getelementptr %0* %0, i32 0, i32 0         ; <double*> [#uses=1]
      store double %X.coerce0, double* %1
      %2 = getelementptr %0* %0, i32 0, i32 1         ; <float*> [#uses=1]
      store float %X.coerce1, float* %2
      %tmp = getelementptr inbounds %struct.foof* %X, i32 0, i32 2 ; <float*> [#uses=1]
      %tmp1 = load float* %tmp                        ; <float> [#uses=1]
      ret float %tmp1
    }

  and:

    _bar:                          ## @bar
    ## BB#0:                       ## %entry
        movaps  %xmm1, %xmm0
        ret

  llvm-svn: 109776
* start setting up infrastructure for passing multi-floats
  Chris Lattner, 2010-07-29 (1 file, -15/+33)
  as <2 x float> instead of as double.  The backend isn't ready yet, but
  infrastructure in the frontend can come up.
  llvm-svn: 109768
* rename Get8ByteTypeAtOffset -> GetINTEGERTypeAtOffset to
  Chris Lattner, 2010-07-29 (1 file, -17/+18)
  make it clear that this function should only return a type that the codegen
  will classify the same as an INTEGER type.
  llvm-svn: 109763
* handle a case where we could access off the end of a function
  Chris Lattner, 2010-07-29 (1 file, -9/+6)
  that Eli pointed out, rdar://8249586
  llvm-svn: 109762
* fix PR7742 / rdar://8250764, a miscompilation of struct
  Chris Lattner, 2010-07-29 (1 file, -2/+3)
  return where the struct has a base but no fields.  This was because the
  x86-64 abi logic was checking the wrong predicate in one place.

  This was introduced in r91874, which was a fix for PR5831, which lacked a
  CHECK line, so I verified and added it.
  llvm-svn: 109759
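  For illustration, not part of the commit: the shape of return value the
  PR7742 fix describes, a class with a base class but no fields of its own,
  returned by value.  The names are made up.

    // Hypothetical example: Derived adds no fields, so its return
    // classification must come entirely from the base subobject.
    struct Base    { int x; };
    struct Derived : Base { };

    Derived make() {
      Derived d;
      d.x = 42;
      return d;
    }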
* This is a little bit far, but optimize cases like:
  Chris Lattner, 2010-07-29 (1 file, -3/+26)

    struct a {
      struct c {
        double x;
        int y;
      } x[1];
    };
    void foo(struct a A) {
    }

  into:

    define void @foo(double %A.coerce0, i32 %A.coerce1) nounwind {
    entry:
      %A = alloca %struct.a, align 8                  ; <%struct.a*> [#uses=1]
      %0 = bitcast %struct.a* %A to %struct.c*        ; <%struct.c*> [#uses=2]
      %1 = getelementptr %struct.c* %0, i32 0, i32 0  ; <double*> [#uses=1]
      store double %A.coerce0, double* %1
      %2 = getelementptr %struct.c* %0, i32 0, i32 1  ; <i32*> [#uses=1]
      store i32 %A.coerce1, i32* %2

  instead of:

    define void @foo(double %A.coerce0, i64 %A.coerce1) nounwind {
    entry:
      %A = alloca %struct.a, align 8                  ; <%struct.a*> [#uses=1]
      %0 = bitcast %struct.a* %A to %0*               ; <%0*> [#uses=2]
      %1 = getelementptr %0* %0, i32 0, i32 0         ; <double*> [#uses=1]
      store double %A.coerce0, double* %1
      %2 = getelementptr %0* %0, i32 0, i32 1         ; <i64*> [#uses=1]
      store i64 %A.coerce1, i64* %2

  I only do this now because I never want to look at this code again :)
  llvm-svn: 109738
* implement a todo: pass an eight-byte that consists of a
  Chris Lattner, 2010-07-29 (1 file, -6/+92)
  small integer + padding as that small integer.  On code like:

    struct c { double x; int y; };
    void bar(struct c C) { }

  This means that we compile to:

    define void @bar(double %C.coerce0, i32 %C.coerce1) nounwind {
    entry:
      %C = alloca %struct.c, align 8                  ; <%struct.c*> [#uses=2]
      %0 = getelementptr %struct.c* %C, i32 0, i32 0  ; <double*> [#uses=1]
      store double %C.coerce0, double* %0
      %1 = getelementptr %struct.c* %C, i32 0, i32 1  ; <i32*> [#uses=1]
      store i32 %C.coerce1, i32* %1

  instead of:

    define void @bar(double %C.coerce0, i64 %C.coerce1) nounwind {
    entry:
      %C = alloca %struct.c, align 8                  ; <%struct.c*> [#uses=3]
      %0 = bitcast %struct.c* %C to %0*               ; <%0*> [#uses=2]
      %1 = getelementptr %0* %0, i32 0, i32 0         ; <double*> [#uses=1]
      store double %C.coerce0, double* %1
      %2 = getelementptr %0* %0, i32 0, i32 1         ; <i64*> [#uses=1]
      store i64 %C.coerce1, i64* %2

  which gives SRoA heartburn.

  This implements rdar://5711709, a nice low number :)
  llvm-svn: 109737
* Kill off the 'coerce' ABI passing form.  Now 'direct' and 'extend' always
  Chris Lattner, 2010-07-29 (1 file, -24/+22)
  have a "coerce to" type which often matches the default lowering of Clang
  type to LLVM IR type, but the coerce case can be handled by making them not
  be the same.

  This simplifies things and fixes issues where X86-64 abi lowering would
  return coerce after making preferred types exactly match up.  This caused us
  to compile:

    typedef float v4f32 __attribute__((__vector_size__(16)));
    v4f32 foo(v4f32 X) {
      return X+X;
    }

  into this code at -O0:

    define <4 x float> @foo(<4 x float> %X.coerce) nounwind {
    entry:
      %retval = alloca <4 x float>, align 16          ; <<4 x float>*> [#uses=2]
      %coerce = alloca <4 x float>, align 16          ; <<4 x float>*> [#uses=2]
      %X.addr = alloca <4 x float>, align 16          ; <<4 x float>*> [#uses=3]
      store <4 x float> %X.coerce, <4 x float>* %coerce
      %X = load <4 x float>* %coerce                  ; <<4 x float>> [#uses=1]
      store <4 x float> %X, <4 x float>* %X.addr
      %tmp = load <4 x float>* %X.addr                ; <<4 x float>> [#uses=1]
      %tmp1 = load <4 x float>* %X.addr               ; <<4 x float>> [#uses=1]
      %add = fadd <4 x float> %tmp, %tmp1             ; <<4 x float>> [#uses=1]
      store <4 x float> %add, <4 x float>* %retval
      %0 = load <4 x float>* %retval                  ; <<4 x float>> [#uses=1]
      ret <4 x float> %0
    }

  Now we get:

    define <4 x float> @foo(<4 x float> %X) nounwind {
    entry:
      %X.addr = alloca <4 x float>, align 16          ; <<4 x float>*> [#uses=3]
      store <4 x float> %X, <4 x float>* %X.addr
      %tmp = load <4 x float>* %X.addr                ; <<4 x float>> [#uses=1]
      %tmp1 = load <4 x float>* %X.addr               ; <<4 x float>> [#uses=1]
      %add = fadd <4 x float> %tmp, %tmp1             ; <<4 x float>> [#uses=1]
      ret <4 x float> %add
    }

  This implements rdar://8248065
  llvm-svn: 109733
* ignore structs that wrap vectors in IR, the abstraction shouldn't add penalty.
  Chris Lattner, 2010-07-29 (1 file, -2/+13)
  Before we'd compile the example into something like:

      %coerce.dive2 = getelementptr %struct.v4f32wrapper* %retval, i32 0, i32 0 ; <<4 x float>*> [#uses=1]
      %1 = bitcast <4 x float>* %coerce.dive2 to <2 x double>* ; <<2 x double>*> [#uses=1]
      %2 = load <2 x double>* %1, align 1             ; <<2 x double>> [#uses=1]
      ret <2 x double> %2

  Now we produce:

      %coerce.dive2 = getelementptr %struct.v4f32wrapper* %retval, i32 0, i32 0 ; <<4 x float>*> [#uses=1]
      %0 = load <4 x float>* %coerce.dive2, align 1   ; <<4 x float>> [#uses=1]
      ret <4 x float> %0

  llvm-svn: 109732
* move the 'pretty 16-byte vector' inferring code up to be shared
  Chris Lattner, 2010-07-29 (1 file, -14/+24)
  with return values, improving stuff that returns __m128 etc.
  llvm-svn: 109731
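  For illustration, not part of the commit: the kind of source this affects, a
  16-byte vector wrapped in a struct and returned by value.  The wrapper and
  function names are made up.

    /* Hypothetical example: returning a wrapped __m128 by value. */
    #include <xmmintrin.h>

    struct vec_wrapper { __m128 v; };

    struct vec_wrapper add(struct vec_wrapper a, struct vec_wrapper b) {
      struct vec_wrapper r;
      r.v = _mm_add_ps(a.v, b.v);  /* returned value should stay <4 x float> */
      return r;
    }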
* simplify code by eliminating a premature optimization.
  Chris Lattner, 2010-07-29 (1 file, -30/+11)
  llvm-svn: 109730
* now that we have CGT around, we can start using preferred types
  Chris Lattner, 2010-07-29 (1 file, -3/+9)
  for return values too.  Instead of compiling something like:

    struct foo { int *X; float *Y; };
    struct foo test(struct foo *P) { return *P; }

  to:

    %1 = type { i64, i64 }

    define %1 @test(%struct.foo* %P) nounwind {
    entry:
      %retval = alloca %struct.foo, align 8           ; <%struct.foo*> [#uses=2]
      %P.addr = alloca %struct.foo*, align 8          ; <%struct.foo**> [#uses=2]
      store %struct.foo* %P, %struct.foo** %P.addr
      %tmp = load %struct.foo** %P.addr               ; <%struct.foo*> [#uses=1]
      %tmp1 = bitcast %struct.foo* %retval to i8*     ; <i8*> [#uses=1]
      %tmp2 = bitcast %struct.foo* %tmp to i8*        ; <i8*> [#uses=1]
      call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 16, i32 8, i1 false)
      %0 = bitcast %struct.foo* %retval to %1*        ; <%1*> [#uses=1]
      %1 = load %1* %0, align 1                       ; <%1> [#uses=1]
      ret %1 %1
    }

  We now get the result more type safe, with:

    define %struct.foo @test(%struct.foo* %P) nounwind {
    entry:
      %retval = alloca %struct.foo, align 8           ; <%struct.foo*> [#uses=2]
      %P.addr = alloca %struct.foo*, align 8          ; <%struct.foo**> [#uses=2]
      store %struct.foo* %P, %struct.foo** %P.addr
      %tmp = load %struct.foo** %P.addr               ; <%struct.foo*> [#uses=1]
      %tmp1 = bitcast %struct.foo* %retval to i8*     ; <i8*> [#uses=1]
      %tmp2 = bitcast %struct.foo* %tmp to i8*        ; <i8*> [#uses=1]
      call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 16, i32 8, i1 false)
      %0 = load %struct.foo* %retval                  ; <%struct.foo> [#uses=1]
      ret %struct.foo %0
    }

  That memcpy is completely terrible, but I don't know how to fix it.
  llvm-svn: 109729
* sink preferred type stuff lower.  It's possible that this might
  Chris Lattner, 2010-07-29 (1 file, -27/+33)
  improve codegen for vaarg or something, because its codepath is getting
  preferred types now.
  llvm-svn: 109728
* dissolve some more complexity: make the x86-64 abi lowering code
  Chris Lattner, 2010-07-29 (1 file, -30/+10)
  compute its own preferred types instead of having CGT compute them then pass
  them (circuitously) down into ABIInfo.
  llvm-svn: 109726
* simplify Get8ByteTypeAtOffset by making it a member of X86_64ABIInfo
  Chris Lattner, 2010-07-29 (1 file, -24/+20)
  llvm-svn: 109724
* now that ABIInfo depends on CGT, it has trivial access to such
  Chris Lattner, 2010-07-29 (1 file, -212/+158)
  things as TargetData, ASTContext, LLVMContext etc.  Stop passing them through
  so many APIs.
  llvm-svn: 109723
* cave in to reality and make ABIInfo depend on CodeGenTypes.
  Chris Lattner, 2010-07-29 (1 file, -61/+90)
  This will simplify a bunch of code, coming up next.
  llvm-svn: 109722
* pass argument vectors in a type that corresponds to the user type if
  Chris Lattner, 2010-07-28 (1 file, -2/+14)
  possible.  This improves the example to pass <4 x float> instead of
  <2 x double> but we still get awful code, and still don't get the return
  value right.
  llvm-svn: 109700
* fix some break statements to be formatted more consistently,
  Chris Lattner, 2010-07-28 (1 file, -16/+7)
  remove some now-dead code.
  llvm-svn: 109690
* use Get8ByteTypeAtOffset for the return value path as well so we
  Chris Lattner, 2010-07-28 (1 file, -60/+63)
  don't get errors similar to PR7714 on the return path.
  llvm-svn: 109689
* refactor the autosizing code, eliminating duplication
  Chris Lattner, 2010-07-28 (1 file, -58/+53)
  and making Get8ByteTypeAtOffset always succeed and documented.
  llvm-svn: 109685
* fix PR7714 by not referencing off the end of a struct when passed by value in
  Chris Lattner, 2010-07-28 (1 file, -7/+39)
  x86-64 abi.  This also improves codegen.  Some refactoring of this code is
  still needed.
  llvm-svn: 109681
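  For illustration, not part of the commit and not the PR7714 testcase, but the
  same hazard: a 12-byte struct passed by value.  Coercing its second eightbyte
  to a full 64-bit load would read 4 bytes past the end of the object, which is
  exactly the kind of over-read the fix avoids.

    /* Hypothetical example: sizeof(struct s12) == 12 on x86-64. */
    struct s12 { float a, b, c; };

    float take(struct s12 s) { return s.c; }  /* bytes 8..11 hold the only data
                                                  in the second eightbyte */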
* CodeGen: Tweak ABI handling for Minix, patch by Kees van Reeuwijk!
  Daniel Dunbar, 2010-07-15 (1 file, -1/+0)
  llvm-svn: 108423
* add driver support for minix, patch by Kees van Reeuwijk
  Chris Lattner, 2010-07-07 (1 file, -0/+1)
  from PR7583
  llvm-svn: 107788
* Reapply:
  Chris Lattner, 2010-06-30 (1 file, -2/+1)
  r107173, "fix PR7519: after thrashing around and remembering how all this
  stuff"
  r107216, "fix PR7523, which was caused by the ABI code calling ConvertType
  instead"

  This includes a fix to make ConvertTypeForMem handle the "recursive" case,
  and call it as such when lowering function types which have an indirect
  result.
  llvm-svn: 107310
* Revert r107173, "fix PR7519: after thrashing around and remembering how all
  Daniel Dunbar, 2010-06-30 (1 file, -1/+2)
  this stuff", it broke bootstrap.
  llvm-svn: 107232
* fix PR7519: after thrashing around and remembering how all this stuff
  Chris Lattner, 2010-06-29 (1 file, -2/+1)
  works, the fix is quite simple: just make sure to call ConvertTypeRecursive
  when the function type being lowered is in the midst of ConvertType.
  llvm-svn: 107173
* Change X86_64ABIInfo to have ASTContext and TargetData ivars to
  Chris Lattner, 2010-06-29 (1 file, -43/+84)
  avoid passing ASTContext down through all the methods it has.

  When classifying an argument, or argument piece, as INTEGER, check to see if
  we have a pointer at exactly the same offset in the preferred type.  If so,
  use that pointer type instead of i64.  This allows us to compile a function
  taking a stringref into something like this:

    define i8* @foo(i64 %D.coerce0, i8* %D.coerce1) nounwind ssp {
    entry:
      %D = alloca %struct.DeclGroup, align 8          ; <%struct.DeclGroup*> [#uses=4]
      %0 = getelementptr %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
      store i64 %D.coerce0, i64* %0
      %1 = getelementptr %struct.DeclGroup* %D, i32 0, i32 1 ; <i8**> [#uses=1]
      store i8* %D.coerce1, i8** %1
      %tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
      %tmp1 = load i64* %tmp                          ; <i64> [#uses=1]
      %tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i8**> [#uses=1]
      %tmp3 = load i8** %tmp2                         ; <i8*> [#uses=1]
      %add.ptr = getelementptr inbounds i8* %tmp3, i64 %tmp1 ; <i8*> [#uses=1]
      ret i8* %add.ptr
    }

  instead of this:

    define i8* @foo(i64 %D.coerce0, i64 %D.coerce1) nounwind ssp {
    entry:
      %D = alloca %struct.DeclGroup, align 8          ; <%struct.DeclGroup*> [#uses=3]
      %0 = insertvalue %0 undef, i64 %D.coerce0, 0    ; <%0> [#uses=1]
      %1 = insertvalue %0 %0, i64 %D.coerce1, 1       ; <%0> [#uses=1]
      %2 = bitcast %struct.DeclGroup* %D to %0*       ; <%0*> [#uses=1]
      store %0 %1, %0* %2, align 1
      %tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
      %tmp1 = load i64* %tmp                          ; <i64> [#uses=1]
      %tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i8**> [#uses=1]
      %tmp3 = load i8** %tmp2                         ; <i8*> [#uses=1]
      %add.ptr = getelementptr inbounds i8* %tmp3, i64 %tmp1 ; <i8*> [#uses=1]
      ret i8* %add.ptr
    }

  This implements rdar://7375902 - [codegen quality] clang x86-64 ABI lowering
  code punishing StringRef
  llvm-svn: 107123
* plumb preferred types down into X86_64ABIInfo::classifyArgumentType,
  Chris Lattner, 2010-06-29 (1 file, -4/+14)
  no functionality change.
  llvm-svn: 107115
* Pass the LLVM IR version of argument types down into computeInfo.
  Chris Lattner, 2010-06-29 (1 file, -8/+24)
  It is somewhat annoying to do this at this level, but it avoids having
  ABIInfo depend on CodeGenTypes for a hint.  Nothing is using this yet, so no
  functionality change.
  llvm-svn: 107111
* pass/return structs of char and short as i8/i16 to avoid
  Chris Lattner, 2010-06-28 (1 file, -5/+9)
  awful through-memory coercion, just like we do for i32 now.
  llvm-svn: 107078
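  For illustration, not part of the commit: the kind of single-char and
  single-short wrappers this covers.  Passing them directly as i8/i16 avoids a
  round trip through memory just to widen them; the names below are made up.

    /* Hypothetical example: small single-field structs passed by value. */
    struct c1 { char  c; };
    struct s1 { short s; };

    char  get_c(struct c1 v) { return v.c; }  /* expected to pass as i8  */
    short get_s(struct s1 v) { return v.s; }  /* expected to pass as i16 */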
* more tidying up.
  Chris Lattner, 2010-06-28 (1 file, -32/+45)
  llvm-svn: 107076
* random acts of tidying.
  Chris Lattner, 2010-06-28 (1 file, -28/+47)
  llvm-svn: 107050
* X86-64:
  Chris Lattner, 2010-06-28 (1 file, -0/+12)
  pass/return structs of float/int as float/i32 instead of double/i64 to make
  the code generated for ABI cleaner.  Passing in the low part of a double is
  the same as passing in a float.

  For example, we now compile:

    struct DeclGroup { float NumDecls; };
    float foo(DeclGroup D);
    void bar(DeclGroup *D) {
      foo(*D);
    }

  into:

    %struct.DeclGroup = type { float }

    define void @_Z3barP9DeclGroup(%struct.DeclGroup* %D) nounwind {
    entry:
      %D.addr = alloca %struct.DeclGroup*, align 8    ; <%struct.DeclGroup**> [#uses=2]
      %agg.tmp = alloca %struct.DeclGroup, align 4    ; <%struct.DeclGroup*> [#uses=2]
      store %struct.DeclGroup* %D, %struct.DeclGroup** %D.addr
      %tmp = load %struct.DeclGroup** %D.addr         ; <%struct.DeclGroup*> [#uses=1]
      %tmp1 = bitcast %struct.DeclGroup* %agg.tmp to i8* ; <i8*> [#uses=1]
      %tmp2 = bitcast %struct.DeclGroup* %tmp to i8*  ; <i8*> [#uses=1]
      call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 4, i32 4, i1 false)
      %coerce.dive = getelementptr %struct.DeclGroup* %agg.tmp, i32 0, i32 0 ; <float*> [#uses=1]
      %0 = load float* %coerce.dive, align 1          ; <float> [#uses=1]
      %call = call float @_Z3foo9DeclGroup(float %0)  ; <float> [#uses=0]
      ret void
    }

  instead of:

    %struct.DeclGroup = type { float }

    define void @_Z3barP9DeclGroup(%struct.DeclGroup* %D) nounwind {
    entry:
      %D.addr = alloca %struct.DeclGroup*, align 8    ; <%struct.DeclGroup**> [#uses=2]
      %agg.tmp = alloca %struct.DeclGroup, align 4    ; <%struct.DeclGroup*> [#uses=2]
      %tmp3 = alloca double                           ; <double*> [#uses=2]
      store %struct.DeclGroup* %D, %struct.DeclGroup** %D.addr
      %tmp = load %struct.DeclGroup** %D.addr         ; <%struct.DeclGroup*> [#uses=1]
      %tmp1 = bitcast %struct.DeclGroup* %agg.tmp to i8* ; <i8*> [#uses=1]
      %tmp2 = bitcast %struct.DeclGroup* %tmp to i8*  ; <i8*> [#uses=1]
      call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 4, i32 4, i1 false)
      %coerce.dive = getelementptr %struct.DeclGroup* %agg.tmp, i32 0, i32 0 ; <float*> [#uses=1]
      %0 = bitcast double* %tmp3 to float*            ; <float*> [#uses=1]
      %1 = load float* %coerce.dive                   ; <float> [#uses=1]
      store float %1, float* %0, align 1
      %2 = load double* %tmp3                         ; <double> [#uses=1]
      %call = call float @_Z3foo9DeclGroup(double %2) ; <float> [#uses=0]
      ret void
    }

  which is this machine code (at -O0):

    __Z3barP9DeclGroup:
        subq    $24, %rsp
        movq    %rdi, 16(%rsp)
        movq    16(%rsp), %rdi
        leaq    8(%rsp), %rax
        movl    (%rdi), %ecx
        movl    %ecx, (%rax)
        movss   8(%rsp), %xmm0
        callq   __Z3foo9DeclGroup
        addq    $24, %rsp
        ret

  vs this:

    __Z3barP9DeclGroup:
        subq    $24, %rsp
        movq    %rdi, 16(%rsp)
        movq    16(%rsp), %rdi
        leaq    8(%rsp), %rax
        movl    (%rdi), %ecx
        movl    %ecx, (%rax)
        movss   8(%rsp), %xmm0
        movss   %xmm0, (%rsp)
        movsd   (%rsp), %xmm0
        callq   __Z3foo9DeclGroup
        addq    $24, %rsp
        ret

  At -O3, it is the difference between this now:

    __Z3barP9DeclGroup:
        movss   (%rdi), %xmm0
        jmp     __Z3foo9DeclGroup  # TAILCALL

  vs this before:

    __Z3barP9DeclGroup:
        movl    (%rdi), %eax
        movd    %rax, %xmm0
        jmp     __Z3foo9DeclGroup  # TAILCALL

  llvm-svn: 107048
* finally get around to doing a significant cleanup to irgen:
  Chris Lattner, 2010-06-27 (1 file, -18/+13)
  have CGF create and make accessible standard int32, int64 and intptr types.
  This fixes a ton of 80 column violations introduced by LLVMContextification
  and cleans up stuff a lot.
  llvm-svn: 106977
* use more efficient type comparison predicates.
  Chris Lattner, 2010-06-26 (1 file, -2/+2)
  llvm-svn: 106958
* Change the test for which ABI/CC to use on ARM to be based on the environment
  Rafael Espindola, 2010-06-16 (1 file, -3/+4)
  (the last argument of the triple).
  llvm-svn: 106131
* Don't set the calling convention for ARM if it is already the default.
  Rafael Espindola, 2010-06-16 (1 file, -3/+11)
  llvm-svn: 106106
* Give Type::isIntegralType() an ASTContext parameter, so that it
  Douglas Gregor, 2010-06-16 (1 file, -1/+1)
  provides C "integer type" semantics in C and C++ "integral type" semantics in
  C++.

  Note that I still need to update isIntegerType (and possibly other
  predicates) using the same approach I've taken for isIntegralType().  The two
  should have the same meaning, but currently don't (!).
  llvm-svn: 106074