doesn't mangle array parameters right), but I think that should be fixed
in Sema (Doug, John, what do you think?).
Also, stub out the remaining mangleType() routines.
llvm-svn: 107264
|
ConvertType instead", it is part of a bootstrap-breaking sequence.
llvm-svn: 107231
|
of ConvertTypeRecursive when it needed to in a few cases, causing pointer
types to get resolved at the wrong time.
llvm-svn: 107216
|
would trigger an extra method call).
- While in the area, I also changed Clang to not emit an unnecessary load from
'x' in cases like 'y = (x = 1)'.
llvm-svn: 107210
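A minimal sketch of that last case (illustrative; the function name is hypothetical, not from the commit):

int x, y;
void assign_chain(void) {
  /* The value of 'x = 1' is the constant just stored, so it can be
     reused as the value assigned to 'y' without reloading 'x'. */
  y = (x = 1);
}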
|
llvm-svn: 107153
|
avoid passing ASTContext down through all the methods it has.
When classifying an argument, or argument piece, as INTEGER, check
to see if we have a pointer at exactly the same offset in the
preferred type. If so, use that pointer type instead of i64. This
allows us to compile a function taking a StringRef into something
like this:
define i8* @foo(i64 %D.coerce0, i8* %D.coerce1) nounwind ssp {
entry:
%D = alloca %struct.DeclGroup, align 8 ; <%struct.DeclGroup*> [#uses=4]
%0 = getelementptr %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
store i64 %D.coerce0, i64* %0
%1 = getelementptr %struct.DeclGroup* %D, i32 0, i32 1 ; <i8**> [#uses=1]
store i8* %D.coerce1, i8** %1
%tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
%tmp1 = load i64* %tmp ; <i64> [#uses=1]
%tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i8**> [#uses=1]
%tmp3 = load i8** %tmp2 ; <i8*> [#uses=1]
%add.ptr = getelementptr inbounds i8* %tmp3, i64 %tmp1 ; <i8*> [#uses=1]
ret i8* %add.ptr
}
instead of this:
define i8* @foo(i64 %D.coerce0, i64 %D.coerce1) nounwind ssp {
entry:
%D = alloca %struct.DeclGroup, align 8 ; <%struct.DeclGroup*> [#uses=3]
%0 = insertvalue %0 undef, i64 %D.coerce0, 0 ; <%0> [#uses=1]
%1 = insertvalue %0 %0, i64 %D.coerce1, 1 ; <%0> [#uses=1]
%2 = bitcast %struct.DeclGroup* %D to %0* ; <%0*> [#uses=1]
store %0 %1, %0* %2, align 1
%tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
%tmp1 = load i64* %tmp ; <i64> [#uses=1]
%tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i8**> [#uses=1]
%tmp3 = load i8** %tmp2 ; <i8*> [#uses=1]
%add.ptr = getelementptr inbounds i8* %tmp3, i64 %tmp1 ; <i8*> [#uses=1]
ret i8* %add.ptr
}
This implements rdar://7375902 - [codegen quality] clang x86-64 ABI lowering code punishing StringRef
llvm-svn: 107123
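For reference, source of roughly this shape produces the IR above (a sketch reconstructed from the IR; the commit's actual test case may differ):

struct DeclGroup { long NumDecls; char *Y; };  /* lowers to { i64, i8* } */
char *foo(struct DeclGroup D) {
  /* pointer field plus integer field: the 'add.ptr' GEP in the IR above */
  return D.Y + D.NumDecls;
}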
|
llvm-svn: 107105
|
is an FCA (first-class aggregate) to pass each of the elements as
individual scalars. This produces code that fast isel is less likely
to reject and that is easier on the optimizers.
For example, before we would compile:
struct DeclGroup { long NumDecls; char * Y; };
char * foo(DeclGroup D) {
return D.NumDecls+D.Y;
}
to:
%struct.DeclGroup = type { i64, i64 }
define i64 @_Z3foo9DeclGroup(%struct.DeclGroup) nounwind {
entry:
%D = alloca %struct.DeclGroup, align 8 ; <%struct.DeclGroup*> [#uses=3]
store %struct.DeclGroup %0, %struct.DeclGroup* %D, align 1
%tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
%tmp1 = load i64* %tmp ; <i64> [#uses=1]
%tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i64*> [#uses=1]
%tmp3 = load i64* %tmp2 ; <i64> [#uses=1]
%add = add nsw i64 %tmp1, %tmp3 ; <i64> [#uses=1]
ret i64 %add
}
Now we get:
%0 = type { i64, i64 }
%struct.DeclGroup = type { i64, i8* }
define i8* @_Z3foo9DeclGroup(i64, i64) nounwind {
entry:
%D = alloca %struct.DeclGroup, align 8 ; <%struct.DeclGroup*> [#uses=3]
%2 = insertvalue %0 undef, i64 %0, 0 ; <%0> [#uses=1]
%3 = insertvalue %0 %2, i64 %1, 1 ; <%0> [#uses=1]
%4 = bitcast %struct.DeclGroup* %D to %0* ; <%0*> [#uses=1]
store %0 %3, %0* %4, align 1
%tmp = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 0 ; <i64*> [#uses=1]
%tmp1 = load i64* %tmp ; <i64> [#uses=1]
%tmp2 = getelementptr inbounds %struct.DeclGroup* %D, i32 0, i32 1 ; <i8**> [#uses=1]
%tmp3 = load i8** %tmp2 ; <i8*> [#uses=1]
%add.ptr = getelementptr inbounds i8* %tmp3, i64 %tmp1 ; <i8*> [#uses=1]
ret i8* %add.ptr
}
Elimination of the FCA inside the function is still to come.
llvm-svn: 107099
|
awful through-memory coercion, just like we do for i32 now.
llvm-svn: 107078
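A sketch of the kind of case this affects (assumed; the exact types in the commit's tests may differ):

struct S { unsigned short n; };
unsigned short get(struct S s) {
  /* With direct coercion the value moves as an i16, rather than being
     spilled through a temporary of a wider type in memory. */
  return s.n;
}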
|
pass/return structs of float/int as float/i32 instead of double/i64
to make the code generated for the ABI cleaner. Passing in the low part
of a double is the same as passing in a float.
For example, we now compile:
struct DeclGroup { float NumDecls; };
float foo(DeclGroup D);
void bar(DeclGroup *D) {
foo(*D);
}
into:
%struct.DeclGroup = type { float }
define void @_Z3barP9DeclGroup(%struct.DeclGroup* %D) nounwind {
entry:
%D.addr = alloca %struct.DeclGroup*, align 8 ; <%struct.DeclGroup**> [#uses=2]
%agg.tmp = alloca %struct.DeclGroup, align 4 ; <%struct.DeclGroup*> [#uses=2]
store %struct.DeclGroup* %D, %struct.DeclGroup** %D.addr
%tmp = load %struct.DeclGroup** %D.addr ; <%struct.DeclGroup*> [#uses=1]
%tmp1 = bitcast %struct.DeclGroup* %agg.tmp to i8* ; <i8*> [#uses=1]
%tmp2 = bitcast %struct.DeclGroup* %tmp to i8* ; <i8*> [#uses=1]
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 4, i32 4, i1 false)
%coerce.dive = getelementptr %struct.DeclGroup* %agg.tmp, i32 0, i32 0 ; <float*> [#uses=1]
%0 = load float* %coerce.dive, align 1 ; <float> [#uses=1]
%call = call float @_Z3foo9DeclGroup(float %0) ; <float> [#uses=0]
ret void
}
instead of:
%struct.DeclGroup = type { float }
define void @_Z3barP9DeclGroup(%struct.DeclGroup* %D) nounwind {
entry:
%D.addr = alloca %struct.DeclGroup*, align 8 ; <%struct.DeclGroup**> [#uses=2]
%agg.tmp = alloca %struct.DeclGroup, align 4 ; <%struct.DeclGroup*> [#uses=2]
%tmp3 = alloca double ; <double*> [#uses=2]
store %struct.DeclGroup* %D, %struct.DeclGroup** %D.addr
%tmp = load %struct.DeclGroup** %D.addr ; <%struct.DeclGroup*> [#uses=1]
%tmp1 = bitcast %struct.DeclGroup* %agg.tmp to i8* ; <i8*> [#uses=1]
%tmp2 = bitcast %struct.DeclGroup* %tmp to i8* ; <i8*> [#uses=1]
call void @llvm.memcpy.p0i8.p0i8.i64(i8* %tmp1, i8* %tmp2, i64 4, i32 4, i1 false)
%coerce.dive = getelementptr %struct.DeclGroup* %agg.tmp, i32 0, i32 0 ; <float*> [#uses=1]
%0 = bitcast double* %tmp3 to float* ; <float*> [#uses=1]
%1 = load float* %coerce.dive ; <float> [#uses=1]
store float %1, float* %0, align 1
%2 = load double* %tmp3 ; <double> [#uses=1]
%call = call float @_Z3foo9DeclGroup(double %2) ; <float> [#uses=0]
ret void
}
which is this machine code (at -O0):
__Z3barP9DeclGroup:
subq $24, %rsp
movq %rdi, 16(%rsp)
movq 16(%rsp), %rdi
leaq 8(%rsp), %rax
movl (%rdi), %ecx
movl %ecx, (%rax)
movss 8(%rsp), %xmm0
callq __Z3foo9DeclGroup
addq $24, %rsp
ret
vs this:
__Z3barP9DeclGroup:
subq $24, %rsp
movq %rdi, 16(%rsp)
movq 16(%rsp), %rdi
leaq 8(%rsp), %rax
movl (%rdi), %ecx
movl %ecx, (%rax)
movss 8(%rsp), %xmm0
movss %xmm0, (%rsp)
movsd (%rsp), %xmm0
callq __Z3foo9DeclGroup
addq $24, %rsp
ret
At -O3, it is the difference between this now:
__Z3barP9DeclGroup:
movss (%rdi), %xmm0
jmp __Z3foo9DeclGroup # TAILCALL
vs this before:
__Z3barP9DeclGroup:
movl (%rdi), %eax
movd %rax, %xmm0
jmp __Z3foo9DeclGroup # TAILCALL
llvm-svn: 107048
|
llvm-svn: 107024
|
ErrorUnsupported call when binding a global reference to a non-lvalue. Fixes PR7326.
llvm-svn: 106983
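An illustrative instance of that phrase (assumed; not necessarily the PR's test case):

const int &global_ref = 42;  // binds a global reference to a non-lvalue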
|
load/store nonsense in the epilog. For example, for:
int foo(int X) {
int A[100];
return A[X];
}
we used to generate:
%arrayidx = getelementptr inbounds [100 x i32]* %A, i32 0, i64 %idxprom ; <i32*> [#uses=1]
%tmp1 = load i32* %arrayidx ; <i32> [#uses=1]
store i32 %tmp1, i32* %retval
%0 = load i32* %retval ; <i32> [#uses=1]
ret i32 %0
}
which codegen'd to this code:
_foo: ## @foo
## BB#0: ## %entry
subq $408, %rsp ## imm = 0x198
movl %edi, 400(%rsp)
movl 400(%rsp), %edi
movslq %edi, %rax
movl (%rsp,%rax,4), %edi
movl %edi, 404(%rsp)
movl 404(%rsp), %eax
addq $408, %rsp ## imm = 0x198
ret
Now we generate:
%arrayidx = getelementptr inbounds [100 x i32]* %A, i32 0, i64 %idxprom ; <i32*> [#uses=1]
%tmp1 = load i32* %arrayidx ; <i32> [#uses=1]
ret i32 %tmp1
}
and:
_foo: ## @foo
## BB#0: ## %entry
subq $408, %rsp ## imm = 0x198
movl %edi, 404(%rsp)
movl 404(%rsp), %edi
movslq %edi, %rax
movl (%rsp,%rax,4), %eax
addq $408, %rsp ## imm = 0x198
ret
This actually does matter, cutting out 2000 lines of IR from CGStmt.ll
for example.
Another interesting effect is that altivec.h functions which are dead
now get dce'd by the inliner. Hence all the changes to
builtins-ppc-altivec.c to ensure the calls aren't dead.
llvm-svn: 106970
|
While I'm in there, adjust pointer-to-member adjustments as well.
llvm-svn: 106955
|
Also, fix mangling of throw specs. Turns out MSVC totally ignores throw
specs when mangling names.
llvm-svn: 106937
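For instance (a sketch; the mangled form shown is for a 32-bit __cdecl function and is inferred from the MS scheme, not taken from the commit's tests):

void f() throw(int);
// Under the MS ABI this mangles to ?f@@YAXXZ, exactly as if it were
// declared plain 'void f()': the throw spec contributes nothing.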
|
llvm-svn: 106898
|
llvm-svn: 106887
|
when a block literal is declared inside a ctor/dtor.
Fixes radar 8096995.
llvm-svn: 106700
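The shape of the affected code, as a sketch (requires -fblocks; illustrative only):

struct S {
  S() {
    // a block literal declared inside a constructor
    void (^blk)(void) = ^{};
    blk();
  }
};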
|
declaration have default visibility even under
-fvisibility=hidden. Fixes <rdar://problem/8109763>.
llvm-svn: 106440
|
particular sequence causes its definition to not be generated in the object file.
(fixes radar 8071804).
llvm-svn: 106424
|
conservative for static variables in templated classes.
llvm-svn: 106385
|
-mconstructor-aliases by using a WeakVH instead of a raw pointer.
llvm-svn: 106384
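A minimal sketch of why a WeakVH helps (assumed; this is not the commit's code): the handle nulls itself when the tracked value is deleted, where a raw pointer would be left dangling.

#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ValueHandle.h"  // llvm/Support/ValueHandle.h in trees of this era
#include <cassert>

void weakvh_demo() {  // hypothetical
  llvm::LLVMContext Ctx;
  llvm::Module M("m", Ctx);
  llvm::Function *F = llvm::Function::Create(
      llvm::FunctionType::get(llvm::Type::getVoidTy(Ctx), false),
      llvm::GlobalValue::ExternalLinkage, "alias_target", &M);
  llvm::WeakVH Handle(F);
  F->eraseFromParent();       // the Function is destroyed...
  assert(Handle == nullptr);  // ...and the handle was cleared, not left dangling
}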
|
Also, test that static members with default visibility in a struct have the
right mangling.
llvm-svn: 106276
|
Patch by Nico Weber (PR7390).
llvm-svn: 106242
|
llvm-svn: 106211
|
(the last component of the triple, e.g. gnu in i386-pc-linux-gnu).
llvm-svn: 106131
|
llvm-svn: 106118
|
llvm-svn: 106081
|
added a new test case (related to radar 8070772).
llvm-svn: 106067
|
Objective-C++ class objects which have GC'able ObjC object
pointers and need to use ObjC's objc_memmove_collectable
API (radar 8070772).
llvm-svn: 106061
|
llvm-svn: 106003
|
handle visibility properly. Fixes <rdar://problem/8091955>.
llvm-svn: 105977
|
mangling for types, where the <source-name> is ASxxx (xxx is the
address-space number).
llvm-svn: 105975
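As a sketch of the scheme (the concrete mangling below is inferred from the description, not quoted from the commit):

// int qualified with address space 1 picks up a vendor-extended
// qualifier whose <source-name> is "AS1", i.e. U3AS1, so the type of
// this pointer would mangle as PU3AS1i.
__attribute__((address_space(1))) int *p;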
|
- Mangle qualifiers.
- Start mangling variables' types into the name. A variable declared with a
builtin type should now mangle properly.
llvm-svn: 105931
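For example (illustrative; the mangled forms are inferred from the MS scheme rather than taken from the commit's tests):

int x;               // mangles to ?x@@3HA: 3 = global variable, H = int, A = unqualified
extern const int cx; // mangles to ?cx@@3HB: B marks the const qualifier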
|
- Don't mangle static variables at global scope.
- Add support for mangling builtin types. This will be used later.
llvm-svn: 105881
|
namespace, too.
llvm-svn: 105809
|
(but not their types; that's later).
NOTE: Right now, variables in the global namespace don't get mangled, even
though they're supposed to be. This is because the default mangler
implements shouldMangleDeclName() in a way that tells Clang not to mangle
them. This will be fixed in a later patch.
llvm-svn: 105805
|
llvm-svn: 105668
|
llvm-svn: 105660
|
llvm-svn: 105651
|
entry previously constructed via the copy constructor.
llvm-svn: 105641
|
copy-in of C++ class objects into blocks.
llvm-svn: 105622
|
llvm-svn: 105606
|
llvm-svn: 105594
|
llvm-svn: 105500
|
class object in blocks and carry it to IRGen.
llvm-svn: 105487
|
setting up a block's descriptor. This is ongoing work to
support C++-specific issues in setting up blocks'
various APIs.
llvm-svn: 105469
|
llvm-svn: 105434
|
llvm-svn: 105433
|
in -fno-exceptions mode.
llvm-svn: 105432