diff options
author | Chris Lattner <sabre@nondot.org> | 2010-09-05 02:18:34 +0000 |
---|---|---|
committer | Chris Lattner <sabre@nondot.org> | 2010-09-05 02:18:34 +0000 |
commit | eeba0c73e58cf1e5cdf3177627644820eb748b9c (patch) | |
tree | 40eb18031b8f7df147c9d9daf91f7cee68b0353c /llvm/lib/Transforms/IPO/MergeFunctions.cpp | |
parent | cbf93f39592428fc01f0c92196c6abf35dc4ed42 (diff) | |
download | bcm5719-llvm-eeba0c73e58cf1e5cdf3177627644820eb748b9c.tar.gz bcm5719-llvm-eeba0c73e58cf1e5cdf3177627644820eb748b9c.zip |
implement rdar://6653118 - fastisel should fold loads where possible.
Since mem2reg isn't run at -O0, we get a ton of reloads from the stack.
For example, this code:
int foo(int x, int y, int z) {
return x+y+z;
}
used to compile into:
_foo: ## @foo
subq $12, %rsp
movl %edi, 8(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movl 8(%rsp), %edx
movl 4(%rsp), %esi
addl %edx, %esi
movl (%rsp), %edx
addl %esi, %edx
movl %edx, %eax
addq $12, %rsp
ret
Now we produce:
_foo: ## @foo
subq $12, %rsp
movl %edi, 8(%rsp)
movl %esi, 4(%rsp)
movl %edx, (%rsp)
movl 8(%rsp), %edx
addl 4(%rsp), %edx ## Folded load
addl (%rsp), %edx ## Folded load
movl %edx, %eax
addq $12, %rsp
ret
Fewer instructions and less register use = faster compiles.
llvm-svn: 113102
Diffstat (limited to 'llvm/lib/Transforms/IPO/MergeFunctions.cpp')
0 files changed, 0 insertions, 0 deletions