author    Eli Friedman <eli.friedman@gmail.com>  2011-06-09 23:02:19 +0000
committer Eli Friedman <eli.friedman@gmail.com>  2011-06-09 23:02:19 +0000
commit    5abfd799001292574f825c39d7a0adb8aa6fd635 (patch)
tree      4e729c76e7212ac6fc6e43d8386f24ca198f6119 /llvm/lib
parent    81512fc1bb79627d05a8ae5f96ccc79b8c1230cb (diff)
Chris fixed this README a while back by changing how clang generates code for structs like the given struct.
llvm-svn: 132815
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/X86/README-X86-64.txt  45
1 file changed, 0 insertions, 45 deletions
diff --git a/llvm/lib/Target/X86/README-X86-64.txt b/llvm/lib/Target/X86/README-X86-64.txt
index e7429a30810..bcfdf0bc56b 100644
--- a/llvm/lib/Target/X86/README-X86-64.txt
+++ b/llvm/lib/Target/X86/README-X86-64.txt
@@ -124,51 +124,6 @@ if we have whole-function selectiondags.
//===---------------------------------------------------------------------===//
-Take the following C code
-(from http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43640):
-
-struct u1
-{
- float x;
- float y;
-};
-
-float foo(struct u1 u)
-{
- return u.x + u.y;
-}
-
-Optimizes to the following IR:
-define float @foo(double %u.0) nounwind readnone {
-entry:
- %tmp8 = bitcast double %u.0 to i64 ; <i64> [#uses=2]
- %tmp6 = trunc i64 %tmp8 to i32 ; <i32> [#uses=1]
- %tmp7 = bitcast i32 %tmp6 to float ; <float> [#uses=1]
- %tmp2 = lshr i64 %tmp8, 32 ; <i64> [#uses=1]
- %tmp3 = trunc i64 %tmp2 to i32 ; <i32> [#uses=1]
- %tmp4 = bitcast i32 %tmp3 to float ; <float> [#uses=1]
- %0 = fadd float %tmp7, %tmp4 ; <float> [#uses=1]
- ret float %0
-}
-
-And current llvm-gcc/clang output:
- movd %xmm0, %rax
- movd %eax, %xmm1
- shrq $32, %rax
- movd %eax, %xmm0
- addss %xmm1, %xmm0
- ret
-
-We really shouldn't move the floats to RAX, only to immediately move them
-straight back to the XMM registers.
-
-There really isn't any good way to handle this purely in IR optimizers; it
-could possibly be handled by changing the output of the frontend, though. It
-would also be feasible to add an x86-specific DAGCombine to optimize the
-bitcast+trunc+(lshr+)bitcast combination.
-
-//===---------------------------------------------------------------------===//
-
Take the following code
(from http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34653):
extern unsigned long table[];
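
For reference, the commit message says clang's codegen for structs like u1 was
changed; below is a minimal sketch of how that kind of frontend lowering
sidesteps the problem in the removed entry, assuming the fix was to pass
struct u1 as a <2 x float> instead of a double (value names and attributes are
illustrative, not actual clang output):

define float @foo(<2 x float> %u.coerce) nounwind readnone {
entry:
  %x = extractelement <2 x float> %u.coerce, i32 0    ; u.x
  %y = extractelement <2 x float> %u.coerce, i32 1    ; u.y
  %sum = fadd float %x, %y
  ret float %sum
}

With both fields kept as lanes of an XMM register, the bitcast+trunc+(lshr+)
bitcast pattern never forms, nothing round-trips through RAX, and the
x86-specific DAGCombine suggested in the removed text is not needed for this
case.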