summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorChris Lattner <sabre@nondot.org>2006-12-11 00:44:03 +0000
committerChris Lattner <sabre@nondot.org>2006-12-11 00:44:03 +0000
commitfeeb9c7be54f1468b316dc043d2067a82c403770 (patch)
tree5848ce884334bb02062d5307df1b9ab7415c1507
parent23f4b68f7ee0594187f6fab07f611b44cf893420 (diff)
downloadbcm5719-llvm-feeb9c7be54f1468b316dc043d2067a82c403770.tar.gz
bcm5719-llvm-feeb9c7be54f1468b316dc043d2067a82c403770.zip
Evan implemented the machineinstr improvements.
The rot example works if the type is cast to ubyte. Move sroa examples together and upgrade them to HEAD syntax. llvm-svn: 32423
-rw-r--r-- llvm/lib/Target/README.txt | 69
1 file changed, 23 insertions(+), 46 deletions(-)
diff --git a/llvm/lib/Target/README.txt b/llvm/lib/Target/README.txt
index c04e06fbfe0..9738573a8d5 100644
--- a/llvm/lib/Target/README.txt
+++ b/llvm/lib/Target/README.txt
@@ -2,20 +2,6 @@ Target Independent Opportunities:
//===---------------------------------------------------------------------===//
-We should make the following changes to clean up MachineInstr:
-
-1. Add an Opcode field to TargetInstrDescriptor, so you can tell the opcode of
- an instruction with just a TargetInstrDescriptor*.
-2. Remove the Opcode field from MachineInstr, replacing it with a
- TargetInstrDescriptor*.
-3. Getting information about a machine instr then becomes:
- MI->getInfo()->isTwoAddress()
- instead of:
- const TargetInstrInfo &TII = ...
- TII.isTwoAddrInstr(MI->getOpcode())
-
-//===---------------------------------------------------------------------===//
-
With the recent changes to make the implicit def/use set explicit in
machineinstrs, we should change the target descriptions for 'call' instructions
so that the .td files don't list all the call-clobbered registers as implicit
@@ -136,16 +122,6 @@ for 1,2,4,8 bytes.
//===---------------------------------------------------------------------===//
-This code:
-int rot(unsigned char b) { int a = ((b>>1) ^ (b<<7)) & 0xff; return a; }
-
-Can be improved in two ways:
-
-1. The instcombiner should eliminate the type conversions.
-2. The X86 backend should turn this into a rotate by one bit.
-
-//===---------------------------------------------------------------------===//
-
Add LSR exit value substitution. It'll probably be a win for Ackermann, etc.
//===---------------------------------------------------------------------===//
@@ -216,13 +192,13 @@ Scalar Repl cannot currently promote this testcase to 'ret long cst':
%struct.X = type { int, int }
%struct.Y = type { %struct.X }
ulong %bar() {
- %retval = alloca %struct.Y, align 8 ; <%struct.Y*> [#uses=3]
+ %retval = alloca %struct.Y, align 8
%tmp12 = getelementptr %struct.Y* %retval, int 0, uint 0, uint 0
store int 0, int* %tmp12
%tmp15 = getelementptr %struct.Y* %retval, int 0, uint 0, uint 1
store int 1, int* %tmp15
- %retval = cast %struct.Y* %retval to ulong*
- %retval = load ulong* %retval ; <ulong> [#uses=1]
+ %retval = bitcast %struct.Y* %retval to ulong*
+ %retval = load ulong* %retval
ret ulong %retval
}
@@ -230,6 +206,26 @@ it should be extended to do so.
//===---------------------------------------------------------------------===//
+-scalarrepl should promote this to be a vector scalar.
+
+ %struct..0anon = type { <4 x float> }
+
+implementation ; Functions:
+
+void %test1(<4 x float> %V, float* %P) {
+ %u = alloca %struct..0anon, align 16
+ %tmp = getelementptr %struct..0anon* %u, int 0, uint 0
+ store <4 x float> %V, <4 x float>* %tmp
+ %tmp1 = bitcast %struct..0anon* %u to [4 x float]*
+ %tmp = getelementptr [4 x float]* %tmp1, int 0, int 1
+ %tmp = load float* %tmp
+ %tmp3 = mul float %tmp, 2.000000e+00
+ store float %tmp3, float* %P
+ ret void
+}
+
+//===---------------------------------------------------------------------===//
+
Turn this into a single byte store with no load (the other 3 bytes are
unmodified):
@@ -327,25 +323,6 @@ unsigned short read_16_be(const unsigned char *adr) {
//===---------------------------------------------------------------------===//
--scalarrepl should promote this to be a vector scalar.
-
- %struct..0anon = type { <4 x float> }
-implementation ; Functions:
-void %test1(<4 x float> %V, float* %P) {
-entry:
- %u = alloca %struct..0anon, align 16 ; <%struct..0anon*> [#uses=2]
- %tmp = getelementptr %struct..0anon* %u, int 0, uint 0 ; <<4 x float>*> [#uses=1]
- store <4 x float> %V, <4 x float>* %tmp
- %tmp1 = cast %struct..0anon* %u to [4 x float]* ; <[4 x float]*> [#uses=1]
- %tmp = getelementptr [4 x float]* %tmp1, int 0, int 1 ; <float*> [#uses=1]
- %tmp = load float* %tmp ; <float> [#uses=1]
- %tmp3 = mul float %tmp, 2.000000e+00 ; <float> [#uses=1]
- store float %tmp3, float* %P
- ret void
-}
-
-//===---------------------------------------------------------------------===//
-
-instcombine should handle this transform:
setcc (sdiv X / C1 ), C2
when X, C1, and C2 are unsigned. Similarly for udiv and signed operands.
OpenPOWER on IntegriCloud