176.gcc. Note that this is apparently not the only bug miscompiling gcc
though. :(
llvm-svn: 15639
This should go into the 1.3 branch
llvm-svn: 15593
llvm-svn: 15487
llvm-svn: 15410
llvm-svn: 15343
llvm-svn: 15342
assumed that a constant on the RHS of a multiplication was either an
IntConstant or an FPConstant. It checked for an IntConstant and then,
if it did not find one, did a hard cast to an FPConstant. That code
would crash if the RHS were a ConstantExpr that was neither an
IntConstant nor an FPConstant. This version replaces the hard cast
with a dyn_cast. It performs the same way for IntConstants and
FPConstants but does nothing, instead of crashing, for constant
expressions.
The regression test for this change is 2004-07-27-ConstantExprMul.ll.
llvm-svn: 15291
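A minimal, self-contained C++ sketch of the general idea behind the fix above: a checked
cast simply falls through when the operand is some other kind of constant, where an
unchecked cast would crash. The class names and the use of dynamic_cast are illustrative
stand-ins only, not the actual LLVM classes or API.
#include <iostream>
// Stand-in hierarchy for illustration; not the real LLVM constant classes.
struct Constant     { virtual ~Constant() {} };
struct IntConstant  : Constant {};
struct FPConstant   : Constant {};
struct ConstantExpr : Constant {};
// Mirrors the fixed logic: fall through for anything that is neither an
// integer nor a floating-point constant, instead of crashing.
void visitMulRHS(const Constant &RHS) {
  if (dynamic_cast<const IntConstant *>(&RHS))
    std::cout << "integer constant path\n";
  else if (dynamic_cast<const FPConstant *>(&RHS))
    std::cout << "floating-point constant path\n";
  else
    std::cout << "other constant (e.g. a constant expression): do nothing\n";
}
int main() {
  ConstantExpr CE;
  visitMulRHS(CE);   // takes the fall-through branch instead of crashing
  return 0;
}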
llvm-svn: 15276
* Test for whether bits are shifted out during the optimization.
If so, the fold is illegal, though it can be handled explicitly for setne/seteq.
This fixes the miscompilation of 254.gap last night, which was a latent bug
exposed by other optimizer improvements.
llvm-svn: 15085
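A plain C++ illustration of the "bits shifted out" condition above; the shift amount and
constants are made up for this example, not taken from the testcase.
#include <iostream>
int main() {
  // Naively folding (X << 2) == 6 into X == (6 >> 2), i.e. X == 1, is wrong:
  // no value of X << 2 can equal 6, because its low two bits are always zero.
  // When bits would be shifted out like this, the seteq/setne case can still
  // be folded explicitly (here: always false), but the general fold is illegal.
  unsigned X = 1;
  std::cout << ((X << 2) == 6) << '\n';   // 0: the original comparison
  std::cout << (X == (6u >> 2)) << '\n';  // 1: the illegal fold disagrees
  return 0;
}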
"simplify" a bit of code for comparison/and folding
llvm-svn: 15082
the appropriate size. This gives us the ability to eliminate int -> ptr -> int round trips.
llvm-svn: 15063
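A hedged C++ sketch of the round trip being eliminated; it assumes the integer already has
pointer width (uintptr_t here), and relies on the usual platform guarantee that such a
round trip preserves the value.
#include <cassert>
#include <cstdint>
int main() {
  uintptr_t X = 0x1234;
  void *P = reinterpret_cast<void *>(X);         // int -> ptr
  uintptr_t Y = reinterpret_cast<uintptr_t>(P);  // ptr -> int
  assert(Y == X);  // the two casts cancel, so the pair can be removed
  return 0;
}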
llvm-svn: 15029
llvm-svn: 15024
actually care about. Someday when the cast instruction is gone, we can do
better here, but this will do for now. This implements
instcombine/cast.ll:test17/18 as well.
llvm-svn: 15018
llvm-svn: 14972
llvm-svn: 14965
- Replace ConstantPointerRef usage with GlobalValue usage
- Minimize redundant isa<GlobalValue> usage
- Correct isa<Constant> for GlobalValue subclass
llvm-svn: 14950
"load (cast foo)". This allows us to compile C++ code like this:
class Bclass {
public: virtual int operator()() { return 666; }
};
class Dclass: public Bclass {
public: virtual int operator()() { return 667; }
} ;
int main(int argc, char** argv) {
Dclass x;
return x();
}
Into this:
int %main(int %argc, sbyte** %argv) {
entry:
call void %__main( )
ret int 667
}
Instead of this:
int %main(int %argc, sbyte** %argv) {
entry:
%x = alloca "struct.std::bad_typeid" ; <"struct.std::bad_typeid"*> [#uses=3]
call void %__main( )
%tmp.1.i.i = getelementptr "struct.std::bad_typeid"* %x, uint 0, uint 0, uint 0 ; <int (...)***> [#uses=1]
store int (...)** getelementptr ([3 x int (...)*]* %vtable for Bclass, int 0, long 2), int (...)*** %tmp.1.i.i
%tmp.3.i = getelementptr "struct.std::bad_typeid"* %x, int 0, uint 0, uint 0 ; <int (...)***> [#uses=1]
store int (...)** getelementptr ([3 x int (...)*]* %vtable for Dclass, int 0, long 2), int (...)*** %tmp.3.i
%tmp.5 = load int ("struct.std::bad_typeid"*)** cast (int (...)** getelementptr ([3 x int (...)*]* %vtable for Dclass, int 0, long 2) to int ("struct.std::bad_typeid"*)**) ; <int ("struct.std::bad_typeid"*)*> [#uses=1]
%tmp.6 = call int %tmp.5( "struct.std::bad_typeid"* %x ) ; <int> [#uses=1]
ret int %tmp.6
ret int 0
}
In other words, we now resolve the virtual function call.
llvm-svn: 14783
llvm-svn: 14649
llvm-svn: 14640
llvm-svn: 14638
Also, remove X % -1 = 0, because it's not true for unsigned values, and the
signed case is superseded by this new handling.
llvm-svn: 14637
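A small C++ demonstration of the reason given above: the X % -1 == 0 identity only holds
under a signed interpretation of -1. The value 42 is an arbitrary example.
#include <iostream>
int main() {
  int      S = 42;
  unsigned U = 42;
  std::cout << S % -1 << '\n';            // 0: the signed identity holds
  std::cout << U % (unsigned)-1 << '\n';  // 42: as unsigned, -1 is UINT_MAX,
                                          // and 42 % UINT_MAX is 42, not 0
  return 0;
}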
llvm-svn: 14587
llvm-svn: 14584
llvm-svn: 14443
occurs due to unordered comparison macros in math.h
llvm-svn: 14221
Delete two functions that are now methods on the Type class
llvm-svn: 14200
186.crafty, fhourstones and 132.ijpeg.
Bugpoint makes really nasty miscompilations embarrassingly easy to find. It
narrowed it down to the instcombiner and this testcase (from fhourstones):
bool %l7153_l4706_htstat_loopentry_2E_4_no_exit_2E_4(int* %i, [32 x int]* %works, int* %tmp.98.out) {
newFuncRoot:
%tmp.96 = load int* %i ; <int> [#uses=1]
%tmp.97 = getelementptr [32 x int]* %works, long 0, int %tmp.96 ; <int*> [#uses=1]
%tmp.98 = load int* %tmp.97 ; <int> [#uses=2]
%tmp.99 = load int* %i ; <int> [#uses=1]
%tmp.100 = and int %tmp.99, 7 ; <int> [#uses=1]
%tmp.101 = seteq int %tmp.100, 7 ; <bool> [#uses=2]
%tmp.102 = cast bool %tmp.101 to int ; <int> [#uses=0]
br bool %tmp.101, label %codeRepl4.exitStub, label %codeRepl3.exitStub
codeRepl4.exitStub: ; preds = %newFuncRoot
store int %tmp.98, int* %tmp.98.out
ret bool true
codeRepl3.exitStub: ; preds = %newFuncRoot
store int %tmp.98, int* %tmp.98.out
ret bool false
}
... which only has one combination performed on it:
$ llvm-as < t.ll | opt -instcombine -debug | llvm-dis
IC: Old = %tmp.101 = seteq int %tmp.100, 7 ; <bool> [#uses=1]
New = setne int %tmp.100, 0 ; <bool>:<badref> [#uses=0]
IC: MOD = br bool %tmp.101, label %codeRepl3.exitStub, label %codeRepl4.exitStub
IC: MOD = %tmp.97 = getelementptr [32 x int]* %works, uint 0, int %tmp.96 ; <int*> [#uses=1]
It doesn't get much better than this. :)
llvm-svn: 14109
llvm-svn: 14108
llvm-svn: 14107
llvm-svn: 14095
us to collapse this:
bool %le(int %A, int %B) {
%c1 = setgt int %A, %B
%tmp = select bool %c1, int 1, int 0
%c2 = setlt int %A, %B
%result = select bool %c2, int -1, int %tmp
%c3 = setle int %result, 0
ret bool %c3
}
into:
bool %le(int %A, int %B) {
%c3 = setle int %A, %B ; <bool> [#uses=1]
ret bool %c3
}
which is handy, because the Java FE makes these sequences all over the place.
This is tested as: test/Regression/Transforms/InstCombine/JavaCompare.ll
llvm-svn: 14086
llvm-svn: 14083
This code hadn't been updated after the "structs with more than 256 elements"-related
changes to the GEP instruction. Also it was not handling the
ConstantAggregateZero class.
Now it does!
llvm-svn: 13834
into (X & (C2 << C1)) != (C3 << C1), where the shift may be either left or
right and the compare may be any one.
This triggers 1546 times in 176.gcc alone, as it is a common pattern that
occurs for bitfield accesses.
llvm-svn: 13740
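An arithmetic check of the fold above in plain C++, for one instance of the pattern: a
right shift followed by a mask and an equality compare. The constants 5, 7 and 3 are
invented for illustration; the commit covers both shift directions and any comparison.
#include <cassert>
int main() {
  // A bitfield test ((x >> 5) & 7) == 3 is equivalent to comparing the
  // unshifted, masked value: (x & (7 << 5)) == (3 << 5).
  // Checked exhaustively for all 10-bit inputs.
  for (unsigned x = 0; x < (1u << 10); ++x)
    assert((((x >> 5) & 7u) == 3u) == ((x & (7u << 5)) == (3u << 5)));
  return 0;
}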
Canonicalize cast X to bool into a setne instruction
llvm-svn: 13736
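The equivalence behind this canonicalization, as a small C++ check; the tested values are
arbitrary.
#include <cassert>
#include <initializer_list>
int main() {
  // Converting an integer to bool is the same as comparing it against zero,
  // which is what "cast X to bool" becomes once rewritten as setne X, 0.
  for (int X : {-3, 0, 1, 42})
    assert(static_cast<bool>(X) == (X != 0));
  return 0;
}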
llvm-svn: 13565
in the size calculation.
This is not something you want to see:
Loop Unroll: F[main] Loop %no_exit Loop Size = 2 Trip Count = 2147483648 - UNROLLING!
The problem was that 2*2147483648 == 0.
Now we get:
Loop Unroll: F[main] Loop %no_exit Loop Size = 2 Trip Count = 2147483648 - TOO LARGE: 4294967296>100
Thanks to some anonymous person playing with the demo page that repeatedly
caused zion to go into swapping land. That's one way to ensure you'll get
a quick bugfix. :)
Testcase here: Transforms/LoopUnroll/2004-05-13-DontUnrollTooMuch.ll
llvm-svn: 13564
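A C++ illustration of the wraparound behind the bad log line above; widening to 64 bits is
shown only as one way to keep the real value, not necessarily the exact fix that went in.
#include <cstdint>
#include <iostream>
int main() {
  uint32_t LoopSize = 2, TripCount = 2147483648u;
  uint32_t Wrapped = LoopSize * TripCount;             // 2 * 2^31 wraps to 0,
                                                       // which looks unrollable
  uint64_t Widened = uint64_t(LoopSize) * TripCount;   // 4294967296 > 100,
                                                       // correctly rejected
  std::cout << Wrapped << ' ' << Widened << '\n';
  return 0;
}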
llvm-svn: 13429
%tmp.0 = getelementptr [50 x sbyte]* %ar, uint 0, int 5 ; <sbyte*> [#uses=2]
%tmp.7 = getelementptr sbyte* %tmp.0, int 8 ; <sbyte*> [#uses=1]
together. This patch actually allows us to simplify and generalize the code.
llvm-svn: 13415
llvm-svn: 13400
llvm-svn: 13341
missing opportunities for combination.
llvm-svn: 13309
when replacing them, missing the opportunity to do simplifications
llvm-svn: 13308
is only used by a cast, and the casted type is the same size as the original
allocation, it would eliminate the cast by folding it into the allocation.
Unfortunately, it was placing the new allocation instruction right before
the cast, which could pull (for example) alloca instructions into the body
of a function. This turns statically allocatable allocas into expensive
dynamically allocated allocas, which is bad bad bad.
This fixes the problem by placing the new allocation instruction at the same
place the old one was, duh. :)
llvm-svn: 13289
patch was graciously contributed by Vladimir Prus.
llvm-svn: 13185
llvm-svn: 13172
(familiar) function:
int _strlen(const char *str) {
int len = 0;
while (*str++) len++;
return len;
}
And transforming it to use a ulong induction variable, because the type of
the pointer index was left as a constant long. This is obviously very bad.
The fix is to shrink long constants in getelementptr instructions to intptr_t,
making the indvars pass insert a uint induction variable, which is much more
efficient.
Here's the before code for this function:
int %_strlen(sbyte* %str) {
entry:
%tmp.13 = load sbyte* %str ; <sbyte> [#uses=1]
%tmp.24 = seteq sbyte %tmp.13, 0 ; <bool> [#uses=1]
br bool %tmp.24, label %loopexit, label %no_exit
no_exit: ; preds = %entry, %no_exit
*** %indvar = phi uint [ %indvar.next, %no_exit ], [ 0, %entry ] ; <uint> [#uses=2]
*** %indvar = phi ulong [ %indvar.next, %no_exit ], [ 0, %entry ] ; <ulong> [#uses=2]
%indvar1 = cast ulong %indvar to uint ; <uint> [#uses=1]
%inc.02.sum = add uint %indvar1, 1 ; <uint> [#uses=1]
%inc.0.0 = getelementptr sbyte* %str, uint %inc.02.sum ; <sbyte*> [#uses=1]
%tmp.1 = load sbyte* %inc.0.0 ; <sbyte> [#uses=1]
%tmp.2 = seteq sbyte %tmp.1, 0 ; <bool> [#uses=1]
%indvar.next = add ulong %indvar, 1 ; <ulong> [#uses=1]
%indvar.next = add uint %indvar, 1 ; <uint> [#uses=1]
br bool %tmp.2, label %loopexit.loopexit, label %no_exit
loopexit.loopexit: ; preds = %no_exit
%indvar = cast uint %indvar to int ; <int> [#uses=1]
%inc.1 = add int %indvar, 1 ; <int> [#uses=1]
ret int %inc.1
loopexit: ; preds = %entry
ret int 0
}
Here's the after code:
int %_strlen(sbyte* %str) {
entry:
%inc.02 = getelementptr sbyte* %str, uint 1 ; <sbyte*> [#uses=1]
%tmp.13 = load sbyte* %str ; <sbyte> [#uses=1]
%tmp.24 = seteq sbyte %tmp.13, 0 ; <bool> [#uses=1]
br bool %tmp.24, label %loopexit, label %no_exit
no_exit: ; preds = %entry, %no_exit
*** %indvar = phi uint [ %indvar.next, %no_exit ], [ 0, %entry ] ; <uint> [#uses=3]
%indvar = cast uint %indvar to int ; <int> [#uses=1]
%inc.0.0 = getelementptr sbyte* %inc.02, uint %indvar ; <sbyte*> [#uses=1]
%inc.1 = add int %indvar, 1 ; <int> [#uses=1]
%tmp.1 = load sbyte* %inc.0.0 ; <sbyte> [#uses=1]
%tmp.2 = seteq sbyte %tmp.1, 0 ; <bool> [#uses=1]
%indvar.next = add uint %indvar, 1 ; <uint> [#uses=1]
br bool %tmp.2, label %loopexit, label %no_exit
loopexit: ; preds = %entry, %no_exit
%len.0.1 = phi int [ 0, %entry ], [ %inc.1, %no_exit ] ; <int> [#uses=1]
ret int %len.0.1
}
llvm-svn: 13016
make the verifier more strict. This fixes building zlib
llvm-svn: 13002
llvm-svn: 12940