llvm-svn: 37330
value store is the same as the base pointer.
llvm-svn: 37318
llvm-svn: 37310
llvm-svn: 37233
llvm-svn: 37130
This fixes PR1423
llvm-svn: 37102
llvm-svn: 37094
llvm-svn: 37086
CodeGen/PowerPC/fneg.ll into:
_t4:
fmul f0, f3, f4
fmadd f1, f1, f2, f0
blr
instead of:
_t4:
fneg f0, f3
fmul f0, f0, f4
fmsub f1, f1, f2, f0
blr
llvm-svn: 37054
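
A plausible C-level reading of the t4 testcase (hypothetical; the actual
fneg.ll source is not shown here), with f1..f4 carrying a, b, c, d:

// a*b - (-c)*d == a*b + c*d, so the fneg folds away and the
// fmsub becomes an fmadd.
double t4(double a, double b, double c, double d) {
  return a * b - (-c) * d;
}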
llvm-svn: 36962
llvm-svn: 36910
- (store (bitconvert v)) -> (store v) if resultant store does not require
higher alignment
- (bitconvert (load v)) -> (load (bitconvert*)v) if resultant load does not
require higher alignment
llvm-svn: 36908
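
A minimal, self-contained sketch of the alignment guard both folds share
(hypothetical type and helper names, not LLVM's actual DAGCombiner code):

struct Ty { unsigned abi_align; }; // hypothetical type descriptor

// After (store (bitconvert v)) -> (store v), the memory operation is of
// the pre-bitconvert type; the fold is safe only if that type does not
// demand stricter alignment than the operation already provides.
bool canFoldBitconvert(Ty newTy, unsigned memAlign) {
  return newTy.abi_align <= memAlign;
}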
llvm-svn: 36716
llvm-svn: 36622
produce two results.)
* Do not touch volatile loads.
llvm-svn: 36604
llvm-svn: 36356
llvm-svn: 36309
llvm-svn: 36301
llvm-svn: 36245
single-use nodes, they will be dead soon. Make sure to remove them before
processing other nodes. This implements CodeGen/X86/shl_elim.ll
llvm-svn: 36244
a chance to hack on it. This compiles:
int baz(long long a) { return (short)(((int)(a >>24)) >> 9); }
into:
_baz:
slwi r2, r3, 8
srwi r2, r2, 9
extsh r3, r2
blr
instead of:
_baz:
srwi r2, r4, 24
rlwimi r2, r3, 8, 0, 23
srwi r2, r2, 9
extsh r3, r2
blr
This implements CodeGen/PowerPC/sign_ext_inreg1.ll
llvm-svn: 36212
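
Why only the high word matters here: (short)(((int)(a >> 24)) >> 9) keeps
bits [33,48] of the 64-bit input, which live entirely in the high 32-bit
half (r3 on 32-bit PowerPC), so r4 can be ignored. A sketch of the same
computation on just the high word (helper name is illustrative):

#include <cstdint>
int16_t baz_hi(uint64_t a) {
  uint32_t hi = (uint32_t)(a >> 32); // r3
  return (int16_t)((hi << 8) >> 9);  // slwi 8; srwi 9; extsh
}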
llvm-svn: 35910
llvm-svn: 35888
llvm-svn: 35887
allows other simplifications. For example, this compiles:
int isnegative(unsigned int X) {
return !(X < 2147483648U);
}
Into this code:
x86:
movl 4(%esp), %eax
shrl $31, %eax
ret
arm:
mov r0, r0, lsr #31
bx lr
thumb:
lsr r0, r0, #31
bx lr
instead of:
x86:
cmpl $0, 4(%esp)
sets %al
movzbl %al, %eax
ret
arm:
mov r3, #0
cmp r0, #0
movlt r3, #1
mov r0, r3
bx lr
thumb:
mov r2, #1
mov r1, #0
cmp r0, #0
blt LBB1_2 @entry
LBB1_1: @entry
cpy r2, r1
LBB1_2: @entry
cpy r0, r2
bx lr
Testcase here: test/CodeGen/Generic/ispositive.ll
llvm-svn: 35883
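
The identity at work: for a 32-bit unsigned X, !(X < 2147483648U) is
exactly "bit 31 is set", so the whole test reduces to a single shift:

#include <cstdint>
int isnegative_shift(uint32_t X) {
  return (int)(X >> 31); // 1 iff X >= 2^31
}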
improves codegen on many architectures. Tests committed as CodeGen/*/iabs.ll
X86 Old: X86 New:
_test: _test:
movl 4(%esp), %ecx movl 4(%esp), %eax
movl %ecx, %eax movl %eax, %ecx
negl %eax sarl $31, %ecx
testl %ecx, %ecx addl %ecx, %eax
cmovns %ecx, %eax xorl %ecx, %eax
ret ret
PPC Old: PPC New:
_test: _test:
cmpwi cr0, r3, -1 srawi r2, r3, 31
neg r2, r3 add r3, r3, r2
bgt cr0, LBB1_2 ; xor r3, r3, r2
LBB1_1: ; blr
mr r3, r2
LBB1_2: ;
blr
ARM Old: ARM New:
_test: _test:
rsb r3, r0, #0 add r3, r0, r0, asr #31
cmp r0, #0 eor r0, r3, r0, asr #31
movge r3, r0 bx lr
mov r0, r3
bx lr
Thumb Old: Thumb New:
_test: _test:
neg r2, r0 asr r2, r0, #31
cmp r0, #0 add r0, r0, r2
bge LBB1_2 eor r0, r2
LBB1_1: @ bx lr
cpy r0, r2
LBB1_2: @
bx lr
Sparc Old: Sparc New:
test: test:
save -96, %o6, %o6 save -96, %o6, %o6
sethi 0, %l0 sra %i0, 31, %l0
sub %l0, %i0, %l0 add %i0, %l0, %l1
subcc %i0, -1, %l1 xor %l1, %l0, %i0
bg .BB1_2 restore %g0, %g0, %g0
nop retl
.BB1_1: nop
or %g0, %l0, %i0
.BB1_2:
restore %g0, %g0, %g0
retl
nop
It also helps alpha/ia64 :)
llvm-svn: 35881
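
The branch-free identity behind every "New" column above: with
m = x >> 31 (arithmetic shift: 0 for non-negative x, -1 for negative x),
(x + m) ^ m leaves non-negative x unchanged and negates negative x:

int iabs(int x) {
  int m = x >> 31;    // sra: all-zeros or all-ones mask
                      // (arithmetic shift assumed, as on the targets above)
  return (x + m) ^ m; // add, then xor: the three instructions emitted
}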
2. Help DAGCombiner recognize zero/sign/any-extended versions of ROTR and ROTL
patterns. This was motivated by the X86/rotate.ll testcase, which should now
generate code for other platforms (and soon-to-come platforms). Rewrote the code
slightly to make it easier to read.
llvm-svn: 35605
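
The classic source idiom the combiner matches into a single rotate node
(X86/rotate.ll exercises patterns of this shape); this commit extends the
matching to zero/sign/any-extended wrappers around the same pattern:

#include <cstdint>
uint32_t rotl32(uint32_t x, unsigned n) {
  n &= 31;
  return (x << n) | (x >> ((32 - n) & 31)); // -> a single rol on x86
}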
combination.
llvm-svn: 35517
big endian targets until the llvm-gcc build issue has been resolved.
llvm-svn: 35449
llvm-svn: 35350
llvm-svn: 35293
llvm-svn: 35289
llvm-svn: 35286
1. Address offset is in bytes.
2. Make sure truncate node uses are replaced with new load.
llvm-svn: 35274
llvm-svn: 35254
llvm-svn: 35239
llvm-svn: 35005
llvm-svn: 34913
llvm-svn: 34910
out of addc, turn it into add.
This allows us to compile:
long long test(long long A, unsigned B) {
return (A + ((long long)B << 32)) & 123;
}
into:
_test:
movl $123, %eax
andl 4(%esp), %eax
xorl %edx, %edx
ret
instead of:
_test:
xorl %edx, %edx
movl %edx, %eax
addl 4(%esp), %eax ;; add of zero
andl $123, %eax
ret
llvm-svn: 34909
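
An informal reading of why no carry can occur in the testcase above (the
function name below is illustrative): the addend ((long long)B << 32) has
all-zero low 32 bits, so the low-word addc adds zero and can never carry
into the high word; the & 123 mask then makes the high word dead as well.

#include <cstdint>
uint64_t test_equiv(uint64_t A, uint32_t B) {
  // low 32 bits of the addend are zero -> no carry out of the low word
  return (A + ((uint64_t)B << 32)) & 123; // == A & 123
}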
sextinreg if not needed. This is useful in two cases: before legalize,
it avoids creating a sextinreg that will be trivially removed. After legalize,
if the target doesn't support sextinreg, the trunc/sext pair would not
otherwise have been removed.
llvm-svn: 34621
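
An informal source-level analogue of the "trivially removed" case
(illustrative, not from the commit): when the value is already
sign-extended in register, the trunc/sext pair is a no-op, so no
sext_inreg node should be created for it in the first place.

#include <cstdint>
int32_t no_op_pair(int16_t v) {
  int32_t x = v;              // bits 31..16 are already copies of bit 15
  return (int32_t)(int16_t)x; // trunc + sext: folds away entirely
}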
llvm-svn: 34065
llvm-svn: 33398
(shl (add x, c1), c2) -> (add (shl x, c2), c1<<c2)
Replace it with:
(add (shl (add x, c1), c2), y) -> (add (add (shl x, c2), c1<<c2), y)
This fixes test/CodeGen/ARM/smul.ll
llvm-svn: 33361
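
The algebra both forms rely on: (x + c1) << c2 == (x << c2) + (c1 << c2)
modulo 2^32. Restricting the rewrite to shifts that already feed an add
keeps the bare (shl (add x, c1), c2) shape intact elsewhere, which
plausibly matters for the shift-heavy patterns smul.ll exercises. A quick
check of the identity:

#include <cstdint>
uint32_t distribute_check(uint32_t x) {
  uint32_t a = (x + 5u) << 2;        // (shl (add x, c1), c2)
  uint32_t b = (x << 2) + (5u << 2); // (add (shl x, c2), c1<<c2)
  return a ^ b;                      // always 0: the two forms agree
}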
careful when folding "c ? load p : load q" that c doesn't reach either load.
If it does, folding this into load (c ? p : q) would induce a cycle in the graph.
llvm-svn: 33251
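
A minimal sketch of the hazard check (hypothetical node type and helper,
not LLVM's actual code): the folded load depends on c, so if c transitively
depends on either original load, the rewrite would close a cycle.

#include <vector>
struct Node { std::vector<Node*> operands; };
bool reaches(const Node *from, const Node *target) {
  if (from == target) return true;
  for (const Node *op : from->operands)
    if (reaches(op, target)) return true;
  return false;
}
// Fold "c ? load p : load q" into "load (c ? p : q)" only if
// !reaches(c, loadP) && !reaches(c, loadQ).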
llvm-svn: 33249
This implements CodeGen/PowerPC/unsafe-math.ll
llvm-svn: 33024
llvm-svn: 32698
llvm-svn: 32629
sure the right conditions are checked.
llvm-svn: 32611