path: root/llvm/lib
Commit log for llvm/lib. Each entry below shows the commit message, followed by [author, date, files changed, -lines removed/+lines added, llvm-svn revision].
* Fix a bug in RemoveDeadNodes where it would crash when its "optional"
  argument is not specified. Implement ReplaceAllUsesWith.
  [Chris Lattner, 2005-08-17, 1 file, -1/+77, llvm-svn: 22834]
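The ReplaceAllUsesWith operation mentioned above rewires every user of one DAG node to point at another. The following is a minimal standalone C++ sketch of the idea on a toy node type (the Node struct here is hypothetical, not LLVM's SDNode class):

    #include <vector>

    struct Node {
      std::vector<Node*> operands;
      std::vector<Node*> users;       // reverse edges, kept in sync below
    };

    // Retarget every operand edge that pointed at From so it points at To.
    // Assumes From != To.
    void replaceAllUsesWith(Node *From, Node *To) {
      for (Node *U : From->users) {
        for (Node *&Op : U->operands)
          if (Op == From)
            Op = To;                  // retarget each use of From
        To->users.push_back(U);       // maintain the reverse edge
      }
      From->users.clear();            // From now has no users and can be freed
    }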
* Switched to using BitsToDouble for int_to_float to avoid an aliasing problem.
  [Jim Laskey, 2005-08-17, 1 file, -4/+4, llvm-svn: 22831]
* Fix some bugs in the Alpha backend, some of which I introduced yesterday,
  and some that were preexisting. All Alpha regtests pass now.
  [Chris Lattner, 2005-08-17, 1 file, -2/+3, llvm-svn: 22829]
* Change hex float constants for the sake of VC++.
  [Jim Laskey, 2005-08-17, 1 file, -1/+4, llvm-svn: 22828]
* Add a new beta option for critical edge splitting, to avoid a problem that
  Nate noticed in yacr2 (and I know occurs in other places as well). This is
  still rough, as the critical edge blocks are not intelligently placed, but
  it is enough to get some idea of whether this improves performance.
  [Chris Lattner, 2005-08-17, 1 file, -0/+23, llvm-svn: 22825]
* Use a new helper to split critical edges, making the code simpler. Do not
  claim to leave the CFG unchanged: we do change the CFG when splitting
  critical edges. This isn't causing us a problem now, but could likely do
  so in the future.
  [Chris Lattner, 2005-08-17, 1 file, -18/+21, llvm-svn: 22824]
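The two entries above both deal with critical edges: edges whose source block has several successors and whose destination has several predecessors, so no code can be placed on the edge without affecting unrelated paths. Below is a standalone C++ sketch of detection and splitting on a toy CFG (the Block type is hypothetical, not LLVM's BasicBlock):

    #include <vector>

    struct Block {
      std::vector<Block*> succs;
      std::vector<Block*> preds;
    };

    // An edge From->To is critical when From branches several ways and To
    // is reached several ways.
    bool isCriticalEdge(const Block *From, const Block *To) {
      return From->succs.size() > 1 && To->preds.size() > 1;
    }

    // Splitting inserts a fresh block with exactly one predecessor and one
    // successor, giving the edge a safe insertion point.
    Block *splitCriticalEdge(Block *From, Block *To) {
      Block *Mid = new Block;
      for (Block *&S : From->succs)
        if (S == To) S = Mid;         // From now jumps to Mid
      for (Block *&P : To->preds)
        if (P == From) P = Mid;       // To is now reached from Mid
      Mid->preds.push_back(From);
      Mid->succs.push_back(To);
      return Mid;
    }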
* Fix a regression on X86, where FP values can be promoted too.
  [Chris Lattner, 2005-08-17, 1 file, -1/+4, llvm-svn: 22822]
* Fix a few small typos I noticed when converting this over to the DAG->DAG
  selector. Also, there is no difference between addSImm and addImm, so just
  use addImm, folding some branches.
  [Chris Lattner, 2005-08-17, 1 file, -12/+11, llvm-svn: 22819]
* Removed UINT_TO_FP and SINT_TO_FP from ISel outright.
  [Jim Laskey, 2005-08-17, 1 file, -5/+0, llvm-svn: 22818]
* Fix a thinko; should fix the s4addl.ll regression.
  [Andrew Lenharth, 2005-08-17, 1 file, -2/+2, llvm-svn: 22817]
* Remove ISel code generation for UINT_TO_FP and SINT_TO_FP. Now asserts if
  marked as legal.
  [Jim Laskey, 2005-08-17, 1 file, -34/+2, llvm-svn: 22816]
* Make UINT_TO_FP and SINT_TO_FP use generic expansion.
  [Jim Laskey, 2005-08-17, 1 file, -0/+4, llvm-svn: 22815]
* Added generic code expansion for [signed|unsigned] i32 to [f32|f64] casts
  in the legalizer. PowerPC now uses this expansion instead of the ISel
  version.

  Example:

    // signed integer to double conversion
    double f1(signed x) { return (double)x; }

    // unsigned integer to double conversion
    double f2(unsigned x) { return (double)x; }

    // signed integer to float conversion
    float f3(signed x) { return (float)x; }

    // unsigned integer to float conversion
    float f4(unsigned x) { return (float)x; }

  Byte Code:

    internal fastcc double %_Z2f1i(int %x) {
    entry:
      %tmp.1 = cast int %x to double            ; <double> [#uses=1]
      ret double %tmp.1
    }

    internal fastcc double %_Z2f2j(uint %x) {
    entry:
      %tmp.1 = cast uint %x to double           ; <double> [#uses=1]
      ret double %tmp.1
    }

    internal fastcc float %_Z2f3i(int %x) {
    entry:
      %tmp.1 = cast int %x to float             ; <float> [#uses=1]
      ret float %tmp.1
    }

    internal fastcc float %_Z2f4j(uint %x) {
    entry:
      %tmp.1 = cast uint %x to float            ; <float> [#uses=1]
      ret float %tmp.1
    }

    internal fastcc double %_Z2g1i(int %x) {
    entry:
      %buffer = alloca [2 x uint]               ; <[2 x uint]*> [#uses=3]
      %tmp.0 = getelementptr [2 x uint]* %buffer, int 0, int 0 ; <uint*> [#uses=1]
      store uint 1127219200, uint* %tmp.0
      %tmp.2 = cast int %x to uint              ; <uint> [#uses=1]
      %tmp.3 = xor uint %tmp.2, 2147483648      ; <uint> [#uses=1]
      %tmp.5 = getelementptr [2 x uint]* %buffer, int 0, int 1 ; <uint*> [#uses=1]
      store uint %tmp.3, uint* %tmp.5
      %tmp.9 = cast [2 x uint]* %buffer to double* ; <double*> [#uses=1]
      %tmp.10 = load double* %tmp.9             ; <double> [#uses=1]
      %tmp.13 = load double* cast (long* %signed_bias to double*) ; <double> [#uses=1]
      %tmp.14 = sub double %tmp.10, %tmp.13     ; <double> [#uses=1]
      ret double %tmp.14
    }

    internal fastcc double %_Z2g2j(uint %x) {
    entry:
      %buffer = alloca [2 x uint]               ; <[2 x uint]*> [#uses=3]
      %tmp.0 = getelementptr [2 x uint]* %buffer, int 0, int 0 ; <uint*> [#uses=1]
      store uint 1127219200, uint* %tmp.0
      %tmp.1 = getelementptr [2 x uint]* %buffer, int 0, int 1 ; <uint*> [#uses=1]
      store uint %x, uint* %tmp.1
      %tmp.4 = cast [2 x uint]* %buffer to double* ; <double*> [#uses=1]
      %tmp.5 = load double* %tmp.4              ; <double> [#uses=1]
      %tmp.8 = load double* cast (long* %unsigned_bias to double*) ; <double> [#uses=1]
      %tmp.9 = sub double %tmp.5, %tmp.8        ; <double> [#uses=1]
      ret double %tmp.9
    }

    internal fastcc float %_Z2g3i(int %x) {
    entry:
      %buffer = alloca [2 x uint]               ; <[2 x uint]*> [#uses=3]
      %tmp.0 = getelementptr [2 x uint]* %buffer, int 0, int 0 ; <uint*> [#uses=1]
      store uint 1127219200, uint* %tmp.0
      %tmp.2 = cast int %x to uint              ; <uint> [#uses=1]
      %tmp.3 = xor uint %tmp.2, 2147483648      ; <uint> [#uses=1]
      %tmp.5 = getelementptr [2 x uint]* %buffer, int 0, int 1 ; <uint*> [#uses=1]
      store uint %tmp.3, uint* %tmp.5
      %tmp.9 = cast [2 x uint]* %buffer to double* ; <double*> [#uses=1]
      %tmp.10 = load double* %tmp.9             ; <double> [#uses=1]
      %tmp.13 = load double* cast (long* %signed_bias to double*) ; <double> [#uses=1]
      %tmp.14 = sub double %tmp.10, %tmp.13     ; <double> [#uses=1]
      %tmp.16 = cast double %tmp.14 to float    ; <float> [#uses=1]
      ret float %tmp.16
    }

    internal fastcc float %_Z2g4j(uint %x) {
    entry:
      %buffer = alloca [2 x uint]               ; <[2 x uint]*> [#uses=3]
      %tmp.0 = getelementptr [2 x uint]* %buffer, int 0, int 0 ; <uint*> [#uses=1]
      store uint 1127219200, uint* %tmp.0
      %tmp.1 = getelementptr [2 x uint]* %buffer, int 0, int 1 ; <uint*> [#uses=1]
      store uint %x, uint* %tmp.1
      %tmp.4 = cast [2 x uint]* %buffer to double* ; <double*> [#uses=1]
      %tmp.5 = load double* %tmp.4              ; <double> [#uses=1]
      %tmp.8 = load double* cast (long* %unsigned_bias to double*) ; <double> [#uses=1]
      %tmp.9 = sub double %tmp.5, %tmp.8        ; <double> [#uses=1]
      %tmp.11 = cast double %tmp.9 to float     ; <float> [#uses=1]
      ret float %tmp.11
    }

  PowerPC Code:

    .machine ppc970
    .const
    .align 2
    .CPIl1__Z2f1i_0:                    ; float 0x4330000080000000
        .long 1501560836                ; float 4.5036e+15
    .text
    .align 2
    .globl l1__Z2f1i
    l1__Z2f1i:
    .LBBl1__Z2f1i_0:                    ; entry
        xoris r2, r3, 32768
        stw r2, -4(r1)
        lis r2, 17200
        stw r2, -8(r1)
        lfd f0, -8(r1)
        lis r2, ha16(.CPIl1__Z2f1i_0)
        lfs f1, lo16(.CPIl1__Z2f1i_0)(r2)
        fsub f1, f0, f1
        blr

    .const
    .align 2
    .CPIl2__Z2f2j_0:                    ; float 0x4330000000000000
        .long 1501560832                ; float 4.5036e+15
    .text
    .align 2
    .globl l2__Z2f2j
    l2__Z2f2j:
    .LBBl2__Z2f2j_0:                    ; entry
        stw r3, -4(r1)
        lis r2, 17200
        stw r2, -8(r1)
        lfd f0, -8(r1)
        lis r2, ha16(.CPIl2__Z2f2j_0)
        lfs f1, lo16(.CPIl2__Z2f2j_0)(r2)
        fsub f1, f0, f1
        blr

    .const
    .align 2
    .CPIl3__Z2f3i_0:                    ; float 0x4330000080000000
        .long 1501560836                ; float 4.5036e+15
    .text
    .align 2
    .globl l3__Z2f3i
    l3__Z2f3i:
    .LBBl3__Z2f3i_0:                    ; entry
        xoris r2, r3, 32768
        stw r2, -4(r1)
        lis r2, 17200
        stw r2, -8(r1)
        lfd f0, -8(r1)
        lis r2, ha16(.CPIl3__Z2f3i_0)
        lfs f1, lo16(.CPIl3__Z2f3i_0)(r2)
        fsub f0, f0, f1
        frsp f1, f0
        blr

    .const
    .align 2
    .CPIl4__Z2f4j_0:                    ; float 0x4330000000000000
        .long 1501560832                ; float 4.5036e+15
    .text
    .align 2
    .globl l4__Z2f4j
    l4__Z2f4j:
    .LBBl4__Z2f4j_0:                    ; entry
        stw r3, -4(r1)
        lis r2, 17200
        stw r2, -8(r1)
        lfd f0, -8(r1)
        lis r2, ha16(.CPIl4__Z2f4j_0)
        lfs f1, lo16(.CPIl4__Z2f4j_0)(r2)
        fsub f0, f0, f1
        frsp f1, f0
        blr

  [Jim Laskey, 2005-08-17, 1 file, -11/+72, llvm-svn: 22814]
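The expansion above leans on an IEEE-754 trick: writing 0x43300000 (the high half of the double constant 2^52, the 1127219200 stored in the IR) into the upper word of a buffer and the integer into the lower word produces the double value 2^52 + x, from which subtracting the bias constant recovers x exactly. A standalone C++ sketch of the arithmetic, as an illustration of the trick rather than the legalizer's actual output:

    #include <cstdint>
    #include <cstring>

    // 2^52: a double whose 52-bit mantissa can absorb any 32-bit integer
    // in its low bits without rounding.
    static const double Two52 = 4503599627370496.0;  // 0x4330000000000000
    static const double Two31 = 2147483648.0;

    double uintToDouble(uint32_t x) {
      // The bit pattern of 2^52 with x in the low 32 bits is the double 2^52 + x.
      uint64_t bits = (0x43300000ULL << 32) | x;
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d - Two52;                 // exact: recovers x
    }

    double intToDouble(int32_t x) {
      // Flipping the sign bit rebiases x to x + 2^31, so the buffered double
      // is 2^52 + 2^31 + x; subtract the matching bias constant
      // (0x4330000080000000, the "signed_bias" in the IR above).
      uint64_t bits = (0x43300000ULL << 32) | (uint32_t(x) ^ 0x80000000U);
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d - (Two52 + Two31);
    }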
* Add a new TargetConstant node.
  [Chris Lattner, 2005-08-17, 1 file, -1/+19, llvm-svn: 22813]
* Implement a couple of improvements:
  - Remove dead code in ISD::Constant handling.
  - Add support for add long, imm16.

  We now codegen 'long long foo(long long a) { return ++a; }' as:

        addic r4, r4, 1
        addze r3, r3
        blr

  instead of:

        li r2, 1
        li r5, 0
        addc r2, r4, r2
        adde r3, r3, r5
        blr

  [Nate Begeman, 2005-08-17, 1 file, -12/+28, llvm-svn: 22811]
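The improved sequence is ordinary multiword arithmetic: addic adds an immediate to the low word and records the carry, and addze folds that carry into the high word. A standalone C++ sketch of what the pair computes (a hypothetical helper, not the PPC ISel code; addic actually accepts a signed 16-bit immediate, a non-negative one is shown for simplicity):

    #include <cstdint>

    struct U64Pair { uint32_t lo, hi; };  // a 64-bit value in two 32-bit registers

    U64Pair addImm16(U64Pair a, uint16_t imm) {
      U64Pair r;
      r.lo = a.lo + imm;               // addic: add immediate, set the carry bit
      uint32_t carry = r.lo < a.lo;    // carry out of the low word
      r.hi = a.hi + carry;             // addze: add the carry into the high word
      return r;
    }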
* This is a dummy; it doesn't matter what the ValueType is.
  [Chris Lattner, 2005-08-16, 1 file, -1/+1, llvm-svn: 22809]
* Updates for changes in nodes.
  [Chris Lattner, 2005-08-16, 2 files, -19/+21, llvm-svn: 22808]
* Update the backends to work with the new CopyFromReg/CopyToReg/ImplicitDef
  nodes.
  [Chris Lattner, 2005-08-16, 3 files, -47/+49, llvm-svn: 22807]
* Eliminate the RegSDNode class, which three nodes (CopyFromReg/CopyToReg/
  ImplicitDef) used to tack a register number onto the node. Instead of doing
  this, make a new node, RegisterSDNode, which is a leaf containing a
  register number. These three operations just become normal DAG nodes now,
  instead of requiring special handling.

  Note that with this change, it is no longer correct to make illegal
  CopyFromReg/CopyToReg nodes. The legalizer will not touch them, and this
  is bad, so don't do it. :)
  [Chris Lattner, 2005-08-16, 4 files, -54/+82, llvm-svn: 22806]
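The reorganization above moves the register number out of a special node subclass and into its own leaf node, so the copy operations become ordinary nodes. A simplified standalone C++ sketch of the shape of that change (toy types and opcodes, not LLVM's SelectionDAG classes):

    #include <utility>
    #include <vector>

    enum Opcode { REGISTER, COPY_TO_REG };   // toy opcodes for illustration

    struct Node {
      Opcode opcode;
      std::vector<Node*> operands;
      Node(Opcode op, std::vector<Node*> ops)
        : opcode(op), operands(std::move(ops)) {}
      virtual ~Node() {}
    };

    // The register number lives in a leaf node of its own...
    struct RegisterNode : Node {
      unsigned reg;
      explicit RegisterNode(unsigned r) : Node(REGISTER, {}), reg(r) {}
    };

    // ...so a copy is just a normal node whose operands happen to include
    // that leaf; generic DAG code needs no special case for it.
    Node *makeCopyToReg(Node *chain, unsigned reg, Node *value) {
      return new Node(COPY_TO_REG, {chain, new RegisterNode(reg), value});
    }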
* Implement BR_CC and BRTWOWAY_CC. This allows the removal of a rather nasty
  fixme from the PowerPC backend. Emit slightly better code for legalizing
  select_cc.
  [Nate Begeman, 2005-08-16, 8 files, -30/+154, llvm-svn: 22805]
* Allow passing a DAG into dump and getOperationName. If one is available
  when printing a node, use it to render target operations with their target
  instruction name instead of "<<unknown>>".
  [Chris Lattner, 2005-08-16, 1 file, -9/+21, llvm-svn: 22804]
* Use an existing helper to do this.
  [Chris Lattner, 2005-08-16, 1 file, -19/+7, llvm-svn: 22802]
* Add some methods for DAG->DAG isel. Split RemoveNodeFromCSEMaps out of
  DeleteNodesIfDead to do it.
  [Chris Lattner, 2005-08-16, 2 files, -19/+59, llvm-svn: 22801]
* Pull the LLVM -> DAG lowering code out of the pattern selector so that it
  can be shared with the DAG->DAG selector.
  [Chris Lattner, 2005-08-16, 3 files, -484/+526, llvm-svn: 22799]
* Fix a bad case in gzip where we put lots of things in registers across the
  loop, because an IV-dependent value was used outside of the loop and didn't
  have immediate-folding capability.
  [Chris Lattner, 2005-08-16, 1 file, -9/+17, llvm-svn: 22798]
* Fix Transforms/LoopStrengthReduce/2005-08-15-AddRecIV.ll.
  [Chris Lattner, 2005-08-16, 1 file, -2/+3, llvm-svn: 22797]
* Turn loop strength reduction on by default. Only run
  createLowerConstantExpressionsPass for the simple isel; the DAG isel has
  no need for it.
  [Chris Lattner, 2005-08-15, 1 file, -20/+14, llvm-svn: 22794]
* Teach LLVM to know how many times a loop executes when constructed with
  a < expression, e.g.: for (i = m; i < n; ++i)
  [Chris Lattner, 2005-08-15, 1 file, -2/+110, llvm-svn: 22793]
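For the loop shape in that entry, the execution count has a closed form: with a stride of one, the body runs n - m times when n > m, and zero times otherwise. A standalone C++ statement of the formula, as an illustration rather than the ScalarEvolution code (it assumes n - m fits in the type, as it does for bounds arising from 32-bit code):

    #include <cstdint>

    // Trip count of: for (i = m; i < n; ++i)
    uint64_t tripCount(int64_t m, int64_t n) {
      return n > m ? uint64_t(n - m) : 0;  // the loop never runs if m >= n
    }
    // Example: for (i = 2; i < 7; ++i) executes tripCount(2, 7) == 5 times.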
* Fix a line that broke the 80-column rule.
  [Jim Laskey, 2005-08-15, 1 file, -2/+3, llvm-svn: 22792]
* Changed codegen for int to f32 to use rounding. This makes FP results
  consistent with gcc.
  [Jim Laskey, 2005-08-15, 1 file, -2/+2, llvm-svn: 22791]
* isIntImmediate is a good idea. Add a flavor that checks bounds while it is
  at it.
  [Andrew Lenharth, 2005-08-15, 1 file, -105/+97, llvm-svn: 22790]
* Fix last night's PPC32 regressions by:
  1. Not selecting the false value of a select_cc in the false arm, which
     isn't legal for nested selects.
  2. Actually returning the node we created and legalized in the FP_TO_UINT
     expander.
  [Nate Begeman, 2005-08-14, 2 files, -3/+2, llvm-svn: 22789]
* Fix last night's X86 regressions by putting code for SSE in the if(SSE)
  block.
  [Nate Begeman, 2005-08-14, 1 file, -5/+5, llvm-svn: 22788]
* Only build the .a archive on Alpha.
  [Andrew Lenharth, 2005-08-14, 1 file, -0/+4, llvm-svn: 22787]
* Fix FP_TO_UINT with Scalar SSE2 now that the legalizer can handle it. We
  now generate the relatively good code sequences:

    unsigned short foo(float a) { return a; }
    _foo:
        movss 4(%esp), %xmm0
        cvttss2si %xmm0, %eax
        movzwl %ax, %eax
        ret

  and:

    unsigned bar(float a) { return a; }
    _bar:
        movss .CPI_bar_0, %xmm0
        movss 4(%esp), %xmm1
        movapd %xmm1, %xmm2
        subss %xmm0, %xmm2
        cvttss2si %xmm2, %eax
        xorl $-2147483648, %eax
        cvttss2si %xmm1, %ecx
        ucomiss %xmm0, %xmm1
        cmovb %ecx, %eax
        ret

  [Nate Begeman, 2005-08-14, 1 file, -1/+9, llvm-svn: 22786]
* Teach the legalizer how to legalize FP_TO_UINT. Teach the legalizer to
  promote FP_TO_UINT to FP_TO_SINT if the wider FP_TO_UINT is also illegal.
  This allows us on PPC to codegen

    unsigned short foo(float a) { return a; }

  as:

    _foo:
    .LBB_foo_0:     ; entry
        fctiwz f0, f1
        stfd f0, -8(r1)
        lwz r2, -4(r1)
        rlwinm r3, r2, 0, 16, 31
        blr

  instead of:

    _foo:
    .LBB_foo_0:     ; entry
        fctiwz f0, f1
        stfd f0, -8(r1)
        lwz r2, -4(r1)
        lis r3, ha16(.CPI_foo_0)
        lfs f0, lo16(.CPI_foo_0)(r3)
        fcmpu cr0, f1, f0
        blt .LBB_foo_2  ; entry
    .LBB_foo_1:     ; entry
        fsubs f0, f1, f0
        fctiwz f0, f0
        stfd f0, -16(r1)
        lwz r2, -12(r1)
        xoris r2, r2, 32768
    .LBB_foo_2:     ; entry
        rlwinm r3, r2, 0, 16, 31
        blr

  [Nate Begeman, 2005-08-14, 1 file, -3/+32, llvm-svn: 22785]
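Both FP_TO_UINT entries above use the same pair of strategies. A standalone C++ sketch of the legalization idea, not the legalizer itself (it assumes the input is in range and not NaN, as the ISD node does): promote when the unsigned result is narrow enough to fit in the wider signed convert the hardware offers, otherwise expand with a compare against 2^31.

    #include <cstdint>

    // Promote: a 16-bit unsigned result always fits in a 32-bit signed
    // convert, so one fctiwz/cvttss2si-style instruction suffices.
    uint16_t floatToU16(float a) {
      return (uint16_t)(int32_t)a;
    }

    // Expand: a full 32-bit unsigned result may exceed the signed range,
    // so rebase values >= 2^31 and restore the high bit afterwards.
    uint32_t floatToU32(float a) {
      const float Two31 = 2147483648.0f;   // 2^31, exactly representable
      if (a < Two31)
        return (uint32_t)(int32_t)a;       // in signed range: convert directly
      return (uint32_t)(int32_t)(a - Two31) ^ 0x80000000U;
    }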
* Make FP_TO_UINT illegal. This allows us to generate significantly better
  codegen for FP_TO_UINT by using the legalizer's SELECT variant.

  Implement a codegen improvement for SELECT_CC, selecting the false node in
  the MBB that feeds the phi node. This allows us to codegen:

    void foo(int *a, int b, int c) { int d = (a < b) ? 5 : 9; *a = d; }

  as:

    _foo:
        li r2, 5
        cmpw cr0, r4, r3
        bgt .LBB_foo_2  ; entry
    .LBB_foo_1:     ; entry
        li r2, 9
    .LBB_foo_2:     ; entry
        stw r2, 0(r3)
        blr

  instead of:

    _foo:
        li r2, 5
        li r5, 9
        cmpw cr0, r4, r3
        bgt .LBB_foo_2  ; entry
    .LBB_foo_1:     ; entry
        or r2, r5, r5
    .LBB_foo_2:     ; entry
        stw r2, 0(r3)
        blr

  [Nate Begeman, 2005-08-14, 2 files, -71/+35, llvm-svn: 22784]
* Testing a variable before it is defined doesn't work so well. It is a
  fairly small thing, so just let everyone build the .a file.
  [Andrew Lenharth, 2005-08-13, 1 file, -3/+0, llvm-svn: 22783]
* Oops, don't forget to clear this. The real inner loop is now:

    .LBB_foo_3:     ; no_exit.1
        lfd f2, 0(r9)
        lfd f3, 8(r9)
        fmul f4, f1, f2
        fmadd f4, f0, f3, f4
        stfd f4, 8(r9)
        fmul f3, f1, f3
        fmsub f2, f0, f2, f3
        stfd f2, 0(r9)
        addi r9, r9, 16
        addi r8, r8, 1
        cmpw cr0, r8, r4
        ble .LBB_foo_3  ; no_exit.1

  [Chris Lattner, 2005-08-13, 1 file, -0/+1, llvm-svn: 22782]
* Recursively scan scev expressions for common subexpressions. This allows us
  to handle nested loops much better, for example, by being able to tell that
  these two expressions:

    {( 8 + ( 16 * ( 1 + %Tmp11 + %Tmp12)) + %c_),+,( 16 * %Tmp12)}<loopentry.1>
    {(( 16 * ( 1 + %Tmp11 + %Tmp12)) + %c_),+,( 16 * %Tmp12)}<loopentry.1>

  have the following common part that can be shared:

    {(( 16 * ( 1 + %Tmp11 + %Tmp12)) + %c_),+,( 16 * %Tmp12)}<loopentry.1>

  This allows us to codegen an important inner loop in 168.wupwise as:

    .LBB_foo_4:     ; no_exit.1
        lfd f2, 16(r9)
        fmul f3, f0, f2
        fmul f2, f1, f2
        fadd f4, f3, f2
        stfd f4, 8(r9)
        fsub f2, f3, f2
        stfd f2, 16(r9)
        addi r8, r8, 1
        addi r9, r9, 16
        cmpw cr0, r8, r4
        ble .LBB_foo_4  ; no_exit.1

  instead of:

    .LBB_foo_3:     ; no_exit.1
        lfdx f2, r6, r9
        add r10, r6, r9
        lfd f3, 8(r10)
        fmul f4, f1, f2
        fmadd f4, f0, f3, f4
        stfd f4, 8(r10)
        fmul f3, f1, f3
        fmsub f2, f0, f2, f3
        stfdx f2, r6, r9
        addi r9, r9, 16
        addi r8, r8, 1
        cmpw cr0, r8, r4
        ble .LBB_foo_3  ; no_exit.1

  [Chris Lattner, 2005-08-13, 1 file, -28/+61, llvm-svn: 22781]
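Finding the shared part of two such expressions amounts to bottom-up value numbering of the expression trees: structurally identical subtrees receive the same id, so the common subexpression surfaces as a repeated id. A standalone C++ sketch of that recursion (a toy Expr type, not the ScalarEvolution classes, and a stand-in for the commit's actual scan):

    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    struct Expr {
      std::string op;                  // "add", "mul", "addrec", or a leaf name
      std::vector<const Expr*> kids;
    };

    using Key = std::pair<std::string, std::vector<int>>;

    // Assign the same id to structurally identical subtrees (bottom-up).
    int number(const Expr *e, std::map<Key, int> &table) {
      std::vector<int> kidIds;
      for (const Expr *k : e->kids)
        kidIds.push_back(number(k, table));  // recurse into operands first
      Key key{e->op, kidIds};
      auto it = table.find(key);
      if (it != table.end())
        return it->second;                   // identical subtree seen before
      int id = (int)table.size();
      table.emplace(key, id);
      return id;
    }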
* Remove an unnecessary argument to SimplifySelectCC and add an additional
  assert when creating a select_cc node.
  [Nate Begeman, 2005-08-13, 1 file, -8/+10, llvm-svn: 22780]
* Fix the fabs regression on x86 by abstracting the select_cc optimization
  out into SimplifySelectCC. This allows both ISD::SELECT and ISD::SELECT_CC
  to use the same set of simplifying folds.
  [Nate Begeman, 2005-08-13, 1 file, -68/+83, llvm-svn: 22779]
* Remove support for 64-bit PPC; it's been broken for a long time. It'll be
  back once a DAG->DAG ISel exists.
  [Nate Begeman, 2005-08-13, 10 files, -2272/+4, llvm-svn: 22778]
* Fix an oversized-GOT problem with gcc-4 on Alpha.
  [Andrew Lenharth, 2005-08-13, 1 file, -0/+4, llvm-svn: 22777]
* Teach SplitCriticalEdge to update LoopInfo if it is alive. This fixes
  a problem in LoopStrengthReduction, where it would split critical edges
  and then confuse itself with outdated loop information.
  [Chris Lattner, 2005-08-13, 1 file, -0/+31, llvm-svn: 22776]
* Remove dead code. The exit block list is computed on demand, and thus does
  not need to be updated. This code is a relic from when it did.
  [Chris Lattner, 2005-08-13, 1 file, -15/+0, llvm-svn: 22775]
* Implement a couple of simple shift foldings, e.g. (X & 7) >> 3 -> 0.
  [Chris Lattner, 2005-08-12, 1 file, -0/+18, llvm-svn: 22774]
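The fold is justified by known-bits reasoning: after X & mask, only bits inside mask can be set, so if the shift discards all of them the result is always zero. A standalone C++ statement of the condition, as an illustration rather than the actual combiner code:

    #include <cstdint>

    // (x & mask) >> shamt is identically 0 whenever mask >> shamt == 0,
    // because the AND clears every bit the shift would keep.
    bool shiftOfMaskedValueIsZero(uint64_t mask, unsigned shamt) {
      return (mask >> shamt) == 0;   // e.g. mask = 7, shamt = 3  ->  true
    }
    // Hence (x & 7) >> 3 == 0 for every x, since 7 >> 3 == 0.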
* Fix for 2005-08-12-rlwimi-crash.ll. Make allowance for masks being shifted
  to zero.
  [Jim Laskey, 2005-08-12, 1 file, -1/+1, llvm-svn: 22773]
* This change handles the cases of (~x)&y and x&(~y) yielding ANDC, and
  (~x)|y and x|(~y) yielding ORC.
  [Jim Laskey, 2005-08-12, 1 file, -3/+24, llvm-svn: 22771]
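PowerPC's andc and orc instructions compute an AND or OR with one operand complemented, so each of the four source forms above maps onto a single instruction. A standalone C++ sketch of what the matched patterns compute, as an illustration rather than the ISel pattern code:

    #include <cstdint>

    uint32_t andc(uint32_t x, uint32_t y) { return x & ~y; }  // PPC andc
    uint32_t orc (uint32_t x, uint32_t y) { return x | ~y; }  // PPC orc

    // The commuted forms map onto the same instructions:
    //   (~a) & b  ==  andc(b, a)        a & (~b)  ==  andc(a, b)
    //   (~a) | b  ==  orc(b, a)         a | (~b)  ==  orc(a, b)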
* When splitting critical edges, make sure not to leave the new block in the
  middle of the loop. This turns a critical loop in gzip into this:

    .LBB_test_1:    ; loopentry
        or r27, r28, r28
        add r28, r3, r27
        lhz r28, 3(r28)
        add r26, r4, r27
        lhz r26, 3(r26)
        cmpw cr0, r28, r26
        bne .LBB_test_8 ; loopentry.loopexit_crit_edge
    .LBB_test_2:    ; shortcirc_next.0
        add r28, r3, r27
        lhz r28, 5(r28)
        add r26, r4, r27
        lhz r26, 5(r26)
        cmpw cr0, r28, r26
        bne .LBB_test_7 ; shortcirc_next.0.loopexit_crit_edge
    .LBB_test_3:    ; shortcirc_next.1
        add r28, r3, r27
        lhz r28, 7(r28)
        add r26, r4, r27
        lhz r26, 7(r26)
        cmpw cr0, r28, r26
        bne .LBB_test_6 ; shortcirc_next.1.loopexit_crit_edge
    .LBB_test_4:    ; shortcirc_next.2
        add r28, r3, r27
        lhz r26, 9(r28)
        add r28, r4, r27
        lhz r25, 9(r28)
        addi r28, r27, 8
        cmpw cr7, r26, r25
        mfcr r26, 1
        rlwinm r26, r26, 31, 31, 31
        add r25, r8, r27
        cmpw cr7, r25, r7
        mfcr r25, 1
        rlwinm r25, r25, 29, 31, 31
        and. r26, r26, r25
        bne .LBB_test_1 ; loopentry

  instead of this:

    .LBB_test_1:    ; loopentry
        or r27, r28, r28
        add r28, r3, r27
        lhz r28, 3(r28)
        add r26, r4, r27
        lhz r26, 3(r26)
        cmpw cr0, r28, r26
        beq .LBB_test_3 ; shortcirc_next.0
    .LBB_test_2:    ; loopentry.loopexit_crit_edge
        add r2, r30, r27
        add r8, r29, r27
        b .LBB_test_9   ; loopexit
    .LBB_test_3:    ; shortcirc_next.0
        add r28, r3, r27
        lhz r28, 5(r28)
        add r26, r4, r27
        lhz r26, 5(r26)
        cmpw cr0, r28, r26
        beq .LBB_test_5 ; shortcirc_next.1
    .LBB_test_4:    ; shortcirc_next.0.loopexit_crit_edge
        add r2, r11, r27
        add r8, r12, r27
        b .LBB_test_9   ; loopexit
    .LBB_test_5:    ; shortcirc_next.1
        add r28, r3, r27
        lhz r28, 7(r28)
        add r26, r4, r27
        lhz r26, 7(r26)
        cmpw cr0, r28, r26
        beq .LBB_test_7 ; shortcirc_next.2
    .LBB_test_6:    ; shortcirc_next.1.loopexit_crit_edge
        add r2, r9, r27
        add r8, r10, r27
        b .LBB_test_9   ; loopexit
    .LBB_test_7:    ; shortcirc_next.2
        add r28, r3, r27
        lhz r26, 9(r28)
        add r28, r4, r27
        lhz r25, 9(r28)
        addi r28, r27, 8
        cmpw cr7, r26, r25
        mfcr r26, 1
        rlwinm r26, r26, 31, 31, 31
        add r25, r8, r27
        cmpw cr7, r25, r7
        mfcr r25, 1
        rlwinm r25, r25, 29, 31, 31
        and. r26, r26, r25
        bne .LBB_test_1 ; loopentry

  Next up, improve the code for the loop.
  [Chris Lattner, 2005-08-12, 1 file, -3/+15, llvm-svn: 22769]