diff options
Diffstat (limited to 'llvm/test/CodeGen/ARM/inc-of-add.ll')
| -rw-r--r-- | llvm/test/CodeGen/ARM/inc-of-add.ll | 117 | 
1 file changed, 46 insertions, 71 deletions
| diff --git a/llvm/test/CodeGen/ARM/inc-of-add.ll b/llvm/test/CodeGen/ARM/inc-of-add.ll index fb8ce839352..de09b915716 100644 --- a/llvm/test/CodeGen/ARM/inc-of-add.ll +++ b/llvm/test/CodeGen/ARM/inc-of-add.ll @@ -91,11 +91,11 @@ define i64 @scalar_i64(i64 %x, i64 %y) nounwind {  ;  ; THUMB6-LABEL: scalar_i64:  ; THUMB6:       @ %bb.0: -; THUMB6-NEXT:    adds r0, r0, r2 -; THUMB6-NEXT:    adcs r1, r3 -; THUMB6-NEXT:    movs r2, #0 -; THUMB6-NEXT:    adds r0, r0, #1 -; THUMB6-NEXT:    adcs r1, r2 +; THUMB6-NEXT:    mvns r1, r1 +; THUMB6-NEXT:    mvns r0, r0 +; THUMB6-NEXT:    subs r0, r2, r0 +; THUMB6-NEXT:    sbcs r3, r1 +; THUMB6-NEXT:    mov r1, r3  ; THUMB6-NEXT:    bx lr  ;  ; THUMB78-LABEL: scalar_i64: @@ -196,12 +196,11 @@ define <16 x i8> @vector_i128_i8(<16 x i8> %x, <16 x i8> %y) nounwind {  ; ARM78-LABEL: vector_i128_i8:  ; ARM78:       @ %bb.0:  ; ARM78-NEXT:    vmov d17, r2, r3 -; ARM78-NEXT:    mov r12, sp  ; ARM78-NEXT:    vmov d16, r0, r1 -; ARM78-NEXT:    vld1.64 {d18, d19}, [r12] -; ARM78-NEXT:    vmov.i8 q10, #0x1 -; ARM78-NEXT:    vadd.i8 q8, q8, q9 -; ARM78-NEXT:    vadd.i8 q8, q8, q10 +; ARM78-NEXT:    mov r0, sp +; ARM78-NEXT:    vmvn q8, q8 +; ARM78-NEXT:    vld1.64 {d18, d19}, [r0] +; ARM78-NEXT:    vsub.i8 q8, q9, q8  ; ARM78-NEXT:    vmov r0, r1, d16  ; ARM78-NEXT:    vmov r2, r3, d17  ; ARM78-NEXT:    bx lr @@ -292,12 +291,11 @@ define <16 x i8> @vector_i128_i8(<16 x i8> %x, <16 x i8> %y) nounwind {  ; THUMB78-LABEL: vector_i128_i8:  ; THUMB78:       @ %bb.0:  ; THUMB78-NEXT:    vmov d17, r2, r3 -; THUMB78-NEXT:    mov r12, sp  ; THUMB78-NEXT:    vmov d16, r0, r1 -; THUMB78-NEXT:    vld1.64 {d18, d19}, [r12] -; THUMB78-NEXT:    vmov.i8 q10, #0x1 -; THUMB78-NEXT:    vadd.i8 q8, q8, q9 -; THUMB78-NEXT:    vadd.i8 q8, q8, q10 +; THUMB78-NEXT:    mov r0, sp +; THUMB78-NEXT:    vmvn q8, q8 +; THUMB78-NEXT:    vld1.64 {d18, d19}, [r0] +; THUMB78-NEXT:    vsub.i8 q8, q9, q8  ; THUMB78-NEXT:    vmov r0, r1, d16  ; THUMB78-NEXT:    vmov r2, r3, 
d17  ; THUMB78-NEXT:    bx lr @@ -352,12 +350,11 @@ define <8 x i16> @vector_i128_i16(<8 x i16> %x, <8 x i16> %y) nounwind {  ; ARM78-LABEL: vector_i128_i16:  ; ARM78:       @ %bb.0:  ; ARM78-NEXT:    vmov d17, r2, r3 -; ARM78-NEXT:    mov r12, sp  ; ARM78-NEXT:    vmov d16, r0, r1 -; ARM78-NEXT:    vld1.64 {d18, d19}, [r12] -; ARM78-NEXT:    vmov.i16 q10, #0x1 -; ARM78-NEXT:    vadd.i16 q8, q8, q9 -; ARM78-NEXT:    vadd.i16 q8, q8, q10 +; ARM78-NEXT:    mov r0, sp +; ARM78-NEXT:    vmvn q8, q8 +; ARM78-NEXT:    vld1.64 {d18, d19}, [r0] +; ARM78-NEXT:    vsub.i16 q8, q9, q8  ; ARM78-NEXT:    vmov r0, r1, d16  ; ARM78-NEXT:    vmov r2, r3, d17  ; ARM78-NEXT:    bx lr @@ -408,12 +405,11 @@ define <8 x i16> @vector_i128_i16(<8 x i16> %x, <8 x i16> %y) nounwind {  ; THUMB78-LABEL: vector_i128_i16:  ; THUMB78:       @ %bb.0:  ; THUMB78-NEXT:    vmov d17, r2, r3 -; THUMB78-NEXT:    mov r12, sp  ; THUMB78-NEXT:    vmov d16, r0, r1 -; THUMB78-NEXT:    vld1.64 {d18, d19}, [r12] -; THUMB78-NEXT:    vmov.i16 q10, #0x1 -; THUMB78-NEXT:    vadd.i16 q8, q8, q9 -; THUMB78-NEXT:    vadd.i16 q8, q8, q10 +; THUMB78-NEXT:    mov r0, sp +; THUMB78-NEXT:    vmvn q8, q8 +; THUMB78-NEXT:    vld1.64 {d18, d19}, [r0] +; THUMB78-NEXT:    vsub.i16 q8, q9, q8  ; THUMB78-NEXT:    vmov r0, r1, d16  ; THUMB78-NEXT:    vmov r2, r3, d17  ; THUMB78-NEXT:    bx lr @@ -442,12 +438,11 @@ define <4 x i32> @vector_i128_i32(<4 x i32> %x, <4 x i32> %y) nounwind {  ; ARM78-LABEL: vector_i128_i32:  ; ARM78:       @ %bb.0:  ; ARM78-NEXT:    vmov d17, r2, r3 -; ARM78-NEXT:    mov r12, sp  ; ARM78-NEXT:    vmov d16, r0, r1 -; ARM78-NEXT:    vld1.64 {d18, d19}, [r12] -; ARM78-NEXT:    vmov.i32 q10, #0x1 -; ARM78-NEXT:    vadd.i32 q8, q8, q9 -; ARM78-NEXT:    vadd.i32 q8, q8, q10 +; ARM78-NEXT:    mov r0, sp +; ARM78-NEXT:    vmvn q8, q8 +; ARM78-NEXT:    vld1.64 {d18, d19}, [r0] +; ARM78-NEXT:    vsub.i32 q8, q9, q8  ; ARM78-NEXT:    vmov r0, r1, d16  ; ARM78-NEXT:    vmov r2, r3, d17  ; ARM78-NEXT:    bx lr @@ 
-472,12 +467,11 @@ define <4 x i32> @vector_i128_i32(<4 x i32> %x, <4 x i32> %y) nounwind {  ; THUMB78-LABEL: vector_i128_i32:  ; THUMB78:       @ %bb.0:  ; THUMB78-NEXT:    vmov d17, r2, r3 -; THUMB78-NEXT:    mov r12, sp  ; THUMB78-NEXT:    vmov d16, r0, r1 -; THUMB78-NEXT:    vld1.64 {d18, d19}, [r12] -; THUMB78-NEXT:    vmov.i32 q10, #0x1 -; THUMB78-NEXT:    vadd.i32 q8, q8, q9 -; THUMB78-NEXT:    vadd.i32 q8, q8, q10 +; THUMB78-NEXT:    mov r0, sp +; THUMB78-NEXT:    vmvn q8, q8 +; THUMB78-NEXT:    vld1.64 {d18, d19}, [r0] +; THUMB78-NEXT:    vsub.i32 q8, q9, q8  ; THUMB78-NEXT:    vmov r0, r1, d16  ; THUMB78-NEXT:    vmov r2, r3, d17  ; THUMB78-NEXT:    bx lr @@ -509,60 +503,41 @@ define <2 x i64> @vector_i128_i64(<2 x i64> %x, <2 x i64> %y) nounwind {  ; ARM78-NEXT:    vmov d17, r2, r3  ; ARM78-NEXT:    vmov d16, r0, r1  ; ARM78-NEXT:    mov r0, sp +; ARM78-NEXT:    vmvn q8, q8  ; ARM78-NEXT:    vld1.64 {d18, d19}, [r0] -; ARM78-NEXT:    adr r0, .LCPI7_0 -; ARM78-NEXT:    vadd.i64 q8, q8, q9 -; ARM78-NEXT:    vld1.64 {d18, d19}, [r0:128] -; ARM78-NEXT:    vadd.i64 q8, q8, q9 +; ARM78-NEXT:    vsub.i64 q8, q9, q8  ; ARM78-NEXT:    vmov r0, r1, d16  ; ARM78-NEXT:    vmov r2, r3, d17  ; ARM78-NEXT:    bx lr -; ARM78-NEXT:    .p2align 4 -; ARM78-NEXT:  @ %bb.1: -; ARM78-NEXT:  .LCPI7_0: -; ARM78-NEXT:    .long 1 @ 0x1 -; ARM78-NEXT:    .long 0 @ 0x0 -; ARM78-NEXT:    .long 1 @ 0x1 -; ARM78-NEXT:    .long 0 @ 0x0  ;  ; THUMB6-LABEL: vector_i128_i64:  ; THUMB6:       @ %bb.0: -; THUMB6-NEXT:    push {r4, r5, r6, lr} -; THUMB6-NEXT:    ldr r4, [sp, #20] +; THUMB6-NEXT:    push {r4, r5, r7, lr} +; THUMB6-NEXT:    mvns r4, r1 +; THUMB6-NEXT:    mvns r0, r0 +; THUMB6-NEXT:    ldr r1, [sp, #20]  ; THUMB6-NEXT:    ldr r5, [sp, #16] -; THUMB6-NEXT:    adds r0, r0, r5 -; THUMB6-NEXT:    adcs r1, r4 -; THUMB6-NEXT:    movs r4, #0 -; THUMB6-NEXT:    adds r0, r0, #1 -; THUMB6-NEXT:    adcs r1, r4 -; THUMB6-NEXT:    ldr r5, [sp, #28] -; THUMB6-NEXT:    ldr r6, [sp, #24] -; 
THUMB6-NEXT:    adds r2, r2, r6 -; THUMB6-NEXT:    adcs r3, r5 -; THUMB6-NEXT:    adds r2, r2, #1 -; THUMB6-NEXT:    adcs r3, r4 -; THUMB6-NEXT:    pop {r4, r5, r6, pc} +; THUMB6-NEXT:    subs r0, r5, r0 +; THUMB6-NEXT:    sbcs r1, r4 +; THUMB6-NEXT:    mvns r4, r3 +; THUMB6-NEXT:    mvns r2, r2 +; THUMB6-NEXT:    ldr r3, [sp, #28] +; THUMB6-NEXT:    ldr r5, [sp, #24] +; THUMB6-NEXT:    subs r2, r5, r2 +; THUMB6-NEXT:    sbcs r3, r4 +; THUMB6-NEXT:    pop {r4, r5, r7, pc}  ;  ; THUMB78-LABEL: vector_i128_i64:  ; THUMB78:       @ %bb.0:  ; THUMB78-NEXT:    vmov d17, r2, r3  ; THUMB78-NEXT:    vmov d16, r0, r1  ; THUMB78-NEXT:    mov r0, sp +; THUMB78-NEXT:    vmvn q8, q8  ; THUMB78-NEXT:    vld1.64 {d18, d19}, [r0] -; THUMB78-NEXT:    adr r0, .LCPI7_0 -; THUMB78-NEXT:    vadd.i64 q8, q8, q9 -; THUMB78-NEXT:    vld1.64 {d18, d19}, [r0:128] -; THUMB78-NEXT:    vadd.i64 q8, q8, q9 +; THUMB78-NEXT:    vsub.i64 q8, q9, q8  ; THUMB78-NEXT:    vmov r0, r1, d16  ; THUMB78-NEXT:    vmov r2, r3, d17  ; THUMB78-NEXT:    bx lr -; THUMB78-NEXT:    .p2align 4 -; THUMB78-NEXT:  @ %bb.1: -; THUMB78-NEXT:  .LCPI7_0: -; THUMB78-NEXT:    .long 1 @ 0x1 -; THUMB78-NEXT:    .long 0 @ 0x0 -; THUMB78-NEXT:    .long 1 @ 0x1 -; THUMB78-NEXT:    .long 0 @ 0x0    %t0 = add <2 x i64> %x, <i64 1, i64 1>    %t1 = add <2 x i64> %y, %t0    ret <2 x i64> %t1 | 

