| author    | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-08-29 11:18:14 +0000 |
|-----------|----------------------------------------|---------------------------|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2018-08-29 11:18:14 +0000 |
| commit    | 6d71c4cfe31c39c307c9af8a6531f127d99100fb (patch) | |
| tree      | d7f9689a1281494075d966332baab5ca866b5943 | |
| parent    | a22d24a36c030aa039ad37b1f26cf5ff68072df0 (diff) | |
| download  | bcm5719-llvm-6d71c4cfe31c39c307c9af8a6531f127d99100fb.tar.gz bcm5719-llvm-6d71c4cfe31c39c307c9af8a6531f127d99100fb.zip | |
[DAGCombiner] Add X / X -> 1 & X % X -> 0 folds (test tweaks)
Adjust tests to avoid the X / X -> 1 & X % X -> 0 folds while keeping their original purposes.
Differential Revision: https://reviews.llvm.org/D50636
llvm-svn: 340916
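To illustrate what the affected tests were running into, here is a minimal LLVM IR sketch (illustrative only; the function and global names below are not from this commit): with the new DAGCombiner folds, a division or remainder whose two operands are the same value is folded to a constant, so tests that used `sdiv %x, %x` and friends to force a libcall or division instruction are rewritten to read the divisor through a second volatile load, keeping the operands distinct.

```llvm
; Illustrative sketch, not from this commit: the same SSA value on both
; sides of the division is now folded away (X / X -> 1, X % X -> 0).
define i16 @div_same(i16 %x) {
  %r = sdiv i16 %x, %x      ; folds to the constant 1 in the DAG combiner
  ret i16 %r
}

; The adjusted tests instead load the divisor through a second volatile
; load, so the two operands are no longer provably equal and the original
; lowering (e.g. the __mspabi_divi libcall) is still exercised.
@g = global i16 0
define i16 @div_distinct() {
  %a = load volatile i16, i16* @g
  %b = load volatile i16, i16* @g
  %r = sdiv i16 %a, %b
  ret i16 %r
}
```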
| -rw-r--r-- | llvm/test/CodeGen/MSP430/libcalls.ll | 60 |
| -rw-r--r-- | llvm/test/CodeGen/SystemZ/pr32372.ll | 8 |
| -rw-r--r-- | llvm/test/CodeGen/X86/pr38539.ll | 45 |

3 files changed, 63 insertions, 50 deletions
diff --git a/llvm/test/CodeGen/MSP430/libcalls.ll b/llvm/test/CodeGen/MSP430/libcalls.ll
index 950ed6c17e2..30402377813 100644
--- a/llvm/test/CodeGen/MSP430/libcalls.ll
+++ b/llvm/test/CodeGen/MSP430/libcalls.ll
@@ -433,9 +433,10 @@ entry:
 ; CHECK: call #__mspabi_divi
 
   %0 = load volatile i16, i16* @g_i16, align 8
-  %1 = sdiv i16 %0, %0
+  %1 = load volatile i16, i16* @g_i16, align 8
+  %2 = sdiv i16 %0, %1
 
-  ret i16 %1
+  ret i16 %2
 }
 
 define i32 @divli() #0 {
@@ -444,9 +445,10 @@ entry:
 ; CHECK: call #__mspabi_divli
 
   %0 = load volatile i32, i32* @g_i32, align 8
-  %1 = sdiv i32 %0, %0
+  %1 = load volatile i32, i32* @g_i32, align 8
+  %2 = sdiv i32 %0, %1
 
-  ret i32 %1
+  ret i32 %2
 }
 
 define i64 @divlli() #0 {
@@ -455,9 +457,10 @@ entry:
 ; CHECK: call #__mspabi_divlli
 
   %0 = load volatile i64, i64* @g_i64, align 8
-  %1 = sdiv i64 %0, %0
+  %1 = load volatile i64, i64* @g_i64, align 8
+  %2 = sdiv i64 %0, %1
 
-  ret i64 %1
+  ret i64 %2
 }
 
 define i16 @divu() #0 {
@@ -466,9 +469,10 @@ entry:
 ; CHECK: call #__mspabi_divu
 
   %0 = load volatile i16, i16* @g_i16, align 8
-  %1 = udiv i16 %0, %0
+  %1 = load volatile i16, i16* @g_i16, align 8
+  %2 = udiv i16 %0, %1
 
-  ret i16 %1
+  ret i16 %2
 }
 
 define i32 @divul() #0 {
@@ -477,9 +481,10 @@ entry:
 ; CHECK: call #__mspabi_divul
 
   %0 = load volatile i32, i32* @g_i32, align 8
-  %1 = udiv i32 %0, %0
+  %1 = load volatile i32, i32* @g_i32, align 8
+  %2 = udiv i32 %0, %1
 
-  ret i32 %1
+  ret i32 %2
 }
 
 define i64 @divull() #0 {
@@ -488,9 +493,10 @@ entry:
 ; CHECK: call #__mspabi_divull
 
   %0 = load volatile i64, i64* @g_i64, align 8
-  %1 = udiv i64 %0, %0
+  %1 = load volatile i64, i64* @g_i64, align 8
+  %2 = udiv i64 %0, %1
 
-  ret i64 %1
+  ret i64 %2
 }
 
 define i16 @remi() #0 {
@@ -499,9 +505,10 @@ entry:
 ; CHECK: call #__mspabi_remi
 
   %0 = load volatile i16, i16* @g_i16, align 8
-  %1 = srem i16 %0, %0
+  %1 = load volatile i16, i16* @g_i16, align 8
+  %2 = srem i16 %0, %1
 
-  ret i16 %1
+  ret i16 %2
 }
 
 define i32 @remli() #0 {
@@ -510,9 +517,10 @@ entry:
 ; CHECK: call #__mspabi_remli
 
   %0 = load volatile i32, i32* @g_i32, align 8
-  %1 = srem i32 %0, %0
+  %1 = load volatile i32, i32* @g_i32, align 8
+  %2 = srem i32 %0, %1
 
-  ret i32 %1
+  ret i32 %2
 }
 
 define i64 @remlli() #0 {
@@ -521,9 +529,10 @@ entry:
 ; CHECK: call #__mspabi_remlli
 
   %0 = load volatile i64, i64* @g_i64, align 8
-  %1 = srem i64 %0, %0
+  %1 = load volatile i64, i64* @g_i64, align 8
+  %2 = srem i64 %0, %1
 
-  ret i64 %1
+  ret i64 %2
 }
 
 define i16 @remu() #0 {
@@ -532,9 +541,10 @@ entry:
 ; CHECK: call #__mspabi_remu
 
   %0 = load volatile i16, i16* @g_i16, align 8
-  %1 = urem i16 %0, %0
+  %1 = load volatile i16, i16* @g_i16, align 8
+  %2 = urem i16 %0, %1
 
-  ret i16 %1
+  ret i16 %2
 }
 
 define i32 @remul() #0 {
@@ -543,9 +553,10 @@ entry:
 ; CHECK: call #__mspabi_remul
 
   %0 = load volatile i32, i32* @g_i32, align 8
-  %1 = urem i32 %0, %0
+  %1 = load volatile i32, i32* @g_i32, align 8
+  %2 = urem i32 %0, %1
 
-  ret i32 %1
+  ret i32 %2
 }
 
 define i64 @remull() #0 {
@@ -554,9 +565,10 @@ entry:
 ; CHECK: call #__mspabi_remull
 
   %0 = load volatile i64, i64* @g_i64, align 8
-  %1 = urem i64 %0, %0
+  %1 = load volatile i64, i64* @g_i64, align 8
+  %2 = urem i64 %0, %1
 
-  ret i64 %1
+  ret i64 %2
 }
 
 define i16 @mpyi() #0 {
diff --git a/llvm/test/CodeGen/SystemZ/pr32372.ll b/llvm/test/CodeGen/SystemZ/pr32372.ll
index d252a9a96de..4dc8b6bf01f 100644
--- a/llvm/test/CodeGen/SystemZ/pr32372.ll
+++ b/llvm/test/CodeGen/SystemZ/pr32372.ll
@@ -4,10 +4,7 @@
 define void @pr32372(i8*) {
 ; CHECK-LABEL: pr32372:
 ; CHECK:       # %bb.0: # %BB
-; CHECK-NEXT:    llc %r1, 0(%r2)
 ; CHECK-NEXT:    mvhhi 0(%r1), -3825
-; CHECK-NEXT:    llill %r0, 0
-; CHECK-NEXT:    dlr %r0, %r1
 ; CHECK-NEXT:  .LBB0_1: # %CF251
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    j .LBB0_1
@@ -15,7 +12,8 @@ BB:
   %L = load i8, i8* %0
   store i16 -3825, i16* undef
   %L5 = load i8, i8* %0
-  %B9 = urem i8 %L5, %L
+  %B8 = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 %L5, i8 %L)
+  %B9 = extractvalue {i8, i1} %B8, 0
   %I107 = insertelement <8 x i8> zeroinitializer, i8 %B9, i32 7
   %ZE141 = zext i8 %L5 to i16
   br label %CF251
@@ -29,3 +27,5 @@ CF258:                                            ; preds = %CF251
   %Shuff230 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 1>
   br label %CF251
 }
+
+declare {i8, i1} @llvm.umul.with.overflow.i8(i8, i8) nounwind readnone
diff --git a/llvm/test/CodeGen/X86/pr38539.ll b/llvm/test/CodeGen/X86/pr38539.ll
index 7a871a9f117..a9475f9bc62 100644
--- a/llvm/test/CodeGen/X86/pr38539.ll
+++ b/llvm/test/CodeGen/X86/pr38539.ll
@@ -187,7 +187,7 @@ define void @g() {
 ; X64-NEXT:    movb (%rax), %al
 ; X64-NEXT:    movzbl %al, %eax
 ; X64-NEXT:    # kill: def $eax killed $eax def $ax
-; X64-NEXT:    divb %al
+; X64-NEXT:    divb (%rax)
 ; X64-NEXT:    movl %eax, %r8d
 ; X64-NEXT:    xorl %eax, %eax
 ; X64-NEXT:    xorl %edx, %edx
@@ -239,47 +239,47 @@ define void @g() {
 ; X86-NEXT:    .cfi_offset %edi, -16
 ; X86-NEXT:    .cfi_offset %ebx, -12
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %esi
-; X86-NEXT:    movl %esi, %eax
-; X86-NEXT:    shll $30, %eax
-; X86-NEXT:    sarl $30, %eax
+; X86-NEXT:    movl %esi, %ecx
+; X86-NEXT:    shll $30, %ecx
+; X86-NEXT:    sarl $30, %ecx
 ; X86-NEXT:    movl (%esp), %edi
-; X86-NEXT:    movb (%eax), %bl
-; X86-NEXT:    pushl %eax
+; X86-NEXT:    movb (%eax), %al
+; X86-NEXT:    movzbl %al, %eax
+; X86-NEXT:    # kill: def $eax killed $eax def $ax
+; X86-NEXT:    divb (%eax)
+; X86-NEXT:    movl %eax, %ebx
+; X86-NEXT:    pushl %ecx
 ; X86-NEXT:    pushl %edi
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    pushl $0
 ; X86-NEXT:    calll __moddi3
 ; X86-NEXT:    addl $16, %esp
 ; X86-NEXT:    andl $3, %edx
+; X86-NEXT:    testb %al, %al
+; X86-NEXT:    setne (%eax)
 ; X86-NEXT:    cmpl %eax, %edi
 ; X86-NEXT:    sbbl %edx, %esi
-; X86-NEXT:    setb %dl
-; X86-NEXT:    setae %dh
+; X86-NEXT:    setae %dl
+; X86-NEXT:    sbbb %cl, %cl
 ; X86-NEXT:    testb %al, %al
-; X86-NEXT:    setne %bh
-; X86-NEXT:    setne (%eax)
-; X86-NEXT:    movzbl %bl, %eax
-; X86-NEXT:    xorl %ecx, %ecx
-; X86-NEXT:    subb %dl, %cl
-; X86-NEXT:    # kill: def $eax killed $eax def $ax
-; X86-NEXT:    divb %bl
-; X86-NEXT:    negb %dh
-; X86-NEXT:    cmpb %al, %al
+; X86-NEXT:    setne %ch
+; X86-NEXT:    negb %dl
+; X86-NEXT:    cmpb %bl, %al
 ; X86-NEXT:    setle %al
 ; X86-NEXT:    negb %al
 ; X86-NEXT:    cbtw
-; X86-NEXT:    idivb %dh
+; X86-NEXT:    idivb %dl
 ; X86-NEXT:    movsbl %ah, %eax
 ; X86-NEXT:    movzbl %al, %eax
 ; X86-NEXT:    andl $1, %eax
 ; X86-NEXT:    shll $3, %eax
 ; X86-NEXT:    negl %eax
-; X86-NEXT:    negb %bh
+; X86-NEXT:    negb %ch
 ; X86-NEXT:    leal -8(%esp,%eax), %eax
 ; X86-NEXT:    movl %eax, (%eax)
 ; X86-NEXT:    movl %ecx, %eax
 ; X86-NEXT:    cbtw
-; X86-NEXT:    idivb %bh
+; X86-NEXT:    idivb %ch
 ; X86-NEXT:    movsbl %ah, %eax
 ; X86-NEXT:    andb $1, %al
 ; X86-NEXT:    movb %al, (%eax)
@@ -295,8 +295,9 @@ BB:
   %L17 = load i34, i34* %A30
   %B20 = and i34 %L17, -1
   %G2 = getelementptr i34, i34* %A30, i1 true
-  %L10 = load i8, i8* undef
-  %B6 = udiv i8 %L10, %L10
+  %L10 = load volatile i8, i8* undef
+  %L11 = load volatile i8, i8* undef
+  %B6 = udiv i8 %L10, %L11
   %C15 = icmp eq i8 undef, 0
   %B8 = srem i34 0, %B20
   %C2 = icmp ule i34 %B8, %B20

