-rw-r--r--   llvm/lib/Target/X86/X86ISelLowering.cpp            |   2
-rw-r--r--   llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll  |   9
-rw-r--r--   llvm/test/CodeGen/X86/atomic-non-integer.ll        | 202
-rw-r--r--   llvm/test/CodeGen/X86/combineIncDecVector-crash.ll |   2
4 files changed, 168 insertions, 47 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index ea4d43bbc8e..fbc4c6f0495 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -87,7 +87,7 @@ static cl::opt<bool> MulConstantOptimization(
     cl::Hidden);
 
 static cl::opt<bool> ExperimentalUnorderedISEL(
-    "x86-experimental-unordered-atomic-isel", cl::init(true),
+    "x86-experimental-unordered-atomic-isel", cl::init(false),
     cl::desc("Use LoadSDNode and StoreSDNode instead of "
             "AtomicSDNode for unordered atomic loads and "
             "stores respectively."),
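The flipped default above means unordered atomic loads and stores on x86 are again selected as AtomicSDNode rather than plain LoadSDNode/StoreSDNode, per the cl::desc. For reference, a minimal sketch of the IR shapes this flag governs, in the same typed-pointer syntax the tests below use; the function names are illustrative, not from this patch:

  ; Illustrative only (assumed names): the unordered atomic load/store
  ; forms that x86-experimental-unordered-atomic-isel selects between
  ; AtomicSDNode and LoadSDNode/StoreSDNode.
  define i32 @load_unordered(i32* %p) {
    %v = load atomic i32, i32* %p unordered, align 4
    ret i32 %v
  }

  define void @store_unordered(i32* %p, i32 %v) {
    store atomic i32 %v, i32* %p unordered, align 4
    ret void
  }

The previous behavior stays reachable by passing -x86-experimental-unordered-atomic-isel=true to llc.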
diff --git a/llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll b/llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll
index 1415cd22c2c..a3028e9ac7b 100644
--- a/llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll
+++ b/llvm/test/CodeGen/X86/atomic-non-integer-fp128.ll
@@ -21,7 +21,14 @@ define void @store_fp128(fp128* %fptr, fp128 %v) {
 ;
 ; X64-SSE-LABEL: store_fp128:
 ; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
+; X64-SSE-NEXT:    subq $24, %rsp
+; X64-SSE-NEXT:    .cfi_def_cfa_offset 32
+; X64-SSE-NEXT:    movaps %xmm0, (%rsp)
+; X64-SSE-NEXT:    movq (%rsp), %rsi
+; X64-SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
+; X64-SSE-NEXT:    callq __sync_lock_test_and_set_16
+; X64-SSE-NEXT:    addq $24, %rsp
+; X64-SSE-NEXT:    .cfi_def_cfa_offset 8
 ; X64-SSE-NEXT:    retq
   store atomic fp128 %v, fp128* %fptr unordered, align 16
   ret void
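With AtomicSDNode selection back on, the 16-byte store above can no longer be emitted as a bare movaps (a 128-bit SSE store is not architecturally guaranteed to be atomic); it is expanded to the __sync_lock_test_and_set_16 libcall, an atomic exchange whose previous value is simply discarded. A hedged IR sketch of the equivalent semantics, not the literal expansion the backend performs:

  ; Sketch only: a 16-byte atomic store expressed as an exchange
  ; whose old value is ignored (i128 stands in for fp128 here).
  define void @store_i128_xchg(i128* %p, i128 %v) {
    %old = atomicrmw xchg i128* %p, i128 %v seq_cst
    ret void
  }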
diff --git a/llvm/test/CodeGen/X86/atomic-non-integer.ll b/llvm/test/CodeGen/X86/atomic-non-integer.ll
index 812e703690c..8fd96b749a3 100644
--- a/llvm/test/CodeGen/X86/atomic-non-integer.ll
+++ b/llvm/test/CodeGen/X86/atomic-non-integer.ll
@@ -114,26 +114,12 @@ define void @store_half(half* %fptr, half %v) {
 }
 
 define void @store_float(float* %fptr, float %v) {
-; X86-SSE-LABEL: store_float:
-; X86-SSE:       # %bb.0:
-; X86-SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT:    movss %xmm0, (%eax)
-; X86-SSE-NEXT:    retl
-;
-; X86-AVX-LABEL: store_float:
-; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX-NEXT:    vmovss %xmm0, (%eax)
-; X86-AVX-NEXT:    retl
-;
-; X86-NOSSE-LABEL: store_float:
-; X86-NOSSE:       # %bb.0:
-; X86-NOSSE-NEXT:    flds {{[0-9]+}}(%esp)
-; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NOSSE-NEXT:    fstps (%eax)
-; X86-NOSSE-NEXT:    retl
+; X86-LABEL: store_float:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl %ecx, (%eax)
+; X86-NEXT:    retl
 ;
 ; X64-SSE-LABEL: store_float:
 ; X64-SSE:       # %bb.0:
@@ -176,16 +162,16 @@ define void @store_double(double* %fptr, double %v) {
 ;
 ; X86-SSE2-LABEL: store_double:
 ; X86-SSE2:       # %bb.0:
-; X86-SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
 ; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT:    movsd %xmm0, (%eax)
+; X86-SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT:    movlps %xmm0, (%eax)
 ; X86-SSE2-NEXT:    retl
 ;
 ; X86-AVX-LABEL: store_double:
 ; X86-AVX:       # %bb.0:
-; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
 ; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-AVX-NEXT:    vmovsd %xmm0, (%eax)
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    vmovlps %xmm0, (%eax)
 ; X86-AVX-NEXT:    retl
 ;
 ; X86-NOSSE-LABEL: store_double:
@@ -290,12 +276,26 @@ define void @store_fp128(fp128* %fptr, fp128 %v) {
 ;
 ; X64-SSE-LABEL: store_fp128:
 ; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movaps %xmm0, (%rdi)
+; X64-SSE-NEXT:    subq $24, %rsp
+; X64-SSE-NEXT:    .cfi_def_cfa_offset 32
+; X64-SSE-NEXT:    movaps %xmm0, (%rsp)
+; X64-SSE-NEXT:    movq (%rsp), %rsi
+; X64-SSE-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
+; X64-SSE-NEXT:    callq __sync_lock_test_and_set_16
+; X64-SSE-NEXT:    addq $24, %rsp
+; X64-SSE-NEXT:    .cfi_def_cfa_offset 8
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: store_fp128:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vmovaps %xmm0, (%rdi)
+; X64-AVX-NEXT:    subq $24, %rsp
+; X64-AVX-NEXT:    .cfi_def_cfa_offset 32
+; X64-AVX-NEXT:    vmovaps %xmm0, (%rsp)
+; X64-AVX-NEXT:    movq (%rsp), %rsi
+; X64-AVX-NEXT:    movq {{[0-9]+}}(%rsp), %rdx
+; X64-AVX-NEXT:    callq __sync_lock_test_and_set_16
+; X64-AVX-NEXT:    addq $24, %rsp
+; X64-AVX-NEXT:    .cfi_def_cfa_offset 8
 ; X64-AVX-NEXT:    retq
   store atomic fp128 %v, fp128* %fptr unordered, align 16
   ret void
@@ -383,11 +383,53 @@ define half @load_half(half* %fptr) {
 }
 
 define float @load_float(float* %fptr) {
-; X86-LABEL: load_float:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    flds (%eax)
-; X86-NEXT:    retl
+; X86-SSE1-LABEL: load_float:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    pushl %eax
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl (%eax), %eax
+; X86-SSE1-NEXT:    movl %eax, (%esp)
+; X86-SSE1-NEXT:    flds (%esp)
+; X86-SSE1-NEXT:    popl %eax
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 4
+; X86-SSE1-NEXT:    retl
+;
+; X86-SSE2-LABEL: load_float:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    pushl %eax
+; X86-SSE2-NEXT:    .cfi_def_cfa_offset 8
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT:    movss %xmm0, (%esp)
+; X86-SSE2-NEXT:    flds (%esp)
+; X86-SSE2-NEXT:    popl %eax
+; X86-SSE2-NEXT:    .cfi_def_cfa_offset 4
+; X86-SSE2-NEXT:    retl
+;
+; X86-AVX-LABEL: load_float:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    pushl %eax
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 8
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-AVX-NEXT:    vmovss %xmm0, (%esp)
+; X86-AVX-NEXT:    flds (%esp)
+; X86-AVX-NEXT:    popl %eax
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 4
+; X86-AVX-NEXT:    retl
+;
+; X86-NOSSE-LABEL: load_float:
+; X86-NOSSE:       # %bb.0:
+; X86-NOSSE-NEXT:    pushl %eax
+; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 8
+; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT:    movl (%eax), %eax
+; X86-NOSSE-NEXT:    movl %eax, (%esp)
+; X86-NOSSE-NEXT:    flds (%esp)
+; X86-NOSSE-NEXT:    popl %eax
+; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 4
+; X86-NOSSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: load_float:
 ; X64-SSE:       # %bb.0:
@@ -403,11 +445,61 @@ define float @load_float(float* %fptr) {
 }
 
 define double @load_double(double* %fptr) {
-; X86-LABEL: load_double:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT:    fldl (%eax)
-; X86-NEXT:    retl
+; X86-SSE1-LABEL: load_double:
+; X86-SSE1:       # %bb.0:
+; X86-SSE1-NEXT:    subl $20, %esp
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 24
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    fildll (%eax)
+; X86-SSE1-NEXT:    fistpll {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE1-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE1-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT:    movl %eax, (%esp)
+; X86-SSE1-NEXT:    fldl (%esp)
+; X86-SSE1-NEXT:    addl $20, %esp
+; X86-SSE1-NEXT:    .cfi_def_cfa_offset 4
+; X86-SSE1-NEXT:    retl
+;
+; X86-SSE2-LABEL: load_double:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    subl $12, %esp
+; X86-SSE2-NEXT:    .cfi_def_cfa_offset 16
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; X86-SSE2-NEXT:    movlps %xmm0, (%esp)
+; X86-SSE2-NEXT:    fldl (%esp)
+; X86-SSE2-NEXT:    addl $12, %esp
+; X86-SSE2-NEXT:    .cfi_def_cfa_offset 4
+; X86-SSE2-NEXT:    retl
+;
+; X86-AVX-LABEL: load_double:
+; X86-AVX:       # %bb.0:
+; X86-AVX-NEXT:    subl $12, %esp
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 16
+; X86-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X86-AVX-NEXT:    vmovlps %xmm0, (%esp)
+; X86-AVX-NEXT:    fldl (%esp)
+; X86-AVX-NEXT:    addl $12, %esp
+; X86-AVX-NEXT:    .cfi_def_cfa_offset 4
+; X86-AVX-NEXT:    retl
+;
+; X86-NOSSE-LABEL: load_double:
+; X86-NOSSE:       # %bb.0:
+; X86-NOSSE-NEXT:    subl $20, %esp
+; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 24
+; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT:    fildll (%eax)
+; X86-NOSSE-NEXT:    fistpll {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NOSSE-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT:    movl %eax, (%esp)
+; X86-NOSSE-NEXT:    fldl (%esp)
+; X86-NOSSE-NEXT:    addl $20, %esp
+; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 4
+; X86-NOSSE-NEXT:    retl
 ;
 ; X64-SSE-LABEL: load_double:
 ; X64-SSE:       # %bb.0:
@@ -465,10 +557,10 @@ define fp128 @load_fp128(fp128* %fptr) {
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-SSE-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-SSE-NEXT:    movl %edi, 12(%esi)
-; X86-SSE-NEXT:    movl %edx, 8(%esi)
-; X86-SSE-NEXT:    movl %ecx, 4(%esi)
+; X86-SSE-NEXT:    movl %edi, 8(%esi)
+; X86-SSE-NEXT:    movl %edx, 12(%esi)
 ; X86-SSE-NEXT:    movl %eax, (%esi)
+; X86-SSE-NEXT:    movl %ecx, 4(%esi)
 ; X86-SSE-NEXT:    movl %esi, %eax
 ; X86-SSE-NEXT:    addl $20, %esp
 ; X86-SSE-NEXT:    .cfi_def_cfa_offset 12
@@ -546,10 +638,10 @@ define fp128 @load_fp128(fp128* %fptr) {
 ; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %edx
 ; X86-NOSSE-NEXT:    movl {{[0-9]+}}(%esp), %edi
-; X86-NOSSE-NEXT:    movl %edi, 12(%esi)
-; X86-NOSSE-NEXT:    movl %edx, 8(%esi)
-; X86-NOSSE-NEXT:    movl %ecx, 4(%esi)
+; X86-NOSSE-NEXT:    movl %edi, 8(%esi)
+; X86-NOSSE-NEXT:    movl %edx, 12(%esi)
 ; X86-NOSSE-NEXT:    movl %eax, (%esi)
+; X86-NOSSE-NEXT:    movl %ecx, 4(%esi)
 ; X86-NOSSE-NEXT:    movl %esi, %eax
 ; X86-NOSSE-NEXT:    addl $20, %esp
 ; X86-NOSSE-NEXT:    .cfi_def_cfa_offset 12
@@ -561,12 +653,34 @@ define fp128 @load_fp128(fp128* %fptr) {
 ;
 ; X64-SSE-LABEL: load_fp128:
 ; X64-SSE:       # %bb.0:
-; X64-SSE-NEXT:    movaps (%rdi), %xmm0
+; X64-SSE-NEXT:    subq $24, %rsp
+; X64-SSE-NEXT:    .cfi_def_cfa_offset 32
+; X64-SSE-NEXT:    xorl %esi, %esi
+; X64-SSE-NEXT:    xorl %edx, %edx
+; X64-SSE-NEXT:    xorl %ecx, %ecx
+; X64-SSE-NEXT:    xorl %r8d, %r8d
+; X64-SSE-NEXT:    callq __sync_val_compare_and_swap_16
+; X64-SSE-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; X64-SSE-NEXT:    movq %rax, (%rsp)
+; X64-SSE-NEXT:    movaps (%rsp), %xmm0
+; X64-SSE-NEXT:    addq $24, %rsp
+; X64-SSE-NEXT:    .cfi_def_cfa_offset 8
 ; X64-SSE-NEXT:    retq
 ;
 ; X64-AVX-LABEL: load_fp128:
 ; X64-AVX:       # %bb.0:
-; X64-AVX-NEXT:    vmovaps (%rdi), %xmm0
+; X64-AVX-NEXT:    subq $24, %rsp
+; X64-AVX-NEXT:    .cfi_def_cfa_offset 32
+; X64-AVX-NEXT:    xorl %esi, %esi
+; X64-AVX-NEXT:    xorl %edx, %edx
+; X64-AVX-NEXT:    xorl %ecx, %ecx
+; X64-AVX-NEXT:    xorl %r8d, %r8d
+; X64-AVX-NEXT:    callq __sync_val_compare_and_swap_16
+; X64-AVX-NEXT:    movq %rdx, {{[0-9]+}}(%rsp)
+; X64-AVX-NEXT:    movq %rax, (%rsp)
+; X64-AVX-NEXT:    vmovaps (%rsp), %xmm0
+; X64-AVX-NEXT:    addq $24, %rsp
+; X64-AVX-NEXT:    .cfi_def_cfa_offset 8
 ; X64-AVX-NEXT:    retq
   %v = load atomic fp128, fp128* %fptr unordered, align 16
   ret fp128 %v
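Two techniques show up in the updated checks above. For 8-byte loads on 32-bit x86 (the X86-SSE1 and X86-NOSSE runs of load_double), the backend uses fildll/fistpll: the x87 unit performs a single 64-bit memory read, the classic way to get an atomic 8-byte load without SSE2, and the value is then bounced through the stack to be reloaded as a double. For 16 bytes, load_fp128 calls __sync_val_compare_and_swap_16 with zeroed "expected" and "new" arguments: the compare-and-swap never changes the stored value (it writes 0 only when the value was already 0) but returns the current 16 bytes atomically in %rax:%rdx. A hedged IR sketch of that read-via-CAS idea, illustrative rather than the backend's literal output:

  ; Sketch only (assumed names): reading a 16-byte value atomically
  ; with a cmpxchg of 0 for 0, which cannot alter the stored value.
  define i128 @load_i128_cas(i128* %p) {
    %pair = cmpxchg i128* %p, i128 0, i128 0 seq_cst seq_cst
    %v = extractvalue { i128, i1 } %pair, 0
    ret i128 %v
  }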
diff --git a/llvm/test/CodeGen/X86/combineIncDecVector-crash.ll b/llvm/test/CodeGen/X86/combineIncDecVector-crash.ll
index 62f3da44bda..8dea7a5fdcd 100644
--- a/llvm/test/CodeGen/X86/combineIncDecVector-crash.ll
+++ b/llvm/test/CodeGen/X86/combineIncDecVector-crash.ll
@@ -19,11 +19,11 @@ define void @TestvMeth(i32 %0, i64 %1) gc "statepoint-example" !prof !1 {
 ; CHECK-NEXT:    callq newarray
 ; CHECK-NEXT:  .Ltmp0:
 ; CHECK-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT:    addss (%rax), %xmm0
 ; CHECK-NEXT:    movdqu (%rax), %xmm1
 ; CHECK-NEXT:    pcmpeqd %xmm2, %xmm2
 ; CHECK-NEXT:    psubd %xmm2, %xmm1
 ; CHECK-NEXT:    movdqu %xmm1, (%rax)
-; CHECK-NEXT:    addss {{.*}}(%rip), %xmm0
 ; CHECK-NEXT:    movss %xmm0, (%rax)
 bci_0:
   %token418 = call token (i64, i32, i8 * (i64, i32, i32, i32)*, i32,