diff options
| author | Philip Reames <listmail@philipreames.com> | 2015-12-16 00:49:36 +0000 |
|---|---|---|
| committer | Philip Reames <listmail@philipreames.com> | 2015-12-16 00:49:36 +0000 |
| commit | 61a24ab6cc305fec36c4b6389a1d6c966d12c259 (patch) | |
| tree | 243ac0dc51eaed9dd8be6b1d197d00a845994940 /llvm/test | |
| parent | e3fb51cf5ac539e4958245f2cd4df4aeaecf7c0e (diff) | |
| download | bcm5719-llvm-61a24ab6cc305fec36c4b6389a1d6c966d12c259.tar.gz bcm5719-llvm-61a24ab6cc305fec36c4b6389a1d6c966d12c259.zip | |
[IR] Add support for floating point atomic loads and stores
This patch allows atomic loads and stores of floating point to be specified in the IR and adds an adapter to allow them to be lowered via existing backend support for bitcast-to-equivalent-integer idiom.
Previously, the only way to specify an atomic float operation was to bitcast the pointer to a i32, load the value as an i32, then bitcast to a float. At its most basic, this patch simply moves this expansion step to the point we start lowering to the backend.
This patch does not add canonicalization rules to convert the bitcast idioms to the appropriate atomic loads. I plan to do that in the future, but for now, let's simply add the support. I'd like to get instruction selection working through at least one backend (x86-64) without the bitcast conversion before canonicalizing into this form.
Similarly, I haven't yet added the target hooks to opt out of the lowering step I added to AtomicExpand. I figured it would make more sense to add those once at least one backend (x86) was ready to actually opt out.
As you can see from the included tests, the generated code quality is not great. I plan on submitting some patches to fix this, but help from others along that line would be very welcome. I'm not super familiar with the backend and my ramp up time may be material.
Differential Revision: http://reviews.llvm.org/D15471
llvm-svn: 255737
Diffstat (limited to 'llvm/test')
| -rw-r--r-- | llvm/test/CodeGen/X86/atomic-non-integer.ll | 108 | ||||
| -rw-r--r-- | llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll | 82 | ||||
| -rw-r--r-- | llvm/test/Verifier/atomics.ll | 14 |
3 files changed, 204 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/atomic-non-integer.ll b/llvm/test/CodeGen/X86/atomic-non-integer.ll new file mode 100644 index 00000000000..98fcd96d3e4 --- /dev/null +++ b/llvm/test/CodeGen/X86/atomic-non-integer.ll @@ -0,0 +1,108 @@ +; RUN: llc < %s -mtriple=x86_64-linux-generic -verify-machineinstrs -mattr=sse2 | FileCheck %s + +; Note: This test is testing that the lowering for atomics matches what we +; currently emit for non-atomics + the atomic restriction. The presence of +; particular lowering detail in these tests should not be read as requiring +; that detail for correctness unless it's related to the atomicity itself. +; (Specifically, there were reviewer questions about the lowering for halfs +; and their calling convention which remain unresolved.) + +define void @store_half(half* %fptr, half %v) { +; CHECK-LABEL: @store_half +; CHECK: movq %rdi, %rbx +; CHECK: callq __gnu_f2h_ieee +; CHECK: movw %ax, (%rbx) + store atomic half %v, half* %fptr unordered, align 2 + ret void +} + +define void @store_float(float* %fptr, float %v) { +; CHECK-LABEL: @store_float +; CHECK: movd %xmm0, %eax +; CHECK: movl %eax, (%rdi) + store atomic float %v, float* %fptr unordered, align 4 + ret void +} + +define void @store_double(double* %fptr, double %v) { +; CHECK-LABEL: @store_double +; CHECK: movd %xmm0, %rax +; CHECK: movq %rax, (%rdi) + store atomic double %v, double* %fptr unordered, align 8 + ret void +} + +define void @store_fp128(fp128* %fptr, fp128 %v) { +; CHECK-LABEL: @store_fp128 +; CHECK: callq __sync_lock_test_and_set_16 + store atomic fp128 %v, fp128* %fptr unordered, align 16 + ret void +} + +define half @load_half(half* %fptr) { +; CHECK-LABEL: @load_half +; CHECK: movw (%rdi), %ax +; CHECK: movzwl %ax, %edi +; CHECK: jmp __gnu_h2f_ieee + %v = load atomic half, half* %fptr unordered, align 2 + ret half %v +} + +define float @load_float(float* %fptr) { +; CHECK-LABEL: @load_float +; CHECK: movl (%rdi), %eax +; CHECK: movd %eax, %xmm0 + %v = load atomic 
float, float* %fptr unordered, align 4 + ret float %v +} + +define double @load_double(double* %fptr) { +; CHECK-LABEL: @load_double +; CHECK: movq (%rdi), %rax +; CHECK: movd %rax, %xmm0 + %v = load atomic double, double* %fptr unordered, align 8 + ret double %v +} + +define fp128 @load_fp128(fp128* %fptr) { +; CHECK-LABEL: @load_fp128 +; CHECK: callq __sync_val_compare_and_swap_16 + %v = load atomic fp128, fp128* %fptr unordered, align 16 + ret fp128 %v +} + + +; sanity check the seq_cst lowering since that's the +; interesting one from an ordering perspective on x86. + +define void @store_float_seq_cst(float* %fptr, float %v) { +; CHECK-LABEL: @store_float_seq_cst +; CHECK: movd %xmm0, %eax +; CHECK: xchgl %eax, (%rdi) + store atomic float %v, float* %fptr seq_cst, align 4 + ret void +} + +define void @store_double_seq_cst(double* %fptr, double %v) { +; CHECK-LABEL: @store_double_seq_cst +; CHECK: movd %xmm0, %rax +; CHECK: xchgq %rax, (%rdi) + store atomic double %v, double* %fptr seq_cst, align 8 + ret void +} + +define float @load_float_seq_cst(float* %fptr) { +; CHECK-LABEL: @load_float_seq_cst +; CHECK: movl (%rdi), %eax +; CHECK: movd %eax, %xmm0 + %v = load atomic float, float* %fptr seq_cst, align 4 + ret float %v +} + +define double @load_double_seq_cst(double* %fptr) { +; CHECK-LABEL: @load_double_seq_cst +; CHECK: movq (%rdi), %rax +; CHECK: movd %rax, %xmm0 + %v = load atomic double, double* %fptr seq_cst, align 8 + ret double %v +} diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll new file mode 100644 index 00000000000..792fb1ec4f7 --- /dev/null +++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-non-integer.ll @@ -0,0 +1,82 @@ +; RUN: opt -S %s -atomic-expand -mtriple=x86_64-linux-gnu | FileCheck %s + +; This file tests the functions `llvm::convertAtomicLoadToIntegerType` and +; `llvm::convertAtomicStoreToIntegerType`. 
If X86 stops using this +; functionality, please move this test to a target which still is. + +define float @float_load_expand(float* %ptr) { +; CHECK-LABEL: @float_load_expand +; CHECK: %1 = bitcast float* %ptr to i32* +; CHECK: %2 = load atomic i32, i32* %1 unordered, align 4 +; CHECK: %3 = bitcast i32 %2 to float +; CHECK: ret float %3 + %res = load atomic float, float* %ptr unordered, align 4 + ret float %res +} + +define float @float_load_expand_seq_cst(float* %ptr) { +; CHECK-LABEL: @float_load_expand_seq_cst +; CHECK: %1 = bitcast float* %ptr to i32* +; CHECK: %2 = load atomic i32, i32* %1 seq_cst, align 4 +; CHECK: %3 = bitcast i32 %2 to float +; CHECK: ret float %3 + %res = load atomic float, float* %ptr seq_cst, align 4 + ret float %res +} + +define float @float_load_expand_vol(float* %ptr) { +; CHECK-LABEL: @float_load_expand_vol +; CHECK: %1 = bitcast float* %ptr to i32* +; CHECK: %2 = load atomic volatile i32, i32* %1 unordered, align 4 +; CHECK: %3 = bitcast i32 %2 to float +; CHECK: ret float %3 + %res = load atomic volatile float, float* %ptr unordered, align 4 + ret float %res +} + +define float @float_load_expand_addr1(float addrspace(1)* %ptr) { +; CHECK-LABEL: @float_load_expand_addr1 +; CHECK: %1 = bitcast float addrspace(1)* %ptr to i32 addrspace(1)* +; CHECK: %2 = load atomic i32, i32 addrspace(1)* %1 unordered, align 4 +; CHECK: %3 = bitcast i32 %2 to float +; CHECK: ret float %3 + %res = load atomic float, float addrspace(1)* %ptr unordered, align 4 + ret float %res +} + +define void @float_store_expand(float* %ptr, float %v) { +; CHECK-LABEL: @float_store_expand +; CHECK: %1 = bitcast float %v to i32 +; CHECK: %2 = bitcast float* %ptr to i32* +; CHECK: store atomic i32 %1, i32* %2 unordered, align 4 + store atomic float %v, float* %ptr unordered, align 4 + ret void +} + +define void @float_store_expand_seq_cst(float* %ptr, float %v) { +; CHECK-LABEL: @float_store_expand_seq_cst +; CHECK: %1 = bitcast float %v to i32 +; CHECK: %2 = bitcast 
float* %ptr to i32* +; CHECK: store atomic i32 %1, i32* %2 seq_cst, align 4 + store atomic float %v, float* %ptr seq_cst, align 4 + ret void +} + +define void @float_store_expand_vol(float* %ptr, float %v) { +; CHECK-LABEL: @float_store_expand_vol +; CHECK: %1 = bitcast float %v to i32 +; CHECK: %2 = bitcast float* %ptr to i32* +; CHECK: store atomic volatile i32 %1, i32* %2 unordered, align 4 + store atomic volatile float %v, float* %ptr unordered, align 4 + ret void +} + +define void @float_store_expand_addr1(float addrspace(1)* %ptr, float %v) { +; CHECK-LABEL: @float_store_expand_addr1 +; CHECK: %1 = bitcast float %v to i32 +; CHECK: %2 = bitcast float addrspace(1)* %ptr to i32 addrspace(1)* +; CHECK: store atomic i32 %1, i32 addrspace(1)* %2 unordered, align 4 + store atomic float %v, float addrspace(1)* %ptr unordered, align 4 + ret void +} + diff --git a/llvm/test/Verifier/atomics.ll b/llvm/test/Verifier/atomics.ll new file mode 100644 index 00000000000..ee7bf4bb19a --- /dev/null +++ b/llvm/test/Verifier/atomics.ll @@ -0,0 +1,14 @@ +; RUN: not opt -verify < %s 2>&1 | FileCheck %s + +; CHECK: atomic store operand must have integer or floating point type! +; CHECK: atomic load operand must have integer or floating point type! + +define void @foo(x86_mmx* %P, x86_mmx %v) { + store atomic x86_mmx %v, x86_mmx* %P unordered, align 8 + ret void +} + +define x86_mmx @bar(x86_mmx* %P) { + %v = load atomic x86_mmx, x86_mmx* %P unordered, align 8 + ret x86_mmx %v +} |

