author     Igor Breger <igor.breger@intel.com>    2015-11-10 07:09:07 +0000
committer  Igor Breger <igor.breger@intel.com>    2015-11-10 07:09:07 +0000
commit     b6b27af46a3caeb0aa249417137fc28072472413 (patch)
tree       3d8848101c69722d37c5f968abd746f85344b726 /llvm/test/CodeGen
parent     649a607e11d64f69cf972bcaee3103475826f67c (diff)
AVX512: Implemented encoding and DAG lowering for VMOVHPS/PD and VMOVLPS/PD instructions.
Differential Revision: http://reviews.llvm.org/D14492
llvm-svn: 252592
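
For context, the patterns these instructions cover are 64-bit loads and stores of one half of a 128-bit vector, which is exactly what the new tests below exercise. A minimal sketch of the load-into-low-half case follows, mirroring the new insert_mem_lo_v2f64 test; the intent of the patch is that this shape can now be lowered to an EVEX-encoded vmovlpd. The function name, RUN line, triple, and -mattr flags here are illustrative assumptions, not taken from this commit:

; Illustrative sketch only: RUN line and -mattr flags are assumptions, not from this commit.
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl -show-mc-encoding < %s
define <2 x double> @load_lo_v2f64(double* %ptr, <2 x double> %b) {
  ; Insert a double loaded from memory into lane 0 and keep lane 1 of %b:
  ; low 64 bits come from memory, high 64 bits are preserved.
  %a = load double, double* %ptr
  %v = insertelement <2 x double> undef, double %a, i32 0
  %shuffle = shufflevector <2 x double> %v, <2 x double> %b, <2 x i32> <i32 0, i32 3>
  ret <2 x double> %shuffle
}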
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/X86/avx-isa-check.ll | 38
-rw-r--r--  llvm/test/CodeGen/X86/exedeps-movq.ll  | 18
2 files changed, 56 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/avx-isa-check.ll b/llvm/test/CodeGen/X86/avx-isa-check.ll
index 02b4f37f96a..d295ffd3048 100644
--- a/llvm/test/CodeGen/X86/avx-isa-check.ll
+++ b/llvm/test/CodeGen/X86/avx-isa-check.ll
@@ -344,3 +344,41 @@ define <16 x i16> @shuffle_v16i16_00_00_00_00_00_00_00_00_00_00_00_00_00_00_00_0
   ret <16 x i16> %shuffle
 }
 
+define <2 x double> @insert_mem_lo_v2f64(double* %ptr, <2 x double> %b) {
+  %a = load double, double* %ptr
+  %v = insertelement <2 x double> undef, double %a, i32 0
+  %shuffle = shufflevector <2 x double> %v, <2 x double> %b, <2 x i32> <i32 0, i32 3>
+  ret <2 x double> %shuffle
+}
+
+define <2 x double> @insert_mem_hi_v2f64(double* %ptr, <2 x double> %b) {
+  %a = load double, double* %ptr
+  %v = insertelement <2 x double> undef, double %a, i32 0
+  %shuffle = shufflevector <2 x double> %v, <2 x double> %b, <2 x i32> <i32 2, i32 0>
+  ret <2 x double> %shuffle
+}
+
+define void @store_floats(<4 x float> %x, i64* %p) {
+  %a = fadd <4 x float> %x, %x
+  %b = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 0, i32 1>
+  %c = bitcast <2 x float> %b to i64
+  store i64 %c, i64* %p
+  ret void
+}
+
+define void @store_double(<2 x double> %x, i64* %p) {
+  %a = fadd <2 x double> %x, %x
+  %b = extractelement <2 x double> %a, i32 0
+  %c = bitcast double %b to i64
+  store i64 %c, i64* %p
+  ret void
+}
+
+define void @store_h_double(<2 x double> %x, i64* %p) {
+  %a = fadd <2 x double> %x, %x
+  %b = extractelement <2 x double> %a, i32 1
+  %c = bitcast double %b to i64
+  store i64 %c, i64* %p
+  ret void
+}
+
diff --git a/llvm/test/CodeGen/X86/exedeps-movq.ll b/llvm/test/CodeGen/X86/exedeps-movq.ll
index a5873be6f27..ae147accc3a 100644
--- a/llvm/test/CodeGen/X86/exedeps-movq.ll
+++ b/llvm/test/CodeGen/X86/exedeps-movq.ll
@@ -66,3 +66,21 @@ define void @store_int(<4 x i32> %x, <2 x float>* %p) {
   ret void
 }
 
+define void @store_h_double(<2 x double> %x, i64* %p) {
+; SSE-LABEL: store_h_double:
+; SSE:       # BB#0:
+; SSE-NEXT:    addpd %xmm0, %xmm0
+; SSE-NEXT:    movhpd %xmm0, (%rdi)
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: store_h_double:
+; AVX:       # BB#0:
+; AVX-NEXT:    vaddpd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    vmovhpd %xmm0, (%rdi)
+; AVX-NEXT:    retq
+  %a = fadd <2 x double> %x, %x
+  %b = extractelement <2 x double> %a, i32 1
+  %c = bitcast double %b to i64
+  store i64 %c, i64* %p
+  ret void
+}
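
As a follow-up, one way to eyeball the new encodings locally is to run a function of this shape through llc with encoding comments enabled. The sketch below reuses the store_h_double pattern from exedeps-movq.ll; the function name, triple, -mattr flags, and CHECK line are assumptions for illustration, not taken from this commit's RUN lines:

; Sketch only: RUN line, -mattr flags, and CHECK line are assumptions, not from this commit.
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl -show-mc-encoding < %s | FileCheck %s
define void @store_hi_v2f64(<2 x double> %x, i64* %p) {
  ; Same shape as store_h_double above: store the high 64-bit lane to memory.
  %a = fadd <2 x double> %x, %x
  %b = extractelement <2 x double> %a, i32 1
  %c = bitcast double %b to i64
  store i64 %c, i64* %p
  ret void
}
; CHECK: vmovhpd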