author    Simon Pilgrim <llvm-dev@redking.me.uk>    2016-06-03 13:42:49 +0000
committer Simon Pilgrim <llvm-dev@redking.me.uk>    2016-06-03 13:42:49 +0000
commit    f92d175a78fcbe415cc2b788e7d63c0570f9a5d8 (patch)
tree      e7cf0cfdb96cad9b2c5aec95e50d637f27128fda /llvm
parent    9da258d8e5ba3e9b4f4e173865ce191d8634dca8 (diff)
[X86][AVX512] Added 512-bit vector nontemporal load tests
llvm-svn: 271668
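
For context, every test in this patch loads a vector through !nontemporal metadata and uses FileCheck to pin down the instruction selected for each AVX-512 feature set. A minimal sketch of the pattern the new 512-bit tests follow (the function name @example_v16f32 is illustrative, not taken from the patch; the RUN line and check are copied from the diff below):

    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512
    define <16 x float> @example_v16f32(<16 x float>* %src) {
    ; AVX512: vmovups (%rdi), %zmm0
      %1 = load <16 x float>, <16 x float>* %src, align 32, !nontemporal !1
      ret <16 x float> %1
    }
    !1 = !{i32 1}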
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/test/CodeGen/X86/nontemporal-loads.ll | 1054
1 file changed, 909 insertions(+), 145 deletions(-)
diff --git a/llvm/test/CodeGen/X86/nontemporal-loads.ll b/llvm/test/CodeGen/X86/nontemporal-loads.ll
index f33273f8ae4..bcfa46d16d9 100644
--- a/llvm/test/CodeGen/X86/nontemporal-loads.ll
+++ b/llvm/test/CodeGen/X86/nontemporal-loads.ll
@@ -3,7 +3,9 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=VLX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512BW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512VL
 
 ; FIXME: Tests for nontemporal load support which was introduced in SSE41
 
@@ -18,10 +20,10 @@ define <4 x float> @test_v4f32(<4 x float>* %src) {
 ; AVX-NEXT: vmovaps (%rdi), %xmm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_v4f32:
-; VLX: # BB#0:
-; VLX-NEXT: vmovaps (%rdi), %xmm0
-; VLX-NEXT: retq
+; AVX512-LABEL: test_v4f32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovaps (%rdi), %xmm0
+; AVX512-NEXT: retq
 %1 = load <4 x float>, <4 x float>* %src, align 16, !nontemporal !1
 ret <4 x float> %1
 }
@@ -37,10 +39,20 @@ define <4 x i32> @test_v4i32(<4 x i32>* %src) {
 ; AVX-NEXT: vmovaps (%rdi), %xmm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_v4i32:
-; VLX: # BB#0:
-; VLX-NEXT: vmovdqa32 (%rdi), %xmm0
-; VLX-NEXT: retq
+; AVX512F-LABEL: test_v4i32:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovaps (%rdi), %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_v4i32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovaps (%rdi), %xmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_v4i32:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa32 (%rdi), %xmm0
+; AVX512VL-NEXT: retq
 %1 = load <4 x i32>, <4 x i32>* %src, align 16, !nontemporal !1
 ret <4 x i32> %1
 }
@@ -56,10 +68,20 @@ define <2 x double> @test_v2f64(<2 x double>* %src) {
 ; AVX-NEXT: vmovaps (%rdi), %xmm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_v2f64:
-; VLX: # BB#0:
-; VLX-NEXT: vmovapd (%rdi), %xmm0
-; VLX-NEXT: retq
+; AVX512F-LABEL: test_v2f64:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovaps (%rdi), %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_v2f64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovaps (%rdi), %xmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_v2f64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovapd (%rdi), %xmm0
+; AVX512VL-NEXT: retq
 %1 = load <2 x double>, <2 x double>* %src, align 16, !nontemporal !1
 ret <2 x double> %1
 }
@@ -75,10 +97,20 @@ define <2 x i64> @test_v2i64(<2 x i64>* %src) {
 ; AVX-NEXT: vmovaps (%rdi), %xmm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_v2i64:
-; VLX: # BB#0:
-; VLX-NEXT: vmovdqa64 (%rdi), %xmm0
-; VLX-NEXT: retq
+; AVX512F-LABEL: test_v2i64:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovaps (%rdi), %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_v2i64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovaps (%rdi), %xmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_v2i64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
+; AVX512VL-NEXT: retq
 %1 = load <2 x i64>, <2 x i64>* %src, align 16, !nontemporal !1
 ret <2 x i64> %1
 }
@@ -94,10 +126,20 @@ define <8 x i16> @test_v8i16(<8 x i16>* %src) {
 ; AVX-NEXT: vmovaps (%rdi), %xmm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_v8i16:
-; VLX: # BB#0:
-; VLX-NEXT: vmovdqa64 (%rdi), %xmm0
-; VLX-NEXT: retq
+; AVX512F-LABEL: test_v8i16:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovaps (%rdi), %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_v8i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovaps (%rdi), %xmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_v8i16:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
+; AVX512VL-NEXT: retq
 %1 = load <8 x i16>, <8 x i16>* %src, align 16, !nontemporal !1
 ret <8 x i16> %1
 }
@@ -113,10 +155,20 @@ define <16 x i8> @test_v16i8(<16 x i8>* %src) {
 ; AVX-NEXT: vmovaps (%rdi), %xmm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_v16i8:
-; VLX: # BB#0:
-; VLX-NEXT: vmovdqa64 (%rdi), %xmm0
-; VLX-NEXT: retq
+; AVX512F-LABEL: test_v16i8:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovaps (%rdi), %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_v16i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovaps (%rdi), %xmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_v16i8:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa64 (%rdi), %xmm0
+; AVX512VL-NEXT: retq
 %1 = load <16 x i8>, <16 x i8>* %src, align 16, !nontemporal !1
 ret <16 x i8> %1
 }
@@ -135,10 +187,10 @@ define <8 x float> @test_v8f32(<8 x float>* %src) {
 ; AVX-NEXT: vmovaps (%rdi), %ymm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_v8f32:
-; VLX: # BB#0:
-; VLX-NEXT: vmovaps (%rdi), %ymm0
-; VLX-NEXT: retq
+; AVX512-LABEL: test_v8f32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovaps (%rdi), %ymm0
+; AVX512-NEXT: retq
 %1 = load <8 x float>, <8 x float>* %src, align 32, !nontemporal !1
 ret <8 x float> %1
 }
@@ -155,10 +207,20 @@ define <8 x i32> @test_v8i32(<8 x i32>* %src) {
 ; AVX-NEXT: vmovaps (%rdi), %ymm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_v8i32:
-; VLX: # BB#0:
-; VLX-NEXT: vmovdqa32 (%rdi), %ymm0
-; VLX-NEXT: retq
+; AVX512F-LABEL: test_v8i32:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovaps (%rdi), %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_v8i32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovaps (%rdi), %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_v8i32:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa32 (%rdi), %ymm0
+; AVX512VL-NEXT: retq
 %1 = load <8 x i32>, <8 x i32>* %src, align 32, !nontemporal !1
 ret <8 x i32> %1
 }
@@ -175,10 +237,20 @@ define <4 x double> @test_v4f64(<4 x double>* %src) {
 ; AVX-NEXT: vmovaps (%rdi), %ymm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_v4f64:
-; VLX: # BB#0:
-; VLX-NEXT: vmovapd (%rdi), %ymm0
-; VLX-NEXT: retq
+; AVX512F-LABEL: test_v4f64:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovaps (%rdi), %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_v4f64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovaps (%rdi), %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_v4f64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovapd (%rdi), %ymm0
+; AVX512VL-NEXT: retq
 %1 = load <4 x double>, <4 x double>* %src, align 32, !nontemporal !1
 ret <4 x double> %1
 }
@@ -195,10 +267,20 @@ define <4 x i64> @test_v4i64(<4 x i64>* %src) {
 ; AVX-NEXT: vmovaps (%rdi), %ymm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_v4i64:
-; VLX: # BB#0:
-; VLX-NEXT: vmovdqa64 (%rdi), %ymm0
-; VLX-NEXT: retq
+; AVX512F-LABEL: test_v4i64:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovaps (%rdi), %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_v4i64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovaps (%rdi), %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_v4i64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa64 (%rdi), %ymm0
+; AVX512VL-NEXT: retq
 %1 = load <4 x i64>, <4 x i64>* %src, align 32, !nontemporal !1
 ret <4 x i64> %1
 }
@@ -215,10 +297,20 @@ define <16 x i16> @test_v16i16(<16 x i16>* %src) {
 ; AVX-NEXT: vmovaps (%rdi), %ymm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_v16i16:
-; VLX: # BB#0:
-; VLX-NEXT: vmovdqa64 (%rdi), %ymm0
-; VLX-NEXT: retq
+; AVX512F-LABEL: test_v16i16:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovaps (%rdi), %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_v16i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovaps (%rdi), %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_v16i16:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa64 (%rdi), %ymm0
+; AVX512VL-NEXT: retq
 %1 = load <16 x i16>, <16 x i16>* %src, align 32, !nontemporal !1
 ret <16 x i16> %1
 }
@@ -235,14 +327,188 @@ define <32 x i8> @test_v32i8(<32 x i8>* %src) {
 ; AVX-NEXT: vmovaps (%rdi), %ymm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_v32i8:
-; VLX: # BB#0:
-; VLX-NEXT: vmovdqa64 (%rdi), %ymm0
-; VLX-NEXT: retq
+; AVX512F-LABEL: test_v32i8:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovaps (%rdi), %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_v32i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovaps (%rdi), %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_v32i8:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa64 (%rdi), %ymm0
+; AVX512VL-NEXT: retq
 %1 = load <32 x i8>, <32 x i8>* %src, align 32, !nontemporal !1
 ret <32 x i8> %1
 }
 
+; And now ZMM versions.
+
+define <16 x float> @test_v16f32(<16 x float>* %src) {
+; SSE-LABEL: test_v16f32:
+; SSE: # BB#0:
+; SSE-NEXT: movaps (%rdi), %xmm0
+; SSE-NEXT: movaps 16(%rdi), %xmm1
+; SSE-NEXT: movaps 32(%rdi), %xmm2
+; SSE-NEXT: movaps 48(%rdi), %xmm3
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v16f32:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps (%rdi), %ymm0
+; AVX-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v16f32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovups (%rdi), %zmm0
+; AVX512-NEXT: retq
+ %1 = load <16 x float>, <16 x float>* %src, align 32, !nontemporal !1
+ ret <16 x float> %1
+}
+
+define <16 x i32> @test_v16i32(<16 x i32>* %src) {
+; SSE-LABEL: test_v16i32:
+; SSE: # BB#0:
+; SSE-NEXT: movaps (%rdi), %xmm0
+; SSE-NEXT: movaps 16(%rdi), %xmm1
+; SSE-NEXT: movaps 32(%rdi), %xmm2
+; SSE-NEXT: movaps 48(%rdi), %xmm3
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v16i32:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps (%rdi), %ymm0
+; AVX-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v16i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovdqu32 (%rdi), %zmm0
+; AVX512-NEXT: retq
+ %1 = load <16 x i32>, <16 x i32>* %src, align 32, !nontemporal !1
+ ret <16 x i32> %1
+}
+
+define <8 x double> @test_v8f64(<8 x double>* %src) {
+; SSE-LABEL: test_v8f64:
+; SSE: # BB#0:
+; SSE-NEXT: movaps (%rdi), %xmm0
+; SSE-NEXT: movaps 16(%rdi), %xmm1
+; SSE-NEXT: movaps 32(%rdi), %xmm2
+; SSE-NEXT: movaps 48(%rdi), %xmm3
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v8f64:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps (%rdi), %ymm0
+; AVX-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v8f64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovupd (%rdi), %zmm0
+; AVX512-NEXT: retq
+ %1 = load <8 x double>, <8 x double>* %src, align 32, !nontemporal !1
+ ret <8 x double> %1
+}
+
+define <8 x i64> @test_v8i64(<8 x i64>* %src) {
+; SSE-LABEL: test_v8i64:
+; SSE: # BB#0:
+; SSE-NEXT: movaps (%rdi), %xmm0
+; SSE-NEXT: movaps 16(%rdi), %xmm1
+; SSE-NEXT: movaps 32(%rdi), %xmm2
+; SSE-NEXT: movaps 48(%rdi), %xmm3
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v8i64:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps (%rdi), %ymm0
+; AVX-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_v8i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0
+; AVX512-NEXT: retq
+ %1 = load <8 x i64>, <8 x i64>* %src, align 32, !nontemporal !1
+ ret <8 x i64> %1
+}
+
+define <32 x i16> @test_v32i16(<32 x i16>* %src) {
+; SSE-LABEL: test_v32i16:
+; SSE: # BB#0:
+; SSE-NEXT: movaps (%rdi), %xmm0
+; SSE-NEXT: movaps 16(%rdi), %xmm1
+; SSE-NEXT: movaps 32(%rdi), %xmm2
+; SSE-NEXT: movaps 48(%rdi), %xmm3
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v32i16:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps (%rdi), %ymm0
+; AVX-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX-NEXT: retq
+;
+; AVX512F-LABEL: test_v32i16:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovaps (%rdi), %ymm0
+; AVX512F-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_v32i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovdqu16 (%rdi), %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_v32i16:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa64 (%rdi), %ymm0
+; AVX512VL-NEXT: vmovdqa64 32(%rdi), %ymm1
+; AVX512VL-NEXT: retq
+ %1 = load <32 x i16>, <32 x i16>* %src, align 32, !nontemporal !1
+ ret <32 x i16> %1
+}
+
+define <64 x i8> @test_v64i8(<64 x i8>* %src) {
+; SSE-LABEL: test_v64i8:
+; SSE: # BB#0:
+; SSE-NEXT: movaps (%rdi), %xmm0
+; SSE-NEXT: movaps 16(%rdi), %xmm1
+; SSE-NEXT: movaps 32(%rdi), %xmm2
+; SSE-NEXT: movaps 48(%rdi), %xmm3
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_v64i8:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps (%rdi), %ymm0
+; AVX-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX-NEXT: retq
+;
+; AVX512F-LABEL: test_v64i8:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovaps (%rdi), %ymm0
+; AVX512F-NEXT: vmovaps 32(%rdi), %ymm1
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_v64i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovdqu8 (%rdi), %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_v64i8:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqa64 (%rdi), %ymm0
+; AVX512VL-NEXT: vmovdqa64 32(%rdi), %ymm1
+; AVX512VL-NEXT: retq
+ %1 = load <64 x i8>, <64 x i8>* %src, align 32, !nontemporal !1
+ ret <64 x i8> %1
+}
+
 ; Check cases where the load would be folded.
@@ -257,10 +523,10 @@ define <4 x float> @test_arg_v4f32(<4 x float> %arg, <4 x float>* %src) {
 ; AVX-NEXT: vaddps (%rdi), %xmm0, %xmm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_arg_v4f32:
-; VLX: # BB#0:
-; VLX-NEXT: vaddps (%rdi), %xmm0, %xmm0
-; VLX-NEXT: retq
+; AVX512-LABEL: test_arg_v4f32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vaddps (%rdi), %xmm0, %xmm0
+; AVX512-NEXT: retq
 %1 = load <4 x float>, <4 x float>* %src, align 16, !nontemporal !1
 %2 = fadd <4 x float> %arg, %1
 ret <4 x float> %2
@@ -277,10 +543,10 @@ define <4 x i32> @test_arg_v4i32(<4 x i32> %arg, <4 x i32>* %src) {
 ; AVX-NEXT: vpaddd (%rdi), %xmm0, %xmm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_arg_v4i32:
-; VLX: # BB#0:
-; VLX-NEXT: vpaddd (%rdi), %xmm0, %xmm0
-; VLX-NEXT: retq
+; AVX512-LABEL: test_arg_v4i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpaddd (%rdi), %xmm0, %xmm0
+; AVX512-NEXT: retq
 %1 = load <4 x i32>, <4 x i32>* %src, align 16, !nontemporal !1
 %2 = add <4 x i32> %arg, %1
 ret <4 x i32> %2
@@ -297,10 +563,10 @@ define <2 x double> @test_arg_v2f64(<2 x double> %arg, <2 x double>* %src) {
 ; AVX-NEXT: vaddpd (%rdi), %xmm0, %xmm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_arg_v2f64:
-; VLX: # BB#0:
-; VLX-NEXT: vaddpd (%rdi), %xmm0, %xmm0
-; VLX-NEXT: retq
+; AVX512-LABEL: test_arg_v2f64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vaddpd (%rdi), %xmm0, %xmm0
+; AVX512-NEXT: retq
 %1 = load <2 x double>, <2 x double>* %src, align 16, !nontemporal !1
 %2 = fadd <2 x double> %arg, %1
 ret <2 x double> %2
@@ -317,10 +583,10 @@ define <2 x i64> @test_arg_v2i64(<2 x i64> %arg, <2 x i64>* %src) {
 ; AVX-NEXT: vpaddq (%rdi), %xmm0, %xmm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_arg_v2i64:
-; VLX: # BB#0:
-; VLX-NEXT: vpaddq (%rdi), %xmm0, %xmm0
-; VLX-NEXT: retq
+; AVX512-LABEL: test_arg_v2i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpaddq (%rdi), %xmm0, %xmm0
+; AVX512-NEXT: retq
 %1 = load <2 x i64>, <2 x i64>* %src, align 16, !nontemporal !1
 %2 = add <2 x i64> %arg, %1
 ret <2 x i64> %2
@@ -337,10 +603,10 @@ define <8 x i16> @test_arg_v8i16(<8 x i16> %arg, <8 x i16>* %src) {
 ; AVX-NEXT: vpaddw (%rdi), %xmm0, %xmm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_arg_v8i16:
-; VLX: # BB#0:
-; VLX-NEXT: vpaddw (%rdi), %xmm0, %xmm0
-; VLX-NEXT: retq
+; AVX512-LABEL: test_arg_v8i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpaddw (%rdi), %xmm0, %xmm0
+; AVX512-NEXT: retq
 %1 = load <8 x i16>, <8 x i16>* %src, align 16, !nontemporal !1
 %2 = add <8 x i16> %arg, %1
 ret <8 x i16> %2
@@ -357,10 +623,10 @@ define <16 x i8> @test_arg_v16i8(<16 x i8> %arg, <16 x i8>* %src) {
 ; AVX-NEXT: vpaddb (%rdi), %xmm0, %xmm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_arg_v16i8:
-; VLX: # BB#0:
-; VLX-NEXT: vpaddb (%rdi), %xmm0, %xmm0
-; VLX-NEXT: retq
+; AVX512-LABEL: test_arg_v16i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpaddb (%rdi), %xmm0, %xmm0
+; AVX512-NEXT: retq
 %1 = load <16 x i8>, <16 x i8>* %src, align 16, !nontemporal !1
 %2 = add <16 x i8> %arg, %1
 ret <16 x i8> %2
@@ -380,10 +646,10 @@ define <8 x float> @test_arg_v8f32(<8 x float> %arg, <8 x float>* %src) {
 ; AVX-NEXT: vaddps (%rdi), %ymm0, %ymm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_arg_v8f32:
-; VLX: # BB#0:
-; VLX-NEXT: vaddps (%rdi), %ymm0, %ymm0
-; VLX-NEXT: retq
+; AVX512-LABEL: test_arg_v8f32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vaddps (%rdi), %ymm0, %ymm0
+; AVX512-NEXT: retq
 %1 = load <8 x float>, <8 x float>* %src, align 32, !nontemporal !1
 %2 = fadd <8 x float> %arg, %1
 ret <8 x float> %2
@@ -411,10 +677,10 @@ define <8 x i32> @test_arg_v8i32(<8 x i32> %arg, <8 x i32>* %src) {
 ; AVX2-NEXT: vpaddd (%rdi), %ymm0, %ymm0
 ; AVX2-NEXT: retq
 ;
-; VLX-LABEL: test_arg_v8i32:
-; VLX: # BB#0:
-; VLX-NEXT: vpaddd (%rdi), %ymm0, %ymm0
-; VLX-NEXT: retq
+; AVX512-LABEL: test_arg_v8i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpaddd (%rdi), %ymm0, %ymm0
+; AVX512-NEXT: retq
 %1 = load <8 x i32>, <8 x i32>* %src, align 32, !nontemporal !1
 %2 = add <8 x i32> %arg, %1
 ret <8 x i32> %2
@@ -432,10 +698,10 @@ define <4 x double> @test_arg_v4f64(<4 x double> %arg, <4 x double>* %src) {
 ; AVX-NEXT: vaddpd (%rdi), %ymm0, %ymm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_arg_v4f64:
-; VLX: # BB#0:
-; VLX-NEXT: vaddpd (%rdi), %ymm0, %ymm0
-; VLX-NEXT: retq
+; AVX512-LABEL: test_arg_v4f64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vaddpd (%rdi), %ymm0, %ymm0
+; AVX512-NEXT: retq
 %1 = load <4 x double>, <4 x double>* %src, align 32, !nontemporal !1
 %2 = fadd <4 x double> %arg, %1
 ret <4 x double> %2
@@ -463,10 +729,10 @@ define <4 x i64> @test_arg_v4i64(<4 x i64> %arg, <4 x i64>* %src) {
 ; AVX2-NEXT: vpaddq (%rdi), %ymm0, %ymm0
 ; AVX2-NEXT: retq
 ;
-; VLX-LABEL: test_arg_v4i64:
-; VLX: # BB#0:
-; VLX-NEXT: vpaddq (%rdi), %ymm0, %ymm0
-; VLX-NEXT: retq
+; AVX512-LABEL: test_arg_v4i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpaddq (%rdi), %ymm0, %ymm0
+; AVX512-NEXT: retq
 %1 = load <4 x i64>, <4 x i64>* %src, align 32, !nontemporal !1
 %2 = add <4 x i64> %arg, %1
 ret <4 x i64> %2
@@ -494,10 +760,10 @@ define <16 x i16> @test_arg_v16i16(<16 x i16> %arg, <16 x i16>* %src) {
 ; AVX2-NEXT: vpaddw (%rdi), %ymm0, %ymm0
 ; AVX2-NEXT: retq
 ;
-; VLX-LABEL: test_arg_v16i16:
-; VLX: # BB#0:
-; VLX-NEXT: vpaddw (%rdi), %ymm0, %ymm0
-; VLX-NEXT: retq
+; AVX512-LABEL: test_arg_v16i16:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpaddw (%rdi), %ymm0, %ymm0
+; AVX512-NEXT: retq
 %1 = load <16 x i16>, <16 x i16>* %src, align 32, !nontemporal !1
 %2 = add <16 x i16> %arg, %1
 ret <16 x i16> %2
@@ -525,15 +791,249 @@ define <32 x i8> @test_arg_v32i8(<32 x i8> %arg, <32 x i8>* %src) {
 ; AVX2-NEXT: vpaddb (%rdi), %ymm0, %ymm0
 ; AVX2-NEXT: retq
 ;
-; VLX-LABEL: test_arg_v32i8:
-; VLX: # BB#0:
-; VLX-NEXT: vpaddb (%rdi), %ymm0, %ymm0
-; VLX-NEXT: retq
+; AVX512-LABEL: test_arg_v32i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpaddb (%rdi), %ymm0, %ymm0
+; AVX512-NEXT: retq
 %1 = load <32 x i8>, <32 x i8>* %src, align 32, !nontemporal !1
 %2 = add <32 x i8> %arg, %1
 ret <32 x i8> %2
 }
 
+; And now ZMM versions.
+
+define <16 x float> @test_arg_v16f32(<16 x float> %arg, <16 x float>* %src) {
+; SSE-LABEL: test_arg_v16f32:
+; SSE: # BB#0:
+; SSE-NEXT: addps (%rdi), %xmm0
+; SSE-NEXT: addps 16(%rdi), %xmm1
+; SSE-NEXT: addps 32(%rdi), %xmm2
+; SSE-NEXT: addps 48(%rdi), %xmm3
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_arg_v16f32:
+; AVX: # BB#0:
+; AVX-NEXT: vaddps (%rdi), %ymm0, %ymm0
+; AVX-NEXT: vaddps 32(%rdi), %ymm1, %ymm1
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_arg_v16f32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vaddps (%rdi), %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %1 = load <16 x float>, <16 x float>* %src, align 32, !nontemporal !1
+ %2 = fadd <16 x float> %arg, %1
+ ret <16 x float> %2
+}
+
+define <16 x i32> @test_arg_v16i32(<16 x i32> %arg, <16 x i32>* %src) {
+; SSE-LABEL: test_arg_v16i32:
+; SSE: # BB#0:
+; SSE-NEXT: paddd (%rdi), %xmm0
+; SSE-NEXT: paddd 16(%rdi), %xmm1
+; SSE-NEXT: paddd 32(%rdi), %xmm2
+; SSE-NEXT: paddd 48(%rdi), %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_arg_v16i32:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps (%rdi), %ymm2
+; AVX1-NEXT: vmovaps 32(%rdi), %ymm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
+; AVX1-NEXT: vpaddd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vpaddd %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_arg_v16i32:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpaddd (%rdi), %ymm0, %ymm0
+; AVX2-NEXT: vpaddd 32(%rdi), %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_arg_v16i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpaddd (%rdi), %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %1 = load <16 x i32>, <16 x i32>* %src, align 32, !nontemporal !1
+ %2 = add <16 x i32> %arg, %1
+ ret <16 x i32> %2
+}
+
+define <8 x double> @test_arg_v8f64(<8 x double> %arg, <8 x double>* %src) {
+; SSE-LABEL: test_arg_v8f64:
+; SSE: # BB#0:
+; SSE-NEXT: addpd (%rdi), %xmm0
+; SSE-NEXT: addpd 16(%rdi), %xmm1
+; SSE-NEXT: addpd 32(%rdi), %xmm2
+; SSE-NEXT: addpd 48(%rdi), %xmm3
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_arg_v8f64:
+; AVX: # BB#0:
+; AVX-NEXT: vaddpd (%rdi), %ymm0, %ymm0
+; AVX-NEXT: vaddpd 32(%rdi), %ymm1, %ymm1
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_arg_v8f64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vaddpd (%rdi), %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %1 = load <8 x double>, <8 x double>* %src, align 32, !nontemporal !1
+ %2 = fadd <8 x double> %arg, %1
+ ret <8 x double> %2
+}
+
+define <8 x i64> @test_arg_v8i64(<8 x i64> %arg, <8 x i64>* %src) {
+; SSE-LABEL: test_arg_v8i64:
+; SSE: # BB#0:
+; SSE-NEXT: paddq (%rdi), %xmm0
+; SSE-NEXT: paddq 16(%rdi), %xmm1
+; SSE-NEXT: paddq 32(%rdi), %xmm2
+; SSE-NEXT: paddq 48(%rdi), %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_arg_v8i64:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps (%rdi), %ymm2
+; AVX1-NEXT: vmovaps 32(%rdi), %ymm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
+; AVX1-NEXT: vpaddq %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpaddq %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vpaddq %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddq %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_arg_v8i64:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpaddq (%rdi), %ymm0, %ymm0
+; AVX2-NEXT: vpaddq 32(%rdi), %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test_arg_v8i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpaddq (%rdi), %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %1 = load <8 x i64>, <8 x i64>* %src, align 32, !nontemporal !1
+ %2 = add <8 x i64> %arg, %1
+ ret <8 x i64> %2
+}
+
+define <32 x i16> @test_arg_v32i16(<32 x i16> %arg, <32 x i16>* %src) {
+; SSE-LABEL: test_arg_v32i16:
+; SSE: # BB#0:
+; SSE-NEXT: paddw (%rdi), %xmm0
+; SSE-NEXT: paddw 16(%rdi), %xmm1
+; SSE-NEXT: paddw 32(%rdi), %xmm2
+; SSE-NEXT: paddw 48(%rdi), %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_arg_v32i16:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps (%rdi), %ymm2
+; AVX1-NEXT: vmovaps 32(%rdi), %ymm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
+; AVX1-NEXT: vpaddw %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpaddw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vpaddw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddw %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_arg_v32i16:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpaddw (%rdi), %ymm0, %ymm0
+; AVX2-NEXT: vpaddw 32(%rdi), %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: test_arg_v32i16:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpaddw (%rdi), %ymm0, %ymm0
+; AVX512F-NEXT: vpaddw 32(%rdi), %ymm1, %ymm1
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_arg_v32i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpaddw (%rdi), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_arg_v32i16:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpaddw (%rdi), %ymm0, %ymm0
+; AVX512VL-NEXT: vpaddw 32(%rdi), %ymm1, %ymm1
+; AVX512VL-NEXT: retq
+ %1 = load <32 x i16>, <32 x i16>* %src, align 32, !nontemporal !1
+ %2 = add <32 x i16> %arg, %1
+ ret <32 x i16> %2
+}
+
+define <64 x i8> @test_arg_v64i8(<64 x i8> %arg, <64 x i8>* %src) {
+; SSE-LABEL: test_arg_v64i8:
+; SSE: # BB#0:
+; SSE-NEXT: paddb (%rdi), %xmm0
+; SSE-NEXT: paddb 16(%rdi), %xmm1
+; SSE-NEXT: paddb 32(%rdi), %xmm2
+; SSE-NEXT: paddb 48(%rdi), %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: test_arg_v64i8:
+; AVX1: # BB#0:
+; AVX1-NEXT: vmovaps (%rdi), %ymm2
+; AVX1-NEXT: vmovaps 32(%rdi), %ymm3
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
+; AVX1-NEXT: vpaddb %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
+; AVX1-NEXT: vpaddb %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpaddb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: test_arg_v64i8:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpaddb (%rdi), %ymm0, %ymm0
+; AVX2-NEXT: vpaddb 32(%rdi), %ymm1, %ymm1
+; AVX2-NEXT: retq
+;
+; AVX512F-LABEL: test_arg_v64i8:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vpaddb (%rdi), %ymm0, %ymm0
+; AVX512F-NEXT: vpaddb 32(%rdi), %ymm1, %ymm1
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_arg_v64i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vpaddb (%rdi), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_arg_v64i8:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpaddb (%rdi), %ymm0, %ymm0
+; AVX512VL-NEXT: vpaddb 32(%rdi), %ymm1, %ymm1
+; AVX512VL-NEXT: retq
+ %1 = load <64 x i8>, <64 x i8>* %src, align 32, !nontemporal !1
+ %2 = add <64 x i8> %arg, %1
+ ret <64 x i8> %2
+}
+
 ; Unaligned non-temporal loads (not supported)
 
@@ -548,10 +1048,10 @@ define <4 x float> @test_unaligned_v4f32(<4 x float>* %src) {
 ; AVX-NEXT: vmovups (%rdi), %xmm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_unaligned_v4f32:
-; VLX: # BB#0:
-; VLX-NEXT: vmovups (%rdi), %xmm0
-; VLX-NEXT: retq
+; AVX512-LABEL: test_unaligned_v4f32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovups (%rdi), %xmm0
+; AVX512-NEXT: retq
 %1 = load <4 x float>, <4 x float>* %src, align 1, !nontemporal !1
 ret <4 x float> %1
 }
@@ -567,10 +1067,20 @@ define <4 x i32> @test_unaligned_v4i32(<4 x i32>* %src) {
 ; AVX-NEXT: vmovups (%rdi), %xmm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_unaligned_v4i32:
-; VLX: # BB#0:
-; VLX-NEXT: vmovdqu32 (%rdi), %xmm0
-; VLX-NEXT: retq
+; AVX512F-LABEL: test_unaligned_v4i32:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovups (%rdi), %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_unaligned_v4i32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovups (%rdi), %xmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_unaligned_v4i32:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqu32 (%rdi), %xmm0
+; AVX512VL-NEXT: retq
 %1 = load <4 x i32>, <4 x i32>* %src, align 1, !nontemporal !1
 ret <4 x i32> %1
 }
@@ -586,10 +1096,20 @@ define <2 x double> @test_unaligned_v2f64(<2 x double>* %src) {
 ; AVX-NEXT: vmovups (%rdi), %xmm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_unaligned_v2f64:
-; VLX: # BB#0:
-; VLX-NEXT: vmovupd (%rdi), %xmm0
-; VLX-NEXT: retq
+; AVX512F-LABEL: test_unaligned_v2f64:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovups (%rdi), %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_unaligned_v2f64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovups (%rdi), %xmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_unaligned_v2f64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovupd (%rdi), %xmm0
+; AVX512VL-NEXT: retq
 %1 = load <2 x double>, <2 x double>* %src, align 1, !nontemporal !1
 ret <2 x double> %1
 }
@@ -605,10 +1125,20 @@ define <2 x i64> @test_unaligned_v2i64(<2 x i64>* %src) {
 ; AVX-NEXT: vmovups (%rdi), %xmm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_unaligned_v2i64:
-; VLX: # BB#0:
-; VLX-NEXT: vmovdqu64 (%rdi), %xmm0
-; VLX-NEXT: retq
+; AVX512F-LABEL: test_unaligned_v2i64:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovups (%rdi), %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_unaligned_v2i64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovups (%rdi), %xmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_unaligned_v2i64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqu64 (%rdi), %xmm0
+; AVX512VL-NEXT: retq
 %1 = load <2 x i64>, <2 x i64>* %src, align 1, !nontemporal !1
 ret <2 x i64> %1
 }
@@ -624,10 +1154,20 @@ define <8 x i16> @test_unaligned_v8i16(<8 x i16>* %src) {
 ; AVX-NEXT: vmovups (%rdi), %xmm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_unaligned_v8i16:
-; VLX: # BB#0:
-; VLX-NEXT: vmovdqu64 (%rdi), %xmm0
-; VLX-NEXT: retq
+; AVX512F-LABEL: test_unaligned_v8i16:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovups (%rdi), %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_unaligned_v8i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovups (%rdi), %xmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_unaligned_v8i16:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqu64 (%rdi), %xmm0
+; AVX512VL-NEXT: retq
 %1 = load <8 x i16>, <8 x i16>* %src, align 1, !nontemporal !1
 ret <8 x i16> %1
 }
@@ -643,10 +1183,20 @@ define <16 x i8> @test_unaligned_v16i8(<16 x i8>* %src) {
 ; AVX-NEXT: vmovups (%rdi), %xmm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_unaligned_v16i8:
-; VLX: # BB#0:
-; VLX-NEXT: vmovdqu64 (%rdi), %xmm0
-; VLX-NEXT: retq
+; AVX512F-LABEL: test_unaligned_v16i8:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovups (%rdi), %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_unaligned_v16i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovups (%rdi), %xmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_unaligned_v16i8:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqu64 (%rdi), %xmm0
+; AVX512VL-NEXT: retq
 %1 = load <16 x i8>, <16 x i8>* %src, align 1, !nontemporal !1
 ret <16 x i8> %1
 }
@@ -665,10 +1215,10 @@ define <8 x float> @test_unaligned_v8f32(<8 x float>* %src) {
 ; AVX-NEXT: vmovups (%rdi), %ymm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_unaligned_v8f32:
-; VLX: # BB#0:
-; VLX-NEXT: vmovups (%rdi), %ymm0
-; VLX-NEXT: retq
+; AVX512-LABEL: test_unaligned_v8f32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovups (%rdi), %ymm0
+; AVX512-NEXT: retq
 %1 = load <8 x float>, <8 x float>* %src, align 1, !nontemporal !1
 ret <8 x float> %1
 }
@@ -685,10 +1235,20 @@ define <8 x i32> @test_unaligned_v8i32(<8 x i32>* %src) {
 ; AVX-NEXT: vmovups (%rdi), %ymm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_unaligned_v8i32:
-; VLX: # BB#0:
-; VLX-NEXT: vmovdqu32 (%rdi), %ymm0
-; VLX-NEXT: retq
+; AVX512F-LABEL: test_unaligned_v8i32:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovups (%rdi), %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_unaligned_v8i32:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovups (%rdi), %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_unaligned_v8i32:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqu32 (%rdi), %ymm0
+; AVX512VL-NEXT: retq
 %1 = load <8 x i32>, <8 x i32>* %src, align 1, !nontemporal !1
 ret <8 x i32> %1
 }
@@ -705,10 +1265,20 @@ define <4 x double> @test_unaligned_v4f64(<4 x double>* %src) {
 ; AVX-NEXT: vmovups (%rdi), %ymm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_unaligned_v4f64:
-; VLX: # BB#0:
-; VLX-NEXT: vmovupd (%rdi), %ymm0
-; VLX-NEXT: retq
+; AVX512F-LABEL: test_unaligned_v4f64:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovups (%rdi), %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_unaligned_v4f64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovups (%rdi), %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_unaligned_v4f64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovupd (%rdi), %ymm0
+; AVX512VL-NEXT: retq
 %1 = load <4 x double>, <4 x double>* %src, align 1, !nontemporal !1
 ret <4 x double> %1
 }
@@ -725,10 +1295,20 @@ define <4 x i64> @test_unaligned_v4i64(<4 x i64>* %src) {
 ; AVX-NEXT: vmovups (%rdi), %ymm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_unaligned_v4i64:
-; VLX: # BB#0:
-; VLX-NEXT: vmovdqu64 (%rdi), %ymm0
-; VLX-NEXT: retq
+; AVX512F-LABEL: test_unaligned_v4i64:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovups (%rdi), %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_unaligned_v4i64:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovups (%rdi), %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_unaligned_v4i64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqu64 (%rdi), %ymm0
+; AVX512VL-NEXT: retq
 %1 = load <4 x i64>, <4 x i64>* %src, align 1, !nontemporal !1
 ret <4 x i64> %1
 }
@@ -745,10 +1325,20 @@ define <16 x i16> @test_unaligned_v16i16(<16 x i16>* %src) {
 ; AVX-NEXT: vmovups (%rdi), %ymm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_unaligned_v16i16:
-; VLX: # BB#0:
-; VLX-NEXT: vmovdqu64 (%rdi), %ymm0
-; VLX-NEXT: retq
+; AVX512F-LABEL: test_unaligned_v16i16:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovups (%rdi), %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_unaligned_v16i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovups (%rdi), %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_unaligned_v16i16:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqu64 (%rdi), %ymm0
+; AVX512VL-NEXT: retq
 %1 = load <16 x i16>, <16 x i16>* %src, align 1, !nontemporal !1
 ret <16 x i16> %1
 }
@@ -765,12 +1355,186 @@ define <32 x i8> @test_unaligned_v32i8(<32 x i8>* %src) {
 ; AVX-NEXT: vmovups (%rdi), %ymm0
 ; AVX-NEXT: retq
 ;
-; VLX-LABEL: test_unaligned_v32i8:
-; VLX: # BB#0:
-; VLX-NEXT: vmovdqu64 (%rdi), %ymm0
-; VLX-NEXT: retq
+; AVX512F-LABEL: test_unaligned_v32i8:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovups (%rdi), %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_unaligned_v32i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovups (%rdi), %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_unaligned_v32i8:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqu64 (%rdi), %ymm0
+; AVX512VL-NEXT: retq
 %1 = load <32 x i8>, <32 x i8>* %src, align 1, !nontemporal !1
 ret <32 x i8> %1
 }
 
+; And now ZMM versions.
+
+define <16 x float> @test_unaligned_v16f32(<16 x float>* %src) {
+; SSE-LABEL: test_unaligned_v16f32:
+; SSE: # BB#0:
+; SSE-NEXT: movups (%rdi), %xmm0
+; SSE-NEXT: movups 16(%rdi), %xmm1
+; SSE-NEXT: movups 32(%rdi), %xmm2
+; SSE-NEXT: movups 48(%rdi), %xmm3
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_unaligned_v16f32:
+; AVX: # BB#0:
+; AVX-NEXT: vmovups (%rdi), %ymm0
+; AVX-NEXT: vmovups 32(%rdi), %ymm1
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_unaligned_v16f32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovups (%rdi), %zmm0
+; AVX512-NEXT: retq
+ %1 = load <16 x float>, <16 x float>* %src, align 1, !nontemporal !1
+ ret <16 x float> %1
+}
+
+define <16 x i32> @test_unaligned_v16i32(<16 x i32>* %src) {
+; SSE-LABEL: test_unaligned_v16i32:
+; SSE: # BB#0:
+; SSE-NEXT: movups (%rdi), %xmm0
+; SSE-NEXT: movups 16(%rdi), %xmm1
+; SSE-NEXT: movups 32(%rdi), %xmm2
+; SSE-NEXT: movups 48(%rdi), %xmm3
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_unaligned_v16i32:
+; AVX: # BB#0:
+; AVX-NEXT: vmovups (%rdi), %ymm0
+; AVX-NEXT: vmovups 32(%rdi), %ymm1
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_unaligned_v16i32:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovdqu32 (%rdi), %zmm0
+; AVX512-NEXT: retq
+ %1 = load <16 x i32>, <16 x i32>* %src, align 1, !nontemporal !1
+ ret <16 x i32> %1
+}
+
+define <8 x double> @test_unaligned_v8f64(<8 x double>* %src) {
+; SSE-LABEL: test_unaligned_v8f64:
+; SSE: # BB#0:
+; SSE-NEXT: movups (%rdi), %xmm0
+; SSE-NEXT: movups 16(%rdi), %xmm1
+; SSE-NEXT: movups 32(%rdi), %xmm2
+; SSE-NEXT: movups 48(%rdi), %xmm3
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_unaligned_v8f64:
+; AVX: # BB#0:
+; AVX-NEXT: vmovups (%rdi), %ymm0
+; AVX-NEXT: vmovups 32(%rdi), %ymm1
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_unaligned_v8f64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovupd (%rdi), %zmm0
+; AVX512-NEXT: retq
+ %1 = load <8 x double>, <8 x double>* %src, align 1, !nontemporal !1
+ ret <8 x double> %1
+}
+
+define <8 x i64> @test_unaligned_v8i64(<8 x i64>* %src) {
+; SSE-LABEL: test_unaligned_v8i64:
+; SSE: # BB#0:
+; SSE-NEXT: movups (%rdi), %xmm0
+; SSE-NEXT: movups 16(%rdi), %xmm1
+; SSE-NEXT: movups 32(%rdi), %xmm2
+; SSE-NEXT: movups 48(%rdi), %xmm3
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_unaligned_v8i64:
+; AVX: # BB#0:
+; AVX-NEXT: vmovups (%rdi), %ymm0
+; AVX-NEXT: vmovups 32(%rdi), %ymm1
+; AVX-NEXT: retq
+;
+; AVX512-LABEL: test_unaligned_v8i64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vmovdqu64 (%rdi), %zmm0
+; AVX512-NEXT: retq
+ %1 = load <8 x i64>, <8 x i64>* %src, align 1, !nontemporal !1
+ ret <8 x i64> %1
+}
+
+define <32 x i16> @test_unaligned_v32i16(<32 x i16>* %src) {
+; SSE-LABEL: test_unaligned_v32i16:
+; SSE: # BB#0:
+; SSE-NEXT: movups (%rdi), %xmm0
+; SSE-NEXT: movups 16(%rdi), %xmm1
+; SSE-NEXT: movups 32(%rdi), %xmm2
+; SSE-NEXT: movups 48(%rdi), %xmm3
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_unaligned_v32i16:
+; AVX: # BB#0:
+; AVX-NEXT: vmovups (%rdi), %ymm0
+; AVX-NEXT: vmovups 32(%rdi), %ymm1
+; AVX-NEXT: retq
+;
+; AVX512F-LABEL: test_unaligned_v32i16:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovups (%rdi), %ymm0
+; AVX512F-NEXT: vmovups 32(%rdi), %ymm1
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_unaligned_v32i16:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovdqu16 (%rdi), %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_unaligned_v32i16:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqu64 (%rdi), %ymm0
+; AVX512VL-NEXT: vmovdqu64 32(%rdi), %ymm1
+; AVX512VL-NEXT: retq
+ %1 = load <32 x i16>, <32 x i16>* %src, align 1, !nontemporal !1
+ ret <32 x i16> %1
+}
+
+define <64 x i8> @test_unaligned_v64i8(<64 x i8>* %src) {
+; SSE-LABEL: test_unaligned_v64i8:
+; SSE: # BB#0:
+; SSE-NEXT: movups (%rdi), %xmm0
+; SSE-NEXT: movups 16(%rdi), %xmm1
+; SSE-NEXT: movups 32(%rdi), %xmm2
+; SSE-NEXT: movups 48(%rdi), %xmm3
+; SSE-NEXT: retq
+;
+; AVX-LABEL: test_unaligned_v64i8:
+; AVX: # BB#0:
+; AVX-NEXT: vmovups (%rdi), %ymm0
+; AVX-NEXT: vmovups 32(%rdi), %ymm1
+; AVX-NEXT: retq
+;
+; AVX512F-LABEL: test_unaligned_v64i8:
+; AVX512F: # BB#0:
+; AVX512F-NEXT: vmovups (%rdi), %ymm0
+; AVX512F-NEXT: vmovups 32(%rdi), %ymm1
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: test_unaligned_v64i8:
+; AVX512BW: # BB#0:
+; AVX512BW-NEXT: vmovdqu8 (%rdi), %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512VL-LABEL: test_unaligned_v64i8:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vmovdqu64 (%rdi), %ymm0
+; AVX512VL-NEXT: vmovdqu64 32(%rdi), %ymm1
+; AVX512VL-NEXT: retq
+ %1 = load <64 x i8>, <64 x i8>* %src, align 1, !nontemporal !1
+ ret <64 x i8> %1
+}
+
 !1 = !{i32 1}
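
As a usage note, each prefix can be exercised standalone by substituting the test file for %s in the corresponding RUN line, e.g. (path assumed from the diffstat above):

    llc < llvm/test/CodeGen/X86/nontemporal-loads.ll -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck llvm/test/CodeGen/X86/nontemporal-loads.ll --check-prefix=AVX512 --check-prefix=AVX512BW

The checks also show why three AVX-512 prefixes are needed: with only +avx512f the <32 x i16> and <64 x i8> cases split into two YMM operations, since 512-bit byte/word vectors need AVX512BW (which selects a single vmovdqu16/vmovdqu8 ZMM access), while the AVX512VL prefix covers the EVEX vmovdqa32/vmovdqa64 forms on 128/256-bit vectors.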