| field | value | date |
|---|---|---|
| author | Sanjay Patel <spatel@rotateright.com> | 2016-06-10 14:48:50 +0000 |
| committer | Sanjay Patel <spatel@rotateright.com> | 2016-06-10 14:48:50 +0000 |
| commit | 330a359fb308b3671f954f595f0927ebbeba579a (patch) | |
| tree | 08b03c0c5c227b67c4829d71171fa52686b91022 /llvm/test/CodeGen/X86 | |
| parent | 12b9c5ba989fe637427429095f57c4028c380391 (diff) | |
[x86] regenerate checks
llvm-svn: 272396
Diffstat (limited to 'llvm/test/CodeGen/X86')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/X86/commute-fcmp.ll | 493 |

1 file changed, 309 insertions, 184 deletions
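The change is mechanical: the test's hand-written, partially matching CHECK comments are replaced by complete, instruction-level assertions produced by the script credited in the new NOTE line, `utils/update_test_checks.py`. A typical invocation (illustrative only; the exact script name and flags vary across LLVM revisions) is `python utils/update_test_checks.py llvm/test/CodeGen/X86/commute-fcmp.ll` against a freshly built `llc`, which reruns each RUN line and rewrites the `SSE`- and `AVX`-prefixed check blocks in place.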
```diff
diff --git a/llvm/test/CodeGen/X86/commute-fcmp.ll b/llvm/test/CodeGen/X86/commute-fcmp.ll
index 6f43ebe1fcd..b2fa970ab9a 100644
--- a/llvm/test/CodeGen/X86/commute-fcmp.ll
+++ b/llvm/test/CodeGen/X86/commute-fcmp.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=x86-64 -mattr=+sse2 < %s | FileCheck %s --check-prefix=SSE
 ; RUN: llc -O3 -mtriple=x86_64-unknown -mcpu=x86-64 -mattr=+avx2 < %s | FileCheck %s --check-prefix=AVX
 
@@ -6,164 +7,226 @@
 ; Only equal/not-equal/ordered/unordered can be safely commuted
 ;
-define <4 x i32> @commute_cmpps_eq(<4 x float>* %a0, <4 x float> %a1) #0 {
-  ;SSE-LABEL: commute_cmpps_eq
-  ;SSE: cmpeqps (%rdi), %xmm0
-  ;SSE-NEXT: retq
-
-  ;AVX-LABEL: commute_cmpps_eq
-  ;AVX: vcmpeqps (%rdi), %xmm0, %xmm0
-  ;AVX-NEXT: retq
-
+define <4 x i32> @commute_cmpps_eq(<4 x float>* %a0, <4 x float> %a1) {
+; SSE-LABEL: commute_cmpps_eq:
+; SSE: # BB#0:
+; SSE-NEXT: cmpeqps (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmpps_eq:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpeqps (%rdi), %xmm0, %xmm0
+; AVX-NEXT: retq
+;
   %1 = load <4 x float>, <4 x float>* %a0
   %2 = fcmp oeq <4 x float> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i32>
   ret <4 x i32> %3
 }
 
-define <4 x i32> @commute_cmpps_ne(<4 x float>* %a0, <4 x float> %a1) #0 {
-  ;SSE-LABEL: commute_cmpps_ne
-  ;SSE: cmpneqps (%rdi), %xmm0
-  ;SSE-NEXT: retq
-
-  ;AVX-LABEL: commute_cmpps_ne
-  ;AVX: vcmpneqps (%rdi), %xmm0, %xmm0
-  ;AVX-NEXT: retq
-
+define <4 x i32> @commute_cmpps_ne(<4 x float>* %a0, <4 x float> %a1) {
+; SSE-LABEL: commute_cmpps_ne:
+; SSE: # BB#0:
+; SSE-NEXT: cmpneqps (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmpps_ne:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpneqps (%rdi), %xmm0, %xmm0
+; AVX-NEXT: retq
+;
   %1 = load <4 x float>, <4 x float>* %a0
   %2 = fcmp une <4 x float> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i32>
   ret <4 x i32> %3
 }
 
-define <4 x i32> @commute_cmpps_ord(<4 x float>* %a0, <4 x float> %a1) #0 {
-  ;SSE-LABEL: commute_cmpps_ord
-  ;SSE: cmpordps (%rdi), %xmm0
-  ;SSE-NEXT: retq
-
-  ;AVX-LABEL: commute_cmpps_ord
-  ;AVX: vcmpordps (%rdi), %xmm0, %xmm0
-  ;AVX-NEXT: retq
-
+define <4 x i32> @commute_cmpps_ord(<4 x float>* %a0, <4 x float> %a1) {
+; SSE-LABEL: commute_cmpps_ord:
+; SSE: # BB#0:
+; SSE-NEXT: cmpordps (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmpps_ord:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpordps (%rdi), %xmm0, %xmm0
+; AVX-NEXT: retq
+;
   %1 = load <4 x float>, <4 x float>* %a0
   %2 = fcmp ord <4 x float> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i32>
   ret <4 x i32> %3
 }
 
-define <4 x i32> @commute_cmpps_uno(<4 x float>* %a0, <4 x float> %a1) #0 {
-  ;SSE-LABEL: commute_cmpps_uno
-  ;SSE: cmpunordps (%rdi), %xmm0
-  ;SSE-NEXT: retq
-
-  ;AVX-LABEL: commute_cmpps_uno
-  ;AVX: vcmpunordps (%rdi), %xmm0, %xmm0
-  ;AVX-NEXT: retq
-
+define <4 x i32> @commute_cmpps_uno(<4 x float>* %a0, <4 x float> %a1) {
+; SSE-LABEL: commute_cmpps_uno:
+; SSE: # BB#0:
+; SSE-NEXT: cmpunordps (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmpps_uno:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpunordps (%rdi), %xmm0, %xmm0
+; AVX-NEXT: retq
+;
   %1 = load <4 x float>, <4 x float>* %a0
   %2 = fcmp uno <4 x float> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i32>
   ret <4 x i32> %3
 }
 
-define <4 x i32> @commute_cmpps_lt(<4 x float>* %a0, <4 x float> %a1) #0 {
-  ;SSE-LABEL: commute_cmpps_lt
-  ;SSE: movaps (%rdi), %xmm1
-  ;SSE-NEXT: cmpltps %xmm0, %xmm1
-  ;SSE-NEXT: movaps %xmm1, %xmm0
-  ;SSE-NEXT: retq
-
-  ;AVX-LABEL: commute_cmpps_lt
-  ;AVX: vmovaps (%rdi), %xmm1
-  ;AVX-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
-  ;AVX-NEXT: retq
-
+define <4 x i32> @commute_cmpps_lt(<4 x float>* %a0, <4 x float> %a1) {
+; SSE-LABEL: commute_cmpps_lt:
+; SSE: # BB#0:
+; SSE-NEXT: movaps (%rdi), %xmm1
+; SSE-NEXT: cmpltps %xmm0, %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmpps_lt:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps (%rdi), %xmm1
+; AVX-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+;
   %1 = load <4 x float>, <4 x float>* %a0
   %2 = fcmp olt <4 x float> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i32>
   ret <4 x i32> %3
 }
 
-define <4 x i32> @commute_cmpps_le(<4 x float>* %a0, <4 x float> %a1) #0 {
-  ;SSE-LABEL: commute_cmpps_le
-  ;SSE: movaps (%rdi), %xmm1
-  ;SSE-NEXT: cmpleps %xmm0, %xmm1
-  ;SSE-NEXT: movaps %xmm1, %xmm0
-  ;SSE-NEXT: retq
-
-  ;AVX-LABEL: commute_cmpps_le
-  ;AVX: vmovaps (%rdi), %xmm1
-  ;AVX-NEXT: vcmpleps %xmm0, %xmm1, %xmm0
-  ;AVX-NEXT: retq
-
+define <4 x i32> @commute_cmpps_le(<4 x float>* %a0, <4 x float> %a1) {
+; SSE-LABEL: commute_cmpps_le:
+; SSE: # BB#0:
+; SSE-NEXT: movaps (%rdi), %xmm1
+; SSE-NEXT: cmpleps %xmm0, %xmm1
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmpps_le:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps (%rdi), %xmm1
+; AVX-NEXT: vcmpleps %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+;
   %1 = load <4 x float>, <4 x float>* %a0
   %2 = fcmp ole <4 x float> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i32>
   ret <4 x i32> %3
 }
 
-define <8 x i32> @commute_cmpps_eq_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
-  ;AVX-LABEL: commute_cmpps_eq_ymm
-  ;AVX: vcmpeqps (%rdi), %ymm0, %ymm0
-  ;AVX-NEXT: retq
-
+define <8 x i32> @commute_cmpps_eq_ymm(<8 x float>* %a0, <8 x float> %a1) {
+; SSE-LABEL: commute_cmpps_eq_ymm:
+; SSE: # BB#0:
+; SSE-NEXT: cmpeqps (%rdi), %xmm0
+; SSE-NEXT: cmpeqps 16(%rdi), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmpps_eq_ymm:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpeqps (%rdi), %ymm0, %ymm0
+; AVX-NEXT: retq
+;
   %1 = load <8 x float>, <8 x float>* %a0
   %2 = fcmp oeq <8 x float> %1, %a1
   %3 = sext <8 x i1> %2 to <8 x i32>
   ret <8 x i32> %3
 }
 
-define <8 x i32> @commute_cmpps_ne_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
-  ;AVX-LABEL: commute_cmpps_ne_ymm
-  ;AVX: vcmpneqps (%rdi), %ymm0, %ymm0
-  ;AVX-NEXT: retq
-
+define <8 x i32> @commute_cmpps_ne_ymm(<8 x float>* %a0, <8 x float> %a1) {
+; SSE-LABEL: commute_cmpps_ne_ymm:
+; SSE: # BB#0:
+; SSE-NEXT: cmpneqps (%rdi), %xmm0
+; SSE-NEXT: cmpneqps 16(%rdi), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmpps_ne_ymm:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpneqps (%rdi), %ymm0, %ymm0
+; AVX-NEXT: retq
+;
   %1 = load <8 x float>, <8 x float>* %a0
   %2 = fcmp une <8 x float> %1, %a1
   %3 = sext <8 x i1> %2 to <8 x i32>
   ret <8 x i32> %3
 }
 
-define <8 x i32> @commute_cmpps_ord_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
-  ;AVX-LABEL: commute_cmpps_ord_ymm
-  ;AVX: vcmpordps (%rdi), %ymm0, %ymm0
-  ;AVX-NEXT: retq
-
+define <8 x i32> @commute_cmpps_ord_ymm(<8 x float>* %a0, <8 x float> %a1) {
+; SSE-LABEL: commute_cmpps_ord_ymm:
+; SSE: # BB#0:
+; SSE-NEXT: cmpordps (%rdi), %xmm0
+; SSE-NEXT: cmpordps 16(%rdi), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmpps_ord_ymm:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpordps (%rdi), %ymm0, %ymm0
+; AVX-NEXT: retq
+;
   %1 = load <8 x float>, <8 x float>* %a0
   %2 = fcmp ord <8 x float> %1, %a1
   %3 = sext <8 x i1> %2 to <8 x i32>
   ret <8 x i32> %3
 }
 
-define <8 x i32> @commute_cmpps_uno_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
-  ;AVX-LABEL: commute_cmpps_uno_ymm
-  ;AVX: vcmpunordps (%rdi), %ymm0, %ymm0
-  ;AVX-NEXT: retq
-
+define <8 x i32> @commute_cmpps_uno_ymm(<8 x float>* %a0, <8 x float> %a1) {
+; SSE-LABEL: commute_cmpps_uno_ymm:
+; SSE: # BB#0:
+; SSE-NEXT: cmpunordps (%rdi), %xmm0
+; SSE-NEXT: cmpunordps 16(%rdi), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmpps_uno_ymm:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpunordps (%rdi), %ymm0, %ymm0
+; AVX-NEXT: retq
+;
   %1 = load <8 x float>, <8 x float>* %a0
   %2 = fcmp uno <8 x float> %1, %a1
   %3 = sext <8 x i1> %2 to <8 x i32>
   ret <8 x i32> %3
 }
 
-define <8 x i32> @commute_cmpps_lt_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
-  ;AVX-LABEL: commute_cmpps_lt_ymm
-  ;AVX: vmovaps (%rdi), %ymm1
-  ;AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
-  ;AVX-NEXT: retq
-
+define <8 x i32> @commute_cmpps_lt_ymm(<8 x float>* %a0, <8 x float> %a1) {
+; SSE-LABEL: commute_cmpps_lt_ymm:
+; SSE: # BB#0:
+; SSE-NEXT: movaps (%rdi), %xmm2
+; SSE-NEXT: movaps 16(%rdi), %xmm3
+; SSE-NEXT: cmpltps %xmm0, %xmm2
+; SSE-NEXT: cmpltps %xmm1, %xmm3
+; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: movaps %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmpps_lt_ymm:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps (%rdi), %ymm1
+; AVX-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
+; AVX-NEXT: retq
+;
   %1 = load <8 x float>, <8 x float>* %a0
   %2 = fcmp olt <8 x float> %1, %a1
   %3 = sext <8 x i1> %2 to <8 x i32>
   ret <8 x i32> %3
 }
 
-define <8 x i32> @commute_cmpps_le_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
-  ;AVX-LABEL: commute_cmpps_le_ymm
-  ;AVX: vmovaps (%rdi), %ymm1
-  ;AVX-NEXT: vcmpleps %ymm0, %ymm1, %ymm0
-  ;AVX-NEXT: retq
-
+define <8 x i32> @commute_cmpps_le_ymm(<8 x float>* %a0, <8 x float> %a1) {
+; SSE-LABEL: commute_cmpps_le_ymm:
+; SSE: # BB#0:
+; SSE-NEXT: movaps (%rdi), %xmm2
+; SSE-NEXT: movaps 16(%rdi), %xmm3
+; SSE-NEXT: cmpleps %xmm0, %xmm2
+; SSE-NEXT: cmpleps %xmm1, %xmm3
+; SSE-NEXT: movaps %xmm2, %xmm0
+; SSE-NEXT: movaps %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmpps_le_ymm:
+; AVX: # BB#0:
+; AVX-NEXT: vmovaps (%rdi), %ymm1
+; AVX-NEXT: vcmpleps %ymm0, %ymm1, %ymm0
+; AVX-NEXT: retq
+;
   %1 = load <8 x float>, <8 x float>* %a0
   %2 = fcmp ole <8 x float> %1, %a1
   %3 = sext <8 x i1> %2 to <8 x i32>
@@ -175,164 +238,226 @@ define <8 x i32> @commute_cmpps_le_ymm(<8 x float>* %a0, <8 x float> %a1) #0 {
 ; Only equal/not-equal/ordered/unordered can be safely commuted
 ;
-define <2 x i64> @commute_cmppd_eq(<2 x double>* %a0, <2 x double> %a1) #0 {
-  ;SSE-LABEL: commute_cmppd_eq
-  ;SSE: cmpeqpd (%rdi), %xmm0
-  ;SSE-NEXT: retq
-
-  ;AVX-LABEL: commute_cmppd_eq
-  ;AVX: vcmpeqpd (%rdi), %xmm0, %xmm0
-  ;AVX-NEXT: retq
-
+define <2 x i64> @commute_cmppd_eq(<2 x double>* %a0, <2 x double> %a1) {
+; SSE-LABEL: commute_cmppd_eq:
+; SSE: # BB#0:
+; SSE-NEXT: cmpeqpd (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmppd_eq:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpeqpd (%rdi), %xmm0, %xmm0
+; AVX-NEXT: retq
+;
   %1 = load <2 x double>, <2 x double>* %a0
   %2 = fcmp oeq <2 x double> %1, %a1
   %3 = sext <2 x i1> %2 to <2 x i64>
   ret <2 x i64> %3
 }
 
-define <2 x i64> @commute_cmppd_ne(<2 x double>* %a0, <2 x double> %a1) #0 {
-  ;SSE-LABEL: commute_cmppd_ne
-  ;SSE: cmpneqpd (%rdi), %xmm0
-  ;SSE-NEXT: retq
-
-  ;AVX-LABEL: commute_cmppd_ne
-  ;AVX: vcmpneqpd (%rdi), %xmm0, %xmm0
-  ;AVX-NEXT: retq
-
+define <2 x i64> @commute_cmppd_ne(<2 x double>* %a0, <2 x double> %a1) {
+; SSE-LABEL: commute_cmppd_ne:
+; SSE: # BB#0:
+; SSE-NEXT: cmpneqpd (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmppd_ne:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpneqpd (%rdi), %xmm0, %xmm0
+; AVX-NEXT: retq
+;
   %1 = load <2 x double>, <2 x double>* %a0
   %2 = fcmp une <2 x double> %1, %a1
   %3 = sext <2 x i1> %2 to <2 x i64>
   ret <2 x i64> %3
 }
 
-define <2 x i64> @commute_cmppd_ord(<2 x double>* %a0, <2 x double> %a1) #0 {
-  ;SSE-LABEL: commute_cmppd_ord
-  ;SSE: cmpordpd (%rdi), %xmm0
-  ;SSE-NEXT: retq
-
-  ;AVX-LABEL: commute_cmppd_ord
-  ;AVX: vcmpordpd (%rdi), %xmm0, %xmm0
-  ;AVX-NEXT: retq
-
+define <2 x i64> @commute_cmppd_ord(<2 x double>* %a0, <2 x double> %a1) {
+; SSE-LABEL: commute_cmppd_ord:
+; SSE: # BB#0:
+; SSE-NEXT: cmpordpd (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmppd_ord:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpordpd (%rdi), %xmm0, %xmm0
+; AVX-NEXT: retq
+;
   %1 = load <2 x double>, <2 x double>* %a0
   %2 = fcmp ord <2 x double> %1, %a1
   %3 = sext <2 x i1> %2 to <2 x i64>
   ret <2 x i64> %3
 }
 
-define <2 x i64> @commute_cmppd_uno(<2 x double>* %a0, <2 x double> %a1) #0 {
-  ;SSE-LABEL: commute_cmppd_uno
-  ;SSE: cmpunordpd (%rdi), %xmm0
-  ;SSE-NEXT: retq
-
-  ;AVX-LABEL: commute_cmppd_uno
-  ;AVX: vcmpunordpd (%rdi), %xmm0, %xmm0
-  ;AVX-NEXT: retq
-
+define <2 x i64> @commute_cmppd_uno(<2 x double>* %a0, <2 x double> %a1) {
+; SSE-LABEL: commute_cmppd_uno:
+; SSE: # BB#0:
+; SSE-NEXT: cmpunordpd (%rdi), %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmppd_uno:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpunordpd (%rdi), %xmm0, %xmm0
+; AVX-NEXT: retq
+;
   %1 = load <2 x double>, <2 x double>* %a0
   %2 = fcmp uno <2 x double> %1, %a1
   %3 = sext <2 x i1> %2 to <2 x i64>
   ret <2 x i64> %3
 }
 
-define <2 x i64> @commute_cmppd_lt(<2 x double>* %a0, <2 x double> %a1) #0 {
-  ;SSE-LABEL: commute_cmppd_lt
-  ;SSE: movapd (%rdi), %xmm1
-  ;SSE-NEXT: cmpltpd %xmm0, %xmm1
-  ;SSE-NEXT: movapd %xmm1, %xmm0
-  ;SSE-NEXT: retq
-
-  ;AVX-LABEL: commute_cmppd_lt
-  ;AVX: vmovapd (%rdi), %xmm1
-  ;AVX-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
-  ;AVX-NEXT: retq
-
+define <2 x i64> @commute_cmppd_lt(<2 x double>* %a0, <2 x double> %a1) {
+; SSE-LABEL: commute_cmppd_lt:
+; SSE: # BB#0:
+; SSE-NEXT: movapd (%rdi), %xmm1
+; SSE-NEXT: cmpltpd %xmm0, %xmm1
+; SSE-NEXT: movapd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmppd_lt:
+; AVX: # BB#0:
+; AVX-NEXT: vmovapd (%rdi), %xmm1
+; AVX-NEXT: vcmpltpd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+;
   %1 = load <2 x double>, <2 x double>* %a0
   %2 = fcmp olt <2 x double> %1, %a1
   %3 = sext <2 x i1> %2 to <2 x i64>
   ret <2 x i64> %3
 }
 
-define <2 x i64> @commute_cmppd_le(<2 x double>* %a0, <2 x double> %a1) #0 {
-  ;SSE-LABEL: commute_cmppd_le
-  ;SSE: movapd (%rdi), %xmm1
-  ;SSE-NEXT: cmplepd %xmm0, %xmm1
-  ;SSE-NEXT: movapd %xmm1, %xmm0
-  ;SSE-NEXT: retq
-
-  ;AVX-LABEL: commute_cmppd_le
-  ;AVX: vmovapd (%rdi), %xmm1
-  ;AVX-NEXT: vcmplepd %xmm0, %xmm1, %xmm0
-  ;AVX-NEXT: retq
-
+define <2 x i64> @commute_cmppd_le(<2 x double>* %a0, <2 x double> %a1) {
+; SSE-LABEL: commute_cmppd_le:
+; SSE: # BB#0:
+; SSE-NEXT: movapd (%rdi), %xmm1
+; SSE-NEXT: cmplepd %xmm0, %xmm1
+; SSE-NEXT: movapd %xmm1, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmppd_le:
+; AVX: # BB#0:
+; AVX-NEXT: vmovapd (%rdi), %xmm1
+; AVX-NEXT: vcmplepd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: retq
+;
   %1 = load <2 x double>, <2 x double>* %a0
   %2 = fcmp ole <2 x double> %1, %a1
   %3 = sext <2 x i1> %2 to <2 x i64>
   ret <2 x i64> %3
 }
 
-define <4 x i64> @commute_cmppd_eq_ymmm(<4 x double>* %a0, <4 x double> %a1) #0 {
-  ;AVX-LABEL: commute_cmppd_eq
-  ;AVX: vcmpeqpd (%rdi), %ymm0, %ymm0
-  ;AVX-NEXT: retq
-
+define <4 x i64> @commute_cmppd_eq_ymmm(<4 x double>* %a0, <4 x double> %a1) {
+; SSE-LABEL: commute_cmppd_eq_ymmm:
+; SSE: # BB#0:
+; SSE-NEXT: cmpeqpd (%rdi), %xmm0
+; SSE-NEXT: cmpeqpd 16(%rdi), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmppd_eq_ymmm:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpeqpd (%rdi), %ymm0, %ymm0
+; AVX-NEXT: retq
+;
   %1 = load <4 x double>, <4 x double>* %a0
   %2 = fcmp oeq <4 x double> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i64>
   ret <4 x i64> %3
 }
 
-define <4 x i64> @commute_cmppd_ne_ymmm(<4 x double>* %a0, <4 x double> %a1) #0 {
-  ;AVX-LABEL: commute_cmppd_ne
-  ;AVX: vcmpneqpd (%rdi), %ymm0, %ymm0
-  ;AVX-NEXT: retq
-
+define <4 x i64> @commute_cmppd_ne_ymmm(<4 x double>* %a0, <4 x double> %a1) {
+; SSE-LABEL: commute_cmppd_ne_ymmm:
+; SSE: # BB#0:
+; SSE-NEXT: cmpneqpd (%rdi), %xmm0
+; SSE-NEXT: cmpneqpd 16(%rdi), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmppd_ne_ymmm:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpneqpd (%rdi), %ymm0, %ymm0
+; AVX-NEXT: retq
+;
   %1 = load <4 x double>, <4 x double>* %a0
   %2 = fcmp une <4 x double> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i64>
   ret <4 x i64> %3
 }
 
-define <4 x i64> @commute_cmppd_ord_ymmm(<4 x double>* %a0, <4 x double> %a1) #0 {
-  ;AVX-LABEL: commute_cmppd_ord
-  ;AVX: vcmpordpd (%rdi), %ymm0, %ymm0
-  ;AVX-NEXT: retq
-
+define <4 x i64> @commute_cmppd_ord_ymmm(<4 x double>* %a0, <4 x double> %a1) {
+; SSE-LABEL: commute_cmppd_ord_ymmm:
+; SSE: # BB#0:
+; SSE-NEXT: cmpordpd (%rdi), %xmm0
+; SSE-NEXT: cmpordpd 16(%rdi), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmppd_ord_ymmm:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpordpd (%rdi), %ymm0, %ymm0
+; AVX-NEXT: retq
+;
   %1 = load <4 x double>, <4 x double>* %a0
   %2 = fcmp ord <4 x double> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i64>
   ret <4 x i64> %3
 }
 
-define <4 x i64> @commute_cmppd_uno_ymmm(<4 x double>* %a0, <4 x double> %a1) #0 {
-  ;AVX-LABEL: commute_cmppd_uno
-  ;AVX: vcmpunordpd (%rdi), %ymm0, %ymm0
-  ;AVX-NEXT: retq
-
+define <4 x i64> @commute_cmppd_uno_ymmm(<4 x double>* %a0, <4 x double> %a1) {
+; SSE-LABEL: commute_cmppd_uno_ymmm:
+; SSE: # BB#0:
+; SSE-NEXT: cmpunordpd (%rdi), %xmm0
+; SSE-NEXT: cmpunordpd 16(%rdi), %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmppd_uno_ymmm:
+; AVX: # BB#0:
+; AVX-NEXT: vcmpunordpd (%rdi), %ymm0, %ymm0
+; AVX-NEXT: retq
+;
   %1 = load <4 x double>, <4 x double>* %a0
   %2 = fcmp uno <4 x double> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i64>
   ret <4 x i64> %3
 }
 
-define <4 x i64> @commute_cmppd_lt_ymmm(<4 x double>* %a0, <4 x double> %a1) #0 {
-  ;AVX-LABEL: commute_cmppd_lt
-  ;AVX: vmovapd (%rdi), %ymm1
-  ;AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
-  ;AVX-NEXT: retq
-
+define <4 x i64> @commute_cmppd_lt_ymmm(<4 x double>* %a0, <4 x double> %a1) {
+; SSE-LABEL: commute_cmppd_lt_ymmm:
+; SSE: # BB#0:
+; SSE-NEXT: movapd (%rdi), %xmm2
+; SSE-NEXT: movapd 16(%rdi), %xmm3
+; SSE-NEXT: cmpltpd %xmm0, %xmm2
+; SSE-NEXT: cmpltpd %xmm1, %xmm3
+; SSE-NEXT: movapd %xmm2, %xmm0
+; SSE-NEXT: movapd %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmppd_lt_ymmm:
+; AVX: # BB#0:
+; AVX-NEXT: vmovapd (%rdi), %ymm1
+; AVX-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
+; AVX-NEXT: retq
+;
   %1 = load <4 x double>, <4 x double>* %a0
   %2 = fcmp olt <4 x double> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i64>
   ret <4 x i64> %3
 }
 
-define <4 x i64> @commute_cmppd_le_ymmm(<4 x double>* %a0, <4 x double> %a1) #0 {
-  ;AVX-LABEL: commute_cmppd_le
-  ;AVX: vmovapd (%rdi), %ymm1
-  ;AVX-NEXT: vcmplepd %ymm0, %ymm1, %ymm0
-  ;AVX-NEXT: retq
-
+define <4 x i64> @commute_cmppd_le_ymmm(<4 x double>* %a0, <4 x double> %a1) {
+; SSE-LABEL: commute_cmppd_le_ymmm:
+; SSE: # BB#0:
+; SSE-NEXT: movapd (%rdi), %xmm2
+; SSE-NEXT: movapd 16(%rdi), %xmm3
+; SSE-NEXT: cmplepd %xmm0, %xmm2
+; SSE-NEXT: cmplepd %xmm1, %xmm3
+; SSE-NEXT: movapd %xmm2, %xmm0
+; SSE-NEXT: movapd %xmm3, %xmm1
+; SSE-NEXT: retq
+;
+; AVX-LABEL: commute_cmppd_le_ymmm:
+; AVX: # BB#0:
+; AVX-NEXT: vmovapd (%rdi), %ymm1
+; AVX-NEXT: vcmplepd %ymm0, %ymm1, %ymm0
+; AVX-NEXT: retq
+;
   %1 = load <4 x double>, <4 x double>* %a0
   %2 = fcmp ole <4 x double> %1, %a1
   %3 = sext <4 x i1> %2 to <4 x i64>
```
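The comment repeated at the top of both sections states the property this test guards: an fcmp can only have its operands swapped without changing its meaning when the predicate is symmetric, i.e. eq/ne/ord/uno. That symmetry is what lets the backend fold the loaded operand directly into `cmpeqps` and friends, while lt/le force an explicit load plus an operand swap, because swapping turns lt into gt and the SSE `cmpps`/`cmppd` immediate has encodings only for eq/lt/le/unord/neq/nlt/nle/ord, not gt/ge. A minimal sketch of the two cases (hypothetical functions, not part of this commit):

```llvm
; oeq is symmetric: 'fcmp oeq %x, %v' equals 'fcmp oeq %v, %x' for every
; input, so llc may fold the load straight into cmpeqps in either order.
define <4 x i32> @fold_oeq(<4 x float>* %p, <4 x float> %v) {
  %x = load <4 x float>, <4 x float>* %p
  %c = fcmp oeq <4 x float> %x, %v      ; same result as fcmp oeq %v, %x
  %r = sext <4 x i1> %c to <4 x i32>
  ret <4 x i32> %r
}

; olt is not symmetric: swapping operands yields ogt, and SSE has no gt
; compare encoding, so llc must materialize the load in a register and
; emit cmpltps with the operands reversed instead of folding it.
define <4 x i32> @no_fold_olt(<4 x float>* %p, <4 x float> %v) {
  %x = load <4 x float>, <4 x float>* %p
  %c = fcmp olt <4 x float> %x, %v      ; equivalent to fcmp ogt %v, %x
  %r = sext <4 x i1> %c to <4 x i32>
  ret <4 x i32> %r
}
```

Because the symmetric predicates agree for all inputs, NaNs included, the memory-folding transform exercised by this test is legal exactly for those four predicates, which is what the regenerated SSE and AVX check blocks above pin down.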

