| author | Cameron McInally <mcinally@cray.com> | 2019-12-11 17:07:07 -0600 |
|---|---|---|
| committer | Cameron McInally <mcinally@cray.com> | 2019-12-11 20:15:44 -0600 |
| commit | 7aa5c160885c92c95ad84216de9b9b02dbc95936 (patch) | |
| tree | 56e66e76e20dbc78985d5a31342d638ac83be0d3 /llvm/test/CodeGen/AArch64 | |
| parent | 5d986953c8b917bacfaa1f800fc1e242559f76be (diff) | |
[AArch64][SVE] Add patterns for scalable vselect
This patch matches scalable vector selects to predicated move instructions.
Differential Revision: https://reviews.llvm.org/D71298
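For context, the predicated move `mov zd.<t>, pg/m, zn.<t>` writes the active lanes of `zn` into `zd` and leaves the inactive lanes of `zd` unchanged, which is exactly the semantics of a `select` whose false operand ends up in the destination register. The sketch below (the file and function names are illustrative, not part of the patch) shows the shape of IR that now selects to that instruction, mirroring what the new test checks:

```llvm
; A minimal sketch, assuming an SVE-enabled build of llc; run with, e.g.:
;   llc -mtriple=aarch64-linux-gnu -mattr=+sve select.ll -o -
define <vscale x 4 x i32> @vselect_example(<vscale x 4 x i1> %p,
                                           <vscale x 4 x i32> %dst,
                                           <vscale x 4 x i32> %a) {
  ; Lanes where %p is true take %a, the rest keep %dst. Per the AArch64 calling
  ; convention the arguments arrive in p0, z0 and z1 and the result is returned
  ; in z0, so this is expected to lower to "mov z0.s, p0/m, z1.s" plus "ret".
  %sel = select <vscale x 4 x i1> %p, <vscale x 4 x i32> %a, <vscale x 4 x i32> %dst
  ret <vscale x 4 x i32> %sel
}
```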
Diffstat (limited to 'llvm/test/CodeGen/AArch64')
| mode | path | lines |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/AArch64/sve-select.ll | 85 |
1 file changed, 85 insertions, 0 deletions
```diff
diff --git a/llvm/test/CodeGen/AArch64/sve-select.ll b/llvm/test/CodeGen/AArch64/sve-select.ll
new file mode 100644
index 00000000000..2d2ea47ae35
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-select.ll
@@ -0,0 +1,85 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+; Integer vector select
+
+define <vscale x 16 x i8> @sel_nxv16i8(<vscale x 16 x i1> %p,
+                                       <vscale x 16 x i8> %dst,
+                                       <vscale x 16 x i8> %a) {
+; CHECK-LABEL: sel_nxv16i8:
+; CHECK: mov z0.b, p0/m, z1.b
+; CHECK-NEXT: ret
+  %sel = select <vscale x 16 x i1> %p, <vscale x 16 x i8> %a, <vscale x 16 x i8> %dst
+  ret <vscale x 16 x i8> %sel
+}
+
+define <vscale x 8 x i16> @sel_nxv8i16(<vscale x 8 x i1> %p,
+                                       <vscale x 8 x i16> %dst,
+                                       <vscale x 8 x i16> %a) {
+; CHECK-LABEL: sel_nxv8i16:
+; CHECK: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+  %sel = select <vscale x 8 x i1> %p, <vscale x 8 x i16> %a, <vscale x 8 x i16> %dst
+  ret <vscale x 8 x i16> %sel
+}
+
+define <vscale x 4 x i32> @sel_nxv4i32(<vscale x 4 x i1> %p,
+                                       <vscale x 4 x i32> %dst,
+                                       <vscale x 4 x i32> %a) {
+; CHECK-LABEL: sel_nxv4i32:
+; CHECK: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+  %sel = select <vscale x 4 x i1> %p, <vscale x 4 x i32> %a, <vscale x 4 x i32> %dst
+  ret <vscale x 4 x i32> %sel
+}
+
+define <vscale x 2 x i64> @sel_nxv2i64(<vscale x 2 x i1> %p,
+                                       <vscale x 2 x i64> %dst,
+                                       <vscale x 2 x i64> %a) {
+; CHECK-LABEL: sel_nxv2i64:
+; CHECK: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+  %sel = select <vscale x 2 x i1> %p, <vscale x 2 x i64> %a, <vscale x 2 x i64> %dst
+  ret <vscale x 2 x i64> %sel
+}
+
+; Floating point vector select
+
+define <vscale x 8 x half> @sel_nxv8f16(<vscale x 8 x i1> %p,
+                                        <vscale x 8 x half> %dst,
+                                        <vscale x 8 x half> %a) {
+; CHECK-LABEL: sel_nxv8f16:
+; CHECK: mov z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+  %sel = select <vscale x 8 x i1> %p, <vscale x 8 x half> %a, <vscale x 8 x half> %dst
+  ret <vscale x 8 x half> %sel
+}
+
+define <vscale x 4 x float> @sel_nxv4f32(<vscale x 4 x i1> %p,
+                                         <vscale x 4 x float> %dst,
+                                         <vscale x 4 x float> %a) {
+; CHECK-LABEL: sel_nxv4f32:
+; CHECK: mov z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+  %sel = select <vscale x 4 x i1> %p, <vscale x 4 x float> %a, <vscale x 4 x float> %dst
+  ret <vscale x 4 x float> %sel
+}
+
+define <vscale x 2 x float> @sel_nxv2f32(<vscale x 2 x i1> %p,
+                                         <vscale x 2 x float> %dst,
+                                         <vscale x 2 x float> %a) {
+; CHECK-LABEL: sel_nxv2f32:
+; CHECK: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+  %sel = select <vscale x 2 x i1> %p, <vscale x 2 x float> %a, <vscale x 2 x float> %dst
+  ret <vscale x 2 x float> %sel
+}
+
+define <vscale x 2 x double> @sel_nxv8f64(<vscale x 2 x i1> %p,
+                                          <vscale x 2 x double> %dst,
+                                          <vscale x 2 x double> %a) {
+; CHECK-LABEL: sel_nxv8f64:
+; CHECK: mov z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+  %sel = select <vscale x 2 x i1> %p, <vscale x 2 x double> %a, <vscale x 2 x double> %dst
+  ret <vscale x 2 x double> %sel
+}
```

