Diffstat (limited to 'llvm/test/CodeGen/AArch64/sve-int-log-pred.ll')
-rw-r--r-- | llvm/test/CodeGen/AArch64/sve-int-log-pred.ll | 140 |
1 files changed, 140 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/AArch64/sve-int-log-pred.ll b/llvm/test/CodeGen/AArch64/sve-int-log-pred.ll
new file mode 100644
index 00000000000..5e12981fd67
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-int-log-pred.ll
@@ -0,0 +1,140 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+define <vscale x 16 x i8> @and_pred_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: and_pred_i8:
+; CHECK: and z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.and.nxv2i8(<vscale x 16 x i1> %pg,
+                                                              <vscale x 16 x i8> %a,
+                                                              <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @and_pred_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: and_pred_i16:
+; CHECK: and z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.and.nxv2i16(<vscale x 8 x i1> %pg,
+                                                               <vscale x 8 x i16> %a,
+                                                               <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+
+define <vscale x 4 x i32> @and_pred_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: and_pred_i32:
+; CHECK: and z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.and.nxv2i32(<vscale x 4 x i1> %pg,
+                                                               <vscale x 4 x i32> %a,
+                                                               <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @and_pred_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: and_pred_i64:
+; CHECK: and z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.and.nxv2i64(<vscale x 2 x i1> %pg,
+                                                               <vscale x 2 x i64> %a,
+                                                               <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+
+define <vscale x 16 x i8> @or_pred_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: or_pred_i8:
+; CHECK: orr z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.or.nxv2i8(<vscale x 16 x i1> %pg,
+                                                             <vscale x 16 x i8> %a,
+                                                             <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @or_pred_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: or_pred_i16:
+; CHECK: orr z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.or.nxv2i16(<vscale x 8 x i1> %pg,
+                                                              <vscale x 8 x i16> %a,
+                                                              <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+
+define <vscale x 4 x i32> @or_pred_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: or_pred_i32:
+; CHECK: orr z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.or.nxv2i32(<vscale x 4 x i1> %pg,
+                                                              <vscale x 4 x i32> %a,
+                                                              <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @or_pred_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: or_pred_i64:
+; CHECK: orr z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.or.nxv2i64(<vscale x 2 x i1> %pg,
+                                                              <vscale x 2 x i64> %a,
+                                                              <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+
+define <vscale x 16 x i8> @xor_pred_i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: xor_pred_i8:
+; CHECK: eor z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.xor.nxv2i8(<vscale x 16 x i1> %pg,
+                                                              <vscale x 16 x i8> %a,
+                                                              <vscale x 16 x i8> %b)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @xor_pred_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: xor_pred_i16:
+; CHECK: eor z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.xor.nxv2i16(<vscale x 8 x i1> %pg,
+                                                               <vscale x 8 x i16> %a,
+                                                               <vscale x 8 x i16> %b)
+  ret <vscale x 8 x i16> %out
+}
+
+
+define <vscale x 4 x i32> @xor_pred_i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: xor_pred_i32:
+; CHECK: eor z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.xor.nxv2i32(<vscale x 4 x i1> %pg,
+                                                               <vscale x 4 x i32> %a,
+                                                               <vscale x 4 x i32> %b)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @xor_pred_i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: xor_pred_i64:
+; CHECK: eor z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.xor.nxv2i64(<vscale x 2 x i1> %pg,
+                                                               <vscale x 2 x i64> %a,
+                                                               <vscale x 2 x i64> %b)
+  ret <vscale x 2 x i64> %out
+}
+
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.and.nxv2i8(<vscale x 16 x i1>,<vscale x 16 x i8>,<vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.and.nxv2i16(<vscale x 8 x i1>,<vscale x 8 x i16>,<vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.and.nxv2i32(<vscale x 4 x i1>,<vscale x 4 x i32>,<vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.and.nxv2i64(<vscale x 2 x i1>,<vscale x 2 x i64>,<vscale x 2 x i64>)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.or.nxv2i8(<vscale x 16 x i1>,<vscale x 16 x i8>,<vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.or.nxv2i16(<vscale x 8 x i1>,<vscale x 8 x i16>,<vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.or.nxv2i32(<vscale x 4 x i1>,<vscale x 4 x i32>,<vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.or.nxv2i64(<vscale x 2 x i1>,<vscale x 2 x i64>,<vscale x 2 x i64>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.xor.nxv2i8(<vscale x 16 x i1>,<vscale x 16 x i8>,<vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.xor.nxv2i16(<vscale x 8 x i1>,<vscale x 8 x i16>,<vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.xor.nxv2i32(<vscale x 4 x i1>,<vscale x 4 x i32>,<vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.xor.nxv2i64(<vscale x 2 x i1>,<vscale x 2 x i64>,<vscale x 2 x i64>)
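For context, below is a minimal C sketch of how predicated AND/ORR/EOR operations like the ones tested above are typically reached from source code, using the ACLE SVE builtins from arm_sve.h. This is illustrative only and not part of the commit: the function names are invented, and the assumption that the merging ("_m") builtins lower through the llvm.aarch64.sve.and/or/xor intrinsics to the instructions checked by the test depends on the frontend.

  #include <arm_sve.h>

  /* Hypothetical examples, assuming an SVE-enabled compiler, e.g.
     clang -O2 --target=aarch64-linux-gnu -march=armv8-a+sve.
     Each merging builtin is expected to become one predicated
     logical instruction on z registers, as in the CHECK lines above. */

  svint8_t and_bytes(svbool_t pg, svint8_t a, svint8_t b) {
    return svand_s8_m(pg, a, b);   /* and z0.b, p0/m, z0.b, z1.b */
  }

  svint16_t or_halfwords(svbool_t pg, svint16_t a, svint16_t b) {
    return svorr_s16_m(pg, a, b);  /* orr z0.h, p0/m, z0.h, z1.h */
  }

  svint32_t xor_words(svbool_t pg, svint32_t a, svint32_t b) {
    return sveor_s32_m(pg, a, b);  /* eor z0.s, p0/m, z0.s, z1.s */
  }

The test itself exercises only the IR-to-assembly step, via the pipeline in its RUN line: llc -mtriple=aarch64-linux-gnu -mattr=+sve piped into FileCheck.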