| | | |
|---|---|---|
| author | Tim Northover <tnorthover@apple.com> | 2014-05-24 12:42:26 +0000 |
| committer | Tim Northover <tnorthover@apple.com> | 2014-05-24 12:42:26 +0000 |
| commit | cc08e1fe1b3feef12a1eba31f8afcc3bbefc733e (patch) | |
| tree | 944d86a337d00e62dbc49d2ff0aad7925472afa7 /llvm/test/CodeGen/AArch64/neon-add-sub.ll | |
| parent | f6ee78cfb7869dba4f797cbc0573bf02beac7810 (diff) | |
AArch64/ARM64: remove AArch64 from tree prior to renaming ARM64.
I'm doing this in two phases for a better "git blame" record. This
commit removes the previous AArch64 backend and redirects all
functionality to ARM64. It also deduplicates test-lines and removes
orphaned AArch64 tests.
The next step will be "git mv ARM64 AArch64" and rewire most of the
tests.
Hopefully LLVM is still functional, though it would be even better if
no-one ever had to care because the rename happens straight
afterwards.
llvm-svn: 209576
Diffstat (limited to 'llvm/test/CodeGen/AArch64/neon-add-sub.ll')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/AArch64/neon-add-sub.ll | 280 |
1 file changed, 0 insertions, 280 deletions
diff --git a/llvm/test/CodeGen/AArch64/neon-add-sub.ll b/llvm/test/CodeGen/AArch64/neon-add-sub.ll
deleted file mode 100644
index eebad4df106..00000000000
--- a/llvm/test/CodeGen/AArch64/neon-add-sub.ll
+++ /dev/null
@@ -1,280 +0,0 @@
-; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
-; arm64 has its own copy of this test
-
-define <8 x i8> @add8xi8(<8 x i8> %A, <8 x i8> %B) {
-;CHECK: add {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
- %tmp3 = add <8 x i8> %A, %B;
- ret <8 x i8> %tmp3
-}
-
-define <16 x i8> @add16xi8(<16 x i8> %A, <16 x i8> %B) {
-;CHECK: add {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
- %tmp3 = add <16 x i8> %A, %B;
- ret <16 x i8> %tmp3
-}
-
-define <4 x i16> @add4xi16(<4 x i16> %A, <4 x i16> %B) {
-;CHECK: add {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
- %tmp3 = add <4 x i16> %A, %B;
- ret <4 x i16> %tmp3
-}
-
-define <8 x i16> @add8xi16(<8 x i16> %A, <8 x i16> %B) {
-;CHECK: add {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
- %tmp3 = add <8 x i16> %A, %B;
- ret <8 x i16> %tmp3
-}
-
-define <2 x i32> @add2xi32(<2 x i32> %A, <2 x i32> %B) {
-;CHECK: add {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
- %tmp3 = add <2 x i32> %A, %B;
- ret <2 x i32> %tmp3
-}
-
-define <4 x i32> @add4x32(<4 x i32> %A, <4 x i32> %B) {
-;CHECK: add {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
- %tmp3 = add <4 x i32> %A, %B;
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @add2xi64(<2 x i64> %A, <2 x i64> %B) {
-;CHECK: add {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
- %tmp3 = add <2 x i64> %A, %B;
- ret <2 x i64> %tmp3
-}
-
-define <2 x float> @add2xfloat(<2 x float> %A, <2 x float> %B) {
-;CHECK: fadd {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
- %tmp3 = fadd <2 x float> %A, %B;
- ret <2 x float> %tmp3
-}
-
-define <4 x float> @add4xfloat(<4 x float> %A, <4 x float> %B) {
-;CHECK: fadd {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
- %tmp3 = fadd <4 x float> %A, %B;
- ret <4 x float> %tmp3
-}
-define <2 x double> @add2xdouble(<2 x double> %A, <2 x double> %B) {
-;CHECK: add {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
- %tmp3 = fadd <2 x double> %A, %B;
- ret <2 x double> %tmp3
-}
-
-define <8 x i8> @sub8xi8(<8 x i8> %A, <8 x i8> %B) {
-;CHECK: sub {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
- %tmp3 = sub <8 x i8> %A, %B;
- ret <8 x i8> %tmp3
-}
-
-define <16 x i8> @sub16xi8(<16 x i8> %A, <16 x i8> %B) {
-;CHECK: sub {{v[0-9]+}}.16b, {{v[0-9]+}}.16b, {{v[0-9]+}}.16b
- %tmp3 = sub <16 x i8> %A, %B;
- ret <16 x i8> %tmp3
-}
-
-define <4 x i16> @sub4xi16(<4 x i16> %A, <4 x i16> %B) {
-;CHECK: sub {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
- %tmp3 = sub <4 x i16> %A, %B;
- ret <4 x i16> %tmp3
-}
-
-define <8 x i16> @sub8xi16(<8 x i16> %A, <8 x i16> %B) {
-;CHECK: sub {{v[0-9]+}}.8h, {{v[0-9]+}}.8h, {{v[0-9]+}}.8h
- %tmp3 = sub <8 x i16> %A, %B;
- ret <8 x i16> %tmp3
-}
-
-define <2 x i32> @sub2xi32(<2 x i32> %A, <2 x i32> %B) {
-;CHECK: sub {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
- %tmp3 = sub <2 x i32> %A, %B;
- ret <2 x i32> %tmp3
-}
-
-define <4 x i32> @sub4x32(<4 x i32> %A, <4 x i32> %B) {
-;CHECK: sub {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
- %tmp3 = sub <4 x i32> %A, %B;
- ret <4 x i32> %tmp3
-}
-
-define <2 x i64> @sub2xi64(<2 x i64> %A, <2 x i64> %B) {
-;CHECK: sub {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
- %tmp3 = sub <2 x i64> %A, %B;
- ret <2 x i64> %tmp3
-}
-
-define <2 x float> @sub2xfloat(<2 x float> %A, <2 x float> %B) {
-;CHECK: fsub {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
- %tmp3 = fsub <2 x float> %A, %B;
- ret <2 x float> %tmp3
-}
-
-define <4 x float> @sub4xfloat(<4 x float> %A, <4 x float> %B) {
-;CHECK: fsub {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
- %tmp3 = fsub <4 x float> %A, %B;
- ret <4 x float> %tmp3
-}
-define <2 x double> @sub2xdouble(<2 x double> %A, <2 x double> %B) {
-;CHECK: sub {{v[0-9]+}}.2d, {{v[0-9]+}}.2d, {{v[0-9]+}}.2d
- %tmp3 = fsub <2 x double> %A, %B;
- ret <2 x double> %tmp3
-}
-
-define <1 x double> @test_vadd_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK-LABEL: test_vadd_f64
-; CHECK: fadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = fadd <1 x double> %a, %b
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vmul_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK-LABEL: test_vmul_f64
-; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = fmul <1 x double> %a, %b
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vdiv_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK-LABEL: test_vdiv_f64
-; CHECK: fdiv d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = fdiv <1 x double> %a, %b
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vmla_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
-; CHECK-LABEL: test_vmla_f64
-; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
-; CHECK: fadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = fmul <1 x double> %b, %c
- %2 = fadd <1 x double> %1, %a
- ret <1 x double> %2
-}
-
-define <1 x double> @test_vmls_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
-; CHECK-LABEL: test_vmls_f64
-; CHECK: fmul d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
-; CHECK: fsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = fmul <1 x double> %b, %c
- %2 = fsub <1 x double> %a, %1
- ret <1 x double> %2
-}
-
-define <1 x double> @test_vfms_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
-; CHECK-LABEL: test_vfms_f64
-; CHECK: fmsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = fsub <1 x double> <double -0.000000e+00>, %b
- %2 = tail call <1 x double> @llvm.fma.v1f64(<1 x double> %1, <1 x double> %c, <1 x double> %a)
- ret <1 x double> %2
-}
-
-define <1 x double> @test_vfma_f64(<1 x double> %a, <1 x double> %b, <1 x double> %c) {
-; CHECK-LABEL: test_vfma_f64
-; CHECK: fmadd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.fma.v1f64(<1 x double> %b, <1 x double> %c, <1 x double> %a)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vsub_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK-LABEL: test_vsub_f64
-; CHECK: fsub d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = fsub <1 x double> %a, %b
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vabd_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK-LABEL: test_vabd_f64
-; CHECK: fabd d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.arm.neon.vabds.v1f64(<1 x double> %a, <1 x double> %b)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vmax_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK-LABEL: test_vmax_f64
-; CHECK: fmax d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.arm.neon.vmaxs.v1f64(<1 x double> %a, <1 x double> %b)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vmin_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK-LABEL: test_vmin_f64
-; CHECK: fmin d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.arm.neon.vmins.v1f64(<1 x double> %a, <1 x double> %b)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vmaxnm_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK-LABEL: test_vmaxnm_f64
-; CHECK: fmaxnm d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.aarch64.neon.vmaxnm.v1f64(<1 x double> %a, <1 x double> %b)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vminnm_f64(<1 x double> %a, <1 x double> %b) {
-; CHECK-LABEL: test_vminnm_f64
-; CHECK: fminnm d{{[0-9]+}}, d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.aarch64.neon.vminnm.v1f64(<1 x double> %a, <1 x double> %b)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vabs_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vabs_f64
-; CHECK: fabs d{{[0-9]+}}, d{{[0-9]+}}
- %1 = tail call <1 x double> @llvm.fabs.v1f64(<1 x double> %a)
- ret <1 x double> %1
-}
-
-define <1 x double> @test_vneg_f64(<1 x double> %a) {
-; CHECK-LABEL: test_vneg_f64
-; CHECK: fneg d{{[0-9]+}}, d{{[0-9]+}}
- %1 = fsub <1 x double> <double -0.000000e+00>, %a
- ret <1 x double> %1
-}
-
-declare <1 x double> @llvm.fabs.v1f64(<1 x double>)
-declare <1 x double> @llvm.aarch64.neon.vminnm.v1f64(<1 x double>, <1 x double>)
-declare <1 x double> @llvm.aarch64.neon.vmaxnm.v1f64(<1 x double>, <1 x double>)
-declare <1 x double> @llvm.arm.neon.vmins.v1f64(<1 x double>, <1 x double>)
-declare <1 x double> @llvm.arm.neon.vmaxs.v1f64(<1 x double>, <1 x double>)
-declare <1 x double> @llvm.arm.neon.vabds.v1f64(<1 x double>, <1 x double>)
-declare <1 x double> @llvm.fma.v1f64(<1 x double>, <1 x double>, <1 x double>)
-
-define <1 x i8> @test_add_v1i8(<1 x i8> %a, <1 x i8> %b) {
-;CHECK-LABEL: test_add_v1i8:
-;CHECK: add {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
- %c = add <1 x i8> %a, %b
- ret <1 x i8> %c
-}
-
-define <1 x i16> @test_add_v1i16(<1 x i16> %a, <1 x i16> %b) {
-;CHECK-LABEL: test_add_v1i16:
-;CHECK: add {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
- %c = add <1 x i16> %a, %b
- ret <1 x i16> %c
-}
-
-define <1 x i32> @test_add_v1i32(<1 x i32> %a, <1 x i32> %b) {
-;CHECK-LABEL: test_add_v1i32:
-;CHECK: add {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
- %c = add <1 x i32> %a, %b
- ret <1 x i32> %c
-}
-
-define <1 x i8> @test_sub_v1i8(<1 x i8> %a, <1 x i8> %b) {
-;CHECK-LABEL: test_sub_v1i8:
-;CHECK: sub {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
- %c = sub <1 x i8> %a, %b
- ret <1 x i8> %c
-}
-
-define <1 x i16> @test_sub_v1i16(<1 x i16> %a, <1 x i16> %b) {
-;CHECK-LABEL: test_sub_v1i16:
-;CHECK: sub {{v[0-9]+}}.4h, {{v[0-9]+}}.4h, {{v[0-9]+}}.4h
- %c = sub <1 x i16> %a, %b
- ret <1 x i16> %c
-}
-
-define <1 x i32> @test_sub_v1i32(<1 x i32> %a, <1 x i32> %b) {
-;CHECK-LABEL: test_sub_v1i32:
-;CHECK: sub {{v[0-9]+}}.2s, {{v[0-9]+}}.2s, {{v[0-9]+}}.2s
- %c = sub <1 x i32> %a, %b
- ret <1 x i32> %c
-}
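The deleted file's second comment line points at the redirect described in the commit message: the same IR is now exercised by the ARM64 backend's own copy of the test (presumably under test/CodeGen/ARM64/). As a rough illustration only, not the actual ARM64 test, a redirected check for the first function might look like the sketch below; the triple and RUN line are assumptions.

```llvm
; Hypothetical sketch of an arm64-side equivalent of add8xi8; the real ARM64
; test may use a different file name, triple, and RUN line.
; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu | FileCheck %s

define <8 x i8> @add8xi8(<8 x i8> %A, <8 x i8> %B) {
; CHECK: add {{v[0-9]+}}.8b, {{v[0-9]+}}.8b, {{v[0-9]+}}.8b
  %tmp3 = add <8 x i8> %A, %B
  ret <8 x i8> %tmp3
}
```

Once the follow-up "git mv ARM64 AArch64" described above lands, such copies end up back under test/CodeGen/AArch64 with their RUN lines rewired.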

