| author | Jiangning Liu <jiangning.liu@arm.com> | 2013-11-19 01:46:48 +0000 |
|---|---|---|
| committer | Jiangning Liu <jiangning.liu@arm.com> | 2013-11-19 01:46:48 +0000 |
| commit | 0c0c1e85988e2dc76e36a6d9cf4ad9ae5ca75f73 (patch) | |
| tree | a1970e8d4be935fd59fee7afa358ca0d8ad46884 | |
| parent | fe916e20f27fe4b36e3eea10ac227e0a24ccb0bc (diff) | |
Implement AArch64 SISD intrinsics for vget_high and vget_low.
llvm-svn: 195074
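
For context, here is a minimal C sketch (not part of this commit; the helper names are illustrative) of the arm_neon.h intrinsics these patterns serve. Clang lowers vget_low_* and vget_high_* to the half-vector shufflevector forms exercised by the new test file, and with the patterns added below each one is expected to select to a single scalar dup of the corresponding D-register half, per the test's CHECK lines.

```c
/* Sketch only, not part of this commit: arm_neon.h intrinsics whose IR
 * the new NeonI_SDUP patterns match.  Clang expands these to the
 * shufflevector forms seen in neon-simd-vget.ll, so each helper below
 * should compile to a single "dup dN, vM.d[0]" or "dup dN, vM.d[1]"
 * (this is what the test's CHECK lines verify). */
#include <arm_neon.h>

int16x4_t   low_half(int16x8_t v)   { return vget_low_s16(v);  } /* dup d0, v0.d[0] */
int16x4_t   high_half(int16x8_t v)  { return vget_high_s16(v); } /* dup d0, v0.d[1] */
float32x2_t high_f32(float32x4_t v) { return vget_high_f32(v); } /* dup d0, v0.d[1] */
```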
| -rw-r--r-- | llvm/lib/Target/AArch64/AArch64InstrNEON.td | 37 |
| -rw-r--r-- | llvm/test/CodeGen/AArch64/neon-simd-vget.ll | 225 |
2 files changed, 258 insertions, 4 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64InstrNEON.td b/llvm/lib/Target/AArch64/AArch64InstrNEON.td
index 6332745085b..b6fa6fa8939 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrNEON.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrNEON.td
@@ -1590,16 +1590,31 @@ def Neon_High8H : PatFrag<(ops node:$in),
                           (extract_subvector (v8i16 node:$in), (iPTR 4))>;
 def Neon_High4S : PatFrag<(ops node:$in),
                           (extract_subvector (v4i32 node:$in), (iPTR 2))>;
-
+def Neon_High2D : PatFrag<(ops node:$in),
+                          (extract_subvector (v2i64 node:$in), (iPTR 1))>;
+def Neon_High4f : PatFrag<(ops node:$in),
+                          (extract_subvector (v4f32 node:$in), (iPTR 2))>;
+def Neon_High2d : PatFrag<(ops node:$in),
+                          (extract_subvector (v2f64 node:$in), (iPTR 1))>;
+
+def Neon_low16B : PatFrag<(ops node:$in),
+                          (v8i8 (extract_subvector (v16i8 node:$in),
+                                                   (iPTR 0)))>;
 def Neon_low8H : PatFrag<(ops node:$in),
                          (v4i16 (extract_subvector (v8i16 node:$in),
                                                    (iPTR 0)))>;
 def Neon_low4S : PatFrag<(ops node:$in),
                          (v2i32 (extract_subvector (v4i32 node:$in),
                                                    (iPTR 0)))>;
+def Neon_low2D : PatFrag<(ops node:$in),
+                         (v1i64 (extract_subvector (v2i64 node:$in),
+                                                   (iPTR 0)))>;
 def Neon_low4f : PatFrag<(ops node:$in),
                          (v2f32 (extract_subvector (v4f32 node:$in),
                                                    (iPTR 0)))>;
+def Neon_low2d : PatFrag<(ops node:$in),
+                         (v1f64 (extract_subvector (v2f64 node:$in),
+                                                   (iPTR 0)))>;
 
 def neon_uimm3_shift : Operand<i32>,
                          ImmLeaf<i32, [{return Imm < 8;}]> {
@@ -5136,9 +5151,8 @@ def DUPdv_D : NeonI_Scalar_DUP<"dup", "d", FPR64, VPR128, neon_uimm1_bare> {
 multiclass NeonI_Scalar_DUP_Elt_pattern<Instruction DUPI, ValueType ResTy,
                                         ValueType OpTy, Operand OpImm,
                                         ValueType OpNTy, ValueType ExTy, Operand OpNImm> {
-
-  def : Pat<(ResTy (vector_extract (OpTy VPR128:$Rn), OpImm:$Imm)),
-        (ResTy (DUPI (OpTy VPR128:$Rn), OpImm:$Imm))>;
+  def : Pat<(ResTy (vector_extract (OpTy VPR128:$Rn), OpImm:$Imm)),
+            (ResTy (DUPI (OpTy VPR128:$Rn), OpImm:$Imm))>;
 
   def : Pat<(ResTy (vector_extract (OpNTy VPR64:$Rn), OpNImm:$Imm)),
             (ResTy (DUPI
@@ -5146,6 +5160,21 @@ multiclass NeonI_Scalar_DUP_Elt_pattern<Instruction DUPI, ValueType ResTy,
               OpNImm:$Imm))>;
 }
 
+multiclass NeonI_SDUP<PatFrag GetLow, PatFrag GetHigh,
+                      ValueType ResTy, ValueType OpTy> {
+  def : Pat<(ResTy (GetLow VPR128:$Rn)),
+            (ResTy (DUPdv_D (OpTy VPR128:$Rn), 0))>;
+  def : Pat<(ResTy (GetHigh VPR128:$Rn)),
+            (ResTy (DUPdv_D (OpTy VPR128:$Rn), 1))>;
+}
+
+defm : NeonI_SDUP<Neon_low16B, Neon_High16B, v8i8, v16i8>;
+defm : NeonI_SDUP<Neon_low8H, Neon_High8H, v4i16, v8i16>;
+defm : NeonI_SDUP<Neon_low4S, Neon_High4S, v2i32, v4i32>;
+defm : NeonI_SDUP<Neon_low2D, Neon_High2D, v1i64, v2i64>;
+defm : NeonI_SDUP<Neon_low4f, Neon_High4f, v2f32, v4f32>;
+defm : NeonI_SDUP<Neon_low2d, Neon_High2d, v1f64, v2f64>;
+
 // Patterns for vector extract of FP data using scalar DUP instructions
 defm : NeonI_Scalar_DUP_Elt_pattern<DUPsv_S, f32, v4f32, neon_uimm2_bare,
                                     v2f32, v4f32, neon_uimm1_bare>;
diff --git a/llvm/test/CodeGen/AArch64/neon-simd-vget.ll b/llvm/test/CodeGen/AArch64/neon-simd-vget.ll
new file mode 100644
index 00000000000..f3897032e38
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/neon-simd-vget.ll
@@ -0,0 +1,225 @@
+; RUN: llc < %s -verify-machineinstrs -mtriple=aarch64-none-linux-gnu -mattr=+neon | FileCheck %s
+
+define <8 x i8> @test_vget_high_s8(<16 x i8> %a) {
+; CHECK: test_vget_high_s8:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+  %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_high_s16(<8 x i16> %a) {
+; CHECK: test_vget_high_s16:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+  %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  ret <4 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vget_high_s32(<4 x i32> %a) {
+; CHECK: test_vget_high_s32:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+  %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  ret <2 x i32> %shuffle.i
+}
+
+define <1 x i64> @test_vget_high_s64(<2 x i64> %a) {
+; CHECK: test_vget_high_s64:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+  %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> <i32 1>
+  ret <1 x i64> %shuffle.i
+}
+
+define <8 x i8> @test_vget_high_u8(<16 x i8> %a) {
+; CHECK: test_vget_high_u8:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+  %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_high_u16(<8 x i16> %a) {
+; CHECK: test_vget_high_u16:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+  %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  ret <4 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vget_high_u32(<4 x i32> %a) {
+; CHECK: test_vget_high_u32:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+  %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  ret <2 x i32> %shuffle.i
+}
+
+define <1 x i64> @test_vget_high_u64(<2 x i64> %a) {
+; CHECK: test_vget_high_u64:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+  %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> <i32 1>
+  ret <1 x i64> %shuffle.i
+}
+
+define <1 x i64> @test_vget_high_p64(<2 x i64> %a) {
+; CHECK: test_vget_high_p64:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+  %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> <i32 1>
+  ret <1 x i64> %shuffle.i
+}
+
+define <4 x i16> @test_vget_high_f16(<8 x i16> %a) {
+; CHECK: test_vget_high_f16:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+  %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  ret <4 x i16> %shuffle.i
+}
+
+define <2 x float> @test_vget_high_f32(<4 x float> %a) {
+; CHECK: test_vget_high_f32:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+  %shuffle.i = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 2, i32 3>
+  ret <2 x float> %shuffle.i
+}
+
+define <8 x i8> @test_vget_high_p8(<16 x i8> %a) {
+; CHECK: test_vget_high_p8:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+  %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_high_p16(<8 x i16> %a) {
+; CHECK: test_vget_high_p16:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+  %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  ret <4 x i16> %shuffle.i
+}
+
+define <1 x double> @test_vget_high_f64(<2 x double> %a) {
+; CHECK: test_vget_high_f64:
+; CHECK: dup d0, {{v[0-9]+}}.d[1]
+entry:
+  %shuffle.i = shufflevector <2 x double> %a, <2 x double> undef, <1 x i32> <i32 1>
+  ret <1 x double> %shuffle.i
+}
+
+define <8 x i8> @test_vget_low_s8(<16 x i8> %a) {
+; CHECK: test_vget_low_s8:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+  %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_low_s16(<8 x i16> %a) {
+; CHECK: test_vget_low_s16:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+  %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vget_low_s32(<4 x i32> %a) {
+; CHECK: test_vget_low_s32:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+  %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
+  ret <2 x i32> %shuffle.i
+}
+
+define <1 x i64> @test_vget_low_s64(<2 x i64> %a) {
+; CHECK: test_vget_low_s64:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+  %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> zeroinitializer
+  ret <1 x i64> %shuffle.i
+}
+
+define <8 x i8> @test_vget_low_u8(<16 x i8> %a) {
+; CHECK: test_vget_low_u8:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+  %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_low_u16(<8 x i16> %a) {
+; CHECK: test_vget_low_u16:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+  %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i16> %shuffle.i
+}
+
+define <2 x i32> @test_vget_low_u32(<4 x i32> %a) {
+; CHECK: test_vget_low_u32:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+  %shuffle.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
+  ret <2 x i32> %shuffle.i
+}
+
+define <1 x i64> @test_vget_low_u64(<2 x i64> %a) {
+; CHECK: test_vget_low_u64:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+  %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> zeroinitializer
+  ret <1 x i64> %shuffle.i
+}
+
+define <1 x i64> @test_vget_low_p64(<2 x i64> %a) {
+; CHECK: test_vget_low_p64:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+  %shuffle.i = shufflevector <2 x i64> %a, <2 x i64> undef, <1 x i32> zeroinitializer
+  ret <1 x i64> %shuffle.i
+}
+
+define <4 x i16> @test_vget_low_f16(<8 x i16> %a) {
+; CHECK: test_vget_low_f16:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+  %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i16> %shuffle.i
+}
+
+define <2 x float> @test_vget_low_f32(<4 x float> %a) {
+; CHECK: test_vget_low_f32:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+  %shuffle.i = shufflevector <4 x float> %a, <4 x float> undef, <2 x i32> <i32 0, i32 1>
+  ret <2 x float> %shuffle.i
+}
+
+define <8 x i8> @test_vget_low_p8(<16 x i8> %a) {
+; CHECK: test_vget_low_p8:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+  %shuffle.i = shufflevector <16 x i8> %a, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i8> %shuffle.i
+}
+
+define <4 x i16> @test_vget_low_p16(<8 x i16> %a) {
+; CHECK: test_vget_low_p16:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+  %shuffle.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+  ret <4 x i16> %shuffle.i
+}
+
+define <1 x double> @test_vget_low_f64(<2 x double> %a) {
+; CHECK: test_vget_low_f64:
+; CHECK: dup d0, {{v[0-9]+}}.d[0]
+entry:
+  %shuffle.i = shufflevector <2 x double> %a, <2 x double> undef, <1 x i32> zeroinitializer
+  ret <1 x double> %shuffle.i
+}

