diff options
author | Sander de Smalen <sander.desmalen@arm.com> | 2019-06-17 12:01:53 +0000 |
---|---|---|
committer | Sander de Smalen <sander.desmalen@arm.com> | 2019-06-17 12:01:53 +0000 |
commit | 74ac20158a068633e0a84f9f95e7242ceab6b61d (patch) | |
tree | 4c864a0d86d7b13b22d7c305d7dd787c832e1005 | |
parent | d5323f6a707e376aab20ebe7864a78fc38fcedaa (diff) | |
download | bcm5719-llvm-74ac20158a068633e0a84f9f95e7242ceab6b61d.tar.gz bcm5719-llvm-74ac20158a068633e0a84f9f95e7242ceab6b61d.zip |
Test forward references in IntrinsicEmitter on Neon LD(2|3|4)
This patch tests the forward-referencing added in D62995 by changing
some existing intrinsics to use forward referencing of overloadable
parameters, rather than backward referencing.
This patch changes the TableGen definition/implementation of
llvm.aarch64.neon.ld2 and llvm.aarch64.neon.ld2lane intrinsics
(and similar for ld3 and ld4). This change is intended to be
non-functional, since the behaviour of the intrinsics is
expected to be the same.
Reviewers: arsenm, dmgreen, RKSimon, greened, rnk
Reviewed By: RKSimon
Differential Revision: https://reviews.llvm.org/D63189
llvm-svn: 363546
-rw-r--r-- | llvm/include/llvm/IR/IntrinsicsAArch64.td | 20 | ||||
-rw-r--r-- | llvm/test/Verifier/intrinsic-arg-overloading-struct-ret.ll | 79 |
2 files changed, 89 insertions, 10 deletions
diff --git a/llvm/include/llvm/IR/IntrinsicsAArch64.td b/llvm/include/llvm/IR/IntrinsicsAArch64.td index e2a53c28257..720a7bdde23 100644 --- a/llvm/include/llvm/IR/IntrinsicsAArch64.td +++ b/llvm/include/llvm/IR/IntrinsicsAArch64.td @@ -462,12 +462,12 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.". [IntrArgMemOnly, NoCapture<2>]>; class AdvSIMD_2Vec_Load_Intrinsic - : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>], + : Intrinsic<[LLVMMatchType<0>, llvm_anyvector_ty], [LLVMAnyPointerType<LLVMMatchType<0>>], [IntrReadMem, IntrArgMemOnly]>; class AdvSIMD_2Vec_Load_Lane_Intrinsic - : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>], - [LLVMMatchType<0>, LLVMMatchType<0>, + : Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>], + [LLVMMatchType<0>, llvm_anyvector_ty, llvm_i64_ty, llvm_anyptr_ty], [IntrReadMem, IntrArgMemOnly]>; class AdvSIMD_2Vec_Store_Intrinsic @@ -480,12 +480,12 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.". [IntrArgMemOnly, NoCapture<3>]>; class AdvSIMD_3Vec_Load_Intrinsic - : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>], + : Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty], [LLVMAnyPointerType<LLVMMatchType<0>>], [IntrReadMem, IntrArgMemOnly]>; class AdvSIMD_3Vec_Load_Lane_Intrinsic - : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>], - [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, + : Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, llvm_i64_ty, llvm_anyptr_ty], [IntrReadMem, IntrArgMemOnly]>; class AdvSIMD_3Vec_Store_Intrinsic @@ -499,15 +499,15 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.". 
[IntrArgMemOnly, NoCapture<4>]>; class AdvSIMD_4Vec_Load_Intrinsic - : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, - LLVMMatchType<0>, LLVMMatchType<0>], + : Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, + LLVMMatchType<0>, llvm_anyvector_ty], [LLVMAnyPointerType<LLVMMatchType<0>>], [IntrReadMem, IntrArgMemOnly]>; class AdvSIMD_4Vec_Load_Lane_Intrinsic - : Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, + : Intrinsic<[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], [LLVMMatchType<0>, LLVMMatchType<0>, - LLVMMatchType<0>, LLVMMatchType<0>, + LLVMMatchType<0>, llvm_anyvector_ty, llvm_i64_ty, llvm_anyptr_ty], [IntrReadMem, IntrArgMemOnly]>; class AdvSIMD_4Vec_Store_Intrinsic diff --git a/llvm/test/Verifier/intrinsic-arg-overloading-struct-ret.ll b/llvm/test/Verifier/intrinsic-arg-overloading-struct-ret.ll new file mode 100644 index 00000000000..6fd4716d648 --- /dev/null +++ b/llvm/test/Verifier/intrinsic-arg-overloading-struct-ret.ll @@ -0,0 +1,79 @@ +; RUN: not opt -verify -S < %s 2>&1 | FileCheck %s + +; LD2 and LD2LANE + +; CHECK: Intrinsic has incorrect return type +; CHECK-NEXT: llvm.aarch64.neon.ld2.v4i32 +define { <4 x i64>, <4 x i32> } @test_ld2_ret(<4 x i32>* %ptr) { + %res = call { <4 x i64>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32(<4 x i32>* %ptr) + ret{ <4 x i64>, <4 x i32> } %res +} +declare { <4 x i64>, <4 x i32> } @llvm.aarch64.neon.ld2.v4i32(<4 x i32>* %ptr) + +; CHECK: Intrinsic has incorrect return type +; CHECK-NEXT: llvm.aarch64.neon.ld2lane.v4i64 +define { <4 x i64>, <4 x i32> } @test_ld2lane_ret(i8* %ptr, <4 x i64> %a, <4 x i64> %b) { + %res = call { <4 x i64>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i64(<4 x i64> %a, <4 x i64> %b, i64 0, i8* %ptr) + ret{ <4 x i64>, <4 x i32> } %res +} +declare { <4 x i64>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i64(<4 x i64>, <4 x i64>, i64, i8*) + +; CHECK: Intrinsic has incorrect argument type +; CHECK-NEXT: llvm.aarch64.neon.ld2lane.v4i32 +define { <4 x i32>, <4 x i32> } 
@test_ld2lane_arg(i8* %ptr, <4 x i64> %a, <4 x i32> %b) { + %res = call { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32(<4 x i64> %a, <4 x i32> %b, i64 0, i8* %ptr) + ret{ <4 x i32>, <4 x i32> } %res +} +declare { <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld2lane.v4i32(<4 x i64>, <4 x i32>, i64, i8*) + +; LD3 and LD3LANE + +; CHECK: Intrinsic has incorrect return type +; CHECK-NEXT: llvm.aarch64.neon.ld3.v4i32 +define { <4 x i32>, <4 x i64>, <4 x i32> } @test_ld3_ret(<4 x i32>* %ptr) { + %res = call { <4 x i32>, <4 x i64>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32(<4 x i32>* %ptr) + ret{ <4 x i32>, <4 x i64>, <4 x i32> } %res +} +declare { <4 x i32>, <4 x i64>, <4 x i32> } @llvm.aarch64.neon.ld3.v4i32(<4 x i32>* %ptr) + +; CHECK: Intrinsic has incorrect return type +; CHECK-NEXT: llvm.aarch64.neon.ld3lane.v4i64 +define { <4 x i64>, <4 x i32>, <4 x i64> } @test_ld3lane_ret(i8* %ptr, <4 x i64> %a, <4 x i64> %b, <4 x i64> %c) { + %res = call { <4 x i64>, <4 x i32>, <4 x i64> } @llvm.aarch64.neon.ld3lane.v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, i64 0, i8* %ptr) + ret{ <4 x i64>, <4 x i32>, <4 x i64> } %res +} +declare { <4 x i64>, <4 x i32>, <4 x i64> } @llvm.aarch64.neon.ld3lane.v4i64(<4 x i64>, <4 x i64>, <4 x i64>, i64, i8*) + +; CHECK: Intrinsic has incorrect argument type +; CHECK-NEXT: llvm.aarch64.neon.ld3lane.v4i32 +define { <4 x i32>, <4 x i32>, <4 x i32> } @test_ld3lane_arg(i8* %ptr, <4 x i64> %a, <4 x i32> %b, <4 x i32> %c) { + %res = call { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32(<4 x i64> %a, <4 x i32> %b, <4 x i32> %c, i64 0, i8* %ptr) + ret{ <4 x i32>, <4 x i32>, <4 x i32> } %res +} +declare { <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld3lane.v4i32(<4 x i64>, <4 x i32>, <4 x i32>, i64, i8*) + +; LD4 and LD4LANE + +; CHECK: Intrinsic has incorrect return type +; CHECK-NEXT: llvm.aarch64.neon.ld4.v4i32 +define { <4 x i32>, <4 x i32>, <4 x i64>, <4 x i32> } @test_ld4_ret(<4 x i32>* %ptr) { + %res = call 
{ <4 x i32>, <4 x i32>, <4 x i64>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32(<4 x i32>* %ptr) + ret{ <4 x i32>, <4 x i32>, <4 x i64>, <4 x i32> } %res +} +declare { <4 x i32>, <4 x i32>, <4 x i64>, <4 x i32> } @llvm.aarch64.neon.ld4.v4i32(<4 x i32>* %ptr) + +; CHECK: Intrinsic has incorrect return type +; CHECK-NEXT: llvm.aarch64.neon.ld4lane.v4i64 +define { <4 x i64>, <4 x i64>, <4 x i32>, <4 x i64> } @test_ld4lane_ret(i8* %ptr, <4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) { + %res = call { <4 x i64>, <4 x i64>, <4 x i32>, <4 x i64> } @llvm.aarch64.neon.ld4lane.v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d, i64 0, i8* %ptr) + ret{ <4 x i64>, <4 x i64>, <4 x i32>, <4 x i64> } %res +} +declare { <4 x i64>, <4 x i64>, <4 x i32>, <4 x i64> } @llvm.aarch64.neon.ld4lane.v4i64(<4 x i64>, <4 x i64>, <4 x i64>, <4 x i64>, i64, i8*) + +; CHECK: Intrinsic has incorrect argument type +; CHECK-NEXT: llvm.aarch64.neon.ld4lane.v4i32 +define { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @test_ld4lane_arg(i8* %ptr, <4 x i64> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d) { + %res = call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32(<4 x i64> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32> %d, i64 0, i8* %ptr) + ret{ <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } %res +} +declare { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> } @llvm.aarch64.neon.ld4lane.v4i32(<4 x i64>, <4 x i32>, <4 x i32>, <4 x i32>, i64, i8*) |