Diffstat (limited to 'llvm/test/CodeGen')
7 files changed, 464 insertions, 1 deletions
diff --git a/llvm/test/CodeGen/Hexagon/intrinsics/v65-gather-double.ll b/llvm/test/CodeGen/Hexagon/intrinsics/v65-gather-double.ll
new file mode 100644
index 00000000000..453f690f89f
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/v65-gather-double.ll
@@ -0,0 +1,60 @@
+; RUN: llc -mv65 -mattr=+hvxv65,hvx-length128b -march=hexagon -O2 < %s | FileCheck %s
+
+; CHECK-LABEL: V6_vgathermw_128B
+; CHECK: vtmp.w = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.w).w
+; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
+; CHECK-LABEL: V6_vgathermh_128B
+; CHECK: vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.h).h
+; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
+; CHECK-LABEL: V6_vgathermhw_128B
+; CHECK: vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h
+; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
+; CHECK-LABEL: V6_vgathermwq_128B
+; CHECK: if (q{{[0-3]+}}) vtmp.w = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.w).w
+; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
+; CHECK-LABEL: V6_vgathermhq_128B
+; CHECK: if (q{{[0-3]+}}) vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.h).h
+; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
+; CHECK-LABEL: V6_vgathermhwq_128B
+; CHECK: if (q{{[0-3]+}}) vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h
+; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
+
+declare void @llvm.hexagon.V6.vgathermw.128B(i8*, i32, i32, <32 x i32>)
+define void @V6_vgathermw_128B(i8* %a, i32 %b, i32 %c, <32 x i32> %d) {
+  call void @llvm.hexagon.V6.vgathermw.128B(i8* %a, i32 %b, i32 %c, <32 x i32> %d)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vgathermh.128B(i8*, i32, i32, <32 x i32>)
+define void @V6_vgathermh_128B(i8* %a, i32 %b, i32 %c, <32 x i32> %d) {
+  call void @llvm.hexagon.V6.vgathermh.128B(i8* %a, i32 %b, i32 %c, <32 x i32> %d)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vgathermhw.128B(i8*, i32, i32, <64 x i32>)
+define void @V6_vgathermhw_128B(i8* %a, i32 %b, i32 %c, <64 x i32> %d) {
+  call void @llvm.hexagon.V6.vgathermhw.128B(i8* %a, i32 %b, i32 %c, <64 x i32> %d)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vgathermwq.128B(i8*, <1024 x i1>, i32, i32, <32 x i32>)
+define void @V6_vgathermwq_128B(i8* %a, <32 x i32> %b, i32 %c, i32 %d, <32 x i32> %e) {
+  %1 = bitcast <32 x i32> %b to <1024 x i1>
+  call void @llvm.hexagon.V6.vgathermwq.128B(i8* %a, <1024 x i1> %1, i32 %c, i32 %d, <32 x i32> %e)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vgathermhq.128B(i8*, <1024 x i1>, i32, i32, <32 x i32>)
+define void @V6_vgathermhq_128B(i8* %a, <32 x i32> %b, i32 %c, i32 %d, <32 x i32> %e) {
+  %1 = bitcast <32 x i32> %b to <1024 x i1>
+  call void @llvm.hexagon.V6.vgathermhq.128B(i8* %a, <1024 x i1> %1, i32 %c, i32 %d, <32 x i32> %e)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vgathermhwq.128B(i8*, <1024 x i1>, i32, i32, <64 x i32>)
+define void @V6_vgathermhwq_128B(i8* %a, <32 x i32> %b, i32 %c, i32 %d, <64 x i32> %e) {
+  %1 = bitcast <32 x i32> %b to <1024 x i1>
+  call void @llvm.hexagon.V6.vgathermhwq.128B(i8* %a, <1024 x i1> %1, i32 %c, i32 %d, <64 x i32> %e)
+  ret void
+}
+
diff --git a/llvm/test/CodeGen/Hexagon/intrinsics/v65-gather.ll b/llvm/test/CodeGen/Hexagon/intrinsics/v65-gather.ll
new file mode 100644
index 00000000000..bc8591527c0
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/v65-gather.ll
@@ -0,0 +1,59 @@
+; RUN: llc -mv65 -mattr=+hvxv65,hvx-length64b -march=hexagon -O2 < %s | FileCheck %s
+
+; CHECK-LABEL: V6_vgathermw
+; CHECK: vtmp.w = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.w).w
+; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
+; CHECK-LABEL: V6_vgathermh
+; CHECK: vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.h).h
+; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
+; CHECK-LABEL: V6_vgathermhw
+; CHECK: vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h
+; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
+; CHECK-LABEL: V6_vgathermwq
+; CHECK: if (q{{[0-3]+}}) vtmp.w = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.w).w
+; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
+; CHECK-LABEL: V6_vgathermhq
+; CHECK: if (q{{[0-3]+}}) vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}.h).h
+; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
+; CHECK-LABEL: V6_vgathermhwq
+; CHECK: if (q{{[0-3]+}}) vtmp.h = vgather(r1,m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h
+; CHECK: vmem(r{{[0-9]+}}+#0) = vtmp.new
+
+declare void @llvm.hexagon.V6.vgathermw(i8*, i32, i32, <16 x i32>)
+define void @V6_vgathermw(i8* %a, i32 %b, i32 %c, <16 x i32> %d) {
+  call void @llvm.hexagon.V6.vgathermw(i8* %a, i32 %b, i32 %c, <16 x i32> %d)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vgathermh(i8*, i32, i32, <16 x i32>)
+define void @V6_vgathermh(i8* %a, i32 %b, i32 %c, <16 x i32> %d) {
+  call void @llvm.hexagon.V6.vgathermh(i8* %a, i32 %b, i32 %c, <16 x i32> %d)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vgathermhw(i8*, i32, i32, <32 x i32>)
+define void @V6_vgathermhw(i8* %a, i32 %b, i32 %c, <32 x i32> %d) {
+  call void @llvm.hexagon.V6.vgathermhw(i8* %a, i32 %b, i32 %c, <32 x i32> %d)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vgathermwq(i8*, <512 x i1>, i32, i32, <16 x i32>)
+define void @V6_vgathermwq(i8* %a, <16 x i32> %b, i32 %c, i32 %d, <16 x i32> %e) {
+  %1 = bitcast <16 x i32> %b to <512 x i1>
+  call void @llvm.hexagon.V6.vgathermwq(i8* %a, <512 x i1> %1, i32 %c, i32 %d, <16 x i32> %e)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vgathermhq(i8*, <512 x i1>, i32, i32, <16 x i32>)
+define void @V6_vgathermhq(i8* %a, <16 x i32> %b, i32 %c, i32 %d, <16 x i32> %e) {
+  %1 = bitcast <16 x i32> %b to <512 x i1>
+  call void @llvm.hexagon.V6.vgathermhq(i8* %a, <512 x i1> %1, i32 %c, i32 %d, <16 x i32> %e)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vgathermhwq(i8*, <512 x i1>, i32, i32, <32 x i32>)
+define void @V6_vgathermhwq(i8* %a, <16 x i32> %b, i32 %c, i32 %d, <32 x i32> %e) {
+  %1 = bitcast <16 x i32> %b to <512 x i1>
+  call void @llvm.hexagon.V6.vgathermhwq(i8* %a, <512 x i1> %1, i32 %c, i32 %d, <32 x i32> %e)
+  ret void
+}
diff --git a/llvm/test/CodeGen/Hexagon/intrinsics/v65-scatter-double.ll b/llvm/test/CodeGen/Hexagon/intrinsics/v65-scatter-double.ll
new file mode 100644
index 00000000000..40366fa3af1
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/v65-scatter-double.ll
@@ -0,0 +1,78 @@
+; RUN: llc -mv65 -mattr=+hvxv65,hvx-length128b -march=hexagon -O2 < %s | FileCheck %s
+
+; CHECK-LABEL: V6_vscattermw_128B
+; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.w).w = v{{[0-9]+}}
+; CHECK-LABEL: V6_vscattermh_128B
+; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.h).h = v{{[0-9]+}}
+; CHECK-LABEL: V6_vscattermw_add_128B
+; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.w).w += v{{[0-9]+}}
+; CHECK-LABEL: V6_vscattermh_add_128B
+; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.h).h += v{{[0-9]+}}
+; CHECK-LABEL: V6_vscattermwq_128B
+; CHECK: if (q{{[0-3]}}) vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.w).w = v{{[0-9]+}}
+; CHECK-LABEL: V6_vscattermhq_128B
+; CHECK: if (q{{[0-3]}}) vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.h).h = v{{[0-9]+}}
+; CHECK-LABEL: V6_vscattermhw_128B
+; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h = v{{[0-9]+}}
+; CHECK-LABEL: V6_vscattermhw_add_128B
+; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h += v{{[0-9]+}}
+; CHECK-LABEL: V6_vscattermhwq_128B
+; CHECK: if (q{{[0-3]}}) vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h = v{{[0-9]+}}
+
+
+declare void @llvm.hexagon.V6.vscattermw.128B(i32, i32, <32 x i32>, <32 x i32>)
+define void @V6_vscattermw_128B(i32 %a, i32 %b, <32 x i32> %c, <32 x i32> %d) {
+  call void @llvm.hexagon.V6.vscattermw.128B(i32 %a, i32 %b, <32 x i32> %c, <32 x i32> %d)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vscattermh.128B(i32, i32, <32 x i32>, <32 x i32>)
+define void @V6_vscattermh_128B(i32 %a, i32 %b, <32 x i32> %c, <32 x i32> %d) {
+  call void @llvm.hexagon.V6.vscattermh.128B(i32 %a, i32 %b, <32 x i32> %c, <32 x i32> %d)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vscattermw.add.128B(i32, i32, <32 x i32>, <32 x i32>)
+define void @V6_vscattermw_add_128B(i32 %a, i32 %b, <32 x i32> %c, <32 x i32> %d) {
+  call void @llvm.hexagon.V6.vscattermw.add.128B(i32 %a, i32 %b, <32 x i32> %c, <32 x i32> %d)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vscattermh.add.128B(i32, i32, <32 x i32>, <32 x i32>)
+define void @V6_vscattermh_add_128B(i32 %a, i32 %b, <32 x i32> %c, <32 x i32> %d) {
+  call void @llvm.hexagon.V6.vscattermh.add.128B(i32 %a, i32 %b, <32 x i32> %c, <32 x i32> %d)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vscattermwq.128B(<1024 x i1>, i32, i32, <32 x i32>, <32 x i32>)
+define void @V6_vscattermwq_128B(<32 x i32> %a, i32 %b, i32 %c, <32 x i32> %d, <32 x i32> %e) {
+  %1 = bitcast <32 x i32> %a to <1024 x i1>
+  call void @llvm.hexagon.V6.vscattermwq.128B(<1024 x i1> %1, i32 %b, i32 %c, <32 x i32> %d, <32 x i32> %e)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vscattermhq.128B(<1024 x i1>, i32, i32, <32 x i32>, <32 x i32>)
+define void @V6_vscattermhq_128B(<32 x i32> %a, i32 %b, i32 %c, <32 x i32> %d, <32 x i32> %e) {
+  %1 = bitcast <32 x i32> %a to <1024 x i1>
+  call void @llvm.hexagon.V6.vscattermhq.128B(<1024 x i1> %1, i32 %b, i32 %c, <32 x i32> %d, <32 x i32> %e)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vscattermhw.128B(i32, i32, <64 x i32>, <32 x i32>)
+define void @V6_vscattermhw_128B(i32 %a, i32 %b, <64 x i32> %c, <32 x i32> %d) {
+  call void @llvm.hexagon.V6.vscattermhw.128B(i32 %a, i32 %b, <64 x i32> %c, <32 x i32> %d)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vscattermhw.add.128B(i32, i32, <64 x i32>, <32 x i32>)
+define void @V6_vscattermhw_add_128B(i32 %a, i32 %b, <64 x i32> %c, <32 x i32> %d) {
+  call void @llvm.hexagon.V6.vscattermhw.add.128B(i32 %a, i32 %b, <64 x i32> %c, <32 x i32> %d)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vscattermhwq.128B(<1024 x i1>, i32, i32, <64 x i32>, <32 x i32>)
+define void @V6_vscattermhwq_128B(<32 x i32> %a, i32 %b, i32 %c, <64 x i32> %d, <32 x i32> %e) {
+  %1 = bitcast <32 x i32> %a to <1024 x i1>
+  call void @llvm.hexagon.V6.vscattermhwq.128B(<1024 x i1> %1, i32 %b, i32 %c, <64 x i32> %d, <32 x i32> %e)
+  ret void
+}
diff --git a/llvm/test/CodeGen/Hexagon/intrinsics/v65-scatter-gather.ll b/llvm/test/CodeGen/Hexagon/intrinsics/v65-scatter-gather.ll
new file mode 100644
index 00000000000..2ebd22bdfb4
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/v65-scatter-gather.ll
@@ -0,0 +1,32 @@
+; RUN: llc -mv65 -mattr=+hvxv65,hvx-length64b -march=hexagon -O2 < %s | FileCheck %s
+; RUN: llc -mv65 -mattr=+hvxv65,hvx-length64b -march=hexagon -O2 -disable-packetizer < %s | FileCheck %s
+; RUN: llc -mv65 -mattr=+hvxv65,hvx-length64b -march=hexagon -O0 < %s | FileCheck %s
+
+; CHECK: vtmp.h = vgather(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.h).h
+; CHECK-NEXT: vmem(r{{[0-9]+}}+#0) = vtmp.new
+; CHECK-NEXT: }
+
+declare i32 @add_translation_extended(i32, i8*, i64, i32, i32, i32, i32, i32, i32) local_unnamed_addr
+
+; Function Attrs: nounwind
+define i32 @main() local_unnamed_addr {
+entry:
+  %hvx_vector = alloca <16 x i32>, align 64
+  %0 = bitcast <16 x i32>* %hvx_vector to i8*
+  %call.i = tail call i32 @add_translation_extended(i32 1, i8* inttoptr (i32 -668991488 to i8*), i64 3625975808, i32 16, i32 15, i32 0, i32 0, i32 0, i32 3)
+  %1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 1)
+  %2 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 2)
+  tail call void @llvm.hexagon.V6.vscattermh.add(i32 -668991488, i32 1023, <16 x i32> %1, <16 x i32> %2)
+  call void @llvm.hexagon.V6.vgathermh(i8* %0, i32 -668991488, i32 1023, <16 x i32> %1)
+  ret i32 0
+}
+
+; Function Attrs: nounwind writeonly
+declare void @llvm.hexagon.V6.vscattermh.add(i32, i32, <16 x i32>, <16 x i32>)
+
+; Function Attrs: nounwind readnone
+declare <16 x i32> @llvm.hexagon.V6.lvsplatw(i32)
+
+; Function Attrs: argmemonly nounwind
+declare void @llvm.hexagon.V6.vgathermh(i8*, i32, i32, <16 x i32>)
+
diff --git a/llvm/test/CodeGen/Hexagon/intrinsics/v65-scatter.ll b/llvm/test/CodeGen/Hexagon/intrinsics/v65-scatter.ll
new file mode 100644
index 00000000000..405211c5dfa
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/v65-scatter.ll
@@ -0,0 +1,78 @@
+; RUN: llc -mv65 -mattr=+hvxv65,hvx-length64b -march=hexagon -O2 < %s | FileCheck %s
+
+; CHECK-LABEL: V6_vscattermw
+; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.w).w = v{{[0-9]+}}
+; CHECK-LABEL: V6_vscattermh
+; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.h).h = v{{[0-9]+}}
+; CHECK-LABEL: V6_vscattermw_add
+; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.w).w += v{{[0-9]+}}
+; CHECK-LABEL: V6_vscattermh_add
+; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.h).h += v{{[0-9]+}}
+; CHECK-LABEL: V6_vscattermwq
+; CHECK: if (q{{[0-3]}}) vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.w).w = v{{[0-9]+}}
+; CHECK-LABEL: V6_vscattermhq
+; CHECK: if (q{{[0-3]}}) vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}.h).h = v{{[0-9]+}}
+; CHECK-LABEL: V6_vscattermhw
+; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h = v{{[0-9]+}}
+; CHECK-LABEL: V6_vscattermhw_add
+; CHECK: vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h += v{{[0-9]+}}
+; CHECK-LABEL: V6_vscattermhwq
+; CHECK: if (q{{[0-3]}}) vscatter(r{{[0-9]+}},m{{[0-9]+}},v{{[0-9]+}}:{{[0-9]+}}.w).h = v{{[0-9]+}}
+
+
+declare void @llvm.hexagon.V6.vscattermw(i32, i32, <16 x i32>, <16 x i32>)
+define void @V6_vscattermw(i32 %a, i32 %b, <16 x i32> %c, <16 x i32> %d) {
+  call void @llvm.hexagon.V6.vscattermw(i32 %a, i32 %b, <16 x i32> %c, <16 x i32> %d)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vscattermh(i32, i32, <16 x i32>, <16 x i32>)
+define void @V6_vscattermh(i32 %a, i32 %b, <16 x i32> %c, <16 x i32> %d) {
+  call void @llvm.hexagon.V6.vscattermh(i32 %a, i32 %b, <16 x i32> %c, <16 x i32> %d)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vscattermw.add(i32, i32, <16 x i32>, <16 x i32>)
+define void @V6_vscattermw_add(i32 %a, i32 %b, <16 x i32> %c, <16 x i32> %d) {
+  call void @llvm.hexagon.V6.vscattermw.add(i32 %a, i32 %b, <16 x i32> %c, <16 x i32> %d)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vscattermh.add(i32, i32, <16 x i32>, <16 x i32>)
+define void @V6_vscattermh_add(i32 %a, i32 %b, <16 x i32> %c, <16 x i32> %d) {
+  call void @llvm.hexagon.V6.vscattermh.add(i32 %a, i32 %b, <16 x i32> %c, <16 x i32> %d)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vscattermwq(<512 x i1>, i32, i32, <16 x i32>, <16 x i32>)
+define void @V6_vscattermwq(<16 x i32> %a, i32 %b, i32 %c, <16 x i32> %d, <16 x i32> %e) {
+  %1 = bitcast <16 x i32> %a to <512 x i1>
+  call void @llvm.hexagon.V6.vscattermwq(<512 x i1> %1, i32 %b, i32 %c, <16 x i32> %d, <16 x i32> %e)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vscattermhq(<512 x i1>, i32, i32, <16 x i32>, <16 x i32>)
+define void @V6_vscattermhq(<16 x i32> %a, i32 %b, i32 %c, <16 x i32> %d, <16 x i32> %e) {
+  %1 = bitcast <16 x i32> %a to <512 x i1>
+  call void @llvm.hexagon.V6.vscattermhq(<512 x i1> %1, i32 %b, i32 %c, <16 x i32> %d, <16 x i32> %e)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vscattermhw(i32, i32, <32 x i32>, <16 x i32>)
+define void @V6_vscattermhw(i32 %a, i32 %b, <32 x i32> %c, <16 x i32> %d) {
+  call void @llvm.hexagon.V6.vscattermhw(i32 %a, i32 %b, <32 x i32> %c, <16 x i32> %d)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vscattermhw.add(i32, i32, <32 x i32>, <16 x i32>)
+define void @V6_vscattermhw_add(i32 %a, i32 %b, <32 x i32> %c, <16 x i32> %d) {
+  call void @llvm.hexagon.V6.vscattermhw.add(i32 %a, i32 %b, <32 x i32> %c, <16 x i32> %d)
+  ret void
+}
+
+declare void @llvm.hexagon.V6.vscattermhwq(<512 x i1>, i32, i32, <32 x i32>, <16 x i32>)
+define void @V6_vscattermhwq(<16 x i32> %a, i32 %b, i32 %c, <32 x i32> %d, <16 x i32> %e) {
+  %1 = bitcast <16 x i32> %a to <512 x i1>
+  call void @llvm.hexagon.V6.vscattermhwq(<512 x i1> %1, i32 %b, i32 %c, <32 x i32> %d, <16 x i32> %e)
+  ret void
+}
diff --git a/llvm/test/CodeGen/Hexagon/intrinsics/v65.ll b/llvm/test/CodeGen/Hexagon/intrinsics/v65.ll
new file mode 100644
index 00000000000..8d503f11800
--- /dev/null
+++ b/llvm/test/CodeGen/Hexagon/intrinsics/v65.ll
@@ -0,0 +1,156 @@
+; RUN: llc -mv65 -mattr=+hvxv65,hvx-length64b -march=hexagon -O0 < %s | FileCheck %s
+; RUN: llc -mv65 -mattr=+hvxv65,hvx-length64b -march=hexagon -O0 < %s | FileCheck -check-prefix=CHECK-CALL %s
+
+; CHECK-CALL-NOT: call
+
+declare i32 @llvm.hexagon.A6.vcmpbeq.notany(i64, i64)
+define i32 @A6_vcmpbeq_notany(i64 %a, i64 %b) {
+  %c = call i32 @llvm.hexagon.A6.vcmpbeq.notany(i64 %a, i64 %b)
+  ret i32 %c
+}
+; CHECK = !any8(vcmpb.eq(r1:0,r3:2))
+
+declare <16 x i32> @llvm.hexagon.V6.vabsb(<16 x i32>)
+define <16 x i32> @V6_vabsb(<16 x i32> %a) {
+  %b = call <16 x i32> @llvm.hexagon.V6.vabsb(<16 x i32> %a)
+  ret <16 x i32> %b
+}
+; CHECK: = vabs(v0.b)
+
+declare <16 x i32> @llvm.hexagon.V6.vabsb.sat(<16 x i32>)
+define <16 x i32> @V6_vabsb_sat(<16 x i32> %a) {
+  %b = call <16 x i32> @llvm.hexagon.V6.vabsb.sat(<16 x i32> %a)
+  ret <16 x i32> %b
+}
+; CHECK: = vabs(v0.b):sat
+
+declare <16 x i32> @llvm.hexagon.V6.vaslh.acc(<16 x i32>, <16 x i32>, i32)
+define <16 x i32> @V6_vaslh_acc(<16 x i32> %a, <16 x i32> %b, i32 %c) {
+  %d = call <16 x i32> @llvm.hexagon.V6.vaslh.acc(<16 x i32> %a, <16 x i32> %b, i32 %c)
+  ret <16 x i32> %d
+}
+; CHECK: += vasl(v1.h,r0)
+
+declare <16 x i32> @llvm.hexagon.V6.vasrh.acc(<16 x i32>, <16 x i32>, i32)
+define <16 x i32> @V6_vasrh_acc(<16 x i32> %a, <16 x i32> %b, i32 %c) {
+  %d = call <16 x i32> @llvm.hexagon.V6.vasrh.acc(<16 x i32> %a, <16 x i32> %b, i32 %c)
+  ret <16 x i32> %d
+}
+; CHECK: += vasr(v1.h,r0)
+
+declare <16 x i32> @llvm.hexagon.V6.vasruwuhsat(<16 x i32>, <16 x i32>, i32)
+define <16 x i32> @V6_vasruwuhsat(<16 x i32> %a, <16 x i32> %b, i32 %c) {
+  %d = call <16 x i32> @llvm.hexagon.V6.vasruwuhsat(<16 x i32> %a, <16 x i32> %b, i32 %c)
+  ret <16 x i32> %d
+}
+; CHECK: = vasr(v0.uw,v1.uw,r0):sat
+
+declare <16 x i32> @llvm.hexagon.V6.vasruhubsat(<16 x i32>, <16 x i32>, i32)
+define <16 x i32> @V6_vasruhubsat(<16 x i32> %a, <16 x i32> %b, i32 %c) {
+  %d = call <16 x i32> @llvm.hexagon.V6.vasruhubsat(<16 x i32> %a, <16 x i32> %b, i32 %c)
+  ret <16 x i32> %d
+}
+; CHECK: = vasr(v0.uh,v1.uh,r0):sat
+
+declare <16 x i32> @llvm.hexagon.V6.vasruhubrndsat(<16 x i32>, <16 x i32>, i32)
+define <16 x i32> @V6_vasruhubrndsat(<16 x i32> %a, <16 x i32> %b, i32 %c) {
+  %d = call <16 x i32> @llvm.hexagon.V6.vasruhubrndsat(<16 x i32> %a, <16 x i32> %b, i32 %c)
+  ret <16 x i32> %d
+}
+; CHECK: = vasr(v0.uh,v1.uh,r0):rnd:sat
+
+declare <16 x i32> @llvm.hexagon.V6.vavguw(<16 x i32>, <16 x i32>)
+define <16 x i32> @V6_vavguw(<16 x i32> %a, <16 x i32> %b) {
+  %c = call <16 x i32> @llvm.hexagon.V6.vavguw(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %c
+}
+; CHECK: = vavg(v0.uw,v1.uw)
+
+declare <16 x i32> @llvm.hexagon.V6.vavguwrnd(<16 x i32>, <16 x i32>)
+define <16 x i32> @V6_vavguwrnd(<16 x i32> %a, <16 x i32> %b) {
+  %c = call <16 x i32> @llvm.hexagon.V6.vavguwrnd(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %c
+}
+; CHECK: = vavg(v0.uw,v1.uw):rnd
+
+declare <16 x i32> @llvm.hexagon.V6.vavgb(<16 x i32>, <16 x i32>)
+define <16 x i32> @V6_vavgb(<16 x i32> %a, <16 x i32> %b) {
+  %c = call <16 x i32> @llvm.hexagon.V6.vavgb(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %c
+}
+; CHECK: = vavg(v0.b,v1.b)
+
+declare <16 x i32> @llvm.hexagon.V6.vavgbrnd(<16 x i32>, <16 x i32>)
+define <16 x i32> @V6_vavgbrnd(<16 x i32> %a, <16 x i32> %b) {
+  %c = call <16 x i32> @llvm.hexagon.V6.vavgbrnd(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %c
+}
+; CHECK: = vavg(v0.b,v1.b):rnd
+
+declare <16 x i32> @llvm.hexagon.V6.vnavgb(<16 x i32>, <16 x i32>)
+define <16 x i32> @V6_vnavgb(<16 x i32> %a, <16 x i32> %b) {
+  %c = call <16 x i32> @llvm.hexagon.V6.vnavgb(<16 x i32> %a, <16 x i32> %b)
+  ret <16 x i32> %c
+}
+; CHECK: = vnavg(v0.b,v1.b)
+
+declare <32 x i32> @llvm.hexagon.V6.vmpabuu(<32 x i32>, i32)
+define <32 x i32> @V6_vmpabuu(<32 x i32> %a, i32 %b) {
+  %c = call <32 x i32> @llvm.hexagon.V6.vmpabuu(<32 x i32> %a, i32 %b)
+  ret <32 x i32> %c
+}
+; CHECK: = vmpa(v1:0.ub,r0.ub)
+
+declare <32 x i32> @llvm.hexagon.V6.vmpabuu.acc(<32 x i32>, <32 x i32>, i32)
+define <32 x i32> @V6_vmpabuu_acc(<32 x i32> %a, <32 x i32> %b, i32 %c) {
+  %d = call <32 x i32> @llvm.hexagon.V6.vmpabuu.acc(<32 x i32> %a, <32 x i32> %b, i32 %c)
+  ret <32 x i32> %d
+}
+; CHECK: += vmpa(v3:2.ub,r0.ub)
+
+declare <16 x i32> @llvm.hexagon.V6.vmpauhuhsat(<16 x i32>, <16 x i32>, i64)
+define <16 x i32> @V6_vmpauhuhsat(<16 x i32> %a, <16 x i32> %b, i64 %c) {
+  %d = call <16 x i32> @llvm.hexagon.V6.vmpauhuhsat(<16 x i32> %a, <16 x i32> %b, i64 %c)
+  ret <16 x i32> %d
+}
+; CHECK: = vmpa(v0.h,v1.uh,r1:0.uh):sat
+
+declare <16 x i32> @llvm.hexagon.V6.vmpsuhuhsat(<16 x i32>, <16 x i32>, i64)
+define <16 x i32> @V6_vmpsuhuhsat(<16 x i32> %a, <16 x i32> %b, i64 %c) {
+  %d = call <16 x i32> @llvm.hexagon.V6.vmpsuhuhsat(<16 x i32> %a, <16 x i32> %b, i64 %c)
+  ret <16 x i32> %d
+}
+; CHECK: = vmps(v0.h,v1.uh,r1:0.uh):sat
+
+declare <32 x i32> @llvm.hexagon.V6.vmpyh.acc(<32 x i32>, <16 x i32>, i32)
+define <32 x i32> @V6_vmpyh_acc(<32 x i32> %a, <16 x i32> %b, i32 %c) {
+  %d = call <32 x i32> @llvm.hexagon.V6.vmpyh.acc(<32 x i32> %a, <16 x i32> %b, i32 %c)
+  ret <32 x i32> %d
+}
+; CHECK: += vmpy(v2.h,r0.h)
+
+declare <16 x i32> @llvm.hexagon.V6.vmpyuhe(<16 x i32>, i32)
+define <16 x i32> @V6_vmpyuhe(<16 x i32> %a, i32 %b) {
+  %c = call <16 x i32> @llvm.hexagon.V6.vmpyuhe(<16 x i32> %a, i32 %b)
+  ret <16 x i32> %c
+}
+; CHECK: = vmpye(v0.uh,r0.uh)
+
+;declare <16 x i32> @llvm.hexagon.V6.vprefixqb(<512 x i1>)
+;define <16 x i32> @V6_vprefixqb(<512 x i1> %a) {
+;  %b = call <16 x i32> @llvm.hexagon.V6.vprefixqb(<512 x i1> %a)
+;  ret <16 x i32> %b
+;}
+
+;declare <16 x i32> @llvm.hexagon.V6.vprefixqh(<512 x i1>)
+;define <16 x i32> @V6_vprefixqh(<512 x i1> %a) {
+;  %b = call <16 x i32> @llvm.hexagon.V6.vprefixqh(<512 x i1> %a)
+;  ret <16 x i32> %b
+;}
+
+;declare <16 x i32> @llvm.hexagon.V6.vprefixqw(<512 x i1>)
+;define <16 x i32> @V6_vprefixqw(<512 x i1> %a) {
+;  %b = call <16 x i32> @llvm.hexagon.V6.vprefixqw(<512 x i1> %a)
+;  ret <16 x i32> %b
+;}
+
diff --git a/llvm/test/CodeGen/Hexagon/livephysregs-lane-masks.mir b/llvm/test/CodeGen/Hexagon/livephysregs-lane-masks.mir
index b2e1968bb59..82be6b21d5e 100644
--- a/llvm/test/CodeGen/Hexagon/livephysregs-lane-masks.mir
+++ b/llvm/test/CodeGen/Hexagon/livephysregs-lane-masks.mir
@@ -36,5 +36,5 @@ body: |
 
   bb.2:
     liveins: %r0
    %d8 = L2_loadrd_io %r29, 8
-    L4_return implicit-def %r29, implicit-def %r30, implicit-def %r31, implicit-def %pc, implicit %r30
+    %d15 = L4_return %r29, implicit-def %r29, implicit-def %pc, implicit %r30, implicit %framekey
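Each new .ll file above follows the usual lit/FileCheck pattern: lit executes the RUN line with %s substituted by the test file's own path, and FileCheck matches the generated Hexagon assembly against the CHECK lines. As a minimal sketch (the path assumes the test is run from the root of an LLVM checkout; the exact flags are taken verbatim from the RUN line of v65-gather.ll), the first gather test could be run by hand like this:

  llc -mv65 -mattr=+hvxv65,hvx-length64b -march=hexagon -O2 < llvm/test/CodeGen/Hexagon/intrinsics/v65-gather.ll | FileCheck llvm/test/CodeGen/Hexagon/intrinsics/v65-gather.ll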