author    Simon Pilgrim <llvm-dev@redking.me.uk>  2016-03-30 14:14:00 +0000
committer Simon Pilgrim <llvm-dev@redking.me.uk>  2016-03-30 14:14:00 +0000
commit    b87ffe851903721481d953d37e7c654cd79cc22e (patch)
tree      3c0cfc56bd8626f9da3c8e08852a07c9908bbe0f /llvm/test/CodeGen/X86/vector-bitreverse.ll
parent    832a6790f66c5ec637b3be95703fedf520a38b33 (diff)
[X86][XOP] BITREVERSE lowering using VPPERM
In addition to shuffling the bytes of a 128-bit vector, XOP's VPPERM can apply a 'permute operation' to each selected byte - here we use its bit-reverse operation to lower BITREVERSE in a single instruction.

llvm-svn: 264870
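For reference, the selector constants in the tests below can be read with a small model. This is only a sketch written from the AMD XOP documentation as I understand it, not code from this commit: each selector byte picks one of the 32 input bytes with its low 5 bits and applies the operation encoded in bits 7:5; operation 0b010 emits the selected byte with its bits reversed, which is why the v16i8 case uses the constants 80..95 (0x50..0x5F).

#include <stdint.h>
#include <stdio.h>

/* Hypothetical reference model of one VPPERM lane (an assumption based on
 * the XOP documentation, not LLVM code): reverse the bit order of a byte. */
static uint8_t bitrev8(uint8_t b) {
  uint8_t r = 0;
  for (int i = 0; i < 8; ++i)
    r |= (uint8_t)(((b >> i) & 1u) << (7 - i));
  return r;
}

/* Model VPPERM: low 5 bits of each selector byte choose an input byte
 * (0-15 from src1, 16-31 from src2); bits 7:5 choose the operation. */
static void vpperm_model(const uint8_t src1[16], const uint8_t src2[16],
                         const uint8_t sel[16], uint8_t dst[16]) {
  for (int i = 0; i < 16; ++i) {
    uint8_t s = sel[i];
    uint8_t byte = (s & 0x10) ? src2[s & 0x0F] : src1[s & 0x0F];
    switch (s >> 5) {
    case 0:  dst[i] = byte;          break; /* plain copy            */
    case 2:  dst[i] = bitrev8(byte); break; /* bit-reversed copy     */
    default: dst[i] = byte;          break; /* other ops not modeled */
    }
  }
}

int main(void) {
  /* Selector used by test_bitreverse_v16i8 below: bit-reverse each byte of
   * the second source in place (values 0x50..0x5F). */
  uint8_t sel[16], in[16], out[16];
  for (int i = 0; i < 16; ++i) { sel[i] = (uint8_t)(0x50 + i); in[i] = (uint8_t)(i * 17); }
  vpperm_model(in, in, sel, out);
  for (int i = 0; i < 16; ++i)
    printf("%02x -> %02x\n", in[i], out[i]);
  return 0;
}

Feeding the i16 selector from the tests ([81,80,83,82,...]) through the same model shows the byte swap combined with the per-byte bit reversal, which together reverse all 16 bits of each element; the i32 and i64 selectors extend the same pattern.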
Diffstat (limited to 'llvm/test/CodeGen/X86/vector-bitreverse.ll')
-rw-r--r--  llvm/test/CodeGen/X86/vector-bitreverse.ll | 186
1 file changed, 186 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/vector-bitreverse.ll b/llvm/test/CodeGen/X86/vector-bitreverse.ll
new file mode 100644
index 00000000000..c5b830001a0
--- /dev/null
+++ b/llvm/test/CodeGen/X86/vector-bitreverse.ll
@@ -0,0 +1,186 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
+
+define i8 @test_bitreverse_i8(i8 %a) {
+; ALL-LABEL: test_bitreverse_i8:
+; ALL: # BB#0:
+; ALL-NEXT: vmovd %edi, %xmm0
+; ALL-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
+; ALL-NEXT: vpextrb $0, %xmm0, %eax
+; ALL-NEXT: retq
+ %b = call i8 @llvm.bitreverse.i8(i8 %a)
+ ret i8 %b
+}
+
+define i16 @test_bitreverse_i16(i16 %a) {
+; ALL-LABEL: test_bitreverse_i16:
+; ALL: # BB#0:
+; ALL-NEXT: vmovd %edi, %xmm0
+; ALL-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
+; ALL-NEXT: vmovd %xmm0, %eax
+; ALL-NEXT: retq
+ %b = call i16 @llvm.bitreverse.i16(i16 %a)
+ ret i16 %b
+}
+
+define i32 @test_bitreverse_i32(i32 %a) {
+; ALL-LABEL: test_bitreverse_i32:
+; ALL: # BB#0:
+; ALL-NEXT: vmovd %edi, %xmm0
+; ALL-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
+; ALL-NEXT: vmovd %xmm0, %eax
+; ALL-NEXT: retq
+ %b = call i32 @llvm.bitreverse.i32(i32 %a)
+ ret i32 %b
+}
+
+define i64 @test_bitreverse_i64(i64 %a) {
+; ALL-LABEL: test_bitreverse_i64:
+; ALL: # BB#0:
+; ALL-NEXT: vmovq %rdi, %xmm0
+; ALL-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
+; ALL-NEXT: vmovq %xmm0, %rax
+; ALL-NEXT: retq
+ %b = call i64 @llvm.bitreverse.i64(i64 %a)
+ ret i64 %b
+}
+
+define <16 x i8> @test_bitreverse_v16i8(<16 x i8> %a) {
+; ALL-LABEL: test_bitreverse_v16i8:
+; ALL: # BB#0:
+; ALL-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
+; ALL-NEXT: retq
+ %b = call <16 x i8> @llvm.bitreverse.v16i8(<16 x i8> %a)
+ ret <16 x i8> %b
+}
+
+define <8 x i16> @test_bitreverse_v8i16(<8 x i16> %a) {
+; ALL-LABEL: test_bitreverse_v8i16:
+; ALL: # BB#0:
+; ALL-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
+; ALL-NEXT: retq
+ %b = call <8 x i16> @llvm.bitreverse.v8i16(<8 x i16> %a)
+ ret <8 x i16> %b
+}
+
+define <4 x i32> @test_bitreverse_v4i32(<4 x i32> %a) {
+; ALL-LABEL: test_bitreverse_v4i32:
+; ALL: # BB#0:
+; ALL-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
+; ALL-NEXT: retq
+ %b = call <4 x i32> @llvm.bitreverse.v4i32(<4 x i32> %a)
+ ret <4 x i32> %b
+}
+
+define <2 x i64> @test_bitreverse_v2i64(<2 x i64> %a) {
+; ALL-LABEL: test_bitreverse_v2i64:
+; ALL: # BB#0:
+; ALL-NEXT: vpperm {{.*}}(%rip), %xmm0, %xmm0, %xmm0
+; ALL-NEXT: retq
+ %b = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %a)
+ ret <2 x i64> %b
+}
+
+define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) {
+; XOPAVX1-LABEL: test_bitreverse_v32i8:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95]
+; XOPAVX1-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: test_bitreverse_v32i8:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95]
+; XOPAVX2-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
+; XOPAVX2-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
+ %b = call <32 x i8> @llvm.bitreverse.v32i8(<32 x i8> %a)
+ ret <32 x i8> %b
+}
+
+define <16 x i16> @test_bitreverse_v16i16(<16 x i16> %a) {
+; XOPAVX1-LABEL: test_bitreverse_v16i16:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [81,80,83,82,85,84,87,86,89,88,91,90,93,92,95,94]
+; XOPAVX1-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: test_bitreverse_v16i16:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [81,80,83,82,85,84,87,86,89,88,91,90,93,92,95,94]
+; XOPAVX2-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
+; XOPAVX2-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
+ %b = call <16 x i16> @llvm.bitreverse.v16i16(<16 x i16> %a)
+ ret <16 x i16> %b
+}
+
+define <8 x i32> @test_bitreverse_v8i32(<8 x i32> %a) {
+; XOPAVX1-LABEL: test_bitreverse_v8i32:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [83,82,81,80,87,86,85,84,91,90,89,88,95,94,93,92]
+; XOPAVX1-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: test_bitreverse_v8i32:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [83,82,81,80,87,86,85,84,91,90,89,88,95,94,93,92]
+; XOPAVX2-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
+; XOPAVX2-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
+ %b = call <8 x i32> @llvm.bitreverse.v8i32(<8 x i32> %a)
+ ret <8 x i32> %b
+}
+
+define <4 x i64> @test_bitreverse_v4i64(<4 x i64> %a) {
+; XOPAVX1-LABEL: test_bitreverse_v4i64:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [87,86,85,84,83,82,81,80,95,94,93,92,91,90,89,88]
+; XOPAVX1-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
+; XOPAVX1-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: test_bitreverse_v4i64:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [87,86,85,84,83,82,81,80,95,94,93,92,91,90,89,88]
+; XOPAVX2-NEXT: vpperm %xmm2, %xmm1, %xmm0, %xmm1
+; XOPAVX2-NEXT: vpperm %xmm2, %xmm0, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
+ %b = call <4 x i64> @llvm.bitreverse.v4i64(<4 x i64> %a)
+ ret <4 x i64> %b
+}
+
+declare i8 @llvm.bitreverse.i8(i8) readnone
+declare i16 @llvm.bitreverse.i16(i16) readnone
+declare i32 @llvm.bitreverse.i32(i32) readnone
+declare i64 @llvm.bitreverse.i64(i64) readnone
+
+declare <16 x i8> @llvm.bitreverse.v16i8(<16 x i8>) readnone
+declare <8 x i16> @llvm.bitreverse.v8i16(<8 x i16>) readnone
+declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>) readnone
+declare <2 x i64> @llvm.bitreverse.v2i64(<2 x i64>) readnone
+
+declare <32 x i8> @llvm.bitreverse.v32i8(<32 x i8>) readnone
+declare <16 x i16> @llvm.bitreverse.v16i16(<16 x i16>) readnone
+declare <8 x i32> @llvm.bitreverse.v8i32(<8 x i32>) readnone
+declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>) readnone