From 9681dc9627b1ea50fd90cdea84290ddc021d3fca Mon Sep 17 00:00:00 2001
From: Kai Luo
Date: Mon, 23 Dec 2019 02:06:40 +0000
Subject: [PowerPC] Exploit `vrl(b|h|w|d)` to perform vector rotation

Summary:
Currently, we set the legalization action of vector `ISD::ROTL` to
`Expand` in `PPCISelLowering`, so vector rotates are expanded into
shift-and-or sequences. However, we can exploit `vrl(b|h|w|d)` to lower
`ISD::ROTL` directly; a sketch of the lowering change follows this
message.

Differential Revision: https://reviews.llvm.org/D71324
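The diff below adds only the regression test; the matching change in
llvm/lib/Target/PowerPC/PPCISelLowering.cpp is not included in this
excerpt. As a minimal sketch (the guard conditions and placement here are
assumptions, not the verbatim hunk), the lowering change would look
roughly like this in the PPCTargetLowering constructor:

  // Sketch only (assumed shape and placement, not the committed hunk):
  // lower vector ISD::ROTL to the Altivec rotate instructions instead of
  // expanding it to shifts and an OR.
  if (Subtarget.hasAltivec()) {
    // vrlb/vrlh/vrlw rotate each byte/halfword/word element left by the
    // amount in the corresponding element of the second operand.
    for (MVT VT : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
      setOperationAction(ISD::ROTL, VT, Legal);
    // vrld (doubleword element rotate) requires the POWER8 vector facility.
    if (Subtarget.hasP8Altivec())
      setOperationAction(ISD::ROTL, MVT::v2i64, Legal);
  }

This is consistent with what the test below expects: with -mcpu=pwr8,
every function selects a vrl* rotate, while -mcpu=pwr7 still selects
vrlb/vrlh/vrlw but has to scalarize the v2i64 case because vrld is
unavailable before POWER8. The rotl_v4i32_1 function also shows the splat
rotate amount of 23 being materialized without a constant-pool load, as
7 - (-16) using two vspltisw immediates and a vsubuwm.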
---
 llvm/test/CodeGen/PowerPC/vector-rotates.ll | 136 ++++++++++++++++++++++++++++
 1 file changed, 136 insertions(+)
 create mode 100644 llvm/test/CodeGen/PowerPC/vector-rotates.ll

diff --git a/llvm/test/CodeGen/PowerPC/vector-rotates.ll b/llvm/test/CodeGen/PowerPC/vector-rotates.ll
new file mode 100644
index 00000000000..d5fc48173d4
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/vector-rotates.ll
@@ -0,0 +1,136 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O3 -mtriple=powerpc64le-unknown-unknown -ppc-asm-full-reg-names \
+; RUN:   -verify-machineinstrs -mcpu=pwr8 < %s | \
+; RUN:   FileCheck --check-prefix=CHECK-P8 %s
+; RUN: llc -O3 -mtriple=powerpc64-unknown-unknown -ppc-asm-full-reg-names \
+; RUN:   -verify-machineinstrs -mcpu=pwr7 < %s | \
+; RUN:   FileCheck --check-prefix=CHECK-P7 %s
+
+define <16 x i8> @rotl_v16i8(<16 x i8> %a) {
+; CHECK-P8-LABEL: rotl_v16i8:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r3, r2, .LCPI0_0@toc@ha
+; CHECK-P8-NEXT:    addi r3, r3, .LCPI0_0@toc@l
+; CHECK-P8-NEXT:    lvx v3, 0, r3
+; CHECK-P8-NEXT:    vrlb v2, v2, v3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P7-LABEL: rotl_v16i8:
+; CHECK-P7:       # %bb.0: # %entry
+; CHECK-P7-NEXT:    addis r3, r2, .LCPI0_0@toc@ha
+; CHECK-P7-NEXT:    addi r3, r3, .LCPI0_0@toc@l
+; CHECK-P7-NEXT:    lxvw4x vs35, 0, r3
+; CHECK-P7-NEXT:    vrlb v2, v2, v3
+; CHECK-P7-NEXT:    blr
+entry:
+  %b = shl <16 x i8> %a, <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 1, i8 2>
+  %c = lshr <16 x i8> %a, <i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 7, i8 6>
+  %d = or <16 x i8> %b, %c
+  ret <16 x i8> %d
+}
+
+define <8 x i16> @rotl_v8i16(<8 x i16> %a) {
+; CHECK-P8-LABEL: rotl_v8i16:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r3, r2, .LCPI1_0@toc@ha
+; CHECK-P8-NEXT:    addi r3, r3, .LCPI1_0@toc@l
+; CHECK-P8-NEXT:    lvx v3, 0, r3
+; CHECK-P8-NEXT:    vrlh v2, v2, v3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P7-LABEL: rotl_v8i16:
+; CHECK-P7:       # %bb.0: # %entry
+; CHECK-P7-NEXT:    addis r3, r2, .LCPI1_0@toc@ha
+; CHECK-P7-NEXT:    addi r3, r3, .LCPI1_0@toc@l
+; CHECK-P7-NEXT:    lxvw4x vs35, 0, r3
+; CHECK-P7-NEXT:    vrlh v2, v2, v3
+; CHECK-P7-NEXT:    blr
+entry:
+  %b = shl <8 x i16> %a, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
+  %c = lshr <8 x i16> %a, <i16 15, i16 14, i16 13, i16 12, i16 11, i16 10, i16 9, i16 8>
+  %d = or <8 x i16> %b, %c
+  ret <8 x i16> %d
+}
+
+define <4 x i32> @rotl_v4i32_0(<4 x i32> %a) {
+; CHECK-P8-LABEL: rotl_v4i32_0:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r3, r2, .LCPI2_0@toc@ha
+; CHECK-P8-NEXT:    addi r3, r3, .LCPI2_0@toc@l
+; CHECK-P8-NEXT:    lvx v3, 0, r3
+; CHECK-P8-NEXT:    vrlw v2, v2, v3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P7-LABEL: rotl_v4i32_0:
+; CHECK-P7:       # %bb.0: # %entry
+; CHECK-P7-NEXT:    addis r3, r2, .LCPI2_0@toc@ha
+; CHECK-P7-NEXT:    addi r3, r3, .LCPI2_0@toc@l
+; CHECK-P7-NEXT:    lxvw4x vs35, 0, r3
+; CHECK-P7-NEXT:    vrlw v2, v2, v3
+; CHECK-P7-NEXT:    blr
+entry:
+  %b = shl <4 x i32> %a, <i32 1, i32 2, i32 3, i32 4>
+  %c = lshr <4 x i32> %a, <i32 31, i32 30, i32 29, i32 28>
+  %d = or <4 x i32> %b, %c
+  ret <4 x i32> %d
+}
+
+define <4 x i32> @rotl_v4i32_1(<4 x i32> %a) {
+; CHECK-P8-LABEL: rotl_v4i32_1:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    vspltisw v3, -16
+; CHECK-P8-NEXT:    vspltisw v4, 7
+; CHECK-P8-NEXT:    vsubuwm v3, v4, v3
+; CHECK-P8-NEXT:    vrlw v2, v2, v3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P7-LABEL: rotl_v4i32_1:
+; CHECK-P7:       # %bb.0: # %entry
+; CHECK-P7-NEXT:    vspltisw v3, -16
+; CHECK-P7-NEXT:    vspltisw v4, 7
+; CHECK-P7-NEXT:    vsubuwm v3, v4, v3
+; CHECK-P7-NEXT:    vrlw v2, v2, v3
+; CHECK-P7-NEXT:    blr
+entry:
+  %b = shl <4 x i32> %a, <i32 23, i32 23, i32 23, i32 23>
+  %c = lshr <4 x i32> %a, <i32 9, i32 9, i32 9, i32 9>
+  %d = or <4 x i32> %b, %c
+  ret <4 x i32> %d
+}
+
+define <2 x i64> @rotl_v2i64(<2 x i64> %a) {
+; CHECK-P8-LABEL: rotl_v2i64:
+; CHECK-P8:       # %bb.0: # %entry
+; CHECK-P8-NEXT:    addis r3, r2, .LCPI4_0@toc@ha
+; CHECK-P8-NEXT:    addi r3, r3, .LCPI4_0@toc@l
+; CHECK-P8-NEXT:    lxvd2x vs0, 0, r3
+; CHECK-P8-NEXT:    xxswapd vs35, vs0
+; CHECK-P8-NEXT:    vrld v2, v2, v3
+; CHECK-P8-NEXT:    blr
+;
+; CHECK-P7-LABEL: rotl_v2i64:
+; CHECK-P7:       # %bb.0: # %entry
+; CHECK-P7-NEXT:    addi r3, r1, -48
+; CHECK-P7-NEXT:    stxvd2x vs34, 0, r3
+; CHECK-P7-NEXT:    ld r3, -40(r1)
+; CHECK-P7-NEXT:    sldi r4, r3, 53
+; CHECK-P7-NEXT:    rldicl r3, r3, 53, 11
+; CHECK-P7-NEXT:    std r4, -8(r1)
+; CHECK-P7-NEXT:    ld r4, -48(r1)
+; CHECK-P7-NEXT:    sldi r5, r4, 41
+; CHECK-P7-NEXT:    rldicl r4, r4, 41, 23
+; CHECK-P7-NEXT:    std r5, -16(r1)
+; CHECK-P7-NEXT:    addi r5, r1, -16
+; CHECK-P7-NEXT:    lxvw4x vs0, 0, r5
+; CHECK-P7-NEXT:    std r3, -24(r1)
+; CHECK-P7-NEXT:    addi r3, r1, -32
+; CHECK-P7-NEXT:    std r4, -32(r1)
+; CHECK-P7-NEXT:    lxvw4x vs1, 0, r3
+; CHECK-P7-NEXT:    xxlor vs34, vs0, vs1
+; CHECK-P7-NEXT:    blr
+entry:
+  %b = shl <2 x i64> %a, <i64 41, i64 53>
+  %c = lshr <2 x i64> %a, <i64 23, i64 11>
+  %d = or <2 x i64> %b, %c
+  ret <2 x i64> %d
+}