summaryrefslogtreecommitdiffstats
path: root/llvm/test/Transforms/InstCombine/ARM
diff options
context:
space:
mode:
authorSimon Tatham <simon.tatham@arm.com>2019-12-02 16:18:34 +0000
committerSimon Tatham <simon.tatham@arm.com>2019-12-02 16:20:30 +0000
commit01aefae4a173c32a0235feb9600beffbcd0308b4 (patch)
tree5c598fdc8af5b499f056a8e20a70a3febfeb3538 /llvm/test/Transforms/InstCombine/ARM
parenteffcdc3a82f2a32829170e7f7a2ff3d7853b612d (diff)
downloadbcm5719-llvm-01aefae4a173c32a0235feb9600beffbcd0308b4.tar.gz
bcm5719-llvm-01aefae4a173c32a0235feb9600beffbcd0308b4.zip
[ARM,MVE] Add an InstCombine rule permitting VPNOT.
Summary: If a user writing C code using the ACLE MVE intrinsics generates a predicate and then complements it, then the resulting IR will use the `pred_v2i` IR intrinsic to turn some `<n x i1>` vector into a 16-bit integer; complement that integer; and convert back. This will generate machine code that moves the predicate out of the `P0` register, complements it in an integer GPR, and moves it back in again. This InstCombine rule replaces `i2v(~v2i(x))` with a direct complement of the original predicate vector, which we can already instruction-select as the VPNOT instruction which complements P0 in place. Reviewers: ostannard, MarkMurrayARM, dmgreen Reviewed By: dmgreen Subscribers: kristof.beyls, hiraditya, llvm-commits Tags: #llvm Differential Revision: https://reviews.llvm.org/D70484
Diffstat (limited to 'llvm/test/Transforms/InstCombine/ARM')
-rw-r--r--llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll94
1 files changed, 94 insertions, 0 deletions
diff --git a/llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll b/llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll
index 4594102a468..7c511135ba1 100644
--- a/llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll
+++ b/llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll
@@ -1,6 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -instcombine -S -o - %s | FileCheck %s
+target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64"
+
declare i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1>)
declare i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1>)
declare i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1>)
@@ -234,3 +236,95 @@ entry:
%vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %wide2)
ret <4 x i1> %vout
}
+
+; If a predicate vector is round-tripped to an integer and back, and
+; complemented while it's in integer form, we should collapse that to
+; a complement of the vector itself. (Rationale: this is likely to
+; allow it to be code-generated as MVE VPNOT.)
+
+define <4 x i1> @vpnot_4(<4 x i1> %vin) {
+; CHECK-LABEL: @vpnot_4(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VOUT:%.*]] = xor <4 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT: ret <4 x i1> [[VOUT]]
+;
+entry:
+ %int = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin)
+ %flipped = xor i32 %int, 65535
+ %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %flipped)
+ ret <4 x i1> %vout
+}
+
+define <8 x i1> @vpnot_8(<8 x i1> %vin) {
+; CHECK-LABEL: @vpnot_8(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VOUT:%.*]] = xor <8 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT: ret <8 x i1> [[VOUT]]
+;
+entry:
+ %int = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vin)
+ %flipped = xor i32 %int, 65535
+ %vout = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %flipped)
+ ret <8 x i1> %vout
+}
+
+define <16 x i1> @vpnot_16(<16 x i1> %vin) {
+; CHECK-LABEL: @vpnot_16(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VOUT:%.*]] = xor <16 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT: ret <16 x i1> [[VOUT]]
+;
+entry:
+ %int = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> %vin)
+ %flipped = xor i32 %int, 65535
+ %vout = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %flipped)
+ ret <16 x i1> %vout
+}
+
+; And this still works even if the i32 is narrowed to i16 and back on
+; opposite sides of the xor.
+
+define <4 x i1> @vpnot_narrow_4(<4 x i1> %vin) {
+; CHECK-LABEL: @vpnot_narrow_4(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VOUT:%.*]] = xor <4 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT: ret <4 x i1> [[VOUT]]
+;
+entry:
+ %int = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin)
+ %narrow = trunc i32 %int to i16
+ %flipped_narrow = xor i16 %narrow, -1
+ %flipped = zext i16 %flipped_narrow to i32
+ %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %flipped)
+ ret <4 x i1> %vout
+}
+
+define <8 x i1> @vpnot_narrow_8(<8 x i1> %vin) {
+; CHECK-LABEL: @vpnot_narrow_8(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VOUT:%.*]] = xor <8 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT: ret <8 x i1> [[VOUT]]
+;
+entry:
+ %int = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vin)
+ %narrow = trunc i32 %int to i16
+ %flipped_narrow = xor i16 %narrow, -1
+ %flipped = zext i16 %flipped_narrow to i32
+ %vout = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %flipped)
+ ret <8 x i1> %vout
+}
+
+define <16 x i1> @vpnot_narrow_16(<16 x i1> %vin) {
+; CHECK-LABEL: @vpnot_narrow_16(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[VOUT:%.*]] = xor <16 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>
+; CHECK-NEXT: ret <16 x i1> [[VOUT]]
+;
+entry:
+ %int = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> %vin)
+ %narrow = trunc i32 %int to i16
+ %flipped_narrow = xor i16 %narrow, -1
+ %flipped = zext i16 %flipped_narrow to i32
+ %vout = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %flipped)
+ ret <16 x i1> %vout
+}
OpenPOWER on IntegriCloud