diff options
Diffstat (limited to 'llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll')
-rw-r--r-- | llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll | 94 |
1 file changed, 94 insertions, 0 deletions
diff --git a/llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll b/llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll index 4594102a468..7c511135ba1 100644 --- a/llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll +++ b/llvm/test/Transforms/InstCombine/ARM/mve-v2i2v.ll @@ -1,6 +1,8 @@ ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py ; RUN: opt -instcombine -S -o - %s | FileCheck %s +target datalayout = "e-m:e-p:32:32-Fi8-i64:64-v128:64:128-a:0:32-n32-S64" + declare i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1>) declare i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1>) declare i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1>) @@ -234,3 +236,95 @@ entry: %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %wide2) ret <4 x i1> %vout } + +; If a predicate vector is round-tripped to an integer and back, and +; complemented while it's in integer form, we should collapse that to +; a complement of the vector itself. (Rationale: this is likely to +; allow it to be code-generated as MVE VPNOT.) 
+ +define <4 x i1> @vpnot_4(<4 x i1> %vin) { +; CHECK-LABEL: @vpnot_4( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[VOUT:%.*]] = xor <4 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true> +; CHECK-NEXT: ret <4 x i1> [[VOUT]] +; +entry: + %int = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin) + %flipped = xor i32 %int, 65535 + %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %flipped) + ret <4 x i1> %vout +} + +define <8 x i1> @vpnot_8(<8 x i1> %vin) { +; CHECK-LABEL: @vpnot_8( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[VOUT:%.*]] = xor <8 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true> +; CHECK-NEXT: ret <8 x i1> [[VOUT]] +; +entry: + %int = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vin) + %flipped = xor i32 %int, 65535 + %vout = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %flipped) + ret <8 x i1> %vout +} + +define <16 x i1> @vpnot_16(<16 x i1> %vin) { +; CHECK-LABEL: @vpnot_16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[VOUT:%.*]] = xor <16 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true> +; CHECK-NEXT: ret <16 x i1> [[VOUT]] +; +entry: + %int = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> %vin) + %flipped = xor i32 %int, 65535 + %vout = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %flipped) + ret <16 x i1> %vout +} + +; And this still works even if the i32 is narrowed to i16 and back on +; opposite sides of the xor. 
+ +define <4 x i1> @vpnot_narrow_4(<4 x i1> %vin) { +; CHECK-LABEL: @vpnot_narrow_4( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[VOUT:%.*]] = xor <4 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true> +; CHECK-NEXT: ret <4 x i1> [[VOUT]] +; +entry: + %int = call i32 @llvm.arm.mve.pred.v2i.v4i1(<4 x i1> %vin) + %narrow = trunc i32 %int to i16 + %flipped_narrow = xor i16 %narrow, -1 + %flipped = zext i16 %flipped_narrow to i32 + %vout = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %flipped) + ret <4 x i1> %vout +} + +define <8 x i1> @vpnot_narrow_8(<8 x i1> %vin) { +; CHECK-LABEL: @vpnot_narrow_8( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[VOUT:%.*]] = xor <8 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true> +; CHECK-NEXT: ret <8 x i1> [[VOUT]] +; +entry: + %int = call i32 @llvm.arm.mve.pred.v2i.v8i1(<8 x i1> %vin) + %narrow = trunc i32 %int to i16 + %flipped_narrow = xor i16 %narrow, -1 + %flipped = zext i16 %flipped_narrow to i32 + %vout = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 %flipped) + ret <8 x i1> %vout +} + +define <16 x i1> @vpnot_narrow_16(<16 x i1> %vin) { +; CHECK-LABEL: @vpnot_narrow_16( +; CHECK-NEXT: entry: +; CHECK-NEXT: [[VOUT:%.*]] = xor <16 x i1> [[VIN:%.*]], <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true> +; CHECK-NEXT: ret <16 x i1> [[VOUT]] +; +entry: + %int = call i32 @llvm.arm.mve.pred.v2i.v16i1(<16 x i1> %vin) + %narrow = trunc i32 %int to i16 + %flipped_narrow = xor i16 %narrow, -1 + %flipped = zext i16 %flipped_narrow to i32 + %vout = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %flipped) + ret <16 x i1> %vout +} |