path: root/llvm/test/Transforms
author     Sanjay Patel <spatel@rotateright.com>   2018-11-12 20:32:59 +0000
committer  Sanjay Patel <spatel@rotateright.com>   2018-11-12 20:32:59 +0000
commit     b32d03dfed227ad073f7f8614472744ac38a6dca (patch)
tree       29536c40a74acd595d9e23ae645aee37614673ec /llvm/test/Transforms
parent     3409cd2f09c7cc68a7c5de61455ffb6bd34ea7d4 (diff)
download   bcm5719-llvm-b32d03dfed227ad073f7f8614472744ac38a6dca.tar.gz
           bcm5719-llvm-b32d03dfed227ad073f7f8614472744ac38a6dca.zip
[InstCombine] add more tests for rotate narrowing; NFC
llvm-svn: 346703
Diffstat (limited to 'llvm/test/Transforms')
-rw-r--r--  llvm/test/Transforms/InstCombine/rotate.ll | 187
1 file changed, 144 insertions(+), 43 deletions(-)
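
For context, the tests below encode the rotate idiom that InstCombine should eventually narrow. A minimal C-level sketch of the first group of tests (value and shift amount both starting as 16-bit), written purely for illustration and not taken from the commit (the function name is made up):

    #include <stdint.h>

    /* Rotate-left of a 16-bit value: (v << (s & 15)) | (v >> (-s & 15)).
       Masking both shift amounts with 15 keeps them in range, so the
       expression is well defined even when s % 16 == 0. */
    uint16_t rotl16(uint16_t v, uint16_t s) {
        return (uint16_t)((v << (s & 15)) | (v >> (-s & 15)));
    }

The usual C integer promotions widen the 16-bit operands to 32 bits before the shifts, which is what produces the zext/shl/lshr/or/trunc sequences checked in the tests below.
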
diff --git a/llvm/test/Transforms/InstCombine/rotate.ll b/llvm/test/Transforms/InstCombine/rotate.ll
index 53bc5d5a88e..6ee450b89d6 100644
--- a/llvm/test/Transforms/InstCombine/rotate.ll
+++ b/llvm/test/Transforms/InstCombine/rotate.ll
@@ -122,110 +122,211 @@ define i8 @rotate8_not_safe(i8 %v, i32 %shamt) {
ret i8 %ret
}
-; The next two tests mask sure we narrower (x << (x & 15)) | (x >> (-x & 15))
-; when types have been promoted.
-; FIXME: We should be able to narrow this.
+; FIXME:
+; We should narrow (v << (s & 15)) | (v >> (-s & 15))
+; when both v and s have been promoted.
-define i16 @rotate16_neg_mask(i16 %v, i16 %shamt) {
-; CHECK-LABEL: @rotate16_neg_mask(
-; CHECK-NEXT: [[CONV:%.*]] = zext i16 [[V:%.*]] to i32
-; CHECK-NEXT: [[RSHAMT:%.*]] = and i16 [[SHAMT:%.*]], 15
-; CHECK-NEXT: [[RSHAMTCONV:%.*]] = zext i16 [[RSHAMT]] to i32
-; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[CONV]], [[RSHAMTCONV]]
-; CHECK-NEXT: [[NEG:%.*]] = sub i16 0, [[SHAMT]]
-; CHECK-NEXT: [[LSHAMT:%.*]] = and i16 [[NEG]], 15
+define i16 @rotateleft_16_neg_mask(i16 %v, i16 %shamt) {
+; CHECK-LABEL: @rotateleft_16_neg_mask(
+; CHECK-NEXT: [[NEG:%.*]] = sub i16 0, [[SHAMT:%.*]]
+; CHECK-NEXT: [[LSHAMT:%.*]] = and i16 [[SHAMT]], 15
; CHECK-NEXT: [[LSHAMTCONV:%.*]] = zext i16 [[LSHAMT]] to i32
+; CHECK-NEXT: [[RSHAMT:%.*]] = and i16 [[NEG]], 15
+; CHECK-NEXT: [[RSHAMTCONV:%.*]] = zext i16 [[RSHAMT]] to i32
+; CHECK-NEXT: [[CONV:%.*]] = zext i16 [[V:%.*]] to i32
; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[CONV]], [[LSHAMTCONV]]
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[CONV]], [[RSHAMTCONV]]
; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
; CHECK-NEXT: [[RET:%.*]] = trunc i32 [[OR]] to i16
; CHECK-NEXT: ret i16 [[RET]]
;
- %conv = zext i16 %v to i32
- %rshamt = and i16 %shamt, 15
+ %neg = sub i16 0, %shamt
+ %lshamt = and i16 %shamt, 15
+ %lshamtconv = zext i16 %lshamt to i32
+ %rshamt = and i16 %neg, 15
%rshamtconv = zext i16 %rshamt to i32
+ %conv = zext i16 %v to i32
+ %shl = shl i32 %conv, %lshamtconv
%shr = lshr i32 %conv, %rshamtconv
+ %or = or i32 %shr, %shl
+ %ret = trunc i32 %or to i16
+ ret i16 %ret
+}
+
+define i16 @rotateleft_16_neg_mask_commute(i16 %v, i16 %shamt) {
+; CHECK-LABEL: @rotateleft_16_neg_mask_commute(
+; CHECK-NEXT: [[NEG:%.*]] = sub i16 0, [[SHAMT:%.*]]
+; CHECK-NEXT: [[LSHAMT:%.*]] = and i16 [[SHAMT]], 15
+; CHECK-NEXT: [[LSHAMTCONV:%.*]] = zext i16 [[LSHAMT]] to i32
+; CHECK-NEXT: [[RSHAMT:%.*]] = and i16 [[NEG]], 15
+; CHECK-NEXT: [[RSHAMTCONV:%.*]] = zext i16 [[RSHAMT]] to i32
+; CHECK-NEXT: [[CONV:%.*]] = zext i16 [[V:%.*]] to i32
+; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[CONV]], [[LSHAMTCONV]]
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[CONV]], [[RSHAMTCONV]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
+; CHECK-NEXT: [[RET:%.*]] = trunc i32 [[OR]] to i16
+; CHECK-NEXT: ret i16 [[RET]]
+;
%neg = sub i16 0, %shamt
- %lshamt = and i16 %neg, 15
+ %lshamt = and i16 %shamt, 15
%lshamtconv = zext i16 %lshamt to i32
+ %rshamt = and i16 %neg, 15
+ %rshamtconv = zext i16 %rshamt to i32
+ %conv = zext i16 %v to i32
%shl = shl i32 %conv, %lshamtconv
- %or = or i32 %shr, %shl
+ %shr = lshr i32 %conv, %rshamtconv
+ %or = or i32 %shl, %shr
%ret = trunc i32 %or to i16
ret i16 %ret
}
-define i8 @rotate8_neg_mask(i8 %v, i8 %shamt) {
-; CHECK-LABEL: @rotate8_neg_mask(
-; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[V:%.*]] to i32
-; CHECK-NEXT: [[RSHAMT:%.*]] = and i8 [[SHAMT:%.*]], 7
+define i8 @rotateright_8_neg_mask(i8 %v, i8 %shamt) {
+; CHECK-LABEL: @rotateright_8_neg_mask(
+; CHECK-NEXT: [[NEG:%.*]] = sub i8 0, [[SHAMT:%.*]]
+; CHECK-NEXT: [[RSHAMT:%.*]] = and i8 [[SHAMT]], 7
; CHECK-NEXT: [[RSHAMTCONV:%.*]] = zext i8 [[RSHAMT]] to i32
-; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[CONV]], [[RSHAMTCONV]]
-; CHECK-NEXT: [[NEG:%.*]] = sub i8 0, [[SHAMT]]
; CHECK-NEXT: [[LSHAMT:%.*]] = and i8 [[NEG]], 7
; CHECK-NEXT: [[LSHAMTCONV:%.*]] = zext i8 [[LSHAMT]] to i32
+; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[V:%.*]] to i32
; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[CONV]], [[LSHAMTCONV]]
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[CONV]], [[RSHAMTCONV]]
; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
; CHECK-NEXT: [[RET:%.*]] = trunc i32 [[OR]] to i8
; CHECK-NEXT: ret i8 [[RET]]
;
- %conv = zext i8 %v to i32
+ %neg = sub i8 0, %shamt
%rshamt = and i8 %shamt, 7
%rshamtconv = zext i8 %rshamt to i32
+ %lshamt = and i8 %neg, 7
+ %lshamtconv = zext i8 %lshamt to i32
+ %conv = zext i8 %v to i32
+ %shl = shl i32 %conv, %lshamtconv
%shr = lshr i32 %conv, %rshamtconv
+ %or = or i32 %shr, %shl
+ %ret = trunc i32 %or to i8
+ ret i8 %ret
+}
+
+define i8 @rotateright_8_neg_mask_commute(i8 %v, i8 %shamt) {
+; CHECK-LABEL: @rotateright_8_neg_mask_commute(
+; CHECK-NEXT: [[NEG:%.*]] = sub i8 0, [[SHAMT:%.*]]
+; CHECK-NEXT: [[RSHAMT:%.*]] = and i8 [[SHAMT]], 7
+; CHECK-NEXT: [[RSHAMTCONV:%.*]] = zext i8 [[RSHAMT]] to i32
+; CHECK-NEXT: [[LSHAMT:%.*]] = and i8 [[NEG]], 7
+; CHECK-NEXT: [[LSHAMTCONV:%.*]] = zext i8 [[LSHAMT]] to i32
+; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[V:%.*]] to i32
+; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[CONV]], [[LSHAMTCONV]]
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[CONV]], [[RSHAMTCONV]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
+; CHECK-NEXT: [[RET:%.*]] = trunc i32 [[OR]] to i8
+; CHECK-NEXT: ret i8 [[RET]]
+;
%neg = sub i8 0, %shamt
+ %rshamt = and i8 %shamt, 7
+ %rshamtconv = zext i8 %rshamt to i32
%lshamt = and i8 %neg, 7
%lshamtconv = zext i8 %lshamt to i32
+ %conv = zext i8 %v to i32
%shl = shl i32 %conv, %lshamtconv
- %or = or i32 %shr, %shl
+ %shr = lshr i32 %conv, %rshamtconv
+ %or = or i32 %shl, %shr
%ret = trunc i32 %or to i8
ret i8 %ret
}
-; The next two types have a shift amount that is already i32 so we would still
-; need a truncate for it going into the rotate pattern.
-; FIXME: We can narrow this, but we would still need a trunc on the shift amt.
+; FIXME:
+; The shift amount may already be in the wide type,
+; so we need to truncate it going into the rotate pattern.
-define i16 @rotate16_neg_mask_wide_amount(i16 %v, i32 %shamt) {
-; CHECK-LABEL: @rotate16_neg_mask_wide_amount(
-; CHECK-NEXT: [[CONV:%.*]] = zext i16 [[V:%.*]] to i32
-; CHECK-NEXT: [[RSHAMT:%.*]] = and i32 [[SHAMT:%.*]], 15
-; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[CONV]], [[RSHAMT]]
-; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[SHAMT]]
+define i16 @rotateright_16_neg_mask_wide_amount(i16 %v, i32 %shamt) {
+; CHECK-LABEL: @rotateright_16_neg_mask_wide_amount(
+; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[SHAMT:%.*]]
+; CHECK-NEXT: [[RSHAMT:%.*]] = and i32 [[SHAMT]], 15
; CHECK-NEXT: [[LSHAMT:%.*]] = and i32 [[NEG]], 15
+; CHECK-NEXT: [[CONV:%.*]] = zext i16 [[V:%.*]] to i32
; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[CONV]], [[LSHAMT]]
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[CONV]], [[RSHAMT]]
; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
; CHECK-NEXT: [[RET:%.*]] = trunc i32 [[OR]] to i16
; CHECK-NEXT: ret i16 [[RET]]
;
- %conv = zext i16 %v to i32
+ %neg = sub i32 0, %shamt
%rshamt = and i32 %shamt, 15
+ %lshamt = and i32 %neg, 15
+ %conv = zext i16 %v to i32
+ %shl = shl i32 %conv, %lshamt
%shr = lshr i32 %conv, %rshamt
+ %or = or i32 %shr, %shl
+ %ret = trunc i32 %or to i16
+ ret i16 %ret
+}
+
+define i16 @rotateright_16_neg_mask_wide_amount_commute(i16 %v, i32 %shamt) {
+; CHECK-LABEL: @rotateright_16_neg_mask_wide_amount_commute(
+; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[SHAMT:%.*]]
+; CHECK-NEXT: [[RSHAMT:%.*]] = and i32 [[SHAMT]], 15
+; CHECK-NEXT: [[LSHAMT:%.*]] = and i32 [[NEG]], 15
+; CHECK-NEXT: [[CONV:%.*]] = zext i16 [[V:%.*]] to i32
+; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[CONV]], [[LSHAMT]]
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[CONV]], [[RSHAMT]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
+; CHECK-NEXT: [[RET:%.*]] = trunc i32 [[OR]] to i16
+; CHECK-NEXT: ret i16 [[RET]]
+;
%neg = sub i32 0, %shamt
+ %rshamt = and i32 %shamt, 15
%lshamt = and i32 %neg, 15
+ %conv = zext i16 %v to i32
%shl = shl i32 %conv, %lshamt
- %or = or i32 %shr, %shl
+ %shr = lshr i32 %conv, %rshamt
+ %or = or i32 %shl, %shr
%ret = trunc i32 %or to i16
ret i16 %ret
}
-define i8 @rotate8_neg_mask_wide_amount(i8 %v, i32 %shamt) {
-; CHECK-LABEL: @rotate8_neg_mask_wide_amount(
+define i8 @rotateleft_8_neg_mask_wide_amount(i8 %v, i32 %shamt) {
+; CHECK-LABEL: @rotateleft_8_neg_mask_wide_amount(
+; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[SHAMT:%.*]]
+; CHECK-NEXT: [[LSHAMT:%.*]] = and i32 [[SHAMT]], 7
+; CHECK-NEXT: [[RSHAMT:%.*]] = and i32 [[NEG]], 7
; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[V:%.*]] to i32
-; CHECK-NEXT: [[RSHAMT:%.*]] = and i32 [[SHAMT:%.*]], 7
-; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[CONV]], [[RSHAMT]]
-; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[SHAMT]]
-; CHECK-NEXT: [[LSHAMT:%.*]] = and i32 [[NEG]], 7
; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[CONV]], [[LSHAMT]]
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[CONV]], [[RSHAMT]]
; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
; CHECK-NEXT: [[RET:%.*]] = trunc i32 [[OR]] to i8
; CHECK-NEXT: ret i8 [[RET]]
;
+ %neg = sub i32 0, %shamt
+ %lshamt = and i32 %shamt, 7
+ %rshamt = and i32 %neg, 7
%conv = zext i8 %v to i32
- %rshamt = and i32 %shamt, 7
+ %shl = shl i32 %conv, %lshamt
%shr = lshr i32 %conv, %rshamt
+ %or = or i32 %shr, %shl
+ %ret = trunc i32 %or to i8
+ ret i8 %ret
+}
+
+define i8 @rotateleft_8_neg_mask_wide_amount_commute(i8 %v, i32 %shamt) {
+; CHECK-LABEL: @rotateleft_8_neg_mask_wide_amount_commute(
+; CHECK-NEXT: [[NEG:%.*]] = sub i32 0, [[SHAMT:%.*]]
+; CHECK-NEXT: [[LSHAMT:%.*]] = and i32 [[SHAMT]], 7
+; CHECK-NEXT: [[RSHAMT:%.*]] = and i32 [[NEG]], 7
+; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[V:%.*]] to i32
+; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[CONV]], [[LSHAMT]]
+; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[CONV]], [[RSHAMT]]
+; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
+; CHECK-NEXT: [[RET:%.*]] = trunc i32 [[OR]] to i8
+; CHECK-NEXT: ret i8 [[RET]]
+;
%neg = sub i32 0, %shamt
- %lshamt = and i32 %neg, 7
+ %lshamt = and i32 %shamt, 7
+ %rshamt = and i32 %neg, 7
+ %conv = zext i8 %v to i32
%shl = shl i32 %conv, %lshamt
- %or = or i32 %shr, %shl
+ %shr = lshr i32 %conv, %rshamt
+ %or = or i32 %shl, %shr
%ret = trunc i32 %or to i8
ret i8 %ret
}
+
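
The second group of tests (the *_wide_amount functions) keeps the value at 16 or 8 bits but takes the shift amount as i32, so narrowing the rotate would additionally require truncating the shift amount. A hedged C sketch of that variant, with a hypothetical name chosen for illustration:

    #include <stdint.h>

    /* 16-bit rotate-right where the shift amount is already 32-bit. */
    uint16_t rotr16_wide_amount(uint16_t v, uint32_t s) {
        uint32_t conv = (uint32_t)v;             /* zext i16 %v to i32       */
        uint32_t shr  = conv >> (s & 15);        /* lshr by masked amount    */
        uint32_t shl  = conv << ((0u - s) & 15); /* shl by masked negation   */
        return (uint16_t)(shr | shl);            /* trunc i32 back to i16    */
    }
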