summaryrefslogtreecommitdiffstats
path: root/llvm/test/Transforms
diff options
context:
space:
mode:
authorSanjoy Das <sanjoy@playingwithpointers.com>2017-09-26 21:54:27 +0000
committerSanjoy Das <sanjoy@playingwithpointers.com>2017-09-26 21:54:27 +0000
commiteda7a86d42ff6f62b93fd1c4d963385d436dfc08 (patch)
tree62119b2758c1694e3d7ba655f27933f5473d9e96 /llvm/test/Transforms
parentbbfa246ad3c7c7bc7cf3184e64fbcc4fb483fe54 (diff)
downloadbcm5719-llvm-eda7a86d42ff6f62b93fd1c4d963385d436dfc08.tar.gz
bcm5719-llvm-eda7a86d42ff6f62b93fd1c4d963385d436dfc08.zip
[BypassSlowDivision] Improve our handling of divisions by constants
Summary: Don't bail out on constant divisors for divisions that can be narrowed without introducing control flow. This gives us a 32 bit multiply instead of an emulated 64 bit multiply in the generated PTX assembly. Reviewers: jlebar Subscribers: jholewinski, mcrosier, llvm-commits Differential Revision: https://reviews.llvm.org/D38265 llvm-svn: 314253
Diffstat (limited to 'llvm/test/Transforms')
-rw-r--r--llvm/test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div.ll77
1 file changed, 77 insertions, 0 deletions
diff --git a/llvm/test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div.ll b/llvm/test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div.ll
index 4846d52f4d2..4d824e450ff 100644
--- a/llvm/test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/NVPTX/bypass-slow-div.ll
@@ -27,3 +27,80 @@ define void @rem_only(i64 %a, i64 %b, i64* %retptr) {
store i64 %d, i64* %retptr
ret void
}
+
+; CHECK-LABEL: @udiv_by_constant(
+define i64 @udiv_by_constant(i32 %a) {
+; CHECK-NEXT: [[A_ZEXT:%.*]] = zext i32 [[A:%.*]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[A_ZEXT]] to i32
+; CHECK-NEXT: [[TMP2:%.*]] = udiv i32 [[TMP1]], 50
+; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT: ret i64 [[TMP3]]
+
+ %a.zext = zext i32 %a to i64
+ %wide.div = udiv i64 %a.zext, 50
+ ret i64 %wide.div
+}
+
+; CHECK-LABEL: @urem_by_constant(
+define i64 @urem_by_constant(i32 %a) {
+; CHECK-NEXT: [[A_ZEXT:%.*]] = zext i32 [[A:%.*]] to i64
+; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[A_ZEXT]] to i32
+; CHECK-NEXT: [[TMP2:%.*]] = urem i32 [[TMP1]], 50
+; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[TMP2]] to i64
+; CHECK-NEXT: ret i64 [[TMP3]]
+
+ %a.zext = zext i32 %a to i64
+ %wide.div = urem i64 %a.zext, 50
+ ret i64 %wide.div
+}
+
+; Negative test: instead of emitting a runtime check on %a, we prefer to let the
+; DAGCombiner transform this division by constant into a multiplication (with a
+; "magic constant").
+;
+; CHECK-LABEL: @udiv_by_constant_negative_0(
+define i64 @udiv_by_constant_negative_0(i64 %a) {
+; CHECK-NEXT: [[WIDE_DIV:%.*]] = udiv i64 [[A:%.*]], 50
+; CHECK-NEXT: ret i64 [[WIDE_DIV]]
+
+ %wide.div = udiv i64 %a, 50
+ ret i64 %wide.div
+}
+
+; Negative test: while we know the dividend is short, the divisor isn't. This
+; test is here for completeness, but instcombine will optimize this to return 0.
+;
+; CHECK-LABEL: @udiv_by_constant_negative_1(
+define i64 @udiv_by_constant_negative_1(i32 %a) {
+; CHECK-NEXT: [[A_ZEXT:%.*]] = zext i32 [[A:%.*]] to i64
+; CHECK-NEXT: [[WIDE_DIV:%.*]] = udiv i64 [[A_ZEXT]], 8589934592
+; CHECK-NEXT: ret i64 [[WIDE_DIV]]
+
+ %a.zext = zext i32 %a to i64
+ %wide.div = udiv i64 %a.zext, 8589934592 ;; == 1 << 33
+ ret i64 %wide.div
+}
+
+; URem version of udiv_by_constant_negative_0
+;
+; CHECK-LABEL: @urem_by_constant_negative_0(
+define i64 @urem_by_constant_negative_0(i64 %a) {
+; CHECK-NEXT: [[WIDE_DIV:%.*]] = urem i64 [[A:%.*]], 50
+; CHECK-NEXT: ret i64 [[WIDE_DIV]]
+
+ %wide.div = urem i64 %a, 50
+ ret i64 %wide.div
+}
+
+; URem version of udiv_by_constant_negative_1
+;
+; CHECK-LABEL: @urem_by_constant_negative_1(
+define i64 @urem_by_constant_negative_1(i32 %a) {
+; CHECK-NEXT: [[A_ZEXT:%.*]] = zext i32 [[A:%.*]] to i64
+; CHECK-NEXT: [[WIDE_DIV:%.*]] = urem i64 [[A_ZEXT]], 8589934592
+; CHECK-NEXT: ret i64 [[WIDE_DIV]]
+
+ %a.zext = zext i32 %a to i64
+ %wide.div = urem i64 %a.zext, 8589934592 ;; == 1 << 33
+ ret i64 %wide.div
+}
OpenPOWER on IntegriCloud