author     Vedant Kumar <vsk@apple.com>   2017-12-16 01:28:25 +0000
committer  Vedant Kumar <vsk@apple.com>   2017-12-16 01:28:25 +0000
commit     fa5a0e59f0a54d6e9aec3a8333c796b743ca4d01 (patch)
tree       b7905bec51743c8c43f3dbd31b6e3d548b8b2c79 /clang/test
parent     2ff24731bbf905faab6580de7c3e68092ca1ae17 (diff)
[CodeGen] Specialize mixed-sign mul-with-overflow (fix PR34920)
This patch introduces a specialized way to lower overflow-checked
multiplications with mixed-sign operands. This fixes link failures and
ICEs on code like this:

  void mul(int64_t a, uint64_t b) {
    int64_t res;
    __builtin_mul_overflow(a, b, &res);
  }

The generic checked-binop irgen would use a 65-bit multiplication
intrinsic here, which requires runtime support for _muloti4 (128-bit
multiplication), and therefore fails to link on i386. To get an ICE on
x86_64, change the example to use __int128_t / __uint128_t.

Adding runtime and backend support for 65-bit or 129-bit checked
multiplication on all of our supported targets is infeasible. This
patch solves the problem by using simpler, specialized irgen for the
mixed-sign case.

llvm.org/PR34920, rdar://34963321

Testing: Apart from check-clang, I compared the output from this fairly
comprehensive test driver using unpatched & patched clangs:
https://gist.github.com/vedantk/3eb9c88f82e5c32f2e590555b4af5081

Differential Revision: https://reviews.llvm.org/D41149

llvm-svn: 320902
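As a rough illustration (not part of the patch), the C sketch below models
what the specialized lowering computes for the 32-bit signed-by-unsigned
case, mirroring the CHECK lines in the diff that follows. The helper name
is hypothetical, and GCC/Clang's __builtin_umul_overflow stands in for the
@llvm.umul.with.overflow.i32 intrinsic:

  #include <limits.h>
  #include <stdbool.h>

  /* Illustrative model of the specialized mixed-sign lowering
     (signed x * unsigned y -> signed result), not the actual irgen. */
  static bool mixed_sign_mul_overflow_i32(int x, unsigned y, int *res) {
    bool is_neg = x < 0;
    /* Magnitude of the signed operand. */
    unsigned abs_x = is_neg ? 0u - (unsigned)x : (unsigned)x;

    /* Unsigned multiply of the magnitudes; models
       @llvm.umul.with.overflow.i32. */
    unsigned umul_res;
    bool umul_oflow = __builtin_umul_overflow(abs_x, y, &umul_res);

    /* The magnitude may be at most INT_MAX, or INT_MAX + 1 when the
       true result is negative (i.e. when x was negative). */
    unsigned max_result = (unsigned)INT_MAX + (unsigned)is_neg;
    bool signed_oflow = umul_res > max_result;

    /* Negate the magnitude when the true result is negative; the final
       conversion to int models the plain store emitted by the IR. */
    unsigned magnitude = is_neg ? 0u - umul_res : umul_res;
    *res = (int)magnitude;
    return umul_oflow || signed_oflow;
  }

The same pattern generalizes to 64-bit operands and, with an extra range
check, to truncating or extending result types; the new tests below cover
those variants.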
Diffstat (limited to 'clang/test')
-rw-r--r-- clang/test/CodeGen/builtins-overflow.c | 119
1 file changed, 119 insertions(+), 0 deletions(-)
diff --git a/clang/test/CodeGen/builtins-overflow.c b/clang/test/CodeGen/builtins-overflow.c
index c8d828dd33e..7a30cfbd46e 100644
--- a/clang/test/CodeGen/builtins-overflow.c
+++ b/clang/test/CodeGen/builtins-overflow.c
@@ -338,3 +338,122 @@ long long test_smulll_overflow(long long x, long long y) {
return LongLongErrorCode;
return result;
}
+
+int test_mixed_sign_mull_overflow(int x, unsigned y) {
+// CHECK: @test_mixed_sign_mull_overflow
+// CHECK: [[IsNeg:%.*]] = icmp slt i32 [[Op1:%.*]], 0
+// CHECK-NEXT: [[Signed:%.*]] = sub i32 0, [[Op1]]
+// CHECK-NEXT: [[AbsSigned:%.*]] = select i1 [[IsNeg]], i32 [[Signed]], i32 [[Op1]]
+// CHECK-NEXT: call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[AbsSigned]], i32 %{{.*}})
+// CHECK-NEXT: [[UnsignedOFlow:%.*]] = extractvalue { i32, i1 } %{{.*}}, 1
+// CHECK-NEXT: [[UnsignedResult:%.*]] = extractvalue { i32, i1 } %{{.*}}, 0
+// CHECK-NEXT: [[IsNegZext:%.*]] = zext i1 [[IsNeg]] to i32
+// CHECK-NEXT: [[MaxResult:%.*]] = add i32 2147483647, [[IsNegZext]]
+// CHECK-NEXT: [[SignedOFlow:%.*]] = icmp ugt i32 [[UnsignedResult]], [[MaxResult]]
+// CHECK-NEXT: [[OFlow:%.*]] = or i1 [[UnsignedOFlow]], [[SignedOFlow]]
+// CHECK-NEXT: [[NegativeResult:%.*]] = sub i32 0, [[UnsignedResult]]
+// CHECK-NEXT: [[Result:%.*]] = select i1 [[IsNeg]], i32 [[NegativeResult]], i32 [[UnsignedResult]]
+// CHECK-NEXT: store i32 [[Result]], i32* %{{.*}}, align 4
+// CHECK: br i1 [[OFlow]]
+
+ int result;
+ if (__builtin_mul_overflow(x, y, &result))
+ return LongErrorCode;
+ return result;
+}
+
+int test_mixed_sign_mull_overflow_unsigned(int x, unsigned y) {
+// CHECK: @test_mixed_sign_mull_overflow_unsigned
+// CHECK: [[IsNeg:%.*]] = icmp slt i32 [[Op1:%.*]], 0
+// CHECK-NEXT: [[Signed:%.*]] = sub i32 0, [[Op1]]
+// CHECK-NEXT: [[AbsSigned:%.*]] = select i1 [[IsNeg]], i32 [[Signed]], i32 [[Op1]]
+// CHECK-NEXT: call { i32, i1 } @llvm.umul.with.overflow.i32(i32 [[AbsSigned]], i32 %{{.*}})
+// CHECK-NEXT: [[UnsignedOFlow:%.*]] = extractvalue { i32, i1 } %{{.*}}, 1
+// CHECK-NEXT: [[UnsignedResult:%.*]] = extractvalue { i32, i1 } %{{.*}}, 0
+// CHECK-NEXT: [[NotNull:%.*]] = icmp ne i32 [[UnsignedResult]], 0
+// CHECK-NEXT: [[Underflow:%.*]] = and i1 [[IsNeg]], [[NotNull]]
+// CHECK-NEXT: [[OFlow:%.*]] = or i1 [[UnsignedOFlow]], [[Underflow]]
+// CHECK-NEXT: store i32 [[UnsignedResult]], i32* %{{.*}}, align 4
+// CHECK: br i1 [[OFlow]]
+
+ unsigned result;
+ if (__builtin_mul_overflow(x, y, &result))
+ return LongErrorCode;
+ return result;
+}
+
+int test_mixed_sign_mull_overflow_swapped(int x, unsigned y) {
+// CHECK: @test_mixed_sign_mull_overflow_swapped
+// CHECK: call { i32, i1 } @llvm.umul.with.overflow.i32
+// CHECK: add i32 2147483647
+ int result;
+ if (__builtin_mul_overflow(y, x, &result))
+ return LongErrorCode;
+ return result;
+}
+
+long long test_mixed_sign_mulll_overflow(long long x, unsigned long long y) {
+// CHECK: @test_mixed_sign_mulll_overflow
+// CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
+// CHECK: add i64 9223372036854775807
+ long long result;
+ if (__builtin_mul_overflow(x, y, &result))
+ return LongLongErrorCode;
+ return result;
+}
+
+long long test_mixed_sign_mulll_overflow_swapped(long long x, unsigned long long y) {
+// CHECK: @test_mixed_sign_mulll_overflow_swapped
+// CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
+// CHECK: add i64 9223372036854775807
+ long long result;
+ if (__builtin_mul_overflow(y, x, &result))
+ return LongLongErrorCode;
+ return result;
+}
+
+long long test_mixed_sign_mulll_overflow_trunc_signed(long long x, unsigned long long y) {
+// CHECK: @test_mixed_sign_mulll_overflow_trunc_signed
+// CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
+// CHECK: add i64 2147483647
+// CHECK: trunc
+// CHECK: store
+ int result;
+ if (__builtin_mul_overflow(y, x, &result))
+ return LongLongErrorCode;
+ return result;
+}
+
+long long test_mixed_sign_mulll_overflow_trunc_unsigned(long long x, unsigned long long y) {
+// CHECK: @test_mixed_sign_mulll_overflow_trunc_unsigned
+// CHECK: call { i64, i1 } @llvm.umul.with.overflow.i64
+// CHECK: [[NON_ZERO:%.*]] = icmp ne i64 [[UNSIGNED_RESULT:%.*]], 0
+// CHECK-NEXT: [[UNDERFLOW:%.*]] = and i1 {{.*}}, [[NON_ZERO]]
+// CHECK-NEXT: [[OVERFLOW_PRE_TRUNC:%.*]] = or i1 {{.*}}, [[UNDERFLOW]]
+// CHECK-NEXT: [[TRUNC_OVERFLOW:%.*]] = icmp ugt i64 [[UNSIGNED_RESULT]], 4294967295
+// CHECK-NEXT: [[OVERFLOW:%.*]] = or i1 [[OVERFLOW_PRE_TRUNC]], [[TRUNC_OVERFLOW]]
+// CHECK-NEXT: trunc i64 [[UNSIGNED_RESULT]] to i32
+// CHECK-NEXT: store
+ unsigned result;
+ if (__builtin_mul_overflow(y, x, &result))
+ return LongLongErrorCode;
+ return result;
+}
+
+long long test_mixed_sign_mul_overflow_extend_signed(int x, unsigned y) {
+// CHECK: @test_mixed_sign_mul_overflow_extend_signed
+// CHECK: call { i64, i1 } @llvm.smul.with.overflow.i64
+ long long result;
+ if (__builtin_mul_overflow(y, x, &result))
+ return LongLongErrorCode;
+ return result;
+}
+
+long long test_mixed_sign_mul_overflow_extend_unsigned(int x, unsigned y) {
+// CHECK: @test_mixed_sign_mul_overflow_extend_unsigned
+// CHECK: call { i65, i1 } @llvm.smul.with.overflow.i65
+ unsigned long long result;
+ if (__builtin_mul_overflow(y, x, &result))
+ return LongLongErrorCode;
+ return result;
+}
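For context, a minimal standalone driver (hypothetical, not part of the
patch or of check-clang) showing the mixed-sign behavior these tests pin
down:

  #include <stdint.h>
  #include <stdio.h>

  int main(void) {
    int64_t a = -3;
    uint64_t b = 1000;
    int64_t res;

    /* Fits in int64_t: expected result is -3000, no overflow reported. */
    if (!__builtin_mul_overflow(a, b, &res))
      printf("res = %lld\n", (long long)res);

    /* Magnitude exceeds what int64_t can hold, so overflow is reported. */
    a = -2;
    b = UINT64_MAX / 2;
    if (__builtin_mul_overflow(a, b, &res))
      printf("overflow detected\n");
    return 0;
  }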