author     Ulrich Weigand <ulrich.weigand@de.ibm.com>   2018-04-30 17:54:28 +0000
committer  Ulrich Weigand <ulrich.weigand@de.ibm.com>   2018-04-30 17:54:28 +0000
commit     c3ec80fea186b6dd956001467a1076c6c2ee4fb6
tree       18dd0151abdf2a9dbbd50abfc01123edc3d00a69
parent     b32f3656d2bc376d223b5cf38c5067e3942d1471
[SystemZ] Handle SADDO et al. and ADD/SUBCARRY
This provides an optimized implementation of SADDO/SSUBO/UADDO/USUBO
as well as ADDCARRY/SUBCARRY on top of the new CC implementation.
In particular, multi-word arithmetic now uses UADDO/ADDCARRY instead
of the old ADDC/ADDE logic, which means we no longer need to use
"glue" links for those instructions. This also allows making full
use of the memory-based instructions like ALSI, which couldn't be
recognized due to limitations in the DAG matcher previously.
Also, the llvm.sadd.with.overflow et al. intrinsics now expand to
direct use of the ADD instructions and a check for a CC 3 result
(also sketched below).
llvm-svn: 331203
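
To make the multi-word and memory-operand cases concrete, here is a
minimal IR sketch. The function names are hypothetical and the code is
not part of this commit's tests; whether ALSI is actually selected
depends on the addressing mode and target CPU.

; Hypothetical sketch: an i128 addition is legalized into a low-part
; UADDO followed by a high-part ADDCARRY, with the carry flowing
; through CC rather than a glue link.
define i128 @f_add128(i128 %a, i128 %b) {
  %sum = add i128 %a, %b
  ret i128 %sum
}

; Hypothetical sketch: an overflow-checked add of a small immediate
; to a memory operand, the kind of pattern that a memory-based
; instruction such as ALSI can now cover.
declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32)

define zeroext i1 @f_incmem(i32 *%ptr) {
  %val = load i32, i32 *%ptr
  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %val, i32 1)
  %sum = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  store i32 %sum, i32 *%ptr
  ret i1 %obit
}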
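
For the signed intrinsics, a similar hedged sketch (hypothetical
function name): the overflow bit is now derived from the ADD
instruction's condition code, where CC 3 indicates overflow, instead
of being recomputed with a separate comparison sequence.

declare {i32, i1} @llvm.sadd.with.overflow.i32(i32, i32)

; Hypothetical sketch: branching on the overflow bit lets the backend
; test CC 3 directly after the add.
define i32 @f_sadd_clamp(i32 %a, i32 %b) {
  %t = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
  %val = extractvalue {i32, i1} %t, 0
  %obit = extractvalue {i32, i1} %t, 1
  br i1 %obit, label %overflow, label %normal

normal:
  ret i32 %val

overflow:
  ret i32 2147483647    ; example policy: clamp to INT32_MAX
}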
Diffstat (limited to 'llvm/test/CodeGen/SystemZ/int-uadd-06.ll')
-rw-r--r--  llvm/test/CodeGen/SystemZ/int-uadd-06.ll | 80
1 file changed, 80 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/SystemZ/int-uadd-06.ll b/llvm/test/CodeGen/SystemZ/int-uadd-06.ll
new file mode 100644
index 00000000000..2c1864de3a5
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/int-uadd-06.ll
@@ -0,0 +1,80 @@
+; Test the three-operand form of 32-bit addition.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | FileCheck %s
+
+declare i32 @foo(i32, i32, i32)
+
+; Check ALRK.
+define i32 @f1(i32 %dummy, i32 %a, i32 %b, i32 *%flag) {
+; CHECK-LABEL: f1:
+; CHECK: alrk %r2, %r3, %r4
+; CHECK: ipm [[REG1:%r[0-5]]]
+; CHECK: risblg [[REG2:%r[0-5]]], [[REG1]], 31, 159, 35
+; CHECK: st [[REG2]], 0(%r5)
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  %ext = zext i1 %obit to i32
+  store i32 %ext, i32 *%flag
+  ret i32 %val
+}
+
+; Check using the overflow result for a branch.
+define i32 @f2(i32 %dummy, i32 %a, i32 %b) {
+; CHECK-LABEL: f2:
+; CHECK: alrk %r2, %r3, %r4
+; CHECK-NEXT: bler %r14
+; CHECK: lhi %r2, 0
+; CHECK: jg foo@PLT
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  br i1 %obit, label %call, label %exit
+
+call:
+  %res = tail call i32 @foo(i32 0, i32 %a, i32 %b)
+  ret i32 %res
+
+exit:
+  ret i32 %val
+}
+
+; ... and the same with the inverted direction.
+define i32 @f3(i32 %dummy, i32 %a, i32 %b) {
+; CHECK-LABEL: f3:
+; CHECK: alrk %r2, %r3, %r4
+; CHECK-NEXT: bnler %r14
+; CHECK: lhi %r2, 0
+; CHECK: jg foo@PLT
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  br i1 %obit, label %exit, label %call
+
+call:
+  %res = tail call i32 @foo(i32 0, i32 %a, i32 %b)
+  ret i32 %res
+
+exit:
+  ret i32 %val
+}
+
+; Check that we can still use ALR in obvious cases.
+define i32 @f4(i32 %a, i32 %b, i32 *%flag) {
+; CHECK-LABEL: f4:
+; CHECK: alr %r2, %r3
+; CHECK: ipm [[REG1:%r[0-5]]]
+; CHECK: risblg [[REG2:%r[0-5]]], [[REG1]], 31, 159, 35
+; CHECK: st [[REG2]], 0(%r4)
+; CHECK: br %r14
+  %t = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
+  %val = extractvalue {i32, i1} %t, 0
+  %obit = extractvalue {i32, i1} %t, 1
+  %ext = zext i1 %obit to i32
+  store i32 %ext, i32 *%flag
+  ret i32 %val
+}
+
+declare {i32, i1} @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
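
A note on the CHECK patterns above: in f1 and f4 the carry bit is
needed as a value in a general register, so the compiler emits ipm
(which inserts the condition code and program mask into a register)
followed by risblg to isolate the carry bit before the store. In f2
and f3 the condition code is consumed directly: bler %r14 and
bnler %r14 are conditional returns taken on the no-carry and carry CC
masks respectively, so no ipm sequence is needed. f1 through f3 use
the three-operand alrk, a z196 distinct-operands form, while f4 can
fall back to the two-operand alr because the first source operand and
the result share %r2.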