author     Simon Pilgrim <llvm-dev@redking.me.uk>    2019-02-25 21:05:09 +0000
committer  Simon Pilgrim <llvm-dev@redking.me.uk>    2019-02-25 21:05:09 +0000
commit     7166ab47045e64bcf3178a7d2be47d77700f6564 (patch)
tree       5d2b1886a050835cf09159bd48f4edadf73645d6 /llvm/docs
parent     0397f495c0c46a7e4ebe304e6ebf14d85fb76dc7 (diff)
[LangRef] *.overflow intrinsics now support vectors
We have all the necessary legalization, expansion and unrolling support required for the *.overflow intrinsics with vector types, so update the docs to make that clear.

Note: vectorization is not in place yet (the non-homogeneous return types aren't well supported), so we still must explicitly use the vector intrinsics and not rely on the SLP/loop vectorizers.

Differential Revision: https://reviews.llvm.org/D58618

llvm-svn: 354821
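As a rough illustration (not part of this patch), the new vector overloads are used like any other aggregate-returning intrinsic: the call yields a struct holding the result vector and a per-lane overflow mask, which are split out with ``extractvalue``. The function name ``@sadd_or_zero`` below is just a placeholder for this sketch::

    ; Hypothetical usage sketch: call the v4i32 overload of
    ; llvm.sadd.with.overflow, then unpack the {result, overflow} struct.
    define <4 x i32> @sadd_or_zero(<4 x i32> %a, <4 x i32> %b) {
      %res = call {<4 x i32>, <4 x i1>} @llvm.sadd.with.overflow.v4i32(<4 x i32> %a, <4 x i32> %b)
      %sum = extractvalue {<4 x i32>, <4 x i1>} %res, 0
      %ovf = extractvalue {<4 x i32>, <4 x i1>} %res, 1
      ; Per lane: return 0 where the addition overflowed, the sum otherwise.
      %out = select <4 x i1> %ovf, <4 x i32> zeroinitializer, <4 x i32> %sum
      ret <4 x i32> %out
    }

    declare {<4 x i32>, <4 x i1>} @llvm.sadd.with.overflow.v4i32(<4 x i32>, <4 x i32>)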
Diffstat (limited to 'llvm/docs')
-rw-r--r--  llvm/docs/LangRef.rst | 18
1 file changed, 12 insertions, 6 deletions
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 8483d74dd4f..6af6ddfb3f2 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -12641,13 +12641,14 @@ Syntax:
"""""""
This is an overloaded intrinsic. You can use ``llvm.sadd.with.overflow``
-on any integer bit width.
+on any integer bit width or vectors of integers.
::
declare {i16, i1} @llvm.sadd.with.overflow.i16(i16 %a, i16 %b)
declare {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
declare {i64, i1} @llvm.sadd.with.overflow.i64(i64 %a, i64 %b)
+ declare {<4 x i32>, <4 x i1>} @llvm.sadd.with.overflow.v4i32(<4 x i32> %a, <4 x i32> %b)
Overview:
"""""""""
@@ -12691,13 +12692,14 @@ Syntax:
"""""""
This is an overloaded intrinsic. You can use ``llvm.uadd.with.overflow``
-on any integer bit width.
+on any integer bit width or vectors of integers.
::
declare {i16, i1} @llvm.uadd.with.overflow.i16(i16 %a, i16 %b)
declare {i32, i1} @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
declare {i64, i1} @llvm.uadd.with.overflow.i64(i64 %a, i64 %b)
+ declare {<4 x i32>, <4 x i1>} @llvm.uadd.with.overflow.v4i32(<4 x i32> %a, <4 x i32> %b)
Overview:
"""""""""
@@ -12740,13 +12742,14 @@ Syntax:
"""""""
This is an overloaded intrinsic. You can use ``llvm.ssub.with.overflow``
-on any integer bit width.
+on any integer bit width or vectors of integers.
::
declare {i16, i1} @llvm.ssub.with.overflow.i16(i16 %a, i16 %b)
declare {i32, i1} @llvm.ssub.with.overflow.i32(i32 %a, i32 %b)
declare {i64, i1} @llvm.ssub.with.overflow.i64(i64 %a, i64 %b)
+ declare {<4 x i32>, <4 x i1>} @llvm.ssub.with.overflow.v4i32(<4 x i32> %a, <4 x i32> %b)
Overview:
"""""""""
@@ -12790,13 +12793,14 @@ Syntax:
"""""""
This is an overloaded intrinsic. You can use ``llvm.usub.with.overflow``
-on any integer bit width.
+on any integer bit width or vectors of integers.
::
declare {i16, i1} @llvm.usub.with.overflow.i16(i16 %a, i16 %b)
declare {i32, i1} @llvm.usub.with.overflow.i32(i32 %a, i32 %b)
declare {i64, i1} @llvm.usub.with.overflow.i64(i64 %a, i64 %b)
+ declare {<4 x i32>, <4 x i1>} @llvm.usub.with.overflow.v4i32(<4 x i32> %a, <4 x i32> %b)
Overview:
"""""""""
@@ -12840,13 +12844,14 @@ Syntax:
"""""""
This is an overloaded intrinsic. You can use ``llvm.smul.with.overflow``
-on any integer bit width.
+on any integer bit width or vectors of integers.
::
declare {i16, i1} @llvm.smul.with.overflow.i16(i16 %a, i16 %b)
declare {i32, i1} @llvm.smul.with.overflow.i32(i32 %a, i32 %b)
declare {i64, i1} @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
+ declare {<4 x i32>, <4 x i1>} @llvm.smul.with.overflow.v4i32(<4 x i32> %a, <4 x i32> %b)
Overview:
"""""""""
@@ -12890,13 +12895,14 @@ Syntax:
"""""""
This is an overloaded intrinsic. You can use ``llvm.umul.with.overflow``
-on any integer bit width.
+on any integer bit width or vectors of integers.
::
declare {i16, i1} @llvm.umul.with.overflow.i16(i16 %a, i16 %b)
declare {i32, i1} @llvm.umul.with.overflow.i32(i32 %a, i32 %b)
declare {i64, i1} @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
+ declare {<4 x i32>, <4 x i1>} @llvm.umul.with.overflow.v4i32(<4 x i32> %a, <4 x i32> %b)
Overview:
"""""""""