summary | refs | log | tree | commit | diff | stats
path: root/llvm/test/CodeGen/Thumb2
diff options
context:
space:
mode:
authorSimon Tatham <simon.tatham@arm.com>2019-06-25 16:49:32 +0000
committerSimon Tatham <simon.tatham@arm.com>2019-06-25 16:49:32 +0000
commite8de8ba6a637c60d53db2433c0962c3fdbbdbf5c (patch)
treeb9aa1a7a9ea0881dee88867e12d7271baa5f7f19 /llvm/test/CodeGen/Thumb2
parent88139c143c5f419aa0ed6ba8bb8e70263ffb37cb (diff)
downloadbcm5719-llvm-e8de8ba6a637c60d53db2433c0962c3fdbbdbf5c.tar.gz
bcm5719-llvm-e8de8ba6a637c60d53db2433c0962c3fdbbdbf5c.zip
[ARM] Support inline assembler constraints for MVE.
"To" selects an odd-numbered GPR, and "Te" an even one. There are some 8.1-M instructions that have one too few bits in their register fields and require registers of particular parity, without necessarily using a consecutive even/odd pair. Also, the constraint letter "t" should select an MVE q-register, when MVE is present. This didn't need any source changes, but some extra tests have been added.

Reviewers: dmgreen, samparker, SjoerdMeijer

Subscribers: javed.absar, eraman, kristof.beyls, hiraditya, cfe-commits, llvm-commits

Tags: #clang, #llvm

Differential Revision: https://reviews.llvm.org/D60709

llvm-svn: 364331
Diffstat (limited to 'llvm/test/CodeGen/Thumb2')
-rw-r--r--llvm/test/CodeGen/Thumb2/inlineasm-error-t-toofewregs-mve.ll14
-rw-r--r--llvm/test/CodeGen/Thumb2/inlineasm-mve.ll48
2 files changed, 62 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/Thumb2/inlineasm-error-t-toofewregs-mve.ll b/llvm/test/CodeGen/Thumb2/inlineasm-error-t-toofewregs-mve.ll
new file mode 100644
index 00000000000..419ff71c07d
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/inlineasm-error-t-toofewregs-mve.ll
@@ -0,0 +1,14 @@
+; RUN: not llc -mtriple=armv8.1-m-eabi -mattr=+mve %s -o /dev/null 2>&1 | FileCheck %s
+
+; CHECK: inline assembly requires more registers than available
+define arm_aapcs_vfpcc <4 x i32> @t-constraint-i32-vectors-too-few-regs(<4 x i32> %a, <4 x i32> %b) {
+entry:
+  ; Ask for ten distinct "t"-constraint (MVE q-register) outputs plus two
+  ; inputs: that is more 128-bit q-registers than the target has (q0-q7),
+  ; so llc must emit the "requires more registers than available"
+  ; diagnostic checked above rather than silently mis-allocating.
+  %0 = tail call { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>,
+                   <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32> }
+        asm "",
+            "=t,=t,=t,=t,=t,=t,=t,=t,=t,=t,t,t"(<4 x i32> %a, <4 x i32> %b)
+  %asmresult = extractvalue { <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>,
+                              <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>,
+                              <4 x i32>, <4 x i32> } %0, 0
+  ret <4 x i32> %asmresult
+}
diff --git a/llvm/test/CodeGen/Thumb2/inlineasm-mve.ll b/llvm/test/CodeGen/Thumb2/inlineasm-mve.ll
new file mode 100644
index 00000000000..a8c0622b316
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/inlineasm-mve.ll
@@ -0,0 +1,48 @@
+; RUN: llc -mtriple=armv8.1-m-eabi -mattr=+mve %s -o - | FileCheck %s
+
+define i32 @test1(i32 %tmp54) {
+  ; Baseline sanity check: a plain "r" (GPR) constraint must keep working
+  ; when MVE is enabled.
+  %tmp56 = tail call i32 asm "uxtb16 $0,$1", "=r,r"( i32 %tmp54 )
+  ret i32 %tmp56
+}
+
+define void @test2() {
+  ; Baseline sanity check for the "i" (immediate) constraint with the
+  ; ${0:c} modifier, which prints the constant without a '#' prefix.
+  tail call void asm sideeffect "/* number: ${0:c} */", "i"( i32 1 )
+  ret void
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mve-t-constraint-128bit(<4 x i32>, <4 x i32>) {
+; CHECK-LABEL: mve-t-constraint-128bit
+; CHECK: vadd.i32 q{{[0-7]}}, q{{[0-7]}}, q{{[0-7]}}
+  ; With MVE present, the "t" constraint on a 128-bit vector type must
+  ; select one of the MVE q-registers (q0-q7), checked above.
+  %ret = tail call <4 x i32>
+          asm "vadd.i32 $0, $1, $2", "=t,t,t"
+              (<4 x i32> %0, <4 x i32> %1)
+  ret <4 x i32> %ret
+}
+
+define i32 @even-GPR-constraint() {
+entry:
+  ; "Te" must allocate an even-numbered GPR (r0, r2, ..., r12) for every
+  ; output.  The regex matches any register whose final digit is even;
+  ; the alternation form is used (rather than a trailing character class)
+  ; so the pattern does not end in "]]]", which FileCheck would
+  ; mis-parse when looking for the "]]" terminator.  The previous
+  ; pattern r1*[0, 2, 4, 6, 8] also matched literal spaces/commas and
+  ; odd registers such as r1, so it did not verify parity at all.
+  ; CHECK-LABEL: even-GPR-constraint
+  ; CHECK: add [[REG:r[0-9]*(0|2|4|6|8)]], [[REG]], #1
+  ; CHECK: add [[REG:r[0-9]*(0|2|4|6|8)]], [[REG]], #2
+  ; CHECK: add [[REG:r[0-9]*(0|2|4|6|8)]], [[REG]], #3
+  ; CHECK: add [[REG:r[0-9]*(0|2|4|6|8)]], [[REG]], #4
+  %0 = tail call { i32, i32, i32, i32 }
+          asm "add $0, #1\0Aadd $1, #2\0Aadd $2, #3\0Aadd $3, #4\0A", "=^Te,=^Te,=^Te,=^Te,0,1,2,3"
+          (i32 0, i32 0, i32 0, i32 0)
+  %asmresult = extractvalue { i32, i32, i32, i32 } %0, 0
+  ret i32 %asmresult
+}
+
+define i32 @odd-GPR-constraint() {
+entry:
+  ; "To" must allocate an odd-numbered GPR (r1, r3, ..., r11) for every
+  ; output.  The regex matches any register whose final digit is odd;
+  ; alternation avoids a trailing "]]]" that FileCheck would mis-parse
+  ; while scanning for the "]]" terminator.  The previous pattern
+  ; r1*[1, 3, 5, 7, 9] also matched literal spaces/commas, so it did not
+  ; actually constrain the register parity.
+  ; CHECK-LABEL: odd-GPR-constraint
+  ; CHECK: add [[REG:r[0-9]*(1|3|5|7|9)]], [[REG]], #1
+  ; CHECK: add [[REG:r[0-9]*(1|3|5|7|9)]], [[REG]], #2
+  ; CHECK: add [[REG:r[0-9]*(1|3|5|7|9)]], [[REG]], #3
+  ; CHECK: add [[REG:r[0-9]*(1|3|5|7|9)]], [[REG]], #4
+  %0 = tail call { i32, i32, i32, i32 }
+          asm "add $0, #1\0Aadd $1, #2\0Aadd $2, #3\0Aadd $3, #4\0A", "=^To,=^To,=^To,=^To,0,1,2,3"
+          (i32 0, i32 0, i32 0, i32 0)
+  %asmresult = extractvalue { i32, i32, i32, i32 } %0, 0
+  ret i32 %asmresult
+}
OpenPOWER on IntegriCloud