Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/select-arith-shifted-reg.mir   | 395
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/select-logical-shifted-reg.mir |  75
-rw-r--r--  llvm/test/CodeGen/AArch64/addsub-shifted.ll                         |  41
-rw-r--r--  llvm/test/CodeGen/AArch64/eon.ll                                    |   3
4 files changed, 513 insertions(+), 1 deletion(-)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-arith-shifted-reg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-arith-shifted-reg.mir
new file mode 100644
index 00000000000..24d2b396bb8
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-arith-shifted-reg.mir
@@ -0,0 +1,395 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64-unknown-unknown -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+
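+# A note on the shifter operand in the checks below: the third source
+# operand of ADDXrs / ADDWrs / SUBSXrs / SUBSWrs is AArch64's packed
+# shifter immediate. Assuming the usual AArch64_AM::getShifterImm()
+# packing of (shift-type << 6) | shift-amount, with LSL = 0, LSR = 1 and
+# ASR = 2, a shift amount of 8 encodes as:
+#   LSL #8 -> (0 << 6) | 8 = 8
+#   LSR #8 -> (1 << 6) | 8 = 72
+#   ASR #8 -> (2 << 6) | 8 = 136
+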
+---
+name: add_shl_s64_rhs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $x0
+ ; CHECK-LABEL: name: add_shl_s64_rhs
+ ; CHECK: liveins: $x0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK: [[ADDXrs:%[0-9]+]]:gpr64 = ADDXrs [[COPY]], [[COPY]], 8
+ ; CHECK: $x0 = COPY [[ADDXrs]]
+ ; CHECK: RET_ReallyLR implicit $x0
+ %0:gpr(s64) = COPY $x0
+ %1:gpr(s64) = G_CONSTANT i64 8
+ %2:gpr(s64) = G_SHL %0, %1:gpr(s64)
+ %3:gpr(s64) = G_ADD %0, %2:gpr(s64)
+ $x0 = COPY %3:gpr(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: add_shl_s64_lhs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: add_shl_s64_lhs
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+ ; CHECK: [[ADDXrs:%[0-9]+]]:gpr64 = ADDXrs [[COPY1]], [[COPY]], 8
+ ; CHECK: $x0 = COPY [[ADDXrs]]
+ ; CHECK: RET_ReallyLR implicit $x0
+ %0:gpr(s64) = COPY $x0
+ %4:gpr(s64) = COPY $x1
+ %1:gpr(s64) = G_CONSTANT i64 8
+ %2:gpr(s64) = G_SHL %0, %1:gpr(s64)
+ %3:gpr(s64) = G_ADD %2, %4:gpr(s64)
+ $x0 = COPY %3:gpr(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: sub_shl_s64_rhs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $x0
+ ; CHECK-LABEL: name: sub_shl_s64_rhs
+ ; CHECK: liveins: $x0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK: [[SUBSXrs:%[0-9]+]]:gpr64 = SUBSXrs [[COPY]], [[COPY]], 8, implicit-def $nzcv
+ ; CHECK: $x0 = COPY [[SUBSXrs]]
+ ; CHECK: RET_ReallyLR implicit $x0
+ %0:gpr(s64) = COPY $x0
+ %1:gpr(s64) = G_CONSTANT i64 8
+ %2:gpr(s64) = G_SHL %0, %1:gpr(s64)
+ %3:gpr(s64) = G_SUB %0, %2:gpr(s64)
+ $x0 = COPY %3:gpr(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: add_lshr_s64_rhs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+    liveins: $x0
+    ; CHECK-LABEL: name: add_lshr_s64_rhs
+    ; CHECK: liveins: $x0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK: [[ADDXrs:%[0-9]+]]:gpr64 = ADDXrs [[COPY]], [[COPY]], 72
+    ; CHECK: $x0 = COPY [[ADDXrs]]
+    ; CHECK: RET_ReallyLR implicit $x0
+ %0:gpr(s64) = COPY $x0
+ %1:gpr(s64) = G_CONSTANT i64 8
+ %2:gpr(s64) = G_LSHR %0, %1:gpr(s64)
+ %3:gpr(s64) = G_ADD %0, %2:gpr(s64)
+ $x0 = COPY %3:gpr(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: add_lshr_s64_lhs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: add_lshr_s64_lhs
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK: %param2:gpr64 = COPY $x1
+ ; CHECK: [[ADDXrs:%[0-9]+]]:gpr64 = ADDXrs %param2, [[COPY]], 72
+ ; CHECK: $x0 = COPY [[ADDXrs]]
+ ; CHECK: RET_ReallyLR implicit $x0
+ %0:gpr(s64) = COPY $x0
+ %param2:gpr(s64) = COPY $x1
+ %1:gpr(s64) = G_CONSTANT i64 8
+ %2:gpr(s64) = G_LSHR %0, %1:gpr(s64)
+ %3:gpr(s64) = G_ADD %2, %param2:gpr(s64)
+ $x0 = COPY %3:gpr(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: sub_lshr_s64_rhs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $x0
+ ; CHECK-LABEL: name: sub_lshr_s64_rhs
+ ; CHECK: liveins: $x0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK: [[SUBSXrs:%[0-9]+]]:gpr64 = SUBSXrs [[COPY]], [[COPY]], 72, implicit-def $nzcv
+ ; CHECK: $x0 = COPY [[SUBSXrs]]
+ ; CHECK: RET_ReallyLR implicit $x0
+ %0:gpr(s64) = COPY $x0
+ %1:gpr(s64) = G_CONSTANT i64 8
+ %2:gpr(s64) = G_LSHR %0, %1:gpr(s64)
+ %3:gpr(s64) = G_SUB %0, %2:gpr(s64)
+ $x0 = COPY %3:gpr(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: add_ashr_s64_rhs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $x0
+ ; CHECK-LABEL: name: add_ashr_s64_rhs
+ ; CHECK: liveins: $x0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK: [[ADDXrs:%[0-9]+]]:gpr64 = ADDXrs [[COPY]], [[COPY]], 136
+ ; CHECK: $x0 = COPY [[ADDXrs]]
+ ; CHECK: RET_ReallyLR implicit $x0
+ %0:gpr(s64) = COPY $x0
+ %1:gpr(s64) = G_CONSTANT i64 8
+ %2:gpr(s64) = G_ASHR %0, %1:gpr(s64)
+ %3:gpr(s64) = G_ADD %0, %2:gpr(s64)
+ $x0 = COPY %3:gpr(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: add_ashr_s64_lhs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $x0, $x1
+ ; CHECK-LABEL: name: add_ashr_s64_lhs
+ ; CHECK: liveins: $x0, $x1
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK: %param2:gpr64 = COPY $x1
+ ; CHECK: [[ADDXrs:%[0-9]+]]:gpr64 = ADDXrs %param2, [[COPY]], 136
+ ; CHECK: $x0 = COPY [[ADDXrs]]
+ ; CHECK: RET_ReallyLR implicit $x0
+ %0:gpr(s64) = COPY $x0
+ %param2:gpr(s64) = COPY $x1
+ %1:gpr(s64) = G_CONSTANT i64 8
+ %2:gpr(s64) = G_ASHR %0, %1:gpr(s64)
+ %3:gpr(s64) = G_ADD %2, %param2:gpr(s64)
+ $x0 = COPY %3:gpr(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: sub_ashr_s64_rhs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $x0
+ ; CHECK-LABEL: name: sub_ashr_s64_rhs
+ ; CHECK: liveins: $x0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK: [[SUBSXrs:%[0-9]+]]:gpr64 = SUBSXrs [[COPY]], [[COPY]], 136, implicit-def $nzcv
+ ; CHECK: $x0 = COPY [[SUBSXrs]]
+ ; CHECK: RET_ReallyLR implicit $x0
+ %0:gpr(s64) = COPY $x0
+ %1:gpr(s64) = G_CONSTANT i64 8
+ %2:gpr(s64) = G_ASHR %0, %1:gpr(s64)
+ %3:gpr(s64) = G_SUB %0, %2:gpr(s64)
+ $x0 = COPY %3:gpr(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: add_shl_s32_rhs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+    liveins: $w0
+    ; CHECK-LABEL: name: add_shl_s32_rhs
+    ; CHECK: liveins: $w0
+    ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+    ; CHECK: [[ADDWrs:%[0-9]+]]:gpr32 = ADDWrs [[COPY]], [[COPY]], 8
+    ; CHECK: $w0 = COPY [[ADDWrs]]
+    ; CHECK: RET_ReallyLR implicit $w0
+ %0:gpr(s32) = COPY $w0
+ %1:gpr(s32) = G_CONSTANT i32 8
+ %2:gpr(s32) = G_SHL %0, %1:gpr(s32)
+ %3:gpr(s32) = G_ADD %0, %2:gpr(s32)
+ $w0 = COPY %3:gpr(s32)
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: add_shl_s32_lhs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-LABEL: name: add_shl_s32_lhs
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK: %param2:gpr32 = COPY $w1
+ ; CHECK: [[ADDWrs:%[0-9]+]]:gpr32 = ADDWrs %param2, [[COPY]], 8
+ ; CHECK: $w0 = COPY [[ADDWrs]]
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:gpr(s32) = COPY $w0
+ %param2:gpr(s32) = COPY $w1
+ %1:gpr(s32) = G_CONSTANT i32 8
+ %2:gpr(s32) = G_SHL %0, %1:gpr(s32)
+ %3:gpr(s32) = G_ADD %2, %param2:gpr(s32)
+ $w0 = COPY %3:gpr(s32)
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: sub_shl_s32_rhs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0
+ ; CHECK-LABEL: name: sub_shl_s32_rhs
+ ; CHECK: liveins: $w0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs [[COPY]], [[COPY]], 8, implicit-def $nzcv
+ ; CHECK: $w0 = COPY [[SUBSWrs]]
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:gpr(s32) = COPY $w0
+ %1:gpr(s32) = G_CONSTANT i32 8
+ %2:gpr(s32) = G_SHL %0, %1:gpr(s32)
+ %3:gpr(s32) = G_SUB %0, %2:gpr(s32)
+ $w0 = COPY %3:gpr(s32)
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: add_lshr_s32_rhs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0
+ ; CHECK-LABEL: name: add_lshr_s32_rhs
+ ; CHECK: liveins: $w0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK: [[ADDWrs:%[0-9]+]]:gpr32 = ADDWrs [[COPY]], [[COPY]], 72
+ ; CHECK: $w0 = COPY [[ADDWrs]]
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:gpr(s32) = COPY $w0
+ %1:gpr(s32) = G_CONSTANT i32 8
+ %2:gpr(s32) = G_LSHR %0, %1:gpr(s32)
+ %3:gpr(s32) = G_ADD %0, %2:gpr(s32)
+ $w0 = COPY %3:gpr(s32)
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: add_lshr_s32_lhs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-LABEL: name: add_lshr_s32_lhs
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK: %param2:gpr32 = COPY $w1
+ ; CHECK: [[ADDWrs:%[0-9]+]]:gpr32 = ADDWrs %param2, [[COPY]], 72
+ ; CHECK: $w0 = COPY [[ADDWrs]]
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:gpr(s32) = COPY $w0
+ %param2:gpr(s32) = COPY $w1
+ %1:gpr(s32) = G_CONSTANT i32 8
+ %2:gpr(s32) = G_LSHR %0, %1:gpr(s32)
+ %3:gpr(s32) = G_ADD %2, %param2:gpr(s32)
+ $w0 = COPY %3:gpr(s32)
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: sub_lshr_s32_rhs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0
+ ; CHECK-LABEL: name: sub_lshr_s32_rhs
+ ; CHECK: liveins: $w0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs [[COPY]], [[COPY]], 72, implicit-def $nzcv
+ ; CHECK: $w0 = COPY [[SUBSWrs]]
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:gpr(s32) = COPY $w0
+ %1:gpr(s32) = G_CONSTANT i32 8
+ %2:gpr(s32) = G_LSHR %0, %1:gpr(s32)
+ %3:gpr(s32) = G_SUB %0, %2:gpr(s32)
+ $w0 = COPY %3:gpr(s32)
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: add_ashr_s32_rhs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0
+ ; CHECK-LABEL: name: add_ashr_s32_rhs
+ ; CHECK: liveins: $w0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK: [[ADDWrs:%[0-9]+]]:gpr32 = ADDWrs [[COPY]], [[COPY]], 136
+ ; CHECK: $w0 = COPY [[ADDWrs]]
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:gpr(s32) = COPY $w0
+ %1:gpr(s32) = G_CONSTANT i32 8
+ %2:gpr(s32) = G_ASHR %0, %1:gpr(s32)
+ %3:gpr(s32) = G_ADD %0, %2:gpr(s32)
+ $w0 = COPY %3:gpr(s32)
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: add_ashr_s32_lhs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; CHECK-LABEL: name: add_ashr_s32_lhs
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK: %param2:gpr32 = COPY $w1
+ ; CHECK: [[ADDWrs:%[0-9]+]]:gpr32 = ADDWrs %param2, [[COPY]], 136
+ ; CHECK: $w0 = COPY [[ADDWrs]]
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:gpr(s32) = COPY $w0
+ %param2:gpr(s32) = COPY $w1
+ %1:gpr(s32) = G_CONSTANT i32 8
+ %2:gpr(s32) = G_ASHR %0, %1:gpr(s32)
+ %3:gpr(s32) = G_ADD %2, %param2:gpr(s32)
+ $w0 = COPY %3:gpr(s32)
+ RET_ReallyLR implicit $w0
+
+...
+---
+name: sub_ashr_s32_rhs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0
+ ; CHECK-LABEL: name: sub_ashr_s32_rhs
+ ; CHECK: liveins: $w0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0
+ ; CHECK: [[SUBSWrs:%[0-9]+]]:gpr32 = SUBSWrs [[COPY]], [[COPY]], 136, implicit-def $nzcv
+ ; CHECK: $w0 = COPY [[SUBSWrs]]
+ ; CHECK: RET_ReallyLR implicit $w0
+ %0:gpr(s32) = COPY $w0
+ %1:gpr(s32) = G_CONSTANT i32 8
+ %2:gpr(s32) = G_ASHR %0, %1:gpr(s32)
+ %3:gpr(s32) = G_SUB %0, %2:gpr(s32)
+ $w0 = COPY %3:gpr(s32)
+ RET_ReallyLR implicit $w0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-logical-shifted-reg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-logical-shifted-reg.mir
new file mode 100644
index 00000000000..96a35714d16
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-logical-shifted-reg.mir
@@ -0,0 +1,75 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64-unknown-unknown -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
+#
+# Test patterns related to logical_shifted_reg32/logical_shifted_reg64 which
+# are not shared with arith_shifted_reg32/arith_shifted_reg64.
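+#
+# Each test materializes "not" as (G_XOR x, -1). Feeding that from a G_SHL
+# lets the selector fold the whole pattern into the negated logical
+# instructions: G_AND of a not selects BICXrs, G_OR selects ORNXrs, and
+# G_XOR selects EONXrs, with the shift absorbed into the shifter immediate
+# (8 here, i.e. LSL #8 under the (shift-type << 6) | amount packing noted
+# in select-arith-shifted-reg.mir above).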
+
+---
+name: and_xor_bicxrs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $x0
+ ; CHECK-LABEL: name: and_xor_bicxrs
+ ; CHECK: liveins: $x0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK: [[BICXrs:%[0-9]+]]:gpr64 = BICXrs [[COPY]], [[COPY]], 8
+ ; CHECK: $x0 = COPY [[BICXrs]]
+ ; CHECK: RET_ReallyLR implicit $x0
+ %0:gpr(s64) = COPY $x0
+ %1:gpr(s64) = G_CONSTANT i64 8
+ %2:gpr(s64) = G_CONSTANT i64 -1
+ %3:gpr(s64) = G_SHL %0, %1:gpr(s64)
+ %4:gpr(s64) = G_XOR %3, %2:gpr(s64)
+ %5:gpr(s64) = G_AND %0, %4:gpr(s64)
+ $x0 = COPY %5:gpr(s64)
+ RET_ReallyLR implicit $x0
+...
+---
+name: or_xor_ornxrs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $x0
+ ; CHECK-LABEL: name: or_xor_ornxrs
+ ; CHECK: liveins: $x0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK: [[ORNXrs:%[0-9]+]]:gpr64 = ORNXrs [[COPY]], [[COPY]], 8
+ ; CHECK: $x0 = COPY [[ORNXrs]]
+ ; CHECK: RET_ReallyLR implicit $x0
+ %0:gpr(s64) = COPY $x0
+ %1:gpr(s64) = G_CONSTANT i64 8
+ %2:gpr(s64) = G_CONSTANT i64 -1
+ %3:gpr(s64) = G_SHL %0, %1:gpr(s64)
+ %4:gpr(s64) = G_XOR %3, %2:gpr(s64)
+ %5:gpr(s64) = G_OR %0, %4:gpr(s64)
+ $x0 = COPY %5:gpr(s64)
+ RET_ReallyLR implicit $x0
+...
+---
+name: xor_xor_eonxrs
+legalized: true
+regBankSelected: true
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $x0
+ ; CHECK-LABEL: name: xor_xor_eonxrs
+ ; CHECK: liveins: $x0
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+ ; CHECK: [[EONXrs:%[0-9]+]]:gpr64 = EONXrs [[COPY]], [[COPY]], 8
+ ; CHECK: $x0 = COPY [[EONXrs]]
+ ; CHECK: RET_ReallyLR implicit $x0
+ %0:gpr(s64) = COPY $x0
+ %1:gpr(s64) = G_CONSTANT i64 8
+ %2:gpr(s64) = G_CONSTANT i64 -1
+ %3:gpr(s64) = G_SHL %0, %1:gpr(s64)
+ %4:gpr(s64) = G_XOR %3, %2:gpr(s64)
+ %5:gpr(s64) = G_XOR %0, %4:gpr(s64)
+ $x0 = COPY %5:gpr(s64)
+ RET_ReallyLR implicit $x0
+...
diff --git a/llvm/test/CodeGen/AArch64/addsub-shifted.ll b/llvm/test/CodeGen/AArch64/addsub-shifted.ll
index 7c7d6545993..c0034024914 100644
--- a/llvm/test/CodeGen/AArch64/addsub-shifted.ll
+++ b/llvm/test/CodeGen/AArch64/addsub-shifted.ll
@@ -1,28 +1,35 @@
; RUN: llc -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 | FileCheck %s
+; RUN: llc -verify-machineinstrs %s -o - -mtriple=arm64-apple-ios7.0 -global-isel -pass-remarks-missed=gisel* 2>&1 | FileCheck %s --check-prefixes=GISEL,FALLBACK
+
+; FALLBACK-NOT: remark
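+
+; With -pass-remarks-missed=gisel* on the GISEL RUN line, GlobalISel emits
+; a "remark: ..." on stderr whenever it has to fall back to SelectionDAG,
+; so FALLBACK-NOT: remark asserts the whole file selects through
+; GlobalISel. Note that where SDAG picks sub/neg below, GlobalISel
+; currently selects the flag-setting subs/negs forms (SUBSWrs/SUBSXrs
+; defining $nzcv), which is why the GISEL lines check subs/negs.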
@var32 = global i32 0
@var64 = global i64 0
define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; CHECK-LABEL: test_lsl_arith:
+; GISEL-LABEL: test_lsl_arith:
%rhs1 = load volatile i32, i32* @var32
%shift1 = shl i32 %rhs1, 18
%val1 = add i32 %lhs32, %shift1
store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #18
+; GISEL: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #18
%rhs2 = load volatile i32, i32* @var32
%shift2 = shl i32 %rhs2, 31
%val2 = add i32 %shift2, %lhs32
store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
+; GISEL: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #31
%rhs3 = load volatile i32, i32* @var32
%shift3 = shl i32 %rhs3, 5
%val3 = sub i32 %lhs32, %shift3
store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #5
+; GISEL: subs {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #5
; Subtraction is not commutative!
%rhs4 = load volatile i32, i32* @var32
@@ -30,30 +37,35 @@ define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
%val4 = sub i32 %shift4, %lhs32
store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #19
+; GISEL-NOT: sub{{[s]?}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsl #19
%lhs4a = load volatile i32, i32* @var32
%shift4a = shl i32 %lhs4a, 15
%val4a = sub i32 0, %shift4a
store volatile i32 %val4a, i32* @var32
; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsl #15
+; GISEL: negs {{w[0-9]+}}, {{w[0-9]+}}, lsl #15
%rhs5 = load volatile i64, i64* @var64
%shift5 = shl i64 %rhs5, 18
%val5 = add i64 %lhs64, %shift5
store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #18
+; GISEL: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #18
%rhs6 = load volatile i64, i64* @var64
%shift6 = shl i64 %rhs6, 31
%val6 = add i64 %shift6, %lhs64
store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #31
+; GISEL: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #31
%rhs7 = load volatile i64, i64* @var64
%shift7 = shl i64 %rhs7, 5
%val7 = sub i64 %lhs64, %shift7
store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #5
+; GISEL: subs {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #5
; Subtraction is not commutative!
%rhs8 = load volatile i64, i64* @var64
@@ -61,12 +73,14 @@ define void @test_lsl_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
%val8 = sub i64 %shift8, %lhs64
store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #19
+; GISEL-NOT: sub{{[s]?}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsl #19
%lhs8a = load volatile i64, i64* @var64
%shift8a = shl i64 %lhs8a, 60
%val8a = sub i64 0, %shift8a
store volatile i64 %val8a, i64* @var64
; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsl #60
+; GISEL: negs {{x[0-9]+}}, {{x[0-9]+}}, lsl #60
ret void
; CHECK: ret
@@ -79,56 +93,67 @@ define void @test_lsr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
%val1 = add i32 %lhs32, %shift1
store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #18
+; GISEL: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #18
%shift2 = lshr i32 %rhs32, 31
%val2 = add i32 %shift2, %lhs32
store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #31
+; GISEL: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #31
%shift3 = lshr i32 %rhs32, 5
%val3 = sub i32 %lhs32, %shift3
store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #5
+; GISEL: subs {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #5
; Subtraction is not commutative!
%shift4 = lshr i32 %rhs32, 19
%val4 = sub i32 %shift4, %lhs32
store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #19
+; GISEL-NOT: sub{{[s]?}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, lsr #19
%shift4a = lshr i32 %lhs32, 15
%val4a = sub i32 0, %shift4a
store volatile i32 %val4a, i32* @var32
; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, lsr #15
+; GISEL: negs {{w[0-9]+}}, {{w[0-9]+}}, lsr #15
%shift5 = lshr i64 %rhs64, 18
%val5 = add i64 %lhs64, %shift5
store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #18
+; GISEL: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #18
%shift6 = lshr i64 %rhs64, 31
%val6 = add i64 %shift6, %lhs64
store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #31
+; GISEL: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #31
%shift7 = lshr i64 %rhs64, 5
%val7 = sub i64 %lhs64, %shift7
store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #5
+; GISEL: subs {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #5
; Subtraction is not commutative!
%shift8 = lshr i64 %rhs64, 19
%val8 = sub i64 %shift8, %lhs64
store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #19
+; GISEL-NOT: sub{{[s]?}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, lsr #19
%shift8a = lshr i64 %lhs64, 45
%val8a = sub i64 0, %shift8a
store volatile i64 %val8a, i64* @var64
; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, lsr #45
+; GISEL: negs {{x[0-9]+}}, {{x[0-9]+}}, lsr #45
ret void
; CHECK: ret
+; GISEL: ret
}
define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
@@ -138,53 +163,63 @@ define void @test_asr_arith(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
%val1 = add i32 %lhs32, %shift1
store volatile i32 %val1, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #18
+; GISEL: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #18
%shift2 = ashr i32 %rhs32, 31
%val2 = add i32 %shift2, %lhs32
store volatile i32 %val2, i32* @var32
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #31
+; GISEL: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #31
%shift3 = ashr i32 %rhs32, 5
%val3 = sub i32 %lhs32, %shift3
store volatile i32 %val3, i32* @var32
; CHECK: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #5
+; GISEL: subs {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #5
; Subtraction is not commutative!
%shift4 = ashr i32 %rhs32, 19
%val4 = sub i32 %shift4, %lhs32
store volatile i32 %val4, i32* @var32
; CHECK-NOT: sub {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #19
+; GISEL-NOT: sub{{[s]?}} {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, asr #19
%shift4a = ashr i32 %lhs32, 15
%val4a = sub i32 0, %shift4a
store volatile i32 %val4a, i32* @var32
; CHECK: neg {{w[0-9]+}}, {{w[0-9]+}}, asr #15
+; GISEL: negs {{w[0-9]+}}, {{w[0-9]+}}, asr #15
%shift5 = ashr i64 %rhs64, 18
%val5 = add i64 %lhs64, %shift5
store volatile i64 %val5, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #18
+; GISEL: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #18
%shift6 = ashr i64 %rhs64, 31
%val6 = add i64 %shift6, %lhs64
store volatile i64 %val6, i64* @var64
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #31
+; GISEL: add {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #31
%shift7 = ashr i64 %rhs64, 5
%val7 = sub i64 %lhs64, %shift7
store volatile i64 %val7, i64* @var64
; CHECK: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #5
+; GISEL: subs {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #5
; Subtraction is not commutative!
%shift8 = ashr i64 %rhs64, 19
%val8 = sub i64 %shift8, %lhs64
store volatile i64 %val8, i64* @var64
; CHECK-NOT: sub {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #19
+; GISEL-NOT: sub{{[s]?}} {{x[0-9]+}}, {{x[0-9]+}}, {{x[0-9]+}}, asr #19
%shift8a = ashr i64 %lhs64, 45
%val8a = sub i64 0, %shift8a
store volatile i64 %val8a, i64* @var64
; CHECK: neg {{x[0-9]+}}, {{x[0-9]+}}, asr #45
+; GISEL: negs {{x[0-9]+}}, {{x[0-9]+}}, asr #45
ret void
; CHECK: ret
@@ -253,6 +288,8 @@ define i32 @test_cmn(i32 %lhs32, i32 %rhs32, i64 %lhs64, i64 %rhs64) {
; 0 then the results will differ.
; CHECK: neg [[RHS:w[0-9]+]], {{w[0-9]+}}, lsl #13
; CHECK: cmp {{w[0-9]+}}, [[RHS]]
+; GISEL: negs [[RHS:w[0-9]+]], {{w[0-9]+}}, lsl #13
+; GISEL: cmp {{w[0-9]+}}, [[RHS]]
t2:
%shift2 = lshr i32 %rhs32, 20
@@ -276,6 +313,8 @@ t4:
; Again, it's important that cmn isn't used here in case %rhs64 == 0.
; CHECK: neg [[RHS:x[0-9]+]], {{x[0-9]+}}, lsl #43
; CHECK: cmp {{x[0-9]+}}, [[RHS]]
+; GISEL: negs [[RHS:x[0-9]+]], {{x[0-9]+}}, lsl #43
+; GISEL: cmp {{x[0-9]+}}, [[RHS]]
t5:
%shift5 = lshr i64 %rhs64, 20
@@ -297,5 +336,5 @@ end:
ret i32 0
; CHECK: ret
+; GISEL: ret
}
-
diff --git a/llvm/test/CodeGen/AArch64/eon.ll b/llvm/test/CodeGen/AArch64/eon.ll
index ea61ce34c05..29c4c8ffd20 100644
--- a/llvm/test/CodeGen/AArch64/eon.ll
+++ b/llvm/test/CodeGen/AArch64/eon.ll
@@ -1,4 +1,7 @@
; RUN: llc -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s
+; RUN: llc %s -pass-remarks-missed=gisel* -mtriple=aarch64-none-linux-gnu -global-isel -o - 2>&1 | FileCheck %s
+
+; CHECK-NOT: remark
; Check that the eon instruction is generated instead of eor,movn
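+; (eon is the negated form of eor: "eon x0, x1, x2, lsl #4" computes
+; x0 = x1 ^ ~(x2 << 4), absorbing the xor-with--1 that would otherwise
+; take a separate movn.)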
define i64 @test1(i64 %a, i64 %b, i64 %c) {