diff options
Diffstat (limited to 'llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir')
-rw-r--r-- | llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir | 176 |
1 file changed, 176 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir b/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir index 3a071a3f806..af830b721d3 100644 --- a/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir +++ b/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir @@ -7,6 +7,12 @@ define void @more_than_one_use(i64* %addr) { ret void } define void @ldrxrox_shl(i64* %addr) { ret void } define void @ldrdrox_shl(i64* %addr) { ret void } + define void @ldrxrox_mul_rhs(i64* %addr) { ret void } + define void @ldrdrox_mul_rhs(i64* %addr) { ret void } + define void @ldrxrox_mul_lhs(i64* %addr) { ret void } + define void @ldrdrox_mul_lhs(i64* %addr) { ret void } + define void @mul_not_pow_2(i64* %addr) { ret void } + define void @mul_wrong_pow_2(i64* %addr) { ret void } define void @more_than_one_use_shl_1(i64* %addr) { ret void } define void @more_than_one_use_shl_2(i64* %addr) { ret void } define void @more_than_one_use_shl_lsl_fast(i64* %addr) #1 { ret void } @@ -154,6 +160,176 @@ body: | ... --- +name: ldrxrox_mul_rhs +alignment: 2 +legalized: true +regBankSelected: true +tracksRegLiveness: true +machineFunctionInfo: {} +body: | + bb.0: + liveins: $x0, $x1, $x2 + ; CHECK-LABEL: name: ldrxrox_mul_rhs + ; CHECK: liveins: $x0, $x1, $x2 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1 + ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr) + ; CHECK: $x2 = COPY [[LDRXroX]] + ; CHECK: RET_ReallyLR implicit $x2 + %0:gpr(s64) = COPY $x0 + %1:gpr(s64) = G_CONSTANT i64 8 + %2:gpr(s64) = G_MUL %0, %1(s64) + %3:gpr(p0) = COPY $x1 + %4:gpr(p0) = G_GEP %3, %2 + %5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr) + $x2 = COPY %5(s64) + RET_ReallyLR implicit $x2 + +... 
+--- +name: ldrdrox_mul_rhs +alignment: 2 +legalized: true +regBankSelected: true +tracksRegLiveness: true +machineFunctionInfo: {} +body: | + bb.0: + liveins: $x0, $x1, $d2 + ; CHECK-LABEL: name: ldrdrox_mul_rhs + ; CHECK: liveins: $x0, $x1, $d2 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1 + ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr) + ; CHECK: $d2 = COPY [[LDRDroX]] + ; CHECK: RET_ReallyLR implicit $d2 + %0:gpr(s64) = COPY $x0 + %1:gpr(s64) = G_CONSTANT i64 8 + %2:gpr(s64) = G_MUL %0, %1(s64) + %3:gpr(p0) = COPY $x1 + %4:gpr(p0) = G_GEP %3, %2 + %5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr) + $d2 = COPY %5(s64) + RET_ReallyLR implicit $d2 + +... +--- +name: ldrxrox_mul_lhs +alignment: 2 +legalized: true +regBankSelected: true +tracksRegLiveness: true +machineFunctionInfo: {} +body: | + bb.0: + liveins: $x0, $x1, $x2 + ; CHECK-LABEL: name: ldrxrox_mul_lhs + ; CHECK: liveins: $x0, $x1, $x2 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1 + ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr) + ; CHECK: $x2 = COPY [[LDRXroX]] + ; CHECK: RET_ReallyLR implicit $x2 + %0:gpr(s64) = COPY $x0 + %1:gpr(s64) = G_CONSTANT i64 8 + %2:gpr(s64) = G_MUL %1, %0(s64) + %3:gpr(p0) = COPY $x1 + %4:gpr(p0) = G_GEP %3, %2 + %5:gpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr) + $x2 = COPY %5(s64) + RET_ReallyLR implicit $x2 + +... 
+--- +name: ldrdrox_mul_lhs +alignment: 2 +legalized: true +regBankSelected: true +tracksRegLiveness: true +machineFunctionInfo: {} +body: | + bb.0: + liveins: $x0, $x1, $d2 + ; CHECK-LABEL: name: ldrdrox_mul_lhs + ; CHECK: liveins: $x0, $x1, $d2 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1 + ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load 8 from %ir.addr) + ; CHECK: $d2 = COPY [[LDRDroX]] + ; CHECK: RET_ReallyLR implicit $d2 + %0:gpr(s64) = COPY $x0 + %1:gpr(s64) = G_CONSTANT i64 8 + %2:gpr(s64) = G_MUL %1, %0(s64) + %3:gpr(p0) = COPY $x1 + %4:gpr(p0) = G_GEP %3, %2 + %5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr) + $d2 = COPY %5(s64) + RET_ReallyLR implicit $d2 + +... +--- +name: mul_not_pow_2 +alignment: 2 +legalized: true +regBankSelected: true +tracksRegLiveness: true +machineFunctionInfo: {} +body: | + bb.0: + ; Show that we don't get a shifted load from a mul when we don't have a + ; power of 2. (The bit isn't set on the load.) + liveins: $x0, $x1, $d2 + ; CHECK-LABEL: name: mul_not_pow_2 + ; CHECK: liveins: $x0, $x1, $d2 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 7 + ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[MOVi64imm]], [[COPY]], $xzr + ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1 + ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load 8 from %ir.addr) + ; CHECK: $d2 = COPY [[LDRDroX]] + ; CHECK: RET_ReallyLR implicit $d2 + %0:gpr(s64) = COPY $x0 + %1:gpr(s64) = G_CONSTANT i64 7 + %2:gpr(s64) = G_MUL %1, %0(s64) + %3:gpr(p0) = COPY $x1 + %4:gpr(p0) = G_GEP %3, %2 + %5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr) + $d2 = COPY %5(s64) + RET_ReallyLR implicit $d2 + +... 
+--- +name: mul_wrong_pow_2 +alignment: 2 +legalized: true +regBankSelected: true +tracksRegLiveness: true +machineFunctionInfo: {} +body: | + bb.0: + ; Show that we don't get a shifted load from a mul when we don't have + ; the right power of 2. (The bit isn't set on the load.) + liveins: $x0, $x1, $d2 + ; CHECK-LABEL: name: mul_wrong_pow_2 + ; CHECK: liveins: $x0, $x1, $d2 + ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 + ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 16 + ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[MOVi64imm]], [[COPY]], $xzr + ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1 + ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load 8 from %ir.addr) + ; CHECK: $d2 = COPY [[LDRDroX]] + ; CHECK: RET_ReallyLR implicit $d2 + %0:gpr(s64) = COPY $x0 + %1:gpr(s64) = G_CONSTANT i64 16 + %2:gpr(s64) = G_MUL %1, %0(s64) + %3:gpr(p0) = COPY $x1 + %4:gpr(p0) = G_GEP %3, %2 + %5:fpr(s64) = G_LOAD %4(p0) :: (load 8 from %ir.addr) + $d2 = COPY %5(s64) + RET_ReallyLR implicit $d2 + +... +--- name: more_than_one_use_shl_1 alignment: 2 legalized: true |