# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -march=amdgcn -mcpu=fiji -run-pass=instruction-select -verify-machineinstrs -o - %s | FileCheck %s -check-prefixes=GCN

---
name: fmul_f32
legalized: true
regBankSelected: true

body: |
  bb.0:
    liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr3_vgpr4

    ; GCN-LABEL: name: fmul_f32
    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
    ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
    ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
    ; GCN: [[COPY3:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
    ; GCN: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
    ; GCN: [[V_MUL_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
    ; GCN: [[V_MUL_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
    ; GCN: FLAT_STORE_DWORD [[COPY3]], [[V_MUL_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
    ; GCN: FLAT_STORE_DWORD [[COPY3]], [[V_MUL_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
    ; GCN: FLAT_STORE_DWORD [[COPY3]], [[V_MUL_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
    %0:sgpr(s32) = COPY $sgpr0
    %1:vgpr(s32) = COPY $vgpr0
    %2:vgpr(s32) = COPY $vgpr1
    %3:vgpr(p1) = COPY $vgpr3_vgpr4

    ; fmul vs
    %4:vgpr(s32) = G_FMUL %1, %0

    ; fmul sv
    %5:vgpr(s32) = G_FMUL %0, %1

    ; fmul vv
    %6:vgpr(s32) = G_FMUL %1, %2

    G_STORE %4, %3 :: (store 4, addrspace 1)
    G_STORE %5, %3 :: (store 4, addrspace 1)
    G_STORE %6, %3 :: (store 4, addrspace 1)
...

---
name: fmul_f64
legalized: true
regBankSelected: true

body: |
  bb.0:
    liveins: $sgpr0_sgpr1, $vgpr0_vgpr1, $vgpr2_vgpr3, $vgpr4_vgpr5

    ; GCN-LABEL: name: fmul_f64
    ; GCN: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
    ; GCN: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
    ; GCN: [[COPY2:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
    ; GCN: [[V_MUL_F64_:%[0-9]+]]:vreg_64 = V_MUL_F64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
    ; GCN: [[V_MUL_F64_1:%[0-9]+]]:vreg_64 = V_MUL_F64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
    ; GCN: [[V_MUL_F64_2:%[0-9]+]]:vreg_64 = V_MUL_F64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
    ; GCN: S_ENDPGM 0, implicit [[V_MUL_F64_]], implicit [[V_MUL_F64_1]], implicit [[V_MUL_F64_2]]
    %0:sgpr(s64) = COPY $sgpr0_sgpr1
    %1:vgpr(s64) = COPY $vgpr0_vgpr1
    %2:vgpr(s64) = COPY $vgpr2_vgpr3
    %3:vgpr(p1) = COPY $vgpr4_vgpr5

    ; fmul vs
    %4:vgpr(s64) = G_FMUL %1, %0

    ; fmul sv
    %5:vgpr(s64) = G_FMUL %0, %1

    ; fmul vv
    %6:vgpr(s64) = G_FMUL %1, %2

    S_ENDPGM 0, implicit %4, implicit %5, implicit %6
...
---
name: fmul_f16
legalized: true
regBankSelected: true

body: |
  bb.0:
    liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr3_vgpr4

    ; GCN-LABEL: name: fmul_f16
    ; GCN: [[COPY:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
    ; GCN: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
    ; GCN: [[COPY2:%[0-9]+]]:vgpr_32 = COPY $vgpr1
    ; GCN: [[V_MUL_F16_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F16_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $exec
    ; GCN: [[V_MUL_F16_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_F16_e64 0, [[COPY]], 0, [[COPY1]], 0, 0, implicit $exec
    ; GCN: [[V_MUL_F16_e64_2:%[0-9]+]]:vgpr_32 = V_MUL_F16_e64 0, [[COPY1]], 0, [[COPY2]], 0, 0, implicit $exec
    ; GCN: S_ENDPGM 0, implicit [[V_MUL_F16_e64_]], implicit [[V_MUL_F16_e64_1]], implicit [[V_MUL_F16_e64_2]]
    %0:sgpr(s32) = COPY $sgpr0
    %1:vgpr(s32) = COPY $vgpr0
    %2:vgpr(s32) = COPY $vgpr1
    %3:vgpr(p1) = COPY $vgpr3_vgpr4
    %4:sgpr(s16) = G_TRUNC %0
    %5:vgpr(s16) = G_TRUNC %1
    %6:vgpr(s16) = G_TRUNC %2

    ; fmul vs
    %8:vgpr(s16) = G_FMUL %5, %4

    ; fmul sv
    %9:vgpr(s16) = G_FMUL %4, %5

    ; fmul vv
    %10:vgpr(s16) = G_FMUL %5, %6

    S_ENDPGM 0, implicit %8, implicit %9, implicit %10
...

---
name: fmul_modifiers_f32
legalized: true
regBankSelected: true

body: |
  bb.0:
    liveins: $vgpr0, $vgpr1, $vgpr2_vgpr3

    ; GCN-LABEL: name: fmul_modifiers_f32
    ; GCN: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
    ; GCN: [[COPY1:%[0-9]+]]:vreg_64 = COPY $vgpr2_vgpr3
    ; GCN: [[V_MUL_F32_e64_:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 2, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec
    ; GCN: [[V_MUL_F32_e64_1:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 2, [[COPY]], 0, 0, implicit $exec
    ; GCN: [[V_MUL_F32_e64_2:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 2, [[COPY]], 2, [[COPY]], 0, 0, implicit $exec
    ; GCN: [[V_MUL_F32_e64_3:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 1, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec
    ; GCN: [[V_MUL_F32_e64_4:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 1, [[COPY]], 0, 0, implicit $exec
    ; GCN: [[V_MUL_F32_e64_5:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 1, [[COPY]], 1, [[COPY]], 0, 0, implicit $exec
    ; GCN: [[V_MUL_F32_e64_6:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 3, [[COPY]], 0, [[COPY]], 0, 0, implicit $exec
    ; GCN: [[V_MUL_F32_e64_7:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 0, [[COPY]], 3, [[COPY]], 0, 0, implicit $exec
    ; GCN: [[V_MUL_F32_e64_8:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 3, [[COPY]], 3, [[COPY]], 0, 0, implicit $exec
    ; GCN: [[V_MUL_F32_e64_9:%[0-9]+]]:vgpr_32 = V_MUL_F32_e64 3, [[COPY]], 1, [[COPY]], 0, 0, implicit $exec
    ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
    ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_1]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
    ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_2]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
    ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_3]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
    ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_4]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
    ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_5]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
    ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_6]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
    ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_7]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
    ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_8]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
    ; GCN: FLAT_STORE_DWORD [[COPY1]], [[V_MUL_F32_e64_9]], 0, 0, 0, 0, implicit $exec, implicit $flat_scr
    %0:vgpr(s32) = COPY $vgpr0
    %1:vgpr(s32) = COPY $vgpr1
    %2:vgpr(p1) = COPY $vgpr2_vgpr3
    %3:vgpr(s32) = G_FABS %0
    %4:vgpr(s32) = G_FNEG %0
    %5:vgpr(s32) = G_FNEG %3

    ; fabs lhs
    %6:vgpr(s32) = G_FMUL %3, %0

    ; fabs rhs
    %7:vgpr(s32) = G_FMUL %0, %3

    ; fabs lhs, rhs
    %8:vgpr(s32) = G_FMUL %3, %3

    ; fneg lhs
    %9:vgpr(s32) = G_FMUL %4, %0

    ; fneg rhs
    %10:vgpr(s32) = G_FMUL %0, %4

    ; fneg lhs, rhs
    %11:vgpr(s32) = G_FMUL %4, %4

    ; fneg fabs lhs
    %12:vgpr(s32) = G_FMUL %5, %0

    ; fneg fabs rhs
    %13:vgpr(s32) = G_FMUL %0, %5

    ; fneg fabs lhs, rhs
    %14:vgpr(s32) = G_FMUL %5, %5

    ; fneg fabs lhs, fneg rhs
    %15:vgpr(s32) = G_FMUL %5, %4

    G_STORE %6, %2 :: (store 4, addrspace 1)
    G_STORE %7, %2 :: (store 4, addrspace 1)
    G_STORE %8, %2 :: (store 4, addrspace 1)
    G_STORE %9, %2 :: (store 4, addrspace 1)
    G_STORE %10, %2 :: (store 4, addrspace 1)
    G_STORE %11, %2 :: (store 4, addrspace 1)
    G_STORE %12, %2 :: (store 4, addrspace 1)
    G_STORE %13, %2 :: (store 4, addrspace 1)
    G_STORE %14, %2 :: (store 4, addrspace 1)
    G_STORE %15, %2 :: (store 4, addrspace 1)
...