Diffstat (limited to 'llvm/test')
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fadd.f16.ll | 150
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fcmp.f16.ll | 744
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fdiv.f16.ll | 31
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fmul.f16.ll | 150
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fp_to_sint.ll | 10
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fp_to_uint.ll | 10
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fpext.f16.ll | 70
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fptosi.f16.ll | 112
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fptoui.f16.ll | 113
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll | 72
-rw-r--r-- llvm/test/CodeGen/AMDGPU/fsub.f16.ll | 150
-rw-r--r-- llvm/test/CodeGen/AMDGPU/half.ll | 30
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.class.f16.ll | 155
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cos.f16.ll | 18
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.div.fixup.f16.ll | 129
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fract.f16.ll | 18
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.exp.f16.ll | 18
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.mant.f16.ll | 18
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll | 45
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.f16.ll | 18
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.f16.ll | 18
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sin.f16.ll | 18
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.ceil.f16.ll | 49
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.cos.f16.ll | 55
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.exp2.f16.ll | 49
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.floor.f16.ll | 49
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.fma.f16.ll | 235
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.fmuladd.f16.ll | 116
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.log2.f16.ll | 49
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll | 153
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.minnum.f16.ll | 153
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.rint.f16.ll | 49
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.sin.f16.ll | 55
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.sqrt.f16.ll | 49
-rw-r--r-- llvm/test/CodeGen/AMDGPU/llvm.trunc.f16.ll | 49
-rw-r--r-- llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll | 62
-rw-r--r-- llvm/test/CodeGen/AMDGPU/sitofp.f16.ll | 87
-rw-r--r-- llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll | 57
-rw-r--r-- llvm/test/CodeGen/AMDGPU/uitofp.f16.ll | 87
-rw-r--r-- llvm/test/CodeGen/AMDGPU/v_mac_f16.ll | 608
-rw-r--r-- llvm/test/CodeGen/AMDGPU/v_madak_f16.ll | 50
-rw-r--r-- llvm/test/MC/Disassembler/AMDGPU/sdwa_vi.txt | 4
42 files changed, 4124 insertions(+), 38 deletions(-)
diff --git a/llvm/test/CodeGen/AMDGPU/fadd.f16.ll b/llvm/test/CodeGen/AMDGPU/fadd.f16.ll
new file mode 100644
index 00000000000..b2afc054ce1
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fadd.f16.ll
@@ -0,0 +1,150 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}fadd_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_add_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_add_f16_e32 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @fadd_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fadd half %a.val, %b.val
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fadd_f16_imm_a
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], 0x3c00{{$}}
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_add_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]], v[[B_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_add_f16_e32 v[[R_F16:[0-9]+]], 0x3c00, v[[B_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @fadd_f16_imm_a(
+ half addrspace(1)* %r,
+ half addrspace(1)* %b) {
+entry:
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fadd half 1.0, %b.val
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fadd_f16_imm_b
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], 0x4000{{$}}
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_add_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_add_f16_e32 v[[R_F16:[0-9]+]], 0x4000, v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @fadd_f16_imm_b(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = fadd half %a.val, 2.0
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fadd_v2f16
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_add_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_add_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_add_f16_e32 v[[R_F16_0:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
+; VI: v_add_f16_e32 v[[R_F16_1:[0-9]+]], v[[B_F16_1]], v[[A_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @fadd_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = fadd <2 x half> %a.val, %b.val
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fadd_v2f16_imm_a
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], 0x3c00{{$}}
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], 0x4000{{$}}
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_add_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_add_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_add_f16_e32 v[[R_F16_0:[0-9]+]], 0x3c00, v[[B_V2_F16]]
+; VI: v_add_f16_e32 v[[R_F16_1:[0-9]+]], 0x4000, v[[B_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @fadd_v2f16_imm_a(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = fadd <2 x half> <half 1.0, half 2.0>, %b.val
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fadd_v2f16_imm_b
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], 0x4000{{$}}
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], 0x3c00{{$}}
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_add_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_add_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_add_f16_e32 v[[R_F16_0:[0-9]+]], 0x4000, v[[A_V2_F16]]
+; VI: v_add_f16_e32 v[[R_F16_1:[0-9]+]], 0x3c00, v[[A_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @fadd_v2f16_imm_b(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = fadd <2 x half> %a.val, <half 2.0, half 1.0>
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/fcmp.f16.ll b/llvm/test/CodeGen/AMDGPU/fcmp.f16.ll
new file mode 100644
index 00000000000..8c7277acbde
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fcmp.f16.ll
@@ -0,0 +1,744 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}fcmp_f16_lt
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cmp_lt_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
+; VI: v_cmp_lt_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
+; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
+; GCN: buffer_store_dword v[[R_I32]]
+; GCN: s_endpgm
+define void @fcmp_f16_lt(
+ i32 addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fcmp olt half %a.val, %b.val
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_f16_eq
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cmp_eq_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
+; VI: v_cmp_eq_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
+; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
+; GCN: buffer_store_dword v[[R_I32]]
+; GCN: s_endpgm
+define void @fcmp_f16_eq(
+ i32 addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fcmp oeq half %a.val, %b.val
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_f16_le
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cmp_le_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
+; VI: v_cmp_le_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
+; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
+; GCN: buffer_store_dword v[[R_I32]]
+; GCN: s_endpgm
+define void @fcmp_f16_le(
+ i32 addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fcmp ole half %a.val, %b.val
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_f16_gt
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cmp_gt_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
+; VI: v_cmp_gt_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
+; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
+; GCN: buffer_store_dword v[[R_I32]]
+; GCN: s_endpgm
+define void @fcmp_f16_gt(
+ i32 addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fcmp ogt half %a.val, %b.val
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_f16_lg
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cmp_lg_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
+; VI: v_cmp_lg_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
+; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
+; GCN: buffer_store_dword v[[R_I32]]
+; GCN: s_endpgm
+define void @fcmp_f16_lg(
+ i32 addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fcmp one half %a.val, %b.val
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_f16_ge
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cmp_ge_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
+; VI: v_cmp_ge_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
+; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
+; GCN: buffer_store_dword v[[R_I32]]
+; GCN: s_endpgm
+define void @fcmp_f16_ge(
+ i32 addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fcmp oge half %a.val, %b.val
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_f16_o
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cmp_o_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
+; VI: v_cmp_o_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
+; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
+; GCN: buffer_store_dword v[[R_I32]]
+; GCN: s_endpgm
+define void @fcmp_f16_o(
+ i32 addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fcmp ord half %a.val, %b.val
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_f16_u
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cmp_u_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
+; VI: v_cmp_u_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
+; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
+; GCN: buffer_store_dword v[[R_I32]]
+; GCN: s_endpgm
+define void @fcmp_f16_u(
+ i32 addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fcmp uno half %a.val, %b.val
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_f16_nge
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cmp_nge_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
+; VI: v_cmp_nge_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
+; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
+; GCN: buffer_store_dword v[[R_I32]]
+; GCN: s_endpgm
+define void @fcmp_f16_nge(
+ i32 addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fcmp ult half %a.val, %b.val
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_f16_nlg
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cmp_nlg_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
+; VI: v_cmp_nlg_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
+; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
+; GCN: buffer_store_dword v[[R_I32]]
+; GCN: s_endpgm
+define void @fcmp_f16_nlg(
+ i32 addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fcmp ueq half %a.val, %b.val
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_f16_ngt
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cmp_ngt_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
+; VI: v_cmp_ngt_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
+; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
+; GCN: buffer_store_dword v[[R_I32]]
+; GCN: s_endpgm
+define void @fcmp_f16_ngt(
+ i32 addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fcmp ule half %a.val, %b.val
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_f16_nle
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cmp_nle_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
+; VI: v_cmp_nle_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
+; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
+; GCN: buffer_store_dword v[[R_I32]]
+; GCN: s_endpgm
+define void @fcmp_f16_nle(
+ i32 addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fcmp ugt half %a.val, %b.val
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_f16_neq
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cmp_neq_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
+; VI: v_cmp_neq_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
+; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
+; GCN: buffer_store_dword v[[R_I32]]
+; GCN: s_endpgm
+define void @fcmp_f16_neq(
+ i32 addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fcmp une half %a.val, %b.val
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_f16_nlt
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cmp_nlt_f32_e32 vcc, v[[A_F32]], v[[B_F32]]
+; VI: v_cmp_nlt_f16_e32 vcc, v[[A_F16]], v[[B_F16]]
+; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
+; GCN: buffer_store_dword v[[R_I32]]
+; GCN: s_endpgm
+define void @fcmp_f16_nlt(
+ i32 addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fcmp uge half %a.val, %b.val
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_v2f16_lt
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_cmp_lt_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_cmp_lt_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
+; VI: v_cmp_lt_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
+; VI: v_cmp_lt_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
+; GCN: s_endpgm
+define void @fcmp_v2f16_lt(
+ <2 x i32> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = fcmp olt <2 x half> %a.val, %b.val
+ %r.val.sext = sext <2 x i1> %r.val to <2 x i32>
+ store <2 x i32> %r.val.sext, <2 x i32> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_v2f16_eq
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_cmp_eq_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_cmp_eq_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
+; VI: v_cmp_eq_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
+; VI: v_cmp_eq_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
+; GCN: s_endpgm
+define void @fcmp_v2f16_eq(
+ <2 x i32> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = fcmp oeq <2 x half> %a.val, %b.val
+ %r.val.sext = sext <2 x i1> %r.val to <2 x i32>
+ store <2 x i32> %r.val.sext, <2 x i32> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_v2f16_le
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_cmp_le_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_cmp_le_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
+; VI: v_cmp_le_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
+; VI: v_cmp_le_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
+; GCN: s_endpgm
+define void @fcmp_v2f16_le(
+ <2 x i32> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = fcmp ole <2 x half> %a.val, %b.val
+ %r.val.sext = sext <2 x i1> %r.val to <2 x i32>
+ store <2 x i32> %r.val.sext, <2 x i32> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_v2f16_gt
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_cmp_gt_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_cmp_gt_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
+; VI: v_cmp_gt_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
+; VI: v_cmp_gt_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
+; GCN: s_endpgm
+define void @fcmp_v2f16_gt(
+ <2 x i32> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = fcmp ogt <2 x half> %a.val, %b.val
+ %r.val.sext = sext <2 x i1> %r.val to <2 x i32>
+ store <2 x i32> %r.val.sext, <2 x i32> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_v2f16_lg
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_cmp_lg_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_cmp_lg_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
+; VI: v_cmp_lg_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
+; VI: v_cmp_lg_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
+; GCN: s_endpgm
+define void @fcmp_v2f16_lg(
+ <2 x i32> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = fcmp one <2 x half> %a.val, %b.val
+ %r.val.sext = sext <2 x i1> %r.val to <2 x i32>
+ store <2 x i32> %r.val.sext, <2 x i32> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_v2f16_ge
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_cmp_ge_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_cmp_ge_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
+; VI: v_cmp_ge_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
+; VI: v_cmp_ge_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
+; GCN: s_endpgm
+define void @fcmp_v2f16_ge(
+ <2 x i32> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = fcmp oge <2 x half> %a.val, %b.val
+ %r.val.sext = sext <2 x i1> %r.val to <2 x i32>
+ store <2 x i32> %r.val.sext, <2 x i32> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_v2f16_o
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_cmp_o_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_cmp_o_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
+; VI: v_cmp_o_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
+; VI: v_cmp_o_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
+; GCN: s_endpgm
+define void @fcmp_v2f16_o(
+ <2 x i32> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = fcmp ord <2 x half> %a.val, %b.val
+ %r.val.sext = sext <2 x i1> %r.val to <2 x i32>
+ store <2 x i32> %r.val.sext, <2 x i32> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_v2f16_u
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_cmp_u_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_cmp_u_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
+; VI: v_cmp_u_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
+; VI: v_cmp_u_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
+; GCN: s_endpgm
+define void @fcmp_v2f16_u(
+ <2 x i32> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = fcmp uno <2 x half> %a.val, %b.val
+ %r.val.sext = sext <2 x i1> %r.val to <2 x i32>
+ store <2 x i32> %r.val.sext, <2 x i32> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_v2f16_nge
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_cmp_nge_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_cmp_nge_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
+; VI: v_cmp_nge_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
+; VI: v_cmp_nge_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
+; GCN: s_endpgm
+define void @fcmp_v2f16_nge(
+ <2 x i32> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = fcmp ult <2 x half> %a.val, %b.val
+ %r.val.sext = sext <2 x i1> %r.val to <2 x i32>
+ store <2 x i32> %r.val.sext, <2 x i32> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_v2f16_nlg
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_cmp_nlg_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_cmp_nlg_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
+; VI: v_cmp_nlg_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
+; VI: v_cmp_nlg_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
+; GCN: s_endpgm
+define void @fcmp_v2f16_nlg(
+ <2 x i32> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = fcmp ueq <2 x half> %a.val, %b.val
+ %r.val.sext = sext <2 x i1> %r.val to <2 x i32>
+ store <2 x i32> %r.val.sext, <2 x i32> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_v2f16_ngt
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_cmp_ngt_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_cmp_ngt_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
+; VI: v_cmp_ngt_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
+; VI: v_cmp_ngt_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
+; GCN: s_endpgm
+define void @fcmp_v2f16_ngt(
+ <2 x i32> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = fcmp ule <2 x half> %a.val, %b.val
+ %r.val.sext = sext <2 x i1> %r.val to <2 x i32>
+ store <2 x i32> %r.val.sext, <2 x i32> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_v2f16_nle
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_cmp_nle_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_cmp_nle_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
+; VI: v_cmp_nle_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
+; VI: v_cmp_nle_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
+; GCN: s_endpgm
+define void @fcmp_v2f16_nle(
+ <2 x i32> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = fcmp ugt <2 x half> %a.val, %b.val
+ %r.val.sext = sext <2 x i1> %r.val to <2 x i32>
+ store <2 x i32> %r.val.sext, <2 x i32> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_v2f16_neq
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_cmp_neq_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_cmp_neq_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
+; VI: v_cmp_neq_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
+; VI: v_cmp_neq_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
+; GCN: s_endpgm
+define void @fcmp_v2f16_neq(
+ <2 x i32> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = fcmp une <2 x half> %a.val, %b.val
+ %r.val.sext = sext <2 x i1> %r.val to <2 x i32>
+ store <2 x i32> %r.val.sext, <2 x i32> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fcmp_v2f16_nlt
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_cmp_nlt_f32_e32 vcc, v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_cmp_nlt_f32_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F32_1]], v[[B_F32_1]]
+; VI: v_cmp_nlt_f16_e32 vcc, v[[A_V2_F16]], v[[B_V2_F16]]
+; VI: v_cmp_nlt_f16_e64 s[{{[0-9]+}}:{{[0-9]+}}], v[[A_F16_1]], v[[B_F16_1]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_0:[0-9]+]]
+; GCN: v_cndmask_b32_e64 v[[R_I32_1:[0-9]+]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[R_I32_0]]:[[R_I32_1]]{{\]}}
+; GCN: s_endpgm
+define void @fcmp_v2f16_nlt(
+ <2 x i32> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = fcmp uge <2 x half> %a.val, %b.val
+ %r.val.sext = sext <2 x i1> %r.val to <2 x i32>
+ store <2 x i32> %r.val.sext, <2 x i32> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll b/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
new file mode 100644
index 00000000000..bad04326193
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fdiv.f16.ll
@@ -0,0 +1,31 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; Make sure fdiv is promoted to f32.
+
+; GCN-LABEL: {{^}}fdiv_f16
+; GCN: v_cvt_f32_f16
+; GCN: v_cvt_f32_f16
+; GCN: v_div_scale_f32
+; GCN-DAG: v_div_scale_f32
+; GCN-DAG: v_rcp_f32
+; GCN: v_fma_f32
+; GCN: v_fma_f32
+; GCN: v_mul_f32
+; GCN: v_fma_f32
+; GCN: v_fma_f32
+; GCN: v_fma_f32
+; GCN: v_div_fmas_f32
+; GCN: v_div_fixup_f32
+; GCN: v_cvt_f16_f32
+define void @fdiv_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fdiv half %a.val, %b.val
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/fmul.f16.ll b/llvm/test/CodeGen/AMDGPU/fmul.f16.ll
new file mode 100644
index 00000000000..da0e01d6a7f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fmul.f16.ll
@@ -0,0 +1,150 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}fmul_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_mul_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_mul_f16_e32 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @fmul_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fmul half %a.val, %b.val
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fmul_f16_imm_a
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], 0x4200{{$}}
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_mul_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]], v[[B_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_mul_f16_e32 v[[R_F16:[0-9]+]], 0x4200, v[[B_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @fmul_f16_imm_a(
+ half addrspace(1)* %r,
+ half addrspace(1)* %b) {
+entry:
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fmul half 3.0, %b.val
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fmul_f16_imm_b
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], 0x4400{{$}}
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_mul_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_mul_f16_e32 v[[R_F16:[0-9]+]], 0x4400, v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @fmul_f16_imm_b(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = fmul half %a.val, 4.0
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fmul_v2f16
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_mul_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_mul_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_mul_f16_e32 v[[R_F16_0:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
+; VI: v_mul_f16_e32 v[[R_F16_1:[0-9]+]], v[[B_F16_1]], v[[A_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @fmul_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = fmul <2 x half> %a.val, %b.val
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fmul_v2f16_imm_a
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], 0x4200{{$}}
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], 0x4400{{$}}
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_mul_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_mul_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_mul_f16_e32 v[[R_F16_0:[0-9]+]], 0x4200, v[[B_V2_F16]]
+; VI: v_mul_f16_e32 v[[R_F16_1:[0-9]+]], 0x4400, v[[B_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @fmul_v2f16_imm_a(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = fmul <2 x half> <half 3.0, half 4.0>, %b.val
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fmul_v2f16_imm_b
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], 0x4400{{$}}
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], 0x4200{{$}}
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_mul_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_mul_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_mul_f16_e32 v[[R_F16_0:[0-9]+]], 0x4400, v[[A_V2_F16]]
+; VI: v_mul_f16_e32 v[[R_F16_1:[0-9]+]], 0x4200, v[[A_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @fmul_v2f16_imm_b(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = fmul <2 x half> %a.val, <half 4.0, half 3.0>
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll b/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll
index 2a804baca3d..a273f349784 100644
--- a/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll
+++ b/llvm/test/CodeGen/AMDGPU/fp_to_sint.ll
@@ -1,5 +1,5 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s --check-prefix=SI --check-prefix=FUNC
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s --check-prefix=SI --check-prefix=FUNC --check-prefix=GCN
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s --check-prefix=VI --check-prefix=FUNC --check-prefix=GCN
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck %s --check-prefix=EG --check-prefix=FUNC
declare float @llvm.fabs.f32(float) #1
@@ -249,8 +249,10 @@ define void @fp_to_uint_fabs_f32_to_i1(i1 addrspace(1)* %out, float %in) #0 {
}
; FUNC-LABEL: {{^}}fp_to_sint_f32_i16:
-; SI: v_cvt_i32_f32_e32 [[VAL:v[0-9]+]], s{{[0-9]+}}
-; SI: buffer_store_short [[VAL]]
+; SI: v_cvt_i32_f32_e32 v[[VAL:[0-9]+]], s{{[0-9]+}}
+; VI: v_cvt_f16_f32_e32 v[[IN_F16:[0-9]+]], s{{[0-9]+}}
+; VI: v_cvt_i16_f16_e32 v[[VAL:[0-9]+]], v[[IN_F16]]
+; SI: buffer_store_short v[[VAL]]
define void @fp_to_sint_f32_i16(i16 addrspace(1)* %out, float %in) #0 {
%sint = fptosi float %in to i16
store i16 %sint, i16 addrspace(1)* %out
diff --git a/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll b/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll
index d089c291f46..d1fc9fadebd 100644
--- a/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll
+++ b/llvm/test/CodeGen/AMDGPU/fp_to_uint.ll
@@ -240,12 +240,10 @@ define void @fp_to_uint_fabs_f32_to_i1(i1 addrspace(1)* %out, float %in) #0 {
}
; FUNC-LABEL: {{^}}fp_to_uint_f32_to_i16:
-; The reason different instructions are used on SI and VI is because for
-; SI fp_to_uint is legalized by the type legalizer and for VI it is
-; legalized by the dag legalizer and they legalize fp_to_uint differently.
-; SI: v_cvt_u32_f32_e32 [[VAL:v[0-9]+]], s{{[0-9]+}}
-; VI: v_cvt_i32_f32_e32 [[VAL:v[0-9]+]], s{{[0-9]+}}
-; GCN: buffer_store_short [[VAL]]
+; SI: v_cvt_u32_f32_e32 v[[VAL:[0-9]+]], s{{[0-9]+}}
+; VI: v_cvt_f16_f32_e32 v[[IN_F16:[0-9]+]], s{{[0-9]+}}
+; VI: v_cvt_u16_f16_e32 v[[VAL:[0-9]+]], v[[IN_F16]]
+; GCN: buffer_store_short v[[VAL]]
define void @fp_to_uint_f32_to_i16(i16 addrspace(1)* %out, float %in) #0 {
%uint = fptoui float %in to i16
store i16 %uint, i16 addrspace(1)* %out
diff --git a/llvm/test/CodeGen/AMDGPU/fpext.f16.ll b/llvm/test/CodeGen/AMDGPU/fpext.f16.ll
new file mode 100644
index 00000000000..37796a999b1
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fpext.f16.ll
@@ -0,0 +1,70 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}fpext_f16_to_f32
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: v_cvt_f32_f16_e32 v[[R_F32:[0-9]+]], v[[A_F16]]
+; GCN: buffer_store_dword v[[R_F32]]
+; GCN: s_endpgm
+define void @fpext_f16_to_f32(
+ float addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = fpext half %a.val to float
+ store float %r.val, float addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fpext_f16_to_f64
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; GCN: v_cvt_f64_f32_e32 v{{\[}}[[R_F64_0:[0-9]+]]:[[R_F64_1:[0-9]+]]{{\]}}, v[[A_F32]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[R_F64_0]]:[[R_F64_1]]{{\]}}
+; GCN: s_endpgm
+define void @fpext_f16_to_f64(
+ double addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = fpext half %a.val to double
+ store double %r.val, double addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fpext_v2f16_to_v2f32
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; VI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; GCN: v_cvt_f32_f16_e32 v[[R_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; GCN: v_cvt_f32_f16_e32 v[[R_F32_1:[0-9]+]], v[[A_F16_1]]
+; GCN: buffer_store_dwordx2 v{{\[}}[[R_F32_0]]:[[R_F32_1]]{{\]}}
+; GCN: s_endpgm
+define void @fpext_v2f16_to_v2f32(
+ <2 x float> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = fpext <2 x half> %a.val to <2 x float>
+ store <2 x float> %r.val, <2 x float> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fpext_v2f16_to_v2f64
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; GCN: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; GCN: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_cvt_f64_f32_e32 v{{\[}}{{[0-9]+}}:[[R_F64_3:[0-9]+]]{{\]}}, v[[A_F32_1]]
+; GCN: v_cvt_f64_f32_e32 v{{\[}}[[R_F64_0:[0-9]+]]:{{[0-9]+}}{{\]}}, v[[A_F32_0]]
+; GCN: buffer_store_dwordx4 v{{\[}}[[R_F64_0]]:[[R_F64_3]]{{\]}}
+; GCN: s_endpgm
+define void @fpext_v2f16_to_v2f64(
+ <2 x double> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = fpext <2 x half> %a.val to <2 x double>
+ store <2 x double> %r.val, <2 x double> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/fptosi.f16.ll b/llvm/test/CodeGen/AMDGPU/fptosi.f16.ll
new file mode 100644
index 00000000000..5991efb44ce
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fptosi.f16.ll
@@ -0,0 +1,112 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}fptosi_f16_to_i16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_i32_f32_e32 v[[R_I16:[0-9]+]], v[[A_F32]]
+; VI: v_cvt_i16_f16_e32 v[[R_I16:[0-9]+]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_I16]]
+; GCN: s_endpgm
+define void @fptosi_f16_to_i16(
+ i16 addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = fptosi half %a.val to i16
+ store i16 %r.val, i16 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fptosi_f16_to_i32
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; GCN: v_cvt_i32_f32_e32 v[[R_I32:[0-9]+]], v[[A_F32]]
+; GCN: buffer_store_dword v[[R_I32]]
+; GCN: s_endpgm
+define void @fptosi_f16_to_i32(
+ i32 addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = fptosi half %a.val to i32
+ store i32 %r.val, i32 addrspace(1)* %r
+ ret void
+}
+
+; Need to make sure we promote f16 to f32 when converting f16 to i64. Existing
+; test checks code generated for 'i64 = fp_to_sint f32'.
+
+; GCN-LABEL: {{^}}fptosi_f16_to_i64
+; GCN: buffer_load_ushort
+; GCN: v_cvt_f32_f16_e32
+; GCN: s_endpgm
+define void @fptosi_f16_to_i64(
+ i64 addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = fptosi half %a.val to i64
+ store i64 %r.val, i64 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fptosi_v2f16_to_v2i16
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_i32_f32_e32 v[[R_I16_0:[0-9]+]], v[[A_F32_0]]
+; SI: v_cvt_i32_f32_e32 v[[R_I16_1:[0-9]+]], v[[A_F32_1]]
+; VI: v_cvt_i16_f16_e32 v[[R_I16_0:[0-9]+]], v[[A_V2_F16]]
+; VI: v_cvt_i16_f16_e32 v[[R_I16_1:[0-9]+]], v[[A_F16_1]]
+; GCN: v_and_b32_e32 v[[R_I16_LO:[0-9]+]], 0xffff, v[[R_I16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_I16_HI:[0-9]+]], 16, v[[R_I16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_I16:[0-9]+]], v[[R_I16_HI]], v[[R_I16_LO]]
+; GCN: buffer_store_dword v[[R_V2_I16]]
+; GCN: s_endpgm
+define void @fptosi_v2f16_to_v2i16(
+ <2 x i16> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = fptosi <2 x half> %a.val to <2 x i16>
+ store <2 x i16> %r.val, <2 x i16> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fptosi_v2f16_to_v2i32
+; GCN: buffer_load_dword
+; GCN: v_cvt_f32_f16_e32
+; GCN: v_cvt_f32_f16_e32
+; GCN: v_cvt_i32_f32_e32
+; GCN: v_cvt_i32_f32_e32
+; GCN: buffer_store_dwordx2
+; GCN: s_endpgm
+define void @fptosi_v2f16_to_v2i32(
+ <2 x i32> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = fptosi <2 x half> %a.val to <2 x i32>
+ store <2 x i32> %r.val, <2 x i32> addrspace(1)* %r
+ ret void
+}
+
+; Need to make sure we promote f16 to f32 when converting f16 to i64. Existing
+; test checks code generated for 'i64 = fp_to_sint f32'.
+
+; GCN-LABEL: {{^}}fptosi_v2f16_to_v2i64
+; GCN: buffer_load_dword
+; GCN: v_cvt_f32_f16_e32
+; GCN: v_cvt_f32_f16_e32
+; GCN: s_endpgm
+define void @fptosi_v2f16_to_v2i64(
+ <2 x i64> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = fptosi <2 x half> %a.val to <2 x i64>
+ store <2 x i64> %r.val, <2 x i64> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/fptoui.f16.ll b/llvm/test/CodeGen/AMDGPU/fptoui.f16.ll
new file mode 100644
index 00000000000..592c15a3d3c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fptoui.f16.ll
@@ -0,0 +1,113 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}fptoui_f16_to_i16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_u32_f32_e32 v[[R_I16:[0-9]+]], v[[A_F32]]
+; VI: v_cvt_u16_f16_e32 v[[R_I16:[0-9]+]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_I16]]
+; GCN: s_endpgm
+define void @fptoui_f16_to_i16(
+ i16 addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = fptoui half %a.val to i16
+ store i16 %r.val, i16 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fptoui_f16_to_i32
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; GCN: v_cvt_u32_f32_e32 v[[R_I32:[0-9]+]], v[[A_F32]]
+; GCN: buffer_store_dword v[[R_I32]]
+; GCN: s_endpgm
+define void @fptoui_f16_to_i32(
+ i32 addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = fptoui half %a.val to i32
+ store i32 %r.val, i32 addrspace(1)* %r
+ ret void
+}
+
+; Make sure f16 is promoted to f32 when converting f16 to i64. An existing
+; test checks the code generated for 'i64 = fp_to_uint f32'.
+
+; GCN-LABEL: {{^}}fptoui_f16_to_i64
+; GCN: buffer_load_ushort
+; GCN: v_cvt_f32_f16_e32
+; GCN: s_endpgm
+define void @fptoui_f16_to_i64(
+ i64 addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = fptoui half %a.val to i64
+ store i64 %r.val, i64 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fptoui_v2f16_to_v2i16
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_cvt_u32_f32_e32 v[[R_I16_1:[0-9]+]], v[[A_F32_1]]
+; SI: v_cvt_u32_f32_e32 v[[R_I16_0:[0-9]+]], v[[A_F32_0]]
+; VI: v_cvt_u16_f16_e32 v[[R_I16_0:[0-9]+]], v[[A_V2_F16]]
+; VI: v_cvt_u16_f16_e32 v[[R_I16_1:[0-9]+]], v[[A_F16_1]]
+; VI: v_and_b32_e32 v[[R_I16_LO:[0-9]+]], 0xffff, v[[R_I16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_I16_HI:[0-9]+]], 16, v[[R_I16_1]]
+; SI: v_or_b32_e32 v[[R_V2_I16:[0-9]+]], v[[R_I16_HI]], v[[R_I16_0]]
+; VI: v_or_b32_e32 v[[R_V2_I16:[0-9]+]], v[[R_I16_HI]], v[[R_I16_LO]]
+; GCN: buffer_store_dword v[[R_V2_I16]]
+; GCN: s_endpgm
+define void @fptoui_v2f16_to_v2i16(
+ <2 x i16> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = fptoui <2 x half> %a.val to <2 x i16>
+ store <2 x i16> %r.val, <2 x i16> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fptoui_v2f16_to_v2i32
+; GCN: buffer_load_dword
+; GCN: v_cvt_f32_f16_e32
+; GCN: v_cvt_f32_f16_e32
+; GCN: v_cvt_u32_f32_e32
+; GCN: v_cvt_u32_f32_e32
+; GCN: buffer_store_dwordx2
+; GCN: s_endpgm
+define void @fptoui_v2f16_to_v2i32(
+ <2 x i32> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = fptoui <2 x half> %a.val to <2 x i32>
+ store <2 x i32> %r.val, <2 x i32> addrspace(1)* %r
+ ret void
+}
+
+; Make sure f16 is promoted to f32 when converting f16 to i64. An existing
+; test checks the code generated for 'i64 = fp_to_uint f32'.
+
+; GCN-LABEL: {{^}}fptoui_v2f16_to_v2i64
+; GCN: buffer_load_dword
+; GCN: v_cvt_f32_f16_e32
+; GCN: v_cvt_f32_f16_e32
+; GCN: s_endpgm
+define void @fptoui_v2f16_to_v2i64(
+ <2 x i64> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = fptoui <2 x half> %a.val to <2 x i64>
+ store <2 x i64> %r.val, <2 x i64> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll b/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
new file mode 100644
index 00000000000..a067960c584
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fptrunc.f16.ll
@@ -0,0 +1,72 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}fptrunc_f32_to_f16
+; GCN: buffer_load_dword v[[A_F32:[0-9]+]]
+; GCN: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[A_F32]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @fptrunc_f32_to_f16(
+ half addrspace(1)* %r,
+ float addrspace(1)* %a) {
+entry:
+ %a.val = load float, float addrspace(1)* %a
+ %r.val = fptrunc float %a.val to half
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fptrunc_f64_to_f16
+; GCN: buffer_load_dwordx2 v{{\[}}[[A_F64_0:[0-9]+]]:[[A_F64_1:[0-9]+]]{{\]}}
+; GCN: v_cvt_f32_f64_e32 v[[A_F32:[0-9]+]], v{{\[}}[[A_F64_0]]:[[A_F64_1]]{{\]}}
+; GCN: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[A_F32]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @fptrunc_f64_to_f16(
+ half addrspace(1)* %r,
+ double addrspace(1)* %a) {
+entry:
+ %a.val = load double, double addrspace(1)* %a
+ %r.val = fptrunc double %a.val to half
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fptrunc_v2f32_to_v2f16
+; GCN: buffer_load_dwordx2 v{{\[}}[[A_F32_0:[0-9]+]]:[[A_F32_1:[0-9]+]]{{\]}}
+; GCN-DAG: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[A_F32_0]]
+; GCN-DAG: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[A_F32_1]]
+; GCN-DAG: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN-DAG: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @fptrunc_v2f32_to_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x float> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x float>, <2 x float> addrspace(1)* %a
+ %r.val = fptrunc <2 x float> %a.val to <2 x half>
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fptrunc_v2f64_to_v2f16
+; GCN: buffer_load_dwordx4 v{{\[}}[[A_F64_0:[0-9]+]]:[[A_F64_3:[0-9]+]]{{\]}}
+; GCN: v_cvt_f32_f64_e32 v[[A_F32_0:[0-9]+]], v{{\[}}[[A_F64_0]]:{{[0-9]+}}{{\]}}
+; GCN: v_cvt_f32_f64_e32 v[[A_F32_1:[0-9]+]], v{{\[}}{{[0-9]+}}:[[A_F64_3]]{{\]}}
+; GCN: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[A_F32_0]]
+; GCN: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[A_F32_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+define void @fptrunc_v2f64_to_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x double> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x double>, <2 x double> addrspace(1)* %a
+ %r.val = fptrunc <2 x double> %a.val to <2 x half>
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/fsub.f16.ll b/llvm/test/CodeGen/AMDGPU/fsub.f16.ll
new file mode 100644
index 00000000000..a5c84b84bd2
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fsub.f16.ll
@@ -0,0 +1,150 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}fsub_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_subrev_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_subrev_f16_e32 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @fsub_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fsub half %a.val, %b.val
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
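+; The immediate 1.0 is the half-precision constant 0x3c00: SI converts it to f32
+; before the subtract, while VI folds it directly into v_sub_f16.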
+; GCN-LABEL: {{^}}fsub_f16_imm_a
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], 0x3c00{{$}}
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_subrev_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_sub_f16_e32 v[[R_F16:[0-9]+]], 0x3c00, v[[B_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @fsub_f16_imm_a(
+ half addrspace(1)* %r,
+ half addrspace(1)* %b) {
+entry:
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = fsub half 1.0, %b.val
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
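+; Subtracting the immediate 2.0 is matched as an add of its negation, the
+; half-precision constant 0xc000 (-2.0).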
+; GCN-LABEL: {{^}}fsub_f16_imm_b
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], 0xc000{{$}}
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_add_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_add_f16_e32 v[[R_F16:[0-9]+]], 0xc000, v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @fsub_f16_imm_b(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = fsub half %a.val, 2.0
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fsub_v2f16
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_subrev_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_subrev_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_subrev_f16_e32 v[[R_F16_0:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
+; VI: v_subrev_f16_e32 v[[R_F16_1:[0-9]+]], v[[B_F16_1]], v[[A_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @fsub_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = fsub <2 x half> %a.val, %b.val
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
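+; The vector immediate <1.0, 2.0> appears as the half constants 0x3c00 and 0x4000.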
+; GCN-LABEL: {{^}}fsub_v2f16_imm_a
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], 0x3c00{{$}}
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], 0x4000{{$}}
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_subrev_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_subrev_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_sub_f16_e32 v[[R_F16_0:[0-9]+]], 0x3c00, v[[B_V2_F16]]
+; VI: v_sub_f16_e32 v[[R_F16_1:[0-9]+]], 0x4000, v[[B_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @fsub_v2f16_imm_a(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = fsub <2 x half> <half 1.0, half 2.0>, %b.val
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
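+; Subtracting <2.0, 1.0> is matched as adding <-2.0, -1.0>, i.e. the half
+; constants 0xc000 and 0xbc00.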
+; GCN-LABEL: {{^}}fsub_v2f16_imm_b
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], 0x4000{{$}}
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], 0x3c00{{$}}
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_subrev_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_subrev_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_add_f16_e32 v[[R_F16_0:[0-9]+]], 0xc000, v[[A_V2_F16]]
+; VI: v_add_f16_e32 v[[R_F16_1:[0-9]+]], 0xbc00, v[[A_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @fsub_v2f16_imm_b(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = fsub <2 x half> %a.val, <half 2.0, half 1.0>
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/half.ll b/llvm/test/CodeGen/AMDGPU/half.ll
index b63ba8e3632..f2bb3f9d110 100644
--- a/llvm/test/CodeGen/AMDGPU/half.ll
+++ b/llvm/test/CodeGen/AMDGPU/half.ll
@@ -1,11 +1,12 @@
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
-; half args should be promoted to float
+; half args should be promoted to float for SI and lower.
; GCN-LABEL: {{^}}load_f16_arg:
; GCN: s_load_dword [[ARG:s[0-9]+]]
-; GCN: v_cvt_f16_f32_e32 [[CVT:v[0-9]+]], [[ARG]]
+; SI: v_cvt_f16_f32_e32 [[CVT:v[0-9]+]], [[ARG]]
+; VI: v_trunc_f16_e32 [[CVT:v[0-9]+]], [[ARG]]
; GCN: buffer_store_short [[CVT]]
define void @load_f16_arg(half addrspace(1)* %out, half %arg) #0 {
store half %arg, half addrspace(1)* %out
@@ -131,8 +132,11 @@ define void @extload_v8f16_to_v8f32_arg(<8 x float> addrspace(1)* %out, <8 x hal
; GCN-LABEL: {{^}}extload_f16_to_f64_arg:
; SI: s_load_dword [[ARG:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb{{$}}
+; SI: v_cvt_f64_f32_e32 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[ARG]]
; VI: s_load_dword [[ARG:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c{{$}}
-; GCN: v_cvt_f64_f32_e32 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[ARG]]
+; VI: v_trunc_f16_e32 v[[VARG:[0-9]+]], [[ARG]]
+; VI: v_cvt_f32_f16_e32 v[[VARG_F32:[0-9]+]], v[[VARG]]
+; VI: v_cvt_f64_f32_e32 [[RESULT:v\[[0-9]+:[0-9]+\]]], v[[VARG_F32]]
; GCN: buffer_store_dwordx2 [[RESULT]]
define void @extload_f16_to_f64_arg(double addrspace(1)* %out, half %arg) #0 {
%ext = fpext half %arg to double
@@ -279,8 +283,9 @@ define void @global_extload_f16_to_f32(float addrspace(1)* %out, half addrspace(
; GCN-LABEL: {{^}}global_extload_v2f16_to_v2f32:
; GCN: buffer_load_dword [[LOAD:v[0-9]+]], off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
+; VI: v_lshrrev_b32_e32 [[HI:v[0-9]+]], 16, [[LOAD]]
; GCN: v_cvt_f32_f16_e32 v[[CVT0:[0-9]+]], [[LOAD]]
-; GCN: v_lshrrev_b32_e32 [[HI:v[0-9]+]], 16, [[LOAD]]
+; SI: v_lshrrev_b32_e32 [[HI:v[0-9]+]], 16, [[LOAD]]
; GCN: v_cvt_f32_f16_e32 v[[CVT1:[0-9]+]], [[HI]]
; GCN: buffer_store_dwordx2 v{{\[}}[[CVT0]]:[[CVT1]]{{\]}}
; GCN: s_endpgm
@@ -387,16 +392,17 @@ define void @global_extload_v2f16_to_v2f64(<2 x double> addrspace(1)* %out, <2 x
; XSI-NOT: v_cvt_f32_f16
; XVI: buffer_load_dwordx2 [[LOAD:v\[[0-9]+:[0-9]+\]]]
+; XVI-DAG: v_lshrrev_b32_e32 {{v[0-9]+}}, 16, {{v[0-9]+}}
; XVI: v_cvt_f32_f16_e32
; XVI: v_cvt_f32_f16_e32
-; XVI-DAG: v_lshrrev_b32_e32 {{v[0-9]+}}, 16, {{v[0-9]+}}
; XVI: v_cvt_f32_f16_e32
; XVI-NOT: v_cvt_f32_f16
; GCN: buffer_load_dwordx2 v{{\[}}[[IN_LO:[0-9]+]]:[[IN_HI:[0-9]+]]
+; VI: v_lshrrev_b32_e32 [[Y16:v[0-9]+]], 16, v[[IN_LO]]
; GCN: v_cvt_f32_f16_e32 [[Z32:v[0-9]+]], v[[IN_HI]]
; GCN: v_cvt_f32_f16_e32 [[X32:v[0-9]+]], v[[IN_LO]]
-; GCN: v_lshrrev_b32_e32 [[Y16:v[0-9]+]], 16, v[[IN_LO]]
+; SI: v_lshrrev_b32_e32 [[Y16:v[0-9]+]], 16, v[[IN_LO]]
; GCN: v_cvt_f32_f16_e32 [[Y32:v[0-9]+]], [[Y16]]
; GCN: v_cvt_f64_f32_e32 [[Z:v\[[0-9]+:[0-9]+\]]], [[Z32]]
@@ -601,18 +607,6 @@ define void @fadd_v8f16(<8 x half> addrspace(1)* %out, <8 x half> %a, <8 x half>
ret void
}
-; GCN-LABEL: {{^}}fsub_f16:
-; GCN: v_subrev_f32_e32
-; GCN: s_endpgm
-define void @fsub_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
- %b_ptr = getelementptr half, half addrspace(1)* %in, i32 1
- %a = load half, half addrspace(1)* %in
- %b = load half, half addrspace(1)* %b_ptr
- %sub = fsub half %a, %b
- store half %sub, half addrspace(1)* %out
- ret void
-}
-
; GCN-LABEL: {{^}}test_bitcast_from_half:
; GCN: buffer_load_ushort [[TMP:v[0-9]+]]
; GCN: buffer_store_short [[TMP]]
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.class.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.class.f16.ll
new file mode 100644
index 00000000000..2247485f299
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.class.f16.ll
@@ -0,0 +1,155 @@
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.fabs.f16(half %a)
+declare i1 @llvm.amdgcn.class.f16(half %a, i32 %b)
+
+; GCN-LABEL: {{^}}class_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_I32:[0-9]+]]
+; VI: v_cmp_class_f16_e32 vcc, v[[A_F16]], v[[B_I32]]
+; GCN: v_cndmask_b32_e64 v[[R_I32:[0-9]+]]
+; GCN: buffer_store_dword v[[R_I32]]
+; GCN: s_endpgm
+define void @class_f16(
+ i32 addrspace(1)* %r,
+ half addrspace(1)* %a,
+ i32 addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load i32, i32 addrspace(1)* %b
+ %r.val = call i1 @llvm.amdgcn.class.f16(half %a.val, i32 %b.val)
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
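+; fabs and fneg of the f16 operand are folded into source modifiers (|x|, -x)
+; on the v_cmp_class_f16 comparison.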
+; GCN-LABEL: {{^}}class_f16_fabs
+; GCN: s_load_dword s[[SA_F16:[0-9]+]]
+; GCN: s_load_dword s[[SB_I32:[0-9]+]]
+; VI: v_trunc_f16_e32 v[[VA_F16:[0-9]+]], s[[SA_F16]]
+; VI: v_cmp_class_f16_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], |v[[VA_F16]]|, s[[SB_I32]]
+; VI: v_cndmask_b32_e64 v[[VR_I32:[0-9]+]], 0, -1, [[CMP]]
+; GCN: buffer_store_dword v[[VR_I32]]
+; GCN: s_endpgm
+define void @class_f16_fabs(
+ i32 addrspace(1)* %r,
+ half %a.val,
+ i32 %b.val) {
+entry:
+ %a.val.fabs = call half @llvm.fabs.f16(half %a.val)
+ %r.val = call i1 @llvm.amdgcn.class.f16(half %a.val.fabs, i32 %b.val)
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}class_f16_fneg
+; GCN: s_load_dword s[[SA_F16:[0-9]+]]
+; GCN: s_load_dword s[[SB_I32:[0-9]+]]
+; VI: v_trunc_f16_e32 v[[VA_F16:[0-9]+]], s[[SA_F16]]
+; VI: v_cmp_class_f16_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -v[[VA_F16]], s[[SB_I32]]
+; VI: v_cndmask_b32_e64 v[[VR_I32:[0-9]+]], 0, -1, [[CMP]]
+; GCN: buffer_store_dword v[[VR_I32]]
+; GCN: s_endpgm
+define void @class_f16_fneg(
+ i32 addrspace(1)* %r,
+ half %a.val,
+ i32 %b.val) {
+entry:
+ %a.val.fneg = fsub half -0.0, %a.val
+ %r.val = call i1 @llvm.amdgcn.class.f16(half %a.val.fneg, i32 %b.val)
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}class_f16_fabs_fneg
+; GCN: s_load_dword s[[SA_F16:[0-9]+]]
+; GCN: s_load_dword s[[SB_I32:[0-9]+]]
+; VI: v_trunc_f16_e32 v[[VA_F16:[0-9]+]], s[[SA_F16]]
+; VI: v_cmp_class_f16_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -|v[[VA_F16]]|, s[[SB_I32]]
+; VI: v_cndmask_b32_e64 v[[VR_I32:[0-9]+]], 0, -1, [[CMP]]
+; GCN: buffer_store_dword v[[VR_I32]]
+; GCN: s_endpgm
+define void @class_f16_fabs_fneg(
+ i32 addrspace(1)* %r,
+ half %a.val,
+ i32 %b.val) {
+entry:
+ %a.val.fabs = call half @llvm.fabs.f16(half %a.val)
+ %a.val.fabs.fneg = fsub half -0.0, %a.val.fabs
+ %r.val = call i1 @llvm.amdgcn.class.f16(half %a.val.fabs.fneg, i32 %b.val)
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}class_f16_1
+; GCN: s_load_dword s[[SA_F16:[0-9]+]]
+; VI: v_trunc_f16_e32 v[[VA_F16:[0-9]+]], s[[SA_F16]]
+; VI: v_cmp_class_f16_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], v[[VA_F16]], 1{{$}}
+; VI: v_cndmask_b32_e64 v[[VR_I32:[0-9]+]], 0, -1, [[CMP]]
+; GCN: buffer_store_dword v[[VR_I32]]
+; GCN: s_endpgm
+define void @class_f16_1(
+ i32 addrspace(1)* %r,
+ half %a.val) {
+entry:
+ %r.val = call i1 @llvm.amdgcn.class.f16(half %a.val, i32 1)
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}class_f16_64
+; GCN: s_load_dword s[[SA_F16:[0-9]+]]
+; VI: v_trunc_f16_e32 v[[VA_F16:[0-9]+]], s[[SA_F16]]
+; VI: v_cmp_class_f16_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], v[[VA_F16]], 64{{$}}
+; VI: v_cndmask_b32_e64 v[[VR_I32:[0-9]+]], 0, -1, [[CMP]]
+; GCN: buffer_store_dword v[[VR_I32]]
+; GCN: s_endpgm
+define void @class_f16_64(
+ i32 addrspace(1)* %r,
+ half %a.val) {
+entry:
+ %r.val = call i1 @llvm.amdgcn.class.f16(half %a.val, i32 64)
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
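+; The full ten-bit class mask (1023 = 0x3ff) does not fit in an inline constant,
+; so it is first materialized with v_mov_b32.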
+; GCN-LABEL: {{^}}class_f16_full_mask
+; GCN: s_load_dword s[[SA_F16:[0-9]+]]
+; VI: v_mov_b32_e32 v[[MASK:[0-9]+]], 0x3ff{{$}}
+; VI: v_trunc_f16_e32 v[[VA_F16:[0-9]+]], s[[SA_F16]]
+; VI: v_cmp_class_f16_e32 vcc, v[[VA_F16]], v[[MASK]]
+; VI: v_cndmask_b32_e64 v[[VR_I32:[0-9]+]], 0, -1, vcc
+; GCN: buffer_store_dword v[[VR_I32]]
+; GCN: s_endpgm
+define void @class_f16_full_mask(
+ i32 addrspace(1)* %r,
+ half %a.val) {
+entry:
+ %r.val = call i1 @llvm.amdgcn.class.f16(half %a.val, i32 1023)
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}class_f16_nine_bit_mask
+; GCN: s_load_dword s[[SA_F16:[0-9]+]]
+; VI: v_mov_b32_e32 v[[MASK:[0-9]+]], 0x1ff{{$}}
+; VI: v_trunc_f16_e32 v[[VA_F16:[0-9]+]], s[[SA_F16]]
+; VI: v_cmp_class_f16_e32 vcc, v[[VA_F16]], v[[MASK]]
+; VI: v_cndmask_b32_e64 v[[VR_I32:[0-9]+]], 0, -1, vcc
+; GCN: buffer_store_dword v[[VR_I32]]
+; GCN: s_endpgm
+define void @class_f16_nine_bit_mask(
+ i32 addrspace(1)* %r,
+ half %a.val) {
+entry:
+ %r.val = call i1 @llvm.amdgcn.class.f16(half %a.val, i32 511)
+ %r.val.sext = sext i1 %r.val to i32
+ store i32 %r.val.sext, i32 addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cos.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cos.f16.ll
new file mode 100644
index 00000000000..5d9f1b824a6
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.cos.f16.ll
@@ -0,0 +1,18 @@
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.amdgcn.cos.f16(half %a)
+
+; GCN-LABEL: {{^}}cos_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; VI: v_cos_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @cos_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = call half @llvm.amdgcn.cos.f16(half %a.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.div.fixup.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.div.fixup.f16.ll
new file mode 100644
index 00000000000..d69cd7c6068
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.div.fixup.f16.ll
@@ -0,0 +1,129 @@
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.amdgcn.div.fixup.f16(half %a, half %b, half %c)
+
+; GCN-LABEL: {{^}}div_fixup_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
+; VI: v_div_fixup_f16 v[[R_F16:[0-9]+]], v[[A_F16]], v[[B_F16]], v[[C_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @div_fixup_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+ %r.val = call half @llvm.amdgcn.div.fixup.f16(half %a.val, half %b.val, half %c.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
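+; 3.0 in half precision is 0x4200; the immediate is moved into a VGPR before
+; feeding the VOP3 v_div_fixup_f16.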
+; GCN-LABEL: {{^}}div_fixup_f16_imm_a
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
+; VI: v_mov_b32_e32 v[[A_F16:[0-9]+]], 0x4200{{$}}
+; VI: v_div_fixup_f16 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]], v[[C_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @div_fixup_f16_imm_a(
+ half addrspace(1)* %r,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) {
+entry:
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+ %r.val = call half @llvm.amdgcn.div.fixup.f16(half 3.0, half %b.val, half %c.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}div_fixup_f16_imm_b
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
+; VI: v_mov_b32_e32 v[[B_F16:[0-9]+]], 0x4200{{$}}
+; VI: v_div_fixup_f16 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]], v[[C_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @div_fixup_f16_imm_b(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %c) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %c.val = load half, half addrspace(1)* %c
+ %r.val = call half @llvm.amdgcn.div.fixup.f16(half %a.val, half 3.0, half %c.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}div_fixup_f16_imm_c
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; VI: v_mov_b32_e32 v[[C_F16:[0-9]+]], 0x4200{{$}}
+; VI: v_div_fixup_f16 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]], v[[C_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @div_fixup_f16_imm_c(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = call half @llvm.amdgcn.div.fixup.f16(half %a.val, half %b.val, half 3.0)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}div_fixup_f16_imm_a_imm_b
+; VI: v_mov_b32_e32 v[[AB_F16:[0-9]+]], 0x4200{{$}}
+; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
+; VI: v_div_fixup_f16 v[[R_F16:[0-9]+]], v[[AB_F16]], v[[AB_F16]], v[[C_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @div_fixup_f16_imm_a_imm_b(
+ half addrspace(1)* %r,
+ half addrspace(1)* %c) {
+entry:
+ %c.val = load half, half addrspace(1)* %c
+ %r.val = call half @llvm.amdgcn.div.fixup.f16(half 3.0, half 3.0, half %c.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}div_fixup_f16_imm_b_imm_c
+; VI: v_mov_b32_e32 v[[BC_F16:[0-9]+]], 0x4200{{$}}
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; VI: v_div_fixup_f16 v[[R_F16:[0-9]+]], v[[A_F16]], v[[BC_F16]], v[[BC_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @div_fixup_f16_imm_b_imm_c(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = call half @llvm.amdgcn.div.fixup.f16(half %a.val, half 3.0, half 3.0)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}div_fixup_f16_imm_a_imm_c
+; VI: v_mov_b32_e32 v[[AC_F16:[0-9]+]], 0x4200{{$}}
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; VI: v_div_fixup_f16 v[[R_F16:[0-9]+]], v[[AC_F16]], v[[B_F16]], v[[AC_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @div_fixup_f16_imm_a_imm_c(
+ half addrspace(1)* %r,
+ half addrspace(1)* %b) {
+entry:
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = call half @llvm.amdgcn.div.fixup.f16(half 3.0, half %b.val, half 3.0)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fract.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fract.f16.ll
new file mode 100644
index 00000000000..c1af0ecb3ed
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.fract.f16.ll
@@ -0,0 +1,18 @@
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.amdgcn.fract.f16(half %a)
+
+; GCN-LABEL: {{^}}fract_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; VI: v_fract_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @fract_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = call half @llvm.amdgcn.fract.f16(half %a.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.exp.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.exp.f16.ll
new file mode 100644
index 00000000000..509fa28038b
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.exp.f16.ll
@@ -0,0 +1,18 @@
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare i32 @llvm.amdgcn.frexp.exp.f16(half %a)
+
+; GCN-LABEL: {{^}}frexp_exp_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; VI: v_frexp_exp_i16_f16_e32 v[[R_I16:[0-9]+]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_I16]]
+define void @frexp_exp_f16(
+ i16 addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = call i32 @llvm.amdgcn.frexp.exp.f16(half %a.val)
+ %r.val.i16 = trunc i32 %r.val to i16
+ store i16 %r.val.i16, i16 addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.mant.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.mant.f16.ll
new file mode 100644
index 00000000000..4b33549537a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.frexp.mant.f16.ll
@@ -0,0 +1,18 @@
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.amdgcn.frexp.mant.f16(half %a)
+
+; GCN-LABEL: {{^}}frexp_mant_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; VI: v_frexp_mant_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @frexp_mant_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = call half @llvm.amdgcn.frexp.mant.f16(half %a.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll
new file mode 100644
index 00000000000..8ab2efe651b
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.ldexp.f16.ll
@@ -0,0 +1,45 @@
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.amdgcn.ldexp.f16(half %a, i32 %b)
+
+; GCN-LABEL: {{^}}ldexp_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_I32:[0-9]+]]
+; VI: v_ldexp_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]], v[[B_I32]]
+; GCN: buffer_store_short v[[R_F16]]
+define void @ldexp_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ i32 addrspace(1)* %b) {
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load i32, i32 addrspace(1)* %b
+ %r.val = call half @llvm.amdgcn.ldexp.f16(half %a.val, i32 %b.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
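+; 2.0 in half precision is 0x4000, used here as a literal operand of v_ldexp_f16.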
+; GCN-LABEL: {{^}}ldexp_f16_imm_a
+; GCN: buffer_load_dword v[[B_I32:[0-9]+]]
+; VI: v_ldexp_f16_e32 v[[R_F16:[0-9]+]], 0x4000, v[[B_I32]]
+; GCN: buffer_store_short v[[R_F16]]
+define void @ldexp_f16_imm_a(
+ half addrspace(1)* %r,
+ i32 addrspace(1)* %b) {
+ %b.val = load i32, i32 addrspace(1)* %b
+ %r.val = call half @llvm.amdgcn.ldexp.f16(half 2.0, i32 %b.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}ldexp_f16_imm_b
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; VI: v_ldexp_f16_e64 v[[R_F16:[0-9]+]], v[[A_F16]], 2{{$}}
+; GCN: buffer_store_short v[[R_F16]]
+define void @ldexp_f16_imm_b(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = call half @llvm.amdgcn.ldexp.f16(half %a.val, i32 2)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.f16.ll
new file mode 100644
index 00000000000..3fda4a59df2
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rcp.f16.ll
@@ -0,0 +1,18 @@
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.amdgcn.rcp.f16(half %a)
+
+; GCN-LABEL: {{^}}rcp_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; VI: v_rcp_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @rcp_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = call half @llvm.amdgcn.rcp.f16(half %a.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.f16.ll
new file mode 100644
index 00000000000..74415e3658a
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.rsq.f16.ll
@@ -0,0 +1,18 @@
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.amdgcn.rsq.f16(half %a)
+
+; GCN-LABEL: {{^}}rsq_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; VI: v_rsq_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @rsq_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = call half @llvm.amdgcn.rsq.f16(half %a.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sin.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sin.f16.ll
new file mode 100644
index 00000000000..0ebe012fe1c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.amdgcn.sin.f16.ll
@@ -0,0 +1,18 @@
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.amdgcn.sin.f16(half %a)
+
+; GCN-LABEL: {{^}}sin_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; VI: v_sin_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @sin_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = call half @llvm.amdgcn.sin.f16(half %a.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.ceil.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.ceil.f16.ll
new file mode 100644
index 00000000000..dc984e91eec
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.ceil.f16.ll
@@ -0,0 +1,49 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.ceil.f16(half %a)
+declare <2 x half> @llvm.ceil.v2f16(<2 x half> %a)
+
+; GCN-LABEL: {{^}}ceil_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_ceil_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_ceil_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @ceil_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = call half @llvm.ceil.f16(half %a.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}ceil_v2f16
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_ceil_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_ceil_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_ceil_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
+; VI: v_ceil_f16_e32 v[[R_F16_1:[0-9]+]], v[[A_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @ceil_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = call <2 x half> @llvm.ceil.v2f16(<2 x half> %a.val)
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.cos.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.cos.f16.ll
new file mode 100644
index 00000000000..d2b85cfd933
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.cos.f16.ll
@@ -0,0 +1,55 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.cos.f16(half %a)
+declare <2 x half> @llvm.cos.v2f16(<2 x half> %a)
+
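+; llvm.cos is lowered by scaling the input by 1/(2*pi) (the f32 constant
+; 0x3e22f983), taking the fractional part, and then using the hardware v_cos_f32.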
+; GCN-LABEL: {{^}}cos_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; GCN: v_mul_f32_e32 v[[M_F32:[0-9]+]], {{1/2pi|0x3e22f983}}, v[[A_F32]]
+; GCN: v_fract_f32_e32 v[[F_F32:[0-9]+]], v[[M_F32]]
+; GCN: v_cos_f32_e32 v[[R_F32:[0-9]+]], v[[F_F32]]
+; GCN: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @cos_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = call half @llvm.cos.f16(half %a.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}cos_v2f16
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; SI: v_mov_b32_e32 v[[HALF_PIE:[0-9]+]], 0x3e22f983{{$}}
+; GCN: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; GCN: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_mul_f32_e32 v[[M_F32_0:[0-9]+]], v[[HALF_PIE]], v[[A_F32_0]]
+; VI: v_mul_f32_e32 v[[M_F32_0:[0-9]+]], 1/2pi, v[[A_F32_0]]
+; GCN: v_fract_f32_e32 v[[F_F32_0:[0-9]+]], v[[M_F32_0]]
+; SI: v_mul_f32_e32 v[[M_F32_1:[0-9]+]], v[[HALF_PIE]], v[[A_F32_1]]
+; VI: v_mul_f32_e32 v[[M_F32_1:[0-9]+]], 1/2pi, v[[A_F32_1]]
+; GCN: v_fract_f32_e32 v[[F_F32_1:[0-9]+]], v[[M_F32_1]]
+; GCN: v_cos_f32_e32 v[[R_F32_0:[0-9]+]], v[[F_F32_0]]
+; GCN: v_cos_f32_e32 v[[R_F32_1:[0-9]+]], v[[F_F32_1]]
+; GCN: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; GCN: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @cos_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = call <2 x half> @llvm.cos.v2f16(<2 x half> %a.val)
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.exp2.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.exp2.f16.ll
new file mode 100644
index 00000000000..9165698cb81
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.exp2.f16.ll
@@ -0,0 +1,49 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.exp2.f16(half %a)
+declare <2 x half> @llvm.exp2.v2f16(<2 x half> %a)
+
+; GCN-LABEL: {{^}}exp2_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_exp_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_exp_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @exp2_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = call half @llvm.exp2.f16(half %a.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}exp2_v2f16
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_exp_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_exp_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_exp_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
+; VI: v_exp_f16_e32 v[[R_F16_1:[0-9]+]], v[[A_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @exp2_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = call <2 x half> @llvm.exp2.v2f16(<2 x half> %a.val)
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.floor.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.floor.f16.ll
new file mode 100644
index 00000000000..a7bdb1e9428
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.floor.f16.ll
@@ -0,0 +1,49 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.floor.f16(half %a)
+declare <2 x half> @llvm.floor.v2f16(<2 x half> %a)
+
+; GCN-LABEL: {{^}}floor_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_floor_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_floor_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @floor_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = call half @llvm.floor.f16(half %a.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}floor_v2f16
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_floor_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_floor_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_floor_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
+; VI: v_floor_f16_e32 v[[R_F16_1:[0-9]+]], v[[A_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @floor_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = call <2 x half> @llvm.floor.v2f16(<2 x half> %a.val)
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.fma.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.fma.f16.ll
new file mode 100644
index 00000000000..e6f85ef08f4
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.fma.f16.ll
@@ -0,0 +1,235 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.fma.f16(half %a, half %b, half %c)
+declare <2 x half> @llvm.fma.v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c)
+
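+; llvm.fma selects v_fma_f32 on SI, after promoting the f16 operands to f32,
+; and v_fma_f16 on VI.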
+; GCN-LABEL: {{^}}fma_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], v[[C_F16]]
+; SI: v_fma_f32 v[[R_F32:[0-9]+]], v[[A_F32]], v[[B_F32]], v[[C_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_fma_f16 v[[R_F16:[0-9]+]], v[[A_F16]], v[[B_F16]], v[[C_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @fma_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) {
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+ %r.val = call half @llvm.fma.f16(half %a.val, half %b.val, half %c.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fma_f16_imm_a
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], 0x4200{{$}}
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], v[[C_F16]]
+; SI: v_fma_f32 v[[R_F32:[0-9]+]], v[[A_F32]], v[[B_F32]], v[[C_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_mov_b32_e32 v[[A_F16:[0-9]+]], 0x4200{{$}}
+; VI: v_fma_f16 v[[R_F16:[0-9]+]], v[[A_F16]], v[[B_F16]], v[[C_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @fma_f16_imm_a(
+ half addrspace(1)* %r,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) {
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+ %r.val = call half @llvm.fma.f16(half 3.0, half %b.val, half %c.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fma_f16_imm_b
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], 0x4200{{$}}
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], v[[C_F16]]
+; SI: v_fma_f32 v[[R_F32:[0-9]+]], v[[A_F32]], v[[B_F32]], v[[C_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_mov_b32_e32 v[[B_F16:[0-9]+]], 0x4200{{$}}
+; VI: v_fma_f16 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]], v[[C_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @fma_f16_imm_b(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %c) {
+ %a.val = load half, half addrspace(1)* %a
+ %c.val = load half, half addrspace(1)* %c
+ %r.val = call half @llvm.fma.f16(half %a.val, half 3.0, half %c.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fma_f16_imm_c
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], 0x4200{{$}}
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_fma_f32 v[[R_F32:[0-9]+]], v[[A_F32]], v[[B_F32]], v[[C_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_mov_b32_e32 v[[C_F16:[0-9]+]], 0x4200{{$}}
+; VI: v_fma_f16 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]], v[[C_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @fma_f16_imm_c(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = call half @llvm.fma.f16(half %a.val, half %b.val, half 3.0)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fma_v2f16
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[C_V2_F16:[0-9]+]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[C_F32_0:[0-9]+]], v[[C_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[C_F32_1:[0-9]+]], v[[C_F16_1]]
+; SI: v_fma_f32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]], v[[C_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_fma_f32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]], v[[C_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_fma_f16 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]], v[[C_V2_F16]]
+; VI: v_fma_f16 v[[R_F16_1:[0-9]+]], v[[A_F16_1]], v[[B_F16_1]], v[[C_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @fma_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b,
+ <2 x half> addrspace(1)* %c) {
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
+ %r.val = call <2 x half> @llvm.fma.v2f16(<2 x half> %a.val, <2 x half> %b.val, <2 x half> %c.val)
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fma_v2f16_imm_a
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[C_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], 0x4200{{$}}
+; VI: v_mov_b32_e32 v[[A_F16:[0-9]+]], 0x4200{{$}}
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[C_F32_0:[0-9]+]], v[[C_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[C_F32_1:[0-9]+]], v[[C_F16_1]]
+; SI: v_fma_f32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32]], v[[C_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_fma_f32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32]], v[[C_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_fma_f16 v[[R_F16_0:[0-9]+]], v[[B_V2_F16]], v[[A_F16]], v[[C_V2_F16]]
+; VI: v_fma_f16 v[[R_F16_1:[0-9]+]], v[[B_F16_1]], v[[A_F16]], v[[C_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @fma_v2f16_imm_a(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %b,
+ <2 x half> addrspace(1)* %c) {
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
+ %r.val = call <2 x half> @llvm.fma.v2f16(<2 x half> <half 3.0, half 3.0>, <2 x half> %b.val, <2 x half> %c.val)
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fma_v2f16_imm_b
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[C_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], 0x4200{{$}}
+; VI: v_mov_b32_e32 v[[B_F16:[0-9]+]], 0x4200{{$}}
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[C_F32_0:[0-9]+]], v[[C_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[C_F32_1:[0-9]+]], v[[C_F16_1]]
+; SI: v_fma_f32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32]], v[[C_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_fma_f32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32]], v[[C_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_fma_f16 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]], v[[B_F16]], v[[C_V2_F16]]
+; VI: v_fma_f16 v[[R_F16_1:[0-9]+]], v[[A_F16_1]], v[[B_F16]], v[[C_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @fma_v2f16_imm_b(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %c) {
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
+ %r.val = call <2 x half> @llvm.fma.v2f16(<2 x half> %a.val, <2 x half> <half 3.0, half 3.0>, <2 x half> %c.val)
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fma_v2f16_imm_c
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], 0x4200{{$}}
+; VI: v_mov_b32_e32 v[[C_F16:[0-9]+]], 0x4200{{$}}
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_fma_f32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]], v[[C_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_fma_f32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]], v[[C_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_fma_f16 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]], v[[B_V2_F16]], v[[C_F16]]
+; VI: v_fma_f16 v[[R_F16_1:[0-9]+]], v[[A_F16_1]], v[[B_F16_1]], v[[C_F16]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @fma_v2f16_imm_c(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b) {
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = call <2 x half> @llvm.fma.v2f16(<2 x half> %a.val, <2 x half> %b.val, <2 x half> <half 3.0, half 3.0>)
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.fmuladd.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.fmuladd.f16.ll
new file mode 100644
index 00000000000..2e2526f22f4
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.fmuladd.f16.ll
@@ -0,0 +1,116 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.fmuladd.f16(half %a, half %b, half %c)
+declare <2 x half> @llvm.fmuladd.v2f16(<2 x half> %a, <2 x half> %b, <2 x half> %c)
+
+; GCN-LABEL: {{^}}fmuladd_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], v[[C_F16]]
+; SI: v_mac_f32_e32 v[[C_F32]], v[[B_F32]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[C_F32]]
+; SI: buffer_store_short v[[R_F16]]
+; VI: v_mac_f16_e32 v[[C_F16]], v[[B_F16]], v[[A_F16]]
+; VI: buffer_store_short v[[C_F16]]
+; GCN: s_endpgm
+define void @fmuladd_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) {
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+ %r.val = call half @llvm.fmuladd.f16(half %a.val, half %b.val, half %c.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fmuladd_f16_imm_a
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], 0x4200{{$}}
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], v[[C_F16]]
+; SI: v_mac_f32_e32 v[[C_F32]], v[[A_F32]], v[[B_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[C_F32]]
+; SI: buffer_store_short v[[R_F16]]
+; VI: v_mac_f16_e32 v[[C_F16]], 0x4200, v[[B_F16]]
+; VI: buffer_store_short v[[C_F16]]
+; GCN: s_endpgm
+define void @fmuladd_f16_imm_a(
+ half addrspace(1)* %r,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) {
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+ %r.val = call half @llvm.fmuladd.f16(half 3.0, half %b.val, half %c.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fmuladd_f16_imm_b
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], 0x4200{{$}}
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], v[[C_F16]]
+; SI: v_mac_f32_e32 v[[C_F32]], v[[B_F32]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[C_F32]]
+; SI: buffer_store_short v[[R_F16]]
+; VI: v_mac_f16_e32 v[[C_F16]], 0x4200, v[[A_F16]]
+; VI: buffer_store_short v[[C_F16]]
+; GCN: s_endpgm
+define void @fmuladd_f16_imm_b(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %c) {
+ %a.val = load half, half addrspace(1)* %a
+ %c.val = load half, half addrspace(1)* %c
+ %r.val = call half @llvm.fmuladd.f16(half %a.val, half 3.0, half %c.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}fmuladd_v2f16
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[C_V2_F16:[0-9]+]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[C_F32_0:[0-9]+]], v[[C_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[C_F32_1:[0-9]+]], v[[C_F16_1]]
+; SI: v_mac_f32_e32 v[[C_F32_0]], v[[B_F32_0]], v[[A_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[C_F32_0]]
+; SI: v_mac_f32_e32 v[[C_F32_1]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[C_F32_1]]
+; SI: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; SI: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; VI: v_mac_f16_e32 v[[C_V2_F16]], v[[B_V2_F16]], v[[A_V2_F16]]
+; VI: v_mac_f16_e32 v[[C_F16_1]], v[[B_F16_1]], v[[A_F16_1]]
+; VI: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[C_V2_F16]]
+; VI: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[C_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @fmuladd_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b,
+ <2 x half> addrspace(1)* %c) {
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
+ %r.val = call <2 x half> @llvm.fmuladd.v2f16(<2 x half> %a.val, <2 x half> %b.val, <2 x half> %c.val)
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.log2.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.log2.f16.ll
new file mode 100644
index 00000000000..8c35412b168
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.log2.f16.ll
@@ -0,0 +1,49 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.log2.f16(half %a)
+declare <2 x half> @llvm.log2.v2f16(<2 x half> %a)
+
+; GCN-LABEL: {{^}}log2_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_log_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_log_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @log2_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = call half @llvm.log2.f16(half %a.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}log2_v2f16
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_log_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]]
+; SI: v_log_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_log_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
+; VI: v_log_f16_e32 v[[R_F16_1:[0-9]+]], v[[A_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @log2_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = call <2 x half> @llvm.log2.v2f16(<2 x half> %a.val)
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll
new file mode 100644
index 00000000000..0accbad9988
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.maxnum.f16.ll
@@ -0,0 +1,153 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.maxnum.f16(half %a, half %b)
+declare <2 x half> @llvm.maxnum.v2f16(<2 x half> %a, <2 x half> %b)
+
+; GCN-LABEL: {{^}}maxnum_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_max_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_max_f16_e32 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @maxnum_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = call half @llvm.maxnum.f16(half %a.val, half %b.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}maxnum_f16_imm_a
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], 0x4200{{$}}
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_max_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]], v[[B_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_max_f16_e32 v[[R_F16:[0-9]+]], 0x4200, v[[B_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @maxnum_f16_imm_a(
+ half addrspace(1)* %r,
+ half addrspace(1)* %b) {
+entry:
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = call half @llvm.maxnum.f16(half 3.0, half %b.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}maxnum_f16_imm_b
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], 0x4400{{$}}
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_max_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_max_f16_e32 v[[R_F16:[0-9]+]], 0x4400, v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @maxnum_f16_imm_b(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = call half @llvm.maxnum.f16(half %a.val, half 4.0)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}maxnum_v2f16
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_max_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_max_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_max_f16_e32 v[[R_F16_0:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
+; VI: v_max_f16_e32 v[[R_F16_1:[0-9]+]], v[[B_F16_1]], v[[A_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @maxnum_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = call <2 x half> @llvm.maxnum.v2f16(<2 x half> %a.val, <2 x half> %b.val)
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}maxnum_v2f16_imm_a
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], 0x4200{{$}}
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], 0x4400{{$}}
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_max_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_max_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_max_f16_e32 v[[R_F16_0:[0-9]+]], 0x4200, v[[B_V2_F16]]
+; VI: v_max_f16_e32 v[[R_F16_1:[0-9]+]], 0x4400, v[[B_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @maxnum_v2f16_imm_a(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = call <2 x half> @llvm.maxnum.v2f16(<2 x half> <half 3.0, half 4.0>, <2 x half> %b.val)
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}maxnum_v2f16_imm_b
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], 0x4400{{$}}
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], 0x4200{{$}}
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_max_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_max_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_max_f16_e32 v[[R_F16_0:[0-9]+]], 0x4400, v[[A_V2_F16]]
+; VI: v_max_f16_e32 v[[R_F16_1:[0-9]+]], 0x4200, v[[A_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @maxnum_v2f16_imm_b(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = call <2 x half> @llvm.maxnum.v2f16(<2 x half> %a.val, <2 x half> <half 4.0, half 3.0>)
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.minnum.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.minnum.f16.ll
new file mode 100644
index 00000000000..9f41df6fd25
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.minnum.f16.ll
@@ -0,0 +1,153 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.minnum.f16(half %a, half %b)
+declare <2 x half> @llvm.minnum.v2f16(<2 x half> %a, <2 x half> %b)
+
+; GCN-LABEL: {{^}}minnum_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_min_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_min_f16_e32 v[[R_F16:[0-9]+]], v[[B_F16]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @minnum_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = call half @llvm.minnum.f16(half %a.val, half %b.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}minnum_f16_imm_a
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], 0x4200{{$}}
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_min_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]], v[[B_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_min_f16_e32 v[[R_F16:[0-9]+]], 0x4200, v[[B_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @minnum_f16_imm_a(
+ half addrspace(1)* %r,
+ half addrspace(1)* %b) {
+entry:
+ %b.val = load half, half addrspace(1)* %b
+ %r.val = call half @llvm.minnum.f16(half 3.0, half %b.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}minnum_f16_imm_b
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], 0x4400{{$}}
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_min_f32_e32 v[[R_F32:[0-9]+]], v[[B_F32]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_min_f16_e32 v[[R_F16:[0-9]+]], 0x4400, v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @minnum_f16_imm_b(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = call half @llvm.minnum.f16(half %a.val, half 4.0)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}minnum_v2f16
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_min_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_min_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_min_f16_e32 v[[R_F16_0:[0-9]+]], v[[B_V2_F16]], v[[A_V2_F16]]
+; VI: v_min_f16_e32 v[[R_F16_1:[0-9]+]], v[[B_F16_1]], v[[A_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @minnum_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = call <2 x half> @llvm.minnum.v2f16(<2 x half> %a.val, <2 x half> %b.val)
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}minnum_v2f16_imm_a
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], 0x4200{{$}}
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], 0x4400{{$}}
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_min_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]], v[[B_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_min_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]], v[[B_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_min_f16_e32 v[[R_F16_0:[0-9]+]], 0x4200, v[[B_V2_F16]]
+; VI: v_min_f16_e32 v[[R_F16_1:[0-9]+]], 0x4400, v[[B_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @minnum_v2f16_imm_a(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %b) {
+entry:
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %r.val = call <2 x half> @llvm.minnum.v2f16(<2 x half> <half 3.0, half 4.0>, <2 x half> %b.val)
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}minnum_v2f16_imm_b
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], 0x4400{{$}}
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], 0x4200{{$}}
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_min_f32_e32 v[[R_F32_0:[0-9]+]], v[[B_F32_0]], v[[A_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_min_f32_e32 v[[R_F32_1:[0-9]+]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_min_f16_e32 v[[R_F16_0:[0-9]+]], 0x4400, v[[A_V2_F16]]
+; VI: v_min_f16_e32 v[[R_F16_1:[0-9]+]], 0x4200, v[[A_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @minnum_v2f16_imm_b(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = call <2 x half> @llvm.minnum.v2f16(<2 x half> %a.val, <2 x half> <half 4.0, half 3.0>)
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.rint.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.rint.f16.ll
new file mode 100644
index 00000000000..ad9b66c9038
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.rint.f16.ll
@@ -0,0 +1,49 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.rint.f16(half %a)
+declare <2 x half> @llvm.rint.v2f16(<2 x half> %a)
+
+; GCN-LABEL: {{^}}rint_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_rndne_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_rndne_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @rint_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = call half @llvm.rint.f16(half %a.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}rint_v2f16
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_rndne_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_rndne_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_rndne_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
+; VI: v_rndne_f16_e32 v[[R_F16_1:[0-9]+]], v[[A_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @rint_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = call <2 x half> @llvm.rint.v2f16(<2 x half> %a.val)
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.sin.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.sin.f16.ll
new file mode 100644
index 00000000000..8a1ae31cb99
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.sin.f16.ll
@@ -0,0 +1,55 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.sin.f16(half %a)
+declare <2 x half> @llvm.sin.v2f16(<2 x half> %a)
+
+; GCN-LABEL: {{^}}sin_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; GCN: v_mul_f32_e32 v[[M_F32:[0-9]+]], {{1/2pi|0x3e22f983}}, v[[A_F32]]
+; GCN: v_fract_f32_e32 v[[F_F32:[0-9]+]], v[[M_F32]]
+; GCN: v_sin_f32_e32 v[[R_F32:[0-9]+]], v[[F_F32]]
+; GCN: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @sin_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = call half @llvm.sin.f16(half %a.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}sin_v2f16
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; SI: v_mov_b32_e32 v[[HALF_PIE:[0-9]+]], 0x3e22f983{{$}}
+; GCN: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; GCN: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_mul_f32_e32 v[[M_F32_0:[0-9]+]], v[[HALF_PIE]], v[[A_F32_0]]
+; VI: v_mul_f32_e32 v[[M_F32_0:[0-9]+]], 1/2pi, v[[A_F32_0]]
+; GCN: v_fract_f32_e32 v[[F_F32_0:[0-9]+]], v[[M_F32_0]]
+; SI: v_mul_f32_e32 v[[M_F32_1:[0-9]+]], v[[HALF_PIE]], v[[A_F32_1]]
+; VI: v_mul_f32_e32 v[[M_F32_1:[0-9]+]], 1/2pi, v[[A_F32_1]]
+; GCN: v_fract_f32_e32 v[[F_F32_1:[0-9]+]], v[[M_F32_1]]
+; GCN: v_sin_f32_e32 v[[R_F32_0:[0-9]+]], v[[F_F32_0]]
+; GCN: v_sin_f32_e32 v[[R_F32_1:[0-9]+]], v[[F_F32_1]]
+; GCN: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; GCN: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @sin_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = call <2 x half> @llvm.sin.v2f16(<2 x half> %a.val)
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.sqrt.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.sqrt.f16.ll
new file mode 100644
index 00000000000..8217ebf4673
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.sqrt.f16.ll
@@ -0,0 +1,49 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.sqrt.f16(half %a)
+declare <2 x half> @llvm.sqrt.v2f16(<2 x half> %a)
+
+; GCN-LABEL: {{^}}sqrt_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_sqrt_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_sqrt_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @sqrt_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = call half @llvm.sqrt.f16(half %a.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}sqrt_v2f16
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_sqrt_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_sqrt_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_sqrt_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
+; VI: v_sqrt_f16_e32 v[[R_F16_1:[0-9]+]], v[[A_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @sqrt_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = call <2 x half> @llvm.sqrt.v2f16(<2 x half> %a.val)
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.trunc.f16.ll b/llvm/test/CodeGen/AMDGPU/llvm.trunc.f16.ll
new file mode 100644
index 00000000000..04054c2b70f
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.trunc.f16.ll
@@ -0,0 +1,49 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+declare half @llvm.trunc.f16(half %a)
+declare <2 x half> @llvm.trunc.v2f16(<2 x half> %a)
+
+; GCN-LABEL: {{^}}trunc_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_trunc_f32_e32 v[[R_F32:[0-9]+]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[R_F32]]
+; VI: v_trunc_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @trunc_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %r.val = call half @llvm.trunc.f16(half %a.val)
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}trunc_v2f16
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_trunc_f32_e32 v[[R_F32_0:[0-9]+]], v[[A_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[R_F32_0]]
+; SI: v_trunc_f32_e32 v[[R_F32_1:[0-9]+]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[R_F32_1]]
+; VI: v_trunc_f16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_F16]]
+; VI: v_trunc_f16_e32 v[[R_F16_1:[0-9]+]], v[[A_F16_1]]
+; GCN: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @trunc_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %r.val = call <2 x half> @llvm.trunc.v2f16(<2 x half> %a.val)
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll b/llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll
index 1c9196a8bd6..b0ae1c2969f 100644
--- a/llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sint_to_fp.i64.ll
@@ -3,6 +3,41 @@
; FIXME: This should be merged with sint_to_fp.ll, but s_sint_to_fp_v2i64 crashes on r600
+; FUNC-LABEL: {{^}}s_sint_to_fp_i64_to_f16:
+define void @s_sint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 %in) #0 {
+ %result = sitofp i64 %in to half
+ store half %result, half addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_sint_to_fp_i64_to_f16:
+; GCN: {{buffer|flat}}_load_dwordx2
+
+; GCN: v_ashrrev_i32_e32 v{{[0-9]+}}, 31, v{{[0-9]+}}
+; GCN: v_xor_b32
+
+; GCN: v_ffbh_u32
+; GCN: v_ffbh_u32
+; GCN: v_cndmask
+; GCN: v_cndmask
+
+; GCN-DAG: v_cmp_eq_u64
+; GCN-DAG: v_cmp_lt_u64
+
+; GCN: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, v{{[0-9]+}}
+; GCN: v_cndmask_b32_e{{32|64}} [[SIGN_SEL:v[0-9]+]],
+; GCN: v_cvt_f16_f32_e32 [[SIGN_SEL_F16:v[0-9]+]], [[SIGN_SEL]]
+; GCN: {{buffer|flat}}_store_short {{.*}}[[SIGN_SEL_F16]]
+define void @v_sint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
+ %out.gep = getelementptr half, half addrspace(1)* %out, i32 %tid
+ %val = load i64, i64 addrspace(1)* %in.gep
+ %result = sitofp i64 %val to half
+ store half %result, half addrspace(1)* %out.gep
+ ret void
+}
+
; FUNC-LABEL: {{^}}s_sint_to_fp_i64_to_f32:
define void @s_sint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 %in) #0 {
%result = sitofp i64 %in to float
@@ -37,16 +72,16 @@ define void @v_sint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 addrspace(1)*
ret void
}
-; FUNC-LABEL: {{^}}s_sint_to_fp_v2i64:
+; FUNC-LABEL: {{^}}s_sint_to_fp_v2i64_to_v2f32:
; GCN-NOT: v_and_b32_e32 v{{[0-9]+}}, -1,
-define void @s_sint_to_fp_v2i64(<2 x float> addrspace(1)* %out, <2 x i64> %in) #0{
+define void @s_sint_to_fp_v2i64_to_v2f32(<2 x float> addrspace(1)* %out, <2 x i64> %in) #0 {
%result = sitofp <2 x i64> %in to <2 x float>
store <2 x float> %result, <2 x float> addrspace(1)* %out
ret void
}
-; FUNC-LABEL: {{^}}v_sint_to_fp_v4i64:
-define void @v_sint_to_fp_v4i64(<4 x float> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
+; FUNC-LABEL: {{^}}v_sint_to_fp_v4i64_to_v4f32:
+define void @v_sint_to_fp_v4i64_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%in.gep = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i32 %tid
%out.gep = getelementptr <4 x float>, <4 x float> addrspace(1)* %out, i32 %tid
@@ -56,6 +91,25 @@ define void @v_sint_to_fp_v4i64(<4 x float> addrspace(1)* %out, <4 x i64> addrsp
ret void
}
+; FUNC-LABEL: {{^}}s_sint_to_fp_v2i64_to_v2f16:
+; GCN-NOT: v_and_b32_e32 v{{[0-9]+}}, -1,
+define void @s_sint_to_fp_v2i64_to_v2f16(<2 x half> addrspace(1)* %out, <2 x i64> %in) #0 {
+ %result = sitofp <2 x i64> %in to <2 x half>
+ store <2 x half> %result, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_sint_to_fp_v4i64_to_v4f16:
+define void @v_sint_to_fp_v4i64_to_v4f16(<4 x half> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.amdgcn.workitem.id.x()
+ %in.gep = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i32 %tid
+ %out.gep = getelementptr <4 x half>, <4 x half> addrspace(1)* %out, i32 %tid
+ %value = load <4 x i64>, <4 x i64> addrspace(1)* %in.gep
+ %result = sitofp <4 x i64> %value to <4 x half>
+ store <4 x half> %result, <4 x half> addrspace(1)* %out.gep
+ ret void
+}
+
declare i32 @llvm.amdgcn.workitem.id.x() #1
attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/sitofp.f16.ll b/llvm/test/CodeGen/AMDGPU/sitofp.f16.ll
new file mode 100644
index 00000000000..6c1d8ec2005
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/sitofp.f16.ll
@@ -0,0 +1,87 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}sitofp_i16_to_f16
+; GCN: buffer_load_{{sshort|ushort}} v[[A_I16:[0-9]+]]
+; SI: v_cvt_f32_i32_e32 v[[A_F32:[0-9]+]], v[[A_I16]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[A_F32]]
+; VI: v_cvt_f16_i16_e32 v[[R_F16:[0-9]+]], v[[A_I16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @sitofp_i16_to_f16(
+ half addrspace(1)* %r,
+ i16 addrspace(1)* %a) {
+entry:
+ %a.val = load i16, i16 addrspace(1)* %a
+ %r.val = sitofp i16 %a.val to half
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}sitofp_i32_to_f16
+; GCN: buffer_load_dword v[[A_I32:[0-9]+]]
+; GCN: v_cvt_f32_i32_e32 v[[A_F32:[0-9]+]], v[[A_I32]]
+; GCN: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[A_F32]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @sitofp_i32_to_f16(
+ half addrspace(1)* %r,
+ i32 addrspace(1)* %a) {
+entry:
+ %a.val = load i32, i32 addrspace(1)* %a
+ %r.val = sitofp i32 %a.val to half
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; f16 = sitofp i64 is in sint_to_fp.i64.ll
+
+; GCN-LABEL: {{^}}sitofp_v2i16_to_v2f16
+; GCN: buffer_load_dword v[[A_V2_I16:[0-9]+]]
+; SI: v_bfe_i32 v[[A_I16_0:[0-9]+]], v[[A_V2_I16]], 0, 16
+; SI: v_ashrrev_i32_e32 v[[A_I16_1:[0-9]+]], 16, v[[A_V2_I16]]
+; SI: v_cvt_f32_i32_e32 v[[A_F32_1:[0-9]+]], v[[A_I16_1]]
+; SI: v_cvt_f32_i32_e32 v[[A_F32_0:[0-9]+]], v[[A_I16_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[A_F32_0]]
+; VI: v_lshrrev_b32_e32 v[[A_I16_1:[0-9]+]], 16, v[[A_V2_I16]]
+; VI: v_cvt_f16_i16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_I16]]
+; VI: v_cvt_f16_i16_e32 v[[R_F16_1:[0-9]+]], v[[A_I16_1]]
+; VI: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @sitofp_v2i16_to_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x i16> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x i16>, <2 x i16> addrspace(1)* %a
+ %r.val = sitofp <2 x i16> %a.val to <2 x half>
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}sitofp_v2i32_to_v2f16
+; GCN: buffer_load_dwordx2
+; GCN: v_cvt_f32_i32_e32
+; GCN: v_cvt_f32_i32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN-DAG: v_and_b32_e32
+; GCN-DAG: v_lshlrev_b32_e32
+; GCN-DAG: v_or_b32_e32
+; GCN: buffer_store_dword
+; GCN: s_endpgm
+define void @sitofp_v2i32_to_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x i32> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x i32>, <2 x i32> addrspace(1)* %a
+ %r.val = sitofp <2 x i32> %a.val to <2 x half>
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; v2f16 = sitofp v2i64 is in sint_to_fp.i64.ll
diff --git a/llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll b/llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll
index 88edbb097be..22d563d40b7 100644
--- a/llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/uint_to_fp.i64.ll
@@ -3,6 +3,37 @@
; FIXME: This should be merged with uint_to_fp.ll, but s_uint_to_fp_v2i64 crashes on r600
+; FUNC-LABEL: {{^}}s_uint_to_fp_i64_to_f16:
+define void @s_uint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 %in) #0 {
+ %result = uitofp i64 %in to half
+ store half %result, half addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_uint_to_fp_i64_to_f16:
+; GCN: {{buffer|flat}}_load_dwordx2
+
+; GCN: v_ffbh_u32
+; GCN: v_ffbh_u32
+; GCN: v_cndmask
+; GCN: v_cndmask
+
+; GCN-DAG: v_cmp_eq_u64
+; GCN-DAG: v_cmp_lt_u64
+
+; GCN: v_add_i32_e32 [[VR:v[0-9]+]]
+; GCN: v_cvt_f16_f32_e32 [[VR_F16:v[0-9]+]], [[VR]]
+; GCN: {{buffer|flat}}_store_short {{.*}}[[VR_F16]]
+define void @v_uint_to_fp_i64_to_f16(half addrspace(1)* %out, i64 addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
+ %out.gep = getelementptr half, half addrspace(1)* %out, i32 %tid
+ %val = load i64, i64 addrspace(1)* %in.gep
+ %result = uitofp i64 %val to half
+ store half %result, half addrspace(1)* %out.gep
+ ret void
+}
+
; FUNC-LABEL: {{^}}s_uint_to_fp_i64_to_f32:
define void @s_uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 %in) #0 {
%result = uitofp i64 %in to float
@@ -33,15 +64,15 @@ define void @v_uint_to_fp_i64_to_f32(float addrspace(1)* %out, i64 addrspace(1)*
ret void
}
-; FUNC-LABEL: {{^}}s_uint_to_fp_v2i64:
-define void @s_uint_to_fp_v2i64(<2 x float> addrspace(1)* %out, <2 x i64> %in) #0{
+; FUNC-LABEL: {{^}}s_uint_to_fp_v2i64_to_v2f32:
+define void @s_uint_to_fp_v2i64_to_v2f32(<2 x float> addrspace(1)* %out, <2 x i64> %in) #0 {
%result = uitofp <2 x i64> %in to <2 x float>
store <2 x float> %result, <2 x float> addrspace(1)* %out
ret void
}
-; FUNC-LABEL: {{^}}v_uint_to_fp_v4i64:
-define void @v_uint_to_fp_v4i64(<4 x float> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
+; FUNC-LABEL: {{^}}v_uint_to_fp_v4i64_to_v4f32:
+define void @v_uint_to_fp_v4i64_to_v4f32(<4 x float> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
%tid = call i32 @llvm.r600.read.tidig.x()
%in.gep = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i32 %tid
%out.gep = getelementptr <4 x float>, <4 x float> addrspace(1)* %out, i32 %tid
@@ -51,6 +82,24 @@ define void @v_uint_to_fp_v4i64(<4 x float> addrspace(1)* %out, <4 x i64> addrsp
ret void
}
+; FUNC-LABEL: {{^}}s_uint_to_fp_v2i64_to_v2f16:
+define void @s_uint_to_fp_v2i64_to_v2f16(<2 x half> addrspace(1)* %out, <2 x i64> %in) #0 {
+ %result = uitofp <2 x i64> %in to <2 x half>
+ store <2 x half> %result, <2 x half> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}v_uint_to_fp_v4i64_to_v4f16:
+define void @v_uint_to_fp_v4i64_to_v4f16(<4 x half> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) #0 {
+ %tid = call i32 @llvm.r600.read.tidig.x()
+ %in.gep = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i32 %tid
+ %out.gep = getelementptr <4 x half>, <4 x half> addrspace(1)* %out, i32 %tid
+ %value = load <4 x i64>, <4 x i64> addrspace(1)* %in.gep
+ %result = uitofp <4 x i64> %value to <4 x half>
+ store <4 x half> %result, <4 x half> addrspace(1)* %out.gep
+ ret void
+}
+
declare i32 @llvm.r600.read.tidig.x() #1
attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/uitofp.f16.ll b/llvm/test/CodeGen/AMDGPU/uitofp.f16.ll
new file mode 100644
index 00000000000..62131e7cd29
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/uitofp.f16.ll
@@ -0,0 +1,87 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}uitofp_i16_to_f16
+; GCN: buffer_load_ushort v[[A_I16:[0-9]+]]
+; SI: v_cvt_f32_u32_e32 v[[A_F32:[0-9]+]], v[[A_I16]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[A_F32]]
+; VI: v_cvt_f16_u16_e32 v[[R_F16:[0-9]+]], v[[A_I16]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @uitofp_i16_to_f16(
+ half addrspace(1)* %r,
+ i16 addrspace(1)* %a) {
+entry:
+ %a.val = load i16, i16 addrspace(1)* %a
+ %r.val = uitofp i16 %a.val to half
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}uitofp_i32_to_f16
+; GCN: buffer_load_dword v[[A_I32:[0-9]+]]
+; GCN: v_cvt_f32_u32_e32 v[[A_F32:[0-9]+]], v[[A_I32]]
+; GCN: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[A_F32]]
+; GCN: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @uitofp_i32_to_f16(
+ half addrspace(1)* %r,
+ i32 addrspace(1)* %a) {
+entry:
+ %a.val = load i32, i32 addrspace(1)* %a
+ %r.val = uitofp i32 %a.val to half
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; f16 = uitofp i64 is in uint_to_fp.i64.ll
+
+; GCN-LABEL: {{^}}uitofp_v2i16_to_v2f16
+; GCN: buffer_load_dword v[[A_V2_I16:[0-9]+]]
+; SI: s_mov_b32 s[[MASK:[0-9]+]], 0xffff{{$}}
+; SI: v_and_b32_e32 v[[A_I16_0:[0-9]+]], s[[MASK]], v[[A_V2_I16]]
+; GCN: v_lshrrev_b32_e32 v[[A_I16_1:[0-9]+]], 16, v[[A_V2_I16]]
+; SI: v_cvt_f32_u32_e32 v[[A_F32_1:[0-9]+]], v[[A_I16_1]]
+; SI: v_cvt_f32_u32_e32 v[[A_F32_0:[0-9]+]], v[[A_I16_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[A_F32_0]]
+; VI: v_cvt_f16_u16_e32 v[[R_F16_0:[0-9]+]], v[[A_V2_I16]]
+; VI: v_cvt_f16_u16_e32 v[[R_F16_1:[0-9]+]], v[[A_I16_1]]
+; VI: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; GCN: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; SI: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], s[[MASK]], v[[R_F16_0]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @uitofp_v2i16_to_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x i16> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x i16>, <2 x i16> addrspace(1)* %a
+ %r.val = uitofp <2 x i16> %a.val to <2 x half>
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}uitofp_v2i32_to_v2f16
+; GCN: buffer_load_dwordx2
+; GCN: v_cvt_f32_u32_e32
+; GCN: v_cvt_f32_u32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN: v_cvt_f16_f32_e32
+; GCN-DAG: v_and_b32_e32
+; GCN-DAG: v_lshlrev_b32_e32
+; GCN-DAG: v_or_b32_e32
+; GCN: buffer_store_dword
+; GCN: s_endpgm
+define void @uitofp_v2i32_to_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x i32> addrspace(1)* %a) {
+entry:
+ %a.val = load <2 x i32>, <2 x i32> addrspace(1)* %a
+ %r.val = uitofp <2 x i32> %a.val to <2 x half>
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; v2f16 = uitofp v2i64 is in uint_to_fp.i64.ll
diff --git a/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll b/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll
new file mode 100644
index 00000000000..ecd5b01545d
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/v_mac_f16.ll
@@ -0,0 +1,608 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}mac_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[C_F16:[0-9]+]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32:[0-9]+]], v[[A_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32:[0-9]+]], v[[B_F16]]
+; SI: v_cvt_f32_f16_e32 v[[C_F32:[0-9]+]], v[[C_F16]]
+; SI: v_mac_f32_e32 v[[C_F32]], v[[B_F32]], v[[A_F32]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16:[0-9]+]], v[[C_F32]]
+; SI: buffer_store_short v[[R_F16]]
+; VI: v_mac_f16_e32 v[[C_F16]], v[[B_F16]], v[[A_F16]]
+; VI: buffer_store_short v[[C_F16]]
+; GCN: s_endpgm
+define void @mac_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) #0 {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+
+ %t.val = fmul half %a.val, %b.val
+ %r.val = fadd half %t.val, %c.val
+
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_f16_same_add
+; SI: v_mad_f32 v{{[0-9]}}, v{{[0-9]+}}, v{{[0-9]+}}, [[ADD:v[0-9]+]]
+; SI: v_mac_f32_e32 [[ADD]], v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_mad_f16 v{{[0-9]}}, v{{[0-9]+}}, v{{[0-9]+}}, [[ADD:v[0-9]+]]
+; VI: v_mac_f16_e32 [[ADD]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: s_endpgm
+define void @mac_f16_same_add(
+ half addrspace(1)* %r0,
+ half addrspace(1)* %r1,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c,
+ half addrspace(1)* %d,
+ half addrspace(1)* %e) #0 {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+ %d.val = load half, half addrspace(1)* %d
+ %e.val = load half, half addrspace(1)* %e
+
+ %t0.val = fmul half %a.val, %b.val
+ %r0.val = fadd half %t0.val, %c.val
+
+ %t1.val = fmul half %d.val, %e.val
+ %r1.val = fadd half %t1.val, %c.val
+
+ store half %r0.val, half addrspace(1)* %r0
+ store half %r1.val, half addrspace(1)* %r1
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_f16_neg_a
+; SI-NOT: v_mac_f32
+; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI-NOT: v_mac_f16
+; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: s_endpgm
+define void @mac_f16_neg_a(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) #0 {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+
+ %a.neg = fsub half -0.0, %a.val
+ %t.val = fmul half %a.neg, %b.val
+ %r.val = fadd half %t.val, %c.val
+
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_f16_neg_b
+; SI-NOT: v_mac_f32
+; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI-NOT: v_mac_f16
+; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: s_endpgm
+define void @mac_f16_neg_b(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) #0 {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+
+ %b.neg = fsub half -0.0, %b.val
+ %t.val = fmul half %a.val, %b.neg
+ %r.val = fadd half %t.val, %c.val
+
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_f16_neg_c
+; SI-NOT: v_mac_f32
+; SI: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
+; VI-NOT: v_mac_f16
+; VI: v_mad_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
+; GCN: s_endpgm
+define void @mac_f16_neg_c(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) #0 {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+
+ %c.neg = fsub half -0.0, %c.val
+ %t.val = fmul half %a.val, %b.val
+ %r.val = fadd half %t.val, %c.neg
+
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_f16_neg_a_safe_fp_math
+; SI: v_cvt_f32_f16_e32 v[[ZERO:[0-9]+]], 0{{$}}
+; SI: v_subrev_f32_e32 v[[NEG_A:[0-9]+]], v{{[0-9]+}}, v[[ZERO]]
+; SI: v_mac_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A]]
+; VI: v_sub_f16_e32 v[[NEG_A:[0-9]+]], 0, v{{[0-9]+}}
+; VI: v_mac_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A]]
+; GCN: s_endpgm
+define void @mac_f16_neg_a_safe_fp_math(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) #0 {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+
+ %a.neg = fsub half 0.0, %a.val
+ %t.val = fmul half %a.neg, %b.val
+ %r.val = fadd half %t.val, %c.val
+
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_f16_neg_b_safe_fp_math
+; SI: v_cvt_f32_f16_e32 v[[ZERO:[0-9]+]], 0{{$}}
+; SI: v_subrev_f32_e32 v[[NEG_A:[0-9]+]], v{{[0-9]+}}, v[[ZERO]]
+; SI: v_mac_f32_e32 v{{[0-9]+}}, v[[NEG_A]], v{{[0-9]+}}
+; VI: v_sub_f16_e32 v[[NEG_A:[0-9]+]], 0, v{{[0-9]+}}
+; VI: v_mac_f16_e32 v{{[0-9]+}}, v[[NEG_A]], v{{[0-9]+}}
+; GCN: s_endpgm
+define void @mac_f16_neg_b_safe_fp_math(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) #0 {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+
+ %b.neg = fsub half 0.0, %b.val
+ %t.val = fmul half %a.val, %b.neg
+ %r.val = fadd half %t.val, %c.val
+
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_f16_neg_c_safe_fp_math
+; SI: v_cvt_f32_f16_e32 v[[ZERO:[0-9]+]], 0{{$}}
+; SI: v_subrev_f32_e32 v[[NEG_A:[0-9]+]], v{{[0-9]+}}, v[[ZERO]]
+; SI: v_mac_f32_e32 v[[NEG_A]], v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_sub_f16_e32 v[[NEG_A:[0-9]+]], 0, v{{[0-9]+}}
+; VI: v_mac_f16_e32 v[[NEG_A]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: s_endpgm
+define void @mac_f16_neg_c_safe_fp_math(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) #0 {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+
+ %c.neg = fsub half 0.0, %c.val
+ %t.val = fmul half %a.val, %b.val
+ %r.val = fadd half %t.val, %c.neg
+
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_f16_neg_a_unsafe_fp_math
+; SI-NOT: v_mac_f32
+; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]}}
+; VI-NOT: v_mac_f16
+; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]}}
+; GCN: s_endpgm
+define void @mac_f16_neg_a_unsafe_fp_math(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) #1 {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+
+ %a.neg = fsub half 0.0, %a.val
+ %t.val = fmul half %a.neg, %b.val
+ %r.val = fadd half %t.val, %c.val
+
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_f16_neg_b_unsafe_fp_math
+; SI-NOT: v_mac_f32
+; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]}}
+; VI-NOT: v_mac_f16
+; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]}}
+; GCN: s_endpgm
+define void @mac_f16_neg_b_unsafe_fp_math(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) #1 {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+
+ %b.neg = fsub half 0.0, %b.val
+ %t.val = fmul half %a.val, %b.neg
+ %r.val = fadd half %t.val, %c.val
+
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_f16_neg_c_unsafe_fp_math
+; SI-NOT: v_mac_f32
+; SI: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]}}
+; VI-NOT: v_mac_f16
+; VI: v_mad_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]}}
+; GCN: s_endpgm
+define void @mac_f16_neg_c_unsafe_fp_math(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) #1 {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+
+ %c.neg = fsub half 0.0, %c.val
+ %t.val = fmul half %a.val, %b.val
+ %r.val = fadd half %t.val, %c.neg
+
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_v2f16
+; GCN: buffer_load_dword v[[A_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[B_V2_F16:[0-9]+]]
+; GCN: buffer_load_dword v[[C_V2_F16:[0-9]+]]
+; GCN: v_lshrrev_b32_e32 v[[A_F16_1:[0-9]+]], 16, v[[A_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[B_F16_1:[0-9]+]], 16, v[[B_V2_F16]]
+; GCN: v_lshrrev_b32_e32 v[[C_F16_1:[0-9]+]], 16, v[[C_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_0:[0-9]+]], v[[A_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_0:[0-9]+]], v[[B_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[C_F32_0:[0-9]+]], v[[C_V2_F16]]
+; SI: v_cvt_f32_f16_e32 v[[A_F32_1:[0-9]+]], v[[A_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[B_F32_1:[0-9]+]], v[[B_F16_1]]
+; SI: v_cvt_f32_f16_e32 v[[C_F32_1:[0-9]+]], v[[C_F16_1]]
+; SI: v_mac_f32_e32 v[[C_F32_0]], v[[B_F32_0]], v[[A_F32_0]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_0:[0-9]+]], v[[C_F32_0]]
+; SI: v_mac_f32_e32 v[[C_F32_1]], v[[B_F32_1]], v[[A_F32_1]]
+; SI: v_cvt_f16_f32_e32 v[[R_F16_1:[0-9]+]], v[[C_F32_1]]
+; SI: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[R_F16_0]]
+; SI: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[R_F16_1]]
+; VI: v_mac_f16_e32 v[[C_V2_F16]], v[[B_V2_F16]], v[[A_V2_F16]]
+; VI: v_mac_f16_e32 v[[C_F16_1]], v[[B_F16_1]], v[[A_F16_1]]
+; VI: v_and_b32_e32 v[[R_F16_LO:[0-9]+]], 0xffff, v[[C_V2_F16]]
+; VI: v_lshlrev_b32_e32 v[[R_F16_HI:[0-9]+]], 16, v[[C_F16_1]]
+; GCN: v_or_b32_e32 v[[R_V2_F16:[0-9]+]], v[[R_F16_HI]], v[[R_F16_LO]]
+; GCN: buffer_store_dword v[[R_V2_F16]]
+; GCN: s_endpgm
+define void @mac_v2f16(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b,
+ <2 x half> addrspace(1)* %c) #0 {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
+
+ %t.val = fmul <2 x half> %a.val, %b.val
+ %r.val = fadd <2 x half> %t.val, %c.val
+
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_v2f16_same_add
+; SI: v_mad_f32 v{{[0-9]}}, v{{[0-9]+}}, v{{[0-9]+}}, [[ADD0:v[0-9]+]]
+; SI: v_mad_f32 v{{[0-9]}}, v{{[0-9]+}}, v{{[0-9]+}}, [[ADD1:v[0-9]+]]
+; SI: v_mac_f32_e32 [[ADD0]], v{{[0-9]+}}, v{{[0-9]+}}
+; SI: v_mac_f32_e32 [[ADD1]], v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_mad_f16 v{{[0-9]}}, v{{[0-9]+}}, v{{[0-9]+}}, [[ADD0:v[0-9]+]]
+; VI: v_mad_f16 v{{[0-9]}}, v{{[0-9]+}}, v{{[0-9]+}}, [[ADD1:v[0-9]+]]
+; VI: v_mac_f16_e32 [[ADD0]], v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_mac_f16_e32 [[ADD1]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: s_endpgm
+define void @mac_v2f16_same_add(
+ <2 x half> addrspace(1)* %r0,
+ <2 x half> addrspace(1)* %r1,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b,
+ <2 x half> addrspace(1)* %c,
+ <2 x half> addrspace(1)* %d,
+ <2 x half> addrspace(1)* %e) #0 {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
+ %d.val = load <2 x half>, <2 x half> addrspace(1)* %d
+ %e.val = load <2 x half>, <2 x half> addrspace(1)* %e
+
+ %t0.val = fmul <2 x half> %a.val, %b.val
+ %r0.val = fadd <2 x half> %t0.val, %c.val
+
+ %t1.val = fmul <2 x half> %d.val, %e.val
+ %r1.val = fadd <2 x half> %t1.val, %c.val
+
+ store <2 x half> %r0.val, <2 x half> addrspace(1)* %r0
+ store <2 x half> %r1.val, <2 x half> addrspace(1)* %r1
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_v2f16_neg_a
+; SI-NOT: v_mac_f32
+; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI-NOT: v_mac_f16
+; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: s_endpgm
+define void @mac_v2f16_neg_a(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b,
+ <2 x half> addrspace(1)* %c) #0 {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
+
+ %a.neg = fsub <2 x half> <half -0.0, half -0.0>, %a.val
+ %t.val = fmul <2 x half> %a.neg, %b.val
+ %r.val = fadd <2 x half> %t.val, %c.val
+
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_v2f16_neg_b
+; SI-NOT: v_mac_f32
+; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI-NOT: v_mac_f16
+; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: s_endpgm
+define void @mac_v2f16_neg_b(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b,
+ <2 x half> addrspace(1)* %c) #0 {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
+
+ %b.neg = fsub <2 x half> <half -0.0, half -0.0>, %b.val
+ %t.val = fmul <2 x half> %a.val, %b.neg
+ %r.val = fadd <2 x half> %t.val, %c.val
+
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_v2f16_neg_c
+; SI-NOT: v_mac_f32
+; SI: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
+; SI: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
+; VI-NOT: v_mac_f16
+; VI: v_mad_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
+; VI: v_mad_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}
+; GCN: s_endpgm
+define void @mac_v2f16_neg_c(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b,
+ <2 x half> addrspace(1)* %c) #0 {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
+
+ %c.neg = fsub <2 x half> <half -0.0, half -0.0>, %c.val
+ %t.val = fmul <2 x half> %a.val, %b.val
+ %r.val = fadd <2 x half> %t.val, %c.neg
+
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_v2f16_neg_a_safe_fp_math
+; SI: v_cvt_f32_f16_e32 v[[ZERO:[0-9]+]], 0{{$}}
+; SI: v_subrev_f32_e32 v[[NEG_A0:[0-9]+]], v{{[0-9]+}}, v[[ZERO]]
+; SI: v_subrev_f32_e32 v[[NEG_A1:[0-9]+]], v{{[0-9]+}}, v[[ZERO]]
+; SI: v_mac_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A0]]
+; SI: v_mac_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A1]]
+; VI: v_sub_f16_e32 v[[NEG_A0:[0-9]+]], 0, v{{[0-9]+}}
+; VI: v_sub_f16_e32 v[[NEG_A1:[0-9]+]], 0, v{{[0-9]+}}
+; VI: v_mac_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A0]]
+; VI: v_mac_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[NEG_A1]]
+; GCN: s_endpgm
+define void @mac_v2f16_neg_a_safe_fp_math(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b,
+ <2 x half> addrspace(1)* %c) #0 {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
+
+ %a.neg = fsub <2 x half> <half 0.0, half 0.0>, %a.val
+ %t.val = fmul <2 x half> %a.neg, %b.val
+ %r.val = fadd <2 x half> %t.val, %c.val
+
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_v2f16_neg_b_safe_fp_math
+; SI: v_cvt_f32_f16_e32 v[[ZERO:[0-9]+]], 0{{$}}
+; SI: v_subrev_f32_e32 v[[NEG_A0:[0-9]+]], v{{[0-9]+}}, v[[ZERO]]
+; SI: v_subrev_f32_e32 v[[NEG_A1:[0-9]+]], v{{[0-9]+}}, v[[ZERO]]
+; SI: v_mac_f32_e32 v{{[0-9]+}}, v[[NEG_A0]], v{{[0-9]+}}
+; SI: v_mac_f32_e32 v{{[0-9]+}}, v[[NEG_A1]], v{{[0-9]+}}
+; VI: v_sub_f16_e32 v[[NEG_A0:[0-9]+]], 0, v{{[0-9]+}}
+; VI: v_sub_f16_e32 v[[NEG_A1:[0-9]+]], 0, v{{[0-9]+}}
+; VI: v_mac_f16_e32 v{{[0-9]+}}, v[[NEG_A0]], v{{[0-9]+}}
+; VI: v_mac_f16_e32 v{{[0-9]+}}, v[[NEG_A1]], v{{[0-9]+}}
+; GCN: s_endpgm
+define void @mac_v2f16_neg_b_safe_fp_math(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b,
+ <2 x half> addrspace(1)* %c) #0 {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
+
+ %b.neg = fsub <2 x half> <half 0.0, half 0.0>, %b.val
+ %t.val = fmul <2 x half> %a.val, %b.neg
+ %r.val = fadd <2 x half> %t.val, %c.val
+
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_v2f16_neg_c_safe_fp_math
+; SI: v_cvt_f32_f16_e32 v[[ZERO:[0-9]+]], 0{{$}}
+; SI: v_subrev_f32_e32 v[[NEG_A0:[0-9]+]], v{{[0-9]+}}, v[[ZERO]]
+; SI: v_subrev_f32_e32 v[[NEG_A1:[0-9]+]], v{{[0-9]+}}, v[[ZERO]]
+; SI: v_mac_f32_e32 v[[NEG_A0]], v{{[0-9]+}}, v{{[0-9]+}}
+; SI: v_mac_f32_e32 v[[NEG_A1]], v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_sub_f16_e32 v[[NEG_A0:[0-9]+]], 0, v{{[0-9]+}}
+; VI: v_sub_f16_e32 v[[NEG_A1:[0-9]+]], 0, v{{[0-9]+}}
+; VI: v_mac_f16_e32 v[[NEG_A0]], v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_mac_f16_e32 v[[NEG_A1]], v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: s_endpgm
+define void @mac_v2f16_neg_c_safe_fp_math(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b,
+ <2 x half> addrspace(1)* %c) #0 {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
+
+ %c.neg = fsub <2 x half> <half 0.0, half 0.0>, %c.val
+ %t.val = fmul <2 x half> %a.val, %b.val
+ %r.val = fadd <2 x half> %t.val, %c.neg
+
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_v2f16_neg_a_unsafe_fp_math
+; SI-NOT: v_mac_f32
+; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
+; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
+; VI-NOT: v_mac_f16
+; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
+; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
+; GCN: s_endpgm
+define void @mac_v2f16_neg_a_unsafe_fp_math(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b,
+ <2 x half> addrspace(1)* %c) #1 {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
+
+ %a.neg = fsub <2 x half> <half 0.0, half 0.0>, %a.val
+ %t.val = fmul <2 x half> %a.neg, %b.val
+ %r.val = fadd <2 x half> %t.val, %c.val
+
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_v2f16_neg_b_unsafe_fp_math
+; SI-NOT: v_mac_f32
+; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
+; SI: v_mad_f32 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
+; VI-NOT: v_mac_f16
+; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
+; VI: v_mad_f16 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}, v{{[-0-9]}}
+; GCN: s_endpgm
+define void @mac_v2f16_neg_b_unsafe_fp_math(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b,
+ <2 x half> addrspace(1)* %c) #1 {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
+
+ %b.neg = fsub <2 x half> <half 0.0, half 0.0>, %b.val
+ %t.val = fmul <2 x half> %a.val, %b.neg
+ %r.val = fadd <2 x half> %t.val, %c.val
+
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}mac_v2f16_neg_c_unsafe_fp_math
+; SI-NOT: v_mac_f32
+; SI: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[-0-9]}}
+; SI: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[-0-9]}}
+; VI-NOT: v_mac_f16
+; VI: v_mad_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[-0-9]}}
+; VI: v_mad_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, -v{{[-0-9]}}
+; GCN: s_endpgm
+define void @mac_v2f16_neg_c_unsafe_fp_math(
+ <2 x half> addrspace(1)* %r,
+ <2 x half> addrspace(1)* %a,
+ <2 x half> addrspace(1)* %b,
+ <2 x half> addrspace(1)* %c) #1 {
+entry:
+ %a.val = load <2 x half>, <2 x half> addrspace(1)* %a
+ %b.val = load <2 x half>, <2 x half> addrspace(1)* %b
+ %c.val = load <2 x half>, <2 x half> addrspace(1)* %c
+
+ %c.neg = fsub <2 x half> <half 0.0, half 0.0>, %c.val
+ %t.val = fmul <2 x half> %a.val, %b.val
+ %r.val = fadd <2 x half> %t.val, %c.neg
+
+ store <2 x half> %r.val, <2 x half> addrspace(1)* %r
+ ret void
+}
+
+attributes #0 = {"unsafe-fp-math"="false"}
+attributes #1 = {"unsafe-fp-math"="true"}
diff --git a/llvm/test/CodeGen/AMDGPU/v_madak_f16.ll b/llvm/test/CodeGen/AMDGPU/v_madak_f16.ll
new file mode 100644
index 00000000000..fd5ad3e3d60
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/v_madak_f16.ll
@@ -0,0 +1,50 @@
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
+
+; GCN-LABEL: {{^}}madak_f16
+; GCN: buffer_load_ushort v[[A_F16:[0-9]+]]
+; GCN: buffer_load_ushort v[[B_F16:[0-9]+]]
+; VI: v_madak_f16_e32 v[[R_F16:[0-9]+]], v[[A_F16]], v[[B_F16]], 0x4900{{$}}
+; VI: buffer_store_short v[[R_F16]]
+; GCN: s_endpgm
+define void @madak_f16(
+ half addrspace(1)* %r,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+
+ %t.val = fmul half %a.val, %b.val
+ %r.val = fadd half %t.val, 10.0
+
+ store half %r.val, half addrspace(1)* %r
+ ret void
+}
+
+; GCN-LABEL: {{^}}madak_f16_use_2
+; SI: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; SI: v_mac_f32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_mad_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; VI: v_mac_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+; GCN: s_endpgm
+define void @madak_f16_use_2(
+ half addrspace(1)* %r0,
+ half addrspace(1)* %r1,
+ half addrspace(1)* %a,
+ half addrspace(1)* %b,
+ half addrspace(1)* %c) {
+entry:
+ %a.val = load half, half addrspace(1)* %a
+ %b.val = load half, half addrspace(1)* %b
+ %c.val = load half, half addrspace(1)* %c
+
+ %t0.val = fmul half %a.val, %b.val
+ %t1.val = fmul half %a.val, %c.val
+ %r0.val = fadd half %t0.val, 10.0
+ %r1.val = fadd half %t1.val, 10.0
+
+ store half %r0.val, half addrspace(1)* %r0
+ store half %r1.val, half addrspace(1)* %r1
+ ret void
+}
diff --git a/llvm/test/MC/Disassembler/AMDGPU/sdwa_vi.txt b/llvm/test/MC/Disassembler/AMDGPU/sdwa_vi.txt
index 2daf89a6121..4fadef7bdaa 100644
--- a/llvm/test/MC/Disassembler/AMDGPU/sdwa_vi.txt
+++ b/llvm/test/MC/Disassembler/AMDGPU/sdwa_vi.txt
@@ -300,8 +300,8 @@
 # VI: v_mul_f16_sdwa v1, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:BYTE_2 ; encoding: [0xf9,0x06,0x02,0x44,0x02,0x06,0x05,0x02]
 0xf9 0x06 0x02 0x44 0x02 0x06 0x05 0x02
 
-# VI: v_mac_f16_sdwa v1, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:BYTE_2 ; encoding: [0xf9,0x06,0x02,0x46,0x02,0x06,0x05,0x02]
-0xf9 0x06 0x02 0x46 0x02 0x06 0x05 0x02
+# VI: v_mac_f16_e32 v1, v2, v3 ; encoding: [0x02,0x07,0x02,0x46]
+0x02,0x07,0x02,0x46
 
 # VI: v_add_u16_sdwa v1, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:BYTE_2 ; encoding: [0xf9,0x06,0x02,0x4c,0x02,0x06,0x05,0x02]
 0xf9 0x06 0x02 0x4c 0x02 0x06 0x05 0x02