summaryrefslogtreecommitdiffstats
path: root/llvm/test/CodeGen
diff options
context:
space:
mode:
authorMatt Arsenault <Matthew.Arsenault@amd.com>2016-08-27 01:32:27 +0000
committerMatt Arsenault <Matthew.Arsenault@amd.com>2016-08-27 01:32:27 +0000
commit2712d4a3d81d31621e100eaba9daf95ab8b75aa8 (patch)
tree95ab47bf2001d0c795cdfbce7ca30e557f62401d /llvm/test/CodeGen
parent22e417956d67efb602a756eb95ab61e48482a3f6 (diff)
downloadbcm5719-llvm-2712d4a3d81d31621e100eaba9daf95ab8b75aa8.tar.gz
bcm5719-llvm-2712d4a3d81d31621e100eaba9daf95ab8b75aa8.zip
AMDGPU: Select mulhi 24-bit instructions
llvm-svn: 279902
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--llvm/test/CodeGen/AMDGPU/mul_int24.ll156
-rw-r--r--llvm/test/CodeGen/AMDGPU/mul_uint24.ll174
2 files changed, 293 insertions, 37 deletions
diff --git a/llvm/test/CodeGen/AMDGPU/mul_int24.ll b/llvm/test/CodeGen/AMDGPU/mul_int24.ll
index 1a323fbaa1a..c8f8ba223b9 100644
--- a/llvm/test/CodeGen/AMDGPU/mul_int24.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul_int24.ll
@@ -1,23 +1,151 @@
-; RUN: llc < %s -march=amdgcn -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
-; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM --check-prefix=FUNC
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=CM -check-prefix=FUNC %s
+
+; FUNC-LABEL: {{^}}test_smul24_i32:
+; GCN-NOT: bfe
+; GCN: v_mul_i32_i24
-; FUNC-LABEL: {{^}}i32_mul24:
; Signed 24-bit multiply is not supported on pre-Cayman GPUs.
; EG: MULLO_INT
+
; Make sure we are not masking the inputs
; CM-NOT: AND
; CM: MUL_INT24
-; SI-NOT: and
-; SI: v_mul_i32_i24
-define void @i32_mul24(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+define void @test_smul24_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
entry:
- %0 = shl i32 %a, 8
- %a_24 = ashr i32 %0, 8
- %1 = shl i32 %b, 8
- %b_24 = ashr i32 %1, 8
- %2 = mul i32 %a_24, %b_24
- store i32 %2, i32 addrspace(1)* %out
+ %a.shl = shl i32 %a, 8
+ %a.24 = ashr i32 %a.shl, 8
+ %b.shl = shl i32 %b, 8
+ %b.24 = ashr i32 %b.shl, 8
+ %mul24 = mul i32 %a.24, %b.24
+ store i32 %mul24, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_smulhi24_i64:
+; GCN-NOT: bfe
+; GCN-NOT: ashr
+; GCN: v_mul_hi_i32_i24_e32 [[RESULT:v[0-9]+]],
+; GCN-NEXT: buffer_store_dword [[RESULT]]
+
+; EG: ASHR
+; EG: ASHR
+; EG: MULHI_INT
+
+; CM-NOT: ASHR
+; CM: MULHI_INT24
+; CM: MULHI_INT24
+; CM: MULHI_INT24
+; CM: MULHI_INT24
+define void @test_smulhi24_i64(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+entry:
+ %a.shl = shl i32 %a, 8
+ %a.24 = ashr i32 %a.shl, 8
+ %b.shl = shl i32 %b, 8
+ %b.24 = ashr i32 %b.shl, 8
+ %a.24.i64 = sext i32 %a.24 to i64
+ %b.24.i64 = sext i32 %b.24 to i64
+ %mul48 = mul i64 %a.24.i64, %b.24.i64
+ %mul48.hi = lshr i64 %mul48, 32
+ %mul24hi = trunc i64 %mul48.hi to i32
+ store i32 %mul24hi, i32 addrspace(1)* %out
+ ret void
+}
+
+; This requires handling of the original 64-bit mul node to eliminate
+; unnecessary extension instructions because after legalization they
+; will not be removed by SimplifyDemandedBits because there are
+; multiple uses by the separate mul and mulhi.
+
+; FUNC-LABEL: {{^}}test_smul24_i64:
+; GCN: s_load_dword s
+; GCN: s_load_dword s
+
+; GCN-NOT: bfe
+; GCN-NOT: ashr
+
+; GCN-DAG: v_mul_hi_i32_i24_e32
+; GCN-DAG: v_mul_i32_i24_e32
+
+; GCN: buffer_store_dwordx2
+define void @test_smul24_i64(i64 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+ %shl.i = shl i32 %a, 8
+ %shr.i = ashr i32 %shl.i, 8
+ %conv.i = sext i32 %shr.i to i64
+ %shl1.i = shl i32 %b, 8
+ %shr2.i = ashr i32 %shl1.i, 8
+ %conv3.i = sext i32 %shr2.i to i64
+ %mul.i = mul i64 %conv3.i, %conv.i
+ store i64 %mul.i, i64 addrspace(1)* %out
+ ret void
+}
+
+; FIXME: Should be able to eliminate bfe
+; FUNC-LABEL: {{^}}test_smul24_i64_square:
+; GCN: s_load_dword [[A:s[0-9]+]]
+; GCN: s_bfe_i32 [[SEXT:s[0-9]+]], [[A]], 0x180000{{$}}
+; GCN-DAG: v_mul_hi_i32_i24_e64 v{{[0-9]+}}, [[SEXT]], [[SEXT]]
+; GCN-DAG: v_mul_i32_i24_e64 v{{[0-9]+}}, [[SEXT]], [[SEXT]]
+; GCN: buffer_store_dwordx2
+define void @test_smul24_i64_square(i64 addrspace(1)* %out, i32 %a, i32 %b) #0 {
+ %shl.i = shl i32 %a, 8
+ %shr.i = ashr i32 %shl.i, 8
+ %conv.i = sext i32 %shr.i to i64
+ %mul.i = mul i64 %conv.i, %conv.i
+ store i64 %mul.i, i64 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_smul24_i33:
+; GCN: s_load_dword s
+; GCN: s_load_dword s
+
+; GCN-NOT: and
+; GCN-NOT: lshr
+
+; GCN-DAG: v_mul_i32_i24_e32
+; GCN-DAG: v_mul_hi_i32_i24_e32
+; SI: v_lshl_b64 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, 31
+; SI: v_ashr_i64 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, 31
+
+; VI: v_lshlrev_b64 v{{\[[0-9]+:[0-9]+\]}}, 31, v{{\[[0-9]+:[0-9]+\]}}
+; VI: v_ashrrev_i64 v{{\[[0-9]+:[0-9]+\]}}, 31, v{{\[[0-9]+:[0-9]+\]}}
+
+; GCN: buffer_store_dwordx2
+define void @test_smul24_i33(i64 addrspace(1)* %out, i33 %a, i33 %b) #0 {
+entry:
+ %a.shl = shl i33 %a, 9
+ %a.24 = ashr i33 %a.shl, 9
+ %b.shl = shl i33 %b, 9
+ %b.24 = ashr i33 %b.shl, 9
+ %mul24 = mul i33 %a.24, %b.24
+ %ext = sext i33 %mul24 to i64
+ store i64 %ext, i64 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_smulhi24_i33:
+; SI: s_load_dword s
+; SI: s_load_dword s
+
+; SI-NOT: bfe
+
+; SI: v_mul_hi_i32_i24_e32 v[[MUL_HI:[0-9]+]],
+; SI-NEXT: v_and_b32_e32 v[[HI:[0-9]+]], 1, v[[MUL_HI]]
+; SI-NEXT: buffer_store_dword v[[HI]]
+define void @test_smulhi24_i33(i32 addrspace(1)* %out, i33 %a, i33 %b) {
+entry:
+ %tmp0 = shl i33 %a, 9
+ %a_24 = ashr i33 %tmp0, 9
+ %tmp1 = shl i33 %b, 9
+ %b_24 = ashr i33 %tmp1, 9
+ %tmp2 = mul i33 %a_24, %b_24
+ %hi = lshr i33 %tmp2, 32
+ %trunc = trunc i33 %hi to i32
+
+ store i32 %trunc, i32 addrspace(1)* %out
ret void
}
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/AMDGPU/mul_uint24.ll b/llvm/test/CodeGen/AMDGPU/mul_uint24.ll
index fdd348403ed..b882a4dd634 100644
--- a/llvm/test/CodeGen/AMDGPU/mul_uint24.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul_uint24.ll
@@ -1,13 +1,12 @@
-; RUN: llc < %s -march=amdgcn -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
-; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG --check-prefix=FUNC
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=cayman < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-; FUNC-LABEL: {{^}}u32_mul24:
+; FUNC-LABEL: {{^}}test_umul24_i32:
; EG: MUL_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, KC0[2].W
; SI: v_mul_u32_u24
-
-define void @u32_mul24(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+define void @test_umul24_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
entry:
%0 = shl i32 %a, 8
%a_24 = lshr i32 %0, 8
@@ -18,46 +17,98 @@ entry:
ret void
}
-; FUNC-LABEL: {{^}}i16_mul24:
+; FUNC-LABEL: {{^}}test_umul24_i16_sext:
; EG: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]]
; The result must be sign-extended
; EG: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PV.[[MUL_CHAN]], 0.0, literal.x
; EG: 16
+
; SI: v_mul_u32_u24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; SI: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 16
-define void @i16_mul24(i32 addrspace(1)* %out, i16 %a, i16 %b) {
+define void @test_umul24_i16_sext(i32 addrspace(1)* %out, i16 %a, i16 %b) {
+entry:
+ %mul = mul i16 %a, %b
+ %ext = sext i16 %mul to i32
+ store i32 %ext, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_umul24_i16:
+; SI: s_and_b32
+; SI: v_mul_u32_u24_e32
+; SI: v_and_b32_e32
+define void @test_umul24_i16(i32 addrspace(1)* %out, i16 %a, i16 %b) {
entry:
- %0 = mul i16 %a, %b
- %1 = sext i16 %0 to i32
- store i32 %1, i32 addrspace(1)* %out
+ %mul = mul i16 %a, %b
+ %ext = zext i16 %mul to i32
+ store i32 %ext, i32 addrspace(1)* %out
ret void
}
-; FUNC-LABEL: {{^}}i8_mul24:
+; FUNC-LABEL: {{^}}test_umul24_i8:
; EG: MUL_UINT24 {{[* ]*}}T{{[0-9]}}.[[MUL_CHAN:[XYZW]]]
; The result must be sign-extended
; EG: BFE_INT {{[* ]*}}T{{[0-9]}}.{{[XYZW]}}, PV.[[MUL_CHAN]], 0.0, literal.x
; SI: v_mul_u32_u24_e{{(32|64)}} [[MUL:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; SI: v_bfe_i32 v{{[0-9]}}, [[MUL]], 0, 8
-define void @i8_mul24(i32 addrspace(1)* %out, i8 %a, i8 %b) {
+define void @test_umul24_i8(i32 addrspace(1)* %out, i8 %a, i8 %b) {
+entry:
+ %mul = mul i8 %a, %b
+ %ext = sext i8 %mul to i32
+ store i32 %ext, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_umulhi24_i32_i64:
+; SI-NOT: and
+; SI: v_mul_hi_u32_u24_e32 [[RESULT:v[0-9]+]],
+; SI-NEXT: buffer_store_dword [[RESULT]]
+
+; EG: MULHI_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].Z, KC0[2].W
+define void @test_umulhi24_i32_i64(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+entry:
+ %a.24 = and i32 %a, 16777215
+ %b.24 = and i32 %b, 16777215
+ %a.24.i64 = zext i32 %a.24 to i64
+ %b.24.i64 = zext i32 %b.24 to i64
+ %mul48 = mul i64 %a.24.i64, %b.24.i64
+ %mul48.hi = lshr i64 %mul48, 32
+ %mul24hi = trunc i64 %mul48.hi to i32
+ store i32 %mul24hi, i32 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_umulhi24:
+; SI-NOT: and
+; SI: v_mul_hi_u32_u24_e32 [[RESULT:v[0-9]+]],
+; SI-NEXT: buffer_store_dword [[RESULT]]
+
+; EG: MULHI_UINT24 {{[* ]*}}T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
+define void @test_umulhi24(i32 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
- %0 = mul i8 %a, %b
- %1 = sext i8 %0 to i32
- store i32 %1, i32 addrspace(1)* %out
+ %a.24 = and i64 %a, 16777215
+ %b.24 = and i64 %b, 16777215
+ %mul48 = mul i64 %a.24, %b.24
+ %mul48.hi = lshr i64 %mul48, 32
+ %mul24.hi = trunc i64 %mul48.hi to i32
+ store i32 %mul24.hi, i32 addrspace(1)* %out
ret void
}
; Multiply with 24-bit inputs and 64-bit output
-; FUNC_LABEL: {{^}}mul24_i64:
+; FUNC-LABEL: {{^}}test_umul24_i64:
; EG: MUL_UINT24
; EG: MULHI
-; FIXME: SI support 24-bit mulhi
-; SI-DAG: v_mul_u32_u24
-; SI-DAG: v_mul_hi_u32
-; SI: s_endpgm
-define void @mul24_i64(i64 addrspace(1)* %out, i64 %a, i64 %b, i64 %c) {
+; SI-NOT: and
+; SI-NOT: lshr
+
+; SI-DAG: v_mul_u32_u24_e32
+; SI-DAG: v_mul_hi_u32_u24_e32
+
+; SI: buffer_store_dwordx2
+define void @test_umul24_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
entry:
%tmp0 = shl i64 %a, 40
%a_24 = lshr i64 %tmp0, 40
@@ -67,3 +118,80 @@ entry:
store i64 %tmp2, i64 addrspace(1)* %out
ret void
}
+
+; FIXME: Should be able to eliminate the and
+; FUNC-LABEL: {{^}}test_umul24_i64_square:
+; SI: s_load_dword [[A:s[0-9]+]]
+; SI: s_and_b32 [[TRUNC:s[0-9]+]], [[A]], 0xffffff{{$}}
+; SI-DAG: v_mul_hi_u32_u24_e64 v{{[0-9]+}}, [[TRUNC]], [[TRUNC]]
+; SI-DAG: v_mul_u32_u24_e64 v{{[0-9]+}}, [[TRUNC]], [[TRUNC]]
+define void @test_umul24_i64_square(i64 addrspace(1)* %out, i64 %a) {
+entry:
+ %tmp0 = shl i64 %a, 40
+ %a.24 = lshr i64 %tmp0, 40
+ %tmp2 = mul i64 %a.24, %a.24
+ store i64 %tmp2, i64 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_umulhi16_i32:
+; SI: s_and_b32
+; SI: s_and_b32
+; SI: v_mul_u32_u24_e32 [[MUL24:v[0-9]+]]
+; SI: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, [[MUL24]]
+define void @test_umulhi16_i32(i16 addrspace(1)* %out, i32 %a, i32 %b) {
+entry:
+ %a.16 = and i32 %a, 65535
+ %b.16 = and i32 %b, 65535
+ %mul = mul i32 %a.16, %b.16
+ %hi = lshr i32 %mul, 16
+ %mulhi = trunc i32 %hi to i16
+ store i16 %mulhi, i16 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_umul24_i33:
+; SI: s_load_dword s
+; SI: s_load_dword s
+
+; SI-NOT: and
+; SI-NOT: lshr
+
+; SI-DAG: v_mul_u32_u24_e32 v[[MUL_LO:[0-9]+]],
+; SI-DAG: v_mul_hi_u32_u24_e32 v[[MUL_HI:[0-9]+]],
+; SI-DAG: v_and_b32_e32 v[[HI:[0-9]+]], 1, v[[MUL_HI]]
+; SI: buffer_store_dwordx2 v{{\[}}[[MUL_LO]]:[[HI]]{{\]}}
+define void @test_umul24_i33(i64 addrspace(1)* %out, i33 %a, i33 %b) {
+entry:
+ %tmp0 = shl i33 %a, 9
+ %a_24 = lshr i33 %tmp0, 9
+ %tmp1 = shl i33 %b, 9
+ %b_24 = lshr i33 %tmp1, 9
+ %tmp2 = mul i33 %a_24, %b_24
+ %ext = zext i33 %tmp2 to i64
+ store i64 %ext, i64 addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: {{^}}test_umulhi24_i33:
+; SI: s_load_dword s
+; SI: s_load_dword s
+
+; SI-NOT: and
+; SI-NOT: lshr
+
+; SI: v_mul_hi_u32_u24_e32 v[[MUL_HI:[0-9]+]],
+; SI-NEXT: v_and_b32_e32 v[[HI:[0-9]+]], 1, v[[MUL_HI]]
+; SI-NEXT: buffer_store_dword v[[HI]]
+define void @test_umulhi24_i33(i32 addrspace(1)* %out, i33 %a, i33 %b) {
+entry:
+ %tmp0 = shl i33 %a, 9
+ %a_24 = lshr i33 %tmp0, 9
+ %tmp1 = shl i33 %b, 9
+ %b_24 = lshr i33 %tmp1, 9
+ %tmp2 = mul i33 %a_24, %b_24
+ %hi = lshr i33 %tmp2, 32
+ %trunc = trunc i33 %hi to i32
+ store i32 %trunc, i32 addrspace(1)* %out
+ ret void
+}
OpenPOWER on IntegriCloud