| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2019-04-03 00:00:58 +0000 |
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2019-04-03 00:00:58 +0000 |
| commit | 2065680b471d0f635d31199531042478748cd10f (patch) | |
| tree | ea5b62fdf7ea94f9282179f29da5e25d6ea83432 /llvm/test/CodeGen/AMDGPU/bitreverse.ll | |
| parent | ed23352379c83f0d30fbaa1770af4dd8d06a50c6 (diff) | |
| download | bcm5719-llvm-2065680b471d0f635d31199531042478748cd10f.tar.gz bcm5719-llvm-2065680b471d0f635d31199531042478748cd10f.zip | |
AMDGPU: Don't use the default cpu in a few tests
Avoids unnecessary test changes in a future commit.
llvm-svn: 357539
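The substance of the change is one flag per RUN line: pinning `-mcpu` so the SI check lines no longer track whatever default CPU `llc` selects for the bare `amdgcn--` triple. A sketch of the before/after invocation with `%s` expanded to an explicit file name (the file name and the claim about why unpinned checks would churn are assumptions drawn from the commit message, not part of the diff):

```sh
# Unpinned: llc falls back to the target's default CPU, so the
# autogenerated SI checks can shift whenever that default changes.
llc < bitreverse.ll -mtriple=amdgcn-- -verify-machineinstrs \
  | FileCheck bitreverse.ll --check-prefixes=FUNC,SI

# Pinned (this commit): tahiti is named explicitly, so a future
# change to the default CPU leaves these check lines untouched.
llc < bitreverse.ll -mtriple=amdgcn-- -mcpu=tahiti -verify-machineinstrs \
  | FileCheck bitreverse.ll --check-prefixes=FUNC,SI
```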
Diffstat (limited to 'llvm/test/CodeGen/AMDGPU/bitreverse.ll')
| -rw-r--r-- | llvm/test/CodeGen/AMDGPU/bitreverse.ll | 534 |
1 file changed, 266 insertions(+), 268 deletions(-)
```diff
diff --git a/llvm/test/CodeGen/AMDGPU/bitreverse.ll b/llvm/test/CodeGen/AMDGPU/bitreverse.ll
index 4225ab332c1..4e81381dbfd 100644
--- a/llvm/test/CodeGen/AMDGPU/bitreverse.ll
+++ b/llvm/test/CodeGen/AMDGPU/bitreverse.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=amdgcn-- -verify-machineinstrs | FileCheck %s --check-prefixes=FUNC,SI
+; RUN: llc < %s -mtriple=amdgcn-- -mcpu=tahiti -verify-machineinstrs | FileCheck %s --check-prefixes=FUNC,SI
 ; RUN: llc < %s -mtriple=amdgcn-- -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s --check-prefixes=FUNC,FLAT,TONGA
 ; RUN: llc < %s -mtriple=amdgcn-- -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s --check-prefixes=FUNC,FLAT,VI
@@ -18,15 +18,15 @@ declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>) #1
 define amdgpu_kernel void @s_brev_i16(i16 addrspace(1)* noalias %out, i16 %val) #0 {
 ; SI-LABEL: s_brev_i16:
 ; SI: ; %bb.0:
-; SI-NEXT: s_load_dword s2, s[0:1], 0xb
-; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
-; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s0, s[0:1], 0xb
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_brev_b32 s2, s2
-; SI-NEXT: s_lshr_b32 s4, s2, 16
-; SI-NEXT: s_mov_b32 s2, -1
-; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT: s_brev_b32 s0, s0
+; SI-NEXT: s_lshr_b32 s0, s0, 16
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
 ; SI-NEXT: s_endpgm
 ;
 ; FLAT-LABEL: s_brev_i16:
@@ -50,19 +50,18 @@ define amdgpu_kernel void @s_brev_i16(i16 addrspace(1)* noalias %out, i16 %val)
 define amdgpu_kernel void @v_brev_i16(i16 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %valptr) #0 {
 ; SI-LABEL: v_brev_i16:
 ; SI: ; %bb.0:
-; SI-NEXT: s_mov_b32 s3, 0xf000
-; SI-NEXT: s_mov_b32 s2, -1
-; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xb
-; SI-NEXT: s_mov_b32 s6, s2
-; SI-NEXT: s_mov_b32 s7, s3
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xb
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_mov_b32 s2, s6
+; SI-NEXT: s_mov_b32 s3, s7
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: buffer_load_ushort v0, off, s[4:7], 0
-; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT: buffer_load_ushort v0, off, s[0:3], 0
 ; SI-NEXT: s_waitcnt vmcnt(0)
 ; SI-NEXT: v_bfrev_b32_e32 v0, v0
 ; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
-; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
 ; SI-NEXT: s_endpgm
 ;
 ; FLAT-LABEL: v_brev_i16:
@@ -89,14 +88,14 @@ define amdgpu_kernel void @v_brev_i16(i16 addrspace(1)* noalias %out, i16 addrsp
 define amdgpu_kernel void @s_brev_i32(i32 addrspace(1)* noalias %out, i32 %val) #0 {
 ; SI-LABEL: s_brev_i32:
 ; SI: ; %bb.0:
-; SI-NEXT: s_load_dword s2, s[0:1], 0xb
-; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
-; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dword s0, s[0:1], 0xb
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_brev_b32 s4, s2
-; SI-NEXT: s_mov_b32 s2, -1
-; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_brev_b32 s0, s0
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
 ; SI-NEXT: s_endpgm
 ;
 ; FLAT-LABEL: s_brev_i32:
@@ -122,9 +121,9 @@ define amdgpu_kernel void @v_brev_i32(i32 addrspace(1)* noalias %out, i32 addrsp
 ; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xb
 ; SI-NEXT: s_mov_b32 s7, 0xf000
 ; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: s_mov_b32 s3, s7
 ; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
 ; SI-NEXT: v_mov_b32_e32 v1, 0
-; SI-NEXT: s_mov_b32 s3, s7
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_load_dword v0, v[0:1], s[0:3], 0 addr64
 ; SI-NEXT: s_mov_b32 s6, -1
@@ -160,16 +159,16 @@ define amdgpu_kernel void @v_brev_i32(i32 addrspace(1)* noalias %out, i32 addrsp
 define amdgpu_kernel void @s_brev_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> %val) #0 {
 ; SI-LABEL: s_brev_v2i32:
 ; SI: ; %bb.0:
-; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xb
-; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
-; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
+; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xb
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: s_brev_b32 s5, s5
-; SI-NEXT: s_brev_b32 s4, s4
-; SI-NEXT: s_mov_b32 s2, -1
-; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT: s_brev_b32 s1, s1
+; SI-NEXT: s_brev_b32 s0, s0
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
 ; SI-NEXT: s_endpgm
 ;
 ; FLAT-LABEL: s_brev_v2i32:
@@ -197,9 +196,9 @@ define amdgpu_kernel void @v_brev_v2i32(<2 x i32> addrspace(1)* noalias %out, <2
 ; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xb
 ; SI-NEXT: s_mov_b32 s7, 0xf000
 ; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: s_mov_b32 s3, s7
 ; SI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
 ; SI-NEXT: v_mov_b32_e32 v1, 0
-; SI-NEXT: s_mov_b32 s3, s7
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_load_dwordx2 v[0:1], v[0:1], s[0:3], 0 addr64
 ; SI-NEXT: s_mov_b32 s6, -1
@@ -237,74 +236,74 @@ define amdgpu_kernel void @v_brev_v2i32(<2 x i32> addrspace(1)* noalias %out, <2
 define amdgpu_kernel void @s_brev_i64(i64 addrspace(1)* noalias %out, i64 %val) #0 {
 ; SI-LABEL: s_brev_i64:
 ; SI: ; %bb.0:
-; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
-; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xb
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_mov_b32 s17, 0xff0000
+; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xb
 ; SI-NEXT: s_mov_b32 s3, 0
-; SI-NEXT: s_mov_b32 s13, 0xff00
-; SI-NEXT: s_mov_b32 s22, 0xf0f0f0f
-; SI-NEXT: s_mov_b32 s23, 0xf0f0f0f0
-; SI-NEXT: s_mov_b32 s24, 0x33333333
-; SI-NEXT: s_mov_b32 s25, 0xcccccccc
-; SI-NEXT: s_mov_b32 s26, 0x55555555
-; SI-NEXT: s_mov_b32 s27, 0xaaaaaaaa
-; SI-NEXT: s_mov_b32 s9, s3
-; SI-NEXT: s_mov_b32 s10, s3
-; SI-NEXT: s_mov_b32 s12, s3
-; SI-NEXT: s_mov_b32 s14, s3
-; SI-NEXT: s_mov_b32 s16, s3
+; SI-NEXT: s_mov_b32 s10, 0xff0000
+; SI-NEXT: s_mov_b32 s11, 0xff00
+; SI-NEXT: s_mov_b32 s7, s3
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v0, s0
-; SI-NEXT: v_alignbit_b32 v1, s1, v0, 24
-; SI-NEXT: v_alignbit_b32 v0, s1, v0, 8
-; SI-NEXT: s_lshr_b32 s2, s1, 24
-; SI-NEXT: s_lshr_b32 s8, s1, 8
-; SI-NEXT: s_lshl_b64 s[18:19], s[0:1], 8
-; SI-NEXT: s_lshl_b64 s[20:21], s[0:1], 24
-; SI-NEXT: s_lshl_b32 s15, s0, 24
-; SI-NEXT: s_lshl_b32 s0, s0, 8
-; SI-NEXT: v_and_b32_e32 v1, s17, v1
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_alignbit_b32 v1, s5, v0, 24
+; SI-NEXT: v_alignbit_b32 v0, s5, v0, 8
+; SI-NEXT: s_lshr_b32 s6, s5, 8
+; SI-NEXT: v_and_b32_e32 v1, s10, v1
 ; SI-NEXT: v_and_b32_e32 v0, 0xff000000, v0
-; SI-NEXT: s_and_b32 s8, s8, s13
-; SI-NEXT: s_and_b32 s11, s19, 0xff
-; SI-NEXT: s_and_b32 s13, s21, s13
-; SI-NEXT: s_and_b32 s17, s0, s17
+; SI-NEXT: s_lshr_b32 s2, s5, 24
+; SI-NEXT: s_and_b32 s6, s6, s11
+; SI-NEXT: s_or_b64 s[6:7], s[6:7], s[2:3]
 ; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: s_or_b64 s[0:1], s[8:9], s[2:3]
-; SI-NEXT: s_or_b64 s[2:3], s[12:13], s[10:11]
-; SI-NEXT: s_or_b64 s[8:9], s[14:15], s[16:17]
-; SI-NEXT: v_or_b32_e32 v0, s0, v0
-; SI-NEXT: v_mov_b32_e32 v1, s1
-; SI-NEXT: s_or_b64 s[0:1], s[8:9], s[2:3]
-; SI-NEXT: v_or_b32_e32 v2, s0, v0
-; SI-NEXT: v_or_b32_e32 v3, s1, v1
-; SI-NEXT: v_and_b32_e32 v1, s22, v3
-; SI-NEXT: v_and_b32_e32 v0, s22, v2
-; SI-NEXT: v_and_b32_e32 v3, s23, v3
-; SI-NEXT: v_and_b32_e32 v2, s23, v2
+; SI-NEXT: s_lshl_b64 s[8:9], s[4:5], 24
+; SI-NEXT: v_or_b32_e32 v0, s6, v0
+; SI-NEXT: v_mov_b32_e32 v1, s7
+; SI-NEXT: s_lshl_b64 s[6:7], s[4:5], 8
+; SI-NEXT: s_lshl_b32 s2, s4, 8
+; SI-NEXT: s_and_b32 s7, s7, 0xff
+; SI-NEXT: s_mov_b32 s6, s3
+; SI-NEXT: s_and_b32 s9, s9, s11
+; SI-NEXT: s_mov_b32 s8, s3
+; SI-NEXT: s_or_b64 s[6:7], s[8:9], s[6:7]
+; SI-NEXT: s_lshl_b32 s9, s4, 24
+; SI-NEXT: s_and_b32 s5, s2, s10
+; SI-NEXT: s_mov_b32 s4, s3
+; SI-NEXT: s_or_b64 s[2:3], s[8:9], s[4:5]
+; SI-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
+; SI-NEXT: v_or_b32_e32 v2, s2, v0
+; SI-NEXT: v_or_b32_e32 v3, s3, v1
+; SI-NEXT: s_mov_b32 s2, 0xf0f0f0f
+; SI-NEXT: v_and_b32_e32 v1, s2, v3
+; SI-NEXT: v_and_b32_e32 v0, s2, v2
+; SI-NEXT: s_mov_b32 s2, 0xf0f0f0f0
+; SI-NEXT: v_and_b32_e32 v3, s2, v3
+; SI-NEXT: v_and_b32_e32 v2, s2, v2
 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 4
 ; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 4
+; SI-NEXT: s_mov_b32 s2, 0x33333333
 ; SI-NEXT: v_or_b32_e32 v2, v2, v0
 ; SI-NEXT: v_or_b32_e32 v3, v3, v1
-; SI-NEXT: v_and_b32_e32 v1, s24, v3
-; SI-NEXT: v_and_b32_e32 v0, s24, v2
-; SI-NEXT: v_and_b32_e32 v3, s25, v3
-; SI-NEXT: v_and_b32_e32 v2, s25, v2
+; SI-NEXT: v_and_b32_e32 v1, s2, v3
+; SI-NEXT: v_and_b32_e32 v0, s2, v2
+; SI-NEXT: s_mov_b32 s2, 0xcccccccc
+; SI-NEXT: v_and_b32_e32 v3, s2, v3
+; SI-NEXT: v_and_b32_e32 v2, s2, v2
 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 2
 ; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 2
+; SI-NEXT: s_mov_b32 s2, 0x55555555
 ; SI-NEXT: v_or_b32_e32 v2, v2, v0
 ; SI-NEXT: v_or_b32_e32 v3, v3, v1
-; SI-NEXT: v_and_b32_e32 v1, s26, v3
-; SI-NEXT: v_and_b32_e32 v0, s26, v2
-; SI-NEXT: v_and_b32_e32 v3, s27, v3
-; SI-NEXT: v_and_b32_e32 v2, s27, v2
+; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; SI-NEXT: v_and_b32_e32 v1, s2, v3
+; SI-NEXT: v_and_b32_e32 v0, s2, v2
+; SI-NEXT: s_mov_b32 s2, 0xaaaaaaaa
+; SI-NEXT: v_and_b32_e32 v3, s2, v3
+; SI-NEXT: v_and_b32_e32 v2, s2, v2
 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
 ; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 1
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_mov_b32 s2, -1
 ; SI-NEXT: v_or_b32_e32 v0, v2, v0
 ; SI-NEXT: v_or_b32_e32 v1, v3, v1
-; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
 ; SI-NEXT: s_endpgm
 ;
 ; FLAT-LABEL: s_brev_i64:
@@ -388,61 +387,61 @@ define amdgpu_kernel void @v_brev_i64(i64 addrspace(1)* noalias %out, i64 addrsp
 ; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xb
 ; SI-NEXT: s_mov_b32 s7, 0xf000
 ; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: s_mov_b32 s3, s7
 ; SI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
 ; SI-NEXT: v_mov_b32_e32 v1, 0
-; SI-NEXT: s_mov_b32 s3, s7
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_load_dwordx2 v[0:1], v[0:1], s[0:3], 0 addr64
 ; SI-NEXT: s_mov_b32 s0, 0xff0000
 ; SI-NEXT: s_mov_b32 s1, 0xff00
 ; SI-NEXT: s_mov_b32 s2, 0xf0f0f0f
 ; SI-NEXT: s_mov_b32 s3, 0xf0f0f0f0
-; SI-NEXT: s_mov_b32 s8, 0x33333333
-; SI-NEXT: s_mov_b32 s9, 0xcccccccc
-; SI-NEXT: s_mov_b32 s10, 0x55555555
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_mov_b32 s11, 0xaaaaaaaa
+; SI-NEXT: s_mov_b32 s6, 0x33333333
+; SI-NEXT: s_mov_b32 s8, 0xcccccccc
+; SI-NEXT: s_mov_b32 s9, 0x55555555
+; SI-NEXT: s_mov_b32 s10, 0xaaaaaaaa
 ; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshl_b64 v[2:3], v[0:1], 8
 ; SI-NEXT: v_alignbit_b32 v4, v1, v0, 24
 ; SI-NEXT: v_alignbit_b32 v5, v1, v0, 8
-; SI-NEXT: v_lshrrev_b32_e32 v6, 24, v1
 ; SI-NEXT: v_lshrrev_b32_e32 v7, 8, v1
-; SI-NEXT: v_lshl_b64 v[2:3], v[0:1], 8
+; SI-NEXT: v_lshrrev_b32_e32 v6, 24, v1
 ; SI-NEXT: v_lshl_b64 v[1:2], v[0:1], 24
 ; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v0
 ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v0
+; SI-NEXT: v_and_b32_e32 v0, s0, v0
 ; SI-NEXT: v_and_b32_e32 v4, s0, v4
 ; SI-NEXT: v_and_b32_e32 v5, 0xff000000, v5
 ; SI-NEXT: v_and_b32_e32 v7, s1, v7
 ; SI-NEXT: v_and_b32_e32 v3, 0xff, v3
 ; SI-NEXT: v_and_b32_e32 v2, s1, v2
-; SI-NEXT: v_and_b32_e32 v0, s0, v0
 ; SI-NEXT: v_or_b32_e32 v4, v5, v4
 ; SI-NEXT: v_or_b32_e32 v5, v7, v6
-; SI-NEXT: v_or_b32_e32 v2, v2, v3
 ; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_or_b32_e32 v4, v4, v5
-; SI-NEXT: v_or_b32_e32 v2, v0, v2
-; SI-NEXT: v_and_b32_e32 v1, s2, v2
-; SI-NEXT: v_and_b32_e32 v0, s2, v4
-; SI-NEXT: v_and_b32_e32 v3, s3, v2
-; SI-NEXT: v_and_b32_e32 v2, s3, v4
+; SI-NEXT: v_or_b32_e32 v2, v2, v3
+; SI-NEXT: v_or_b32_e32 v1, v4, v5
+; SI-NEXT: v_or_b32_e32 v3, v0, v2
+; SI-NEXT: v_and_b32_e32 v0, s2, v1
+; SI-NEXT: v_and_b32_e32 v2, s3, v1
+; SI-NEXT: v_and_b32_e32 v1, s2, v3
+; SI-NEXT: v_and_b32_e32 v3, s3, v3
 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 4
 ; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 4
 ; SI-NEXT: v_or_b32_e32 v3, v3, v1
 ; SI-NEXT: v_or_b32_e32 v2, v2, v0
-; SI-NEXT: v_and_b32_e32 v1, s8, v3
-; SI-NEXT: v_and_b32_e32 v0, s8, v2
-; SI-NEXT: v_and_b32_e32 v3, s9, v3
-; SI-NEXT: v_and_b32_e32 v2, s9, v2
+; SI-NEXT: v_and_b32_e32 v1, s6, v3
+; SI-NEXT: v_and_b32_e32 v0, s6, v2
+; SI-NEXT: v_and_b32_e32 v3, s8, v3
+; SI-NEXT: v_and_b32_e32 v2, s8, v2
 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 2
 ; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 2
+; SI-NEXT: s_mov_b32 s6, -1
 ; SI-NEXT: v_or_b32_e32 v3, v3, v1
 ; SI-NEXT: v_or_b32_e32 v2, v2, v0
-; SI-NEXT: v_and_b32_e32 v1, s10, v3
-; SI-NEXT: v_and_b32_e32 v0, s10, v2
-; SI-NEXT: v_and_b32_e32 v3, s11, v3
-; SI-NEXT: v_and_b32_e32 v2, s11, v2
+; SI-NEXT: v_and_b32_e32 v1, s9, v3
+; SI-NEXT: v_and_b32_e32 v0, s9, v2
+; SI-NEXT: v_and_b32_e32 v3, s10, v3
+; SI-NEXT: v_and_b32_e32 v2, s10, v2
 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
 ; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 1
 ; SI-NEXT: v_or_b32_e32 v1, v3, v1
@@ -527,126 +526,125 @@ define amdgpu_kernel void @s_brev_v2i64(<2 x i64> addrspace(1)* noalias %out, <2
 ; SI: ; %bb.0:
 ; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
 ; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd
-; SI-NEXT: s_mov_b32 s7, 0xf000
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_mov_b32 s25, 0xff0000
 ; SI-NEXT: s_mov_b32 s9, 0
-; SI-NEXT: s_mov_b32 s20, 0xff000000
-; SI-NEXT: s_mov_b32 s29, 0xff00
-; SI-NEXT: s_movk_i32 s27, 0xff
-; SI-NEXT: s_mov_b32 s32, 0xf0f0f0f
-; SI-NEXT: s_mov_b32 s33, 0xf0f0f0f0
-; SI-NEXT: s_mov_b32 s34, 0x33333333
-; SI-NEXT: s_mov_b32 s35, 0xcccccccc
-; SI-NEXT: s_mov_b32 s36, 0x55555555
-; SI-NEXT: s_mov_b32 s37, 0xaaaaaaaa
-; SI-NEXT: s_mov_b32 s11, s9
-; SI-NEXT: s_mov_b32 s12, s9
-; SI-NEXT: s_mov_b32 s14, s9
-; SI-NEXT: s_mov_b32 s16, s9
-; SI-NEXT: s_mov_b32 s18, s9
-; SI-NEXT: s_mov_b32 s21, s9
-; SI-NEXT: s_mov_b32 s22, s9
-; SI-NEXT: s_mov_b32 s24, s9
-; SI-NEXT: s_mov_b32 s26, s9
-; SI-NEXT: s_mov_b32 s28, s9
+; SI-NEXT: s_mov_b32 s12, 0xff0000
+; SI-NEXT: s_mov_b32 s13, 0xff000000
+; SI-NEXT: s_mov_b32 s14, 0xff00
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: v_mov_b32_e32 v0, s2
 ; SI-NEXT: v_alignbit_b32 v1, s3, v0, 24
 ; SI-NEXT: v_alignbit_b32 v0, s3, v0, 8
+; SI-NEXT: s_lshr_b32 s6, s3, 8
+; SI-NEXT: v_and_b32_e32 v1, s12, v1
+; SI-NEXT: v_and_b32_e32 v0, s13, v0
 ; SI-NEXT: s_lshr_b32 s8, s3, 24
-; SI-NEXT: s_lshr_b32 s10, s3, 8
-; SI-NEXT: s_lshl_b32 s13, s2, 24
-; SI-NEXT: s_lshl_b32 s15, s2, 8
-; SI-NEXT: s_lshl_b64 s[30:31], s[2:3], 8
-; SI-NEXT: s_and_b32 s17, s31, s27
-; SI-NEXT: s_lshl_b64 s[2:3], s[2:3], 24
-; SI-NEXT: v_mov_b32_e32 v2, s0
-; SI-NEXT: v_alignbit_b32 v3, s1, v2, 24
-; SI-NEXT: v_alignbit_b32 v2, s1, v2, 8
-; SI-NEXT: s_and_b32 s10, s10, s29
-; SI-NEXT: s_lshr_b32 s30, s1, 8
-; SI-NEXT: s_lshl_b32 s23, s0, 24
-; SI-NEXT: s_and_b32 s15, s15, s25
-; SI-NEXT: s_lshl_b32 s38, s0, 8
-; SI-NEXT: s_and_b32 s19, s3, s29
-; SI-NEXT: s_lshl_b64 s[2:3], s[0:1], 8
-; SI-NEXT: v_and_b32_e32 v0, s20, v0
-; SI-NEXT: v_and_b32_e32 v2, s20, v2
-; SI-NEXT: s_and_b32 s20, s30, s29
-; SI-NEXT: s_lshl_b64 s[30:31], s[0:1], 24
-; SI-NEXT: v_and_b32_e32 v1, s25, v1
-; SI-NEXT: v_and_b32_e32 v3, s25, v3
-; SI-NEXT: s_and_b32 s25, s38, s25
-; SI-NEXT: s_and_b32 s27, s3, s27
-; SI-NEXT: s_and_b32 s29, s31, s29
+; SI-NEXT: s_and_b32 s6, s6, s14
+; SI-NEXT: s_mov_b32 s7, s9
+; SI-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
 ; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: s_or_b64 s[2:3], s[10:11], s[8:9]
-; SI-NEXT: s_or_b64 s[10:11], s[12:13], s[14:15]
-; SI-NEXT: s_or_b64 s[12:13], s[18:19], s[16:17]
-; SI-NEXT: v_or_b32_e32 v1, v2, v3
+; SI-NEXT: s_lshl_b32 s8, s2, 8
+; SI-NEXT: v_or_b32_e32 v0, s6, v0
+; SI-NEXT: v_mov_b32_e32 v1, s7
+; SI-NEXT: s_and_b32 s11, s8, s12
+; SI-NEXT: s_lshl_b32 s7, s2, 24
+; SI-NEXT: s_mov_b32 s6, s9
+; SI-NEXT: s_mov_b32 s10, s9
+; SI-NEXT: s_or_b64 s[6:7], s[6:7], s[10:11]
+; SI-NEXT: s_lshl_b64 s[10:11], s[2:3], 8
+; SI-NEXT: s_lshl_b64 s[2:3], s[2:3], 24
+; SI-NEXT: s_movk_i32 s15, 0xff
+; SI-NEXT: s_and_b32 s11, s11, s15
+; SI-NEXT: s_mov_b32 s10, s9
+; SI-NEXT: s_and_b32 s3, s3, s14
+; SI-NEXT: s_mov_b32 s2, s9
+; SI-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11]
+; SI-NEXT: s_or_b64 s[2:3], s[6:7], s[2:3]
+; SI-NEXT: v_mov_b32_e32 v4, s0
+; SI-NEXT: v_alignbit_b32 v5, s1, v4, 24
+; SI-NEXT: v_alignbit_b32 v4, s1, v4, 8
+; SI-NEXT: v_or_b32_e32 v2, s2, v0
+; SI-NEXT: s_lshr_b32 s2, s1, 8
+; SI-NEXT: v_or_b32_e32 v3, s3, v1
+; SI-NEXT: v_and_b32_e32 v5, s12, v5
+; SI-NEXT: v_and_b32_e32 v4, s13, v4
 ; SI-NEXT: s_lshr_b32 s8, s1, 24
-; SI-NEXT: s_or_b64 s[0:1], s[22:23], s[24:25]
-; SI-NEXT: s_or_b64 s[14:15], s[28:29], s[26:27]
-; SI-NEXT: v_or_b32_e32 v0, s2, v0
-; SI-NEXT: v_mov_b32_e32 v2, s3
-; SI-NEXT: s_or_b64 s[2:3], s[10:11], s[12:13]
-; SI-NEXT: s_or_b64 s[8:9], s[20:21], s[8:9]
-; SI-NEXT: s_or_b64 s[0:1], s[0:1], s[14:15]
-; SI-NEXT: v_or_b32_e32 v3, s2, v0
-; SI-NEXT: v_or_b32_e32 v4, s3, v2
-; SI-NEXT: v_or_b32_e32 v5, s8, v1
-; SI-NEXT: v_mov_b32_e32 v0, s9
-; SI-NEXT: v_or_b32_e32 v6, s1, v0
-; SI-NEXT: v_and_b32_e32 v0, s32, v3
-; SI-NEXT: v_and_b32_e32 v1, s32, v4
-; SI-NEXT: v_and_b32_e32 v2, s33, v3
-; SI-NEXT: v_and_b32_e32 v3, s33, v4
-; SI-NEXT: v_or_b32_e32 v5, s0, v5
-; SI-NEXT: v_and_b32_e32 v4, s32, v6
-; SI-NEXT: v_and_b32_e32 v6, s33, v6
+; SI-NEXT: s_and_b32 s2, s2, s14
+; SI-NEXT: s_mov_b32 s3, s9
+; SI-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9]
+; SI-NEXT: v_or_b32_e32 v4, v4, v5
+; SI-NEXT: s_lshl_b32 s8, s0, 8
+; SI-NEXT: v_or_b32_e32 v4, s2, v4
+; SI-NEXT: v_mov_b32_e32 v5, s3
+; SI-NEXT: s_lshl_b32 s3, s0, 24
+; SI-NEXT: s_mov_b32 s2, s9
+; SI-NEXT: s_and_b32 s11, s8, s12
+; SI-NEXT: s_mov_b32 s16, 0xf0f0f0f
+; SI-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11]
+; SI-NEXT: s_lshl_b64 s[10:11], s[0:1], 8
+; SI-NEXT: s_lshl_b64 s[0:1], s[0:1], 24
+; SI-NEXT: s_mov_b32 s17, 0xf0f0f0f0
+; SI-NEXT: v_and_b32_e32 v0, s16, v2
+; SI-NEXT: v_and_b32_e32 v1, s16, v3
+; SI-NEXT: v_and_b32_e32 v2, s17, v2
+; SI-NEXT: v_and_b32_e32 v3, s17, v3
+; SI-NEXT: s_and_b32 s11, s11, s15
+; SI-NEXT: s_mov_b32 s10, s9
+; SI-NEXT: s_and_b32 s1, s1, s14
+; SI-NEXT: s_mov_b32 s0, s9
+; SI-NEXT: s_or_b64 s[0:1], s[0:1], s[10:11]
 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 4
-; SI-NEXT: v_lshr_b64 v[7:8], v[2:3], 4
-; SI-NEXT: v_and_b32_e32 v3, s32, v5
-; SI-NEXT: v_and_b32_e32 v5, s33, v5
-; SI-NEXT: v_or_b32_e32 v7, v7, v0
-; SI-NEXT: v_or_b32_e32 v8, v8, v1
-; SI-NEXT: v_lshl_b64 v[0:1], v[3:4], 4
-; SI-NEXT: v_lshr_b64 v[2:3], v[5:6], 4
-; SI-NEXT: v_and_b32_e32 v4, s34, v7
-; SI-NEXT: v_and_b32_e32 v5, s34, v8
-; SI-NEXT: v_and_b32_e32 v6, s35, v7
-; SI-NEXT: v_and_b32_e32 v7, s35, v8
-; SI-NEXT: v_or_b32_e32 v8, v2, v0
-; SI-NEXT: v_or_b32_e32 v9, v3, v1
-; SI-NEXT: v_lshl_b64 v[0:1], v[4:5], 2
-; SI-NEXT: v_lshr_b64 v[2:3], v[6:7], 2
-; SI-NEXT: v_and_b32_e32 v4, s34, v8
-; SI-NEXT: v_and_b32_e32 v5, s34, v9
-; SI-NEXT: v_and_b32_e32 v6, s35, v8
-; SI-NEXT: v_and_b32_e32 v7, s35, v9
-; SI-NEXT: v_or_b32_e32 v8, v2, v0
-; SI-NEXT: v_or_b32_e32 v9, v3, v1
-; SI-NEXT: v_lshl_b64 v[0:1], v[4:5], 2
-; SI-NEXT: v_lshr_b64 v[2:3], v[6:7], 2
-; SI-NEXT: v_and_b32_e32 v4, s36, v8
-; SI-NEXT: v_and_b32_e32 v5, s36, v9
-; SI-NEXT: v_and_b32_e32 v6, s37, v8
-; SI-NEXT: v_and_b32_e32 v7, s37, v9
-; SI-NEXT: v_or_b32_e32 v8, v2, v0
-; SI-NEXT: v_or_b32_e32 v9, v3, v1
-; SI-NEXT: v_lshl_b64 v[0:1], v[4:5], 1
-; SI-NEXT: v_lshr_b64 v[2:3], v[6:7], 1
-; SI-NEXT: v_and_b32_e32 v4, s36, v8
-; SI-NEXT: v_and_b32_e32 v5, s36, v9
-; SI-NEXT: v_and_b32_e32 v6, s37, v8
-; SI-NEXT: v_and_b32_e32 v7, s37, v9
+; SI-NEXT: v_lshr_b64 v[7:8], v[2:3], 4
+; SI-NEXT: v_and_b32_e32 v3, s32, v5
+; SI-NEXT: v_and_b32_e32 v5, s33, v5
+; SI-NEXT: v_or_b32_e32 v7, v7, v0
+; SI-NEXT: v_or_b32_e32 v8, v8, v1
+; SI-NEXT: v_lshl_b64 v[0:1], v[3:4], 4
+; SI-NEXT: v_lshr_b64 v[2:3], v[5:6], 4
+; SI-NEXT: v_and_b32_e32 v4, s34, v7
+; SI-NEXT: v_and_b32_e32 v5, s34, v8
+; SI-NEXT: v_and_b32_e32 v6, s35, v7
+; SI-NEXT: v_and_b32_e32 v7, s35, v8
+; SI-NEXT: v_or_b32_e32 v8, v2, v0
+; SI-NEXT: v_or_b32_e32 v9, v3, v1
+; SI-NEXT: v_lshl_b64 v[0:1], v[4:5], 2
+; SI-NEXT: v_lshr_b64 v[2:3], v[6:7], 2
+; SI-NEXT: v_and_b32_e32 v4, s34, v8
+; SI-NEXT: v_and_b32_e32 v5, s34, v9
+; SI-NEXT: v_and_b32_e32 v6, s35, v8
+; SI-NEXT: v_and_b32_e32 v7, s35, v9
+; SI-NEXT: v_or_b32_e32 v8, v2, v0
+; SI-NEXT: v_or_b32_e32 v9, v3, v1
+; SI-NEXT: v_lshl_b64 v[0:1], v[4:5], 2
+; SI-NEXT: v_lshr_b64 v[2:3], v[6:7], 2
+; SI-NEXT: v_and_b32_e32 v4, s36, v8
+; SI-NEXT: v_and_b32_e32 v5, s36, v9
+; SI-NEXT: v_and_b32_e32 v6, s37, v8
+; SI-NEXT: v_and_b32_e32 v7, s37, v9
+; SI-NEXT: v_or_b32_e32 v8, v2, v0
+; SI-NEXT: v_or_b32_e32 v9, v3, v1
+; SI-NEXT: v_lshl_b64 v[0:1], v[4:5], 1
+; SI-NEXT: v_lshr_b64 v[2:3], v[6:7], 1
+; SI-NEXT: v_and_b32_e32 v4, s36, v8
+; SI-NEXT: v_and_b32_e32 v5, s36, v9
+; SI-NEXT: v_and_b32_e32 v6, s37, v8
+; SI-NEXT: v_and_b32_e32 v7, s37, v9
 ; SI-NEXT: v_or_b32_e32 v2, v2, v0
+; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 4
+; SI-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
+; SI-NEXT: v_or_b32_e32 v6, s0, v4
+; SI-NEXT: v_or_b32_e32 v7, s1, v5
+; SI-NEXT: v_or_b32_e32 v2, v2, v0
+; SI-NEXT: s_mov_b32 s18, 0x33333333
+; SI-NEXT: v_or_b32_e32 v3, v3, v1
+; SI-NEXT: s_mov_b32 s19, 0xcccccccc
+; SI-NEXT: v_and_b32_e32 v0, s18, v2
+; SI-NEXT: v_and_b32_e32 v1, s18, v3
+; SI-NEXT: v_and_b32_e32 v4, s16, v6
+; SI-NEXT: v_and_b32_e32 v5, s16, v7
+; SI-NEXT: v_and_b32_e32 v2, s19, v2
+; SI-NEXT: v_and_b32_e32 v3, s19, v3
+; SI-NEXT: v_and_b32_e32 v6, s17, v6
+; SI-NEXT: v_and_b32_e32 v7, s17, v7
+; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 2
+; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 2
+; SI-NEXT: v_lshl_b64 v[4:5], v[4:5], 4
+; SI-NEXT: v_lshr_b64 v[6:7], v[6:7], 4
+; SI-NEXT: v_or_b32_e32 v2, v2, v0
+; SI-NEXT: v_or_b32_e32 v6, v6, v4
+; SI-NEXT: v_or_b32_e32 v7, v7, v5
+; SI-NEXT: s_mov_b32 s20, 0x55555555
+; SI-NEXT: v_or_b32_e32 v3, v3, v1
+; SI-NEXT: s_mov_b32 s21, 0xaaaaaaaa
+; SI-NEXT: v_and_b32_e32 v0, s20, v2
+; SI-NEXT: v_and_b32_e32 v1, s20, v3
+; SI-NEXT: v_and_b32_e32 v4, s18, v6
+; SI-NEXT: v_and_b32_e32 v5, s18, v7
+; SI-NEXT: v_and_b32_e32 v2, s21, v2
+; SI-NEXT: v_and_b32_e32 v3, s21, v3
+; SI-NEXT: v_and_b32_e32 v6, s19, v6
+; SI-NEXT: v_and_b32_e32 v7, s19, v7
+; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
+; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 1
+; SI-NEXT: v_lshl_b64 v[4:5], v[4:5], 2
+; SI-NEXT: v_lshr_b64 v[6:7], v[6:7], 2
+; SI-NEXT: v_or_b32_e32 v2, v2, v0
+; SI-NEXT: v_or_b32_e32 v0, v6, v4
+; SI-NEXT: v_or_b32_e32 v7, v7, v5
+; SI-NEXT: v_and_b32_e32 v5, s20, v7
+; SI-NEXT: v_and_b32_e32 v4, s20, v0
+; SI-NEXT: v_and_b32_e32 v6, s21, v0
+; SI-NEXT: v_and_b32_e32 v7, s21, v7
+; SI-NEXT: v_lshl_b64 v[4:5], v[4:5], 1
+; SI-NEXT: v_lshr_b64 v[6:7], v[6:7], 1
 ; SI-NEXT: v_or_b32_e32 v3, v3, v1
+; SI-NEXT: s_mov_b32 s7, 0xf000
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: v_or_b32_e32 v0, v6, v4
+; SI-NEXT: v_or_b32_e32 v1, v7, v5
 ; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
 ; SI-NEXT: s_endpgm
 ;
@@ -787,31 +785,13 @@ define amdgpu_kernel void @v_brev_v2i64(<2 x i64> addrspace(1)* noalias %out, <2
 ; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xb
 ; SI-NEXT: s_mov_b32 s7, 0xf000
 ; SI-NEXT: s_mov_b32 s2, 0
+; SI-NEXT: s_mov_b32 s3, s7
 ; SI-NEXT: v_lshlrev_b32_e32 v0, 4, v0
 ; SI-NEXT: v_mov_b32_e32 v1, 0
-; SI-NEXT: s_mov_b32 s3, s7
 ; SI-NEXT: s_waitcnt lgkmcnt(0)
 ; SI-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[0:3], 0 addr64
 ; SI-NEXT: s_mov_b32 s0, 0xff0000
 ; SI-NEXT: s_mov_b32 s1, 0xff000000
-; SI-NEXT: s_mov_b32 s6, -1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v3, v2, 24
-; SI-NEXT: v_alignbit_b32 v9, v3, v2, 8
-; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v3
-; SI-NEXT: v_lshl_b64 v[4:5], v[2:3], 8
-; SI-NEXT: v_lshl_b64 v[6:7], v[2:3], 24
-; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v2
-; SI-NEXT: v_alignbit_b32 v6, v1, v0, 24
-; SI-NEXT: v_alignbit_b32 v11, v1, v0, 8
-; SI-NEXT: v_lshrrev_b32_e32 v12, 8, v1
-; SI-NEXT: v_lshrrev_b32_e32 v13, 24, v3
-; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v2
-; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v1
-; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v0
-; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v0
-; SI-NEXT: v_lshl_b64 v[2:3], v[0:1], 8
-; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 24
 ; SI-NEXT: s_mov_b32 s2, 0xff00
 ; SI-NEXT: s_movk_i32 s3, 0xff
 ; SI-NEXT: s_mov_b32 s8, 0xf0f0f0f
@@ -820,38 +800,56 @@ define amdgpu_kernel void @v_brev_v2i64(<2 x i64> addrspace(1)* noalias %out, <2
 ; SI-NEXT: s_mov_b32 s11, 0xcccccccc
 ; SI-NEXT: s_mov_b32 s12, 0x55555555
 ; SI-NEXT: s_mov_b32 s13, 0xaaaaaaaa
-; SI-NEXT: v_and_b32_e32 v0, s0, v8
-; SI-NEXT: v_and_b32_e32 v2, s1, v9
-; SI-NEXT: v_and_b32_e32 v8, s2, v10
+; SI-NEXT: s_mov_b32 s6, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshl_b64 v[4:5], v[2:3], 8
+; SI-NEXT: v_alignbit_b32 v6, v3, v2, 24
+; SI-NEXT: v_alignbit_b32 v7, v3, v2, 8
+; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v3
+; SI-NEXT: v_lshrrev_b32_e32 v9, 8, v3
+; SI-NEXT: v_lshl_b64 v[3:4], v[2:3], 24
+; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v2
+; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v2
+; SI-NEXT: v_lshl_b64 v[2:3], v[0:1], 8
+; SI-NEXT: v_alignbit_b32 v12, v1, v0, 24
+; SI-NEXT: v_alignbit_b32 v13, v1, v0, 8
+; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v1
+; SI-NEXT: v_lshrrev_b32_e32 v15, 8, v1
+; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v0
+; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v0
+; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 24
+; SI-NEXT: v_and_b32_e32 v0, s0, v6
+; SI-NEXT: v_and_b32_e32 v2, s1, v7
+; SI-NEXT: v_and_b32_e32 v6, s2, v9
+; SI-NEXT: v_and_b32_e32 v7, s0, v11
+; SI-NEXT: v_and_b32_e32 v9, s0, v12
+; SI-NEXT: v_and_b32_e32 v11, s1, v13
+; SI-NEXT: v_or_b32_e32 v0, v2, v0
+; SI-NEXT: v_or_b32_e32 v2, v6, v8
+; SI-NEXT: v_and_b32_e32 v12, s2, v15
+; SI-NEXT: v_and_b32_e32 v13, s0, v17
 ; SI-NEXT: v_and_b32_e32 v5, s3, v5
-; SI-NEXT: v_and_b32_e32 v7, s2, v7
-; SI-NEXT: v_and_b32_e32 v4, s0, v4
-; SI-NEXT: v_and_b32_e32 v6, s0, v6
-; SI-NEXT: v_and_b32_e32 v9, s1, v11
-; SI-NEXT: v_and_b32_e32 v10, s2, v12
+; SI-NEXT: v_and_b32_e32 v4, s2, v4
 ; SI-NEXT: v_and_b32_e32 v3, s3, v3
 ; SI-NEXT: v_and_b32_e32 v1, s2, v1
-; SI-NEXT: v_and_b32_e32 v11, s0, v17
-; SI-NEXT: v_or_b32_e32 v0, v2, v0
-; SI-NEXT: v_or_b32_e32 v2, v8, v13
-; SI-NEXT: v_or_b32_e32 v5, v7, v5
-; SI-NEXT: v_or_b32_e32 v4, v14, v4
-; SI-NEXT: v_or_b32_e32 v6, v9, v6
-; SI-NEXT: v_or_b32_e32 v7, v10, v15
-; SI-NEXT: v_or_b32_e32 v1, v1, v3
-; SI-NEXT: v_or_b32_e32 v3, v16, v11
+; SI-NEXT: v_or_b32_e32 v6, v10, v7
+; SI-NEXT: v_or_b32_e32 v7, v11, v9
 ; SI-NEXT: v_or_b32_e32 v2, v0, v2
+; SI-NEXT: v_or_b32_e32 v8, v12, v14
+; SI-NEXT: v_or_b32_e32 v0, v4, v5
+; SI-NEXT: v_or_b32_e32 v1, v1, v3
+; SI-NEXT: v_or_b32_e32 v9, v16, v13
+; SI-NEXT: v_or_b32_e32 v5, v7, v8
+; SI-NEXT: v_or_b32_e32 v3, v6, v0
+; SI-NEXT: v_or_b32_e32 v7, v9, v1
-; SI-NEXT: v_or_b32_e32 v4, v4, v5
-; SI-NEXT: v_or_b32_e32 v6, v6, v7
-; SI-NEXT: v_or_b32_e32 v7, v3, v1
-; SI-NEXT: v_and_b32_e32 v1, s8, v4
 ; SI-NEXT: v_and_b32_e32 v0, s8, v2
-; SI-NEXT: v_and_b32_e32 v3, s9, v4
 ; SI-NEXT: v_and_b32_e32 v2, s9, v2
+; SI-NEXT: v_and_b32_e32 v1, s8, v3
+; SI-NEXT: v_and_b32_e32 v3, s9, v3
+; SI-NEXT: v_and_b32_e32 v4, s8, v5
+; SI-NEXT: v_and_b32_e32 v6, s9, v5
 ; SI-NEXT: v_and_b32_e32 v5, s8, v7
-; SI-NEXT: v_and_b32_e32 v4, s8, v6
 ; SI-NEXT: v_and_b32_e32 v7, s9, v7
-; SI-NEXT: v_and_b32_e32 v6, s9, v6
 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 4
 ; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 4
 ; SI-NEXT: v_lshl_b64 v[4:5], v[4:5], 4
@@ -862,10 +860,10 @@ define amdgpu_kernel void @v_brev_v2i64(<2 x i64> addrspace(1)* noalias %out, <2
 ; SI-NEXT: v_or_b32_e32 v6, v6, v4
 ; SI-NEXT: v_and_b32_e32 v1, s10, v3
 ; SI-NEXT: v_and_b32_e32 v0, s10, v2
-; SI-NEXT: v_and_b32_e32 v3, s11, v3
-; SI-NEXT: v_and_b32_e32 v2, s11, v2
 ; SI-NEXT: v_and_b32_e32 v5, s10, v7
 ; SI-NEXT: v_and_b32_e32 v4, s10, v6
+; SI-NEXT: v_and_b32_e32 v3, s11, v3
+; SI-NEXT: v_and_b32_e32 v2, s11, v2
 ; SI-NEXT: v_and_b32_e32 v7, s11, v7
 ; SI-NEXT: v_and_b32_e32 v6, s11, v6
 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 2
@@ -878,10 +876,10 @@ define amdgpu_kernel void @v_brev_v2i64(<2 x i64> addrspace(1)* noalias %out, <2
 ; SI-NEXT: v_or_b32_e32 v6, v6, v4
 ; SI-NEXT: v_and_b32_e32 v1, s12, v3
 ; SI-NEXT: v_and_b32_e32 v0, s12, v2
-; SI-NEXT: v_and_b32_e32 v3, s13, v3
-; SI-NEXT: v_and_b32_e32 v2, s13, v2
 ; SI-NEXT: v_and_b32_e32 v5, s12, v7
 ; SI-NEXT: v_and_b32_e32 v4, s12, v6
+; SI-NEXT: v_and_b32_e32 v3, s13, v3
+; SI-NEXT: v_and_b32_e32 v2, s13, v2
 ; SI-NEXT: v_and_b32_e32 v7, s13, v7
 ; SI-NEXT: v_and_b32_e32 v6, s13, v6
 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 1
```
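The check lines above are autogenerated (see the NOTE at the top of the test), so a RUN-line edit like this one is normally followed by rerunning the update script named there rather than hand-editing the SI blocks. A hedged sketch of that workflow; the build directory and the exact flag spelling are assumptions about a typical setup, not part of this commit:

```sh
# Regenerate the autogenerated CHECK/SI-NEXT lines after changing a RUN line.
# llvm/utils/update_llc_test_checks.py is the script named in the test's NOTE;
# the --llc-binary flag and the build path below are assumptions.
python llvm/utils/update_llc_test_checks.py \
  --llc-binary=build/bin/llc \
  llvm/test/CodeGen/AMDGPU/bitreverse.ll
```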

