 llvm/lib/Target/AMDGPU/AMDGPU.td                  |   6
 llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp        |   5
 llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h          |   5
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp         |  22
 llvm/lib/Target/AMDGPU/SIInstrInfo.td             |   6
 llvm/test/CodeGen/AMDGPU/amdgpu.private-memory.ll |   8
 llvm/test/CodeGen/AMDGPU/unaligned-load-store.ll  | 407
 7 files changed, 287 insertions, 172 deletions
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index 607e8d9bfdd..72c45535441 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -61,6 +61,12 @@ def FeatureFlatAddressSpace : SubtargetFeature<"flat-address-space",
   "Support flat address space"
 >;
 
+def FeatureUnalignedBufferAccess : SubtargetFeature<"unaligned-buffer-access",
+  "UnalignedBufferAccess",
+  "true",
+  "Support unaligned global loads and stores"
+>;
+
 def FeatureXNACK : SubtargetFeature<"xnack",
   "EnableXNACK",
   "true",
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
index 9843ddf590a..10fa9cf4673 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -47,7 +47,7 @@ AMDGPUSubtarget::initializeSubtargetDependencies(const Triple &TT,
   SmallString<256> FullFS("+promote-alloca,+fp64-denormals,+load-store-opt,");
   if (isAmdHsaOS()) // Turn on FlatForGlobal for HSA.
-    FullFS += "+flat-for-global,";
+    FullFS += "+flat-for-global,+unaligned-buffer-access,";
   FullFS += FS;
 
   ParseSubtargetFeatures(GPU, FullFS);
@@ -85,6 +85,8 @@ AMDGPUSubtarget::AMDGPUSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
     FP64Denormals(false),
     FPExceptions(false),
     FlatForGlobal(false),
+    UnalignedBufferAccess(false),
+
     EnableXNACK(false),
     DebuggerInsertNops(false),
     DebuggerReserveRegs(false),
@@ -114,7 +116,6 @@ AMDGPUSubtarget::AMDGPUSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
     TexVTXClauseSize(0),
     FeatureDisable(false),
-
     InstrItins(getInstrItineraryForCPU(GPU)) {
   initializeSubtargetDependencies(TT, GPU, FS);
 }
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
index 44560f30327..3fe61aa449e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
@@ -74,6 +74,7 @@ protected:
   bool FP64Denormals;
   bool FPExceptions;
   bool FlatForGlobal;
+  bool UnalignedBufferAccess;
   bool EnableXNACK;
   bool DebuggerInsertNops;
   bool DebuggerReserveRegs;
@@ -254,6 +255,10 @@
     return FlatForGlobal;
   }
 
+  bool hasUnalignedBufferAccess() const {
+    return UnalignedBufferAccess;
+  }
+
   bool isXNACKEnabled() const {
     return EnableXNACK;
   }
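Two things are worth noting in the subtarget change above: the feature defaults to on for amdhsa targets (it is appended to FullFS before the user feature string), and LLVM applies comma-separated subtarget features in order, so a later "-unaligned-buffer-access" from -mattr still overrides the prepended default. A toy model of that ordering, hand-written for illustration (not LLVM's actual SubtargetFeatures API):

    #include <iostream>
    #include <map>
    #include <sstream>
    #include <string>

    // Later entries override earlier ones, which is why appending the user
    // string after the HSA defaults lets a test turn the feature back off
    // with -mattr=-unaligned-buffer-access.
    static std::map<std::string, bool> applyFeatures(const std::string &FS) {
      std::map<std::string, bool> Features;
      std::stringstream SS(FS);
      std::string Tok;
      while (std::getline(SS, Tok, ','))
        if (!Tok.empty())
          Features[Tok.substr(1)] = (Tok[0] == '+');
      return Features;
    }

    int main() {
      std::string FullFS = "+flat-for-global,+unaligned-buffer-access,"; // HSA defaults
      FullFS += "-unaligned-buffer-access";              // user -mattr, parsed last
      std::cout << applyFeatures(FullFS)["unaligned-buffer-access"] << '\n'; // prints 0
    }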
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index a20f8d00e82..8f36aaa2f45 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -438,24 +438,30 @@ bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
   if (!VT.isSimple() || VT == MVT::Other)
     return false;
 
-  // TODO - CI+ supports unaligned memory accesses, but this requires driver
-  // support.
-
-  // XXX - The only mention I see of this in the ISA manual is for LDS direct
-  // reads the "byte address and must be dword aligned". Is it also true for the
-  // normal loads and stores?
-  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS) {
+  if (AddrSpace == AMDGPUAS::LOCAL_ADDRESS ||
+      AddrSpace == AMDGPUAS::REGION_ADDRESS) {
     // ds_read/write_b64 require 8-byte alignment, but we can do a 4 byte
     // aligned, 8 byte access in a single operation using ds_read2/write2_b32
     // with adjacent offsets.
     bool AlignedBy4 = (Align % 4 == 0);
     if (IsFast)
       *IsFast = AlignedBy4;
+    return AlignedBy4;
   }
 
+  if (Subtarget->hasUnalignedBufferAccess()) {
+    // If we have a uniform constant load, it still requires using a slow
+    // buffer instruction if unaligned.
+    if (IsFast) {
+      *IsFast = (AddrSpace == AMDGPUAS::CONSTANT_ADDRESS) ?
+        (Align % 4 == 0) : true;
+    }
+
+    return true;
+  }
+
   // Smaller than dword value must be aligned.
-  // FIXME: This should be allowed on CI+
   if (VT.bitsLT(MVT::i32))
     return false;
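The new control flow in allowsMisalignedMemoryAccesses is easier to follow outside of diff context. A simplified restatement (a sketch with made-up enum tags, not LLVM's AMDGPUAS values; the remaining size-based checks of the real function are elided):

    #include <cstdio>

    enum class AS { Global, Constant, Local, Region };

    // LDS/GDS: legal and fast only when dword aligned, since an 8-byte access
    // can be done as two 4-byte-aligned halves with ds_read2/write2_b32.
    // With unaligned-buffer-access, everything else becomes legal, but an
    // unaligned constant-address load is reported slow because it must use a
    // buffer instruction instead of a scalar s_load.
    static bool allowsMisaligned(AS AddrSpace, unsigned Align,
                                 bool HasUnalignedBufferAccess, bool &IsFast) {
      if (AddrSpace == AS::Local || AddrSpace == AS::Region) {
        bool AlignedBy4 = (Align % 4 == 0);
        IsFast = AlignedBy4;
        return AlignedBy4;
      }
      if (HasUnalignedBufferAccess) {
        IsFast = (AddrSpace == AS::Constant) ? (Align % 4 == 0) : true;
        return true;
      }
      IsFast = false;
      return false; // the real function continues with per-type checks here
    }

    int main() {
      bool Fast;
      // align-1 global i32 with the feature: legal (buffer_load_dword below)
      std::printf("%d ", allowsMisaligned(AS::Global, 1, true, Fast));
      // align-2 constant i32 with the feature: legal but not fast
      allowsMisaligned(AS::Constant, 2, true, Fast);
      std::printf("%d\n", Fast); // prints "1 0"
    }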
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index e706dfa4124..a113ca2a25a 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -183,8 +183,10 @@ def mubuf_load_atomic : PatFrag <(ops node:$ptr), (atomic_load node:$ptr), [{
 }]>;
 
 def smrd_load : PatFrag <(ops node:$ptr), (load node:$ptr), [{
-  return isConstantLoad(cast<LoadSDNode>(N), -1) &&
-         static_cast<const SITargetLowering *>(getTargetLowering())->isMemOpUniform(N);
+  auto Ld = cast<LoadSDNode>(N);
+  return Ld->getAlignment() >= 4 &&
+         isConstantLoad(Ld, -1) &&
+         static_cast<const SITargetLowering *>(getTargetLowering())->isMemOpUniform(N);
 }]>;
 
 //===----------------------------------------------------------------------===//
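The smrd_load change makes the alignment requirement of scalar memory reads explicit: s_load_* needs dword alignment, so under-aligned constant loads now fall through to the buffer patterns, which is what the UNALIGNED checks in the tests below rely on. A hypothetical restatement of the predicate as plain C++ (ConstantAS and the parameter names are invented for illustration):

    constexpr unsigned ConstantAS = 2; // addrspace(2) in the tests below

    // Select a scalar SMRD load only for uniform, constant-address-space
    // loads with at least dword alignment.
    constexpr bool selectsAsSMRD(unsigned AddrSpace, unsigned Align, bool Uniform) {
      return Align >= 4 && AddrSpace == ConstantAS && Uniform;
    }

    // constant_align2_load_i32: uniform but only 2-byte aligned -> buffer path.
    static_assert(!selectsAsSMRD(ConstantAS, 2, true), "align < 4 rejected");
    // constant_align4_load_i64: dword aligned -> s_load_dwordx2.
    static_assert(selectsAsSMRD(ConstantAS, 4, true), "align >= 4 accepted");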
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu.private-memory.ll b/llvm/test/CodeGen/AMDGPU/amdgpu.private-memory.ll
index dc89ec102d2..7b515862909 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu.private-memory.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu.private-memory.ll
@@ -1,9 +1,9 @@
 ; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -march=amdgcn < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
-; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -mtriple=amdgcn--amdhsa -mcpu=kaveri < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-PROMOTE
+; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -mtriple=amdgcn--amdhsa -mcpu=kaveri -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-PROMOTE
 ; RUN: llc -show-mc-encoding -mattr=-promote-alloca -verify-machineinstrs -march=amdgcn < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
-; RUN: llc -show-mc-encoding -mattr=-promote-alloca -verify-machineinstrs -mtriple=amdgcn-amdhsa -mcpu=kaveri < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-ALLOCA
-; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
-; RUN: llc -show-mc-encoding -mattr=-promote-alloca -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -show-mc-encoding -mattr=-promote-alloca -verify-machineinstrs -mtriple=amdgcn-amdhsa -mcpu=kaveri -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-ALLOCA
+; RUN: llc -show-mc-encoding -mattr=+promote-alloca -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC
+; RUN: llc -show-mc-encoding -mattr=-promote-alloca -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga -mattr=-unaligned-buffer-access < %s | FileCheck %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
 ; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri -amdgpu-promote-alloca < %s | FileCheck -check-prefix=HSAOPT -check-prefix=OPT %s
 ; RUN: opt -S -mtriple=amdgcn-unknown-unknown -mcpu=kaveri -amdgpu-promote-alloca < %s | FileCheck -check-prefix=NOHSAOPT -check-prefix=OPT %s
 
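The -mattr=-unaligned-buffer-access added to the amdhsa RUN lines above appears to compensate for the feature now being on by default for HSA targets: these checks expect the aligned, byte-wise expansions, so the tests opt back out (see the feature-ordering sketch earlier).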
diff --git a/llvm/test/CodeGen/AMDGPU/unaligned-load-store.ll b/llvm/test/CodeGen/AMDGPU/unaligned-load-store.ll
index 8da4d80806d..129748afd93 100644
--- a/llvm/test/CodeGen/AMDGPU/unaligned-load-store.ll
+++ b/llvm/test/CodeGen/AMDGPU/unaligned-load-store.ll
@@ -1,30 +1,28 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
-; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-HSA -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=GCN-NOHSA -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=ALIGNED %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -mattr=+unaligned-buffer-access -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=UNALIGNED %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=ALIGNED %s
 
-; FUNC-LABEL: {{^}}local_unaligned_load_store_i16:
-; GCN: ds_read_u8
-; GCN: ds_read_u8
-; GCN: ds_write_b8
-; GCN: ds_write_b8
-; GCN: s_endpgm
+; SI-LABEL: {{^}}local_unaligned_load_store_i16:
+; SI: ds_read_u8
+; SI: ds_read_u8
+; SI: ds_write_b8
+; SI: ds_write_b8
+; SI: s_endpgm
 define void @local_unaligned_load_store_i16(i16 addrspace(3)* %p, i16 addrspace(3)* %r) #0 {
   %v = load i16, i16 addrspace(3)* %p, align 1
   store i16 %v, i16 addrspace(3)* %r, align 1
   ret void
 }
 
-; FUNC-LABEL: {{^}}global_unaligned_load_store_i16:
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_store_byte
-; GCN-NOHSA: buffer_store_byte
+; SI-LABEL: {{^}}global_unaligned_load_store_i16:
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
 
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_store_byte
-; GCN-HSA: flat_store_byte
+; UNALIGNED: buffer_load_ushort
+; UNALIGNED: buffer_store_short
+; SI: s_endpgm
 define void @global_unaligned_load_store_i16(i16 addrspace(1)* %p, i16 addrspace(1)* %r) #0 {
   %v = load i16, i16 addrspace(1)* %p, align 1
   store i16 %v, i16 addrspace(1)* %r, align 1
@@ -50,40 +48,32 @@ define void @local_unaligned_load_store_i32(i32 addrspace(
   ret void
 }
 
-; FUNC-LABEL: {{^}}global_unaligned_load_store_i32:
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_store_byte
-; GCN-NOHSA: buffer_store_byte
-; GCN-NOHSA: buffer_store_byte
-; GCN-NOHSA: buffer_store_byte
-
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_store_byte
-; GCN-HSA: flat_store_byte
-; GCN-HSA: flat_store_byte
-; GCN-HSA: flat_store_byte
+; SI-LABEL: {{^}}global_unaligned_load_store_i32:
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+
+; UNALIGNED: buffer_load_dword
+; UNALIGNED: buffer_store_dword
 define void @global_unaligned_load_store_i32(i32 addrspace(1)* %p, i32 addrspace(1)* %r) #0 {
   %v = load i32, i32 addrspace(1)* %p, align 1
   store i32 %v, i32 addrspace(1)* %r, align 1
   ret void
 }
 
-; FUNC-LABEL: {{^}}global_align2_load_store_i32:
-; GCN-NOHSA: buffer_load_ushort
-; GCN-NOHSA: buffer_load_ushort
-; GCN-NOHSA: buffer_store_short
-; GCN-NOHSA: buffer_store_short
+; SI-LABEL: {{^}}global_align2_load_store_i32:
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_store_short
+; ALIGNED: buffer_store_short
 
-; GCN-HSA: flat_load_ushort
-; GCN-HSA: flat_load_ushort
-; GCN-HSA: flat_store_short
-; GCN-HSA: flat_store_short
+; UNALIGNED: buffer_load_dword
+; UNALIGNED: buffer_store_dword
 define void @global_align2_load_store_i32(i32 addrspace(1)* %p, i32 addrspace(1)* %r) #0 {
   %v = load i32, i32 addrspace(1)* %p, align 2
   store i32 %v, i32 addrspace(1)* %r, align 2
@@ -142,7 +132,7 @@ define void @local_align2_load_store_i32(i32 addrspace(3)* %p, i32 addrspace(3)*
 ; SI-NOT: v_lshl
 ; SI: ds_write_b8
 ; SI: s_endpgm
-define void @local_unaligned_load_store_i64(i64 addrspace(3)* %p, i64 addrspace(3)* %r) {
+define void @local_unaligned_load_store_i64(i64 addrspace(3)* %p, i64 addrspace(3)* %r) #0 {
   %v = load i64, i64 addrspace(3)* %p, align 1
   store i64 %v, i64 addrspace(3)* %r, align 1
   ret void
@@ -189,61 +179,67 @@ define void @local_unaligned_load_store_i64(i64 addrspace(
 ; SI-NOT: v_lshl
 ; SI: ds_write_b8
 ; SI: s_endpgm
-define void @local_unaligned_load_store_v2i32(<2 x i32> addrspace(3)* %p, <2 x i32> addrspace(3)* %r) {
+define void @local_unaligned_load_store_v2i32(<2 x i32> addrspace(3)* %p, <2 x i32> addrspace(3)* %r) #0 {
   %v = load <2 x i32>, <2 x i32> addrspace(3)* %p, align 1
   store <2 x i32> %v, <2 x i32> addrspace(3)* %r, align 1
   ret void
 }
 
 ; SI-LABEL: {{^}}global_align2_load_store_i64:
-; SI: buffer_load_ushort
-; SI: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
 
-; SI-NOT: v_or_
-; SI-NOT: v_lshl
+; ALIGNED-NOT: v_or_
+; ALIGNED-NOT: v_lshl
 
-; SI: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
 
-; SI-NOT: v_or_
-; SI-NOT: v_lshl
+; ALIGNED-NOT: v_or_
+; ALIGNED-NOT: v_lshl
 
-; SI: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
 
-; SI-NOT: v_or_
-; SI-NOT: v_lshl
+; ALIGNED-NOT: v_or_
+; ALIGNED-NOT: v_lshl
+
+; ALIGNED: buffer_store_short
+; ALIGNED: buffer_store_short
+; ALIGNED: buffer_store_short
+; ALIGNED: buffer_store_short
 
-; SI: buffer_store_short
-; SI: buffer_store_short
-; SI: buffer_store_short
-; SI: buffer_store_short
-define void @global_align2_load_store_i64(i64 addrspace(1)* %p, i64 addrspace(1)* %r) {
+; UNALIGNED: buffer_load_dwordx2
+; UNALIGNED: buffer_store_dwordx2
+define void @global_align2_load_store_i64(i64 addrspace(1)* %p, i64 addrspace(1)* %r) #0 {
   %v = load i64, i64 addrspace(1)* %p, align 2
   store i64 %v, i64 addrspace(1)* %r, align 2
   ret void
 }
 
 ; SI-LABEL: {{^}}unaligned_load_store_i64_global:
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-
-; SI-NOT: v_or_
-; SI-NOT: v_lshl
-
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-define void @unaligned_load_store_i64_global(i64 addrspace(1)* %p, i64 addrspace(1)* %r) {
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; ALIGNED-NOT: v_or_
+; ALIGNED-NOT: v_lshl
+
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+
+; UNALIGNED: buffer_load_dwordx2
+; UNALIGNED: buffer_store_dwordx2
+define void @unaligned_load_store_i64_global(i64 addrspace(1)* %p, i64 addrspace(1)* %r) #0 {
   %v = load i64, i64 addrspace(1)* %p, align 1
   store i64 %v, i64 addrspace(1)* %r, align 1
   ret void
@@ -297,40 +293,43 @@ define void @local_unaligned_load_store_v4i32(<4 x i32> addrspace(3)* %p, <4 x i
 }
 
 ; SI-LABEL: {{^}}global_unaligned_load_store_v4i32
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-; SI: buffer_load_ubyte
-
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-; SI: buffer_store_byte
-define void @global_unaligned_load_store_v4i32(<4 x i32> addrspace(1)* %p, <4 x i32> addrspace(1)* %r) nounwind {
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+; ALIGNED: buffer_store_byte
+
+; UNALIGNED: buffer_load_dwordx4
+; UNALIGNED: buffer_store_dwordx4
+define void @global_unaligned_load_store_v4i32(<4 x i32> addrspace(1)* %p, <4 x i32> addrspace(1)* %r) #0 {
   %v = load <4 x i32>, <4 x i32> addrspace(1)* %p, align 1
   store <4 x i32> %v, <4 x i32> addrspace(1)* %r, align 1
   ret void
@@ -410,50 +409,146 @@ define void @local_store_i64_align_4_with_split_offset(i64 addrspace(3)* %out) #
   ret void
 }
 
-; FUNC-LABEL: {{^}}constant_load_unaligned_i16:
-; GCN-NOHSA: buffer_load_ushort
-; GCN-HSA: flat_load_ushort
-
-; EG: VTX_READ_16 T{{[0-9]+\.X, T[0-9]+\.X}}
-define void @constant_load_unaligned_i16(i32 addrspace(1)* %out, i16 addrspace(2)* %in) {
-entry:
-  %tmp0 = getelementptr i16, i16 addrspace(2)* %in, i32 1
-  %tmp1 = load i16, i16 addrspace(2)* %tmp0
-  %tmp2 = zext i16 %tmp1 to i32
-  store i32 %tmp2, i32 addrspace(1)* %out
+; SI-LABEL: {{^}}constant_unaligned_load_i32:
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; UNALIGNED: s_load_dword
+
+; SI: buffer_store_dword
+define void @constant_unaligned_load_i32(i32 addrspace(2)* %p, i32 addrspace(1)* %r) #0 {
+  %v = load i32, i32 addrspace(2)* %p, align 1
+  store i32 %v, i32 addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}constant_align2_load_i32:
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
+
+; UNALIGNED: s_load_dword
+; UNALIGNED: buffer_store_dword
+define void @constant_align2_load_i32(i32 addrspace(2)* %p, i32 addrspace(1)* %r) #0 {
+  %v = load i32, i32 addrspace(2)* %p, align 2
+  store i32 %v, i32 addrspace(1)* %r, align 4
   ret void
 }
 
-; FUNC-LABEL: {{^}}constant_load_unaligned_i32:
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_load_ubyte
-
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_load_ubyte
-define void @constant_load_unaligned_i32(i32 addrspace(1)* %out, i32 addrspace(2)* %in) {
-entry:
-  %tmp0 = load i32, i32 addrspace(2)* %in, align 1
-  store i32 %tmp0, i32 addrspace(1)* %out
+; SI-LABEL: {{^}}constant_align2_load_i64:
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
+; ALIGNED: buffer_load_ushort
+
+; UNALIGNED: s_load_dwordx2
+; UNALIGNED: buffer_store_dwordx2
+define void @constant_align2_load_i64(i64 addrspace(2)* %p, i64 addrspace(1)* %r) #0 {
+  %v = load i64, i64 addrspace(2)* %p, align 2
+  store i64 %v, i64 addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}constant_align4_load_i64:
+; SI: s_load_dwordx2
+; SI: buffer_store_dwordx2
+define void @constant_align4_load_i64(i64 addrspace(2)* %p, i64 addrspace(1)* %r) #0 {
+  %v = load i64, i64 addrspace(2)* %p, align 4
+  store i64 %v, i64 addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}constant_align4_load_v4i32:
+; SI: s_load_dwordx4
+; SI: buffer_store_dwordx4
+define void @constant_align4_load_v4i32(<4 x i32> addrspace(2)* %p, <4 x i32> addrspace(1)* %r) #0 {
+  %v = load <4 x i32>, <4 x i32> addrspace(2)* %p, align 4
+  store <4 x i32> %v, <4 x i32> addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}constant_unaligned_load_v2i32:
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; UNALIGNED: buffer_load_dwordx2
+
+; SI: buffer_store_dwordx2
+define void @constant_unaligned_load_v2i32(<2 x i32> addrspace(2)* %p, <2 x i32> addrspace(1)* %r) #0 {
+  %v = load <2 x i32>, <2 x i32> addrspace(2)* %p, align 1
+  store <2 x i32> %v, <2 x i32> addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}constant_unaligned_load_v4i32:
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+; ALIGNED: buffer_load_ubyte
+
+; UNALIGNED: buffer_load_dwordx4
+
+; SI: buffer_store_dwordx4
+define void @constant_unaligned_load_v4i32(<4 x i32> addrspace(2)* %p, <4 x i32> addrspace(1)* %r) #0 {
+  %v = load <4 x i32>, <4 x i32> addrspace(2)* %p, align 1
+  store <4 x i32> %v, <4 x i32> addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}constant_align4_load_i8:
+; SI: buffer_load_ubyte
+; SI: buffer_store_byte
+define void @constant_align4_load_i8(i8 addrspace(2)* %p, i8 addrspace(1)* %r) #0 {
+  %v = load i8, i8 addrspace(2)* %p, align 4
+  store i8 %v, i8 addrspace(1)* %r, align 4
+  ret void
+}
+
+; SI-LABEL: {{^}}constant_align2_load_i8:
+; SI: buffer_load_ubyte
+; SI: buffer_store_byte
+define void @constant_align2_load_i8(i8 addrspace(2)* %p, i8 addrspace(1)* %r) #0 {
+  %v = load i8, i8 addrspace(2)* %p, align 2
+  store i8 %v, i8 addrspace(1)* %r, align 2
   ret void
 }
 
-; FUNC-LABEL: {{^}}constant_load_unaligned_f32:
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_load_ubyte
-; GCN-NOHSA: buffer_load_ubyte
-
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_load_ubyte
-; GCN-HSA: flat_load_ubyte
-define void @constant_load_unaligned_f32(float addrspace(1)* %out, float addrspace(2)* %in) {
-  %tmp1 = load float, float addrspace(2)* %in, align 1
-  store float %tmp1, float addrspace(1)* %out
+; SI-LABEL: {{^}}constant_align4_merge_load_2_i32:
+; SI: s_load_dwordx2 s{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0x0{{$}}
+; SI-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], s[[LO]]
+; SI-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], s[[HI]]
+; SI: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
+define void @constant_align4_merge_load_2_i32(i32 addrspace(2)* %p, i32 addrspace(1)* %r) #0 {
+  %gep0 = getelementptr i32, i32 addrspace(2)* %p, i64 1
+  %v0 = load i32, i32 addrspace(2)* %p, align 4
+  %v1 = load i32, i32 addrspace(2)* %gep0, align 4
+
+  %gep1 = getelementptr i32, i32 addrspace(1)* %r, i64 1
+  store i32 %v0, i32 addrspace(1)* %r, align 4
+  store i32 %v1, i32 addrspace(1)* %gep1, align 4
   ret void
 }