Diffstat (limited to 'llvm/lib/Target/AMDGPU')
 -rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUInstructions.td | 84
 -rw-r--r--  llvm/lib/Target/AMDGPU/BUFInstructions.td    |  4
 -rw-r--r--  llvm/lib/Target/AMDGPU/FLATInstructions.td   |  4
 -rw-r--r--  llvm/lib/Target/AMDGPU/R600Instructions.td   | 25
4 files changed, 24 insertions, 93 deletions
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
index cc006f38fe5..6541470f06a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
@@ -336,12 +336,6 @@ class Aligned<int Bytes> {
   int MinAlignment = Bytes;
 }
 
-class LoadFrag <SDPatternOperator op> : PatFrag<(ops node:$ptr), (op node:$ptr)>;
-
-class StoreFrag<SDPatternOperator op> : PatFrag <
-  (ops node:$value, node:$ptr), (op node:$value, node:$ptr)
->;
-
 class StoreHi16<SDPatternOperator op> : PatFrag <
   (ops node:$value, node:$ptr), (op (srl node:$value, (i32 16)), node:$ptr)> {
   let IsStore = 1;
@@ -367,48 +361,6 @@ def StoreAddress_region : AddressSpaceList<[ AddrSpaces.Region ]>;
 
-class GlobalLoadAddress : CodePatPred<[{
-  auto AS = cast<MemSDNode>(N)->getAddressSpace();
-  return AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::CONSTANT_ADDRESS;
-}]>;
-
-class FlatLoadAddress : CodePatPred<[{
-  const auto AS = cast<MemSDNode>(N)->getAddressSpace();
-  return AS == AMDGPUAS::FLAT_ADDRESS ||
-         AS == AMDGPUAS::GLOBAL_ADDRESS ||
-         AS == AMDGPUAS::CONSTANT_ADDRESS;
-}]>;
-
-class GlobalAddress : CodePatPred<[{
-  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
-}]>;
-
-class PrivateAddress : CodePatPred<[{
-  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
-}]>;
-
-class LocalAddress : CodePatPred<[{
-  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
-}]>;
-
-class RegionAddress : CodePatPred<[{
-  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
-}]>;
-
-class FlatStoreAddress : CodePatPred<[{
-  const auto AS = cast<MemSDNode>(N)->getAddressSpace();
-  return AS == AMDGPUAS::FLAT_ADDRESS ||
-         AS == AMDGPUAS::GLOBAL_ADDRESS;
-}]>;
-
-// TODO: Remove these when stores to new PatFrag format.
-class PrivateStore <SDPatternOperator op> : StoreFrag <op>, PrivateAddress;
-class LocalStore <SDPatternOperator op> : StoreFrag <op>, LocalAddress;
-class RegionStore <SDPatternOperator op> : StoreFrag <op>, RegionAddress;
-class GlobalStore <SDPatternOperator op> : StoreFrag<op>, GlobalAddress;
-class FlatStore <SDPatternOperator op> : StoreFrag <op>, FlatStoreAddress;
-
-
 foreach as = [ "global", "flat", "constant", "local", "private", "region" ] in {
 let AddressSpaces = !cast<AddressSpaceList>("LoadAddress_"#as).AddrSpaces in {
@@ -525,9 +477,6 @@ defm atomic_load_xor : ret_noret_binary_atomic_op<atomic_load_xor>;
 defm atomic_load_fadd : ret_noret_binary_atomic_op<atomic_load_fadd, 0>;
 defm AMDGPUatomic_cmp_swap : ret_noret_binary_atomic_op<AMDGPUatomic_cmp_swap>;
 
-def store_atomic_global : GlobalStore<atomic_store>;
-def atomic_store_local : LocalStore <atomic_store>;
-
 def load_align8_local : PatFrag <(ops node:$ptr), (load_local node:$ptr)> {
   let IsLoad = 1;
@@ -553,28 +502,6 @@ def store_align16_local: PatFrag<(ops node:$val, node:$ptr),
   let IsTruncStore = 0;
 }
 
-
-def atomic_store_flat : FlatStore <atomic_store>;
-
-
-class local_binary_atomic_op<SDNode atomic_op> :
-  PatFrag<(ops node:$ptr, node:$value),
-          (atomic_op node:$ptr, node:$value), [{
-  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
-}]>;
-
-class region_binary_atomic_op<SDNode atomic_op> :
-  PatFrag<(ops node:$ptr, node:$value),
-          (atomic_op node:$ptr, node:$value), [{
-  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
-}]>;
-
-
-def mskor_global : PatFrag<(ops node:$val, node:$ptr),
-                            (AMDGPUstore_mskor node:$val, node:$ptr), [{
-  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
-}]>;
-
 let AddressSpaces = StoreAddress_local.AddrSpaces in {
 defm atomic_cmp_swap_local : ternary_atomic_op<atomic_cmp_swap>;
 defm atomic_cmp_swap_local_m0 : ternary_atomic_op<atomic_cmp_swap_glue>;
@@ -585,17 +512,6 @@ defm atomic_cmp_swap_region : ternary_atomic_op<atomic_cmp_swap>;
 defm atomic_cmp_swap_region_m0 : ternary_atomic_op<atomic_cmp_swap_glue>;
 }
 
-// Legacy.
-def atomic_cmp_swap_global_noret : PatFrag<
-  (ops node:$ptr, node:$cmp, node:$value),
-  (atomic_cmp_swap node:$ptr, node:$cmp, node:$value),
-  [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS && (SDValue(N, 0).use_empty());}]>;
-
-def atomic_cmp_swap_global_ret : PatFrag<
-  (ops node:$ptr, node:$cmp, node:$value),
-  (atomic_cmp_swap node:$ptr, node:$cmp, node:$value),
-  [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS && (!SDValue(N, 0).use_empty());}]>;
-
 //===----------------------------------------------------------------------===//
 // Misc Pattern Fragments
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/BUFInstructions.td b/llvm/lib/Target/AMDGPU/BUFInstructions.td
index 1b12550aed8..691aff4ecbb 100644
--- a/llvm/lib/Target/AMDGPU/BUFInstructions.td
+++ b/llvm/lib/Target/AMDGPU/BUFInstructions.td
@@ -1621,8 +1621,8 @@ multiclass MUBUFStore_Atomic_Pattern <MUBUF_Pseudo Instr_ADDR64, MUBUF_Pseudo In
   >;
 }
 let SubtargetPredicate = isGFX6GFX7 in {
-defm : MUBUFStore_Atomic_Pattern <BUFFER_STORE_DWORD_ADDR64, BUFFER_STORE_DWORD_OFFSET, i32, store_atomic_global>;
-defm : MUBUFStore_Atomic_Pattern <BUFFER_STORE_DWORDX2_ADDR64, BUFFER_STORE_DWORDX2_OFFSET, i64, store_atomic_global>;
+defm : MUBUFStore_Atomic_Pattern <BUFFER_STORE_DWORD_ADDR64, BUFFER_STORE_DWORD_OFFSET, i32, atomic_store_global_32>;
+defm : MUBUFStore_Atomic_Pattern <BUFFER_STORE_DWORDX2_ADDR64, BUFFER_STORE_DWORDX2_OFFSET, i64, atomic_store_global_64>;
 } // End Predicates = isGFX6GFX7
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index e106af42ded..2057cac346d 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -896,8 +896,8 @@ def : FlatSignedLoadPat_D16 <GLOBAL_LOAD_SHORT_D16, load_d16_lo_global, v2i16>;
 def : FlatSignedLoadPat_D16 <GLOBAL_LOAD_SHORT_D16, load_d16_lo_global, v2f16>;
 }
 
-def : FlatStoreSignedAtomicPat <GLOBAL_STORE_DWORD, store_atomic_global, i32>;
-def : FlatStoreSignedAtomicPat <GLOBAL_STORE_DWORDX2, store_atomic_global, i64, VReg_64>;
+def : FlatStoreSignedAtomicPat <GLOBAL_STORE_DWORD, atomic_store_global_32, i32>;
+def : FlatStoreSignedAtomicPat <GLOBAL_STORE_DWORDX2, atomic_store_global_64, i64, VReg_64>;
 
 def : FlatSignedAtomicPat <GLOBAL_ATOMIC_ADD_RTN, atomic_load_add_global_32, i32>;
 def : FlatSignedAtomicPat <GLOBAL_ATOMIC_SUB_RTN, atomic_load_sub_global_32, i32>;
diff --git a/llvm/lib/Target/AMDGPU/R600Instructions.td b/llvm/lib/Target/AMDGPU/R600Instructions.td
index f40eece859e..cbdf0de44f8 100644
--- a/llvm/lib/Target/AMDGPU/R600Instructions.td
+++ b/llvm/lib/Target/AMDGPU/R600Instructions.td
@@ -295,9 +295,23 @@ class VTX_READ <string name, dag outs, list<dag> pattern>
   let VTXInst = 1;
 }
 
-// FIXME: Deprecated.
-class LocalLoad <SDPatternOperator op> : LoadFrag <op>, LocalAddress;
+// Legacy.
+def atomic_cmp_swap_global_noret : PatFrag<
+  (ops node:$ptr, node:$cmp, node:$value),
+  (atomic_cmp_swap node:$ptr, node:$cmp, node:$value),
+  [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS && (SDValue(N, 0).use_empty());}]>;
+
+def atomic_cmp_swap_global_ret : PatFrag<
+  (ops node:$ptr, node:$cmp, node:$value),
+  (atomic_cmp_swap node:$ptr, node:$cmp, node:$value),
+  [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS && (!SDValue(N, 0).use_empty());}]>;
+
+def mskor_global : PatFrag<(ops node:$val, node:$ptr),
+                           (AMDGPUstore_mskor node:$val, node:$ptr), [{
+  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
+}]>;
 
+// FIXME: These are deprecated
 class AZExtLoadBase <SDPatternOperator ld_node>: PatFrag<(ops node:$ptr),
                                                 (ld_node node:$ptr), [{
   LoadSDNode *L = cast<LoadSDNode>(N);
@@ -319,9 +333,10 @@ def az_extloadi32 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
   return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
 }]>;
 
-// FIXME: These are deprecated
-def az_extloadi8_local : LocalLoad <az_extloadi8>;
-def az_extloadi16_local : LocalLoad <az_extloadi16>;
+let AddressSpaces = LoadAddress_local.AddrSpaces in {
+def az_extloadi8_local : PatFrag<(ops node:$ptr), (az_extloadi8 node:$ptr)>;
+def az_extloadi16_local : PatFrag<(ops node:$ptr), (az_extloadi16 node:$ptr)>;
+}
 
 class LoadParamFrag <PatFrag load_type> : PatFrag <
   (ops node:$ptr), (load_type node:$ptr),
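For context, the sketch below shows the declarative PatFrag style that this change migrates to, replacing the removed LoadFrag/StoreFrag wrappers and their C++ address-space predicate classes. It is a minimal illustration only, not part of the commit: it assumes the StoreAddress_local list and the PatFrag AddressSpaces/IsStore fields that appear elsewhere in this diff, and the fragment name store_local_example is hypothetical.

  // Minimal sketch (assumption, not in this commit): restrict a store fragment
  // to the local address space via the AddressSpaces field instead of a C++
  // getAddressSpace() predicate. "store_local_example" is a placeholder name.
  let AddressSpaces = StoreAddress_local.AddrSpaces in {
  def store_local_example : PatFrag<(ops node:$val, node:$ptr),
                                    (store node:$val, node:$ptr)> {
    let IsStore = 1;
  }
  } // End AddressSpaces = StoreAddress_local.AddrSpaces

This mirrors the in-tree replacement done here for az_extloadi8_local/az_extloadi16_local; the global atomic-store fragments referenced by the BUF and FLAT patterns (atomic_store_global_32/_64) are defined the same way elsewhere.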

