diff options
| author | Matt Arsenault <Matthew.Arsenault@amd.com> | 2019-09-09 16:18:07 +0000 |
|---|---|---|
| committer | Matt Arsenault <Matthew.Arsenault@amd.com> | 2019-09-09 16:18:07 +0000 |
| commit | 63e6d8db1cbfe75142669c55819c655c600f00a5 (patch) | |
| tree | 493f6115bf83dc6376311bfcbd134a1b31d0c2f7 /llvm/lib | |
| parent | f707dac742f39774aef446f275cc70f43586312a (diff) | |
| download | bcm5719-llvm-63e6d8db1cbfe75142669c55819c655c600f00a5.tar.gz bcm5719-llvm-63e6d8db1cbfe75142669c55819c655c600f00a5.zip | |
AMDGPU/GlobalISel: Select atomic loads
A new check for an explicitly atomic MMO is needed to avoid
incorrectly matching the pattern for non-atomic loads.
llvm-svn: 371418
Diffstat (limited to 'llvm/lib')
| -rw-r--r-- | llvm/lib/Target/AMDGPU/AMDGPUGISel.td | 13 | ||||
| -rw-r--r-- | llvm/lib/Target/AMDGPU/SIInstrInfo.td | 10 |
2 files changed, 18 insertions, 5 deletions
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td index 769e56b4b09..01441bf77d0 100644 --- a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td +++ b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td @@ -46,12 +46,19 @@ def gi_smrd_sgpr : GIComplexOperandMatcher<s64, "selectSmrdSgpr">, GIComplexPatternEquiv<SMRDSgpr>; +// FIXME: Why are the atomic versions separated? def gi_flat_offset : GIComplexOperandMatcher<s64, "selectFlatOffset">, GIComplexPatternEquiv<FLATOffset>; def gi_flat_offset_signed : GIComplexOperandMatcher<s64, "selectFlatOffsetSigned">, GIComplexPatternEquiv<FLATOffsetSigned>; +def gi_flat_atomic : + GIComplexOperandMatcher<s64, "selectFlatOffset">, + GIComplexPatternEquiv<FLATAtomic>; +def gi_flat_signed_atomic : + GIComplexOperandMatcher<s64, "selectFlatOffsetSigned">, + GIComplexPatternEquiv<FLATSignedAtomic>; def gi_mubuf_scratch_offset : GIComplexOperandMatcher<s32, "selectMUBUFScratchOffset">, @@ -69,6 +76,7 @@ def gi_ds_1addr_1offset : // SelectionDAG. The GISel selector can just insert m0 initialization // directly before before selecting a glue-less load, so hide this // distinction. 
+ def : GINodeEquiv<G_LOAD, AMDGPUld_glue> { let CheckMMOIsNonAtomic = 1; } @@ -77,6 +85,11 @@ def : GINodeEquiv<G_STORE, AMDGPUst_glue> { let CheckMMOIsNonAtomic = 1; } +def : GINodeEquiv<G_LOAD, AMDGPUatomic_ld_glue> { + bit CheckMMOIsAtomic = 1; +} + + def : GINodeEquiv<G_ATOMIC_CMPXCHG, atomic_cmp_swap_glue>; def : GINodeEquiv<G_ATOMICRMW_XCHG, atomic_swap_glue>; diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td index 9fda47d541f..69e76f14e47 100644 --- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td +++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td @@ -456,16 +456,16 @@ def load_align16_local_m0 : PatFrag<(ops node:$ptr), } // End IsLoad = 1 -let AddressSpaces = LoadAddress_local.AddrSpaces in { - +let IsAtomic = 1, AddressSpaces = LoadAddress_local.AddrSpaces in { def atomic_load_32_local_m0 : PatFrag<(ops node:$ptr), (atomic_load_32_glue node:$ptr)> { - let IsAtomic = 1; + let MemoryVT = i32; } def atomic_load_64_local_m0 : PatFrag<(ops node:$ptr), - (atomic_load_64_glue node:$ptr)> { - let IsAtomic = 1; + (atomic_load_64_glue node:$ptr)> { + let MemoryVT = i64; } + } // End let AddressSpaces = LoadAddress_local.AddrSpaces |

