author:    Francis Visoiu Mistrih <francisvm@yahoo.com>  2018-01-26 11:47:28 +0000
committer: Francis Visoiu Mistrih <francisvm@yahoo.com>  2018-01-26 11:47:28 +0000
commit:    e4718e84e831651213fb944d3517787f69ea0297 (patch)
tree:      a51ab4cc93f8c948f6f546119f91dd5f231f0cfd
parent:    e8524e0ca2e248c6b2973dbe6d3c95025ebef4d8 (diff)
[MIR] Add support for addrspace in MIR
Add support for printing / parsing the addrspace of a MachineMemOperand.
Fixes PR35970.
Differential Revision: https://reviews.llvm.org/D42502
llvm-svn: 323521
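
For illustration, the new attribute is printed after the other memory-operand attributes. A minimal sketch of the resulting MIR syntax, drawn from the AArch64 test added in this patch (register numbers and access sizes are incidental):

    ; an 8-byte load through a pointer into address space 1
    %1:_(s64) = G_LOAD %0(p0) :: (load 8, addrspace 1)
    ; combines with other attributes; addrspace 0 is accepted by the parser but never printed
    %2:_(s32) = G_LOAD %0(p0) :: (load 4, align 2, addrspace 3)
    G_STORE %2(s32), %0(p0) :: (store 4, addrspace 0)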
-rw-r--r--  llvm/lib/CodeGen/MIRParser/MILexer.cpp                      |  1
-rw-r--r--  llvm/lib/CodeGen/MIRParser/MILexer.h                        |  1
-rw-r--r--  llvm/lib/CodeGen/MIRParser/MIParser.cpp                     | 16
-rw-r--r--  llvm/lib/CodeGen/MIRPrinter.cpp                             |  2
-rw-r--r--  llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll  |  4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir       |  2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/syncscopes.ll                      |  6
-rw-r--r--  llvm/test/CodeGen/MIR/AArch64/addrspace-memoperands.mir     | 31
-rw-r--r--  llvm/test/CodeGen/MIR/AMDGPU/syncscopes.mir                 |  6
9 files changed, 60 insertions(+), 9 deletions(-)
diff --git a/llvm/lib/CodeGen/MIRParser/MILexer.cpp b/llvm/lib/CodeGen/MIRParser/MILexer.cpp
index a5d66b5c979..9f41660684a 100644
--- a/llvm/lib/CodeGen/MIRParser/MILexer.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MILexer.cpp
@@ -242,6 +242,7 @@ static MIToken::TokenKind getIdentifierKind(StringRef Identifier) {
       .Case("dereferenceable", MIToken::kw_dereferenceable)
       .Case("invariant", MIToken::kw_invariant)
       .Case("align", MIToken::kw_align)
+      .Case("addrspace", MIToken::kw_addrspace)
       .Case("stack", MIToken::kw_stack)
       .Case("got", MIToken::kw_got)
       .Case("jump-table", MIToken::kw_jump_table)
diff --git a/llvm/lib/CodeGen/MIRParser/MILexer.h b/llvm/lib/CodeGen/MIRParser/MILexer.h
index 275f92985f7..c6a9b79baa9 100644
--- a/llvm/lib/CodeGen/MIRParser/MILexer.h
+++ b/llvm/lib/CodeGen/MIRParser/MILexer.h
@@ -93,6 +93,7 @@ struct MIToken {
     kw_non_temporal,
     kw_invariant,
     kw_align,
+    kw_addrspace,
     kw_stack,
     kw_got,
     kw_jump_table,
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index 4fa84c7bbd9..f3d46afb0b0 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -228,6 +228,7 @@ public:
                             Optional<unsigned> &TiedDefIdx);
   bool parseOffset(int64_t &Offset);
   bool parseAlignment(unsigned &Alignment);
+  bool parseAddrspace(unsigned &Addrspace);
   bool parseOperandsOffset(MachineOperand &Op);
   bool parseIRValue(const Value *&V);
   bool parseMemoryOperandFlag(MachineMemOperand::Flags &Flags);
@@ -2094,6 +2095,17 @@ bool MIParser::parseAlignment(unsigned &Alignment) {
   return false;
 }
 
+bool MIParser::parseAddrspace(unsigned &Addrspace) {
+  assert(Token.is(MIToken::kw_addrspace));
+  lex();
+  if (Token.isNot(MIToken::IntegerLiteral) || Token.integerValue().isSigned())
+    return error("expected an integer literal after 'addrspace'");
+  if (getUnsigned(Addrspace))
+    return true;
+  lex();
+  return false;
+}
+
 bool MIParser::parseOperandsOffset(MachineOperand &Op) {
   int64_t Offset = 0;
   if (parseOffset(Offset))
@@ -2405,6 +2417,10 @@ bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) {
     if (parseAlignment(BaseAlignment))
       return true;
     break;
+  case MIToken::kw_addrspace:
+    if (parseAddrspace(Ptr.AddrSpace))
+      return true;
+    break;
   case MIToken::md_tbaa:
     lex();
     if (parseMDNode(AAInfo.TBAA))
diff --git a/llvm/lib/CodeGen/MIRPrinter.cpp b/llvm/lib/CodeGen/MIRPrinter.cpp
index 09316175a78..658b596262d 100644
--- a/llvm/lib/CodeGen/MIRPrinter.cpp
+++ b/llvm/lib/CodeGen/MIRPrinter.cpp
@@ -892,6 +892,8 @@ void MIPrinter::print(const LLVMContext &Context, const TargetInstrInfo &TII,
     OS << ", !range ";
     Op.getRanges()->printAsOperand(OS, MST);
   }
+  if (unsigned AS = Op.getAddrSpace())
+    OS << ", addrspace " << AS;
   OS << ')';
 }
 
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
index 077c21c0557..a8b37f4a7aa 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll
@@ -457,7 +457,7 @@ define void @trunc(i64 %a) {
 ; CHECK: [[ADDR:%[0-9]+]]:_(p0) = COPY %x0
 ; CHECK: [[ADDR42:%[0-9]+]]:_(p42) = COPY %x1
 ; CHECK: [[VAL1:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (load 8 from %ir.addr, align 16)
-; CHECK: [[VAL2:%[0-9]+]]:_(s64) = G_LOAD [[ADDR42]](p42) :: (load 8 from %ir.addr42)
+; CHECK: [[VAL2:%[0-9]+]]:_(s64) = G_LOAD [[ADDR42]](p42) :: (load 8 from %ir.addr42, addrspace 42)
 ; CHECK: [[SUM2:%.*]]:_(s64) = G_ADD [[VAL1]], [[VAL2]]
 ; CHECK: [[VAL3:%[0-9]+]]:_(s64) = G_LOAD [[ADDR]](p0) :: (volatile load 8 from %ir.addr)
 ; CHECK: [[SUM3:%[0-9]+]]:_(s64) = G_ADD [[SUM2]], [[VAL3]]
@@ -480,7 +480,7 @@ define i64 @load(i64* %addr, i64 addrspace(42)* %addr42) {
 ; CHECK: [[VAL1:%[0-9]+]]:_(s64) = COPY %x2
 ; CHECK: [[VAL2:%[0-9]+]]:_(s64) = COPY %x3
 ; CHECK: G_STORE [[VAL1]](s64), [[ADDR]](p0) :: (store 8 into %ir.addr, align 16)
-; CHECK: G_STORE [[VAL2]](s64), [[ADDR42]](p42) :: (store 8 into %ir.addr42)
+; CHECK: G_STORE [[VAL2]](s64), [[ADDR42]](p42) :: (store 8 into %ir.addr42, addrspace 42)
 ; CHECK: G_STORE [[VAL1]](s64), [[ADDR]](p0) :: (volatile store 8 into %ir.addr)
 ; CHECK: RET_ReallyLR
 define void @store(i64* %addr, i64 addrspace(42)* %addr42, i64 %val1, i64 %val2) {
diff --git a/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir b/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
index 24e8ed8e29c..4ed83607560 100644
--- a/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
+++ b/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
@@ -321,7 +321,7 @@ body: |
 ---
 # CHECK-LABEL: name: optimize_if_and_saveexec_xor_valu_middle
 # CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-# CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET %vgpr0, undef %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+# CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET %vgpr0, undef %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
 # CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
 # CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
 # CHECK-NEXT: SI_MASK_BRANCH
diff --git a/llvm/test/CodeGen/AMDGPU/syncscopes.ll b/llvm/test/CodeGen/AMDGPU/syncscopes.ll
index 5cea1588d4b..61d9e93e03a 100644
--- a/llvm/test/CodeGen/AMDGPU/syncscopes.ll
+++ b/llvm/test/CodeGen/AMDGPU/syncscopes.ll
@@ -1,9 +1,9 @@
 ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx803 -stop-before=si-debugger-insert-nops < %s | FileCheck --check-prefix=GCN %s
 
 ; GCN-LABEL: name: syncscopes
-; GCN: FLAT_STORE_DWORD killed renamable %vgpr1_vgpr2, killed renamable %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("agent") seq_cst 4 into %ir.agent_out)
-; GCN: FLAT_STORE_DWORD killed renamable %vgpr4_vgpr5, killed renamable %vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out)
-; GCN: FLAT_STORE_DWORD killed renamable %vgpr7_vgpr8, killed renamable %vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out)
+; GCN: FLAT_STORE_DWORD killed renamable %vgpr1_vgpr2, killed renamable %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("agent") seq_cst 4 into %ir.agent_out, addrspace 4)
+; GCN: FLAT_STORE_DWORD killed renamable %vgpr4_vgpr5, killed renamable %vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out, addrspace 4)
+; GCN: FLAT_STORE_DWORD killed renamable %vgpr7_vgpr8, killed renamable %vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out, addrspace 4)
 define void @syncscopes(
   i32 %agent,
   i32 addrspace(4)* %agent_out,
diff --git a/llvm/test/CodeGen/MIR/AArch64/addrspace-memoperands.mir b/llvm/test/CodeGen/MIR/AArch64/addrspace-memoperands.mir
new file mode 100644
index 00000000000..a7906e04ca8
--- /dev/null
+++ b/llvm/test/CodeGen/MIR/AArch64/addrspace-memoperands.mir
@@ -0,0 +1,31 @@
+# RUN: llc -mtriple=aarch64-none-linux-gnu -run-pass none -o - %s | FileCheck %s
+
+--- |
+
+  define void @addrspace_memoperands() {
+    ret void
+  }
+
+...
+---
+name: addrspace_memoperands
+body: |
+  bb.0:
+
+    ; CHECK-LABEL: name: addrspace_memoperands
+    ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY %x0
+    ; CHECK: [[LOAD:%[0-9]+]]:_(s64) = G_LOAD [[COPY]](p0) :: (load 8, addrspace 1)
+    ; CHECK: [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (load 4, align 2, addrspace 3)
+    ; CHECK: G_STORE [[LOAD]](s64), [[COPY]](p0) :: (store 8, addrspace 1)
+    ; CHECK: G_STORE [[LOAD1]](s32), [[COPY]](p0) :: (store 4, align 2, addrspace 3)
+    ; CHECK: G_STORE [[LOAD1]](s32), [[COPY]](p0) :: (store 4)
+    ; CHECK: RET_ReallyLR
+    %0:_(p0) = COPY %x0
+    %1:_(s64) = G_LOAD %0(p0) :: (load 8, addrspace 1)
+    %2:_(s32) = G_LOAD %0(p0) :: (load 4, align 2, addrspace 3)
+    G_STORE %1(s64), %0(p0) :: (store 8, addrspace 1)
+    G_STORE %2(s32), %0(p0) :: (store 4, align 2, addrspace 3)
+    ; addrspace 0 is accepted by the parser but not printed
+    G_STORE %2(s32), %0(p0) :: (store 4, addrspace 0)
+    RET_ReallyLR
+...
diff --git a/llvm/test/CodeGen/MIR/AMDGPU/syncscopes.mir b/llvm/test/CodeGen/MIR/AMDGPU/syncscopes.mir
index 81f3ad9b966..1f29fb156fb 100644
--- a/llvm/test/CodeGen/MIR/AMDGPU/syncscopes.mir
+++ b/llvm/test/CodeGen/MIR/AMDGPU/syncscopes.mir
@@ -42,9 +42,9 @@
   !0 = !{i32 1}
 
 # GCN-LABEL: name: syncscopes
-# GCN: FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile non-temporal store syncscope("agent") seq_cst 4 into %ir.agent_out)
-# GCN: FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile non-temporal store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out)
-# GCN: FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile non-temporal store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out)
+# GCN: FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile non-temporal store syncscope("agent") seq_cst 4 into %ir.agent_out, addrspace 4)
+# GCN: FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile non-temporal store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out, addrspace 4)
+# GCN: FLAT_STORE_DWORD killed %vgpr0_vgpr1, killed %vgpr2, 0, -1, 0, implicit %exec, implicit %flat_scr :: (volatile non-temporal store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out, addrspace 4)
 ...
 ---
 name: syncscopes