author    Matt Arsenault <Matthew.Arsenault@amd.com>  2019-12-27 17:41:16 -0500
committer Matt Arsenault <arsenm2@gmail.com>          2019-12-27 17:52:12 -0500
commit    5ce2ca524e99189fb778d21f63ec0c78944383e5 (patch)
tree      a20bcdd2b9603052442b469f1addcc0c2b8f841a /llvm/lib
parent    e9775bb5d81a1eb1d73319877519e51ed3b9f865 (diff)
AMDGPU/GlobalISel: Use SReg_32 for readfirstlane constraining
This matches the DAG behavior, where SReg_32_XM0 is no longer used everywhere, and fixes the copies into m0 not being coalesced.
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 04b774beb0f..7463ed8fcd5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -1022,7 +1022,7 @@ void AMDGPURegisterBankInfo::constrainOpWithReadfirstlane(
return;
MachineIRBuilder B(MI);
- Register SGPR = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
+ Register SGPR = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
B.buildInstr(AMDGPU::V_READFIRSTLANE_B32)
.addDef(SGPR)
.addReg(Reg);
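
For context, here is a minimal sketch of the pattern constrainOpWithReadfirstlane implements around the changed line. It is not the verbatim LLVM source: the names MI, OpIdx, and MRI are assumed to be the MachineInstr, operand index, and MachineRegisterInfo already in scope in that function. The point of the change is that SReg_32 contains m0 while SReg_32_XM0 excludes it, so a later COPY of the readfirstlane result into m0 can be coalesced away instead of remaining a real copy.

    // Hedged sketch of the surrounding code, not the exact upstream body.
    Register Reg = MI.getOperand(OpIdx).getReg();   // divergent VGPR input

    MachineIRBuilder B(MI);

    // Constrain to the full SReg_32 class (includes m0) rather than
    // SReg_32_XM0, so a subsequent COPY of SGPR into m0 can be coalesced.
    Register SGPR = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
    B.buildInstr(AMDGPU::V_READFIRSTLANE_B32)
      .addDef(SGPR)
      .addReg(Reg);

    // Rewrite the operand so it consumes the now-uniform SGPR value.
    MI.getOperand(OpIdx).setReg(SGPR);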