Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd')
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 1454
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm | 1993
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 395
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 547
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 80
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c | 28
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 332
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 173
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 96
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h | 40
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c | 53
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c | 348
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_module.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 103
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 52
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c (renamed from drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c) | 149
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c (renamed from drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c) | 41
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h | 24
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 62
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 146
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 49
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 9
41 files changed, 3014 insertions, 3363 deletions
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
index a1a35d4d594b..b3672d10ea54 100644
--- a/drivers/gpu/drm/amd/amdkfd/Kconfig
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -1,11 +1,11 @@
-# SPDX-License-Identifier: GPL-2.0-only
+# SPDX-License-Identifier: MIT
#
# Heterogenous system architecture configuration
#
config HSA_AMD
bool "HSA kernel driver for AMD GPU devices"
- depends on DRM_AMDGPU && (X86_64 || ARM64)
+ depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64)
imply AMD_IOMMU_V2 if X86_64
select MMU_NOTIFIER
help
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index 48155060a57c..61474627a32c 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -38,11 +38,9 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \
$(AMDKFD_PATH)/kfd_mqd_manager_v9.o \
$(AMDKFD_PATH)/kfd_mqd_manager_v10.o \
$(AMDKFD_PATH)/kfd_kernel_queue.o \
- $(AMDKFD_PATH)/kfd_kernel_queue_cik.o \
- $(AMDKFD_PATH)/kfd_kernel_queue_vi.o \
- $(AMDKFD_PATH)/kfd_kernel_queue_v9.o \
- $(AMDKFD_PATH)/kfd_kernel_queue_v10.o \
$(AMDKFD_PATH)/kfd_packet_manager.o \
+ $(AMDKFD_PATH)/kfd_packet_manager_vi.o \
+ $(AMDKFD_PATH)/kfd_packet_manager_v9.o \
$(AMDKFD_PATH)/kfd_process_queue_manager.o \
$(AMDKFD_PATH)/kfd_device_queue_manager.o \
$(AMDKFD_PATH)/kfd_device_queue_manager_cik.o \
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 177d1e5329a5..9f59ba93cfe0 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -33,7 +33,9 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
- unsigned int vmid, pasid;
+ unsigned int vmid;
+ uint16_t pasid;
+ bool ret;
/* This workaround is due to HW/FW limitation on Hawaii that
* VMID and PASID are not written into ih_ring_entry
@@ -48,13 +50,13 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
*tmp_ihre = *ihre;
vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd);
- pasid = f2g->get_atc_vmid_pasid_mapping_pasid(dev->kgd, vmid);
+ ret = f2g->get_atc_vmid_pasid_mapping_info(dev->kgd, vmid, &pasid);
tmp_ihre->ring_id &= 0x000000ff;
tmp_ihre->ring_id |= vmid << 8;
tmp_ihre->ring_id |= pasid << 16;
- return (pasid != 0) &&
+ return ret && (pasid != 0) &&
vmid >= dev->vm_info.first_vmid_kfd &&
vmid <= dev->vm_info.last_vmid_kfd;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index 826913c70766..d3400da6ab64 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -274,154 +274,227 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
static const uint32_t cwsr_trap_gfx9_hex[] = {
- 0xbf820001, 0xbf82015e,
+ 0xbf820001, 0xbf820248,
0xb8f8f802, 0x89788678,
- 0xb8fbf803, 0x866eff7b,
- 0x00000400, 0xbf85003b,
- 0x866eff7b, 0x00000800,
- 0xbf850003, 0x866eff7b,
- 0x00000100, 0xbf84000c,
+ 0xb8eef801, 0x866eff6e,
+ 0x00000800, 0xbf840003,
0x866eff78, 0x00002000,
- 0xbf840005, 0xbf8e0010,
- 0xb8eef803, 0x866eff6e,
- 0x00000400, 0xbf84fffb,
- 0x8778ff78, 0x00002000,
- 0x80ec886c, 0x82ed806d,
- 0xb8eef807, 0x866fff6e,
- 0x001f8000, 0x8e6f8b6f,
- 0x8977ff77, 0xfc000000,
- 0x87776f77, 0x896eff6e,
- 0x001f8000, 0xb96ef807,
- 0xb8faf812, 0xb8fbf813,
- 0x8efa887a, 0xc0071bbd,
- 0x00000000, 0xbf8cc07f,
- 0xc0071ebd, 0x00000008,
- 0xbf8cc07f, 0x86ee6e6e,
- 0xbf840001, 0xbe801d6e,
- 0xb8fbf803, 0x867bff7b,
- 0x000001ff, 0xbf850002,
- 0x806c846c, 0x826d806d,
+ 0xbf840016, 0xb8fbf803,
+ 0x866eff7b, 0x00000400,
+ 0xbf85003b, 0x866eff7b,
+ 0x00000800, 0xbf850003,
+ 0x866eff7b, 0x00000100,
+ 0xbf84000c, 0x866eff78,
+ 0x00002000, 0xbf840005,
+ 0xbf8e0010, 0xb8eef803,
+ 0x866eff6e, 0x00000400,
+ 0xbf84fffb, 0x8778ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xb8eef807,
+ 0x866fff6e, 0x001f8000,
+ 0x8e6f8b6f, 0x8977ff77,
+ 0xfc000000, 0x87776f77,
+ 0x896eff6e, 0x001f8000,
+ 0xb96ef807, 0xb8faf812,
+ 0xb8fbf813, 0x8efa887a,
+ 0xc0071bbd, 0x00000000,
+ 0xbf8cc07f, 0xc0071ebd,
+ 0x00000008, 0xbf8cc07f,
+ 0x86ee6e6e, 0xbf840001,
+ 0xbe801d6e, 0xb8fbf803,
+ 0x867bff7b, 0x000001ff,
+ 0xbf850002, 0x806c846c,
+ 0x826d806d, 0x866dff6d,
+ 0x0000ffff, 0x8f6e8b77,
+ 0x866eff6e, 0x001f8000,
+ 0xb96ef807, 0x86fe7e7e,
+ 0x86ea6a6a, 0x8f6e8378,
+ 0xb96ee0c2, 0xbf800002,
+ 0xb9780002, 0xbe801f6c,
0x866dff6d, 0x0000ffff,
- 0x8f6e8b77, 0x866eff6e,
- 0x001f8000, 0xb96ef807,
- 0x86fe7e7e, 0x86ea6a6a,
- 0x8f6e8378, 0xb96ee0c2,
- 0xbf800002, 0xb9780002,
- 0xbe801f6c, 0x866dff6d,
- 0x0000ffff, 0xbefa0080,
- 0xb97a0283, 0xb8fa2407,
- 0x8e7a9b7a, 0x876d7a6d,
- 0xb8fa03c7, 0x8e7a9a7a,
- 0x876d7a6d, 0xb8faf807,
- 0x867aff7a, 0x00007fff,
- 0xb97af807, 0xbeee007e,
- 0xbeef007f, 0xbefe0180,
- 0xbf900004, 0x877a8478,
- 0xb97af802, 0xbf8e0002,
- 0xbf88fffe, 0xb8fa2a05,
- 0x807a817a, 0x8e7a8a7a,
- 0xb8fb1605, 0x807b817b,
- 0x8e7b867b, 0x807a7b7a,
- 0x807a7e7a, 0x827b807f,
- 0x867bff7b, 0x0000ffff,
- 0xc04b1c3d, 0x00000050,
- 0xbf8cc07f, 0xc04b1d3d,
- 0x00000060, 0xbf8cc07f,
- 0xc0431e7d, 0x00000074,
- 0xbf8cc07f, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0x867aff7f,
- 0x08000000, 0x8f7a837a,
- 0x87777a77, 0x867aff7f,
- 0x70000000, 0x8f7a817a,
- 0x87777a77, 0xbef1007c,
- 0xbef00080, 0xb8f02a05,
- 0x80708170, 0x8e708a70,
- 0xb8fa1605, 0x807a817a,
- 0x8e7a867a, 0x80707a70,
- 0xbef60084, 0xbef600ff,
- 0x01000000, 0xbefe007c,
- 0xbefc0070, 0xc0611c7a,
- 0x0000007c, 0xbf8cc07f,
- 0x80708470, 0xbefc007e,
+ 0xbefa0080, 0xb97a0283,
+ 0xb8fa2407, 0x8e7a9b7a,
+ 0x876d7a6d, 0xb8fa03c7,
+ 0x8e7a9a7a, 0x876d7a6d,
+ 0xb8faf807, 0x867aff7a,
+ 0x00007fff, 0xb97af807,
+ 0xbeee007e, 0xbeef007f,
+ 0xbefe0180, 0xbf900004,
+ 0x877a8478, 0xb97af802,
+ 0xbf8e0002, 0xbf88fffe,
+ 0xb8fa2a05, 0x807a817a,
+ 0x8e7a8a7a, 0xb8fb1605,
+ 0x807b817b, 0x8e7b867b,
+ 0x807a7b7a, 0x807a7e7a,
+ 0x827b807f, 0x867bff7b,
+ 0x0000ffff, 0xc04b1c3d,
+ 0x00000050, 0xbf8cc07f,
+ 0xc04b1d3d, 0x00000060,
+ 0xbf8cc07f, 0xc0431e7d,
+ 0x00000074, 0xbf8cc07f,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x867aff7f, 0x08000000,
+ 0x8f7a837a, 0x87777a77,
+ 0x867aff7f, 0x70000000,
+ 0x8f7a817a, 0x87777a77,
+ 0xbef1007c, 0xbef00080,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0xb8fa1605,
+ 0x807a817a, 0x8e7a867a,
+ 0x80707a70, 0xbef60084,
+ 0xbef600ff, 0x01000000,
0xbefe007c, 0xbefc0070,
- 0xc0611b3a, 0x0000007c,
+ 0xc0611c7a, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611b7a,
+ 0xbefc0070, 0xc0611b3a,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611bba, 0x0000007c,
+ 0xc0611b7a, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611bfa,
+ 0xbefc0070, 0xc0611bba,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611e3a, 0x0000007c,
- 0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0xb8fbf803,
- 0xbefe007c, 0xbefc0070,
- 0xc0611efa, 0x0000007c,
+ 0xc0611bfa, 0x0000007c,
0xbf8cc07f, 0x80708470,
0xbefc007e, 0xbefe007c,
- 0xbefc0070, 0xc0611a3a,
+ 0xbefc0070, 0xc0611e3a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xb8fbf803, 0xbefe007c,
+ 0xbefc0070, 0xc0611efa,
0x0000007c, 0xbf8cc07f,
0x80708470, 0xbefc007e,
0xbefe007c, 0xbefc0070,
- 0xc0611a7a, 0x0000007c,
- 0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0xb8f1f801,
- 0xbefe007c, 0xbefc0070,
- 0xc0611c7a, 0x0000007c,
+ 0xc0611a3a, 0x0000007c,
0xbf8cc07f, 0x80708470,
- 0xbefc007e, 0x867aff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f7a, 0xb8f02a05,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611a7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xb8f1f801, 0xbefe007c,
+ 0xbefc0070, 0xc0611c7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0xb8fb1605,
+ 0x807b817b, 0x8e7b847b,
+ 0x8e76827b, 0xbef600ff,
+ 0x01000000, 0xbef20174,
+ 0x80747074, 0x82758075,
+ 0xbefc0080, 0xbf800000,
+ 0xbe802b00, 0xbe822b02,
+ 0xbe842b04, 0xbe862b06,
+ 0xbe882b08, 0xbe8a2b0a,
+ 0xbe8c2b0c, 0xbe8e2b0e,
+ 0xc06b003a, 0x00000000,
+ 0xbf8cc07f, 0xc06b013a,
+ 0x00000010, 0xbf8cc07f,
+ 0xc06b023a, 0x00000020,
+ 0xbf8cc07f, 0xc06b033a,
+ 0x00000030, 0xbf8cc07f,
+ 0x8074c074, 0x82758075,
+ 0x807c907c, 0xbf0a7b7c,
+ 0xbf85ffe7, 0xbef40172,
+ 0xbef00080, 0xbefe00c1,
+ 0xbeff00c1, 0xbee80080,
+ 0xbee90080, 0xbef600ff,
+ 0x01000000, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf85004d,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbf820008, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb4306,
+ 0x867bc17b, 0xbf840063,
+ 0xbf8a0000, 0x867aff6f,
+ 0x04000000, 0xbf84005f,
+ 0x8e7b867b, 0x8e7b827b,
+ 0xbef6007b, 0xb8f02a05,
0x80708170, 0x8e708a70,
- 0xb8fb1605, 0x807b817b,
- 0x8e7b847b, 0x8e76827b,
- 0xbef600ff, 0x01000000,
- 0xbef20174, 0x80747074,
- 0x82758075, 0xbefc0080,
- 0xbf800000, 0xbe802b00,
- 0xbe822b02, 0xbe842b04,
- 0xbe862b06, 0xbe882b08,
- 0xbe8a2b0a, 0xbe8c2b0c,
- 0xbe8e2b0e, 0xc06b003a,
- 0x00000000, 0xbf8cc07f,
- 0xc06b013a, 0x00000010,
- 0xbf8cc07f, 0xc06b023a,
- 0x00000020, 0xbf8cc07f,
- 0xc06b033a, 0x00000030,
- 0xbf8cc07f, 0x8074c074,
- 0x82758075, 0x807c907c,
- 0xbf0a7b7c, 0xbf85ffe7,
- 0xbef40172, 0xbef00080,
- 0xbefe00c1, 0xbeff00c1,
- 0xbee80080, 0xbee90080,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
0xbef600ff, 0x01000000,
- 0xe0724000, 0x701d0000,
- 0xe0724100, 0x701d0100,
- 0xe0724200, 0x701d0200,
- 0xe0724300, 0x701d0300,
- 0xbefe00c1, 0xbeff00c1,
- 0xb8fb4306, 0x867bc17b,
- 0xbf84002c, 0xbf8a0000,
- 0x867aff6f, 0x04000000,
- 0xbf840028, 0x8e7b867b,
- 0x8e7b827b, 0xbef6007b,
- 0xb8f02a05, 0x80708170,
- 0x8e708a70, 0xb8fa1605,
- 0x807a817a, 0x8e7a867a,
- 0x80707a70, 0x8070ff70,
- 0x00000080, 0xbef600ff,
- 0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
0xd1060002, 0x00011103,
0x7e0602ff, 0x00000200,
0xbefc00ff, 0x00010000,
@@ -438,9 +511,53 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0x807b817b, 0x8e7b827b,
0x8e76887b, 0xbef600ff,
0x01000000, 0xbefc0084,
- 0xbf0a7b7c, 0xbf840015,
+ 0xbf0a7b7c, 0xbf84006d,
0xbf11017c, 0x807bff7b,
- 0x00001000, 0x7e000300,
+ 0x00001000, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850051,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffb1, 0xbf9c0000,
+ 0xbf820012, 0x7e000300,
0x7e020301, 0x7e040302,
0x7e060303, 0xe0724000,
0x701d0000, 0xe0724100,
@@ -563,24 +680,47 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
};
static const uint32_t cwsr_trap_gfx10_hex[] = {
- 0xbf820001, 0xbf82012e,
- 0xb0804004, 0xb970f802,
- 0x8a708670, 0xb971f803,
- 0x8771ff71, 0x00000400,
- 0xbf850008, 0xb971f803,
- 0x8771ff71, 0x000001ff,
- 0xbf850001, 0x806c846c,
+ 0xbf820001, 0xbf8201c1,
+ 0xb0804004, 0xb978f802,
+ 0x8a788678, 0xb971f803,
+ 0x876eff71, 0x00000400,
+ 0xbf850033, 0x876eff71,
+ 0x00000100, 0xbf840002,
+ 0x8878ff78, 0x00002000,
+ 0x8a77ff77, 0xff000000,
+ 0xb96ef807, 0x876fff6e,
+ 0x02000000, 0x8f6f866f,
+ 0x88776f77, 0x876fff6e,
+ 0x003f8000, 0x8f6f896f,
+ 0x88776f77, 0x8a6eff6e,
+ 0x023f8000, 0xb9eef807,
+ 0xb97af812, 0xb97bf813,
+ 0x8ffa887a, 0xf4051bbd,
+ 0xfa000000, 0xbf8cc07f,
+ 0xf4051ebd, 0xfa000008,
+ 0xbf8cc07f, 0x87ee6e6e,
+ 0xbf840001, 0xbe80206e,
+ 0xb971f803, 0x8771ff71,
+ 0x000001ff, 0xbf850002,
+ 0x806c846c, 0x826d806d,
+ 0x876dff6d, 0x0000ffff,
+ 0x906e8977, 0x876fff6e,
+ 0x003f8000, 0x906e8677,
+ 0x876eff6e, 0x02000000,
+ 0x886e6f6e, 0xb9eef807,
+ 0x87fe7e7e, 0x87ea6a6a,
+ 0xb9f8f802, 0xbe80226c,
+ 0xb971f803, 0x8771ff71,
+ 0x00000100, 0xbf840006,
+ 0xbef60380, 0xb9f60203,
0x876dff6d, 0x0000ffff,
- 0xbe80226c, 0xb971f803,
- 0x8771ff71, 0x00000100,
- 0xbf840006, 0xbef60380,
- 0xb9f60203, 0x876dff6d,
- 0x0000ffff, 0x80ec886c,
- 0x82ed806d, 0xbef60380,
- 0xb9f60283, 0xb973f816,
- 0xb9762c07, 0x8f769c76,
- 0x886d766d, 0xb97603c7,
- 0x8f769b76, 0x886d766d,
+ 0x80ec886c, 0x82ed806d,
+ 0xbef60380, 0xb9f60283,
+ 0xb972f816, 0xb9762c07,
+ 0x8f769a76, 0x886d766d,
+ 0xb97603c7, 0x8f769976,
+ 0x886d766d, 0xb9760647,
+ 0x8f769876, 0x886d766d,
0xb976f807, 0x8776ff76,
0x00007fff, 0xb9f6f807,
0xbeee037e, 0xbeef037f,
@@ -589,274 +729,834 @@ static const uint32_t cwsr_trap_gfx10_hex[] = {
0xbef4037e, 0x8775ff7f,
0x0000ffff, 0x8875ff75,
0x00040000, 0xbef60380,
- 0xbef703ff, 0x00807fac,
+ 0xbef703ff, 0x10807fac,
0x8776ff7f, 0x08000000,
0x90768376, 0x88777677,
0x8776ff7f, 0x70000000,
0x90768176, 0x88777677,
0xbefb037c, 0xbefa0380,
- 0xb97202dc, 0x8872727f,
- 0xbefe03c1, 0x877c8172,
- 0xbf06817c, 0xbf850002,
- 0xbeff0380, 0xbf820001,
- 0xbeff03c1, 0xb9712a05,
- 0x80718171, 0x8f718271,
- 0x877c8172, 0xbf06817c,
- 0xbf85000d, 0x8f768771,
+ 0xb97302dc, 0x8f739973,
+ 0x8873737f, 0xb97a2a05,
+ 0x807a817a, 0x907c9973,
+ 0x877c817c, 0xbf06817c,
+ 0xbf850002, 0x8f7a897a,
+ 0xbf820001, 0x8f7a8a7a,
+ 0xb9761e06, 0x8f768a76,
+ 0x807a767a, 0x807aff7a,
+ 0x00000200, 0xbef603ff,
+ 0x01000000, 0xbefe037c,
+ 0xbefc037a, 0xf4611efa,
+ 0xf8000000, 0x807a847a,
+ 0xbefc037e, 0xbefe037c,
+ 0xbefc037a, 0xf4611b3a,
+ 0xf8000000, 0x807a847a,
+ 0xbefc037e, 0xbefe037c,
+ 0xbefc037a, 0xf4611b7a,
+ 0xf8000000, 0x807a847a,
+ 0xbefc037e, 0xbefe037c,
+ 0xbefc037a, 0xf4611bba,
+ 0xf8000000, 0x807a847a,
+ 0xbefc037e, 0xbefe037c,
+ 0xbefc037a, 0xf4611bfa,
+ 0xf8000000, 0x807a847a,
+ 0xbefc037e, 0xbefe037c,
+ 0xbefc037a, 0xf4611e3a,
+ 0xf8000000, 0x807a847a,
+ 0xbefc037e, 0xb971f803,
+ 0xbefe037c, 0xbefc037a,
+ 0xf4611c7a, 0xf8000000,
+ 0x807a847a, 0xbefc037e,
+ 0xbefe037c, 0xbefc037a,
+ 0xf4611cba, 0xf8000000,
+ 0x807a847a, 0xbefc037e,
+ 0xb97bf801, 0xbefe037c,
+ 0xbefc037a, 0xf4611efa,
+ 0xf8000000, 0x807a847a,
+ 0xbefc037e, 0xb97bf814,
+ 0xbefe037c, 0xbefc037a,
+ 0xf4611efa, 0xf8000000,
+ 0x807a847a, 0xbefc037e,
+ 0xb97bf815, 0xbefe037c,
+ 0xbefc037a, 0xf4611efa,
+ 0xf8000000, 0x807a847a,
+ 0xbefc037e, 0x8776ff7f,
+ 0x04000000, 0xbeef0380,
+ 0x886f6f76, 0xb97a2a05,
+ 0x807a817a, 0x907c9973,
+ 0x877c817c, 0xbf06817c,
+ 0xbf850002, 0x8f7a897a,
+ 0xbf820001, 0x8f7a8a7a,
+ 0xb9761e06, 0x8f768a76,
+ 0x807a767a, 0xbef603ff,
+ 0x01000000, 0xbef20374,
+ 0x80747a74, 0x82758075,
+ 0xbefc0380, 0xbf800000,
+ 0xbe802f00, 0xbe822f02,
+ 0xbe842f04, 0xbe862f06,
+ 0xbe882f08, 0xbe8a2f0a,
+ 0xbe8c2f0c, 0xbe8e2f0e,
+ 0xf469003a, 0xfa000000,
+ 0xf469013a, 0xfa000010,
+ 0xf469023a, 0xfa000020,
+ 0xf469033a, 0xfa000030,
+ 0x8074c074, 0x82758075,
+ 0x807c907c, 0xbf0aff7c,
+ 0x00000060, 0xbf85ffea,
+ 0xbe802f00, 0xbe822f02,
+ 0xbe842f04, 0xbe862f06,
+ 0xbe882f08, 0xbe8a2f0a,
+ 0xf469003a, 0xfa000000,
+ 0xf469013a, 0xfa000010,
+ 0xf469023a, 0xfa000020,
+ 0x8074b074, 0x82758075,
+ 0xbef40372, 0xbefa0380,
+ 0xbefe03c1, 0x907c9973,
+ 0x877c817c, 0xbf06817c,
+ 0xbf850002, 0xbeff0380,
+ 0xbf820002, 0xbeff03c1,
+ 0xbf82000b, 0xbef603ff,
+ 0x01000000, 0xe0704000,
+ 0x7a5d0000, 0xe0704080,
+ 0x7a5d0100, 0xe0704100,
+ 0x7a5d0200, 0xe0704180,
+ 0x7a5d0300, 0xbf82000a,
0xbef603ff, 0x01000000,
- 0xbefc0380, 0x7e008700,
- 0xe0704000, 0x7a5d0000,
- 0x807c817c, 0x807aff7a,
- 0x00000080, 0xbf0a717c,
- 0xbf85fff8, 0xbf82001b,
- 0x8f768871, 0xbef603ff,
- 0x01000000, 0xbefc0380,
- 0x7e008700, 0xe0704000,
- 0x7a5d0000, 0x807c817c,
- 0x807aff7a, 0x00000100,
- 0xbf0a717c, 0xbf85fff8,
- 0xb9711e06, 0x8771c171,
- 0xbf84000c, 0x8f718371,
- 0x80717c71, 0xbefe03c1,
- 0xbeff0380, 0x7e008700,
0xe0704000, 0x7a5d0000,
- 0x807c817c, 0x807aff7a,
- 0x00000080, 0xbf0a717c,
- 0xbf85fff8, 0xbf8a0000,
- 0x8776ff72, 0x04000000,
- 0xbf84002b, 0xbefe03c1,
- 0x877c8172, 0xbf06817c,
+ 0xe0704100, 0x7a5d0100,
+ 0xe0704200, 0x7a5d0200,
+ 0xe0704300, 0x7a5d0300,
+ 0xbefe03c1, 0x907c9973,
+ 0x877c817c, 0xbf06817c,
0xbf850002, 0xbeff0380,
0xbf820001, 0xbeff03c1,
0xb9714306, 0x8771c171,
- 0xbf840021, 0x8f718671,
+ 0xbf840046, 0xbf8a0000,
+ 0x8776ff6f, 0x04000000,
+ 0xbf840042, 0x8f718671,
0x8f718271, 0xbef60371,
+ 0xb97a2a05, 0x807a817a,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0x8f7a897a, 0xbf820001,
+ 0x8f7a8a7a, 0xb9761e06,
+ 0x8f768a76, 0x807a767a,
+ 0x807aff7a, 0x00000200,
+ 0x807aff7a, 0x00000080,
0xbef603ff, 0x01000000,
0xd7650000, 0x000100c1,
0xd7660000, 0x000200c1,
- 0x16000084, 0x877c8172,
- 0xbf06817c, 0xbefc0380,
- 0xbf85000a, 0x807cff7c,
- 0x00000080, 0x807aff7a,
- 0x00000080, 0xd5250000,
- 0x0001ff00, 0x00000080,
- 0xbf0a717c, 0xbf85fff7,
- 0xbf820009, 0x807cff7c,
- 0x00000100, 0x807aff7a,
- 0x00000100, 0xd5250000,
- 0x0001ff00, 0x00000100,
- 0xbf0a717c, 0xbf85fff7,
- 0x877c8172, 0xbf06817c,
- 0xbf850003, 0x8f7687ff,
- 0x0000006a, 0xbf820002,
- 0x8f7688ff, 0x0000006a,
+ 0x16000084, 0x907c9973,
+ 0x877c817c, 0xbf06817c,
+ 0xbefc0380, 0xbf850012,
+ 0xbe8303ff, 0x00000080,
+ 0xbf800000, 0xbf800000,
+ 0xbf800000, 0xd8d80000,
+ 0x01000000, 0xbf8c0000,
+ 0xe0704000, 0x7a5d0100,
+ 0x807c037c, 0x807a037a,
+ 0xd5250000, 0x0001ff00,
+ 0x00000080, 0xbf0a717c,
+ 0xbf85fff4, 0xbf820011,
+ 0xbe8303ff, 0x00000100,
+ 0xbf800000, 0xbf800000,
+ 0xbf800000, 0xd8d80000,
+ 0x01000000, 0xbf8c0000,
+ 0xe0704000, 0x7a5d0100,
+ 0x807c037c, 0x807a037a,
+ 0xd5250000, 0x0001ff00,
+ 0x00000100, 0xbf0a717c,
+ 0xbf85fff4, 0xbefe03c1,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850004,
+ 0xbefa03ff, 0x00000200,
+ 0xbeff0380, 0xbf820003,
+ 0xbefa03ff, 0x00000400,
+ 0xbeff03c1, 0xb9712a05,
+ 0x80718171, 0x8f718271,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850017,
0xbef603ff, 0x01000000,
- 0x877c8172, 0xbf06817c,
- 0xbefc0380, 0xbf800000,
- 0xbf85000b, 0xbe802e00,
- 0x7e000200, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000080, 0x807c817c,
- 0xbf0aff7c, 0x0000006a,
- 0xbf85fff6, 0xbf82000a,
- 0xbe802e00, 0x7e000200,
- 0xe0704000, 0x7a5d0000,
- 0x807aff7a, 0x00000100,
- 0x807c817c, 0xbf0aff7c,
- 0x0000006a, 0xbf85fff6,
- 0xbef60384, 0xbef603ff,
- 0x01000000, 0x877c8172,
- 0xbf06817c, 0xbf850030,
- 0x7e00027b, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000080, 0x7e00026c,
- 0xe0704000, 0x7a5d0000,
- 0x807aff7a, 0x00000080,
- 0x7e00026d, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000080, 0x7e00026e,
- 0xe0704000, 0x7a5d0000,
- 0x807aff7a, 0x00000080,
- 0x7e00026f, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000080, 0x7e000270,
- 0xe0704000, 0x7a5d0000,
- 0x807aff7a, 0x00000080,
- 0xb971f803, 0x7e000271,
+ 0xbefc0384, 0xbf0a717c,
+ 0xbf840037, 0x7e008700,
+ 0x7e028701, 0x7e048702,
+ 0x7e068703, 0xe0704000,
+ 0x7a5d0000, 0xe0704080,
+ 0x7a5d0100, 0xe0704100,
+ 0x7a5d0200, 0xe0704180,
+ 0x7a5d0300, 0x807c847c,
+ 0x807aff7a, 0x00000200,
+ 0xbf0a717c, 0xbf85ffef,
+ 0xbf820025, 0xbef603ff,
+ 0x01000000, 0xbefc0384,
+ 0xbf0a717c, 0xbf840020,
+ 0x7e008700, 0x7e028701,
+ 0x7e048702, 0x7e068703,
0xe0704000, 0x7a5d0000,
+ 0xe0704100, 0x7a5d0100,
+ 0xe0704200, 0x7a5d0200,
+ 0xe0704300, 0x7a5d0300,
+ 0x807c847c, 0x807aff7a,
+ 0x00000400, 0xbf0a717c,
+ 0xbf85ffef, 0xb9711e06,
+ 0x8771c171, 0xbf84000c,
+ 0x8f718371, 0x80717c71,
+ 0xbefe03c1, 0xbeff0380,
+ 0x7e008700, 0xe0704000,
+ 0x7a5d0000, 0x807c817c,
0x807aff7a, 0x00000080,
- 0x7e000273, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000080, 0xb97bf801,
- 0x7e00027b, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000080, 0xbf82002f,
- 0x7e00027b, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000100, 0x7e00026c,
- 0xe0704000, 0x7a5d0000,
- 0x807aff7a, 0x00000100,
- 0x7e00026d, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000100, 0x7e00026e,
- 0xe0704000, 0x7a5d0000,
- 0x807aff7a, 0x00000100,
- 0x7e00026f, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000100, 0x7e000270,
- 0xe0704000, 0x7a5d0000,
- 0x807aff7a, 0x00000100,
- 0xb971f803, 0x7e000271,
- 0xe0704000, 0x7a5d0000,
- 0x807aff7a, 0x00000100,
- 0x7e000273, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000100, 0xb97bf801,
- 0x7e00027b, 0xe0704000,
- 0x7a5d0000, 0x807aff7a,
- 0x00000100, 0xbf820119,
- 0xbef4037e, 0x8775ff7f,
- 0x0000ffff, 0x8875ff75,
- 0x00040000, 0xbef60380,
- 0xbef703ff, 0x00807fac,
- 0x8772ff7f, 0x08000000,
- 0x90728372, 0x88777277,
- 0x8772ff7f, 0x70000000,
- 0x90728172, 0x88777277,
- 0xb97902dc, 0x8879797f,
- 0xbef80380, 0xbefe03c1,
- 0x877c8179, 0xbf06817c,
+ 0xbf0a717c, 0xbf85fff8,
+ 0xbf820142, 0xbef4037e,
+ 0x8775ff7f, 0x0000ffff,
+ 0x8875ff75, 0x00040000,
+ 0xbef60380, 0xbef703ff,
+ 0x10807fac, 0x8772ff7f,
+ 0x08000000, 0x90728372,
+ 0x88777277, 0x8772ff7f,
+ 0x70000000, 0x90728172,
+ 0x88777277, 0xb97302dc,
+ 0x8f739973, 0x8873737f,
+ 0x8772ff7f, 0x04000000,
+ 0xbf840036, 0xbefe03c1,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0xbeff0380, 0xbf820001,
+ 0xbeff03c1, 0xb96f4306,
+ 0x876fc16f, 0xbf84002b,
+ 0x8f6f866f, 0x8f6f826f,
+ 0xbef6036f, 0xb9782a05,
+ 0x80788178, 0x907c9973,
+ 0x877c817c, 0xbf06817c,
+ 0xbf850002, 0x8f788978,
+ 0xbf820001, 0x8f788a78,
+ 0xb9721e06, 0x8f728a72,
+ 0x80787278, 0x8078ff78,
+ 0x00000200, 0x8078ff78,
+ 0x00000080, 0xbef603ff,
+ 0x01000000, 0x907c9973,
+ 0x877c817c, 0xbf06817c,
+ 0xbefc0380, 0xbf850009,
+ 0xe0310000, 0x781d0000,
+ 0x807cff7c, 0x00000080,
+ 0x8078ff78, 0x00000080,
+ 0xbf0a6f7c, 0xbf85fff8,
+ 0xbf820008, 0xe0310000,
+ 0x781d0000, 0x807cff7c,
+ 0x00000100, 0x8078ff78,
+ 0x00000100, 0xbf0a6f7c,
+ 0xbf85fff8, 0xbef80380,
+ 0xbefe03c1, 0x907c9973,
+ 0x877c817c, 0xbf06817c,
0xbf850002, 0xbeff0380,
0xbf820001, 0xbeff03c1,
0xb96f2a05, 0x806f816f,
- 0x8f6f826f, 0x877c8179,
- 0xbf06817c, 0xbf850013,
- 0x8f76876f, 0xbef603ff,
+ 0x8f6f826f, 0x907c9973,
+ 0x877c817c, 0xbf06817c,
+ 0xbf850021, 0xbef603ff,
0x01000000, 0xbef20378,
- 0x8078ff78, 0x00000080,
- 0xbefc0381, 0xe0304000,
- 0x785d0000, 0xbf8c3f70,
- 0x7e008500, 0x807c817c,
- 0x8078ff78, 0x00000080,
- 0xbf0a6f7c, 0xbf85fff7,
- 0xe0304000, 0x725d0000,
- 0xbf820023, 0x8f76886f,
+ 0x8078ff78, 0x00000200,
+ 0xbefc0384, 0xe0304000,
+ 0x785d0000, 0xe0304080,
+ 0x785d0100, 0xe0304100,
+ 0x785d0200, 0xe0304180,
+ 0x785d0300, 0xbf8c3f70,
+ 0x7e008500, 0x7e028501,
+ 0x7e048502, 0x7e068503,
+ 0x807c847c, 0x8078ff78,
+ 0x00000200, 0xbf0a6f7c,
+ 0xbf85ffee, 0xe0304000,
+ 0x725d0000, 0xe0304080,
+ 0x725d0100, 0xe0304100,
+ 0x725d0200, 0xe0304180,
+ 0x725d0300, 0xbf820032,
0xbef603ff, 0x01000000,
0xbef20378, 0x8078ff78,
- 0x00000100, 0xbefc0381,
- 0xe0304000, 0x785d0000,
- 0xbf8c3f70, 0x7e008500,
- 0x807c817c, 0x8078ff78,
- 0x00000100, 0xbf0a6f7c,
- 0xbf85fff7, 0xb96f1e06,
- 0x876fc16f, 0xbf84000e,
- 0x8f6f836f, 0x806f7c6f,
- 0xbefe03c1, 0xbeff0380,
+ 0x00000400, 0xbefc0384,
0xe0304000, 0x785d0000,
+ 0xe0304100, 0x785d0100,
+ 0xe0304200, 0x785d0200,
+ 0xe0304300, 0x785d0300,
0xbf8c3f70, 0x7e008500,
- 0x807c817c, 0x8078ff78,
- 0x00000080, 0xbf0a6f7c,
- 0xbf85fff7, 0xbeff03c1,
- 0xe0304000, 0x725d0000,
- 0x8772ff79, 0x04000000,
- 0xbf840020, 0xbefe03c1,
- 0x877c8179, 0xbf06817c,
- 0xbf850002, 0xbeff0380,
- 0xbf820001, 0xbeff03c1,
- 0xb96f4306, 0x876fc16f,
- 0xbf840016, 0x8f6f866f,
- 0x8f6f826f, 0xbef6036f,
- 0xbef603ff, 0x01000000,
- 0x877c8172, 0xbf06817c,
- 0xbefc0380, 0xbf850007,
- 0x807cff7c, 0x00000080,
- 0x8078ff78, 0x00000080,
- 0xbf0a6f7c, 0xbf85fffa,
- 0xbf820006, 0x807cff7c,
- 0x00000100, 0x8078ff78,
- 0x00000100, 0xbf0a6f7c,
- 0xbf85fffa, 0x877c8179,
- 0xbf06817c, 0xbf850003,
- 0x8f7687ff, 0x0000006a,
- 0xbf820002, 0x8f7688ff,
- 0x0000006a, 0xbef603ff,
- 0x01000000, 0x877c8179,
- 0xbf06817c, 0xbf850012,
- 0xf4211cba, 0xf0000000,
+ 0x7e028501, 0x7e048502,
+ 0x7e068503, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffee,
+ 0xb96f1e06, 0x876fc16f,
+ 0xbf84000e, 0x8f6f836f,
+ 0x806f7c6f, 0xbefe03c1,
+ 0xbeff0380, 0xe0304000,
+ 0x785d0000, 0xbf8c3f70,
+ 0x7e008500, 0x807c817c,
0x8078ff78, 0x00000080,
- 0xbefc0381, 0xf421003a,
- 0xf0000000, 0x8078ff78,
- 0x00000080, 0xbf8cc07f,
- 0xbe803000, 0xbf800000,
- 0x807c817c, 0xbf0aff7c,
- 0x0000006a, 0xbf85fff5,
- 0xbe800372, 0xbf820011,
- 0xf4211cba, 0xf0000000,
- 0x8078ff78, 0x00000100,
- 0xbefc0381, 0xf421003a,
- 0xf0000000, 0x8078ff78,
- 0x00000100, 0xbf8cc07f,
- 0xbe803000, 0xbf800000,
- 0x807c817c, 0xbf0aff7c,
- 0x0000006a, 0xbf85fff5,
- 0xbe800372, 0xbef60384,
+ 0xbf0a6f7c, 0xbf85fff7,
+ 0xbeff03c1, 0xe0304000,
+ 0x725d0000, 0xe0304100,
+ 0x725d0100, 0xe0304200,
+ 0x725d0200, 0xe0304300,
+ 0x725d0300, 0xbf8c3f70,
+ 0xb9782a05, 0x80788178,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0x8f788978, 0xbf820001,
+ 0x8f788a78, 0xb9721e06,
+ 0x8f728a72, 0x80787278,
+ 0x8078ff78, 0x00000200,
+ 0x80f8ff78, 0x00000050,
+ 0xbef603ff, 0x01000000,
+ 0xbefc03ff, 0x0000006c,
+ 0x80f89078, 0xf429003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc847c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0x80f8a078, 0xf42d003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc887c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0xbe843104, 0xbe863106,
+ 0x80f8c078, 0xf431003a,
+ 0xf0000000, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe803100, 0xbe823102,
+ 0xbe843104, 0xbe863106,
+ 0xbe883108, 0xbe8a310a,
+ 0xbe8c310c, 0xbe8e310e,
+ 0xbf06807c, 0xbf84fff0,
+ 0xb9782a05, 0x80788178,
+ 0x907c9973, 0x877c817c,
+ 0xbf06817c, 0xbf850002,
+ 0x8f788978, 0xbf820001,
+ 0x8f788a78, 0xb9721e06,
+ 0x8f728a72, 0x80787278,
+ 0x8078ff78, 0x00000200,
0xbef603ff, 0x01000000,
- 0x877c8179, 0xbf06817c,
- 0xbf850025, 0xf4211bfa,
- 0xf0000000, 0x8078ff78,
- 0x00000080, 0xf4211b3a,
- 0xf0000000, 0x8078ff78,
- 0x00000080, 0xf4211b7a,
- 0xf0000000, 0x8078ff78,
- 0x00000080, 0xf4211eba,
- 0xf0000000, 0x8078ff78,
- 0x00000080, 0xf4211efa,
- 0xf0000000, 0x8078ff78,
- 0x00000080, 0xf4211c3a,
- 0xf0000000, 0x8078ff78,
- 0x00000080, 0xf4211c7a,
- 0xf0000000, 0x8078ff78,
- 0x00000080, 0xf4211cfa,
- 0xf0000000, 0x8078ff78,
- 0x00000080, 0xf4211e7a,
- 0xf0000000, 0x8078ff78,
- 0x00000080, 0xbf820024,
0xf4211bfa, 0xf0000000,
- 0x8078ff78, 0x00000100,
- 0xf4211b3a, 0xf0000000,
- 0x8078ff78, 0x00000100,
+ 0x80788478, 0xf4211b3a,
+ 0xf0000000, 0x80788478,
0xf4211b7a, 0xf0000000,
- 0x8078ff78, 0x00000100,
- 0xf4211eba, 0xf0000000,
- 0x8078ff78, 0x00000100,
+ 0x80788478, 0xf4211eba,
+ 0xf0000000, 0x80788478,
0xf4211efa, 0xf0000000,
- 0x8078ff78, 0x00000100,
- 0xf4211c3a, 0xf0000000,
- 0x8078ff78, 0x00000100,
+ 0x80788478, 0xf4211c3a,
+ 0xf0000000, 0x80788478,
0xf4211c7a, 0xf0000000,
- 0x8078ff78, 0x00000100,
+ 0x80788478, 0xf4211e7a,
+ 0xf0000000, 0x80788478,
0xf4211cfa, 0xf0000000,
- 0x8078ff78, 0x00000100,
- 0xf4211e7a, 0xf0000000,
- 0x8078ff78, 0x00000100,
- 0xbf8cc07f, 0x876dff6d,
- 0x0000ffff, 0xbefc036f,
- 0xbefe037a, 0xbeff037b,
- 0x876f71ff, 0x000003ff,
- 0xb9ef4803, 0xb9f3f816,
- 0x876f71ff, 0xfffff800,
- 0x906f8b6f, 0xb9efa2c3,
- 0xb9f9f801, 0x876fff6d,
- 0xf0000000, 0x906f9c6f,
- 0x8f6f906f, 0xbef20380,
- 0x88726f72, 0x876fff6d,
- 0x08000000, 0x906f9b6f,
- 0x8f6f8f6f, 0x88726f72,
+ 0x80788478, 0xf4211bba,
+ 0xf0000000, 0x80788478,
+ 0xbf8cc07f, 0xb9eef814,
+ 0xf4211bba, 0xf0000000,
+ 0x80788478, 0xbf8cc07f,
+ 0xb9eef815, 0xbef2036d,
+ 0x876dff72, 0x0000ffff,
+ 0xbefc036f, 0xbefe037a,
+ 0xbeff037b, 0x876f71ff,
+ 0x000003ff, 0xb9ef4803,
+ 0xb9f9f816, 0x876f71ff,
+ 0xfffff800, 0x906f8b6f,
+ 0xb9efa2c3, 0xb9f3f801,
+ 0x876fff72, 0xfc000000,
+ 0x906f9a6f, 0x8f6f906f,
+ 0xbef30380, 0x88736f73,
+ 0x876fff72, 0x02000000,
+ 0x906f996f, 0x8f6f8f6f,
+ 0x88736f73, 0x876fff72,
+ 0x01000000, 0x906f986f,
+ 0x8f6f996f, 0x88736f73,
0x876fff70, 0x00800000,
- 0x906f976f, 0xb9f2f807,
+ 0x906f976f, 0xb9f3f807,
+ 0x87fe7e7e, 0x87ea6a6a,
0xb9f0f802, 0xbf8a0000,
0xbe80226c, 0xbf810000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0xbf9f0000,
0xbf9f0000, 0x00000000,
};
+static const uint32_t cwsr_trap_arcturus_hex[] = {
+ 0xbf820001, 0xbf8202c4,
+ 0xb8f8f802, 0x89788678,
+ 0xb8eef801, 0x866eff6e,
+ 0x00000800, 0xbf840003,
+ 0x866eff78, 0x00002000,
+ 0xbf840016, 0xb8fbf803,
+ 0x866eff7b, 0x00000400,
+ 0xbf85003b, 0x866eff7b,
+ 0x00000800, 0xbf850003,
+ 0x866eff7b, 0x00000100,
+ 0xbf84000c, 0x866eff78,
+ 0x00002000, 0xbf840005,
+ 0xbf8e0010, 0xb8eef803,
+ 0x866eff6e, 0x00000400,
+ 0xbf84fffb, 0x8778ff78,
+ 0x00002000, 0x80ec886c,
+ 0x82ed806d, 0xb8eef807,
+ 0x866fff6e, 0x001f8000,
+ 0x8e6f8b6f, 0x8977ff77,
+ 0xfc000000, 0x87776f77,
+ 0x896eff6e, 0x001f8000,
+ 0xb96ef807, 0xb8faf812,
+ 0xb8fbf813, 0x8efa887a,
+ 0xc0071bbd, 0x00000000,
+ 0xbf8cc07f, 0xc0071ebd,
+ 0x00000008, 0xbf8cc07f,
+ 0x86ee6e6e, 0xbf840001,
+ 0xbe801d6e, 0xb8fbf803,
+ 0x867bff7b, 0x000001ff,
+ 0xbf850002, 0x806c846c,
+ 0x826d806d, 0x866dff6d,
+ 0x0000ffff, 0x8f6e8b77,
+ 0x866eff6e, 0x001f8000,
+ 0xb96ef807, 0x86fe7e7e,
+ 0x86ea6a6a, 0x8f6e8378,
+ 0xb96ee0c2, 0xbf800002,
+ 0xb9780002, 0xbe801f6c,
+ 0x866dff6d, 0x0000ffff,
+ 0xbefa0080, 0xb97a0283,
+ 0xb8fa2407, 0x8e7a9b7a,
+ 0x876d7a6d, 0xb8fa03c7,
+ 0x8e7a9a7a, 0x876d7a6d,
+ 0xb8faf807, 0x867aff7a,
+ 0x00007fff, 0xb97af807,
+ 0xbeee007e, 0xbeef007f,
+ 0xbefe0180, 0xbf900004,
+ 0x877a8478, 0xb97af802,
+ 0xbf8e0002, 0xbf88fffe,
+ 0xb8fa2a05, 0x807a817a,
+ 0x8e7a8a7a, 0x8e7a817a,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b867b, 0x807a7b7a,
+ 0x807a7e7a, 0x827b807f,
+ 0x867bff7b, 0x0000ffff,
+ 0xc04b1c3d, 0x00000050,
+ 0xbf8cc07f, 0xc04b1d3d,
+ 0x00000060, 0xbf8cc07f,
+ 0xc0431e7d, 0x00000074,
+ 0xbf8cc07f, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0x867aff7f,
+ 0x08000000, 0x8f7a837a,
+ 0x87777a77, 0x867aff7f,
+ 0x70000000, 0x8f7a817a,
+ 0x87777a77, 0xbef1007c,
+ 0xbef00080, 0xb8f02a05,
+ 0x80708170, 0x8e708a70,
+ 0x8e708170, 0xb8fa1605,
+ 0x807a817a, 0x8e7a867a,
+ 0x80707a70, 0xbef60084,
+ 0xbef600ff, 0x01000000,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611c7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611b3a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611b7a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611bba,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611bfa, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611e3a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xb8fbf803, 0xbefe007c,
+ 0xbefc0070, 0xc0611efa,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xbefe007c, 0xbefc0070,
+ 0xc0611a3a, 0x0000007c,
+ 0xbf8cc07f, 0x80708470,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc0070, 0xc0611a7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0xb8f1f801, 0xbefe007c,
+ 0xbefc0070, 0xc0611c7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x80708470, 0xbefc007e,
+ 0x867aff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f7a,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fb1605, 0x807b817b,
+ 0x8e7b847b, 0x8e76827b,
+ 0xbef600ff, 0x01000000,
+ 0xbef20174, 0x80747074,
+ 0x82758075, 0xbefc0080,
+ 0xbf800000, 0xbe802b00,
+ 0xbe822b02, 0xbe842b04,
+ 0xbe862b06, 0xbe882b08,
+ 0xbe8a2b0a, 0xbe8c2b0c,
+ 0xbe8e2b0e, 0xc06b003a,
+ 0x00000000, 0xbf8cc07f,
+ 0xc06b013a, 0x00000010,
+ 0xbf8cc07f, 0xc06b023a,
+ 0x00000020, 0xbf8cc07f,
+ 0xc06b033a, 0x00000030,
+ 0xbf8cc07f, 0x8074c074,
+ 0x82758075, 0x807c907c,
+ 0xbf0a7b7c, 0xbf85ffe7,
+ 0xbef40172, 0xbef00080,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xbee80080, 0xbee90080,
+ 0xbef600ff, 0x01000000,
+ 0x867aff78, 0x00400000,
+ 0xbf850003, 0xb8faf803,
+ 0x897a7aff, 0x10000000,
+ 0xbf85004d, 0xbe840080,
+ 0xd2890000, 0x00000900,
+ 0x80048104, 0xd2890001,
+ 0x00000900, 0x80048104,
+ 0xd2890002, 0x00000900,
+ 0x80048104, 0xd2890003,
+ 0x00000900, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000901, 0x80048104,
+ 0xd2890001, 0x00000901,
+ 0x80048104, 0xd2890002,
+ 0x00000901, 0x80048104,
+ 0xd2890003, 0x00000901,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000902,
+ 0x80048104, 0xd2890001,
+ 0x00000902, 0x80048104,
+ 0xd2890002, 0x00000902,
+ 0x80048104, 0xd2890003,
+ 0x00000902, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000903, 0x80048104,
+ 0xd2890001, 0x00000903,
+ 0x80048104, 0xd2890002,
+ 0x00000903, 0x80048104,
+ 0xd2890003, 0x00000903,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbf820008,
+ 0xe0724000, 0x701d0000,
+ 0xe0724100, 0x701d0100,
+ 0xe0724200, 0x701d0200,
+ 0xe0724300, 0x701d0300,
+ 0xbefe00c1, 0xbeff00c1,
+ 0xb8fb4306, 0x867bc17b,
+ 0xbf840064, 0xbf8a0000,
+ 0x867aff6f, 0x04000000,
+ 0xbf840060, 0x8e7b867b,
+ 0x8e7b827b, 0xbef6007b,
+ 0xb8f02a05, 0x80708170,
+ 0x8e708a70, 0x8e708170,
+ 0xb8fa1605, 0x807a817a,
+ 0x8e7a867a, 0x80707a70,
+ 0x8070ff70, 0x00000080,
+ 0xbef600ff, 0x01000000,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850030,
+ 0x24040682, 0xd86e4000,
+ 0x00000002, 0xbf8cc07f,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x680404ff, 0x00000200,
+ 0xd0c9006a, 0x0000f702,
+ 0xbf87ffd2, 0xbf820015,
+ 0xd1060002, 0x00011103,
+ 0x7e0602ff, 0x00000200,
+ 0xbefc00ff, 0x00010000,
+ 0xbe800077, 0x8677ff77,
+ 0xff7fffff, 0x8777ff77,
+ 0x00058000, 0xd8ec0000,
+ 0x00000002, 0xbf8cc07f,
+ 0xe0765000, 0x701d0002,
+ 0x68040702, 0xd0c9006a,
+ 0x0000f702, 0xbf87fff7,
+ 0xbef70000, 0xbef000ff,
+ 0x00000400, 0xbefe00c1,
+ 0xbeff00c1, 0xb8fb2a05,
+ 0x807b817b, 0x8e7b827b,
+ 0x8e76887b, 0xbef600ff,
+ 0x01000000, 0xbefc0084,
+ 0xbf0a7b7c, 0xbf84006d,
+ 0xbf11017c, 0x807bff7b,
+ 0x00001000, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850051,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffb1, 0xbf9c0000,
+ 0xbf820012, 0x7e000300,
+ 0x7e020301, 0x7e040302,
+ 0x7e060303, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0x807c847c,
+ 0x8070ff70, 0x00000400,
+ 0xbf0a7b7c, 0xbf85ffef,
+ 0xbf9c0000, 0xbefc0080,
+ 0xbf11017c, 0x867aff78,
+ 0x00400000, 0xbf850003,
+ 0xb8faf803, 0x897a7aff,
+ 0x10000000, 0xbf850059,
+ 0xd3d84000, 0x18000100,
+ 0xd3d84001, 0x18000101,
+ 0xd3d84002, 0x18000102,
+ 0xd3d84003, 0x18000103,
+ 0xbe840080, 0xd2890000,
+ 0x00000900, 0x80048104,
+ 0xd2890001, 0x00000900,
+ 0x80048104, 0xd2890002,
+ 0x00000900, 0x80048104,
+ 0xd2890003, 0x00000900,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000901,
+ 0x80048104, 0xd2890001,
+ 0x00000901, 0x80048104,
+ 0xd2890002, 0x00000901,
+ 0x80048104, 0xd2890003,
+ 0x00000901, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0xbe840080, 0xd2890000,
+ 0x00000902, 0x80048104,
+ 0xd2890001, 0x00000902,
+ 0x80048104, 0xd2890002,
+ 0x00000902, 0x80048104,
+ 0xd2890003, 0x00000902,
+ 0x80048104, 0xc069003a,
+ 0x00000070, 0xbf8cc07f,
+ 0x80709070, 0xbf06c004,
+ 0xbf84ffee, 0xbe840080,
+ 0xd2890000, 0x00000903,
+ 0x80048104, 0xd2890001,
+ 0x00000903, 0x80048104,
+ 0xd2890002, 0x00000903,
+ 0x80048104, 0xd2890003,
+ 0x00000903, 0x80048104,
+ 0xc069003a, 0x00000070,
+ 0xbf8cc07f, 0x80709070,
+ 0xbf06c004, 0xbf84ffee,
+ 0x807c847c, 0xbf0a7b7c,
+ 0xbf85ffa9, 0xbf9c0000,
+ 0xbf820016, 0xd3d84000,
+ 0x18000100, 0xd3d84001,
+ 0x18000101, 0xd3d84002,
+ 0x18000102, 0xd3d84003,
+ 0x18000103, 0xe0724000,
+ 0x701d0000, 0xe0724100,
+ 0x701d0100, 0xe0724200,
+ 0x701d0200, 0xe0724300,
+ 0x701d0300, 0x807c847c,
+ 0x8070ff70, 0x00000400,
+ 0xbf0a7b7c, 0xbf85ffeb,
+ 0xbf9c0000, 0xbf820106,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x866eff7f, 0x08000000,
+ 0x8f6e836e, 0x87776e77,
+ 0x866eff7f, 0x70000000,
+ 0x8f6e816e, 0x87776e77,
+ 0x866eff7f, 0x04000000,
+ 0xbf84001f, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef4306,
+ 0x866fc16f, 0xbf84001a,
+ 0x8e6f866f, 0x8e6f826f,
+ 0xbef6006f, 0xb8f82a05,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x8078ff78,
+ 0x00000080, 0xbef600ff,
+ 0x01000000, 0xbefc0080,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0x807cff7c, 0x00000200,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85fff6,
+ 0xbef80080, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef2a05,
+ 0x806f816f, 0x8e6f826f,
+ 0x8e76886f, 0xbef90076,
+ 0xbef600ff, 0x01000000,
+ 0xbeee0078, 0x8078ff78,
+ 0x00000400, 0xbef30079,
+ 0x8079ff79, 0x00000400,
+ 0xbefc0084, 0xbf11087c,
+ 0x806fff6f, 0x00008000,
+ 0xe0524000, 0x791d0000,
+ 0xe0524100, 0x791d0100,
+ 0xe0524200, 0x791d0200,
+ 0xe0524300, 0x791d0300,
+ 0x8079ff79, 0x00000400,
+ 0xbf8c0f70, 0xd3d94000,
+ 0x18000100, 0xd3d94001,
+ 0x18000101, 0xd3d94002,
+ 0x18000102, 0xd3d94003,
+ 0x18000103, 0xe0524000,
+ 0x781d0000, 0xe0524100,
+ 0x781d0100, 0xe0524200,
+ 0x781d0200, 0xe0524300,
+ 0x781d0300, 0xbf8c0f70,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
+ 0x807c847c, 0x8078ff78,
+ 0x00000400, 0xbf0a6f7c,
+ 0xbf85ffdb, 0xbf9c0000,
+ 0xe0524000, 0x731d0000,
+ 0xe0524100, 0x731d0100,
+ 0xe0524200, 0x731d0200,
+ 0xe0524300, 0x731d0300,
+ 0xbf8c0f70, 0xd3d94000,
+ 0x18000100, 0xd3d94001,
+ 0x18000101, 0xd3d94002,
+ 0x18000102, 0xd3d94003,
+ 0x18000103, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xb8f82a05,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x80f8c078,
+ 0xb8ef1605, 0x806f816f,
+ 0x8e6f846f, 0x8e76826f,
+ 0xbef600ff, 0x01000000,
+ 0xbefc006f, 0xc031003a,
+ 0x00000078, 0x80f8c078,
+ 0xbf8cc07f, 0x80fc907c,
+ 0xbf800000, 0xbe802d00,
+ 0xbe822d02, 0xbe842d04,
+ 0xbe862d06, 0xbe882d08,
+ 0xbe8a2d0a, 0xbe8c2d0c,
+ 0xbe8e2d0e, 0xbf06807c,
+ 0xbf84fff0, 0xb8f82a05,
+ 0x80788178, 0x8e788a78,
+ 0x8e788178, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0xbef60084,
+ 0xbef600ff, 0x01000000,
+ 0xc0211bfa, 0x00000078,
+ 0x80788478, 0xc0211b3a,
+ 0x00000078, 0x80788478,
+ 0xc0211b7a, 0x00000078,
+ 0x80788478, 0xc0211c3a,
+ 0x00000078, 0x80788478,
+ 0xc0211c7a, 0x00000078,
+ 0x80788478, 0xc0211eba,
+ 0x00000078, 0x80788478,
+ 0xc0211efa, 0x00000078,
+ 0x80788478, 0xc0211a3a,
+ 0x00000078, 0x80788478,
+ 0xc0211a7a, 0x00000078,
+ 0x80788478, 0xc0211cfa,
+ 0x00000078, 0x80788478,
+ 0xbf8cc07f, 0xbefc006f,
+ 0xbefe0070, 0xbeff0071,
+ 0x866f7bff, 0x000003ff,
+ 0xb96f4803, 0x866f7bff,
+ 0xfffff800, 0x8f6f8b6f,
+ 0xb96fa2c3, 0xb973f801,
+ 0xb8ee2a05, 0x806e816e,
+ 0x8e6e8a6e, 0x8e6e816e,
+ 0xb8ef1605, 0x806f816f,
+ 0x8e6f866f, 0x806e6f6e,
+ 0x806e746e, 0x826f8075,
+ 0x866fff6f, 0x0000ffff,
+ 0xc00b1c37, 0x00000050,
+ 0xc00b1d37, 0x00000060,
+ 0xc0031e77, 0x00000074,
+ 0xbf8cc07f, 0x866fff6d,
+ 0xf8000000, 0x8f6f9b6f,
+ 0x8e6f906f, 0xbeee0080,
+ 0x876e6f6e, 0x866fff6d,
+ 0x04000000, 0x8f6f9a6f,
+ 0x8e6f8f6f, 0x876e6f6e,
+ 0x866fff7a, 0x00800000,
+ 0x8f6f976f, 0xb96ef807,
+ 0x866dff6d, 0x0000ffff,
+ 0x86fe7e7e, 0x86ea6a6a,
+ 0x8f6e837a, 0xb96ee0c2,
+ 0xbf800002, 0xb97a0002,
+ 0xbf8a0000, 0x95806f6c,
+ 0xbf810000, 0x00000000,
+};
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
index f20e463e748b..4433bda2ce25 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm
@@ -20,1105 +20,948 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
+var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
+var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
+var SQ_WAVE_STATUS_HALT_MASK = 0x2000
+
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
+var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8
+var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 6
+var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT = 24
+var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE = 4
+var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT = 24
+var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE = 4
+var SQ_WAVE_IB_STS2_WAVE64_SHIFT = 11
+var SQ_WAVE_IB_STS2_WAVE64_SIZE = 1
+
+var SQ_WAVE_TRAPSTS_SAVECTX_MASK = 0x400
+var SQ_WAVE_TRAPSTS_EXCE_MASK = 0x1FF
+var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT = 10
+var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK = 0x100
+var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT = 8
+var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK = 0x3FF
+var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT = 0x0
+var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE = 10
+var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK = 0xFFFFF800
+var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11
+var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21
+var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK = 0x800
+
+var SQ_WAVE_IB_STS_RCNT_SHIFT = 16
+var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15
+var SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT = 25
+var SQ_WAVE_IB_STS_REPLAY_W64H_SIZE = 1
+var SQ_WAVE_IB_STS_REPLAY_W64H_MASK = 0x02000000
+var SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE = 1
+var SQ_WAVE_IB_STS_RCNT_SIZE = 6
+var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK = 0x003F8000
+var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF
+
+var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 24
+var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27
+
+// bits [31:24] unused by SPI debug data
+var TTMP11_SAVE_REPLAY_W64H_SHIFT = 31
+var TTMP11_SAVE_REPLAY_W64H_MASK = 0x80000000
+var TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT = 24
+var TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK = 0x7F000000
+
+// SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14]
+// when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE
+var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000
+var S_SAVE_BUF_RSRC_WORD3_MISC = 0x10807FAC
+
+var S_SAVE_SPI_INIT_ATC_MASK = 0x08000000
+var S_SAVE_SPI_INIT_ATC_SHIFT = 27
+var S_SAVE_SPI_INIT_MTYPE_MASK = 0x70000000
+var S_SAVE_SPI_INIT_MTYPE_SHIFT = 28
+var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000
+var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26
+
+var S_SAVE_PC_HI_RCNT_SHIFT = 26
+var S_SAVE_PC_HI_RCNT_MASK = 0xFC000000
+var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT = 25
+var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x02000000
+var S_SAVE_PC_HI_REPLAY_W64H_SHIFT = 24
+var S_SAVE_PC_HI_REPLAY_W64H_MASK = 0x01000000
+
+var s_sgpr_save_num = 108
+
+var s_save_spi_init_lo = exec_lo
+var s_save_spi_init_hi = exec_hi
+var s_save_pc_lo = ttmp0
+var s_save_pc_hi = ttmp1
+var s_save_exec_lo = ttmp2
+var s_save_exec_hi = ttmp3
+var s_save_status = ttmp12
+var s_save_trapsts = ttmp5
+var s_save_xnack_mask = ttmp6
+var s_wave_size = ttmp7
+var s_save_buf_rsrc0 = ttmp8
+var s_save_buf_rsrc1 = ttmp9
+var s_save_buf_rsrc2 = ttmp10
+var s_save_buf_rsrc3 = ttmp11
+var s_save_mem_offset = ttmp14
+var s_save_alloc_size = s_save_trapsts
+var s_save_tmp = s_save_buf_rsrc2
+var s_save_m0 = ttmp15
+
+var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE
+var S_RESTORE_BUF_RSRC_WORD3_MISC = S_SAVE_BUF_RSRC_WORD3_MISC
+
+var S_RESTORE_SPI_INIT_ATC_MASK = 0x08000000
+var S_RESTORE_SPI_INIT_ATC_SHIFT = 27
+var S_RESTORE_SPI_INIT_MTYPE_MASK = 0x70000000
+var S_RESTORE_SPI_INIT_MTYPE_SHIFT = 28
+var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000
+var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT = 26
+var S_WAVE_SIZE = 25
+
+var S_RESTORE_PC_HI_RCNT_SHIFT = S_SAVE_PC_HI_RCNT_SHIFT
+var S_RESTORE_PC_HI_RCNT_MASK = S_SAVE_PC_HI_RCNT_MASK
+var S_RESTORE_PC_HI_FIRST_REPLAY_SHIFT = S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
+var S_RESTORE_PC_HI_FIRST_REPLAY_MASK = S_SAVE_PC_HI_FIRST_REPLAY_MASK
+
+var s_restore_spi_init_lo = exec_lo
+var s_restore_spi_init_hi = exec_hi
+var s_restore_mem_offset = ttmp12
+var s_restore_alloc_size = ttmp3
+var s_restore_tmp = ttmp6
+var s_restore_mem_offset_save = s_restore_tmp
+var s_restore_m0 = s_restore_alloc_size
+var s_restore_mode = ttmp7
+var s_restore_flat_scratch = ttmp2
+var s_restore_pc_lo = ttmp0
+var s_restore_pc_hi = ttmp1
+var s_restore_exec_lo = ttmp14
+var s_restore_exec_hi = ttmp15
+var s_restore_status = ttmp4
+var s_restore_trapsts = ttmp5
+var s_restore_xnack_mask = ttmp13
+var s_restore_buf_rsrc0 = ttmp8
+var s_restore_buf_rsrc1 = ttmp9
+var s_restore_buf_rsrc2 = ttmp10
+var s_restore_buf_rsrc3 = ttmp11
+var s_restore_size = ttmp7
shader main
+ asic(DEFAULT)
+ type(CS)
+ wave_size(32)
-asic(DEFAULT)
-
-type(CS)
-
-wave_size(32)
-/*************************************************************************/
-/* control on how to run the shader */
-/*************************************************************************/
-//any hack that needs to be made to run this code in EMU (either becasue various EMU code are not ready or no compute save & restore in EMU run)
-var EMU_RUN_HACK = 0
-var EMU_RUN_HACK_RESTORE_NORMAL = 0
-var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0
-var EMU_RUN_HACK_SAVE_SINGLE_WAVE = 0
-var EMU_RUN_HACK_SAVE_FIRST_TIME = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK
-var SAVE_LDS = 0
-var WG_BASE_ADDR_LO = 0x9000a000
-var WG_BASE_ADDR_HI = 0x0
-var WAVE_SPACE = 0x9000 //memory size that each wave occupies in workgroup state mem, increase from 5000 to 9000 for more SGPR need to be saved
-var CTX_SAVE_CONTROL = 0x0
-var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL
-var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either becasue various RTL code are not ready or no compute save & restore in RTL run)
-var SGPR_SAVE_USE_SQC = 0 //use SQC D$ to do the write
-var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //need to change BUF_DATA_FORMAT in S_SAVE_BUF_RSRC_WORD3_MISC from 0 to BUF_DATA_FORMAT_32 if set to 1 (i.e. 0x00827FAC)
-var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing
-var SAVE_RESTORE_HWID_DDID = 0
-var RESTORE_DDID_IN_SGPR18 = 0
-/**************************************************************************/
-/* variables */
-/**************************************************************************/
-var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
-var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
-var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
-
-var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
-var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
-var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT = 8
-var SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE = 6
-var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT = 24
-var SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE = 4 //FIXME sq.blk still has 4 bits at this time while SQ programming guide has 3 bits
-var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT = 24
-var SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE = 4
-var SQ_WAVE_IB_STS2_WAVE64_SHIFT = 11
-var SQ_WAVE_IB_STS2_WAVE64_SIZE = 1
-
-var SQ_WAVE_TRAPSTS_SAVECTX_MASK = 0x400
-var SQ_WAVE_TRAPSTS_EXCE_MASK = 0x1FF // Exception mask
-var SQ_WAVE_TRAPSTS_SAVECTX_SHIFT = 10
-var SQ_WAVE_TRAPSTS_MEM_VIOL_MASK = 0x100
-var SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT = 8
-var SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK = 0x3FF
-var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT = 0x0
-var SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE = 10
-var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK = 0xFFFFF800
-var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11
-var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21
-
-var SQ_WAVE_IB_STS_RCNT_SHIFT = 16 //FIXME
-var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15 //FIXME
-var SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE = 1 //FIXME
-var SQ_WAVE_IB_STS_RCNT_SIZE = 6 //FIXME
-var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF //FIXME
-
-var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 24
-var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27
-
-
-/* Save */
-var S_SAVE_BUF_RSRC_WORD1_STRIDE = 0x00040000 //stride is 4 bytes
-var S_SAVE_BUF_RSRC_WORD3_MISC = 0x00807FAC //SQ_SEL_X/Y/Z/W, BUF_NUM_FORMAT_FLOAT, (0 for MUBUF stride[17:14] when ADD_TID_ENABLE and BUF_DATA_FORMAT_32 for MTBUF), ADD_TID_ENABLE
-
-var S_SAVE_SPI_INIT_ATC_MASK = 0x08000000 //bit[27]: ATC bit
-var S_SAVE_SPI_INIT_ATC_SHIFT = 27
-var S_SAVE_SPI_INIT_MTYPE_MASK = 0x70000000 //bit[30:28]: Mtype
-var S_SAVE_SPI_INIT_MTYPE_SHIFT = 28
-var S_SAVE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG
-var S_SAVE_SPI_INIT_FIRST_WAVE_SHIFT = 26
-
-var S_SAVE_PC_HI_RCNT_SHIFT = 28 //FIXME check with Brian to ensure all fields other than PC[47:0] can be used
-var S_SAVE_PC_HI_RCNT_MASK = 0xF0000000 //FIXME
-var S_SAVE_PC_HI_FIRST_REPLAY_SHIFT = 27 //FIXME
-var S_SAVE_PC_HI_FIRST_REPLAY_MASK = 0x08000000 //FIXME
-
-var s_save_spi_init_lo = exec_lo
-var s_save_spi_init_hi = exec_hi
-
-var s_save_pc_lo = ttmp0 //{TTMP1, TTMP0} = {3'h0,pc_rewind[3:0], HT[0],trapID[7:0], PC[47:0]}
-var s_save_pc_hi = ttmp1
-var s_save_exec_lo = ttmp2
-var s_save_exec_hi = ttmp3
-var s_save_status = ttmp4
-var s_save_trapsts = ttmp5 //not really used until the end of the SAVE routine
-var s_wave_size = ttmp6 //ttmp6 is not needed now, since it's only 32bit xnack mask, now use it to determine wave32 or wave64 in EMU_HACK
-var s_save_xnack_mask = ttmp7
-var s_save_buf_rsrc0 = ttmp8
-var s_save_buf_rsrc1 = ttmp9
-var s_save_buf_rsrc2 = ttmp10
-var s_save_buf_rsrc3 = ttmp11
-
-var s_save_mem_offset = ttmp14
-var s_sgpr_save_num = 106 //in gfx10, all sgpr must be saved
-var s_save_alloc_size = s_save_trapsts //conflict
-var s_save_tmp = s_save_buf_rsrc2 //shared with s_save_buf_rsrc2 (conflict: should not use mem access with s_save_tmp at the same time)
-var s_save_m0 = ttmp15
-
-/* Restore */
-var S_RESTORE_BUF_RSRC_WORD1_STRIDE = S_SAVE_BUF_RSRC_WORD1_STRIDE
-var S_RESTORE_BUF_RSRC_WORD3_MISC = S_SAVE_BUF_RSRC_WORD3_MISC
-
-var S_RESTORE_SPI_INIT_ATC_MASK = 0x08000000 //bit[27]: ATC bit
-var S_RESTORE_SPI_INIT_ATC_SHIFT = 27
-var S_RESTORE_SPI_INIT_MTYPE_MASK = 0x70000000 //bit[30:28]: Mtype
-var S_RESTORE_SPI_INIT_MTYPE_SHIFT = 28
-var S_RESTORE_SPI_INIT_FIRST_WAVE_MASK = 0x04000000 //bit[26]: FirstWaveInTG
-var S_RESTORE_SPI_INIT_FIRST_WAVE_SHIFT = 26
-
-var S_RESTORE_PC_HI_RCNT_SHIFT = S_SAVE_PC_HI_RCNT_SHIFT
-var S_RESTORE_PC_HI_RCNT_MASK = S_SAVE_PC_HI_RCNT_MASK
-var S_RESTORE_PC_HI_FIRST_REPLAY_SHIFT = S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
-var S_RESTORE_PC_HI_FIRST_REPLAY_MASK = S_SAVE_PC_HI_FIRST_REPLAY_MASK
-
-var s_restore_spi_init_lo = exec_lo
-var s_restore_spi_init_hi = exec_hi
-
-var s_restore_mem_offset = ttmp12
-var s_restore_alloc_size = ttmp3
-var s_restore_tmp = ttmp6
-var s_restore_mem_offset_save = s_restore_tmp //no conflict
-
-var s_restore_m0 = s_restore_alloc_size //no conflict
-
-var s_restore_mode = ttmp13
-var s_restore_hwid1 = ttmp2
-var s_restore_ddid = s_restore_hwid1
-var s_restore_pc_lo = ttmp0
-var s_restore_pc_hi = ttmp1
-var s_restore_exec_lo = ttmp14
-var s_restore_exec_hi = ttmp15
-var s_restore_status = ttmp4
-var s_restore_trapsts = ttmp5
-//var s_restore_xnack_mask_lo = xnack_mask_lo
-//var s_restore_xnack_mask_hi = xnack_mask_hi
-var s_restore_xnack_mask = ttmp7
-var s_restore_buf_rsrc0 = ttmp8
-var s_restore_buf_rsrc1 = ttmp9
-var s_restore_buf_rsrc2 = ttmp10
-var s_restore_buf_rsrc3 = ttmp11
-var s_restore_size = ttmp13 //ttmp13 has no conflict
-
-/**************************************************************************/
-/* trap handler entry points */
-/**************************************************************************/
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) //hack to use trap_id for determining save/restore
- //FIXME VCCZ un-init assertion s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
- s_and_b32 s_save_tmp, s_save_pc_hi, 0xffff0000 //change SCC
- s_cmp_eq_u32 s_save_tmp, 0x007e0000 //Save: trap_id = 0x7e. Restore: trap_id = 0x7f.
- s_cbranch_scc0 L_JUMP_TO_RESTORE //do not need to recover STATUS here since we are going to RESTORE
- //FIXME s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //need to recover STATUS since we are going to SAVE
- s_branch L_SKIP_RESTORE //NOT restore, SAVE actually
- else
- s_branch L_SKIP_RESTORE //NOT restore. might be a regular trap or save
- end
+ s_branch L_SKIP_RESTORE //NOT restore. might be a regular trap or save
L_JUMP_TO_RESTORE:
- s_branch L_RESTORE //restore
+ s_branch L_RESTORE
L_SKIP_RESTORE:
-
- s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
- s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK //check whether this is for save
- s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save
- s_cbranch_scc1 L_SAVE //this is the operation for save
-
- // ********* Handle non-CWSR traps *******************
- if (!EMU_RUN_HACK)
- s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCE_MASK // Check whether it is an exception
- s_cbranch_scc1 L_EXCP_CASE // Exception, jump back to the shader program directly.
- s_add_u32 ttmp0, ttmp0, 4 // S_TRAP case, add 4 to ttmp0
-
- L_EXCP_CASE:
- s_and_b32 ttmp1, ttmp1, 0xFFFF
- s_rfe_b64 [ttmp0, ttmp1]
- end
- // ********* End handling of non-CWSR traps *******************
-
-/**************************************************************************/
-/* save routine */
-/**************************************************************************/
-
-L_SAVE:
-
+ s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
+ s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save
+ s_cbranch_scc1 L_SAVE
+
+ // If STATUS.MEM_VIOL is asserted then halt the wave to prevent
+	// the exception from being raised again and blocking the context save.
+ s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
+ s_cbranch_scc0 L_FETCH_2ND_TRAP
+ s_or_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+
+L_FETCH_2ND_TRAP:
+ // Preserve and clear scalar XNACK state before issuing scalar loads.
+ // Save IB_STS.REPLAY_W64H[25], RCNT[21:16], FIRST_REPLAY[15] into
+ // unused space ttmp11[31:24].
+ s_andn2_b32 ttmp11, ttmp11, (TTMP11_SAVE_REPLAY_W64H_MASK | TTMP11_SAVE_RCNT_FIRST_REPLAY_MASK)
+ s_getreg_b32 ttmp2, hwreg(HW_REG_IB_STS)
+ s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_REPLAY_W64H_MASK
+ s_lshl_b32 ttmp3, ttmp3, (TTMP11_SAVE_REPLAY_W64H_SHIFT - SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT)
+ s_or_b32 ttmp11, ttmp11, ttmp3
+ s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+ s_lshl_b32 ttmp3, ttmp3, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+ s_or_b32 ttmp11, ttmp11, ttmp3
+ s_andn2_b32 ttmp2, ttmp2, (SQ_WAVE_IB_STS_REPLAY_W64H_MASK | SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK)
+ s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2
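
The bit shuffle above is easy to misread, so here is a minimal C sketch of the same packing, assuming only what the comment states: REPLAY_W64H sits at IB_STS bit 25 and lands in ttmp11 bit 31, and the 7-bit RCNT|FIRST_REPLAY group at IB_STS bits 21:15 lands in ttmp11 bits 30:24. The constants below are derived from that comment, not quoted from the file.

    #include <stdint.h>
    #include <stdio.h>

    #define IB_STS_REPLAY_W64H_BIT   25
    #define IB_STS_FIRST_REPLAY_BIT  15   /* RCNT occupies bits 21:16 above it */
    #define TTMP11_SAVE_SHIFT        24   /* saved copy lives in ttmp11[31:24] */

    static uint32_t pack_ib_sts(uint32_t ttmp11, uint32_t ib_sts)
    {
            uint32_t w64h    = (ib_sts >> IB_STS_REPLAY_W64H_BIT) & 0x1;
            uint32_t rcnt_fr = (ib_sts >> IB_STS_FIRST_REPLAY_BIT) & 0x7f;

            ttmp11 &= 0x00ffffff;           /* clear the ttmp11[31:24] save space */
            return ttmp11 | (w64h << 31) | (rcnt_fr << TTMP11_SAVE_SHIFT);
    }

    int main(void)
    {
            /* REPLAY_W64H set, RCNT = 0x2a, FIRST_REPLAY set */
            uint32_t ib_sts = (1u << 25) | (0x2au << 16) | (1u << 15);

            printf("ttmp11 = 0x%08x\n", pack_ib_sts(0, ib_sts));
            return 0;
    }
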
+
+ // Read second-level TBA/TMA from first-level TMA and jump if available.
+ // ttmp[2:5] and ttmp12 can be used (others hold SPI-initialized debug data)
+ // ttmp12 holds SQ_WAVE_STATUS
+ s_getreg_b32 ttmp14, hwreg(HW_REG_SHADER_TMA_LO)
+ s_getreg_b32 ttmp15, hwreg(HW_REG_SHADER_TMA_HI)
+	s_lshl_b64	[ttmp14, ttmp15], [ttmp14, ttmp15], 0x8	// TMA registers hold the address >> 8
+ s_load_dwordx2 [ttmp2, ttmp3], [ttmp14, ttmp15], 0x0 glc:1 // second-level TBA
+ s_waitcnt lgkmcnt(0)
+ s_load_dwordx2 [ttmp14, ttmp15], [ttmp14, ttmp15], 0x8 glc:1 // second-level TMA
+ s_waitcnt lgkmcnt(0)
+	s_and_b64	[ttmp2, ttmp3], [ttmp2, ttmp3], [ttmp2, ttmp3]	// SCC = (second-level TBA != 0)
+	s_cbranch_scc0	L_NO_NEXT_TRAP // second-level trap handler has not been set
+ s_setpc_b64 [ttmp2, ttmp3] // jump to second-level trap handler
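
In C terms the dispatch above amounts to the following. The struct layout is an assumption implied by the two loads: the first-level TMA (whose registers hold the address shifted right by 8) points at two little-endian qwords, the second-level TBA at offset 0x0 and the second-level TMA at offset 0x8, with a zero TBA meaning no handler is installed.

    #include <stdint.h>
    #include <stdio.h>

    struct tma_block {                      /* layout implied by the two loads above */
            uint64_t second_level_tba;      /* handler entry PC; 0 = not installed */
            uint64_t second_level_tma;      /* TMA handed to that handler */
    };

    static uint64_t trap_dispatch(const struct tma_block *tma, uint64_t no_next_trap_pc)
    {
            /* s_and_b64 + s_cbranch_scc0: take the jump only on a non-zero TBA */
            return tma->second_level_tba ? tma->second_level_tba : no_next_trap_pc;
    }

    int main(void)
    {
            struct tma_block blk = { .second_level_tba = 0x800000 };

            printf("next pc = %#llx\n",
                   (unsigned long long)trap_dispatch(&blk, 0 /* L_NO_NEXT_TRAP */));
            return 0;
    }
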
+
+L_NO_NEXT_TRAP:
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_EXCE_MASK
+ s_cbranch_scc1 L_EXCP_CASE // Exception, jump back to the shader program directly.
+ s_add_u32 ttmp0, ttmp0, 4 // S_TRAP case, add 4 to ttmp0
+ s_addc_u32 ttmp1, ttmp1, 0
+L_EXCP_CASE:
+ s_and_b32 ttmp1, ttmp1, 0xFFFF
+
+ // Restore SQ_WAVE_IB_STS.
+ s_lshr_b32 ttmp2, ttmp11, (TTMP11_SAVE_RCNT_FIRST_REPLAY_SHIFT - SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT)
+ s_and_b32 ttmp3, ttmp2, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK
+ s_lshr_b32 ttmp2, ttmp11, (TTMP11_SAVE_REPLAY_W64H_SHIFT - SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT)
+ s_and_b32 ttmp2, ttmp2, SQ_WAVE_IB_STS_REPLAY_W64H_MASK
+ s_or_b32 ttmp2, ttmp2, ttmp3
+ s_setreg_b32 hwreg(HW_REG_IB_STS), ttmp2
+
+ // Restore SQ_WAVE_STATUS.
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+ s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status
+
+ s_rfe_b64 [ttmp0, ttmp1]
+
+L_SAVE:
//check whether there is mem_viol
- s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ s_and_b32 s_save_trapsts, s_save_trapsts, SQ_WAVE_TRAPSTS_MEM_VIOL_MASK
s_cbranch_scc0 L_NO_PC_REWIND
-
+
 //if so, rewind the PC, assuming a GDS operation got NACKed
- s_mov_b32 s_save_tmp, 0 //clear mem_viol bit
- s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT, 1), s_save_tmp //clear mem_viol bit
- s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
- s_sub_u32 s_save_pc_lo, s_save_pc_lo, 8 //pc[31:0]-8
- s_subb_u32 s_save_pc_hi, s_save_pc_hi, 0x0 // -scc
+ s_mov_b32 s_save_tmp, 0
+ s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_MEM_VIOL_SHIFT, 1), s_save_tmp //clear mem_viol bit
+ s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
+ s_sub_u32 s_save_pc_lo, s_save_pc_lo, 8 //pc[31:0]-8
+ s_subb_u32 s_save_pc_hi, s_save_pc_hi, 0x0
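
Both this mem_viol rewind and the S_TRAP adjustment above are 48-bit arithmetic done in two words with an explicit carry/borrow, since the PC lives in a full 32-bit low register plus the low 16 bits of a second one. A minimal sketch of that two-word add/sub:

    #include <stdint.h>
    #include <stdio.h>

    static void pc_add(uint32_t *lo, uint32_t *hi, int32_t delta)
    {
            uint32_t old = *lo;

            *lo += (uint32_t)delta;
            if (delta >= 0 && *lo < old)
                    *hi += 1;               /* s_addc_u32 hi, hi, 0 */
            else if (delta < 0 && *lo > old)
                    *hi -= 1;               /* s_subb_u32 hi, hi, 0 */
            *hi &= 0xffff;                  /* the PC is only 48 bits wide */
    }

    int main(void)
    {
            uint32_t lo = 0x00000004, hi = 0x0001;

            pc_add(&lo, &hi, -8);                   /* the GDS-NACK rewind above */
            printf("pc = 0x%04x%08x\n", hi, lo);    /* 0x0000fffffffc */
            return 0;
    }
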
L_NO_PC_REWIND:
- s_mov_b32 s_save_tmp, 0 //clear saveCtx bit
- s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit
-
- //s_mov_b32 s_save_xnack_mask_lo, xnack_mask_lo //save XNACK_MASK
- //s_mov_b32 s_save_xnack_mask_hi, xnack_mask_hi
- s_getreg_b32 s_save_xnack_mask, hwreg(HW_REG_SHADER_XNACK_MASK)
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE) //save RCNT
- s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT
- s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT, SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE) //save FIRST_REPLAY
- s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
- s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
- s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS) //clear RCNT and FIRST_REPLAY in IB_STS
- s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG
+ s_mov_b32 s_save_tmp, 0
+ s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit
+
+ s_getreg_b32 s_save_xnack_mask, hwreg(HW_REG_SHADER_XNACK_MASK)
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_RCNT_SHIFT, SQ_WAVE_IB_STS_RCNT_SIZE)
+ s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_RCNT_SHIFT
+ s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT, SQ_WAVE_IB_STS_FIRST_REPLAY_SIZE)
+ s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
+ s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS, SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT, SQ_WAVE_IB_STS_REPLAY_W64H_SIZE)
+ s_lshl_b32 s_save_tmp, s_save_tmp, S_SAVE_PC_HI_REPLAY_W64H_SHIFT
+ s_or_b32 s_save_pc_hi, s_save_pc_hi, s_save_tmp
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_IB_STS) //clear RCNT and FIRST_REPLAY and REPLAY_W64H in IB_STS
+ s_and_b32 s_save_tmp, s_save_tmp, SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG
s_setreg_b32 hwreg(HW_REG_IB_STS), s_save_tmp
-
- /* inform SPI the readiness and wait for SPI's go signal */
- s_mov_b32 s_save_exec_lo, exec_lo //save EXEC and use EXEC for the go signal from SPI
- s_mov_b32 s_save_exec_hi, exec_hi
- s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive
- if (EMU_RUN_HACK)
-
- else
- s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC
- end
-
- L_SLEEP:
- s_sleep 0x2
-
- if (EMU_RUN_HACK)
-
- else
- s_cbranch_execz L_SLEEP
- end
-
-
- /* setup Resource Contants */
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE))
- //calculate wd_addr using absolute thread id
- v_readlane_b32 s_save_tmp, v9, 0
- //determine it is wave32 or wave64
- s_getreg_b32 s_wave_size, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE)
- s_cmp_eq_u32 s_wave_size, 0
- s_cbranch_scc1 L_SAVE_WAVE32
- s_lshr_b32 s_save_tmp, s_save_tmp, 6 //SAVE WAVE64
- s_branch L_SAVE_CON
- L_SAVE_WAVE32:
- s_lshr_b32 s_save_tmp, s_save_tmp, 5 //SAVE WAVE32
- L_SAVE_CON:
- s_mul_i32 s_save_tmp, s_save_tmp, WAVE_SPACE
- s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
- s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
- s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
- else
- end
- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_SINGLE_WAVE))
- s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
- s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
- s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
- else
- end
-
-
- s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo
- s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi
- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
- s_mov_b32 s_save_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes) although not neccessarily inited
- s_mov_b32 s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC
- s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_ATC_MASK
- s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) //get ATC bit into position
- s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or ATC
- s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_MTYPE_MASK
- s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) //get MTYPE bits into position
- s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or MTYPE
-
- s_mov_b32 s_save_m0, m0 //save M0
-
- /* global mem offset */
- s_mov_b32 s_save_mem_offset, 0x0 //mem offset initial value = 0
- s_getreg_b32 s_wave_size, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE) //get wave_save_size
- s_or_b32 s_wave_size, s_save_spi_init_hi, s_wave_size //share s_wave_size with exec_hi
-
- /* save VGPRs */
- //////////////////////////////
- L_SAVE_VGPR:
-
- s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
- s_and_b32 m0, s_wave_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_ENABLE_SAVE_VGPR_EXEC_HI
- s_mov_b32 exec_hi, 0x00000000
- s_branch L_SAVE_VGPR_NORMAL
- L_ENABLE_SAVE_VGPR_EXEC_HI:
- s_mov_b32 exec_hi, 0xFFFFFFFF
- L_SAVE_VGPR_NORMAL:
- s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size
- //for wave32 and wave64, the num of vgpr function is the same?
- s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
- s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) //FIXME for GFX, zero is possible
- //determine it is wave32 or wave64
- s_and_b32 m0, s_wave_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_SAVE_VGPR_WAVE64
-
- //zhenxu added it for save vgpr for wave32
- s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 7 //NUM_RECORDS in bytes (32 threads*4)
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- s_mov_b32 m0, 0x0 //VGPR initial index value =0
- //s_set_gpr_idx_on m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1
- //s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 //add 0x1000 since we compare m0 against it later, doesn't need this in gfx10
-
- L_SAVE_VGPR_WAVE32_LOOP:
- v_movrels_b32 v0, v0 //v0 = v[0+m0]
-
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
- buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
- end
-
- s_add_u32 m0, m0, 1 //next vgpr index
- s_add_u32 s_save_mem_offset, s_save_mem_offset, 128 //every buffer_store_dword does 128 bytes
- s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_SAVE_VGPR_WAVE32_LOOP //VGPR save is complete?
- s_branch L_SAVE_LDS
- //save vgpr for wave32 ends
-
- L_SAVE_VGPR_WAVE64:
- s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4)
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- s_mov_b32 m0, 0x0 //VGPR initial index value =0
- //s_set_gpr_idx_on m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1
- //s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 //add 0x1000 since we compare m0 against it later, doesn't need this in gfx10
-
- L_SAVE_VGPR_WAVE64_LOOP:
- v_movrels_b32 v0, v0 //v0 = v[0+m0]
-
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
- buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
- end
-
- s_add_u32 m0, m0, 1 //next vgpr index
- s_add_u32 s_save_mem_offset, s_save_mem_offset, 256 //every buffer_store_dword does 256 bytes
- s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_SAVE_VGPR_WAVE64_LOOP //VGPR save is complete?
- //s_set_gpr_idx_off
- //
- //Below part will be the save shared vgpr part (new for gfx10)
- s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE) //shared_vgpr_size
- s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero?
- s_cbranch_scc0 L_SAVE_LDS //no shared_vgpr used? jump to L_SAVE_LDS
- s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value)
- //m0 now has the value of normal vgpr count, just add the m0 with shared_vgpr count to get the total count.
- //save shared_vgpr will start from the index of m0
- s_add_u32 s_save_alloc_size, s_save_alloc_size, m0
- s_mov_b32 exec_lo, 0xFFFFFFFF
- s_mov_b32 exec_hi, 0x00000000
- L_SAVE_SHARED_VGPR_WAVE64_LOOP:
- v_movrels_b32 v0, v0 //v0 = v[0+m0]
- buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
- s_add_u32 m0, m0, 1 //next vgpr index
- s_add_u32 s_save_mem_offset, s_save_mem_offset, 128 //every buffer_store_dword does 256 bytes
- s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_SAVE_SHARED_VGPR_WAVE64_LOOP //SHARED_VGPR save is complete?
-
- /* save LDS */
- //////////////////////////////
- L_SAVE_LDS:
-
- //Only check the first wave need LDS
- /* the first wave in the threadgroup */
- s_barrier //FIXME not performance-optimal "LDS is used? wait for other waves in the same TG"
- s_and_b32 s_save_tmp, s_wave_size, S_SAVE_SPI_INIT_FIRST_WAVE_MASK //exec is still used here
- s_cbranch_scc0 L_SAVE_SGPR
-
- s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
- s_and_b32 m0, s_wave_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_ENABLE_SAVE_LDS_EXEC_HI
- s_mov_b32 exec_hi, 0x00000000
- s_branch L_SAVE_LDS_NORMAL
- L_ENABLE_SAVE_LDS_EXEC_HI:
- s_mov_b32 exec_hi, 0xFFFFFFFF
- L_SAVE_LDS_NORMAL:
- s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) //lds_size
- s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero?
- s_cbranch_scc0 L_SAVE_SGPR //no lds used? jump to L_SAVE_VGPR
- s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 6 //LDS size in dwords = lds_size * 64dw
- s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //LDS size in bytes
- s_mov_b32 s_save_buf_rsrc2, s_save_alloc_size //NUM_RECORDS in bytes
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- //load 0~63*4(byte address) to vgpr v15
- v_mbcnt_lo_u32_b32 v0, -1, 0
- v_mbcnt_hi_u32_b32 v0, -1, v0
- v_mul_u32_u24 v0, 4, v0
-
- s_and_b32 m0, s_wave_size, 1
- s_cmp_eq_u32 m0, 1
- s_mov_b32 m0, 0x0
- s_cbranch_scc1 L_SAVE_LDS_LOOP_W64
-
- L_SAVE_LDS_LOOP_W32:
- if (SAVE_LDS)
- ds_read_b32 v1, v0
- s_waitcnt 0 //ensure data ready
- buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
- //buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 //save lds to memory doesn't exist in 10
- end
- s_add_u32 m0, m0, 128 //every buffer_store_lds does 128 bytes
- s_add_u32 s_save_mem_offset, s_save_mem_offset, 128 //mem offset increased by 128 bytes
- v_add_nc_u32 v0, v0, 128
- s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_SAVE_LDS_LOOP_W32 //LDS save is complete?
- s_branch L_SAVE_SGPR
-
- L_SAVE_LDS_LOOP_W64:
- if (SAVE_LDS)
- ds_read_b32 v1, v0
- s_waitcnt 0 //ensure data ready
- buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
- //buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 //save lds to memory doesn't exist in 10
- end
- s_add_u32 m0, m0, 256 //every buffer_store_lds does 256 bytes
- s_add_u32 s_save_mem_offset, s_save_mem_offset, 256 //mem offset increased by 256 bytes
- v_add_nc_u32 v0, v0, 256
- s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_SAVE_LDS_LOOP_W64 //LDS save is complete?
-
-
- /* save SGPRs */
- //////////////////////////////
- //s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE) //spgr_size
- //s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
- //s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value)
- //s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 3 //In gfx10, Number of SGPRs = (sgpr_size + 1) * 8 (non-zero value)
- L_SAVE_SGPR:
- //need to look at it is wave32 or wave64
- s_and_b32 m0, s_wave_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_SAVE_SGPR_VMEM_WAVE64
- if (SGPR_SAVE_USE_SQC)
- s_lshl_b32 s_save_buf_rsrc2, s_sgpr_save_num, 2 //NUM_RECORDS in bytes
- else
- s_lshl_b32 s_save_buf_rsrc2, s_sgpr_save_num, 7 //NUM_RECORDS in bytes (32 threads)
- end
- s_branch L_SAVE_SGPR_CONT
- L_SAVE_SGPR_VMEM_WAVE64:
- if (SGPR_SAVE_USE_SQC)
- s_lshl_b32 s_save_buf_rsrc2, s_sgpr_save_num, 2 //NUM_RECORDS in bytes
- else
- s_lshl_b32 s_save_buf_rsrc2, s_sgpr_save_num, 8 //NUM_RECORDS in bytes (64 threads)
- end
- L_SAVE_SGPR_CONT:
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- //s_mov_b32 m0, 0x0 //SGPR initial index value =0
- //s_nop 0x0 //Manually inserted wait states
-
- s_and_b32 m0, s_wave_size, 1
- s_cmp_eq_u32 m0, 1
-
- s_mov_b32 m0, 0x0 //SGPR initial index value =0
- s_nop 0x0 //Manually inserted wait states
-
- s_cbranch_scc1 L_SAVE_SGPR_LOOP_WAVE64
-
- L_SAVE_SGPR_LOOP_WAVE32:
- s_movrels_b32 s0, s0 //s0 = s[0+m0]
- //zhenxu, adding one more argument to save sgpr function, this is only for vmem, using sqc is not change
- write_sgpr_to_mem_wave32(s0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //PV: the best performance should be using s_buffer_store_dwordx4
- s_add_u32 m0, m0, 1 //next sgpr index
- s_cmp_lt_u32 m0, s_sgpr_save_num //scc = (m0 < s_sgpr_save_num) ? 1 : 0
- s_cbranch_scc1 L_SAVE_SGPR_LOOP_WAVE32 //SGPR save is complete?
- s_branch L_SAVE_HWREG
-
- L_SAVE_SGPR_LOOP_WAVE64:
- s_movrels_b32 s0, s0 //s0 = s[0+m0]
- //zhenxu, adding one more argument to save sgpr function, this is only for vmem, using sqc is not change
- write_sgpr_to_mem_wave64(s0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //PV: the best performance should be using s_buffer_store_dwordx4
- s_add_u32 m0, m0, 1 //next sgpr index
- s_cmp_lt_u32 m0, s_sgpr_save_num //scc = (m0 < s_sgpr_save_num) ? 1 : 0
- s_cbranch_scc1 L_SAVE_SGPR_LOOP_WAVE64 //SGPR save is complete?
-
-
- /* save HW registers */
- //////////////////////////////
- L_SAVE_HWREG:
- s_mov_b32 s_save_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- s_and_b32 m0, s_wave_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_SAVE_HWREG_WAVE64
-
- write_sgpr_to_mem_wave32(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //M0
-
- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME))
- s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
- s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
- end
-
- write_sgpr_to_mem_wave32(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //PC
- write_sgpr_to_mem_wave32(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF)
- write_sgpr_to_mem_wave32(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //EXEC
- write_sgpr_to_mem_wave32(s_save_exec_hi, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF)
- write_sgpr_to_mem_wave32(s_save_status, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //STATUS
-
- //s_save_trapsts conflicts with s_save_alloc_size
- s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- write_sgpr_to_mem_wave32(s_save_trapsts, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //TRAPSTS
-
- //write_sgpr_to_mem_wave32(s_save_xnack_mask_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //XNACK_MASK_LO
- write_sgpr_to_mem_wave32(s_save_xnack_mask, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //XNACK_MASK_HI
-
- //use s_save_tmp would introduce conflict here between s_save_tmp and s_save_buf_rsrc2
- s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE) //MODE
- write_sgpr_to_mem_wave32(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF)
- if(SAVE_RESTORE_HWID_DDID)
- s_getreg_b32 s_save_m0, hwreg(HW_REG_HW_ID1) //HW_ID1, handler records the SE/SA/WGP/SIMD/wave of the original wave
- write_sgpr_to_mem_wave32(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF)
- end
- s_branch L_S_PGM_END_SAVED
-
- L_SAVE_HWREG_WAVE64:
- write_sgpr_to_mem_wave64(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //M0
-
- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME))
- s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
- s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
- end
-
- write_sgpr_to_mem_wave64(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //PC
- write_sgpr_to_mem_wave64(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF)
- write_sgpr_to_mem_wave64(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //EXEC
- write_sgpr_to_mem_wave64(s_save_exec_hi, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF)
- write_sgpr_to_mem_wave64(s_save_status, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //STATUS
-
- //s_save_trapsts conflicts with s_save_alloc_size
- s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
- write_sgpr_to_mem_wave64(s_save_trapsts, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //TRAPSTS
-
- //write_sgpr_to_mem_wave64(s_save_xnack_mask_lo, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //XNACK_MASK_LO
- write_sgpr_to_mem_wave64(s_save_xnack_mask, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF) //XNACK_MASK_HI
-
- //use s_save_tmp would introduce conflict here between s_save_tmp and s_save_buf_rsrc2
- s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE) //MODE
- write_sgpr_to_mem_wave64(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF)
-
-
- if(SAVE_RESTORE_HWID_DDID)
- s_getreg_b32 s_save_m0, hwreg(HW_REG_HW_ID1) //HW_ID1, handler records the SE/SA/WGP/SIMD/wave of the original wave
- write_sgpr_to_mem_wave64(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF)
-
- /* save DDID */
- //////////////////////////////
- L_SAVE_DDID:
- //EXEC has been saved, no vector inst following
- s_mov_b32 exec_lo, 0x80000000 //Set MSB to 1. Cleared when draw index is returned
- s_sendmsg sendmsg(MSG_GET_DDID)
-
- L_WAIT_DDID_LOOP:
- s_nop 7 // sleep a bit
- s_bitcmp0_b32 exec_lo, 31 // test to see if MSB is cleared, meaning done
- s_cbranch_scc0 L_WAIT_DDID_LOOP
-
- s_mov_b32 s_save_m0, exec_lo
-
-
- s_mov_b32 s_save_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
- s_and_b32 m0, s_wave_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_SAVE_DDID_WAVE64
-
- write_sgpr_to_mem_wave32(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF)
-
- L_SAVE_DDID_WAVE64:
- write_sgpr_to_mem_wave64(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset, SGPR_SAVE_USE_SQC, USE_MTBUF_INSTEAD_OF_MUBUF)
-
- end
-
- L_S_PGM_END_SAVED:
- /* S_PGM_END_SAVED */ //FIXME graphics ONLY
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_NORMAL_EXIT))
- s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
- s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
- s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
- s_rfe_b64 s_save_pc_lo //Return to the main shader program
- else
- end
-
-
- s_branch L_END_PGM
-
-
-
-/**************************************************************************/
-/* restore routine */
-/**************************************************************************/
+
+ /* inform SPI the readiness and wait for SPI's go signal */
+ s_mov_b32 s_save_exec_lo, exec_lo //save EXEC and use EXEC for the go signal from SPI
+ s_mov_b32 s_save_exec_hi, exec_hi
+ s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive
+
+ s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC
+
+L_SLEEP:
+	// sleep 1 (64clk) is not enough for 8 waves per SIMD and would hang the
+	// SQ: the 7th/8th wave could not win arbitration to execute instructions
+	// while the other waves are stuck in the sleep loop waiting for wrexec!=0
+ s_sleep 0x2
+ s_cbranch_execz L_SLEEP
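
The handshake can be pictured as the toy C model below (an illustration, not driver code): the wave parks a zero in EXEC, asks SPI to save it, and polls until SPI writes the go mask, sleeping between polls so other waves on the SIMD can still issue.

    #include <stdint.h>
    #include <stdio.h>

    static volatile uint64_t exec_mask;     /* stands in for the EXEC register */

    static void spi_write_exec(void)        /* SPI's reply to MSG_SAVEWAVE */
    {
            exec_mask = ~0ull;
    }

    int main(void)
    {
            exec_mask = 0;                  /* s_mov_b64 exec, 0x0 */
            spi_write_exec();               /* in hardware this arrives asynchronously */
            while (exec_mask == 0) {        /* L_SLEEP: s_cbranch_execz L_SLEEP */
                    /* s_sleep 0x2: yield ~128 clocks between polls */
            }
            printf("go signal: exec = 0x%016llx\n", (unsigned long long)exec_mask);
            return 0;
    }
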
+
+	/* setup Resource Constants */
+ s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo
+ s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
+	s_mov_b32	s_save_buf_rsrc2, 0	//NUM_RECORDS initial value = 0 (in bytes) although not necessarily initialized
+ s_mov_b32 s_save_buf_rsrc3, S_SAVE_BUF_RSRC_WORD3_MISC
+ s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_ATC_MASK
+ s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT)
+ s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or ATC
+ s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_MTYPE_MASK
+ s_lshr_b32 s_save_tmp, s_save_tmp, (S_SAVE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT)
+ s_or_b32 s_save_buf_rsrc3, s_save_buf_rsrc3, s_save_tmp //or MTYPE
+
+ s_mov_b32 s_save_m0, m0
+
+ /* global mem offset */
+ s_mov_b32 s_save_mem_offset, 0x0
+ s_getreg_b32 s_wave_size, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE)
+ s_lshl_b32 s_wave_size, s_wave_size, S_WAVE_SIZE
+	s_or_b32	s_wave_size, s_save_spi_init_hi, s_wave_size	//share s_wave_size with exec_hi; the wave-size bit sits at bit 25
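
A small sketch of that packing, assuming S_WAVE_SIZE is the bit-25 position named in the comment: the single IB_STS2.WAVE64 flag is parked next to the SPI init flags and every later consumer re-extracts it.

    #include <stdint.h>
    #include <stdio.h>

    #define S_WAVE_SIZE 25  /* assumed: the "bit 25" named in the comment above */

    static uint32_t pack_wave_size(uint32_t spi_init_hi, uint32_t wave64)
    {
            return spi_init_hi | (wave64 << S_WAVE_SIZE);
    }

    static uint32_t lane_count(uint32_t s_wave_size)
    {
            /* s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE; s_and_b32 m0, m0, 1 */
            return ((s_wave_size >> S_WAVE_SIZE) & 1) ? 64 : 32;
    }

    int main(void)
    {
            uint32_t w = pack_wave_size(0x04000000 /* first-wave flag */, 1);

            printf("lanes = %u\n", lane_count(w));  /* 64 */
            return 0;
    }
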
+
+ /* save HW registers */
+
+L_SAVE_HWREG:
+ // HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)
+ get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
+ get_svgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes()
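
The save area is laid out as VGPRs | shared VGPRs | SGPRs | HWREGs | LDS, and each region offset is the running sum the get_*_size_bytes() helpers compute above. The sketch below takes the SGPR and HWREG region sizes as parameters, since the exact padded sizes come from helpers defined elsewhere in this file; the 108/11 figures in the example merely echo the registers this routine actually writes.

    #include <stdint.h>
    #include <stdio.h>

    struct save_layout {
            uint32_t vgpr, svgpr, sgpr, hwreg, lds; /* region byte offsets */
    };

    static struct save_layout mk_layout(uint32_t num_vgprs, uint32_t lanes,
                                        uint32_t num_shared_vgprs,
                                        uint32_t sgpr_bytes, uint32_t hwreg_bytes)
    {
            struct save_layout l;

            l.vgpr  = 0;
            l.svgpr = l.vgpr + num_vgprs * lanes * 4;
            l.sgpr  = l.svgpr + num_shared_vgprs * 32 * 4;  /* shared VGPRs: 32 lanes */
            l.hwreg = l.sgpr + sgpr_bytes;
            l.lds   = l.hwreg + hwreg_bytes;
            return l;
    }

    int main(void)
    {
            /* e.g. 8 VGPRs, wave64, no shared VGPRs, 108 SGPRs, 11 HWREG dwords */
            struct save_layout l = mk_layout(8, 64, 0, 108 * 4, 11 * 4);

            printf("hwreg region at %u, LDS at %u\n", l.hwreg, l.lds);
            return 0;
    }
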
+
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
+ write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset)
+ write_hwreg_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset)
+ write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset)
+ write_hwreg_to_mem(s_save_exec_hi, s_save_buf_rsrc0, s_save_mem_offset)
+ write_hwreg_to_mem(s_save_status, s_save_buf_rsrc0, s_save_mem_offset)
+
+ s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
+ write_hwreg_to_mem(s_save_trapsts, s_save_buf_rsrc0, s_save_mem_offset)
+ write_hwreg_to_mem(s_save_xnack_mask, s_save_buf_rsrc0, s_save_mem_offset)
+
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_MODE)
+ write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
+
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_SHADER_FLAT_SCRATCH_LO)
+ write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
+
+ s_getreg_b32 s_save_m0, hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI)
+ write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset)
+
+ /* the first wave in the threadgroup */
+ s_and_b32 s_save_tmp, s_save_spi_init_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK
+ s_mov_b32 s_save_exec_hi, 0x0
+ s_or_b32 s_save_exec_hi, s_save_tmp, s_save_exec_hi // save first wave bit to s_save_exec_hi.bits[26]
+
+ /* save SGPRs */
+	// Save SGPRs before the LDS save, so that s0 to s4 can be used during the LDS save...
+
+ // SGPR SR memory offset : size(VGPR)+size(SVGPR)
+ get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
+ get_svgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+	// back up s_save_buf_rsrc0 into s_save_xnack_mask, since the write_16sgpr_to_mem function advances rsrc0
+ s_mov_b32 s_save_xnack_mask, s_save_buf_rsrc0
+ s_add_u32 s_save_buf_rsrc0, s_save_buf_rsrc0, s_save_mem_offset
+ s_addc_u32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0
+
+ s_mov_b32 m0, 0x0 //SGPR initial index value =0
+ s_nop 0x0 //Manually inserted wait states
+L_SAVE_SGPR_LOOP:
+	// SGPRs are allocated in 16-SGPR granularity
+ s_movrels_b64 s0, s0 //s0 = s[0+m0], s1 = s[1+m0]
+ s_movrels_b64 s2, s2 //s2 = s[2+m0], s3 = s[3+m0]
+ s_movrels_b64 s4, s4 //s4 = s[4+m0], s5 = s[5+m0]
+ s_movrels_b64 s6, s6 //s6 = s[6+m0], s7 = s[7+m0]
+ s_movrels_b64 s8, s8 //s8 = s[8+m0], s9 = s[9+m0]
+ s_movrels_b64 s10, s10 //s10 = s[10+m0], s11 = s[11+m0]
+ s_movrels_b64 s12, s12 //s12 = s[12+m0], s13 = s[13+m0]
+ s_movrels_b64 s14, s14 //s14 = s[14+m0], s15 = s[15+m0]
+
+ write_16sgpr_to_mem(s0, s_save_buf_rsrc0, s_save_mem_offset)
+ s_add_u32 m0, m0, 16 //next sgpr index
+ s_cmp_lt_u32 m0, 96 //scc = (m0 < first 96 SGPR) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_SGPR_LOOP //first 96 SGPR save is complete?
+
+	//save the remaining 12 SGPRs
+ s_movrels_b64 s0, s0 //s0 = s[0+m0], s1 = s[1+m0]
+ s_movrels_b64 s2, s2 //s2 = s[2+m0], s3 = s[3+m0]
+ s_movrels_b64 s4, s4 //s4 = s[4+m0], s5 = s[5+m0]
+ s_movrels_b64 s6, s6 //s6 = s[6+m0], s7 = s[7+m0]
+ s_movrels_b64 s8, s8 //s8 = s[8+m0], s9 = s[9+m0]
+ s_movrels_b64 s10, s10 //s10 = s[10+m0], s11 = s[11+m0]
+ write_12sgpr_to_mem(s0, s_save_buf_rsrc0, s_save_mem_offset)
+
+ // restore s_save_buf_rsrc0,1
+ s_mov_b32 s_save_buf_rsrc0, s_save_xnack_mask
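
Summing up the two phases above: SGPRs are allocated in 16-register granularity, so the first 96 go out in six 16-register bursts and the remaining 12 in one final burst, 108 in total. A one-line check in C:

    #include <stdio.h>

    int main(void)
    {
            unsigned m0, saved = 0;

            for (m0 = 0; m0 < 96; m0 += 16) /* L_SAVE_SGPR_LOOP */
                    saved += 16;            /* write_16sgpr_to_mem(s[m0..m0+15]) */
            saved += 12;                    /* write_12sgpr_to_mem(s[96..107]) */
            printf("sgprs saved: %u\n", saved);     /* 108 */
            return 0;
    }
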
+
+	/* save the first 4 VGPRs, so the LDS save can then use v0-v3 */
+	// each wave allocates at least 4 vgprs...
+
+ s_mov_b32 s_save_mem_offset, 0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_SAVE_4VGPR_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_SAVE_4VGPR_WAVE32
+L_ENABLE_SAVE_4VGPR_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+ s_branch L_SAVE_4VGPR_WAVE64
+L_SAVE_4VGPR_WAVE32:
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR Allocated in 4-GPR granularity
+
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*3
+ s_branch L_SAVE_LDS
+
+L_SAVE_4VGPR_WAVE64:
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR Allocated in 4-GPR granularity
+
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
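
The offset:256 steps follow from the lane-interleaved store layout: each VGPR occupies lanes * 4 contiguous bytes, so consecutive VGPRs are 128 bytes apart in wave32 and 256 bytes apart in wave64. A minimal sketch of that addressing:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t vgpr_byte_offset(uint32_t v, uint32_t lane, uint32_t lanes)
    {
            return v * lanes * 4 + lane * 4;
    }

    int main(void)
    {
            printf("wave64: v1 starts at %u\n", vgpr_byte_offset(1, 0, 64)); /* 256 */
            printf("wave32: v1 starts at %u\n", vgpr_byte_offset(1, 0, 32)); /* 128 */
            return 0;
    }
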
+
+ /* save LDS */
+
+L_SAVE_LDS:
+ // Change EXEC to all threads...
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_SAVE_LDS_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_SAVE_LDS_NORMAL
+L_ENABLE_SAVE_LDS_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_SAVE_LDS_NORMAL:
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
+ s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //lds_size is zero?
+	s_cbranch_scc0	L_SAVE_LDS_DONE		//no lds used? jump to L_SAVE_LDS_DONE
+
+ s_barrier //LDS is used? wait for other waves in the same TG
+ s_and_b32 s_save_tmp, s_save_exec_hi, S_SAVE_SPI_INIT_FIRST_WAVE_MASK
+ s_cbranch_scc0 L_SAVE_LDS_DONE
+
+	// first wave does the LDS save;
+
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 6 //LDS size in dwords = lds_size * 64dw
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //LDS size in bytes
+ s_mov_b32 s_save_buf_rsrc2, s_save_alloc_size //NUM_RECORDS in bytes
+
+	// LDS at offset: size(VGPR)+size(SVGPR)+size(SGPR)+size(HWREG)
+ //
+ get_vgpr_size_bytes(s_save_mem_offset, s_wave_size)
+ get_svgpr_size_bytes(s_save_tmp)
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, s_save_tmp
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_sgpr_size_bytes()
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes()
+
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ //load 0~63*4(byte address) to vgpr v0
+ v_mbcnt_lo_u32_b32 v0, -1, 0
+ v_mbcnt_hi_u32_b32 v0, -1, v0
+ v_mul_u32_u24 v0, 4, v0
+
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_mov_b32 m0, 0x0
+ s_cbranch_scc1 L_SAVE_LDS_W64
+
+L_SAVE_LDS_W32:
+ s_mov_b32 s3, 128
+ s_nop 0
+ s_nop 0
+ s_nop 0
+L_SAVE_LDS_LOOP_W32:
+ ds_read_b32 v1, v0
+ s_waitcnt 0
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+
+	s_add_u32	m0, m0, s3		//every buffer_store_dword does s3 (=128) bytes
+	s_add_u32	s_save_mem_offset, s_save_mem_offset, s3
+	v_add_nc_u32	v0, v0, 128		//lane address advances by 128 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_LDS_LOOP_W32 //LDS save is complete?
+
+ s_branch L_SAVE_LDS_DONE
+
+L_SAVE_LDS_W64:
+ s_mov_b32 s3, 256
+ s_nop 0
+ s_nop 0
+ s_nop 0
+L_SAVE_LDS_LOOP_W64:
+ ds_read_b32 v1, v0
+ s_waitcnt 0
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+
+	s_add_u32	m0, m0, s3		//every buffer_store_dword does s3 (=256) bytes
+	s_add_u32	s_save_mem_offset, s_save_mem_offset, s3
+	v_add_nc_u32	v0, v0, 256		//lane address advances by 256 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_LDS_LOOP_W64 //LDS save is complete?
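
Both LDS loops follow the same shape: v0 starts at lane_id * 4, and every iteration moves lanes * 4 bytes (s3 = 128 for wave32, 256 for wave64) until m0 reaches the LDS allocation size. A sketch of the iteration count:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t lds_store_count(uint32_t lds_bytes, uint32_t lanes)
    {
            uint32_t stride = lanes * 4;    /* s3: 128 for wave32, 256 for wave64 */
            uint32_t m0, n = 0;

            for (m0 = 0; m0 < lds_bytes; m0 += stride)      /* L_SAVE_LDS_LOOP_W* */
                    n++;                    /* one ds_read_b32 + buffer_store_dword */
            return n;
    }

    int main(void)
    {
            printf("wave64, 64KiB LDS: %u iterations\n",
                   lds_store_count(64 * 1024, 64));     /* 256 */
            return 0;
    }
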
+
+L_SAVE_LDS_DONE:
+	/* save VGPRs - save the remaining VGPRs */
+L_SAVE_VGPR:
+ // VGPR SR memory offset: 0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_SAVE_VGPR_EXEC_HI
+ s_mov_b32 s_save_mem_offset, (0+128*4) // for the rest VGPRs
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_SAVE_VGPR_NORMAL
+L_ENABLE_SAVE_VGPR_EXEC_HI:
+ s_mov_b32 s_save_mem_offset, (0+256*4) // for the rest VGPRs
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_SAVE_VGPR_NORMAL:
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
+	//determine whether it is wave32 or wave64
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_SAVE_VGPR_WAVE64
+
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR Allocated in 4-GPR granularity
+
+ // VGPR store using dw burst
+ s_mov_b32 m0, 0x4 //VGPR initial index value =4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc0 L_SAVE_VGPR_END
+
+L_SAVE_VGPR_W32_LOOP:
+ v_movrels_b32 v0, v0 //v0 = v[0+m0]
+ v_movrels_b32 v1, v1 //v1 = v[1+m0]
+ v_movrels_b32 v2, v2 //v2 = v[2+m0]
+ v_movrels_b32 v3, v3 //v3 = v[3+m0]
+
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:128*3
+
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 128*4 //every buffer_store_dword does 128 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_VGPR_W32_LOOP //VGPR save is complete?
+
+ s_branch L_SAVE_VGPR_END
+
+L_SAVE_VGPR_WAVE64:
+ s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR store using dw burst
+ s_mov_b32 m0, 0x4 //VGPR initial index value =4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc0 L_SAVE_VGPR_END
+
+L_SAVE_VGPR_W64_LOOP:
+ v_movrels_b32 v0, v0 //v0 = v[0+m0]
+ v_movrels_b32 v1, v1 //v1 = v[1+m0]
+ v_movrels_b32 v2, v2 //v2 = v[2+m0]
+ v_movrels_b32 v3, v3 //v3 = v[3+m0]
+
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
+
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 //every buffer_store_dword does 256 bytes
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_VGPR_W64_LOOP //VGPR save is complete?
+
+	//Below is the shared VGPR save part (new for gfx10)
+ s_getreg_b32 s_save_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
+ s_and_b32 s_save_alloc_size, s_save_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero?
+	s_cbranch_scc0	L_SAVE_VGPR_END		//no shared_vgpr used? jump to L_SAVE_VGPR_END
+ s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value)
+	//m0 now holds the normal VGPR count; add the shared VGPR count to it to get the total count.
+	//the shared VGPR save starts from index m0
+ s_add_u32 s_save_alloc_size, s_save_alloc_size, m0
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+ s_mov_b32 exec_hi, 0x00000000
+L_SAVE_SHARED_VGPR_WAVE64_LOOP:
+ v_movrels_b32 v0, v0 //v0 = v[0+m0]
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ s_add_u32 m0, m0, 1 //next vgpr index
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 128
+ s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_SAVE_SHARED_VGPR_WAVE64_LOOP //SHARED_VGPR save is complete?
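
Shared VGPRs exist only in wave64, the LDS_ALLOC field counts them in blocks of 8, and the loop above stores each one 32 lanes wide (exec_hi is zeroed), hence the 128-byte step. A sketch of the resulting save size:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t shared_vgpr_save_bytes(uint32_t shared_vgpr_size)
    {
            uint32_t count = shared_vgpr_size * 8;  /* registers, when field != 0 */

            return count * 32 * 4;  /* one 32-lane dword store per register */
    }

    int main(void)
    {
            printf("field=2 -> %u bytes\n", shared_vgpr_save_bytes(2));     /* 2048 */
            return 0;
    }
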
+
+L_SAVE_VGPR_END:
+ s_branch L_END_PGM
L_RESTORE:
- /* Setup Resource Contants */
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
- //calculate wd_addr using absolute thread id
- v_readlane_b32 s_restore_tmp, v9, 0
- //determine it is wave32 or wave64
- s_getreg_b32 s_restore_size, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE) //change to ttmp13
- s_cmp_eq_u32 s_restore_size, 0
- s_cbranch_scc1 L_RESTORE_WAVE32
- s_lshr_b32 s_restore_tmp, s_restore_tmp, 6 //SAVE WAVE64
- s_branch L_RESTORE_CON
- L_RESTORE_WAVE32:
- s_lshr_b32 s_restore_tmp, s_restore_tmp, 5 //SAVE WAVE32
- L_RESTORE_CON:
- s_mul_i32 s_restore_tmp, s_restore_tmp, WAVE_SPACE
- s_add_i32 s_restore_spi_init_lo, s_restore_tmp, WG_BASE_ADDR_LO
- s_mov_b32 s_restore_spi_init_hi, WG_BASE_ADDR_HI
- s_and_b32 s_restore_spi_init_hi, s_restore_spi_init_hi, CTX_RESTORE_CONTROL
- else
- end
-
- s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo
- s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi
- s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
- s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes)
- s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
- s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_ATC_MASK
- s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT) //get ATC bit into position
- s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or ATC
- s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_MTYPE_MASK
- s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT) //get MTYPE bits into position
- s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or MTYPE
- //determine it is wave32 or wave64
- s_getreg_b32 s_restore_size, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE)
- s_or_b32 s_restore_size, s_restore_spi_init_hi, s_restore_size //share s_wave_size with exec_hi
-
- /* global mem offset */
- s_mov_b32 s_restore_mem_offset, 0x0 //mem offset initial value = 0
-
- /* restore VGPRs */
- //////////////////////////////
- L_RESTORE_VGPR:
-
- s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on //be consistent with SAVE although can be moved ahead
- s_and_b32 m0, s_restore_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_ENABLE_RESTORE_VGPR_EXEC_HI
- s_mov_b32 exec_hi, 0x00000000
- s_branch L_RESTORE_VGPR_NORMAL
- L_ENABLE_RESTORE_VGPR_EXEC_HI:
- s_mov_b32 exec_hi, 0xFFFFFFFF
- L_RESTORE_VGPR_NORMAL:
- s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size
- s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
- s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
- //determine it is wave32 or wave64
- s_and_b32 m0, s_restore_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_RESTORE_VGPR_WAVE64
-
- s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 7 //NUM_RECORDS in bytes (32 threads*4)
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128
- s_mov_b32 m0, 1 //VGPR initial index value = 1
- //s_set_gpr_idx_on m0, 0x8 //M0[7:0] = M0[7:0] and M0[15:12] = 0x8
- //s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 0x8000 //add 0x8000 since we compare m0 against it later, might not need this in gfx10
-
- L_RESTORE_VGPR_WAVE32_LOOP:
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
- buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
- end
- s_waitcnt vmcnt(0) //ensure data ready
- v_movreld_b32 v0, v0 //v[0+m0] = v0
- s_add_u32 m0, m0, 1 //next vgpr index
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128 //every buffer_load_dword does 128 bytes
- s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_RESTORE_VGPR_WAVE32_LOOP //VGPR restore (except v0) is complete?
- //s_set_gpr_idx_off
- /* VGPR restore on v0 */
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
- buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1
- end
-
- s_branch L_RESTORE_LDS
-
- L_RESTORE_VGPR_WAVE64:
- s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4)
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256
- s_mov_b32 m0, 1 //VGPR initial index value = 1
- L_RESTORE_VGPR_WAVE64_LOOP:
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
- buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
- end
- s_waitcnt vmcnt(0) //ensure data ready
- v_movreld_b32 v0, v0 //v[0+m0] = v0
- s_add_u32 m0, m0, 1 //next vgpr index
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 //every buffer_load_dword does 256 bytes
- s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_RESTORE_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete?
- //s_set_gpr_idx_off
- //
- //Below part will be the restore shared vgpr part (new for gfx10)
- s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE) //shared_vgpr_size
- s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero?
- s_cbranch_scc0 L_RESTORE_V0 //no shared_vgpr used? jump to L_SAVE_LDS
- s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value)
- //m0 now has the value of normal vgpr count, just add the m0 with shared_vgpr count to get the total count.
- //restore shared_vgpr will start from the index of m0
- s_add_u32 s_restore_alloc_size, s_restore_alloc_size, m0
- s_mov_b32 exec_lo, 0xFFFFFFFF
- s_mov_b32 exec_hi, 0x00000000
- L_RESTORE_SHARED_VGPR_WAVE64_LOOP:
- buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
- s_waitcnt vmcnt(0) //ensure data ready
- v_movreld_b32 v0, v0 //v[0+m0] = v0
- s_add_u32 m0, m0, 1 //next vgpr index
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128 //every buffer_load_dword does 256 bytes
- s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_RESTORE_SHARED_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete?
-
- s_mov_b32 exec_hi, 0xFFFFFFFF //restore back exec_hi before restoring V0!!
-
- /* VGPR restore on v0 */
- L_RESTORE_V0:
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
- buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1
- end
-
-
- /* restore LDS */
- //////////////////////////////
- L_RESTORE_LDS:
-
- //Only need to check the first wave
- /* the first wave in the threadgroup */
- s_and_b32 s_restore_tmp, s_restore_size, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
- s_cbranch_scc0 L_RESTORE_SGPR
-
- s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on //be consistent with SAVE although can be moved ahead
- s_and_b32 m0, s_restore_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_ENABLE_RESTORE_LDS_EXEC_HI
- s_mov_b32 exec_hi, 0x00000000
- s_branch L_RESTORE_LDS_NORMAL
- L_ENABLE_RESTORE_LDS_EXEC_HI:
- s_mov_b32 exec_hi, 0xFFFFFFFF
- L_RESTORE_LDS_NORMAL:
- s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE) //lds_size
- s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //lds_size is zero?
- s_cbranch_scc0 L_RESTORE_SGPR //no lds used? jump to L_RESTORE_VGPR
- s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 6 //LDS size in dwords = lds_size * 64dw
- s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //LDS size in bytes
- s_mov_b32 s_restore_buf_rsrc2, s_restore_alloc_size //NUM_RECORDS in bytes
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- s_and_b32 m0, s_wave_size, 1
- s_cmp_eq_u32 m0, 1
- s_mov_b32 m0, 0x0
- s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64
-
- L_RESTORE_LDS_LOOP_W32:
- if (SAVE_LDS)
- buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1
- s_waitcnt 0
- end
- s_add_u32 m0, m0, 128 //every buffer_load_dword does 256 bytes
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128 //mem offset increased by 256 bytes
- s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_RESTORE_LDS_LOOP_W32 //LDS restore is complete?
- s_branch L_RESTORE_SGPR
-
- L_RESTORE_LDS_LOOP_W64:
- if (SAVE_LDS)
- buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1
- s_waitcnt 0
- end
- s_add_u32 m0, m0, 256 //every buffer_load_dword does 256 bytes
- s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256 //mem offset increased by 256 bytes
- s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64 //LDS restore is complete?
-
-
- /* restore SGPRs */
- //////////////////////////////
- //s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_SGPR_SIZE_SIZE) //spgr_size
- //s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
- //s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value)
- //s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 3 //Number of SGPRs = (sgpr_size + 1) * 8 (non-zero value)
- L_RESTORE_SGPR:
- //need to look at it is wave32 or wave64
- s_and_b32 m0, s_restore_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_RESTORE_SGPR_VMEM_WAVE64
- if (SGPR_SAVE_USE_SQC)
- s_lshl_b32 s_restore_buf_rsrc2, s_sgpr_save_num, 2 //NUM_RECORDS in bytes
- else
- s_lshl_b32 s_restore_buf_rsrc2, s_sgpr_save_num, 7 //NUM_RECORDS in bytes (32 threads)
- end
- s_branch L_RESTORE_SGPR_CONT
- L_RESTORE_SGPR_VMEM_WAVE64:
- if (SGPR_SAVE_USE_SQC)
- s_lshl_b32 s_restore_buf_rsrc2, s_sgpr_save_num, 2 //NUM_RECORDS in bytes
- else
- s_lshl_b32 s_restore_buf_rsrc2, s_sgpr_save_num, 8 //NUM_RECORDS in bytes (64 threads)
- end
-
- L_RESTORE_SGPR_CONT:
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- s_and_b32 m0, s_restore_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_RESTORE_SGPR_WAVE64
-
- read_sgpr_from_mem_wave32(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //save s0 to s_restore_tmp
- s_mov_b32 m0, 0x1
-
- L_RESTORE_SGPR_LOOP_WAVE32:
- read_sgpr_from_mem_wave32(s0, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //PV: further performance improvement can be made
- s_waitcnt lgkmcnt(0) //ensure data ready
- s_movreld_b32 s0, s0 //s[0+m0] = s0
- s_nop 0 // hazard SALU M0=> S_MOVREL
- s_add_u32 m0, m0, 1 //next sgpr index
- s_cmp_lt_u32 m0, s_sgpr_save_num //scc = (m0 < s_restore_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_RESTORE_SGPR_LOOP_WAVE32 //SGPR restore (except s0) is complete?
- s_mov_b32 s0, s_restore_tmp /* SGPR restore on s0 */
- s_branch L_RESTORE_HWREG
-
- L_RESTORE_SGPR_WAVE64:
- read_sgpr_from_mem_wave64(s_restore_tmp, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //save s0 to s_restore_tmp
- s_mov_b32 m0, 0x1 //SGPR initial index value =1 //go on with with s1
-
- L_RESTORE_SGPR_LOOP_WAVE64:
- read_sgpr_from_mem_wave64(s0, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //PV: further performance improvement can be made
- s_waitcnt lgkmcnt(0) //ensure data ready
- s_movreld_b32 s0, s0 //s[0+m0] = s0
- s_nop 0 // hazard SALU M0=> S_MOVREL
- s_add_u32 m0, m0, 1 //next sgpr index
- s_cmp_lt_u32 m0, s_sgpr_save_num //scc = (m0 < s_restore_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_RESTORE_SGPR_LOOP_WAVE64 //SGPR restore (except s0) is complete?
- s_mov_b32 s0, s_restore_tmp /* SGPR restore on s0 */
-
-
- /* restore HW registers */
- //////////////////////////////
- L_RESTORE_HWREG:
- s_mov_b32 s_restore_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- s_and_b32 m0, s_restore_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_RESTORE_HWREG_WAVE64
-
- read_sgpr_from_mem_wave32(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //M0
- read_sgpr_from_mem_wave32(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //PC
- read_sgpr_from_mem_wave32(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC)
- read_sgpr_from_mem_wave32(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //EXEC
- read_sgpr_from_mem_wave32(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC)
- read_sgpr_from_mem_wave32(s_restore_status, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //STATUS
- read_sgpr_from_mem_wave32(s_restore_trapsts, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //TRAPSTS
- //read_sgpr_from_mem_wave32(xnack_mask_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK_LO
- //read_sgpr_from_mem_wave32(xnack_mask_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK_HI
- read_sgpr_from_mem_wave32(s_restore_xnack_mask, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK
- read_sgpr_from_mem_wave32(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //MODE
- if(SAVE_RESTORE_HWID_DDID)
- read_sgpr_from_mem_wave32(s_restore_hwid1, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //HW_ID1
- end
- s_branch L_RESTORE_HWREG_FINISH
-
- L_RESTORE_HWREG_WAVE64:
- read_sgpr_from_mem_wave64(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //M0
- read_sgpr_from_mem_wave64(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //PC
- read_sgpr_from_mem_wave64(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC)
- read_sgpr_from_mem_wave64(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //EXEC
- read_sgpr_from_mem_wave64(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC)
- read_sgpr_from_mem_wave64(s_restore_status, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //STATUS
- read_sgpr_from_mem_wave64(s_restore_trapsts, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //TRAPSTS
- //read_sgpr_from_mem_wave64(xnack_mask_lo, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK_LO
- //read_sgpr_from_mem_wave64(xnack_mask_hi, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK_HI
- read_sgpr_from_mem_wave64(s_restore_xnack_mask, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //XNACK_MASK
- read_sgpr_from_mem_wave64(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //MODE
- if(SAVE_RESTORE_HWID_DDID)
- read_sgpr_from_mem_wave64(s_restore_hwid1, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC) //HW_ID1
- end
- L_RESTORE_HWREG_FINISH:
- s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS
-
-
-
- if(SAVE_RESTORE_HWID_DDID)
- L_RESTORE_DDID:
- s_mov_b32 m0, s_restore_hwid1 //virture ttrace support: The save-context handler records the SE/SA/WGP/SIMD/wave of the original wave
- s_ttracedata //and then can output it as SHADER_DATA to ttrace on restore to provide a correlation across the save-restore
-
- s_mov_b32 s_restore_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
- s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
- s_and_b32 m0, s_restore_size, 1
- s_cmp_eq_u32 m0, 1
- s_cbranch_scc1 L_RESTORE_DDID_WAVE64
-
- read_sgpr_from_mem_wave32(s_restore_ddid, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC)
- s_branch L_RESTORE_DDID_FINISH
- L_RESTORE_DDID_WAVE64:
- read_sgpr_from_mem_wave64(s_restore_ddid, s_restore_buf_rsrc0, s_restore_mem_offset, SGPR_SAVE_USE_SQC)
-
- L_RESTORE_DDID_FINISH:
- s_waitcnt lgkmcnt(0)
- //s_mov_b32 m0, s_restore_ddid
- //s_ttracedata
- if (RESTORE_DDID_IN_SGPR18)
- s_mov_b32 s18, s_restore_ddid
- end
-
- end
-
- s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
-
- //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
- s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore)
- s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over
- end
- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_RESTORE_NORMAL))
- s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 4 //pc[31:0]+4 // save is hack through s_trap but restore is normal
- s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over
- end
-
- s_mov_b32 m0, s_restore_m0
- s_mov_b32 exec_lo, s_restore_exec_lo
- s_mov_b32 exec_hi, s_restore_exec_hi
-
- s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK, s_restore_trapsts
+	/* Setup Resource Constants */
+ s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo
+ s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi
+ s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
+ s_mov_b32 s_restore_buf_rsrc2, 0 //NUM_RECORDS initial value = 0 (in bytes)
+ s_mov_b32 s_restore_buf_rsrc3, S_RESTORE_BUF_RSRC_WORD3_MISC
+ s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_ATC_MASK
+ s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_ATC_SHIFT-SQ_BUF_RSRC_WORD1_ATC_SHIFT)
+ s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or ATC
+ s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_MTYPE_MASK
+ s_lshr_b32 s_restore_tmp, s_restore_tmp, (S_RESTORE_SPI_INIT_MTYPE_SHIFT-SQ_BUF_RSRC_WORD3_MTYPE_SHIFT)
+ s_or_b32 s_restore_buf_rsrc3, s_restore_buf_rsrc3, s_restore_tmp //or MTYPE
+	//determine whether the wave is wave32 or wave64
+ s_getreg_b32 s_restore_size, hwreg(HW_REG_IB_STS2,SQ_WAVE_IB_STS2_WAVE64_SHIFT,SQ_WAVE_IB_STS2_WAVE64_SIZE)
+ s_lshl_b32 s_restore_size, s_restore_size, S_WAVE_SIZE
+ s_or_b32 s_restore_size, s_restore_spi_init_hi, s_restore_size
+
+ s_and_b32 s_restore_tmp, s_restore_spi_init_hi, S_RESTORE_SPI_INIT_FIRST_WAVE_MASK
+ s_cbranch_scc0 L_RESTORE_VGPR
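+	// Only the first wave in the threadgroup restores LDS, since LDS is
+	// shared by the whole TG; every other wave branches straight to the
+	// VGPR restore.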
+
+ /* restore LDS */
+L_RESTORE_LDS:
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_RESTORE_LDS_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_RESTORE_LDS_NORMAL
+L_ENABLE_RESTORE_LDS_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_RESTORE_LDS_NORMAL:
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
+ s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //lds_size is zero?
+ s_cbranch_scc0 L_RESTORE_VGPR //no lds used? jump to L_RESTORE_VGPR
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 6 //LDS size in dwords = lds_size * 64dw
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //LDS size in bytes
+ s_mov_b32 s_restore_buf_rsrc2, s_restore_alloc_size //NUM_RECORDS in bytes
+
+	// LDS at offset: size(VGPR)+size(SVGPR)+size(SGPR)+size(HWREG)
+ //
+ get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
+ get_svgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes()
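+	// Worked example (illustrative values only): a wave64 wave with
+	// vgpr_size=3 and no shared VGPRs gives size(VGPR) = (3+1)*4 VGPRs
+	// * 64 lanes * 4 bytes = 4096, size(SVGPR) = 0, size(SGPR) = 512 and
+	// size(HWREG) = 128, so LDS data starts at byte offset 4736.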
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ s_lshr_b32 m0, s_wave_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_mov_b32 m0, 0x0
+ s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64
+
+L_RESTORE_LDS_LOOP_W32:
+	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1	// 32 DW per load (wave32)
+	s_add_u32	m0, m0, 128	// LDS offset += 128 bytes (32 lanes * 4 bytes)
+	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 128	//mem offset increased by 128 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_LDS_LOOP_W32 //LDS restore is complete?
+ s_branch L_RESTORE_VGPR
+
+L_RESTORE_LDS_LOOP_W64:
+	buffer_load_dword	v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1	// 64 DW per load (wave64)
+	s_add_u32	m0, m0, 256	// LDS offset += 256 bytes (64 lanes * 4 bytes)
+	s_add_u32	s_restore_mem_offset, s_restore_mem_offset, 256	//mem offset increased by 256 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_LDS_LOOP_W64 //LDS restore is complete?
+
+ /* restore VGPRs */
+L_RESTORE_VGPR:
+ // VGPR SR memory offset : 0
+ s_mov_b32 s_restore_mem_offset, 0x0
+ s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_RESTORE_VGPR_EXEC_HI
+ s_mov_b32 exec_hi, 0x00000000
+ s_branch L_RESTORE_VGPR_NORMAL
+L_ENABLE_RESTORE_VGPR_EXEC_HI:
+ s_mov_b32 exec_hi, 0xFFFFFFFF
+L_RESTORE_VGPR_NORMAL:
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
+	//determine whether the wave is wave32 or wave64
+ s_lshr_b32 m0, s_restore_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_RESTORE_VGPR_WAVE64
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR load using dw burst
+	s_mov_b32	s_restore_mem_offset_save, s_restore_mem_offset	// restore starts with v4; v0..v3 are restored last
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128*4
+ s_mov_b32 m0, 4 //VGPR initial index value = 4
+
+L_RESTORE_VGPR_WAVE32_LOOP:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:128
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:128*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:128*3
+ s_waitcnt vmcnt(0)
+ v_movreld_b32 v0, v0 //v[0+m0] = v0
+ v_movreld_b32 v1, v1
+ v_movreld_b32 v2, v2
+ v_movreld_b32 v3, v3
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128*4 //every buffer_load_dword does 128 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_VGPR_WAVE32_LOOP //VGPR restore (except v0) is complete?
+
+ /* VGPR restore on v0 */
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:128*3
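+	// v0..v3 are used as the staging registers for the movreld burst
+	// restore above, so their own saved values are reloaded last, from
+	// the base offset kept in s_restore_mem_offset_save.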
+
+ s_branch L_RESTORE_SGPR
+
+L_RESTORE_VGPR_WAVE64:
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ // VGPR load using dw burst
+	s_mov_b32	s_restore_mem_offset_save, s_restore_mem_offset	// restore starts with v4; v0..v3 are restored last
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
+ s_mov_b32 m0, 4 //VGPR initial index value = 4
+
+L_RESTORE_VGPR_WAVE64_LOOP:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*3
+ s_waitcnt vmcnt(0)
+ v_movreld_b32 v0, v0 //v[0+m0] = v0
+ v_movreld_b32 v1, v1
+ v_movreld_b32 v2, v2
+ v_movreld_b32 v3, v3
+ s_add_u32 m0, m0, 4 //next vgpr index
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4 //every buffer_load_dword does 256 bytes
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete?
+
+	//Below is the shared-VGPR restore path (new for gfx10)
+ s_getreg_b32 s_restore_alloc_size, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE) //shared_vgpr_size
+ s_and_b32 s_restore_alloc_size, s_restore_alloc_size, 0xFFFFFFFF //shared_vgpr_size is zero?
+ s_cbranch_scc0 L_RESTORE_V0 //no shared_vgpr used?
+ s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 3 //Number of SHARED_VGPRs = shared_vgpr_size * 8 (non-zero value)
+	//m0 now holds the normal VGPR count; add the shared-VGPR count to it to get the total count.
+	//shared-VGPR restore starts from index m0
+ s_add_u32 s_restore_alloc_size, s_restore_alloc_size, m0
+ s_mov_b32 exec_lo, 0xFFFFFFFF
+ s_mov_b32 exec_hi, 0x00000000
+L_RESTORE_SHARED_VGPR_WAVE64_LOOP:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
+ s_waitcnt vmcnt(0)
+ v_movreld_b32 v0, v0 //v[0+m0] = v0
+ s_add_u32 m0, m0, 1 //next vgpr index
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 128
+ s_cmp_lt_u32 m0, s_restore_alloc_size //scc = (m0 < s_restore_alloc_size) ? 1 : 0
+ s_cbranch_scc1 L_RESTORE_SHARED_VGPR_WAVE64_LOOP //VGPR restore (except v0) is complete?
+
+	s_mov_b32	exec_hi, 0xFFFFFFFF	//restore exec_hi before restoring v0
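+	// The shared-VGPR loop above runs with exec_hi=0 and a 128-byte
+	// stride (32 lanes * 4 bytes): shared VGPRs are restored one 32-lane
+	// register per iteration via the movreld index in m0.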
+
+ /* VGPR restore on v0 */
+L_RESTORE_V0:
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3
+ s_waitcnt vmcnt(0)
+
+ /* restore SGPRs */
+	//restored in groups of 4 + 8 + 16*6 = 108 SGPRs
+ // SGPR SR memory offset : size(VGPR)+size(SVGPR)
+L_RESTORE_SGPR:
+ get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
+ get_svgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
+	s_sub_u32	s_restore_mem_offset, s_restore_mem_offset, 20*4	//s108~s127 are not saved
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ s_mov_b32 m0, s_sgpr_save_num
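+	// SGPRs are restored top-down: m0 starts at s_sgpr_save_num and each
+	// read_*sgpr_from_mem helper pre-decrements the memory offset, so the
+	// groups come back in the reverse order of the save.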
+
+ read_4sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_waitcnt lgkmcnt(0)
+
+	s_sub_u32	m0, m0, 4	// restore s[104:107], the top of the saved range
+ s_nop 0 // hazard SALU M0=> S_MOVREL
+
+ s_movreld_b64 s0, s0 //s[0+m0] = s0
+ s_movreld_b64 s2, s2
+
+ read_8sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_waitcnt lgkmcnt(0)
+
+	s_sub_u32	m0, m0, 8	// restore s[96:103]
+ s_nop 0 // hazard SALU M0=> S_MOVREL
+
+ s_movreld_b64 s0, s0 //s[0+m0] = s0
+ s_movreld_b64 s2, s2
+ s_movreld_b64 s4, s4
+ s_movreld_b64 s6, s6
+
+ L_RESTORE_SGPR_LOOP:
+ read_16sgpr_from_mem(s0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_waitcnt lgkmcnt(0)
+
+	s_sub_u32	m0, m0, 16	// restore the next 16 SGPRs, working down to s[0]
+ s_nop 0 // hazard SALU M0=> S_MOVREL
+
+ s_movreld_b64 s0, s0 //s[0+m0] = s0
+ s_movreld_b64 s2, s2
+ s_movreld_b64 s4, s4
+ s_movreld_b64 s6, s6
+ s_movreld_b64 s8, s8
+ s_movreld_b64 s10, s10
+ s_movreld_b64 s12, s12
+ s_movreld_b64 s14, s14
+
+	s_cmp_eq_u32	m0, 0	//scc = (m0 == 0) ? 1 : 0
+ s_cbranch_scc0 L_RESTORE_SGPR_LOOP
+
+ /* restore HW registers */
+L_RESTORE_HWREG:
+ // HWREG SR memory offset : size(VGPR)+size(SVGPR)+size(SGPR)
+ get_vgpr_size_bytes(s_restore_mem_offset, s_restore_size)
+ get_svgpr_size_bytes(s_restore_tmp)
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, s_restore_tmp
+ s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_sgpr_size_bytes()
+
+ s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
+
+ read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_pc_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_exec_lo, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_exec_hi, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_status, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_trapsts, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_xnack_mask, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_mode, s_restore_buf_rsrc0, s_restore_mem_offset)
+ read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_waitcnt lgkmcnt(0)
+
+ s_setreg_b32 hwreg(HW_REG_SHADER_FLAT_SCRATCH_LO), s_restore_flat_scratch
+
+ read_hwreg_from_mem(s_restore_flat_scratch, s_restore_buf_rsrc0, s_restore_mem_offset)
+ s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS
+
+ s_setreg_b32 hwreg(HW_REG_SHADER_FLAT_SCRATCH_HI), s_restore_flat_scratch
+
+ s_mov_b32 s_restore_tmp, s_restore_pc_hi
+ s_and_b32 s_restore_pc_hi, s_restore_tmp, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
+
+ s_mov_b32 m0, s_restore_m0
+ s_mov_b32 exec_lo, s_restore_exec_lo
+ s_mov_b32 exec_hi, s_restore_exec_hi
+
+ s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_PRE_SAVECTX_MASK, s_restore_trapsts
s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_PRE_SAVECTX_SIZE), s_restore_m0
- s_setreg_b32 hwreg(HW_REG_SHADER_XNACK_MASK), s_restore_xnack_mask //restore xnack_mask
- s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK, s_restore_trapsts
- s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT
+ s_setreg_b32 hwreg(HW_REG_SHADER_XNACK_MASK), s_restore_xnack_mask
+ s_and_b32 s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK, s_restore_trapsts
+ s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT
s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT, SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE), s_restore_m0
- //s_setreg_b32 hwreg(HW_REG_TRAPSTS), s_restore_trapsts //don't overwrite SAVECTX bit as it may be set through external SAVECTX during restore
- s_setreg_b32 hwreg(HW_REG_MODE), s_restore_mode
- //reuse s_restore_m0 as a temp register
- s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_RCNT_MASK
- s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT
- s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_RCNT_SHIFT
- s_mov_b32 s_restore_tmp, 0x0 //IB_STS is zero
- s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0
- s_and_b32 s_restore_m0, s_restore_pc_hi, S_SAVE_PC_HI_FIRST_REPLAY_MASK
- s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
- s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT
- s_or_b32 s_restore_tmp, s_restore_tmp, s_restore_m0
- s_and_b32 s_restore_m0, s_restore_status, SQ_WAVE_STATUS_INST_ATC_MASK
- s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
- s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_tmp
- s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status
-
- s_barrier //barrier to ensure the readiness of LDS before access attemps from any other wave in the same TG //FIXME not performance-optimal at this time
-
-
-// s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
- s_rfe_b64 s_restore_pc_lo // s_restore_m0[0] is used to set STATUS.inst_atc
-
-
-/**************************************************************************/
-/* the END */
-/**************************************************************************/
-L_END_PGM:
+ s_setreg_b32 hwreg(HW_REG_MODE), s_restore_mode
+ s_and_b32 s_restore_m0, s_restore_tmp, S_SAVE_PC_HI_RCNT_MASK
+ s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_RCNT_SHIFT
+ s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_RCNT_SHIFT
+	s_mov_b32	s_restore_mode, 0x0	//reuse s_restore_mode as a temp to assemble the IB_STS value
+ s_or_b32 s_restore_mode, s_restore_mode, s_restore_m0
+ s_and_b32 s_restore_m0, s_restore_tmp, S_SAVE_PC_HI_FIRST_REPLAY_MASK
+ s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_FIRST_REPLAY_SHIFT
+ s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT
+ s_or_b32 s_restore_mode, s_restore_mode, s_restore_m0
+ s_and_b32 s_restore_m0, s_restore_tmp, S_SAVE_PC_HI_REPLAY_W64H_MASK
+ s_lshr_b32 s_restore_m0, s_restore_m0, S_SAVE_PC_HI_REPLAY_W64H_SHIFT
+ s_lshl_b32 s_restore_m0, s_restore_m0, SQ_WAVE_IB_STS_REPLAY_W64H_SHIFT
+ s_or_b32 s_restore_mode, s_restore_mode, s_restore_m0
+
+ s_and_b32 s_restore_m0, s_restore_status, SQ_WAVE_STATUS_INST_ATC_MASK
+ s_lshr_b32 s_restore_m0, s_restore_m0, SQ_WAVE_STATUS_INST_ATC_SHIFT
+ s_setreg_b32 hwreg(HW_REG_IB_STS), s_restore_mode
+
+ s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
+ s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
+	s_setreg_b32	hwreg(HW_REG_STATUS), s_restore_status	// SCC is included; it was changed by the preceding SALU instructions
+
+	s_barrier	//barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG
+
+ s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
+
+L_END_PGM:
s_endpgm
-
-end
-
-
-/**************************************************************************/
-/* the helper functions */
-/**************************************************************************/
-function write_sgpr_to_mem_wave32(s, s_rsrc, s_mem_offset, use_sqc, use_mtbuf)
- if (use_sqc)
- s_mov_b32 exec_lo, m0 //assuming exec_lo is not needed anymore from this point on
- s_mov_b32 m0, s_mem_offset
- s_buffer_store_dword s, s_rsrc, m0 glc:1
- s_add_u32 s_mem_offset, s_mem_offset, 4
- s_mov_b32 m0, exec_lo
- elsif (use_mtbuf)
- v_mov_b32 v0, s
- tbuffer_store_format_x v0, v0, s_rsrc, s_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- s_add_u32 s_mem_offset, s_mem_offset, 128
- else
- v_mov_b32 v0, s
- buffer_store_dword v0, v0, s_rsrc, s_mem_offset slc:1 glc:1
- s_add_u32 s_mem_offset, s_mem_offset, 128
- end
end
-function write_sgpr_to_mem_wave64(s, s_rsrc, s_mem_offset, use_sqc, use_mtbuf)
- if (use_sqc)
- s_mov_b32 exec_lo, m0 //assuming exec_lo is not needed anymore from this point on
- s_mov_b32 m0, s_mem_offset
- s_buffer_store_dword s, s_rsrc, m0 glc:1
- s_add_u32 s_mem_offset, s_mem_offset, 4
- s_mov_b32 m0, exec_lo
- elsif (use_mtbuf)
- v_mov_b32 v0, s
- tbuffer_store_format_x v0, v0, s_rsrc, s_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- s_add_u32 s_mem_offset, s_mem_offset, 256
- else
- v_mov_b32 v0, s
- buffer_store_dword v0, v0, s_rsrc, s_mem_offset slc:1 glc:1
- s_add_u32 s_mem_offset, s_mem_offset, 256
- end
+function write_hwreg_to_mem(s, s_rsrc, s_mem_offset)
+ s_mov_b32 exec_lo, m0
+ s_mov_b32 m0, s_mem_offset
+ s_buffer_store_dword s, s_rsrc, m0 glc:1
+ s_add_u32 s_mem_offset, s_mem_offset, 4
+ s_mov_b32 m0, exec_lo
+end
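+// s_buffer_store takes its offset from m0 here, so the live m0 is parked
+// in exec_lo for the duration of the store (exec_lo is assumed free at
+// this point in the save path, as in the gfx8/gfx9 handlers).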
+
+
+function write_16sgpr_to_mem(s, s_rsrc, s_mem_offset)
+ s_buffer_store_dwordx4 s[0], s_rsrc, 0 glc:1
+ s_buffer_store_dwordx4 s[4], s_rsrc, 16 glc:1
+ s_buffer_store_dwordx4 s[8], s_rsrc, 32 glc:1
+ s_buffer_store_dwordx4 s[12], s_rsrc, 48 glc:1
+ s_add_u32 s_rsrc[0], s_rsrc[0], 4*16
+ s_addc_u32 s_rsrc[1], s_rsrc[1], 0x0
+end
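+// Note: this helper advances the buffer descriptor's base address by
+// 4*16 bytes instead of bumping s_mem_offset, because the four dwordx4
+// stores above use immediate offsets 0/16/32/48.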
+
+function write_12sgpr_to_mem(s, s_rsrc, s_mem_offset)
+ s_buffer_store_dwordx4 s[0], s_rsrc, 0 glc:1
+ s_buffer_store_dwordx4 s[4], s_rsrc, 16 glc:1
+ s_buffer_store_dwordx4 s[8], s_rsrc, 32 glc:1
+ s_add_u32 s_rsrc[0], s_rsrc[0], 4*12
+ s_addc_u32 s_rsrc[1], s_rsrc[1], 0x0
+end
+
+
+function read_hwreg_from_mem(s, s_rsrc, s_mem_offset)
+ s_buffer_load_dword s, s_rsrc, s_mem_offset glc:1
+ s_add_u32 s_mem_offset, s_mem_offset, 4
end
-function read_sgpr_from_mem_wave32(s, s_rsrc, s_mem_offset, use_sqc)
- s_buffer_load_dword s, s_rsrc, s_mem_offset glc:1
- if (use_sqc)
- s_add_u32 s_mem_offset, s_mem_offset, 4
- else
- s_add_u32 s_mem_offset, s_mem_offset, 128
- end
+function read_16sgpr_from_mem(s, s_rsrc, s_mem_offset)
+ s_sub_u32 s_mem_offset, s_mem_offset, 4*16
+ s_buffer_load_dwordx16 s, s_rsrc, s_mem_offset glc:1
end
-function read_sgpr_from_mem_wave64(s, s_rsrc, s_mem_offset, use_sqc)
- s_buffer_load_dword s, s_rsrc, s_mem_offset glc:1
- if (use_sqc)
- s_add_u32 s_mem_offset, s_mem_offset, 4
- else
- s_add_u32 s_mem_offset, s_mem_offset, 256
- end
+function read_8sgpr_from_mem(s, s_rsrc, s_mem_offset)
+ s_sub_u32 s_mem_offset, s_mem_offset, 4*8
+ s_buffer_load_dwordx8 s, s_rsrc, s_mem_offset glc:1
end
+function read_4sgpr_from_mem(s, s_rsrc, s_mem_offset)
+ s_sub_u32 s_mem_offset, s_mem_offset, 4*4
+ s_buffer_load_dwordx4 s, s_rsrc, s_mem_offset glc:1
+end
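+// Each read_*sgpr_from_mem helper pre-decrements s_mem_offset by its
+// group size before loading; paired with the descending m0 in
+// L_RESTORE_SGPR, the last-saved group is the first one restored.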
+
+
+function get_lds_size_bytes(s_lds_size_byte)
+ s_getreg_b32 s_lds_size_byte, hwreg(HW_REG_LDS_ALLOC, SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT, SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE)
+	s_lshl_b32	s_lds_size_byte, s_lds_size_byte, 8	//LDS size in bytes = lds_size * 64 DW * 4 bytes (allocation granularity: 64 DW)
+end
+
+function get_vgpr_size_bytes(s_vgpr_size_byte, s_size)
+ s_getreg_b32 s_vgpr_size_byte, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE)
+ s_add_u32 s_vgpr_size_byte, s_vgpr_size_byte, 1
+ s_lshr_b32 m0, s_size, S_WAVE_SIZE
+ s_and_b32 m0, m0, 1
+ s_cmp_eq_u32 m0, 1
+ s_cbranch_scc1 L_ENABLE_SHIFT_W64
+	s_lshl_b32	s_vgpr_size_byte, s_vgpr_size_byte, (2+7)	//VGPR save size in bytes = (vgpr_size + 1) * 4 VGPRs * 32 lanes * 4 bytes (wave32, non-zero value)
+ s_branch L_SHIFT_DONE
+L_ENABLE_SHIFT_W64:
+	s_lshl_b32	s_vgpr_size_byte, s_vgpr_size_byte, (2+8)	//VGPR save size in bytes = (vgpr_size + 1) * 4 VGPRs * 64 lanes * 4 bytes (wave64, non-zero value)
+L_SHIFT_DONE:
+end
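+// The shift amounts fold the per-VGPR footprint into one operation:
+// granularity of 4 VGPRs (<<2) times 128 bytes/VGPR in wave32 (<<7) or
+// 256 bytes/VGPR in wave64 (<<8).  E.g. (assumed) vgpr_size=7 in wave32:
+// (7+1) << 9 = 4096 bytes.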
+
+function get_svgpr_size_bytes(s_svgpr_size_byte)
+ s_getreg_b32 s_svgpr_size_byte, hwreg(HW_REG_LDS_ALLOC,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SHIFT,SQ_WAVE_LDS_ALLOC_VGPR_SHARED_SIZE_SIZE)
+ s_lshl_b32 s_svgpr_size_byte, s_svgpr_size_byte, (3+7)
+end
+
+function get_sgpr_size_bytes
+ return 512
+end
+
+function get_hwreg_size_bytes
+ return 128
+end
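+// Fixed sizes: the SGPR area always reserves 512 bytes (128 SGPRs * 4
+// bytes) even though s108~s127 are never written, and the HWREG area
+// reserves 128 bytes (32 dwords) for M0/PC/EXEC/STATUS/TRAPSTS/
+// XNACK_MASK/MODE/FLAT_SCRATCH.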
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
index a47f5b933120..b195b7cd8a17 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
@@ -24,78 +24,6 @@
* PROJECT=vi ./sp3 cwsr_trap_handler_gfx8.asm -hex tmp.hex
*/
-/* HW (VI) source code for CWSR trap handler */
-/* Version 18 + multiple trap handler */
-
-// this performance-optimal version was originally from Seven Xu at SRDC
-
-// Revison #18 --...
-/* Rev History
-** #1. Branch from gc dv. //gfxip/gfx8/main/src/test/suites/block/cs/sr/cs_trap_handler.sp3#1,#50, #51, #52-53(Skip, Already Fixed by PV), #54-56(merged),#57-58(mergerd, skiped-already fixed by PV)
-** #4. SR Memory Layout:
-** 1. VGPR-SGPR-HWREG-{LDS}
-** 2. tba_hi.bits.26 - reconfigured as the first wave in tg bits, for defer Save LDS for a threadgroup.. performance concern..
-** #5. Update: 1. Accurate g8sr_ts_save_d timestamp
-** #6. Update: 1. Fix s_barrier usage; 2. VGPR s/r using swizzle buffer?(NoNeed, already matched the swizzle pattern, more investigation)
-** #7. Update: 1. don't barrier if noLDS
-** #8. Branch: 1. Branch to ver#0, which is very similar to gc dv version
-** 2. Fix SQ issue by s_sleep 2
-** #9. Update: 1. Fix scc restore failed issue, restore wave_status at last
-** 2. optimize s_buffer save by burst 16sgprs...
-** #10. Update 1. Optimize restore sgpr by busrt 16 sgprs.
-** #11. Update 1. Add 2 more timestamp for debug version
-** #12. Update 1. Add VGPR SR using DWx4, some case improve and some case drop performance
-** #13. Integ 1. Always use MUBUF for PV trap shader...
-** #14. Update 1. s_buffer_store soft clause...
-** #15. Update 1. PERF - sclar write with glc:0/mtype0 to allow L2 combine. perf improvement a lot.
-** #16. Update 1. PRRF - UNROLL LDS_DMA got 2500cycle save in IP tree
-** #17. Update 1. FUNC - LDS_DMA has issues while ATC, replace with ds_read/buffer_store for save part[TODO restore part]
-** 2. PERF - Save LDS before save VGPR to cover LDS save long latency...
-** #18. Update 1. FUNC - Implicitly estore STATUS.VCCZ, which is not writable by s_setreg_b32
-** 2. FUNC - Handle non-CWSR traps
-*/
-
-var G8SR_WDMEM_HWREG_OFFSET = 0
-var G8SR_WDMEM_SGPR_OFFSET = 128 // in bytes
-
-// Keep definition same as the app shader, These 2 time stamps are part of the app shader... Should before any Save and after restore.
-
-var G8SR_DEBUG_TIMESTAMP = 0
-var G8SR_DEBUG_TS_SAVE_D_OFFSET = 40*4 // ts_save_d timestamp offset relative to SGPR_SR_memory_offset
-var s_g8sr_ts_save_s = s[34:35] // save start
-var s_g8sr_ts_sq_save_msg = s[36:37] // The save shader send SAVEWAVE msg to spi
-var s_g8sr_ts_spi_wrexec = s[38:39] // the SPI write the sr address to SQ
-var s_g8sr_ts_save_d = s[40:41] // save end
-var s_g8sr_ts_restore_s = s[42:43] // restore start
-var s_g8sr_ts_restore_d = s[44:45] // restore end
-
-var G8SR_VGPR_SR_IN_DWX4 = 0
-var G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 = 0x00100000 // DWx4 stride is 4*4Bytes
-var G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 = G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4
-
-
-/*************************************************************************/
-/* control on how to run the shader */
-/*************************************************************************/
-//any hack that needs to be made to run this code in EMU (either because various EMU code are not ready or no compute save & restore in EMU run)
-var EMU_RUN_HACK = 0
-var EMU_RUN_HACK_RESTORE_NORMAL = 0
-var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0
-var EMU_RUN_HACK_SAVE_SINGLE_WAVE = 0
-var EMU_RUN_HACK_SAVE_FIRST_TIME = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK
-var EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_LO = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK
-var EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_HI = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK
-var SAVE_LDS = 1
-var WG_BASE_ADDR_LO = 0x9000a000
-var WG_BASE_ADDR_HI = 0x0
-var WAVE_SPACE = 0x5000 //memory size that each wave occupies in workgroup state mem
-var CTX_SAVE_CONTROL = 0x0
-var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL
-var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either because various RTL code are not ready or no compute save & restore in RTL run)
-var SGPR_SAVE_USE_SQC = 1 //use SQC D$ to do the write
-var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //because TC EMU currently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
-var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing
-
/**************************************************************************/
/* variables */
/**************************************************************************/
@@ -226,16 +154,7 @@ shader main
type(CS)
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) //hack to use trap_id for determining save/restore
- //FIXME VCCZ un-init assertion s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
- s_and_b32 s_save_tmp, s_save_pc_hi, 0xffff0000 //change SCC
- s_cmp_eq_u32 s_save_tmp, 0x007e0000 //Save: trap_id = 0x7e. Restore: trap_id = 0x7f.
- s_cbranch_scc0 L_JUMP_TO_RESTORE //do not need to recover STATUS here since we are going to RESTORE
- //FIXME s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //need to recover STATUS since we are going to SAVE
- s_branch L_SKIP_RESTORE //NOT restore, SAVE actually
- else
s_branch L_SKIP_RESTORE //NOT restore. might be a regular trap or save
- end
L_JUMP_TO_RESTORE:
s_branch L_RESTORE //restore
@@ -249,7 +168,7 @@ L_SKIP_RESTORE:
s_cbranch_scc1 L_SAVE //this is the operation for save
// ********* Handle non-CWSR traps *******************
-if (!EMU_RUN_HACK)
+
/* read tba and tma for next level trap handler, ttmp4 is used as s_save_status */
s_load_dwordx4 [ttmp8,ttmp9,ttmp10, ttmp11], [tma_lo,tma_hi], 0
s_waitcnt lgkmcnt(0)
@@ -268,7 +187,7 @@ L_EXCP_CASE:
s_and_b32 ttmp1, ttmp1, 0xFFFF
set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC)
s_rfe_b64 [ttmp0, ttmp1]
-end
+
// ********* End handling of non-CWSR traps *******************
/**************************************************************************/
@@ -276,12 +195,6 @@ end
/**************************************************************************/
L_SAVE:
-
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_save_s
- s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
-end
-
s_mov_b32 s_save_tmp, 0 //clear saveCtx bit
s_setreg_b32 hwreg(HW_REG_TRAPSTS, SQ_WAVE_TRAPSTS_SAVECTX_SHIFT, 1), s_save_tmp //clear saveCtx bit
@@ -303,16 +216,7 @@ end
s_mov_b32 s_save_exec_hi, exec_hi
s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_sq_save_msg
- s_waitcnt lgkmcnt(0)
-end
-
- if (EMU_RUN_HACK)
-
- else
s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC
- end
// Set SPI_PRIO=2 to avoid starving instruction fetch in the waves we're waiting for.
s_or_b32 s_save_tmp, s_save_status, (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT)
@@ -321,36 +225,9 @@ end
L_SLEEP:
     s_sleep 0x2 // sleeping 1 (64 clk) is not enough for 8 waves per SIMD and would cause an SQ hang: the 7th/8th wave cannot get arbitration to execute instructions while the other waves are stuck in this sleep loop waiting for wrexec!=0
- if (EMU_RUN_HACK)
-
- else
s_cbranch_execz L_SLEEP
- end
-
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_spi_wrexec
- s_waitcnt lgkmcnt(0)
-end
/* setup Resource Contants */
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE))
- //calculate wd_addr using absolute thread id
- v_readlane_b32 s_save_tmp, v9, 0
- s_lshr_b32 s_save_tmp, s_save_tmp, 6
- s_mul_i32 s_save_tmp, s_save_tmp, WAVE_SPACE
- s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
- s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
- s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
- else
- end
- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_SINGLE_WAVE))
- s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
- s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
- s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
- else
- end
-
-
s_mov_b32 s_save_buf_rsrc0, s_save_spi_init_lo //base_addr_lo
s_and_b32 s_save_buf_rsrc1, s_save_spi_init_hi, 0x0000FFFF //base_addr_hi
s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE
@@ -383,22 +260,10 @@ end
s_mov_b32 s_save_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) //M0
-
- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME))
- s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
- s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
- s_mov_b32 tba_lo, EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_LO
- s_mov_b32 tba_hi, EMU_RUN_HACK_SAVE_FIRST_TIME_TBA_HI
- end
-
write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset) //PC
write_hwreg_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset)
write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset) //EXEC
@@ -440,18 +305,8 @@ end
s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value)
- if (SGPR_SAVE_USE_SQC)
s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 2 //NUM_RECORDS in bytes
- else
- s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads)
- end
-
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
// backup s_save_buf_rsrc0,1 to s_save_pc_lo/hi, since write_16sgpr_to_mem function will change the rsrc0
//s_mov_b64 s_save_pc_lo, s_save_buf_rsrc0
@@ -490,30 +345,14 @@ end
s_mov_b32 exec_lo, 0xFFFFFFFF //need every thread from now on
s_mov_b32 exec_hi, 0xFFFFFFFF
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
// VGPR Allocated in 4-GPR granularity
-if G8SR_VGPR_SR_IN_DWX4
- // the const stride for DWx4 is 4*4 bytes
- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
-
- buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
-
- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes
-else
buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
-end
@@ -549,64 +388,10 @@ end
s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes()
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
s_mov_b32 m0, 0x0 //lds_offset initial value = 0
-var LDS_DMA_ENABLE = 0
-var UNROLL = 0
-if UNROLL==0 && LDS_DMA_ENABLE==1
- s_mov_b32 s3, 256*2
- s_nop 0
- s_nop 0
- s_nop 0
- L_SAVE_LDS_LOOP:
- //TODO: looks the 2 buffer_store/load clause for s/r will hurt performance.???
- if (SAVE_LDS) //SPI always alloc LDS space in 128DW granularity
- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 // first 64DW
- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW
- end
-
- s_add_u32 m0, m0, s3 //every buffer_store_lds does 256 bytes
- s_add_u32 s_save_mem_offset, s_save_mem_offset, s3 //mem offset increased by 256 bytes
- s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_SAVE_LDS_LOOP //LDS save is complete?
-
-elsif LDS_DMA_ENABLE==1 && UNROLL==1 // UNROOL , has ichace miss
- // store from higest LDS address to lowest
- s_mov_b32 s3, 256*2
- s_sub_u32 m0, s_save_alloc_size, s3
- s_add_u32 s_save_mem_offset, s_save_mem_offset, m0
- s_lshr_b32 s_save_alloc_size, s_save_alloc_size, 9 // how many 128 trunks...
- s_sub_u32 s_save_alloc_size, 128, s_save_alloc_size // store from higheset addr to lowest
- s_mul_i32 s_save_alloc_size, s_save_alloc_size, 6*4 // PC offset increment, each LDS save block cost 6*4 Bytes instruction
- s_add_u32 s_save_alloc_size, s_save_alloc_size, 3*4 //2is the below 2 inst...//s_addc and s_setpc
- s_nop 0
- s_nop 0
- s_nop 0 //pad 3 dw to let LDS_DMA align with 64Bytes
- s_getpc_b64 s[0:1] // reuse s[0:1], since s[0:1] already saved
- s_add_u32 s0, s0,s_save_alloc_size
- s_addc_u32 s1, s1, 0
- s_setpc_b64 s[0:1]
-
-
- for var i =0; i< 128; i++
- // be careful to make here a 64Byte aligned address, which could improve performance...
- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:0 // first 64DW
- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW
-
- if i!=127
- s_sub_u32 m0, m0, s3 // use a sgpr to shrink 2DW-inst to 1DW inst to improve performance , i.e. pack more LDS_DMA inst to one Cacheline
- s_sub_u32 s_save_mem_offset, s_save_mem_offset, s3
- end
- end
-
-else // BUFFER_STORE
v_mbcnt_lo_u32_b32 v2, 0xffffffff, 0x0
v_mbcnt_hi_u32_b32 v3, 0xffffffff, v2 // tid
v_mul_i32_i24 v2, v3, 8 // tid*8
@@ -628,8 +413,6 @@ L_SAVE_LDS_LOOP_VECTOR:
// restore rsrc3
s_mov_b32 s_save_buf_rsrc3, s0
-end
-
L_SAVE_LDS_DONE:
@@ -647,44 +430,8 @@ L_SAVE_LDS_DONE:
s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) //FIXME for GFX, zero is possible
s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4)
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
-
- // VGPR Allocated in 4-GPR granularity
-
-if G8SR_VGPR_SR_IN_DWX4
- // the const stride for DWx4 is 4*4 bytes
- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
-
- s_mov_b32 m0, 4 // skip first 4 VGPRs
- s_cmp_lt_u32 m0, s_save_alloc_size
- s_cbranch_scc0 L_SAVE_VGPR_LOOP_END // no more vgprs
- s_set_gpr_idx_on m0, 0x1 // This will change M0
- s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 // because above inst change m0
-L_SAVE_VGPR_LOOP:
- v_mov_b32 v0, v0 // v0 = v[0+m0]
- v_mov_b32 v1, v1
- v_mov_b32 v2, v2
- v_mov_b32 v3, v3
-
-
- buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
- s_add_u32 m0, m0, 4
- s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4
- s_cmp_lt_u32 m0, s_save_alloc_size
- s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete?
- s_set_gpr_idx_off
-L_SAVE_VGPR_LOOP_END:
-
- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes
-else
// VGPR store using dw burst
     s_mov_b32 m0, 0x4 //VGPR initial index value = 4
s_cmp_lt_u32 m0, s_save_alloc_size
@@ -700,52 +447,18 @@ else
     v_mov_b32 v2, v2 //v2 = v[2+m0]
     v_mov_b32 v3, v3 //v3 = v[3+m0]
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
- end
s_add_u32 m0, m0, 4 //next vgpr index
s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 //every buffer_store_dword does 256 bytes
s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete?
s_set_gpr_idx_off
-end
L_SAVE_VGPR_END:
-
-
-
-
-
-
- /* S_PGM_END_SAVED */ //FIXME graphics ONLY
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_NORMAL_EXIT))
- s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
- s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
- s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
- s_rfe_b64 s_save_pc_lo //Return to the main shader program
- else
- end
-
-// Save Done timestamp
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_save_d
- // SGPR SR memory offset : size(VGPR)
- get_vgpr_size_bytes(s_save_mem_offset)
- s_add_u32 s_save_mem_offset, s_save_mem_offset, G8SR_DEBUG_TS_SAVE_D_OFFSET
- s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
- // Need reset rsrc2??
- s_mov_b32 m0, s_save_mem_offset
- s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- s_buffer_store_dwordx2 s_g8sr_ts_save_d, s_save_buf_rsrc0, m0 glc:1
-end
-
-
s_branch L_END_PGM
@@ -756,27 +469,6 @@ end
L_RESTORE:
/* Setup Resource Contants */
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
- //calculate wd_addr using absolute thread id
- v_readlane_b32 s_restore_tmp, v9, 0
- s_lshr_b32 s_restore_tmp, s_restore_tmp, 6
- s_mul_i32 s_restore_tmp, s_restore_tmp, WAVE_SPACE
- s_add_i32 s_restore_spi_init_lo, s_restore_tmp, WG_BASE_ADDR_LO
- s_mov_b32 s_restore_spi_init_hi, WG_BASE_ADDR_HI
- s_and_b32 s_restore_spi_init_hi, s_restore_spi_init_hi, CTX_RESTORE_CONTROL
- else
- end
-
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_restore_s
- s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
- // tma_lo/hi are sgpr 110, 111, which will not used for 112 SGPR allocated case...
- s_mov_b32 s_restore_pc_lo, s_g8sr_ts_restore_s[0]
- s_mov_b32 s_restore_pc_hi, s_g8sr_ts_restore_s[1] //backup ts to ttmp0/1, sicne exec will be finally restored..
-end
-
-
-
s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo
s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi
s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
@@ -818,18 +510,12 @@ end
s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes() //FIXME, Check if offset overflow???
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
s_mov_b32 m0, 0x0 //lds_offset initial value = 0
L_RESTORE_LDS_LOOP:
- if (SAVE_LDS)
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:256 // second 64DW
- end
s_add_u32 m0, m0, 256*2 // 128 DW
s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*2 //mem offset increased by 128DW
s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
@@ -848,40 +534,8 @@ end
s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4)
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
-if G8SR_VGPR_SR_IN_DWX4
- get_vgpr_size_bytes(s_restore_mem_offset)
- s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
-
- // the const stride for DWx4 is 4*4 bytes
- s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
-
- s_mov_b32 m0, s_restore_alloc_size
- s_set_gpr_idx_on m0, 0x8 // Note.. This will change m0
-
-L_RESTORE_VGPR_LOOP:
- buffer_load_dwordx4 v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
- s_waitcnt vmcnt(0)
- s_sub_u32 m0, m0, 4
- v_mov_b32 v0, v0 // v[0+m0] = v0
- v_mov_b32 v1, v1
- v_mov_b32 v2, v2
- v_mov_b32 v3, v3
- s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
- s_cmp_eq_u32 m0, 0x8000
- s_cbranch_scc0 L_RESTORE_VGPR_LOOP
- s_set_gpr_idx_off
-
- s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE // const stride to 4*4 bytes
-
-else
+
// VGPR load using dw burst
s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore start with v1, v0 will be the last
s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
@@ -890,14 +544,10 @@ else
s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 0x8000 //add 0x8000 since we compare m0 against it later
L_RESTORE_VGPR_LOOP:
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256
buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*2
buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*3
- end
s_waitcnt vmcnt(0) //ensure data ready
v_mov_b32 v0, v0 //v[0+m0] = v0
v_mov_b32 v1, v1
@@ -909,16 +559,10 @@ else
s_cbranch_scc1 L_RESTORE_VGPR_LOOP //VGPR restore (except v0) is complete?
s_set_gpr_idx_off
/* VGPR restore on v0 */
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1
buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256
buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2
buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3
- end
-
-end
/* restore SGPRs */
//////////////////////////////
@@ -934,16 +578,8 @@ end
s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value)
- if (SGPR_SAVE_USE_SQC)
s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 2 //NUM_RECORDS in bytes
- else
- s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads)
- end
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
 /* If 112 SGPRs are allocated, 4 sgprs are not used: TBA(108,109), TMA(110,111),
However, we are safe to restore these 4 SGPRs anyway, since TBA,TMA will later be restored by HWREG
@@ -972,12 +608,6 @@ end
//////////////////////////////
L_RESTORE_HWREG:
-
-if G8SR_DEBUG_TIMESTAMP
- s_mov_b32 s_g8sr_ts_restore_s[0], s_restore_pc_lo
- s_mov_b32 s_g8sr_ts_restore_s[1], s_restore_pc_hi
-end
-
// HWREG SR memory offset : size(VGPR)+size(SGPR)
get_vgpr_size_bytes(s_restore_mem_offset)
get_sgpr_size_bytes(s_restore_tmp)
@@ -985,11 +615,7 @@ end
s_mov_b32 s_restore_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset) //M0
read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset) //PC
@@ -1006,16 +632,6 @@ end
s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS
- //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
- s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore)
- s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over
- end
- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_RESTORE_NORMAL))
- s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 4 //pc[31:0]+4 // save is hack through s_trap but restore is normal
- s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over
- end
-
s_mov_b32 m0, s_restore_m0
s_mov_b32 exec_lo, s_restore_exec_lo
s_mov_b32 exec_hi, s_restore_exec_hi
@@ -1048,11 +664,6 @@ end
s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_restore_d
- s_waitcnt lgkmcnt(0)
-end
-
// s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
s_rfe_restore_b64 s_restore_pc_lo, s_restore_m0 // s_restore_m0[0] is used to set STATUS.inst_atc
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index 6bae2e022c6e..75f29d13c90f 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -24,76 +24,9 @@
* PROJECT=greenland ./sp3 cwsr_trap_handler_gfx9.asm -hex tmp.hex
*/
-/* HW (GFX9) source code for CWSR trap handler */
-/* Version 18 + multiple trap handler */
-
-// this performance-optimal version was originally from Seven Xu at SRDC
-
-// Revison #18 --...
-/* Rev History
-** #1. Branch from gc dv. //gfxip/gfx9/main/src/test/suites/block/cs/sr/cs_trap_handler.sp3#1,#50, #51, #52-53(Skip, Already Fixed by PV), #54-56(merged),#57-58(mergerd, skiped-already fixed by PV)
-** #4. SR Memory Layout:
-** 1. VGPR-SGPR-HWREG-{LDS}
-** 2. tba_hi.bits.26 - reconfigured as the first wave in tg bits, for defer Save LDS for a threadgroup.. performance concern..
-** #5. Update: 1. Accurate g8sr_ts_save_d timestamp
-** #6. Update: 1. Fix s_barrier usage; 2. VGPR s/r using swizzle buffer?(NoNeed, already matched the swizzle pattern, more investigation)
-** #7. Update: 1. don't barrier if noLDS
-** #8. Branch: 1. Branch to ver#0, which is very similar to gc dv version
-** 2. Fix SQ issue by s_sleep 2
-** #9. Update: 1. Fix scc restore failed issue, restore wave_status at last
-** 2. optimize s_buffer save by burst 16sgprs...
-** #10. Update 1. Optimize restore sgpr by busrt 16 sgprs.
-** #11. Update 1. Add 2 more timestamp for debug version
-** #12. Update 1. Add VGPR SR using DWx4, some case improve and some case drop performance
-** #13. Integ 1. Always use MUBUF for PV trap shader...
-** #14. Update 1. s_buffer_store soft clause...
-** #15. Update 1. PERF - sclar write with glc:0/mtype0 to allow L2 combine. perf improvement a lot.
-** #16. Update 1. PRRF - UNROLL LDS_DMA got 2500cycle save in IP tree
-** #17. Update 1. FUNC - LDS_DMA has issues while ATC, replace with ds_read/buffer_store for save part[TODO restore part]
-** 2. PERF - Save LDS before save VGPR to cover LDS save long latency...
-** #18. Update 1. FUNC - Implicitly estore STATUS.VCCZ, which is not writable by s_setreg_b32
-** 2. FUNC - Handle non-CWSR traps
-*/
-
-var G8SR_WDMEM_HWREG_OFFSET = 0
-var G8SR_WDMEM_SGPR_OFFSET = 128 // in bytes
-
-// Keep definition same as the app shader, These 2 time stamps are part of the app shader... Should before any Save and after restore.
-
-var G8SR_DEBUG_TIMESTAMP = 0
-var G8SR_DEBUG_TS_SAVE_D_OFFSET = 40*4 // ts_save_d timestamp offset relative to SGPR_SR_memory_offset
-var s_g8sr_ts_save_s = s[34:35] // save start
-var s_g8sr_ts_sq_save_msg = s[36:37] // The save shader send SAVEWAVE msg to spi
-var s_g8sr_ts_spi_wrexec = s[38:39] // the SPI write the sr address to SQ
-var s_g8sr_ts_save_d = s[40:41] // save end
-var s_g8sr_ts_restore_s = s[42:43] // restore start
-var s_g8sr_ts_restore_d = s[44:45] // restore end
-
-var G8SR_VGPR_SR_IN_DWX4 = 0
-var G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 = 0x00100000 // DWx4 stride is 4*4Bytes
-var G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 = G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4
-
-
-/*************************************************************************/
-/* control on how to run the shader */
-/*************************************************************************/
-//any hack that needs to be made to run this code in EMU (either because various EMU code are not ready or no compute save & restore in EMU run)
-var EMU_RUN_HACK = 0
-var EMU_RUN_HACK_RESTORE_NORMAL = 0
-var EMU_RUN_HACK_SAVE_NORMAL_EXIT = 0
-var EMU_RUN_HACK_SAVE_SINGLE_WAVE = 0
-var EMU_RUN_HACK_SAVE_FIRST_TIME = 0 //for interrupted restore in which the first save is through EMU_RUN_HACK
-var SAVE_LDS = 1
-var WG_BASE_ADDR_LO = 0x9000a000
-var WG_BASE_ADDR_HI = 0x0
-var WAVE_SPACE = 0x5000 //memory size that each wave occupies in workgroup state mem
-var CTX_SAVE_CONTROL = 0x0
-var CTX_RESTORE_CONTROL = CTX_SAVE_CONTROL
-var SIM_RUN_HACK = 0 //any hack that needs to be made to run this code in SIM (either because various RTL code are not ready or no compute save & restore in RTL run)
-var SGPR_SAVE_USE_SQC = 1 //use SQC D$ to do the write
-var USE_MTBUF_INSTEAD_OF_MUBUF = 0 //because TC EMU currently asserts on 0 of // overload DFMT field to carry 4 more bits of stride for MUBUF opcodes
-var SWIZZLE_EN = 0 //whether we use swizzled buffer addressing
var ACK_SQC_STORE = 1 //workaround for suspected SQC store bug causing incorrect stores under concurrency
+var SAVE_AFTER_XNACK_ERROR = 1 //workaround for TCP store failure after XNACK error when ALLOW_REPLAY=0, for debugger
+var SINGLE_STEP_MISSED_WORKAROUND = 1 //workaround for lost MODE.DEBUG_EN exception when SAVECTX raised
/**************************************************************************/
/* variables */
@@ -107,6 +40,7 @@ var SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT = 0
var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE = 1
var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT = 3
var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE = 29
+var SQ_WAVE_STATUS_ALLOW_REPLAY_MASK = 0x400000
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
@@ -127,12 +61,15 @@ var SQ_WAVE_TRAPSTS_POST_SAVECTX_MASK = 0xFFFFF800
var SQ_WAVE_TRAPSTS_POST_SAVECTX_SHIFT = 11
var SQ_WAVE_TRAPSTS_POST_SAVECTX_SIZE = 21
var SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK = 0x800
+var SQ_WAVE_TRAPSTS_XNACK_ERROR_MASK = 0x10000000
var SQ_WAVE_IB_STS_RCNT_SHIFT = 16 //FIXME
var SQ_WAVE_IB_STS_FIRST_REPLAY_SHIFT = 15 //FIXME
var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK = 0x1F8000
var SQ_WAVE_IB_STS_RCNT_FIRST_REPLAY_MASK_NEG = 0x00007FFF //FIXME
+var SQ_WAVE_MODE_DEBUG_EN_MASK = 0x800
+
var SQ_BUF_RSRC_WORD1_ATC_SHIFT = 24
var SQ_BUF_RSRC_WORD3_MTYPE_SHIFT = 27
@@ -197,13 +134,15 @@ var s_restore_spi_init_lo = exec_lo
var s_restore_spi_init_hi = exec_hi
var s_restore_mem_offset = ttmp12
+var s_restore_accvgpr_offset = ttmp13
var s_restore_alloc_size = ttmp3
var s_restore_tmp = ttmp2
var s_restore_mem_offset_save = s_restore_tmp //no conflict
+var s_restore_accvgpr_offset_save = ttmp7
var s_restore_m0 = s_restore_alloc_size //no conflict
-var s_restore_mode = ttmp7
+var s_restore_mode = s_restore_accvgpr_offset_save
var s_restore_pc_lo = ttmp0
var s_restore_pc_hi = ttmp1
@@ -226,20 +165,11 @@ var s_restore_ttmps_hi = s_restore_alloc_size //no conflict
/* Shader Main*/
shader main
- asic(GFX9)
+ asic(DEFAULT)
type(CS)
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL)) //hack to use trap_id for determining save/restore
- //FIXME VCCZ un-init assertion s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
- s_and_b32 s_save_tmp, s_save_pc_hi, 0xffff0000 //change SCC
- s_cmp_eq_u32 s_save_tmp, 0x007e0000 //Save: trap_id = 0x7e. Restore: trap_id = 0x7f.
- s_cbranch_scc0 L_JUMP_TO_RESTORE //do not need to recover STATUS here since we are going to RESTORE
- //FIXME s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //need to recover STATUS since we are going to SAVE
- s_branch L_SKIP_RESTORE //NOT restore, SAVE actually
- else
s_branch L_SKIP_RESTORE //NOT restore. might be a regular trap or save
- end
L_JUMP_TO_RESTORE:
s_branch L_RESTORE //restore
@@ -248,12 +178,29 @@ L_SKIP_RESTORE:
s_getreg_b32 s_save_status, hwreg(HW_REG_STATUS) //save STATUS since we will change SCC
s_andn2_b32 s_save_status, s_save_status, SQ_WAVE_STATUS_SPI_PRIO_MASK //check whether this is for save
+
+if SINGLE_STEP_MISSED_WORKAROUND
+ // No single step exceptions if MODE.DEBUG_EN=0.
+ s_getreg_b32 ttmp2, hwreg(HW_REG_MODE)
+ s_and_b32 ttmp2, ttmp2, SQ_WAVE_MODE_DEBUG_EN_MASK
+ s_cbranch_scc0 L_NO_SINGLE_STEP_WORKAROUND
+
+ // Second-level trap already handled exception if STATUS.HALT=1.
+ s_and_b32 ttmp2, s_save_status, SQ_WAVE_STATUS_HALT_MASK
+
+ // Prioritize single step exception over context save.
+ // Second-level trap will halt wave and RFE, re-entering for SAVECTX.
+ s_cbranch_scc0 L_FETCH_2ND_TRAP
+
+L_NO_SINGLE_STEP_WORKAROUND:
+end
+
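
The two s_cbranch decisions above read awkwardly because SCC is set as a side effect of the preceding s_and_b32. A minimal C model of the same decision, assuming the usual STATUS.HALT mask value (0x2000; it is defined elsewhere in this file, outside this hunk):

    #include <stdbool.h>
    #include <stdint.h>

    #define SQ_WAVE_MODE_DEBUG_EN_MASK 0x800u   /* defined above */
    #define SQ_WAVE_STATUS_HALT_MASK   0x2000u  /* assumed value */

    /* True when the trap must be routed to the second-level handler
     * first: single-stepping is enabled and the wave has not already
     * been halted by a previous second-level pass. */
    static bool defer_to_second_level_trap(uint32_t mode, uint32_t status)
    {
        if (!(mode & SQ_WAVE_MODE_DEBUG_EN_MASK))
            return false;   /* no single-step exceptions possible */
        if (status & SQ_WAVE_STATUS_HALT_MASK)
            return false;   /* already handled; proceed to SAVECTX check */
        return true;
    }
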
s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS)
s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_SAVECTX_MASK //check whether this is for save
s_cbranch_scc1 L_SAVE //this is the operation for save
// ********* Handle non-CWSR traps *******************
-if (!EMU_RUN_HACK)
+
// Illegal instruction is a non-maskable exception which blocks context save.
// Halt the wavefront and return from the trap.
s_and_b32 ttmp2, s_save_trapsts, SQ_WAVE_TRAPSTS_ILLEGAL_INST_MASK
@@ -330,7 +277,7 @@ L_EXCP_CASE:
set_status_without_spi_prio(s_save_status, ttmp2)
s_rfe_b64 [ttmp0, ttmp1]
-end
+
// ********* End handling of non-CWSR traps *******************
/**************************************************************************/
@@ -338,12 +285,6 @@ end
/**************************************************************************/
L_SAVE:
-
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_save_s
- s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
-end
-
s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
s_mov_b32 s_save_tmp, 0 //clear saveCtx bit
@@ -365,16 +306,7 @@ end
s_mov_b32 s_save_exec_hi, exec_hi
s_mov_b64 exec, 0x0 //clear EXEC to get ready to receive
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_sq_save_msg
- s_waitcnt lgkmcnt(0)
-end
-
- if (EMU_RUN_HACK)
-
- else
s_sendmsg sendmsg(MSG_SAVEWAVE) //send SPI a message and wait for SPI's write to EXEC
- end
// Set SPI_PRIO=2 to avoid starving instruction fetch in the waves we're waiting for.
s_or_b32 s_save_tmp, s_save_status, (2 << SQ_WAVE_STATUS_SPI_PRIO_SHIFT)
@@ -383,33 +315,7 @@ end
L_SLEEP:
    s_sleep 0x2 // sleep 1 (64clk) is not enough for 8 waves per SIMD and would hang SQ: the 7th/8th waves cannot win arbitration to execute instructions while the other waves sit in the sleep loop waiting for wrexec!=0
- if (EMU_RUN_HACK)
-
- else
s_cbranch_execz L_SLEEP
- end
-
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_spi_wrexec
- s_waitcnt lgkmcnt(0)
-end
-
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_SINGLE_WAVE))
- //calculate wd_addr using absolute thread id
- v_readlane_b32 s_save_tmp, v9, 0
- s_lshr_b32 s_save_tmp, s_save_tmp, 6
- s_mul_i32 s_save_tmp, s_save_tmp, WAVE_SPACE
- s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
- s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
- s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
- else
- end
- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_SINGLE_WAVE))
- s_add_i32 s_save_spi_init_lo, s_save_tmp, WG_BASE_ADDR_LO
- s_mov_b32 s_save_spi_init_hi, WG_BASE_ADDR_HI
- s_and_b32 s_save_spi_init_hi, s_save_spi_init_hi, CTX_SAVE_CONTROL
- else
- end
// Save trap temporaries 4-11, 13 initialized by SPI debug dispatch logic
// ttmp SR memory offset : size(VGPR)+size(SGPR)+0x40
@@ -459,20 +365,10 @@ end
s_mov_b32 s_save_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
write_hwreg_to_mem(s_save_m0, s_save_buf_rsrc0, s_save_mem_offset) //M0
-
- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_SAVE_FIRST_TIME))
- s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
- s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
- end
-
write_hwreg_to_mem(s_save_pc_lo, s_save_buf_rsrc0, s_save_mem_offset) //PC
write_hwreg_to_mem(s_save_pc_hi, s_save_buf_rsrc0, s_save_mem_offset)
write_hwreg_to_mem(s_save_exec_lo, s_save_buf_rsrc0, s_save_mem_offset) //EXEC
@@ -510,17 +406,9 @@ end
s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value)
- if (SGPR_SAVE_USE_SQC)
s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 2 //NUM_RECORDS in bytes
- else
- s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads)
- end
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
// backup s_save_buf_rsrc0,1 to s_save_pc_lo/hi, since write_16sgpr_to_mem function will change the rsrc0
@@ -563,30 +451,25 @@ end
s_mov_b32 xnack_mask_lo, 0x0
s_mov_b32 xnack_mask_hi, 0x0
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
// VGPR Allocated in 4-GPR granularity
-if G8SR_VGPR_SR_IN_DWX4
- // the const stride for DWx4 is 4*4 bytes
- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
+if SAVE_AFTER_XNACK_ERROR
+ check_if_tcp_store_ok()
+ s_cbranch_scc1 L_SAVE_FIRST_VGPRS_WITH_TCP
+
+ write_vgprs_to_mem_with_sqc(v0, 4, s_save_buf_rsrc0, s_save_mem_offset)
+ s_branch L_SAVE_LDS
- buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+L_SAVE_FIRST_VGPRS_WITH_TCP:
+end
- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes
-else
buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
-end
@@ -621,66 +504,34 @@ end
s_add_u32 s_save_mem_offset, s_save_mem_offset, get_hwreg_size_bytes()
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
s_mov_b32 m0, 0x0 //lds_offset initial value = 0
-var LDS_DMA_ENABLE = 0
-var UNROLL = 0
-if UNROLL==0 && LDS_DMA_ENABLE==1
- s_mov_b32 s3, 256*2
- s_nop 0
- s_nop 0
- s_nop 0
- L_SAVE_LDS_LOOP:
- //TODO: looks the 2 buffer_store/load clause for s/r will hurt performance.???
- if (SAVE_LDS) //SPI always alloc LDS space in 128DW granularity
- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 // first 64DW
- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW
- end
-
- s_add_u32 m0, m0, s3 //every buffer_store_lds does 256 bytes
- s_add_u32 s_save_mem_offset, s_save_mem_offset, s3 //mem offset increased by 256 bytes
- s_cmp_lt_u32 m0, s_save_alloc_size //scc=(m0 < s_save_alloc_size) ? 1 : 0
- s_cbranch_scc1 L_SAVE_LDS_LOOP //LDS save is complete?
-
-elsif LDS_DMA_ENABLE==1 && UNROLL==1 // UNROOL , has ichace miss
- // store from higest LDS address to lowest
- s_mov_b32 s3, 256*2
- s_sub_u32 m0, s_save_alloc_size, s3
- s_add_u32 s_save_mem_offset, s_save_mem_offset, m0
- s_lshr_b32 s_save_alloc_size, s_save_alloc_size, 9 // how many 128 trunks...
- s_sub_u32 s_save_alloc_size, 128, s_save_alloc_size // store from higheset addr to lowest
- s_mul_i32 s_save_alloc_size, s_save_alloc_size, 6*4 // PC offset increment, each LDS save block cost 6*4 Bytes instruction
- s_add_u32 s_save_alloc_size, s_save_alloc_size, 3*4 //2is the below 2 inst...//s_addc and s_setpc
- s_nop 0
- s_nop 0
- s_nop 0 //pad 3 dw to let LDS_DMA align with 64Bytes
- s_getpc_b64 s[0:1] // reuse s[0:1], since s[0:1] already saved
- s_add_u32 s0, s0,s_save_alloc_size
- s_addc_u32 s1, s1, 0
- s_setpc_b64 s[0:1]
-
-
- for var i =0; i< 128; i++
- // be careful to make here a 64Byte aligned address, which could improve performance...
- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:0 // first 64DW
- buffer_store_lds_dword s_save_buf_rsrc0, s_save_mem_offset lds:1 offset:256 // second 64DW
-
- if i!=127
- s_sub_u32 m0, m0, s3 // use a sgpr to shrink 2DW-inst to 1DW inst to improve performance , i.e. pack more LDS_DMA inst to one Cacheline
- s_sub_u32 s_save_mem_offset, s_save_mem_offset, s3
- end
- end
-
-else // BUFFER_STORE
v_mbcnt_lo_u32_b32 v2, 0xffffffff, 0x0
v_mbcnt_hi_u32_b32 v3, 0xffffffff, v2 // tid
+
+if SAVE_AFTER_XNACK_ERROR
+ check_if_tcp_store_ok()
+ s_cbranch_scc1 L_SAVE_LDS_WITH_TCP
+
+ v_lshlrev_b32 v2, 2, v3
+L_SAVE_LDS_LOOP_SQC:
+ ds_read2_b32 v[0:1], v2 offset0:0 offset1:0x40
+ s_waitcnt lgkmcnt(0)
+
+ write_vgprs_to_mem_with_sqc(v0, 2, s_save_buf_rsrc0, s_save_mem_offset)
+
+ v_add_u32 v2, 0x200, v2
+ v_cmp_lt_u32 vcc[0:1], v2, s_save_alloc_size
+ s_cbranch_vccnz L_SAVE_LDS_LOOP_SQC
+
+ s_branch L_SAVE_LDS_DONE
+
+L_SAVE_LDS_WITH_TCP:
+end
+
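
The 0x200 step in v2 is not arbitrary: each pass of the SQC loop drains two dwords per lane (offset1:0x40 is in dword units, so the two reads sit 0x100 bytes apart), and across a 64-lane wave that is 2 * 64 * 4 = 0x200 bytes of LDS. A compile-time check of that arithmetic, assuming wave64:

    /* assumed: wave64, 4-byte dwords, two reads per ds_read2_b32 */
    enum { LDS_READS = 2, WAVE_LANES = 64, DWORD_BYTES = 4 };
    _Static_assert(LDS_READS * WAVE_LANES * DWORD_BYTES == 0x200,
                   "loop stride must match ds_read2 coverage");
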
v_mul_i32_i24 v2, v3, 8 // tid*8
v_mov_b32 v3, 256*2
s_mov_b32 m0, 0x10000
@@ -701,8 +552,6 @@ L_SAVE_LDS_LOOP_VECTOR:
// restore rsrc3
s_mov_b32 s_save_buf_rsrc3, s0
-end
-
L_SAVE_LDS_DONE:
@@ -720,44 +569,9 @@ L_SAVE_LDS_DONE:
s_add_u32 s_save_alloc_size, s_save_alloc_size, 1
s_lshl_b32 s_save_alloc_size, s_save_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value) //FIXME for GFX, zero is possible
s_lshl_b32 s_save_buf_rsrc2, s_save_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4)
- if (SWIZZLE_EN)
- s_add_u32 s_save_buf_rsrc2, s_save_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-
-
- // VGPR Allocated in 4-GPR granularity
-
-if G8SR_VGPR_SR_IN_DWX4
- // the const stride for DWx4 is 4*4 bytes
- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, G8SR_SAVE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
-
- s_mov_b32 m0, 4 // skip first 4 VGPRs
- s_cmp_lt_u32 m0, s_save_alloc_size
- s_cbranch_scc0 L_SAVE_VGPR_LOOP_END // no more vgprs
-
- s_set_gpr_idx_on m0, 0x1 // This will change M0
- s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 // because above inst change m0
-L_SAVE_VGPR_LOOP:
- v_mov_b32 v0, v0 // v0 = v[0+m0]
- v_mov_b32 v1, v1
- v_mov_b32 v2, v2
- v_mov_b32 v3, v3
-
- buffer_store_dwordx4 v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
- s_add_u32 m0, m0, 4
- s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4
- s_cmp_lt_u32 m0, s_save_alloc_size
- s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete?
- s_set_gpr_idx_off
-L_SAVE_VGPR_LOOP_END:
- s_and_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_save_buf_rsrc1, s_save_buf_rsrc1, S_SAVE_BUF_RSRC_WORD1_STRIDE // reset const stride to 4 bytes
-else
// VGPR store using dw burst
    s_mov_b32 m0, 0x4 //VGPR initial index value = 4 (v0..v3 were saved above)
s_cmp_lt_u32 m0, s_save_alloc_size
@@ -767,57 +581,82 @@ else
s_set_gpr_idx_on m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1
s_add_u32 s_save_alloc_size, s_save_alloc_size, 0x1000 //add 0x1000 since we compare m0 against it later
+if SAVE_AFTER_XNACK_ERROR
+ check_if_tcp_store_ok()
+ s_cbranch_scc1 L_SAVE_VGPR_LOOP
+
+L_SAVE_VGPR_LOOP_SQC:
+ write_vgprs_to_mem_with_sqc(v0, 4, s_save_buf_rsrc0, s_save_mem_offset)
+
+ s_add_u32 m0, m0, 4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc1 L_SAVE_VGPR_LOOP_SQC
+
+ s_set_gpr_idx_off
+ s_branch L_SAVE_VGPR_END
+end
+
L_SAVE_VGPR_LOOP:
v_mov_b32 v0, v0 //v0 = v[0+m0]
v_mov_b32 v1, v1 //v0 = v[0+m0]
v_mov_b32 v2, v2 //v0 = v[0+m0]
v_mov_b32 v3, v3 //v0 = v[0+m0]
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_store_format_x v0, v0, s_save_buf_rsrc0, s_save_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
- end
s_add_u32 m0, m0, 4 //next vgpr index
s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4 //every buffer_store_dword does 256 bytes
s_cmp_lt_u32 m0, s_save_alloc_size //scc = (m0 < s_save_alloc_size) ? 1 : 0
s_cbranch_scc1 L_SAVE_VGPR_LOOP //VGPR save is complete?
s_set_gpr_idx_off
-end
L_SAVE_VGPR_END:
+if ASIC_TARGET_ARCTURUS
+ // Save ACC VGPRs
+ s_mov_b32 m0, 0x0 //VGPR initial index value =0
+ s_set_gpr_idx_on m0, 0x1 //M0[7:0] = M0[7:0] and M0[15:12] = 0x1
+
+if SAVE_AFTER_XNACK_ERROR
+ check_if_tcp_store_ok()
+ s_cbranch_scc1 L_SAVE_ACCVGPR_LOOP
+L_SAVE_ACCVGPR_LOOP_SQC:
+ for var vgpr = 0; vgpr < 4; ++ vgpr
+ v_accvgpr_read v[vgpr], acc[vgpr] // v[N] = acc[N+m0]
+ end
+ write_vgprs_to_mem_with_sqc(v0, 4, s_save_buf_rsrc0, s_save_mem_offset)
+ s_add_u32 m0, m0, 4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc1 L_SAVE_ACCVGPR_LOOP_SQC
+ s_set_gpr_idx_off
+ s_branch L_SAVE_ACCVGPR_END
+end
- /* S_PGM_END_SAVED */ //FIXME graphics ONLY
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_SAVE_NORMAL_EXIT))
- s_and_b32 s_save_pc_hi, s_save_pc_hi, 0x0000ffff //pc[47:32]
- s_add_u32 s_save_pc_lo, s_save_pc_lo, 4 //pc[31:0]+4
- s_addc_u32 s_save_pc_hi, s_save_pc_hi, 0x0 //carry bit over
- s_rfe_b64 s_save_pc_lo //Return to the main shader program
- else
+L_SAVE_ACCVGPR_LOOP:
+ for var vgpr = 0; vgpr < 4; ++ vgpr
+ v_accvgpr_read v[vgpr], acc[vgpr] // v[N] = acc[N+m0]
end
-// Save Done timestamp
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_save_d
- // SGPR SR memory offset : size(VGPR)
- get_vgpr_size_bytes(s_save_mem_offset)
- s_add_u32 s_save_mem_offset, s_save_mem_offset, G8SR_DEBUG_TS_SAVE_D_OFFSET
- s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
- // Need reset rsrc2??
- s_mov_b32 m0, s_save_mem_offset
- s_mov_b32 s_save_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- s_buffer_store_dwordx2 s_g8sr_ts_save_d, s_save_buf_rsrc0, m0 glc:1
-end
+ buffer_store_dword v0, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1
+ buffer_store_dword v1, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256
+ buffer_store_dword v2, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*2
+ buffer_store_dword v3, v0, s_save_buf_rsrc0, s_save_mem_offset slc:1 glc:1 offset:256*3
+ s_add_u32 m0, m0, 4
+ s_add_u32 s_save_mem_offset, s_save_mem_offset, 256*4
+ s_cmp_lt_u32 m0, s_save_alloc_size
+ s_cbranch_scc1 L_SAVE_ACCVGPR_LOOP
+ s_set_gpr_idx_off
+
+L_SAVE_ACCVGPR_END:
+end
s_branch L_END_PGM
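
The ACC VGPR save area is simply appended after the architectural VGPRs, one 256-byte slot (64 lanes x 4 bytes) per register; that is why the restore path below can seed its ACC offset with the architectural-VGPR byte size. A sketch of the layout arithmetic, for illustration only:

    #include <stdint.h>

    #define VGPR_SLOT_BYTES (64u * 4u)  /* one register: 64 lanes x 4 bytes */

    /* Byte offset of ACC VGPR 'n' in a wave's save area, assuming
     * n_arch_vgprs architectural VGPRs were written first. */
    static uint32_t accvgpr_offset(uint32_t n_arch_vgprs, uint32_t n)
    {
        return (n_arch_vgprs + n) * VGPR_SLOT_BYTES;
    }
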
@@ -829,27 +668,6 @@ end
L_RESTORE:
    /* Setup Resource Constants */
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
- //calculate wd_addr using absolute thread id
- v_readlane_b32 s_restore_tmp, v9, 0
- s_lshr_b32 s_restore_tmp, s_restore_tmp, 6
- s_mul_i32 s_restore_tmp, s_restore_tmp, WAVE_SPACE
- s_add_i32 s_restore_spi_init_lo, s_restore_tmp, WG_BASE_ADDR_LO
- s_mov_b32 s_restore_spi_init_hi, WG_BASE_ADDR_HI
- s_and_b32 s_restore_spi_init_hi, s_restore_spi_init_hi, CTX_RESTORE_CONTROL
- else
- end
-
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_restore_s
- s_waitcnt lgkmcnt(0) //FIXME, will cause xnack??
- // tma_lo/hi are sgpr 110, 111, which will not used for 112 SGPR allocated case...
- s_mov_b32 s_restore_pc_lo, s_g8sr_ts_restore_s[0]
- s_mov_b32 s_restore_pc_hi, s_g8sr_ts_restore_s[1] //backup ts to ttmp0/1, sicne exec will be finally restored..
-end
-
-
-
s_mov_b32 s_restore_buf_rsrc0, s_restore_spi_init_lo //base_addr_lo
s_and_b32 s_restore_buf_rsrc1, s_restore_spi_init_hi, 0x0000FFFF //base_addr_hi
s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE
@@ -891,18 +709,12 @@ end
s_add_u32 s_restore_mem_offset, s_restore_mem_offset, get_hwreg_size_bytes() //FIXME, Check if offset overflow???
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
s_mov_b32 m0, 0x0 //lds_offset initial value = 0
L_RESTORE_LDS_LOOP:
- if (SAVE_LDS)
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 // first 64DW
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset lds:1 offset:256 // second 64DW
- end
s_add_u32 m0, m0, 256*2 // 128 DW
s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*2 //mem offset increased by 128DW
s_cmp_lt_u32 m0, s_restore_alloc_size //scc=(m0 < s_restore_alloc_size) ? 1 : 0
@@ -921,56 +733,43 @@ end
s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 2 //Number of VGPRs = (vgpr_size + 1) * 4 (non-zero value)
s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads*4)
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
+
+if ASIC_TARGET_ARCTURUS
+ s_mov_b32 s_restore_accvgpr_offset, s_restore_buf_rsrc2 //ACC VGPRs at end of VGPRs
+end
+
s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
-if G8SR_VGPR_SR_IN_DWX4
- get_vgpr_size_bytes(s_restore_mem_offset)
- s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
-
- // the const stride for DWx4 is 4*4 bytes
- s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, G8SR_RESTORE_BUF_RSRC_WORD1_STRIDE_DWx4 // const stride to 4*4 bytes
-
- s_mov_b32 m0, s_restore_alloc_size
- s_set_gpr_idx_on m0, 0x8 // Note.. This will change m0
-
-L_RESTORE_VGPR_LOOP:
- buffer_load_dwordx4 v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
- s_waitcnt vmcnt(0)
- s_sub_u32 m0, m0, 4
- v_mov_b32 v0, v0 // v[0+m0] = v0
- v_mov_b32 v1, v1
- v_mov_b32 v2, v2
- v_mov_b32 v3, v3
- s_sub_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
- s_cmp_eq_u32 m0, 0x8000
- s_cbranch_scc0 L_RESTORE_VGPR_LOOP
- s_set_gpr_idx_off
-
- s_and_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, 0x0000FFFF // reset const stride to 0
- s_or_b32 s_restore_buf_rsrc1, s_restore_buf_rsrc1, S_RESTORE_BUF_RSRC_WORD1_STRIDE // const stride to 4*4 bytes
-
-else
// VGPR load using dw burst
    s_mov_b32 s_restore_mem_offset_save, s_restore_mem_offset // restore starts at v4; v0..v3 are restored last
s_add_u32 s_restore_mem_offset, s_restore_mem_offset, 256*4
+if ASIC_TARGET_ARCTURUS
+ s_mov_b32 s_restore_accvgpr_offset_save, s_restore_accvgpr_offset
+ s_add_u32 s_restore_accvgpr_offset, s_restore_accvgpr_offset, 256*4
+end
    s_mov_b32 m0, 4 //VGPR initial index value = 4
s_set_gpr_idx_on m0, 0x8 //M0[7:0] = M0[7:0] and M0[15:12] = 0x8
s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 0x8000 //add 0x8000 since we compare m0 against it later
L_RESTORE_VGPR_LOOP:
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
+
+if ASIC_TARGET_ARCTURUS
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset slc:1 glc:1
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset slc:1 glc:1 offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset slc:1 glc:1 offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset slc:1 glc:1 offset:256*3
+ s_add_u32 s_restore_accvgpr_offset, s_restore_accvgpr_offset, 256*4
+ s_waitcnt vmcnt(0)
+
+ for var vgpr = 0; vgpr < 4; ++ vgpr
+ v_accvgpr_write acc[vgpr], v[vgpr]
+ end
+end
+
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1
buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256
buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*2
buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset slc:1 glc:1 offset:256*3
- end
s_waitcnt vmcnt(0) //ensure data ready
v_mov_b32 v0, v0 //v[0+m0] = v0
v_mov_b32 v1, v1
@@ -982,16 +781,22 @@ else
s_cbranch_scc1 L_RESTORE_VGPR_LOOP //VGPR restore (except v0) is complete?
s_set_gpr_idx_off
/* VGPR restore on v0 */
- if(USE_MTBUF_INSTEAD_OF_MUBUF)
- tbuffer_load_format_x v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save format:BUF_NUM_FORMAT_FLOAT format: BUF_DATA_FORMAT_32 slc:1 glc:1
- else
+if ASIC_TARGET_ARCTURUS
+ buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset_save slc:1 glc:1
+ buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset_save slc:1 glc:1 offset:256
+ buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset_save slc:1 glc:1 offset:256*2
+ buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_accvgpr_offset_save slc:1 glc:1 offset:256*3
+ s_waitcnt vmcnt(0)
+
+ for var vgpr = 0; vgpr < 4; ++ vgpr
+ v_accvgpr_write acc[vgpr], v[vgpr]
+ end
+end
+
buffer_load_dword v0, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1
buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256
buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2
buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3
- end
-
-end
/* restore SGPRs */
//////////////////////////////
@@ -1007,16 +812,8 @@ end
s_add_u32 s_restore_alloc_size, s_restore_alloc_size, 1
s_lshl_b32 s_restore_alloc_size, s_restore_alloc_size, 4 //Number of SGPRs = (sgpr_size + 1) * 16 (non-zero value)
- if (SGPR_SAVE_USE_SQC)
s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 2 //NUM_RECORDS in bytes
- else
- s_lshl_b32 s_restore_buf_rsrc2, s_restore_alloc_size, 8 //NUM_RECORDS in bytes (64 threads)
- end
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
s_mov_b32 m0, s_restore_alloc_size
@@ -1044,11 +841,6 @@ end
L_RESTORE_HWREG:
-if G8SR_DEBUG_TIMESTAMP
- s_mov_b32 s_g8sr_ts_restore_s[0], s_restore_pc_lo
- s_mov_b32 s_g8sr_ts_restore_s[1], s_restore_pc_hi
-end
-
// HWREG SR memory offset : size(VGPR)+size(SGPR)
get_vgpr_size_bytes(s_restore_mem_offset)
get_sgpr_size_bytes(s_restore_tmp)
@@ -1056,11 +848,7 @@ end
s_mov_b32 s_restore_buf_rsrc2, 0x4 //NUM_RECORDS in bytes
- if (SWIZZLE_EN)
- s_add_u32 s_restore_buf_rsrc2, s_restore_buf_rsrc2, 0x0 //FIXME need to use swizzle to enable bounds checking?
- else
s_mov_b32 s_restore_buf_rsrc2, 0x1000000 //NUM_RECORDS in bytes
- end
read_hwreg_from_mem(s_restore_m0, s_restore_buf_rsrc0, s_restore_mem_offset) //M0
read_hwreg_from_mem(s_restore_pc_lo, s_restore_buf_rsrc0, s_restore_mem_offset) //PC
@@ -1075,16 +863,6 @@ end
s_waitcnt lgkmcnt(0) //from now on, it is safe to restore STATUS and IB_STS
- //for normal save & restore, the saved PC points to the next inst to execute, no adjustment needs to be made, otherwise:
- if ((EMU_RUN_HACK) && (!EMU_RUN_HACK_RESTORE_NORMAL))
- s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 8 //pc[31:0]+8 //two back-to-back s_trap are used (first for save and second for restore)
- s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over
- end
- if ((EMU_RUN_HACK) && (EMU_RUN_HACK_RESTORE_NORMAL))
- s_add_u32 s_restore_pc_lo, s_restore_pc_lo, 4 //pc[31:0]+4 // save is hack through s_trap but restore is normal
- s_addc_u32 s_restore_pc_hi, s_restore_pc_hi, 0x0 //carry bit over
- end
-
s_mov_b32 m0, s_restore_m0
s_mov_b32 exec_lo, s_restore_exec_lo
s_mov_b32 exec_hi, s_restore_exec_hi
@@ -1131,11 +909,6 @@ end
s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
-if G8SR_DEBUG_TIMESTAMP
- s_memrealtime s_g8sr_ts_restore_d
- s_waitcnt lgkmcnt(0)
-end
-
// s_rfe_b64 s_restore_pc_lo //Return to the main shader program and resume execution
s_rfe_restore_b64 s_restore_pc_lo, s_restore_m0 // s_restore_m0[0] is used to set STATUS.inst_atc
@@ -1190,7 +963,39 @@ function read_16sgpr_from_mem(s, s_rsrc, s_mem_offset)
s_sub_u32 s_mem_offset, s_mem_offset, 4*16
end
+function check_if_tcp_store_ok
+ // If STATUS.ALLOW_REPLAY=0 and TRAPSTS.XNACK_ERROR=1 then TCP stores will fail.
+ s_and_b32 s_save_tmp, s_save_status, SQ_WAVE_STATUS_ALLOW_REPLAY_MASK
+ s_cbranch_scc1 L_TCP_STORE_CHECK_DONE
+
+ s_getreg_b32 s_save_tmp, hwreg(HW_REG_TRAPSTS)
+ s_andn2_b32 s_save_tmp, SQ_WAVE_TRAPSTS_XNACK_ERROR_MASK, s_save_tmp
+
+L_TCP_STORE_CHECK_DONE:
+end
+
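
check_if_tcp_store_ok reports its result through SCC (SCC=1 means TCP stores are safe), which makes the predicate easy to restate in C; both masks are defined earlier in this file:

    #include <stdbool.h>
    #include <stdint.h>

    #define SQ_WAVE_STATUS_ALLOW_REPLAY_MASK 0x400000u
    #define SQ_WAVE_TRAPSTS_XNACK_ERROR_MASK 0x10000000u

    static bool tcp_store_ok(uint32_t status, uint32_t trapsts)
    {
        if (status & SQ_WAVE_STATUS_ALLOW_REPLAY_MASK)
            return true;    /* replays allowed; stores can be retried */
        /* ALLOW_REPLAY=0: stores fail only with a pending XNACK error */
        return !(trapsts & SQ_WAVE_TRAPSTS_XNACK_ERROR_MASK);
    }
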
+function write_vgpr_to_mem_with_sqc(v, s_rsrc, s_mem_offset)
+ s_mov_b32 s4, 0
+L_WRITE_VGPR_LANE_LOOP:
+ for var lane = 0; lane < 4; ++ lane
+ v_readlane_b32 s[lane], v, s4
+ s_add_u32 s4, s4, 1
+ end
+
+ s_buffer_store_dwordx4 s[0:3], s_rsrc, s_mem_offset glc:1
+ ack_sqc_store_workaround()
+
+ s_add_u32 s_mem_offset, s_mem_offset, 0x10
+ s_cmp_eq_u32 s4, 0x40
+ s_cbranch_scc0 L_WRITE_VGPR_LANE_LOOP
+end
+
+function write_vgprs_to_mem_with_sqc(v, n_vgprs, s_rsrc, s_mem_offset)
+ for var vgpr = 0; vgpr < n_vgprs; ++ vgpr
+ write_vgpr_to_mem_with_sqc(v[vgpr], s_rsrc, s_mem_offset)
+ end
+end
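
write_vgpr_to_mem_with_sqc drains one 64-lane VGPR through the scalar cache, four lanes per s_buffer_store_dwordx4, i.e. sixteen 16-byte stores per register. A host-side model of the same loop, illustrative only:

    #include <stdint.h>
    #include <string.h>

    /* Copy one VGPR (64 lanes of 4 bytes each) into the save buffer in
     * 16-byte chunks, mirroring the lane loop above. */
    static void write_vgpr_with_sqc(const uint32_t lanes[64],
                                    uint8_t *buf, uint32_t *offset)
    {
        for (unsigned int lane = 0; lane < 64; lane += 4) {
            memcpy(buf + *offset, &lanes[lane], 4 * sizeof(uint32_t));
            *offset += 0x10;    /* matches s_add_u32 s_mem_offset, ..., 0x10 */
        }
    }
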
function get_lds_size_bytes(s_lds_size_byte)
// SQ LDS granularity is 64DW, while PGM_RSRC2.lds_size is in granularity 128DW
@@ -1202,6 +1007,10 @@ function get_vgpr_size_bytes(s_vgpr_size_byte)
s_getreg_b32 s_vgpr_size_byte, hwreg(HW_REG_GPR_ALLOC,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SHIFT,SQ_WAVE_GPR_ALLOC_VGPR_SIZE_SIZE) //vpgr_size
s_add_u32 s_vgpr_size_byte, s_vgpr_size_byte, 1
    s_lshl_b32 s_vgpr_size_byte, s_vgpr_size_byte, (2+8) //save area bytes = (vgpr_size + 1) * 4 VGPRs * 64 lanes * 4 bytes (non-zero value) //FIXME for GFX, zero is possible
+
+if ASIC_TARGET_ARCTURUS
+ s_lshl_b32 s_vgpr_size_byte, s_vgpr_size_byte, 1 // Double size for ACC VGPRs
+end
end
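
The shift by (2+8) folds the whole size formula into one instruction: (vgpr_size + 1) granules of 4 VGPRs, times 64 lanes, times 4 bytes per lane is (vgpr_size + 1) << 10. Restated in C, including the Arcturus doubling:

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t vgpr_save_bytes(uint32_t vgpr_size_field, bool arcturus)
    {
        /* (field + 1) * 4 VGPRs * 64 lanes * 4 bytes == (field + 1) << 10 */
        uint32_t bytes = (vgpr_size_field + 1) << (2 + 8);

        if (arcturus)
            bytes <<= 1;    /* ACC VGPRs double the save area */
        return bytes;
    }
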
function get_sgpr_size_bytes(s_sgpr_size_byte)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 1d3cd5c50d5f..3f0300e53727 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -42,6 +42,7 @@
static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
+static int kfd_release(struct inode *, struct file *);
static int kfd_mmap(struct file *, struct vm_area_struct *);
static const char kfd_dev_name[] = "kfd";
@@ -49,8 +50,9 @@ static const char kfd_dev_name[] = "kfd";
static const struct file_operations kfd_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = kfd_ioctl,
- .compat_ioctl = kfd_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
.open = kfd_open,
+ .release = kfd_release,
.mmap = kfd_mmap,
};
@@ -124,8 +126,13 @@ static int kfd_open(struct inode *inode, struct file *filep)
if (IS_ERR(process))
return PTR_ERR(process);
- if (kfd_is_locked())
+ if (kfd_is_locked()) {
+ kfd_unref_process(process);
return -EAGAIN;
+ }
+
+ /* filep now owns the reference returned by kfd_create_process */
+ filep->private_data = process;
dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
process->pasid, process->is_32bit_user_mode);
@@ -133,6 +140,16 @@ static int kfd_open(struct inode *inode, struct file *filep)
return 0;
}
+static int kfd_release(struct inode *inode, struct file *filep)
+{
+ struct kfd_process *process = filep->private_data;
+
+ if (process)
+ kfd_unref_process(process);
+
+ return 0;
+}
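
The new kfd_unref_process() call in the kfd_is_locked() path follows the usual get/put discipline: kfd_create_process() returns a counted reference, open() parks it in filep->private_data, and from then on release() owns the put, so every error exit between the get and the handoff must drop the reference itself. A minimal model of that flow (names hypothetical):

    #include <errno.h>

    struct proc { int refs; };

    static void proc_unref(struct proc *p) { p->refs--; }

    /* Mirrors kfd_open(): the counted reference either transfers to the
     * file or is dropped on the error path -- never leaked. */
    static int file_open(struct proc *p, struct proc **private_data,
                         int locked)
    {
        if (locked) {
            proc_unref(p);      /* early exit: drop the reference */
            return -EAGAIN;
        }
        *private_data = p;      /* the file now owns the reference */
        return 0;
    }

    /* Mirrors kfd_release(): drop the file's reference, if any. */
    static void file_release(struct proc **private_data)
    {
        if (*private_data)
            proc_unref(*private_data);
        *private_data = NULL;
    }
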
+
static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
void *data)
{
@@ -258,6 +275,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
unsigned int queue_id;
struct kfd_process_device *pdd;
struct queue_properties q_properties;
+ uint32_t doorbell_offset_in_process = 0;
memset(&q_properties, 0, sizeof(struct queue_properties));
@@ -282,11 +300,12 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
goto err_bind_process;
}
- pr_debug("Creating queue for PASID %d on gpu 0x%x\n",
+ pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n",
p->pasid,
dev->id);
- err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id);
+ err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, &queue_id,
+ &doorbell_offset_in_process);
if (err != 0)
goto err_create_queue;
@@ -296,14 +315,11 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
/* Return gpu_id as doorbell offset for mmap usage */
args->doorbell_offset = KFD_MMAP_TYPE_DOORBELL;
args->doorbell_offset |= KFD_MMAP_GPU_ID(args->gpu_id);
- args->doorbell_offset <<= PAGE_SHIFT;
if (KFD_IS_SOC15(dev->device_info->asic_family))
- /* On SOC15 ASICs, doorbell allocation must be
- * per-device, and independent from the per-process
- * queue_id. Return the doorbell offset within the
- * doorbell aperture to user mode.
+ /* On SOC15 ASICs, include the doorbell offset within the
+ * process doorbell frame, which is 2 pages.
*/
- args->doorbell_offset |= q_properties.doorbell_off;
+ args->doorbell_offset |= doorbell_offset_in_process;
mutex_unlock(&p->mutex);
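
Note the doorbell cookie is no longer shifted by PAGE_SHIFT before being returned; kfd_mmap() (further down) instead rebuilds a byte offset from vm_pgoff. A self-contained sketch of both directions, with the field positions assumed for illustration (the real masks live in kfd_priv.h):

    #include <stdint.h>

    #define PAGE_SHIFT_ASSUMED  12
    #define MMAP_TYPE_SHIFT     62                           /* assumed */
    #define MMAP_TYPE_MASK      (0x3ull << MMAP_TYPE_SHIFT)
    #define MMAP_TYPE_DOORBELL  (0x3ull << MMAP_TYPE_SHIFT)  /* assumed */
    #define MMAP_GPU_ID_SHIFT   46                           /* assumed */
    #define MMAP_GPU_ID_MASK    (0xFFFFull << MMAP_GPU_ID_SHIFT)

    static uint64_t encode_doorbell(uint32_t gpu_id, uint32_t in_process)
    {
        return MMAP_TYPE_DOORBELL
             | ((uint64_t)gpu_id << MMAP_GPU_ID_SHIFT)
             | in_process;  /* SOC15: offset inside the 2-page frame */
    }

    static void decode_mmap(uint64_t vm_pgoff, uint64_t *type,
                            uint32_t *gpu_id)
    {
        uint64_t mmap_offset = vm_pgoff << PAGE_SHIFT_ASSUMED;

        *type = mmap_offset & MMAP_TYPE_MASK;
        *gpu_id = (uint32_t)((mmap_offset & MMAP_GPU_ID_MASK)
                             >> MMAP_GPU_ID_SHIFT);
    }
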
@@ -332,7 +348,7 @@ static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
int retval;
struct kfd_ioctl_destroy_queue_args *args = data;
- pr_debug("Destroying queue id %d for pasid %d\n",
+ pr_debug("Destroying queue id %d for pasid 0x%x\n",
args->queue_id,
p->pasid);
@@ -378,7 +394,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
properties.queue_percent = args->queue_percentage;
properties.priority = args->queue_priority;
- pr_debug("Updating queue id %d for pasid %d\n",
+ pr_debug("Updating queue id %d for pasid 0x%x\n",
args->queue_id, p->pasid);
mutex_lock(&p->mutex);
@@ -855,7 +871,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
struct kfd_process_device_apertures *pAperture;
struct kfd_process_device *pdd;
- dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
+ dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
args->num_of_nodes = 0;
@@ -913,7 +929,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp,
uint32_t nodes = 0;
int ret;
- dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
+ dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid);
if (args->num_of_nodes == 0) {
	/* Return number of nodes, so that user space can allocate
@@ -1128,7 +1144,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep,
mutex_unlock(&p->mutex);
if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS &&
- pdd->qpd.vmid != 0)
+ pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
dev->kfd2kgd->set_scratch_backing_va(
dev->kgd, args->va_addr, pdd->qpd.vmid);
@@ -1312,10 +1328,9 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
/* MMIO is mapped through kfd device
* Generate a kfd mmap offset
*/
- if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP) {
- args->mmap_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(args->gpu_id);
- args->mmap_offset <<= PAGE_SHIFT;
- }
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)
+ args->mmap_offset = KFD_MMAP_TYPE_MMIO
+ | KFD_MMAP_GPU_ID(args->gpu_id);
return 0;
@@ -1801,11 +1816,16 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
} else
goto err_i1;
- dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);
+ dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg);
- process = kfd_get_process(current);
- if (IS_ERR(process)) {
- dev_dbg(kfd_device, "no process\n");
+ /* Get the process struct from the filep. Only the process
+ * that opened /dev/kfd can use the file descriptor. Child
+ * processes need to create their own KFD device context.
+ */
+ process = filep->private_data;
+ if (process->lead_thread != current->group_leader) {
+ dev_dbg(kfd_device, "Using KFD FD in wrong process\n");
+ retcode = -EBADF;
goto err_i1;
}
@@ -1856,7 +1876,8 @@ err_i1:
kfree(kdata);
if (retcode)
- dev_dbg(kfd_device, "ret = %d\n", retcode);
+ dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n",
+ nr, arg, retcode);
return retcode;
}
@@ -1877,7 +1898,7 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process,
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- pr_debug("Process %d mapping mmio page\n"
+ pr_debug("pasid 0x%x mapping mmio page\n"
" target user address == 0x%08llX\n"
" physical address == 0x%08llX\n"
" vm_flags == 0x%04lX\n"
@@ -1898,20 +1919,19 @@ static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct kfd_process *process;
struct kfd_dev *dev = NULL;
- unsigned long vm_pgoff;
+ unsigned long mmap_offset;
unsigned int gpu_id;
process = kfd_get_process(current);
if (IS_ERR(process))
return PTR_ERR(process);
- vm_pgoff = vma->vm_pgoff;
- vma->vm_pgoff = KFD_MMAP_OFFSET_VALUE_GET(vm_pgoff);
- gpu_id = KFD_MMAP_GPU_ID_GET(vm_pgoff);
+ mmap_offset = vma->vm_pgoff << PAGE_SHIFT;
+ gpu_id = KFD_MMAP_GET_GPU_ID(mmap_offset);
if (gpu_id)
dev = kfd_device_by_id(gpu_id);
- switch (vm_pgoff & KFD_MMAP_TYPE_MASK) {
+ switch (mmap_offset & KFD_MMAP_TYPE_MASK) {
case KFD_MMAP_TYPE_DOORBELL:
if (!dev)
return -ENODEV;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 4e3fc284f6ac..de9f68d5c312 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -138,6 +138,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = {
/* TODO - check & update Vega10 cache details */
#define vega10_cache_info carrizo_cache_info
#define raven_cache_info carrizo_cache_info
+#define renoir_cache_info carrizo_cache_info
/* TODO - check & update Navi10 cache details */
#define navi10_cache_info carrizo_cache_info
@@ -662,6 +663,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
pcache_info = vega10_cache_info;
num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
break;
@@ -669,7 +671,13 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
pcache_info = raven_cache_info;
num_of_cache_types = ARRAY_SIZE(raven_cache_info);
break;
+ case CHIP_RENOIR:
+ pcache_info = renoir_cache_info;
+ num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
+ break;
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
pcache_info = navi10_cache_info;
num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
break;
@@ -702,7 +710,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
pcache_info,
cu_info,
mem_available,
- cu_info->cu_bitmap[i][j],
+ cu_info->cu_bitmap[i % 4][j + i / 4],
ct,
cu_processor_id,
k);
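
The changed cu_bitmap indexing deserves a note: the bitmap is a fixed 4x4 array of u32, but newer ASICs can have more than four shader engines, so engines beyond the first four wrap into the otherwise unused columns. The remap as a helper, assuming the 4x4 layout:

    #include <stdint.h>

    /* SE 'se', SH 'sh' -> bitmap[se % 4][sh + se / 4]: engines 4..7
     * reuse the spare columns of the fixed 4x4 array. */
    static uint32_t cu_bitmap_word(const uint32_t bitmap[4][4],
                                   unsigned int se, unsigned int sh)
    {
        return bitmap[se % 4][sh + se / 4];
    }
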
@@ -788,7 +796,7 @@ int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
* is put in the code to ensure we don't overwrite.
*/
#define VCRAT_SIZE_FOR_CPU (2 * PAGE_SIZE)
-#define VCRAT_SIZE_FOR_GPU (3 * PAGE_SIZE)
+#define VCRAT_SIZE_FOR_GPU (4 * PAGE_SIZE)
/* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
*
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index a3441b0e385b..27bcc5b472f6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -72,11 +72,11 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
	 * The receive packet buffer will sit in the Indirect Buffer,
* and in the PQ we put the IB packet + sync packet(s).
*/
- status = kq->ops.acquire_packet_buffer(kq,
+ status = kq_acquire_packet_buffer(kq,
pq_packets_size_in_bytes / sizeof(uint32_t),
&ib_packet_buff);
if (status) {
- pr_err("acquire_packet_buffer failed\n");
+ pr_err("kq_acquire_packet_buffer failed\n");
return status;
}
@@ -115,7 +115,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
if (status) {
pr_err("Failed to allocate GART memory\n");
- kq->ops.rollback_packet(kq);
+ kq_rollback_packet(kq);
return status;
}
@@ -151,7 +151,7 @@ static int dbgdev_diq_submit_ib(struct kfd_dbgdev *dbgdev,
rm_packet->data_lo = QUEUESTATE__ACTIVE;
- kq->ops.submit_packet(kq);
+ kq_submit_packet(kq);
/* Wait till CP writes sync code: */
status = amdkfd_fence_wait_timeout(
@@ -185,7 +185,7 @@ static int dbgdev_register_diq(struct kfd_dbgdev *dbgdev)
properties.type = KFD_QUEUE_TYPE_DIQ;
status = pqm_create_queue(dbgdev->pqm, dbgdev->dev, NULL,
- &properties, &qid);
+ &properties, &qid, NULL);
if (status) {
pr_err("Failed to create DIQ\n");
@@ -761,6 +761,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
{
int status = 0;
unsigned int vmid;
+ uint16_t queried_pasid;
union SQ_CMD_BITS reg_sq_cmd;
union GRBM_GFX_INDEX_BITS reg_gfx_index;
struct kfd_process_device *pdd;
@@ -782,19 +783,18 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
*/
for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
- if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_valid
- (dev->kgd, vmid)) {
- if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid
- (dev->kgd, vmid) == p->pasid) {
- pr_debug("Killing wave fronts of vmid %d and pasid %d\n",
- vmid, p->pasid);
- break;
- }
+ status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
+ (dev->kgd, vmid, &queried_pasid);
+
+ if (status && queried_pasid == p->pasid) {
+ pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
+ vmid, p->pasid);
+ break;
}
}
if (vmid > last_vmid_to_scan) {
- pr_err("Didn't find vmid for pasid %d\n", p->pasid);
+ pr_err("Didn't find vmid for pasid 0x%x\n", p->pasid);
return -EFAULT;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
index 9d4af961c5d1..9bfa50633654 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c
@@ -96,7 +96,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev)
long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{
if (pmgr->pasid != 0) {
- pr_debug("H/W debugger is already active using pasid %d\n",
+ pr_debug("H/W debugger is already active using pasid 0x%x\n",
pmgr->pasid);
return -EBUSY;
}
@@ -117,7 +117,7 @@ long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p)
{
	/* Is the request coming from the already registered process? */
if (pmgr->pasid != p->pasid) {
- pr_debug("H/W debugger is not registered by calling pasid %d\n",
+ pr_debug("H/W debugger is not registered by calling pasid 0x%x\n",
p->pasid);
return -EINVAL;
}
@@ -134,7 +134,7 @@ long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr,
{
	/* Is the request coming from the already registered process? */
if (pmgr->pasid != wac_info->process->pasid) {
- pr_debug("H/W debugger support was not registered for requester pasid %d\n",
+ pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n",
wac_info->process->pasid);
return -EINVAL;
}
@@ -147,7 +147,7 @@ long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr,
{
	/* Is the request coming from the already registered process? */
if (pmgr->pasid != adw_info->process->pasid) {
- pr_debug("H/W debugger support was not registered for requester pasid %d\n",
+ pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n",
adw_info->process->pasid);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
index 15c523027285..511712c2e382 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -93,7 +93,7 @@ void kfd_debugfs_init(void)
kfd_debugfs_hqds_by_device, &kfd_debugfs_fops);
debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
kfd_debugfs_rls_by_device, &kfd_debugfs_fops);
- debugfs_create_file("hang_hws", S_IFREG | 0644, debugfs_root,
+ debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root,
NULL, &kfd_debugfs_hang_hws_fops);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 3322a443dfb2..2a9e40131735 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -39,9 +39,45 @@
*/
static atomic_t kfd_locked = ATOMIC_INIT(0);
+#ifdef CONFIG_DRM_AMDGPU_CIK
+extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
+#endif
+extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
+extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
+extern const struct kfd2kgd_calls arcturus_kfd2kgd;
+extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
+
+static const struct kfd2kgd_calls *kfd2kgd_funcs[] = {
+#ifdef KFD_SUPPORT_IOMMU_V2
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ [CHIP_KAVERI] = &gfx_v7_kfd2kgd,
+#endif
+ [CHIP_CARRIZO] = &gfx_v8_kfd2kgd,
+ [CHIP_RAVEN] = &gfx_v9_kfd2kgd,
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ [CHIP_HAWAII] = &gfx_v7_kfd2kgd,
+#endif
+ [CHIP_TONGA] = &gfx_v8_kfd2kgd,
+ [CHIP_FIJI] = &gfx_v8_kfd2kgd,
+ [CHIP_POLARIS10] = &gfx_v8_kfd2kgd,
+ [CHIP_POLARIS11] = &gfx_v8_kfd2kgd,
+ [CHIP_POLARIS12] = &gfx_v8_kfd2kgd,
+ [CHIP_VEGAM] = &gfx_v8_kfd2kgd,
+ [CHIP_VEGA10] = &gfx_v9_kfd2kgd,
+ [CHIP_VEGA12] = &gfx_v9_kfd2kgd,
+ [CHIP_VEGA20] = &gfx_v9_kfd2kgd,
+ [CHIP_RENOIR] = &gfx_v9_kfd2kgd,
+ [CHIP_ARCTURUS] = &arcturus_kfd2kgd,
+ [CHIP_NAVI10] = &gfx_v10_kfd2kgd,
+ [CHIP_NAVI12] = &gfx_v10_kfd2kgd,
+ [CHIP_NAVI14] = &gfx_v10_kfd2kgd,
+};
+
#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
.asic_family = CHIP_KAVERI,
+ .asic_name = "kaveri",
.max_pasid_bits = 16,
/* max num of queues for KV.TODO should be a dynamic value */
.max_no_of_hqd = 24,
@@ -60,6 +96,7 @@ static const struct kfd_device_info kaveri_device_info = {
static const struct kfd_device_info carrizo_device_info = {
.asic_family = CHIP_CARRIZO,
+ .asic_name = "carrizo",
.max_pasid_bits = 16,
/* max num of queues for CZ.TODO should be a dynamic value */
.max_no_of_hqd = 24,
@@ -78,6 +115,7 @@ static const struct kfd_device_info carrizo_device_info = {
static const struct kfd_device_info raven_device_info = {
.asic_family = CHIP_RAVEN,
+ .asic_name = "raven",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 8,
@@ -96,6 +134,7 @@ static const struct kfd_device_info raven_device_info = {
static const struct kfd_device_info hawaii_device_info = {
.asic_family = CHIP_HAWAII,
+ .asic_name = "hawaii",
.max_pasid_bits = 16,
/* max num of queues for KV.TODO should be a dynamic value */
.max_no_of_hqd = 24,
@@ -114,6 +153,7 @@ static const struct kfd_device_info hawaii_device_info = {
static const struct kfd_device_info tonga_device_info = {
.asic_family = CHIP_TONGA,
+ .asic_name = "tonga",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
@@ -131,6 +171,7 @@ static const struct kfd_device_info tonga_device_info = {
static const struct kfd_device_info fiji_device_info = {
.asic_family = CHIP_FIJI,
+ .asic_name = "fiji",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
@@ -148,6 +189,7 @@ static const struct kfd_device_info fiji_device_info = {
static const struct kfd_device_info fiji_vf_device_info = {
.asic_family = CHIP_FIJI,
+ .asic_name = "fiji",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
@@ -166,6 +208,7 @@ static const struct kfd_device_info fiji_vf_device_info = {
static const struct kfd_device_info polaris10_device_info = {
.asic_family = CHIP_POLARIS10,
+ .asic_name = "polaris10",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
@@ -183,6 +226,7 @@ static const struct kfd_device_info polaris10_device_info = {
static const struct kfd_device_info polaris10_vf_device_info = {
.asic_family = CHIP_POLARIS10,
+ .asic_name = "polaris10",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
@@ -200,6 +244,7 @@ static const struct kfd_device_info polaris10_vf_device_info = {
static const struct kfd_device_info polaris11_device_info = {
.asic_family = CHIP_POLARIS11,
+ .asic_name = "polaris11",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
@@ -217,6 +262,7 @@ static const struct kfd_device_info polaris11_device_info = {
static const struct kfd_device_info polaris12_device_info = {
.asic_family = CHIP_POLARIS12,
+ .asic_name = "polaris12",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
@@ -234,6 +280,7 @@ static const struct kfd_device_info polaris12_device_info = {
static const struct kfd_device_info vegam_device_info = {
.asic_family = CHIP_VEGAM,
+ .asic_name = "vegam",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
@@ -251,6 +298,7 @@ static const struct kfd_device_info vegam_device_info = {
static const struct kfd_device_info vega10_device_info = {
.asic_family = CHIP_VEGA10,
+ .asic_name = "vega10",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 8,
@@ -268,6 +316,7 @@ static const struct kfd_device_info vega10_device_info = {
static const struct kfd_device_info vega10_vf_device_info = {
.asic_family = CHIP_VEGA10,
+ .asic_name = "vega10",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 8,
@@ -285,6 +334,7 @@ static const struct kfd_device_info vega10_vf_device_info = {
static const struct kfd_device_info vega12_device_info = {
.asic_family = CHIP_VEGA12,
+ .asic_name = "vega12",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 8,
@@ -302,6 +352,7 @@ static const struct kfd_device_info vega12_device_info = {
static const struct kfd_device_info vega20_device_info = {
.asic_family = CHIP_VEGA20,
+ .asic_name = "vega20",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 8,
@@ -317,8 +368,45 @@ static const struct kfd_device_info vega20_device_info = {
.num_sdma_queues_per_engine = 8,
};
+static const struct kfd_device_info arcturus_device_info = {
+ .asic_family = CHIP_ARCTURUS,
+ .asic_name = "arcturus",
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 6,
+ .num_sdma_queues_per_engine = 8,
+};
+
+static const struct kfd_device_info renoir_device_info = {
+ .asic_family = CHIP_RENOIR,
+ .asic_name = "renoir",
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = false,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 1,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 2,
+};
+
static const struct kfd_device_info navi10_device_info = {
.asic_family = CHIP_NAVI10,
+ .asic_name = "navi10",
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 8,
@@ -334,130 +422,64 @@ static const struct kfd_device_info navi10_device_info = {
.num_sdma_queues_per_engine = 8,
};
-struct kfd_deviceid {
- unsigned short did;
- const struct kfd_device_info *device_info;
+static const struct kfd_device_info navi12_device_info = {
+ .asic_family = CHIP_NAVI12,
+ .asic_name = "navi12",
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
+};
+
+static const struct kfd_device_info navi14_device_info = {
+ .asic_family = CHIP_NAVI14,
+ .asic_name = "navi14",
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .needs_iommu_device = false,
+ .supports_cwsr = true,
+ .needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+ .num_xgmi_sdma_engines = 0,
+ .num_sdma_queues_per_engine = 8,
};
-static const struct kfd_deviceid supported_devices[] = {
+/* For each entry, [0] is regular and [1] is virtualisation device. */
+static const struct kfd_device_info *kfd_supported_devices[][2] = {
#ifdef KFD_SUPPORT_IOMMU_V2
- { 0x1304, &kaveri_device_info }, /* Kaveri */
- { 0x1305, &kaveri_device_info }, /* Kaveri */
- { 0x1306, &kaveri_device_info }, /* Kaveri */
- { 0x1307, &kaveri_device_info }, /* Kaveri */
- { 0x1309, &kaveri_device_info }, /* Kaveri */
- { 0x130A, &kaveri_device_info }, /* Kaveri */
- { 0x130B, &kaveri_device_info }, /* Kaveri */
- { 0x130C, &kaveri_device_info }, /* Kaveri */
- { 0x130D, &kaveri_device_info }, /* Kaveri */
- { 0x130E, &kaveri_device_info }, /* Kaveri */
- { 0x130F, &kaveri_device_info }, /* Kaveri */
- { 0x1310, &kaveri_device_info }, /* Kaveri */
- { 0x1311, &kaveri_device_info }, /* Kaveri */
- { 0x1312, &kaveri_device_info }, /* Kaveri */
- { 0x1313, &kaveri_device_info }, /* Kaveri */
- { 0x1315, &kaveri_device_info }, /* Kaveri */
- { 0x1316, &kaveri_device_info }, /* Kaveri */
- { 0x1317, &kaveri_device_info }, /* Kaveri */
- { 0x1318, &kaveri_device_info }, /* Kaveri */
- { 0x131B, &kaveri_device_info }, /* Kaveri */
- { 0x131C, &kaveri_device_info }, /* Kaveri */
- { 0x131D, &kaveri_device_info }, /* Kaveri */
- { 0x9870, &carrizo_device_info }, /* Carrizo */
- { 0x9874, &carrizo_device_info }, /* Carrizo */
- { 0x9875, &carrizo_device_info }, /* Carrizo */
- { 0x9876, &carrizo_device_info }, /* Carrizo */
- { 0x9877, &carrizo_device_info }, /* Carrizo */
- { 0x15DD, &raven_device_info }, /* Raven */
- { 0x15D8, &raven_device_info }, /* Raven */
+ [CHIP_KAVERI] = {&kaveri_device_info, NULL},
+ [CHIP_CARRIZO] = {&carrizo_device_info, NULL},
+ [CHIP_RAVEN] = {&raven_device_info, NULL},
#endif
- { 0x67A0, &hawaii_device_info }, /* Hawaii */
- { 0x67A1, &hawaii_device_info }, /* Hawaii */
- { 0x67A2, &hawaii_device_info }, /* Hawaii */
- { 0x67A8, &hawaii_device_info }, /* Hawaii */
- { 0x67A9, &hawaii_device_info }, /* Hawaii */
- { 0x67AA, &hawaii_device_info }, /* Hawaii */
- { 0x67B0, &hawaii_device_info }, /* Hawaii */
- { 0x67B1, &hawaii_device_info }, /* Hawaii */
- { 0x67B8, &hawaii_device_info }, /* Hawaii */
- { 0x67B9, &hawaii_device_info }, /* Hawaii */
- { 0x67BA, &hawaii_device_info }, /* Hawaii */
- { 0x67BE, &hawaii_device_info }, /* Hawaii */
- { 0x6920, &tonga_device_info }, /* Tonga */
- { 0x6921, &tonga_device_info }, /* Tonga */
- { 0x6928, &tonga_device_info }, /* Tonga */
- { 0x6929, &tonga_device_info }, /* Tonga */
- { 0x692B, &tonga_device_info }, /* Tonga */
- { 0x6938, &tonga_device_info }, /* Tonga */
- { 0x6939, &tonga_device_info }, /* Tonga */
- { 0x7300, &fiji_device_info }, /* Fiji */
- { 0x730F, &fiji_vf_device_info }, /* Fiji vf*/
- { 0x67C0, &polaris10_device_info }, /* Polaris10 */
- { 0x67C1, &polaris10_device_info }, /* Polaris10 */
- { 0x67C2, &polaris10_device_info }, /* Polaris10 */
- { 0x67C4, &polaris10_device_info }, /* Polaris10 */
- { 0x67C7, &polaris10_device_info }, /* Polaris10 */
- { 0x67C8, &polaris10_device_info }, /* Polaris10 */
- { 0x67C9, &polaris10_device_info }, /* Polaris10 */
- { 0x67CA, &polaris10_device_info }, /* Polaris10 */
- { 0x67CC, &polaris10_device_info }, /* Polaris10 */
- { 0x67CF, &polaris10_device_info }, /* Polaris10 */
- { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/
- { 0x67DF, &polaris10_device_info }, /* Polaris10 */
- { 0x6FDF, &polaris10_device_info }, /* Polaris10 */
- { 0x67E0, &polaris11_device_info }, /* Polaris11 */
- { 0x67E1, &polaris11_device_info }, /* Polaris11 */
- { 0x67E3, &polaris11_device_info }, /* Polaris11 */
- { 0x67E7, &polaris11_device_info }, /* Polaris11 */
- { 0x67E8, &polaris11_device_info }, /* Polaris11 */
- { 0x67E9, &polaris11_device_info }, /* Polaris11 */
- { 0x67EB, &polaris11_device_info }, /* Polaris11 */
- { 0x67EF, &polaris11_device_info }, /* Polaris11 */
- { 0x67FF, &polaris11_device_info }, /* Polaris11 */
- { 0x6980, &polaris12_device_info }, /* Polaris12 */
- { 0x6981, &polaris12_device_info }, /* Polaris12 */
- { 0x6985, &polaris12_device_info }, /* Polaris12 */
- { 0x6986, &polaris12_device_info }, /* Polaris12 */
- { 0x6987, &polaris12_device_info }, /* Polaris12 */
- { 0x6995, &polaris12_device_info }, /* Polaris12 */
- { 0x6997, &polaris12_device_info }, /* Polaris12 */
- { 0x699F, &polaris12_device_info }, /* Polaris12 */
- { 0x694C, &vegam_device_info }, /* VegaM */
- { 0x694E, &vegam_device_info }, /* VegaM */
- { 0x694F, &vegam_device_info }, /* VegaM */
- { 0x6860, &vega10_device_info }, /* Vega10 */
- { 0x6861, &vega10_device_info }, /* Vega10 */
- { 0x6862, &vega10_device_info }, /* Vega10 */
- { 0x6863, &vega10_device_info }, /* Vega10 */
- { 0x6864, &vega10_device_info }, /* Vega10 */
- { 0x6867, &vega10_device_info }, /* Vega10 */
- { 0x6868, &vega10_device_info }, /* Vega10 */
- { 0x6869, &vega10_device_info }, /* Vega10 */
- { 0x686A, &vega10_device_info }, /* Vega10 */
- { 0x686B, &vega10_device_info }, /* Vega10 */
- { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/
- { 0x686D, &vega10_device_info }, /* Vega10 */
- { 0x686E, &vega10_device_info }, /* Vega10 */
- { 0x686F, &vega10_device_info }, /* Vega10 */
- { 0x687F, &vega10_device_info }, /* Vega10 */
- { 0x69A0, &vega12_device_info }, /* Vega12 */
- { 0x69A1, &vega12_device_info }, /* Vega12 */
- { 0x69A2, &vega12_device_info }, /* Vega12 */
- { 0x69A3, &vega12_device_info }, /* Vega12 */
- { 0x69AF, &vega12_device_info }, /* Vega12 */
- { 0x66a0, &vega20_device_info }, /* Vega20 */
- { 0x66a1, &vega20_device_info }, /* Vega20 */
- { 0x66a2, &vega20_device_info }, /* Vega20 */
- { 0x66a3, &vega20_device_info }, /* Vega20 */
- { 0x66a4, &vega20_device_info }, /* Vega20 */
- { 0x66a7, &vega20_device_info }, /* Vega20 */
- { 0x66af, &vega20_device_info }, /* Vega20 */
- /* Navi10 */
- { 0x7310, &navi10_device_info }, /* Navi10 */
- { 0x7312, &navi10_device_info }, /* Navi10 */
- { 0x7318, &navi10_device_info }, /* Navi10 */
- { 0x731a, &navi10_device_info }, /* Navi10 */
- { 0x731f, &navi10_device_info }, /* Navi10 */
+ [CHIP_HAWAII] = {&hawaii_device_info, NULL},
+ [CHIP_TONGA] = {&tonga_device_info, NULL},
+ [CHIP_FIJI] = {&fiji_device_info, &fiji_vf_device_info},
+ [CHIP_POLARIS10] = {&polaris10_device_info, &polaris10_vf_device_info},
+ [CHIP_POLARIS11] = {&polaris11_device_info, NULL},
+ [CHIP_POLARIS12] = {&polaris12_device_info, NULL},
+ [CHIP_VEGAM] = {&vegam_device_info, NULL},
+ [CHIP_VEGA10] = {&vega10_device_info, &vega10_vf_device_info},
+ [CHIP_VEGA12] = {&vega12_device_info, NULL},
+ [CHIP_VEGA20] = {&vega20_device_info, NULL},
+ [CHIP_RENOIR] = {&renoir_device_info, NULL},
+ [CHIP_ARCTURUS] = {&arcturus_device_info, &arcturus_device_info},
+ [CHIP_NAVI10] = {&navi10_device_info, NULL},
+ [CHIP_NAVI12] = {&navi12_device_info, &navi12_device_info},
+ [CHIP_NAVI14] = {&navi14_device_info, NULL},
};
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
@@ -466,32 +488,25 @@ static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
static int kfd_resume(struct kfd_dev *kfd);
-static const struct kfd_device_info *lookup_device_info(unsigned short did)
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
+ struct pci_dev *pdev, unsigned int asic_type, bool vf)
{
- size_t i;
+ struct kfd_dev *kfd;
+ const struct kfd_device_info *device_info;
+ const struct kfd2kgd_calls *f2g;
- for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
- if (supported_devices[i].did == did) {
- WARN_ON(!supported_devices[i].device_info);
- return supported_devices[i].device_info;
- }
+ if (asic_type >= sizeof(kfd_supported_devices) / (sizeof(void *) * 2)
+ || asic_type >= sizeof(kfd2kgd_funcs) / sizeof(void *)) {
+ dev_err(kfd_device, "asic_type %d out of range\n", asic_type);
+ return NULL; /* asic_type out of range */
}
- dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
- did);
-
- return NULL;
-}
+ device_info = kfd_supported_devices[asic_type][vf];
+ f2g = kfd2kgd_funcs[asic_type];
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
- struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
-{
- struct kfd_dev *kfd;
- const struct kfd_device_info *device_info =
- lookup_device_info(pdev->device);
-
- if (!device_info) {
- dev_err(kfd_device, "kgd2kfd_probe failed\n");
+ if (!device_info || !f2g) {
+ dev_err(kfd_device, "%s %s not supported in kfd\n",
+ amdgpu_asic_name[asic_type], vf ? "VF" : "");
return NULL;
}
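
The probe path above replaces the old PCI device-ID scan with direct two-column indexing: one row per ASIC type, bare-metal device info in column 0 and VF device info in column 1. A minimal, self-contained sketch of the same lookup pattern, with hypothetical names standing in for the kernel's tables:

/* Hypothetical model of the [asic_type][vf] device-info lookup above. */
#include <stddef.h>
#include <stdio.h>

struct device_info { const char *name; };

static const struct device_info tonga   = { "Tonga" };
static const struct device_info fiji    = { "Fiji" };
static const struct device_info fiji_vf = { "Fiji VF" };

/* Column 0: bare metal, column 1: virtual function (NULL = unsupported). */
static const struct device_info *table[][2] = {
	{ &tonga, NULL     },
	{ &fiji,  &fiji_vf },
};

static const struct device_info *probe(size_t asic_type, int vf)
{
	if (asic_type >= sizeof(table) / sizeof(table[0]))
		return NULL;		/* asic_type out of range */
	return table[asic_type][!!vf];	/* NULL means not supported */
}

int main(void)
{
	const struct device_info *info = probe(1, 1);

	printf("%s\n", info ? info->name : "not supported"); /* Fiji VF */
	return 0;
}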
@@ -536,6 +551,10 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_gfx8_hex;
kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
+ } else if (kfd->device_info->asic_family == CHIP_ARCTURUS) {
+ BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE);
+ kfd->cwsr_isa = cwsr_trap_arcturus_hex;
+ kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
} else if (kfd->device_info->asic_family < CHIP_NAVI10) {
BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
kfd->cwsr_isa = cwsr_trap_gfx9_hex;
@@ -551,10 +570,12 @@ static void kfd_cwsr_init(struct kfd_dev *kfd)
}
bool kgd2kfd_device_init(struct kfd_dev *kfd,
+ struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources)
{
unsigned int size;
+ kfd->ddev = ddev;
kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
KGD_ENGINE_MEC1);
kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
@@ -707,10 +728,10 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
if (!kfd->init_complete)
return 0;
- kgd2kfd_suspend(kfd);
- /* hold dqm->lock to prevent further execution*/
- dqm_lock(kfd->dqm);
+ kfd->dqm->ops.pre_reset(kfd->dqm);
+
+ kgd2kfd_suspend(kfd);
kfd_signal_reset_event(kfd);
return 0;
@@ -724,17 +745,15 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
- int ret, count;
+ int ret;
if (!kfd->init_complete)
return 0;
- dqm_unlock(kfd->dqm);
-
ret = kfd_resume(kfd);
if (ret)
return ret;
- count = atomic_dec_return(&kfd_locked);
+ atomic_dec(&kfd_locked);
atomic_set(&kfd->sram_ecc_flag, 0);
@@ -806,6 +825,21 @@ dqm_start_error:
return err;
}
+static inline void kfd_queue_work(struct workqueue_struct *wq,
+ struct work_struct *work)
+{
+ int cpu, new_cpu;
+
+ cpu = new_cpu = smp_processor_id();
+ do {
+ new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
+ if (cpu_to_node(new_cpu) == numa_node_id())
+ break;
+ } while (cpu != new_cpu);
+
+ queue_work_on(new_cpu, wq, work);
+}
+
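
kfd_queue_work() above keeps interrupt bottom halves NUMA-local: starting after the current CPU, it walks the online CPU mask, wrapping around, and stops at the first CPU on the local NUMA node (falling back to the current CPU after a full wrap). A userspace model of that selection, with an assumed two-node topology:

/* Userspace model of the NUMA-local CPU pick above (assumed topology). */
#include <stdio.h>

#define NR_CPUS 8
static const int cpu_node[NR_CPUS] = { 0, 0, 0, 0, 1, 1, 1, 1 };

static int pick_cpu(int cur, int local_node)
{
	int cpu = cur;

	do {
		cpu = (cpu + 1) % NR_CPUS;	 /* cpumask_next, with wrap */
		if (cpu_node[cpu] == local_node) /* first CPU on our node */
			break;
	} while (cpu != cur);

	return cpu; /* equals cur again after a full unsuccessful wrap */
}

int main(void)
{
	printf("picked CPU %d\n", pick_cpu(5, 1)); /* prints 6 */
	return 0;
}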
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
@@ -828,7 +862,7 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
patched_ihre, &is_patched)
&& enqueue_ih_ring_entry(kfd,
is_patched ? patched_ihre : ih_ring_entry))
- queue_work(kfd->ih_wq, &kfd->interrupt_work);
+ kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work);
spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index e6a4288bfaa6..80d22bf702e8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -170,7 +170,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
}
q->properties.doorbell_off =
- kfd_doorbell_id_to_offset(dev, q->process,
+ kfd_get_doorbell_dw_offset_in_bar(dev, q->process,
q->doorbell_id);
return 0;
@@ -195,20 +195,30 @@ static int allocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
- int bit, allocated_vmid;
+ int allocated_vmid = -1, i;
- if (dqm->vmid_bitmap == 0)
- return -ENOMEM;
+ for (i = dqm->dev->vm_info.first_vmid_kfd;
+ i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
+ if (!dqm->vmid_pasid[i]) {
+ allocated_vmid = i;
+ break;
+ }
+ }
- bit = ffs(dqm->vmid_bitmap) - 1;
- dqm->vmid_bitmap &= ~(1 << bit);
+ if (allocated_vmid < 0) {
+ pr_err("no more vmid to allocate\n");
+ return -ENOSPC;
+ }
+
+ pr_debug("vmid allocated: %d\n", allocated_vmid);
+
+ dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
+
+ set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
- allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
- pr_debug("vmid allocation %d\n", allocated_vmid);
qpd->vmid = allocated_vmid;
q->properties.vmid = allocated_vmid;
- set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
program_sh_mem_settings(dqm, qpd);
/* qpd->page_table_base is set earlier when register_process()
@@ -220,8 +230,9 @@ static int allocate_vmid(struct device_queue_manager *dqm,
/* invalidate the VM context after pasid and vmid mapping is set up */
kfd_flush_tlb(qpd_to_pdd(qpd));
- dqm->dev->kfd2kgd->set_scratch_backing_va(
- dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);
+ if (dqm->dev->kfd2kgd->set_scratch_backing_va)
+ dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd,
+ qpd->sh_hidden_private_base, qpd->vmid);
return 0;
}
@@ -248,8 +259,6 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct queue *q)
{
- int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;
-
/* On GFX v7, CP doesn't flush TC at dequeue */
if (q->device->device_info->asic_family == CHIP_HAWAII)
if (flush_texture_cache_nocpsch(q->device, qpd))
@@ -259,8 +268,8 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
/* Release the vmid mapping */
set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
+ dqm->vmid_pasid[qpd->vmid] = 0;
- dqm->vmid_bitmap |= (1 << bit);
qpd->vmid = 0;
q->properties.vmid = 0;
}
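
Replacing the VMID bitmap with a vmid_pasid[] array keeps the allocator semantics (scan for a free slot, clear on release) and additionally lets the interrupt path translate a VMID to a PASID with a plain array read instead of a register access (see the kfd_int_process_v9.c hunk below). A minimal model, assuming a KFD VMID range of 8..15:

/* Minimal model of vmid_pasid[] allocation (assumed KFD VMID range 8..15). */
#include <stdint.h>
#include <stdio.h>

#define VMID_NUM 16
static uint16_t vmid_pasid[VMID_NUM];

static int allocate_vmid(uint16_t pasid, int first, int last)
{
	for (int i = first; i <= last; i++) {
		if (!vmid_pasid[i]) {
			vmid_pasid[i] = pasid;	/* record vmid -> pasid */
			return i;
		}
	}
	return -1; /* no more VMIDs; the driver returns -ENOSPC */
}

static void deallocate_vmid(int vmid)
{
	vmid_pasid[vmid] = 0;
}

int main(void)
{
	int vmid = allocate_vmid(0x8001, 8, 15);

	printf("vmid %d -> pasid 0x%x\n", vmid, vmid_pasid[vmid]);
	deallocate_vmid(vmid);
	return 0;
}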
@@ -331,6 +340,10 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (q->properties.is_active) {
+ if (!dqm->sched_running) {
+ WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
+ goto add_queue_to_list;
+ }
if (WARN(q->process->mm != current->mm,
"should only run in user thread"))
@@ -342,6 +355,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
goto out_free_mqd;
}
+add_queue_to_list:
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
if (q->properties.is_active)
@@ -449,6 +463,11 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
deallocate_doorbell(qpd, q);
+ if (!dqm->sched_running) {
+ WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
+ return 0;
+ }
+
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
KFD_UNMAP_LATENCY_MS,
@@ -524,6 +543,12 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
+
+ if (!dqm->sched_running) {
+ WARN_ONCE(1, "Update non-HWS queue while stopped\n");
+ goto out_unlock;
+ }
+
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
@@ -579,7 +604,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
goto out;
pdd = qpd_to_pdd(qpd);
- pr_info_ratelimited("Evicting PASID %u queues\n",
+ pr_info_ratelimited("Evicting PASID 0x%x queues\n",
pdd->process->pasid);
/* Mark all queues as evicted. Deactivate all active queues on
@@ -593,6 +618,11 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = false;
+ dqm->queue_count--;
+
+ if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
+ continue;
+
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
@@ -601,7 +631,6 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
* maintain a consistent eviction state
*/
ret = retval;
- dqm->queue_count--;
}
out:
@@ -621,7 +650,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
goto out;
pdd = qpd_to_pdd(qpd);
- pr_info_ratelimited("Evicting PASID %u queues\n",
+ pr_info_ratelimited("Evicting PASID 0x%x queues\n",
pdd->process->pasid);
/* Mark all queues as evicted. Deactivate all active queues on
@@ -667,7 +696,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
goto out;
}
- pr_info_ratelimited("Restoring PASID %u queues\n",
+ pr_info_ratelimited("Restoring PASID 0x%x queues\n",
pdd->process->pasid);
/* Update PD Base in QPD */
@@ -702,6 +731,11 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = true;
+ dqm->queue_count++;
+
+ if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
+ continue;
+
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
q->queue, &q->properties, mm);
if (retval && !ret)
@@ -709,7 +743,6 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
* maintain a consistent eviction state
*/
ret = retval;
- dqm->queue_count++;
}
qpd->evicted = 0;
out:
@@ -739,7 +772,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
goto out;
}
- pr_info_ratelimited("Restoring PASID %u queues\n",
+ pr_info_ratelimited("Restoring PASID 0x%x queues\n",
pdd->process->pasid);
/* Update PD Base in QPD */
@@ -879,9 +912,10 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
dqm->allocated_queues[pipe] |= 1 << queue;
}
- dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
- dqm->sdma_bitmap = (1ULL << get_num_sdma_queues(dqm)) - 1;
- dqm->xgmi_sdma_bitmap = (1ULL << get_num_xgmi_sdma_queues(dqm)) - 1;
+ memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
+
+ dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
+ dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
return 0;
}
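
The SDMA bitmap initialization switches from (1ULL << n) - 1 to ~0ULL >> (64 - n). The new form stays well defined when n reaches 64 (a left shift of a 64-bit value by 64 is undefined behaviour in C), at the cost of being undefined for n == 0 instead. A small sketch, assuming 1 <= n <= 64:

/* Mask with the low n bits set; well defined for 1 <= n <= 64, unlike
 * (1ULL << n) - 1, which is undefined behaviour at n == 64.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t low_bits(unsigned int n)
{
	return ~0ULL >> (64 - n);
}

int main(void)
{
	printf("%016llx\n", (unsigned long long)low_bits(8));  /* 00000000000000ff */
	printf("%016llx\n", (unsigned long long)low_bits(64)); /* ffffffffffffffff */
	return 0;
}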
@@ -896,21 +930,36 @@ static void uninitialize(struct device_queue_manager *dqm)
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
kfree(dqm->mqd_mgrs[i]);
mutex_destroy(&dqm->lock_hidden);
- kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}
static int start_nocpsch(struct device_queue_manager *dqm)
{
+ pr_info("SW scheduler is used");
init_interrupts(dqm);
- return pm_init(&dqm->packets, dqm);
+
+ if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
+ return pm_init(&dqm->packets, dqm);
+ dqm->sched_running = true;
+
+ return 0;
}
static int stop_nocpsch(struct device_queue_manager *dqm)
{
- pm_uninit(&dqm->packets);
+ if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
+ pm_uninit(&dqm->packets, false);
+ dqm->sched_running = false;
+
return 0;
}
+static void pre_reset(struct device_queue_manager *dqm)
+{
+ dqm_lock(dqm);
+ dqm->is_resetting = true;
+ dqm_unlock(dqm);
+}
+
static int allocate_sdma_queue(struct device_queue_manager *dqm,
struct queue *q)
{
@@ -1019,8 +1068,8 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
dqm->sdma_queue_count = 0;
dqm->xgmi_sdma_queue_count = 0;
dqm->active_runlist = false;
- dqm->sdma_bitmap = (1ULL << get_num_sdma_queues(dqm)) - 1;
- dqm->xgmi_sdma_bitmap = (1ULL << get_num_xgmi_sdma_queues(dqm)) - 1;
+ dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
+ dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
@@ -1058,25 +1107,32 @@ static int start_cpsch(struct device_queue_manager *dqm)
dqm_lock(dqm);
	/* clear hang status when the driver tries to start the hw scheduler */
dqm->is_hws_hang = false;
+ dqm->is_resetting = false;
+ dqm->sched_running = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
dqm_unlock(dqm);
return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
- pm_uninit(&dqm->packets);
+ pm_uninit(&dqm->packets, false);
fail_packet_manager_init:
return retval;
}
static int stop_cpsch(struct device_queue_manager *dqm)
{
+ bool hanging;
+
dqm_lock(dqm);
- unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ if (!dqm->is_hws_hang)
+ unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ hanging = dqm->is_hws_hang || dqm->is_resetting;
+ dqm->sched_running = false;
dqm_unlock(dqm);
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
- pm_uninit(&dqm->packets);
+ pm_uninit(&dqm->packets, hanging);
return 0;
}
@@ -1181,16 +1237,18 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
+
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ dqm->sdma_queue_count++;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
+ dqm->xgmi_sdma_queue_count++;
+
if (q->properties.is_active) {
dqm->queue_count++;
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
}
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
- dqm->sdma_queue_count++;
- else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
- dqm->xgmi_sdma_queue_count++;
/*
* Unconditionally increment this counter, regardless of the queue's
* type or whether the queue is active.
@@ -1259,9 +1317,10 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
{
int retval;
+ if (!dqm->sched_running)
+ return 0;
if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
return 0;
-
if (dqm->active_runlist)
return 0;
@@ -1283,6 +1342,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
{
int retval = 0;
+ if (!dqm->sched_running)
+ return 0;
if (dqm->is_hws_hang)
return -EIO;
if (!dqm->active_runlist)
@@ -1305,8 +1366,17 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
/* should be timed out */
retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
queue_preemption_timeout_ms);
- if (retval)
+ if (retval) {
+		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queue preemption\n");
+ dqm->is_hws_hang = true;
+ /* It's possible we're detecting a HWS hang in the
+ * middle of a GPU reset. No need to schedule another
+ * reset in this case.
+ */
+ if (!dqm->is_resetting)
+ schedule_work(&dqm->hw_exception_work);
return retval;
+ }
pm_release_ib(&dqm->packets);
dqm->active_runlist = false;
@@ -1324,12 +1394,8 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
if (dqm->is_hws_hang)
return -EIO;
retval = unmap_queues_cpsch(dqm, filter, filter_param);
- if (retval) {
- pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
- dqm->is_hws_hang = true;
- schedule_work(&dqm->hw_exception_work);
+ if (retval)
return retval;
- }
return map_queues_cpsch(dqm);
}
@@ -1548,7 +1614,7 @@ static int get_wave_state(struct device_queue_manager *dqm,
goto dqm_unlock;
}
- mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_COMPUTE];
+ mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
if (!mqd_mgr->get_wave_state) {
r = -EINVAL;
@@ -1676,7 +1742,8 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
struct kfd_dev *dev = dqm->dev;
struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
- dev->device_info->num_sdma_engines *
+ (dev->device_info->num_sdma_engines +
+ dev->device_info->num_xgmi_sdma_engines) *
dev->device_info->num_sdma_queues_per_engine +
dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
@@ -1722,6 +1789,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
dqm->ops.initialize = initialize_cpsch;
dqm->ops.start = start_cpsch;
dqm->ops.stop = stop_cpsch;
+ dqm->ops.pre_reset = pre_reset;
dqm->ops.destroy_queue = destroy_queue_cpsch;
dqm->ops.update_queue = update_queue;
dqm->ops.register_process = register_process;
@@ -1740,6 +1808,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
/* initialize dqm for no cp scheduling */
dqm->ops.start = start_nocpsch;
dqm->ops.stop = stop_nocpsch;
+ dqm->ops.pre_reset = pre_reset;
dqm->ops.create_queue = create_queue_nocpsch;
dqm->ops.destroy_queue = destroy_queue_nocpsch;
dqm->ops.update_queue = update_queue;
@@ -1786,9 +1855,13 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
+ case CHIP_ARCTURUS:
device_queue_manager_init_v9(&dqm->asic_ops);
break;
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
device_queue_manager_init_v10_navi10(&dqm->asic_ops);
break;
default:
@@ -1813,7 +1886,8 @@ out_free:
return NULL;
}
-void deallocate_hiq_sdma_mqd(struct kfd_dev *dev, struct kfd_mem_obj *mqd)
+static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
+ struct kfd_mem_obj *mqd)
{
WARN(!mqd, "No hiq sdma mqd trunk to free");
@@ -1881,6 +1955,12 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
int pipe, queue;
int r = 0;
+ if (!dqm->sched_running) {
+ seq_printf(m, " Device is stopped\n");
+
+ return 0;
+ }
+
r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
&dump, &n_regs);
@@ -1915,7 +1995,8 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
}
}
- for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) {
+ for (pipe = 0; pipe < get_num_sdma_engines(dqm) +
+ get_num_xgmi_sdma_engines(dqm); pipe++) {
for (queue = 0;
queue < dqm->dev->device_info->num_sdma_queues_per_engine;
queue++) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 90db2c9275f6..871d3b628d2d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -32,6 +32,8 @@
#include "kfd_mqd_manager.h"
+#define VMID_NUM 16
+
struct device_process_node {
struct qcm_process_device *qpd;
struct list_head list;
@@ -102,6 +104,7 @@ struct device_queue_manager_ops {
int (*initialize)(struct device_queue_manager *dqm);
int (*start)(struct device_queue_manager *dqm);
int (*stop)(struct device_queue_manager *dqm);
+ void (*pre_reset)(struct device_queue_manager *dqm);
void (*uninitialize)(struct device_queue_manager *dqm);
int (*create_kernel_queue)(struct device_queue_manager *dqm,
struct kernel_queue *kq,
@@ -185,9 +188,9 @@ struct device_queue_manager {
unsigned int *allocated_queues;
uint64_t sdma_bitmap;
uint64_t xgmi_sdma_bitmap;
- unsigned int vmid_bitmap;
+ /* the pasid mapping for each kfd vmid */
+ uint16_t vmid_pasid[VMID_NUM];
uint64_t pipelines_addr;
- struct kfd_mem_obj *pipeline_mem;
uint64_t fence_gpu_addr;
unsigned int *fence_addr;
struct kfd_mem_obj *fence_mem;
@@ -196,8 +199,10 @@ struct device_queue_manager {
/* hw exception */
bool is_hws_hang;
+ bool is_resetting;
struct work_struct hw_exception_work;
struct kfd_mem_obj hiq_sdma_mqd;
+ bool sched_running;
};
void device_queue_manager_init_cik(
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index ebe79bf00145..8e0c00b9555e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -91,7 +91,7 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address +
doorbell_start_offset;
- kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32);
+ kfd->doorbell_base_dw_offset = doorbell_start_offset / sizeof(u32);
kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,
kfd_doorbell_process_slice(kfd));
@@ -103,8 +103,8 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
pr_debug("doorbell base == 0x%08lX\n",
(uintptr_t)kfd->doorbell_base);
- pr_debug("doorbell_id_offset == 0x%08lX\n",
- kfd->doorbell_id_offset);
+ pr_debug("doorbell_base_dw_offset == 0x%08lX\n",
+ kfd->doorbell_base_dw_offset);
pr_debug("doorbell_process_limit == 0x%08lX\n",
doorbell_process_limit);
@@ -185,7 +185,7 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
* Calculating the kernel doorbell offset using the first
* doorbell page.
*/
- *doorbell_off = kfd->doorbell_id_offset + inx;
+ *doorbell_off = kfd->doorbell_base_dw_offset + inx;
pr_debug("Get kernel queue doorbell\n"
" doorbell offset == 0x%08X\n"
@@ -225,17 +225,17 @@ void write_kernel_doorbell64(void __iomem *db, u64 value)
}
}
-unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
+unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
struct kfd_process *process,
unsigned int doorbell_id)
{
/*
- * doorbell_id_offset accounts for doorbells taken by KGD.
+ * doorbell_base_dw_offset accounts for doorbells taken by KGD.
* index * kfd_doorbell_process_slice/sizeof(u32) adjusts to
* the process's doorbells. The offset returned is in dword
* units regardless of the ASIC-dependent doorbell size.
*/
- return kfd->doorbell_id_offset +
+ return kfd->doorbell_base_dw_offset +
process->doorbell_index
* kfd_doorbell_process_slice(kfd) / sizeof(u32) +
doorbell_id * kfd->device_info->doorbell_size / sizeof(u32);
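
A worked example of the dword-offset arithmetic above, with assumed values (a 0x400-dword KGD reserve, an 8 KiB per-process doorbell slice, and 8-byte doorbells as on GFXv9):

/* Worked example of the doorbell dword-offset computation (assumed values). */
#include <stdio.h>

int main(void)
{
	unsigned int base_dw_offset = 0x400;	/* dwords reserved for KGD */
	unsigned int process_slice  = 8192;	/* bytes per process */
	unsigned int doorbell_size  = 8;	/* 64-bit doorbells */
	unsigned int doorbell_index = 3;	/* process slot in the BAR */
	unsigned int doorbell_id    = 5;	/* doorbell within the slice */

	unsigned int off = base_dw_offset +
			   doorbell_index * process_slice / 4 +
			   doorbell_id * doorbell_size / 4;

	printf("dword offset = 0x%x\n", off);	/* 0x1c0a */
	return 0;
}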
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index d674d4b3340f..1f8365575b12 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -346,7 +346,6 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
ret = create_signal_event(devkfd, p, ev);
if (!ret) {
*event_page_offset = KFD_MMAP_TYPE_EVENTS;
- *event_page_offset <<= PAGE_SHIFT;
*event_slot_index = ev->event_id;
}
break;
@@ -852,8 +851,8 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
if (type == KFD_EVENT_TYPE_MEMORY) {
dev_warn(kfd_device,
- "Sending SIGSEGV to HSA Process with PID %d ",
- p->lead_thread->pid);
+ "Sending SIGSEGV to process %d (pasid 0x%x)",
+ p->lead_thread->pid, p->pasid);
send_sig(SIGSEGV, p->lead_thread, 0);
}
@@ -861,13 +860,13 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
if (send_signal) {
if (send_sigterm) {
dev_warn(kfd_device,
- "Sending SIGTERM to HSA Process with PID %d ",
- p->lead_thread->pid);
+ "Sending SIGTERM to process %d (pasid 0x%x)",
+ p->lead_thread->pid, p->pasid);
send_sig(SIGTERM, p->lead_thread, 0);
} else {
dev_err(kfd_device,
- "HSA Process (PID %d) got unhandled exception",
- p->lead_thread->pid);
+ "Process %d (pasid 0x%x) got unhandled exception",
+ p->lead_thread->pid, p->pasid);
}
}
}
@@ -936,7 +935,8 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
/* Workaround on Raven to not kill the process when memory is freed
* before IOMMU is able to finish processing all the excessive PPRs
*/
- if (dev->device_info->asic_family != CHIP_RAVEN) {
+ if (dev->device_info->asic_family != CHIP_RAVEN &&
+ dev->device_info->asic_family != CHIP_RENOIR) {
mutex_lock(&p->event_mutex);
/* Lookup events by type and signal them */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 60521366dd31..bb77b8890e77 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -369,8 +369,13 @@ int kfd_init_apertures(struct kfd_process *process)
/*Iterating over all devices*/
while (kfd_topology_enum_kfd_devices(id, &dev) == 0) {
- if (!dev) {
- id++; /* Skip non GPU devices */
+ if (!dev || kfd_devcgroup_check_permission(dev)) {
+		/* Skip non-GPU devices and devices to which the
+		 * current process has no access. Access can be
+		 * limited by placing the process in a specific
+		 * cgroup hierarchy.
+		 */
+ id++;
continue;
}
@@ -405,7 +410,11 @@ int kfd_init_apertures(struct kfd_process *process)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
+ case CHIP_ARCTURUS:
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
kfd_init_apertures_v9(pdd, id);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index a85904ad0d5f..e05d75ecda21 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -54,8 +54,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
memcpy(patched_ihre, ih_ring_entry,
dev->device_info->ih_ring_entry_size);
- pasid = dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid(
- dev->kgd, vmid);
+ pasid = dev->dqm->vmid_pasid[vmid];
/* Patch the pasid field */
patched_ihre[3] = cpu_to_le32((le32_to_cpu(patched_ihre[3])
@@ -80,6 +79,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
client_id == SOC15_IH_CLIENTID_VMC ||
+ client_id == SOC15_IH_CLIENTID_VMC1 ||
client_id == SOC15_IH_CLIENTID_UTCL2;
}
@@ -104,6 +104,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE)
kfd_signal_hw_exception_event(pasid);
else if (client_id == SOC15_IH_CLIENTID_VMC ||
+ client_id == SOC15_IH_CLIENTID_VMC1 ||
client_id == SOC15_IH_CLIENTID_UTCL2) {
struct kfd_vm_fault_info info = {0};
uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index c56ac47cd318..bc47f6a44456 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -62,6 +62,11 @@ int kfd_interrupt_init(struct kfd_dev *kfd)
}
kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1);
+ if (unlikely(!kfd->ih_wq)) {
+ kfifo_free(&kfd->ih_fifo);
+ dev_err(kfd_chardev(), "Failed to allocate KFD IH workqueue\n");
+ return -ENOMEM;
+ }
spin_lock_init(&kfd->interrupt_lock);
INIT_WORK(&kfd->interrupt_work, interrupt_wq);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 5f35df23fb18..8d871514671e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -62,9 +62,6 @@ int kfd_iommu_device_init(struct kfd_dev *kfd)
struct amd_iommu_device_info iommu_info;
unsigned int pasid_limit;
int err;
- struct kfd_topology_device *top_dev;
-
- top_dev = kfd_topology_device_by_id(kfd->id);
if (!kfd->device_info->needs_iommu_device)
return 0;
@@ -160,7 +157,7 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
if (!p)
return;
- pr_debug("Unbinding process %d from IOMMU\n", pasid);
+ pr_debug("Unbinding process 0x%x from IOMMU\n", pasid);
mutex_lock(kfd_get_dbgmgr_mutex());
@@ -194,7 +191,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
struct kfd_dev *dev;
dev_warn_ratelimited(kfd_device,
- "Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X",
+ "Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X",
PCI_BUS_NUM(pdev->devfn),
PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn),
@@ -235,7 +232,7 @@ static int kfd_bind_processes_to_device(struct kfd_dev *kfd)
err = amd_iommu_bind_pasid(kfd->pdev, p->pasid,
p->lead_thread);
if (err < 0) {
- pr_err("Unexpected pasid %d binding failure\n",
+ pr_err("Unexpected pasid 0x%x binding failure\n",
p->pasid);
mutex_unlock(&p->mutex);
break;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 29c0bd2d7a5c..bae706462f96 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -34,7 +34,10 @@
#define PM4_COUNT_ZERO (((1 << 15) - 1) << 16)
-static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
+/* Initialize a kernel queue, including allocations of GART memory
+ * needed for the queue.
+ */
+static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
enum kfd_queue_type type, unsigned int queue_size)
{
struct queue_properties prop;
@@ -87,9 +90,17 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->pq_kernel_addr = kq->pq->cpu_ptr;
kq->pq_gpu_addr = kq->pq->gpu_addr;
- retval = kq->ops_asic_specific.initialize(kq, dev, type, queue_size);
- if (!retval)
- goto err_eop_allocate_vidmem;
+	/* For CIK family ASICs, kq->eop_mem is not needed */
+ if (dev->device_info->asic_family > CHIP_MULLINS) {
+ retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
+ if (retval != 0)
+ goto err_eop_allocate_vidmem;
+
+ kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
+ kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
+
+ memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
+ }
retval = kfd_gtt_sa_allocate(dev, sizeof(*kq->rptr_kernel),
&kq->rptr_mem);
@@ -183,9 +194,10 @@ err_get_kernel_doorbell:
}
-static void uninitialize(struct kernel_queue *kq)
+/* Uninitialize a kernel queue and free all the memory it uses. */
+static void kq_uninitialize(struct kernel_queue *kq, bool hanging)
{
- if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
+ if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ && !hanging)
kq->mqd_mgr->destroy_mqd(kq->mqd_mgr,
kq->queue->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
@@ -200,14 +212,19 @@ static void uninitialize(struct kernel_queue *kq)
kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
- kq->ops_asic_specific.uninitialize(kq);
+
+	/* For CIK family ASICs, kq->eop_mem is NULL;
+	 * kfd_gtt_sa_free() handles NULL properly.
+	 */
+ kfd_gtt_sa_free(kq->dev, kq->eop_mem);
+
kfd_gtt_sa_free(kq->dev, kq->pq);
kfd_release_kernel_doorbell(kq->dev,
kq->queue->properties.doorbell_ptr);
uninit_queue(kq->queue);
}
-static int acquire_packet_buffer(struct kernel_queue *kq,
+int kq_acquire_packet_buffer(struct kernel_queue *kq,
size_t packet_size_in_dwords, unsigned int **buffer_ptr)
{
size_t available_size;
@@ -268,7 +285,7 @@ err_no_space:
return -ENOMEM;
}
-static void submit_packet(struct kernel_queue *kq)
+void kq_submit_packet(struct kernel_queue *kq)
{
#ifdef DEBUG
int i;
@@ -280,11 +297,18 @@ static void submit_packet(struct kernel_queue *kq)
}
pr_debug("\n");
#endif
-
- kq->ops_asic_specific.submit_packet(kq);
+ if (kq->dev->device_info->doorbell_size == 8) {
+ *kq->wptr64_kernel = kq->pending_wptr64;
+ write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
+ kq->pending_wptr64);
+ } else {
+ *kq->wptr_kernel = kq->pending_wptr;
+ write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
+ kq->pending_wptr);
+ }
}
-static void rollback_packet(struct kernel_queue *kq)
+void kq_rollback_packet(struct kernel_queue *kq)
{
if (kq->dev->device_info->doorbell_size == 8) {
kq->pending_wptr64 = *kq->wptr64_kernel;
@@ -304,56 +328,18 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
if (!kq)
return NULL;
- kq->ops.initialize = initialize;
- kq->ops.uninitialize = uninitialize;
- kq->ops.acquire_packet_buffer = acquire_packet_buffer;
- kq->ops.submit_packet = submit_packet;
- kq->ops.rollback_packet = rollback_packet;
-
- switch (dev->device_info->asic_family) {
- case CHIP_CARRIZO:
- case CHIP_TONGA:
- case CHIP_FIJI:
- case CHIP_POLARIS10:
- case CHIP_POLARIS11:
- case CHIP_POLARIS12:
- case CHIP_VEGAM:
- kernel_queue_init_vi(&kq->ops_asic_specific);
- break;
-
- case CHIP_KAVERI:
- case CHIP_HAWAII:
- kernel_queue_init_cik(&kq->ops_asic_specific);
- break;
-
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- kernel_queue_init_v9(&kq->ops_asic_specific);
- break;
- case CHIP_NAVI10:
- kernel_queue_init_v10(&kq->ops_asic_specific);
- break;
- default:
- WARN(1, "Unexpected ASIC family %u",
- dev->device_info->asic_family);
- goto out_free;
- }
-
- if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE))
+ if (kq_initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE))
return kq;
pr_err("Failed to init kernel queue\n");
-out_free:
kfree(kq);
return NULL;
}
-void kernel_queue_uninit(struct kernel_queue *kq)
+void kernel_queue_uninit(struct kernel_queue *kq, bool hanging)
{
- kq->ops.uninitialize(kq);
+ kq_uninitialize(kq, hanging);
kfree(kq);
}
@@ -373,7 +359,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
return;
}
- retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer);
+ retval = kq_acquire_packet_buffer(kq, 5, &buffer);
if (unlikely(retval != 0)) {
pr_err(" Failed to acquire packet buffer\n");
pr_err("Kernel queue test failed\n");
@@ -381,7 +367,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
}
for (i = 0; i < 5; i++)
buffer[i] = kq->nop_packet;
- kq->ops.submit_packet(kq);
+ kq_submit_packet(kq);
pr_err("Ending kernel queue test\n");
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
index 365fc674fea4..f4cfe9f1871c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
@@ -29,45 +29,28 @@
#include "kfd_priv.h"
/**
- * struct kernel_queue_ops
- *
- * @initialize: Initialize a kernel queue, including allocations of GART memory
- * needed for the queue.
- *
- * @uninitialize: Uninitialize a kernel queue and free all its memory usages.
- *
- * @acquire_packet_buffer: Returns a pointer to the location in the kernel
+ * kq_acquire_packet_buffer: Returns a pointer to the location in the kernel
* queue ring buffer where the calling function can write its packet. It is
 * guaranteed that there is enough space for that packet. It also updates the
* pending write pointer to that location so subsequent calls to
* acquire_packet_buffer will get a correct write pointer
*
- * @submit_packet: Update the write pointer and doorbell of a kernel queue.
- *
- * @sync_with_hw: Wait until the write pointer and the read pointer of a kernel
- * queue are equal, which means the CP has read all the submitted packets.
+ * kq_submit_packet: Update the write pointer and doorbell of a kernel queue.
*
- * @rollback_packet: This routine is called if we failed to build an acquired
+ * kq_rollback_packet: This routine is called if we failed to build an acquired
* packet for some reason. It just overwrites the pending wptr with the current
 * one.
*
*/
-struct kernel_queue_ops {
- bool (*initialize)(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size);
- void (*uninitialize)(struct kernel_queue *kq);
- int (*acquire_packet_buffer)(struct kernel_queue *kq,
- size_t packet_size_in_dwords,
- unsigned int **buffer_ptr);
- void (*submit_packet)(struct kernel_queue *kq);
- void (*rollback_packet)(struct kernel_queue *kq);
-};
+int kq_acquire_packet_buffer(struct kernel_queue *kq,
+ size_t packet_size_in_dwords,
+ unsigned int **buffer_ptr);
+void kq_submit_packet(struct kernel_queue *kq);
+void kq_rollback_packet(struct kernel_queue *kq);
-struct kernel_queue {
- struct kernel_queue_ops ops;
- struct kernel_queue_ops ops_asic_specific;
+struct kernel_queue {
/* data */
struct kfd_dev *dev;
struct mqd_manager *mqd_mgr;
@@ -99,9 +82,4 @@ struct kernel_queue {
struct list_head list;
};
-void kernel_queue_init_cik(struct kernel_queue_ops *ops);
-void kernel_queue_init_vi(struct kernel_queue_ops *ops);
-void kernel_queue_init_v9(struct kernel_queue_ops *ops);
-void kernel_queue_init_v10(struct kernel_queue_ops *ops);
-
#endif /* KFD_KERNEL_QUEUE_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
deleted file mode 100644
index 19e54acb4125..000000000000
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_cik.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "kfd_kernel_queue.h"
-
-static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size);
-static void uninitialize_cik(struct kernel_queue *kq);
-static void submit_packet_cik(struct kernel_queue *kq);
-
-void kernel_queue_init_cik(struct kernel_queue_ops *ops)
-{
- ops->initialize = initialize_cik;
- ops->uninitialize = uninitialize_cik;
- ops->submit_packet = submit_packet_cik;
-}
-
-static bool initialize_cik(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size)
-{
- return true;
-}
-
-static void uninitialize_cik(struct kernel_queue *kq)
-{
-}
-
-static void submit_packet_cik(struct kernel_queue *kq)
-{
- *kq->wptr_kernel = kq->pending_wptr;
- write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
- kq->pending_wptr);
-}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c
deleted file mode 100644
index aed32ab7102e..000000000000
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v10.c
+++ /dev/null
@@ -1,348 +0,0 @@
-/*
- * Copyright 2018 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "kfd_kernel_queue.h"
-#include "kfd_device_queue_manager.h"
-#include "kfd_pm4_headers_ai.h"
-#include "kfd_pm4_opcodes.h"
-#include "gc/gc_10_1_0_sh_mask.h"
-
-static bool initialize_v10(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size);
-static void uninitialize_v10(struct kernel_queue *kq);
-static void submit_packet_v10(struct kernel_queue *kq);
-
-void kernel_queue_init_v10(struct kernel_queue_ops *ops)
-{
- ops->initialize = initialize_v10;
- ops->uninitialize = uninitialize_v10;
- ops->submit_packet = submit_packet_v10;
-}
-
-static bool initialize_v10(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size)
-{
- int retval;
-
- retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
- if (retval != 0)
- return false;
-
- kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
- kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
-
- memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
-
- return true;
-}
-
-static void uninitialize_v10(struct kernel_queue *kq)
-{
- kfd_gtt_sa_free(kq->dev, kq->eop_mem);
-}
-
-static void submit_packet_v10(struct kernel_queue *kq)
-{
- *kq->wptr64_kernel = kq->pending_wptr64;
- write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
- kq->pending_wptr64);
-}
-
-static int pm_map_process_v10(struct packet_manager *pm,
- uint32_t *buffer, struct qcm_process_device *qpd)
-{
- struct pm4_mes_map_process *packet;
- uint64_t vm_page_table_base_addr = qpd->page_table_base;
-
- packet = (struct pm4_mes_map_process *)buffer;
- memset(buffer, 0, sizeof(struct pm4_mes_map_process));
-
- packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
- sizeof(struct pm4_mes_map_process));
- packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
- packet->bitfields2.process_quantum = 1;
- packet->bitfields2.pasid = qpd->pqm->process->pasid;
- packet->bitfields14.gds_size = qpd->gds_size;
- packet->bitfields14.num_gws = qpd->num_gws;
- packet->bitfields14.num_oac = qpd->num_oac;
- packet->bitfields14.sdma_enable = 1;
-
- packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
-
- packet->sh_mem_config = qpd->sh_mem_config;
- packet->sh_mem_bases = qpd->sh_mem_bases;
- if (qpd->tba_addr) {
- packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
- packet->sq_shader_tba_hi = (1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT) |
- upper_32_bits(qpd->tba_addr >> 8);
- packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
- packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
- }
-
- packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
- packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
-
- packet->vm_context_page_table_base_addr_lo32 =
- lower_32_bits(vm_page_table_base_addr);
- packet->vm_context_page_table_base_addr_hi32 =
- upper_32_bits(vm_page_table_base_addr);
-
- return 0;
-}
-
-static int pm_runlist_v10(struct packet_manager *pm, uint32_t *buffer,
- uint64_t ib, size_t ib_size_in_dwords, bool chain)
-{
- struct pm4_mes_runlist *packet;
-
- int concurrent_proc_cnt = 0;
- struct kfd_dev *kfd = pm->dqm->dev;
-
- /* Determine the number of processes to map together to HW:
- * it can not exceed the number of VMIDs available to the
- * scheduler, and it is determined by the smaller of the number
- * of processes in the runlist and kfd module parameter
- * hws_max_conc_proc.
- * Note: the arbitration between the number of VMIDs and
- * hws_max_conc_proc has been done in
- * kgd2kfd_device_init().
- */
- concurrent_proc_cnt = min(pm->dqm->processes_count,
- kfd->max_proc_per_quantum);
-
-
- packet = (struct pm4_mes_runlist *)buffer;
-
- memset(buffer, 0, sizeof(struct pm4_mes_runlist));
- packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
- sizeof(struct pm4_mes_runlist));
-
- packet->bitfields4.ib_size = ib_size_in_dwords;
- packet->bitfields4.chain = chain ? 1 : 0;
- packet->bitfields4.offload_polling = 0;
- packet->bitfields4.valid = 1;
- packet->bitfields4.process_cnt = concurrent_proc_cnt;
- packet->ordinal2 = lower_32_bits(ib);
- packet->ib_base_hi = upper_32_bits(ib);
-
- return 0;
-}
-
-static int pm_map_queues_v10(struct packet_manager *pm, uint32_t *buffer,
- struct queue *q, bool is_static)
-{
- struct pm4_mes_map_queues *packet;
- bool use_static = is_static;
-
- packet = (struct pm4_mes_map_queues *)buffer;
- memset(buffer, 0, sizeof(struct pm4_mes_map_queues));
-
- packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
- sizeof(struct pm4_mes_map_queues));
- packet->bitfields2.num_queues = 1;
- packet->bitfields2.queue_sel =
- queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;
-
- packet->bitfields2.engine_sel =
- engine_sel__mes_map_queues__compute_vi;
- packet->bitfields2.queue_type =
- queue_type__mes_map_queues__normal_compute_vi;
-
- switch (q->properties.type) {
- case KFD_QUEUE_TYPE_COMPUTE:
- if (use_static)
- packet->bitfields2.queue_type =
- queue_type__mes_map_queues__normal_latency_static_queue_vi;
- break;
- case KFD_QUEUE_TYPE_DIQ:
- packet->bitfields2.queue_type =
- queue_type__mes_map_queues__debug_interface_queue_vi;
- break;
- case KFD_QUEUE_TYPE_SDMA:
- case KFD_QUEUE_TYPE_SDMA_XGMI:
- packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
- engine_sel__mes_map_queues__sdma0_vi;
- use_static = false; /* no static queues under SDMA */
- break;
- default:
- WARN(1, "queue type %d\n", q->properties.type);
- return -EINVAL;
- }
- packet->bitfields3.doorbell_offset =
- q->properties.doorbell_off;
-
- packet->mqd_addr_lo =
- lower_32_bits(q->gart_mqd_addr);
-
- packet->mqd_addr_hi =
- upper_32_bits(q->gart_mqd_addr);
-
- packet->wptr_addr_lo =
- lower_32_bits((uint64_t)q->properties.write_ptr);
-
- packet->wptr_addr_hi =
- upper_32_bits((uint64_t)q->properties.write_ptr);
-
- return 0;
-}
-
-static int pm_unmap_queues_v10(struct packet_manager *pm, uint32_t *buffer,
- enum kfd_queue_type type,
- enum kfd_unmap_queues_filter filter,
- uint32_t filter_param, bool reset,
- unsigned int sdma_engine)
-{
- struct pm4_mes_unmap_queues *packet;
-
- packet = (struct pm4_mes_unmap_queues *)buffer;
- memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));
-
- packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
- sizeof(struct pm4_mes_unmap_queues));
- switch (type) {
- case KFD_QUEUE_TYPE_COMPUTE:
- case KFD_QUEUE_TYPE_DIQ:
- packet->bitfields2.engine_sel =
- engine_sel__mes_unmap_queues__compute;
- break;
- case KFD_QUEUE_TYPE_SDMA:
- case KFD_QUEUE_TYPE_SDMA_XGMI:
- packet->bitfields2.engine_sel =
- engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
- break;
- default:
- WARN(1, "queue type %d\n", type);
- break;
- }
-
- if (reset)
- packet->bitfields2.action =
- action__mes_unmap_queues__reset_queues;
- else
- packet->bitfields2.action =
- action__mes_unmap_queues__preempt_queues;
-
- switch (filter) {
- case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
- packet->bitfields2.queue_sel =
- queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
- packet->bitfields2.num_queues = 1;
- packet->bitfields3b.doorbell_offset0 = filter_param;
- break;
- case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
- packet->bitfields2.queue_sel =
- queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
- packet->bitfields3a.pasid = filter_param;
- break;
- case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
- packet->bitfields2.queue_sel =
- queue_sel__mes_unmap_queues__unmap_all_queues;
- break;
- case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
- /* in this case, we do not preempt static queues */
- packet->bitfields2.queue_sel =
- queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
- break;
- default:
- WARN(1, "filter %d\n", filter);
- break;
- }
-
- return 0;
-
-}
-
-static int pm_query_status_v10(struct packet_manager *pm, uint32_t *buffer,
- uint64_t fence_address, uint32_t fence_value)
-{
- struct pm4_mes_query_status *packet;
-
- packet = (struct pm4_mes_query_status *)buffer;
- memset(buffer, 0, sizeof(struct pm4_mes_query_status));
-
-
- packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
- sizeof(struct pm4_mes_query_status));
-
- packet->bitfields2.context_id = 0;
- packet->bitfields2.interrupt_sel =
- interrupt_sel__mes_query_status__completion_status;
- packet->bitfields2.command =
- command__mes_query_status__fence_only_after_write_ack;
-
- packet->addr_hi = upper_32_bits((uint64_t)fence_address);
- packet->addr_lo = lower_32_bits((uint64_t)fence_address);
- packet->data_hi = upper_32_bits((uint64_t)fence_value);
- packet->data_lo = lower_32_bits((uint64_t)fence_value);
-
- return 0;
-}
-
-
-static int pm_release_mem_v10(uint64_t gpu_addr, uint32_t *buffer)
-{
- struct pm4_mec_release_mem *packet;
-
- WARN_ON(!buffer);
-
- packet = (struct pm4_mec_release_mem *)buffer;
- memset(buffer, 0, sizeof(struct pm4_mec_release_mem));
-
- packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
- sizeof(struct pm4_mec_release_mem));
-
- packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
- packet->bitfields2.event_index = event_index__mec_release_mem__end_of_pipe;
- packet->bitfields2.tcl1_action_ena = 1;
- packet->bitfields2.tc_action_ena = 1;
- packet->bitfields2.cache_policy = cache_policy__mec_release_mem__lru;
-
- packet->bitfields3.data_sel = data_sel__mec_release_mem__send_32_bit_low;
- packet->bitfields3.int_sel =
- int_sel__mec_release_mem__send_interrupt_after_write_confirm;
-
- packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
- packet->address_hi = upper_32_bits(gpu_addr);
-
- packet->data_lo = 0;
-
- return sizeof(struct pm4_mec_release_mem) / sizeof(unsigned int);
-}
-
-const struct packet_manager_funcs kfd_v10_pm_funcs = {
- .map_process = pm_map_process_v10,
- .runlist = pm_runlist_v10,
- .set_resources = pm_set_resources_vi,
- .map_queues = pm_map_queues_v10,
- .unmap_queues = pm_unmap_queues_v10,
- .query_status = pm_query_status_v10,
- .release_mem = pm_release_mem_v10,
- .map_process_size = sizeof(struct pm4_mes_map_process),
- .runlist_size = sizeof(struct pm4_mes_runlist),
- .set_resources_size = sizeof(struct pm4_mes_set_resources),
- .map_queues_size = sizeof(struct pm4_mes_map_queues),
- .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
- .query_status_size = sizeof(struct pm4_mes_query_status),
- .release_mem_size = sizeof(struct pm4_mec_release_mem)
-};
-
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 986ff52d5750..f4b7f7e6c40e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -82,7 +82,7 @@ static void kfd_exit(void)
kfd_chardev_exit();
}
-int kgd2kfd_init()
+int kgd2kfd_init(void)
{
return kfd_init();
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index d6cf391da591..88813dad731f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -98,8 +98,8 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
uint32_t *se_mask)
{
struct kfd_cu_info cu_info;
- uint32_t cu_per_sh[4] = {0};
- int i, se, cu = 0;
+ uint32_t cu_per_se[KFD_MAX_NUM_SE] = {0};
+ int i, se, sh, cu = 0;
amdgpu_amdkfd_get_cu_info(mm->dev->kgd, &cu_info);
@@ -107,8 +107,8 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
cu_mask_count = cu_info.cu_active_number;
for (se = 0; se < cu_info.num_shader_engines; se++)
- for (i = 0; i < 4; i++)
- cu_per_sh[se] += hweight32(cu_info.cu_bitmap[se][i]);
+ for (sh = 0; sh < cu_info.num_shader_arrays_per_engine; sh++)
+ cu_per_se[se] += hweight32(cu_info.cu_bitmap[se % 4][sh + (se / 4)]);
/* Symmetrically map cu_mask to all SEs:
* cu_mask[0] bit0 -> se_mask[0] bit0;
@@ -128,6 +128,6 @@ void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
se = 0;
cu++;
}
- } while (cu >= cu_per_sh[se] && cu < 32);
+ } while (cu >= cu_per_se[se] && cu < 32);
}
}
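
The rewritten loop above folds shader engines 4..7 of large ASICs onto the 4x4 cu_bitmap: engine se reads row se % 4, with its shader-array columns shifted by se / 4. A small sketch of just the index mapping, assuming 8 engines with 2 shader arrays each:

/* Sketch of the cu_bitmap[se % 4][sh + se / 4] index mapping
 * (assumed 8 shader engines, 2 shader arrays per engine).
 */
#include <stdio.h>

int main(void)
{
	unsigned int num_se = 8, num_sh_per_se = 2;

	for (unsigned int se = 0; se < num_se; se++)
		for (unsigned int sh = 0; sh < num_sh_per_se; sh++)
			printf("SE%u/SH%u -> cu_bitmap[%u][%u]\n",
			       se, sh, se % 4, sh + se / 4);
	return 0;
}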
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 550b61e81015..fbdb16418847 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -26,6 +26,8 @@
#include "kfd_priv.h"
+#define KFD_MAX_NUM_SE 8
+
/**
* struct mqd_manager
*
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 28876aceb14b..19f0fe547c57 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -374,7 +374,6 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
switch (type) {
case KFD_MQD_TYPE_CP:
- case KFD_MQD_TYPE_COMPUTE:
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd;
mqd->free_mqd = free_mqd;
@@ -401,7 +400,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
#endif
break;
case KFD_MQD_TYPE_DIQ:
- mqd->allocate_mqd = allocate_hiq_mqd;
+ mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd;
mqd->load_mqd = load_mqd;
@@ -442,7 +441,7 @@ struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
mqd = mqd_manager_init_cik(type, dev);
if (!mqd)
return NULL;
- if ((type == KFD_MQD_TYPE_CP) || (type == KFD_MQD_TYPE_COMPUTE))
+ if (type == KFD_MQD_TYPE_CP)
mqd->update_mqd = update_mqd_hawaii;
return mqd;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index 9cd3eb2d90bd..d1d68a51bfb8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -66,38 +66,22 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
m->compute_static_thread_mgmt_se3);
}
+static void set_priority(struct v10_compute_mqd *m, struct queue_properties *q)
+{
+ m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
+ m->cp_hqd_queue_priority = q->priority;
+}
+
static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
struct queue_properties *q)
{
- int retval;
- struct kfd_mem_obj *mqd_mem_obj = NULL;
-
- /* From V9, for CWSR, the control stack is located on the next page
- * boundary after the mqd, we will use the gtt allocation function
- * instead of sub-allocation function.
- */
- if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
- mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
- if (!mqd_mem_obj)
- return NULL;
- retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
- ALIGN(q->ctl_stack_size, PAGE_SIZE) +
- ALIGN(sizeof(struct v10_compute_mqd), PAGE_SIZE),
- &(mqd_mem_obj->gtt_mem),
- &(mqd_mem_obj->gpu_addr),
- (void *)&(mqd_mem_obj->cpu_ptr), true);
- } else {
- retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd),
- &mqd_mem_obj);
- }
+ struct kfd_mem_obj *mqd_mem_obj;
- if (retval) {
- kfree(mqd_mem_obj);
+ if (kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd),
+ &mqd_mem_obj))
return NULL;
- }
return mqd_mem_obj;
-
}
static void init_mqd(struct mqd_manager *mm, void **mqd,
@@ -131,9 +115,6 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
10 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
- m->cp_hqd_pipe_priority = 1;
- m->cp_hqd_queue_priority = 15;
-
if (q->format == KFD_QUEUE_FORMAT_AQL) {
m->cp_hqd_aql_control =
1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
@@ -172,6 +153,14 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
return r;
}
+static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ struct queue_properties *p, struct mm_struct *mms)
+{
+ return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id,
+ queue_id, p->doorbell_off);
+}
+
static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q)
{
@@ -230,11 +219,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
m->cp_hqd_ctx_save_control = 0;
update_cu_mask(mm, mqd, q);
+ set_priority(m, q);
- q->is_active = (q->queue_size > 0 &&
- q->queue_address != 0 &&
- q->queue_percent > 0 &&
- !q->is_evicted);
+ q->is_active = QUEUE_IS_ACTIVE(*q);
}
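
QUEUE_IS_ACTIVE() replaces the open-coded four-part activity test at both MQD update sites in this file. Its presumed shape, reconstructed from the test it replaces rather than quoted from kfd_priv.h:

/* Hypothetical reconstruction of the QUEUE_IS_ACTIVE() helper; the field
 * names mirror the open-coded test removed above.
 */
#include <stdbool.h>
#include <stdint.h>

struct queue_properties_model {
	uint64_t queue_size;
	uint64_t queue_address;
	int	 queue_percent;
	bool	 is_evicted;
};

#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 &&	\
			    (q).queue_address != 0 &&	\
			    (q).queue_percent > 0 &&	\
			    !(q).is_evicted)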
static int destroy_mqd(struct mqd_manager *mm, void *mqd,
@@ -250,14 +237,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd,
static void free_mqd(struct mqd_manager *mm, void *mqd,
struct kfd_mem_obj *mqd_mem_obj)
{
- struct kfd_dev *kfd = mm->dev;
-
- if (mqd_mem_obj->gtt_mem) {
- amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem);
- kfree(mqd_mem_obj);
- } else {
- kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
- }
+ kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
}
static bool is_occupied(struct mqd_manager *mm, void *mqd,
@@ -276,18 +256,22 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
{
struct v10_compute_mqd *m;
- /* Control stack is located one page after MQD. */
- void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);
-
m = get_mqd(mqd);
+	/* Control stack is written backwards, while workgroup context data
+	 * is written forwards. Both start from m->cp_hqd_cntl_stack_size.
+	 * The current positions are m->cp_hqd_cntl_stack_offset and
+	 * m->cp_hqd_wg_state_offset, respectively.
+	 */
*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
m->cp_hqd_cntl_stack_offset;
*save_area_used_size = m->cp_hqd_wg_state_offset -
m->cp_hqd_cntl_stack_size;
- if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size))
- return -EFAULT;
+ /* Control stack is not copied to user mode for GFXv10 because
+ * it's part of the context save area that is already
+ * accessible to user mode
+ */
return 0;
}
@@ -306,18 +290,6 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}
-static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
-{
- struct v10_compute_mqd *m;
-
- update_mqd(mm, mqd, q);
-
- /* TODO: what's the point? update_mqd already does this. */
- m = get_mqd(mqd);
- m->cp_hqd_vmid = q->vmid;
-}
-
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -369,11 +341,7 @@ static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
m->sdma_queue_id = q->sdma_queue_id;
m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
-
- q->is_active = (q->queue_size > 0 &&
- q->queue_address != 0 &&
- q->queue_percent > 0 &&
- !q->is_evicted);
+ q->is_active = QUEUE_IS_ACTIVE(*q);
}
/*
@@ -421,7 +389,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
- mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
@@ -429,7 +397,6 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
switch (type) {
case KFD_MQD_TYPE_CP:
- case KFD_MQD_TYPE_COMPUTE:
pr_debug("%s@%i\n", __func__, __LINE__);
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd;
@@ -450,8 +417,8 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
mqd->allocate_mqd = allocate_hiq_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd_hiq_sdma;
- mqd->load_mqd = load_mqd;
- mqd->update_mqd = update_mqd_hiq;
+ mqd->load_mqd = hiq_load_mqd_kiq;
+ mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
mqd->mqd_size = sizeof(struct v10_compute_mqd);
@@ -461,11 +428,11 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type,
pr_debug("%s@%i\n", __func__, __LINE__);
break;
case KFD_MQD_TYPE_DIQ:
- mqd->allocate_mqd = allocate_hiq_mqd;
+ mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd;
mqd->load_mqd = load_mqd;
- mqd->update_mqd = update_mqd_hiq;
+ mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
mqd->mqd_size = sizeof(struct v10_compute_mqd);
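
The open-coded four-clause activity checks removed above collapse into the QUEUE_IS_ACTIVE() macro. A minimal stand-alone sketch of that predicate, assuming the macro simply mirrors the removed condition (the real definition lives in kfd_priv.h and may differ in detail):

	#include <stdbool.h>
	#include <stdint.h>

	/* Stand-in for the queue_properties fields the predicate inspects. */
	struct queue_properties_sketch {
		uint64_t queue_size;
		uint64_t queue_address;
		int      queue_percent;
		bool     is_evicted;
	};

	/* Assumed to mirror the removed open-coded condition. */
	#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 &&	\
				    (q).queue_address != 0 &&	\
				    (q).queue_percent > 0 &&	\
				    !(q).is_evicted)

	int main(void)
	{
		struct queue_properties_sketch q = { 4096, 0x1000, 100, false };

		return QUEUE_IS_ACTIVE(q) ? 0 : 1;
	}
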
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 0c58f91b3ff3..436b7f518979 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -46,7 +46,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
struct queue_properties *q)
{
struct v9_mqd *m;
- uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
+ uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
if (q->cu_mask_count == 0)
return;
@@ -59,12 +59,20 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd,
m->compute_static_thread_mgmt_se1 = se_mask[1];
m->compute_static_thread_mgmt_se2 = se_mask[2];
m->compute_static_thread_mgmt_se3 = se_mask[3];
+ m->compute_static_thread_mgmt_se4 = se_mask[4];
+ m->compute_static_thread_mgmt_se5 = se_mask[5];
+ m->compute_static_thread_mgmt_se6 = se_mask[6];
+ m->compute_static_thread_mgmt_se7 = se_mask[7];
- pr_debug("update cu mask to %#x %#x %#x %#x\n",
+ pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
m->compute_static_thread_mgmt_se0,
m->compute_static_thread_mgmt_se1,
m->compute_static_thread_mgmt_se2,
- m->compute_static_thread_mgmt_se3);
+ m->compute_static_thread_mgmt_se3,
+ m->compute_static_thread_mgmt_se4,
+ m->compute_static_thread_mgmt_se5,
+ m->compute_static_thread_mgmt_se6,
+ m->compute_static_thread_mgmt_se7);
}
static void set_priority(struct v9_mqd *m, struct queue_properties *q)
@@ -84,7 +92,7 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
* instead of sub-allocation function.
*/
if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
- mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
+ mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
if (!mqd_mem_obj)
return NULL;
retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd,
@@ -125,6 +133,10 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
+ m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;
m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
@@ -179,6 +191,14 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
wptr_shift, 0, mms);
}
+static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
+ uint32_t pipe_id, uint32_t queue_id,
+ struct queue_properties *p, struct mm_struct *mms)
+{
+ return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id,
+ queue_id, p->doorbell_off);
+}
+
static void update_mqd(struct mqd_manager *mm, void *mqd,
struct queue_properties *q)
{
@@ -290,7 +310,8 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
m->cp_hqd_cntl_stack_offset;
- *save_area_used_size = m->cp_hqd_wg_state_offset;
+ *save_area_used_size = m->cp_hqd_wg_state_offset -
+ m->cp_hqd_cntl_stack_size;
if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size))
return -EFAULT;
@@ -312,18 +333,6 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}
-static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
-{
- struct v9_mqd *m;
-
- update_mqd(mm, mqd, q);
-
- /* TODO: what's the point? update_mqd already does this. */
- m = get_mqd(mqd);
- m->cp_hqd_vmid = q->vmid;
-}
-
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -431,7 +440,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
switch (type) {
case KFD_MQD_TYPE_CP:
- case KFD_MQD_TYPE_COMPUTE:
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd;
mqd->free_mqd = free_mqd;
@@ -449,8 +457,8 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
mqd->allocate_mqd = allocate_hiq_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd_hiq_sdma;
- mqd->load_mqd = load_mqd;
- mqd->update_mqd = update_mqd_hiq;
+ mqd->load_mqd = hiq_load_mqd_kiq;
+ mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
mqd->mqd_size = sizeof(struct v9_mqd);
@@ -459,11 +467,11 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
#endif
break;
case KFD_MQD_TYPE_DIQ:
- mqd->allocate_mqd = allocate_hiq_mqd;
+ mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd;
mqd->load_mqd = load_mqd;
- mqd->update_mqd = update_mqd_hiq;
+ mqd->update_mqd = update_mqd;
mqd->destroy_mqd = destroy_mqd;
mqd->is_occupied = is_occupied;
mqd->mqd_size = sizeof(struct v9_mqd);
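
The get_wave_state() fix above stops counting the control stack region twice when reporting the save-area size. A user-space-compilable illustration with hypothetical register values:

	#include <stdint.h>
	#include <assert.h>

	/* Mirrors the corrected expression in get_wave_state(). */
	static uint32_t save_area_used_size(uint32_t wg_state_offset,
					    uint32_t cntl_stack_size)
	{
		return wg_state_offset - cntl_stack_size;
	}

	int main(void)
	{
		/* Hypothetical values: 0x2000 bytes of control stack region,
		 * workgroup state written up to offset 0x5000.
		 */
		assert(save_area_used_size(0x5000, 0x2000) == 0x3000);
		/* The old code reported 0x5000, wrongly including the
		 * control stack region in the save-area count.
		 */
		return 0;
	}
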
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 7d144f56f421..a5e8ff1e5945 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -312,11 +312,7 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
struct queue_properties *q)
{
- struct vi_mqd *m;
__update_mqd(mm, mqd, q, MTYPE_UC, 0);
-
- m = get_mqd(mqd);
- m->cp_hqd_vmid = q->vmid;
}
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
@@ -425,7 +421,6 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
switch (type) {
case KFD_MQD_TYPE_CP:
- case KFD_MQD_TYPE_COMPUTE:
mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd;
mqd->free_mqd = free_mqd;
@@ -453,7 +448,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
#endif
break;
case KFD_MQD_TYPE_DIQ:
- mqd->allocate_mqd = allocate_hiq_mqd;
+ mqd->allocate_mqd = allocate_mqd;
mqd->init_mqd = init_mqd_hiq;
mqd->free_mqd = free_mqd;
mqd->load_mqd = load_mqd;
@@ -494,7 +489,7 @@ struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
mqd = mqd_manager_init_vi(type, dev);
if (!mqd)
return NULL;
- if ((type == KFD_MQD_TYPE_CP) || (type == KFD_MQD_TYPE_COMPUTE))
+ if (type == KFD_MQD_TYPE_CP)
mqd->update_mqd = update_mqd_tonga;
return mqd;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index ccf6b2310316..dc406e6dee23 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -239,10 +239,12 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
- pm->pmf = &kfd_v9_pm_funcs;
- break;
+ case CHIP_RENOIR:
+ case CHIP_ARCTURUS:
case CHIP_NAVI10:
- pm->pmf = &kfd_v10_pm_funcs;
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
+ pm->pmf = &kfd_v9_pm_funcs;
break;
default:
WARN(1, "Unexpected ASIC family %u",
@@ -262,10 +264,10 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
return 0;
}
-void pm_uninit(struct packet_manager *pm)
+void pm_uninit(struct packet_manager *pm, bool hanging)
{
mutex_destroy(&pm->lock);
- kernel_queue_uninit(pm->priv_queue);
+ kernel_queue_uninit(pm->priv_queue, hanging);
}
int pm_send_set_resources(struct packet_manager *pm,
@@ -276,7 +278,7 @@ int pm_send_set_resources(struct packet_manager *pm,
size = pm->pmf->set_resources_size;
mutex_lock(&pm->lock);
- pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t),
(unsigned int **)&buffer);
if (!buffer) {
@@ -287,9 +289,9 @@ int pm_send_set_resources(struct packet_manager *pm,
retval = pm->pmf->set_resources(pm, buffer, res);
if (!retval)
- pm->priv_queue->ops.submit_packet(pm->priv_queue);
+ kq_submit_packet(pm->priv_queue);
else
- pm->priv_queue->ops.rollback_packet(pm->priv_queue);
+ kq_rollback_packet(pm->priv_queue);
out:
mutex_unlock(&pm->lock);
@@ -314,7 +316,7 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
packet_size_dwords = pm->pmf->runlist_size / sizeof(uint32_t);
mutex_lock(&pm->lock);
- retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ retval = kq_acquire_packet_buffer(pm->priv_queue,
packet_size_dwords, &rl_buffer);
if (retval)
goto fail_acquire_packet_buffer;
@@ -324,14 +326,14 @@ int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
if (retval)
goto fail_create_runlist;
- pm->priv_queue->ops.submit_packet(pm->priv_queue);
+ kq_submit_packet(pm->priv_queue);
mutex_unlock(&pm->lock);
return retval;
fail_create_runlist:
- pm->priv_queue->ops.rollback_packet(pm->priv_queue);
+ kq_rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
mutex_unlock(&pm->lock);
fail_create_runlist_ib:
@@ -350,7 +352,7 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
size = pm->pmf->query_status_size;
mutex_lock(&pm->lock);
- pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t), (unsigned int **)&buffer);
if (!buffer) {
pr_err("Failed to allocate buffer on kernel queue\n");
@@ -360,9 +362,9 @@ int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
retval = pm->pmf->query_status(pm, buffer, fence_address, fence_value);
if (!retval)
- pm->priv_queue->ops.submit_packet(pm->priv_queue);
+ kq_submit_packet(pm->priv_queue);
else
- pm->priv_queue->ops.rollback_packet(pm->priv_queue);
+ kq_rollback_packet(pm->priv_queue);
out:
mutex_unlock(&pm->lock);
@@ -379,7 +381,7 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
size = pm->pmf->unmap_queues_size;
mutex_lock(&pm->lock);
- pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t), (unsigned int **)&buffer);
if (!buffer) {
pr_err("Failed to allocate buffer on kernel queue\n");
@@ -390,9 +392,9 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
retval = pm->pmf->unmap_queues(pm, buffer, type, filter, filter_param,
reset, sdma_engine);
if (!retval)
- pm->priv_queue->ops.submit_packet(pm->priv_queue);
+ kq_submit_packet(pm->priv_queue);
else
- pm->priv_queue->ops.rollback_packet(pm->priv_queue);
+ kq_rollback_packet(pm->priv_queue);
out:
mutex_unlock(&pm->lock);
@@ -437,7 +439,7 @@ int pm_debugfs_hang_hws(struct packet_manager *pm)
size = pm->pmf->query_status_size;
mutex_lock(&pm->lock);
- pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ kq_acquire_packet_buffer(pm->priv_queue,
size / sizeof(uint32_t), (unsigned int **)&buffer);
if (!buffer) {
pr_err("Failed to allocate buffer on kernel queue\n");
@@ -445,7 +447,7 @@ int pm_debugfs_hang_hws(struct packet_manager *pm)
goto out;
}
memset(buffer, 0x55, size);
- pm->priv_queue->ops.submit_packet(pm->priv_queue);
+ kq_submit_packet(pm->priv_queue);
pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
buffer[0], buffer[1], buffer[2], buffer[3],
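
Every submission path in this file now follows the same acquire/build/submit-or-rollback pattern through the kq_* helpers. A toy stand-alone sketch of that invariant, assuming the helpers keep the semantics of the removed priv_queue->ops callbacks; all types below are illustrative stand-ins, not the driver's:

	#include <stdio.h>

	struct kernel_queue_sketch { unsigned int wptr, pending_wptr; };

	static int kq_acquire(struct kernel_queue_sketch *kq, unsigned int dwords)
	{
		kq->pending_wptr = kq->wptr + dwords; /* reserve, not yet visible */
		return 0;
	}

	static void kq_submit(struct kernel_queue_sketch *kq)
	{
		kq->wptr = kq->pending_wptr;	/* publish and ring doorbell */
	}

	static void kq_rollback(struct kernel_queue_sketch *kq)
	{
		kq->pending_wptr = kq->wptr;	/* drop the reservation */
	}

	int main(void)
	{
		struct kernel_queue_sketch kq = { 0, 0 };
		int build_failed = 0;

		kq_acquire(&kq, 8);
		/* ... build the PM4 packet into the reserved space ... */
		if (!build_failed)
			kq_submit(&kq);
		else
			kq_rollback(&kq);
		printf("wptr = %u\n", kq.wptr);
		return 0;
	}
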
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
index 2d5ddf199bd0..2de01009f1b6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
@@ -25,47 +25,7 @@
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_ai.h"
#include "kfd_pm4_opcodes.h"
-
-static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size);
-static void uninitialize_v9(struct kernel_queue *kq);
-static void submit_packet_v9(struct kernel_queue *kq);
-
-void kernel_queue_init_v9(struct kernel_queue_ops *ops)
-{
- ops->initialize = initialize_v9;
- ops->uninitialize = uninitialize_v9;
- ops->submit_packet = submit_packet_v9;
-}
-
-static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size)
-{
- int retval;
-
- retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
- if (retval)
- return false;
-
- kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
- kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
-
- memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
-
- return true;
-}
-
-static void uninitialize_v9(struct kernel_queue *kq)
-{
- kfd_gtt_sa_free(kq->dev, kq->eop_mem);
-}
-
-static void submit_packet_v9(struct kernel_queue *kq)
-{
- *kq->wptr64_kernel = kq->pending_wptr64;
- write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
- kq->pending_wptr64);
-}
+#include "gc/gc_10_1_0_sh_mask.h"
static int pm_map_process_v9(struct packet_manager *pm,
uint32_t *buffer, struct qcm_process_device *qpd)
@@ -81,7 +41,8 @@ static int pm_map_process_v9(struct packet_manager *pm,
packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
packet->bitfields2.process_quantum = 1;
packet->bitfields2.pasid = qpd->pqm->process->pasid;
- packet->bitfields14.gds_size = qpd->gds_size;
+ packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
+ packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
packet->bitfields14.num_gws = qpd->num_gws;
packet->bitfields14.num_oac = qpd->num_oac;
packet->bitfields14.sdma_enable = 1;
@@ -89,10 +50,17 @@ static int pm_map_process_v9(struct packet_manager *pm,
packet->sh_mem_config = qpd->sh_mem_config;
packet->sh_mem_bases = qpd->sh_mem_bases;
- packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
- packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8);
- packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
- packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
+ if (qpd->tba_addr) {
+ packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
+ /* On GFX9, unlike GFX10, the TRAP_EN bit of SQ_SHADER_TBA_HI is
+ * not defined, so setting it does no harm.
+ */
+ packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8)
+ | 1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT;
+
+ packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
+ packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
+ }
packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
@@ -143,6 +111,34 @@ static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
return 0;
}
+static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer,
+ struct scheduling_resources *res)
+{
+ struct pm4_mes_set_resources *packet;
+
+ packet = (struct pm4_mes_set_resources *)buffer;
+ memset(buffer, 0, sizeof(struct pm4_mes_set_resources));
+
+ packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
+ sizeof(struct pm4_mes_set_resources));
+
+ packet->bitfields2.queue_type =
+ queue_type__mes_set_resources__hsa_interface_queue_hiq;
+ packet->bitfields2.vmid_mask = res->vmid_mask;
+ packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
+ packet->bitfields7.oac_mask = res->oac_mask;
+ packet->bitfields8.gds_heap_base = res->gds_heap_base;
+ packet->bitfields8.gds_heap_size = res->gds_heap_size;
+
+ packet->gws_mask_lo = lower_32_bits(res->gws_mask);
+ packet->gws_mask_hi = upper_32_bits(res->gws_mask);
+
+ packet->queue_mask_lo = lower_32_bits(res->queue_mask);
+ packet->queue_mask_hi = upper_32_bits(res->queue_mask);
+
+ return 0;
+}
+
static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
struct queue *q, bool is_static)
{
@@ -161,6 +157,8 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
packet->bitfields2.engine_sel =
engine_sel__mes_map_queues__compute_vi;
packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
+ packet->bitfields2.extended_engine_sel =
+ extended_engine_sel__mes_map_queues__legacy_engine_sel;
packet->bitfields2.queue_type =
queue_type__mes_map_queues__normal_compute_vi;
@@ -176,9 +174,15 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
break;
case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_SDMA_XGMI:
- packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
- engine_sel__mes_map_queues__sdma0_vi;
use_static = false; /* no static queues under SDMA */
+ if (q->properties.sdma_engine_id < 2) {
+ packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
+ engine_sel__mes_map_queues__sdma0_vi;
+ } else {
+ packet->bitfields2.extended_engine_sel =
+ extended_engine_sel__mes_map_queues__sdma0_to_7_sel;
+ packet->bitfields2.engine_sel = q->properties.sdma_engine_id;
+ }
break;
default:
WARN(1, "queue type %d", q->properties.type);
@@ -218,13 +222,23 @@ static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
switch (type) {
case KFD_QUEUE_TYPE_COMPUTE:
case KFD_QUEUE_TYPE_DIQ:
+ packet->bitfields2.extended_engine_sel =
+ extended_engine_sel__mes_unmap_queues__legacy_engine_sel;
packet->bitfields2.engine_sel =
engine_sel__mes_unmap_queues__compute;
break;
case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_SDMA_XGMI:
- packet->bitfields2.engine_sel =
- engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
+ if (sdma_engine < 2) {
+ packet->bitfields2.extended_engine_sel =
+ extended_engine_sel__mes_unmap_queues__legacy_engine_sel;
+ packet->bitfields2.engine_sel =
+ engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
+ } else {
+ packet->bitfields2.extended_engine_sel =
+ extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel;
+ packet->bitfields2.engine_sel = sdma_engine;
+ }
break;
default:
WARN(1, "queue type %d", type);
@@ -294,48 +308,19 @@ static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
return 0;
}
-
-static int pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
-{
- struct pm4_mec_release_mem *packet;
-
- packet = (struct pm4_mec_release_mem *)buffer;
- memset(buffer, 0, sizeof(struct pm4_mec_release_mem));
-
- packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
- sizeof(struct pm4_mec_release_mem));
-
- packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
- packet->bitfields2.event_index = event_index__mec_release_mem__end_of_pipe;
- packet->bitfields2.tcl1_action_ena = 1;
- packet->bitfields2.tc_action_ena = 1;
- packet->bitfields2.cache_policy = cache_policy__mec_release_mem__lru;
-
- packet->bitfields3.data_sel = data_sel__mec_release_mem__send_32_bit_low;
- packet->bitfields3.int_sel =
- int_sel__mec_release_mem__send_interrupt_after_write_confirm;
-
- packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
- packet->address_hi = upper_32_bits(gpu_addr);
-
- packet->data_lo = 0;
-
- return 0;
-}
-
const struct packet_manager_funcs kfd_v9_pm_funcs = {
.map_process = pm_map_process_v9,
.runlist = pm_runlist_v9,
- .set_resources = pm_set_resources_vi,
+ .set_resources = pm_set_resources_v9,
.map_queues = pm_map_queues_v9,
.unmap_queues = pm_unmap_queues_v9,
.query_status = pm_query_status_v9,
- .release_mem = pm_release_mem_v9,
+ .release_mem = NULL,
.map_process_size = sizeof(struct pm4_mes_map_process),
.runlist_size = sizeof(struct pm4_mes_runlist),
.set_resources_size = sizeof(struct pm4_mes_set_resources),
.map_queues_size = sizeof(struct pm4_mes_map_queues),
.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
.query_status_size = sizeof(struct pm4_mes_query_status),
- .release_mem_size = sizeof(struct pm4_mec_release_mem)
+ .release_mem_size = 0,
};
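
pm_map_process_v9() above now splits the 10-bit GDS size across gds_size (low 6 bits) and gds_size_hi (high 4 bits). A small worked example of the split, with a hypothetical size value:

	#include <stdint.h>
	#include <assert.h>

	static void split_gds_size(uint32_t gds_size, uint32_t *lo6, uint32_t *hi4)
	{
		*lo6 = gds_size & 0x3F;		/* -> bitfields14.gds_size    */
		*hi4 = (gds_size >> 6) & 0xF;	/* -> bitfields14.gds_size_hi */
	}

	int main(void)
	{
		uint32_t lo, hi;

		split_gds_size(0x97, &lo, &hi);		/* hypothetical size */
		assert(lo == 0x17 && hi == 0x2);
		assert(((hi << 6) | lo) == 0x97);	/* recoverable from the fields */
		return 0;
	}
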
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
index 2adaf40027eb..bed4d0ccb6b1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c
@@ -26,47 +26,6 @@
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_opcodes.h"
-static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size);
-static void uninitialize_vi(struct kernel_queue *kq);
-static void submit_packet_vi(struct kernel_queue *kq);
-
-void kernel_queue_init_vi(struct kernel_queue_ops *ops)
-{
- ops->initialize = initialize_vi;
- ops->uninitialize = uninitialize_vi;
- ops->submit_packet = submit_packet_vi;
-}
-
-static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev,
- enum kfd_queue_type type, unsigned int queue_size)
-{
- int retval;
-
- retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
- if (retval != 0)
- return false;
-
- kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
- kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
-
- memset(kq->eop_kernel_addr, 0, PAGE_SIZE);
-
- return true;
-}
-
-static void uninitialize_vi(struct kernel_queue *kq)
-{
- kfd_gtt_sa_free(kq->dev, kq->eop_mem);
-}
-
-static void submit_packet_vi(struct kernel_queue *kq)
-{
- *kq->wptr_kernel = kq->pending_wptr;
- write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
- kq->pending_wptr);
-}
-
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
{
union PM4_MES_TYPE_3_HEADER header;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
index e3e21404cfa0..4d7add843746 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h
@@ -83,10 +83,10 @@ struct pm4_mes_set_resources {
union {
struct {
- uint32_t gds_heap_base:6;
- uint32_t reserved3:5;
- uint32_t gds_heap_size:6;
- uint32_t reserved4:15;
+ uint32_t gds_heap_base:10;
+ uint32_t reserved3:1;
+ uint32_t gds_heap_size:10;
+ uint32_t reserved4:11;
} bitfields8;
uint32_t ordinal8;
};
@@ -179,7 +179,7 @@ struct pm4_mes_map_process {
uint32_t num_gws:7;
uint32_t sdma_enable:1;
uint32_t num_oac:4;
- uint32_t reserved8:4;
+ uint32_t gds_size_hi:4;
uint32_t gds_size:6;
uint32_t num_queues:10;
} bitfields14;
@@ -260,6 +260,10 @@ enum mes_map_queues_engine_sel_enum {
engine_sel__mes_map_queues__sdma1_vi = 3
};
+enum mes_map_queues_extended_engine_sel_enum {
+ extended_engine_sel__mes_map_queues__legacy_engine_sel = 0,
+ extended_engine_sel__mes_map_queues__sdma0_to_7_sel = 1
+};
struct pm4_mes_map_queues {
union {
@@ -269,7 +273,8 @@ struct pm4_mes_map_queues {
union {
struct {
- uint32_t reserved1:4;
+ uint32_t reserved1:2;
+ enum mes_map_queues_extended_engine_sel_enum extended_engine_sel:2;
enum mes_map_queues_queue_sel_enum queue_sel:2;
uint32_t reserved5:6;
uint32_t gws_control_queue:1;
@@ -382,6 +387,11 @@ enum mes_unmap_queues_engine_sel_enum {
engine_sel__mes_unmap_queues__sdmal = 3
};
+enum mes_unmap_queues_extended_engine_sel_enum {
+ extended_engine_sel__mes_unmap_queues__legacy_engine_sel = 0,
+ extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel = 1
+};
+
struct pm4_mes_unmap_queues {
union {
union PM4_MES_TYPE_3_HEADER header; /* header */
@@ -391,7 +401,7 @@ struct pm4_mes_unmap_queues {
union {
struct {
enum mes_unmap_queues_action_enum action:2;
- uint32_t reserved1:2;
+ enum mes_unmap_queues_extended_engine_sel_enum extended_engine_sel:2;
enum mes_unmap_queues_queue_sel_enum queue_sel:2;
uint32_t reserved2:20;
enum mes_unmap_queues_engine_sel_enum engine_sel:3;
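
The new extended_engine_sel fields widen SDMA addressing beyond the two engines the legacy encoding could express. A sketch of the selection rule used by pm_map_queues_v9(), assuming engine_sel__mes_map_queues__sdma0_vi == 2 as in this header; the enum names below are illustrative stand-ins:

	#include <stdint.h>

	enum { LEGACY_ENGINE_SEL = 0, SDMA0_TO_7_SEL = 1 }; /* extended_engine_sel */

	static void select_sdma_engine(uint32_t sdma_engine_id,
				       uint32_t *extended_engine_sel,
				       uint32_t *engine_sel)
	{
		if (sdma_engine_id < 2) {
			*extended_engine_sel = LEGACY_ENGINE_SEL;
			*engine_sel = 2 + sdma_engine_id; /* sdma0_vi/sdma1_vi */
		} else {
			*extended_engine_sel = SDMA0_TO_7_SEL;
			*engine_sel = sdma_engine_id;	  /* raw engine id 2..7 */
		}
	}

	int main(void)
	{
		uint32_t ext, sel;

		select_sdma_engine(5, &ext, &sel);	/* -> ext = 1, sel = 5 */
		return (ext == SDMA0_TO_7_SEL && sel == 5) ? 0 : 1;
	}
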
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 3933fb6a371e..6af1b5881f43 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -36,6 +36,10 @@
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <linux/sysfs.h>
+#include <linux/device_cgroup.h>
+#include <drm/drm_file.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_device.h>
#include <kgd_kfd_interface.h>
#include "amd_shared.h"
@@ -55,24 +59,21 @@
* NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
* defines are w.r.t to PAGE_SIZE
*/
-#define KFD_MMAP_TYPE_SHIFT (62 - PAGE_SHIFT)
+#define KFD_MMAP_TYPE_SHIFT 62
#define KFD_MMAP_TYPE_MASK (0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_DOORBELL (0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_EVENTS (0x2ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_RESERVED_MEM (0x1ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_MMIO (0x0ULL << KFD_MMAP_TYPE_SHIFT)
-#define KFD_MMAP_GPU_ID_SHIFT (46 - PAGE_SHIFT)
+#define KFD_MMAP_GPU_ID_SHIFT 46
#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
<< KFD_MMAP_GPU_ID_SHIFT)
#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
& KFD_MMAP_GPU_ID_MASK)
-#define KFD_MMAP_GPU_ID_GET(offset) ((offset & KFD_MMAP_GPU_ID_MASK) \
+#define KFD_MMAP_GET_GPU_ID(offset) ((offset & KFD_MMAP_GPU_ID_MASK) \
>> KFD_MMAP_GPU_ID_SHIFT)
-#define KFD_MMAP_OFFSET_VALUE_MASK (0x3FFFFFFFFFFFULL >> PAGE_SHIFT)
-#define KFD_MMAP_OFFSET_VALUE_GET(offset) (offset & KFD_MMAP_OFFSET_VALUE_MASK)
-
/*
* When working with cp scheduler we should assign the HIQ manually or via
* the amdgpu driver to a fixed hqd slot, here are the fixed HIQ hqd slot
@@ -179,10 +180,6 @@ enum cache_policy {
cache_policy_noncoherent
};
-#define KFD_IS_VI(chip) ((chip) >= CHIP_CARRIZO && (chip) <= CHIP_POLARIS11)
-#define KFD_IS_DGPU(chip) (((chip) >= CHIP_TONGA && \
- (chip) <= CHIP_NAVI10) || \
- (chip) == CHIP_HAWAII)
#define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10)
struct kfd_event_interrupt_class {
@@ -195,6 +192,7 @@ struct kfd_event_interrupt_class {
struct kfd_device_info {
enum amd_asic_type asic_family;
+ const char *asic_name;
const struct kfd_event_interrupt_class *event_interrupt_class;
unsigned int max_pasid_bits;
unsigned int max_no_of_hqd;
@@ -229,6 +227,7 @@ struct kfd_dev {
const struct kfd_device_info *device_info;
struct pci_dev *pdev;
+ struct drm_device *ddev;
unsigned int id; /* topology stub index */
@@ -236,9 +235,10 @@ struct kfd_dev {
* KFD. It is aligned for mapping
* into user mode
*/
- size_t doorbell_id_offset; /* Doorbell offset (from KFD doorbell
- * to HW doorbell, GFX reserved some
- * at the start)
+ size_t doorbell_base_dw_offset; /* Offset from the start of the PCI
+ * doorbell BAR to the first KFD
+ * doorbell in dwords. GFX reserves
+ * the segment before this offset.
*/
u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
* page used by kernel queue
@@ -508,8 +508,7 @@ struct queue {
* Please read the kfd_mqd_manager.h description.
*/
enum KFD_MQD_TYPE {
- KFD_MQD_TYPE_COMPUTE = 0, /* for no cp scheduling */
- KFD_MQD_TYPE_HIQ, /* for hiq */
+ KFD_MQD_TYPE_HIQ = 0, /* for hiq */
KFD_MQD_TYPE_CP, /* for cp queues and diq */
KFD_MQD_TYPE_SDMA, /* for sdma queues */
KFD_MQD_TYPE_DIQ, /* for diq */
@@ -686,10 +685,7 @@ struct kfd_process {
/* We want to receive a notification when the mm_struct is destroyed */
struct mmu_notifier mmu_notifier;
- /* Use for delayed freeing of kfd_process structure */
- struct rcu_head rcu;
-
- unsigned int pasid;
+ uint16_t pasid;
unsigned int doorbell_index;
/*
@@ -819,7 +815,7 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(void __iomem *db, u32 value);
void write_kernel_doorbell64(void __iomem *db, u64 value);
-unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
+unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
struct kfd_process *process,
unsigned int doorbell_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
@@ -887,7 +883,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
enum kfd_queue_type type);
-void kernel_queue_uninit(struct kernel_queue *kq);
+void kernel_queue_uninit(struct kernel_queue *kq, bool hanging);
int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid);
/* Process Queue Manager */
@@ -905,7 +901,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
struct kfd_dev *dev,
struct file *f,
struct queue_properties *properties,
- unsigned int *qid);
+ unsigned int *qid,
+ uint32_t *p_doorbell_offset_in_process);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
struct queue_properties *p);
@@ -973,10 +970,9 @@ struct packet_manager_funcs {
extern const struct packet_manager_funcs kfd_vi_pm_funcs;
extern const struct packet_manager_funcs kfd_v9_pm_funcs;
-extern const struct packet_manager_funcs kfd_v10_pm_funcs;
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
-void pm_uninit(struct packet_manager *pm);
+void pm_uninit(struct packet_manager *pm, bool hanging);
int pm_send_set_resources(struct packet_manager *pm,
struct scheduling_resources *res);
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
@@ -992,9 +988,6 @@ void pm_release_ib(struct packet_manager *pm);
/* Following PM funcs can be shared among VI and AI */
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size);
-int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
- struct scheduling_resources *res);
-
uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
@@ -1042,6 +1035,21 @@ bool kfd_is_locked(void);
void kfd_inc_compute_active(struct kfd_dev *dev);
void kfd_dec_compute_active(struct kfd_dev *dev);
+/* Cgroup Support */
+/* Check with device cgroup if @kfd device is accessible */
+static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd)
+{
+#if defined(CONFIG_CGROUP_DEVICE)
+ struct drm_device *ddev = kfd->ddev;
+
+ return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major,
+ ddev->render->index,
+ DEVCG_ACC_WRITE | DEVCG_ACC_READ);
+#else
+ return 0;
+#endif
+}
+
/* Debugfs */
#if defined(CONFIG_DEBUG_FS)
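
With the PAGE_SHIFT correction dropped, the mmap type and GPU id are encoded at fixed bit positions of the offset. A round-trip sketch of that encoding, assuming KFD_GPU_ID_HASH_WIDTH is 16; the gpu_id value is hypothetical:

	#include <stdint.h>
	#include <assert.h>

	#define TYPE_SHIFT	62	/* KFD_MMAP_TYPE_SHIFT */
	#define GPU_ID_SHIFT	46	/* KFD_MMAP_GPU_ID_SHIFT */
	#define GPU_ID_MASK	(((1ULL << 16) - 1) << GPU_ID_SHIFT)

	static uint64_t encode(uint64_t type, uint64_t gpu_id)
	{
		return (type << TYPE_SHIFT) |
		       ((gpu_id << GPU_ID_SHIFT) & GPU_ID_MASK);
	}

	static uint32_t get_gpu_id(uint64_t offset)	/* KFD_MMAP_GET_GPU_ID */
	{
		return (offset & GPU_ID_MASK) >> GPU_ID_SHIFT;
	}

	int main(void)
	{
		uint64_t off = encode(0x3 /* doorbell */, 0x1234);

		assert(get_gpu_id(off) == 0x1234);
		return 0;
	}
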
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 8f1076c0c88a..25b90f70aecd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -32,6 +32,7 @@
#include <linux/mman.h>
#include <linux/file.h>
#include "amdgpu_amdkfd.h"
+#include "amdgpu.h"
struct mm_struct;
@@ -62,8 +63,8 @@ static struct workqueue_struct *kfd_restore_wq;
static struct kfd_process *find_process(const struct task_struct *thread);
static void kfd_process_ref_release(struct kref *ref);
-static struct kfd_process *create_process(const struct task_struct *thread,
- struct file *filep);
+static struct kfd_process *create_process(const struct task_struct *thread);
+static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);
static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);
@@ -289,7 +290,15 @@ struct kfd_process *kfd_create_process(struct file *filep)
if (process) {
pr_debug("Process already found\n");
} else {
- process = create_process(thread, filep);
+ process = create_process(thread);
+ if (IS_ERR(process))
+ goto out;
+
+ ret = kfd_process_init_cwsr_apu(process, filep);
+ if (ret) {
+ process = ERR_PTR(ret);
+ goto out;
+ }
if (!procfs.kobj)
goto out;
@@ -316,6 +325,8 @@ struct kfd_process *kfd_create_process(struct file *filep)
(int)process->lead_thread->pid);
}
out:
+ if (!IS_ERR(process))
+ kref_get(&process->ref);
mutex_unlock(&kfd_processes_mutex);
return process;
@@ -408,7 +419,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
list_for_each_entry_safe(pdd, temp, &p->per_device_data,
per_device_list) {
- pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
+ pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
pdd->dev->id, p->pasid);
if (pdd->drm_file) {
@@ -478,11 +489,9 @@ static void kfd_process_ref_release(struct kref *ref)
queue_work(kfd_process_wq, &p->release_work);
}
-static void kfd_process_destroy_delayed(struct rcu_head *rcu)
+static void kfd_process_free_notifier(struct mmu_notifier *mn)
{
- struct kfd_process *p = container_of(rcu, struct kfd_process, rcu);
-
- kfd_unref_process(p);
+ kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier));
}
static void kfd_process_notifier_release(struct mmu_notifier *mn,
@@ -534,12 +543,12 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
mutex_unlock(&p->mutex);
- mmu_notifier_unregister_no_release(&p->mmu_notifier, mm);
- mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
+ mmu_notifier_put(&p->mmu_notifier);
}
static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
.release = kfd_process_notifier_release,
+ .free_notifier = kfd_process_free_notifier,
};
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
@@ -554,8 +563,7 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
continue;
- offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id))
- << PAGE_SHIFT;
+ offset = KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id);
qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
MAP_SHARED, offset);
@@ -609,81 +617,69 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
return 0;
}
-static struct kfd_process *create_process(const struct task_struct *thread,
- struct file *filep)
+/*
+ * On return the kfd_process is fully operational and will be freed when the
+ * mm is released
+ */
+static struct kfd_process *create_process(const struct task_struct *thread)
{
struct kfd_process *process;
int err = -ENOMEM;
process = kzalloc(sizeof(*process), GFP_KERNEL);
-
if (!process)
goto err_alloc_process;
- process->pasid = kfd_pasid_alloc();
- if (process->pasid == 0)
- goto err_alloc_pasid;
-
- if (kfd_alloc_process_doorbells(process) < 0)
- goto err_alloc_doorbells;
-
kref_init(&process->ref);
-
mutex_init(&process->mutex);
-
process->mm = thread->mm;
-
- /* register notifier */
- process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
- err = mmu_notifier_register(&process->mmu_notifier, process->mm);
- if (err)
- goto err_mmu_notifier;
-
- hash_add_rcu(kfd_processes_table, &process->kfd_processes,
- (uintptr_t)process->mm);
-
process->lead_thread = thread->group_leader;
- get_task_struct(process->lead_thread);
-
INIT_LIST_HEAD(&process->per_device_data);
-
+ INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
+ INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
+ process->last_restore_timestamp = get_jiffies_64();
kfd_event_init_process(process);
+ process->is_32bit_user_mode = in_compat_syscall();
+
+ process->pasid = kfd_pasid_alloc();
+ if (process->pasid == 0)
+ goto err_alloc_pasid;
+
+ if (kfd_alloc_process_doorbells(process) < 0)
+ goto err_alloc_doorbells;
err = pqm_init(&process->pqm, process);
if (err != 0)
goto err_process_pqm_init;
/* init process apertures*/
- process->is_32bit_user_mode = in_compat_syscall();
err = kfd_init_apertures(process);
if (err != 0)
goto err_init_apertures;
- INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
- INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
- process->last_restore_timestamp = get_jiffies_64();
-
- err = kfd_process_init_cwsr_apu(process, filep);
+ /* Must be last, have to use release destruction after this */
+ process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
+ err = mmu_notifier_register(&process->mmu_notifier, process->mm);
if (err)
- goto err_init_cwsr;
+ goto err_register_notifier;
+
+ get_task_struct(process->lead_thread);
+ hash_add_rcu(kfd_processes_table, &process->kfd_processes,
+ (uintptr_t)process->mm);
return process;
-err_init_cwsr:
+err_register_notifier:
kfd_process_free_outstanding_kfd_bos(process);
kfd_process_destroy_pdds(process);
err_init_apertures:
pqm_uninit(&process->pqm);
err_process_pqm_init:
- hash_del_rcu(&process->kfd_processes);
- synchronize_rcu();
- mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
-err_mmu_notifier:
- mutex_destroy(&process->mutex);
kfd_free_process_doorbells(process);
err_alloc_doorbells:
kfd_pasid_free(process->pasid);
err_alloc_pasid:
+ mutex_destroy(&process->mutex);
kfree(process);
err_alloc_process:
return ERR_PTR(err);
@@ -693,6 +689,8 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
struct kfd_dev *dev)
{
unsigned int i;
+ int range_start = dev->shared_resources.non_cp_doorbells_start;
+ int range_end = dev->shared_resources.non_cp_doorbells_end;
if (!KFD_IS_SOC15(dev->device_info->asic_family))
return 0;
@@ -704,14 +702,16 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd,
return -ENOMEM;
/* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. */
+ pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end);
+ pr_debug("reserved doorbell 0x%03x - 0x%03x\n",
+ range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
+ range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
+
for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) {
- if (i >= dev->shared_resources.non_cp_doorbells_start
- && i <= dev->shared_resources.non_cp_doorbells_end) {
+ if (i >= range_start && i <= range_end) {
set_bit(i, qpd->doorbell_bitmap);
set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET,
qpd->doorbell_bitmap);
- pr_debug("reserved doorbell 0x%03x and 0x%03x\n", i,
- i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET);
}
}
@@ -801,6 +801,8 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
return ret;
}
+ amdgpu_vm_set_task_info(pdd->vm);
+
ret = kfd_process_device_reserve_ib_mem(pdd);
if (ret)
goto err_reserve_ib_mem;
@@ -1024,7 +1026,7 @@ static void evict_process_worker(struct work_struct *work)
*/
flush_delayed_work(&p->restore_work);
- pr_debug("Started evicting pasid %d\n", p->pasid);
+ pr_debug("Started evicting pasid 0x%x\n", p->pasid);
ret = kfd_process_evict_queues(p);
if (!ret) {
dma_fence_signal(p->ef);
@@ -1033,16 +1035,15 @@ static void evict_process_worker(struct work_struct *work)
queue_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
- pr_debug("Finished evicting pasid %d\n", p->pasid);
+ pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
} else
- pr_err("Failed to evict queues of pasid %d\n", p->pasid);
+ pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid);
}
static void restore_process_worker(struct work_struct *work)
{
struct delayed_work *dwork;
struct kfd_process *p;
- struct kfd_process_device *pdd;
int ret = 0;
dwork = to_delayed_work(work);
@@ -1051,17 +1052,7 @@ static void restore_process_worker(struct work_struct *work)
* lifetime of this thread, kfd_process p will be valid
*/
p = container_of(dwork, struct kfd_process, restore_work);
-
- /* Call restore_process_bos on the first KGD device. This function
- * takes care of restoring the whole process including other devices.
- * Restore can fail if enough memory is not available. If so,
- * reschedule again.
- */
- pdd = list_first_entry(&p->per_device_data,
- struct kfd_process_device,
- per_device_list);
-
- pr_debug("Started restoring pasid %d\n", p->pasid);
+ pr_debug("Started restoring pasid 0x%x\n", p->pasid);
/* Setting last_restore_timestamp before successful restoration.
* Otherwise this would have to be set by KGD (restore_process_bos)
@@ -1077,7 +1068,7 @@ static void restore_process_worker(struct work_struct *work)
ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info,
&p->ef);
if (ret) {
- pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
+ pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
p->pasid, PROCESS_BACK_OFF_TIME_MS);
ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
@@ -1087,9 +1078,9 @@ static void restore_process_worker(struct work_struct *work)
ret = kfd_process_restore_queues(p);
if (!ret)
- pr_debug("Finished restoring pasid %d\n", p->pasid);
+ pr_debug("Finished restoring pasid 0x%x\n", p->pasid);
else
- pr_err("Failed to restore queues of pasid %d\n", p->pasid);
+ pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid);
}
void kfd_suspend_all_processes(void)
@@ -1103,7 +1094,7 @@ void kfd_suspend_all_processes(void)
cancel_delayed_work_sync(&p->restore_work);
if (kfd_process_evict_queues(p))
- pr_err("Failed to suspend process %d\n", p->pasid);
+ pr_err("Failed to suspend process 0x%x\n", p->pasid);
dma_fence_signal(p->ef);
dma_fence_put(p->ef);
p->ef = NULL;
@@ -1162,16 +1153,17 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
void kfd_flush_tlb(struct kfd_process_device *pdd)
{
struct kfd_dev *dev = pdd->dev;
- const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
/* Nothing to flush until a VMID is assigned, which
* only happens when the first queue is created.
*/
if (pdd->qpd.vmid)
- f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid);
+ amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->kgd,
+ pdd->qpd.vmid);
} else {
- f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid);
+ amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd,
+ pdd->process->pasid);
}
}
@@ -1186,7 +1178,7 @@ int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
int idx = srcu_read_lock(&kfd_processes_srcu);
hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
- seq_printf(m, "Process %d PASID %d:\n",
+ seq_printf(m, "Process %d PASID 0x%x:\n",
p->lead_thread->tgid, p->pasid);
mutex_lock(&p->mutex);
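
init_doorbell_bitmap() above reserves each non-CP doorbell twice: once at its index and once at its 64-bit mirror. A toy version of that loop over a plain byte array, with KFD_QUEUE_DOORBELL_MIRROR_OFFSET assumed to be 512 and a hypothetical reserved range:

	#include <stdint.h>
	#include <string.h>

	#define MIRROR_OFFSET 512 /* assumed KFD_QUEUE_DOORBELL_MIRROR_OFFSET */

	static void reserve_non_cp_doorbells(uint8_t *bitmap,
					     unsigned int start, unsigned int end)
	{
		for (unsigned int i = start; i <= end; i++) {
			bitmap[i] = 1;			/* 32-bit doorbell id   */
			bitmap[i + MIRROR_OFFSET] = 1;	/* its 64-bit mirror id */
		}
	}

	int main(void)
	{
		uint8_t bitmap[1024];

		memset(bitmap, 0, sizeof(bitmap));
		reserve_non_cp_doorbells(bitmap, 0x060, 0x08f);	/* hypothetical */
		return 0;
	}
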
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 7e6c3ee82f5b..31fcd1b51f00 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -53,7 +53,7 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
pr_debug("The new slot id %lu\n", found);
if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
- pr_info("Cannot open more queues for process with pasid %d\n",
+ pr_info("Cannot open more queues for process with pasid 0x%x\n",
pqm->process->pasid);
return -ENOMEM;
}
@@ -162,7 +162,7 @@ void pqm_uninit(struct process_queue_manager *pqm)
pqm->queue_slot_bitmap = NULL;
}
-static int create_cp_queue(struct process_queue_manager *pqm,
+static int init_user_queue(struct process_queue_manager *pqm,
struct kfd_dev *dev, struct queue **q,
struct queue_properties *q_properties,
struct file *f, unsigned int qid)
@@ -192,7 +192,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
struct kfd_dev *dev,
struct file *f,
struct queue_properties *properties,
- unsigned int *qid)
+ unsigned int *qid,
+ uint32_t *p_doorbell_offset_in_process)
{
int retval;
struct kfd_process_device *pdd;
@@ -250,7 +251,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
}
- retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
+ retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
@@ -271,7 +272,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
goto err_create_queue;
}
- retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
+ retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
@@ -298,17 +299,20 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("Pasid %d DQM create queue %d failed. ret %d\n",
+ pr_err("Pasid 0x%x DQM create queue %d failed. ret %d\n",
pqm->process->pasid, type, retval);
goto err_create_queue;
}
- if (q)
+ if (q && p_doorbell_offset_in_process)
/* Return the doorbell offset within the doorbell page
* to the caller so it can be passed up to user mode
* (in bytes).
+ * There are always 1024 doorbells per process, so in case
+ * of 8-byte doorbells, there are two doorbell pages per
+ * process.
*/
- properties->doorbell_off =
+ *p_doorbell_offset_in_process =
(q->properties.doorbell_off * sizeof(uint32_t)) &
(kfd_doorbell_process_slice(dev) - 1);
@@ -370,14 +374,14 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
/* destroy kernel queue (DIQ) */
dqm = pqn->kq->dev->dqm;
dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
- kernel_queue_uninit(pqn->kq);
+ kernel_queue_uninit(pqn->kq, false);
}
if (pqn->q) {
dqm = pqn->q->device->dqm;
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
if (retval) {
- pr_err("Pasid %d destroy queue %d failed, ret %d\n",
+ pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
pqm->process->pasid,
pqn->q->properties.queue_id, retval);
if (retval != -ETIME)
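
The doorbell offset handed back through p_doorbell_offset_in_process is the byte offset within the per-process doorbell slice. A worked example with hypothetical numbers, assuming 8-byte doorbells and thus a two-page (0x2000-byte) slice:

	#include <stdint.h>
	#include <assert.h>

	static uint32_t doorbell_offset_in_process(uint32_t doorbell_off_dw,
						   uint32_t slice_bytes)
	{
		/* Mirrors the masking done in pqm_create_queue(). */
		return (doorbell_off_dw * sizeof(uint32_t)) & (slice_bytes - 1);
	}

	int main(void)
	{
		/* dword index 0x802 -> byte offset 0x2008, folded into the
		 * 0x2000-byte slice -> 0x0008.
		 */
		assert(doorbell_offset_in_process(0x802, 0x2000) == 0x0008);
		return 0;
	}
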
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index c2e6e47abaf2..203c823d65f1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -269,6 +269,8 @@ static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr,
buffer[0] = 0;
iolink = container_of(attr, struct kfd_iolink_properties, attr);
+ if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "type", iolink->iolink_type);
sysfs_show_32bit_prop(buffer, "version_major", iolink->ver_maj);
sysfs_show_32bit_prop(buffer, "version_minor", iolink->ver_min);
@@ -305,6 +307,8 @@ static ssize_t mem_show(struct kobject *kobj, struct attribute *attr,
buffer[0] = 0;
mem = container_of(attr, struct kfd_mem_properties, attr);
+ if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "heap_type", mem->heap_type);
sysfs_show_64bit_prop(buffer, "size_in_bytes", mem->size_in_bytes);
sysfs_show_32bit_prop(buffer, "flags", mem->flags);
@@ -334,6 +338,8 @@ static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
buffer[0] = 0;
cache = container_of(attr, struct kfd_cache_properties, attr);
+ if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "processor_id_low",
cache->processor_id_low);
sysfs_show_32bit_prop(buffer, "level", cache->cache_level);
@@ -406,8 +412,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
char *buffer)
{
struct kfd_topology_device *dev;
- char public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
- uint32_t i;
uint32_t log_max_watch_addr;
/* Making sure that the buffer is an empty string */
@@ -416,24 +420,24 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
if (strcmp(attr->name, "gpu_id") == 0) {
dev = container_of(attr, struct kfd_topology_device,
attr_gpuid);
+ if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
+ return -EPERM;
return sysfs_show_32bit_val(buffer, dev->gpu_id);
}
if (strcmp(attr->name, "name") == 0) {
dev = container_of(attr, struct kfd_topology_device,
attr_name);
- for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE; i++) {
- public_name[i] =
- (char)dev->node_props.marketing_name[i];
- if (dev->node_props.marketing_name[i] == 0)
- break;
- }
- public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE-1] = 0x0;
- return sysfs_show_str_val(buffer, public_name);
+
+ if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
+ return -EPERM;
+ return sysfs_show_str_val(buffer, dev->node_props.name);
}
dev = container_of(attr, struct kfd_topology_device,
attr_props);
+ if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu))
+ return -EPERM;
sysfs_show_32bit_prop(buffer, "cpu_cores_count",
dev->node_props.cpu_cores_count);
sysfs_show_32bit_prop(buffer, "simd_count",
@@ -482,6 +486,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.num_sdma_engines);
sysfs_show_32bit_prop(buffer, "num_sdma_xgmi_engines",
dev->node_props.num_sdma_xgmi_engines);
+ sysfs_show_32bit_prop(buffer, "num_sdma_queues_per_engine",
+ dev->node_props.num_sdma_queues_per_engine);
+ sysfs_show_32bit_prop(buffer, "num_cp_queues",
+ dev->node_props.num_cp_queues);
if (dev->gpu) {
log_max_watch_addr =
@@ -1106,6 +1114,9 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
{
struct kfd_topology_device *dev;
struct kfd_topology_device *out_dev = NULL;
+ struct kfd_mem_properties *mem;
+ struct kfd_cache_properties *cache;
+ struct kfd_iolink_properties *iolink;
down_write(&topology_lock);
list_for_each_entry(dev, &topology_device_list, list) {
@@ -1119,6 +1130,13 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
if (!dev->gpu && (dev->node_props.simd_count > 0)) {
dev->gpu = gpu;
out_dev = dev;
+
+ list_for_each_entry(mem, &dev->mem_props, list)
+ mem->gpu = dev->gpu;
+ list_for_each_entry(cache, &dev->cache_props, list)
+ cache->gpu = dev->gpu;
+ list_for_each_entry(iolink, &dev->io_link_props, list)
+ iolink->gpu = dev->gpu;
break;
}
}
@@ -1274,6 +1292,10 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
*/
amdgpu_amdkfd_get_cu_info(dev->gpu->kgd, &cu_info);
+
+ strncpy(dev->node_props.name, gpu->device_info->asic_name,
+ KFD_TOPOLOGY_PUBLIC_NAME_SIZE);
+
dev->node_props.simd_arrays_per_engine =
cu_info.num_shader_arrays_per_engine;
@@ -1291,9 +1313,12 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.num_sdma_engines = gpu->device_info->num_sdma_engines;
dev->node_props.num_sdma_xgmi_engines =
gpu->device_info->num_xgmi_sdma_engines;
+ dev->node_props.num_sdma_queues_per_engine =
+ gpu->device_info->num_sdma_queues_per_engine;
dev->node_props.num_gws = (hws_gws_support &&
dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
+ dev->node_props.num_cp_queues = get_queues_num(dev->gpu->dqm);
kfd_fill_mem_clk_max_info(dev);
kfd_fill_iolink_non_crat_info(dev);
@@ -1321,7 +1346,11 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_RAVEN:
+ case CHIP_RENOIR:
+ case CHIP_ARCTURUS:
case CHIP_NAVI10:
+ case CHIP_NAVI12:
+ case CHIP_NAVI14:
dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 <<
HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) &
HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK);
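
The new num_sdma_queues_per_engine and num_cp_queues properties surface through the existing per-node sysfs files as plain key/value pairs. A hedged user-space reader; the path below is where the KFD topology is typically exposed, and the node index is hypothetical:

	#include <stdio.h>

	int main(void)
	{
		/* Typical location of the KFD topology; adjust as needed. */
		FILE *f = fopen("/sys/devices/virtual/kfd/kfd/topology/nodes/1/properties",
				"r");
		char key[64];
		unsigned long long val;

		if (!f)
			return 1;
		while (fscanf(f, "%63s %llu", key, &val) == 2)
			printf("%s = %llu\n", key, val); /* e.g. num_cp_queues */
		fclose(f);
		return 0;
	}
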
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 276354aa0fcc..74e9b1682af8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -27,7 +27,7 @@
#include <linux/list.h>
#include "kfd_crat.h"
-#define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 128
+#define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 32
#define HSA_CAP_HOT_PLUGGABLE 0x00000001
#define HSA_CAP_ATS_PRESENT 0x00000002
@@ -81,7 +81,9 @@ struct kfd_node_properties {
int32_t drm_render_minor;
uint32_t num_sdma_engines;
uint32_t num_sdma_xgmi_engines;
- uint16_t marketing_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
+ uint32_t num_sdma_queues_per_engine;
+ uint32_t num_cp_queues;
+ char name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
};
#define HSA_MEM_HEAP_TYPE_SYSTEM 0
@@ -102,6 +104,7 @@ struct kfd_mem_properties {
uint32_t flags;
uint32_t width;
uint32_t mem_clk_max;
+ struct kfd_dev *gpu;
struct kobject *kobj;
struct attribute attr;
};
@@ -123,6 +126,7 @@ struct kfd_cache_properties {
uint32_t cache_latency;
uint32_t cache_type;
uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE];
+ struct kfd_dev *gpu;
struct kobject *kobj;
struct attribute attr;
};
@@ -141,6 +145,7 @@ struct kfd_iolink_properties {
uint32_t max_bandwidth;
uint32_t rec_transfer_size;
uint32_t flags;
+ struct kfd_dev *gpu;
struct kobject *kobj;
struct attribute attr;
};