Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd')
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c                    |   4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c      | 149
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h      |  10
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c  |   2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c            |   3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c     |   2
6 files changed, 81 insertions, 89 deletions
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 3f95f7cb4019..88187bfc5ea3 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -226,6 +226,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->shared_resources = *gpu_resources;
+ /* We only use the first MEC */
+ if (kfd->shared_resources.num_mec > 1)
+ kfd->shared_resources.num_mec = 1;
+
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
kfd->device_info->mqd_size_aligned;
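
[Editor's note: the hunk above, and the rest of the patch, consume four fields from the kgd-to-kfd shared-resources interface: num_mec, num_pipe_per_mec, num_queue_per_pipe, and queue_bitmap. A minimal sketch of that portion of the structure, for orientation only; the real definition lives in the kgd-kfd interface header and carries more members, and the KGD_MAX_QUEUES value here is assumed.]

#include <linux/bitops.h>	/* BITS_PER_LONG */
#include <linux/kernel.h>	/* DIV_ROUND_UP */

#define KGD_MAX_QUEUES 128	/* assumed bound on the flat queue index space */

struct shared_resources_sketch {
	unsigned int num_mec;		/* compute micro-engines on the ASIC */
	unsigned int num_pipe_per_mec;	/* pipes inside each MEC */
	unsigned int num_queue_per_pipe;/* HQD slots on each pipe */

	/* bit i set => flat queue index i is reserved for KFD use */
	unsigned long queue_bitmap[DIV_ROUND_UP(KGD_MAX_QUEUES, BITS_PER_LONG)];
};
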
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index f49c551195b3..955aa304ff48 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -63,21 +63,44 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
return KFD_MQD_TYPE_CP;
}
-unsigned int get_first_pipe(struct device_queue_manager *dqm)
+static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
+{
+ int i;
+ int pipe_offset = (mec * dqm->dev->shared_resources.num_pipe_per_mec
+ + pipe) * dqm->dev->shared_resources.num_queue_per_pipe;
+
+ /* queue is available for KFD usage if bit is 1 */
+ for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
+ if (test_bit(pipe_offset + i,
+ dqm->dev->shared_resources.queue_bitmap))
+ return true;
+ return false;
+}
+
+unsigned int get_mec_num(struct device_queue_manager *dqm)
{
BUG_ON(!dqm || !dqm->dev);
- return dqm->dev->shared_resources.first_compute_pipe;
+
+ return dqm->dev->shared_resources.num_mec;
}
-unsigned int get_pipes_num(struct device_queue_manager *dqm)
+unsigned int get_queues_num(struct device_queue_manager *dqm)
{
BUG_ON(!dqm || !dqm->dev);
- return dqm->dev->shared_resources.compute_pipe_count;
+ return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
+ KGD_MAX_QUEUES);
}
-static inline unsigned int get_pipes_num_cpsch(void)
+unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
- return PIPE_PER_ME_CP_SCHEDULING;
+ BUG_ON(!dqm || !dqm->dev);
+ return dqm->dev->shared_resources.num_queue_per_pipe;
+}
+
+unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
+{
+ BUG_ON(!dqm || !dqm->dev);
+ return dqm->dev->shared_resources.num_pipe_per_mec;
}
void program_sh_mem_settings(struct device_queue_manager *dqm,
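
[Editor's note: is_pipe_enabled() relies on queue_bitmap being laid out pipe-major - all HQD slots of pipe 0, then pipe 1, and so on through each MEC in turn. A standalone model of that index math and the enablement test, in plain C so it can be exercised outside the driver; the helper names are ours, not the driver's.]

#include <stdbool.h>

/* Fold (mec, pipe, queue) into the flat bit index used by queue_bitmap. */
static unsigned int queue_bit_index(unsigned int mec, unsigned int pipe,
				    unsigned int queue,
				    unsigned int pipes_per_mec,
				    unsigned int queues_per_pipe)
{
	return (mec * pipes_per_mec + pipe) * queues_per_pipe + queue;
}

/* A pipe is usable by KFD if any one of its queue bits is set. */
static bool pipe_has_kfd_queue(const unsigned long *bitmap,
			       unsigned int mec, unsigned int pipe,
			       unsigned int pipes_per_mec,
			       unsigned int queues_per_pipe)
{
	unsigned int q, bit;

	for (q = 0; q < queues_per_pipe; ++q) {
		bit = queue_bit_index(mec, pipe, q,
				      pipes_per_mec, queues_per_pipe);
		if (bitmap[bit / (8 * sizeof(long))] &
		    (1ul << (bit % (8 * sizeof(long)))))
			return true;
	}
	return false;
}
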
@@ -200,12 +223,16 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
set = false;
- for (pipe = dqm->next_pipe_to_allocate, i = 0; i < get_pipes_num(dqm);
- pipe = ((pipe + 1) % get_pipes_num(dqm)), ++i) {
+ for (pipe = dqm->next_pipe_to_allocate, i = 0; i < get_pipes_per_mec(dqm);
+ pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
+
+ if (!is_pipe_enabled(dqm, 0, pipe))
+ continue;
+
if (dqm->allocated_queues[pipe] != 0) {
bit = find_first_bit(
(unsigned long *)&dqm->allocated_queues[pipe],
- QUEUES_PER_PIPE);
+ get_queues_per_pipe(dqm));
clear_bit(bit,
(unsigned long *)&dqm->allocated_queues[pipe]);
@@ -222,7 +249,7 @@ static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
__func__, q->pipe, q->queue);
/* horizontal hqd allocation */
- dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_num(dqm);
+ dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
return 0;
}
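
[Editor's note: allocate_hqd() now walks pipes round-robin from next_pipe_to_allocate, skipping pipes the bitmap leaves disabled, so consecutive queues land on different pipes when possible. A standalone sketch of that policy under assumed types; names and the MAX_PIPES bound are illustrative.]

#include <stdbool.h>

#define MAX_PIPES 8	/* illustrative bound */

struct hqd_alloc {
	unsigned int next_pipe;			/* round-robin cursor */
	unsigned int free_queues[MAX_PIPES];	/* bit set => HQD slot free */
	unsigned int num_pipes;
};

/* Pick the next enabled pipe with a free HQD slot ("horizontal" allocation). */
static bool hqd_allocate(struct hqd_alloc *a,
			 bool (*pipe_enabled)(unsigned int),
			 unsigned int *out_pipe, unsigned int *out_queue)
{
	unsigned int i, pipe = a->next_pipe;

	for (i = 0; i < a->num_pipes; ++i, pipe = (pipe + 1) % a->num_pipes) {
		if (!pipe_enabled(pipe) || !a->free_queues[pipe])
			continue;

		*out_pipe = pipe;
		*out_queue = (unsigned int)__builtin_ctz(a->free_queues[pipe]);
		a->free_queues[pipe] &= ~(1u << *out_queue);
		a->next_pipe = (pipe + 1) % a->num_pipes;
		return true;
	}
	return false;	/* no enabled pipe has a free slot */
}
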
@@ -469,81 +496,25 @@ set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
vmid);
}
-int init_pipelines(struct device_queue_manager *dqm,
- unsigned int pipes_num, unsigned int first_pipe)
-{
- void *hpdptr;
- struct mqd_manager *mqd;
- unsigned int i, err, inx;
- uint64_t pipe_hpd_addr;
-
- BUG_ON(!dqm || !dqm->dev);
-
- pr_debug("kfd: In func %s\n", __func__);
-
- /*
- * Allocate memory for the HPDs. This is hardware-owned per-pipe data.
- * The driver never accesses this memory after zeroing it.
- * It doesn't even have to be saved/restored on suspend/resume
- * because it contains no data when there are no active queues.
- */
-
- err = kfd_gtt_sa_allocate(dqm->dev, CIK_HPD_EOP_BYTES * pipes_num,
- &dqm->pipeline_mem);
-
- if (err) {
- pr_err("kfd: error allocate vidmem num pipes: %d\n",
- pipes_num);
- return -ENOMEM;
- }
-
- hpdptr = dqm->pipeline_mem->cpu_ptr;
- dqm->pipelines_addr = dqm->pipeline_mem->gpu_addr;
-
- memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);
-
- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
- if (mqd == NULL) {
- kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
- return -ENOMEM;
- }
-
- for (i = 0; i < pipes_num; i++) {
- inx = i + first_pipe;
- /*
- * HPD buffer on GTT is allocated by amdkfd, no need to waste
- * space in GTT for pipelines we don't initialize
- */
- pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
- pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
- /* = log2(bytes/4)-1 */
- dqm->dev->kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
- CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
- }
-
- return 0;
-}
-
static void init_interrupts(struct device_queue_manager *dqm)
{
unsigned int i;
BUG_ON(dqm == NULL);
- for (i = 0 ; i < get_pipes_num(dqm) ; i++)
- dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd,
- i + get_first_pipe(dqm));
+ for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
+ if (is_pipe_enabled(dqm, 0, i))
+ dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}
static int init_scheduler(struct device_queue_manager *dqm)
{
- int retval;
+ int retval = 0;
BUG_ON(!dqm);
pr_debug("kfd: In %s\n", __func__);
- retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
return retval;
}
@@ -554,21 +525,21 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
BUG_ON(!dqm);
pr_debug("kfd: In func %s num of pipes: %d\n",
- __func__, get_pipes_num(dqm));
+ __func__, get_pipes_per_mec(dqm));
mutex_init(&dqm->lock);
INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->next_pipe_to_allocate = 0;
dqm->sdma_queue_count = 0;
- dqm->allocated_queues = kcalloc(get_pipes_num(dqm),
+ dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
sizeof(unsigned int), GFP_KERNEL);
if (!dqm->allocated_queues) {
mutex_destroy(&dqm->lock);
return -ENOMEM;
}
- for (i = 0; i < get_pipes_num(dqm); i++)
- dqm->allocated_queues[i] = (1 << QUEUES_PER_PIPE) - 1;
+ for (i = 0; i < get_pipes_per_mec(dqm); i++)
+ dqm->allocated_queues[i] = (1 << get_queues_per_pipe(dqm)) - 1;
dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
@@ -675,18 +646,38 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
static int set_sched_resources(struct device_queue_manager *dqm)
{
+ int i, mec;
struct scheduling_resources res;
- unsigned int queue_num, queue_mask;
BUG_ON(!dqm);
pr_debug("kfd: In func %s\n", __func__);
- queue_num = get_pipes_num_cpsch() * QUEUES_PER_PIPE;
- queue_mask = (1 << queue_num) - 1;
res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
res.vmid_mask <<= KFD_VMID_START_OFFSET;
- res.queue_mask = queue_mask << (get_first_pipe(dqm) * QUEUES_PER_PIPE);
+
+ res.queue_mask = 0;
+ for (i = 0; i < KGD_MAX_QUEUES; ++i) {
+ mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
+ / dqm->dev->shared_resources.num_pipe_per_mec;
+
+ if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
+ continue;
+
+ /* only acquire queues from the first MEC */
+ if (mec > 0)
+ continue;
+
+ /* This situation may be hit in the future if a new HW
+ * generation exposes more than 64 queues. If so, the
+ * definition of res.queue_mask needs updating */
+ if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
+ pr_err("Invalid queue enabled by amdgpu: %d\n", i);
+ break;
+ }
+
+ res.queue_mask |= (1ull << i);
+ }
res.gws_mask = res.oac_mask = res.gds_heap_base =
res.gds_heap_size = 0;
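
[Editor's note: set_sched_resources() now derives the HWS queue mask from the bitmap instead of fixed constants - every enabled bit whose flat index maps to MEC 0 is copied into the 64-bit mask. A compact userspace model of that loop, with the parameters passed in rather than read from dqm; purely illustrative.]

#include <stdint.h>

/* Build a 64-bit HWS queue mask from a flat enable bitmap, keeping only
 * queues on the first MEC. Indices >= 64 would need a wider mask and are
 * skipped, matching the WARN_ON in the patch. */
static uint64_t build_queue_mask(const unsigned long *bitmap,
				 unsigned int max_queues,
				 unsigned int queues_per_pipe,
				 unsigned int pipes_per_mec)
{
	uint64_t mask = 0;
	unsigned int i, mec;

	for (i = 0; i < max_queues; ++i) {
		mec = (i / queues_per_pipe) / pipes_per_mec;

		if (!(bitmap[i / (8 * sizeof(long))] &
		      (1ul << (i % (8 * sizeof(long))))))
			continue;	/* queue not reserved for KFD */
		if (mec > 0)
			continue;	/* only the first MEC feeds the HWS */
		if (i >= 64)
			break;		/* mask is only 64 bits wide */

		mask |= 1ull << i;
	}
	return mask;
}
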
@@ -705,7 +696,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
BUG_ON(!dqm);
pr_debug("kfd: In func %s num of pipes: %d\n",
- __func__, get_pipes_num_cpsch());
+ __func__, get_pipes_per_mec(dqm));
mutex_init(&dqm->lock);
INIT_LIST_HEAD(&dqm->queues);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index a625b9137da2..66b9615bc3c1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -30,8 +30,6 @@
#include "kfd_mqd_manager.h"
#define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (500)
-#define QUEUES_PER_PIPE (8)
-#define PIPE_PER_ME_CP_SCHEDULING (3)
#define CIK_VMID_NUM (8)
#define KFD_VMID_START_OFFSET (8)
#define VMID_PER_DEVICE CIK_VMID_NUM
@@ -182,10 +180,10 @@ void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops);
void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops);
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
-int init_pipelines(struct device_queue_manager *dqm,
- unsigned int pipes_num, unsigned int first_pipe);
-unsigned int get_first_pipe(struct device_queue_manager *dqm);
-unsigned int get_pipes_num(struct device_queue_manager *dqm);
+unsigned int get_mec_num(struct device_queue_manager *dqm);
+unsigned int get_queues_num(struct device_queue_manager *dqm);
+unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
+unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
index c6f435aa803f..48dc0561b402 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c
@@ -151,5 +151,5 @@ static void init_sdma_vm(struct device_queue_manager *dqm, struct queue *q,
static int initialize_cpsch_cik(struct device_queue_manager *dqm)
{
- return init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
+ return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index ca8c09326b31..7131998848d7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -65,8 +65,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
/* check if there is over subscription */
*over_subscription = false;
- if ((process_count > 1) ||
- queue_count > PIPE_PER_ME_CP_SCHEDULING * QUEUES_PER_PIPE) {
+ if ((process_count > 1) || queue_count > get_queues_num(pm->dqm)) {
*over_subscription = true;
pr_debug("kfd: over subscribed runlist\n");
}
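
[Editor's note: with the PIPE_PER_ME_CP_SCHEDULING * QUEUES_PER_PIPE constant gone, oversubscription is judged against the actual number of KFD-visible queues, i.e. the population count of queue_bitmap. A quick illustration of both pieces; queue_count_from_bitmap() stands in for the kernel's bitmap_weight().]

#include <stdbool.h>
#include <stdint.h>

/* Count enabled queue bits across the bitmap words. */
static unsigned int queue_count_from_bitmap(const uint64_t *bitmap,
					    unsigned int words)
{
	unsigned int i, n = 0;

	for (i = 0; i < words; ++i)
		n += (unsigned int)__builtin_popcountll(bitmap[i]);
	return n;
}

/* The runlist is oversubscribed when more than one process is active or
 * more queues were created than the hardware exposes to KFD. */
static bool runlist_oversubscribed(unsigned int process_count,
				   unsigned int queue_count,
				   unsigned int hw_queues)
{
	return process_count > 1 || queue_count > hw_queues;
}
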
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index e1fb40b84c72..32cdf2b483db 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -209,7 +209,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
/* check if there is over subscription */
if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
((dev->dqm->processes_count >= VMID_PER_DEVICE) ||
- (dev->dqm->queue_count >= PIPE_PER_ME_CP_SCHEDULING * QUEUES_PER_PIPE))) {
+ (dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
pr_err("kfd: over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
retval = -EPERM;
goto err_create_queue;