Diffstat (limited to 'drivers/gpu/drm/panfrost')
-rw-r--r--  drivers/gpu/drm/panfrost/TODO                |   2
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_devfreq.c  | 125
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_devfreq.h  |   3
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_device.h   |  14
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_drv.c      |  22
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.c      |  21
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.h      |   4
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_issues.h   |  81
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_job.c      |  17
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_perfcnt.c  |  23
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_perfcnt.h  |   2
11 files changed, 172 insertions, 142 deletions
diff --git a/drivers/gpu/drm/panfrost/TODO b/drivers/gpu/drm/panfrost/TODO
index 536a0d4f8d29..8c811a9e683b 100644
--- a/drivers/gpu/drm/panfrost/TODO
+++ b/drivers/gpu/drm/panfrost/TODO
@@ -10,3 +10,5 @@
- Compute job support. So called 'compute only' jobs need to be plumbed up to
userspace.
+
+- Support core dump on job failure
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 12ff77dacc95..536ba93b0f46 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -13,97 +13,45 @@
#include "panfrost_gpu.h"
#include "panfrost_regs.h"
-static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot);
+static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev);
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
- struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
struct dev_pm_opp *opp;
- unsigned long old_clk_rate = pfdev->devfreq.cur_freq;
- unsigned long target_volt, target_rate;
int err;
opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(opp))
return PTR_ERR(opp);
-
- target_rate = dev_pm_opp_get_freq(opp);
- target_volt = dev_pm_opp_get_voltage(opp);
dev_pm_opp_put(opp);
- if (old_clk_rate == target_rate)
- return 0;
-
- /*
- * If frequency scaling from low to high, adjust voltage first.
- * If frequency scaling from high to low, adjust frequency first.
- */
- if (old_clk_rate < target_rate) {
- err = regulator_set_voltage(pfdev->regulator, target_volt,
- target_volt);
- if (err) {
- dev_err(dev, "Cannot set voltage %lu uV\n",
- target_volt);
- return err;
- }
- }
-
- err = clk_set_rate(pfdev->clock, target_rate);
- if (err) {
- dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
- err);
- regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt,
- pfdev->devfreq.cur_volt);
+ err = dev_pm_opp_set_rate(dev, *freq);
+ if (err)
return err;
- }
-
- if (old_clk_rate > target_rate) {
- err = regulator_set_voltage(pfdev->regulator, target_volt,
- target_volt);
- if (err)
- dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);
- }
-
- pfdev->devfreq.cur_freq = target_rate;
- pfdev->devfreq.cur_volt = target_volt;
return 0;
}
static void panfrost_devfreq_reset(struct panfrost_device *pfdev)
{
- ktime_t now = ktime_get();
- int i;
-
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- pfdev->devfreq.slot[i].busy_time = 0;
- pfdev->devfreq.slot[i].idle_time = 0;
- pfdev->devfreq.slot[i].time_last_update = now;
- }
+ pfdev->devfreq.busy_time = 0;
+ pfdev->devfreq.idle_time = 0;
+ pfdev->devfreq.time_last_update = ktime_get();
}
static int panfrost_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *status)
{
- struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
- int i;
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- panfrost_devfreq_update_utilization(pfdev, i);
- }
+ panfrost_devfreq_update_utilization(pfdev);
status->current_frequency = clk_get_rate(pfdev->clock);
- status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.slot[0].busy_time,
- pfdev->devfreq.slot[0].idle_time));
+ status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.busy_time,
+ pfdev->devfreq.idle_time));
- status->busy_time = 0;
- for (i = 0; i < NUM_JOB_SLOTS; i++) {
- status->busy_time += ktime_to_ns(pfdev->devfreq.slot[i].busy_time);
- }
-
- /* We're scheduling only to one core atm, so don't divide for now */
- /* status->busy_time /= NUM_JOB_SLOTS; */
+ status->busy_time = ktime_to_ns(pfdev->devfreq.busy_time);
panfrost_devfreq_reset(pfdev);
@@ -115,26 +63,17 @@ static int panfrost_devfreq_get_dev_status(struct device *dev,
return 0;
}
-static int panfrost_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
-{
- struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
-
- *freq = pfdev->devfreq.cur_freq;
-
- return 0;
-}
-
static struct devfreq_dev_profile panfrost_devfreq_profile = {
.polling_ms = 50, /* ~3 frames */
.target = panfrost_devfreq_target,
.get_dev_status = panfrost_devfreq_get_dev_status,
- .get_cur_freq = panfrost_devfreq_get_cur_freq,
};
int panfrost_devfreq_init(struct panfrost_device *pfdev)
{
int ret;
struct dev_pm_opp *opp;
+ unsigned long cur_freq;
ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
if (ret == -ENODEV) /* Optional, continue without devfreq */
@@ -144,13 +83,13 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
panfrost_devfreq_reset(pfdev);
- pfdev->devfreq.cur_freq = clk_get_rate(pfdev->clock);
+ cur_freq = clk_get_rate(pfdev->clock);
- opp = devfreq_recommended_opp(&pfdev->pdev->dev, &pfdev->devfreq.cur_freq, 0);
+ opp = devfreq_recommended_opp(&pfdev->pdev->dev, &cur_freq, 0);
if (IS_ERR(opp))
return PTR_ERR(opp);
- panfrost_devfreq_profile.initial_freq = pfdev->devfreq.cur_freq;
+ panfrost_devfreq_profile.initial_freq = cur_freq;
dev_pm_opp_put(opp);
pfdev->devfreq.devfreq = devm_devfreq_add_device(&pfdev->pdev->dev,
@@ -174,14 +113,10 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev)
void panfrost_devfreq_resume(struct panfrost_device *pfdev)
{
- int i;
-
if (!pfdev->devfreq.devfreq)
return;
panfrost_devfreq_reset(pfdev);
- for (i = 0; i < NUM_JOB_SLOTS; i++)
- pfdev->devfreq.slot[i].busy = false;
devfreq_resume_device(pfdev->devfreq.devfreq);
}
@@ -194,9 +129,8 @@ void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
devfreq_suspend_device(pfdev->devfreq.devfreq);
}
-static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot)
+static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev)
{
- struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
ktime_t now;
ktime_t last;
@@ -204,22 +138,27 @@ static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, i
return;
now = ktime_get();
- last = pfdev->devfreq.slot[slot].time_last_update;
+ last = pfdev->devfreq.time_last_update;
- /* If we last recorded a transition to busy, we have been idle since */
- if (devfreq_slot->busy)
- pfdev->devfreq.slot[slot].busy_time += ktime_sub(now, last);
+ if (atomic_read(&pfdev->devfreq.busy_count) > 0)
+ pfdev->devfreq.busy_time += ktime_sub(now, last);
else
- pfdev->devfreq.slot[slot].idle_time += ktime_sub(now, last);
+ pfdev->devfreq.idle_time += ktime_sub(now, last);
- pfdev->devfreq.slot[slot].time_last_update = now;
+ pfdev->devfreq.time_last_update = now;
+}
+
+void panfrost_devfreq_record_busy(struct panfrost_device *pfdev)
+{
+ panfrost_devfreq_update_utilization(pfdev);
+ atomic_inc(&pfdev->devfreq.busy_count);
}
-/* The job scheduler is expected to call this at every transition busy <-> idle */
-void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot)
+void panfrost_devfreq_record_idle(struct panfrost_device *pfdev)
{
- struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
+ int count;
- panfrost_devfreq_update_utilization(pfdev, slot);
- devfreq_slot->busy = !devfreq_slot->busy;
+ panfrost_devfreq_update_utilization(pfdev);
+ count = atomic_dec_if_positive(&pfdev->devfreq.busy_count);
+ WARN_ON(count < 0);
}
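
[Editor's note] Taken together, the devfreq changes above replace the per-slot bookkeeping with one device-wide busy/idle tracker: the target callback now delegates clock and regulator sequencing to dev_pm_opp_set_rate(), and job submission/completion bump or drop a shared atomic busy counter. Below is a minimal kernel-style sketch of that accounting pattern; the struct and function names are illustrative stand-ins, not the driver's own, and in the driver the fields live in pfdev->devfreq.

/*
 * Illustrative sketch of the busy/idle accounting introduced above.
 * Names are hypothetical; the driver's helpers are
 * panfrost_devfreq_record_busy() and panfrost_devfreq_record_idle().
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/ktime.h>

struct busy_tracker {
	ktime_t busy_time;
	ktime_t idle_time;
	ktime_t time_last_update;
	atomic_t busy_count;
};

/* Attribute the time elapsed since the last update to busy or idle. */
static void tracker_update(struct busy_tracker *t)
{
	ktime_t now = ktime_get();
	ktime_t delta = ktime_sub(now, t->time_last_update);

	if (atomic_read(&t->busy_count) > 0)
		t->busy_time = ktime_add(t->busy_time, delta);
	else
		t->idle_time = ktime_add(t->idle_time, delta);

	t->time_last_update = now;
}

/* Job handed to the hardware: close the current interval, then count as busy. */
static void tracker_record_busy(struct busy_tracker *t)
{
	tracker_update(t);
	atomic_inc(&t->busy_count);
}

/* Job finished or timed out: close the interval and drop the busy count. */
static void tracker_record_idle(struct busy_tracker *t)
{
	tracker_update(t);
	WARN_ON(atomic_dec_if_positive(&t->busy_count) < 0);
}

Using a counter rather than a per-slot boolean means several job slots can be in flight at once, and the GPU only counts as idle when the counter falls back to zero.
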
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
index e3bc63e82843..0611beffc8d0 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.h
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
@@ -10,6 +10,7 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev);
void panfrost_devfreq_resume(struct panfrost_device *pfdev);
void panfrost_devfreq_suspend(struct panfrost_device *pfdev);
-void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot);
+void panfrost_devfreq_record_busy(struct panfrost_device *pfdev);
+void panfrost_devfreq_record_idle(struct panfrost_device *pfdev);
#endif /* __PANFROST_DEVFREQ_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 9c39b9794811..06713811b92c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -51,13 +51,6 @@ struct panfrost_features {
unsigned long hw_issues[64 / BITS_PER_LONG];
};
-struct panfrost_devfreq_slot {
- ktime_t busy_time;
- ktime_t idle_time;
- ktime_t time_last_update;
- bool busy;
-};
-
struct panfrost_device {
struct device *dev;
struct drm_device *ddev;
@@ -93,9 +86,10 @@ struct panfrost_device {
struct {
struct devfreq *devfreq;
struct thermal_cooling_device *cooling;
- unsigned long cur_freq;
- unsigned long cur_volt;
- struct panfrost_devfreq_slot slot[NUM_JOB_SLOTS];
+ ktime_t busy_time;
+ ktime_t idle_time;
+ ktime_t time_last_update;
+ atomic_t busy_count;
} devfreq;
};
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index f21bc8a7ee3a..f61364f7c471 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -303,14 +303,17 @@ static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data,
}
/* Don't allow mmapping of heap objects as pages are not pinned. */
- if (to_panfrost_bo(gem_obj)->is_heap)
- return -EINVAL;
+ if (to_panfrost_bo(gem_obj)->is_heap) {
+ ret = -EINVAL;
+ goto out;
+ }
ret = drm_gem_create_mmap_offset(gem_obj);
if (ret == 0)
args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node);
- drm_gem_object_put_unlocked(gem_obj);
+out:
+ drm_gem_object_put_unlocked(gem_obj);
return ret;
}
@@ -347,20 +350,19 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data,
return -ENOENT;
}
+ mutex_lock(&pfdev->shrinker_lock);
args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
if (args->retained) {
struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);
- mutex_lock(&pfdev->shrinker_lock);
-
if (args->madv == PANFROST_MADV_DONTNEED)
- list_add_tail(&bo->base.madv_list, &pfdev->shrinker_list);
+ list_add_tail(&bo->base.madv_list,
+ &pfdev->shrinker_list);
else if (args->madv == PANFROST_MADV_WILLNEED)
list_del_init(&bo->base.madv_list);
-
- mutex_unlock(&pfdev->shrinker_lock);
}
+ mutex_unlock(&pfdev->shrinker_lock);
drm_gem_object_put_unlocked(gem_obj);
return 0;
@@ -443,7 +445,7 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file)
{
struct panfrost_file_priv *panfrost_priv = file->driver_priv;
- panfrost_perfcnt_close(panfrost_priv);
+ panfrost_perfcnt_close(file);
panfrost_job_close(panfrost_priv);
panfrost_mmu_pgtable_free(panfrost_priv);
@@ -470,7 +472,7 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = {
PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW),
};
-DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops);
+DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops);
/*
* Panfrost driver version:
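
[Editor's note] The madvise hunk above is easier to follow with the plus and minus lines folded together: shrinker_lock is now taken before drm_gem_shmem_madvise() and released only after the shrinker-list update, so the shrinker can never observe a DONTNEED object whose list membership is still being changed. A condensed, hedged reconstruction of the resulting flow (object lookup, error handling and the final reference drop elided):

/*
 * Condensed view of the madvise path after the change above: the
 * madvise state transition and the list update happen inside the same
 * shrinker_lock critical section. The function name is illustrative.
 */
static void example_madvise_update(struct panfrost_device *pfdev,
				   struct drm_gem_object *gem_obj,
				   struct drm_panfrost_madvise *args)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj);

	mutex_lock(&pfdev->shrinker_lock);
	args->retained = drm_gem_shmem_madvise(gem_obj, args->madv);
	if (args->retained) {
		if (args->madv == PANFROST_MADV_DONTNEED)
			list_add_tail(&bo->base.madv_list,
				      &pfdev->shrinker_list);
		else if (args->madv == PANFROST_MADV_WILLNEED)
			list_del_init(&bo->base.madv_list);
	}
	mutex_unlock(&pfdev->shrinker_lock);
}
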
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c
index acb07fe06580..fd766b1395fb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -19,6 +19,16 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
struct panfrost_device *pfdev = obj->dev->dev_private;
+ /*
+ * Make sure the BO is no longer inserted in the shrinker list before
+ * taking care of the destruction itself. If we don't do that we have a
+ * race condition between this function and what's done in
+ * panfrost_gem_shrinker_scan().
+ */
+ mutex_lock(&pfdev->shrinker_lock);
+ list_del_init(&bo->base.madv_list);
+ mutex_unlock(&pfdev->shrinker_lock);
+
if (bo->sgts) {
int i;
int n_sgt = bo->base.base.size / SZ_2M;
@@ -33,15 +43,10 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
kfree(bo->sgts);
}
- mutex_lock(&pfdev->shrinker_lock);
- if (!list_empty(&bo->base.madv_list))
- list_del(&bo->base.madv_list);
- mutex_unlock(&pfdev->shrinker_lock);
-
drm_gem_shmem_free_object(obj);
}
-static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
int ret;
size_t size = obj->size;
@@ -80,7 +85,7 @@ static int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_p
return ret;
}
-static void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
+void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv)
{
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
struct panfrost_file_priv *priv = file_priv->driver_priv;
@@ -112,7 +117,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .vm_ops = &drm_gem_shmem_vm_ops,
+ .mmap = drm_gem_shmem_mmap,
};
/**
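
[Editor's note] The new comment in panfrost_gem_free_object() above spells out the ordering requirement; reduced to its essentials, the free path now looks like the sketch below (illustrative name, resource teardown elided), with the list removal under shrinker_lock happening before anything the shrinker might still touch is released.

/*
 * Sketch of the teardown ordering introduced above: unlink the BO from
 * the shrinker's madv_list under shrinker_lock *before* releasing any
 * backing resources, so panfrost_gem_shrinker_scan() cannot race with
 * the destruction. list_del_init() is harmless if the BO was never on
 * the list.
 */
static void example_free_object(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_device *pfdev = obj->dev->dev_private;

	mutex_lock(&pfdev->shrinker_lock);
	list_del_init(&bo->base.madv_list);
	mutex_unlock(&pfdev->shrinker_lock);

	/* ... free sg tables and other per-BO state here ... */

	drm_gem_shmem_free_object(obj);
}
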
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 50920819cc16..4b17e7308764 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -45,6 +45,10 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv,
u32 flags,
uint32_t *handle);
+int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv);
+void panfrost_gem_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv);
+
void panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_issues.h b/drivers/gpu/drm/panfrost/panfrost_issues.h
index cec6dcdadb5c..8e59d765bf19 100644
--- a/drivers/gpu/drm/panfrost/panfrost_issues.h
+++ b/drivers/gpu/drm/panfrost/panfrost_issues.h
@@ -13,37 +13,118 @@
* to care about.
*/
enum panfrost_hw_issue {
+ /* Need way to guarantee that all previously-translated memory accesses
+ * are committed */
HW_ISSUE_6367,
+
+ /* On job complete with non-done the cache is not flushed */
HW_ISSUE_6787,
+
+ /* Write of PRFCNT_CONFIG_MODE_MANUAL to PRFCNT_CONFIG causes an
+ * instrumentation dump if PRFCNT_TILER_EN is enabled */
HW_ISSUE_8186,
+
+ /* TIB: Reports faults from a vtile which has not yet been allocated */
HW_ISSUE_8245,
+
+ /* uTLB deadlock could occur when writing to an invalid page at the
+ * same time as access to a valid page in the same uTLB cache line ( ==
+ * 4 PTEs == 16K block of mapping) */
HW_ISSUE_8316,
+
+ /* HT: TERMINATE for RUN command ignored if previous LOAD_DESCRIPTOR is
+ * still executing */
HW_ISSUE_8394,
+
+ /* CSE: Sends a TERMINATED response for a task that should not be
+ * terminated */
HW_ISSUE_8401,
+
+ /* Repeatedly Soft-stopping a job chain consisting of (Vertex Shader,
+ * Cache Flush, Tiler) jobs causes DATA_INVALID_FAULT on tiler job. */
HW_ISSUE_8408,
+
+ /* Disable the Pause Buffer in the LS pipe. */
HW_ISSUE_8443,
+
+ /* Change in RMUs in use causes problems related to the core's SDC */
HW_ISSUE_8987,
+
+ /* Compute endpoint has a 4-deep queue of tasks, meaning a soft stop
+ * won't complete until all 4 tasks have completed */
HW_ISSUE_9435,
+
+ /* HT: Tiler returns TERMINATED for non-terminated command */
HW_ISSUE_9510,
+
+ /* Occasionally the GPU will issue multiple page faults for the same
+ * address before the MMU page table has been read by the GPU */
HW_ISSUE_9630,
+
+ /* RA DCD load request to SDC returns invalid load ignore causing
+ * colour buffer mismatch */
HW_ISSUE_10327,
+
+ /* MMU TLB invalidation hazards */
HW_ISSUE_10649,
+
+ /* Missing cache flush in multi core-group configuration */
HW_ISSUE_10676,
+
+ /* Chicken bit on T72X for a hardware workaround in compiler */
HW_ISSUE_10797,
+
+ /* Soft-stopping fragment jobs might fail with TILE_RANGE_FAULT */
HW_ISSUE_10817,
+
+ /* Intermittent missing interrupt on job completion */
HW_ISSUE_10883,
+
+ /* Soft-stopping fragment jobs might fail with TILE_RANGE_ERROR
+ * (similar to issue 10817) and can use #10817 workaround */
HW_ISSUE_10959,
+
+ /* Soft-stopped fragment shader job can restart with out-of-bound
+ * restart index */
HW_ISSUE_10969,
+
+ /* Race condition can cause tile list corruption */
HW_ISSUE_11020,
+
+ /* Write buffer can cause tile list corruption */
HW_ISSUE_11024,
+
+ /* Pause buffer can cause a fragment job hang */
HW_ISSUE_11035,
+
+ /* Dynamic Core Scaling not supported due to errata */
HW_ISSUE_11056,
+
+ /* Clear encoder state for a hard stopped fragment job which is AFBC
+ * encoded by soft resetting the GPU. Only for T76X r0p0, r0p1 and
+ * r0p1_50rel0 */
HW_ISSUE_T76X_3542,
+
+ /* Keep tiler module clock on to prevent GPU stall */
HW_ISSUE_T76X_3953,
+
+ /* Must ensure L2 is not transitioning when we reset. Workaround with a
+ * busy wait until L2 completes transition; ensure there is a maximum
+ * loop count as she may never complete her transition. (On chips
+ * without this errata, it's totally okay if L2 transitions.) */
HW_ISSUE_TMIX_8463,
+
+ /* Don't set SC_LS_ATTR_CHECK_DISABLE/SC_LS_ALLOW_ATTR_TYPES */
GPUCORE_1619,
+
+ /* When a hard-stop follows close after a soft-stop, the completion
+ * code for the terminated job may be incorrectly set to STOPPED */
HW_ISSUE_TMIX_8438,
+
+ /* "Protected mode" is buggy on Mali-G31 some Bifrost chips, so the
+ * kernel must fiddle with L2 caches to prevent data leakage */
HW_ISSUE_TGOX_R1_1234,
+
HW_ISSUE_END
};
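
[Editor's note] The HW_ISSUE_* values above are bit numbers in the hw_issues bitmap declared in struct panfrost_features earlier in this diff. A hedged sketch of how such a bitmap is queried is shown below; the helper name here is made up for illustration, the driver provides its own equivalent accessor.

#include <linux/bitops.h>

/*
 * Illustrative helper: each enum panfrost_hw_issue value is a bit
 * index into features.hw_issues[], filled in when the GPU model and
 * revision are probed. The name is hypothetical.
 */
static inline bool example_has_hw_issue(struct panfrost_device *pfdev,
					enum panfrost_hw_issue issue)
{
	return test_bit(issue, pfdev->features.hw_issues);
}

/*
 * Usage: gate a workaround on the affected silicon only, e.g.
 *
 *	if (example_has_hw_issue(pfdev, HW_ISSUE_10817))
 *		apply_soft_stop_workaround(pfdev);   (hypothetical call)
 */
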
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 21f34d44aac2..d411eb6c8eb9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -155,8 +155,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
}
cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu);
-
- panfrost_devfreq_record_transition(pfdev, js);
+ panfrost_devfreq_record_busy(pfdev);
job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
@@ -404,9 +403,7 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job)
}
spin_unlock_irqrestore(&pfdev->js->job_lock, flags);
- /* panfrost_core_dump(pfdev); */
-
- panfrost_devfreq_record_transition(pfdev, js);
+ panfrost_devfreq_record_idle(pfdev);
panfrost_device_reset(pfdev);
for (i = 0; i < NUM_JOB_SLOTS; i++)
@@ -469,7 +466,7 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data)
pfdev->jobs[j] = NULL;
panfrost_mmu_as_put(pfdev, &job->file_priv->mmu);
- panfrost_devfreq_record_transition(pfdev, j);
+ panfrost_devfreq_record_idle(pfdev);
dma_fence_signal_locked(job->done_fence);
pm_runtime_put_autosuspend(pfdev->dev);
@@ -570,14 +567,14 @@ int panfrost_job_is_idle(struct panfrost_device *pfdev)
struct panfrost_job_slot *js = pfdev->js;
int i;
+ /* Check whether the hardware is idle */
+ if (atomic_read(&pfdev->devfreq.busy_count))
+ return false;
+
for (i = 0; i < NUM_JOB_SLOTS; i++) {
/* If there are any jobs in the HW queue, we're not idle */
if (atomic_read(&js->queue[i].sched.hw_rq_count))
return false;
-
- /* Check whether the hardware is idle */
- if (pfdev->devfreq.slot[i].busy)
- return false;
}
return true;
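
[Editor's note] After this change the scheduler-facing idle test combines two sources: the shared devfreq busy counter and the per-slot hardware run queues. Folding the plus and minus lines of the last hunk gives roughly the following sketch (return type simplified to bool):

/*
 * Folded view of the idle test after the change above: idle means no
 * slot has work queued in the DRM scheduler *and* the devfreq busy
 * counter has dropped back to zero.
 */
static bool example_job_is_idle(struct panfrost_device *pfdev)
{
	struct panfrost_job_slot *js = pfdev->js;
	int i;

	/* Check whether the hardware is idle. */
	if (atomic_read(&pfdev->devfreq.busy_count))
		return false;

	for (i = 0; i < NUM_JOB_SLOTS; i++) {
		/* If there are any jobs in the HW queue, we're not idle. */
		if (atomic_read(&js->queue[i].sched.hw_rq_count))
			return false;
	}

	return true;
}
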
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 2dba192bf198..2c04e858c50a 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -67,9 +67,10 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
}
static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
- struct panfrost_file_priv *user,
+ struct drm_file *file_priv,
unsigned int counterset)
{
+ struct panfrost_file_priv *user = file_priv->driver_priv;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
struct drm_gem_shmem_object *bo;
u32 cfg;
@@ -91,14 +92,14 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
perfcnt->bo = to_panfrost_bo(&bo->base);
/* Map the perfcnt buf in the address space attached to file_priv. */
- ret = panfrost_mmu_map(perfcnt->bo);
+ ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv);
if (ret)
goto err_put_bo;
perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
if (IS_ERR(perfcnt->buf)) {
ret = PTR_ERR(perfcnt->buf);
- goto err_put_bo;
+ goto err_close_bo;
}
/*
@@ -157,14 +158,17 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
err_vunmap:
drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+err_close_bo:
+ panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
err_put_bo:
drm_gem_object_put_unlocked(&bo->base);
return ret;
}
static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
- struct panfrost_file_priv *user)
+ struct drm_file *file_priv)
{
+ struct panfrost_file_priv *user = file_priv->driver_priv;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
if (user != perfcnt->user)
@@ -180,6 +184,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
perfcnt->user = NULL;
drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
perfcnt->buf = NULL;
+ panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
perfcnt->bo = NULL;
pm_runtime_mark_last_busy(pfdev->dev);
@@ -191,7 +196,6 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- struct panfrost_file_priv *pfile = file_priv->driver_priv;
struct panfrost_device *pfdev = dev->dev_private;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
struct drm_panfrost_perfcnt_enable *req = data;
@@ -207,10 +211,10 @@ int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
mutex_lock(&perfcnt->lock);
if (req->enable)
- ret = panfrost_perfcnt_enable_locked(pfdev, pfile,
+ ret = panfrost_perfcnt_enable_locked(pfdev, file_priv,
req->counterset);
else
- ret = panfrost_perfcnt_disable_locked(pfdev, pfile);
+ ret = panfrost_perfcnt_disable_locked(pfdev, file_priv);
mutex_unlock(&perfcnt->lock);
return ret;
@@ -248,15 +252,16 @@ out:
return ret;
}
-void panfrost_perfcnt_close(struct panfrost_file_priv *pfile)
+void panfrost_perfcnt_close(struct drm_file *file_priv)
{
+ struct panfrost_file_priv *pfile = file_priv->driver_priv;
struct panfrost_device *pfdev = pfile->pfdev;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
pm_runtime_get_sync(pfdev->dev);
mutex_lock(&perfcnt->lock);
if (perfcnt->user == pfile)
- panfrost_perfcnt_disable_locked(pfdev, pfile);
+ panfrost_perfcnt_disable_locked(pfdev, file_priv);
mutex_unlock(&perfcnt->lock);
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
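
[Editor's note] The perfcnt rework above replaces the direct panfrost_mmu_map() call with the per-file GEM open/close hooks, so the counter buffer is mapped into the address space of the DRM file that enabled it and is unmapped again both on disable and on the error paths. A condensed sketch of that pairing (allocation and register programming elided, labels as in the patch):

/*
 * Condensed sketch of the enable path after the change above: the BO
 * is attached to file_priv's address space via panfrost_gem_open() and
 * detached again with panfrost_gem_close() on failure; the disable
 * path performs the same close before dropping the BO reference.
 */
static int example_perfcnt_enable(struct panfrost_device *pfdev,
				  struct drm_file *file_priv)
{
	struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
	int ret;

	/* ... allocate perfcnt->bo ... */

	/* Map the perfcnt buf in the address space attached to file_priv. */
	ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv);
	if (ret)
		goto err_put_bo;

	perfcnt->buf = drm_gem_shmem_vmap(&perfcnt->bo->base.base);
	if (IS_ERR(perfcnt->buf)) {
		ret = PTR_ERR(perfcnt->buf);
		goto err_close_bo;
	}

	/* ... program the counter hardware ... */
	return 0;

err_close_bo:
	panfrost_gem_close(&perfcnt->bo->base.base, file_priv);
err_put_bo:
	drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
	return ret;
}
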
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.h b/drivers/gpu/drm/panfrost/panfrost_perfcnt.h
index 13b8fdaa1b43..8bbcf5f5fb33 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.h
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.h
@@ -9,7 +9,7 @@ void panfrost_perfcnt_sample_done(struct panfrost_device *pfdev);
void panfrost_perfcnt_clean_cache_done(struct panfrost_device *pfdev);
int panfrost_perfcnt_init(struct panfrost_device *pfdev);
void panfrost_perfcnt_fini(struct panfrost_device *pfdev);
-void panfrost_perfcnt_close(struct panfrost_file_priv *pfile);
+void panfrost_perfcnt_close(struct drm_file *file_priv);
int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int panfrost_ioctl_perfcnt_dump(struct drm_device *dev, void *data,