Diffstat (limited to 'drivers/gpu/drm/panfrost')
-rw-r--r--  drivers/gpu/drm/panfrost/Makefile                 |   1
-rw-r--r--  drivers/gpu/drm/panfrost/TODO                     |  15
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_devfreq.c       | 161
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_devfreq.h       |   4
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_device.c        |  36
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_device.h        |  45
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_drv.c           | 296
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.c           | 243
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.h           |  64
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c  | 114
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gpu.c           |   5
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_issues.h        |  81
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_job.c           | 113
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_job.h           |   1
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_mmu.c           | 501
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_mmu.h           |  15
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_perfcnt.c       |  50
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_perfcnt.h       |   2
18 files changed, 1333 insertions, 414 deletions
diff --git a/drivers/gpu/drm/panfrost/Makefile b/drivers/gpu/drm/panfrost/Makefile index ecf0864cb515..b71935862417 100644 --- a/drivers/gpu/drm/panfrost/Makefile +++ b/drivers/gpu/drm/panfrost/Makefile @@ -5,6 +5,7 @@ panfrost-y := \ panfrost_device.o \ panfrost_devfreq.o \ panfrost_gem.o \ + panfrost_gem_shrinker.o \ panfrost_gpu.o \ panfrost_job.o \ panfrost_mmu.o \ diff --git a/drivers/gpu/drm/panfrost/TODO b/drivers/gpu/drm/panfrost/TODO index c2e44add37d8..8c811a9e683b 100644 --- a/drivers/gpu/drm/panfrost/TODO +++ b/drivers/gpu/drm/panfrost/TODO @@ -6,22 +6,9 @@ - Bifrost specific feature and issue handling - Coherent DMA support -- Support for 2MB pages. The io-pgtable code already supports this. Finishing - support involves either copying or adapting the iommu API to handle passing - aligned addresses and sizes to the io-pgtable code. - -- Per FD address space support. The h/w supports multiple addresses spaces. - The hard part is handling when more address spaces are needed than what - the h/w provides. - -- Support pinning pages on demand (GPU page faults). - - Support userspace controlled GPU virtual addresses. Needed for Vulkan. (Tomeu) -- Support for madvise and a shrinker. - - Compute job support. So called 'compute only' jobs need to be plumbed up to userspace. -- Performance counter support. (Boris) - +- Support core dump on job failure diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c index db798532b0b6..413987038fbf 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright 2019 Collabora ltd. */ #include <linux/devfreq.h> +#include <linux/devfreq_cooling.h> #include <linux/platform_device.h> #include <linux/pm_opp.h> #include <linux/clk.h> @@ -13,97 +14,45 @@ #include "panfrost_gpu.h" #include "panfrost_regs.h" -static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot); +static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev); static int panfrost_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) { - struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev)); struct dev_pm_opp *opp; - unsigned long old_clk_rate = pfdev->devfreq.cur_freq; - unsigned long target_volt, target_rate; int err; opp = devfreq_recommended_opp(dev, freq, flags); if (IS_ERR(opp)) return PTR_ERR(opp); - - target_rate = dev_pm_opp_get_freq(opp); - target_volt = dev_pm_opp_get_voltage(opp); dev_pm_opp_put(opp); - if (old_clk_rate == target_rate) - return 0; - - /* - * If frequency scaling from low to high, adjust voltage first. - * If frequency scaling from high to low, adjust frequency first. 
- */ - if (old_clk_rate < target_rate) { - err = regulator_set_voltage(pfdev->regulator, target_volt, - target_volt); - if (err) { - dev_err(dev, "Cannot set voltage %lu uV\n", - target_volt); - return err; - } - } - - err = clk_set_rate(pfdev->clock, target_rate); - if (err) { - dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate, - err); - regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt, - pfdev->devfreq.cur_volt); + err = dev_pm_opp_set_rate(dev, *freq); + if (err) return err; - } - - if (old_clk_rate > target_rate) { - err = regulator_set_voltage(pfdev->regulator, target_volt, - target_volt); - if (err) - dev_err(dev, "Cannot set voltage %lu uV\n", target_volt); - } - - pfdev->devfreq.cur_freq = target_rate; - pfdev->devfreq.cur_volt = target_volt; return 0; } static void panfrost_devfreq_reset(struct panfrost_device *pfdev) { - ktime_t now = ktime_get(); - int i; - - for (i = 0; i < NUM_JOB_SLOTS; i++) { - pfdev->devfreq.slot[i].busy_time = 0; - pfdev->devfreq.slot[i].idle_time = 0; - pfdev->devfreq.slot[i].time_last_update = now; - } + pfdev->devfreq.busy_time = 0; + pfdev->devfreq.idle_time = 0; + pfdev->devfreq.time_last_update = ktime_get(); } static int panfrost_devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *status) { - struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev)); - int i; + struct panfrost_device *pfdev = dev_get_drvdata(dev); - for (i = 0; i < NUM_JOB_SLOTS; i++) { - panfrost_devfreq_update_utilization(pfdev, i); - } + panfrost_devfreq_update_utilization(pfdev); status->current_frequency = clk_get_rate(pfdev->clock); - status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.slot[0].busy_time, - pfdev->devfreq.slot[0].idle_time)); + status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.busy_time, + pfdev->devfreq.idle_time)); - status->busy_time = 0; - for (i = 0; i < NUM_JOB_SLOTS; i++) { - status->busy_time += ktime_to_ns(pfdev->devfreq.slot[i].busy_time); - } - - /* We're scheduling only to one core atm, so don't divide for now */ - /* status->busy_time /= NUM_JOB_SLOTS; */ + status->busy_time = ktime_to_ns(pfdev->devfreq.busy_time); panfrost_devfreq_reset(pfdev); @@ -115,31 +64,22 @@ static int panfrost_devfreq_get_dev_status(struct device *dev, return 0; } -static int panfrost_devfreq_get_cur_freq(struct device *dev, unsigned long *freq) -{ - struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev)); - - *freq = pfdev->devfreq.cur_freq; - - return 0; -} - static struct devfreq_dev_profile panfrost_devfreq_profile = { .polling_ms = 50, /* ~3 frames */ .target = panfrost_devfreq_target, .get_dev_status = panfrost_devfreq_get_dev_status, - .get_cur_freq = panfrost_devfreq_get_cur_freq, }; int panfrost_devfreq_init(struct panfrost_device *pfdev) { int ret; struct dev_pm_opp *opp; + unsigned long cur_freq; + struct device *dev = &pfdev->pdev->dev; + struct devfreq *devfreq; + struct thermal_cooling_device *cooling; - if (!pfdev->regulator) - return 0; - - ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev); + ret = dev_pm_opp_of_add_table(dev); if (ret == -ENODEV) /* Optional, continue without devfreq */ return 0; else if (ret) @@ -147,37 +87,46 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev) panfrost_devfreq_reset(pfdev); - pfdev->devfreq.cur_freq = clk_get_rate(pfdev->clock); + cur_freq = clk_get_rate(pfdev->clock); - opp = devfreq_recommended_opp(&pfdev->pdev->dev, &pfdev->devfreq.cur_freq, 0); + opp = devfreq_recommended_opp(dev, &cur_freq, 0); if (IS_ERR(opp)) 
return PTR_ERR(opp); - panfrost_devfreq_profile.initial_freq = pfdev->devfreq.cur_freq; + panfrost_devfreq_profile.initial_freq = cur_freq; dev_pm_opp_put(opp); - pfdev->devfreq.devfreq = devm_devfreq_add_device(&pfdev->pdev->dev, - &panfrost_devfreq_profile, "simple_ondemand", NULL); - if (IS_ERR(pfdev->devfreq.devfreq)) { - DRM_DEV_ERROR(&pfdev->pdev->dev, "Couldn't initialize GPU devfreq\n"); - ret = PTR_ERR(pfdev->devfreq.devfreq); - pfdev->devfreq.devfreq = NULL; - return ret; + devfreq = devm_devfreq_add_device(dev, &panfrost_devfreq_profile, + DEVFREQ_GOV_SIMPLE_ONDEMAND, NULL); + if (IS_ERR(devfreq)) { + DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n"); + dev_pm_opp_of_remove_table(dev); + return PTR_ERR(devfreq); } + pfdev->devfreq.devfreq = devfreq; + + cooling = of_devfreq_cooling_register(dev->of_node, devfreq); + if (IS_ERR(cooling)) + DRM_DEV_INFO(dev, "Failed to register cooling device\n"); + else + pfdev->devfreq.cooling = cooling; return 0; } -void panfrost_devfreq_resume(struct panfrost_device *pfdev) +void panfrost_devfreq_fini(struct panfrost_device *pfdev) { - int i; + if (pfdev->devfreq.cooling) + devfreq_cooling_unregister(pfdev->devfreq.cooling); + dev_pm_opp_of_remove_table(&pfdev->pdev->dev); +} +void panfrost_devfreq_resume(struct panfrost_device *pfdev) +{ if (!pfdev->devfreq.devfreq) return; panfrost_devfreq_reset(pfdev); - for (i = 0; i < NUM_JOB_SLOTS; i++) - pfdev->devfreq.slot[i].busy = false; devfreq_resume_device(pfdev->devfreq.devfreq); } @@ -190,9 +139,8 @@ void panfrost_devfreq_suspend(struct panfrost_device *pfdev) devfreq_suspend_device(pfdev->devfreq.devfreq); } -static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot) +static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev) { - struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot]; ktime_t now; ktime_t last; @@ -200,22 +148,27 @@ static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, i return; now = ktime_get(); - last = pfdev->devfreq.slot[slot].time_last_update; + last = pfdev->devfreq.time_last_update; - /* If we last recorded a transition to busy, we have been idle since */ - if (devfreq_slot->busy) - pfdev->devfreq.slot[slot].busy_time += ktime_sub(now, last); + if (atomic_read(&pfdev->devfreq.busy_count) > 0) + pfdev->devfreq.busy_time += ktime_sub(now, last); else - pfdev->devfreq.slot[slot].idle_time += ktime_sub(now, last); + pfdev->devfreq.idle_time += ktime_sub(now, last); + + pfdev->devfreq.time_last_update = now; +} - pfdev->devfreq.slot[slot].time_last_update = now; +void panfrost_devfreq_record_busy(struct panfrost_device *pfdev) +{ + panfrost_devfreq_update_utilization(pfdev); + atomic_inc(&pfdev->devfreq.busy_count); } -/* The job scheduler is expected to call this at every transition busy <-> idle */ -void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot) +void panfrost_devfreq_record_idle(struct panfrost_device *pfdev) { - struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot]; + int count; - panfrost_devfreq_update_utilization(pfdev, slot); - devfreq_slot->busy = !devfreq_slot->busy; + panfrost_devfreq_update_utilization(pfdev); + count = atomic_dec_if_positive(&pfdev->devfreq.busy_count); + WARN_ON(count < 0); } diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h index eb999531ed90..0611beffc8d0 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.h +++ 
b/drivers/gpu/drm/panfrost/panfrost_devfreq.h @@ -5,10 +5,12 @@ #define __PANFROST_DEVFREQ_H__ int panfrost_devfreq_init(struct panfrost_device *pfdev); +void panfrost_devfreq_fini(struct panfrost_device *pfdev); void panfrost_devfreq_resume(struct panfrost_device *pfdev); void panfrost_devfreq_suspend(struct panfrost_device *pfdev); -void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot); +void panfrost_devfreq_record_busy(struct panfrost_device *pfdev); +void panfrost_devfreq_record_idle(struct panfrost_device *pfdev); #endif /* __PANFROST_DEVFREQ_H__ */ diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c index 8a111d7c0200..238fb6d54df4 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.c +++ b/drivers/gpu/drm/panfrost/panfrost_device.c @@ -5,7 +5,6 @@ #include <linux/clk.h> #include <linux/reset.h> #include <linux/platform_device.h> -#include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> #include "panfrost_device.h" @@ -90,12 +89,9 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev) { int ret; - pfdev->regulator = devm_regulator_get_optional(pfdev->dev, "mali"); + pfdev->regulator = devm_regulator_get(pfdev->dev, "mali"); if (IS_ERR(pfdev->regulator)) { ret = PTR_ERR(pfdev->regulator); - pfdev->regulator = NULL; - if (ret == -ENODEV) - return 0; dev_err(pfdev->dev, "failed to get regulator: %d\n", ret); return ret; } @@ -111,8 +107,7 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev) static void panfrost_regulator_fini(struct panfrost_device *pfdev) { - if (pfdev->regulator) - regulator_disable(pfdev->regulator); + regulator_disable(pfdev->regulator); } int panfrost_device_init(struct panfrost_device *pfdev) @@ -123,8 +118,9 @@ int panfrost_device_init(struct panfrost_device *pfdev) mutex_init(&pfdev->sched_lock); mutex_init(&pfdev->reset_lock); INIT_LIST_HEAD(&pfdev->scheduled_jobs); + INIT_LIST_HEAD(&pfdev->as_lru_list); - spin_lock_init(&pfdev->hwaccess_lock); + spin_lock_init(&pfdev->as_lock); err = panfrost_clk_init(pfdev); if (err) { @@ -164,14 +160,6 @@ int panfrost_device_init(struct panfrost_device *pfdev) if (err) goto err_out4; - /* runtime PM will wake us up later */ - panfrost_gpu_power_off(pfdev); - - pm_runtime_set_active(pfdev->dev); - pm_runtime_get_sync(pfdev->dev); - pm_runtime_mark_last_busy(pfdev->dev); - pm_runtime_put_autosuspend(pfdev->dev); - err = panfrost_perfcnt_init(pfdev); if (err) goto err_out5; @@ -254,18 +242,22 @@ const char *panfrost_exception_name(struct panfrost_device *pfdev, u32 exception return "UNKNOWN"; } +void panfrost_device_reset(struct panfrost_device *pfdev) +{ + panfrost_gpu_soft_reset(pfdev); + + panfrost_gpu_power_on(pfdev); + panfrost_mmu_reset(pfdev); + panfrost_job_enable_interrupts(pfdev); +} + #ifdef CONFIG_PM int panfrost_device_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct panfrost_device *pfdev = platform_get_drvdata(pdev); - panfrost_gpu_soft_reset(pfdev); - - /* TODO: Re-enable all other address spaces */ - panfrost_gpu_power_on(pfdev); - panfrost_mmu_enable(pfdev, 0); - panfrost_job_enable_interrupts(pfdev); + panfrost_device_reset(pfdev); panfrost_devfreq_resume(pfdev); return 0; diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h index 83cc01cafde1..06713811b92c 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.h +++ b/drivers/gpu/drm/panfrost/panfrost_device.h @@ -5,6 +5,8 @@ #ifndef 
__PANFROST_DEVICE_H__ #define __PANFROST_DEVICE_H__ +#include <linux/atomic.h> +#include <linux/io-pgtable.h> #include <linux/spinlock.h> #include <drm/drm_device.h> #include <drm/drm_mm.h> @@ -43,28 +45,17 @@ struct panfrost_features { u32 js_features[16]; u32 nr_core_groups; + u32 thread_tls_alloc; unsigned long hw_features[64 / BITS_PER_LONG]; unsigned long hw_issues[64 / BITS_PER_LONG]; }; -struct panfrost_devfreq_slot { - ktime_t busy_time; - ktime_t idle_time; - ktime_t time_last_update; - bool busy; -}; - struct panfrost_device { struct device *dev; struct drm_device *ddev; struct platform_device *pdev; - spinlock_t hwaccess_lock; - - struct drm_mm mm; - spinlock_t mm_lock; - void __iomem *iomem; struct clk *clock; struct clk *bus_clock; @@ -73,7 +64,11 @@ struct panfrost_device { struct panfrost_features features; - struct panfrost_mmu *mmu; + spinlock_t as_lock; + unsigned long as_in_use_mask; + unsigned long as_alloc_mask; + struct list_head as_lru_list; + struct panfrost_job_slot *js; struct panfrost_job *jobs[NUM_JOB_SLOTS]; @@ -84,19 +79,36 @@ struct panfrost_device { struct mutex sched_lock; struct mutex reset_lock; + struct mutex shrinker_lock; + struct list_head shrinker_list; + struct shrinker shrinker; + struct { struct devfreq *devfreq; struct thermal_cooling_device *cooling; - unsigned long cur_freq; - unsigned long cur_volt; - struct panfrost_devfreq_slot slot[NUM_JOB_SLOTS]; + ktime_t busy_time; + ktime_t idle_time; + ktime_t time_last_update; + atomic_t busy_count; } devfreq; }; +struct panfrost_mmu { + struct io_pgtable_cfg pgtbl_cfg; + struct io_pgtable_ops *pgtbl_ops; + int as; + atomic_t as_count; + struct list_head list; +}; + struct panfrost_file_priv { struct panfrost_device *pfdev; struct drm_sched_entity sched_entity[NUM_JOB_SLOTS]; + + struct panfrost_mmu mmu; + struct drm_mm mm; + spinlock_t mm_lock; }; static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev) @@ -127,6 +139,7 @@ int panfrost_unstable_ioctl_check(void); int panfrost_device_init(struct panfrost_device *pfdev); void panfrost_device_fini(struct panfrost_device *pfdev); +void panfrost_device_reset(struct panfrost_device *pfdev); int panfrost_device_resume(struct device *dev); int panfrost_device_suspend(struct device *dev); diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 85b4b51b6a0d..b7a618db3ee2 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -32,10 +32,42 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct if (param->pad != 0) return -EINVAL; +#define PANFROST_FEATURE(name, member) \ + case DRM_PANFROST_PARAM_ ## name: \ + param->value = pfdev->features.member; \ + break +#define PANFROST_FEATURE_ARRAY(name, member, max) \ + case DRM_PANFROST_PARAM_ ## name ## 0 ... 
\ + DRM_PANFROST_PARAM_ ## name ## max: \ + param->value = pfdev->features.member[param->param - \ + DRM_PANFROST_PARAM_ ## name ## 0]; \ + break + switch (param->param) { - case DRM_PANFROST_PARAM_GPU_PROD_ID: - param->value = pfdev->features.id; - break; + PANFROST_FEATURE(GPU_PROD_ID, id); + PANFROST_FEATURE(GPU_REVISION, revision); + PANFROST_FEATURE(SHADER_PRESENT, shader_present); + PANFROST_FEATURE(TILER_PRESENT, tiler_present); + PANFROST_FEATURE(L2_PRESENT, l2_present); + PANFROST_FEATURE(STACK_PRESENT, stack_present); + PANFROST_FEATURE(AS_PRESENT, as_present); + PANFROST_FEATURE(JS_PRESENT, js_present); + PANFROST_FEATURE(L2_FEATURES, l2_features); + PANFROST_FEATURE(CORE_FEATURES, core_features); + PANFROST_FEATURE(TILER_FEATURES, tiler_features); + PANFROST_FEATURE(MEM_FEATURES, mem_features); + PANFROST_FEATURE(MMU_FEATURES, mmu_features); + PANFROST_FEATURE(THREAD_FEATURES, thread_features); + PANFROST_FEATURE(MAX_THREADS, max_threads); + PANFROST_FEATURE(THREAD_MAX_WORKGROUP_SZ, + thread_max_workgroup_sz); + PANFROST_FEATURE(THREAD_MAX_BARRIER_SZ, + thread_max_barrier_sz); + PANFROST_FEATURE(COHERENCY_FEATURES, coherency_features); + PANFROST_FEATURE_ARRAY(TEXTURE_FEATURES, texture_features, 3); + PANFROST_FEATURE_ARRAY(JS_FEATURES, js_features, 15); + PANFROST_FEATURE(NR_CORE_GROUPS, nr_core_groups); + PANFROST_FEATURE(THREAD_TLS_ALLOC, thread_tls_alloc); default: return -EINVAL; } @@ -46,29 +78,35 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data, struct drm_file *file) { - int ret; - struct drm_gem_shmem_object *shmem; + struct panfrost_file_priv *priv = file->driver_priv; + struct panfrost_gem_object *bo; struct drm_panfrost_create_bo *args = data; + struct panfrost_gem_mapping *mapping; - if (!args->size || args->flags || args->pad) + if (!args->size || args->pad || + (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP))) return -EINVAL; - shmem = drm_gem_shmem_create_with_handle(file, dev, args->size, - &args->handle); - if (IS_ERR(shmem)) - return PTR_ERR(shmem); + /* Heaps should never be executable */ + if ((args->flags & PANFROST_BO_HEAP) && + !(args->flags & PANFROST_BO_NOEXEC)) + return -EINVAL; - ret = panfrost_mmu_map(to_panfrost_bo(&shmem->base)); - if (ret) - goto err_free; + bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags, + &args->handle); + if (IS_ERR(bo)) + return PTR_ERR(bo); + + mapping = panfrost_gem_mapping_get(bo, priv); + if (!mapping) { + drm_gem_object_put_unlocked(&bo->base.base); + return -EINVAL; + } - args->offset = to_panfrost_bo(&shmem->base)->node.start << PAGE_SHIFT; + args->offset = mapping->mmnode.start << PAGE_SHIFT; + panfrost_gem_mapping_put(mapping); return 0; - -err_free: - drm_gem_handle_delete(file, args->handle); - return ret; } /** @@ -90,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *dev, struct drm_panfrost_submit *args, struct panfrost_job *job) { + struct panfrost_file_priv *priv = file_priv->driver_priv; + struct panfrost_gem_object *bo; + unsigned int i; + int ret; + job->bo_count = args->bo_handle_count; if (!job->bo_count) @@ -101,9 +144,33 @@ panfrost_lookup_bos(struct drm_device *dev, if (!job->implicit_fences) return -ENOMEM; - return drm_gem_objects_lookup(file_priv, - (void __user *)(uintptr_t)args->bo_handles, - job->bo_count, &job->bos); + ret = drm_gem_objects_lookup(file_priv, + (void __user *)(uintptr_t)args->bo_handles, + job->bo_count, &job->bos); + if (ret) + return ret; + 
+ job->mappings = kvmalloc_array(job->bo_count, + sizeof(struct panfrost_gem_mapping *), + GFP_KERNEL | __GFP_ZERO); + if (!job->mappings) + return -ENOMEM; + + for (i = 0; i < job->bo_count; i++) { + struct panfrost_gem_mapping *mapping; + + bo = to_panfrost_bo(job->bos[i]); + mapping = panfrost_gem_mapping_get(bo, priv); + if (!mapping) { + ret = -EINVAL; + break; + } + + atomic_inc(&bo->gpu_usecount); + job->mappings[i] = mapping; + } + + return ret; } /** @@ -245,7 +312,7 @@ panfrost_ioctl_wait_bo(struct drm_device *dev, void *data, if (!gem_obj) return -ENOENT; - ret = reservation_object_wait_timeout_rcu(gem_obj->resv, true, + ret = dma_resv_wait_timeout_rcu(gem_obj->resv, true, true, timeout); if (!ret) ret = timeout ? -ETIMEDOUT : -EBUSY; @@ -273,18 +340,27 @@ static int panfrost_ioctl_mmap_bo(struct drm_device *dev, void *data, return -ENOENT; } + /* Don't allow mmapping of heap objects as pages are not pinned. */ + if (to_panfrost_bo(gem_obj)->is_heap) { + ret = -EINVAL; + goto out; + } + ret = drm_gem_create_mmap_offset(gem_obj); if (ret == 0) args->offset = drm_vma_node_offset_addr(&gem_obj->vma_node); - drm_gem_object_put_unlocked(gem_obj); +out: + drm_gem_object_put_unlocked(gem_obj); return ret; } static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct panfrost_file_priv *priv = file_priv->driver_priv; struct drm_panfrost_get_bo_offset *args = data; + struct panfrost_gem_mapping *mapping; struct drm_gem_object *gem_obj; struct panfrost_gem_object *bo; @@ -295,12 +371,77 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data, } bo = to_panfrost_bo(gem_obj); - args->offset = bo->node.start << PAGE_SHIFT; - + mapping = panfrost_gem_mapping_get(bo, priv); drm_gem_object_put_unlocked(gem_obj); + + if (!mapping) + return -EINVAL; + + args->offset = mapping->mmnode.start << PAGE_SHIFT; + panfrost_gem_mapping_put(mapping); return 0; } +static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct panfrost_file_priv *priv = file_priv->driver_priv; + struct drm_panfrost_madvise *args = data; + struct panfrost_device *pfdev = dev->dev_private; + struct drm_gem_object *gem_obj; + struct panfrost_gem_object *bo; + int ret = 0; + + gem_obj = drm_gem_object_lookup(file_priv, args->handle); + if (!gem_obj) { + DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); + return -ENOENT; + } + + bo = to_panfrost_bo(gem_obj); + + mutex_lock(&pfdev->shrinker_lock); + mutex_lock(&bo->mappings.lock); + if (args->madv == PANFROST_MADV_DONTNEED) { + struct panfrost_gem_mapping *first; + + first = list_first_entry(&bo->mappings.list, + struct panfrost_gem_mapping, + node); + + /* + * If we want to mark the BO purgeable, there must be only one + * user: the caller FD. + * We could do something smarter and mark the BO purgeable only + * when all its users have marked it purgeable, but globally + * visible/shared BOs are likely to never be marked purgeable + * anyway, so let's not bother. 
+ */ + if (!list_is_singular(&bo->mappings.list) || + WARN_ON_ONCE(first->mmu != &priv->mmu)) { + ret = -EINVAL; + goto out_unlock_mappings; + } + } + + args->retained = drm_gem_shmem_madvise(gem_obj, args->madv); + + if (args->retained) { + if (args->madv == PANFROST_MADV_DONTNEED) + list_add_tail(&bo->base.madv_list, + &pfdev->shrinker_list); + else if (args->madv == PANFROST_MADV_WILLNEED) + list_del_init(&bo->base.madv_list); + } + +out_unlock_mappings: + mutex_unlock(&bo->mappings.lock); + mutex_unlock(&pfdev->shrinker_lock); + + drm_gem_object_put_unlocked(gem_obj); + return ret; +} + int panfrost_unstable_ioctl_check(void) { if (!unstable_ioctls) @@ -309,9 +450,36 @@ int panfrost_unstable_ioctl_check(void) return 0; } +#define PFN_4G (SZ_4G >> PAGE_SHIFT) +#define PFN_4G_MASK (PFN_4G - 1) +#define PFN_16M (SZ_16M >> PAGE_SHIFT) + +static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node, + unsigned long color, + u64 *start, u64 *end) +{ + /* Executable buffers can't start or end on a 4GB boundary */ + if (!(color & PANFROST_BO_NOEXEC)) { + u64 next_seg; + + if ((*start & PFN_4G_MASK) == 0) + (*start)++; + + if ((*end & PFN_4G_MASK) == 0) + (*end)--; + + next_seg = ALIGN(*start, PFN_4G); + if (next_seg - *start <= PFN_16M) + *start = next_seg + 1; + + *end = min(*end, ALIGN(*start, PFN_4G) - 1); + } +} + static int panfrost_open(struct drm_device *dev, struct drm_file *file) { + int ret; struct panfrost_device *pfdev = dev->dev_private; struct panfrost_file_priv *panfrost_priv; @@ -322,7 +490,28 @@ panfrost_open(struct drm_device *dev, struct drm_file *file) panfrost_priv->pfdev = pfdev; file->driver_priv = panfrost_priv; - return panfrost_job_open(panfrost_priv); + spin_lock_init(&panfrost_priv->mm_lock); + + /* 4G enough for now. can be 48-bit */ + drm_mm_init(&panfrost_priv->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT); + panfrost_priv->mm.color_adjust = panfrost_drm_mm_color_adjust; + + ret = panfrost_mmu_pgtable_alloc(panfrost_priv); + if (ret) + goto err_pgtable; + + ret = panfrost_job_open(panfrost_priv); + if (ret) + goto err_job; + + return 0; + +err_job: + panfrost_mmu_pgtable_free(panfrost_priv); +err_pgtable: + drm_mm_takedown(&panfrost_priv->mm); + kfree(panfrost_priv); + return ret; } static void @@ -330,21 +519,19 @@ panfrost_postclose(struct drm_device *dev, struct drm_file *file) { struct panfrost_file_priv *panfrost_priv = file->driver_priv; - panfrost_perfcnt_close(panfrost_priv); + panfrost_perfcnt_close(file); panfrost_job_close(panfrost_priv); + panfrost_mmu_pgtable_free(panfrost_priv); + drm_mm_takedown(&panfrost_priv->mm); kfree(panfrost_priv); } -/* DRM_AUTH is required on SUBMIT for now, while all clients share a single - * address space. Note that render nodes would be able to submit jobs that - * could access BOs from clients authenticated with the master node. 
- */ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = { #define PANFROST_IOCTL(n, func, flags) \ DRM_IOCTL_DEF_DRV(PANFROST_##n, panfrost_ioctl_##func, flags) - PANFROST_IOCTL(SUBMIT, submit, DRM_RENDER_ALLOW | DRM_AUTH), + PANFROST_IOCTL(SUBMIT, submit, DRM_RENDER_ALLOW), PANFROST_IOCTL(WAIT_BO, wait_bo, DRM_RENDER_ALLOW), PANFROST_IOCTL(CREATE_BO, create_bo, DRM_RENDER_ALLOW), PANFROST_IOCTL(MMAP_BO, mmap_bo, DRM_RENDER_ALLOW), @@ -352,13 +539,18 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = { PANFROST_IOCTL(GET_BO_OFFSET, get_bo_offset, DRM_RENDER_ALLOW), PANFROST_IOCTL(PERFCNT_ENABLE, perfcnt_enable, DRM_RENDER_ALLOW), PANFROST_IOCTL(PERFCNT_DUMP, perfcnt_dump, DRM_RENDER_ALLOW), + PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW), }; -DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops); +DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops); +/* + * Panfrost driver version: + * - 1.0 - initial interface + * - 1.1 - adds HEAP and NOEXEC flags for CREATE_BO + */ static struct drm_driver panfrost_drm_driver = { - .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_PRIME | - DRIVER_SYNCOBJ, + .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ, .open = panfrost_open, .postclose = panfrost_postclose, .ioctls = panfrost_drm_driver_ioctls, @@ -368,7 +560,7 @@ static struct drm_driver panfrost_drm_driver = { .desc = "panfrost DRM", .date = "20180908", .major = 1, - .minor = 0, + .minor = 1, .gem_create_object = panfrost_gem_create_object, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, @@ -400,14 +592,8 @@ static int panfrost_probe(struct platform_device *pdev) ddev->dev_private = pfdev; pfdev->ddev = ddev; - spin_lock_init(&pfdev->mm_lock); - - /* 4G enough for now. can be 48-bit */ - drm_mm_init(&pfdev->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT); - - pm_runtime_use_autosuspend(pfdev->dev); - pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */ - pm_runtime_enable(pfdev->dev); + mutex_init(&pfdev->shrinker_lock); + INIT_LIST_HEAD(&pfdev->shrinker_list); err = panfrost_device_init(pfdev); if (err) { @@ -423,20 +609,30 @@ static int panfrost_probe(struct platform_device *pdev) goto err_out1; } + pm_runtime_set_active(pfdev->dev); + pm_runtime_mark_last_busy(pfdev->dev); + pm_runtime_enable(pfdev->dev); + pm_runtime_set_autosuspend_delay(pfdev->dev, 50); /* ~3 frames */ + pm_runtime_use_autosuspend(pfdev->dev); + /* * Register the DRM device with the core and the connectors with * sysfs */ err = drm_dev_register(ddev, 0); if (err < 0) - goto err_out1; + goto err_out2; + + panfrost_gem_shrinker_init(ddev); return 0; +err_out2: + pm_runtime_disable(pfdev->dev); + panfrost_devfreq_fini(pfdev); err_out1: panfrost_device_fini(pfdev); err_out0: - pm_runtime_disable(pfdev->dev); drm_dev_put(ddev); return err; } @@ -447,10 +643,14 @@ static int panfrost_remove(struct platform_device *pdev) struct drm_device *ddev = pfdev->ddev; drm_dev_unregister(ddev); + panfrost_gem_shrinker_cleanup(ddev); + pm_runtime_get_sync(pfdev->dev); - pm_runtime_put_sync_autosuspend(pfdev->dev); - pm_runtime_disable(pfdev->dev); + panfrost_devfreq_fini(pfdev); panfrost_device_fini(pfdev); + pm_runtime_put_sync_suspend(pfdev->dev); + pm_runtime_disable(pfdev->dev); + drm_dev_put(ddev); return 0; } diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index b46416be5a54..17b654e1eb94 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -19,25 +19,195 @@ static 
void panfrost_gem_free_object(struct drm_gem_object *obj) struct panfrost_gem_object *bo = to_panfrost_bo(obj); struct panfrost_device *pfdev = obj->dev->dev_private; - if (bo->is_mapped) - panfrost_mmu_unmap(bo); + /* + * Make sure the BO is no longer inserted in the shrinker list before + * taking care of the destruction itself. If we don't do that we have a + * race condition between this function and what's done in + * panfrost_gem_shrinker_scan(). + */ + mutex_lock(&pfdev->shrinker_lock); + list_del_init(&bo->base.madv_list); + mutex_unlock(&pfdev->shrinker_lock); - spin_lock(&pfdev->mm_lock); - drm_mm_remove_node(&bo->node); - spin_unlock(&pfdev->mm_lock); + /* + * If we still have mappings attached to the BO, there's a problem in + * our refcounting. + */ + WARN_ON_ONCE(!list_empty(&bo->mappings.list)); + + if (bo->sgts) { + int i; + int n_sgt = bo->base.base.size / SZ_2M; + + for (i = 0; i < n_sgt; i++) { + if (bo->sgts[i].sgl) { + dma_unmap_sg(pfdev->dev, bo->sgts[i].sgl, + bo->sgts[i].nents, DMA_BIDIRECTIONAL); + sg_free_table(&bo->sgts[i]); + } + } + kfree(bo->sgts); + } drm_gem_shmem_free_object(obj); } +struct panfrost_gem_mapping * +panfrost_gem_mapping_get(struct panfrost_gem_object *bo, + struct panfrost_file_priv *priv) +{ + struct panfrost_gem_mapping *iter, *mapping = NULL; + + mutex_lock(&bo->mappings.lock); + list_for_each_entry(iter, &bo->mappings.list, node) { + if (iter->mmu == &priv->mmu) { + kref_get(&iter->refcount); + mapping = iter; + break; + } + } + mutex_unlock(&bo->mappings.lock); + + return mapping; +} + +static void +panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping) +{ + struct panfrost_file_priv *priv; + + if (mapping->active) + panfrost_mmu_unmap(mapping); + + priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu); + spin_lock(&priv->mm_lock); + if (drm_mm_node_allocated(&mapping->mmnode)) + drm_mm_remove_node(&mapping->mmnode); + spin_unlock(&priv->mm_lock); +} + +static void panfrost_gem_mapping_release(struct kref *kref) +{ + struct panfrost_gem_mapping *mapping; + + mapping = container_of(kref, struct panfrost_gem_mapping, refcount); + + panfrost_gem_teardown_mapping(mapping); + drm_gem_object_put_unlocked(&mapping->obj->base.base); + kfree(mapping); +} + +void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping) +{ + if (!mapping) + return; + + kref_put(&mapping->refcount, panfrost_gem_mapping_release); +} + +void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo) +{ + struct panfrost_gem_mapping *mapping; + + mutex_lock(&bo->mappings.lock); + list_for_each_entry(mapping, &bo->mappings.list, node) + panfrost_gem_teardown_mapping(mapping); + mutex_unlock(&bo->mappings.lock); +} + +int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv) +{ + int ret; + size_t size = obj->size; + u64 align; + struct panfrost_gem_object *bo = to_panfrost_bo(obj); + unsigned long color = bo->noexec ? PANFROST_BO_NOEXEC : 0; + struct panfrost_file_priv *priv = file_priv->driver_priv; + struct panfrost_gem_mapping *mapping; + + mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); + if (!mapping) + return -ENOMEM; + + INIT_LIST_HEAD(&mapping->node); + kref_init(&mapping->refcount); + drm_gem_object_get(obj); + mapping->obj = bo; + + /* + * Executable buffers cannot cross a 16MB boundary as the program + * counter is 24-bits. We assume executable buffers will be less than + * 16MB and aligning executable buffers to their size will avoid + * crossing a 16MB boundary. 
+ */ + if (!bo->noexec) + align = size >> PAGE_SHIFT; + else + align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0; + + mapping->mmu = &priv->mmu; + spin_lock(&priv->mm_lock); + ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode, + size >> PAGE_SHIFT, align, color, 0); + spin_unlock(&priv->mm_lock); + if (ret) + goto err; + + if (!bo->is_heap) { + ret = panfrost_mmu_map(mapping); + if (ret) + goto err; + } + + mutex_lock(&bo->mappings.lock); + WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED); + list_add_tail(&mapping->node, &bo->mappings.list); + mutex_unlock(&bo->mappings.lock); + +err: + if (ret) + panfrost_gem_mapping_put(mapping); + return ret; +} + +void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv) +{ + struct panfrost_file_priv *priv = file_priv->driver_priv; + struct panfrost_gem_object *bo = to_panfrost_bo(obj); + struct panfrost_gem_mapping *mapping = NULL, *iter; + + mutex_lock(&bo->mappings.lock); + list_for_each_entry(iter, &bo->mappings.list, node) { + if (iter->mmu == &priv->mmu) { + mapping = iter; + list_del(&iter->node); + break; + } + } + mutex_unlock(&bo->mappings.lock); + + panfrost_gem_mapping_put(mapping); +} + +static int panfrost_gem_pin(struct drm_gem_object *obj) +{ + if (to_panfrost_bo(obj)->is_heap) + return -EINVAL; + + return drm_gem_shmem_pin(obj); +} + static const struct drm_gem_object_funcs panfrost_gem_funcs = { .free = panfrost_gem_free_object, + .open = panfrost_gem_open, + .close = panfrost_gem_close, .print_info = drm_gem_shmem_print_info, - .pin = drm_gem_shmem_pin, + .pin = panfrost_gem_pin, .unpin = drm_gem_shmem_unpin, .get_sg_table = drm_gem_shmem_get_sg_table, .vmap = drm_gem_shmem_vmap, .vunmap = drm_gem_shmem_vunmap, - .vm_ops = &drm_gem_shmem_vm_ops, + .mmap = drm_gem_shmem_mmap, }; /** @@ -50,32 +220,52 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = { */ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size) { - int ret; - struct panfrost_device *pfdev = dev->dev_private; struct panfrost_gem_object *obj; - u64 align; obj = kzalloc(sizeof(*obj), GFP_KERNEL); if (!obj) return NULL; + INIT_LIST_HEAD(&obj->mappings.list); + mutex_init(&obj->mappings.lock); obj->base.base.funcs = &panfrost_gem_funcs; - size = roundup(size, PAGE_SIZE); - align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0; + return &obj->base.base; +} - spin_lock(&pfdev->mm_lock); - ret = drm_mm_insert_node_generic(&pfdev->mm, &obj->node, - size >> PAGE_SHIFT, align, 0, 0); - spin_unlock(&pfdev->mm_lock); - if (ret) - goto free_obj; +struct panfrost_gem_object * +panfrost_gem_create_with_handle(struct drm_file *file_priv, + struct drm_device *dev, size_t size, + u32 flags, + uint32_t *handle) +{ + int ret; + struct drm_gem_shmem_object *shmem; + struct panfrost_gem_object *bo; - return &obj->base.base; + /* Round up heap allocations to 2MB to keep fault handling simple */ + if (flags & PANFROST_BO_HEAP) + size = roundup(size, SZ_2M); + + shmem = drm_gem_shmem_create(dev, size); + if (IS_ERR(shmem)) + return ERR_CAST(shmem); + + bo = to_panfrost_bo(&shmem->base); + bo->noexec = !!(flags & PANFROST_BO_NOEXEC); + bo->is_heap = !!(flags & PANFROST_BO_HEAP); -free_obj: - kfree(obj); - return ERR_PTR(ret); + /* + * Allocate an id of idr table where the obj is registered + * and handle has the id what user can see. + */ + ret = drm_gem_handle_create(file_priv, &shmem->base, handle); + /* drop reference from allocate - handle holds it now. 
*/ + drm_gem_object_put_unlocked(&shmem->base); + if (ret) + return ERR_PTR(ret); + + return bo; } struct drm_gem_object * @@ -84,17 +274,14 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev, struct sg_table *sgt) { struct drm_gem_object *obj; - struct panfrost_gem_object *pobj; + struct panfrost_gem_object *bo; obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt); if (IS_ERR(obj)) return ERR_CAST(obj); - pobj = to_panfrost_bo(obj); - - obj->resv = attach->dmabuf->resv; - - panfrost_mmu_map(pobj); + bo = to_panfrost_bo(obj); + bo->noexec = true; return obj; } diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h index 6dbcaba020fc..b3517ff9630c 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.h +++ b/drivers/gpu/drm/panfrost/panfrost_gem.h @@ -7,11 +7,46 @@ #include <drm/drm_gem_shmem_helper.h> #include <drm/drm_mm.h> +struct panfrost_mmu; + struct panfrost_gem_object { struct drm_gem_shmem_object base; + struct sg_table *sgts; + + /* + * Use a list for now. If searching a mapping ever becomes the + * bottleneck, we should consider using an RB-tree, or even better, + * let the core store drm_gem_object_mapping entries (where we + * could place driver specific data) instead of drm_gem_object ones + * in its drm_file->object_idr table. + * + * struct drm_gem_object_mapping { + * struct drm_gem_object *obj; + * void *driver_priv; + * }; + */ + struct { + struct list_head list; + struct mutex lock; + } mappings; + + /* + * Count the number of jobs referencing this BO so we don't let the + * shrinker reclaim this object prematurely. + */ + atomic_t gpu_usecount; - struct drm_mm_node node; - bool is_mapped; + bool noexec :1; + bool is_heap :1; +}; + +struct panfrost_gem_mapping { + struct list_head node; + struct kref refcount; + struct panfrost_gem_object *obj; + struct drm_mm_node mmnode; + struct panfrost_mmu *mmu; + bool active :1; }; static inline @@ -20,6 +55,12 @@ struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj) return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base); } +static inline struct panfrost_gem_mapping * +drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node) +{ + return container_of(node, struct panfrost_gem_mapping, mmnode); +} + struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size); struct drm_gem_object * @@ -27,4 +68,23 @@ panfrost_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); +struct panfrost_gem_object * +panfrost_gem_create_with_handle(struct drm_file *file_priv, + struct drm_device *dev, size_t size, + u32 flags, + uint32_t *handle); + +int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv); +void panfrost_gem_close(struct drm_gem_object *obj, + struct drm_file *file_priv); + +struct panfrost_gem_mapping * +panfrost_gem_mapping_get(struct panfrost_gem_object *bo, + struct panfrost_file_priv *priv); +void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping); +void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo); + +void panfrost_gem_shrinker_init(struct drm_device *dev); +void panfrost_gem_shrinker_cleanup(struct drm_device *dev); + #endif /* __PANFROST_GEM_H__ */ diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c new file mode 100644 index 000000000000..288e46c40673 --- /dev/null +++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c @@ -0,0 +1,114 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2019 Arm Ltd. + * + * Based on msm_gem_freedreno.c: + * Copyright (C) 2016 Red Hat + * Author: Rob Clark <robdclark@gmail.com> + */ + +#include <linux/list.h> + +#include <drm/drm_device.h> +#include <drm/drm_gem_shmem_helper.h> + +#include "panfrost_device.h" +#include "panfrost_gem.h" +#include "panfrost_mmu.h" + +static unsigned long +panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) +{ + struct panfrost_device *pfdev = + container_of(shrinker, struct panfrost_device, shrinker); + struct drm_gem_shmem_object *shmem; + unsigned long count = 0; + + if (!mutex_trylock(&pfdev->shrinker_lock)) + return 0; + + list_for_each_entry(shmem, &pfdev->shrinker_list, madv_list) { + if (drm_gem_shmem_is_purgeable(shmem)) + count += shmem->base.size >> PAGE_SHIFT; + } + + mutex_unlock(&pfdev->shrinker_lock); + + return count; +} + +static bool panfrost_gem_purge(struct drm_gem_object *obj) +{ + struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + struct panfrost_gem_object *bo = to_panfrost_bo(obj); + + if (atomic_read(&bo->gpu_usecount)) + return false; + + if (!mutex_trylock(&shmem->pages_lock)) + return false; + + panfrost_gem_teardown_mappings(bo); + drm_gem_shmem_purge_locked(obj); + + mutex_unlock(&shmem->pages_lock); + return true; +} + +static unsigned long +panfrost_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) +{ + struct panfrost_device *pfdev = + container_of(shrinker, struct panfrost_device, shrinker); + struct drm_gem_shmem_object *shmem, *tmp; + unsigned long freed = 0; + + if (!mutex_trylock(&pfdev->shrinker_lock)) + return SHRINK_STOP; + + list_for_each_entry_safe(shmem, tmp, &pfdev->shrinker_list, madv_list) { + if (freed >= sc->nr_to_scan) + break; + if (drm_gem_shmem_is_purgeable(shmem) && + panfrost_gem_purge(&shmem->base)) { + freed += shmem->base.size >> PAGE_SHIFT; + list_del_init(&shmem->madv_list); + } + } + + mutex_unlock(&pfdev->shrinker_lock); + + if (freed > 0) + pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT); + + return freed; +} + +/** + * panfrost_gem_shrinker_init - Initialize panfrost shrinker + * @dev: DRM device + * + * This function registers and sets up the panfrost shrinker. + */ +void panfrost_gem_shrinker_init(struct drm_device *dev) +{ + struct panfrost_device *pfdev = dev->dev_private; + pfdev->shrinker.count_objects = panfrost_gem_shrinker_count; + pfdev->shrinker.scan_objects = panfrost_gem_shrinker_scan; + pfdev->shrinker.seeks = DEFAULT_SEEKS; + WARN_ON(register_shrinker(&pfdev->shrinker)); +} + +/** + * panfrost_gem_shrinker_cleanup - Clean up panfrost shrinker + * @dev: DRM device + * + * This function unregisters the panfrost shrinker. 
+ */ +void panfrost_gem_shrinker_cleanup(struct drm_device *dev) +{ + struct panfrost_device *pfdev = dev->dev_private; + + if (pfdev->shrinker.nr_deferred) { + unregister_shrinker(&pfdev->shrinker); + } +}
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c index 20ab333fc925..8822ec13a0d6 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gpu.c +++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c @@ -208,6 +208,9 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev) pfdev->features.mem_features = gpu_read(pfdev, GPU_MEM_FEATURES); pfdev->features.mmu_features = gpu_read(pfdev, GPU_MMU_FEATURES); pfdev->features.thread_features = gpu_read(pfdev, GPU_THREAD_FEATURES); + pfdev->features.max_threads = gpu_read(pfdev, GPU_THREAD_MAX_THREADS); + pfdev->features.thread_max_workgroup_sz = gpu_read(pfdev, GPU_THREAD_MAX_WORKGROUP_SIZE); + pfdev->features.thread_max_barrier_sz = gpu_read(pfdev, GPU_THREAD_MAX_BARRIER_SIZE); pfdev->features.coherency_features = gpu_read(pfdev, GPU_COHERENCY_FEATURES); for (i = 0; i < 4; i++) pfdev->features.texture_features[i] = gpu_read(pfdev, GPU_TEXTURE_FEATURES(i)); @@ -232,6 +235,8 @@ static void panfrost_gpu_init_features(struct panfrost_device *pfdev) pfdev->features.stack_present = gpu_read(pfdev, GPU_STACK_PRESENT_LO); pfdev->features.stack_present |= (u64)gpu_read(pfdev, GPU_STACK_PRESENT_HI) << 32; + pfdev->features.thread_tls_alloc = gpu_read(pfdev, GPU_THREAD_TLS_ALLOC); + gpu_id = gpu_read(pfdev, GPU_ID); pfdev->features.revision = gpu_id & 0xffff; pfdev->features.id = gpu_id >> 16;
diff --git a/drivers/gpu/drm/panfrost/panfrost_issues.h b/drivers/gpu/drm/panfrost/panfrost_issues.h index cec6dcdadb5c..8e59d765bf19 100644 --- a/drivers/gpu/drm/panfrost/panfrost_issues.h +++ b/drivers/gpu/drm/panfrost/panfrost_issues.h @@ -13,37 +13,118 @@ * to care about. */ enum panfrost_hw_issue { + /* Need way to guarantee that all previously-translated memory accesses + * are committed */ HW_ISSUE_6367, + + /* On job complete with non-done the cache is not flushed */ HW_ISSUE_6787, + + /* Write of PRFCNT_CONFIG_MODE_MANUAL to PRFCNT_CONFIG causes an + * instrumentation dump if PRFCNT_TILER_EN is enabled */ HW_ISSUE_8186, + + /* TIB: Reports faults from a vtile which has not yet been allocated */ HW_ISSUE_8245, + + /* uTLB deadlock could occur when writing to an invalid page at the + * same time as access to a valid page in the same uTLB cache line ( == + * 4 PTEs == 16K block of mapping) */ HW_ISSUE_8316, + + /* HT: TERMINATE for RUN command ignored if previous LOAD_DESCRIPTOR is + * still executing */ HW_ISSUE_8394, + + /* CSE: Sends a TERMINATED response for a task that should not be + * terminated */ HW_ISSUE_8401, + + /* Repeatedly Soft-stopping a job chain consisting of (Vertex Shader, + * Cache Flush, Tiler) jobs causes DATA_INVALID_FAULT on tiler job. */ HW_ISSUE_8408, + + /* Disable the Pause Buffer in the LS pipe. */ HW_ISSUE_8443, + + /* Change in RMUs in use causes problems related to the core's SDC */ HW_ISSUE_8987, + + /* Compute endpoint has a 4-deep queue of tasks, meaning a soft stop + * won't complete until all 4 tasks have completed */ HW_ISSUE_9435, + + /* HT: Tiler returns TERMINATED for non-terminated command */ HW_ISSUE_9510, + + /* Occasionally the GPU will issue multiple page faults for the same + * address before the MMU page table has been read by the GPU */ HW_ISSUE_9630, + + /* RA DCD load request to SDC returns invalid load ignore causing + * colour buffer mismatch */ HW_ISSUE_10327, + + /* MMU TLB invalidation hazards */ HW_ISSUE_10649, + + /* Missing cache flush in multi core-group configuration */ HW_ISSUE_10676, + + /* Chicken bit on T72X for a hardware workaround in compiler */ HW_ISSUE_10797, + + /* Soft-stopping fragment jobs might fail with TILE_RANGE_FAULT */ HW_ISSUE_10817, + + /* Intermittent missing interrupt on job completion */ HW_ISSUE_10883, + + /* Soft-stopping fragment jobs might fail with TILE_RANGE_ERROR + * (similar to issue 10817) and can use #10817 workaround */ HW_ISSUE_10959, + + /* Soft-stopped fragment shader job can restart with out-of-bound + * restart index */ HW_ISSUE_10969, + + /* Race condition can cause tile list corruption */ HW_ISSUE_11020, + + /* Write buffer can cause tile list corruption */ HW_ISSUE_11024, + + /* Pause buffer can cause a fragment job hang */ HW_ISSUE_11035, + + /* Dynamic Core Scaling not supported due to errata */ HW_ISSUE_11056, + + /* Clear encoder state for a hard stopped fragment job which is AFBC + * encoded by soft resetting the GPU. Only for T76X r0p0, r0p1 and + * r0p1_50rel0 */ HW_ISSUE_T76X_3542, + + /* Keep tiler module clock on to prevent GPU stall */ HW_ISSUE_T76X_3953, + + /* Must ensure L2 is not transitioning when we reset. Work around with a + * busy wait until the L2 transition completes; bound the loop with a + * maximum count, as it may never complete. (On chips without this + * erratum, an L2 transition during reset is harmless.) */ HW_ISSUE_TMIX_8463, + + /* Don't set SC_LS_ATTR_CHECK_DISABLE/SC_LS_ALLOW_ATTR_TYPES */ GPUCORE_1619, + + /* When a hard-stop follows close after a soft-stop, the completion + * code for the terminated job may be incorrectly set to STOPPED */ HW_ISSUE_TMIX_8438, + + /* "Protected mode" is buggy on some Bifrost chips such as the Mali-G31, + * so the kernel must fiddle with L2 caches to prevent data leakage */ HW_ISSUE_TGOX_R1_1234, + HW_ISSUE_END };
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index 9bb9260d9181..7157dfd7dea3 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -6,7 +6,7 @@ #include <linux/io.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> -#include <linux/reservation.h> +#include <linux/dma-resv.h> #include <drm/gpu_scheduler.h> #include <drm/panfrost_drm.h> @@ -141,7 +141,6 @@ static void panfrost_job_write_affinity(struct panfrost_device *pfdev, static void panfrost_job_hw_submit(struct panfrost_job *job, int js) { struct panfrost_device *pfdev = job->pfdev; - unsigned long flags; u32 cfg; u64 jc_head = job->jc; int ret; @@ -150,11 +149,13 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) if (ret < 0) return; - if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) - goto end; + if (WARN_ON(job_read(pfdev, JS_COMMAND_NEXT(js)))) { + pm_runtime_put_sync_autosuspend(pfdev->dev); + return; + } - panfrost_devfreq_record_transition(pfdev, js); - spin_lock_irqsave(&pfdev->hwaccess_lock, flags); + cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu); + panfrost_devfreq_record_busy(pfdev); job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF); job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32); @@ -163,8 +164,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) /* start MMU, medium priority, cache clean/flush on end, clean/flush on * start */ - /* TODO: different address spaces */ - cfg = JS_CONFIG_THREAD_PRI(8) | + cfg |= JS_CONFIG_THREAD_PRI(8) | JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE | JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE; @@ -184,12 +184,6 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) job, js, jc_head); job_write(pfdev, JS_COMMAND_NEXT(js), JS_COMMAND_START); - - spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags); - -end: - pm_runtime_mark_last_busy(pfdev->dev); - pm_runtime_put_autosuspend(pfdev->dev); } static void panfrost_acquire_object_fences(struct drm_gem_object **bos, @@ -199,7 +193,7 @@ static void panfrost_acquire_object_fences(struct drm_gem_object **bos, int i; for (i = 0; i < bo_count; i++) - implicit_fences[i] = reservation_object_get_excl_rcu(bos[i]->resv); + implicit_fences[i] = dma_resv_get_excl_rcu(bos[i]->resv); } static void panfrost_attach_object_fences(struct drm_gem_object **bos, @@ -209,7 +203,7 @@ static void panfrost_attach_object_fences(struct drm_gem_object **bos, int i; for (i = 0; i < bo_count; i++) - reservation_object_add_excl_fence(bos[i]->resv, fence); + dma_resv_add_excl_fence(bos[i]->resv, fence); } int panfrost_job_push(struct panfrost_job *job) @@ -274,9 +268,25 @@ static void panfrost_job_cleanup(struct kref *ref) dma_fence_put(job->done_fence); dma_fence_put(job->render_done_fence); + if (job->mappings) { + for (i = 0; i < job->bo_count; i++) { + if (!job->mappings[i]) + break; + + atomic_dec(&job->mappings[i]->obj->gpu_usecount); + panfrost_gem_mapping_put(job->mappings[i]); + } + kvfree(job->mappings); + } + if (job->bos) { - for (i = 0; i < 
job->bo_count; i++) + struct panfrost_gem_object *bo; + + for (i = 0; i < job->bo_count; i++) { + bo = to_panfrost_bo(job->bos[i]); drm_gem_object_put_unlocked(job->bos[i]); + } + kvfree(job->bos); } @@ -368,6 +378,7 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job) struct panfrost_job *job = to_panfrost_job(sched_job); struct panfrost_device *pfdev = job->pfdev; int js = panfrost_job_get_slot(job); + unsigned long flags; int i; /* @@ -377,30 +388,39 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job) if (dma_fence_is_signaled(job->done_fence)) return; - dev_err(pfdev->dev, "gpu sched timeout, js=%d, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p", + dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p", js, + job_read(pfdev, JS_CONFIG(js)), job_read(pfdev, JS_STATUS(js)), job_read(pfdev, JS_HEAD_LO(js)), job_read(pfdev, JS_TAIL_LO(js)), sched_job); - mutex_lock(&pfdev->reset_lock); + if (!mutex_trylock(&pfdev->reset_lock)) + return; - for (i = 0; i < NUM_JOB_SLOTS; i++) - drm_sched_stop(&pfdev->js->queue[i].sched, sched_job); + for (i = 0; i < NUM_JOB_SLOTS; i++) { + struct drm_gpu_scheduler *sched = &pfdev->js->queue[i].sched; - if (sched_job) - drm_sched_increase_karma(sched_job); + drm_sched_stop(sched, sched_job); + if (js != i) + /* Ensure any timeouts on other slots have finished */ + cancel_delayed_work_sync(&sched->work_tdr); + } - /* panfrost_core_dump(pfdev); */ + drm_sched_increase_karma(sched_job); - panfrost_devfreq_record_transition(pfdev, js); - panfrost_gpu_soft_reset(pfdev); + spin_lock_irqsave(&pfdev->js->job_lock, flags); + for (i = 0; i < NUM_JOB_SLOTS; i++) { + if (pfdev->jobs[i]) { + pm_runtime_put_noidle(pfdev->dev); + pfdev->jobs[i] = NULL; + } + } + spin_unlock_irqrestore(&pfdev->js->job_lock, flags); - /* TODO: Re-enable all other address spaces */ - panfrost_mmu_enable(pfdev, 0); - panfrost_gpu_power_on(pfdev); - panfrost_job_enable_interrupts(pfdev); + panfrost_devfreq_record_idle(pfdev); + panfrost_device_reset(pfdev); for (i = 0; i < NUM_JOB_SLOTS; i++) drm_sched_resubmit_jobs(&pfdev->js->queue[i].sched); @@ -453,8 +473,21 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data) } if (status & JOB_INT_MASK_DONE(j)) { - panfrost_devfreq_record_transition(pfdev, j); - dma_fence_signal(pfdev->jobs[j]->done_fence); + struct panfrost_job *job; + + spin_lock(&pfdev->js->job_lock); + job = pfdev->jobs[j]; + /* Only NULL if job timeout occurred */ + if (job) { + pfdev->jobs[j] = NULL; + + panfrost_mmu_as_put(pfdev, &job->file_priv->mmu); + panfrost_devfreq_record_idle(pfdev); + + dma_fence_signal_locked(job->done_fence); + pm_runtime_put_autosuspend(pfdev->dev); + } + spin_unlock(&pfdev->js->job_lock); } status &= ~mask; @@ -525,12 +558,14 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv) { struct panfrost_device *pfdev = panfrost_priv->pfdev; struct panfrost_job_slot *js = pfdev->js; - struct drm_sched_rq *rq; + struct drm_gpu_scheduler *sched; int ret, i; for (i = 0; i < NUM_JOB_SLOTS; i++) { - rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; - ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], &rq, 1, NULL); + sched = &js->queue[i].sched; + ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], + DRM_SCHED_PRIORITY_NORMAL, &sched, + 1, NULL); if (WARN_ON(ret)) return ret; } @@ -550,14 +585,14 @@ int panfrost_job_is_idle(struct panfrost_device *pfdev) struct panfrost_job_slot *js = pfdev->js; int i; + /* Check 
whether the hardware is idle */ + if (atomic_read(&pfdev->devfreq.busy_count)) + return false; + for (i = 0; i < NUM_JOB_SLOTS; i++) { /* If there are any jobs in the HW queue, we're not idle */ if (atomic_read(&js->queue[i].sched.hw_rq_count)) return false; - - /* Check whether the hardware is idle */ - if (pfdev->devfreq.slot[i].busy) - return false; } return true; diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h index 62454128a792..bbd3ba97ff67 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.h +++ b/drivers/gpu/drm/panfrost/panfrost_job.h @@ -32,6 +32,7 @@ struct panfrost_job { /* Exclusive fences we have taken from the BOs to wait for */ struct dma_fence **implicit_fences; + struct panfrost_gem_mapping **mappings; struct drm_gem_object **bos; u32 bo_count; diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 92ac995dd9c6..763cfca886a7 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -1,7 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */ +#include <linux/atomic.h> #include <linux/bitfield.h> #include <linux/delay.h> +#include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> @@ -9,6 +11,7 @@ #include <linux/iommu.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <linux/shmem_fs.h> #include <linux/sizes.h> #include "panfrost_device.h" @@ -20,12 +23,6 @@ #define mmu_write(dev, reg, data) writel(data, dev->iomem + reg) #define mmu_read(dev, reg) readl(dev->iomem + reg) -struct panfrost_mmu { - struct io_pgtable_cfg pgtbl_cfg; - struct io_pgtable_ops *pgtbl_ops; - struct mutex lock; -}; - static int wait_ready(struct panfrost_device *pfdev, u32 as_nr) { int ret; @@ -83,13 +80,11 @@ static void lock_region(struct panfrost_device *pfdev, u32 as_nr, } -static int mmu_hw_do_operation(struct panfrost_device *pfdev, u32 as_nr, - u64 iova, size_t size, u32 op) +static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr, + u64 iova, size_t size, u32 op) { - unsigned long flags; - int ret; - - spin_lock_irqsave(&pfdev->hwaccess_lock, flags); + if (as_nr < 0) + return 0; if (op != AS_COMMAND_UNLOCK) lock_region(pfdev, as_nr, iova, size); @@ -98,21 +93,29 @@ static int mmu_hw_do_operation(struct panfrost_device *pfdev, u32 as_nr, write_cmd(pfdev, as_nr, op); /* Wait for the flush to complete */ - ret = wait_ready(pfdev, as_nr); + return wait_ready(pfdev, as_nr); +} - spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags); +static int mmu_hw_do_operation(struct panfrost_device *pfdev, + struct panfrost_mmu *mmu, + u64 iova, size_t size, u32 op) +{ + int ret; + spin_lock(&pfdev->as_lock); + ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op); + spin_unlock(&pfdev->as_lock); return ret; } -void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr) +static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) { - struct io_pgtable_cfg *cfg = &pfdev->mmu->pgtbl_cfg; + int as_nr = mmu->as; + struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg; u64 transtab = cfg->arm_mali_lpae_cfg.transtab; u64 memattr = cfg->arm_mali_lpae_cfg.memattr; - mmu_write(pfdev, MMU_INT_CLEAR, ~0); - mmu_write(pfdev, MMU_INT_MASK, ~0); + mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM); mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL); mmu_write(pfdev, 
 	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
@@ -126,8 +129,10 @@ void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr)
 	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
 }
 
-static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
+static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
 {
+	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
+
 	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
 	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);
 
@@ -137,6 +142,80 @@ static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
 	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
 }
 
+u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
+{
+	int as;
+
+	spin_lock(&pfdev->as_lock);
+
+	as = mmu->as;
+	if (as >= 0) {
+		int en = atomic_inc_return(&mmu->as_count);
+		WARN_ON(en >= NUM_JOB_SLOTS);
+
+		list_move(&mmu->list, &pfdev->as_lru_list);
+		goto out;
+	}
+
+	/* Check for a free AS */
+	as = ffz(pfdev->as_alloc_mask);
+	if (!(BIT(as) & pfdev->features.as_present)) {
+		struct panfrost_mmu *lru_mmu;
+
+		list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
+			if (!atomic_read(&lru_mmu->as_count))
+				break;
+		}
+		WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);
+
+		list_del_init(&lru_mmu->list);
+		as = lru_mmu->as;
+
+		WARN_ON(as < 0);
+		lru_mmu->as = -1;
+	}
+
+	/* Assign the free or reclaimed AS to the FD */
+	mmu->as = as;
+	set_bit(as, &pfdev->as_alloc_mask);
+	atomic_set(&mmu->as_count, 1);
+	list_add(&mmu->list, &pfdev->as_lru_list);
+
+	dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx", as, mmu, pfdev->as_alloc_mask);
+
+	panfrost_mmu_enable(pfdev, mmu);
+
+out:
+	spin_unlock(&pfdev->as_lock);
+	return as;
+}
+
+void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
+{
+	atomic_dec(&mmu->as_count);
+	WARN_ON(atomic_read(&mmu->as_count) < 0);
+}
+
+void panfrost_mmu_reset(struct panfrost_device *pfdev)
+{
+	struct panfrost_mmu *mmu, *mmu_tmp;
+
+	spin_lock(&pfdev->as_lock);
+
+	pfdev->as_alloc_mask = 0;
+
+	list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
+		mmu->as = -1;
+		atomic_set(&mmu->as_count, 0);
+		list_del_init(&mmu->list);
+	}
+
+	spin_unlock(&pfdev->as_lock);
+
+	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
+	mmu_write(pfdev, MMU_INT_MASK, ~0);
+}
+
 static size_t get_pgsize(u64 addr, size_t size)
 {
 	if (addr & (SZ_2M - 1) || size < SZ_2M)
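panfrost_mmu_as_get() above keeps per-file MMU contexts on an LRU list and steals the least-recently-used idle address space when as_alloc_mask shows no free slot. The following standalone model mirrors that policy with a simple LRU stamp instead of the kernel list; all names are hypothetical, and the locking, atomics, and hardware reprogramming of the real function are omitted:

#include <assert.h>
#include <stdio.h>

#define NUM_AS 4			/* features.as_present on real hw */

struct mmu_ctx {
	int as;				/* assigned slot, -1 when evicted */
	int as_count;			/* jobs currently running in it */
};

static struct mmu_ctx *slot_owner[NUM_AS];	/* NULL = slot free */
static unsigned long lru_stamp[NUM_AS];		/* bigger = more recent */
static unsigned long clock_tick;

static int as_get(struct mmu_ctx *c)
{
	int victim = -1;

	if (c->as >= 0) {		/* fast path: still resident */
		c->as_count++;
		lru_stamp[c->as] = ++clock_tick;
		return c->as;
	}

	for (int as = 0; as < NUM_AS; as++) {
		if (!slot_owner[as]) {	/* free slot available */
			victim = as;
			break;
		}
		/* otherwise remember the least-recently-used idle slot */
		if (!slot_owner[as]->as_count &&
		    (victim < 0 || lru_stamp[as] < lru_stamp[victim]))
			victim = as;
	}
	assert(victim >= 0);	/* more slots than runnable jobs */

	if (slot_owner[victim])
		slot_owner[victim]->as = -1;	/* evict; owner re-acquires later */
	slot_owner[victim] = c;
	lru_stamp[victim] = ++clock_tick;
	c->as = victim;
	c->as_count = 1;
	return victim;		/* caller re-enables the MMU for this AS */
}

static void as_put(struct mmu_ctx *c)
{
	assert(c->as_count > 0);
	c->as_count--;		/* slot stays assigned until stolen */
}

int main(void)
{
	struct mmu_ctx a = { -1, 0 }, b = { -1, 0 };

	printf("a -> AS%d\n", as_get(&a));
	as_put(&a);
	printf("b -> AS%d\n", as_get(&b));	/* takes a different slot */
	as_put(&b);
	return 0;
}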
@@ -145,110 +224,110 @@ static size_t get_pgsize(u64 addr, size_t size)
 	return SZ_2M;
 }
 
-int panfrost_mmu_map(struct panfrost_gem_object *bo)
+static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
+				     struct panfrost_mmu *mmu,
+				     u64 iova, size_t size)
 {
-	struct drm_gem_object *obj = &bo->base.base;
-	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
-	struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
-	u64 iova = bo->node.start << PAGE_SHIFT;
-	unsigned int count;
-	struct scatterlist *sgl;
-	struct sg_table *sgt;
-	int ret;
+	if (mmu->as < 0)
+		return;
 
-	if (WARN_ON(bo->is_mapped))
-		return 0;
+	pm_runtime_get_noresume(pfdev->dev);
 
-	sgt = drm_gem_shmem_get_pages_sgt(obj);
-	if (WARN_ON(IS_ERR(sgt)))
-		return PTR_ERR(sgt);
+	/* Flush the PTs only if we're already awake */
+	if (pm_runtime_active(pfdev->dev))
+		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);
 
-	ret = pm_runtime_get_sync(pfdev->dev);
-	if (ret < 0)
-		return ret;
+	pm_runtime_put_sync_autosuspend(pfdev->dev);
+}
 
-	mutex_lock(&pfdev->mmu->lock);
+static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
+		      u64 iova, int prot, struct sg_table *sgt)
+{
+	unsigned int count;
+	struct scatterlist *sgl;
+	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
+	u64 start_iova = iova;
 
 	for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
 		unsigned long paddr = sg_dma_address(sgl);
 		size_t len = sg_dma_len(sgl);
 
-		dev_dbg(pfdev->dev, "map: iova=%llx, paddr=%lx, len=%zx", iova, paddr, len);
+		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx", mmu->as, iova, paddr, len);
 
 		while (len) {
 			size_t pgsize = get_pgsize(iova | paddr, len);
 
-			ops->map(ops, iova, paddr, pgsize, IOMMU_WRITE | IOMMU_READ);
+			ops->map(ops, iova, paddr, pgsize, prot);
 			iova += pgsize;
 			paddr += pgsize;
 			len -= pgsize;
 		}
 	}
 
-	mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
-			    bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
+	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);
+
+	return 0;
+}
+
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
+{
+	struct panfrost_gem_object *bo = mapping->obj;
+	struct drm_gem_object *obj = &bo->base.base;
+	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
+	struct sg_table *sgt;
+	int prot = IOMMU_READ | IOMMU_WRITE;
+
+	if (WARN_ON(mapping->active))
+		return 0;
 
-	mutex_unlock(&pfdev->mmu->lock);
+	if (bo->noexec)
+		prot |= IOMMU_NOEXEC;
 
-	pm_runtime_mark_last_busy(pfdev->dev);
-	pm_runtime_put_autosuspend(pfdev->dev);
-	bo->is_mapped = true;
+	sgt = drm_gem_shmem_get_pages_sgt(obj);
+	if (WARN_ON(IS_ERR(sgt)))
+		return PTR_ERR(sgt);
+
+	mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
+		   prot, sgt);
+	mapping->active = true;
 
 	return 0;
 }
 
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
 {
+	struct panfrost_gem_object *bo = mapping->obj;
 	struct drm_gem_object *obj = &bo->base.base;
 	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
-	struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
-	u64 iova = bo->node.start << PAGE_SHIFT;
-	size_t len = bo->node.size << PAGE_SHIFT;
+	struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
+	u64 iova = mapping->mmnode.start << PAGE_SHIFT;
+	size_t len = mapping->mmnode.size << PAGE_SHIFT;
 	size_t unmapped_len = 0;
-	int ret;
-
-	if (WARN_ON(!bo->is_mapped))
-		return;
-
-	dev_dbg(pfdev->dev, "unmap: iova=%llx, len=%zx", iova, len);
 
-	ret = pm_runtime_get_sync(pfdev->dev);
-	if (ret < 0)
+	if (WARN_ON(!mapping->active))
 		return;
 
-	mutex_lock(&pfdev->mmu->lock);
+	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx",
		mapping->mmu->as, iova, len);
 
 	while (unmapped_len < len) {
 		size_t unmapped_page;
 		size_t pgsize = get_pgsize(iova, len - unmapped_len);
 
-		unmapped_page = ops->unmap(ops, iova, pgsize);
-		if (!unmapped_page)
-			break;
-
-		iova += unmapped_page;
-		unmapped_len += unmapped_page;
+		if (ops->iova_to_phys(ops, iova)) {
+			unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
+			WARN_ON(unmapped_page != pgsize);
+		}
+		iova += pgsize;
+		unmapped_len += pgsize;
 	}
 
-	mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
-			    bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
-
-	mutex_unlock(&pfdev->mmu->lock);
-
-	pm_runtime_mark_last_busy(pfdev->dev);
-	pm_runtime_put_autosuspend(pfdev->dev);
-	bo->is_mapped = false;
+	panfrost_mmu_flush_range(pfdev, mapping->mmu,
+				 mapping->mmnode.start << PAGE_SHIFT, len);
+	mapping->active = false;
 }
 
 static void mmu_tlb_inv_context_s1(void *cookie)
-{
-	struct panfrost_device *pfdev = cookie;
-
-	mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
-}
-
-static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
-				     size_t granule, bool leaf, void *cookie)
 {}
 
 static void mmu_tlb_sync_context(void *cookie)
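mmu_map_sg() above relies on get_pgsize() to emit 2 MiB block mappings whenever both the address and the remaining length allow it, falling back to 4 KiB pages at unaligned edges. A small demo of that walk, reusing get_pgsize() verbatim (the start address and length are invented):

#include <stdint.h>
#include <stdio.h>

#define SZ_2M (2u << 20)
#define SZ_4K 4096u

/* copied from the hunk above */
static size_t get_pgsize(uint64_t addr, size_t size)
{
	if (addr & (SZ_2M - 1) || size < SZ_2M)
		return SZ_4K;
	return SZ_2M;
}

int main(void)
{
	/* invented mapping: starts two 4K pages before a 2M boundary */
	uint64_t iova = 0x200000 - 2 * SZ_4K;
	size_t len = SZ_2M + 4 * SZ_4K;

	while (len) {
		size_t pg = get_pgsize(iova, len);

		printf("map %4zuK at 0x%llx\n", pg / 1024,
		       (unsigned long long)iova);
		iova += pg;
		len -= pg;
	}
	return 0;
}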
@@ -257,12 +336,207 @@ static void mmu_tlb_sync_context(void *cookie)
 	// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
 }
 
-static const struct iommu_gather_ops mmu_tlb_ops = {
+static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
+			       void *cookie)
+{
+	mmu_tlb_sync_context(cookie);
+}
+
+static void mmu_tlb_flush_leaf(unsigned long iova, size_t size, size_t granule,
+			       void *cookie)
+{
+	mmu_tlb_sync_context(cookie);
+}
+
+static const struct iommu_flush_ops mmu_tlb_ops = {
 	.tlb_flush_all	= mmu_tlb_inv_context_s1,
-	.tlb_add_flush	= mmu_tlb_inv_range_nosync,
-	.tlb_sync	= mmu_tlb_sync_context,
+	.tlb_flush_walk = mmu_tlb_flush_walk,
+	.tlb_flush_leaf = mmu_tlb_flush_leaf,
 };
 
+int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
+{
+	struct panfrost_mmu *mmu = &priv->mmu;
+	struct panfrost_device *pfdev = priv->pfdev;
+
+	INIT_LIST_HEAD(&mmu->list);
+	mmu->as = -1;
+
+	mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
+		.pgsize_bitmap	= SZ_4K | SZ_2M,
+		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
+		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
+		.tlb		= &mmu_tlb_ops,
+		.iommu_dev	= pfdev->dev,
+	};
+
+	mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
+					      priv);
+	if (!mmu->pgtbl_ops)
+		return -EINVAL;
+
+	return 0;
+}
+
+void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
+{
+	struct panfrost_device *pfdev = priv->pfdev;
+	struct panfrost_mmu *mmu = &priv->mmu;
+
+	spin_lock(&pfdev->as_lock);
+	if (mmu->as >= 0) {
+		pm_runtime_get_noresume(pfdev->dev);
+		if (pm_runtime_active(pfdev->dev))
+			panfrost_mmu_disable(pfdev, mmu->as);
+		pm_runtime_put_autosuspend(pfdev->dev);
+
+		clear_bit(mmu->as, &pfdev->as_alloc_mask);
+		clear_bit(mmu->as, &pfdev->as_in_use_mask);
+		list_del(&mmu->list);
+	}
+	spin_unlock(&pfdev->as_lock);
+
+	free_io_pgtable_ops(mmu->pgtbl_ops);
+}
+
+static struct panfrost_gem_mapping *
+addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
+{
+	struct panfrost_gem_mapping *mapping = NULL;
+	struct panfrost_file_priv *priv;
+	struct drm_mm_node *node;
+	u64 offset = addr >> PAGE_SHIFT;
+	struct panfrost_mmu *mmu;
+
+	spin_lock(&pfdev->as_lock);
+	list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
+		if (as == mmu->as)
+			goto found_mmu;
+	}
+	goto out;
+
+found_mmu:
+	priv = container_of(mmu, struct panfrost_file_priv, mmu);
+
+	spin_lock(&priv->mm_lock);
+
+	drm_mm_for_each_node(node, &priv->mm) {
+		if (offset >= node->start &&
+		    offset < (node->start + node->size)) {
+			mapping = drm_mm_node_to_panfrost_mapping(node);
+
+			kref_get(&mapping->refcount);
+			break;
+		}
+	}
+
+	spin_unlock(&priv->mm_lock);
+out:
+	spin_unlock(&pfdev->as_lock);
+	return mapping;
+}
+
+#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
+
+static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
+				       u64 addr)
+{
+	int ret, i;
+	struct panfrost_gem_mapping *bomapping;
+	struct panfrost_gem_object *bo;
+	struct address_space *mapping;
+	pgoff_t page_offset;
+	struct sg_table *sgt;
+	struct page **pages;
+
+	bomapping = addr_to_mapping(pfdev, as, addr);
+	if (!bomapping)
+		return -ENOENT;
+
+	bo = bomapping->obj;
+	if (!bo->is_heap) {
+		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
+			 bomapping->mmnode.start << PAGE_SHIFT);
+		ret = -EINVAL;
+		goto err_bo;
+	}
+	WARN_ON(bomapping->mmu->as != as);
+
+	/* Assume 2MB alignment and size multiple */
+	addr &= ~((u64)SZ_2M - 1);
+	page_offset = addr >> PAGE_SHIFT;
+	page_offset -= bomapping->mmnode.start;
+
+	mutex_lock(&bo->base.pages_lock);
+
+	if (!bo->base.pages) {
+		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
+				     sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
+		if (!bo->sgts) {
+			mutex_unlock(&bo->base.pages_lock);
+			ret = -ENOMEM;
+			goto err_bo;
+		}
+
+		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
+				       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
+		if (!pages) {
+			kfree(bo->sgts);
+			bo->sgts = NULL;
+			mutex_unlock(&bo->base.pages_lock);
+			ret = -ENOMEM;
+			goto err_bo;
+		}
+		bo->base.pages = pages;
+		bo->base.pages_use_count = 1;
+	} else
+		pages = bo->base.pages;
+
+	mapping = bo->base.base.filp->f_mapping;
+	mapping_set_unevictable(mapping);
+
+	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
+		pages[i] = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(pages[i])) {
+			mutex_unlock(&bo->base.pages_lock);
+			ret = PTR_ERR(pages[i]);
+			goto err_pages;
+		}
+	}
+
+	mutex_unlock(&bo->base.pages_lock);
+
+	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
+	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
+					NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
+	if (ret)
+		goto err_pages;
+
+	if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) {
+		ret = -EINVAL;
+		goto err_map;
+	}
+
+	mmu_map_sg(pfdev, bomapping->mmu, addr,
+		   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
+
+	bomapping->active = true;
+
+	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
+
+	panfrost_gem_mapping_put(bomapping);
+
+	return 0;
+
+err_map:
+	sg_free_table(sgt);
+err_pages:
+	drm_gem_shmem_put_pages(&bo->base);
+err_bo:
+	drm_gem_object_put_unlocked(&bo->base.base);
+	return ret;
+}
+
 static const char *access_type_name(struct panfrost_device *pfdev,
 				    u32 fault_status)
 {
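panfrost_mmu_map_fault_addr() above services GPU page faults on heap BOs in 2 MiB granules: the fault address is rounded down to a 2 MiB boundary, NUM_FAULT_PAGES shmem pages are read in, and one sg_table per granule is mapped. The bucketing arithmetic is easy to check in isolation; the addresses below are invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define SZ_2M		(2u << 20)
#define PAGE_SIZE	4096u
#define NUM_FAULT_PAGES	(SZ_2M / PAGE_SIZE)	/* 512 pages per fault */

int main(void)
{
	uint64_t bo_start = 0x8000000;	/* hypothetical GPU VA of the heap BO */
	uint64_t fault    = 0x8305478;	/* hypothetical faulting address */

	uint64_t granule  = fault & ~(uint64_t)(SZ_2M - 1); /* 2M-align down */
	uint64_t page_off = (granule - bo_start) / PAGE_SIZE;
	uint64_t sgt_idx  = page_off / NUM_FAULT_PAGES;

	/* the handler fills pages[page_off .. page_off + 511] from shmem,
	 * builds bo->sgts[sgt_idx] from them, DMA-maps it, and calls
	 * mmu_map_sg() with IOMMU_READ | IOMMU_WRITE | IOMMU_NOEXEC */
	printf("map 2M at GPU VA 0x%llx (pages %llu..%llu, sgt %llu)\n",
	       (unsigned long long)granule,
	       (unsigned long long)page_off,
	       (unsigned long long)(page_off + NUM_FAULT_PAGES - 1),
	       (unsigned long long)sgt_idx);
	return 0;
}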
@@ -287,13 +561,19 @@ static const char *access_type_name(struct panfrost_device *pfdev,
 static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
 {
 	struct panfrost_device *pfdev = data;
-	u32 status = mmu_read(pfdev, MMU_INT_STAT);
-	int i;
 
-	if (!status)
+	if (!mmu_read(pfdev, MMU_INT_STAT))
 		return IRQ_NONE;
 
-	dev_err(pfdev->dev, "mmu irq status=%x\n", status);
+	mmu_write(pfdev, MMU_INT_MASK, 0);
+	return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
+{
+	struct panfrost_device *pfdev = data;
+	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
+	int i, ret;
 
 	for (i = 0; status; i++) {
 		u32 mask = BIT(i) | BIT(i + 16);
@@ -315,6 +595,18 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
 		access_type = (fault_status >> 8) & 0x3;
 		source_id = (fault_status >> 16);
 
+		/* Page fault only */
+		if ((status & mask) == BIT(i)) {
+			WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);
+
+			ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
+			if (!ret) {
+				mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
+				status &= ~mask;
+				continue;
+			}
+		}
+
 		/* terminal fault, print info about the fault */
 		dev_err(pfdev->dev,
 			"Unhandled Page fault in AS%d at VA 0x%016llX\n"
@@ -337,50 +629,26 @@ static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
 		status &= ~mask;
 	}
 
+	mmu_write(pfdev, MMU_INT_MASK, ~0);
 	return IRQ_HANDLED;
 };
 
 int panfrost_mmu_init(struct panfrost_device *pfdev)
 {
-	struct io_pgtable_ops *pgtbl_ops;
 	int err, irq;
 
-	pfdev->mmu = devm_kzalloc(pfdev->dev, sizeof(*pfdev->mmu), GFP_KERNEL);
-	if (!pfdev->mmu)
-		return -ENOMEM;
-
-	mutex_init(&pfdev->mmu->lock);
-
 	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
 	if (irq <= 0)
 		return -ENODEV;
 
-	err = devm_request_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
-			       IRQF_SHARED, "mmu", pfdev);
+	err = devm_request_threaded_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
+					panfrost_mmu_irq_handler_thread,
+					IRQF_SHARED, "mmu", pfdev);
 
 	if (err) {
 		dev_err(pfdev->dev, "failed to request mmu irq");
 		return err;
 	}
 
-	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
-	mmu_write(pfdev, MMU_INT_MASK, ~0);
-
-	pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
-		.pgsize_bitmap	= SZ_4K | SZ_2M,
-		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
-		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
-		.tlb		= &mmu_tlb_ops,
-		.iommu_dev	= pfdev->dev,
-	};
-
-	pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &pfdev->mmu->pgtbl_cfg,
-					 pfdev);
-	if (!pgtbl_ops)
-		return -ENOMEM;
-
-	pfdev->mmu->pgtbl_ops = pgtbl_ops;
-
-	panfrost_mmu_enable(pfdev, 0);
-
 	return 0;
 }
 
@@ -388,7 +656,4 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
 void panfrost_mmu_fini(struct panfrost_device *pfdev)
 {
 	mmu_write(pfdev, MMU_INT_MASK, 0);
-	mmu_disable(pfdev, 0);
-
-	free_io_pgtable_ops(pfdev->mmu->pgtbl_ops);
 }
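The devm_request_threaded_irq() conversion above splits MMU interrupt handling in two: the hard handler only confirms the interrupt, masks the block, and returns IRQ_WAKE_THREAD, while the thread does the work that may sleep (the shmem allocation in the fault path) and re-arms MMU_INT_MASK when done. A purely illustrative userspace model of that hand-off (compile with cc -pthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t kick = PTHREAD_COND_INITIALIZER;
static int pending, int_mask = 1;

static void hard_handler(void)		/* top half: must not sleep */
{
	pthread_mutex_lock(&lock);
	int_mask = 0;			/* MMU_INT_MASK <- 0 */
	pending = 1;
	pthread_cond_signal(&kick);	/* IRQ_WAKE_THREAD */
	pthread_mutex_unlock(&lock);
}

static void *thread_handler(void *arg)	/* bottom half: may sleep */
{
	pthread_mutex_lock(&lock);
	while (!pending)
		pthread_cond_wait(&kick, &lock);
	pending = 0;
	pthread_mutex_unlock(&lock);

	usleep(1000);			/* fault handling, may sleep */
	printf("fault handled\n");

	pthread_mutex_lock(&lock);
	int_mask = 1;			/* MMU_INT_MASK <- ~0, re-armed */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, thread_handler, NULL);
	hard_handler();
	pthread_join(t, NULL);
	printf("mask=%d\n", int_mask);
	return 0;
}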
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h
index f5878d86a5ce..44fc2edf63ce 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.h
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h
@@ -4,14 +4,21 @@
 #ifndef __PANFROST_MMU_H__
 #define __PANFROST_MMU_H__
 
-struct panfrost_gem_object;
+struct panfrost_gem_mapping;
+struct panfrost_file_priv;
+struct panfrost_mmu;
 
-int panfrost_mmu_map(struct panfrost_gem_object *bo);
-void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
+int panfrost_mmu_map(struct panfrost_gem_mapping *mapping);
+void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping);
 
 int panfrost_mmu_init(struct panfrost_device *pfdev);
 void panfrost_mmu_fini(struct panfrost_device *pfdev);
+void panfrost_mmu_reset(struct panfrost_device *pfdev);
 
-void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr);
+u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
+void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu);
+
+int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv);
+void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv);
 
 #endif
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 83c57d325ca8..684820448be3 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -16,6 +16,7 @@
 #include "panfrost_issues.h"
 #include "panfrost_job.h"
 #include "panfrost_mmu.h"
+#include "panfrost_perfcnt.h"
 #include "panfrost_regs.h"
 
 #define COUNTERS_PER_BLOCK		64
@@ -24,7 +25,7 @@
 #define V4_SHADERS_PER_COREGROUP	4
 
 struct panfrost_perfcnt {
-	struct panfrost_gem_object *bo;
+	struct panfrost_gem_mapping *mapping;
 	size_t bosize;
 	void *buf;
 	struct panfrost_file_priv *user;
@@ -48,7 +49,7 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
 	int ret;
 
 	reinit_completion(&pfdev->perfcnt->dump_comp);
-	gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT;
+	gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
 	gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
 	gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
 	gpu_write(pfdev, GPU_INT_CLEAR,
@@ -66,9 +67,10 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
 }
 
 static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
-					  struct panfrost_file_priv *user,
+					  struct drm_file *file_priv,
 					  unsigned int counterset)
 {
+	struct panfrost_file_priv *user = file_priv->driver_priv;
 	struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
 	struct drm_gem_shmem_object *bo;
 	u32 cfg;
@@ -87,17 +89,22 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
 
-	perfcnt->bo = to_panfrost_bo(&bo->base);
-
 	/* Map the perfcnt buf in the address space attached to file_priv. */
-	ret = panfrost_mmu_map(perfcnt->bo);
+	ret = panfrost_gem_open(&bo->base, file_priv);
 	if (ret)
 		goto err_put_bo;
 
+	perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base),
+						    user);
+	if (!perfcnt->mapping) {
+		ret = -EINVAL;
+		goto err_close_bo;
+	}
+
 	perfcnt->buf = drm_gem_shmem_vmap(&bo->base);
 	if (IS_ERR(perfcnt->buf)) {
 		ret = PTR_ERR(perfcnt->buf);
-		goto err_put_bo;
+		goto err_put_mapping;
 	}
 
 	/*
@@ -152,18 +159,26 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
 	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186))
 		gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff);
 
+	/* The BO ref is retained by the mapping. */
+	drm_gem_object_put_unlocked(&bo->base);
+
 	return 0;
 
 err_vunmap:
-	drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+	drm_gem_shmem_vunmap(&bo->base, perfcnt->buf);
+err_put_mapping:
+	panfrost_gem_mapping_put(perfcnt->mapping);
+err_close_bo:
+	panfrost_gem_close(&bo->base, file_priv);
 err_put_bo:
 	drm_gem_object_put_unlocked(&bo->base);
 	return ret;
 }
 
 static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
-					   struct panfrost_file_priv *user)
+					   struct drm_file *file_priv)
 {
+	struct panfrost_file_priv *user = file_priv->driver_priv;
 	struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
 
 	if (user != perfcnt->user)
@@ -177,10 +192,11 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
 		  GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
 
 	perfcnt->user = NULL;
-	drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf);
+	drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
 	perfcnt->buf = NULL;
-	drm_gem_object_put_unlocked(&perfcnt->bo->base.base);
-	perfcnt->bo = NULL;
+	panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
+	panfrost_gem_mapping_put(perfcnt->mapping);
+	perfcnt->mapping = NULL;
 	pm_runtime_mark_last_busy(pfdev->dev);
 	pm_runtime_put_autosuspend(pfdev->dev);
 
@@ -190,7 +206,6 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
 int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv)
 {
-	struct panfrost_file_priv *pfile = file_priv->driver_priv;
 	struct panfrost_device *pfdev = dev->dev_private;
 	struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
 	struct drm_panfrost_perfcnt_enable *req = data;
@@ -206,10 +221,10 @@ int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
 	mutex_lock(&perfcnt->lock);
 	if (req->enable)
-		ret = panfrost_perfcnt_enable_locked(pfdev, pfile,
+		ret = panfrost_perfcnt_enable_locked(pfdev, file_priv,
 						     req->counterset);
 	else
-		ret = panfrost_perfcnt_disable_locked(pfdev, pfile);
+		ret = panfrost_perfcnt_disable_locked(pfdev, file_priv);
 	mutex_unlock(&perfcnt->lock);
 
 	return ret;
@@ -247,15 +262,16 @@ out:
 	return ret;
 }
 
-void panfrost_perfcnt_close(struct panfrost_file_priv *pfile)
+void panfrost_perfcnt_close(struct drm_file *file_priv)
 {
+	struct panfrost_file_priv *pfile = file_priv->driver_priv;
 	struct panfrost_device *pfdev = pfile->pfdev;
 	struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
 
 	pm_runtime_get_sync(pfdev->dev);
 	mutex_lock(&perfcnt->lock);
 	if (perfcnt->user == pfile)
-		panfrost_perfcnt_disable_locked(pfdev, pfile);
+		panfrost_perfcnt_disable_locked(pfdev, file_priv);
 	mutex_unlock(&perfcnt->lock);
 	pm_runtime_mark_last_busy(pfdev->dev);
 	pm_runtime_put_autosuspend(pfdev->dev);
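The perfcnt rework above stops holding a raw BO pointer and instead takes a reference on a panfrost_gem_mapping, so the GPU VA used for counter dumps stays valid until the final put even if the owning handle goes away first (note the "BO ref is retained by the mapping" comment). A toy model of that refcount rule, with invented names:

#include <stdio.h>
#include <stdlib.h>

struct mapping {
	int refcount;
	unsigned long iova;	/* stands in for mmnode.start */
};

static struct mapping *mapping_get(struct mapping *m)
{
	m->refcount++;
	return m;
}

static void mapping_put(struct mapping *m)
{
	if (--m->refcount)
		return;
	/* last ref gone: only now is the GPU VA torn down */
	printf("unmap iova 0x%lx\n", m->iova);
	free(m);
}

int main(void)
{
	struct mapping *m = malloc(sizeof(*m));

	m->refcount = 1;	/* ref held by the object's mapping list */
	m->iova = 0x100000;

	struct mapping *user = mapping_get(m);	/* perfcnt enable, or a job */
	mapping_put(m);		/* handle closed, list ref dropped */
	printf("dump still targets 0x%lx\n", user->iova);
	mapping_put(user);	/* perfcnt disable: final put unmaps */
	return 0;
}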
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.h b/drivers/gpu/drm/panfrost/panfrost_perfcnt.h
index 13b8fdaa1b43..8bbcf5f5fb33 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.h
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.h
@@ -9,7 +9,7 @@ void panfrost_perfcnt_sample_done(struct panfrost_device *pfdev);
 void panfrost_perfcnt_clean_cache_done(struct panfrost_device *pfdev);
 int panfrost_perfcnt_init(struct panfrost_device *pfdev);
 void panfrost_perfcnt_fini(struct panfrost_device *pfdev);
-void panfrost_perfcnt_close(struct panfrost_file_priv *pfile);
+void panfrost_perfcnt_close(struct drm_file *file_priv);
 int panfrost_ioctl_perfcnt_enable(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 int panfrost_ioctl_perfcnt_dump(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);