Diffstat (limited to 'drivers/gpu/drm')
23 files changed, 213 insertions, 684 deletions
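Most of the hunks below share one theme: the mmu_notifier invalidate callbacks in amdgpu, radeon and i915 stop taking separate mm/start/end/blockable arguments and instead receive a single const struct mmu_notifier_range. A minimal sketch of the converted callback shape; the "foo" driver, its struct and the invalidation step are illustrative only, while the range handling (inclusive interval tree vs. exclusive notifier end, the -EAGAIN bail-out for non-blockable contexts) mirrors the hunks themselves:

#include <linux/kernel.h>
#include <linux/mmu_notifier.h>
#include <linux/interval_tree.h>

struct foo_mn {
	struct mmu_notifier mn;
	struct rb_root_cached objects;	/* interval tree of userptr ranges */
};

static int foo_invalidate_range_start(struct mmu_notifier *mn,
				      const struct mmu_notifier_range *range)
{
	struct foo_mn *fmn = container_of(mn, struct foo_mn, mn);
	struct interval_tree_node *it;
	/* the notifier range is exclusive at the end, the tree is inclusive */
	unsigned long end = range->end - 1;

	it = interval_tree_iter_first(&fmn->objects, range->start, end);
	while (it) {
		if (!range->blockable)
			return -EAGAIN;	/* must not sleep in this context */
		/* ... invalidate whatever backs this node ... */
		it = interval_tree_iter_next(it, range->start, end);
	}
	return 0;
}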
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index e55508b39496..3e6823fdd939 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -238,44 +238,40 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node, * amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change * * @mn: our notifier - * @mm: the mm this callback is about - * @start: start of updated range - * @end: end of updated range + * @range: mmu notifier context * * Block for operations on BOs to finish and mark pages as accessed and * potentially dirty. */ static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long start, - unsigned long end, - bool blockable) + const struct mmu_notifier_range *range) { struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn); struct interval_tree_node *it; + unsigned long end; /* notification is exclusive, but interval is inclusive */ - end -= 1; + end = range->end - 1; /* TODO we should be able to split locking for interval tree and * amdgpu_mn_invalidate_node */ - if (amdgpu_mn_read_lock(amn, blockable)) + if (amdgpu_mn_read_lock(amn, range->blockable)) return -EAGAIN; - it = interval_tree_iter_first(&amn->objects, start, end); + it = interval_tree_iter_first(&amn->objects, range->start, end); while (it) { struct amdgpu_mn_node *node; - if (!blockable) { + if (!range->blockable) { amdgpu_mn_read_unlock(amn); return -EAGAIN; } node = container_of(it, struct amdgpu_mn_node, it); - it = interval_tree_iter_next(it, start, end); + it = interval_tree_iter_next(it, range->start, end); - amdgpu_mn_invalidate_node(node, start, end); + amdgpu_mn_invalidate_node(node, range->start, end); } return 0; @@ -294,39 +290,38 @@ static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn, * are restorted in amdgpu_mn_invalidate_range_end_hsa. */ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long start, - unsigned long end, - bool blockable) + const struct mmu_notifier_range *range) { struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn); struct interval_tree_node *it; + unsigned long end; /* notification is exclusive, but interval is inclusive */ - end -= 1; + end = range->end - 1; - if (amdgpu_mn_read_lock(amn, blockable)) + if (amdgpu_mn_read_lock(amn, range->blockable)) return -EAGAIN; - it = interval_tree_iter_first(&amn->objects, start, end); + it = interval_tree_iter_first(&amn->objects, range->start, end); while (it) { struct amdgpu_mn_node *node; struct amdgpu_bo *bo; - if (!blockable) { + if (!range->blockable) { amdgpu_mn_read_unlock(amn); return -EAGAIN; } node = container_of(it, struct amdgpu_mn_node, it); - it = interval_tree_iter_next(it, start, end); + it = interval_tree_iter_next(it, range->start, end); list_for_each_entry(bo, &node->bos, mn_list) { struct kgd_mem *mem = bo->kfd_bo; if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, - start, end)) - amdgpu_amdkfd_evict_userptr(mem, mm); + range->start, + end)) + amdgpu_amdkfd_evict_userptr(mem, range->mm); } } @@ -344,9 +339,7 @@ static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn, * Release the lock again to allow new command submissions. 
*/ static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long start, - unsigned long end) + const struct mmu_notifier_range *range) { struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index c02adbbeef2a..b7bc7d7d048f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -853,7 +853,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size, */ pgdat = NODE_DATA(numa_node_id); for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) - mem_in_bytes += pgdat->node_zones[zone_type].managed_pages; + mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]); mem_in_bytes <<= PAGE_SHIFT; sub_type_hdr->length_low = lower_32_bits(mem_in_bytes); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c index 7fea74861a87..160ce3c060a5 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -439,6 +439,4 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, if (drm_debug & DRM_UT_DRIVER) etnaviv_buffer_dump(gpu, buffer, 0, 0x50); - - gpu->lastctx = cmdbuf->ctx; } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 52802e6049e0..96efc84396bf 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -72,14 +72,8 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file) for (i = 0; i < ETNA_MAX_PIPES; i++) { struct etnaviv_gpu *gpu = priv->gpu[i]; - if (gpu) { - mutex_lock(&gpu->lock); - if (gpu->lastctx == ctx) - gpu->lastctx = NULL; - mutex_unlock(&gpu->lock); - + if (gpu) drm_sched_entity_destroy(&ctx->sched_entity[i]); - } } kfree(ctx); @@ -523,7 +517,7 @@ static int etnaviv_bind(struct device *dev) if (!priv) { dev_err(dev, "failed to allocate private data\n"); ret = -ENOMEM; - goto out_unref; + goto out_put; } drm->dev_private = priv; @@ -549,7 +543,7 @@ out_register: component_unbind_all(dev, drm); out_bind: kfree(priv); -out_unref: +out_put: drm_dev_put(drm); return ret; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h index 8d02d1b7dcf5..4bf698de5996 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h @@ -107,17 +107,6 @@ static inline size_t size_vstruct(size_t nelem, size_t elem_size, size_t base) return base + nelem * elem_size; } -/* returns true if fence a comes after fence b */ -static inline bool fence_after(u32 a, u32 b) -{ - return (s32)(a - b) > 0; -} - -static inline bool fence_after_eq(u32 a, u32 b) -{ - return (s32)(a - b) >= 0; -} - /* * Etnaviv timeouts are specified wrt CLOCK_MONOTONIC, not jiffies. 
* We need to calculate the timeout in terms of number of jiffies diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index f225fbc6edd2..6904535475de 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -3,10 +3,12 @@ * Copyright (C) 2015-2018 Etnaviv Project */ +#include <linux/clk.h> #include <linux/component.h> #include <linux/dma-fence.h> #include <linux/moduleparam.h> #include <linux/of_device.h> +#include <linux/regulator/consumer.h> #include <linux/thermal.h> #include "etnaviv_cmdbuf.h" @@ -976,7 +978,6 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m) void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu) { - unsigned long flags; unsigned int i = 0; dev_err(gpu->dev, "recover hung GPU!\n"); @@ -989,15 +990,13 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu) etnaviv_hw_reset(gpu); /* complete all events, the GPU won't do it after the reset */ - spin_lock_irqsave(&gpu->event_spinlock, flags); + spin_lock(&gpu->event_spinlock); for_each_set_bit_from(i, gpu->event_bitmap, ETNA_NR_EVENTS) complete(&gpu->event_free); bitmap_zero(gpu->event_bitmap, ETNA_NR_EVENTS); - spin_unlock_irqrestore(&gpu->event_spinlock, flags); - gpu->completed_fence = gpu->active_fence; + spin_unlock(&gpu->event_spinlock); etnaviv_gpu_hw_init(gpu); - gpu->lastctx = NULL; gpu->exec_state = -1; mutex_unlock(&gpu->lock); @@ -1032,7 +1031,7 @@ static bool etnaviv_fence_signaled(struct dma_fence *fence) { struct etnaviv_fence *f = to_etnaviv_fence(fence); - return fence_completed(f->gpu, f->base.seqno); + return (s32)(f->gpu->completed_fence - f->base.seqno) >= 0; } static void etnaviv_fence_release(struct dma_fence *fence) @@ -1071,6 +1070,12 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) return &f->base; } +/* returns true if fence a comes after fence b */ +static inline bool fence_after(u32 a, u32 b) +{ + return (s32)(a - b) > 0; +} + /* * event management: */ @@ -1078,7 +1083,7 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu) static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events, unsigned int *events) { - unsigned long flags, timeout = msecs_to_jiffies(10 * 10000); + unsigned long timeout = msecs_to_jiffies(10 * 10000); unsigned i, acquired = 0; for (i = 0; i < nr_events; i++) { @@ -1095,7 +1100,7 @@ static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events, timeout = ret; } - spin_lock_irqsave(&gpu->event_spinlock, flags); + spin_lock(&gpu->event_spinlock); for (i = 0; i < nr_events; i++) { int event = find_first_zero_bit(gpu->event_bitmap, ETNA_NR_EVENTS); @@ -1105,7 +1110,7 @@ static int event_alloc(struct etnaviv_gpu *gpu, unsigned nr_events, set_bit(event, gpu->event_bitmap); } - spin_unlock_irqrestore(&gpu->event_spinlock, flags); + spin_unlock(&gpu->event_spinlock); return 0; @@ -1118,18 +1123,11 @@ out: static void event_free(struct etnaviv_gpu *gpu, unsigned int event) { - unsigned long flags; - - spin_lock_irqsave(&gpu->event_spinlock, flags); - if (!test_bit(event, gpu->event_bitmap)) { dev_warn(gpu->dev, "event %u is already marked as free", event); - spin_unlock_irqrestore(&gpu->event_spinlock, flags); } else { clear_bit(event, gpu->event_bitmap); - spin_unlock_irqrestore(&gpu->event_spinlock, flags); - complete(&gpu->event_free); } } @@ -1306,8 +1304,6 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit) goto out_unlock; } - gpu->active_fence = gpu_fence->seqno; - if (submit->nr_pmrs) { 
gpu->event[event[1]].sync_point = &sync_point_perfmon_sample_pre; kref_get(&submit->refcount); @@ -1549,7 +1545,6 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu) etnaviv_gpu_update_clock(gpu); etnaviv_gpu_hw_init(gpu); - gpu->lastctx = NULL; gpu->exec_state = -1; mutex_unlock(&gpu->lock); @@ -1806,8 +1801,8 @@ static int etnaviv_gpu_rpm_suspend(struct device *dev) struct etnaviv_gpu *gpu = dev_get_drvdata(dev); u32 idle, mask; - /* If we have outstanding fences, we're not idle */ - if (gpu->completed_fence != gpu->active_fence) + /* If there are any jobs in the HW queue, we're not idle */ + if (atomic_read(&gpu->sched.hw_rq_count)) return -EBUSY; /* Check whether the hardware (except FE) is idle */ diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h index 9a75a6937268..9bcf151f706b 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h @@ -6,9 +6,6 @@ #ifndef __ETNAVIV_GPU_H__ #define __ETNAVIV_GPU_H__ -#include <linux/clk.h> -#include <linux/regulator/consumer.h> - #include "etnaviv_cmdbuf.h" #include "etnaviv_drv.h" @@ -88,6 +85,8 @@ struct etnaviv_event { struct etnaviv_cmdbuf_suballoc; struct etnaviv_cmdbuf; +struct regulator; +struct clk; #define ETNA_NR_EVENTS 30 @@ -98,7 +97,6 @@ struct etnaviv_gpu { struct mutex lock; struct etnaviv_chip_identity identity; enum etnaviv_sec_mode sec_mode; - struct etnaviv_file_private *lastctx; struct workqueue_struct *wq; struct drm_gpu_scheduler sched; @@ -121,7 +119,6 @@ struct etnaviv_gpu { struct mutex fence_lock; struct idr fence_idr; u32 next_fence; - u32 active_fence; u32 completed_fence; wait_queue_head_t fence_event; u64 fence_context; @@ -161,11 +158,6 @@ static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg) return readl(gpu->mmio + reg); } -static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence) -{ - return fence_after_eq(gpu->completed_fence, fence); -} - int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value); int etnaviv_gpu_init(struct etnaviv_gpu *gpu); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index e3d6a8584715..786a8ee6f10f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -228,6 +228,21 @@ static const uint32_t fimd_formats[] = { DRM_FORMAT_ARGB8888, }; +static const unsigned int capabilities[WINDOWS_NR] = { + 0, + EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND, + EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND, + EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND, + EXYNOS_DRM_PLANE_CAP_WIN_BLEND | EXYNOS_DRM_PLANE_CAP_PIX_BLEND, +}; + +static inline void fimd_set_bits(struct fimd_context *ctx, u32 reg, u32 mask, + u32 val) +{ + val = (val & mask) | (readl(ctx->regs + reg) & ~mask); + writel(val, ctx->regs + reg); +} + static int fimd_enable_vblank(struct exynos_drm_crtc *crtc) { struct fimd_context *ctx = crtc->ctx; @@ -551,13 +566,88 @@ static void fimd_commit(struct exynos_drm_crtc *crtc) writel(val, ctx->regs + VIDCON0); } +static void fimd_win_set_bldeq(struct fimd_context *ctx, unsigned int win, + unsigned int alpha, unsigned int pixel_alpha) +{ + u32 mask = BLENDEQ_A_FUNC_F(0xf) | BLENDEQ_B_FUNC_F(0xf); + u32 val = 0; + + switch (pixel_alpha) { + case DRM_MODE_BLEND_PIXEL_NONE: + case DRM_MODE_BLEND_COVERAGE: + val |= BLENDEQ_A_FUNC_F(BLENDEQ_ALPHA_A); + val |= BLENDEQ_B_FUNC_F(BLENDEQ_ONE_MINUS_ALPHA_A); + break; + case 
DRM_MODE_BLEND_PREMULTI: + default: + if (alpha != DRM_BLEND_ALPHA_OPAQUE) { + val |= BLENDEQ_A_FUNC_F(BLENDEQ_ALPHA0); + val |= BLENDEQ_B_FUNC_F(BLENDEQ_ONE_MINUS_ALPHA_A); + } else { + val |= BLENDEQ_A_FUNC_F(BLENDEQ_ONE); + val |= BLENDEQ_B_FUNC_F(BLENDEQ_ONE_MINUS_ALPHA_A); + } + break; + } + fimd_set_bits(ctx, BLENDEQx(win), mask, val); +} -static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win, - uint32_t pixel_format, int width) +static void fimd_win_set_bldmod(struct fimd_context *ctx, unsigned int win, + unsigned int alpha, unsigned int pixel_alpha) { - unsigned long val; + u32 win_alpha_l = (alpha >> 8) & 0xf; + u32 win_alpha_h = alpha >> 12; + u32 val = 0; - val = WINCONx_ENWIN; + switch (pixel_alpha) { + case DRM_MODE_BLEND_PIXEL_NONE: + break; + case DRM_MODE_BLEND_COVERAGE: + case DRM_MODE_BLEND_PREMULTI: + default: + val |= WINCON1_ALPHA_SEL; + val |= WINCON1_BLD_PIX; + val |= WINCON1_ALPHA_MUL; + break; + } + fimd_set_bits(ctx, WINCON(win), WINCONx_BLEND_MODE_MASK, val); + + /* OSD alpha */ + val = VIDISD14C_ALPHA0_R(win_alpha_h) | + VIDISD14C_ALPHA0_G(win_alpha_h) | + VIDISD14C_ALPHA0_B(win_alpha_h) | + VIDISD14C_ALPHA1_R(0x0) | + VIDISD14C_ALPHA1_G(0x0) | + VIDISD14C_ALPHA1_B(0x0); + writel(val, ctx->regs + VIDOSD_C(win)); + + val = VIDW_ALPHA_R(win_alpha_l) | VIDW_ALPHA_G(win_alpha_l) | + VIDW_ALPHA_B(win_alpha_l); + writel(val, ctx->regs + VIDWnALPHA0(win)); + + val = VIDW_ALPHA_R(0x0) | VIDW_ALPHA_G(0x0) | + VIDW_ALPHA_B(0x0); + writel(val, ctx->regs + VIDWnALPHA1(win)); + + fimd_set_bits(ctx, BLENDCON, BLENDCON_NEW_MASK, + BLENDCON_NEW_8BIT_ALPHA_VALUE); +} + +static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win, + struct drm_framebuffer *fb, int width) +{ + struct exynos_drm_plane plane = ctx->planes[win]; + struct exynos_drm_plane_state *state = + to_exynos_plane_state(plane.base.state); + uint32_t pixel_format = fb->format->format; + unsigned int alpha = state->base.alpha; + u32 val = WINCONx_ENWIN; + unsigned int pixel_alpha; + + if (fb->format->has_alpha) + pixel_alpha = state->base.pixel_blend_mode; + else + pixel_alpha = DRM_MODE_BLEND_PIXEL_NONE; /* * In case of s3c64xx, window 0 doesn't support alpha channel. @@ -591,8 +681,7 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win, break; case DRM_FORMAT_ARGB8888: default: - val |= WINCON1_BPPMODE_25BPP_A1888 - | WINCON1_BLD_PIX | WINCON1_ALPHA_SEL; + val |= WINCON1_BPPMODE_25BPP_A1888; val |= WINCONx_WSWP; val |= WINCONx_BURSTLEN_16WORD; break; @@ -610,25 +699,12 @@ static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win, val &= ~WINCONx_BURSTLEN_MASK; val |= WINCONx_BURSTLEN_4WORD; } - - writel(val, ctx->regs + WINCON(win)); + fimd_set_bits(ctx, WINCON(win), ~WINCONx_BLEND_MODE_MASK, val); /* hardware window 0 doesn't support alpha channel. 
*/ if (win != 0) { - /* OSD alpha */ - val = VIDISD14C_ALPHA0_R(0xf) | - VIDISD14C_ALPHA0_G(0xf) | - VIDISD14C_ALPHA0_B(0xf) | - VIDISD14C_ALPHA1_R(0xf) | - VIDISD14C_ALPHA1_G(0xf) | - VIDISD14C_ALPHA1_B(0xf); - - writel(val, ctx->regs + VIDOSD_C(win)); - - val = VIDW_ALPHA_R(0xf) | VIDW_ALPHA_G(0xf) | - VIDW_ALPHA_G(0xf); - writel(val, ctx->regs + VIDWnALPHA0(win)); - writel(val, ctx->regs + VIDWnALPHA1(win)); + fimd_win_set_bldmod(ctx, win, alpha, pixel_alpha); + fimd_win_set_bldeq(ctx, win, alpha, pixel_alpha); } } @@ -785,7 +861,7 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc, DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val); } - fimd_win_set_pixfmt(ctx, win, fb->format->format, state->src.w); + fimd_win_set_pixfmt(ctx, win, fb, state->src.w); /* hardware window 0 doesn't support color key. */ if (win != 0) @@ -987,6 +1063,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data) ctx->configs[i].num_pixel_formats = ARRAY_SIZE(fimd_formats); ctx->configs[i].zpos = i; ctx->configs[i].type = fimd_win_types[i]; + ctx->configs[i].capabilities = capabilities[i]; ret = exynos_plane_init(drm_dev, &ctx->planes[i], i, &ctx->configs[i]); if (ret) diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 33a458b7f1fc..148be8e1a090 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -131,5 +131,5 @@ config DRM_I915_GVT_KVMGT menu "drm/i915 Debugging" depends on DRM_I915 depends on EXPERT -source drivers/gpu/drm/i915/Kconfig.debug +source "drivers/gpu/drm/i915/Kconfig.debug" endmenu diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index d36a9755ad91..a9de07bb72c8 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2559,7 +2559,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) * If there's no chance of allocating enough pages for the whole * object, bail early. 
*/ - if (page_count > totalram_pages) + if (page_count > totalram_pages()) return -ENOMEM; st = kmalloc(sizeof(*st), GFP_KERNEL); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 786d719e652d..8ff6b581cf1c 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -26,7 +26,7 @@ * */ -#include <linux/dma_remapping.h> +#include <linux/intel-iommu.h> #include <linux/reservation.h> #include <linux/sync_file.h> #include <linux/uaccess.h> diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index 2c9b284036d1..3df77020aada 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c @@ -113,27 +113,25 @@ static void del_object(struct i915_mmu_object *mo) } static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, - struct mm_struct *mm, - unsigned long start, - unsigned long end, - bool blockable) + const struct mmu_notifier_range *range) { struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn); struct i915_mmu_object *mo; struct interval_tree_node *it; LIST_HEAD(cancelled); + unsigned long end; if (RB_EMPTY_ROOT(&mn->objects.rb_root)) return 0; /* interval ranges are inclusive, but invalidate range is exclusive */ - end--; + end = range->end - 1; spin_lock(&mn->lock); - it = interval_tree_iter_first(&mn->objects, start, end); + it = interval_tree_iter_first(&mn->objects, range->start, end); while (it) { - if (!blockable) { + if (!range->blockable) { spin_unlock(&mn->lock); return -EAGAIN; } @@ -151,7 +149,7 @@ static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, queue_work(mn->wq, &mo->work); list_add(&mo->link, &cancelled); - it = interval_tree_iter_next(it, start, end); + it = interval_tree_iter_next(it, range->start, end); } list_for_each_entry(mo, &cancelled, link) del_object(mo); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 07c861884c70..3da9c0f9e948 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -46,7 +46,7 @@ #include <drm/drm_plane_helper.h> #include <drm/drm_rect.h> #include <drm/drm_atomic_uapi.h> -#include <linux/dma_remapping.h> +#include <linux/intel-iommu.h> #include <linux/reservation.h> /* Primary plane formats for gen <= 3 */ diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 69fe86b30fbb..a9ed0ecc94e2 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -170,7 +170,7 @@ static int igt_ppgtt_alloc(void *arg) * This should ensure that we do not run into the oomkiller during * the test and take down the machine wilfully. 
*/ - limit = totalram_pages << PAGE_SHIFT; + limit = totalram_pages() << PAGE_SHIFT; limit = min(ppgtt->vm.total, limit); /* Check we can allocate the entire range */ @@ -1244,7 +1244,7 @@ static int exercise_mock(struct drm_i915_private *i915, u64 hole_start, u64 hole_end, unsigned long end_time)) { - const u64 limit = totalram_pages << PAGE_SHIFT; + const u64 limit = totalram_pages() << PAGE_SHIFT; struct i915_gem_context *ctx; struct i915_hw_ppgtt *ppgtt; IGT_TIMEOUT(end_time); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index c58e953fefa3..5beb83d1cf87 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -927,26 +927,6 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu) return ret; } -/* Get the list of RPMh voltage levels from cmd-db */ -static int a6xx_gmu_rpmh_arc_cmds(const char *id, void *vals, int size) -{ - u32 len = cmd_db_read_aux_data_len(id); - - if (!len) - return 0; - - if (WARN_ON(len > size)) - return -EINVAL; - - cmd_db_read_aux_data(id, vals, len); - - /* - * The data comes back as an array of unsigned shorts so adjust the - * count accordingly - */ - return len >> 1; -} - /* Return the 'arc-level' for the given frequency */ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq) { @@ -974,11 +954,30 @@ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq) } static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes, - unsigned long *freqs, int freqs_count, - u16 *pri, int pri_count, - u16 *sec, int sec_count) + unsigned long *freqs, int freqs_count, const char *id) { int i, j; + const u16 *pri, *sec; + size_t pri_count, sec_count; + + pri = cmd_db_read_aux_data(id, &pri_count); + if (IS_ERR(pri)) + return PTR_ERR(pri); + /* + * The data comes back as an array of unsigned shorts so adjust the + * count accordingly + */ + pri_count >>= 1; + if (!pri_count) + return -EINVAL; + + sec = cmd_db_read_aux_data("mx.lvl", &sec_count); + if (IS_ERR(sec)) + return PTR_ERR(sec); + + sec_count >>= 1; + if (!sec_count) + return -EINVAL; /* Construct a vote for each frequency */ for (i = 0; i < freqs_count; i++) { @@ -1037,25 +1036,15 @@ static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu) struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu); struct adreno_gpu *adreno_gpu = &a6xx_gpu->base; struct msm_gpu *gpu = &adreno_gpu->base; - - u16 gx[16], cx[16], mx[16]; - u32 gxcount, cxcount, mxcount; int ret; - /* Get the list of available voltage levels for each component */ - gxcount = a6xx_gmu_rpmh_arc_cmds("gfx.lvl", gx, sizeof(gx)); - cxcount = a6xx_gmu_rpmh_arc_cmds("cx.lvl", cx, sizeof(cx)); - mxcount = a6xx_gmu_rpmh_arc_cmds("mx.lvl", mx, sizeof(mx)); - /* Build the GX votes */ ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes, - gmu->gpu_freqs, gmu->nr_gpu_freqs, - gx, gxcount, mx, mxcount); + gmu->gpu_freqs, gmu->nr_gpu_freqs, "gfx.lvl"); /* Build the CX votes */ ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes, - gmu->gmu_freqs, gmu->nr_gmu_freqs, - cx, cxcount, mx, mxcount); + gmu->gmu_freqs, gmu->nr_gmu_freqs, "cx.lvl"); return ret; } diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c index f8b35df44c60..b3019505065a 100644 --- a/drivers/gpu/drm/radeon/radeon_mn.c +++ b/drivers/gpu/drm/radeon/radeon_mn.c @@ -119,40 +119,38 @@ static void radeon_mn_release(struct mmu_notifier *mn, * unmap them by move them into system domain again. 
*/ static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn, - struct mm_struct *mm, - unsigned long start, - unsigned long end, - bool blockable) + const struct mmu_notifier_range *range) { struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn); struct ttm_operation_ctx ctx = { false, false }; struct interval_tree_node *it; + unsigned long end; int ret = 0; /* notification is exclusive, but interval is inclusive */ - end -= 1; + end = range->end - 1; /* TODO we should be able to split locking for interval tree and * the tear down. */ - if (blockable) + if (range->blockable) mutex_lock(&rmn->lock); else if (!mutex_trylock(&rmn->lock)) return -EAGAIN; - it = interval_tree_iter_first(&rmn->objects, start, end); + it = interval_tree_iter_first(&rmn->objects, range->start, end); while (it) { struct radeon_mn_node *node; struct radeon_bo *bo; long r; - if (!blockable) { + if (!range->blockable) { ret = -EAGAIN; goto out_unlock; } node = container_of(it, struct radeon_mn_node, it); - it = interval_tree_iter_next(it, start, end); + it = interval_tree_iter_next(it, range->start, end); list_for_each_entry(bo, &node->bos, mn_list) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index f05a29ff586e..25afb1d594e3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -34,7 +34,7 @@ #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_module.h> -#include <linux/dma_remapping.h> +#include <linux/intel-iommu.h> #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" #define VMWGFX_CHIP_SVGAII 0 @@ -583,7 +583,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) dev_priv->map_mode = vmw_dma_map_populate; - if (dma_ops->sync_single_for_cpu) + if (dma_ops && dma_ops->sync_single_for_cpu) dev_priv->map_mode = vmw_dma_alloc_coherent; #ifdef CONFIG_SWIOTLB if (swiotlb_nr_tbl() == 0) diff --git a/drivers/gpu/drm/xen/Kconfig b/drivers/gpu/drm/xen/Kconfig index 4cca160782ab..f969d486855d 100644 --- a/drivers/gpu/drm/xen/Kconfig +++ b/drivers/gpu/drm/xen/Kconfig @@ -12,6 +12,7 @@ config DRM_XEN_FRONTEND select DRM_KMS_HELPER select VIDEOMODE_HELPERS select XEN_XENBUS_FRONTEND + select XEN_FRONT_PGDIR_SHBUF help Choose this option if you want to enable a para-virtualized frontend DRM/KMS driver for Xen guest OSes. 
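The Xen hunks that follow replace the driver-private xen_drm_front_shbuf helpers with the shared xen-front-pgdir-shbuf library (hence the new XEN_FRONT_PGDIR_SHBUF select above). A minimal sketch of the new allocation flow, condensed from the xen_drm_front_dbuf_create() hunk below; the foo_share_dbuf() wrapper is hypothetical and error paths are trimmed:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen-front-pgdir-shbuf.h>

static int foo_share_dbuf(struct xenbus_device *xb_dev,
			  struct xen_front_pgdir_shbuf *shbuf,
			  struct page **pages, u64 size, bool be_alloc,
			  grant_ref_t *gref_directory)
{
	struct xen_front_pgdir_shbuf_cfg cfg = {
		.xb_dev = xb_dev,
		.num_pages = DIV_ROUND_UP(size, PAGE_SIZE),
		.pages = pages,
		.pgdir = shbuf,
		.be_alloc = be_alloc,
	};
	int ret = xen_front_pgdir_shbuf_alloc(&cfg);

	if (ret < 0)
		return ret;
	/* grant reference of the first page-directory page, passed to the
	 * backend in the XENDISPL_OP_DBUF_CREATE request */
	*gref_directory = xen_front_pgdir_shbuf_get_dir_start(shbuf);
	return 0;
}

Note that the xen_front_pgdir_shbuf is now embedded in the driver's dbuf rather than allocated by the helper, which is why dbuf_add_to_list() loses its allocation and its error path in the hunk below.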
diff --git a/drivers/gpu/drm/xen/Makefile b/drivers/gpu/drm/xen/Makefile index 712afff5ffc3..825905f67faa 100644 --- a/drivers/gpu/drm/xen/Makefile +++ b/drivers/gpu/drm/xen/Makefile @@ -4,7 +4,6 @@ drm_xen_front-objs := xen_drm_front.o \ xen_drm_front_kms.o \ xen_drm_front_conn.o \ xen_drm_front_evtchnl.o \ - xen_drm_front_shbuf.o \ xen_drm_front_cfg.o \ xen_drm_front_gem.o diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 6b6d5ab82ec3..4d3d36fc3a5d 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -19,6 +19,7 @@ #include <xen/xen.h> #include <xen/xenbus.h> +#include <xen/xen-front-pgdir-shbuf.h> #include <xen/interface/io/displif.h> #include "xen_drm_front.h" @@ -26,28 +27,20 @@ #include "xen_drm_front_evtchnl.h" #include "xen_drm_front_gem.h" #include "xen_drm_front_kms.h" -#include "xen_drm_front_shbuf.h" struct xen_drm_front_dbuf { struct list_head list; u64 dbuf_cookie; u64 fb_cookie; - struct xen_drm_front_shbuf *shbuf; + + struct xen_front_pgdir_shbuf shbuf; }; -static int dbuf_add_to_list(struct xen_drm_front_info *front_info, - struct xen_drm_front_shbuf *shbuf, u64 dbuf_cookie) +static void dbuf_add_to_list(struct xen_drm_front_info *front_info, + struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie) { - struct xen_drm_front_dbuf *dbuf; - - dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL); - if (!dbuf) - return -ENOMEM; - dbuf->dbuf_cookie = dbuf_cookie; - dbuf->shbuf = shbuf; list_add(&dbuf->list, &front_info->dbuf_list); - return 0; } static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list, @@ -62,15 +55,6 @@ static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list, return NULL; } -static void dbuf_flush_fb(struct list_head *dbuf_list, u64 fb_cookie) -{ - struct xen_drm_front_dbuf *buf, *q; - - list_for_each_entry_safe(buf, q, dbuf_list, list) - if (buf->fb_cookie == fb_cookie) - xen_drm_front_shbuf_flush(buf->shbuf); -} - static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie) { struct xen_drm_front_dbuf *buf, *q; @@ -78,8 +62,8 @@ static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie) list_for_each_entry_safe(buf, q, dbuf_list, list) if (buf->dbuf_cookie == dbuf_cookie) { list_del(&buf->list); - xen_drm_front_shbuf_unmap(buf->shbuf); - xen_drm_front_shbuf_free(buf->shbuf); + xen_front_pgdir_shbuf_unmap(&buf->shbuf); + xen_front_pgdir_shbuf_free(&buf->shbuf); kfree(buf); break; } @@ -91,8 +75,8 @@ static void dbuf_free_all(struct list_head *dbuf_list) list_for_each_entry_safe(buf, q, dbuf_list, list) { list_del(&buf->list); - xen_drm_front_shbuf_unmap(buf->shbuf); - xen_drm_front_shbuf_free(buf->shbuf); + xen_front_pgdir_shbuf_unmap(&buf->shbuf); + xen_front_pgdir_shbuf_free(&buf->shbuf); kfree(buf); } } @@ -171,9 +155,9 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info, u32 bpp, u64 size, struct page **pages) { struct xen_drm_front_evtchnl *evtchnl; - struct xen_drm_front_shbuf *shbuf; + struct xen_drm_front_dbuf *dbuf; struct xendispl_req *req; - struct xen_drm_front_shbuf_cfg buf_cfg; + struct xen_front_pgdir_shbuf_cfg buf_cfg; unsigned long flags; int ret; @@ -181,28 +165,29 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info, if (unlikely(!evtchnl)) return -EIO; + dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL); + if (!dbuf) + return -ENOMEM; + + dbuf_add_to_list(front_info, dbuf, dbuf_cookie); + memset(&buf_cfg, 0, sizeof(buf_cfg)); buf_cfg.xb_dev = front_info->xb_dev; + buf_cfg.num_pages = DIV_ROUND_UP(size, 
PAGE_SIZE); buf_cfg.pages = pages; - buf_cfg.size = size; + buf_cfg.pgdir = &dbuf->shbuf; buf_cfg.be_alloc = front_info->cfg.be_alloc; - shbuf = xen_drm_front_shbuf_alloc(&buf_cfg); - if (IS_ERR(shbuf)) - return PTR_ERR(shbuf); - - ret = dbuf_add_to_list(front_info, shbuf, dbuf_cookie); - if (ret < 0) { - xen_drm_front_shbuf_free(shbuf); - return ret; - } + ret = xen_front_pgdir_shbuf_alloc(&buf_cfg); + if (ret < 0) + goto fail_shbuf_alloc; mutex_lock(&evtchnl->u.req.req_io_lock); spin_lock_irqsave(&front_info->io_lock, flags); req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE); req->op.dbuf_create.gref_directory = - xen_drm_front_shbuf_get_dir_start(shbuf); + xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf); req->op.dbuf_create.buffer_sz = size; req->op.dbuf_create.dbuf_cookie = dbuf_cookie; req->op.dbuf_create.width = width; @@ -221,7 +206,7 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info, if (ret < 0) goto fail; - ret = xen_drm_front_shbuf_map(shbuf); + ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf); if (ret < 0) goto fail; @@ -230,6 +215,7 @@ int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info, fail: mutex_unlock(&evtchnl->u.req.req_io_lock); +fail_shbuf_alloc: dbuf_free(&front_info->dbuf_list, dbuf_cookie); return ret; } @@ -358,7 +344,6 @@ int xen_drm_front_page_flip(struct xen_drm_front_info *front_info, if (unlikely(conn_idx >= front_info->num_evt_pairs)) return -EINVAL; - dbuf_flush_fb(&front_info->dbuf_list, fb_cookie); evtchnl = &front_info->evt_pairs[conn_idx].req; mutex_lock(&evtchnl->u.req.req_io_lock); diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index 47ff019d3aef..28bc501af450 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -22,7 +22,6 @@ #include <xen/balloon.h> #include "xen_drm_front.h" -#include "xen_drm_front_shbuf.h" struct xen_gem_object { struct drm_gem_object base; diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c b/drivers/gpu/drm/xen/xen_drm_front_shbuf.c deleted file mode 100644 index d333b67cc1a0..000000000000 --- a/drivers/gpu/drm/xen/xen_drm_front_shbuf.c +++ /dev/null @@ -1,414 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR MIT - -/* - * Xen para-virtual DRM device - * - * Copyright (C) 2016-2018 EPAM Systems Inc. - * - * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com> - */ - -#include <drm/drmP.h> - -#if defined(CONFIG_X86) -#include <drm/drm_cache.h> -#endif -#include <linux/errno.h> -#include <linux/mm.h> - -#include <asm/xen/hypervisor.h> -#include <xen/balloon.h> -#include <xen/xen.h> -#include <xen/xenbus.h> -#include <xen/interface/io/ring.h> -#include <xen/interface/io/displif.h> - -#include "xen_drm_front.h" -#include "xen_drm_front_shbuf.h" - -struct xen_drm_front_shbuf_ops { - /* - * Calculate number of grefs required to handle this buffer, - * e.g. if grefs are required for page directory only or the buffer - * pages as well. - */ - void (*calc_num_grefs)(struct xen_drm_front_shbuf *buf); - /* Fill page directory according to para-virtual display protocol. */ - void (*fill_page_dir)(struct xen_drm_front_shbuf *buf); - /* Claim grant references for the pages of the buffer. */ - int (*grant_refs_for_buffer)(struct xen_drm_front_shbuf *buf, - grant_ref_t *priv_gref_head, int gref_idx); - /* Map grant references of the buffer. */ - int (*map)(struct xen_drm_front_shbuf *buf); - /* Unmap grant references of the buffer. 
*/ - int (*unmap)(struct xen_drm_front_shbuf *buf); -}; - -grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf) -{ - if (!buf->grefs) - return GRANT_INVALID_REF; - - return buf->grefs[0]; -} - -int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf) -{ - if (buf->ops->map) - return buf->ops->map(buf); - - /* no need to map own grant references */ - return 0; -} - -int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf) -{ - if (buf->ops->unmap) - return buf->ops->unmap(buf); - - /* no need to unmap own grant references */ - return 0; -} - -void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf) -{ -#if defined(CONFIG_X86) - drm_clflush_pages(buf->pages, buf->num_pages); -#endif -} - -void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf) -{ - if (buf->grefs) { - int i; - - for (i = 0; i < buf->num_grefs; i++) - if (buf->grefs[i] != GRANT_INVALID_REF) - gnttab_end_foreign_access(buf->grefs[i], - 0, 0UL); - } - kfree(buf->grefs); - kfree(buf->directory); - kfree(buf); -} - -/* - * number of grefs a page can hold with respect to the - * struct xendispl_page_directory header - */ -#define XEN_DRM_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \ - offsetof(struct xendispl_page_directory, gref)) / \ - sizeof(grant_ref_t)) - -static int get_num_pages_dir(struct xen_drm_front_shbuf *buf) -{ - /* number of pages the page directory consumes itself */ - return DIV_ROUND_UP(buf->num_pages, XEN_DRM_NUM_GREFS_PER_PAGE); -} - -static void backend_calc_num_grefs(struct xen_drm_front_shbuf *buf) -{ - /* only for pages the page directory consumes itself */ - buf->num_grefs = get_num_pages_dir(buf); -} - -static void guest_calc_num_grefs(struct xen_drm_front_shbuf *buf) -{ - /* - * number of pages the page directory consumes itself - * plus grefs for the buffer pages - */ - buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages; -} - -#define xen_page_to_vaddr(page) \ - ((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page))) - -static int backend_unmap(struct xen_drm_front_shbuf *buf) -{ - struct gnttab_unmap_grant_ref *unmap_ops; - int i, ret; - - if (!buf->pages || !buf->backend_map_handles || !buf->grefs) - return 0; - - unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops), - GFP_KERNEL); - if (!unmap_ops) { - DRM_ERROR("Failed to get memory while unmapping\n"); - return -ENOMEM; - } - - for (i = 0; i < buf->num_pages; i++) { - phys_addr_t addr; - - addr = xen_page_to_vaddr(buf->pages[i]); - gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map, - buf->backend_map_handles[i]); - } - - ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages, - buf->num_pages); - - for (i = 0; i < buf->num_pages; i++) { - if (unlikely(unmap_ops[i].status != GNTST_okay)) - DRM_ERROR("Failed to unmap page %d: %d\n", - i, unmap_ops[i].status); - } - - if (ret) - DRM_ERROR("Failed to unmap grant references, ret %d", ret); - - kfree(unmap_ops); - kfree(buf->backend_map_handles); - buf->backend_map_handles = NULL; - return ret; -} - -static int backend_map(struct xen_drm_front_shbuf *buf) -{ - struct gnttab_map_grant_ref *map_ops = NULL; - unsigned char *ptr; - int ret, cur_gref, cur_dir_page, cur_page, grefs_left; - - map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL); - if (!map_ops) - return -ENOMEM; - - buf->backend_map_handles = kcalloc(buf->num_pages, - sizeof(*buf->backend_map_handles), - GFP_KERNEL); - if (!buf->backend_map_handles) { - kfree(map_ops); - return -ENOMEM; - } - - /* - * read page directory to get grefs from the backend: for external - * buffer we only 
allocate buf->grefs for the page directory, - * so buf->num_grefs has number of pages in the page directory itself - */ - ptr = buf->directory; - grefs_left = buf->num_pages; - cur_page = 0; - for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) { - struct xendispl_page_directory *page_dir = - (struct xendispl_page_directory *)ptr; - int to_copy = XEN_DRM_NUM_GREFS_PER_PAGE; - - if (to_copy > grefs_left) - to_copy = grefs_left; - - for (cur_gref = 0; cur_gref < to_copy; cur_gref++) { - phys_addr_t addr; - - addr = xen_page_to_vaddr(buf->pages[cur_page]); - gnttab_set_map_op(&map_ops[cur_page], addr, - GNTMAP_host_map, - page_dir->gref[cur_gref], - buf->xb_dev->otherend_id); - cur_page++; - } - - grefs_left -= to_copy; - ptr += PAGE_SIZE; - } - ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages); - - /* save handles even if error, so we can unmap */ - for (cur_page = 0; cur_page < buf->num_pages; cur_page++) { - buf->backend_map_handles[cur_page] = map_ops[cur_page].handle; - if (unlikely(map_ops[cur_page].status != GNTST_okay)) - DRM_ERROR("Failed to map page %d: %d\n", - cur_page, map_ops[cur_page].status); - } - - if (ret) { - DRM_ERROR("Failed to map grant references, ret %d", ret); - backend_unmap(buf); - } - - kfree(map_ops); - return ret; -} - -static void backend_fill_page_dir(struct xen_drm_front_shbuf *buf) -{ - struct xendispl_page_directory *page_dir; - unsigned char *ptr; - int i, num_pages_dir; - - ptr = buf->directory; - num_pages_dir = get_num_pages_dir(buf); - - /* fill only grefs for the page directory itself */ - for (i = 0; i < num_pages_dir - 1; i++) { - page_dir = (struct xendispl_page_directory *)ptr; - - page_dir->gref_dir_next_page = buf->grefs[i + 1]; - ptr += PAGE_SIZE; - } - /* last page must say there is no more pages */ - page_dir = (struct xendispl_page_directory *)ptr; - page_dir->gref_dir_next_page = GRANT_INVALID_REF; -} - -static void guest_fill_page_dir(struct xen_drm_front_shbuf *buf) -{ - unsigned char *ptr; - int cur_gref, grefs_left, to_copy, i, num_pages_dir; - - ptr = buf->directory; - num_pages_dir = get_num_pages_dir(buf); - - /* - * while copying, skip grefs at start, they are for pages - * granted for the page directory itself - */ - cur_gref = num_pages_dir; - grefs_left = buf->num_pages; - for (i = 0; i < num_pages_dir; i++) { - struct xendispl_page_directory *page_dir = - (struct xendispl_page_directory *)ptr; - - if (grefs_left <= XEN_DRM_NUM_GREFS_PER_PAGE) { - to_copy = grefs_left; - page_dir->gref_dir_next_page = GRANT_INVALID_REF; - } else { - to_copy = XEN_DRM_NUM_GREFS_PER_PAGE; - page_dir->gref_dir_next_page = buf->grefs[i + 1]; - } - memcpy(&page_dir->gref, &buf->grefs[cur_gref], - to_copy * sizeof(grant_ref_t)); - ptr += PAGE_SIZE; - grefs_left -= to_copy; - cur_gref += to_copy; - } -} - -static int guest_grant_refs_for_buffer(struct xen_drm_front_shbuf *buf, - grant_ref_t *priv_gref_head, - int gref_idx) -{ - int i, cur_ref, otherend_id; - - otherend_id = buf->xb_dev->otherend_id; - for (i = 0; i < buf->num_pages; i++) { - cur_ref = gnttab_claim_grant_reference(priv_gref_head); - if (cur_ref < 0) - return cur_ref; - - gnttab_grant_foreign_access_ref(cur_ref, otherend_id, - xen_page_to_gfn(buf->pages[i]), - 0); - buf->grefs[gref_idx++] = cur_ref; - } - return 0; -} - -static int grant_references(struct xen_drm_front_shbuf *buf) -{ - grant_ref_t priv_gref_head; - int ret, i, j, cur_ref; - int otherend_id, num_pages_dir; - - ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head); - if (ret < 
0) { - DRM_ERROR("Cannot allocate grant references\n"); - return ret; - } - - otherend_id = buf->xb_dev->otherend_id; - j = 0; - num_pages_dir = get_num_pages_dir(buf); - for (i = 0; i < num_pages_dir; i++) { - unsigned long frame; - - cur_ref = gnttab_claim_grant_reference(&priv_gref_head); - if (cur_ref < 0) - return cur_ref; - - frame = xen_page_to_gfn(virt_to_page(buf->directory + - PAGE_SIZE * i)); - gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0); - buf->grefs[j++] = cur_ref; - } - - if (buf->ops->grant_refs_for_buffer) { - ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j); - if (ret) - return ret; - } - - gnttab_free_grant_references(priv_gref_head); - return 0; -} - -static int alloc_storage(struct xen_drm_front_shbuf *buf) -{ - buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL); - if (!buf->grefs) - return -ENOMEM; - - buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL); - if (!buf->directory) - return -ENOMEM; - - return 0; -} - -/* - * For be allocated buffers we don't need grant_refs_for_buffer as those - * grant references are allocated at backend side - */ -static const struct xen_drm_front_shbuf_ops backend_ops = { - .calc_num_grefs = backend_calc_num_grefs, - .fill_page_dir = backend_fill_page_dir, - .map = backend_map, - .unmap = backend_unmap -}; - -/* For locally granted references we do not need to map/unmap the references */ -static const struct xen_drm_front_shbuf_ops local_ops = { - .calc_num_grefs = guest_calc_num_grefs, - .fill_page_dir = guest_fill_page_dir, - .grant_refs_for_buffer = guest_grant_refs_for_buffer, -}; - -struct xen_drm_front_shbuf * -xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg) -{ - struct xen_drm_front_shbuf *buf; - int ret; - - buf = kzalloc(sizeof(*buf), GFP_KERNEL); - if (!buf) - return ERR_PTR(-ENOMEM); - - if (cfg->be_alloc) - buf->ops = &backend_ops; - else - buf->ops = &local_ops; - - buf->xb_dev = cfg->xb_dev; - buf->num_pages = DIV_ROUND_UP(cfg->size, PAGE_SIZE); - buf->pages = cfg->pages; - - buf->ops->calc_num_grefs(buf); - - ret = alloc_storage(buf); - if (ret) - goto fail; - - ret = grant_references(buf); - if (ret) - goto fail; - - buf->ops->fill_page_dir(buf); - - return buf; - -fail: - xen_drm_front_shbuf_free(buf); - return ERR_PTR(ret); -} diff --git a/drivers/gpu/drm/xen/xen_drm_front_shbuf.h b/drivers/gpu/drm/xen/xen_drm_front_shbuf.h deleted file mode 100644 index 7545c692539e..000000000000 --- a/drivers/gpu/drm/xen/xen_drm_front_shbuf.h +++ /dev/null @@ -1,64 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ - -/* - * Xen para-virtual DRM device - * - * Copyright (C) 2016-2018 EPAM Systems Inc. 
- * - * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com> - */ - -#ifndef __XEN_DRM_FRONT_SHBUF_H_ -#define __XEN_DRM_FRONT_SHBUF_H_ - -#include <linux/kernel.h> -#include <linux/scatterlist.h> - -#include <xen/grant_table.h> - -struct xen_drm_front_shbuf { - /* - * number of references granted for the backend use: - * - for allocated/imported dma-buf's this holds number of grant - * references for the page directory and pages of the buffer - * - for the buffer provided by the backend this holds number of - * grant references for the page directory as grant references for - * the buffer will be provided by the backend - */ - int num_grefs; - grant_ref_t *grefs; - unsigned char *directory; - - int num_pages; - struct page **pages; - - struct xenbus_device *xb_dev; - - /* these are the ops used internally depending on be_alloc mode */ - const struct xen_drm_front_shbuf_ops *ops; - - /* Xen map handles for the buffer allocated by the backend */ - grant_handle_t *backend_map_handles; -}; - -struct xen_drm_front_shbuf_cfg { - struct xenbus_device *xb_dev; - size_t size; - struct page **pages; - bool be_alloc; -}; - -struct xen_drm_front_shbuf * -xen_drm_front_shbuf_alloc(struct xen_drm_front_shbuf_cfg *cfg); - -grant_ref_t xen_drm_front_shbuf_get_dir_start(struct xen_drm_front_shbuf *buf); - -int xen_drm_front_shbuf_map(struct xen_drm_front_shbuf *buf); - -int xen_drm_front_shbuf_unmap(struct xen_drm_front_shbuf *buf); - -void xen_drm_front_shbuf_flush(struct xen_drm_front_shbuf *buf); - -void xen_drm_front_shbuf_free(struct xen_drm_front_shbuf *buf); - -#endif /* __XEN_DRM_FRONT_SHBUF_H_ */
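One etnaviv detail worth noting from the hunks above: with active_fence gone, runtime-PM idle detection asks the GPU scheduler (gpu->sched.hw_rq_count) instead of comparing fence counters, while fence completion keeps the wraparound-safe seqno comparison that the patch moves into etnaviv_gpu.c:

#include <linux/types.h>

/* Wraparound-safe 32-bit seqno comparison, as kept in etnaviv_gpu.c above:
 * the signed difference is correct as long as the two counters are less
 * than 2^31 apart, even across a u32 overflow.
 */
static inline bool fence_after(u32 a, u32 b)
{
	return (s32)(a - b) > 0;
}

/* e.g. fence_after(1, 0xfffffffe) is true: seqno 1 follows the wrapped
 * counter, where a plain "a > b" would get this wrong. */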