Diffstat (limited to 'drivers/gpu/drm/mediatek/mtk_drm_crtc.c')
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 338
1 file changed, 285 insertions(+), 53 deletions(-)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index a9007210dda1..0dfcd1787e65 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -3,14 +3,17 @@
  * Copyright (c) 2015 MediaTek Inc.
  */
 
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/soc/mediatek/mtk-cmdq.h>
+
 #include <asm/barrier.h>
-#include <drm/drmP.h>
+#include <soc/mediatek/smi.h>
+
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_plane_helper.h>
 #include <drm/drm_probe_helper.h>
-#include <linux/clk.h>
-#include <linux/pm_runtime.h>
-#include <soc/mediatek/smi.h>
+#include <drm/drm_vblank.h>
 
 #include "mtk_drm_drv.h"
 #include "mtk_drm_crtc.h"
@@ -40,11 +43,20 @@ struct mtk_drm_crtc {
 	struct drm_plane		*planes;
 	unsigned int			layer_nr;
 	bool				pending_planes;
+	bool				pending_async_planes;
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	struct cmdq_client		*cmdq_client;
+	u32				cmdq_event;
+#endif
 
 	void __iomem			*config_regs;
 	struct mtk_disp_mutex		*mutex;
 	unsigned int			ddp_comp_nr;
 	struct mtk_ddp_comp		**ddp_comp;
+
+	/* lock for display hardware access */
+	struct mutex			hw_lock;
 };
 
 struct mtk_crtc_state {
@@ -205,6 +217,36 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
 		clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
 }
 
+static
+struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc,
+						struct drm_plane *plane,
+						unsigned int *local_layer)
+{
+	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+	struct mtk_ddp_comp *comp;
+	int i, count = 0;
+	unsigned int local_index = plane - mtk_crtc->planes;
+
+	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+		comp = mtk_crtc->ddp_comp[i];
+		if (local_index < (count + mtk_ddp_comp_layer_nr(comp))) {
+			*local_layer = local_index - count;
+			return comp;
+		}
+		count += mtk_ddp_comp_layer_nr(comp);
+	}
+
+	WARN(1, "Failed to find component for plane %d\n", plane->index);
+	return NULL;
+}
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+static void ddp_cmdq_cb(struct cmdq_cb_data data)
+{
+	cmdq_pkt_destroy(data.data);
+}
+#endif
+
 static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
 {
 	struct drm_crtc *crtc = &mtk_crtc->base;
@@ -270,7 +312,10 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
 	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
 		struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i];
 
-		mtk_ddp_comp_config(comp, width, height, vrefresh, bpc);
+		if (i == 1)
+			mtk_ddp_comp_bgclr_in_on(comp);
+
+		mtk_ddp_comp_config(comp, width, height, vrefresh, bpc, NULL);
 		mtk_ddp_comp_start(comp);
 	}
 
@@ -278,10 +323,14 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
 	for (i = 0; i < mtk_crtc->layer_nr; i++) {
 		struct drm_plane *plane = &mtk_crtc->planes[i];
 		struct mtk_plane_state *plane_state;
+		struct mtk_ddp_comp *comp;
+		unsigned int local_layer;
 
 		plane_state = to_mtk_plane_state(plane->state);
-		mtk_ddp_comp_layer_config(mtk_crtc->ddp_comp[0], i,
-					  plane_state);
+		comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
+		if (comp)
+			mtk_ddp_comp_layer_config(comp, local_layer,
+						  plane_state, NULL);
 	}
 
 	return 0;
@@ -296,11 +345,16 @@ err_pm_runtime_put:
 static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
 {
 	struct drm_device *drm = mtk_crtc->base.dev;
+	struct drm_crtc *crtc = &mtk_crtc->base;
 	int i;
 
 	DRM_DEBUG_DRIVER("%s\n", __func__);
-	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
 		mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]);
+		if (i == 1)
+			mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]);
+	}
+
 	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
 		mtk_disp_mutex_remove_comp(mtk_crtc->mutex,
 					   mtk_crtc->ddp_comp[i]->id);
@@ -317,14 +371,23 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc)
 	mtk_disp_mutex_unprepare(mtk_crtc->mutex);
 
 	pm_runtime_put(drm->dev);
+
+	if (crtc->state->event && !crtc->state->active) {
+		spin_lock_irq(&crtc->dev->event_lock);
+		drm_crtc_send_vblank_event(crtc, crtc->state->event);
+		crtc->state->event = NULL;
+		spin_unlock_irq(&crtc->dev->event_lock);
+	}
 }
 
-static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
+static void mtk_crtc_ddp_config(struct drm_crtc *crtc,
+				struct cmdq_pkt *cmdq_handle)
 {
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
 	struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
 	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
 	unsigned int i;
+	unsigned int local_layer;
 
 	/*
 	 * TODO: instead of updating the registers here, we should prepare
@@ -334,7 +397,8 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
 	if (state->pending_config) {
 		mtk_ddp_comp_config(comp, state->pending_width,
 				    state->pending_height,
-				    state->pending_vrefresh, 0);
+				    state->pending_vrefresh, 0,
+				    cmdq_handle);
 
 		state->pending_config = false;
 	}
@@ -346,13 +410,116 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
 
 			plane_state = to_mtk_plane_state(plane->state);
 
-			if (plane_state->pending.config) {
-				mtk_ddp_comp_layer_config(comp, i, plane_state);
-				plane_state->pending.config = false;
-			}
+			if (!plane_state->pending.config)
+				continue;
+
+			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
+							  &local_layer);
+
+			if (comp)
+				mtk_ddp_comp_layer_config(comp, local_layer,
+							  plane_state,
+							  cmdq_handle);
+			plane_state->pending.config = false;
 		}
 		mtk_crtc->pending_planes = false;
 	}
+
+	if (mtk_crtc->pending_async_planes) {
+		for (i = 0; i < mtk_crtc->layer_nr; i++) {
+			struct drm_plane *plane = &mtk_crtc->planes[i];
+			struct mtk_plane_state *plane_state;
+
+			plane_state = to_mtk_plane_state(plane->state);
+
+			if (!plane_state->pending.async_config)
+				continue;
+
+			comp = mtk_drm_ddp_comp_for_plane(crtc, plane,
+							  &local_layer);
+
+			if (comp)
+				mtk_ddp_comp_layer_config(comp, local_layer,
+							  plane_state,
+							  cmdq_handle);
+			plane_state->pending.async_config = false;
+		}
+		mtk_crtc->pending_async_planes = false;
+	}
+}
+
+static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
+{
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	struct cmdq_pkt *cmdq_handle;
+#endif
+	struct drm_crtc *crtc = &mtk_crtc->base;
+	struct mtk_drm_private *priv = crtc->dev->dev_private;
+	unsigned int pending_planes = 0, pending_async_planes = 0;
+	int i;
+
+	mutex_lock(&mtk_crtc->hw_lock);
+	for (i = 0; i < mtk_crtc->layer_nr; i++) {
+		struct drm_plane *plane = &mtk_crtc->planes[i];
+		struct mtk_plane_state *plane_state;
+
+		plane_state = to_mtk_plane_state(plane->state);
+		if (plane_state->pending.dirty) {
+			plane_state->pending.config = true;
+			plane_state->pending.dirty = false;
+			pending_planes |= BIT(i);
+		} else if (plane_state->pending.async_dirty) {
+			plane_state->pending.async_config = true;
+			plane_state->pending.async_dirty = false;
+			pending_async_planes |= BIT(i);
+		}
+	}
+	if (pending_planes)
+		mtk_crtc->pending_planes = true;
+	if (pending_async_planes)
+		mtk_crtc->pending_async_planes = true;
+
+	if (priv->data->shadow_register) {
+		mtk_disp_mutex_acquire(mtk_crtc->mutex);
+		mtk_crtc_ddp_config(crtc, NULL);
+		mtk_disp_mutex_release(mtk_crtc->mutex);
+	}
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	if (mtk_crtc->cmdq_client) {
+		cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
+		cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
+		cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event);
+		mtk_crtc_ddp_config(crtc, cmdq_handle);
+		cmdq_pkt_flush_async(cmdq_handle, ddp_cmdq_cb, cmdq_handle);
+	}
+#endif
+	mutex_unlock(&mtk_crtc->hw_lock);
+}
+
+int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane,
+			     struct mtk_plane_state *state)
+{
+	unsigned int local_layer;
+	struct mtk_ddp_comp *comp;
+
+	comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer);
+	if (comp)
+		return mtk_ddp_comp_layer_check(comp, local_layer, state);
+	return 0;
+}
+
+void mtk_drm_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane,
+			       struct drm_plane_state *new_state)
+{
+	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
+	const struct drm_plane_helper_funcs *plane_helper_funcs =
+			plane->helper_private;
+
+	if (!mtk_crtc->enabled)
+		return;
+
+	plane_helper_funcs->atomic_update(plane, new_state);
+	mtk_drm_crtc_hw_config(mtk_crtc);
 }
 
 static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -402,6 +569,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
 	}
 	mtk_crtc->pending_planes = true;
 
+	mtk_drm_crtc_hw_config(mtk_crtc);
 	/* Wait for planes to be disabled */
 	drm_crtc_wait_one_vblank(crtc);
 
@@ -433,34 +601,16 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
 				      struct drm_crtc_state *old_crtc_state)
 {
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-	struct mtk_drm_private *priv = crtc->dev->dev_private;
-	unsigned int pending_planes = 0;
 	int i;
 
 	if (mtk_crtc->event)
 		mtk_crtc->pending_needs_vblank = true;
-	for (i = 0; i < mtk_crtc->layer_nr; i++) {
-		struct drm_plane *plane = &mtk_crtc->planes[i];
-		struct mtk_plane_state *plane_state;
-
-		plane_state = to_mtk_plane_state(plane->state);
-		if (plane_state->pending.dirty) {
-			plane_state->pending.config = true;
-			plane_state->pending.dirty = false;
-			pending_planes |= BIT(i);
-		}
-	}
-	if (pending_planes)
-		mtk_crtc->pending_planes = true;
 	if (crtc->state->color_mgmt_changed)
-		for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+		for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
 			mtk_ddp_gamma_set(mtk_crtc->ddp_comp[i], crtc->state);
-
-	if (priv->data->shadow_register) {
-		mtk_disp_mutex_acquire(mtk_crtc->mutex);
-		mtk_crtc_ddp_config(crtc);
-		mtk_disp_mutex_release(mtk_crtc->mutex);
-	}
+			mtk_ddp_ctm_set(mtk_crtc->ddp_comp[i], crtc->state);
+		}
+	mtk_drm_crtc_hw_config(mtk_crtc);
 }
 
 static const struct drm_crtc_funcs mtk_crtc_funcs = {
@@ -510,23 +660,80 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
 	struct mtk_drm_private *priv = crtc->dev->dev_private;
 
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	if (!priv->data->shadow_register && !mtk_crtc->cmdq_client)
+#else
 	if (!priv->data->shadow_register)
-		mtk_crtc_ddp_config(crtc);
+#endif
+		mtk_crtc_ddp_config(crtc, NULL);
 
 	mtk_drm_finish_page_flip(mtk_crtc);
 }
 
+static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
+					int comp_idx)
+{
+	struct mtk_ddp_comp *comp;
+
+	if (comp_idx > 1)
+		return 0;
+
+	comp = mtk_crtc->ddp_comp[comp_idx];
+	if (!comp->funcs)
+		return 0;
+
+	if (comp_idx == 1 && !comp->funcs->bgclr_in_on)
+		return 0;
+
+	return mtk_ddp_comp_layer_nr(comp);
+}
+
+static inline
+enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx)
+{
+	if (plane_idx == 0)
+		return DRM_PLANE_TYPE_PRIMARY;
+	else if (plane_idx == 1)
+		return DRM_PLANE_TYPE_CURSOR;
+	else
+		return DRM_PLANE_TYPE_OVERLAY;
+
+}
+
+static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
+					 struct mtk_drm_crtc *mtk_crtc,
+					 int comp_idx, int pipe)
+{
+	int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx);
+	struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx];
+	int i, ret;
+
+	for (i = 0; i < num_planes; i++) {
+		ret = mtk_plane_init(drm_dev,
+				&mtk_crtc->planes[mtk_crtc->layer_nr],
+				BIT(pipe),
+				mtk_drm_crtc_plane_type(mtk_crtc->layer_nr),
+				mtk_ddp_comp_supported_rotations(comp));
+		if (ret)
+			return ret;
+
+		mtk_crtc->layer_nr++;
+	}
+	return 0;
+}
+
 int mtk_drm_crtc_create(struct drm_device *drm_dev,
 			const enum mtk_ddp_comp_id *path, unsigned int path_len)
 {
 	struct mtk_drm_private *priv = drm_dev->dev_private;
 	struct device *dev = drm_dev->dev;
 	struct mtk_drm_crtc *mtk_crtc;
-	enum drm_plane_type type;
-	unsigned int zpos;
+	unsigned int num_comp_planes = 0;
 	int pipe = priv->num_pipes;
 	int ret;
 	int i;
+	bool has_ctm = false;
+	uint gamma_lut_size = 0;
 
 	if (!path)
 		return 0;
@@ -577,19 +784,25 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 		}
 
 		mtk_crtc->ddp_comp[i] = comp;
+
+		if (comp->funcs) {
+			if (comp->funcs->gamma_set)
+				gamma_lut_size = MTK_LUT_SIZE;
+
+			if (comp->funcs->ctm_set)
+				has_ctm = true;
+		}
 	}
 
-	mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
-	mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr,
-					sizeof(struct drm_plane),
-					GFP_KERNEL);
-
-	for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) {
-		type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY :
-				(zpos == 1) ? DRM_PLANE_TYPE_CURSOR :
-						DRM_PLANE_TYPE_OVERLAY;
-		ret = mtk_plane_init(drm_dev, &mtk_crtc->planes[zpos],
-				     BIT(pipe), type);
+	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
+		num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i);
+
+	mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes,
+					sizeof(struct drm_plane), GFP_KERNEL);
+
+	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
+		ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i,
+						    pipe);
 		if (ret)
 			return ret;
 	}
@@ -599,9 +812,28 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 				NULL, pipe);
 	if (ret < 0)
 		return ret;
-	drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
-	drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE);
-	priv->num_pipes++;
 
+	if (gamma_lut_size)
+		drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
+	drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
+	priv->num_pipes++;
+	mutex_init(&mtk_crtc->hw_lock);
+
+#if IS_REACHABLE(CONFIG_MTK_CMDQ)
+	mtk_crtc->cmdq_client =
+			cmdq_mbox_create(dev, drm_crtc_index(&mtk_crtc->base),
+					 2000);
+	if (IS_ERR(mtk_crtc->cmdq_client)) {
+		dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n",
+			drm_crtc_index(&mtk_crtc->base));
+		mtk_crtc->cmdq_client = NULL;
+	}
+	ret = of_property_read_u32_index(dev->of_node, "mediatek,gce-events",
+					 drm_crtc_index(&mtk_crtc->base),
+					 &mtk_crtc->cmdq_event);
+	if (ret)
+		dev_dbg(dev, "mtk_crtc %d failed to get mediatek,gce-events property\n",
+			drm_crtc_index(&mtk_crtc->base));
+#endif
 	return 0;
 }
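
For readers unfamiliar with the GCE/CMDQ mailbox flow this patch introduces, here is a minimal standalone sketch (not part of the patch) that strings together only the calls the diff itself uses from <linux/soc/mediatek/mtk-cmdq.h>; the wrapper function and callback names (example_flush, example_cmdq_cb) are illustrative, not driver API.

#include <linux/mm.h>	/* PAGE_SIZE */
#include <linux/soc/mediatek/mtk-cmdq.h>

static void example_cmdq_cb(struct cmdq_cb_data data)
{
	/* Runs once the GCE thread has executed the packet; free it. */
	cmdq_pkt_destroy(data.data);
}

static void example_flush(struct cmdq_client *cmdq_client, u32 cmdq_event)
{
	struct cmdq_pkt *cmdq_handle;

	/* Allocate a command packet on the mailbox channel. */
	cmdq_handle = cmdq_pkt_create(cmdq_client, PAGE_SIZE);

	/*
	 * Clear any stale event, then make the GCE thread wait for the
	 * next one before executing the commands that follow.
	 */
	cmdq_pkt_clear_event(cmdq_handle, cmdq_event);
	cmdq_pkt_wfe(cmdq_handle, cmdq_event);

	/* ... append register updates here, as mtk_crtc_ddp_config() does ... */

	/* Queue the packet; the callback destroys it on completion. */
	cmdq_pkt_flush_async(cmdq_handle, example_cmdq_cb, cmdq_handle);
}

This mirrors mtk_drm_crtc_hw_config() above: when a cmdq_client mailbox channel exists, register writes are batched into a packet and applied by the GCE hardware on the configured event instead of by the CPU.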