| author | Dave Airlie <airlied@redhat.com> | 2015-04-01 08:21:46 +1000 |
| --- | --- | --- |
| committer | Dave Airlie <airlied@redhat.com> | 2015-04-01 08:21:46 +1000 |
| commit | 9e87e48f8e5de2146842fd0ff436e0256b52c4a9 (patch) | |
| tree | 9ada800ddc51e4250aafacc54ef9fb18abf38e37 | /drivers/gpu |
| parent | d7de390bff7ad0f551fc0e409543e98db86a65df (diff) | |
| parent | 6e0aa8018f9c676b115b7ca6c20a056fc57c68a9 (diff) | |
| download | talos-op-linux-9e87e48f8e5de2146842fd0ff436e0256b52c4a9.tar.gz | talos-op-linux-9e87e48f8e5de2146842fd0ff436e0256b52c4a9.zip |
Merge tag 'drm-intel-next-2015-03-27-merge' of git://anongit.freedesktop.org/drm-intel into drm-next
This backmerges 4.0-rc6 due to the recent fixes in rc5/6
- DP link rate refactoring from Ville
- byt/bsw rps tuning from Chris
- kerneldoc for the shrinker code
- more dynamic ppgtt pte work (Michel, Ben, ...)
- vlv dpll code refactoring to prep for bxt (Imre)
- refactoring the sprite colorkey code (Ville)
- rotated ggtt view support from Tvrtko (see the API sketch after the shortlog)
- roll out struct drm_atomic_state to prep for atomic update (Ander)
* tag 'drm-intel-next-2015-03-27-merge' of git://anongit.freedesktop.org/drm-intel: (473 commits)
Linux 4.0-rc6
arm64: juno: Fix misleading name of UART reference clock
drm/i915: Update DRIVER_DATE to 20150327
drm/i915: Skip allocating shadow batch for 0-length batches
drm/i915: Handle error to get connector state when staging config
drm/i915: Compare GGTT view structs instead of types
drm/i915: fix simple_return.cocci warnings
drm/i915: Add module param to test the load detect code
drm/i915: Remove usage of encoder->new_crtc from clock computations
drm/i915: Don't look at staged config crtc when changing DRRS state
drm/i915: Convert intel_pipe_will_have_type() to using atomic state
drm/i915: Pass an atomic state to modeset_global_resources() functions
drm/i915: Add dynamic page trace events
drm/i915: Finish gen6/7 dynamic page table allocation
drm/i915: Remove unnecessary gen6_ppgtt_unmap_pages
drm/i915: Fix i915_dma_map_single positive error code
drm/i915: Prevent out of range pt in gen6_for_each_pde
drm/i915: fix definition of the DRM_IOCTL_I915_GET_SPRITE_COLORKEY ioctl
drm/i915: Rip out GET_SPRITE_COLORKEY ioctl
watchdog: imgpdc: Fix default heartbeat
...
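As a rough illustration of the rotated GGTT view work and the reworked pin API (both visible in the i915 hunks below), here is a minimal, hypothetical caller sketch. It assumes only the signatures introduced by this merge — i915_gem_object_ggtt_pin(), i915_gem_object_ggtt_unpin_view(), i915_gem_obj_ggtt_offset_view() and the i915_ggtt_view_rotated singleton; pin_rotated_scanout() itself is an invented name, not a function from this series.

```c
/* Hypothetical caller of the GGTT-view API introduced in this merge.
 * Every (object, view) pair now gets its own VMA, so the rotated view
 * is pinned and unpinned independently of the normal one. */
static int pin_rotated_scanout(struct drm_i915_gem_object *obj)
{
	unsigned long ggtt_offset;
	int ret;

	ret = i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_rotated,
				       4096 /* alignment */, PIN_MAPPABLE);
	if (ret)
		return ret;

	/* The display engine would scan out from the rotated mapping. */
	ggtt_offset = i915_gem_obj_ggtt_offset_view(obj,
						    &i915_ggtt_view_rotated);
	(void)ggtt_offset;

	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_rotated);
	return 0;
}
```

Note that i915_gem_object_ggtt_pin() ORs PIN_GLOBAL into the flags internally, so callers only state mappability and alignment; the old _view-suffixed entry points (i915_gem_object_pin_view() and friends) are gone.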
Diffstat (limited to 'drivers/gpu')
52 files changed, 2805 insertions, 1840 deletions
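Before the raw diff, one behavioural change worth calling out: the first amdkfd hunk moves init_sdma_vm() ahead of MQD creation and adds an explicit load_mqd() step with its own unwind path. The sketch below restates that new control flow only; it is not the verbatim patched function, and the mqd parameter stands in for the MQD manager the real code looks up elsewhere.

```c
/* Structural sketch of the reordered SDMA queue setup (first amdkfd
 * hunk below); error handling reduced to the essentials. */
static int create_sdma_queue_sketch(struct device_queue_manager *dqm,
				    struct queue *q,
				    struct qcm_process_device *qpd,
				    struct mqd_manager *mqd)
{
	int retval;

	/* VM state is now programmed before the MQD is initialized,
	 * instead of after, so the MQD sees the right address space. */
	init_sdma_vm(dqm, q, qpd);

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
			       &q->gart_mqd_addr, &q->properties);
	if (retval != 0) {
		deallocate_sdma_queue(dqm, q->sdma_id);
		return retval;
	}

	/* New step: actually load the MQD onto the hardware, unwinding
	 * both the SDMA queue slot and the MQD if that fails. */
	retval = mqd->load_mqd(mqd, q->mqd, 0, 0, NULL);
	if (retval != 0) {
		deallocate_sdma_queue(dqm, q->sdma_id);
		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
		return retval;
	}

	return 0;
}
```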
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index d7174300f501..69af73f15310 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -649,6 +649,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id); pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id); + init_sdma_vm(dqm, q, qpd); retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, &q->gart_mqd_addr, &q->properties); if (retval != 0) { @@ -656,7 +657,14 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, return retval; } - init_sdma_vm(dqm, q, qpd); + retval = mqd->load_mqd(mqd, q->mqd, 0, + 0, NULL); + if (retval != 0) { + deallocate_sdma_queue(dqm, q->sdma_id); + mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); + return retval; + } + return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index e415a2a9207e..c7d298e62c96 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -44,7 +44,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, BUG_ON(!kq || !dev); BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ); - pr_debug("kfd: In func %s initializing queue type %d size %d\n", + pr_debug("amdkfd: In func %s initializing queue type %d size %d\n", __func__, KFD_QUEUE_TYPE_HIQ, queue_size); nop.opcode = IT_NOP; @@ -69,12 +69,16 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off); - if (prop.doorbell_ptr == NULL) + if (prop.doorbell_ptr == NULL) { + pr_err("amdkfd: error init doorbell"); goto err_get_kernel_doorbell; + } retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq); - if (retval != 0) + if (retval != 0) { + pr_err("amdkfd: error init pq queues size (%d)\n", queue_size); goto err_pq_allocate_vidmem; + } kq->pq_kernel_addr = kq->pq->cpu_ptr; kq->pq_gpu_addr = kq->pq->gpu_addr; @@ -165,10 +169,8 @@ err_rptr_allocate_vidmem: err_eop_allocate_vidmem: kfd_gtt_sa_free(dev, kq->pq); err_pq_allocate_vidmem: - pr_err("kfd: error init pq\n"); kfd_release_kernel_doorbell(dev, prop.doorbell_ptr); err_get_kernel_doorbell: - pr_err("kfd: error init doorbell"); return false; } @@ -187,6 +189,8 @@ static void uninitialize(struct kernel_queue *kq) else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ) kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj); + kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj); + kfd_gtt_sa_free(kq->dev, kq->rptr_mem); kfd_gtt_sa_free(kq->dev, kq->wptr_mem); kq->ops_asic_specific.uninitialize(kq); @@ -211,7 +215,7 @@ static int acquire_packet_buffer(struct kernel_queue *kq, queue_address = (unsigned int *)kq->pq_kernel_addr; queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t); - pr_debug("kfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n", + pr_debug("amdkfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n", __func__, rptr, wptr, queue_address); available_size = (rptr - 1 - wptr + queue_size_dwords) % @@ -296,7 +300,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, } if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) { - pr_err("kfd: failed to init kernel queue\n"); + pr_err("amdkfd: failed to init kernel queue\n"); kfree(kq); return 
NULL; } @@ -319,7 +323,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev) BUG_ON(!dev); - pr_err("kfd: starting kernel queue test\n"); + pr_err("amdkfd: starting kernel queue test\n"); kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ); BUG_ON(!kq); @@ -330,7 +334,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev) buffer[i] = kq->nop_packet; kq->ops.submit_packet(kq); - pr_err("kfd: ending kernel queue test\n"); + pr_err("amdkfd: ending kernel queue test\n"); } diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index a5e74612100e..0a6780367d28 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -50,7 +50,7 @@ config DRM_EXYNOS_DSI config DRM_EXYNOS_DP bool "EXYNOS DRM DP driver support" - depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) + depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) default DRM_EXYNOS select DRM_PANEL help diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 63f02e2380ae..970046199608 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -888,8 +888,8 @@ static int decon_probe(struct platform_device *pdev) of_node_put(i80_if_timings); ctx->regs = of_iomap(dev->of_node, 0); - if (IS_ERR(ctx->regs)) { - ret = PTR_ERR(ctx->regs); + if (!ctx->regs) { + ret = -ENOMEM; goto err_del_component; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c deleted file mode 100644 index ba9b3d5ed672..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ /dev/null @@ -1,245 +0,0 @@ -/* - * Copyright (c) 2011 Samsung Electronics Co., Ltd. - * Authors: - * Inki Dae <inki.dae@samsung.com> - * Joonyoung Shim <jy0922.shim@samsung.com> - * Seung-Woo Kim <sw0312.kim@samsung.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include <drm/drmP.h> -#include <drm/drm_crtc_helper.h> - -#include <drm/exynos_drm.h> -#include "exynos_drm_drv.h" -#include "exynos_drm_encoder.h" -#include "exynos_drm_connector.h" - -#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\ - drm_connector) - -struct exynos_drm_connector { - struct drm_connector drm_connector; - uint32_t encoder_id; - struct exynos_drm_display *display; -}; - -static int exynos_drm_connector_get_modes(struct drm_connector *connector) -{ - struct exynos_drm_connector *exynos_connector = - to_exynos_connector(connector); - struct exynos_drm_display *display = exynos_connector->display; - struct edid *edid = NULL; - unsigned int count = 0; - int ret; - - /* - * if get_edid() exists then get_edid() callback of hdmi side - * is called to get edid data through i2c interface else - * get timing from the FIMD driver(display controller). - * - * P.S. in case of lcd panel, count is always 1 if success - * because lcd panel has only one mode. 
- */ - if (display->ops->get_edid) { - edid = display->ops->get_edid(display, connector); - if (IS_ERR_OR_NULL(edid)) { - ret = PTR_ERR(edid); - edid = NULL; - DRM_ERROR("Panel operation get_edid failed %d\n", ret); - goto out; - } - - count = drm_add_edid_modes(connector, edid); - if (!count) { - DRM_ERROR("Add edid modes failed %d\n", count); - goto out; - } - - drm_mode_connector_update_edid_property(connector, edid); - } else { - struct exynos_drm_panel_info *panel; - struct drm_display_mode *mode = drm_mode_create(connector->dev); - if (!mode) { - DRM_ERROR("failed to create a new display mode.\n"); - return 0; - } - - if (display->ops->get_panel) - panel = display->ops->get_panel(display); - else { - drm_mode_destroy(connector->dev, mode); - return 0; - } - - drm_display_mode_from_videomode(&panel->vm, mode); - mode->width_mm = panel->width_mm; - mode->height_mm = panel->height_mm; - connector->display_info.width_mm = mode->width_mm; - connector->display_info.height_mm = mode->height_mm; - - mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - drm_mode_set_name(mode); - drm_mode_probed_add(connector, mode); - - count = 1; - } - -out: - kfree(edid); - return count; -} - -static int exynos_drm_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - struct exynos_drm_connector *exynos_connector = - to_exynos_connector(connector); - struct exynos_drm_display *display = exynos_connector->display; - int ret = MODE_BAD; - - DRM_DEBUG_KMS("%s\n", __FILE__); - - if (display->ops->check_mode) - if (!display->ops->check_mode(display, mode)) - ret = MODE_OK; - - return ret; -} - -static struct drm_encoder *exynos_drm_best_encoder( - struct drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - struct exynos_drm_connector *exynos_connector = - to_exynos_connector(connector); - return drm_encoder_find(dev, exynos_connector->encoder_id); -} - -static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { - .get_modes = exynos_drm_connector_get_modes, - .mode_valid = exynos_drm_connector_mode_valid, - .best_encoder = exynos_drm_best_encoder, -}; - -static int exynos_drm_connector_fill_modes(struct drm_connector *connector, - unsigned int max_width, unsigned int max_height) -{ - struct exynos_drm_connector *exynos_connector = - to_exynos_connector(connector); - struct exynos_drm_display *display = exynos_connector->display; - unsigned int width, height; - - width = max_width; - height = max_height; - - /* - * if specific driver want to find desired_mode using maxmum - * resolution then get max width and height from that driver. - */ - if (display->ops->get_max_resol) - display->ops->get_max_resol(display, &width, &height); - - return drm_helper_probe_single_connector_modes(connector, width, - height); -} - -/* get detection status of display device. 
*/ -static enum drm_connector_status -exynos_drm_connector_detect(struct drm_connector *connector, bool force) -{ - struct exynos_drm_connector *exynos_connector = - to_exynos_connector(connector); - struct exynos_drm_display *display = exynos_connector->display; - enum drm_connector_status status = connector_status_disconnected; - - if (display->ops->is_connected) { - if (display->ops->is_connected(display)) - status = connector_status_connected; - else - status = connector_status_disconnected; - } - - return status; -} - -static void exynos_drm_connector_destroy(struct drm_connector *connector) -{ - struct exynos_drm_connector *exynos_connector = - to_exynos_connector(connector); - - drm_connector_unregister(connector); - drm_connector_cleanup(connector); - kfree(exynos_connector); -} - -static struct drm_connector_funcs exynos_connector_funcs = { - .dpms = drm_helper_connector_dpms, - .fill_modes = exynos_drm_connector_fill_modes, - .detect = exynos_drm_connector_detect, - .destroy = exynos_drm_connector_destroy, -}; - -struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, - struct drm_encoder *encoder) -{ - struct exynos_drm_connector *exynos_connector; - struct exynos_drm_display *display = exynos_drm_get_display(encoder); - struct drm_connector *connector; - int type; - int err; - - exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL); - if (!exynos_connector) - return NULL; - - connector = &exynos_connector->drm_connector; - - switch (display->type) { - case EXYNOS_DISPLAY_TYPE_HDMI: - type = DRM_MODE_CONNECTOR_HDMIA; - connector->interlace_allowed = true; - connector->polled = DRM_CONNECTOR_POLL_HPD; - break; - case EXYNOS_DISPLAY_TYPE_VIDI: - type = DRM_MODE_CONNECTOR_VIRTUAL; - connector->polled = DRM_CONNECTOR_POLL_HPD; - break; - default: - type = DRM_MODE_CONNECTOR_Unknown; - break; - } - - drm_connector_init(dev, connector, &exynos_connector_funcs, type); - drm_connector_helper_add(connector, &exynos_connector_helper_funcs); - - err = drm_connector_register(connector); - if (err) - goto err_connector; - - exynos_connector->encoder_id = encoder->base.id; - exynos_connector->display = display; - connector->dpms = DRM_MODE_DPMS_OFF; - connector->encoder = encoder; - - err = drm_mode_connector_attach_encoder(connector, encoder); - if (err) { - DRM_ERROR("failed to attach a connector to a encoder\n"); - goto err_sysfs; - } - - DRM_DEBUG_KMS("connector has been created\n"); - - return connector; - -err_sysfs: - drm_connector_unregister(connector); -err_connector: - drm_connector_cleanup(connector); - kfree(exynos_connector); - return NULL; -} diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h deleted file mode 100644 index 4eb20d78379a..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright (c) 2011 Samsung Electronics Co., Ltd. - * Authors: - * Inki Dae <inki.dae@samsung.com> - * Joonyoung Shim <jy0922.shim@samsung.com> - * Seung-Woo Kim <sw0312.kim@samsung.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - -#ifndef _EXYNOS_DRM_CONNECTOR_H_ -#define _EXYNOS_DRM_CONNECTOR_H_ - -struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, - struct drm_encoder *encoder); - -#endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 925fc69af1a0..c300e22da8ac 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -284,14 +284,9 @@ static void fimd_clear_channel(struct fimd_context *ctx) } } -static int fimd_ctx_initialize(struct fimd_context *ctx, +static int fimd_iommu_attach_devices(struct fimd_context *ctx, struct drm_device *drm_dev) { - struct exynos_drm_private *priv; - priv = drm_dev->dev_private; - - ctx->drm_dev = drm_dev; - ctx->pipe = priv->pipe++; /* attach this sub driver to iommu mapping if supported. */ if (is_drm_iommu_supported(ctx->drm_dev)) { @@ -313,7 +308,7 @@ static int fimd_ctx_initialize(struct fimd_context *ctx, return 0; } -static void fimd_ctx_remove(struct fimd_context *ctx) +static void fimd_iommu_detach_devices(struct fimd_context *ctx) { /* detach this sub driver from iommu mapping if supported. */ if (is_drm_iommu_supported(ctx->drm_dev)) @@ -1056,25 +1051,23 @@ static int fimd_bind(struct device *dev, struct device *master, void *data) { struct fimd_context *ctx = dev_get_drvdata(dev); struct drm_device *drm_dev = data; + struct exynos_drm_private *priv = drm_dev->dev_private; int ret; - ret = fimd_ctx_initialize(ctx, drm_dev); - if (ret) { - DRM_ERROR("fimd_ctx_initialize failed.\n"); - return ret; - } + ctx->drm_dev = drm_dev; + ctx->pipe = priv->pipe++; ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe, EXYNOS_DISPLAY_TYPE_LCD, &fimd_crtc_ops, ctx); - if (IS_ERR(ctx->crtc)) { - fimd_ctx_remove(ctx); - return PTR_ERR(ctx->crtc); - } if (ctx->display) exynos_drm_create_enc_conn(drm_dev, ctx->display); + ret = fimd_iommu_attach_devices(ctx, drm_dev); + if (ret) + return ret; + return 0; } @@ -1086,10 +1079,10 @@ static void fimd_unbind(struct device *dev, struct device *master, fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF); + fimd_iommu_detach_devices(ctx); + if (ctx->display) exynos_dpi_remove(ctx->display); - - fimd_ctx_remove(ctx); } static const struct component_ops fimd_component_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index a5616872eee7..8ad5b7294eb4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -175,7 +175,7 @@ static int exynos_disable_plane(struct drm_plane *plane) struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc); - if (exynos_crtc->ops->win_disable) + if (exynos_crtc && exynos_crtc->ops->win_disable) exynos_crtc->ops->win_disable(exynos_crtc, exynos_plane->zpos); diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index d3ebaf204408..a69002e2257d 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -28,6 +28,7 @@ i915-y += i915_cmd_parser.o \ i915_gem_execbuffer.o \ i915_gem_gtt.o \ i915_gem.o \ + i915_gem_shrinker.o \ i915_gem_stolen.o \ i915_gem_tiling.o \ i915_gem_userptr.o \ diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 1a52d6ab0f80..007c7d7d8295 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1200,6 +1200,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused) 
seq_printf(m, "Max overclocked frequency: %dMHz\n", intel_gpu_freq(dev_priv, dev_priv->rps.max_freq)); + + seq_printf(m, "Idle freq: %d MHz\n", + intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq)); } else if (IS_VALLEYVIEW(dev)) { u32 freq_sts; @@ -1214,6 +1217,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused) seq_printf(m, "min GPU freq: %d MHz\n", intel_gpu_freq(dev_priv, dev_priv->rps.min_freq)); + seq_printf(m, "idle GPU freq: %d MHz\n", + intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq)); + seq_printf(m, "efficient (RPe) frequency: %d MHz\n", intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq)); @@ -2308,7 +2314,7 @@ static int i915_sink_crc(struct seq_file *m, void *data) u8 crc[6]; drm_modeset_lock_all(dev); - for_each_intel_encoder(dev, connector) { + for_each_intel_connector(dev, connector) { if (connector->base.dpms != DRM_MODE_DPMS_ON) continue; diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index d49ed68f041e..68e0c85a17cf 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1199,7 +1199,7 @@ const struct drm_ioctl_desc i915_ioctls[] = { DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), - DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), + DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED), DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8ba7e1b7b733..e326ac9730cf 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -56,7 +56,7 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20150313" +#define DRIVER_DATE "20150327" #undef WARN_ON /* Many gcc seem to no see through this and fall over :( */ @@ -427,6 +427,8 @@ struct drm_i915_error_state { u32 forcewake; u32 error; /* gen6+ */ u32 err_int; /* gen7 */ + u32 fault_data0; /* gen8, gen9 */ + u32 fault_data1; /* gen8, gen9 */ u32 done_reg; u32 gac_eco; u32 gam_ecochk; @@ -544,7 +546,7 @@ struct drm_i915_display_funcs { * Returns true on success, false on failure. */ bool (*find_dpll)(const struct intel_limit *limit, - struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state, int target, int refclk, struct dpll *match_clock, struct dpll *best_clock); @@ -553,7 +555,7 @@ struct drm_i915_display_funcs { struct drm_crtc *crtc, uint32_t sprite_width, uint32_t sprite_height, int pixel_size, bool enable, bool scaled); - void (*modeset_global_resources)(struct drm_device *dev); + void (*modeset_global_resources)(struct drm_atomic_state *state); /* Returns the active state of the crtc, and if the crtc is active, * fills out the pipe-config with the hw state. 
*/ bool (*get_pipe_config)(struct intel_crtc *, @@ -1025,13 +1027,12 @@ struct intel_gen6_power_mgmt { u8 max_freq_softlimit; /* Max frequency permitted by the driver */ u8 max_freq; /* Maximum frequency, RP0 if not overclocking */ u8 min_freq; /* AKA RPn. Minimum frequency */ + u8 idle_freq; /* Frequency to request when we are idle */ u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ u8 rp1_freq; /* "less than" RP0 power/freqency */ u8 rp0_freq; /* Non-overclocked max frequency. */ u32 cz_freq; - u32 ei_interrupt_count; - int last_adj; enum { LOW_POWER, BETWEEN, HIGH_POWER } power; @@ -2442,7 +2443,6 @@ extern int i915_resume_legacy(struct drm_device *dev); struct i915_params { int modeset; int panel_ignore_lid; - unsigned int powersave; int semaphores; unsigned int lvds_downclock; int lvds_channel_mode; @@ -2462,6 +2462,7 @@ struct i915_params { bool enable_hangcheck; bool fastboot; bool prefault_disable; + bool load_detect_test; bool reset; bool disable_display; bool disable_vtd_wa; @@ -2601,12 +2602,6 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, int i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void i915_gem_load(struct drm_device *dev); -unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, - long target, - unsigned flags); -#define I915_SHRINK_PURGEABLE 0x1 -#define I915_SHRINK_UNBOUND 0x2 -#define I915_SHRINK_BOUND 0x4 void *i915_gem_object_alloc(struct drm_device *dev); void i915_gem_object_free(struct drm_i915_gem_object *obj); void i915_gem_object_init(struct drm_i915_gem_object *obj, @@ -2623,20 +2618,16 @@ void i915_gem_vma_destroy(struct i915_vma *vma); #define PIN_GLOBAL 0x4 #define PIN_OFFSET_BIAS 0x8 #define PIN_OFFSET_MASK (~4095) -int __must_check i915_gem_object_pin_view(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - uint32_t alignment, - uint64_t flags, - const struct i915_ggtt_view *view); -static inline -int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - uint32_t alignment, - uint64_t flags) -{ - return i915_gem_object_pin_view(obj, vm, alignment, flags, - &i915_ggtt_view_normal); -} +int __must_check +i915_gem_object_pin(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + uint32_t alignment, + uint64_t flags); +int __must_check +i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view, + uint32_t alignment, + uint64_t flags); int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags); @@ -2776,8 +2767,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write); int __must_check i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, - struct intel_engine_cs *pipelined); -void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj); + struct intel_engine_cs *pipelined, + const struct i915_ggtt_view *view); +void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view); int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align); int i915_gem_open(struct drm_device *dev, struct drm_file *file); @@ -2800,60 +2793,46 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev, void i915_gem_restore_fences(struct drm_device *dev); -unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o, - struct i915_address_space *vm, - enum i915_ggtt_view_type view); -static inline 
-unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o, - struct i915_address_space *vm) +unsigned long +i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, + const struct i915_ggtt_view *view); +unsigned long +i915_gem_obj_offset(struct drm_i915_gem_object *o, + struct i915_address_space *vm); +static inline unsigned long +i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o) { - return i915_gem_obj_offset_view(o, vm, I915_GGTT_VIEW_NORMAL); + return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal); } + bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o); -bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o, - struct i915_address_space *vm, - enum i915_ggtt_view_type view); -static inline +bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, + const struct i915_ggtt_view *view); bool i915_gem_obj_bound(struct drm_i915_gem_object *o, - struct i915_address_space *vm) -{ - return i915_gem_obj_bound_view(o, vm, I915_GGTT_VIEW_NORMAL); -} + struct i915_address_space *vm); unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, struct i915_address_space *vm); -struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - const struct i915_ggtt_view *view); -static inline -struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, - struct i915_address_space *vm) -{ - return i915_gem_obj_to_vma_view(obj, vm, &i915_ggtt_view_normal); -} - struct i915_vma * -i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - const struct i915_ggtt_view *view); +i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, + struct i915_address_space *vm); +struct i915_vma * +i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view); -static inline struct i915_vma * i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, - struct i915_address_space *vm) -{ - return i915_gem_obj_lookup_or_create_vma_view(obj, vm, - &i915_ggtt_view_normal); -} + struct i915_address_space *vm); +struct i915_vma * +i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view); -struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj); -static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) { - struct i915_vma *vma; - list_for_each_entry(vma, &obj->vma_list, vma_link) - if (vma->pin_count > 0) - return true; - return false; +static inline struct i915_vma * +i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) +{ + return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal); } +bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj); /* Some GGTT VM helpers */ #define i915_obj_to_ggtt(obj) \ @@ -2876,13 +2855,7 @@ i915_vm_to_ppgtt(struct i915_address_space *vm) static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj) { - return i915_gem_obj_bound(obj, i915_obj_to_ggtt(obj)); -} - -static inline unsigned long -i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj) -{ - return i915_gem_obj_offset(obj, i915_obj_to_ggtt(obj)); + return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal); } static inline unsigned long @@ -2906,7 +2879,13 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj) return i915_vma_unbind(i915_gem_obj_to_ggtt(obj)); } -void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj); +void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, + const struct 
i915_ggtt_view *view); +static inline void +i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj) +{ + i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal); +} /* i915_gem_context.c */ int __must_check i915_gem_context_init(struct drm_device *dev); @@ -2978,6 +2957,17 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, u32 gtt_offset, u32 size); +/* i915_gem_shrinker.c */ +unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, + long target, + unsigned flags); +#define I915_SHRINK_PURGEABLE 0x1 +#define I915_SHRINK_UNBOUND 0x2 +#define I915_SHRINK_BOUND 0x4 +unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); +void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); + + /* i915_gem_tiling.c */ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) { diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0fe313d0f609..d07c0b1fb498 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1,5 +1,5 @@ /* - * Copyright © 2008 Intel Corporation + * Copyright © 2008-2015 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -32,7 +32,6 @@ #include "i915_vgpu.h" #include "i915_trace.h" #include "intel_drv.h" -#include <linux/oom.h> #include <linux/shmem_fs.h> #include <linux/slab.h> #include <linux/swap.h> @@ -53,15 +52,6 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj, struct drm_i915_fence_reg *fence, bool enable); -static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker, - struct shrink_control *sc); -static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker, - struct shrink_control *sc); -static int i915_gem_shrinker_oom(struct notifier_block *nb, - unsigned long event, - void *ptr); -static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); - static bool cpu_cache_is_coherent(struct drm_device *dev, enum i915_cache_level level) { @@ -1936,12 +1926,6 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); } -static inline int -i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj) -{ - return obj->madv == I915_MADV_DONTNEED; -} - /* Immediately discard the backing storage */ static void i915_gem_object_truncate(struct drm_i915_gem_object *obj) @@ -2047,85 +2031,6 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj) return 0; } -unsigned long -i915_gem_shrink(struct drm_i915_private *dev_priv, - long target, unsigned flags) -{ - const struct { - struct list_head *list; - unsigned int bit; - } phases[] = { - { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND }, - { &dev_priv->mm.bound_list, I915_SHRINK_BOUND }, - { NULL, 0 }, - }, *phase; - unsigned long count = 0; - - /* - * As we may completely rewrite the (un)bound list whilst unbinding - * (due to retiring requests) we have to strictly process only - * one element of the list at the time, and recheck the list - * on every iteration. - * - * In particular, we must hold a reference whilst removing the - * object as we may end up waiting for and/or retiring the objects. - * This might release the final reference (held by the active list) - * and result in the object being freed from under us. This is - * similar to the precautions the eviction code must take whilst - * removing objects. 
- * - * Also note that although these lists do not hold a reference to - * the object we can safely grab one here: The final object - * unreferencing and the bound_list are both protected by the - * dev->struct_mutex and so we won't ever be able to observe an - * object on the bound_list with a reference count equals 0. - */ - for (phase = phases; phase->list; phase++) { - struct list_head still_in_list; - - if ((flags & phase->bit) == 0) - continue; - - INIT_LIST_HEAD(&still_in_list); - while (count < target && !list_empty(phase->list)) { - struct drm_i915_gem_object *obj; - struct i915_vma *vma, *v; - - obj = list_first_entry(phase->list, - typeof(*obj), global_list); - list_move_tail(&obj->global_list, &still_in_list); - - if (flags & I915_SHRINK_PURGEABLE && - !i915_gem_object_is_purgeable(obj)) - continue; - - drm_gem_object_reference(&obj->base); - - /* For the unbound phase, this should be a no-op! */ - list_for_each_entry_safe(vma, v, - &obj->vma_list, vma_link) - if (i915_vma_unbind(vma)) - break; - - if (i915_gem_object_put_pages(obj) == 0) - count += obj->base.size >> PAGE_SHIFT; - - drm_gem_object_unreference(&obj->base); - } - list_splice(&still_in_list, phase->list); - } - - return count; -} - -static unsigned long -i915_gem_shrink_all(struct drm_i915_private *dev_priv) -{ - i915_gem_evict_everything(dev_priv->dev); - return i915_gem_shrink(dev_priv, LONG_MAX, - I915_SHRINK_BOUND | I915_SHRINK_UNBOUND); -} - static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) { @@ -2755,24 +2660,11 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) WARN_ON(i915_verify_lists(ring->dev)); - /* Move any buffers on the active list that are no longer referenced - * by the ringbuffer to the flushing/inactive lists as appropriate, - * before we free the context associated with the requests. + /* Retire requests first as we use it above for the early return. + * If we retire requests last, we may use a later seqno and so clear + * the requests lists without clearing the active list, leading to + * confusion. */ - while (!list_empty(&ring->active_list)) { - struct drm_i915_gem_object *obj; - - obj = list_first_entry(&ring->active_list, - struct drm_i915_gem_object, - ring_list); - - if (!i915_gem_request_completed(obj->last_read_req, true)) - break; - - i915_gem_object_move_to_inactive(obj); - } - - while (!list_empty(&ring->request_list)) { struct drm_i915_gem_request *request; @@ -2795,6 +2687,23 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) i915_gem_free_request(request); } + /* Move any buffers on the active list that are no longer referenced + * by the ringbuffer to the flushing/inactive lists as appropriate, + * before we free the context associated with the requests. 
+ */ + while (!list_empty(&ring->active_list)) { + struct drm_i915_gem_object *obj; + + obj = list_first_entry(&ring->active_list, + struct drm_i915_gem_object, + ring_list); + + if (!i915_gem_request_completed(obj->last_read_req, true)) + break; + + i915_gem_object_move_to_inactive(obj); + } + if (unlikely(ring->trace_irq_req && i915_gem_request_completed(ring->trace_irq_req, true))) { ring->irq_put(ring); @@ -3518,9 +3427,9 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma, static struct i915_vma * i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, struct i915_address_space *vm, + const struct i915_ggtt_view *ggtt_view, unsigned alignment, - uint64_t flags, - const struct i915_ggtt_view *view) + uint64_t flags) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -3532,6 +3441,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, struct i915_vma *vma; int ret; + if(WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)) + return ERR_PTR(-EINVAL); + fence_size = i915_gem_get_gtt_size(dev, obj->base.size, obj->tiling_mode); @@ -3570,7 +3482,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, i915_gem_object_pin_pages(obj); - vma = i915_gem_obj_lookup_or_create_vma_view(obj, vm, view); + vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) : + i915_gem_obj_lookup_or_create_vma(obj, vm); + if (IS_ERR(vma)) goto err_unpin; @@ -3600,6 +3514,17 @@ search_free: if (ret) goto err_remove_node; + /* allocate before insert / bind */ + if (vma->vm->allocate_va_range) { + trace_i915_va_alloc(vma->vm, vma->node.start, vma->node.size, + VM_TO_TRACE_NAME(vma->vm)); + ret = vma->vm->allocate_va_range(vma->vm, + vma->node.start, + vma->node.size); + if (ret) + goto err_remove_node; + } + trace_i915_vma_bind(vma, flags); ret = i915_vma_bind(vma, obj->cache_level, flags & PIN_GLOBAL ? GLOBAL_BIND : 0); @@ -3952,7 +3877,8 @@ static bool is_pin_display(struct drm_i915_gem_object *obj) int i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, - struct intel_engine_cs *pipelined) + struct intel_engine_cs *pipelined, + const struct i915_ggtt_view *view) { u32 old_read_domains, old_write_domain; bool was_pin_display; @@ -3988,7 +3914,9 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, * (e.g. libkms for the bootup splash), we have to ensure that we * always use map_and_fenceable for all scanout buffers. */ - ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE); + ret = i915_gem_object_ggtt_pin(obj, view, alignment, + view->type == I915_GGTT_VIEW_NORMAL ? 
+ PIN_MAPPABLE : 0); if (ret) goto err_unpin_display; @@ -4016,9 +3944,11 @@ err_unpin_display: } void -i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj) +i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view) { - i915_gem_object_ggtt_unpin(obj); + i915_gem_object_ggtt_unpin_view(obj, view); + obj->pin_display = is_pin_display(obj); } @@ -4167,12 +4097,12 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags) return false; } -int -i915_gem_object_pin_view(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - uint32_t alignment, - uint64_t flags, - const struct i915_ggtt_view *view) +static int +i915_gem_object_do_pin(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *ggtt_view, + uint32_t alignment, + uint64_t flags) { struct drm_i915_private *dev_priv = obj->base.dev->dev_private; struct i915_vma *vma; @@ -4188,17 +4118,29 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj, if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE)) return -EINVAL; - vma = i915_gem_obj_to_vma_view(obj, vm, view); + if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)) + return -EINVAL; + + vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) : + i915_gem_obj_to_vma(obj, vm); + + if (IS_ERR(vma)) + return PTR_ERR(vma); + if (vma) { if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) return -EBUSY; if (i915_vma_misplaced(vma, alignment, flags)) { + unsigned long offset; + offset = ggtt_view ? i915_gem_obj_ggtt_offset_view(obj, ggtt_view) : + i915_gem_obj_offset(obj, vm); WARN(vma->pin_count, - "bo is already pinned with incorrect alignment:" + "bo is already pinned in %s with incorrect alignment:" " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d," " obj->map_and_fenceable=%d\n", - i915_gem_obj_offset_view(obj, vm, view->type), + ggtt_view ? "ggtt" : "ppgtt", + offset, alignment, !!(flags & PIN_MAPPABLE), obj->map_and_fenceable); @@ -4212,8 +4154,12 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj, bound = vma ? vma->bound : 0; if (vma == NULL || !drm_mm_node_allocated(&vma->node)) { - vma = i915_gem_object_bind_to_vm(obj, vm, alignment, - flags, view); + /* In true PPGTT, bind has possibly changed PDEs, which + * means we must do a context switch before the GPU can + * accurately read some of the VMAs. + */ + vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment, + flags); if (IS_ERR(vma)) return PTR_ERR(vma); } @@ -4254,16 +4200,41 @@ i915_gem_object_pin_view(struct drm_i915_gem_object *obj, return 0; } +int +i915_gem_object_pin(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + uint32_t alignment, + uint64_t flags) +{ + return i915_gem_object_do_pin(obj, vm, + i915_is_ggtt(vm) ? 
&i915_ggtt_view_normal : NULL, + alignment, flags); +} + +int +i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view, + uint32_t alignment, + uint64_t flags) +{ + if (WARN_ONCE(!view, "no view specified")) + return -EINVAL; + + return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view, + alignment, flags | PIN_GLOBAL); +} + void -i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj) +i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view) { - struct i915_vma *vma = i915_gem_obj_to_ggtt(obj); + struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view); BUG_ON(!vma); - BUG_ON(vma->pin_count == 0); - BUG_ON(!i915_gem_obj_ggtt_bound(obj)); + WARN_ON(vma->pin_count == 0); + WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view)); - if (--vma->pin_count == 0) + if (--vma->pin_count == 0 && view->type == I915_GGTT_VIEW_NORMAL) obj->pin_mappable = false; } @@ -4384,7 +4355,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, obj->madv = args->madv; /* if the object is no longer attached, discard its backing storage */ - if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL) + if (obj->madv == I915_MADV_DONTNEED && obj->pages == NULL) i915_gem_object_truncate(obj); args->retained = obj->madv != __I915_MADV_PURGED; @@ -4559,15 +4530,33 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj) intel_runtime_pm_put(dev_priv); } -struct i915_vma *i915_gem_obj_to_vma_view(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - const struct i915_ggtt_view *view) +struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, + struct i915_address_space *vm) { struct i915_vma *vma; - list_for_each_entry(vma, &obj->vma_list, vma_link) - if (vma->vm == vm && vma->ggtt_view.type == view->type) + list_for_each_entry(vma, &obj->vma_list, vma_link) { + if (i915_is_ggtt(vma->vm) && + vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) + continue; + if (vma->vm == vm) return vma; + } + return NULL; +} +struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, + const struct i915_ggtt_view *view) +{ + struct i915_address_space *ggtt = i915_obj_to_ggtt(obj); + struct i915_vma *vma; + + if (WARN_ONCE(!view, "no view specified")) + return ERR_PTR(-EINVAL); + + list_for_each_entry(vma, &obj->vma_list, vma_link) + if (vma->vm == ggtt && + i915_ggtt_view_equal(&vma->ggtt_view, view)) + return vma; return NULL; } @@ -5006,13 +4995,7 @@ i915_gem_load(struct drm_device *dev) dev_priv->mm.interruptible = true; - dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan; - dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count; - dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS; - register_shrinker(&dev_priv->mm.shrinker); - - dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; - register_oom_notifier(&dev_priv->mm.oom_notifier); + i915_gem_shrinker_init(dev_priv); i915_gem_batch_pool_init(dev, &dev_priv->mm.batch_pool); @@ -5104,106 +5087,70 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, } } -static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) -{ - if (!mutex_is_locked(mutex)) - return false; - -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) - return mutex->owner == task; -#else - /* Since UP may be pre-empted, we cannot assume that we own the lock */ - return false; -#endif -} - -static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock) +/* All the new VM stuff */ +unsigned long 
+i915_gem_obj_offset(struct drm_i915_gem_object *o, + struct i915_address_space *vm) { - if (!mutex_trylock(&dev->struct_mutex)) { - if (!mutex_is_locked_by(&dev->struct_mutex, current)) - return false; + struct drm_i915_private *dev_priv = o->base.dev->dev_private; + struct i915_vma *vma; - if (to_i915(dev)->mm.shrinker_no_lock_stealing) - return false; + WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); - *unlock = false; - } else - *unlock = true; + list_for_each_entry(vma, &o->vma_list, vma_link) { + if (i915_is_ggtt(vma->vm) && + vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) + continue; + if (vma->vm == vm) + return vma->node.start; + } - return true; + WARN(1, "%s vma for this object not found.\n", + i915_is_ggtt(vm) ? "global" : "ppgtt"); + return -1; } -static int num_vma_bound(struct drm_i915_gem_object *obj) +unsigned long +i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, + const struct i915_ggtt_view *view) { + struct i915_address_space *ggtt = i915_obj_to_ggtt(o); struct i915_vma *vma; - int count = 0; - - list_for_each_entry(vma, &obj->vma_list, vma_link) - if (drm_mm_node_allocated(&vma->node)) - count++; - return count; -} - -static unsigned long -i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) -{ - struct drm_i915_private *dev_priv = - container_of(shrinker, struct drm_i915_private, mm.shrinker); - struct drm_device *dev = dev_priv->dev; - struct drm_i915_gem_object *obj; - unsigned long count; - bool unlock; - - if (!i915_gem_shrinker_lock(dev, &unlock)) - return 0; - - count = 0; - list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) - if (obj->pages_pin_count == 0) - count += obj->base.size >> PAGE_SHIFT; - - list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - if (!i915_gem_obj_is_pinned(obj) && - obj->pages_pin_count == num_vma_bound(obj)) - count += obj->base.size >> PAGE_SHIFT; - } - - if (unlock) - mutex_unlock(&dev->struct_mutex); + list_for_each_entry(vma, &o->vma_list, vma_link) + if (vma->vm == ggtt && + i915_ggtt_view_equal(&vma->ggtt_view, view)) + return vma->node.start; - return count; + WARN(1, "global vma for this object not found.\n"); + return -1; } -/* All the new VM stuff */ -unsigned long i915_gem_obj_offset_view(struct drm_i915_gem_object *o, - struct i915_address_space *vm, - enum i915_ggtt_view_type view) +bool i915_gem_obj_bound(struct drm_i915_gem_object *o, + struct i915_address_space *vm) { - struct drm_i915_private *dev_priv = o->base.dev->dev_private; struct i915_vma *vma; - WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); - list_for_each_entry(vma, &o->vma_list, vma_link) { - if (vma->vm == vm && vma->ggtt_view.type == view) - return vma->node.start; - + if (i915_is_ggtt(vma->vm) && + vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) + continue; + if (vma->vm == vm && drm_mm_node_allocated(&vma->node)) + return true; } - WARN(1, "%s vma for this object not found.\n", - i915_is_ggtt(vm) ? 
"global" : "ppgtt"); - return -1; + + return false; } -bool i915_gem_obj_bound_view(struct drm_i915_gem_object *o, - struct i915_address_space *vm, - enum i915_ggtt_view_type view) +bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, + const struct i915_ggtt_view *view) { + struct i915_address_space *ggtt = i915_obj_to_ggtt(o); struct i915_vma *vma; list_for_each_entry(vma, &o->vma_list, vma_link) - if (vma->vm == vm && - vma->ggtt_view.type == view && + if (vma->vm == ggtt && + i915_ggtt_view_equal(&vma->ggtt_view, view) && drm_mm_node_allocated(&vma->node)) return true; @@ -5231,118 +5178,26 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, BUG_ON(list_empty(&o->vma_list)); - list_for_each_entry(vma, &o->vma_list, vma_link) + list_for_each_entry(vma, &o->vma_list, vma_link) { + if (i915_is_ggtt(vma->vm) && + vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) + continue; if (vma->vm == vm) return vma->node.size; - + } return 0; } -static unsigned long -i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) +bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = - container_of(shrinker, struct drm_i915_private, mm.shrinker); - struct drm_device *dev = dev_priv->dev; - unsigned long freed; - bool unlock; - - if (!i915_gem_shrinker_lock(dev, &unlock)) - return SHRINK_STOP; - - freed = i915_gem_shrink(dev_priv, - sc->nr_to_scan, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_PURGEABLE); - if (freed < sc->nr_to_scan) - freed += i915_gem_shrink(dev_priv, - sc->nr_to_scan - freed, - I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND); - if (unlock) - mutex_unlock(&dev->struct_mutex); - - return freed; -} - -static int -i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) -{ - struct drm_i915_private *dev_priv = - container_of(nb, struct drm_i915_private, mm.oom_notifier); - struct drm_device *dev = dev_priv->dev; - struct drm_i915_gem_object *obj; - unsigned long timeout = msecs_to_jiffies(5000) + 1; - unsigned long pinned, bound, unbound, freed_pages; - bool was_interruptible; - bool unlock; - - while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) { - schedule_timeout_killable(1); - if (fatal_signal_pending(current)) - return NOTIFY_DONE; - } - if (timeout == 0) { - pr_err("Unable to purge GPU memory due lock contention.\n"); - return NOTIFY_DONE; - } - - was_interruptible = dev_priv->mm.interruptible; - dev_priv->mm.interruptible = false; - - freed_pages = i915_gem_shrink_all(dev_priv); - - dev_priv->mm.interruptible = was_interruptible; - - /* Because we may be allocating inside our own driver, we cannot - * assert that there are no objects with pinned pages that are not - * being pointed to by hardware. 
- */ - unbound = bound = pinned = 0; - list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { - if (!obj->base.filp) /* not backed by a freeable object */ - continue; - - if (obj->pages_pin_count) - pinned += obj->base.size; - else - unbound += obj->base.size; - } - list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { - if (!obj->base.filp) + struct i915_vma *vma; + list_for_each_entry(vma, &obj->vma_list, vma_link) { + if (i915_is_ggtt(vma->vm) && + vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) continue; - - if (obj->pages_pin_count) - pinned += obj->base.size; - else - bound += obj->base.size; + if (vma->pin_count > 0) + return true; } - - if (unlock) - mutex_unlock(&dev->struct_mutex); - - if (freed_pages || unbound || bound) - pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n", - freed_pages << PAGE_SHIFT, pinned); - if (unbound || bound) - pr_err("%lu and %lu bytes still available in the " - "bound and unbound GPU page lists.\n", - bound, unbound); - - *(unsigned long *)ptr += freed_pages; - return NOTIFY_DONE; + return false; } -struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj) -{ - struct i915_address_space *ggtt = i915_obj_to_ggtt(obj); - struct i915_vma *vma; - - list_for_each_entry(vma, &obj->vma_list, vma_link) - if (vma->vm == ggtt && - vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) - return vma; - - return NULL; -} diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 70346b0028f9..f3e84c44d009 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c @@ -569,6 +569,66 @@ mi_set_context(struct intel_engine_cs *ring, return ret; } +static inline bool should_skip_switch(struct intel_engine_cs *ring, + struct intel_context *from, + struct intel_context *to) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + + if (to->remap_slice) + return false; + + if (to->ppgtt) { + if (from == to && !test_bit(ring->id, + &to->ppgtt->pd_dirty_rings)) + return true; + } else if (dev_priv->mm.aliasing_ppgtt) { + if (from == to && !test_bit(ring->id, + &dev_priv->mm.aliasing_ppgtt->pd_dirty_rings)) + return true; + } + + return false; +} + +static bool +needs_pd_load_pre(struct intel_engine_cs *ring, struct intel_context *to) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + + if (!to->ppgtt) + return false; + + if (INTEL_INFO(ring->dev)->gen < 8) + return true; + + if (ring != &dev_priv->ring[RCS]) + return true; + + return false; +} + +static bool +needs_pd_load_post(struct intel_engine_cs *ring, struct intel_context *to, + u32 hw_flags) +{ + struct drm_i915_private *dev_priv = ring->dev->dev_private; + + if (!to->ppgtt) + return false; + + if (!IS_GEN8(ring->dev)) + return false; + + if (ring != &dev_priv->ring[RCS]) + return false; + + if (hw_flags & MI_RESTORE_INHIBIT) + return true; + + return false; +} + static int do_switch(struct intel_engine_cs *ring, struct intel_context *to) { @@ -584,7 +644,7 @@ static int do_switch(struct intel_engine_cs *ring, BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state)); } - if (from == to && !to->remap_slice) + if (should_skip_switch(ring, from, to)) return 0; /* Trying to pin first makes error handling easier. 
*/ @@ -602,11 +662,18 @@ static int do_switch(struct intel_engine_cs *ring, */ from = ring->last_context; - if (to->ppgtt) { + if (needs_pd_load_pre(ring, to)) { + /* Older GENs and non render rings still want the load first, + * "PP_DCLV followed by PP_DIR_BASE register through Load + * Register Immediate commands in Ring Buffer before submitting + * a context."*/ trace_switch_mm(ring, to); ret = to->ppgtt->switch_mm(to->ppgtt, ring); if (ret) goto unpin_out; + + /* Doing a PD load always reloads the page dirs */ + clear_bit(ring->id, &to->ppgtt->pd_dirty_rings); } if (ring != &dev_priv->ring[RCS]) { @@ -637,13 +704,41 @@ static int do_switch(struct intel_engine_cs *ring, goto unpin_out; } - if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) + if (!to->legacy_hw_ctx.initialized) { hw_flags |= MI_RESTORE_INHIBIT; + /* NB: If we inhibit the restore, the context is not allowed to + * die because future work may end up depending on valid address + * space. This means we must enforce that a page table load + * occur when this occurs. */ + } else if (to->ppgtt && + test_and_clear_bit(ring->id, &to->ppgtt->pd_dirty_rings)) + hw_flags |= MI_FORCE_RESTORE; + + /* We should never emit switch_mm more than once */ + WARN_ON(needs_pd_load_pre(ring, to) && + needs_pd_load_post(ring, to, hw_flags)); ret = mi_set_context(ring, to, hw_flags); if (ret) goto unpin_out; + /* GEN8 does *not* require an explicit reload if the PDPs have been + * setup, and we do not wish to move them. + */ + if (needs_pd_load_post(ring, to, hw_flags)) { + trace_switch_mm(ring, to); + ret = to->ppgtt->switch_mm(to->ppgtt, ring); + /* The hardware context switch is emitted, but we haven't + * actually changed the state - so it's probably safe to bail + * here. Still, let the user know something dangerous has + * happened. + */ + if (ret) { + DRM_ERROR("Failed to change address space on context switch\n"); + goto unpin_out; + } + } + for (i = 0; i < MAX_L3_SLICES; i++) { if (!(to->remap_slice & (1<<i))) continue; @@ -681,7 +776,7 @@ static int do_switch(struct intel_engine_cs *ring, i915_gem_context_unreference(from); } - uninitialized = !to->legacy_hw_ctx.initialized && from == NULL; + uninitialized = !to->legacy_hw_ctx.initialized; to->legacy_hw_ctx.initialized = true; done: diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index e3a49d94da3a..d09e35ed9c9a 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -63,6 +63,10 @@ mark_free(struct i915_vma *vma, struct list_head *unwind) * * This function is used by the object/vma binding code. * + * Since this function is only used to free up virtual address space it only + * ignores pinned vmas, and not object where the backing storage itself is + * pinned. Hence obj->pages_pin_count does not protect against eviction. + * * To clarify: This is for freeing up virtual address space, not for freeing * memory in e.g. the shrinker. 
*/ diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index dc10bc43864e..a3190e793ed4 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -251,7 +251,6 @@ static inline int use_cpu_reloc(struct drm_i915_gem_object *obj) { return (HAS_LLC(obj->base.dev) || obj->base.write_domain == I915_GEM_DOMAIN_CPU || - !obj->map_and_fenceable || obj->cache_level != I915_CACHE_NONE); } @@ -337,6 +336,51 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj, return 0; } +static void +clflush_write32(void *addr, uint32_t value) +{ + /* This is not a fast path, so KISS. */ + drm_clflush_virt_range(addr, sizeof(uint32_t)); + *(uint32_t *)addr = value; + drm_clflush_virt_range(addr, sizeof(uint32_t)); +} + +static int +relocate_entry_clflush(struct drm_i915_gem_object *obj, + struct drm_i915_gem_relocation_entry *reloc, + uint64_t target_offset) +{ + struct drm_device *dev = obj->base.dev; + uint32_t page_offset = offset_in_page(reloc->offset); + uint64_t delta = (int)reloc->delta + target_offset; + char *vaddr; + int ret; + + ret = i915_gem_object_set_to_gtt_domain(obj, true); + if (ret) + return ret; + + vaddr = kmap_atomic(i915_gem_object_get_page(obj, + reloc->offset >> PAGE_SHIFT)); + clflush_write32(vaddr + page_offset, lower_32_bits(delta)); + + if (INTEL_INFO(dev)->gen >= 8) { + page_offset = offset_in_page(page_offset + sizeof(uint32_t)); + + if (page_offset == 0) { + kunmap_atomic(vaddr); + vaddr = kmap_atomic(i915_gem_object_get_page(obj, + (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT)); + } + + clflush_write32(vaddr + page_offset, upper_32_bits(delta)); + } + + kunmap_atomic(vaddr); + + return 0; +} + static int i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, struct eb_vmas *eb, @@ -426,8 +470,14 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, if (use_cpu_reloc(obj)) ret = relocate_entry_cpu(obj, reloc, target_offset); - else + else if (obj->map_and_fenceable) ret = relocate_entry_gtt(obj, reloc, target_offset); + else if (cpu_has_clflush) + ret = relocate_entry_clflush(obj, reloc, target_offset); + else { + WARN_ONCE(1, "Impossible case in relocation handling\n"); + ret = -ENODEV; + } if (ret) return ret; @@ -525,6 +575,12 @@ i915_gem_execbuffer_relocate(struct eb_vmas *eb) return ret; } +static bool only_mappable_for_reloc(unsigned int flags) +{ + return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) == + __EXEC_OBJECT_NEEDS_MAP; +} + static int i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, struct intel_engine_cs *ring, @@ -536,14 +592,21 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma, int ret; flags = 0; - if (entry->flags & __EXEC_OBJECT_NEEDS_MAP) - flags |= PIN_GLOBAL | PIN_MAPPABLE; - if (entry->flags & EXEC_OBJECT_NEEDS_GTT) - flags |= PIN_GLOBAL; - if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) - flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; + if (!drm_mm_node_allocated(&vma->node)) { + if (entry->flags & __EXEC_OBJECT_NEEDS_MAP) + flags |= PIN_GLOBAL | PIN_MAPPABLE; + if (entry->flags & EXEC_OBJECT_NEEDS_GTT) + flags |= PIN_GLOBAL; + if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) + flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; + } ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags); + if ((ret == -ENOSPC || ret == -E2BIG) && + only_mappable_for_reloc(entry->flags)) + ret = i915_gem_object_pin(obj, vma->vm, + entry->alignment, + flags & ~(PIN_GLOBAL | PIN_MAPPABLE)); if (ret) return ret; @@ 
-605,13 +668,14 @@ eb_vma_misplaced(struct i915_vma *vma) vma->node.start & (entry->alignment - 1)) return true; - if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable) - return true; - if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS && vma->node.start < BATCH_OFFSET_BIAS) return true; + /* avoid costly ping-pong once a batch bo ended up non-mappable */ + if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable) + return !only_mappable_for_reloc(entry->flags); + return false; } @@ -1187,6 +1251,13 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file, if (ret) goto error; + if (ctx->ppgtt) + WARN(ctx->ppgtt->pd_dirty_rings & (1<<ring->id), + "%s didn't clear reload\n", ring->name); + else if (dev_priv->mm.aliasing_ppgtt) + WARN(dev_priv->mm.aliasing_ppgtt->pd_dirty_rings & + (1<<ring->id), "%s didn't clear reload\n", ring->name); + instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK; instp_mask = I915_EXEC_CONSTANTS_MASK; switch (instp_mode) { @@ -1476,7 +1547,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } - if (i915_needs_cmd_parser(ring)) { + if (i915_needs_cmd_parser(ring) && args->batch_len) { batch_obj = i915_gem_execbuffer_parse(ring, &shadow_exec_entry, eb, diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 2034f7cf238b..0239fbff7bf7 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -67,8 +67,9 @@ * i915_ggtt_view_type and struct i915_ggtt_view. * * A new flavour of core GEM functions which work with GGTT bound objects were - * added with the _view suffix. They take the struct i915_ggtt_view parameter - * encapsulating all metadata required to implement a view. + * added with the _ggtt_ infix, and sometimes with _view postfix to avoid + * renaming in large amounts of code. They take the struct i915_ggtt_view + * parameter encapsulating all metadata required to implement a view. * * As a helper for callers which are only interested in the normal view, * globally const i915_ggtt_view_normal singleton instance exists. All old core @@ -92,6 +93,9 @@ */ const struct i915_ggtt_view i915_ggtt_view_normal; +const struct i915_ggtt_view i915_ggtt_view_rotated = { + .type = I915_GGTT_VIEW_ROTATED +}; static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv); static void chv_setup_private_ppat(struct drm_i915_private *dev_priv); @@ -147,11 +151,11 @@ static void ppgtt_bind_vma(struct i915_vma *vma, u32 flags); static void ppgtt_unbind_vma(struct i915_vma *vma); -static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - bool valid) +static inline gen8_pte_t gen8_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + bool valid) { - gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0; + gen8_pte_t pte = valid ? 
_PAGE_PRESENT | _PAGE_RW : 0; pte |= addr; switch (level) { @@ -169,11 +173,11 @@ static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr, return pte; } -static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev, - dma_addr_t addr, - enum i915_cache_level level) +static inline gen8_pde_t gen8_pde_encode(struct drm_device *dev, + dma_addr_t addr, + enum i915_cache_level level) { - gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW; + gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW; pde |= addr; if (level != I915_CACHE_NONE) pde |= PPAT_CACHED_PDE_INDEX; @@ -182,11 +186,11 @@ static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev, return pde; } -static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - bool valid, u32 unused) +static gen6_pte_t snb_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + bool valid, u32 unused) { - gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; + gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= GEN6_PTE_ADDR_ENCODE(addr); switch (level) { @@ -204,11 +208,11 @@ static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr, return pte; } -static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - bool valid, u32 unused) +static gen6_pte_t ivb_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + bool valid, u32 unused) { - gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; + gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= GEN6_PTE_ADDR_ENCODE(addr); switch (level) { @@ -228,11 +232,11 @@ static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr, return pte; } -static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - bool valid, u32 flags) +static gen6_pte_t byt_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + bool valid, u32 flags) { - gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; + gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= GEN6_PTE_ADDR_ENCODE(addr); if (!(flags & PTE_READ_ONLY)) @@ -244,11 +248,11 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr, return pte; } -static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - bool valid, u32 unused) +static gen6_pte_t hsw_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + bool valid, u32 unused) { - gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; + gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= HSW_PTE_ADDR_ENCODE(addr); if (level != I915_CACHE_NONE) @@ -257,11 +261,11 @@ static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr, return pte; } -static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, - enum i915_cache_level level, - bool valid, u32 unused) +static gen6_pte_t iris_pte_encode(dma_addr_t addr, + enum i915_cache_level level, + bool valid, u32 unused) { - gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; + gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0; pte |= HSW_PTE_ADDR_ENCODE(addr); switch (level) { @@ -278,29 +282,91 @@ static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr, return pte; } -static void unmap_and_free_pt(struct i915_page_table_entry *pt, struct drm_device *dev) +#define i915_dma_unmap_single(px, dev) \ + __i915_dma_unmap_single((px)->daddr, dev) + +static inline void __i915_dma_unmap_single(dma_addr_t daddr, + struct drm_device *dev) +{ + struct device *device = &dev->pdev->dev; + + dma_unmap_page(device, daddr, 4096, PCI_DMA_BIDIRECTIONAL); +} + +/** + * i915_dma_map_single() - Create a dma mapping for a page table/dir/etc. 
+ * @px: Page table/dir/etc to get a DMA map for + * @dev: drm device + * + * Page table allocations are unified across all gens. They always require a + * single 4k allocation, as well as a DMA mapping. If we keep the structs + * symmetric here, the simple macro covers us for every page table type. + * + * Return: 0 if success. + */ +#define i915_dma_map_single(px, dev) \ + i915_dma_map_page_single((px)->page, (dev), &(px)->daddr) + +static inline int i915_dma_map_page_single(struct page *page, + struct drm_device *dev, + dma_addr_t *daddr) +{ + struct device *device = &dev->pdev->dev; + + *daddr = dma_map_page(device, page, 0, 4096, PCI_DMA_BIDIRECTIONAL); + if (dma_mapping_error(device, *daddr)) + return -ENOMEM; + + return 0; +} + +static void unmap_and_free_pt(struct i915_page_table_entry *pt, + struct drm_device *dev) { if (WARN_ON(!pt->page)) return; + + i915_dma_unmap_single(pt, dev); __free_page(pt->page); + kfree(pt->used_ptes); kfree(pt); } static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev) { struct i915_page_table_entry *pt; + const size_t count = INTEL_INFO(dev)->gen >= 8 ? + GEN8_PTES : GEN6_PTES; + int ret = -ENOMEM; pt = kzalloc(sizeof(*pt), GFP_KERNEL); if (!pt) return ERR_PTR(-ENOMEM); - pt->page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!pt->page) { - kfree(pt); - return ERR_PTR(-ENOMEM); - } + pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes), + GFP_KERNEL); + + if (!pt->used_ptes) + goto fail_bitmap; + + pt->page = alloc_page(GFP_KERNEL); + if (!pt->page) + goto fail_page; + + ret = i915_dma_map_single(pt, dev); + if (ret) + goto fail_dma; return pt; + +fail_dma: + __free_page(pt->page); +fail_page: + kfree(pt->used_ptes); +fail_bitmap: + kfree(pt); + + return ERR_PTR(ret); } /** @@ -318,12 +384,12 @@ static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev) * Return: 0 if allocation succeeded. */ static int alloc_pt_range(struct i915_page_directory_entry *pd, uint16_t pde, size_t count, - struct drm_device *dev) + struct drm_device *dev) { int i, ret; /* 512 is the max page tables per page_directory on any platform. 
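The alloc_pt_single() above is a textbook instance of the kernel's reverse-order unwind idiom: acquire resources in sequence, and on failure fall through labels that release only what was already acquired. A compact user-space model of the same shape (hypothetical names; calloc stands in for page allocation and the DMA mapping):

#include <stdlib.h>

struct page_table {
	unsigned long *used_ptes;	/* tracking bitmap */
	void *page;			/* backing page */
	void *daddr;			/* stands in for the DMA mapping */
};

static struct page_table *alloc_pt(size_t nbits)
{
	struct page_table *pt = calloc(1, sizeof(*pt));

	if (!pt)
		return NULL;

	pt->used_ptes = calloc((nbits + 63) / 64, sizeof(unsigned long));
	if (!pt->used_ptes)
		goto fail_bitmap;

	pt->page = calloc(1, 4096);
	if (!pt->page)
		goto fail_page;

	pt->daddr = pt->page;	/* in the driver, i915_dma_map_single() can fail */
	if (!pt->daddr)
		goto fail_dma;

	return pt;

fail_dma:
	free(pt->page);		/* release in exact reverse order */
fail_page:
	free(pt->used_ptes);
fail_bitmap:
	free(pt);
	return NULL;
}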
*/ - if (WARN_ON(pde + count > GEN6_PPGTT_PD_ENTRIES)) + if (WARN_ON(pde + count > I915_PDES)) return -EINVAL; for (i = pde; i < pde + count; i++) { @@ -401,7 +467,7 @@ static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt, int i, ret; /* bit of a hack to find the actual last used pd */ - int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE; + int used_pd = ppgtt->num_pd_entries / I915_PDES; for (i = used_pd - 1; i >= 0; i--) { dma_addr_t addr = ppgtt->pdp.page_directory[i]->daddr; @@ -420,7 +486,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm, { struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); - gen8_gtt_pte_t *pt_vaddr, scratch_pte; + gen8_pte_t *pt_vaddr, scratch_pte; unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK; unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK; unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK; @@ -451,8 +517,8 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm, page_table = pt->page; last_pte = pte + num_entries; - if (last_pte > GEN8_PTES_PER_PAGE) - last_pte = GEN8_PTES_PER_PAGE; + if (last_pte > GEN8_PTES) + last_pte = GEN8_PTES; pt_vaddr = kmap_atomic(page_table); @@ -466,7 +532,7 @@ static void gen8_ppgtt_clear_range(struct i915_address_space *vm, kunmap_atomic(pt_vaddr); pte = 0; - if (++pde == GEN8_PDES_PER_PAGE) { + if (++pde == I915_PDES) { pdpe++; pde = 0; } @@ -480,7 +546,7 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, { struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); - gen8_gtt_pte_t *pt_vaddr; + gen8_pte_t *pt_vaddr; unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK; unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK; unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK; @@ -503,12 +569,12 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm, pt_vaddr[pte] = gen8_pte_encode(sg_page_iter_dma_address(&sg_iter), cache_level, true); - if (++pte == GEN8_PTES_PER_PAGE) { + if (++pte == GEN8_PTES) { if (!HAS_LLC(ppgtt->base.dev)) drm_clflush_virt_range(pt_vaddr, PAGE_SIZE); kunmap_atomic(pt_vaddr); pt_vaddr = NULL; - if (++pde == GEN8_PDES_PER_PAGE) { + if (++pde == I915_PDES) { pdpe++; pde = 0; } @@ -529,7 +595,7 @@ static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct d if (!pd->page) return; - for (i = 0; i < GEN8_PDES_PER_PAGE; i++) { + for (i = 0; i < I915_PDES; i++) { if (WARN_ON(!pd->page_table[i])) continue; @@ -565,7 +631,7 @@ static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt) pci_unmap_page(hwdev, ppgtt->pdp.page_directory[i]->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); - for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { + for (j = 0; j < I915_PDES; j++) { struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i]; struct i915_page_table_entry *pt; dma_addr_t addr; @@ -598,7 +664,7 @@ static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt) for (i = 0; i < ppgtt->num_pd_pages; i++) { ret = alloc_pt_range(ppgtt->pdp.page_directory[i], - 0, GEN8_PDES_PER_PAGE, ppgtt->base.dev); + 0, I915_PDES, ppgtt->base.dev); if (ret) goto unwind_out; } @@ -648,7 +714,7 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt, if (ret) goto err_out; - ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE; + ppgtt->num_pd_entries = max_pdp * I915_PDES; return 0; @@ -697,7 +763,7 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt, return 0; } -/** +/* * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP 
registers * with a net effect resembling a 2-level page table in normal x86 terms. Each * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address @@ -710,7 +776,7 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt, static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) { const int max_pdp = DIV_ROUND_UP(size, 1 << 30); - const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp; + const int min_pt_pages = I915_PDES * max_pdp; int i, j, ret; if (size % (1<<30)) @@ -733,7 +799,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) if (ret) goto bail; - for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { + for (j = 0; j < I915_PDES; j++) { ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j); if (ret) goto bail; @@ -750,9 +816,9 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) */ for (i = 0; i < GEN8_LEGACY_PDPES; i++) { struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i]; - gen8_ppgtt_pde_t *pd_vaddr; + gen8_pde_t *pd_vaddr; pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page); - for (j = 0; j < GEN8_PDES_PER_PAGE; j++) { + for (j = 0; j < I915_PDES; j++) { struct i915_page_table_entry *pt = pd->page_table[j]; dma_addr_t addr = pt->daddr; pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr, @@ -770,11 +836,11 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) ppgtt->base.start = 0; /* This is the area that we advertise as usable for the caller */ - ppgtt->base.total = max_pdp * GEN8_PDES_PER_PAGE * GEN8_PTES_PER_PAGE * PAGE_SIZE; + ppgtt->base.total = max_pdp * I915_PDES * GEN8_PTES * PAGE_SIZE; /* Set all ptes to a valid scratch page. Also above requested space */ ppgtt->base.clear_range(&ppgtt->base, 0, - ppgtt->num_pd_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE, + ppgtt->num_pd_pages * GEN8_PTES * PAGE_SIZE, true); DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n", @@ -794,22 +860,22 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) { struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private; struct i915_address_space *vm = &ppgtt->base; - gen6_gtt_pte_t __iomem *pd_addr; - gen6_gtt_pte_t scratch_pte; + gen6_pte_t __iomem *pd_addr; + gen6_pte_t scratch_pte; uint32_t pd_entry; int pte, pde; scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0); - pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + - ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t); + pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm + + ppgtt->pd.pd_offset / sizeof(gen6_pte_t); seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm, ppgtt->pd.pd_offset, ppgtt->pd.pd_offset + ppgtt->num_pd_entries); for (pde = 0; pde < ppgtt->num_pd_entries; pde++) { u32 expected; - gen6_gtt_pte_t *pt_vaddr; + gen6_pte_t *pt_vaddr; dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr; pd_entry = readl(pd_addr + pde); expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID); @@ -822,9 +888,9 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) seq_printf(m, "\tPDE: %x\n", pd_entry); pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page); - for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte+=4) { + for (pte = 0; pte < GEN6_PTES; pte+=4) { unsigned long va = - (pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) + + (pde * PAGE_SIZE * GEN6_PTES) + (pte * PAGE_SIZE); int i; bool found = false; @@ -847,26 +913,36 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m) } } -static void gen6_write_pdes(struct 
i915_hw_ppgtt *ppgtt) +/* Write pde (index) from the page directory @pd to the page table @pt */ +static void gen6_write_pde(struct i915_page_directory_entry *pd, + const int pde, struct i915_page_table_entry *pt) { - struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private; - gen6_gtt_pte_t __iomem *pd_addr; - uint32_t pd_entry; - int i; + /* Caller needs to make sure the write completes if necessary */ + struct i915_hw_ppgtt *ppgtt = + container_of(pd, struct i915_hw_ppgtt, pd); + u32 pd_entry; - WARN_ON(ppgtt->pd.pd_offset & 0x3f); - pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm + - ppgtt->pd.pd_offset / sizeof(gen6_gtt_pte_t); - for (i = 0; i < ppgtt->num_pd_entries; i++) { - dma_addr_t pt_addr; + pd_entry = GEN6_PDE_ADDR_ENCODE(pt->daddr); + pd_entry |= GEN6_PDE_VALID; - pt_addr = ppgtt->pd.page_table[i]->daddr; - pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); - pd_entry |= GEN6_PDE_VALID; + writel(pd_entry, ppgtt->pd_addr + pde); +} - writel(pd_entry, pd_addr + i); - } - readl(pd_addr); +/* Write all the page tables found in the ppgtt structure to incrementing page + * directories. */ +static void gen6_write_page_range(struct drm_i915_private *dev_priv, + struct i915_page_directory_entry *pd, + uint32_t start, uint32_t length) +{ + struct i915_page_table_entry *pt; + uint32_t pde, temp; + + gen6_for_each_pde(pt, pd, start, length, temp, pde) + gen6_write_pde(pd, pde, pt); + + /* Make sure write is complete before other code can use this page + * table. Also require for WC mapped PTEs */ + readl(dev_priv->gtt.gsm); } static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt) @@ -1022,19 +1098,19 @@ static void gen6_ppgtt_clear_range(struct i915_address_space *vm, { struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); - gen6_gtt_pte_t *pt_vaddr, scratch_pte; + gen6_pte_t *pt_vaddr, scratch_pte; unsigned first_entry = start >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT; - unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; - unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; + unsigned act_pt = first_entry / GEN6_PTES; + unsigned first_pte = first_entry % GEN6_PTES; unsigned last_pte, i; scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0); while (num_entries) { last_pte = first_pte + num_entries; - if (last_pte > I915_PPGTT_PT_ENTRIES) - last_pte = I915_PPGTT_PT_ENTRIES; + if (last_pte > GEN6_PTES) + last_pte = GEN6_PTES; pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page); @@ -1056,10 +1132,10 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, { struct i915_hw_ppgtt *ppgtt = container_of(vm, struct i915_hw_ppgtt, base); - gen6_gtt_pte_t *pt_vaddr; + gen6_pte_t *pt_vaddr; unsigned first_entry = start >> PAGE_SHIFT; - unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES; - unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES; + unsigned act_pt = first_entry / GEN6_PTES; + unsigned act_pte = first_entry % GEN6_PTES; struct sg_page_iter sg_iter; pt_vaddr = NULL; @@ -1071,7 +1147,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, vm->pte_encode(sg_page_iter_dma_address(&sg_iter), cache_level, true, flags); - if (++act_pte == I915_PPGTT_PT_ENTRIES) { + if (++act_pte == GEN6_PTES) { kunmap_atomic(pt_vaddr); pt_vaddr = NULL; act_pt++; @@ -1082,23 +1158,133 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, kunmap_atomic(pt_vaddr); } -static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt) +/* PDE TLBs are a pain invalidate pre GEN8. 
It requires a context reload. If we + * are switching between contexts with the same LRCA, we also must do a force + * restore. + */ +static inline void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) { + /* If current vm != vm, */ + ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask; +} + +static void gen6_initialize_pt(struct i915_address_space *vm, + struct i915_page_table_entry *pt) +{ + gen6_pte_t *pt_vaddr, scratch_pte; int i; - for (i = 0; i < ppgtt->num_pd_entries; i++) - pci_unmap_page(ppgtt->base.dev->pdev, - ppgtt->pd.page_table[i]->daddr, - 4096, PCI_DMA_BIDIRECTIONAL); + WARN_ON(vm->scratch.addr == 0); + + scratch_pte = vm->pte_encode(vm->scratch.addr, + I915_CACHE_LLC, true, 0); + + pt_vaddr = kmap_atomic(pt->page); + + for (i = 0; i < GEN6_PTES; i++) + pt_vaddr[i] = scratch_pte; + + kunmap_atomic(pt_vaddr); +} + +static int gen6_alloc_va_range(struct i915_address_space *vm, + uint64_t start, uint64_t length) +{ + DECLARE_BITMAP(new_page_tables, I915_PDES); + struct drm_device *dev = vm->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_hw_ppgtt *ppgtt = + container_of(vm, struct i915_hw_ppgtt, base); + struct i915_page_table_entry *pt; + const uint32_t start_save = start, length_save = length; + uint32_t pde, temp; + int ret; + + WARN_ON(upper_32_bits(start)); + + bitmap_zero(new_page_tables, I915_PDES); + + /* The allocation is done in two stages so that we can bail out with + * minimal amount of pain. The first stage finds new page tables that + * need allocation. The second stage marks use ptes within the page + * tables. + */ + gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) { + if (pt != ppgtt->scratch_pt) { + WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES)); + continue; + } + + /* We've already allocated a page table */ + WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES)); + + pt = alloc_pt_single(dev); + if (IS_ERR(pt)) { + ret = PTR_ERR(pt); + goto unwind_out; + } + + gen6_initialize_pt(vm, pt); + + ppgtt->pd.page_table[pde] = pt; + set_bit(pde, new_page_tables); + trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT); + } + + start = start_save; + length = length_save; + + gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) { + DECLARE_BITMAP(tmp_bitmap, GEN6_PTES); + + bitmap_zero(tmp_bitmap, GEN6_PTES); + bitmap_set(tmp_bitmap, gen6_pte_index(start), + gen6_pte_count(start, length)); + + if (test_and_clear_bit(pde, new_page_tables)) + gen6_write_pde(&ppgtt->pd, pde, pt); + + trace_i915_page_table_entry_map(vm, pde, pt, + gen6_pte_index(start), + gen6_pte_count(start, length), + GEN6_PTES); + bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes, + GEN6_PTES); + } + + WARN_ON(!bitmap_empty(new_page_tables, I915_PDES)); + + /* Make sure write is complete before other code can use this page + * table. 
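The gen6_alloc_va_range() above deliberately splits the work into "allocate missing page tables" and "mark PTE ranges used" so that an allocation failure can unwind before any state has been published. A rough user-space model of the two passes (hypothetical types; a byte array stands in for the used_ptes bitmap):

#include <stdlib.h>

#define PTES_PER_PT	1024		/* GEN6_PTES */
#define NUM_PDES	512		/* I915_PDES */

struct pt { unsigned char used[PTES_PER_PT]; };

static struct pt *pd[NUM_PDES];

static int alloc_va_range(unsigned int first_pte, unsigned int npte)
{
	unsigned int first_pde = first_pte / PTES_PER_PT;
	unsigned int last_pde = (first_pte + npte - 1) / PTES_PER_PT;
	unsigned int pde, i;

	/* Pass 1: allocate every missing page table. Bailing out here needs
	 * no partial-range cleanup because nothing is marked used yet. */
	for (pde = first_pde; pde <= last_pde; pde++) {
		if (pd[pde])
			continue;
		pd[pde] = calloc(1, sizeof(**pd));
		if (!pd[pde])
			return -1;	/* real code frees the pass-1 tables */
	}

	/* Pass 2: mark the PTEs in use, clipped per page table. */
	for (i = first_pte; i < first_pte + npte; i++)
		pd[i / PTES_PER_PT]->used[i % PTES_PER_PT] = 1;

	return 0;
}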
Also require for WC mapped PTEs */ + readl(dev_priv->gtt.gsm); + + mark_tlbs_dirty(ppgtt); + return 0; + +unwind_out: + for_each_set_bit(pde, new_page_tables, I915_PDES) { + struct i915_page_table_entry *pt = ppgtt->pd.page_table[pde]; + + ppgtt->pd.page_table[pde] = ppgtt->scratch_pt; + unmap_and_free_pt(pt, vm->dev); + } + + mark_tlbs_dirty(ppgtt); + return ret; } static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt) { int i; - for (i = 0; i < ppgtt->num_pd_entries; i++) - unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev); + for (i = 0; i < ppgtt->num_pd_entries; i++) { + struct i915_page_table_entry *pt = ppgtt->pd.page_table[i]; + if (pt != ppgtt->scratch_pt) + unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev); + } + + unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev); unmap_and_free_pd(&ppgtt->pd); } @@ -1109,7 +1295,6 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) drm_mm_remove_node(&ppgtt->node); - gen6_ppgtt_unmap_pages(ppgtt); gen6_ppgtt_free(ppgtt); } @@ -1125,6 +1310,12 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) * size. We allocate at the top of the GTT to avoid fragmentation. */ BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm)); + ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev); + if (IS_ERR(ppgtt->scratch_pt)) + return PTR_ERR(ppgtt->scratch_pt); + + gen6_initialize_pt(&ppgtt->base, ppgtt->scratch_pt); + alloc: ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm, &ppgtt->node, GEN6_PD_SIZE, @@ -1138,66 +1329,43 @@ alloc: 0, dev_priv->gtt.base.total, 0); if (ret) - return ret; + goto err_out; retried = true; goto alloc; } if (ret) - return ret; + goto err_out; + if (ppgtt->node.start < dev_priv->gtt.mappable_end) DRM_DEBUG("Forced to use aperture for PDEs\n"); - ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES; + ppgtt->num_pd_entries = I915_PDES; return 0; + +err_out: + unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev); + return ret; } static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt) { - int ret; - - ret = gen6_ppgtt_allocate_page_directories(ppgtt); - if (ret) - return ret; - - ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries, - ppgtt->base.dev); - - if (ret) { - drm_mm_remove_node(&ppgtt->node); - return ret; - } - - return 0; + return gen6_ppgtt_allocate_page_directories(ppgtt); } -static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt) +static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt, + uint64_t start, uint64_t length) { - struct drm_device *dev = ppgtt->base.dev; - int i; - - for (i = 0; i < ppgtt->num_pd_entries; i++) { - struct page *page; - dma_addr_t pt_addr; - - page = ppgtt->pd.page_table[i]->page; - pt_addr = pci_map_page(dev->pdev, page, 0, 4096, - PCI_DMA_BIDIRECTIONAL); - - if (pci_dma_mapping_error(dev->pdev, pt_addr)) { - gen6_ppgtt_unmap_pages(ppgtt); - return -EIO; - } + struct i915_page_table_entry *unused; + uint32_t pde, temp; - ppgtt->pd.page_table[i]->daddr = pt_addr; - } - - return 0; + gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) + ppgtt->pd.page_table[pde] = ppgtt->scratch_pt; } -static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) +static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing) { struct drm_device *dev = ppgtt->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -1220,36 +1388,50 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) if (ret) return ret; - ret = gen6_ppgtt_setup_page_tables(ppgtt); - if (ret) { - gen6_ppgtt_free(ppgtt); - 
return ret; + if (aliasing) { + /* preallocate all pts */ + ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries, + ppgtt->base.dev); + + if (ret) { + gen6_ppgtt_cleanup(&ppgtt->base); + return ret; + } } + ppgtt->base.allocate_va_range = gen6_alloc_va_range; ppgtt->base.clear_range = gen6_ppgtt_clear_range; ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; ppgtt->base.cleanup = gen6_ppgtt_cleanup; ppgtt->base.start = 0; - ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE; + ppgtt->base.total = ppgtt->num_pd_entries * GEN6_PTES * PAGE_SIZE; ppgtt->debug_dump = gen6_dump_ppgtt; ppgtt->pd.pd_offset = - ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t); + ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t); + + ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm + + ppgtt->pd.pd_offset / sizeof(gen6_pte_t); + + if (aliasing) + ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); + else + gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total); - ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); + gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total); DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n", ppgtt->node.size >> 20, ppgtt->node.start / PAGE_SIZE); - gen6_write_pdes(ppgtt); DRM_DEBUG("Adding PPGTT at offset %x\n", ppgtt->pd.pd_offset << 10); return 0; } -static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) +static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt, + bool aliasing) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -1257,7 +1439,7 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) ppgtt->base.scratch = dev_priv->gtt.base.scratch; if (INTEL_INFO(dev)->gen < 8) - return gen6_ppgtt_init(ppgtt); + return gen6_ppgtt_init(ppgtt, aliasing); else return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total); } @@ -1266,7 +1448,7 @@ int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) struct drm_i915_private *dev_priv = dev->dev_private; int ret = 0; - ret = __hw_ppgtt_init(dev, ppgtt); + ret = __hw_ppgtt_init(dev, ppgtt, false); if (ret == 0) { kref_init(&ppgtt->ref); drm_mm_init(&ppgtt->base.mm, ppgtt->base.start, @@ -1513,15 +1695,20 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev) return; } - list_for_each_entry(vm, &dev_priv->vm_list, global_link) { - /* TODO: Perhaps it shouldn't be gen6 specific */ - if (i915_is_ggtt(vm)) { - if (dev_priv->mm.aliasing_ppgtt) - gen6_write_pdes(dev_priv->mm.aliasing_ppgtt); - continue; - } + if (USES_PPGTT(dev)) { + list_for_each_entry(vm, &dev_priv->vm_list, global_link) { + /* TODO: Perhaps it shouldn't be gen6 specific */ - gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base)); + struct i915_hw_ppgtt *ppgtt = + container_of(vm, struct i915_hw_ppgtt, + base); + + if (i915_is_ggtt(vm)) + ppgtt = dev_priv->mm.aliasing_ppgtt; + + gen6_write_page_range(dev_priv, &ppgtt->pd, + 0, ppgtt->base.total); + } } i915_ggtt_flush(dev_priv); @@ -1540,7 +1727,7 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) return 0; } -static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte) +static inline void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) { #ifdef writeq writeq(pte, addr); @@ -1557,8 +1744,8 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, { struct drm_i915_private *dev_priv = vm->dev->dev_private; unsigned first_entry = start >> PAGE_SHIFT; - gen8_gtt_pte_t __iomem 
*gtt_entries = - (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; + gen8_pte_t __iomem *gtt_entries = + (gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; int i = 0; struct sg_page_iter sg_iter; dma_addr_t addr = 0; /* shut up gcc */ @@ -1603,8 +1790,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, { struct drm_i915_private *dev_priv = vm->dev->dev_private; unsigned first_entry = start >> PAGE_SHIFT; - gen6_gtt_pte_t __iomem *gtt_entries = - (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; + gen6_pte_t __iomem *gtt_entries = + (gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; int i = 0; struct sg_page_iter sg_iter; dma_addr_t addr = 0; @@ -1642,8 +1829,8 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm, struct drm_i915_private *dev_priv = vm->dev->dev_private; unsigned first_entry = start >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT; - gen8_gtt_pte_t scratch_pte, __iomem *gtt_base = - (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; + gen8_pte_t scratch_pte, __iomem *gtt_base = + (gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; int i; @@ -1668,8 +1855,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm, struct drm_i915_private *dev_priv = vm->dev->dev_private; unsigned first_entry = start >> PAGE_SHIFT; unsigned num_entries = length >> PAGE_SHIFT; - gen6_gtt_pte_t scratch_pte, __iomem *gtt_base = - (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; + gen6_pte_t scratch_pte, __iomem *gtt_base = + (gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; int i; @@ -1726,11 +1913,15 @@ static void ggtt_bind_vma(struct i915_vma *vma, struct drm_device *dev = vma->vm->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj = vma->obj; + struct sg_table *pages = obj->pages; /* Currently applicable only to VLV */ if (obj->gt_ro) flags |= PTE_READ_ONLY; + if (i915_is_ggtt(vma->vm)) + pages = vma->ggtt_view.pages; + /* If there is no aliasing PPGTT, or the caller needs a global mapping, * or we have a global mapping already but the cacheability flags have * changed, set the global PTEs. 
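The gen8_set_pte() helper above must store a 64-bit PTE into the GGTT aperture; where writeq is unavailable the store is split into two 32-bit writes, a momentarily torn update that is presumably tolerable because the driver only rewrites ranges that are not in concurrent use. A user-space model of the split (hypothetical helpers, volatile standing in for MMIO, little-endian layout assumed as on x86):

#include <stdint.h>
#include <stdio.h>

static void mmio_write32(volatile uint32_t *addr, uint32_t val)
{
	*addr = val;
}

static void set_pte64(volatile uint64_t *addr, uint64_t pte)
{
#ifdef HAVE_WRITEQ
	*addr = pte;				/* single 64-bit store */
#else
	volatile uint32_t *half = (volatile uint32_t *)addr;

	mmio_write32(half, (uint32_t)pte);		/* low dword first */
	mmio_write32(half + 1, (uint32_t)(pte >> 32));	/* then high dword */
#endif
}

int main(void)
{
	uint64_t fake_gtt[4] = { 0 };

	set_pte64((volatile uint64_t *)&fake_gtt[0], 0xabcd0000ull | 1 /* valid */);
	printf("pte[0] = %#llx\n", (unsigned long long)fake_gtt[0]);
	return 0;
}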
@@ -1745,7 +1936,7 @@ static void ggtt_bind_vma(struct i915_vma *vma, if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) { if (!(vma->bound & GLOBAL_BIND) || (cache_level != obj->cache_level)) { - vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages, + vma->vm->insert_entries(vma->vm, pages, vma->node.start, cache_level, flags); vma->bound |= GLOBAL_BIND; @@ -1756,8 +1947,7 @@ static void ggtt_bind_vma(struct i915_vma *vma, (!(vma->bound & LOCAL_BIND) || (cache_level != obj->cache_level))) { struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; - appgtt->base.insert_entries(&appgtt->base, - vma->ggtt_view.pages, + appgtt->base.insert_entries(&appgtt->base, pages, vma->node.start, cache_level, flags); vma->bound |= LOCAL_BIND; @@ -1893,9 +2083,11 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev, if (!ppgtt) return -ENOMEM; - ret = __hw_ppgtt_init(dev, ppgtt); - if (ret != 0) + ret = __hw_ppgtt_init(dev, ppgtt, true); + if (ret) { + kfree(ppgtt); return ret; + } dev_priv->mm.aliasing_ppgtt = ppgtt; } @@ -2181,7 +2373,7 @@ static int gen8_gmch_probe(struct drm_device *dev, gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl); } - *gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT; + *gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT; if (IS_CHERRYVIEW(dev)) chv_setup_private_ppat(dev_priv); @@ -2226,7 +2418,7 @@ static int gen6_gmch_probe(struct drm_device *dev, *stolen = gen6_get_stolen_size(snb_gmch_ctl); gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); - *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT; + *gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT; ret = ggtt_probe_common(dev, gtt_size); @@ -2331,11 +2523,16 @@ int i915_gem_gtt_init(struct drm_device *dev) return 0; } -static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, - const struct i915_ggtt_view *view) +static struct i915_vma * +__i915_gem_vma_create(struct drm_i915_gem_object *obj, + struct i915_address_space *vm, + const struct i915_ggtt_view *ggtt_view) { - struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); + struct i915_vma *vma; + + if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view)) + return ERR_PTR(-EINVAL); + vma = kzalloc(sizeof(*vma), GFP_KERNEL); if (vma == NULL) return ERR_PTR(-ENOMEM); @@ -2344,10 +2541,11 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, INIT_LIST_HEAD(&vma->exec_list); vma->vm = vm; vma->obj = obj; - vma->ggtt_view = *view; if (INTEL_INFO(vm->dev)->gen >= 6) { if (i915_is_ggtt(vm)) { + vma->ggtt_view = *ggtt_view; + vma->unbind_vma = ggtt_unbind_vma; vma->bind_vma = ggtt_bind_vma; } else { @@ -2356,6 +2554,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, } } else { BUG_ON(!i915_is_ggtt(vm)); + vma->ggtt_view = *ggtt_view; vma->unbind_vma = i915_ggtt_unbind_vma; vma->bind_vma = i915_ggtt_bind_vma; } @@ -2368,38 +2567,170 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj, } struct i915_vma * -i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj, - struct i915_address_space *vm, +i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj, + struct i915_address_space *vm) +{ + struct i915_vma *vma; + + vma = i915_gem_obj_to_vma(obj, vm); + if (!vma) + vma = __i915_gem_vma_create(obj, vm, + i915_is_ggtt(vm) ? 
&i915_ggtt_view_normal : NULL); + + return vma; +} + +struct i915_vma * +i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view) { + struct i915_address_space *ggtt = i915_obj_to_ggtt(obj); struct i915_vma *vma; - vma = i915_gem_obj_to_vma_view(obj, vm, view); + if (WARN_ON(!view)) + return ERR_PTR(-EINVAL); + + vma = i915_gem_obj_to_ggtt_view(obj, view); + + if (IS_ERR(vma)) + return vma; + if (!vma) - vma = __i915_gem_vma_create(obj, vm, view); + vma = __i915_gem_vma_create(obj, ggtt, view); return vma; + +} + +static void +rotate_pages(dma_addr_t *in, unsigned int width, unsigned int height, + struct sg_table *st) +{ + unsigned int column, row; + unsigned int src_idx; + struct scatterlist *sg = st->sgl; + + st->nents = 0; + + for (column = 0; column < width; column++) { + src_idx = width * (height - 1) + column; + for (row = 0; row < height; row++) { + st->nents++; + /* We don't need the pages, but need to initialize + * the entries so the sg list can be happily traversed. + * The only thing we need are DMA addresses. + */ + sg_set_page(sg, NULL, PAGE_SIZE, 0); + sg_dma_address(sg) = in[src_idx]; + sg_dma_len(sg) = PAGE_SIZE; + sg = sg_next(sg); + src_idx -= width; + } + } } -static inline -int i915_get_vma_pages(struct i915_vma *vma) +static struct sg_table * +intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view, + struct drm_i915_gem_object *obj) { + struct drm_device *dev = obj->base.dev; + struct intel_rotation_info *rot_info = &ggtt_view->rotation_info; + unsigned long size, pages, rot_pages; + struct sg_page_iter sg_iter; + unsigned long i; + dma_addr_t *page_addr_list; + struct sg_table *st; + unsigned int tile_pitch, tile_height; + unsigned int width_pages, height_pages; + int ret = -ENOMEM; + + pages = obj->base.size / PAGE_SIZE; + + /* Calculate tiling geometry. */ + tile_height = intel_tile_height(dev, rot_info->pixel_format, + rot_info->fb_modifier); + tile_pitch = PAGE_SIZE / tile_height; + width_pages = DIV_ROUND_UP(rot_info->pitch, tile_pitch); + height_pages = DIV_ROUND_UP(rot_info->height, tile_height); + rot_pages = width_pages * height_pages; + size = rot_pages * PAGE_SIZE; + + /* Allocate a temporary list of source pages for random access. */ + page_addr_list = drm_malloc_ab(pages, sizeof(dma_addr_t)); + if (!page_addr_list) + return ERR_PTR(ret); + + /* Allocate target SG list. */ + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + goto err_st_alloc; + + ret = sg_alloc_table(st, rot_pages, GFP_KERNEL); + if (ret) + goto err_sg_alloc; + + /* Populate source page list from the object. */ + i = 0; + for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { + page_addr_list[i] = sg_page_iter_dma_address(&sg_iter); + i++; + } + + /* Rotate the pages. */ + rotate_pages(page_addr_list, width_pages, height_pages, st); + + DRM_DEBUG_KMS( + "Created rotated page mapping for object size %lu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages).\n", + size, rot_info->pitch, rot_info->height, + rot_info->pixel_format, width_pages, height_pages, + rot_pages); + + drm_free_large(page_addr_list); + + return st; + +err_sg_alloc: + kfree(st); +err_st_alloc: + drm_free_large(page_addr_list); + + DRM_DEBUG_KMS( + "Failed to create rotated mapping for object size %lu! 
(%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages)\n", + size, ret, rot_info->pitch, rot_info->height, + rot_info->pixel_format, width_pages, height_pages, + rot_pages); + return ERR_PTR(ret); +} + +static inline int +i915_get_ggtt_vma_pages(struct i915_vma *vma) +{ + int ret = 0; + if (vma->ggtt_view.pages) return 0; if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) vma->ggtt_view.pages = vma->obj->pages; + else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED) + vma->ggtt_view.pages = + intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj); else WARN_ONCE(1, "GGTT view %u not implemented!\n", vma->ggtt_view.type); if (!vma->ggtt_view.pages) { - DRM_ERROR("Failed to get pages for VMA view type %u!\n", + DRM_ERROR("Failed to get pages for GGTT view type %u!\n", vma->ggtt_view.type); - return -EINVAL; + ret = -EINVAL; + } else if (IS_ERR(vma->ggtt_view.pages)) { + ret = PTR_ERR(vma->ggtt_view.pages); + vma->ggtt_view.pages = NULL; + DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n", + vma->ggtt_view.type, ret); } - return 0; + return ret; } /** @@ -2415,10 +2746,12 @@ int i915_get_vma_pages(struct i915_vma *vma) int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags) { - int ret = i915_get_vma_pages(vma); + if (i915_is_ggtt(vma->vm)) { + int ret = i915_get_ggtt_vma_pages(vma); - if (ret) - return ret; + if (ret) + return ret; + } vma->bind_vma(vma, cache_level, flags); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index c9e93f5070bc..fc03c99317c9 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -36,13 +36,13 @@ struct drm_i915_file_private; -typedef uint32_t gen6_gtt_pte_t; -typedef uint64_t gen8_gtt_pte_t; -typedef gen8_gtt_pte_t gen8_ppgtt_pde_t; +typedef uint32_t gen6_pte_t; +typedef uint64_t gen8_pte_t; +typedef uint64_t gen8_pde_t; #define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT) -#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t)) + /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */ #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) #define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) @@ -51,9 +51,16 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t; #define GEN6_PTE_UNCACHED (1 << 1) #define GEN6_PTE_VALID (1 << 0) -#define GEN6_PPGTT_PD_ENTRIES 512 -#define GEN6_PD_SIZE (GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE) +#define I915_PTES(pte_len) (PAGE_SIZE / (pte_len)) +#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1) +#define I915_PDES 512 +#define I915_PDE_MASK (I915_PDES - 1) +#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT)) + +#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t)) +#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE) #define GEN6_PD_ALIGN (PAGE_SIZE * 16) +#define GEN6_PDE_SHIFT 22 #define GEN6_PDE_VALID (1 << 0) #define GEN7_PTE_CACHE_L3_LLC (3 << 1) @@ -89,8 +96,7 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t; #define GEN8_PTE_SHIFT 12 #define GEN8_PTE_MASK 0x1ff #define GEN8_LEGACY_PDPES 4 -#define GEN8_PTES_PER_PAGE (PAGE_SIZE / sizeof(gen8_gtt_pte_t)) -#define GEN8_PDES_PER_PAGE (PAGE_SIZE / sizeof(gen8_ppgtt_pde_t)) +#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t)) #define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD) #define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */ @@ -111,15 +117,28 @@ typedef gen8_gtt_pte_t gen8_ppgtt_pde_t; enum i915_ggtt_view_type { I915_GGTT_VIEW_NORMAL = 0, + I915_GGTT_VIEW_ROTATED +}; + +struct intel_rotation_info { + unsigned int height; + unsigned int 
pitch; + uint32_t pixel_format; + uint64_t fb_modifier; }; struct i915_ggtt_view { enum i915_ggtt_view_type type; struct sg_table *pages; + + union { + struct intel_rotation_info rotation_info; + }; }; extern const struct i915_ggtt_view i915_ggtt_view_normal; +extern const struct i915_ggtt_view i915_ggtt_view_rotated; enum i915_cache_level; @@ -190,6 +209,8 @@ struct i915_vma { struct i915_page_table_entry { struct page *page; dma_addr_t daddr; + + unsigned long *used_ptes; }; struct i915_page_directory_entry { @@ -199,7 +220,7 @@ struct i915_page_directory_entry { dma_addr_t daddr; }; - struct i915_page_table_entry *page_table[GEN6_PPGTT_PD_ENTRIES]; /* PDEs */ + struct i915_page_table_entry *page_table[I915_PDES]; /* PDEs */ }; struct i915_page_directory_pointer_entry { @@ -243,9 +264,12 @@ struct i915_address_space { struct list_head inactive_list; /* FIXME: Need a more generic return type */ - gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, - enum i915_cache_level level, - bool valid, u32 flags); /* Create a valid PTE */ + gen6_pte_t (*pte_encode)(dma_addr_t addr, + enum i915_cache_level level, + bool valid, u32 flags); /* Create a valid PTE */ + int (*allocate_va_range)(struct i915_address_space *vm, + uint64_t start, + uint64_t length); void (*clear_range)(struct i915_address_space *vm, uint64_t start, uint64_t length, @@ -289,6 +313,7 @@ struct i915_hw_ppgtt { struct i915_address_space base; struct kref ref; struct drm_mm_node node; + unsigned long pd_dirty_rings; unsigned num_pd_entries; unsigned num_pd_pages; /* gen8+ */ union { @@ -296,14 +321,82 @@ struct i915_hw_ppgtt { struct i915_page_directory_entry pd; }; + struct i915_page_table_entry *scratch_pt; + struct drm_i915_file_private *file_priv; + gen6_pte_t __iomem *pd_addr; + int (*enable)(struct i915_hw_ppgtt *ppgtt); int (*switch_mm)(struct i915_hw_ppgtt *ppgtt, struct intel_engine_cs *ring); void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); }; +/* For each pde iterates over every pde between from start until start + length. + * If start, and start+length are not perfectly divisible, the macro will round + * down, and up as needed. The macro modifies pde, start, and length. Dev is + * only used to differentiate shift values. Temp is temp. On gen6/7, start = 0, + * and length = 2G effectively iterates over every PDE in the system. + * + * XXX: temp is not actually needed, but it saves doing the ALIGN operation. + */ +#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \ + for (iter = gen6_pde_index(start); \ + pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \ + iter++, \ + temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \ + temp = min_t(unsigned, temp, length), \ + start += temp, length -= temp) + +static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift) +{ + const uint32_t mask = NUM_PTE(pde_shift) - 1; + + return (address >> PAGE_SHIFT) & mask; +} + +/* Helper to counts the number of PTEs within the given length. This count + * does not cross a page table boundary, so the max value would be + * GEN6_PTES for GEN6, and GEN8_PTES for GEN8. 
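Worked numbers for the new macros: a 4KiB page holds 4096/4 = 1024 gen6 PTEs but only 4096/8 = 512 gen8 PTEs, and with GEN6_PDE_SHIFT = 22 one gen6 PDE spans 1024 * 4KiB = 4MiB. The standalone sketch below re-derives the clipping behaviour of i915_pte_count() at such a boundary (shift values hard-coded for gen6):

#include <assert.h>
#include <stdint.h>

#define PDE_SHIFT	22
#define PTES_PER_PDE	(1u << (PDE_SHIFT - 12))	/* 1024 on gen6 */

static uint32_t pte_index(uint64_t addr)
{
	return (addr >> 12) & (PTES_PER_PDE - 1);
}

/* Same logic as i915_pte_count(): never count past a page table boundary. */
static uint32_t pte_count(uint64_t addr, uint64_t length)
{
	const uint64_t mask = ~((1ull << PDE_SHIFT) - 1);
	uint64_t end = addr + length;

	if ((addr & mask) != (end & mask))	/* range crosses a PDE */
		return PTES_PER_PDE - pte_index(addr);

	return pte_index(end) - pte_index(addr);
}

int main(void)
{
	assert(pte_count(0, 4096) == 1);
	assert(pte_count(0, 4u << 20) == 1024);	/* exactly one full PDE */
	/* 8KiB straddling a 4MiB boundary clips to the first page table: */
	assert(pte_count((4u << 20) - 4096, 8192) == 1);
	return 0;
}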
+*/ +static inline uint32_t i915_pte_count(uint64_t addr, size_t length, + uint32_t pde_shift) +{ + const uint64_t mask = ~((1 << pde_shift) - 1); + uint64_t end; + + WARN_ON(length == 0); + WARN_ON(offset_in_page(addr|length)); + + end = addr + length; + + if ((addr & mask) != (end & mask)) + return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift); + + return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift); +} + +static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift) +{ + return (addr >> shift) & I915_PDE_MASK; +} + +static inline uint32_t gen6_pte_index(uint32_t addr) +{ + return i915_pte_index(addr, GEN6_PDE_SHIFT); +} + +static inline size_t gen6_pte_count(uint32_t addr, uint32_t length) +{ + return i915_pte_count(addr, length, GEN6_PDE_SHIFT); +} + +static inline uint32_t gen6_pde_index(uint32_t addr) +{ + return i915_pde_index(addr, GEN6_PDE_SHIFT); +} + int i915_gem_gtt_init(struct drm_device *dev); void i915_gem_init_global_gtt(struct drm_device *dev); void i915_global_gtt_cleanup(struct drm_device *dev); @@ -332,4 +425,14 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev); int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj); void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); +static inline bool +i915_ggtt_view_equal(const struct i915_ggtt_view *a, + const struct i915_ggtt_view *b) +{ + if (WARN_ON(!a || !b)) + return false; + + return a->type == b->type; +} + #endif diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c new file mode 100644 index 000000000000..f7929e769250 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -0,0 +1,335 @@ +/* + * Copyright © 2008-2015 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ * + */ + +#include <linux/oom.h> +#include <linux/shmem_fs.h> +#include <linux/slab.h> +#include <linux/swap.h> +#include <linux/pci.h> +#include <linux/dma-buf.h> +#include <drm/drmP.h> +#include <drm/i915_drm.h> + +#include "i915_drv.h" +#include "i915_trace.h" + +static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) +{ + if (!mutex_is_locked(mutex)) + return false; + +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) + return mutex->owner == task; +#else + /* Since UP may be pre-empted, we cannot assume that we own the lock */ + return false; +#endif +} + +/** + * i915_gem_shrink - Shrink buffer object caches + * @dev_priv: i915 device + * @target: amount of memory to make available, in pages + * @flags: control flags for selecting cache types + * + * This function is the main interface to the shrinker. It will try to release + * up to @target pages of main memory backing storage from buffer objects. + * Selection of the specific caches can be done with @flags. This is e.g. useful + * when purgeable objects should be removed from caches preferentially. + * + * Note that it's not guaranteed that released amount is actually available as + * free system memory - the pages might still be in-used to due to other reasons + * (like cpu mmaps) or the mm core has reused them before we could grab them. + * Therefore code that needs to explicitly shrink buffer objects caches (e.g. to + * avoid deadlocks in memory reclaim) must fall back to i915_gem_shrink_all(). + * + * Also note that any kind of pinning (both per-vma address space pins and + * backing storage pins at the buffer object level) result in the shrinker code + * having to skip the object. + * + * Returns: + * The number of pages of backing storage actually released. + */ +unsigned long +i915_gem_shrink(struct drm_i915_private *dev_priv, + long target, unsigned flags) +{ + const struct { + struct list_head *list; + unsigned int bit; + } phases[] = { + { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND }, + { &dev_priv->mm.bound_list, I915_SHRINK_BOUND }, + { NULL, 0 }, + }, *phase; + unsigned long count = 0; + + /* + * As we may completely rewrite the (un)bound list whilst unbinding + * (due to retiring requests) we have to strictly process only + * one element of the list at the time, and recheck the list + * on every iteration. + * + * In particular, we must hold a reference whilst removing the + * object as we may end up waiting for and/or retiring the objects. + * This might release the final reference (held by the active list) + * and result in the object being freed from under us. This is + * similar to the precautions the eviction code must take whilst + * removing objects. + * + * Also note that although these lists do not hold a reference to + * the object we can safely grab one here: The final object + * unreferencing and the bound_list are both protected by the + * dev->struct_mutex and so we won't ever be able to observe an + * object on the bound_list with a reference count equals 0. 
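The comment above explains why i915_gem_shrink() parks each object on a private still_in_list while it works: the (un)bound lists can be rewritten underneath it as requests retire, so only the list head is ever trusted, and everything processed is spliced back afterwards. The same walk in miniature, on a toy singly linked list (user-space sketch):

#include <stdio.h>

struct node {
	int pages;
	struct node *next;
};

static unsigned long shrink_list(struct node **src, unsigned long target)
{
	struct node *still = NULL, **tail = &still;
	unsigned long freed = 0;

	while (freed < target && *src) {
		struct node *obj = *src;

		/* Detach the head and park it on the private list, like
		 * list_move_tail(&obj->global_list, &still_in_list). */
		*src = obj->next;
		obj->next = NULL;
		*tail = obj;
		tail = &obj->next;

		freed += obj->pages;	/* stands in for put_pages() */
	}

	*tail = *src;	/* list_splice(&still_in_list, phase->list) */
	*src = still;
	return freed;
}

int main(void)
{
	struct node c = { 4, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *list = &a;

	printf("freed %lu pages\n", shrink_list(&list, 3));	/* frees a + b */
	return 0;
}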
+ */ + for (phase = phases; phase->list; phase++) { + struct list_head still_in_list; + + if ((flags & phase->bit) == 0) + continue; + + INIT_LIST_HEAD(&still_in_list); + while (count < target && !list_empty(phase->list)) { + struct drm_i915_gem_object *obj; + struct i915_vma *vma, *v; + + obj = list_first_entry(phase->list, + typeof(*obj), global_list); + list_move_tail(&obj->global_list, &still_in_list); + + if (flags & I915_SHRINK_PURGEABLE && + obj->madv != I915_MADV_DONTNEED) + continue; + + drm_gem_object_reference(&obj->base); + + /* For the unbound phase, this should be a no-op! */ + list_for_each_entry_safe(vma, v, + &obj->vma_list, vma_link) + if (i915_vma_unbind(vma)) + break; + + if (i915_gem_object_put_pages(obj) == 0) + count += obj->base.size >> PAGE_SHIFT; + + drm_gem_object_unreference(&obj->base); + } + list_splice(&still_in_list, phase->list); + } + + return count; +} + +/** + * i915_gem_shrink - Shrink buffer object caches completely + * @dev_priv: i915 device + * + * This is a simple wraper around i915_gem_shrink() to aggressively shrink all + * caches completely. It also first waits for and retires all outstanding + * requests to also be able to release backing storage for active objects. + * + * This should only be used in code to intentionally quiescent the gpu or as a + * last-ditch effort when memory seems to have run out. + * + * Returns: + * The number of pages of backing storage actually released. + */ +unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv) +{ + i915_gem_evict_everything(dev_priv->dev); + return i915_gem_shrink(dev_priv, LONG_MAX, + I915_SHRINK_BOUND | I915_SHRINK_UNBOUND); +} + +static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock) +{ + if (!mutex_trylock(&dev->struct_mutex)) { + if (!mutex_is_locked_by(&dev->struct_mutex, current)) + return false; + + if (to_i915(dev)->mm.shrinker_no_lock_stealing) + return false; + + *unlock = false; + } else + *unlock = true; + + return true; +} + +static int num_vma_bound(struct drm_i915_gem_object *obj) +{ + struct i915_vma *vma; + int count = 0; + + list_for_each_entry(vma, &obj->vma_list, vma_link) + if (drm_mm_node_allocated(&vma->node)) + count++; + + return count; +} + +static unsigned long +i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) +{ + struct drm_i915_private *dev_priv = + container_of(shrinker, struct drm_i915_private, mm.shrinker); + struct drm_device *dev = dev_priv->dev; + struct drm_i915_gem_object *obj; + unsigned long count; + bool unlock; + + if (!i915_gem_shrinker_lock(dev, &unlock)) + return 0; + + count = 0; + list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) + if (obj->pages_pin_count == 0) + count += obj->base.size >> PAGE_SHIFT; + + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { + if (!i915_gem_obj_is_pinned(obj) && + obj->pages_pin_count == num_vma_bound(obj)) + count += obj->base.size >> PAGE_SHIFT; + } + + if (unlock) + mutex_unlock(&dev->struct_mutex); + + return count; +} + +static unsigned long +i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) +{ + struct drm_i915_private *dev_priv = + container_of(shrinker, struct drm_i915_private, mm.shrinker); + struct drm_device *dev = dev_priv->dev; + unsigned long freed; + bool unlock; + + if (!i915_gem_shrinker_lock(dev, &unlock)) + return SHRINK_STOP; + + freed = i915_gem_shrink(dev_priv, + sc->nr_to_scan, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND | + I915_SHRINK_PURGEABLE); + if (freed < sc->nr_to_scan) + 
freed += i915_gem_shrink(dev_priv, + sc->nr_to_scan - freed, + I915_SHRINK_BOUND | + I915_SHRINK_UNBOUND); + if (unlock) + mutex_unlock(&dev->struct_mutex); + + return freed; +} + +static int +i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) +{ + struct drm_i915_private *dev_priv = + container_of(nb, struct drm_i915_private, mm.oom_notifier); + struct drm_device *dev = dev_priv->dev; + struct drm_i915_gem_object *obj; + unsigned long timeout = msecs_to_jiffies(5000) + 1; + unsigned long pinned, bound, unbound, freed_pages; + bool was_interruptible; + bool unlock; + + while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) { + schedule_timeout_killable(1); + if (fatal_signal_pending(current)) + return NOTIFY_DONE; + } + if (timeout == 0) { + pr_err("Unable to purge GPU memory due lock contention.\n"); + return NOTIFY_DONE; + } + + was_interruptible = dev_priv->mm.interruptible; + dev_priv->mm.interruptible = false; + + freed_pages = i915_gem_shrink_all(dev_priv); + + dev_priv->mm.interruptible = was_interruptible; + + /* Because we may be allocating inside our own driver, we cannot + * assert that there are no objects with pinned pages that are not + * being pointed to by hardware. + */ + unbound = bound = pinned = 0; + list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { + if (!obj->base.filp) /* not backed by a freeable object */ + continue; + + if (obj->pages_pin_count) + pinned += obj->base.size; + else + unbound += obj->base.size; + } + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { + if (!obj->base.filp) + continue; + + if (obj->pages_pin_count) + pinned += obj->base.size; + else + bound += obj->base.size; + } + + if (unlock) + mutex_unlock(&dev->struct_mutex); + + if (freed_pages || unbound || bound) + pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n", + freed_pages << PAGE_SHIFT, pinned); + if (unbound || bound) + pr_err("%lu and %lu bytes still available in the " + "bound and unbound GPU page lists.\n", + bound, unbound); + + *(unsigned long *)ptr += freed_pages; + return NOTIFY_DONE; +} + +/** + * i915_gem_shrinker_init - Initialize i915 shrinker + * @dev_priv: i915 device + * + * This function registers and sets up the i915 shrinker and OOM handler. 
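The i915_gem_shrinker_scan() above is a two-pass policy: first try to satisfy the request from purgeable (madvised-DONTNEED) buffers alone, and only fall back to evicting pages someone may still want if that was not enough. A condensed user-space model of that preference (hypothetical types):

#include <stdbool.h>
#include <stdio.h>

struct buf { bool purgeable; unsigned long pages; bool freed; };

static unsigned long shrink(struct buf *bufs, int n, unsigned long target,
			    bool purgeable_only)
{
	unsigned long freed = 0;
	int i;

	for (i = 0; i < n && freed < target; i++) {
		if (bufs[i].freed || (purgeable_only && !bufs[i].purgeable))
			continue;
		bufs[i].freed = true;
		freed += bufs[i].pages;
	}
	return freed;
}

static unsigned long scan(struct buf *bufs, int n, unsigned long target)
{
	/* Pass 1: purgeable objects only, mirroring I915_SHRINK_PURGEABLE. */
	unsigned long freed = shrink(bufs, n, target, true);

	/* Pass 2: fall back to everything else if that was not enough. */
	if (freed < target)
		freed += shrink(bufs, n, target - freed, false);
	return freed;
}

int main(void)
{
	struct buf bufs[] = { { false, 8, false }, { true, 2, false } };

	printf("freed %lu pages\n", scan(bufs, 2, 6));	/* 2 purgeable + 8 */
	return 0;
}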
+ */ +void i915_gem_shrinker_init(struct drm_i915_private *dev_priv) +{ + dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan; + dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count; + dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS; + register_shrinker(&dev_priv->mm.shrinker); + + dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom; + register_oom_notifier(&dev_priv->mm.oom_notifier); +} diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index a982849a5edd..1d4e60df8883 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -386,6 +386,11 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m, if (INTEL_INFO(dev)->gen >= 6) { err_printf(m, "ERROR: 0x%08x\n", error->error); + + if (INTEL_INFO(dev)->gen >= 8) + err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n", + error->fault_data1, error->fault_data0); + err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); } @@ -555,7 +560,14 @@ static void i915_error_state_free(struct kref *error_ref) } i915_error_object_free(error->semaphore_obj); + + for (i = 0; i < error->vm_count; i++) + kfree(error->active_bo[i]); + kfree(error->active_bo); + kfree(error->active_bo_count); + kfree(error->pinned_bo); + kfree(error->pinned_bo_count); kfree(error->overlay); kfree(error->display); kfree(error); @@ -1164,6 +1176,11 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv, if (IS_GEN7(dev)) error->err_int = I915_READ(GEN7_ERR_INT); + if (INTEL_INFO(dev)->gen >= 8) { + error->fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0); + error->fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1); + } + if (IS_GEN6(dev)) { error->forcewake = I915_READ(FORCEWAKE); error->gab_ctl = I915_READ(GAB_CTL); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 49ad5fb82ace..14ecb4d13a1a 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -277,6 +277,7 @@ void gen6_reset_rps_interrupts(struct drm_device *dev) I915_WRITE(reg, dev_priv->pm_rps_events); I915_WRITE(reg, dev_priv->pm_rps_events); POSTING_READ(reg); + dev_priv->rps.pm_iir = 0; spin_unlock_irq(&dev_priv->irq_lock); } @@ -330,12 +331,10 @@ void gen6_disable_rps_interrupts(struct drm_device *dev) __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) & ~dev_priv->pm_rps_events); - I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events); - I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events); - - dev_priv->rps.pm_iir = 0; spin_unlock_irq(&dev_priv->irq_lock); + + synchronize_irq(dev->irq); } /** @@ -997,129 +996,73 @@ static void notify_ring(struct drm_device *dev, wake_up_all(&ring->irq_queue); } -static u32 vlv_c0_residency(struct drm_i915_private *dev_priv, - struct intel_rps_ei *rps_ei) +static void vlv_c0_read(struct drm_i915_private *dev_priv, + struct intel_rps_ei *ei) { - u32 cz_ts, cz_freq_khz; - u32 render_count, media_count; - u32 elapsed_render, elapsed_media, elapsed_time; - u32 residency = 0; - - cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP); - cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4); - - render_count = I915_READ(VLV_RENDER_C0_COUNT_REG); - media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG); - - if (rps_ei->cz_clock == 0) { - rps_ei->cz_clock = cz_ts; - rps_ei->render_c0 = render_count; - rps_ei->media_c0 = media_count; - - return dev_priv->rps.cur_freq; - } - - elapsed_time = cz_ts - rps_ei->cz_clock; - 
rps_ei->cz_clock = cz_ts; + ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP); + ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); + ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); +} - elapsed_render = render_count - rps_ei->render_c0; - rps_ei->render_c0 = render_count; +static bool vlv_c0_above(struct drm_i915_private *dev_priv, + const struct intel_rps_ei *old, + const struct intel_rps_ei *now, + int threshold) +{ + u64 time, c0; - elapsed_media = media_count - rps_ei->media_c0; - rps_ei->media_c0 = media_count; + if (old->cz_clock == 0) + return false; - /* Convert all the counters into common unit of milli sec */ - elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC; - elapsed_render /= cz_freq_khz; - elapsed_media /= cz_freq_khz; + time = now->cz_clock - old->cz_clock; + time *= threshold * dev_priv->mem_freq; - /* - * Calculate overall C0 residency percentage - * only if elapsed time is non zero + /* Workload can be split between render + media, e.g. SwapBuffers + * being blitted in X after being rendered in mesa. To account for + * this we need to combine both engines into our activity counter. */ - if (elapsed_time) { - residency = - ((max(elapsed_render, elapsed_media) * 100) - / elapsed_time); - } + c0 = now->render_c0 - old->render_c0; + c0 += now->media_c0 - old->media_c0; + c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000; - return residency; + return c0 >= time; } -/** - * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU - * busy-ness calculated from C0 counters of render & media power wells - * @dev_priv: DRM device private - * - */ -static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv) +void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) { - u32 residency_C0_up = 0, residency_C0_down = 0; - int new_delay, adj; - - dev_priv->rps.ei_interrupt_count++; - - WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); - - - if (dev_priv->rps.up_ei.cz_clock == 0) { - vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei); - vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei); - return dev_priv->rps.cur_freq; - } + vlv_c0_read(dev_priv, &dev_priv->rps.down_ei); + dev_priv->rps.up_ei = dev_priv->rps.down_ei; +} +static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) +{ + struct intel_rps_ei now; + u32 events = 0; - /* - * To down throttle, C0 residency should be less than down threshold - * for continous EI intervals. So calculate down EI counters - * once in VLV_INT_COUNT_FOR_DOWN_EI - */ - if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) { + if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0) + return 0; - dev_priv->rps.ei_interrupt_count = 0; + vlv_c0_read(dev_priv, &now); + if (now.cz_clock == 0) + return 0; - residency_C0_down = vlv_c0_residency(dev_priv, - &dev_priv->rps.down_ei); - } else { - residency_C0_up = vlv_c0_residency(dev_priv, - &dev_priv->rps.up_ei); + if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) { + if (!vlv_c0_above(dev_priv, + &dev_priv->rps.down_ei, &now, + VLV_RP_DOWN_EI_THRESHOLD)) + events |= GEN6_PM_RP_DOWN_THRESHOLD; + dev_priv->rps.down_ei = now; } - new_delay = dev_priv->rps.cur_freq; - - adj = dev_priv->rps.last_adj; - /* C0 residency is greater than UP threshold. 
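The rewritten residency check avoids the per-interrupt divisions of the old vlv_c0_residency(): rather than computing a busy percentage and comparing it with the threshold, vlv_c0_above() cross-multiplies so both sides end up in the same units. Stripped of the unit-conversion constants, the comparison reduces to the following sketch, where dc0 and dt are the C0-counter and timestamp deltas:

/* busy% >= threshold  <=>  100 * dc0 >= threshold * dt,
 * evaluated with multiplies only, as in vlv_c0_above() above. */
static int c0_above(unsigned long long dc0, unsigned long long dt,
		    unsigned int threshold)
{
	return 100 * dc0 >= (unsigned long long)threshold * dt;
}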
Increase Frequency */ - if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) { - if (adj > 0) - adj *= 2; - else - adj = 1; - - if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit) - new_delay = dev_priv->rps.cur_freq + adj; - - /* - * For better performance, jump directly - * to RPe if we're below it. - */ - if (new_delay < dev_priv->rps.efficient_freq) - new_delay = dev_priv->rps.efficient_freq; - - } else if (!dev_priv->rps.ei_interrupt_count && - (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) { - if (adj < 0) - adj *= 2; - else - adj = -1; - /* - * This means, C0 residency is less than down threshold over - * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq - */ - if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit) - new_delay = dev_priv->rps.cur_freq + adj; + if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { + if (vlv_c0_above(dev_priv, + &dev_priv->rps.up_ei, &now, + VLV_RP_UP_EI_THRESHOLD)) + events |= GEN6_PM_RP_UP_THRESHOLD; + dev_priv->rps.up_ei = now; } - return new_delay; + return events; } static void gen6_pm_rps_work(struct work_struct *work) @@ -1149,6 +1092,8 @@ static void gen6_pm_rps_work(struct work_struct *work) mutex_lock(&dev_priv->rps.hw_lock); + pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); + adj = dev_priv->rps.last_adj; if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { if (adj > 0) @@ -1171,8 +1116,6 @@ static void gen6_pm_rps_work(struct work_struct *work) else new_delay = dev_priv->rps.min_freq_softlimit; adj = 0; - } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) { - new_delay = vlv_calc_delay_from_C0_counters(dev_priv); } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { if (adj < 0) adj *= 2; @@ -4299,7 +4242,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv) /* Let's track the enabled rps events */ if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) /* WaGsvRC0ResidencyMethod:vlv */ - dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; + dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED; else dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index e2d20ffe6586..bb64415a1c3e 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -27,7 +27,6 @@ struct i915_params i915 __read_mostly = { .modeset = -1, .panel_ignore_lid = 1, - .powersave = 1, .semaphores = -1, .lvds_downclock = 0, .lvds_channel_mode = 0, @@ -44,6 +43,7 @@ struct i915_params i915 __read_mostly = { .enable_ips = 1, .fastboot = 0, .prefault_disable = 0, + .load_detect_test = 0, .reset = true, .invert_brightness = 0, .disable_display = 0, @@ -65,10 +65,6 @@ MODULE_PARM_DESC(panel_ignore_lid, "Override lid status (0=autodetect, 1=autodetect disabled [default], " "-1=force lid closed, -2=force lid open)"); -module_param_named(powersave, i915.powersave, int, 0600); -MODULE_PARM_DESC(powersave, - "Enable powersavings, fbc, downclocking, etc. (default: true)"); - module_param_named_unsafe(semaphores, i915.semaphores, int, 0400); MODULE_PARM_DESC(semaphores, "Use semaphores for inter-ring sync " @@ -144,11 +140,16 @@ module_param_named(fastboot, i915.fastboot, bool, 0600); MODULE_PARM_DESC(fastboot, "Try to skip unnecessary mode sets at boot time (default: false)"); -module_param_named(prefault_disable, i915.prefault_disable, bool, 0600); +module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600); MODULE_PARM_DESC(prefault_disable, "Disable page prefaulting for pread/pwrite/reloc (default:false). 
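gen6_pm_rps_work() keeps the existing stepping behaviour: while consecutive interrupts pull the frequency in the same direction the adjustment doubles, and it resets to a single step on a reversal. A standalone restatement of that rule (up is nonzero for an up-threshold event):

static int next_rps_adj(int last_adj, int up)
{
	if (up)
		return last_adj > 0 ? last_adj * 2 : 1;

	return last_adj < 0 ? last_adj * 2 : -1;
}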
" "For developers only."); +module_param_named_unsafe(load_detect_test, i915.load_detect_test, bool, 0600); +MODULE_PARM_DESC(load_detect_test, + "Force-enable the VGA load detect code for testing (default:false). " + "For developers only."); + module_param_named(invert_brightness, i915.invert_brightness, int, 0600); MODULE_PARM_DESC(invert_brightness, "Invert backlight brightness " diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index cc8ebabc488d..b522eb6e59a4 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -673,7 +673,6 @@ enum skl_disp_power_wells { #define VLV_CZ_CLOCK_TO_MILLI_SEC 100000 #define VLV_RP_UP_EI_THRESHOLD 90 #define VLV_RP_DOWN_EI_THRESHOLD 70 -#define VLV_INT_COUNT_FOR_DOWN_EI 5 /* vlv2 north clock has */ #define CCK_FUSE_REG 0x8 @@ -1307,6 +1306,9 @@ enum skl_disp_power_wells { #define ERR_INT_FIFO_UNDERRUN_A (1<<0) #define ERR_INT_FIFO_UNDERRUN(pipe) (1<<(pipe*3)) +#define GEN8_FAULT_TLB_DATA0 0x04b10 +#define GEN8_FAULT_TLB_DATA1 0x04b14 + #define FPGA_DBG 0x42300 #define FPGA_DBG_RM_NOCLAIM (1<<31) @@ -6220,8 +6222,8 @@ enum skl_disp_power_wells { #define GEN6_GT_GFX_RC6p 0x13810C #define GEN6_GT_GFX_RC6pp 0x138110 -#define VLV_RENDER_C0_COUNT_REG 0x138118 -#define VLV_MEDIA_C0_COUNT_REG 0x13811C +#define VLV_RENDER_C0_COUNT 0x138118 +#define VLV_MEDIA_C0_COUNT 0x13811C #define GEN6_PCODE_MAILBOX 0x138124 #define GEN6_PCODE_READY (1<<31) diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index f004d3d89b87..b3070a4501ab 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -156,6 +156,105 @@ TRACE_EVENT(i915_vma_unbind, __entry->obj, __entry->offset, __entry->size, __entry->vm) ); +#define VM_TO_TRACE_NAME(vm) \ + (i915_is_ggtt(vm) ? "G" : \ + "P") + +DECLARE_EVENT_CLASS(i915_va, + TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name), + TP_ARGS(vm, start, length, name), + + TP_STRUCT__entry( + __field(struct i915_address_space *, vm) + __field(u64, start) + __field(u64, end) + __string(name, name) + ), + + TP_fast_assign( + __entry->vm = vm; + __entry->start = start; + __entry->end = start + length - 1; + __assign_str(name, name); + ), + + TP_printk("vm=%p (%s), 0x%llx-0x%llx", + __entry->vm, __get_str(name), __entry->start, __entry->end) +); + +DEFINE_EVENT(i915_va, i915_va_alloc, + TP_PROTO(struct i915_address_space *vm, u64 start, u64 length, const char *name), + TP_ARGS(vm, start, length, name) +); + +DECLARE_EVENT_CLASS(i915_page_table_entry, + TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift), + TP_ARGS(vm, pde, start, pde_shift), + + TP_STRUCT__entry( + __field(struct i915_address_space *, vm) + __field(u32, pde) + __field(u64, start) + __field(u64, end) + ), + + TP_fast_assign( + __entry->vm = vm; + __entry->pde = pde; + __entry->start = start; + __entry->end = ((start + (1ULL << pde_shift)) & ~((1ULL << pde_shift)-1)) - 1; + ), + + TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)", + __entry->vm, __entry->pde, __entry->start, __entry->end) +); + +DEFINE_EVENT(i915_page_table_entry, i915_page_table_entry_alloc, + TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift), + TP_ARGS(vm, pde, start, pde_shift) +); + +/* Avoid extra math because we only support two sizes. The format is defined by + * bitmap_scnprintf. Each 32 bits is 8 HEX digits followed by comma */ +#define TRACE_PT_SIZE(bits) \ + ((((bits) == 1024) ? 
288 : 144) + 1) + +DECLARE_EVENT_CLASS(i915_page_table_entry_update, + TP_PROTO(struct i915_address_space *vm, u32 pde, + struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits), + TP_ARGS(vm, pde, pt, first, count, bits), + + TP_STRUCT__entry( + __field(struct i915_address_space *, vm) + __field(u32, pde) + __field(u32, first) + __field(u32, last) + __dynamic_array(char, cur_ptes, TRACE_PT_SIZE(bits)) + ), + + TP_fast_assign( + __entry->vm = vm; + __entry->pde = pde; + __entry->first = first; + __entry->last = first + count - 1; + scnprintf(__get_str(cur_ptes), + TRACE_PT_SIZE(bits), + "%*pb", + bits, + pt->used_ptes); + ), + + TP_printk("vm=%p, pde=%d, updating %u:%u\t%s", + __entry->vm, __entry->pde, __entry->last, __entry->first, + __get_str(cur_ptes)) +); + +DEFINE_EVENT(i915_page_table_entry_update, i915_page_table_entry_map, + TP_PROTO(struct i915_address_space *vm, u32 pde, + struct i915_page_table_entry *pt, u32 first, u32 count, u32 bits), + TP_ARGS(vm, pde, pt, first, count, bits) +); + TRACE_EVENT(i915_gem_object_change_domain, TP_PROTO(struct drm_i915_gem_object *obj, u32 old_read, u32 old_write), TP_ARGS(obj, old_read, old_write), diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index e66e17af0a56..6095a998bdac 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -690,7 +690,7 @@ intel_crt_detect(struct drm_connector *connector, bool force) * broken monitor (without edid) to work behind a broken kvm (that fails * to have the right resistors for HP detection) needs to fix this up. * For now just bail out. */ - if (I915_HAS_HOTPLUG(dev)) { + if (I915_HAS_HOTPLUG(dev) && !i915.load_detect_test) { status = connector_status_disconnected; goto out; } @@ -706,9 +706,11 @@ intel_crt_detect(struct drm_connector *connector, bool force) if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) { if (intel_crt_detect_ddc(connector)) status = connector_status_connected; - else + else if (INTEL_INFO(dev)->gen < 4) status = intel_crt_load_detect(crt); - intel_release_load_detect_pipe(connector, &tmp); + else + status = connector_status_unknown; + intel_release_load_detect_pipe(connector, &tmp, &ctx); } else status = connector_status_unknown; @@ -794,6 +796,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = { .destroy = intel_crt_destroy, .set_property = intel_crt_set_property, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, .atomic_get_property = intel_connector_atomic_get_property, }; diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 8aee7d77ce9d..47b9307da24b 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -492,17 +492,23 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc) } static struct intel_encoder * -intel_ddi_get_crtc_new_encoder(struct intel_crtc *crtc) +intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; - struct intel_encoder *intel_encoder, *ret = NULL; + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_encoder *ret = NULL; + struct drm_atomic_state *state; int num_encoders = 0; + int i; - for_each_intel_encoder(dev, intel_encoder) { - if (intel_encoder->new_crtc == crtc) { - ret = intel_encoder; - num_encoders++; - } + state = crtc_state->base.state; + + for (i = 0; i < state->num_connector; i++) { + if 
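The TRACE_PT_SIZE() constants follow directly from the format described in the comment: every 32-bit word of the bitmap costs 8 hex digits plus a comma, so a 1024-bit page-table bitmap needs 32 * 9 = 288 characters and the 144-character case corresponds, by the same rule, to 512 bits (16 * 9), with one extra byte for the terminating NUL. A generic restatement of that arithmetic:

/* 9 characters per 32-bit word, plus the trailing NUL. */
#define PT_DUMP_CHARS(bits)	(((bits) / 32) * 9 + 1)

/* PT_DUMP_CHARS(1024) == 289 == 288 + 1,
 * PT_DUMP_CHARS(512)  == 145 == 144 + 1. */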
(!state->connectors[i] || + state->connector_states[i]->crtc != crtc_state->base.crtc) + continue; + + ret = to_intel_encoder(state->connector_states[i]->best_encoder); + num_encoders++; } WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders, @@ -1216,7 +1222,7 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc, { struct drm_device *dev = intel_crtc->base.dev; struct intel_encoder *intel_encoder = - intel_ddi_get_crtc_new_encoder(intel_crtc); + intel_ddi_get_crtc_new_encoder(crtc_state); int clock = crtc_state->port_clock; if (IS_SKYLAKE(dev)) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 90b460cf2b57..75955fee6d24 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -83,7 +83,8 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, - int x, int y, struct drm_framebuffer *old_fb); + int x, int y, struct drm_framebuffer *old_fb, + struct drm_atomic_state *state); static int intel_framebuffer_init(struct drm_device *dev, struct intel_framebuffer *ifb, struct drm_mode_fb_cmd2 *mode_cmd, @@ -430,25 +431,41 @@ bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type) * intel_pipe_has_type() but looking at encoder->new_crtc instead of * encoder->crtc. */ -static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type) +static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state, + int type) { - struct drm_device *dev = crtc->base.dev; + struct drm_atomic_state *state = crtc_state->base.state; + struct drm_connector_state *connector_state; struct intel_encoder *encoder; + int i, num_connectors = 0; + + for (i = 0; i < state->num_connector; i++) { + if (!state->connectors[i]) + continue; + + connector_state = state->connector_states[i]; + if (connector_state->crtc != crtc_state->base.crtc) + continue; - for_each_intel_encoder(dev, encoder) - if (encoder->new_crtc == crtc && encoder->type == type) + num_connectors++; + + encoder = to_intel_encoder(connector_state->best_encoder); + if (encoder->type == type) return true; + } + + WARN_ON(num_connectors == 0); return false; } -static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc, - int refclk) +static const intel_limit_t * +intel_ironlake_limit(struct intel_crtc_state *crtc_state, int refclk) { - struct drm_device *dev = crtc->base.dev; + struct drm_device *dev = crtc_state->base.crtc->dev; const intel_limit_t *limit; - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) { + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_is_dual_link_lvds(dev)) { if (refclk == 100000) limit = &intel_limits_ironlake_dual_lvds_100m; @@ -466,20 +483,21 @@ static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc, return limit; } -static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc) +static const intel_limit_t * +intel_g4x_limit(struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; + struct drm_device *dev = crtc_state->base.crtc->dev; const intel_limit_t *limit; - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) { + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_is_dual_link_lvds(dev)) limit = &intel_limits_g4x_dual_channel_lvds; else limit = &intel_limits_g4x_single_channel_lvds; - } else if (intel_pipe_will_have_type(crtc, 
INTEL_OUTPUT_HDMI) || - intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) { + } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) || + intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) { limit = &intel_limits_g4x_hdmi; - } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) { + } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) { limit = &intel_limits_g4x_sdvo; } else /* The option is for other outputs */ limit = &intel_limits_i9xx_sdvo; @@ -487,17 +505,18 @@ static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc) return limit; } -static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk) +static const intel_limit_t * +intel_limit(struct intel_crtc_state *crtc_state, int refclk) { - struct drm_device *dev = crtc->base.dev; + struct drm_device *dev = crtc_state->base.crtc->dev; const intel_limit_t *limit; if (HAS_PCH_SPLIT(dev)) - limit = intel_ironlake_limit(crtc, refclk); + limit = intel_ironlake_limit(crtc_state, refclk); else if (IS_G4X(dev)) { - limit = intel_g4x_limit(crtc); + limit = intel_g4x_limit(crtc_state); } else if (IS_PINEVIEW(dev)) { - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) limit = &intel_limits_pineview_lvds; else limit = &intel_limits_pineview_sdvo; @@ -506,14 +525,14 @@ static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk) } else if (IS_VALLEYVIEW(dev)) { limit = &intel_limits_vlv; } else if (!IS_GEN2(dev)) { - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) limit = &intel_limits_i9xx_lvds; else limit = &intel_limits_i9xx_sdvo; } else { - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) limit = &intel_limits_i8xx_lvds; - else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO)) + else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) limit = &intel_limits_i8xx_dvo; else limit = &intel_limits_i8xx_dac; @@ -600,15 +619,17 @@ static bool intel_PLL_is_valid(struct drm_device *dev, } static bool -i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, +i9xx_find_best_dpll(const intel_limit_t *limit, + struct intel_crtc_state *crtc_state, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; intel_clock_t clock; int err = target; - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) { + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { /* * For LVDS just rely on its current settings for dual-channel. * We haven't figured out how to reliably set up different @@ -661,15 +682,17 @@ i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, } static bool -pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, +pnv_find_best_dpll(const intel_limit_t *limit, + struct intel_crtc_state *crtc_state, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; intel_clock_t clock; int err = target; - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) { + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { /* * For LVDS just rely on its current settings for dual-channel. 
* We haven't figured out how to reliably set up different @@ -720,10 +743,12 @@ pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, } static bool -g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, +g4x_find_best_dpll(const intel_limit_t *limit, + struct intel_crtc_state *crtc_state, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; intel_clock_t clock; int max_n; @@ -732,7 +757,7 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, int err_most = (target >> 8) + (target >> 9); found = false; - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) { + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { if (intel_is_dual_link_lvds(dev)) clock.p2 = limit->p2.p2_fast; else @@ -776,11 +801,53 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, return found; } +/* + * Check if the calculated PLL configuration is more optimal compared to the + * best configuration and error found so far. Return the calculated error. + */ +static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, + const intel_clock_t *calculated_clock, + const intel_clock_t *best_clock, + unsigned int best_error_ppm, + unsigned int *error_ppm) +{ + /* + * For CHV ignore the error and consider only the P value. + * Prefer a bigger P value based on HW requirements. + */ + if (IS_CHERRYVIEW(dev)) { + *error_ppm = 0; + + return calculated_clock->p > best_clock->p; + } + + if (WARN_ON_ONCE(!target_freq)) + return false; + + *error_ppm = div_u64(1000000ULL * + abs(target_freq - calculated_clock->dot), + target_freq); + /* + * Prefer a better P value over a better (smaller) error if the error + * is small. Ensure this preference for future configurations too by + * setting the error to 0. 
+ */ + if (*error_ppm < 100 && calculated_clock->p > best_clock->p) { + *error_ppm = 0; + + return true; + } + + return *error_ppm + 10 < best_error_ppm; +} + static bool -vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, +vlv_find_best_dpll(const intel_limit_t *limit, + struct intel_crtc_state *crtc_state, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; intel_clock_t clock; unsigned int bestppm = 1000000; @@ -800,7 +867,7 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, clock.p = clock.p1 * clock.p2; /* based on hardware requirement, prefer bigger m1,m2 values */ for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) { - unsigned int ppm, diff; + unsigned int ppm; clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n, refclk * clock.m1); @@ -811,20 +878,15 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, &clock)) continue; - diff = abs(clock.dot - target); - ppm = div_u64(1000000ULL * diff, target); - - if (ppm < 100 && clock.p > best_clock->p) { - bestppm = 0; - *best_clock = clock; - found = true; - } + if (!vlv_PLL_is_optimal(dev, target, + &clock, + best_clock, + bestppm, &ppm)) + continue; - if (bestppm >= 10 && ppm < bestppm - 10) { - bestppm = ppm; - *best_clock = clock; - found = true; - } + *best_clock = clock; + bestppm = ppm; + found = true; } } } @@ -834,16 +896,20 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, } static bool -chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, +chv_find_best_dpll(const intel_limit_t *limit, + struct intel_crtc_state *crtc_state, int target, int refclk, intel_clock_t *match_clock, intel_clock_t *best_clock) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; + unsigned int best_error_ppm; intel_clock_t clock; uint64_t m2; int found = false; memset(best_clock, 0, sizeof(*best_clock)); + best_error_ppm = 1000000; /* * Based on hardware doc, the n always set to 1, and m1 always @@ -857,6 +923,7 @@ chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow; clock.p2 -= clock.p2 > 10 ? 
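vlv_PLL_is_optimal() folds together both selection rules that were previously open-coded in vlv_find_best_dpll(): the deviation from the target dot clock is expressed in parts per million, a larger P divider wins outright once the error is under 100 ppm, and otherwise a candidate must beat the best error by a 10 ppm margin. The core of that comparison, as a standalone sketch:

static int clock_is_better(unsigned int target, unsigned int dot,
			   unsigned int p, unsigned int best_p,
			   unsigned int best_ppm, unsigned int *ppm)
{
	unsigned int diff = dot > target ? dot - target : target - dot;

	*ppm = (unsigned int)((1000000ULL * diff) / target);

	if (*ppm < 100 && p > best_p) {
		*ppm = 0;	/* lock in the bigger-P preference */
		return 1;
	}

	return *ppm + 10 < best_ppm;
}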
2 : 1) { + unsigned int error_ppm; clock.p = clock.p1 * clock.p2; @@ -873,12 +940,13 @@ chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc, if (!intel_PLL_is_valid(dev, limit, &clock)) continue; - /* based on hardware requirement, prefer bigger p - */ - if (clock.p > best_clock->p) { - *best_clock = clock; - found = true; - } + if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock, + best_error_ppm, &error_ppm)) + continue; + + *best_clock = clock; + best_error_ppm = error_ppm; + found = true; } } @@ -2194,13 +2262,12 @@ static bool need_vtd_wa(struct drm_device *dev) return false; } -int -intel_fb_align_height(struct drm_device *dev, int height, - uint32_t pixel_format, - uint64_t fb_format_modifier) +unsigned int +intel_tile_height(struct drm_device *dev, uint32_t pixel_format, + uint64_t fb_format_modifier) { - int tile_height; - uint32_t bits_per_pixel; + unsigned int tile_height; + uint32_t pixel_bytes; switch (fb_format_modifier) { case DRM_FORMAT_MOD_NONE: @@ -2213,20 +2280,20 @@ intel_fb_align_height(struct drm_device *dev, int height, tile_height = 32; break; case I915_FORMAT_MOD_Yf_TILED: - bits_per_pixel = drm_format_plane_cpp(pixel_format, 0) * 8; - switch (bits_per_pixel) { + pixel_bytes = drm_format_plane_cpp(pixel_format, 0); + switch (pixel_bytes) { default: - case 8: + case 1: tile_height = 64; break; - case 16: - case 32: + case 2: + case 4: tile_height = 32; break; - case 64: + case 8: tile_height = 16; break; - case 128: + case 16: WARN_ONCE(1, "128-bit pixels are not supported for display!"); tile_height = 16; @@ -2239,17 +2306,58 @@ intel_fb_align_height(struct drm_device *dev, int height, break; } - return ALIGN(height, tile_height); + return tile_height; +} + +unsigned int +intel_fb_align_height(struct drm_device *dev, unsigned int height, + uint32_t pixel_format, uint64_t fb_format_modifier) +{ + return ALIGN(height, intel_tile_height(dev, pixel_format, + fb_format_modifier)); +} + +static int +intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb, + const struct drm_plane_state *plane_state) +{ + struct intel_rotation_info *info = &view->rotation_info; + + *view = i915_ggtt_view_normal; + + if (!plane_state) + return 0; + + if (!intel_rotation_90_or_270(plane_state->rotation)) + return 0; + + *view = i915_ggtt_view_rotated; + + info->height = fb->height; + info->pixel_format = fb->pixel_format; + info->pitch = fb->pitches[0]; + info->fb_modifier = fb->modifier[0]; + + if (!(info->fb_modifier == I915_FORMAT_MOD_Y_TILED || + info->fb_modifier == I915_FORMAT_MOD_Yf_TILED)) { + DRM_DEBUG_KMS( + "Y or Yf tiling is needed for 90/270 rotation!\n"); + return -EINVAL; + } + + return 0; } int intel_pin_and_fence_fb_obj(struct drm_plane *plane, struct drm_framebuffer *fb, + const struct drm_plane_state *plane_state, struct intel_engine_cs *pipelined) { struct drm_device *dev = fb->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct i915_ggtt_view view; u32 alignment; int ret; @@ -2286,6 +2394,10 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane, return -EINVAL; } + ret = intel_fill_fb_ggtt_view(&view, fb, plane_state); + if (ret) + return ret; + /* Note that the w/a also requires 64 PTE of padding following the * bo. 
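intel_fb_align_height() now rounds the framebuffer height up to the tile height that intel_tile_height() derives from the modifier (and, for Yf tiling, from the pixel size: 1 byte/px tiles are 64 rows high, 2 or 4 byte/px are 32, 8 byte/px are 16). The rounding itself is the usual power-of-two ALIGN; a standalone equivalent:

/* Round height up to a multiple of tile_height (a power of two). */
static unsigned int align_height(unsigned int height, unsigned int tile_height)
{
	return (height + tile_height - 1) & ~(tile_height - 1);
}

/* e.g. align_height(1080, 32) == 1088 for a 4-byte/px Yf surface. */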
We currently fill all unused PTE with the shadow page and so * we should always have valid PTE following the scanout preventing @@ -2304,7 +2416,8 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane, intel_runtime_pm_get(dev_priv); dev_priv->mm.interruptible = false; - ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined); + ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined, + &view); if (ret) goto err_interruptible; @@ -2324,19 +2437,27 @@ intel_pin_and_fence_fb_obj(struct drm_plane *plane, return 0; err_unpin: - i915_gem_object_unpin_from_display_plane(obj); + i915_gem_object_unpin_from_display_plane(obj, &view); err_interruptible: dev_priv->mm.interruptible = true; intel_runtime_pm_put(dev_priv); return ret; } -static void intel_unpin_fb_obj(struct drm_i915_gem_object *obj) +static void intel_unpin_fb_obj(struct drm_framebuffer *fb, + const struct drm_plane_state *plane_state) { + struct drm_i915_gem_object *obj = intel_fb_obj(fb); + struct i915_ggtt_view view; + int ret; + WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex)); + ret = intel_fill_fb_ggtt_view(&view, fb, plane_state); + WARN_ONCE(ret, "Couldn't get view from plane state!"); + i915_gem_object_unpin_fence(obj); - i915_gem_object_unpin_from_display_plane(obj); + i915_gem_object_unpin_from_display_plane(obj, &view); } /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel @@ -2414,8 +2535,8 @@ static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha) } static bool -intel_alloc_plane_obj(struct intel_crtc *crtc, - struct intel_initial_plane_config *plane_config) +intel_alloc_initial_plane_obj(struct intel_crtc *crtc, + struct intel_initial_plane_config *plane_config) { struct drm_device *dev = crtc->base.dev; struct drm_i915_gem_object *obj = NULL; @@ -2449,17 +2570,14 @@ intel_alloc_plane_obj(struct intel_crtc *crtc, mode_cmd.flags = DRM_MODE_FB_MODIFIERS; mutex_lock(&dev->struct_mutex); - if (intel_framebuffer_init(dev, to_intel_framebuffer(fb), &mode_cmd, obj)) { DRM_DEBUG_KMS("intel fb init failed\n"); goto out_unref_obj; } - - obj->frontbuffer_bits = INTEL_FRONTBUFFER_PRIMARY(crtc->pipe); mutex_unlock(&dev->struct_mutex); - DRM_DEBUG_KMS("plane fb obj %p\n", obj); + DRM_DEBUG_KMS("initial plane fb obj %p\n", obj); return true; out_unref_obj: @@ -2483,26 +2601,23 @@ update_state_fb(struct drm_plane *plane) } static void -intel_find_plane_obj(struct intel_crtc *intel_crtc, - struct intel_initial_plane_config *plane_config) +intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, + struct intel_initial_plane_config *plane_config) { struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *c; struct intel_crtc *i; struct drm_i915_gem_object *obj; + struct drm_plane *primary = intel_crtc->base.primary; + struct drm_framebuffer *fb; if (!plane_config->fb) return; - if (intel_alloc_plane_obj(intel_crtc, plane_config)) { - struct drm_plane *primary = intel_crtc->base.primary; - - primary->fb = &plane_config->fb->base; - primary->state->crtc = &intel_crtc->base; - update_state_fb(primary); - - return; + if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) { + fb = &plane_config->fb->base; + goto valid_fb; } kfree(plane_config->fb); @@ -2520,24 +2635,29 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc, if (!i->active) continue; - obj = intel_fb_obj(c->primary->fb); - if (obj == NULL) + fb = c->primary->fb; + if (!fb) continue; + obj = intel_fb_obj(fb); if 
(i915_gem_obj_ggtt_offset(obj) == plane_config->base) { - struct drm_plane *primary = intel_crtc->base.primary; - - if (obj->tiling_mode != I915_TILING_NONE) - dev_priv->preserve_bios_swizzle = true; - - drm_framebuffer_reference(c->primary->fb); - primary->fb = c->primary->fb; - primary->state->crtc = &intel_crtc->base; - update_state_fb(intel_crtc->base.primary); - obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); - break; + drm_framebuffer_reference(fb); + goto valid_fb; } } + + return; + +valid_fb: + obj = intel_fb_obj(fb); + if (obj->tiling_mode != I915_TILING_NONE) + dev_priv->preserve_bios_swizzle = true; + + primary->fb = fb; + primary->state->crtc = &intel_crtc->base; + primary->crtc = &intel_crtc->base; + update_state_fb(primary); + obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); } static void i9xx_update_primary_plane(struct drm_crtc *crtc, @@ -2805,6 +2925,17 @@ u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier, } } +unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane, + struct drm_i915_gem_object *obj) +{ + const struct i915_ggtt_view *view = &i915_ggtt_view_normal; + + if (intel_rotation_90_or_270(intel_plane->base.state->rotation)) + view = &i915_ggtt_view_rotated; + + return i915_gem_obj_ggtt_offset_view(obj, view); +} + static void skylake_update_primary_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y) @@ -2815,6 +2946,7 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc, struct drm_i915_gem_object *obj; int pipe = intel_crtc->pipe; u32 plane_ctl, stride_div; + unsigned long surf_addr; if (!intel_crtc->primary_enabled) { I915_WRITE(PLANE_CTL(pipe, 0), 0); @@ -2881,16 +3013,16 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc, obj = intel_fb_obj(fb); stride_div = intel_fb_stride_alignment(dev, fb->modifier[0], fb->pixel_format); + surf_addr = intel_plane_obj_offset(to_intel_plane(crtc->primary), obj); I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); - I915_WRITE(PLANE_POS(pipe, 0), 0); I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x); I915_WRITE(PLANE_SIZE(pipe, 0), (intel_crtc->config->pipe_src_h - 1) << 16 | (intel_crtc->config->pipe_src_w - 1)); I915_WRITE(PLANE_STRIDE(pipe, 0), fb->pitches[0] / stride_div); - I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj)); + I915_WRITE(PLANE_SURF(pipe, 0), surf_addr); POSTING_READ(PLANE_SURF(pipe, 0)); } @@ -4824,8 +4956,9 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc) return mask; } -static void modeset_update_crtc_power_domains(struct drm_device *dev) +static void modeset_update_crtc_power_domains(struct drm_atomic_state *state) { + struct drm_device *dev = state->dev; struct drm_i915_private *dev_priv = dev->dev_private; unsigned long pipe_domains[I915_MAX_PIPES] = { 0, }; struct intel_crtc *crtc; @@ -4847,7 +4980,7 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev) } if (dev_priv->display.modeset_global_resources) - dev_priv->display.modeset_global_resources(dev); + dev_priv->display.modeset_global_resources(state); for_each_intel_crtc(dev, crtc) { enum intel_display_power_domain domain; @@ -5095,8 +5228,9 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND); } -static void valleyview_modeset_global_resources(struct drm_device *dev) +static void valleyview_modeset_global_resources(struct drm_atomic_state *state) { + struct drm_device *dev = state->dev; struct 
drm_i915_private *dev_priv = dev->dev_private; int max_pixclk = intel_mode_max_pixclk(dev_priv); int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); @@ -5687,7 +5821,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc, * - LVDS dual channel mode * - Double wide pipe */ - if ((intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) && + if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) && intel_is_dual_link_lvds(dev)) || pipe_config->double_wide) pipe_config->pipe_src_w &= ~1; @@ -5866,15 +6000,18 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); } -static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors) +static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state, + int num_connectors) { - struct drm_device *dev = crtc->base.dev; + struct drm_device *dev = crtc_state->base.crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; int refclk; + WARN_ON(!crtc_state->base.state); + if (IS_VALLEYVIEW(dev)) { refclk = 100000; - } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) && + } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { refclk = dev_priv->vbt.lvds_ssc_freq; DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); @@ -5917,8 +6054,8 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc, crtc_state->dpll_hw_state.fp0 = fp; crtc->lowfreq_avail = false; - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) && - reduced_clock && i915.powersave) { + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && + reduced_clock) { crtc_state->dpll_hw_state.fp1 = fp2; crtc->lowfreq_avail = true; } else { @@ -6275,6 +6412,7 @@ void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe, struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe)); struct intel_crtc_state pipe_config = { + .base.crtc = &crtc->base, .pixel_multiplier = 1, .dpll = *dpll, }; @@ -6319,12 +6457,12 @@ static void i9xx_update_pll(struct intel_crtc *crtc, i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); - is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) || - intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI); + is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) || + intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI); dpll = DPLL_VGA_MODE_DIS; - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) dpll |= DPLLB_MODE_LVDS; else dpll |= DPLLB_MODE_DAC_SERIAL; @@ -6367,7 +6505,7 @@ static void i9xx_update_pll(struct intel_crtc *crtc, if (crtc_state->sdvo_tv_clock) dpll |= PLL_REF_INPUT_TVCLKINBC; - else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) && + else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv) && num_connectors < 2) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else @@ -6397,7 +6535,7 @@ static void i8xx_update_pll(struct intel_crtc *crtc, dpll = DPLL_VGA_MODE_DIS; - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) { + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; } else { if (clock->p1 == 2) @@ -6408,10 +6546,10 @@ static void i8xx_update_pll(struct intel_crtc *crtc, dpll |= PLL_P2_DIVIDE_BY_4; } - if (!IS_I830(dev) && intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO)) + if (!IS_I830(dev) && 
intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) dpll |= DPLL_DVO_2X_MODE; - if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) && + if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && intel_panel_use_ssc(dev_priv) && num_connectors < 2) dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; else @@ -6625,11 +6763,20 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, bool is_lvds = false, is_dsi = false; struct intel_encoder *encoder; const intel_limit_t *limit; + struct drm_atomic_state *state = crtc_state->base.state; + struct drm_connector_state *connector_state; + int i; - for_each_intel_encoder(dev, encoder) { - if (encoder->new_crtc != crtc) + for (i = 0; i < state->num_connector; i++) { + if (!state->connectors[i]) continue; + connector_state = state->connector_states[i]; + if (connector_state->crtc != &crtc->base) + continue; + + encoder = to_intel_encoder(connector_state->best_encoder); + switch (encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; @@ -6648,7 +6795,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, return 0; if (!crtc_state->clock_set) { - refclk = i9xx_get_refclk(crtc, num_connectors); + refclk = i9xx_get_refclk(crtc_state, num_connectors); /* * Returns a set of divisors for the desired target clock with @@ -6656,8 +6803,8 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, * the clock equation: reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + * 2) / p1 / p2. */ - limit = intel_limit(crtc, refclk); - ok = dev_priv->display.find_dpll(limit, crtc, + limit = intel_limit(crtc_state, refclk); + ok = dev_priv->display.find_dpll(limit, crtc_state, crtc_state->port_clock, refclk, NULL, &clock); if (!ok) { @@ -6673,7 +6820,7 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc, * we will disable the LVDS downclock feature. 
*/ has_reduced_clock = - dev_priv->display.find_dpll(limit, crtc, + dev_priv->display.find_dpll(limit, crtc_state, dev_priv->lvds_downclock, refclk, &clock, &reduced_clock); @@ -6772,7 +6919,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, u32 val, base, offset; int pipe = crtc->pipe, plane = crtc->plane; int fourcc, pixel_format; - int aligned_height; + unsigned int aligned_height; struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; @@ -7303,18 +7450,26 @@ void intel_init_pch_refclk(struct drm_device *dev) lpt_init_pch_refclk(dev); } -static int ironlake_get_refclk(struct drm_crtc *crtc) +static int ironlake_get_refclk(struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->dev; + struct drm_device *dev = crtc_state->base.crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_atomic_state *state = crtc_state->base.state; + struct drm_connector_state *connector_state; struct intel_encoder *encoder; - int num_connectors = 0; + int num_connectors = 0, i; bool is_lvds = false; - for_each_intel_encoder(dev, encoder) { - if (encoder->new_crtc != to_intel_crtc(crtc)) + for (i = 0; i < state->num_connector; i++) { + if (!state->connectors[i]) + continue; + + connector_state = state->connector_states[i]; + if (connector_state->crtc != crtc_state->base.crtc) continue; + encoder = to_intel_encoder(connector_state->best_encoder); + switch (encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; @@ -7501,22 +7656,21 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); int refclk; const intel_limit_t *limit; bool ret, is_lvds = false; - is_lvds = intel_pipe_will_have_type(intel_crtc, INTEL_OUTPUT_LVDS); + is_lvds = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS); - refclk = ironlake_get_refclk(crtc); + refclk = ironlake_get_refclk(crtc_state); /* * Returns a set of divisors for the desired target clock with the given * refclk, or FALSE. The returned values represent the clock equation: * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. */ - limit = intel_limit(intel_crtc, refclk); - ret = dev_priv->display.find_dpll(limit, intel_crtc, + limit = intel_limit(crtc_state, refclk); + ret = dev_priv->display.find_dpll(limit, crtc_state, crtc_state->port_clock, refclk, NULL, clock); if (!ret) @@ -7530,7 +7684,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc, * downclock feature. 
*/ *has_reduced_clock = - dev_priv->display.find_dpll(limit, intel_crtc, + dev_priv->display.find_dpll(limit, crtc_state, dev_priv->lvds_downclock, refclk, clock, reduced_clock); @@ -7563,16 +7717,24 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc, struct drm_crtc *crtc = &intel_crtc->base; struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_encoder *intel_encoder; + struct drm_atomic_state *state = crtc_state->base.state; + struct drm_connector_state *connector_state; + struct intel_encoder *encoder; uint32_t dpll; - int factor, num_connectors = 0; + int factor, num_connectors = 0, i; bool is_lvds = false, is_sdvo = false; - for_each_intel_encoder(dev, intel_encoder) { - if (intel_encoder->new_crtc != to_intel_crtc(crtc)) + for (i = 0; i < state->num_connector; i++) { + if (!state->connectors[i]) continue; - switch (intel_encoder->type) { + connector_state = state->connector_states[i]; + if (connector_state->crtc != crtc_state->base.crtc) + continue; + + encoder = to_intel_encoder(connector_state->best_encoder); + + switch (encoder->type) { case INTEL_OUTPUT_LVDS: is_lvds = true; break; @@ -7701,7 +7863,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc, } } - if (is_lvds && has_reduced_clock && i915.powersave) + if (is_lvds && has_reduced_clock) crtc->lowfreq_avail = true; else crtc->lowfreq_avail = false; @@ -7810,7 +7972,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, u32 val, base, offset, stride_mult, tiling; int pipe = crtc->pipe; int fourcc, pixel_format; - int aligned_height; + unsigned int aligned_height; struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; @@ -7918,7 +8080,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, u32 val, base, offset; int pipe = crtc->pipe; int fourcc, pixel_format; - int aligned_height; + unsigned int aligned_height; struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; @@ -8802,6 +8964,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, struct drm_device *dev = encoder->dev; struct drm_framebuffer *fb; struct drm_mode_config *config = &dev->mode_config; + struct drm_atomic_state *state = NULL; + struct drm_connector_state *connector_state; int ret, i = -1; DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", @@ -8883,6 +9047,21 @@ retry: old->load_detect_temp = true; old->release_fb = NULL; + state = drm_atomic_state_alloc(dev); + if (!state) + return false; + + state->acquire_ctx = ctx; + + connector_state = drm_atomic_get_connector_state(state, connector); + if (IS_ERR(connector_state)) { + ret = PTR_ERR(connector_state); + goto fail; + } + + connector_state->crtc = crtc; + connector_state->best_encoder = &intel_encoder->base; + if (!mode) mode = &load_detect_mode; @@ -8905,7 +9084,7 @@ retry: goto fail; } - if (intel_set_mode(crtc, mode, 0, 0, fb)) { + if (intel_set_mode(crtc, mode, 0, 0, fb, state)) { DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); if (old->release_fb) old->release_fb->funcs->destroy(old->release_fb); @@ -8924,6 +9103,11 @@ retry: else intel_crtc->new_config = NULL; fail_unlock: + if (state) { + drm_atomic_state_free(state); + state = NULL; + } + if (ret == -EDEADLK) { drm_modeset_backoff(ctx); goto retry; @@ -8933,24 +9117,44 @@ fail_unlock: } void intel_release_load_detect_pipe(struct drm_connector *connector, - struct intel_load_detect_pipe *old) + struct intel_load_detect_pipe *old, + struct drm_modeset_acquire_ctx *ctx) { + struct drm_device *dev = 
connector->dev; struct intel_encoder *intel_encoder = intel_attached_encoder(connector); struct drm_encoder *encoder = &intel_encoder->base; struct drm_crtc *crtc = encoder->crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_atomic_state *state; + struct drm_connector_state *connector_state; DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", connector->base.id, connector->name, encoder->base.id, encoder->name); if (old->load_detect_temp) { + state = drm_atomic_state_alloc(dev); + if (!state) + goto fail; + + state->acquire_ctx = ctx; + + connector_state = drm_atomic_get_connector_state(state, connector); + if (IS_ERR(connector_state)) + goto fail; + to_intel_connector(connector)->new_encoder = NULL; intel_encoder->new_crtc = NULL; intel_crtc->new_enabled = false; intel_crtc->new_config = NULL; - intel_set_mode(crtc, NULL, 0, 0, NULL); + + connector_state->best_encoder = NULL; + connector_state->crtc = NULL; + + intel_set_mode(crtc, NULL, 0, 0, NULL, state); + + drm_atomic_state_free(state); if (old->release_fb) { drm_framebuffer_unregister_private(old->release_fb); @@ -8963,6 +9167,11 @@ void intel_release_load_detect_pipe(struct drm_connector *connector, /* Switch crtc and encoder back off if necessary */ if (old->dpms_mode != DRM_MODE_DPMS_ON) connector->funcs->dpms(connector, old->dpms_mode); + + return; +fail: + DRM_DEBUG_KMS("Couldn't release load detect pipe.\n"); + drm_atomic_state_free(state); } static int i9xx_pll_refclk(struct drm_device *dev, @@ -9201,6 +9410,8 @@ void intel_mark_busy(struct drm_device *dev) intel_runtime_pm_get(dev_priv); i915_update_gfx_val(dev_priv); + if (INTEL_INFO(dev)->gen >= 6) + gen6_rps_busy(dev_priv); dev_priv->mm.busy = true; } @@ -9214,9 +9425,6 @@ void intel_mark_idle(struct drm_device *dev) dev_priv->mm.busy = false; - if (!i915.powersave) - goto out; - for_each_crtc(dev, crtc) { if (!crtc->primary->fb) continue; @@ -9227,7 +9435,6 @@ void intel_mark_idle(struct drm_device *dev) if (INTEL_INFO(dev)->gen >= 6) gen6_rps_idle(dev->dev_private); -out: intel_runtime_pm_put(dev_priv); } @@ -9269,7 +9476,7 @@ static void intel_unpin_work_fn(struct work_struct *__work) enum pipe pipe = to_intel_crtc(work->crtc)->pipe; mutex_lock(&dev->struct_mutex); - intel_unpin_fb_obj(intel_fb_obj(work->old_fb)); + intel_unpin_fb_obj(work->old_fb, work->crtc->primary->state); drm_gem_object_unreference(&work->pending_flip_obj->base); intel_fbc_update(dev); @@ -9977,12 +10184,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ring = &dev_priv->ring[RCS]; } - ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, ring); + ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, + crtc->primary->state, ring); if (ret) goto cleanup_pending; - work->gtt_offset = - i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset; + work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), obj) + + intel_crtc->dspaddr_offset; if (use_mmio_flip(ring, obj)) { ret = intel_queue_mmio_flip(dev, crtc, fb, obj, ring, @@ -10017,7 +10225,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return 0; cleanup_unpin: - intel_unpin_fb_obj(obj); + intel_unpin_fb_obj(fb, crtc->primary->state); cleanup_pending: atomic_dec(&intel_crtc->unpin_work_count); mutex_unlock(&dev->struct_mutex); @@ -10087,6 +10295,27 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev) } } +/* Transitional helper to copy current connector/encoder state to + * connector->state. 
This is needed so that code that is partially + * converted to atomic does the right thing. + */ +static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) +{ + struct intel_connector *connector; + + for_each_intel_connector(dev, connector) { + if (connector->base.encoder) { + connector->base.state->best_encoder = + connector->base.encoder; + connector->base.state->crtc = + connector->base.encoder->crtc; + } else { + connector->base.state->best_encoder = NULL; + connector->base.state->crtc = NULL; + } + } +} + /** * intel_modeset_commit_output_state * @@ -10110,6 +10339,8 @@ static void intel_modeset_commit_output_state(struct drm_device *dev) crtc->base.state->enable = crtc->new_enabled; crtc->base.enabled = crtc->new_enabled; } + + intel_modeset_update_connector_atomic_state(dev); } static void @@ -10144,8 +10375,9 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_device *dev = crtc->base.dev; + struct drm_atomic_state *state; struct intel_connector *connector; - int bpp; + int bpp, i; switch (fb->pixel_format) { case DRM_FORMAT_C8: @@ -10185,10 +10417,15 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc, pipe_config->pipe_bpp = bpp; + state = pipe_config->base.state; + /* Clamp display bpp to EDID value */ - for_each_intel_connector(dev, connector) { - if (!connector->new_encoder || - connector->new_encoder->new_crtc != crtc) + for (i = 0; i < state->num_connector; i++) { + if (!state->connectors[i]) + continue; + + connector = to_intel_connector(state->connectors[i]); + if (state->connector_states[i]->crtc != &crtc->base) continue; connected_sink_compute_bpp(connector, pipe_config); @@ -10344,15 +10581,30 @@ static bool check_digital_port_conflicts(struct drm_device *dev) return true; } +static void +clear_intel_crtc_state(struct intel_crtc_state *crtc_state) +{ + struct drm_crtc_state tmp_state; + + /* Clear only the intel specific part of the crtc state */ + tmp_state = crtc_state->base; + memset(crtc_state, 0, sizeof *crtc_state); + crtc_state->base = tmp_state; +} + static struct intel_crtc_state * intel_modeset_pipe_config(struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_display_mode *mode) + struct drm_display_mode *mode, + struct drm_atomic_state *state) { struct drm_device *dev = crtc->dev; struct intel_encoder *encoder; + struct intel_connector *connector; + struct drm_connector_state *connector_state; struct intel_crtc_state *pipe_config; int plane_bpp, ret = -EINVAL; + int i; bool retry = true; if (!check_encoder_cloning(to_intel_crtc(crtc))) { @@ -10365,9 +10617,11 @@ intel_modeset_pipe_config(struct drm_crtc *crtc, return ERR_PTR(-EINVAL); } - pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); - if (!pipe_config) - return ERR_PTR(-ENOMEM); + pipe_config = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc)); + if (IS_ERR(pipe_config)) + return pipe_config; + + clear_intel_crtc_state(pipe_config); pipe_config->base.crtc = crtc; drm_mode_copy(&pipe_config->base.adjusted_mode, mode); @@ -10424,11 +10678,17 @@ encoder_retry: * adjust it according to limitations or connector properties, and also * a chance to reject the mode entirely. 
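Several hunks above replace the for_each_intel_encoder() walks keyed on encoder->new_crtc with a scan of the atomic state's connector arrays; that is the transitional pattern this series rolls out. Its shape, factored into an illustrative helper that is not part of the patch:

#include <drm/drm_atomic.h>

static struct drm_encoder *
encoder_for_crtc(struct drm_atomic_state *state, struct drm_crtc *crtc)
{
	int i;

	for (i = 0; i < state->num_connector; i++) {
		struct drm_connector_state *conn_state;

		if (!state->connectors[i])
			continue;

		conn_state = state->connector_states[i];
		if (conn_state->crtc != crtc)
			continue;

		return conn_state->best_encoder;
	}

	return NULL;
}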
*/ - for_each_intel_encoder(dev, encoder) { + for (i = 0; i < state->num_connector; i++) { + connector = to_intel_connector(state->connectors[i]); + if (!connector) + continue; - if (&encoder->new_crtc->base != crtc) + connector_state = state->connector_states[i]; + if (connector_state->crtc != crtc) continue; + encoder = to_intel_encoder(connector_state->best_encoder); + if (!(encoder->compute_config(encoder, pipe_config))) { DRM_DEBUG_KMS("Encoder config failure\n"); goto fail; @@ -10464,7 +10724,6 @@ encoder_retry: return pipe_config; fail: - kfree(pipe_config); return ERR_PTR(ret); } @@ -11143,17 +11402,30 @@ static struct intel_crtc_state * intel_modeset_compute_config(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_framebuffer *fb, + struct drm_atomic_state *state, unsigned *modeset_pipes, unsigned *prepare_pipes, unsigned *disable_pipes) { + struct drm_device *dev = crtc->dev; struct intel_crtc_state *pipe_config = NULL; + struct intel_crtc *intel_crtc; + int ret = 0; + + ret = drm_atomic_add_affected_connectors(state, crtc); + if (ret) + return ERR_PTR(ret); intel_modeset_affected_pipes(crtc, modeset_pipes, prepare_pipes, disable_pipes); - if ((*modeset_pipes) == 0) - goto out; + for_each_intel_crtc_masked(dev, *disable_pipes, intel_crtc) { + pipe_config = intel_atomic_get_crtc_state(state, intel_crtc); + if (IS_ERR(pipe_config)) + return pipe_config; + + pipe_config->base.enable = false; + } /* * Note this needs changes when we start tracking multiple modes @@ -11161,15 +11433,21 @@ intel_modeset_compute_config(struct drm_crtc *crtc, * (i.e. one pipe_config for each crtc) rather than just the one * for this crtc. */ - pipe_config = intel_modeset_pipe_config(crtc, fb, mode); - if (IS_ERR(pipe_config)) { - goto out; + for_each_intel_crtc_masked(dev, *modeset_pipes, intel_crtc) { + /* FIXME: For now we still expect modeset_pipes has at most + * one bit set. */ + if (WARN_ON(&intel_crtc->base != crtc)) + continue; + + pipe_config = intel_modeset_pipe_config(crtc, fb, mode, state); + if (IS_ERR(pipe_config)) + return pipe_config; + + intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, + "[modeset]"); } - intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, - "[modeset]"); -out: - return pipe_config; + return intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));; } static int __intel_set_mode_setup_plls(struct drm_device *dev, @@ -11213,6 +11491,7 @@ static int __intel_set_mode(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_display_mode *saved_mode; + struct intel_crtc_state *crtc_state_copy = NULL; struct intel_crtc *intel_crtc; int ret = 0; @@ -11220,6 +11499,12 @@ static int __intel_set_mode(struct drm_crtc *crtc, if (!saved_mode) return -ENOMEM; + crtc_state_copy = kmalloc(sizeof(*crtc_state_copy), GFP_KERNEL); + if (!crtc_state_copy) { + ret = -ENOMEM; + goto done; + } + *saved_mode = crtc->mode; if (modeset_pipes) @@ -11277,7 +11562,7 @@ static int __intel_set_mode(struct drm_crtc *crtc, * update the the output configuration. */ intel_modeset_update_state(dev, prepare_pipes); - modeset_update_crtc_power_domains(dev); + modeset_update_crtc_power_domains(pipe_config->base.state); /* Set up the DPLL and any encoders state that needs to adjust or depend * on the DPLL. 
@@ -11306,6 +11591,22 @@ done: if (ret && crtc->state->enable) crtc->mode = *saved_mode; + if (ret == 0 && pipe_config) { + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + + /* The pipe_config will be freed with the atomic state, so + * make a copy. */ + memcpy(crtc_state_copy, intel_crtc->config, + sizeof *crtc_state_copy); + intel_crtc->config = crtc_state_copy; + intel_crtc->base.state = &crtc_state_copy->base; + + if (modeset_pipes) + intel_crtc->new_config = intel_crtc->config; + } else { + kfree(crtc_state_copy); + } + kfree(saved_mode); return ret; } @@ -11331,27 +11632,81 @@ static int intel_set_mode_pipes(struct drm_crtc *crtc, static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, - int x, int y, struct drm_framebuffer *fb) + int x, int y, struct drm_framebuffer *fb, + struct drm_atomic_state *state) { struct intel_crtc_state *pipe_config; unsigned modeset_pipes, prepare_pipes, disable_pipes; + int ret = 0; - pipe_config = intel_modeset_compute_config(crtc, mode, fb, + pipe_config = intel_modeset_compute_config(crtc, mode, fb, state, &modeset_pipes, &prepare_pipes, &disable_pipes); - if (IS_ERR(pipe_config)) - return PTR_ERR(pipe_config); + if (IS_ERR(pipe_config)) { + ret = PTR_ERR(pipe_config); + goto out; + } + + ret = intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config, + modeset_pipes, prepare_pipes, + disable_pipes); + if (ret) + goto out; - return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config, - modeset_pipes, prepare_pipes, - disable_pipes); +out: + return ret; } void intel_crtc_restore_mode(struct drm_crtc *crtc) { - intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb); + struct drm_device *dev = crtc->dev; + struct drm_atomic_state *state; + struct intel_encoder *encoder; + struct intel_connector *connector; + struct drm_connector_state *connector_state; + + state = drm_atomic_state_alloc(dev); + if (!state) { + DRM_DEBUG_KMS("[CRTC:%d] mode restore failed, out of memory", + crtc->base.id); + return; + } + + state->acquire_ctx = dev->mode_config.acquire_ctx; + + /* The force restore path in the HW readout code relies on the staged + * config still keeping the user requested config while the actual + * state has been overwritten by the configuration read from HW. We + * need to copy the staged config to the atomic state, otherwise the + * mode set will just reapply the state the HW is already in. 
*/ + for_each_intel_encoder(dev, encoder) { + if (&encoder->new_crtc->base != crtc) + continue; + + for_each_intel_connector(dev, connector) { + if (connector->new_encoder != encoder) + continue; + + connector_state = drm_atomic_get_connector_state(state, &connector->base); + if (IS_ERR(connector_state)) { + DRM_DEBUG_KMS("Failed to add [CONNECTOR:%d:%s] to state: %ld\n", + connector->base.base.id, + connector->base.name, + PTR_ERR(connector_state)); + continue; + } + + connector_state->crtc = crtc; + connector_state->best_encoder = &encoder->base; + } + } + + intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb, + state); + + drm_atomic_state_free(state); } #undef for_each_intel_crtc_masked @@ -11520,9 +11875,11 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set, static int intel_modeset_stage_output_state(struct drm_device *dev, struct drm_mode_set *set, - struct intel_set_config *config) + struct intel_set_config *config, + struct drm_atomic_state *state) { struct intel_connector *connector; + struct drm_connector_state *connector_state; struct intel_encoder *encoder; struct intel_crtc *crtc; int ro; @@ -11586,6 +11943,14 @@ intel_modeset_stage_output_state(struct drm_device *dev, } connector->new_encoder->new_crtc = to_intel_crtc(new_crtc); + connector_state = + drm_atomic_get_connector_state(state, &connector->base); + if (IS_ERR(connector_state)) + return PTR_ERR(connector_state); + + connector_state->crtc = new_crtc; + connector_state->best_encoder = &connector->new_encoder->base; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n", connector->base.base.id, connector->base.name, @@ -11618,9 +11983,17 @@ intel_modeset_stage_output_state(struct drm_device *dev, } /* Now we've also updated encoder->new_crtc for all encoders. */ for_each_intel_connector(dev, connector) { - if (connector->new_encoder) + connector_state = + drm_atomic_get_connector_state(state, &connector->base); + if (IS_ERR(connector_state)) + return PTR_ERR(connector_state); + + if (connector->new_encoder) { if (connector->new_encoder != connector->encoder) connector->encoder = connector->new_encoder; + } else { + connector_state->crtc = NULL; + } } for_each_intel_crtc(dev, crtc) { crtc->new_enabled = false; @@ -11676,6 +12049,7 @@ static int intel_crtc_set_config(struct drm_mode_set *set) { struct drm_device *dev; struct drm_mode_set save_set; + struct drm_atomic_state *state = NULL; struct intel_set_config *config; struct intel_crtc_state *pipe_config; unsigned modeset_pipes, prepare_pipes, disable_pipes; @@ -11720,12 +12094,20 @@ static int intel_crtc_set_config(struct drm_mode_set *set) * such cases. 
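intel_crtc_restore_mode() above walks the staged legacy pointers (encoder->new_crtc, connector->new_encoder) and mirrors them into a freshly allocated drm_atomic_state, so the forced modeset applies the user-requested configuration instead of re-applying what was just read back from hardware. Reduced to simplified stand-in types, the mirroring step looks roughly like this:

struct crtc;
struct encoder;

struct connector_state {
	struct crtc *crtc;
	struct encoder *best_encoder;
};

struct connector {
	struct encoder *new_encoder;	/* staged (legacy) link */
	struct crtc *new_crtc;		/* crtc the staged encoder targets */
	struct connector_state *state;	/* atomic shadow of the same link */
};

/* Copy the staged linkage into the atomic connector state. */
static void stage_to_atomic(struct connector *c)
{
	if (c->new_encoder) {
		c->state->best_encoder = c->new_encoder;
		c->state->crtc = c->new_crtc;
	} else {
		c->state->best_encoder = NULL;
		c->state->crtc = NULL;
	}
}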
*/ intel_set_config_compute_mode_changes(set, config); - ret = intel_modeset_stage_output_state(dev, set, config); + state = drm_atomic_state_alloc(dev); + if (!state) { + ret = -ENOMEM; + goto out_config; + } + + state->acquire_ctx = dev->mode_config.acquire_ctx; + + ret = intel_modeset_stage_output_state(dev, set, config, state); if (ret) goto fail; pipe_config = intel_modeset_compute_config(set->crtc, set->mode, - set->fb, + set->fb, state, &modeset_pipes, &prepare_pipes, &disable_pipes); @@ -11745,10 +12127,6 @@ static int intel_crtc_set_config(struct drm_mode_set *set) */ } - /* set_mode will free it in the mode_changed case */ - if (!config->mode_changed) - kfree(pipe_config); - intel_update_pipe_size(to_intel_crtc(set->crtc)); if (config->mode_changed) { @@ -11794,6 +12172,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set) fail: intel_set_config_restore_state(dev, config); + drm_atomic_state_clear(state); + /* * HACK: if the pipe was on, but we didn't have a framebuffer, * force the pipe off to avoid oopsing in the modeset code @@ -11806,11 +12186,15 @@ fail: /* Try to restore the config */ if (config->mode_changed && intel_set_mode(save_set.crtc, save_set.mode, - save_set.x, save_set.y, save_set.fb)) + save_set.x, save_set.y, save_set.fb, + state)) DRM_ERROR("failed to restore config after modeset failure\n"); } out_config: + if (state) + drm_atomic_state_free(state); + intel_set_config_free(config); return ret; } @@ -11925,6 +12309,28 @@ static void intel_shared_dpll_init(struct drm_device *dev) } /** + * intel_wm_need_update - Check whether watermarks need updating + * @plane: drm plane + * @state: new plane state + * + * Check current plane state versus the new one to determine whether + * watermarks need to be recalculated. + * + * Returns true or false. + */ +bool intel_wm_need_update(struct drm_plane *plane, + struct drm_plane_state *state) +{ + /* Update watermarks on tiling changes. */ + if (!plane->state->fb || !state->fb || + plane->state->fb->modifier[0] != state->fb->modifier[0] || + plane->state->rotation != state->rotation) + return true; + + return false; +} + +/** * intel_prepare_plane_fb - Prepare fb for usage on plane * @plane: drm plane to prepare for * @fb: framebuffer to prepare for presentation @@ -11973,7 +12379,7 @@ intel_prepare_plane_fb(struct drm_plane *plane, if (ret) DRM_DEBUG_KMS("failed to attach phys object\n"); } else { - ret = intel_pin_and_fence_fb_obj(plane, fb, NULL); + ret = intel_pin_and_fence_fb_obj(plane, fb, new_state, NULL); } if (ret == 0) @@ -12005,7 +12411,7 @@ intel_cleanup_plane_fb(struct drm_plane *plane, if (plane->type != DRM_PLANE_TYPE_CURSOR || !INTEL_INFO(dev)->cursor_needs_physical) { mutex_lock(&dev->struct_mutex); - intel_unpin_fb_obj(obj); + intel_unpin_fb_obj(fb, old_state); mutex_unlock(&dev->struct_mutex); } } @@ -12070,10 +12476,7 @@ intel_check_primary_plane(struct drm_plane *plane, intel_crtc->atomic.update_fbc = true; - /* Update watermarks on tiling changes. 
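intel_wm_need_update() above folds the old open-coded tiling test into one predicate and extends it to rotation: watermarks must be recomputed when either framebuffer is absent, when the tiling modifier differs, or when the rotation differs. The same check over simplified plane-state structs, compilable on its own:

#include <stdbool.h>
#include <stdint.h>

struct fb { uint64_t modifier; };

struct plane_state {
	struct fb *fb;
	unsigned int rotation;
};

/* True whenever a tiling or rotation change invalidates the watermarks. */
static bool wm_need_update(const struct plane_state *old,
			   const struct plane_state *new)
{
	if (!old->fb || !new->fb)
		return true;
	if (old->fb->modifier != new->fb->modifier)
		return true;
	return old->rotation != new->rotation;
}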
*/ - if (!plane->state->fb || !state->base.fb || - plane->state->fb->modifier[0] != - state->base.fb->modifier[0]) + if (intel_wm_need_update(plane, &state->base)) intel_crtc->atomic.update_wm = true; } @@ -12089,8 +12492,6 @@ intel_commit_primary_plane(struct drm_plane *plane, struct drm_device *dev = plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); - struct intel_plane *intel_plane = to_intel_plane(plane); struct drm_rect *src = &state->src; crtc = crtc ? crtc : plane->crtc; @@ -12100,8 +12501,6 @@ intel_commit_primary_plane(struct drm_plane *plane, crtc->x = src->x1 >> 16; crtc->y = src->y1 >> 16; - intel_plane->obj = obj; - if (intel_crtc->active) { if (state->visible) { /* FIXME: kill this fastboot hack */ @@ -12365,7 +12764,6 @@ intel_commit_cursor_plane(struct drm_plane *plane, struct drm_crtc *crtc = state->base.crtc; struct drm_device *dev = plane->dev; struct intel_crtc *intel_crtc; - struct intel_plane *intel_plane = to_intel_plane(plane); struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb); uint32_t addr; @@ -12376,8 +12774,6 @@ intel_commit_cursor_plane(struct drm_plane *plane, crtc->cursor_x = state->base.crtc_x; crtc->cursor_y = state->base.crtc_y; - intel_plane->obj = obj; - if (intel_crtc->cursor_bo == obj) goto update; @@ -12758,19 +13154,21 @@ static void intel_setup_outputs(struct drm_device *dev) * testing/debug of the plane operations (and only when a specific * kernel module option is given), that shouldn't really matter. * + * We are also relying on these states to convert the legacy mode set + * to use a drm_atomic_state struct. The states are kept consistent + * with actual state, so that it is safe to rely on that instead of + * the staged config. + * * Once atomic support for crtc's + connectors lands, this loop should * be removed since we'll be setting up real connector state, which * will contain Intel-specific properties. */ - if (drm_core_check_feature(dev, DRIVER_ATOMIC)) { - list_for_each_entry(connector, - &dev->mode_config.connector_list, - head) { - if (!WARN_ON(connector->state)) { - connector->state = - kzalloc(sizeof(*connector->state), - GFP_KERNEL); - } + list_for_each_entry(connector, + &dev->mode_config.connector_list, + head) { + if (!WARN_ON(connector->state)) { + connector->state = kzalloc(sizeof(*connector->state), + GFP_KERNEL); } } @@ -12849,7 +13247,7 @@ static int intel_framebuffer_init(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_i915_gem_object *obj) { - int aligned_height; + unsigned int aligned_height; int ret; u32 pitch_limit, stride_alignment; @@ -12885,8 +13283,8 @@ static int intel_framebuffer_init(struct drm_device *dev, case I915_FORMAT_MOD_X_TILED: break; default: - DRM_ERROR("Unsupported fb modifier 0x%llx!\n", - mode_cmd->modifier[0]); + DRM_DEBUG("Unsupported fb modifier 0x%llx!\n", + mode_cmd->modifier[0]); return -EINVAL; } @@ -13453,7 +13851,7 @@ void intel_modeset_init(struct drm_device *dev) * If the fb is shared between multiple heads, we'll * just get the first one. 
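Dropping the drm_core_check_feature(dev, DRIVER_ATOMIC) guard means every connector now gets a zeroed drm_connector_state at init time, because the legacy modeset path consumes those states too. The allocate-if-absent pattern, as a self-contained sketch with stand-in types (the real code uses kzalloc and WARNs if a state already exists):

#include <stdlib.h>

struct connector_state { void *crtc; };

struct connector {
	struct connector *next;		/* stand-in for the connector list */
	struct connector_state *state;
};

static void init_connector_states(struct connector *head)
{
	struct connector *c;

	for (c = head; c; c = c->next)
		if (!c->state)	/* allocate only when nothing is there yet */
			c->state = calloc(1, sizeof(*c->state));
}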
*/ - intel_find_plane_obj(crtc, &crtc->plane_config); + intel_find_initial_plane_obj(crtc, &crtc->plane_config); } } } @@ -13479,7 +13877,7 @@ static void intel_enable_pipe_a(struct drm_device *dev) return; if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx)) - intel_release_load_detect_pipe(crt, &load_detect_temp); + intel_release_load_detect_pipe(crt, &load_detect_temp, ctx); } static bool @@ -13823,6 +14221,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, "[setup_hw_state]"); } + intel_modeset_update_connector_atomic_state(dev); + for (i = 0; i < dev_priv->num_shared_dpll; i++) { struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; @@ -13851,8 +14251,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev, struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; - intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, - crtc->primary->fb); + intel_crtc_restore_mode(crtc); } } else { intel_modeset_update_staged_output_state(dev); @@ -13898,6 +14297,7 @@ void intel_modeset_gem_init(struct drm_device *dev) if (intel_pin_and_fence_fb_obj(c->primary, c->primary->fb, + c->primary->state, NULL)) { DRM_ERROR("failed to pin boot fb on pipe %d\n", to_intel_crtc(c)->pipe); diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index ca60060710d2..b70e635ccaf4 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -85,10 +85,12 @@ static const struct dp_link_dpll chv_dpll[] = { { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } } }; /* Skylake supports following rates */ -static const uint32_t gen9_rates[] = { 162000, 216000, 270000, 324000, - 432000, 540000 }; - -static const uint32_t default_rates[] = { 162000, 270000, 540000 }; +static const int gen9_rates[] = { 162000, 216000, 270000, + 324000, 432000, 540000 }; +static const int chv_rates[] = { 162000, 202500, 210000, 216000, + 243000, 270000, 324000, 405000, + 420000, 432000, 540000 }; +static const int default_rates[] = { 162000, 270000, 540000 }; /** * is_edp - is the given port attached to an eDP panel (either CPU or PCH) @@ -123,26 +125,15 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp); static void vlv_steal_power_sequencer(struct drm_device *dev, enum pipe pipe); -int -intel_dp_max_link_bw(struct intel_dp *intel_dp) +static int +intel_dp_max_link_bw(struct intel_dp *intel_dp) { int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE]; - struct drm_device *dev = intel_dp->attached_connector->base.dev; switch (max_link_bw) { case DP_LINK_BW_1_62: case DP_LINK_BW_2_7: - break; - case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ - if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) - /* WaDisableHBR2:skl */ - max_link_bw = DP_LINK_BW_2_7; - else if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || - INTEL_INFO(dev)->gen >= 8) && - intel_dp->dpcd[DP_DPCD_REV] >= 0x12) - max_link_bw = DP_LINK_BW_5_4; - else - max_link_bw = DP_LINK_BW_2_7; + case DP_LINK_BW_5_4: break; default: WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n", @@ -218,7 +209,7 @@ intel_dp_mode_valid(struct drm_connector *connector, target_clock = fixed_mode->clock; } - max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); + max_link_clock = intel_dp_max_link_rate(intel_dp); max_lanes = intel_dp_max_lane_count(intel_dp); max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); @@ -951,8 +942,9 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) size_t txsize, rxsize; int ret; - 
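The intel_dp_aux_transfer() hunk that follows widens the header packing to carry the full 20-bit AUX address (its top four bits share byte 0 with the request nibble) instead of truncating it to 16 bits, and enlarges the read buffer so short-write replies can report how many bytes were accepted. The four-byte header layout, as a standalone helper:

#include <stdint.h>

/* Pack a DP AUX header: request nibble plus 20-bit address. */
static void pack_aux_header(uint8_t txbuf[4], uint8_t request,
			    uint32_t address, uint8_t size)
{
	txbuf[0] = (request << 4) | ((address >> 16) & 0xf);
	txbuf[1] = (address >> 8) & 0xff;
	txbuf[2] = address & 0xff;
	txbuf[3] = size - 1;	/* encoded as length minus one */
}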
txbuf[0] = msg->request << 4; - txbuf[1] = msg->address >> 8; + txbuf[0] = (msg->request << 4) | + ((msg->address >> 16) & 0xf); + txbuf[1] = (msg->address >> 8) & 0xff; txbuf[2] = msg->address & 0xff; txbuf[3] = msg->size - 1; @@ -960,7 +952,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) case DP_AUX_NATIVE_WRITE: case DP_AUX_I2C_WRITE: txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE; - rxsize = 1; + rxsize = 2; /* 0 or 1 data bytes */ if (WARN_ON(txsize > 20)) return -E2BIG; @@ -971,8 +963,13 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) if (ret > 0) { msg->reply = rxbuf[0] >> 4; - /* Return payload size. */ - ret = msg->size; + if (ret > 1) { + /* Number of bytes written in a short write. */ + ret = clamp_t(int, rxbuf[1], 0, msg->size); + } else { + /* Return payload size. */ + ret = msg->size; + } } break; @@ -1142,49 +1139,39 @@ hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw) } static int -intel_read_sink_rates(struct intel_dp *intel_dp, uint32_t *sink_rates) +intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); - int i = 0; - uint16_t val; - - if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) { - /* - * Receiver supports only main-link rate selection by - * link rate table method, so read link rates from - * supported_link_rates - */ - for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i) { - val = le16_to_cpu(intel_dp->supported_rates[i]); - if (val == 0) - break; + if (intel_dp->num_sink_rates) { + *sink_rates = intel_dp->sink_rates; + return intel_dp->num_sink_rates; + } - sink_rates[i] = val * 200; - } + *sink_rates = default_rates; - if (i <= 0) - DRM_ERROR("No rates in SUPPORTED_LINK_RATES"); - } - return i; + return (intel_dp_max_link_bw(intel_dp) >> 3) + 1; } static int -intel_read_source_rates(struct intel_dp *intel_dp, uint32_t *source_rates) +intel_dp_source_rates(struct drm_device *dev, const int **source_rates) { - struct drm_device *dev = intel_dp_to_dev(intel_dp); - int i; - int max_default_rate; - - if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) { - for (i = 0; i < ARRAY_SIZE(gen9_rates); ++i) - source_rates[i] = gen9_rates[i]; - } else { - /* Index of the max_link_bw supported + 1 */ - max_default_rate = (intel_dp_max_link_bw(intel_dp) >> 3) + 1; - for (i = 0; i < max_default_rate; ++i) - source_rates[i] = default_rates[i]; + if (INTEL_INFO(dev)->gen >= 9) { + *source_rates = gen9_rates; + return ARRAY_SIZE(gen9_rates); + } else if (IS_CHERRYVIEW(dev)) { + *source_rates = chv_rates; + return ARRAY_SIZE(chv_rates); } - return i; + + *source_rates = default_rates; + + if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) + /* WaDisableHBR2:skl */ + return (DP_LINK_BW_2_7 >> 3) + 1; + else if (INTEL_INFO(dev)->gen >= 8 || + (IS_HASWELL(dev) && !IS_HSW_ULX(dev))) + return (DP_LINK_BW_5_4 >> 3) + 1; + else + return (DP_LINK_BW_2_7 >> 3) + 1; } static void @@ -1220,22 +1207,17 @@ intel_dp_set_clock(struct intel_encoder *encoder, } } -static int intel_supported_rates(const uint32_t *source_rates, int source_len, -const uint32_t *sink_rates, int sink_len, uint32_t *supported_rates) +static int intersect_rates(const int *source_rates, int source_len, + const int *sink_rates, int sink_len, + int *common_rates) { int i = 0, j = 0, k = 0; - /* For panels with edp version less than 1.4 */ - if (sink_len == 0) { - for (i = 0; i < source_len; ++i) - supported_rates[i] = 
source_rates[i]; - return source_len; - } - - /* For edp1.4 panels, find the common rates between source and sink */ while (i < source_len && j < sink_len) { if (source_rates[i] == sink_rates[j]) { - supported_rates[k] = source_rates[i]; + if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES)) + return k; + common_rates[k] = source_rates[i]; ++k; ++i; ++j; @@ -1248,7 +1230,62 @@ const uint32_t *sink_rates, int sink_len, uint32_t *supported_rates) return k; } -static int rate_to_index(uint32_t find, const uint32_t *rates) +static int intel_dp_common_rates(struct intel_dp *intel_dp, + int *common_rates) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + const int *source_rates, *sink_rates; + int source_len, sink_len; + + sink_len = intel_dp_sink_rates(intel_dp, &sink_rates); + source_len = intel_dp_source_rates(dev, &source_rates); + + return intersect_rates(source_rates, source_len, + sink_rates, sink_len, + common_rates); +} + +static void snprintf_int_array(char *str, size_t len, + const int *array, int nelem) +{ + int i; + + str[0] = '\0'; + + for (i = 0; i < nelem; i++) { + int r = snprintf(str, len, "%d,", array[i]); + if (r >= len) + return; + str += r; + len -= r; + } +} + +static void intel_dp_print_rates(struct intel_dp *intel_dp) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + const int *source_rates, *sink_rates; + int source_len, sink_len, common_len; + int common_rates[DP_MAX_SUPPORTED_RATES]; + char str[128]; /* FIXME: too big for stack? */ + + if ((drm_debug & DRM_UT_KMS) == 0) + return; + + source_len = intel_dp_source_rates(dev, &source_rates); + snprintf_int_array(str, sizeof(str), source_rates, source_len); + DRM_DEBUG_KMS("source rates: %s\n", str); + + sink_len = intel_dp_sink_rates(intel_dp, &sink_rates); + snprintf_int_array(str, sizeof(str), sink_rates, sink_len); + DRM_DEBUG_KMS("sink rates: %s\n", str); + + common_len = intel_dp_common_rates(intel_dp, common_rates); + snprintf_int_array(str, sizeof(str), common_rates, common_len); + DRM_DEBUG_KMS("common rates: %s\n", str); +} + +static int rate_to_index(int find, const int *rates) { int i = 0; @@ -1259,6 +1296,24 @@ static int rate_to_index(uint32_t find, const uint32_t *rates) return i; } +int +intel_dp_max_link_rate(struct intel_dp *intel_dp) +{ + int rates[DP_MAX_SUPPORTED_RATES] = {}; + int len; + + len = intel_dp_common_rates(intel_dp, rates); + if (WARN_ON(len <= 0)) + return 162000; + + return rates[rate_to_index(0, rates) - 1]; +} + +int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) +{ + return rate_to_index(rate, intel_dp->sink_rates); +} + bool intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) @@ -1268,7 +1323,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); enum port port = dp_to_dig_port(intel_dp)->port; - struct intel_crtc *intel_crtc = encoder->new_crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); struct intel_connector *intel_connector = intel_dp->attached_connector; int lane_count, clock; int min_lane_count = 1; @@ -1278,22 +1333,15 @@ intel_dp_compute_config(struct intel_encoder *encoder, int max_clock; int bpp, mode_rate; int link_avail, link_clock; - uint32_t sink_rates[8]; - uint32_t supported_rates[8] = {0}; - uint32_t source_rates[8]; - int source_len, sink_len, supported_len; - - sink_len = intel_read_sink_rates(intel_dp, sink_rates); + int 
common_rates[DP_MAX_SUPPORTED_RATES] = {}; + int common_len; - source_len = intel_read_source_rates(intel_dp, source_rates); - - supported_len = intel_supported_rates(source_rates, source_len, - sink_rates, sink_len, supported_rates); + common_len = intel_dp_common_rates(intel_dp, common_rates); /* No common link rates between source and sink */ - WARN_ON(supported_len <= 0); + WARN_ON(common_len <= 0); - max_clock = supported_len - 1; + max_clock = common_len - 1; if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A) pipe_config->has_pch_encoder = true; @@ -1318,7 +1366,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, DRM_DEBUG_KMS("DP link computation with max lane count %i " "max bw %d pixel clock %iKHz\n", - max_lane_count, supported_rates[max_clock], + max_lane_count, common_rates[max_clock], adjusted_mode->crtc_clock); /* Walk through all bpp values. Luckily they're all nicely spaced with 2 @@ -1351,7 +1399,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, lane_count <= max_lane_count; lane_count <<= 1) { - link_clock = supported_rates[clock]; + link_clock = common_rates[clock]; link_avail = intel_dp_max_data_rate(link_clock, lane_count); @@ -1382,17 +1430,18 @@ found: intel_dp->lane_count = lane_count; - intel_dp->link_bw = - drm_dp_link_rate_to_bw_code(supported_rates[clock]); - - if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) { - intel_dp->rate_select = - rate_to_index(supported_rates[clock], sink_rates); + if (intel_dp->num_sink_rates) { intel_dp->link_bw = 0; + intel_dp->rate_select = + intel_dp_rate_select(intel_dp, common_rates[clock]); + } else { + intel_dp->link_bw = + drm_dp_link_rate_to_bw_code(common_rates[clock]); + intel_dp->rate_select = 0; } pipe_config->pipe_bpp = bpp; - pipe_config->port_clock = supported_rates[clock]; + pipe_config->port_clock = common_rates[clock]; DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", intel_dp->link_bw, intel_dp->lane_count, @@ -1415,7 +1464,7 @@ found: } if (IS_SKYLAKE(dev) && is_edp(intel_dp)) - skl_edp_set_pll_config(pipe_config, supported_rates[clock]); + skl_edp_set_pll_config(pipe_config, common_rates[clock]); else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw); else @@ -3502,7 +3551,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); - if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) + if (intel_dp->num_sink_rates) drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET, &intel_dp->rate_select, 1); @@ -3754,11 +3803,27 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) && (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) && (rev >= 0x03)) { /* eDp v1.4 or higher */ + __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; + int i; + intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SUPPORTED_LINK_RATES, - intel_dp->supported_rates, - sizeof(intel_dp->supported_rates)); + sink_rates, + sizeof(sink_rates)); + + for (i = 0; i < ARRAY_SIZE(sink_rates); i++) { + int val = le16_to_cpu(sink_rates[i]); + + if (val == 0) + break; + + intel_dp->sink_rates[i] = val * 200; + } + intel_dp->num_sink_rates = i; } + + intel_dp_print_rates(intel_dp); + if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) return true; /* native DP sink */ @@ -4548,6 +4613,7 @@ 
static const struct drm_connector_funcs intel_dp_connector_funcs = { .atomic_get_property = intel_connector_atomic_get_property, .destroy = intel_dp_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, }; static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { @@ -4934,7 +5000,7 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) dig_port = dp_to_dig_port(intel_dp); encoder = &dig_port->base; - intel_crtc = encoder->new_crtc; + intel_crtc = to_intel_crtc(encoder->base.crtc); if (!intel_crtc) { DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n"); @@ -5272,8 +5338,6 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, struct edid *edid; enum pipe pipe = INVALID_PIPE; - dev_priv->drrs.type = DRRS_NOT_SUPPORTED; - if (!is_edp(intel_dp)) return true; diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index be124928ca14..5329c855acce 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -36,11 +36,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, struct intel_dp_mst_encoder *intel_mst = enc_to_mst(&encoder->base); struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; - struct drm_device *dev = encoder->base.dev; - int bpp; - int lane_count, slots; + struct drm_atomic_state *state; + int bpp, i; + int lane_count, slots, rate; struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; - struct intel_connector *found = NULL, *intel_connector; + struct intel_connector *found = NULL; int mst_pbn; pipe_config->dp_encoder_is_mst = true; @@ -52,15 +52,30 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, * seem to suggest we should do otherwise. 
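The link-rate refactor above replaces the old link_bw-only bookkeeping with plain integer rate lists: intel_dp_sink_rates() and intel_dp_source_rates() each publish a sorted array, intersect_rates() merges them with two cursors, and intel_dp_max_link_rate() simply takes the last common entry. Sinks that expose DP_SUPPORTED_LINK_RATES are then programmed through a rate_select index with link_bw forced to 0, while legacy sinks still get a DP_LINK_BW_* code. The two-pointer intersection, runnable on its own (rates in kHz as in the driver):

#include <stdio.h>

#define MAX_RATES 8

/* Intersect two ascending rate lists with two cursors. */
static int intersect(const int *src, int src_len,
		     const int *snk, int snk_len, int *common)
{
	int i = 0, j = 0, k = 0;

	while (i < src_len && j < snk_len) {
		if (src[i] == snk[j]) {
			if (k >= MAX_RATES)
				break;
			common[k++] = src[i];
			i++;
			j++;
		} else if (src[i] < snk[j]) {
			i++;
		} else {
			j++;
		}
	}
	return k;
}

int main(void)
{
	const int source[] = { 162000, 270000, 540000 };
	const int sink[] = { 162000, 216000, 270000, 324000 };
	int common[MAX_RATES];
	int n = intersect(source, 3, sink, 4, common);

	/* The max link rate is simply the last (largest) common entry. */
	printf("%d common rates, max %d kHz\n", n, n ? common[n - 1] : 162000);
	return 0;
}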
*/ lane_count = drm_dp_max_lane_count(intel_dp->dpcd); - intel_dp->link_bw = intel_dp_max_link_bw(intel_dp); + + rate = intel_dp_max_link_rate(intel_dp); + + if (intel_dp->num_sink_rates) { + intel_dp->link_bw = 0; + intel_dp->rate_select = intel_dp_rate_select(intel_dp, rate); + } else { + intel_dp->link_bw = drm_dp_link_rate_to_bw_code(rate); + intel_dp->rate_select = 0; + } + intel_dp->lane_count = lane_count; pipe_config->pipe_bpp = 24; - pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); + pipe_config->port_clock = rate; - for_each_intel_connector(dev, intel_connector) { - if (intel_connector->new_encoder == encoder) { - found = intel_connector; + state = pipe_config->base.state; + + for (i = 0; i < state->num_connector; i++) { + if (!state->connectors[i]) + continue; + + if (state->connector_states[i]->best_encoder == &encoder->base) { + found = to_intel_connector(state->connectors[i]); break; } } @@ -317,6 +332,7 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { .atomic_get_property = intel_connector_atomic_get_property, .destroy = intel_dp_mst_connector_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, }; static int intel_dp_mst_get_modes(struct drm_connector *connector) diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index c77128c67cf8..6036e3b73b7b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -35,6 +35,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_dp_mst_helper.h> #include <drm/drm_rect.h> +#include <drm/drm_atomic.h> #define DIV_ROUND_CLOSEST_ULL(ll, d) \ ({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; }) @@ -56,8 +57,8 @@ ret__ = -ETIMEDOUT; \ break; \ } \ - if (W && drm_can_sleep()) { \ - msleep(W); \ + if ((W) && drm_can_sleep()) { \ + usleep_range((W)*1000, (W)*2000); \ } else { \ cpu_relax(); \ } \ @@ -501,16 +502,19 @@ struct intel_plane_wm_parameters { bool enabled; bool scaled; u64 tiling; + unsigned int rotation; }; struct intel_plane { struct drm_plane base; int plane; enum pipe pipe; - struct drm_i915_gem_object *obj; bool can_scale; int max_downscale; + /* FIXME convert to properties */ + struct drm_intel_sprite_colorkey ckey; + /* Since we need to change the watermarks before/after * enabling/disabling the planes, we need to store the parameters here * as the other pieces of the struct may not reflect the values we want @@ -527,7 +531,6 @@ struct intel_plane { void (*update_plane)(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t x, uint32_t y, @@ -538,10 +541,6 @@ struct intel_plane { struct intel_plane_state *state); void (*commit_plane)(struct drm_plane *plane, struct intel_plane_state *state); - int (*update_colorkey)(struct drm_plane *plane, - struct drm_intel_sprite_colorkey *key); - void (*get_colorkey)(struct drm_plane *plane, - struct drm_intel_sprite_colorkey *key); }; struct intel_watermark_params { @@ -564,6 +563,7 @@ struct cxsr_latency { }; #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) +#define to_intel_crtc_state(x) container_of(x, struct intel_crtc_state, base) #define to_intel_connector(x) container_of(x, struct intel_connector, base) #define to_intel_encoder(x) container_of(x, struct intel_encoder, base) #define to_intel_framebuffer(x) container_of(x, 
struct intel_framebuffer, base) @@ -627,7 +627,9 @@ struct intel_dp { uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; - __le16 supported_rates[DP_MAX_SUPPORTED_RATES]; + /* sink rates as reported by DP_SUPPORTED_LINK_RATES */ + uint8_t num_sink_rates; + int sink_rates[DP_MAX_SUPPORTED_RATES]; struct drm_dp_aux aux; uint8_t train_set[4]; int panel_power_up_delay; @@ -902,9 +904,10 @@ void intel_frontbuffer_flip(struct drm_device *dev, intel_frontbuffer_flush(dev, frontbuffer_bits); } -int intel_fb_align_height(struct drm_device *dev, int height, - uint32_t pixel_format, - uint64_t fb_format_modifier); +unsigned int intel_fb_align_height(struct drm_device *dev, + unsigned int height, + uint32_t pixel_format, + uint64_t fb_format_modifier); void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire); u32 intel_fb_stride_alignment(struct drm_device *dev, uint64_t fb_modifier, @@ -956,9 +959,11 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector, struct intel_load_detect_pipe *old, struct drm_modeset_acquire_ctx *ctx); void intel_release_load_detect_pipe(struct drm_connector *connector, - struct intel_load_detect_pipe *old); + struct intel_load_detect_pipe *old, + struct drm_modeset_acquire_ctx *ctx); int intel_pin_and_fence_fb_obj(struct drm_plane *plane, struct drm_framebuffer *fb, + const struct drm_plane_state *plane_state, struct intel_engine_cs *pipelined); struct drm_framebuffer * __intel_framebuffer_create(struct drm_device *dev, @@ -983,6 +988,19 @@ int intel_plane_atomic_set_property(struct drm_plane *plane, struct drm_property *property, uint64_t val); +unsigned int +intel_tile_height(struct drm_device *dev, uint32_t pixel_format, + uint64_t fb_format_modifier); + +static inline bool +intel_rotation_90_or_270(unsigned int rotation) +{ + return rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270)); +} + +bool intel_wm_need_update(struct drm_plane *plane, + struct drm_plane_state *state); + /* shared dpll functions */ struct intel_shared_dpll *intel_crtc_to_shared_dpll(struct intel_crtc *crtc); void assert_shared_dpll(struct drm_i915_private *dev_priv, @@ -1037,6 +1055,9 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode, void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc); void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file); +unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane, + struct drm_i915_gem_object *obj); + /* intel_dp.c */ void intel_dp_init(struct drm_device *dev, int output_reg, enum port port); bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port, @@ -1060,7 +1081,8 @@ void intel_edp_panel_off(struct intel_dp *intel_dp); void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector); void intel_dp_mst_suspend(struct drm_device *dev); void intel_dp_mst_resume(struct drm_device *dev); -int intel_dp_max_link_bw(struct intel_dp *intel_dp); +int intel_dp_max_link_rate(struct intel_dp *intel_dp); +int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); void intel_dp_hot_plug(struct intel_encoder *intel_encoder); void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv); uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes); @@ -1239,6 +1261,8 @@ void intel_disable_gt_powersave(struct drm_device *dev); void intel_suspend_gt_powersave(struct drm_device *dev); void intel_reset_gt_powersave(struct drm_device *dev); void 
gen6_update_ring_freq(struct drm_device *dev); +void gen6_rps_busy(struct drm_i915_private *dev_priv); +void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); void gen6_rps_idle(struct drm_i915_private *dev_priv); void gen6_rps_boost(struct drm_i915_private *dev_priv); void ilk_wm_get_hw_state(struct drm_device *dev); @@ -1258,8 +1282,6 @@ void intel_flush_primary_plane(struct drm_i915_private *dev_priv, int intel_plane_restore(struct drm_plane *plane); int intel_sprite_set_colorkey(struct drm_device *dev, void *data, struct drm_file *file_priv); -int intel_sprite_get_colorkey(struct drm_device *dev, void *data, - struct drm_file *file_priv); bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count); void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count); @@ -1282,6 +1304,17 @@ int intel_connector_atomic_get_property(struct drm_connector *connector, struct drm_crtc_state *intel_crtc_duplicate_state(struct drm_crtc *crtc); void intel_crtc_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *state); +static inline struct intel_crtc_state * +intel_atomic_get_crtc_state(struct drm_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_crtc_state *crtc_state; + crtc_state = drm_atomic_get_crtc_state(state, &crtc->base); + if (IS_ERR(crtc_state)) + return ERR_PTR(PTR_ERR(crtc_state)); + + return to_intel_crtc_state(crtc_state); +} /* intel_atomic_plane.c */ struct intel_plane_state *intel_create_plane_state(struct drm_plane *plane); diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c index c8c8b24e300c..572251e9810b 100644 --- a/drivers/gpu/drm/i915/intel_dsi.c +++ b/drivers/gpu/drm/i915/intel_dsi.c @@ -975,6 +975,7 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_connector_atomic_get_property, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, }; void intel_dsi_init(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index d8579510beb0..4ccd6c3f133d 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -393,6 +393,7 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = { .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_connector_atomic_get_property, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, }; static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 9fcf446e95f5..4165ce0644f7 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -521,7 +521,7 @@ void intel_fbc_update(struct drm_device *dev) goto out_disable; } - if (!i915.enable_fbc || !i915.powersave) { + if (!i915.enable_fbc) { if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM)) DRM_DEBUG_KMS("fbc disabled per module param\n"); goto out_disable; diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 757c0d216f80..4e7e7da2e03b 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -151,7 +151,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper, } /* Flush everything out, we'll be doing GTT only from now on 
*/ - ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL); + ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL, NULL); if (ret) { DRM_ERROR("failed to pin obj: %d\n", ret); goto out_fb; diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c index 0a1bac8ac72b..a20cffb78c0f 100644 --- a/drivers/gpu/drm/i915/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/intel_frontbuffer.c @@ -110,9 +110,6 @@ static void intel_mark_fb_busy(struct drm_device *dev, struct drm_i915_private *dev_priv = dev->dev_private; enum pipe pipe; - if (!i915.powersave) - return; - for_each_pipe(dev_priv, pipe) { if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe))) continue; diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 995c5b261f4f..cacbafdad3ab 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -951,19 +951,30 @@ intel_hdmi_mode_valid(struct drm_connector *connector, return MODE_OK; } -static bool hdmi_12bpc_possible(struct intel_crtc *crtc) +static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state) { - struct drm_device *dev = crtc->base.dev; + struct drm_device *dev = crtc_state->base.crtc->dev; + struct drm_atomic_state *state; struct intel_encoder *encoder; + struct drm_connector_state *connector_state; int count = 0, count_hdmi = 0; + int i; if (HAS_GMCH_DISPLAY(dev)) return false; - for_each_intel_encoder(dev, encoder) { - if (encoder->new_crtc != crtc) + state = crtc_state->base.state; + + for (i = 0; i < state->num_connector; i++) { + if (!state->connectors[i]) continue; + connector_state = state->connector_states[i]; + if (connector_state->crtc != crtc_state->base.crtc) + continue; + + encoder = to_intel_encoder(connector_state->best_encoder); + count_hdmi += encoder->type == INTEL_OUTPUT_HDMI; count++; } @@ -1020,7 +1031,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, */ if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink && clock_12bpc <= portclock_limit && - hdmi_12bpc_possible(encoder->new_crtc)) { + hdmi_12bpc_possible(pipe_config)) { DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); desired_bpp = 12*3; @@ -1618,6 +1629,7 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = { .atomic_get_property = intel_connector_atomic_get_property, .destroy = intel_hdmi_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, }; static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 24e8730dc189..06d2da336f7c 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -286,7 +286,7 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder, struct intel_connector *intel_connector = &lvds_encoder->attached_connector->base; struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; - struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc; + struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc); unsigned int lvds_bpp; /* Should never happen!! 
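The to_intel_crtc_state() macro and the intel_atomic_get_crtc_state() inline added to intel_drv.h above both rest on container_of(): drm_crtc_state is embedded at the start of intel_crtc_state, so the pointer handed out by the core can be converted back to the wrapping driver state by subtracting the member offset. A freestanding illustration of the cast:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct crtc_state { int enable; };	/* stand-in for drm_crtc_state */

struct intel_state {			/* stand-in for intel_crtc_state */
	struct crtc_state base;		/* embedded core state */
	int pipe_bpp;			/* driver extension */
};

int main(void)
{
	struct intel_state s = { .base.enable = 1, .pipe_bpp = 24 };
	struct crtc_state *core = &s.base;	/* what the core hands back */
	struct intel_state *drv = container_of(core, struct intel_state, base);

	printf("%d\n", drv->pipe_bpp);		/* prints 24 */
	return 0;
}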
*/ @@ -535,6 +535,7 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = { .atomic_get_property = intel_connector_atomic_get_property, .destroy = intel_lvds_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, }; static const struct drm_encoder_funcs intel_lvds_enc_funcs = { diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index 823d1d97a000..dd92122ed95c 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -720,7 +720,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, if (ret != 0) return ret; - ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL); + ret = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL, + &i915_ggtt_view_normal); if (ret != 0) return ret; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 288c9d24098e..fa4ccb346389 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2840,6 +2840,7 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc, } p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w; p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h; + p->plane[0].rotation = crtc->primary->state->rotation; fb = crtc->cursor->state->fb; if (fb) { @@ -2897,7 +2898,21 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, if (p_params->tiling == I915_FORMAT_MOD_Y_TILED || p_params->tiling == I915_FORMAT_MOD_Yf_TILED) { - uint32_t y_tile_minimum = plane_blocks_per_line * 4; + uint32_t min_scanlines = 4; + uint32_t y_tile_minimum; + if (intel_rotation_90_or_270(p_params->rotation)) { + switch (p_params->bytes_per_pixel) { + case 1: + min_scanlines = 16; + break; + case 2: + min_scanlines = 8; + break; + case 8: + WARN(1, "Unsupported pixel depth for rotation"); + } + } + y_tile_minimum = plane_blocks_per_line * min_scanlines; selected_result = max(method2, y_tile_minimum); } else { if ((ddb_allocation / plane_blocks_per_line) >= 1) @@ -3357,6 +3372,7 @@ skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc, */ if (fb) intel_plane->wm.tiling = fb->modifier[0]; + intel_plane->wm.rotation = plane->state->rotation; skl_update_wm(crtc); } @@ -3855,9 +3871,9 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) break; } /* Max/min bins are special */ - if (val == dev_priv->rps.min_freq_softlimit) + if (val <= dev_priv->rps.min_freq_softlimit) new_power = LOW_POWER; - if (val == dev_priv->rps.max_freq_softlimit) + if (val >= dev_priv->rps.max_freq_softlimit) new_power = HIGH_POWER; if (new_power == dev_priv->rps.power) return; @@ -3922,11 +3938,10 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) u32 mask = 0; if (val > dev_priv->rps.min_freq_softlimit) - mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; + mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; if (val < dev_priv->rps.max_freq_softlimit) - mask |= GEN6_PM_RP_UP_THRESHOLD; + mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; - mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED); mask &= dev_priv->pm_rps_events; return gen6_sanitize_rps_pm_mask(dev_priv, ~mask); @@ -3940,8 +3955,8 @@ static void gen6_set_rps(struct drm_device *dev, u8 val) struct drm_i915_private *dev_priv = dev->dev_private; 
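In the skl_compute_plane_wm() hunk above, 90/270-degree rotation of a Y/Yf-tiled surface raises the minimum fetch from the fixed 4 scanlines to 16 or 8 for 1 and 2 bytes per pixel (8 bpp is flagged unsupported, 4 bpp keeps the default), and y_tile_minimum scales with it. Condensed into a standalone helper with the same table:

#include <stdint.h>

/* Minimum scanlines to fetch for Y-tiled watermark calculations. */
static uint32_t min_scanlines(int rotated_90_or_270, int bytes_per_pixel)
{
	if (!rotated_90_or_270)
		return 4;

	switch (bytes_per_pixel) {
	case 1:
		return 16;
	case 2:
		return 8;
	case 8:	/* unsupported; the driver WARNs and leaves the default */
	default:
		return 4;	/* 4 bpp keeps the default of 4 */
	}
}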
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); - WARN_ON(val > dev_priv->rps.max_freq_softlimit); - WARN_ON(val < dev_priv->rps.min_freq_softlimit); + WARN_ON(val > dev_priv->rps.max_freq); + WARN_ON(val < dev_priv->rps.min_freq); /* min/max delay may still have been modified so be sure to * write the limits value. @@ -3979,8 +3994,8 @@ static void valleyview_set_rps(struct drm_device *dev, u8 val) struct drm_i915_private *dev_priv = dev->dev_private; WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); - WARN_ON(val > dev_priv->rps.max_freq_softlimit); - WARN_ON(val < dev_priv->rps.min_freq_softlimit); + WARN_ON(val > dev_priv->rps.max_freq); + WARN_ON(val < dev_priv->rps.min_freq); if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1), "Odd GPU freq value\n")) @@ -4007,10 +4022,11 @@ static void valleyview_set_rps(struct drm_device *dev, u8 val) static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) { struct drm_device *dev = dev_priv->dev; + u32 val = dev_priv->rps.idle_freq; /* CHV and latest VLV don't need to force the gfx clock */ if (IS_CHERRYVIEW(dev) || dev->pdev->revision >= 0xd) { - valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); + valleyview_set_rps(dev_priv->dev, val); return; } @@ -4018,7 +4034,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) * When we are idle. Drop to min voltage state. */ - if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit) + if (dev_priv->rps.cur_freq <= val) return; /* Mask turbo interrupt so that they will not come in between */ @@ -4027,10 +4043,9 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) vlv_force_gfx_clock(dev_priv, true); - dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit; + dev_priv->rps.cur_freq = val; - vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, - dev_priv->rps.min_freq_softlimit); + vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS)) & GENFREQSTATUS) == 0, 100)) @@ -4038,8 +4053,19 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) vlv_force_gfx_clock(dev_priv, false); - I915_WRITE(GEN6_PMINTRMSK, - gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); + I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); +} + +void gen6_rps_busy(struct drm_i915_private *dev_priv) +{ + mutex_lock(&dev_priv->rps.hw_lock); + if (dev_priv->rps.enabled) { + if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) + gen6_rps_reset_ei(dev_priv); + I915_WRITE(GEN6_PMINTRMSK, + gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); + } + mutex_unlock(&dev_priv->rps.hw_lock); } void gen6_rps_idle(struct drm_i915_private *dev_priv) @@ -4051,17 +4077,23 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) if (IS_VALLEYVIEW(dev)) vlv_set_rps_idle(dev_priv); else - gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); + gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); dev_priv->rps.last_adj = 0; + I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); } mutex_unlock(&dev_priv->rps.hw_lock); } void gen6_rps_boost(struct drm_i915_private *dev_priv) { + u32 val; + mutex_lock(&dev_priv->rps.hw_lock); - if (dev_priv->rps.enabled) { - intel_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit); + val = dev_priv->rps.max_freq_softlimit; + if (dev_priv->rps.enabled && + dev_priv->mm.busy && + dev_priv->rps.cur_freq < val) { + intel_set_rps(dev_priv->dev, val); dev_priv->rps.last_adj = 0; } mutex_unlock(&dev_priv->rps.hw_lock); @@ -4209,6 +4241,8 @@ static void 
gen6_init_rps_frequencies(struct drm_device *dev) dev_priv->rps.max_freq); } + dev_priv->rps.idle_freq = dev_priv->rps.min_freq; + /* Preserve min/max settings in case of re-init */ if (dev_priv->rps.max_freq_softlimit == 0) dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; @@ -4375,7 +4409,7 @@ static void gen8_enable_rps(struct drm_device *dev) /* 6: Ring frequency + overclocking (our driver does this later */ dev_priv->rps.power = HIGH_POWER; /* force a reset */ - gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); + gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } @@ -4469,7 +4503,7 @@ static void gen6_enable_rps(struct drm_device *dev) } dev_priv->rps.power = HIGH_POWER; /* force a reset */ - gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); + gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); rc6vids = 0; ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); @@ -4834,6 +4868,8 @@ static void valleyview_init_gt_powersave(struct drm_device *dev) intel_gpu_freq(dev_priv, dev_priv->rps.min_freq), dev_priv->rps.min_freq); + dev_priv->rps.idle_freq = dev_priv->rps.min_freq; + /* Preserve min/max settings in case of re-init */ if (dev_priv->rps.max_freq_softlimit == 0) dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; @@ -4909,6 +4945,8 @@ static void cherryview_init_gt_powersave(struct drm_device *dev) dev_priv->rps.min_freq) & 1, "Odd GPU freq values\n"); + dev_priv->rps.idle_freq = dev_priv->rps.min_freq; + /* Preserve min/max settings in case of re-init */ if (dev_priv->rps.max_freq_softlimit == 0) dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; @@ -5686,6 +5724,13 @@ static void intel_gen6_powersave_work(struct work_struct *work) gen6_enable_rps(dev); __gen6_update_ring_freq(dev); } + + WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq); + WARN_ON(dev_priv->rps.idle_freq > dev_priv->rps.max_freq); + + WARN_ON(dev_priv->rps.efficient_freq < dev_priv->rps.min_freq); + WARN_ON(dev_priv->rps.efficient_freq > dev_priv->rps.max_freq); + dev_priv->rps.enabled = true; gen6_enable_rps_interrupts(dev); diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c index b9f40c2e0af7..a8f9348259ae 100644 --- a/drivers/gpu/drm/i915/intel_psr.c +++ b/drivers/gpu/drm/i915/intel_psr.c @@ -532,8 +532,6 @@ static void intel_psr_exit(struct drm_device *dev) WARN_ON(!(val & EDP_PSR_ENABLE)); I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE); - - dev_priv->psr.active = false; } else { val = I915_READ(VLV_PSRCTL(pipe)); diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 9e554c2cfbb4..f5b7e1e7c5e0 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -2194,6 +2194,7 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = { .atomic_get_property = intel_connector_atomic_get_property, .destroy = intel_sdvo_destroy, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, }; static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index a82873631851..e9ff6fc61267 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -179,7 +179,7 @@ static void intel_update_primary_plane(struct intel_crtc *crtc) static void 
skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, + int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t x, uint32_t y, uint32_t src_w, uint32_t src_h) @@ -187,23 +187,16 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, struct drm_device *dev = drm_plane->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane = to_intel_plane(drm_plane); + struct drm_i915_gem_object *obj = intel_fb_obj(fb); const int pipe = intel_plane->pipe; const int plane = intel_plane->plane + 1; u32 plane_ctl, stride_div; int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); + const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey; + unsigned long surf_addr; - plane_ctl = I915_READ(PLANE_CTL(pipe, plane)); - - /* Mask out pixel format bits in case we change it */ - plane_ctl &= ~PLANE_CTL_FORMAT_MASK; - plane_ctl &= ~PLANE_CTL_ORDER_RGBX; - plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK; - plane_ctl &= ~PLANE_CTL_TILED_MASK; - plane_ctl &= ~PLANE_CTL_ALPHA_MASK; - plane_ctl &= ~PLANE_CTL_ROTATE_MASK; - - /* Trickle feed has to be enabled */ - plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE; + plane_ctl = PLANE_CTL_ENABLE | + PLANE_CTL_PIPE_CSC_ENABLE; switch (fb->pixel_format) { case DRM_FORMAT_RGB565: @@ -264,9 +257,6 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, if (drm_plane->state->rotation == BIT(DRM_ROTATE_180)) plane_ctl |= PLANE_CTL_ROTATE_180; - plane_ctl |= PLANE_CTL_ENABLE; - plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE; - intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h, pixel_size, true, src_w != crtc_w || src_h != crtc_h); @@ -280,12 +270,25 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc, crtc_w--; crtc_h--; + if (key->flags) { + I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value); + I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value); + I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask); + } + + if (key->flags & I915_SET_COLORKEY_DESTINATION) + plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION; + else if (key->flags & I915_SET_COLORKEY_SOURCE) + plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; + + surf_addr = intel_plane_obj_offset(intel_plane, obj); + I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x); I915_WRITE(PLANE_STRIDE(pipe, plane), fb->pitches[0] / stride_div); I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x); I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w); I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl); - I915_WRITE(PLANE_SURF(pipe, plane), i915_gem_obj_ggtt_offset(obj)); + I915_WRITE(PLANE_SURF(pipe, plane), surf_addr); POSTING_READ(PLANE_SURF(pipe, plane)); } @@ -298,73 +301,15 @@ skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc) const int pipe = intel_plane->pipe; const int plane = intel_plane->plane + 1; - I915_WRITE(PLANE_CTL(pipe, plane), - I915_READ(PLANE_CTL(pipe, plane)) & ~PLANE_CTL_ENABLE); + I915_WRITE(PLANE_CTL(pipe, plane), 0); /* Activate double buffered register update */ - I915_WRITE(PLANE_CTL(pipe, plane), 0); - POSTING_READ(PLANE_CTL(pipe, plane)); + I915_WRITE(PLANE_SURF(pipe, plane), 0); + POSTING_READ(PLANE_SURF(pipe, plane)); intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false); } -static int -skl_update_colorkey(struct drm_plane *drm_plane, - struct drm_intel_sprite_colorkey *key) -{ - struct drm_device *dev = drm_plane->dev; - struct drm_i915_private *dev_priv 
= dev->dev_private; - struct intel_plane *intel_plane = to_intel_plane(drm_plane); - const int pipe = intel_plane->pipe; - const int plane = intel_plane->plane; - u32 plane_ctl; - - I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value); - I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value); - I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask); - - plane_ctl = I915_READ(PLANE_CTL(pipe, plane)); - plane_ctl &= ~PLANE_CTL_KEY_ENABLE_MASK; - if (key->flags & I915_SET_COLORKEY_DESTINATION) - plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION; - else if (key->flags & I915_SET_COLORKEY_SOURCE) - plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE; - I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl); - - POSTING_READ(PLANE_CTL(pipe, plane)); - - return 0; -} - -static void -skl_get_colorkey(struct drm_plane *drm_plane, - struct drm_intel_sprite_colorkey *key) -{ - struct drm_device *dev = drm_plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_plane *intel_plane = to_intel_plane(drm_plane); - const int pipe = intel_plane->pipe; - const int plane = intel_plane->plane; - u32 plane_ctl; - - key->min_value = I915_READ(PLANE_KEYVAL(pipe, plane)); - key->max_value = I915_READ(PLANE_KEYMAX(pipe, plane)); - key->channel_mask = I915_READ(PLANE_KEYMSK(pipe, plane)); - - plane_ctl = I915_READ(PLANE_CTL(pipe, plane)); - - switch (plane_ctl & PLANE_CTL_KEY_ENABLE_MASK) { - case PLANE_CTL_KEY_ENABLE_DESTINATION: - key->flags = I915_SET_COLORKEY_DESTINATION; - break; - case PLANE_CTL_KEY_ENABLE_SOURCE: - key->flags = I915_SET_COLORKEY_SOURCE; - break; - default: - key->flags = I915_SET_COLORKEY_NONE; - } -} - static void chv_update_csc(struct intel_plane *intel_plane, uint32_t format) { @@ -407,7 +352,7 @@ chv_update_csc(struct intel_plane *intel_plane, uint32_t format) static void vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, + int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t x, uint32_t y, uint32_t src_w, uint32_t src_h) @@ -416,19 +361,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane = to_intel_plane(dplane); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_i915_gem_object *obj = intel_fb_obj(fb); int pipe = intel_plane->pipe; int plane = intel_plane->plane; u32 sprctl; unsigned long sprsurf_offset, linear_offset; int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); + const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey; - sprctl = I915_READ(SPCNTR(pipe, plane)); - - /* Mask out pixel format bits in case we change it */ - sprctl &= ~SP_PIXFORMAT_MASK; - sprctl &= ~SP_YUV_BYTE_ORDER_MASK; - sprctl &= ~SP_TILED; - sprctl &= ~SP_ROTATE_180; + sprctl = SP_ENABLE; switch (fb->pixel_format) { case DRM_FORMAT_YUYV: @@ -482,8 +423,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, if (obj->tiling_mode != I915_TILING_NONE) sprctl |= SP_TILED; - sprctl |= SP_ENABLE; - intel_update_sprite_watermarks(dplane, crtc, src_w, src_h, pixel_size, true, src_w != crtc_w || src_h != crtc_h); @@ -511,6 +450,15 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, intel_update_primary_plane(intel_crtc); + if (key->flags) { + I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value); + I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value); + I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask); + } + + if 
(key->flags & I915_SET_COLORKEY_SOURCE) + sprctl |= SP_SOURCE_KEY; + if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) chv_update_csc(intel_plane, fb->pixel_format); @@ -544,8 +492,8 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) intel_update_primary_plane(intel_crtc); - I915_WRITE(SPCNTR(pipe, plane), I915_READ(SPCNTR(pipe, plane)) & - ~SP_ENABLE); + I915_WRITE(SPCNTR(pipe, plane), 0); + /* Activate double buffered register update */ I915_WRITE(SPSURF(pipe, plane), 0); @@ -554,61 +502,11 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false); } -static int -vlv_update_colorkey(struct drm_plane *dplane, - struct drm_intel_sprite_colorkey *key) -{ - struct drm_device *dev = dplane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_plane *intel_plane = to_intel_plane(dplane); - int pipe = intel_plane->pipe; - int plane = intel_plane->plane; - u32 sprctl; - - if (key->flags & I915_SET_COLORKEY_DESTINATION) - return -EINVAL; - - I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value); - I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value); - I915_WRITE(SPKEYMSK(pipe, plane), key->channel_mask); - - sprctl = I915_READ(SPCNTR(pipe, plane)); - sprctl &= ~SP_SOURCE_KEY; - if (key->flags & I915_SET_COLORKEY_SOURCE) - sprctl |= SP_SOURCE_KEY; - I915_WRITE(SPCNTR(pipe, plane), sprctl); - - POSTING_READ(SPKEYMSK(pipe, plane)); - - return 0; -} - -static void -vlv_get_colorkey(struct drm_plane *dplane, - struct drm_intel_sprite_colorkey *key) -{ - struct drm_device *dev = dplane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_plane *intel_plane = to_intel_plane(dplane); - int pipe = intel_plane->pipe; - int plane = intel_plane->plane; - u32 sprctl; - - key->min_value = I915_READ(SPKEYMINVAL(pipe, plane)); - key->max_value = I915_READ(SPKEYMAXVAL(pipe, plane)); - key->channel_mask = I915_READ(SPKEYMSK(pipe, plane)); - - sprctl = I915_READ(SPCNTR(pipe, plane)); - if (sprctl & SP_SOURCE_KEY) - key->flags = I915_SET_COLORKEY_SOURCE; - else - key->flags = I915_SET_COLORKEY_NONE; -} static void ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, + int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t x, uint32_t y, uint32_t src_w, uint32_t src_h) @@ -617,19 +515,14 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane = to_intel_plane(plane); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_plane->pipe; + struct drm_i915_gem_object *obj = intel_fb_obj(fb); + enum pipe pipe = intel_plane->pipe; u32 sprctl, sprscale = 0; unsigned long sprsurf_offset, linear_offset; int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); + const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey; - sprctl = I915_READ(SPRCTL(pipe)); - - /* Mask out pixel format bits in case we change it */ - sprctl &= ~SPRITE_PIXFORMAT_MASK; - sprctl &= ~SPRITE_RGB_ORDER_RGBX; - sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK; - sprctl &= ~SPRITE_TILED; - sprctl &= ~SPRITE_ROTATE_180; + sprctl = SPRITE_ENABLE; switch (fb->pixel_format) { case DRM_FORMAT_XBGR8888: @@ -668,8 +561,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, else sprctl |= SPRITE_TRICKLE_FEED_DISABLE; - sprctl |= SPRITE_ENABLE; - if (IS_HASWELL(dev) || 
IS_BROADWELL(dev)) sprctl |= SPRITE_PIPE_CSC_ENABLE; @@ -706,6 +597,17 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, intel_update_primary_plane(intel_crtc); + if (key->flags) { + I915_WRITE(SPRKEYVAL(pipe), key->min_value); + I915_WRITE(SPRKEYMAX(pipe), key->max_value); + I915_WRITE(SPRKEYMSK(pipe), key->channel_mask); + } + + if (key->flags & I915_SET_COLORKEY_DESTINATION) + sprctl |= SPRITE_DEST_KEY; + else if (key->flags & I915_SET_COLORKEY_SOURCE) + sprctl |= SPRITE_SOURCE_KEY; + I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); @@ -747,73 +649,12 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) I915_WRITE(SPRSURF(pipe), 0); intel_flush_primary_plane(dev_priv, intel_crtc->plane); - - /* - * Avoid underruns when disabling the sprite. - * FIXME remove once watermark updates are done properly. - */ - intel_crtc->atomic.wait_vblank = true; - intel_crtc->atomic.update_sprite_watermarks |= (1 << drm_plane_index(plane)); -} - -static int -ivb_update_colorkey(struct drm_plane *plane, - struct drm_intel_sprite_colorkey *key) -{ - struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_plane *intel_plane; - u32 sprctl; - int ret = 0; - - intel_plane = to_intel_plane(plane); - - I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value); - I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value); - I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask); - - sprctl = I915_READ(SPRCTL(intel_plane->pipe)); - sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY); - if (key->flags & I915_SET_COLORKEY_DESTINATION) - sprctl |= SPRITE_DEST_KEY; - else if (key->flags & I915_SET_COLORKEY_SOURCE) - sprctl |= SPRITE_SOURCE_KEY; - I915_WRITE(SPRCTL(intel_plane->pipe), sprctl); - - POSTING_READ(SPRKEYMSK(intel_plane->pipe)); - - return ret; -} - -static void -ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key) -{ - struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_plane *intel_plane; - u32 sprctl; - - intel_plane = to_intel_plane(plane); - - key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe)); - key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe)); - key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe)); - key->flags = 0; - - sprctl = I915_READ(SPRCTL(intel_plane->pipe)); - - if (sprctl & SPRITE_DEST_KEY) - key->flags = I915_SET_COLORKEY_DESTINATION; - else if (sprctl & SPRITE_SOURCE_KEY) - key->flags = I915_SET_COLORKEY_SOURCE; - else - key->flags = I915_SET_COLORKEY_NONE; } static void ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_framebuffer *fb, - struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, + int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, uint32_t x, uint32_t y, uint32_t src_w, uint32_t src_h) @@ -822,19 +663,14 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, struct drm_i915_private *dev_priv = dev->dev_private; struct intel_plane *intel_plane = to_intel_plane(plane); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct drm_i915_gem_object *obj = intel_fb_obj(fb); int pipe = intel_plane->pipe; unsigned long dvssurf_offset, linear_offset; u32 dvscntr, dvsscale; int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); + const struct drm_intel_sprite_colorkey *key = &intel_plane->ckey; - dvscntr = I915_READ(DVSCNTR(pipe)); - - /* Mask out pixel format bits in case we change it */ - 
dvscntr &= ~DVS_PIXFORMAT_MASK; - dvscntr &= ~DVS_RGB_ORDER_XBGR; - dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK; - dvscntr &= ~DVS_TILED; - dvscntr &= ~DVS_ROTATE_180; + dvscntr = DVS_ENABLE; switch (fb->pixel_format) { case DRM_FORMAT_XBGR8888: @@ -870,7 +706,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (IS_GEN6(dev)) dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ - dvscntr |= DVS_ENABLE; intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size, true, @@ -902,6 +737,17 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, intel_update_primary_plane(intel_crtc); + if (key->flags) { + I915_WRITE(DVSKEYVAL(pipe), key->min_value); + I915_WRITE(DVSKEYMAX(pipe), key->max_value); + I915_WRITE(DVSKEYMSK(pipe), key->channel_mask); + } + + if (key->flags & I915_SET_COLORKEY_DESTINATION) + dvscntr |= DVS_DEST_KEY; + else if (key->flags & I915_SET_COLORKEY_SOURCE) + dvscntr |= DVS_SOURCE_KEY; + I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x); @@ -930,20 +776,14 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) intel_update_primary_plane(intel_crtc); - I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE); + I915_WRITE(DVSCNTR(pipe), 0); /* Disable the scaler */ I915_WRITE(DVSSCALE(pipe), 0); + /* Flush double buffered register updates */ I915_WRITE(DVSSURF(pipe), 0); intel_flush_primary_plane(dev_priv, intel_crtc->plane); - - /* - * Avoid underruns when disabling the sprite. - * FIXME remove once watermark updates are done properly. - */ - intel_crtc->atomic.wait_vblank = true; - intel_crtc->atomic.update_sprite_watermarks |= (1 << drm_plane_index(plane)); } /** @@ -1014,67 +854,9 @@ intel_pre_disable_primary(struct drm_crtc *crtc) hsw_disable_ips(intel_crtc); } -static int -ilk_update_colorkey(struct drm_plane *plane, - struct drm_intel_sprite_colorkey *key) -{ - struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_plane *intel_plane; - u32 dvscntr; - int ret = 0; - - intel_plane = to_intel_plane(plane); - - I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value); - I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value); - I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask); - - dvscntr = I915_READ(DVSCNTR(intel_plane->pipe)); - dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY); - if (key->flags & I915_SET_COLORKEY_DESTINATION) - dvscntr |= DVS_DEST_KEY; - else if (key->flags & I915_SET_COLORKEY_SOURCE) - dvscntr |= DVS_SOURCE_KEY; - I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr); - - POSTING_READ(DVSKEYMSK(intel_plane->pipe)); - - return ret; -} - -static void -ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key) -{ - struct drm_device *dev = plane->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_plane *intel_plane; - u32 dvscntr; - - intel_plane = to_intel_plane(plane); - - key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe)); - key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe)); - key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe)); - key->flags = 0; - - dvscntr = I915_READ(DVSCNTR(intel_plane->pipe)); - - if (dvscntr & DVS_DEST_KEY) - key->flags = I915_SET_COLORKEY_DESTINATION; - else if (dvscntr & DVS_SOURCE_KEY) - key->flags = I915_SET_COLORKEY_SOURCE; - else - key->flags = I915_SET_COLORKEY_NONE; -} - static bool colorkey_enabled(struct intel_plane *intel_plane) { - struct drm_intel_sprite_colorkey key; - - 
intel_plane->get_colorkey(&intel_plane->base, &key); - - return key.flags != I915_SET_COLORKEY_NONE; + return intel_plane->ckey.flags != I915_SET_COLORKEY_NONE; } static int @@ -1257,11 +1039,18 @@ finish: if (!intel_crtc->primary_enabled && !state->hides_primary) intel_crtc->atomic.post_enable_primary = true; - /* Update watermarks on tiling changes. */ - if (!plane->state->fb || !state->base.fb || - plane->state->fb->modifier[0] != - state->base.fb->modifier[0]) + if (intel_wm_need_update(plane, &state->base)) intel_crtc->atomic.update_wm = true; + + if (!state->visible) { + /* + * Avoid underruns when disabling the sprite. + * FIXME remove once watermark updates are done properly. + */ + intel_crtc->atomic.wait_vblank = true; + intel_crtc->atomic.update_sprite_watermarks |= + (1 << drm_plane_index(plane)); + } } return 0; @@ -1275,7 +1064,6 @@ intel_commit_sprite_plane(struct drm_plane *plane, struct intel_crtc *intel_crtc; struct intel_plane *intel_plane = to_intel_plane(plane); struct drm_framebuffer *fb = state->base.fb; - struct drm_i915_gem_object *obj = intel_fb_obj(fb); int crtc_x, crtc_y; unsigned int crtc_w, crtc_h; uint32_t src_x, src_y, src_w, src_h; @@ -1283,8 +1071,7 @@ intel_commit_sprite_plane(struct drm_plane *plane, crtc = crtc ? crtc : plane->crtc; intel_crtc = to_intel_crtc(crtc); - plane->fb = state->base.fb; - intel_plane->obj = obj; + plane->fb = fb; if (intel_crtc->active) { intel_crtc->primary_enabled = !state->hides_primary; @@ -1298,7 +1085,7 @@ intel_commit_sprite_plane(struct drm_plane *plane, src_y = state->src.y1; src_w = drm_rect_width(&state->src); src_h = drm_rect_height(&state->src); - intel_plane->update_plane(plane, crtc, fb, obj, + intel_plane->update_plane(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h); } else { @@ -1319,6 +1106,10 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) return -EINVAL; + if (IS_VALLEYVIEW(dev) && + set->flags & I915_SET_COLORKEY_DESTINATION) + return -EINVAL; + drm_modeset_lock_all(dev); plane = drm_plane_find(dev, set->plane_id); @@ -1328,31 +1119,15 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, } intel_plane = to_intel_plane(plane); - ret = intel_plane->update_colorkey(plane, set); - -out_unlock: - drm_modeset_unlock_all(dev); - return ret; -} - -int intel_sprite_get_colorkey(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_intel_sprite_colorkey *get = data; - struct drm_plane *plane; - struct intel_plane *intel_plane; - int ret = 0; + intel_plane->ckey = *set; - drm_modeset_lock_all(dev); - - plane = drm_plane_find(dev, get->plane_id); - if (!plane) { - ret = -ENOENT; - goto out_unlock; - } - - intel_plane = to_intel_plane(plane); - intel_plane->get_colorkey(plane, get); + /* + * The only way this could fail would be due to + * the current plane state being unsupportable already, + * and we dont't consider that an error for the + * colorkey ioctl. So just ignore any error. 
+ */ + intel_plane_restore(plane); out_unlock: drm_modeset_unlock_all(dev); @@ -1445,8 +1220,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) intel_plane->max_downscale = 16; intel_plane->update_plane = ilk_update_plane; intel_plane->disable_plane = ilk_disable_plane; - intel_plane->update_colorkey = ilk_update_colorkey; - intel_plane->get_colorkey = ilk_get_colorkey; if (IS_GEN6(dev)) { plane_formats = snb_plane_formats; @@ -1470,16 +1243,12 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) if (IS_VALLEYVIEW(dev)) { intel_plane->update_plane = vlv_update_plane; intel_plane->disable_plane = vlv_disable_plane; - intel_plane->update_colorkey = vlv_update_colorkey; - intel_plane->get_colorkey = vlv_get_colorkey; plane_formats = vlv_plane_formats; num_plane_formats = ARRAY_SIZE(vlv_plane_formats); } else { intel_plane->update_plane = ivb_update_plane; intel_plane->disable_plane = ivb_disable_plane; - intel_plane->update_colorkey = ivb_update_colorkey; - intel_plane->get_colorkey = ivb_get_colorkey; plane_formats = snb_plane_formats; num_plane_formats = ARRAY_SIZE(snb_plane_formats); @@ -1494,8 +1263,6 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) intel_plane->max_downscale = 1; intel_plane->update_plane = skl_update_plane; intel_plane->disable_plane = skl_disable_plane; - intel_plane->update_colorkey = skl_update_colorkey; - intel_plane->get_colorkey = skl_get_colorkey; plane_formats = skl_plane_formats; num_plane_formats = ARRAY_SIZE(skl_plane_formats); diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 892d23c8479d..bc1d9d740904 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1332,7 +1332,7 @@ intel_tv_detect(struct drm_connector *connector, bool force) if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) { type = intel_tv_detect_type(intel_tv, connector); - intel_release_load_detect_pipe(connector, &tmp); + intel_release_load_detect_pipe(connector, &tmp, &ctx); status = type < 0 ? 
connector_status_disconnected : connector_status_connected; @@ -1516,6 +1516,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = { .atomic_get_property = intel_connector_atomic_get_property, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, }; static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 29bd539af183..6efa8f38ff54 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -340,11 +340,13 @@ nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine, /* switch mmio to cpu's native endianness */ #ifndef __BIG_ENDIAN - if (ioread32_native(map + 0x000004) != 0x00000000) + if (ioread32_native(map + 0x000004) != 0x00000000) { #else - if (ioread32_native(map + 0x000004) == 0x00000000) + if (ioread32_native(map + 0x000004) == 0x00000000) { #endif iowrite32_native(0x01000001, map + 0x000004); + ioread32_native(map); + } /* read boot0 and strapping information */ boot0 = ioread32_native(map + 0x000000); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c index 539561ed3281..108d048da764 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c @@ -142,6 +142,49 @@ gm100_identify(struct nvkm_device *device) device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass; #endif break; + case 0x126: + device->cname = "GM206"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = gm204_i2c_oclass; + device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass; +#if 0 + /* looks to be some non-trivial changes */ + device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass; + /* priv ring says no to 0x10eb14 writes */ + device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass; +#endif + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = gm204_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass; + device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass; + device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass; + device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass; +#if 0 + device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; +#endif + device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass; +#if 0 + device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass; + device->oclass[NVDEV_ENGINE_GR ] = gm107_gr_oclass; +#endif + device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass; +#if 0 + device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass; + device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass; + device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass; + device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass; + device->oclass[NVDEV_ENGINE_MSPDEC ] 
= &gk104_mspdec_oclass; + device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass; +#endif + break; default: nv_fatal(device, "unknown Maxwell chipset\n"); return -EINVAL; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c index b038b6eb51db..043e4296084c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c @@ -502,72 +502,57 @@ nv04_fifo_intr(struct nvkm_subdev *subdev) { struct nvkm_device *device = nv_device(subdev); struct nv04_fifo_priv *priv = (void *)subdev; - uint32_t status, reassign; - int cnt = 0; + u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0); + u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask; + u32 reassign, chid, get, sem; reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1; - while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { - uint32_t chid, get; - - nv_wr32(priv, NV03_PFIFO_CACHES, 0); - - chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max; - get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET); + nv_wr32(priv, NV03_PFIFO_CACHES, 0); - if (status & NV_PFIFO_INTR_CACHE_ERROR) { - nv04_fifo_cache_error(device, priv, chid, get); - status &= ~NV_PFIFO_INTR_CACHE_ERROR; - } + chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max; + get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET); - if (status & NV_PFIFO_INTR_DMA_PUSHER) { - nv04_fifo_dma_pusher(device, priv, chid); - status &= ~NV_PFIFO_INTR_DMA_PUSHER; - } + if (stat & NV_PFIFO_INTR_CACHE_ERROR) { + nv04_fifo_cache_error(device, priv, chid, get); + stat &= ~NV_PFIFO_INTR_CACHE_ERROR; + } - if (status & NV_PFIFO_INTR_SEMAPHORE) { - uint32_t sem; + if (stat & NV_PFIFO_INTR_DMA_PUSHER) { + nv04_fifo_dma_pusher(device, priv, chid); + stat &= ~NV_PFIFO_INTR_DMA_PUSHER; + } - status &= ~NV_PFIFO_INTR_SEMAPHORE; - nv_wr32(priv, NV03_PFIFO_INTR_0, - NV_PFIFO_INTR_SEMAPHORE); + if (stat & NV_PFIFO_INTR_SEMAPHORE) { + stat &= ~NV_PFIFO_INTR_SEMAPHORE; + nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE); - sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE); - nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); + sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE); + nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); - nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); - nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); - } + nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); + nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); + } - if (device->card_type == NV_50) { - if (status & 0x00000010) { - status &= ~0x00000010; - nv_wr32(priv, 0x002100, 0x00000010); - } - - if (status & 0x40000000) { - nv_wr32(priv, 0x002100, 0x40000000); - nvkm_fifo_uevent(&priv->base); - status &= ~0x40000000; - } + if (device->card_type == NV_50) { + if (stat & 0x00000010) { + stat &= ~0x00000010; + nv_wr32(priv, 0x002100, 0x00000010); } - if (status) { - nv_warn(priv, "unknown intr 0x%08x, ch %d\n", - status, chid); - nv_wr32(priv, NV03_PFIFO_INTR_0, status); - status = 0; + if (stat & 0x40000000) { + nv_wr32(priv, 0x002100, 0x40000000); + nvkm_fifo_uevent(&priv->base); + stat &= ~0x40000000; } - - nv_wr32(priv, NV03_PFIFO_CACHES, reassign); } - if (status) { - nv_error(priv, "still angry after %d spins, halt\n", cnt); - nv_wr32(priv, 0x002140, 0); - nv_wr32(priv, 0x000140, 0); + if (stat) { + nv_warn(priv, "unknown intr 0x%08x\n", stat); + nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000); + nv_wr32(priv, NV03_PFIFO_INTR_0, stat); } - nv_wr32(priv, 0x000100, 0x00000100); + nv_wr32(priv, NV03_PFIFO_CACHES, reassign); } 
static int diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c index 2e7ec389eea7..57e2c5b13123 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c @@ -1032,9 +1032,9 @@ gf100_grctx_generate_bundle(struct gf100_grctx *info) const int s = 8; const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); mmio_refn(info, 0x408004, 0x00000000, s, b); - mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); mmio_refn(info, 0x418808, 0x00000000, s, b); - mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s)); } void diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c index b52300d8861a..5e9454ba158f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c @@ -851,9 +851,9 @@ gk104_grctx_generate_bundle(struct gf100_grctx *info) const int s = 8; const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); mmio_refn(info, 0x408004, 0x00000000, s, b); - mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); mmio_refn(info, 0x418808, 0x00000000, s, b); - mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s)); mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c index 956f4dce960c..b2fae6e389e2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c @@ -871,9 +871,9 @@ gm107_grctx_generate_bundle(struct gf100_grctx *info) const int s = 8; const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); mmio_refn(info, 0x408004, 0x00000000, s, b); - mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); mmio_refn(info, 0x418e24, 0x00000000, s, b); - mmio_refn(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_wr32(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s)); mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c index d1a89b2bd5c1..c4e1f085ee10 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c @@ -74,7 +74,11 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info) u16 ent = dcb_i2c_entry(bios, idx, &ver, &len); if (ent) { if (ver >= 0x41) { - if (!(nv_ro32(bios, ent) & 0x80000000)) + u32 ent_value = nv_ro32(bios, ent); + u8 i2c_port = (ent_value >> 27) & 0x1f; + u8 dpaux_port = (ent_value >> 22) & 0x1f; + /* value 0x1f means unused according to DCB 4.x spec */ + if (i2c_port == 0x1f && dpaux_port == 0x1f) info->type = DCB_I2C_UNUSED; else info->type = DCB_I2C_PMGR; diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c index 4cdcaf8361e1..3db23007cdf4 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.c +++ b/drivers/gpu/drm/radeon/radeon_kfd.c @@ -153,7 +153,7 @@ void 
radeon_kfd_device_init(struct radeon_device *rdev) .compute_vmid_bitmap = 0xFF00, .first_compute_pipe = 1, - .compute_pipe_count = 8 - 1, + .compute_pipe_count = 4 - 1, }; radeon_doorbell_get_kfd_info(rdev, diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 43e09942823e..318165d4855c 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -173,17 +173,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) else rbo->placements[i].lpfn = 0; } - - /* - * Use two-ended allocation depending on the buffer size to - * improve fragmentation quality. - * 512kb was measured as the most optimal number. - */ - if (rbo->tbo.mem.size > 512 * 1024) { - for (i = 0; i < c; i++) { - rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN; - } - } } int radeon_bo_create(struct radeon_device *rdev,
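For context on the radeon_object.c hunk, this is the two-ended ("top-down") placement heuristic being removed: buffers over 512 KiB were allocated from the top of the address range so that small allocations packing the bottom would not fragment the large contiguous spans. A standalone sketch of that heuristic follows, with a stand-in flag and placement struct since the TTM internals are not reproduced here.

#include <stddef.h>
#include <stdio.h>

#define PL_FLAG_TOPDOWN (1u << 0)	/* stand-in for TTM_PL_FLAG_TOPDOWN */

struct placement { unsigned int flags; };

/* Mark every placement of a sufficiently large buffer as top-down. */
static void apply_two_ended(struct placement *pl, size_t count, size_t bytes)
{
	size_t i;

	if (bytes <= 512 * 1024)	/* threshold from the deleted code */
		return;
	for (i = 0; i < count; i++)
		pl[i].flags |= PL_FLAG_TOPDOWN;
}

int main(void)
{
	struct placement pl[2] = { {0}, {0} };

	apply_two_ended(pl, 2, 1024 * 1024);	/* 1 MiB buffer: goes top-down */
	printf("flags: %#x %#x\n", pl[0].flags, pl[1].flags);
	return 0;
}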