Diffstat (limited to 'drivers/gpu/drm')
43 files changed, 411 insertions, 556 deletions
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 910ff8ab9c9c..d8135adb2238 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -645,6 +645,7 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, pr_debug(" sdma queue id: %d\n", q->properties.sdma_queue_id); pr_debug(" sdma engine id: %d\n", q->properties.sdma_engine_id); + init_sdma_vm(dqm, q, qpd); retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj, &q->gart_mqd_addr, &q->properties); if (retval != 0) { @@ -652,7 +653,14 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm, return retval; } - init_sdma_vm(dqm, q, qpd); + retval = mqd->load_mqd(mqd, q->mqd, 0, + 0, NULL); + if (retval != 0) { + deallocate_sdma_queue(dqm, q->sdma_id); + mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); + return retval; + } + return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index e415a2a9207e..c7d298e62c96 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -44,7 +44,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, BUG_ON(!kq || !dev); BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ); - pr_debug("kfd: In func %s initializing queue type %d size %d\n", + pr_debug("amdkfd: In func %s initializing queue type %d size %d\n", __func__, KFD_QUEUE_TYPE_HIQ, queue_size); nop.opcode = IT_NOP; @@ -69,12 +69,16 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev, prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off); - if (prop.doorbell_ptr == NULL) + if (prop.doorbell_ptr == NULL) { + pr_err("amdkfd: error init doorbell"); goto err_get_kernel_doorbell; + } retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq); - if (retval != 0) + if (retval != 0) { + pr_err("amdkfd: error init pq queues size (%d)\n", queue_size); goto err_pq_allocate_vidmem; + } kq->pq_kernel_addr = kq->pq->cpu_ptr; kq->pq_gpu_addr = kq->pq->gpu_addr; @@ -165,10 +169,8 @@ err_rptr_allocate_vidmem: err_eop_allocate_vidmem: kfd_gtt_sa_free(dev, kq->pq); err_pq_allocate_vidmem: - pr_err("kfd: error init pq\n"); kfd_release_kernel_doorbell(dev, prop.doorbell_ptr); err_get_kernel_doorbell: - pr_err("kfd: error init doorbell"); return false; } @@ -187,6 +189,8 @@ static void uninitialize(struct kernel_queue *kq) else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ) kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj); + kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj); + kfd_gtt_sa_free(kq->dev, kq->rptr_mem); kfd_gtt_sa_free(kq->dev, kq->wptr_mem); kq->ops_asic_specific.uninitialize(kq); @@ -211,7 +215,7 @@ static int acquire_packet_buffer(struct kernel_queue *kq, queue_address = (unsigned int *)kq->pq_kernel_addr; queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t); - pr_debug("kfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n", + pr_debug("amdkfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n", __func__, rptr, wptr, queue_address); available_size = (rptr - 1 - wptr + queue_size_dwords) % @@ -296,7 +300,7 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, } if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) { - pr_err("kfd: failed to init kernel queue\n"); + pr_err("amdkfd: failed to init kernel queue\n"); kfree(kq); return 
NULL; } @@ -319,7 +323,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev) BUG_ON(!dev); - pr_err("kfd: starting kernel queue test\n"); + pr_err("amdkfd: starting kernel queue test\n"); kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ); BUG_ON(!kq); @@ -330,7 +334,7 @@ static __attribute__((unused)) void test_kq(struct kfd_dev *dev) buffer[i] = kq->nop_packet; kq->ops.submit_packet(kq); - pr_err("kfd: ending kernel queue test\n"); + pr_err("amdkfd: ending kernel queue test\n"); } diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 6b6b07ff720b..b6f076b213bc 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -43,9 +43,10 @@ #include "drm_crtc_internal.h" #include "drm_internal.h" -static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, - struct drm_mode_fb_cmd2 *r, - struct drm_file *file_priv); +static struct drm_framebuffer * +internal_framebuffer_create(struct drm_device *dev, + struct drm_mode_fb_cmd2 *r, + struct drm_file *file_priv); /* Avoid boilerplate. I'm tired of typing. */ #define DRM_ENUM_NAME_FN(fnname, list) \ @@ -524,17 +525,6 @@ void drm_framebuffer_reference(struct drm_framebuffer *fb) } EXPORT_SYMBOL(drm_framebuffer_reference); -static void drm_framebuffer_free_bug(struct kref *kref) -{ - BUG(); -} - -static void __drm_framebuffer_unreference(struct drm_framebuffer *fb) -{ - DRM_DEBUG("%p: FB ID: %d (%d)\n", fb, fb->base.id, atomic_read(&fb->refcount.refcount)); - kref_put(&fb->refcount, drm_framebuffer_free_bug); -} - /** * drm_framebuffer_unregister_private - unregister a private fb from the lookup idr * @fb: fb to unregister @@ -1319,7 +1309,7 @@ void drm_plane_force_disable(struct drm_plane *plane) return; } /* disconnect the plane from the fb and crtc: */ - __drm_framebuffer_unreference(plane->old_fb); + drm_framebuffer_unreference(plane->old_fb); plane->old_fb = NULL; plane->fb = NULL; plane->crtc = NULL; @@ -2131,7 +2121,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, connector = drm_connector_find(dev, out_resp->connector_id); if (!connector) { ret = -ENOENT; - goto out; + goto out_unlock; } for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) @@ -2211,6 +2201,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, out: drm_modeset_unlock(&dev->mode_config.connection_mutex); + +out_unlock: mutex_unlock(&dev->mode_config.mutex); return ret; @@ -2908,13 +2900,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc, */ if (req->flags & DRM_MODE_CURSOR_BO) { if (req->handle) { - fb = add_framebuffer_internal(dev, &fbreq, file_priv); + fb = internal_framebuffer_create(dev, &fbreq, file_priv); if (IS_ERR(fb)) { DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); return PTR_ERR(fb); } - - drm_framebuffer_reference(fb); } else { fb = NULL; } @@ -3267,9 +3257,10 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) return 0; } -static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, - struct drm_mode_fb_cmd2 *r, - struct drm_file *file_priv) +static struct drm_framebuffer * +internal_framebuffer_create(struct drm_device *dev, + struct drm_mode_fb_cmd2 *r, + struct drm_file *file_priv) { struct drm_mode_config *config = &dev->mode_config; struct drm_framebuffer *fb; @@ -3301,12 +3292,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, return fb; } - mutex_lock(&file_priv->fbs_lock); - r->fb_id = fb->base.id; - list_add(&fb->filp_head, &file_priv->fbs); - 
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); - mutex_unlock(&file_priv->fbs_lock); - return fb; } @@ -3328,15 +3313,24 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, int drm_mode_addfb2(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct drm_mode_fb_cmd2 *r = data; struct drm_framebuffer *fb; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - fb = add_framebuffer_internal(dev, data, file_priv); + fb = internal_framebuffer_create(dev, r, file_priv); if (IS_ERR(fb)) return PTR_ERR(fb); + /* Transfer ownership to the filp for reaping on close */ + + DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); + mutex_lock(&file_priv->fbs_lock); + r->fb_id = fb->base.id; + list_add(&fb->filp_head, &file_priv->fbs); + mutex_unlock(&file_priv->fbs_lock); + return 0; } diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 9a5b68717ec8..379ab4555756 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -733,10 +733,14 @@ static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_sideband_msg_tx *txmsg) { bool ret; - mutex_lock(&mgr->qlock); + + /* + * All updates to txmsg->state are protected by mgr->qlock, and the two + * cases we check here are terminal states. For those the barriers + * provided by the wake_up/wait_event pair are enough. + */ ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); - mutex_unlock(&mgr->qlock); return ret; } @@ -1363,12 +1367,13 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, return 0; } -/* must be called holding qlock */ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) { struct drm_dp_sideband_msg_tx *txmsg; int ret; + WARN_ON(!mutex_is_locked(&mgr->qlock)); + /* construct a chunk from the first msg in the tx_msg queue */ if (list_empty(&mgr->tx_msg_downq)) { mgr->tx_down_in_progress = false; diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c index 732cb6f8e653..4c0aa97aaf03 100644 --- a/drivers/gpu/drm/drm_edid_load.c +++ b/drivers/gpu/drm/drm_edid_load.c @@ -287,6 +287,7 @@ int drm_load_edid_firmware(struct drm_connector *connector) drm_mode_connector_update_edid_property(connector, edid); ret = drm_add_edid_modes(connector, edid); + drm_edid_to_eld(connector, edid); kfree(edid); return ret; diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 7fc6f8bd4821..1134526286c8 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -403,7 +403,7 @@ static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment) unsigned rem; rem = do_div(tmp, alignment); - if (tmp) + if (rem) start += alignment - rem; } diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 6591d48c1b9d..3fee587bc284 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -174,6 +174,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect struct edid *edid = (struct edid *) connector->edid_blob_ptr->data; count = drm_add_edid_modes(connector, edid); + drm_edid_to_eld(connector, edid); } else count = (*connector_funcs->get_modes)(connector); } diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index a5e74612100e..0a6780367d28 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -50,7 +50,7 @@ config DRM_EXYNOS_DSI config 
DRM_EXYNOS_DP bool "EXYNOS DRM DP driver support" - depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) + depends on (DRM_EXYNOS_FIMD || DRM_EXYNOS7_DECON) && ARCH_EXYNOS && (DRM_PTN3460=n || DRM_PTN3460=y || DRM_PTN3460=DRM_EXYNOS) default DRM_EXYNOS select DRM_PANEL help diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 63f02e2380ae..970046199608 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -888,8 +888,8 @@ static int decon_probe(struct platform_device *pdev) of_node_put(i80_if_timings); ctx->regs = of_iomap(dev->of_node, 0); - if (IS_ERR(ctx->regs)) { - ret = PTR_ERR(ctx->regs); + if (!ctx->regs) { + ret = -ENOMEM; goto err_del_component; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c deleted file mode 100644 index ba9b3d5ed672..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.c +++ /dev/null @@ -1,245 +0,0 @@ -/* - * Copyright (c) 2011 Samsung Electronics Co., Ltd. - * Authors: - * Inki Dae <inki.dae@samsung.com> - * Joonyoung Shim <jy0922.shim@samsung.com> - * Seung-Woo Kim <sw0312.kim@samsung.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include <drm/drmP.h> -#include <drm/drm_crtc_helper.h> - -#include <drm/exynos_drm.h> -#include "exynos_drm_drv.h" -#include "exynos_drm_encoder.h" -#include "exynos_drm_connector.h" - -#define to_exynos_connector(x) container_of(x, struct exynos_drm_connector,\ - drm_connector) - -struct exynos_drm_connector { - struct drm_connector drm_connector; - uint32_t encoder_id; - struct exynos_drm_display *display; -}; - -static int exynos_drm_connector_get_modes(struct drm_connector *connector) -{ - struct exynos_drm_connector *exynos_connector = - to_exynos_connector(connector); - struct exynos_drm_display *display = exynos_connector->display; - struct edid *edid = NULL; - unsigned int count = 0; - int ret; - - /* - * if get_edid() exists then get_edid() callback of hdmi side - * is called to get edid data through i2c interface else - * get timing from the FIMD driver(display controller). - * - * P.S. in case of lcd panel, count is always 1 if success - * because lcd panel has only one mode. 
- */ - if (display->ops->get_edid) { - edid = display->ops->get_edid(display, connector); - if (IS_ERR_OR_NULL(edid)) { - ret = PTR_ERR(edid); - edid = NULL; - DRM_ERROR("Panel operation get_edid failed %d\n", ret); - goto out; - } - - count = drm_add_edid_modes(connector, edid); - if (!count) { - DRM_ERROR("Add edid modes failed %d\n", count); - goto out; - } - - drm_mode_connector_update_edid_property(connector, edid); - } else { - struct exynos_drm_panel_info *panel; - struct drm_display_mode *mode = drm_mode_create(connector->dev); - if (!mode) { - DRM_ERROR("failed to create a new display mode.\n"); - return 0; - } - - if (display->ops->get_panel) - panel = display->ops->get_panel(display); - else { - drm_mode_destroy(connector->dev, mode); - return 0; - } - - drm_display_mode_from_videomode(&panel->vm, mode); - mode->width_mm = panel->width_mm; - mode->height_mm = panel->height_mm; - connector->display_info.width_mm = mode->width_mm; - connector->display_info.height_mm = mode->height_mm; - - mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - drm_mode_set_name(mode); - drm_mode_probed_add(connector, mode); - - count = 1; - } - -out: - kfree(edid); - return count; -} - -static int exynos_drm_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - struct exynos_drm_connector *exynos_connector = - to_exynos_connector(connector); - struct exynos_drm_display *display = exynos_connector->display; - int ret = MODE_BAD; - - DRM_DEBUG_KMS("%s\n", __FILE__); - - if (display->ops->check_mode) - if (!display->ops->check_mode(display, mode)) - ret = MODE_OK; - - return ret; -} - -static struct drm_encoder *exynos_drm_best_encoder( - struct drm_connector *connector) -{ - struct drm_device *dev = connector->dev; - struct exynos_drm_connector *exynos_connector = - to_exynos_connector(connector); - return drm_encoder_find(dev, exynos_connector->encoder_id); -} - -static struct drm_connector_helper_funcs exynos_connector_helper_funcs = { - .get_modes = exynos_drm_connector_get_modes, - .mode_valid = exynos_drm_connector_mode_valid, - .best_encoder = exynos_drm_best_encoder, -}; - -static int exynos_drm_connector_fill_modes(struct drm_connector *connector, - unsigned int max_width, unsigned int max_height) -{ - struct exynos_drm_connector *exynos_connector = - to_exynos_connector(connector); - struct exynos_drm_display *display = exynos_connector->display; - unsigned int width, height; - - width = max_width; - height = max_height; - - /* - * if specific driver want to find desired_mode using maxmum - * resolution then get max width and height from that driver. - */ - if (display->ops->get_max_resol) - display->ops->get_max_resol(display, &width, &height); - - return drm_helper_probe_single_connector_modes(connector, width, - height); -} - -/* get detection status of display device. 
*/ -static enum drm_connector_status -exynos_drm_connector_detect(struct drm_connector *connector, bool force) -{ - struct exynos_drm_connector *exynos_connector = - to_exynos_connector(connector); - struct exynos_drm_display *display = exynos_connector->display; - enum drm_connector_status status = connector_status_disconnected; - - if (display->ops->is_connected) { - if (display->ops->is_connected(display)) - status = connector_status_connected; - else - status = connector_status_disconnected; - } - - return status; -} - -static void exynos_drm_connector_destroy(struct drm_connector *connector) -{ - struct exynos_drm_connector *exynos_connector = - to_exynos_connector(connector); - - drm_connector_unregister(connector); - drm_connector_cleanup(connector); - kfree(exynos_connector); -} - -static struct drm_connector_funcs exynos_connector_funcs = { - .dpms = drm_helper_connector_dpms, - .fill_modes = exynos_drm_connector_fill_modes, - .detect = exynos_drm_connector_detect, - .destroy = exynos_drm_connector_destroy, -}; - -struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, - struct drm_encoder *encoder) -{ - struct exynos_drm_connector *exynos_connector; - struct exynos_drm_display *display = exynos_drm_get_display(encoder); - struct drm_connector *connector; - int type; - int err; - - exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL); - if (!exynos_connector) - return NULL; - - connector = &exynos_connector->drm_connector; - - switch (display->type) { - case EXYNOS_DISPLAY_TYPE_HDMI: - type = DRM_MODE_CONNECTOR_HDMIA; - connector->interlace_allowed = true; - connector->polled = DRM_CONNECTOR_POLL_HPD; - break; - case EXYNOS_DISPLAY_TYPE_VIDI: - type = DRM_MODE_CONNECTOR_VIRTUAL; - connector->polled = DRM_CONNECTOR_POLL_HPD; - break; - default: - type = DRM_MODE_CONNECTOR_Unknown; - break; - } - - drm_connector_init(dev, connector, &exynos_connector_funcs, type); - drm_connector_helper_add(connector, &exynos_connector_helper_funcs); - - err = drm_connector_register(connector); - if (err) - goto err_connector; - - exynos_connector->encoder_id = encoder->base.id; - exynos_connector->display = display; - connector->dpms = DRM_MODE_DPMS_OFF; - connector->encoder = encoder; - - err = drm_mode_connector_attach_encoder(connector, encoder); - if (err) { - DRM_ERROR("failed to attach a connector to a encoder\n"); - goto err_sysfs; - } - - DRM_DEBUG_KMS("connector has been created\n"); - - return connector; - -err_sysfs: - drm_connector_unregister(connector); -err_connector: - drm_connector_cleanup(connector); - kfree(exynos_connector); - return NULL; -} diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.h b/drivers/gpu/drm/exynos/exynos_drm_connector.h deleted file mode 100644 index 4eb20d78379a..000000000000 --- a/drivers/gpu/drm/exynos/exynos_drm_connector.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright (c) 2011 Samsung Electronics Co., Ltd. - * Authors: - * Inki Dae <inki.dae@samsung.com> - * Joonyoung Shim <jy0922.shim@samsung.com> - * Seung-Woo Kim <sw0312.kim@samsung.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. 
- */ - -#ifndef _EXYNOS_DRM_CONNECTOR_H_ -#define _EXYNOS_DRM_CONNECTOR_H_ - -struct drm_connector *exynos_drm_connector_create(struct drm_device *dev, - struct drm_encoder *encoder); - -#endif diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 925fc69af1a0..33a10ce967ea 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -147,6 +147,7 @@ struct fimd_win_data { unsigned int ovl_height; unsigned int fb_width; unsigned int fb_height; + unsigned int fb_pitch; unsigned int bpp; unsigned int pixel_format; dma_addr_t dma_addr; @@ -284,14 +285,9 @@ static void fimd_clear_channel(struct fimd_context *ctx) } } -static int fimd_ctx_initialize(struct fimd_context *ctx, +static int fimd_iommu_attach_devices(struct fimd_context *ctx, struct drm_device *drm_dev) { - struct exynos_drm_private *priv; - priv = drm_dev->dev_private; - - ctx->drm_dev = drm_dev; - ctx->pipe = priv->pipe++; /* attach this sub driver to iommu mapping if supported. */ if (is_drm_iommu_supported(ctx->drm_dev)) { @@ -313,7 +309,7 @@ static int fimd_ctx_initialize(struct fimd_context *ctx, return 0; } -static void fimd_ctx_remove(struct fimd_context *ctx) +static void fimd_iommu_detach_devices(struct fimd_context *ctx) { /* detach this sub driver from iommu mapping if supported. */ if (is_drm_iommu_supported(ctx->drm_dev)) @@ -537,13 +533,14 @@ static void fimd_win_mode_set(struct exynos_drm_crtc *crtc, win_data->offset_y = plane->crtc_y; win_data->ovl_width = plane->crtc_width; win_data->ovl_height = plane->crtc_height; + win_data->fb_pitch = plane->pitch; win_data->fb_width = plane->fb_width; win_data->fb_height = plane->fb_height; win_data->dma_addr = plane->dma_addr[0] + offset; win_data->bpp = plane->bpp; win_data->pixel_format = plane->pixel_format; - win_data->buf_offsize = (plane->fb_width - plane->crtc_width) * - (plane->bpp >> 3); + win_data->buf_offsize = + plane->pitch - (plane->crtc_width * (plane->bpp >> 3)); win_data->line_size = plane->crtc_width * (plane->bpp >> 3); DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n", @@ -709,7 +706,7 @@ static void fimd_win_commit(struct exynos_drm_crtc *crtc, int zpos) writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); /* buffer end address */ - size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3); + size = win_data->fb_pitch * win_data->ovl_height * (win_data->bpp >> 3); val = (unsigned long)(win_data->dma_addr + size); writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); @@ -1056,25 +1053,23 @@ static int fimd_bind(struct device *dev, struct device *master, void *data) { struct fimd_context *ctx = dev_get_drvdata(dev); struct drm_device *drm_dev = data; + struct exynos_drm_private *priv = drm_dev->dev_private; int ret; - ret = fimd_ctx_initialize(ctx, drm_dev); - if (ret) { - DRM_ERROR("fimd_ctx_initialize failed.\n"); - return ret; - } + ctx->drm_dev = drm_dev; + ctx->pipe = priv->pipe++; ctx->crtc = exynos_drm_crtc_create(drm_dev, ctx->pipe, EXYNOS_DISPLAY_TYPE_LCD, &fimd_crtc_ops, ctx); - if (IS_ERR(ctx->crtc)) { - fimd_ctx_remove(ctx); - return PTR_ERR(ctx->crtc); - } if (ctx->display) exynos_drm_create_enc_conn(drm_dev, ctx->display); + ret = fimd_iommu_attach_devices(ctx, drm_dev); + if (ret) + return ret; + return 0; } @@ -1086,10 +1081,10 @@ static void fimd_unbind(struct device *dev, struct device *master, fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF); + fimd_iommu_detach_devices(ctx); + if (ctx->display) exynos_dpi_remove(ctx->display); - - fimd_ctx_remove(ctx); } 
static const struct component_ops fimd_component_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c index a5616872eee7..8ad5b7294eb4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_plane.c +++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c @@ -175,7 +175,7 @@ static int exynos_disable_plane(struct drm_plane *plane) struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane); struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(plane->crtc); - if (exynos_crtc->ops->win_disable) + if (exynos_crtc && exynos_crtc->ops->win_disable) exynos_crtc->ops->win_disable(exynos_crtc, exynos_plane->zpos); diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 3518bc4654c5..2e3bc57ea50e 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -55,6 +55,7 @@ struct hdmi_win_data { unsigned int fb_x; unsigned int fb_y; unsigned int fb_width; + unsigned int fb_pitch; unsigned int fb_height; unsigned int src_width; unsigned int src_height; @@ -438,7 +439,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win) } else { luma_addr[0] = win_data->dma_addr; chroma_addr[0] = win_data->dma_addr - + (win_data->fb_width * win_data->fb_height); + + (win_data->fb_pitch * win_data->fb_height); } if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) { @@ -447,8 +448,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win) luma_addr[1] = luma_addr[0] + 0x40; chroma_addr[1] = chroma_addr[0] + 0x40; } else { - luma_addr[1] = luma_addr[0] + win_data->fb_width; - chroma_addr[1] = chroma_addr[0] + win_data->fb_width; + luma_addr[1] = luma_addr[0] + win_data->fb_pitch; + chroma_addr[1] = chroma_addr[0] + win_data->fb_pitch; } } else { ctx->interlace = false; @@ -469,10 +470,10 @@ static void vp_video_buffer(struct mixer_context *ctx, int win) vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK); /* setting size of input image */ - vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_width) | + vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(win_data->fb_pitch) | VP_IMG_VSIZE(win_data->fb_height)); /* chroma height has to reduced by 2 to avoid chroma distorions */ - vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_width) | + vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(win_data->fb_pitch) | VP_IMG_VSIZE(win_data->fb_height / 2)); vp_reg_write(res, VP_SRC_WIDTH, win_data->src_width); @@ -559,7 +560,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win) /* converting dma address base and source offset */ dma_addr = win_data->dma_addr + (win_data->fb_x * win_data->bpp >> 3) - + (win_data->fb_y * win_data->fb_width * win_data->bpp >> 3); + + (win_data->fb_y * win_data->fb_pitch); src_x_offset = 0; src_y_offset = 0; @@ -576,7 +577,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win) MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK); /* setup geometry */ - mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), win_data->fb_width); + mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), + win_data->fb_pitch / (win_data->bpp >> 3)); /* setup display size */ if (ctx->mxr_ver == MXR_VER_128_0_0_184 && @@ -961,6 +963,7 @@ static void mixer_win_mode_set(struct exynos_drm_crtc *crtc, win_data->fb_y = plane->fb_y; win_data->fb_width = plane->fb_width; win_data->fb_height = plane->fb_height; + win_data->fb_pitch = plane->pitch; win_data->src_width = plane->src_width; win_data->src_height = plane->src_height; diff --git a/drivers/gpu/drm/i915/i915_drv.c 
b/drivers/gpu/drm/i915/i915_drv.c index cc6ea53d2b81..5c66b568bb81 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1095,6 +1095,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv) /* Gunit-Display CZ domain, 0x182028-0x1821CF */ s->gu_ctl0 = I915_READ(VLV_GU_CTL0); s->gu_ctl1 = I915_READ(VLV_GU_CTL1); + s->pcbr = I915_READ(VLV_PCBR); s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2); /* @@ -1189,6 +1190,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv) /* Gunit-Display CZ domain, 0x182028-0x1821CF */ I915_WRITE(VLV_GU_CTL0, s->gu_ctl0); I915_WRITE(VLV_GU_CTL1, s->gu_ctl1); + I915_WRITE(VLV_PCBR, s->pcbr); I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2); } @@ -1197,19 +1199,7 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on) u32 val; int err; - val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); - WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on); - #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT) - /* Wait for a previous force-off to settle */ - if (force_on) { - err = wait_for(!COND, 20); - if (err) { - DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n", - I915_READ(VLV_GTLC_SURVIVABILITY_REG)); - return err; - } - } val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); val &= ~VLV_GFX_CLK_FORCE_ON_BIT; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 8727086cf48c..b4faa2df9d3d 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1094,6 +1094,7 @@ struct vlv_s0ix_state { /* Display 2 CZ domain */ u32 gu_ctl0; u32 gu_ctl1; + u32 pcbr; u32 clock_gate_dis2; }; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index e5daad5f75fb..27ea6bdebce7 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2737,24 +2737,11 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) WARN_ON(i915_verify_lists(ring->dev)); - /* Move any buffers on the active list that are no longer referenced - * by the ringbuffer to the flushing/inactive lists as appropriate, - * before we free the context associated with the requests. + /* Retire requests first as we use it above for the early return. + * If we retire requests last, we may use a later seqno and so clear + * the requests lists without clearing the active list, leading to + * confusion. */ - while (!list_empty(&ring->active_list)) { - struct drm_i915_gem_object *obj; - - obj = list_first_entry(&ring->active_list, - struct drm_i915_gem_object, - ring_list); - - if (!i915_gem_request_completed(obj->last_read_req, true)) - break; - - i915_gem_object_move_to_inactive(obj); - } - - while (!list_empty(&ring->request_list)) { struct drm_i915_gem_request *request; struct intel_ringbuffer *ringbuf; @@ -2789,6 +2776,23 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring) i915_gem_free_request(request); } + /* Move any buffers on the active list that are no longer referenced + * by the ringbuffer to the flushing/inactive lists as appropriate, + * before we free the context associated with the requests. 
+ */ + while (!list_empty(&ring->active_list)) { + struct drm_i915_gem_object *obj; + + obj = list_first_entry(&ring->active_list, + struct drm_i915_gem_object, + ring_list); + + if (!i915_gem_request_completed(obj->last_read_req, true)) + break; + + i915_gem_object_move_to_inactive(obj); + } + if (unlikely(ring->trace_irq_req && i915_gem_request_completed(ring->trace_irq_req, true))) { ring->irq_put(ring); @@ -2936,9 +2940,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) req = obj->last_read_req; /* Do this after OLR check to make sure we make forward progress polling - * on this IOCTL with a timeout <=0 (like busy ioctl) + * on this IOCTL with a timeout == 0 (like busy ioctl) */ - if (args->timeout_ns <= 0) { + if (args->timeout_ns == 0) { ret = -ETIME; goto out; } @@ -2948,7 +2952,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) i915_gem_request_reference(req); mutex_unlock(&dev->struct_mutex); - ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns, + ret = __i915_wait_request(req, reset_counter, true, + args->timeout_ns > 0 ? &args->timeout_ns : NULL, file->driver_priv); mutex_lock(&dev->struct_mutex); i915_gem_request_unreference(req); @@ -4792,6 +4797,9 @@ i915_gem_init_hw(struct drm_device *dev) if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) return -EIO; + /* Double layer security blanket, see i915_gem_init() */ + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + if (dev_priv->ellc_size) I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); @@ -4824,7 +4832,7 @@ i915_gem_init_hw(struct drm_device *dev) for_each_ring(ring, dev_priv, i) { ret = ring->init_hw(ring); if (ret) - return ret; + goto out; } for (i = 0; i < NUM_L3_SLICES(dev); i++) @@ -4841,9 +4849,11 @@ i915_gem_init_hw(struct drm_device *dev) DRM_ERROR("Context enable failed %d\n", ret); i915_gem_cleanup_ringbuffer(dev); - return ret; + goto out; } +out: + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); return ret; } @@ -4877,6 +4887,14 @@ int i915_gem_init(struct drm_device *dev) dev_priv->gt.stop_ring = intel_logical_ring_stop; } + /* This is just a security blanket to placate dragons. + * On some systems, we very sporadically observe that the first TLBs + * used by the CS may be stale, despite us poking the TLB reset. If + * we hold the forcewake during initialisation these problems + * just magically go away. 
+ */ + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + ret = i915_gem_init_userptr(dev); if (ret) goto out_unlock; @@ -4903,6 +4921,7 @@ int i915_gem_init(struct drm_device *dev) } out_unlock: + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); mutex_unlock(&dev->struct_mutex); return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index b773368fc62c..38a742532c4f 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1487,7 +1487,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, goto err; } - if (i915_needs_cmd_parser(ring)) { + if (i915_needs_cmd_parser(ring) && args->batch_len) { batch_obj = i915_gem_execbuffer_parse(ring, &shadow_exec_entry, eb, diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e730789b53b7..f75173c20f47 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -37,6 +37,7 @@ #include <drm/i915_drm.h> #include "i915_drv.h" #include "i915_trace.h" +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_dp_helper.h> #include <drm/drm_crtc_helper.h> @@ -2416,6 +2417,14 @@ out_unref_obj: return false; } +/* Update plane->state->fb to match plane->fb after driver-internal updates */ +static void +update_state_fb(struct drm_plane *plane) +{ + if (plane->fb != plane->state->fb) + drm_atomic_set_fb_for_plane(plane->state, plane->fb); +} + static void intel_find_plane_obj(struct intel_crtc *intel_crtc, struct intel_initial_plane_config *plane_config) @@ -2429,8 +2438,15 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc, if (!intel_crtc->base.primary->fb) return; - if (intel_alloc_plane_obj(intel_crtc, plane_config)) + if (intel_alloc_plane_obj(intel_crtc, plane_config)) { + struct drm_plane *primary = intel_crtc->base.primary; + + primary->state->crtc = &intel_crtc->base; + primary->crtc = &intel_crtc->base; + update_state_fb(primary); + return; + } kfree(intel_crtc->base.primary->fb); intel_crtc->base.primary->fb = NULL; @@ -2453,15 +2469,21 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc, continue; if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { + struct drm_plane *primary = intel_crtc->base.primary; + if (obj->tiling_mode != I915_TILING_NONE) dev_priv->preserve_bios_swizzle = true; drm_framebuffer_reference(c->primary->fb); - intel_crtc->base.primary->fb = c->primary->fb; + primary->fb = c->primary->fb; + primary->state->crtc = &intel_crtc->base; + primary->crtc = &intel_crtc->base; obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); break; } } + + update_state_fb(intel_crtc->base.primary); } static void i9xx_update_primary_plane(struct drm_crtc *crtc, @@ -6602,6 +6624,10 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; + val = I915_READ(DSPCNTR(plane)); + if (!(val & DISPLAY_PLANE_ENABLE)) + return; + intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); if (!intel_fb) { DRM_DEBUG_KMS("failed to alloc fb\n"); @@ -6610,8 +6636,6 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, fb = &intel_fb->base; - val = I915_READ(DSPCNTR(plane)); - if (INTEL_INFO(dev)->gen >= 4) if (val & DISPPLANE_TILED) plane_config->tiling = I915_TILING_X; @@ -7643,6 +7667,9 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, fb = &intel_fb->base; val = I915_READ(PLANE_CTL(pipe, 0)); + if (!(val & PLANE_CTL_ENABLE)) + goto error; + if 
(val & PLANE_CTL_TILED_MASK) plane_config->tiling = I915_TILING_X; @@ -7730,6 +7757,10 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, struct drm_framebuffer *fb; struct intel_framebuffer *intel_fb; + val = I915_READ(DSPCNTR(pipe)); + if (!(val & DISPLAY_PLANE_ENABLE)) + return; + intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); if (!intel_fb) { DRM_DEBUG_KMS("failed to alloc fb\n"); @@ -7738,8 +7769,6 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, fb = &intel_fb->base; - val = I915_READ(DSPCNTR(pipe)); - if (INTEL_INFO(dev)->gen >= 4) if (val & DISPPLANE_TILED) plane_config->tiling = I915_TILING_X; @@ -9716,7 +9745,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe) struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - WARN_ON(!in_irq()); + WARN_ON(!in_interrupt()); if (crtc == NULL) return; @@ -9816,6 +9845,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, drm_gem_object_reference(&obj->base); crtc->primary->fb = fb; + update_state_fb(crtc->primary); work->pending_flip_obj = obj; @@ -9884,6 +9914,7 @@ cleanup_unpin: cleanup_pending: atomic_dec(&intel_crtc->unpin_work_count); crtc->primary->fb = old_fb; + update_state_fb(crtc->primary); drm_gem_object_unreference(&work->old_fb_obj->base); drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); @@ -13718,6 +13749,7 @@ void intel_modeset_gem_init(struct drm_device *dev) to_intel_crtc(c)->pipe); drm_framebuffer_unreference(c->primary->fb); c->primary->fb = NULL; + update_state_fb(c->primary); } } mutex_unlock(&dev->struct_mutex); diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 0a52c44ad03d..9c5451c97942 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -1322,7 +1322,7 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data, drm_modeset_lock_all(dev); plane = drm_plane_find(dev, set->plane_id); - if (!plane) { + if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) { ret = -ENOENT; goto out_unlock; } @@ -1349,7 +1349,7 @@ int intel_sprite_get_colorkey(struct drm_device *dev, void *data, drm_modeset_lock_all(dev); plane = drm_plane_find(dev, get->plane_id); - if (!plane) { + if (!plane || plane->type != DRM_PLANE_TYPE_OVERLAY) { ret = -ENOENT; goto out_unlock; } diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index c47a3baa53d5..4e8fb891d4ea 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -1048,8 +1048,14 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) /* We need to init first for ECOBUS access and then * determine later if we want to reinit, in case of MT access is - * not working + * not working. In this stage we don't know which flavour this + * ivb is, so it is better to reset also the gen6 fw registers + * before the ecobus check. 
*/ + + __raw_i915_write32(dev_priv, FORCEWAKE, 0); + __raw_posting_read(dev_priv, ECOBUS); + fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, FORCEWAKE_MT, FORCEWAKE_MT_ACK); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 29bd539af183..6efa8f38ff54 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -340,11 +340,13 @@ nvkm_devobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine, /* switch mmio to cpu's native endianness */ #ifndef __BIG_ENDIAN - if (ioread32_native(map + 0x000004) != 0x00000000) + if (ioread32_native(map + 0x000004) != 0x00000000) { #else - if (ioread32_native(map + 0x000004) == 0x00000000) + if (ioread32_native(map + 0x000004) == 0x00000000) { #endif iowrite32_native(0x01000001, map + 0x000004); + ioread32_native(map); + } /* read boot0 and strapping information */ boot0 = ioread32_native(map + 0x000000); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c index 539561ed3281..108d048da764 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/gm100.c @@ -142,6 +142,49 @@ gm100_identify(struct nvkm_device *device) device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass; #endif break; + case 0x126: + device->cname = "GM206"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nvkm_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = gk104_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = gm204_i2c_oclass; + device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass; +#if 0 + /* looks to be some non-trivial changes */ + device->oclass[NVDEV_SUBDEV_CLK ] = &gk104_clk_oclass; + /* priv ring says no to 0x10eb14 writes */ + device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass; +#endif + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = gm204_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = gf100_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass; + device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass; + device->oclass[NVDEV_SUBDEV_IBUS ] = &gk104_ibus_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_MMU ] = &gf100_mmu_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &gf100_bar_oclass; + device->oclass[NVDEV_SUBDEV_PMU ] = gk208_pmu_oclass; +#if 0 + device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; +#endif + device->oclass[NVDEV_ENGINE_DMAOBJ ] = gf110_dmaeng_oclass; +#if 0 + device->oclass[NVDEV_ENGINE_FIFO ] = gk208_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = gf100_sw_oclass; + device->oclass[NVDEV_ENGINE_GR ] = gm107_gr_oclass; +#endif + device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass; +#if 0 + device->oclass[NVDEV_ENGINE_CE0 ] = &gm204_ce0_oclass; + device->oclass[NVDEV_ENGINE_CE1 ] = &gm204_ce1_oclass; + device->oclass[NVDEV_ENGINE_CE2 ] = &gm204_ce2_oclass; + device->oclass[NVDEV_ENGINE_MSVLD ] = &gk104_msvld_oclass; + device->oclass[NVDEV_ENGINE_MSPDEC ] = &gk104_mspdec_oclass; + device->oclass[NVDEV_ENGINE_MSPPP ] = &gf100_msppp_oclass; +#endif + break; default: nv_fatal(device, "unknown Maxwell chipset\n"); return -EINVAL; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c index b038b6eb51db..043e4296084c 
100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/nv04.c @@ -502,72 +502,57 @@ nv04_fifo_intr(struct nvkm_subdev *subdev) { struct nvkm_device *device = nv_device(subdev); struct nv04_fifo_priv *priv = (void *)subdev; - uint32_t status, reassign; - int cnt = 0; + u32 mask = nv_rd32(priv, NV03_PFIFO_INTR_EN_0); + u32 stat = nv_rd32(priv, NV03_PFIFO_INTR_0) & mask; + u32 reassign, chid, get, sem; reassign = nv_rd32(priv, NV03_PFIFO_CACHES) & 1; - while ((status = nv_rd32(priv, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) { - uint32_t chid, get; - - nv_wr32(priv, NV03_PFIFO_CACHES, 0); - - chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max; - get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET); + nv_wr32(priv, NV03_PFIFO_CACHES, 0); - if (status & NV_PFIFO_INTR_CACHE_ERROR) { - nv04_fifo_cache_error(device, priv, chid, get); - status &= ~NV_PFIFO_INTR_CACHE_ERROR; - } + chid = nv_rd32(priv, NV03_PFIFO_CACHE1_PUSH1) & priv->base.max; + get = nv_rd32(priv, NV03_PFIFO_CACHE1_GET); - if (status & NV_PFIFO_INTR_DMA_PUSHER) { - nv04_fifo_dma_pusher(device, priv, chid); - status &= ~NV_PFIFO_INTR_DMA_PUSHER; - } + if (stat & NV_PFIFO_INTR_CACHE_ERROR) { + nv04_fifo_cache_error(device, priv, chid, get); + stat &= ~NV_PFIFO_INTR_CACHE_ERROR; + } - if (status & NV_PFIFO_INTR_SEMAPHORE) { - uint32_t sem; + if (stat & NV_PFIFO_INTR_DMA_PUSHER) { + nv04_fifo_dma_pusher(device, priv, chid); + stat &= ~NV_PFIFO_INTR_DMA_PUSHER; + } - status &= ~NV_PFIFO_INTR_SEMAPHORE; - nv_wr32(priv, NV03_PFIFO_INTR_0, - NV_PFIFO_INTR_SEMAPHORE); + if (stat & NV_PFIFO_INTR_SEMAPHORE) { + stat &= ~NV_PFIFO_INTR_SEMAPHORE; + nv_wr32(priv, NV03_PFIFO_INTR_0, NV_PFIFO_INTR_SEMAPHORE); - sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE); - nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); + sem = nv_rd32(priv, NV10_PFIFO_CACHE1_SEMAPHORE); + nv_wr32(priv, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1); - nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); - nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); - } + nv_wr32(priv, NV03_PFIFO_CACHE1_GET, get + 4); + nv_wr32(priv, NV04_PFIFO_CACHE1_PULL0, 1); + } - if (device->card_type == NV_50) { - if (status & 0x00000010) { - status &= ~0x00000010; - nv_wr32(priv, 0x002100, 0x00000010); - } - - if (status & 0x40000000) { - nv_wr32(priv, 0x002100, 0x40000000); - nvkm_fifo_uevent(&priv->base); - status &= ~0x40000000; - } + if (device->card_type == NV_50) { + if (stat & 0x00000010) { + stat &= ~0x00000010; + nv_wr32(priv, 0x002100, 0x00000010); } - if (status) { - nv_warn(priv, "unknown intr 0x%08x, ch %d\n", - status, chid); - nv_wr32(priv, NV03_PFIFO_INTR_0, status); - status = 0; + if (stat & 0x40000000) { + nv_wr32(priv, 0x002100, 0x40000000); + nvkm_fifo_uevent(&priv->base); + stat &= ~0x40000000; } - - nv_wr32(priv, NV03_PFIFO_CACHES, reassign); } - if (status) { - nv_error(priv, "still angry after %d spins, halt\n", cnt); - nv_wr32(priv, 0x002140, 0); - nv_wr32(priv, 0x000140, 0); + if (stat) { + nv_warn(priv, "unknown intr 0x%08x\n", stat); + nv_mask(priv, NV03_PFIFO_INTR_EN_0, stat, 0x00000000); + nv_wr32(priv, NV03_PFIFO_INTR_0, stat); } - nv_wr32(priv, 0x000100, 0x00000100); + nv_wr32(priv, NV03_PFIFO_CACHES, reassign); } static int diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c index 2e7ec389eea7..57e2c5b13123 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c @@ -1032,9 +1032,9 @@ 
gf100_grctx_generate_bundle(struct gf100_grctx *info) const int s = 8; const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); mmio_refn(info, 0x408004, 0x00000000, s, b); - mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); mmio_refn(info, 0x418808, 0x00000000, s, b); - mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s)); } void diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c index b52300d8861a..5e9454ba158f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c @@ -851,9 +851,9 @@ gk104_grctx_generate_bundle(struct gf100_grctx *info) const int s = 8; const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); mmio_refn(info, 0x408004, 0x00000000, s, b); - mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); mmio_refn(info, 0x418808, 0x00000000, s, b); - mmio_refn(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_wr32(info, 0x41880c, 0x80000000 | (impl->bundle_size >> s)); mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c index 956f4dce960c..b2fae6e389e2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c @@ -871,9 +871,9 @@ gm107_grctx_generate_bundle(struct gf100_grctx *info) const int s = 8; const int b = mmio_vram(info, impl->bundle_size, (1 << s), access); mmio_refn(info, 0x408004, 0x00000000, s, b); - mmio_refn(info, 0x408008, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_wr32(info, 0x408008, 0x80000000 | (impl->bundle_size >> s)); mmio_refn(info, 0x418e24, 0x00000000, s, b); - mmio_refn(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s), 0, b); + mmio_wr32(info, 0x418e28, 0x80000000 | (impl->bundle_size >> s)); mmio_wr32(info, 0x4064c8, (state_limit << 16) | token_limit); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c index d1a89b2bd5c1..c4e1f085ee10 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/i2c.c @@ -74,7 +74,11 @@ dcb_i2c_parse(struct nvkm_bios *bios, u8 idx, struct dcb_i2c_entry *info) u16 ent = dcb_i2c_entry(bios, idx, &ver, &len); if (ent) { if (ver >= 0x41) { - if (!(nv_ro32(bios, ent) & 0x80000000)) + u32 ent_value = nv_ro32(bios, ent); + u8 i2c_port = (ent_value >> 27) & 0x1f; + u8 dpaux_port = (ent_value >> 22) & 0x1f; + /* value 0x1f means unused according to DCB 4.x spec */ + if (i2c_port == 0x1f && dpaux_port == 0x1f) info->type = DCB_I2C_UNUSED; else info->type = DCB_I2C_PMGR; diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index c648e1996dab..243a36c93b8f 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -2129,6 +2129,7 @@ #define VCE_UENC_REG_CLOCK_GATING 0x207c0 #define VCE_SYS_INT_EN 0x21300 # define VCE_SYS_INT_TRAP_INTERRUPT_EN (1 << 3) +#define VCE_LMI_VCPU_CACHE_40BIT_BAR 0x2145c #define VCE_LMI_CTRL2 0x21474 #define VCE_LMI_CTRL 0x21498 #define VCE_LMI_VM_CTRL 0x214a0 diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h 
index 5587603b4a89..33d5a4f4eebd 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -1565,6 +1565,7 @@ struct radeon_dpm { int new_active_crtc_count; u32 current_active_crtcs; int current_active_crtc_count; + bool single_display; struct radeon_dpm_dynamic_state dyn_state; struct radeon_dpm_fan fan; u32 tdp_limit; diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index 63ccb8fa799c..d27e4ccb848c 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -76,7 +76,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev) static bool radeon_read_bios(struct radeon_device *rdev) { - uint8_t __iomem *bios; + uint8_t __iomem *bios, val1, val2; size_t size; rdev->bios = NULL; @@ -86,15 +86,19 @@ static bool radeon_read_bios(struct radeon_device *rdev) return false; } - if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) { + val1 = readb(&bios[0]); + val2 = readb(&bios[1]); + + if (size == 0 || val1 != 0x55 || val2 != 0xaa) { pci_unmap_rom(rdev->pdev, bios); return false; } - rdev->bios = kmemdup(bios, size, GFP_KERNEL); + rdev->bios = kzalloc(size, GFP_KERNEL); if (rdev->bios == NULL) { pci_unmap_rom(rdev->pdev, bios); return false; } + memcpy_fromio(rdev->bios, bios, size); pci_unmap_rom(rdev->pdev, bios); return true; } diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index d13d1b5a859f..df09ca7c4889 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -1030,37 +1030,59 @@ static inline bool radeon_test_signaled(struct radeon_fence *fence) return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); } +struct radeon_wait_cb { + struct fence_cb base; + struct task_struct *task; +}; + +static void +radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb) +{ + struct radeon_wait_cb *wait = + container_of(cb, struct radeon_wait_cb, base); + + wake_up_process(wait->task); +} + static signed long radeon_fence_default_wait(struct fence *f, bool intr, signed long t) { struct radeon_fence *fence = to_radeon_fence(f); struct radeon_device *rdev = fence->rdev; - bool signaled; + struct radeon_wait_cb cb; - fence_enable_sw_signaling(&fence->base); + cb.task = current; - /* - * This function has to return -EDEADLK, but cannot hold - * exclusive_lock during the wait because some callers - * may already hold it. This means checking needs_reset without - * lock, and not fiddling with any gpu internals. - * - * The callback installed with fence_enable_sw_signaling will - * run before our wait_event_*timeout call, so we will see - * both the signaled fence and the changes to needs_reset. 
-	 */
+	if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
+		return t;
+
+	while (t > 0) {
+		if (intr)
+			set_current_state(TASK_INTERRUPTIBLE);
+		else
+			set_current_state(TASK_UNINTERRUPTIBLE);
+
+		/*
+		 * radeon_test_signaled must be called after
+		 * set_current_state to prevent a race with wake_up_process
+		 */
+		if (radeon_test_signaled(fence))
+			break;
+
+		if (rdev->needs_reset) {
+			t = -EDEADLK;
+			break;
+		}
+
+		t = schedule_timeout(t);
+
+		if (t > 0 && intr && signal_pending(current))
+			t = -ERESTARTSYS;
+	}
+
+	__set_current_state(TASK_RUNNING);
+	fence_remove_callback(f, &cb.base);
 
-	if (intr)
-		t = wait_event_interruptible_timeout(rdev->fence_queue,
-			((signaled = radeon_test_signaled(fence)) ||
-			 rdev->needs_reset), t);
-	else
-		t = wait_event_timeout(rdev->fence_queue,
-			((signaled = radeon_test_signaled(fence)) ||
-			 rdev->needs_reset), t);
-
-	if (t > 0 && !signaled)
-		return -EDEADLK;
 	return t;
 }
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 061eaa9c19c7..122eb5693ba1 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -153,7 +153,7 @@ void radeon_kfd_device_init(struct radeon_device *rdev)
 			.compute_vmid_bitmap = 0xFF00,
 
 			.first_compute_pipe = 1,
-			.compute_pipe_count = 8 - 1,
+			.compute_pipe_count = 4 - 1,
 		};
 
 		radeon_doorbell_get_kfd_info(rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index a69bd441dd2d..572b4dbec186 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -122,7 +122,6 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
 		struct radeon_bo *bo;
-		struct fence *fence;
 		int r;
 
 		bo = container_of(it, struct radeon_bo, mn_it);
@@ -134,12 +133,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 			continue;
 		}
 
-		fence = reservation_object_get_excl(bo->tbo.resv);
-		if (fence) {
-			r = radeon_fence_wait((struct radeon_fence *)fence, false);
-			if (r)
-				DRM_ERROR("(%d) failed to wait for user bo\n", r);
-		}
+		r = reservation_object_wait_timeout_rcu(bo->tbo.resv, true,
+			false, MAX_SCHEDULE_TIMEOUT);
+		if (r)
+			DRM_ERROR("(%d) failed to wait for user bo\n", r);
 
 		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
 		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 43e09942823e..318165d4855c 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -173,17 +173,6 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 		else
 			rbo->placements[i].lpfn = 0;
 	}
-
-	/*
-	 * Use two-ended allocation depending on the buffer size to
-	 * improve fragmentation quality.
-	 * 512kb was measured as the most optimal number.
-	 */
-	if (rbo->tbo.mem.size > 512 * 1024) {
-		for (i = 0; i < c; i++) {
-			rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
-		}
-	}
 }
 
 int radeon_bo_create(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 33cf4108386d..c1ba83a8dd8c 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -837,12 +837,8 @@ static void radeon_dpm_thermal_work_handler(struct work_struct *work)
 	radeon_pm_compute_clocks(rdev);
 }
 
-static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
-						     enum radeon_pm_state_type dpm_state)
+static bool radeon_dpm_single_display(struct radeon_device *rdev)
 {
-	int i;
-	struct radeon_ps *ps;
-	u32 ui_class;
 	bool single_display = (rdev->pm.dpm.new_active_crtc_count < 2) ?
 		true : false;
 
@@ -858,6 +854,17 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
 	if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120))
 		single_display = false;
 
+	return single_display;
+}
+
+static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev,
+						     enum radeon_pm_state_type dpm_state)
+{
+	int i;
+	struct radeon_ps *ps;
+	u32 ui_class;
+	bool single_display = radeon_dpm_single_display(rdev);
+
 	/* certain older asics have a separare 3D performance state,
 	 * so try that first if the user selected performance
 	 */
@@ -983,6 +990,7 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
 	struct radeon_ps *ps;
 	enum radeon_pm_state_type dpm_state;
 	int ret;
+	bool single_display = radeon_dpm_single_display(rdev);
 
 	/* if dpm init failed */
 	if (!rdev->pm.dpm_enabled)
@@ -1007,6 +1015,9 @@ static void radeon_dpm_change_power_state_locked(struct radeon_device *rdev)
 		/* vce just modifies an existing state so force a change */
 		if (ps->vce_active != rdev->pm.dpm.vce_active)
 			goto force;
+		/* user has made a display change (such as timing) */
+		if (rdev->pm.dpm.single_display != single_display)
+			goto force;
 		if ((rdev->family < CHIP_BARTS) || (rdev->flags & RADEON_IS_IGP)) {
 			/* for pre-BTC and APUs if the num crtcs changed but state is the same,
 			 * all we need to do is update the display configuration.
@@ -1069,6 +1080,7 @@ force:
 
 	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
 	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+	rdev->pm.dpm.single_display = single_display;
 
 	/* wait for the rings to drain */
 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 2456f69efd23..8c7872339c2a 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -495,7 +495,7 @@ static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
 	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
 	seq_printf(m, "%u dwords in ring\n", count);
 
-	if (!ring->ready)
+	if (!ring->ring)
 		return 0;
 
 	/* print 8 dw before current rptr as often it's the last executed
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index d02aa1d0f588..b292aca0f342 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -598,6 +598,10 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 	enum dma_data_direction direction = write ?
 		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 
+	/* double check that we don't free the table twice */
+	if (!ttm->sg->sgl)
+		return;
+
 	/* free the sg table and pages again */
 	dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index e088e5558da0..a7fb2735d4a9 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -7130,8 +7130,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK);
 
 	if (!vclk || !dclk) {
-		/* keep the Bypass mode, put PLL to sleep */
-		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+		/* keep the Bypass mode */
 		return 0;
 	}
@@ -7147,8 +7146,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
 	/* set VCO_MODE to 1 */
 	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK);
 
-	/* toggle UPLL_SLEEP to 1 then back to 0 */
-	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+	/* disable sleep mode */
 	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK);
 
 	/* deassert UPLL_RESET */
diff --git a/drivers/gpu/drm/radeon/vce_v2_0.c b/drivers/gpu/drm/radeon/vce_v2_0.c
index 1ac7bb825a1b..fbbe78fbd087 100644
--- a/drivers/gpu/drm/radeon/vce_v2_0.c
+++ b/drivers/gpu/drm/radeon/vce_v2_0.c
@@ -156,6 +156,9 @@ int vce_v2_0_resume(struct radeon_device *rdev)
 	WREG32(VCE_LMI_SWAP_CNTL1, 0);
 	WREG32(VCE_LMI_VM_CTRL, 0);
 
+	WREG32(VCE_LMI_VCPU_CACHE_40BIT_BAR, addr >> 8);
+
+	addr &= 0xff;
 	size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size);
 	WREG32(VCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
 	WREG32(VCE_VCPU_CACHE_SIZE0, size);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 6c6b655defcf..e13b9cbc304e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -725,32 +725,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		goto out_err1;
 	}
 
-	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
-			     (dev_priv->vram_size >> PAGE_SHIFT));
-	if (unlikely(ret != 0)) {
-		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
-		goto out_err2;
-	}
-
-	dev_priv->has_gmr = true;
-	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
-	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
-					 VMW_PL_GMR) != 0) {
-		DRM_INFO("No GMR memory available. "
-			 "Graphics memory resources are very limited.\n");
-		dev_priv->has_gmr = false;
-	}
-
-	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
-		dev_priv->has_mob = true;
-		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
-				   VMW_PL_MOB) != 0) {
-			DRM_INFO("No MOB memory available. "
-				 "3D will be disabled.\n");
-			dev_priv->has_mob = false;
-		}
-	}
-
 	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
 					       dev_priv->mmio_size);
@@ -813,6 +787,33 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		goto out_no_fman;
 	}
 
+
+	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
+			     (dev_priv->vram_size >> PAGE_SHIFT));
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
+		goto out_no_vram;
+	}
+
+	dev_priv->has_gmr = true;
+	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
+	    refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
+					 VMW_PL_GMR) != 0) {
+		DRM_INFO("No GMR memory available. "
+			 "Graphics memory resources are very limited.\n");
+		dev_priv->has_gmr = false;
+	}
+
+	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+		dev_priv->has_mob = true;
+		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
+				   VMW_PL_MOB) != 0) {
+			DRM_INFO("No MOB memory available. "
+				 "3D will be disabled.\n");
+			dev_priv->has_mob = false;
+		}
+	}
+
 	vmw_kms_save_vga(dev_priv);
 
 	/* Start kms and overlay systems, needs fifo. */
@@ -838,6 +839,12 @@ out_no_fifo:
 	vmw_kms_close(dev_priv);
 out_no_kms:
 	vmw_kms_restore_vga(dev_priv);
+	if (dev_priv->has_mob)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+	if (dev_priv->has_gmr)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+out_no_vram:
 	vmw_fence_manager_takedown(dev_priv->fman);
 out_no_fman:
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
@@ -853,12 +860,6 @@ out_err4:
 	iounmap(dev_priv->mmio_virt);
 out_err3:
 	arch_phys_wc_del(dev_priv->mmio_mtrr);
-	if (dev_priv->has_mob)
-		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
-	if (dev_priv->has_gmr)
-		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
-	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
-out_err2:
 	(void)ttm_bo_device_release(&dev_priv->bdev);
 out_err1:
 	vmw_ttm_global_release(dev_priv);
@@ -887,6 +888,13 @@ static int vmw_driver_unload(struct drm_device *dev)
 	}
 	vmw_kms_close(dev_priv);
 	vmw_overlay_close(dev_priv);
+
+	if (dev_priv->has_mob)
+		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
+	if (dev_priv->has_gmr)
+		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
+	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
+
 	vmw_fence_manager_takedown(dev_priv->fman);
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 		drm_irq_uninstall(dev_priv->dev);
@@ -898,11 +906,6 @@ static int vmw_driver_unload(struct drm_device *dev)
 	ttm_object_device_release(&dev_priv->tdev);
 	iounmap(dev_priv->mmio_virt);
 	arch_phys_wc_del(dev_priv->mmio_mtrr);
-	if (dev_priv->has_mob)
-		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
-	if (dev_priv->has_gmr)
-		(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
-	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 	(void)ttm_bo_device_release(&dev_priv->bdev);
 	vmw_ttm_global_release(dev_priv);
@@ -1235,6 +1238,7 @@ static void vmw_remove(struct pci_dev *pdev)
 {
 	struct drm_device *dev = pci_get_drvdata(pdev);
 
+	pci_disable_device(pdev);
 	drm_put_dev(dev);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 33176d05db35..654c8daeb5ab 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -890,7 +890,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use MOB buffer.\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out_no_reloc;
 	}
 
 	bo = &vmw_bo->base;
@@ -914,7 +915,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 
 out_no_reloc:
 	vmw_dmabuf_unreference(&vmw_bo);
-	vmw_bo_p = NULL;
+	*vmw_bo_p = NULL;
 	return ret;
 }
@@ -951,7 +952,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out_no_reloc;
 	}
 
 	bo = &vmw_bo->base;
@@ -974,7 +976,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 
 out_no_reloc:
 	vmw_dmabuf_unreference(&vmw_bo);
-	vmw_bo_p = NULL;
+	*vmw_bo_p = NULL;
 	return ret;
 }
@@ -2780,13 +2782,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 				      NULL, arg->command_size, arg->throttle_us,
 				      (void __user *)(unsigned long)arg->fence_rep,
 				      NULL);
-
+	ttm_read_unlock(&dev_priv->reservation_sem);
 	if (unlikely(ret != 0))
-		goto out_unlock;
+		return ret;
 
 	vmw_kms_cursor_post_execbuf(dev_priv);
 
-out_unlock:
-	ttm_read_unlock(&dev_priv->reservation_sem);
-	return ret;
+	return 0;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 8725b79e7847..07cda8cbbddb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -2033,23 +2033,17 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 	int i;
 	struct drm_mode_config *mode_config = &dev->mode_config;
 
-	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
-	if (unlikely(ret != 0))
-		return ret;
-
 	if (!arg->num_outputs) {
 		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
 		vmw_du_update_layout(dev_priv, 1, &def_rect);
-		goto out_unlock;
+		return 0;
 	}
 
 	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
 	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
 			GFP_KERNEL);
-	if (unlikely(!rects)) {
-		ret = -ENOMEM;
-		goto out_unlock;
-	}
+	if (unlikely(!rects))
+		return -ENOMEM;
 
 	user_rects = (void __user *)(unsigned long)arg->rects;
 	ret = copy_from_user(rects, user_rects, rects_size);
@@ -2074,7 +2068,5 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 
 out_free:
 	kfree(rects);
-out_unlock:
-	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
 }