diff options
Diffstat (limited to 'drivers/gpu')
53 files changed, 397 insertions, 232 deletions
diff --git a/drivers/gpu/drm/ast/ast_tables.h b/drivers/gpu/drm/ast/ast_tables.h index 4c761dcea972..05c01ea85294 100644 --- a/drivers/gpu/drm/ast/ast_tables.h +++ b/drivers/gpu/drm/ast/ast_tables.h @@ -99,6 +99,7 @@ static struct ast_vbios_dclk_info dclk_table[] = {  	{0x25, 0x65, 0x80},					/* 16: VCLK88.75    */  	{0x77, 0x58, 0x80},					/* 17: VCLK119      */  	{0x32, 0x67, 0x80},				    /* 18: VCLK85_5     */ +	{0x6a, 0x6d, 0x80},					/* 19: VCLK97_75	*/  };  static struct ast_vbios_stdtable vbios_stdtable[] = { diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index fa2be249999c..90e773019eac 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -4696,8 +4696,9 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,  		return -EINVAL;  	/* overflow checks for 32bit size calculations */ +	/* NOTE: DIV_ROUND_UP() can overflow */  	cpp = DIV_ROUND_UP(args->bpp, 8); -	if (cpp > 0xffffffffU / args->width) +	if (!cpp || cpp > 0xffffffffU / args->width)  		return -EINVAL;  	stride = cpp * args->width;  	if (args->height > 0xffffffffU / stride) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ec96f9a9724c..e27cdbe9d524 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -494,6 +494,36 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)  	return true;  } +void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) +{ +	spin_lock_irq(&dev_priv->irq_lock); + +	dev_priv->long_hpd_port_mask = 0; +	dev_priv->short_hpd_port_mask = 0; +	dev_priv->hpd_event_bits = 0; + +	spin_unlock_irq(&dev_priv->irq_lock); + +	cancel_work_sync(&dev_priv->dig_port_work); +	cancel_work_sync(&dev_priv->hotplug_work); +	cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work); +} + +static void intel_suspend_encoders(struct drm_i915_private *dev_priv) +{ +	struct drm_device *dev = dev_priv->dev; +	struct drm_encoder *encoder; + +	drm_modeset_lock_all(dev); +	
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { +		struct intel_encoder *intel_encoder = to_intel_encoder(encoder); + +		if (intel_encoder->suspend) +			intel_encoder->suspend(intel_encoder); +	} +	drm_modeset_unlock_all(dev); +} +  static int i915_drm_freeze(struct drm_device *dev)  {  	struct drm_i915_private *dev_priv = dev->dev_private; @@ -538,6 +568,9 @@ static int i915_drm_freeze(struct drm_device *dev)  		flush_delayed_work(&dev_priv->rps.delayed_resume_work);  		intel_runtime_pm_disable_interrupts(dev); +		intel_hpd_cancel_work(dev_priv); + +		intel_suspend_encoders(dev_priv);  		intel_suspend_gt_powersave(dev); diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4412f6a4383b..7a830eac5ba3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1458,7 +1458,7 @@ struct drm_i915_private {  		} hpd_mark;  	} hpd_stats[HPD_NUM_PINS];  	u32 hpd_event_bits; -	struct timer_list hotplug_reenable_timer; +	struct delayed_work hotplug_reenable_work;  	struct i915_fbc fbc;  	struct i915_drrs drrs; @@ -2178,6 +2178,7 @@ extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);  extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);  extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);  int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); +void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);  extern void intel_console_resume(struct work_struct *work); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 390ccc2a3096..0050ee9470f1 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1189,8 +1189,8 @@ static void i915_hotplug_work_func(struct work_struct *work)  	  * some connectors */  	if (hpd_disabled) {  		drm_kms_helper_poll_enable(dev); -		mod_timer(&dev_priv->hotplug_reenable_timer, -			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); +		
mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work, +				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));  	}  	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); @@ -1213,11 +1213,6 @@ static void i915_hotplug_work_func(struct work_struct *work)  		drm_kms_helper_hotplug_event(dev);  } -static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv) -{ -	del_timer_sync(&dev_priv->hotplug_reenable_timer); -} -  static void ironlake_rps_change_irq_handler(struct drm_device *dev)  {  	struct drm_i915_private *dev_priv = dev->dev_private; @@ -3892,8 +3887,6 @@ static void gen8_irq_uninstall(struct drm_device *dev)  	if (!dev_priv)  		return; -	intel_hpd_irq_uninstall(dev_priv); -  	gen8_irq_reset(dev);  } @@ -3908,8 +3901,6 @@ static void valleyview_irq_uninstall(struct drm_device *dev)  	I915_WRITE(VLV_MASTER_IER, 0); -	intel_hpd_irq_uninstall(dev_priv); -  	for_each_pipe(pipe)  		I915_WRITE(PIPESTAT(pipe), 0xffff); @@ -3988,8 +3979,6 @@ static void ironlake_irq_uninstall(struct drm_device *dev)  	if (!dev_priv)  		return; -	intel_hpd_irq_uninstall(dev_priv); -  	ironlake_irq_reset(dev);  } @@ -4360,8 +4349,6 @@ static void i915_irq_uninstall(struct drm_device * dev)  	struct drm_i915_private *dev_priv = dev->dev_private;  	int pipe; -	intel_hpd_irq_uninstall(dev_priv); -  	if (I915_HAS_HOTPLUG(dev)) {  		I915_WRITE(PORT_HOTPLUG_EN, 0);  		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); @@ -4598,8 +4585,6 @@ static void i965_irq_uninstall(struct drm_device * dev)  	if (!dev_priv)  		return; -	intel_hpd_irq_uninstall(dev_priv); -  	I915_WRITE(PORT_HOTPLUG_EN, 0);  	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); @@ -4615,14 +4600,18 @@ static void i965_irq_uninstall(struct drm_device * dev)  	I915_WRITE(IIR, I915_READ(IIR));  } -static void intel_hpd_irq_reenable(unsigned long data) +static void intel_hpd_irq_reenable(struct work_struct *work)  { -	struct drm_i915_private *dev_priv = (struct drm_i915_private *)data; +	
struct drm_i915_private *dev_priv = +		container_of(work, typeof(*dev_priv), +			     hotplug_reenable_work.work);  	struct drm_device *dev = dev_priv->dev;  	struct drm_mode_config *mode_config = &dev->mode_config;  	unsigned long irqflags;  	int i; +	intel_runtime_pm_get(dev_priv); +  	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);  	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {  		struct drm_connector *connector; @@ -4648,6 +4637,8 @@ static void intel_hpd_irq_reenable(unsigned long data)  	if (dev_priv->display.hpd_irq_setup)  		dev_priv->display.hpd_irq_setup(dev);  	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); + +	intel_runtime_pm_put(dev_priv);  }  void intel_irq_init(struct drm_device *dev) @@ -4670,8 +4661,8 @@ void intel_irq_init(struct drm_device *dev)  	setup_timer(&dev_priv->gpu_error.hangcheck_timer,  		    i915_hangcheck_elapsed,  		    (unsigned long) dev); -	setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable, -		    (unsigned long) dev_priv); +	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work, +			  intel_hpd_irq_reenable);  	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 2efaf8e8d9c4..e8abfce40976 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c @@ -699,16 +699,21 @@ intel_crt_detect(struct drm_connector *connector, bool force)  		goto out;  	} +	drm_modeset_acquire_init(&ctx, 0); +  	/* for pre-945g platforms use load detect */  	if (intel_get_load_detect_pipe(connector, NULL, &tmp, &ctx)) {  		if (intel_crt_detect_ddc(connector))  			status = connector_status_connected;  		else  			status = intel_crt_load_detect(crt); -		intel_release_load_detect_pipe(connector, &tmp, &ctx); +		intel_release_load_detect_pipe(connector, &tmp);  	} else  		status = connector_status_unknown; +	drm_modeset_drop_locks(&ctx); +	drm_modeset_acquire_fini(&ctx); +  out:  	
intel_display_power_put(dev_priv, power_domain);  	return status; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 018fb7222f60..d074d704f458 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8462,8 +8462,6 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,  		      connector->base.id, connector->name,  		      encoder->base.id, encoder->name); -	drm_modeset_acquire_init(ctx, 0); -  retry:  	ret = drm_modeset_lock(&config->connection_mutex, ctx);  	if (ret) @@ -8502,10 +8500,14 @@ retry:  		i++;  		if (!(encoder->possible_crtcs & (1 << i)))  			continue; -		if (!possible_crtc->enabled) { -			crtc = possible_crtc; -			break; -		} +		if (possible_crtc->enabled) +			continue; +		/* This can occur when applying the pipe A quirk on resume. */ +		if (to_intel_crtc(possible_crtc)->new_enabled) +			continue; + +		crtc = possible_crtc; +		break;  	}  	/* @@ -8574,15 +8576,11 @@ fail_unlock:  		goto retry;  	} -	drm_modeset_drop_locks(ctx); -	drm_modeset_acquire_fini(ctx); -  	return false;  }  void intel_release_load_detect_pipe(struct drm_connector *connector, -				    struct intel_load_detect_pipe *old, -				    struct drm_modeset_acquire_ctx *ctx) +				    struct intel_load_detect_pipe *old)  {  	struct intel_encoder *intel_encoder =  		intel_attached_encoder(connector); @@ -8606,17 +8604,12 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,  			drm_framebuffer_unreference(old->release_fb);  		} -		goto unlock;  		return;  	}  	/* Switch crtc and encoder back off if necessary */  	if (old->dpms_mode != DRM_MODE_DPMS_ON)  		connector->funcs->dpms(connector, old->dpms_mode); - -unlock: -	drm_modeset_drop_locks(ctx); -	drm_modeset_acquire_fini(ctx);  }  static int i9xx_pll_refclk(struct drm_device *dev, @@ -11700,8 +11693,8 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,  	};  	const struct drm_rect clip = {  		/* 
integer pixels */ -		.x2 = intel_crtc->config.pipe_src_w, -		.y2 = intel_crtc->config.pipe_src_h, +		.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0, +		.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,  	};  	bool visible;  	int ret; @@ -12659,7 +12652,7 @@ static void intel_enable_pipe_a(struct drm_device *dev)  	struct intel_connector *connector;  	struct drm_connector *crt = NULL;  	struct intel_load_detect_pipe load_detect_temp; -	struct drm_modeset_acquire_ctx ctx; +	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;  	/* We can't just switch on the pipe A, we need to set things up with a  	 * proper mode and output configuration. As a gross hack, enable pipe A @@ -12676,10 +12669,8 @@ static void intel_enable_pipe_a(struct drm_device *dev)  	if (!crt)  		return; -	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, &ctx)) -		intel_release_load_detect_pipe(crt, &load_detect_temp, &ctx); - - +	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx)) +		intel_release_load_detect_pipe(crt, &load_detect_temp);  }  static bool @@ -13112,7 +13103,7 @@ void intel_modeset_cleanup(struct drm_device *dev)  	 * experience fancy races otherwise.  	 
*/  	drm_irq_uninstall(dev); -	cancel_work_sync(&dev_priv->hotplug_work); +	intel_hpd_cancel_work(dev_priv);  	dev_priv->pm._irqs_disabled = true;  	/* diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index ee3942f0b068..67cfed6d911a 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -3553,6 +3553,9 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)  	if (WARN_ON(!intel_encoder->base.crtc))  		return; +	if (!to_intel_crtc(intel_encoder->base.crtc)->active) +		return; +  	/* Try to read receiver status if the link appears to be up */  	if (!intel_dp_get_link_status(intel_dp, link_status)) {  		return; @@ -4003,6 +4006,16 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)  	kfree(intel_dig_port);  } +static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder) +{ +	struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); + +	if (!is_edp(intel_dp)) +		return; + +	edp_panel_vdd_off_sync(intel_dp); +} +  static void intel_dp_encoder_reset(struct drm_encoder *encoder)  {  	intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder)); @@ -4037,15 +4050,21 @@ bool  intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)  {  	struct intel_dp *intel_dp = &intel_dig_port->dp; +	struct intel_encoder *intel_encoder = &intel_dig_port->base;  	struct drm_device *dev = intel_dig_port->base.base.dev;  	struct drm_i915_private *dev_priv = dev->dev_private; -	int ret; +	enum intel_display_power_domain power_domain; +	bool ret = true; +  	if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)  		intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;  	DRM_DEBUG_KMS("got hpd irq on port %d - %s\n", intel_dig_port->port,  		      long_hpd ? 
"long" : "short"); +	power_domain = intel_display_port_power_domain(intel_encoder); +	intel_display_power_get(dev_priv, power_domain); +  	if (long_hpd) {  		if (!ibx_digital_port_connected(dev_priv, intel_dig_port))  			goto mst_fail; @@ -4061,8 +4080,7 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)  	} else {  		if (intel_dp->is_mst) { -			ret = intel_dp_check_mst_status(intel_dp); -			if (ret == -EINVAL) +			if (intel_dp_check_mst_status(intel_dp) == -EINVAL)  				goto mst_fail;  		} @@ -4076,7 +4094,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)  			drm_modeset_unlock(&dev->mode_config.connection_mutex);  		}  	} -	return false; +	ret = false; +	goto put_power;  mst_fail:  	/* if we were in MST mode, and device is not there get out of MST mode */  	if (intel_dp->is_mst) { @@ -4084,7 +4103,10 @@ mst_fail:  		intel_dp->is_mst = false;  		drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);  	} -	return true; +put_power: +	intel_display_power_put(dev_priv, power_domain); + +	return ret;  }  /* Return which DP Port should be selected for Transcoder DP control */ @@ -4722,6 +4744,7 @@ intel_dp_init(struct drm_device *dev, int output_reg, enum port port)  	intel_encoder->disable = intel_disable_dp;  	intel_encoder->get_hw_state = intel_dp_get_hw_state;  	intel_encoder->get_config = intel_dp_get_config; +	intel_encoder->suspend = intel_dp_encoder_suspend;  	if (IS_CHERRYVIEW(dev)) {  		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;  		intel_encoder->pre_enable = chv_pre_enable_dp; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 4b2664bd5b81..b8c8bbd8e5f9 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -153,6 +153,12 @@ struct intel_encoder {  	 * be set correctly before calling this function. 
*/  	void (*get_config)(struct intel_encoder *,  			   struct intel_crtc_config *pipe_config); +	/* +	 * Called during system suspend after all pending requests for the +	 * encoder are flushed (for example for DP AUX transactions) and +	 * device interrupts are disabled. +	 */ +	void (*suspend)(struct intel_encoder *);  	int crtc_mask;  	enum hpd_pin hpd_pin;  }; @@ -830,8 +836,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,  				struct intel_load_detect_pipe *old,  				struct drm_modeset_acquire_ctx *ctx);  void intel_release_load_detect_pipe(struct drm_connector *connector, -				    struct intel_load_detect_pipe *old, -				    struct drm_modeset_acquire_ctx *ctx); +				    struct intel_load_detect_pipe *old);  int intel_pin_and_fence_fb_obj(struct drm_device *dev,  			       struct drm_i915_gem_object *obj,  			       struct intel_engine_cs *pipelined); diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index e211eef4b7e4..32186a656816 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1323,11 +1323,16 @@ intel_tv_detect(struct drm_connector *connector, bool force)  		struct intel_load_detect_pipe tmp;  		struct drm_modeset_acquire_ctx ctx; +		drm_modeset_acquire_init(&ctx, 0); +  		if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) {  			type = intel_tv_detect_type(intel_tv, connector); -			intel_release_load_detect_pipe(connector, &tmp, &ctx); +			intel_release_load_detect_pipe(connector, &tmp);  		} else  			return connector_status_unknown; + +		drm_modeset_drop_locks(&ctx); +		drm_modeset_acquire_fini(&ctx);  	} else  		return connector->status; diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c index 74cebb51e8c2..c6c80ea28c35 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c @@ -397,6 +397,7 @@ static void mdp4_crtc_prepare(struct drm_crtc *crtc)  	struct mdp4_crtc 
*mdp4_crtc = to_mdp4_crtc(crtc);  	DBG("%s", mdp4_crtc->name);  	/* make sure we hold a ref to mdp clks while setting up mode: */ +	drm_crtc_vblank_get(crtc);  	mdp4_enable(get_kms(crtc));  	mdp4_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);  } @@ -407,6 +408,7 @@ static void mdp4_crtc_commit(struct drm_crtc *crtc)  	crtc_flush(crtc);  	/* drop the ref to mdp clk's that we got in prepare: */  	mdp4_disable(get_kms(crtc)); +	drm_crtc_vblank_put(crtc);  }  static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index b447c01ad89c..26ee80db17af 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -974,12 +974,11 @@ static int msm_pdev_probe(struct platform_device *pdev)  	for (i = 0; i < ARRAY_SIZE(devnames); i++) {  		struct device *dev; -		int ret;  		dev = bus_find_device_by_name(&platform_bus_type,  				NULL, devnames[i]);  		if (!dev) { -			dev_info(master, "still waiting for %s\n", devnames[i]); +			dev_info(&pdev->dev, "still waiting for %s\n", devnames[i]);  			return -EPROBE_DEFER;  		} diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index 9c5221ce391a..ab5bfd2d0ebf 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -143,7 +143,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,  	ret = msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);  	if (ret) {  		dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret); -		goto fail; +		goto fail_unlock;  	}  	fbi = framebuffer_alloc(0, dev->dev); diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 099af483fdf0..7acdaa5688b7 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -27,8 +27,8 @@ struct msm_iommu {  static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,  		unsigned long iova, int flags, void *arg)  { -	DBG("*** fault: iova=%08lx, 
flags=%d", iova, flags); -	return -ENOSYS; +	pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags); +	return 0;  }  static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt) diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile index 0013ad0db9ef..f77b7135ee4c 100644 --- a/drivers/gpu/drm/radeon/Makefile +++ b/drivers/gpu/drm/radeon/Makefile @@ -76,7 +76,7 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \  	evergreen.o evergreen_cs.o evergreen_blit_shaders.o \  	evergreen_hdmi.o radeon_trace_points.o ni.o cayman_blit_shaders.o \  	atombios_encoders.o radeon_semaphore.o radeon_sa.o atombios_i2c.o si.o \ -	si_blit_shaders.o radeon_prime.o radeon_uvd.o cik.o cik_blit_shaders.o \ +	si_blit_shaders.o radeon_prime.o cik.o cik_blit_shaders.o \  	r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \  	rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \  	trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index 022561e28707..d416bb2ff48d 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c @@ -869,6 +869,9 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev,  	WREG32_SMC(CG_THERMAL_CTRL, tmp);  #endif +	rdev->pm.dpm.thermal.min_temp = low_temp; +	rdev->pm.dpm.thermal.max_temp = high_temp; +  	return 0;  } diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index b625646bf3e2..fa9565957f9d 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -3483,7 +3483,7 @@ static void cik_gpu_init(struct radeon_device *rdev)  	u32 mc_shared_chmap, mc_arb_ramcfg;  	u32 hdp_host_path_cntl;  	u32 tmp; -	int i, j, k; +	int i, j;  	switch (rdev->family) {  	case CHIP_BONAIRE: @@ -3544,6 +3544,7 @@ static void cik_gpu_init(struct radeon_device *rdev)  			   (rdev->pdev->device == 0x130B) 
||  			   (rdev->pdev->device == 0x130E) ||  			   (rdev->pdev->device == 0x1315) || +			   (rdev->pdev->device == 0x1318) ||  			   (rdev->pdev->device == 0x131B)) {  			rdev->config.cik.max_cu_per_sh = 4;  			rdev->config.cik.max_backends_per_se = 1; @@ -3672,12 +3673,11 @@ static void cik_gpu_init(struct radeon_device *rdev)  		     rdev->config.cik.max_sh_per_se,  		     rdev->config.cik.max_backends_per_se); +	rdev->config.cik.active_cus = 0;  	for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {  		for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) { -			for (k = 0; k < rdev->config.cik.max_cu_per_sh; k++) { -				rdev->config.cik.active_cus += -					hweight32(cik_get_cu_active_bitmap(rdev, i, j)); -			} +			rdev->config.cik.active_cus += +				hweight32(cik_get_cu_active_bitmap(rdev, i, j));  		}  	} @@ -3801,7 +3801,7 @@ int cik_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)  	radeon_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));  	radeon_ring_write(ring, ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2));  	radeon_ring_write(ring, 0xDEADBEEF); -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	for (i = 0; i < rdev->usec_timeout; i++) {  		tmp = RREG32(scratch); @@ -3920,6 +3920,17 @@ void cik_fence_compute_ring_emit(struct radeon_device *rdev,  	radeon_ring_write(ring, 0);  } +/** + * cik_semaphore_ring_emit - emit a semaphore on the CP ring + * + * @rdev: radeon_device pointer + * @ring: radeon ring buffer object + * @semaphore: radeon semaphore object + * @emit_wait: Is this a sempahore wait? + * + * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP + * from running ahead of semaphore waits. 
+ */  bool cik_semaphore_ring_emit(struct radeon_device *rdev,  			     struct radeon_ring *ring,  			     struct radeon_semaphore *semaphore, @@ -3932,6 +3943,12 @@ bool cik_semaphore_ring_emit(struct radeon_device *rdev,  	radeon_ring_write(ring, lower_32_bits(addr));  	radeon_ring_write(ring, (upper_32_bits(addr) & 0xffff) | sel); +	if (emit_wait && ring->idx == RADEON_RING_TYPE_GFX_INDEX) { +		/* Prevent the PFP from running ahead of the semaphore wait */ +		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); +		radeon_ring_write(ring, 0x0); +	} +  	return true;  } @@ -4004,7 +4021,7 @@ int cik_copy_cpdma(struct radeon_device *rdev,  		return r;  	} -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	radeon_semaphore_free(rdev, &sem, *fence);  	return r; @@ -4103,7 +4120,7 @@ int cik_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)  	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START) >> 2);  	ib.ptr[2] = 0xDEADBEEF;  	ib.length_dw = 3; -	r = radeon_ib_schedule(rdev, &ib, NULL); +	r = radeon_ib_schedule(rdev, &ib, NULL, false);  	if (r) {  		radeon_scratch_free(rdev, scratch);  		radeon_ib_free(rdev, &ib); @@ -4324,7 +4341,7 @@ static int cik_cp_gfx_start(struct radeon_device *rdev)  	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */  	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	return 0;  } @@ -5732,20 +5749,17 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)  	WREG32(0x15D8, 0);  	WREG32(0x15DC, 0); -	/* empty context1-15 */ -	/* FIXME start with 4G, once using 2 level pt switch to full -	 * vm size space -	 */ +	/* restore context1-15 */  	/* set vm size, must be a multiple of 4 */  	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);  	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);  	for (i = 1; i < 16; i++) {  		if (i < 8)  			
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), -			       rdev->gart.table_addr >> 12); +			       rdev->vm_manager.saved_table_addr[i]);  		else  			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2), -			       rdev->gart.table_addr >> 12); +			       rdev->vm_manager.saved_table_addr[i]);  	}  	/* enable context1-15 */ @@ -5810,6 +5824,17 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)   */  static void cik_pcie_gart_disable(struct radeon_device *rdev)  { +	unsigned i; + +	for (i = 1; i < 16; ++i) { +		uint32_t reg; +		if (i < 8) +			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2); +		else +			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2); +		rdev->vm_manager.saved_table_addr[i] = RREG32(reg); +	} +  	/* Disable all tables */  	WREG32(VM_CONTEXT0_CNTL, 0);  	WREG32(VM_CONTEXT1_CNTL, 0); @@ -5958,14 +5983,14 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)  	/* update SH_MEM_* regs */  	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); -	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | +	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |  				 WRITE_DATA_DST_SEL(0)));  	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);  	radeon_ring_write(ring, 0);  	radeon_ring_write(ring, VMID(vm->id));  	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6)); -	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | +	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |  				 WRITE_DATA_DST_SEL(0)));  	radeon_ring_write(ring, SH_MEM_BASES >> 2);  	radeon_ring_write(ring, 0); @@ -5976,7 +6001,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)  	radeon_ring_write(ring, 0); /* SH_MEM_APE1_LIMIT */  	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); -	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | +	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |  				 WRITE_DATA_DST_SEL(0)));  	radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);  	radeon_ring_write(ring, 
0); @@ -5987,7 +6012,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)  	/* bits 0-15 are the VM contexts0-15 */  	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); -	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | +	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |  				 WRITE_DATA_DST_SEL(0)));  	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);  	radeon_ring_write(ring, 0); @@ -9538,6 +9563,9 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev)  	int ret, i;  	u16 tmp16; +	if (pci_is_root_bus(rdev->pdev->bus)) +		return; +  	if (radeon_pcie_gen2 == 0)  		return; @@ -9764,7 +9792,8 @@ static void cik_program_aspm(struct radeon_device *rdev)  			if (orig != data)  				WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, data); -			if (!disable_clkreq) { +			if (!disable_clkreq && +			    !pci_is_root_bus(rdev->pdev->bus)) {  				struct pci_dev *root = rdev->pdev->bus->self;  				u32 lnkcap; diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index bcf480510ac2..192278bc993c 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -596,7 +596,7 @@ int cik_copy_dma(struct radeon_device *rdev,  		return r;  	} -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	radeon_semaphore_free(rdev, &sem, *fence);  	return r; @@ -638,7 +638,7 @@ int cik_sdma_ring_test(struct radeon_device *rdev,  	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr));  	radeon_ring_write(ring, 1); /* number of DWs to follow */  	radeon_ring_write(ring, 0xDEADBEEF); -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	for (i = 0; i < rdev->usec_timeout; i++) {  		tmp = readl(ptr); @@ -695,7 +695,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)  	ib.ptr[4] = 0xDEADBEEF;  	ib.length_dw = 5; -	r = radeon_ib_schedule(rdev, &ib, NULL); +	r = 
radeon_ib_schedule(rdev, &ib, NULL, false);  	if (r) {  		radeon_ib_free(rdev, &ib);  		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 4fedd14e670a..dbca60c7d097 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -2869,7 +2869,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)  	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));  	radeon_ring_write(ring, 0);  	radeon_ring_write(ring, 0); -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	cp_me = 0xff;  	WREG32(CP_ME_CNTL, cp_me); @@ -2912,7 +2912,7 @@ static int evergreen_cp_start(struct radeon_device *rdev)  	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */  	radeon_ring_write(ring, 0x00000010); /*  */ -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	return 0;  } diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c index 478caefe0fef..afaba388c36d 100644 --- a/drivers/gpu/drm/radeon/evergreen_dma.c +++ b/drivers/gpu/drm/radeon/evergreen_dma.c @@ -155,7 +155,7 @@ int evergreen_copy_dma(struct radeon_device *rdev,  		return r;  	} -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	radeon_semaphore_free(rdev, &sem, *fence);  	return r; diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 9ef8c38f2d66..8b58e11b64fa 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -1438,14 +1438,14 @@ static int kv_update_uvd_dpm(struct radeon_device *rdev, bool gate)  	return kv_enable_uvd_dpm(rdev, !gate);  } -static u8 kv_get_vce_boot_level(struct radeon_device *rdev) +static u8 kv_get_vce_boot_level(struct radeon_device *rdev, u32 evclk)  {  	u8 i;  	struct radeon_vce_clock_voltage_dependency_table *table =  		
&rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;  	for (i = 0; i < table->count; i++) { -		if (table->entries[i].evclk >= 0) /* XXX */ +		if (table->entries[i].evclk >= evclk)  			break;  	} @@ -1468,7 +1468,7 @@ static int kv_update_vce_dpm(struct radeon_device *rdev,  		if (pi->caps_stable_p_state)  			pi->vce_boot_level = table->count - 1;  		else -			pi->vce_boot_level = kv_get_vce_boot_level(rdev); +			pi->vce_boot_level = kv_get_vce_boot_level(rdev, radeon_new_state->evclk);  		ret = kv_copy_bytes_to_smc(rdev,  					   pi->dpm_table_start + @@ -2726,7 +2726,10 @@ int kv_dpm_init(struct radeon_device *rdev)  	pi->caps_sclk_ds = true;  	pi->enable_auto_thermal_throttling = true;  	pi->disable_nb_ps3_in_battery = false; -	pi->bapm_enable = true; +	if (radeon_bapm == 0) +		pi->bapm_enable = false; +	else +		pi->bapm_enable = true;  	pi->voltage_drop_t = 0;  	pi->caps_sclk_throttle_low_notification = false;  	pi->caps_fps = false; /* true? */ diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 327b85f7fd0d..3faee58946dd 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1271,7 +1271,7 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)  		WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);  		WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);  		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), -			rdev->gart.table_addr >> 12); +		       rdev->vm_manager.saved_table_addr[i]);  	}  	/* enable context1-7 */ @@ -1303,6 +1303,13 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)  static void cayman_pcie_gart_disable(struct radeon_device *rdev)  { +	unsigned i; + +	for (i = 1; i < 8; ++i) { +		rdev->vm_manager.saved_table_addr[i] = RREG32( +			VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2)); +	} +  	/* Disable all tables */  	WREG32(VM_CONTEXT0_CNTL, 0);  	WREG32(VM_CONTEXT1_CNTL, 0); @@ -1505,7 +1512,7 @@ static int cayman_cp_start(struct 
radeon_device *rdev)  	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));  	radeon_ring_write(ring, 0);  	radeon_ring_write(ring, 0); -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	cayman_cp_enable(rdev, true); @@ -1547,7 +1554,7 @@ static int cayman_cp_start(struct radeon_device *rdev)  	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */  	radeon_ring_write(ring, 0x00000010); /*  */ -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	/* XXX init other rings */ diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 04b5940b8923..4c5ec44ff328 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -925,7 +925,7 @@ int r100_copy_blit(struct radeon_device *rdev,  	if (fence) {  		r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);  	} -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	return r;  } @@ -958,7 +958,7 @@ void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)  			  RADEON_ISYNC_ANY3D_IDLE2D |  			  RADEON_ISYNC_WAIT_IDLEGUI |  			  RADEON_ISYNC_CPSCRATCH_IDLEGUI); -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  } @@ -3638,7 +3638,7 @@ int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)  	}  	radeon_ring_write(ring, PACKET0(scratch, 0));  	radeon_ring_write(ring, 0xDEADBEEF); -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	for (i = 0; i < rdev->usec_timeout; i++) {  		tmp = RREG32(scratch);  		if (tmp == 0xDEADBEEF) { @@ -3700,7 +3700,7 @@ int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)  	ib.ptr[6] = PACKET2(0);  	ib.ptr[7] = PACKET2(0);  	ib.length_dw = 8; -	r = radeon_ib_schedule(rdev, &ib, NULL); +	r = radeon_ib_schedule(rdev, &ib, NULL, false);  	if (r) {  		DRM_ERROR("radeon: failed to 
schedule ib (%d).\n", r);  		goto free_ib; diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c index 58f0473aa73f..67780374a652 100644 --- a/drivers/gpu/drm/radeon/r200.c +++ b/drivers/gpu/drm/radeon/r200.c @@ -121,7 +121,7 @@ int r200_copy_dma(struct radeon_device *rdev,  	if (fence) {  		r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);  	} -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	return r;  } diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 75b30338c226..1bc4704034ce 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -295,7 +295,7 @@ void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)  	radeon_ring_write(ring,  			  R300_GEOMETRY_ROUND_NEAREST |  			  R300_COLOR_ROUND_NEAREST); -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  }  static void r300_errata(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 802b19220a21..2828605aef3f 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -219,7 +219,7 @@ static void r420_cp_errata_init(struct radeon_device *rdev)  	radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));  	radeon_ring_write(ring, rdev->config.r300.resync_scratch);  	radeon_ring_write(ring, 0xDEADBEEF); -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  }  static void r420_cp_errata_fini(struct radeon_device *rdev) @@ -232,7 +232,7 @@ static void r420_cp_errata_fini(struct radeon_device *rdev)  	radeon_ring_lock(rdev, ring, 8);  	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));  	radeon_ring_write(ring, R300_RB3D_DC_FINISH); -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);  } diff --git 
a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index c70a504d96af..e616eb5f6e7a 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -1812,7 +1812,6 @@ static void r600_gpu_init(struct radeon_device *rdev)  {  	u32 tiling_config;  	u32 ramcfg; -	u32 cc_rb_backend_disable;  	u32 cc_gc_shader_pipe_config;  	u32 tmp;  	int i, j; @@ -1939,29 +1938,20 @@ static void r600_gpu_init(struct radeon_device *rdev)  	}  	tiling_config |= BANK_SWAPS(1); -	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; -	tmp = R6XX_MAX_BACKENDS - -		r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK); -	if (tmp < rdev->config.r600.max_backends) { -		rdev->config.r600.max_backends = tmp; -	} -  	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00; -	tmp = R6XX_MAX_PIPES - -		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK); -	if (tmp < rdev->config.r600.max_pipes) { -		rdev->config.r600.max_pipes = tmp; -	} -	tmp = R6XX_MAX_SIMDS - -		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK); -	if (tmp < rdev->config.r600.max_simds) { -		rdev->config.r600.max_simds = tmp; -	}  	tmp = rdev->config.r600.max_simds -  		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);  	rdev->config.r600.active_simds = tmp;  	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK; +	tmp = 0; +	for (i = 0; i < rdev->config.r600.max_backends; i++) +		tmp |= (1 << i); +	/* if all the backends are disabled, fix it up here */ +	if ((disabled_rb_mask & tmp) == tmp) { +		for (i = 0; i < rdev->config.r600.max_backends; i++) +			disabled_rb_mask &= ~(1 << i); +	}  	tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;  	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,  					R6XX_MAX_BACKENDS, disabled_rb_mask); @@ -2547,7 +2537,7 @@ int r600_cp_start(struct radeon_device *rdev)  	
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));  	radeon_ring_write(ring, 0);  	radeon_ring_write(ring, 0); -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	cp_me = 0xff;  	WREG32(R_0086D8_CP_ME_CNTL, cp_me); @@ -2683,7 +2673,7 @@ int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)  	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));  	radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));  	radeon_ring_write(ring, 0xDEADBEEF); -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	for (i = 0; i < rdev->usec_timeout; i++) {  		tmp = RREG32(scratch);  		if (tmp == 0xDEADBEEF) @@ -2753,6 +2743,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,  	}  } +/** + * r600_semaphore_ring_emit - emit a semaphore on the CP ring + * + * @rdev: radeon_device pointer + * @ring: radeon ring buffer object + * @semaphore: radeon semaphore object + * @emit_wait: Is this a semaphore wait? + * + * Emits a semaphore signal/wait packet to the CP ring and prevents the PFP + * from running ahead of semaphore waits. 
+ */  bool r600_semaphore_ring_emit(struct radeon_device *rdev,  			      struct radeon_ring *ring,  			      struct radeon_semaphore *semaphore, @@ -2768,6 +2769,13 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,  	radeon_ring_write(ring, lower_32_bits(addr));  	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); +	/* PFP_SYNC_ME packet only exists on 7xx+ */ +	if (emit_wait && (rdev->family >= CHIP_RV770)) { +		/* Prevent the PFP from running ahead of the semaphore wait */ +		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); +		radeon_ring_write(ring, 0x0); +	} +  	return true;  } @@ -2845,7 +2853,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,  		return r;  	} -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	radeon_semaphore_free(rdev, &sem, *fence);  	return r; @@ -3165,7 +3173,7 @@ int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)  	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);  	ib.ptr[2] = 0xDEADBEEF;  	ib.length_dw = 3; -	r = radeon_ib_schedule(rdev, &ib, NULL); +	r = radeon_ib_schedule(rdev, &ib, NULL, false);  	if (r) {  		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);  		goto free_ib; diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c index 4969cef44a19..51fd98553eaf 100644 --- a/drivers/gpu/drm/radeon/r600_dma.c +++ b/drivers/gpu/drm/radeon/r600_dma.c @@ -261,7 +261,7 @@ int r600_dma_ring_test(struct radeon_device *rdev,  	radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc);  	radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff);  	radeon_ring_write(ring, 0xDEADBEEF); -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	for (i = 0; i < rdev->usec_timeout; i++) {  		tmp = readl(ptr); @@ -368,7 +368,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)  	ib.ptr[3] = 0xDEADBEEF;  	ib.length_dw = 4; -	r = 
radeon_ib_schedule(rdev, &ib, NULL); +	r = radeon_ib_schedule(rdev, &ib, NULL, false);  	if (r) {  		radeon_ib_free(rdev, &ib);  		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); @@ -493,7 +493,7 @@ int r600_copy_dma(struct radeon_device *rdev,  		return r;  	} -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	radeon_semaphore_free(rdev, &sem, *fence);  	return r; diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index f94e7a9afe75..0c4a7d8d93e0 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -1597,6 +1597,7 @@  		 */  #              define PACKET3_CP_DMA_CMD_SAIC      (1 << 28)  #              define PACKET3_CP_DMA_CMD_DAIC      (1 << 29) +#define	PACKET3_PFP_SYNC_ME				0x42 /* r7xx+ only */  #define	PACKET3_SURFACE_SYNC				0x43  #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)  #              define PACKET3_FULL_CACHE_ENA       (1 << 20) /* r7xx+ only */ diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 9e1732eb402c..5f05b4c84338 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -105,6 +105,7 @@ extern int radeon_vm_size;  extern int radeon_vm_block_size;  extern int radeon_deep_color;  extern int radeon_use_pflipirq; +extern int radeon_bapm;  /*   * Copy from radeon_drv.h so we don't have to include both and have conflicting @@ -914,6 +915,8 @@ struct radeon_vm_manager {  	u64				vram_base_offset;  	/* is vm enabled? 
*/  	bool				enabled; +	/* for hw to save the PD addr on suspend/resume */ +	uint32_t			saved_table_addr[RADEON_NUM_VM];  };  /* @@ -967,7 +970,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,  		  unsigned size);  void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);  int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, -		       struct radeon_ib *const_ib); +		       struct radeon_ib *const_ib, bool hdp_flush);  int radeon_ib_pool_init(struct radeon_device *rdev);  void radeon_ib_pool_fini(struct radeon_device *rdev);  int radeon_ib_ring_tests(struct radeon_device *rdev); @@ -977,8 +980,10 @@ bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,  void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);  int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);  int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); -void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp); -void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp); +void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp, +			bool hdp_flush); +void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp, +			       bool hdp_flush);  void radeon_ring_undo(struct radeon_ring *ring);  void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);  int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index ee712c199b25..83f382e8e40e 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -132,7 +132,8 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)  		 * the buffers used for read only, which doubles the range  		 * to 0 to 31. 32 is reserved for the kernel driver.  		 
*/ -		priority = (r->flags & 0xf) * 2 + !!r->write_domain; +		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2 +			   + !!r->write_domain;  		/* the first reloc of an UVD job is the msg and that must be in  		   VRAM, also but everything into VRAM on AGP cards to avoid @@ -450,7 +451,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,  		radeon_vce_note_usage(rdev);  	radeon_cs_sync_rings(parser); -	r = radeon_ib_schedule(rdev, &parser->ib, NULL); +	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);  	if (r) {  		DRM_ERROR("Failed to schedule IB !\n");  	} @@ -541,9 +542,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,  	if ((rdev->family >= CHIP_TAHITI) &&  	    (parser->chunk_const_ib_idx != -1)) { -		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib); +		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);  	} else { -		r = radeon_ib_schedule(rdev, &parser->ib, NULL); +		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);  	}  out: diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index c8ea050c8fa4..6a219bcee66d 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1680,8 +1680,8 @@ int radeon_gpu_reset(struct radeon_device *rdev)  	radeon_save_bios_scratch_regs(rdev);  	/* block TTM */  	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); -	radeon_pm_suspend(rdev);  	radeon_suspend(rdev); +	radeon_hpd_fini(rdev);  	for (i = 0; i < RADEON_NUM_RINGS; ++i) {  		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i], @@ -1726,9 +1726,39 @@ retry:  		}  	} -	radeon_pm_resume(rdev); +	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { +		/* do dpm late init */ +		r = radeon_pm_late_init(rdev); +		if (r) { +			rdev->pm.dpm_enabled = false; +			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); +		} +	} else { +		/* resume old pm late */ +		radeon_pm_resume(rdev); +	} + +	/* init dig 
PHYs, disp eng pll */ +	if (rdev->is_atom_bios) { +		radeon_atom_encoder_init(rdev); +		radeon_atom_disp_eng_pll_init(rdev); +		/* turn on the BL */ +		if (rdev->mode_info.bl_encoder) { +			u8 bl_level = radeon_get_backlight_level(rdev, +								 rdev->mode_info.bl_encoder); +			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder, +						   bl_level); +		} +	} +	/* reset hpd state */ +	radeon_hpd_init(rdev); +  	drm_helper_resume_force_mode(rdev->ddev); +	/* set the power state here in case we are a PX system or headless */ +	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) +		radeon_pm_compute_clocks(rdev); +  	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);  	if (r) {  		/* bad news, how to tell it to userspace ? */ diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 092d067f93e1..8df888908833 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -180,6 +180,7 @@ int radeon_vm_size = 8;  int radeon_vm_block_size = -1;  int radeon_deep_color = 0;  int radeon_use_pflipirq = 2; +int radeon_bapm = -1;  MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");  module_param_named(no_wb, radeon_no_wb, int, 0444); @@ -259,6 +260,9 @@ module_param_named(deep_color, radeon_deep_color, int, 0444);  MODULE_PARM_DESC(use_pflipirq, "Pflip irqs for pageflip completion (0 = disable, 1 = as fallback, 2 = exclusive (default))");  module_param_named(use_pflipirq, radeon_use_pflipirq, int, 0444); +MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)"); +module_param_named(bapm, radeon_bapm, int, 0444); +  static struct pci_device_id pciidlist[] = {  	radeon_PCI_IDS  }; diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c index 65b0c213488d..5bf2c0a05827 100644 --- a/drivers/gpu/drm/radeon/radeon_ib.c +++ b/drivers/gpu/drm/radeon/radeon_ib.c @@ -107,6 +107,7 @@ void radeon_ib_free(struct 
radeon_device *rdev, struct radeon_ib *ib)   * @rdev: radeon_device pointer   * @ib: IB object to schedule   * @const_ib: Const IB to schedule (SI only) + * @hdp_flush: Whether or not to perform an HDP cache flush   *   * Schedule an IB on the associated ring (all asics).   * Returns 0 on success, error on failure. @@ -122,7 +123,7 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)   * to SI there was just a DE IB.   */  int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, -		       struct radeon_ib *const_ib) +		       struct radeon_ib *const_ib, bool hdp_flush)  {  	struct radeon_ring *ring = &rdev->ring[ib->ring];  	int r = 0; @@ -176,7 +177,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,  	if (ib->vm)  		radeon_vm_fence(rdev, ib->vm, ib->fence); -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, hdp_flush);  	return 0;  } diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 23314be49480..164898b0010c 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -460,10 +460,6 @@ static ssize_t radeon_get_dpm_state(struct device *dev,  	struct radeon_device *rdev = ddev->dev_private;  	enum radeon_pm_state_type pm = rdev->pm.dpm.user_state; -	if  ((rdev->flags & RADEON_IS_PX) && -	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) -		return snprintf(buf, PAGE_SIZE, "off\n"); -  	return snprintf(buf, PAGE_SIZE, "%s\n",  			(pm == POWER_STATE_TYPE_BATTERY) ? "battery" :  			(pm == POWER_STATE_TYPE_BALANCED) ? 
"balanced" : "performance"); @@ -477,11 +473,6 @@ static ssize_t radeon_set_dpm_state(struct device *dev,  	struct drm_device *ddev = dev_get_drvdata(dev);  	struct radeon_device *rdev = ddev->dev_private; -	/* Can't set dpm state when the card is off */ -	if  ((rdev->flags & RADEON_IS_PX) && -	     (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) -		return -EINVAL; -  	mutex_lock(&rdev->pm.mutex);  	if (strncmp("battery", buf, strlen("battery")) == 0)  		rdev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; @@ -495,7 +486,12 @@ static ssize_t radeon_set_dpm_state(struct device *dev,  		goto fail;  	}  	mutex_unlock(&rdev->pm.mutex); -	radeon_pm_compute_clocks(rdev); + +	/* Can't set dpm state when the card is off */ +	if (!(rdev->flags & RADEON_IS_PX) || +	    (ddev->switch_power_state == DRM_SWITCH_POWER_ON)) +		radeon_pm_compute_clocks(rdev); +  fail:  	return count;  } diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index 5b4e0cf231a0..d65607902537 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -177,16 +177,18 @@ int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsig   *   * @rdev: radeon_device pointer   * @ring: radeon_ring structure holding ring information + * @hdp_flush: Whether or not to perform an HDP cache flush   *   * Update the wptr (write pointer) to tell the GPU to   * execute new commands on the ring buffer (all asics).   */ -void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring) +void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring, +			bool hdp_flush)  {  	/* If we are emitting the HDP flush via the ring buffer, we need to  	 * do it before padding.  	 
*/ -	if (rdev->asic->ring[ring->idx]->hdp_flush) +	if (hdp_flush && rdev->asic->ring[ring->idx]->hdp_flush)  		rdev->asic->ring[ring->idx]->hdp_flush(rdev, ring);  	/* We pad to match fetch size */  	while (ring->wptr & ring->align_mask) { @@ -196,7 +198,7 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)  	/* If we are emitting the HDP flush via MMIO, we need to do it after  	 * all CPU writes to VRAM finished.  	 */ -	if (rdev->asic->mmio_hdp_flush) +	if (hdp_flush && rdev->asic->mmio_hdp_flush)  		rdev->asic->mmio_hdp_flush(rdev);  	radeon_ring_set_wptr(rdev, ring);  } @@ -207,12 +209,14 @@ void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)   *   * @rdev: radeon_device pointer   * @ring: radeon_ring structure holding ring information + * @hdp_flush: Whether or not to perform an HDP cache flush   *   * Call radeon_ring_commit() then unlock the ring (all asics).   */ -void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring) +void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring, +			       bool hdp_flush)  { -	radeon_ring_commit(rdev, ring); +	radeon_ring_commit(rdev, ring, hdp_flush);  	mutex_unlock(&rdev->ring_lock);  } @@ -372,7 +376,7 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,  		radeon_ring_write(ring, data[i]);  	} -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	kfree(data);  	return 0;  } @@ -400,9 +404,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig  	/* Allocate ring buffer */  	if (ring->ring_obj == NULL) {  		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true, -				     RADEON_GEM_DOMAIN_GTT, -				     (rdev->flags & RADEON_IS_PCIE) ? 
-				     RADEON_GEM_GTT_WC : 0, +				     RADEON_GEM_DOMAIN_GTT, 0,  				     NULL, &ring->ring_obj);  		if (r) {  			dev_err(rdev->dev, "(%d) ring create failed\n", r); diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c index dbd6bcde92de..56d9fd66d8ae 100644 --- a/drivers/gpu/drm/radeon/radeon_semaphore.c +++ b/drivers/gpu/drm/radeon/radeon_semaphore.c @@ -179,7 +179,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,  			continue;  		} -		radeon_ring_commit(rdev, &rdev->ring[i]); +		radeon_ring_commit(rdev, &rdev->ring[i], false);  		radeon_fence_note_sync(fence, ring);  		semaphore->gpu_addr += 8; diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 5adf4207453d..17bc3dced9f1 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c @@ -288,7 +288,7 @@ static int radeon_test_create_and_emit_fence(struct radeon_device *rdev,  			return r;  		}  		radeon_fence_emit(rdev, fence, ring->idx); -		radeon_ring_unlock_commit(rdev, ring); +		radeon_ring_unlock_commit(rdev, ring, false);  	}  	return 0;  } @@ -313,7 +313,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,  		goto out_cleanup;  	}  	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); -	radeon_ring_unlock_commit(rdev, ringA); +	radeon_ring_unlock_commit(rdev, ringA, false);  	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence1);  	if (r) @@ -325,7 +325,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,  		goto out_cleanup;  	}  	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); -	radeon_ring_unlock_commit(rdev, ringA); +	radeon_ring_unlock_commit(rdev, ringA, false);  	r = radeon_test_create_and_emit_fence(rdev, ringA, &fence2);  	if (r) @@ -344,7 +344,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,  		goto out_cleanup;  	}  	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore); -	radeon_ring_unlock_commit(rdev, 
ringB); +	radeon_ring_unlock_commit(rdev, ringB, false);  	r = radeon_fence_wait(fence1, false);  	if (r) { @@ -365,7 +365,7 @@ void radeon_test_ring_sync(struct radeon_device *rdev,  		goto out_cleanup;  	}  	radeon_semaphore_emit_signal(rdev, ringB->idx, semaphore); -	radeon_ring_unlock_commit(rdev, ringB); +	radeon_ring_unlock_commit(rdev, ringB, false);  	r = radeon_fence_wait(fence2, false);  	if (r) { @@ -408,7 +408,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,  		goto out_cleanup;  	}  	radeon_semaphore_emit_wait(rdev, ringA->idx, semaphore); -	radeon_ring_unlock_commit(rdev, ringA); +	radeon_ring_unlock_commit(rdev, ringA, false);  	r = radeon_test_create_and_emit_fence(rdev, ringA, &fenceA);  	if (r) @@ -420,7 +420,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,  		goto out_cleanup;  	}  	radeon_semaphore_emit_wait(rdev, ringB->idx, semaphore); -	radeon_ring_unlock_commit(rdev, ringB); +	radeon_ring_unlock_commit(rdev, ringB, false);  	r = radeon_test_create_and_emit_fence(rdev, ringB, &fenceB);  	if (r)  		goto out_cleanup; @@ -442,7 +442,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,  		goto out_cleanup;  	}  	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore); -	radeon_ring_unlock_commit(rdev, ringC); +	radeon_ring_unlock_commit(rdev, ringC, false);  	for (i = 0; i < 30; ++i) {  		mdelay(100); @@ -468,7 +468,7 @@ static void radeon_test_ring_sync2(struct radeon_device *rdev,  		goto out_cleanup;  	}  	radeon_semaphore_emit_signal(rdev, ringC->idx, semaphore); -	radeon_ring_unlock_commit(rdev, ringC); +	radeon_ring_unlock_commit(rdev, ringC, false);  	mdelay(1000); diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 6bf55ec85b62..341848a14376 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c @@ -646,7 +646,7 @@ static int radeon_uvd_send_msg(struct radeon_device *rdev,  		ib.ptr[i] = PACKET2(0);  	
ib.length_dw = 16; -	r = radeon_ib_schedule(rdev, &ib, NULL); +	r = radeon_ib_schedule(rdev, &ib, NULL, false);  	if (r)  		goto err;  	ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence); diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c index f9b70a43aa52..c7190aadbd89 100644 --- a/drivers/gpu/drm/radeon/radeon_vce.c +++ b/drivers/gpu/drm/radeon/radeon_vce.c @@ -368,7 +368,7 @@ int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,  	for (i = ib.length_dw; i < ib_size_dw; ++i)  		ib.ptr[i] = 0x0; -	r = radeon_ib_schedule(rdev, &ib, NULL); +	r = radeon_ib_schedule(rdev, &ib, NULL, false);  	if (r) {  	        DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);  	} @@ -425,7 +425,7 @@ int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,  	for (i = ib.length_dw; i < ib_size_dw; ++i)  		ib.ptr[i] = 0x0; -	r = radeon_ib_schedule(rdev, &ib, NULL); +	r = radeon_ib_schedule(rdev, &ib, NULL, false);  	if (r) {  	        DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);  	} @@ -715,7 +715,7 @@ int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)  		return r;  	}  	radeon_ring_write(ring, VCE_CMD_END); -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	for (i = 0; i < rdev->usec_timeout; i++) {  	        if (vce_v1_0_get_rptr(rdev, ring) != rptr) diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c index ccae4d9dc3de..088ffdc2f577 100644 --- a/drivers/gpu/drm/radeon/radeon_vm.c +++ b/drivers/gpu/drm/radeon/radeon_vm.c @@ -420,7 +420,7 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,  	radeon_asic_vm_pad_ib(rdev, &ib);  	WARN_ON(ib.length_dw > 64); -	r = radeon_ib_schedule(rdev, &ib, NULL); +	r = radeon_ib_schedule(rdev, &ib, NULL, false);  	if (r)                  goto error; @@ -483,6 +483,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,  			/* add a clone of the bo_va to clear 
the old address */  			struct radeon_bo_va *tmp;  			tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); +			if (!tmp) { +				mutex_unlock(&vm->mutex); +				return -ENOMEM; +			}  			tmp->it.start = bo_va->it.start;  			tmp->it.last = bo_va->it.last;  			tmp->vm = vm; @@ -693,7 +697,7 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,  		radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);  		radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);  		WARN_ON(ib.length_dw > ndw); -		r = radeon_ib_schedule(rdev, &ib, NULL); +		r = radeon_ib_schedule(rdev, &ib, NULL, false);  		if (r) {  			radeon_ib_free(rdev, &ib);  			return r; @@ -957,7 +961,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,  	WARN_ON(ib.length_dw > ndw);  	radeon_semaphore_sync_to(ib.semaphore, vm->fence); -	r = radeon_ib_schedule(rdev, &ib, NULL); +	r = radeon_ib_schedule(rdev, &ib, NULL, false);  	if (r) {  		radeon_ib_free(rdev, &ib);  		return r; diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 3e21e869015f..8a477bf1fdb3 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -124,7 +124,7 @@ void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)  	radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);  	radeon_ring_write(ring, PACKET0(0x20C8, 0));  	radeon_ring_write(ring, 0); -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  }  int rv515_mc_wait_for_idle(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 2983f17ea1b3..d9f5ce715c9b 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -1177,7 +1177,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)  	u32 hdp_host_path_cntl;  	u32 sq_dyn_gpr_size_simd_ab_0;  	u32 gb_tiling_config = 0; -	u32 cc_rb_backend_disable = 0;  	u32 cc_gc_shader_pipe_config = 0;  	u32 mc_arb_ramcfg;  	
u32 db_debug4, tmp; @@ -1311,21 +1310,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)  		WREG32(SPI_CONFIG_CNTL, 0);  	} -	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; -	tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16); -	if (tmp < rdev->config.rv770.max_backends) { -		rdev->config.rv770.max_backends = tmp; -	} -  	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; -	tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK); -	if (tmp < rdev->config.rv770.max_pipes) { -		rdev->config.rv770.max_pipes = tmp; -	} -	tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK); -	if (tmp < rdev->config.rv770.max_simds) { -		rdev->config.rv770.max_simds = tmp; -	}  	tmp = rdev->config.rv770.max_simds -  		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);  	rdev->config.rv770.active_simds = tmp; @@ -1348,6 +1333,14 @@ static void rv770_gpu_init(struct radeon_device *rdev)  	rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;  	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK; +	tmp = 0; +	for (i = 0; i < rdev->config.rv770.max_backends; i++) +		tmp |= (1 << i); +	/* if all the backends are disabled, fix it up here */ +	if ((disabled_rb_mask & tmp) == tmp) { +		for (i = 0; i < rdev->config.rv770.max_backends; i++) +			disabled_rb_mask &= ~(1 << i); +	}  	tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;  	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,  					R7XX_MAX_BACKENDS, disabled_rb_mask); diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c index bbf2e076ee45..74426ac2bb5c 100644 --- a/drivers/gpu/drm/radeon/rv770_dma.c +++ b/drivers/gpu/drm/radeon/rv770_dma.c @@ -90,7 +90,7 @@ int rv770_copy_dma(struct radeon_device *rdev,  		return r;  	} -	
radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	radeon_semaphore_free(rdev, &sem, *fence);  	return r; diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 011779bd2b3d..6bce40847753 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -3057,7 +3057,7 @@ static void si_gpu_init(struct radeon_device *rdev)  	u32 sx_debug_1;  	u32 hdp_host_path_cntl;  	u32 tmp; -	int i, j, k; +	int i, j;  	switch (rdev->family) {  	case CHIP_TAHITI: @@ -3255,12 +3255,11 @@ static void si_gpu_init(struct radeon_device *rdev)  		     rdev->config.si.max_sh_per_se,  		     rdev->config.si.max_cu_per_sh); +	rdev->config.si.active_cus = 0;  	for (i = 0; i < rdev->config.si.max_shader_engines; i++) {  		for (j = 0; j < rdev->config.si.max_sh_per_se; j++) { -			for (k = 0; k < rdev->config.si.max_cu_per_sh; k++) { -				rdev->config.si.active_cus += -					hweight32(si_get_cu_active_bitmap(rdev, i, j)); -			} +			rdev->config.si.active_cus += +				hweight32(si_get_cu_active_bitmap(rdev, i, j));  		}  	} @@ -3541,7 +3540,7 @@ static int si_cp_start(struct radeon_device *rdev)  	radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));  	radeon_ring_write(ring, 0xc000);  	radeon_ring_write(ring, 0xe000); -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	si_cp_enable(rdev, true); @@ -3570,7 +3569,7 @@ static int si_cp_start(struct radeon_device *rdev)  	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */  	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */ -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {  		ring = &rdev->ring[i]; @@ -3580,7 +3579,7 @@ static int si_cp_start(struct radeon_device *rdev)  		radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));  		radeon_ring_write(ring, 0); -		
radeon_ring_unlock_commit(rdev, ring); +		radeon_ring_unlock_commit(rdev, ring, false);  	}  	return 0; @@ -4291,10 +4290,10 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)  	for (i = 1; i < 16; i++) {  		if (i < 8)  			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), -			       rdev->gart.table_addr >> 12); +			       rdev->vm_manager.saved_table_addr[i]);  		else  			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2), -			       rdev->gart.table_addr >> 12); +			       rdev->vm_manager.saved_table_addr[i]);  	}  	/* enable context1-15 */ @@ -4326,6 +4325,17 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)  static void si_pcie_gart_disable(struct radeon_device *rdev)  { +	unsigned i; + +	for (i = 1; i < 16; ++i) { +		uint32_t reg; +		if (i < 8) +			reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2); +		else +			reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2); +		rdev->vm_manager.saved_table_addr[i] = RREG32(reg); +	} +  	/* Disable all tables */  	WREG32(VM_CONTEXT0_CNTL, 0);  	WREG32(VM_CONTEXT1_CNTL, 0); @@ -5028,7 +5038,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)  	/* flush hdp cache */  	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); -	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | +	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |  				 WRITE_DATA_DST_SEL(0)));  	radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);  	radeon_ring_write(ring, 0); @@ -5036,7 +5046,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)  	/* bits 0-15 are the VM contexts0-15 */  	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); -	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | +	radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |  				 WRITE_DATA_DST_SEL(0)));  	radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);  	radeon_ring_write(ring, 0); @@ -7178,6 +7188,9 @@ static void si_pcie_gen3_enable(struct radeon_device *rdev)  	int ret, i;  
	u16 tmp16; +	if (pci_is_root_bus(rdev->pdev->bus)) +		return; +  	if (radeon_pcie_gen2 == 0)  		return; @@ -7455,7 +7468,8 @@ static void si_program_aspm(struct radeon_device *rdev)  			if (orig != data)  				WREG32_PIF_PHY1(PB1_PIF_CNTL, data); -			if (!disable_clkreq) { +			if (!disable_clkreq && +			    !pci_is_root_bus(rdev->pdev->bus)) {  				struct pci_dev *root = rdev->pdev->bus->self;  				u32 lnkcap; diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index 716505129450..7c22baaf94db 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c @@ -275,7 +275,7 @@ int si_copy_dma(struct radeon_device *rdev,  		return r;  	} -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	radeon_semaphore_free(rdev, &sem, *fence);  	return r; diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c index 32e50be9c4ac..57f780053b3e 100644 --- a/drivers/gpu/drm/radeon/trinity_dpm.c +++ b/drivers/gpu/drm/radeon/trinity_dpm.c @@ -1874,16 +1874,22 @@ int trinity_dpm_init(struct radeon_device *rdev)  	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)  		pi->at[i] = TRINITY_AT_DFLT; -	/* There are stability issues reported on with -	 * bapm enabled when switching between AC and battery -	 * power.  At the same time, some MSI boards hang -	 * if it's not enabled and dpm is enabled.  Just enable -	 * it for MSI boards right now. -	 */ -	if (rdev->pdev->subsystem_vendor == 0x1462) -		pi->enable_bapm = true; -	else +	if (radeon_bapm == -1) { +		/* There are stability issues reported on with +		 * bapm enabled when switching between AC and battery +		 * power.  At the same time, some MSI boards hang +		 * if it's not enabled and dpm is enabled.  Just enable +		 * it for MSI boards right now. 
+		 */ +		if (rdev->pdev->subsystem_vendor == 0x1462) +			pi->enable_bapm = true; +		else +			pi->enable_bapm = false; +	} else if (radeon_bapm == 0) {  		pi->enable_bapm = false; +	} else { +		pi->enable_bapm = true; +	}  	pi->enable_nbps_policy = true;  	pi->enable_sclk_ds = true;  	pi->enable_gfx_power_gating = true; diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c index be42c8125203..cda391347286 100644 --- a/drivers/gpu/drm/radeon/uvd_v1_0.c +++ b/drivers/gpu/drm/radeon/uvd_v1_0.c @@ -124,7 +124,7 @@ int uvd_v1_0_init(struct radeon_device *rdev)  	radeon_ring_write(ring, PACKET0(UVD_SEMA_CNTL, 0));  	radeon_ring_write(ring, 3); -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  done:  	/* lower clocks again */ @@ -331,7 +331,7 @@ int uvd_v1_0_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)  	}  	radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));  	radeon_ring_write(ring, 0xDEADBEEF); -	radeon_ring_unlock_commit(rdev, ring); +	radeon_ring_unlock_commit(rdev, ring, false);  	for (i = 0; i < rdev->usec_timeout; i++) {  		tmp = RREG32(UVD_CONTEXT_ID);  		if (tmp == 0xDEADBEEF) diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig index 2d9d4252d598..ae8850f3e63b 100644 --- a/drivers/gpu/drm/sti/Kconfig +++ b/drivers/gpu/drm/sti/Kconfig @@ -1,6 +1,7 @@  config DRM_STI  	tristate "DRM Support for STMicroelectronics SoC stiH41x Series"  	depends on DRM && (SOC_STIH415 || SOC_STIH416 || ARCH_MULTIPLATFORM) +	select RESET_CONTROLLER  	select DRM_KMS_HELPER  	select DRM_GEM_CMA_HELPER  	select DRM_KMS_CMA_HELPER diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drm_drv.c index a7cc24917a96..223d93c3a05d 100644 --- a/drivers/gpu/drm/sti/sti_drm_drv.c +++ b/drivers/gpu/drm/sti/sti_drm_drv.c @@ -201,8 +201,8 @@ static int sti_drm_platform_probe(struct platform_device *pdev)  	master = platform_device_register_resndata(dev,  			DRIVER_NAME 
"__master", -1,  			NULL, 0, NULL, 0); -	if (!master) -		return -EINVAL; +	if (IS_ERR(master)) +               return PTR_ERR(master);  	platform_set_drvdata(pdev, master);  	return 0; diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index 72d957f81c05..2ae9a9b73666 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -730,16 +730,16 @@ static int sti_hda_probe(struct platform_device *pdev)  		return -ENOMEM;  	}  	hda->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); -	if (IS_ERR(hda->regs)) -		return PTR_ERR(hda->regs); +	if (!hda->regs) +		return -ENOMEM;  	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,  			"video-dacs-ctrl");  	if (res) {  		hda->video_dacs_ctrl = devm_ioremap_nocache(dev, res->start,  				resource_size(res)); -		if (IS_ERR(hda->video_dacs_ctrl)) -			return PTR_ERR(hda->video_dacs_ctrl); +		if (!hda->video_dacs_ctrl) +			return -ENOMEM;  	} else {  		/* If no existing video-dacs-ctrl resource continue the probe */  		DRM_DEBUG_DRIVER("No video-dacs-ctrl resource\n"); @@ -770,7 +770,7 @@ static int sti_hda_remove(struct platform_device *pdev)  	return 0;  } -static struct of_device_id hda_of_match[] = { +static const struct of_device_id hda_of_match[] = {  	{ .compatible = "st,stih416-hda", },  	{ .compatible = "st,stih407-hda", },  	{ /* end node */ } diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 284e541d970d..ef93156a69c6 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -677,7 +677,7 @@ static const struct component_ops sti_hdmi_ops = {  	.unbind = sti_hdmi_unbind,  }; -static struct of_device_id hdmi_of_match[] = { +static const struct of_device_id hdmi_of_match[] = {  	{  		.compatible = "st,stih416-hdmi",  		.data = &tx3g0c55phy_ops, @@ -713,8 +713,8 @@ static int sti_hdmi_probe(struct platform_device *pdev)  		return -ENOMEM;  	}  	hdmi->regs = devm_ioremap_nocache(dev, res->start, 
resource_size(res)); -	if (IS_ERR(hdmi->regs)) -		return PTR_ERR(hdmi->regs); +	if (!hdmi->regs) +		return -ENOMEM;  	if (of_device_is_compatible(np, "st,stih416-hdmi")) {  		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, @@ -725,8 +725,8 @@ static int sti_hdmi_probe(struct platform_device *pdev)  		}  		hdmi->syscfg = devm_ioremap_nocache(dev, res->start,  						    resource_size(res)); -		if (IS_ERR(hdmi->syscfg)) -			return PTR_ERR(hdmi->syscfg); +		if (!hdmi->syscfg) +			return -ENOMEM;  	} diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c index b69e26fee76e..b8afe490356a 100644 --- a/drivers/gpu/drm/sti/sti_tvout.c +++ b/drivers/gpu/drm/sti/sti_tvout.c @@ -591,8 +591,8 @@ static int sti_tvout_probe(struct platform_device *pdev)  		return -ENOMEM;  	}  	tvout->regs = devm_ioremap_nocache(dev, res->start, resource_size(res)); -	if (IS_ERR(tvout->regs)) -		return PTR_ERR(tvout->regs); +	if (!tvout->regs) +		return -ENOMEM;  	/* get reset resources */  	tvout->reset = devm_reset_control_get(dev, "tvout"); @@ -624,7 +624,7 @@ static int sti_tvout_remove(struct platform_device *pdev)  	return 0;  } -static struct of_device_id tvout_of_match[] = { +static const struct of_device_id tvout_of_match[] = {  	{ .compatible = "st,stih416-tvout", },  	{ .compatible = "st,stih407-tvout", },  	{ /* end node */ }  | 

