 drivers/gpu/drm/drm_atomic_helper.c        |    5
 drivers/gpu/drm/drm_edid.c                 |   32
 drivers/gpu/drm/drm_edid_load.c            |    7
 drivers/gpu/drm/i915/Makefile              |    3
 drivers/gpu/drm/i915/i915_debugfs.c        |  240
 drivers/gpu/drm/i915/i915_dma.c            |   11
 drivers/gpu/drm/i915/i915_drv.c            |   50
 drivers/gpu/drm/i915/i915_drv.h            |   42
 drivers/gpu/drm/i915/i915_gem.c            |  174
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   32
 drivers/gpu/drm/i915/i915_gem_gtt.c        |   87
 drivers/gpu/drm/i915/i915_gem_gtt.h        |   20
 drivers/gpu/drm/i915/i915_gpu_error.c      |    1
 drivers/gpu/drm/i915/i915_params.c         |    8
 drivers/gpu/drm/i915/i915_reg.h            |   89
 drivers/gpu/drm/i915/intel_atomic.c        |   13
 drivers/gpu/drm/i915/intel_atomic_plane.c  |   20
 drivers/gpu/drm/i915/intel_audio.c         |   20
 drivers/gpu/drm/i915/intel_bios.c          |    9
 drivers/gpu/drm/i915/intel_csr.c           |  414
 drivers/gpu/drm/i915/intel_ddi.c           |  101
 drivers/gpu/drm/i915/intel_display.c       | 1774
 drivers/gpu/drm/i915/intel_dp.c            |  151
 drivers/gpu/drm/i915/intel_dp_mst.c        |   13
 drivers/gpu/drm/i915/intel_drv.h           |   46
 drivers/gpu/drm/i915/intel_fbc.c           |    2
 drivers/gpu/drm/i915/intel_hdmi.c          |   46
 drivers/gpu/drm/i915/intel_lrc.c           |    7
 drivers/gpu/drm/i915/intel_panel.c         |   87
 drivers/gpu/drm/i915/intel_pm.c            |   19
 drivers/gpu/drm/i915/intel_ringbuffer.c    |   67
 drivers/gpu/drm/i915/intel_runtime_pm.c    |  251
 drivers/gpu/drm/i915/intel_sdvo.c          |    6
 drivers/gpu/drm/i915/intel_sprite.c        |  262
 include/drm/drm_crtc.h                     |    9
 35 files changed, 2664 insertions(+), 1454 deletions(-)
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 5d30592c83cd..5e68c3c7d5cf 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -280,6 +280,8 @@ mode_fixup(struct drm_atomic_state *state)
*/
encoder = conn_state->best_encoder;
funcs = encoder->helper_private;
+ if (!funcs)
+ continue;
if (encoder->bridge && encoder->bridge->funcs->mode_fixup) {
ret = encoder->bridge->funcs->mode_fixup(
@@ -317,6 +319,9 @@ mode_fixup(struct drm_atomic_state *state)
continue;
funcs = crtc->helper_private;
+ if (!funcs->mode_fixup)
+ continue;
+
ret = funcs->mode_fixup(crtc, &crtc_state->mode,
&crtc_state->adjusted_mode);
if (!ret) {
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 53bc7a628909..e426223482fb 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1041,13 +1041,15 @@ static bool drm_edid_is_zero(const u8 *in_edid, int length)
* @raw_edid: pointer to raw EDID block
* @block: type of block to validate (0 for base, extension otherwise)
* @print_bad_edid: if true, dump bad EDID blocks to the console
+ * @edid_corrupt: if true, the header or checksum is invalid
*
* Validate a base or extension EDID block and optionally dump bad blocks to
* the console.
*
* Return: True if the block is valid, false otherwise.
*/
-bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
+bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
+ bool *edid_corrupt)
{
u8 csum;
struct edid *edid = (struct edid *)raw_edid;
@@ -1060,11 +1062,22 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
if (block == 0) {
int score = drm_edid_header_is_valid(raw_edid);
- if (score == 8) ;
- else if (score >= edid_fixup) {
+ if (score == 8) {
+ if (edid_corrupt)
+ *edid_corrupt = false;
+ } else if (score >= edid_fixup) {
+ /* Displayport Link CTS Core 1.2 rev1.1 test 4.2.2.6
+ * The corrupt flag needs to be set here, otherwise the
+ * fix-up code here will correct the problem, the
+ * checksum is correct and the test fails
+ */
+ if (edid_corrupt)
+ *edid_corrupt = true;
DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
memcpy(raw_edid, edid_header, sizeof(edid_header));
} else {
+ if (edid_corrupt)
+ *edid_corrupt = true;
goto bad;
}
}
@@ -1075,6 +1088,9 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
}
+ if (edid_corrupt)
+ *edid_corrupt = true;
+
/* allow CEA to slide through, switches mangle this */
if (raw_edid[0] != 0x02)
goto bad;
@@ -1129,7 +1145,7 @@ bool drm_edid_is_valid(struct edid *edid)
return false;
for (i = 0; i <= edid->extensions; i++)
- if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true))
+ if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true, NULL))
return false;
return true;
@@ -1232,7 +1248,8 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
for (i = 0; i < 4; i++) {
if (get_edid_block(data, block, 0, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block, 0, print_bad_edid))
+ if (drm_edid_block_valid(block, 0, print_bad_edid,
+ &connector->edid_corrupt))
break;
if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
connector->null_edid_counter++;
@@ -1257,7 +1274,10 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
block + (valid_extensions + 1) * EDID_LENGTH,
j, EDID_LENGTH))
goto out;
- if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH, j, print_bad_edid)) {
+ if (drm_edid_block_valid(block + (valid_extensions + 1)
+ * EDID_LENGTH, j,
+ print_bad_edid,
+ NULL)) {
valid_extensions++;
break;
}
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 4c0aa97aaf03..c5605fe4907e 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -216,7 +216,8 @@ static void *edid_load(struct drm_connector *connector, const char *name,
goto out;
}
- if (!drm_edid_block_valid(edid, 0, print_bad_edid)) {
+ if (!drm_edid_block_valid(edid, 0, print_bad_edid,
+ &connector->edid_corrupt)) {
connector->bad_edid_counter++;
DRM_ERROR("Base block of EDID firmware \"%s\" is invalid ",
name);
@@ -229,7 +230,9 @@ static void *edid_load(struct drm_connector *connector, const char *name,
if (i != valid_extensions + 1)
memcpy(edid + (valid_extensions + 1) * EDID_LENGTH,
edid + i * EDID_LENGTH, EDID_LENGTH);
- if (drm_edid_block_valid(edid + i * EDID_LENGTH, i, print_bad_edid))
+ if (drm_edid_block_valid(edid + i * EDID_LENGTH, i,
+ print_bad_edid,
+ NULL))
valid_extensions++;
}
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index a69002e2257d..5238deb64505 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -12,7 +12,8 @@ i915-y := i915_drv.o \
i915_suspend.o \
i915_sysfs.o \
intel_pm.o \
- intel_runtime_pm.o
+ intel_runtime_pm.o \
+ intel_csr.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9c2b9e450799..adbbddab42c6 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1211,12 +1211,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
GEN6_CURBSYTAVG_MASK);
+ seq_printf(m, "Up threshold: %d%%\n",
+ dev_priv->rps.up_threshold);
+
seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
GEN6_CURIAVG_MASK);
seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
GEN6_CURBSYTAVG_MASK);
+ seq_printf(m, "Down threshold: %d%%\n",
+ dev_priv->rps.down_threshold);
max_freq = (rp_state_cap & 0xff0000) >> 16;
max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
@@ -1232,12 +1237,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1);
seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(dev_priv, max_freq));
-
seq_printf(m, "Max overclocked frequency: %dMHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+ seq_printf(m, "Current freq: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+ seq_printf(m, "Actual freq: %d MHz\n", cagf);
seq_printf(m, "Idle freq: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq));
+ seq_printf(m, "Min freq: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.min_freq));
+ seq_printf(m, "Max freq: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
+ seq_printf(m,
+ "efficient (RPe) frequency: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
} else if (IS_VALLEYVIEW(dev)) {
u32 freq_sts;
@@ -1246,6 +1260,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
+ seq_printf(m, "actual GPU freq: %d MHz\n",
+ intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
+
+ seq_printf(m, "current GPU freq: %d MHz\n",
+ intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq));
+
seq_printf(m, "max GPU freq: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.max_freq));
@@ -1258,9 +1278,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m,
"efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
-
- seq_printf(m, "current GPU freq: %d MHz\n",
- intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff));
mutex_unlock(&dev_priv->rps.hw_lock);
} else {
seq_puts(m, "no P-state info available\n");
@@ -3594,8 +3611,7 @@ static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
intel_display_power_get(dev_priv,
POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
- dev_priv->display.crtc_disable(&crtc->base);
- dev_priv->display.crtc_enable(&crtc->base);
+ intel_crtc_reset(crtc);
}
drm_modeset_unlock_all(dev);
}
@@ -3616,8 +3632,7 @@ static void hsw_undo_trans_edp_pipe_A_crc_wa(struct drm_device *dev)
if (crtc->config->pch_pfit.force_thru) {
crtc->config->pch_pfit.force_thru = false;
- dev_priv->display.crtc_disable(&crtc->base);
- dev_priv->display.crtc_enable(&crtc->base);
+ intel_crtc_reset(crtc);
intel_display_power_put(dev_priv,
POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_A));
@@ -3934,6 +3949,212 @@ static const struct file_operations i915_display_crc_ctl_fops = {
.write = display_crc_ctl_write
};
+static ssize_t i915_displayport_test_active_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ char *input_buffer;
+ int status = 0;
+ struct seq_file *m;
+ struct drm_device *dev;
+ struct drm_connector *connector;
+ struct list_head *connector_list;
+ struct intel_dp *intel_dp;
+ int val = 0;
+
+ m = file->private_data;
+ if (!m) {
+ status = -ENODEV;
+ return status;
+ }
+ dev = m->private;
+
+ if (!dev) {
+ status = -ENODEV;
+ return status;
+ }
+ connector_list = &dev->mode_config.connector_list;
+
+ if (len == 0)
+ return 0;
+
+ input_buffer = kmalloc(len + 1, GFP_KERNEL);
+ if (!input_buffer)
+ return -ENOMEM;
+
+ if (copy_from_user(input_buffer, ubuf, len)) {
+ status = -EFAULT;
+ goto out;
+ }
+
+ input_buffer[len] = '\0';
+ DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
+
+ list_for_each_entry(connector, connector_list, head) {
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ if (connector->connector_type ==
+ DRM_MODE_CONNECTOR_DisplayPort &&
+ connector->status == connector_status_connected &&
+ connector->encoder != NULL) {
+ intel_dp = enc_to_intel_dp(connector->encoder);
+ status = kstrtoint(input_buffer, 10, &val);
+ if (status < 0)
+ goto out;
+ DRM_DEBUG_DRIVER("Got %d for test active\n", val);
+ /* To prevent erroneous activation of the compliance
+ * testing code, only accept an actual value of 1 here
+ */
+ if (val == 1)
+ intel_dp->compliance_test_active = 1;
+ else
+ intel_dp->compliance_test_active = 0;
+ }
+ }
+out:
+ kfree(input_buffer);
+ if (status < 0)
+ return status;
+
+ *offp += len;
+ return len;
+}
+
+static int i915_displayport_test_active_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+ struct drm_connector *connector;
+ struct list_head *connector_list = &dev->mode_config.connector_list;
+ struct intel_dp *intel_dp;
+
+ if (!dev)
+ return -ENODEV;
+
+ list_for_each_entry(connector, connector_list, head) {
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ if (connector->status == connector_status_connected &&
+ connector->encoder != NULL) {
+ intel_dp = enc_to_intel_dp(connector->encoder);
+ if (intel_dp->compliance_test_active)
+ seq_puts(m, "1");
+ else
+ seq_puts(m, "0");
+ } else
+ seq_puts(m, "0");
+ }
+
+ return 0;
+}
+
+static int i915_displayport_test_active_open(struct inode *inode,
+ struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+
+ return single_open(file, i915_displayport_test_active_show, dev);
+}
+
+static const struct file_operations i915_displayport_test_active_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_displayport_test_active_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_displayport_test_active_write
+};
+
+static int i915_displayport_test_data_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+ struct drm_connector *connector;
+ struct list_head *connector_list = &dev->mode_config.connector_list;
+ struct intel_dp *intel_dp;
+
+ if (!dev)
+ return -ENODEV;
+
+ list_for_each_entry(connector, connector_list, head) {
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ if (connector->status == connector_status_connected &&
+ connector->encoder != NULL) {
+ intel_dp = enc_to_intel_dp(connector->encoder);
+ seq_printf(m, "%lx", intel_dp->compliance_test_data);
+ } else
+ seq_puts(m, "0");
+ }
+
+ return 0;
+}
+static int i915_displayport_test_data_open(struct inode *inode,
+ struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+
+ return single_open(file, i915_displayport_test_data_show, dev);
+}
+
+static const struct file_operations i915_displayport_test_data_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_displayport_test_data_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+static int i915_displayport_test_type_show(struct seq_file *m, void *data)
+{
+ struct drm_device *dev = m->private;
+ struct drm_connector *connector;
+ struct list_head *connector_list = &dev->mode_config.connector_list;
+ struct intel_dp *intel_dp;
+
+ if (!dev)
+ return -ENODEV;
+
+ list_for_each_entry(connector, connector_list, head) {
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ if (connector->status == connector_status_connected &&
+ connector->encoder != NULL) {
+ intel_dp = enc_to_intel_dp(connector->encoder);
+ seq_printf(m, "%02lx", intel_dp->compliance_test_type);
+ } else
+ seq_puts(m, "0");
+ }
+
+ return 0;
+}
+
+static int i915_displayport_test_type_open(struct inode *inode,
+ struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
+
+ return single_open(file, i915_displayport_test_type_show, dev);
+}
+
+static const struct file_operations i915_displayport_test_type_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_displayport_test_type_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
{
struct drm_device *dev = m->private;
@@ -4829,6 +5050,9 @@ static const struct i915_debugfs_files {
{"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
{"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
{"i915_fbc_false_color", &i915_fbc_fc_fops},
+ {"i915_dp_test_data", &i915_displayport_test_data_fops},
+ {"i915_dp_test_type", &i915_displayport_test_type_fops},
+ {"i915_dp_test_active", &i915_displayport_test_active_fops}
};
void intel_display_crc_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index e44116f0ad0a..a238889630d8 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -816,6 +816,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->mmio_flip_lock);
mutex_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->modeset_restore_lock);
+ mutex_init(&dev_priv->csr_lock);
intel_pm_setup(dev);
@@ -861,9 +862,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_uncore_init(dev);
+ /* Load CSR Firmware for SKL */
+ intel_csr_ucode_init(dev);
+
ret = i915_gem_gtt_init(dev);
if (ret)
- goto out_regs;
+ goto out_freecsr;
/* WARNING: Apparently we must kick fbdev drivers before vgacon,
* otherwise the vga fbdev driver falls over. */
@@ -1033,7 +1037,8 @@ out_mtrrfree:
io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
i915_global_gtt_cleanup(dev);
-out_regs:
+out_freecsr:
+ intel_csr_ucode_fini(dev);
intel_uncore_fini(dev);
pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
@@ -1113,6 +1118,8 @@ int i915_driver_unload(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_stolen(dev);
+ intel_csr_ucode_fini(dev);
+
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e70adfd3b2d1..6bb6c47db49f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -556,6 +556,26 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
cancel_delayed_work_sync(&dev_priv->hotplug_reenable_work);
}
+void i915_firmware_load_error_print(const char *fw_path, int err)
+{
+ DRM_ERROR("failed to load firmware %s (%d)\n", fw_path, err);
+
+ /*
+ * If the reason is not known assume -ENOENT since that's the most
+ * usual failure mode.
+ */
+ if (!err)
+ err = -ENOENT;
+
+ if (!(IS_BUILTIN(CONFIG_DRM_I915) && err == -ENOENT))
+ return;
+
+ DRM_ERROR(
+ "The driver is built-in, so to load the firmware you need to\n"
+ "include it either in the kernel (see CONFIG_EXTRA_FIRMWARE) or\n"
+ "in your initrd/initramfs image.\n");
+}
+
static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
@@ -574,6 +594,8 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume);
+static int skl_resume_prepare(struct drm_i915_private *dev_priv);
+
static int i915_drm_suspend(struct drm_device *dev)
{
@@ -788,6 +810,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv);
+ else if (IS_SKYLAKE(dev_priv))
+ ret = skl_resume_prepare(dev_priv);
intel_uncore_sanitize(dev);
intel_power_domains_init_hw(dev_priv);
@@ -1002,6 +1026,19 @@ static int i915_pm_resume(struct device *dev)
return i915_drm_resume(drm_dev);
}
+static int skl_suspend_complete(struct drm_i915_private *dev_priv)
+{
+ /* Enabling DC6 is not a hard requirement to enter runtime D3 */
+
+ /*
+ * This is to ensure that CSR isn't identified as loaded before
+ * CSR-loading program is called during runtime-resume.
+ */
+ intel_csr_load_status_set(dev_priv, FW_UNINITIALIZED);
+
+ return 0;
+}
+
static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{
hsw_enable_pc8(dev_priv);
@@ -1041,6 +1078,15 @@ static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
return 0;
}
+static int skl_resume_prepare(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ intel_csr_load_program(dev);
+
+ return 0;
+}
+
/*
* Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is
@@ -1502,6 +1548,8 @@ static int intel_runtime_resume(struct device *device)
if (IS_BROXTON(dev))
ret = bxt_resume_prepare(dev_priv);
+ else if (IS_SKYLAKE(dev))
+ ret = skl_resume_prepare(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_disable_pc8(dev_priv);
else if (IS_VALLEYVIEW(dev_priv))
@@ -1536,6 +1584,8 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv)
if (IS_BROXTON(dev))
ret = bxt_suspend_complete(dev_priv);
+ else if (IS_SKYLAKE(dev))
+ ret = skl_suspend_complete(dev_priv);
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
ret = hsw_suspend_complete(dev_priv);
else if (IS_VALLEYVIEW(dev))
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e8e8145df869..acfa4fc93803 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -56,7 +56,7 @@
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20150423"
+#define DRIVER_DATE "20150508"
#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@@ -238,6 +238,11 @@ enum hpd_pin {
#define for_each_crtc(dev, crtc) \
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+#define for_each_intel_plane(dev, intel_plane) \
+ list_for_each_entry(intel_plane, \
+ &dev->mode_config.plane_list, \
+ base.head)
+
#define for_each_intel_crtc(dev, intel_crtc) \
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
@@ -295,7 +300,7 @@ struct intel_dpll_hw_state {
/* skl */
/*
* DPLL_CTRL1 has 6 bits for each each this DPLL. We store those in
- * lower part of crtl1 and they get shifted into position when writing
+ * lower part of ctrl1 and they get shifted into position when writing
* the register. This allows us to easily compare the state to share
* the DPLL.
*/
@@ -669,6 +674,22 @@ struct intel_uncore {
#define for_each_fw_domain(domain__, dev_priv__, i__) \
for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
+enum csr_state {
+ FW_UNINITIALIZED = 0,
+ FW_LOADED,
+ FW_FAILED
+};
+
+struct intel_csr {
+ const char *fw_path;
+ __be32 *dmc_payload;
+ uint32_t dmc_fw_size;
+ uint32_t mmio_count;
+ uint32_t mmioaddr[8];
+ uint32_t mmiodata[8];
+ enum csr_state state;
+};
+
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
func(is_mobile) sep \
func(is_i85x) sep \
@@ -1348,7 +1369,6 @@ struct intel_vbt_data {
bool edp_initialized;
bool edp_support;
int edp_bpp;
- bool edp_low_vswing;
struct edp_power_seq edp_pps;
struct {
@@ -1574,6 +1594,11 @@ struct drm_i915_private {
struct i915_virtual_gpu vgpu;
+ struct intel_csr csr;
+
+ /* Display CSR-related protection */
+ struct mutex csr_lock;
+
struct intel_gmbus gmbus[GMBUS_NUM_PINS];
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
@@ -1757,6 +1782,8 @@ struct drm_i915_private {
u32 fdi_rx_config;
+ u32 chv_phy_control;
+
u32 suspend_count;
struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state vlv_s0ix_state;
@@ -1825,6 +1852,8 @@ struct drm_i915_private {
void (*stop_ring)(struct intel_engine_cs *ring);
} gt;
+ bool edp_low_vswing;
+
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
@@ -2422,10 +2451,13 @@ struct drm_i915_cmd_table {
IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
IS_SKYLAKE(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
- IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
+ IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
+ IS_SKYLAKE(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
+#define HAS_CSR(dev) (IS_SKYLAKE(dev))
+
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
@@ -2491,6 +2523,7 @@ struct i915_params {
int mmio_debug;
bool verbose_state_checks;
bool nuclear_pageflip;
+ int edp_vswing;
};
extern struct i915_params i915 __read_mostly;
@@ -2516,6 +2549,7 @@ extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
+void i915_firmware_load_error_print(const char *fw_path, int err);
/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e8f6f4c0a2c6..f128ed8d6f65 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1635,6 +1635,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_ggtt_view view = i915_ggtt_view_normal;
pgoff_t page_offset;
unsigned long pfn;
int ret = 0;
@@ -1667,8 +1668,23 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unlock;
}
- /* Now bind it into the GTT if needed */
- ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
+ /* Use a partial view if the object is bigger than the aperture. */
+ if (obj->base.size >= dev_priv->gtt.mappable_end &&
+ obj->tiling_mode == I915_TILING_NONE) {
+ static const unsigned int chunk_size = 256; // 1 MiB
+
+ memset(&view, 0, sizeof(view));
+ view.type = I915_GGTT_VIEW_PARTIAL;
+ view.params.partial.offset = rounddown(page_offset, chunk_size);
+ view.params.partial.size =
+ min_t(unsigned int,
+ chunk_size,
+ (vma->vm_end - vma->vm_start)/PAGE_SIZE -
+ view.params.partial.offset);
+ }
+
+ /* Now pin it into the GTT if needed */
+ ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
if (ret)
goto unlock;
@@ -1681,30 +1697,50 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unpin;
/* Finally, remap it using the new GTT offset */
- pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
+ pfn = dev_priv->gtt.mappable_base +
+ i915_gem_obj_ggtt_offset_view(obj, &view);
pfn >>= PAGE_SHIFT;
- if (!obj->fault_mappable) {
- unsigned long size = min_t(unsigned long,
- vma->vm_end - vma->vm_start,
- obj->base.size);
- int i;
+ if (unlikely(view.type == I915_GGTT_VIEW_PARTIAL)) {
+ /* Overriding existing pages in partial view does not cause
+ * us any trouble as TLBs are still valid because the fault
+ * is due to userspace losing part of the mapping or never
+ * having accessed it before (at this partials' range).
+ */
+ unsigned long base = vma->vm_start +
+ (view.params.partial.offset << PAGE_SHIFT);
+ unsigned int i;
- for (i = 0; i < size >> PAGE_SHIFT; i++) {
- ret = vm_insert_pfn(vma,
- (unsigned long)vma->vm_start + i * PAGE_SIZE,
- pfn + i);
+ for (i = 0; i < view.params.partial.size; i++) {
+ ret = vm_insert_pfn(vma, base + i * PAGE_SIZE, pfn + i);
if (ret)
break;
}
obj->fault_mappable = true;
- } else
- ret = vm_insert_pfn(vma,
- (unsigned long)vmf->virtual_address,
- pfn + page_offset);
+ } else {
+ if (!obj->fault_mappable) {
+ unsigned long size = min_t(unsigned long,
+ vma->vm_end - vma->vm_start,
+ obj->base.size);
+ int i;
+
+ for (i = 0; i < size >> PAGE_SHIFT; i++) {
+ ret = vm_insert_pfn(vma,
+ (unsigned long)vma->vm_start + i * PAGE_SIZE,
+ pfn + i);
+ if (ret)
+ break;
+ }
+
+ obj->fault_mappable = true;
+ } else
+ ret = vm_insert_pfn(vma,
+ (unsigned long)vmf->virtual_address,
+ pfn + page_offset);
+ }
unpin:
- i915_gem_object_ggtt_unpin(obj);
+ i915_gem_object_ggtt_unpin_view(obj, &view);
unlock:
mutex_unlock(&dev->struct_mutex);
out:
@@ -1897,11 +1933,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
goto unlock;
}
- if (obj->base.size > dev_priv->gtt.mappable_end) {
- ret = -E2BIG;
- goto out;
- }
-
if (obj->madv != I915_MADV_WILLNEED) {
DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
ret = -EFAULT;
@@ -3069,6 +3100,7 @@ int i915_vma_unbind(struct i915_vma *vma)
trace_i915_vma_unbind(vma);
vma->vm->unbind_vma(vma);
+ vma->bound = 0;
list_del_init(&vma->mm_list);
if (i915_is_ggtt(vma->vm)) {
@@ -3497,7 +3529,8 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
}
/**
- * Finds free space in the GTT aperture and binds the object there.
+ * Finds free space in the GTT aperture and binds the object or a view of it
+ * there.
*/
static struct i915_vma *
i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
@@ -3516,36 +3549,60 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
- if(WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
- return ERR_PTR(-EINVAL);
+ if (i915_is_ggtt(vm)) {
+ u32 view_size;
+
+ if (WARN_ON(!ggtt_view))
+ return ERR_PTR(-EINVAL);
- fence_size = i915_gem_get_gtt_size(dev,
- obj->base.size,
- obj->tiling_mode);
- fence_alignment = i915_gem_get_gtt_alignment(dev,
- obj->base.size,
- obj->tiling_mode, true);
- unfenced_alignment =
- i915_gem_get_gtt_alignment(dev,
- obj->base.size,
- obj->tiling_mode, false);
+ view_size = i915_ggtt_view_size(obj, ggtt_view);
+
+ fence_size = i915_gem_get_gtt_size(dev,
+ view_size,
+ obj->tiling_mode);
+ fence_alignment = i915_gem_get_gtt_alignment(dev,
+ view_size,
+ obj->tiling_mode,
+ true);
+ unfenced_alignment = i915_gem_get_gtt_alignment(dev,
+ view_size,
+ obj->tiling_mode,
+ false);
+ size = flags & PIN_MAPPABLE ? fence_size : view_size;
+ } else {
+ fence_size = i915_gem_get_gtt_size(dev,
+ obj->base.size,
+ obj->tiling_mode);
+ fence_alignment = i915_gem_get_gtt_alignment(dev,
+ obj->base.size,
+ obj->tiling_mode,
+ true);
+ unfenced_alignment =
+ i915_gem_get_gtt_alignment(dev,
+ obj->base.size,
+ obj->tiling_mode,
+ false);
+ size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
+ }
if (alignment == 0)
alignment = flags & PIN_MAPPABLE ? fence_alignment :
unfenced_alignment;
if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
- DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
+ DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
+ ggtt_view ? ggtt_view->type : 0,
+ alignment);
return ERR_PTR(-EINVAL);
}
- size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
-
- /* If the object is bigger than the entire aperture, reject it early
- * before evicting everything in a vain attempt to find space.
+ /* If binding the object/GGTT view requires more space than the entire
+ * aperture has, reject it early before evicting everything in a vain
+ * attempt to find space.
*/
- if (obj->base.size > end) {
- DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
- obj->base.size,
+ if (size > end) {
+ DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%u > %s aperture=%lu\n",
+ ggtt_view ? ggtt_view->type : 0,
+ size,
flags & PIN_MAPPABLE ? "mappable" : "total",
end);
return ERR_PTR(-E2BIG);
@@ -3841,17 +3898,10 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_caching *args = data;
struct drm_i915_gem_object *obj;
- int ret;
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- return ret;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
- if (&obj->base == NULL) {
- ret = -ENOENT;
- goto unlock;
- }
+ if (&obj->base == NULL)
+ return -ENOENT;
switch (obj->cache_level) {
case I915_CACHE_LLC:
@@ -3868,10 +3918,8 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
break;
}
- drm_gem_object_unreference(&obj->base);
-unlock:
- mutex_unlock(&dev->struct_mutex);
- return ret;
+ drm_gem_object_unreference_unlocked(&obj->base);
+ return 0;
}
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
@@ -4207,7 +4255,8 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
return ret;
}
- if ((bound ^ vma->bound) & GLOBAL_BIND) {
+ if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
+ (bound ^ vma->bound) & GLOBAL_BIND) {
bool mappable, fenceable;
u32 fence_size, fence_alignment;
@@ -4226,9 +4275,9 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
dev_priv->gtt.mappable_end);
obj->map_and_fenceable = mappable && fenceable;
- }
- WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
+ WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
+ }
vma->pin_count++;
return 0;
@@ -5226,13 +5275,10 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
{
struct i915_vma *vma;
- list_for_each_entry(vma, &obj->vma_list, vma_link) {
- if (i915_is_ggtt(vma->vm) &&
- vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
- continue;
+ list_for_each_entry(vma, &obj->vma_list, vma_link)
if (vma->pin_count > 0)
return true;
- }
+
return false;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d2e21c549756..560c79a8a43d 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1540,29 +1540,39 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
if (i915_needs_cmd_parser(ring) && args->batch_len) {
- batch_obj = i915_gem_execbuffer_parse(ring,
+ struct drm_i915_gem_object *parsed_batch_obj;
+
+ parsed_batch_obj = i915_gem_execbuffer_parse(ring,
&shadow_exec_entry,
eb,
batch_obj,
args->batch_start_offset,
args->batch_len,
file->is_master);
- if (IS_ERR(batch_obj)) {
- ret = PTR_ERR(batch_obj);
+ if (IS_ERR(parsed_batch_obj)) {
+ ret = PTR_ERR(parsed_batch_obj);
goto err;
}
/*
- * Set the DISPATCH_SECURE bit to remove the NON_SECURE
- * bit from MI_BATCH_BUFFER_START commands issued in the
- * dispatch_execbuffer implementations. We specifically
- * don't want that set when the command parser is
- * enabled.
+ * parsed_batch_obj == batch_obj means batch not fully parsed:
+ * Accept, but don't promote to secure.
*/
- if (USES_PPGTT(dev))
- dispatch_flags |= I915_DISPATCH_SECURE;
- exec_start = 0;
+ if (parsed_batch_obj != batch_obj) {
+ /*
+ * Batch parsed and accepted:
+ *
+ * Set the DISPATCH_SECURE bit to remove the NON_SECURE
+ * bit from MI_BATCH_BUFFER_START commands issued in
+ * the dispatch_execbuffer implementations. We
+ * specifically don't want that set on batches the
+ * command parser has accepted.
+ */
+ dispatch_flags |= I915_DISPATCH_SECURE;
+ exec_start = 0;
+ batch_obj = parsed_batch_obj;
+ }
}
batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 9d3852c521c7..e3bcc3ba7e40 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -756,8 +756,8 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_hw_ppgtt *ppgtt,
WARN_ON(!bitmap_empty(new_pds, GEN8_LEGACY_PDPES));
- /* FIXME: PPGTT container_of won't work for 64b */
- WARN_ON((start + length) > 0x800000000ULL);
+ /* FIXME: upper bound must not overflow 32 bits */
+ WARN_ON((start + length) >= (1ULL << 32));
gen8_for_each_pdpe(pd, pdp, start, length, temp, pdpe) {
if (pd)
@@ -844,15 +844,6 @@ static int gen8_alloc_va_range(struct i915_address_space *vm,
uint32_t pdpe;
int ret;
-#ifndef CONFIG_64BIT
- /* Disallow 64b address on 32b platforms. Nothing is wrong with doing
- * this in hardware, but a lot of the drm code is not prepared to handle
- * 64b offset on 32b platforms.
- * This will be addressed when 48b PPGTT is added */
- if (start + length > 0x100000000ULL)
- return -E2BIG;
-#endif
-
/* Wrap is never okay since we can only represent 48b, and we don't
* actually use the other side of the canonical address space.
*/
@@ -1945,19 +1936,23 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
struct drm_device *dev = vma->vm->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = vma->obj;
+ const uint64_t size = min_t(uint64_t,
+ obj->base.size,
+ vma->node.size);
if (vma->bound & GLOBAL_BIND) {
vma->vm->clear_range(vma->vm,
vma->node.start,
- obj->base.size,
+ size,
true);
}
if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
+
appgtt->base.clear_range(&appgtt->base,
vma->node.start,
- obj->base.size,
+ size,
true);
}
}
@@ -2758,6 +2753,47 @@ err_st_alloc:
return ERR_PTR(ret);
}
+static struct sg_table *
+intel_partial_pages(const struct i915_ggtt_view *view,
+ struct drm_i915_gem_object *obj)
+{
+ struct sg_table *st;
+ struct scatterlist *sg;
+ struct sg_page_iter obj_sg_iter;
+ int ret = -ENOMEM;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_st_alloc;
+
+ ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
+ if (ret)
+ goto err_sg_alloc;
+
+ sg = st->sgl;
+ st->nents = 0;
+ for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
+ view->params.partial.offset)
+ {
+ if (st->nents >= view->params.partial.size)
+ break;
+
+ sg_set_page(sg, NULL, PAGE_SIZE, 0);
+ sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
+ sg_dma_len(sg) = PAGE_SIZE;
+
+ sg = sg_next(sg);
+ st->nents++;
+ }
+
+ return st;
+
+err_sg_alloc:
+ kfree(st);
+err_st_alloc:
+ return ERR_PTR(ret);
+}
+
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
@@ -2771,6 +2807,9 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
vma->ggtt_view.pages =
intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
+ else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
+ vma->ggtt_view.pages =
+ intel_partial_pages(&vma->ggtt_view, vma->obj);
else
WARN_ONCE(1, "GGTT view %u not implemented!\n",
vma->ggtt_view.type);
@@ -2843,3 +2882,25 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
return 0;
}
+
+/**
+ * i915_ggtt_view_size - Get the size of a GGTT view.
+ * @obj: Object the view is of.
+ * @view: The view in question.
+ *
+ * @return The size of the GGTT view in bytes.
+ */
+size_t
+i915_ggtt_view_size(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view)
+{
+ if (view->type == I915_GGTT_VIEW_NORMAL ||
+ view->type == I915_GGTT_VIEW_ROTATED) {
+ return obj->base.size;
+ } else if (view->type == I915_GGTT_VIEW_PARTIAL) {
+ return view->params.partial.size << PAGE_SHIFT;
+ } else {
+ WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
+ return obj->base.size;
+ }
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 4e6cac575cd8..0d46dd20bf71 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -117,7 +117,8 @@ typedef uint64_t gen8_pde_t;
enum i915_ggtt_view_type {
I915_GGTT_VIEW_NORMAL = 0,
- I915_GGTT_VIEW_ROTATED
+ I915_GGTT_VIEW_ROTATED,
+ I915_GGTT_VIEW_PARTIAL,
};
struct intel_rotation_info {
@@ -130,6 +131,13 @@ struct intel_rotation_info {
struct i915_ggtt_view {
enum i915_ggtt_view_type type;
+ union {
+ struct {
+ unsigned long offset;
+ unsigned int size;
+ } partial;
+ } params;
+
struct sg_table *pages;
union {
@@ -495,7 +503,15 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a,
if (WARN_ON(!a || !b))
return false;
- return a->type == b->type;
+ if (a->type != b->type)
+ return false;
+ if (a->type == I915_GGTT_VIEW_PARTIAL)
+ return !memcmp(&a->params, &b->params, sizeof(a->params));
+ return true;
}
+size_t
+i915_ggtt_view_size(struct drm_i915_gem_object *obj,
+ const struct i915_ggtt_view *view);
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index ac22614dbb0e..a3e330d2a1d8 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -554,6 +554,7 @@ static void i915_error_state_free(struct kref *error_ref)
for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
i915_error_object_free(error->ring[i].batchbuffer);
+ i915_error_object_free(error->ring[i].wa_batchbuffer);
i915_error_object_free(error->ring[i].ringbuffer);
i915_error_object_free(error->ring[i].hws_page);
i915_error_object_free(error->ring[i].ctx);
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index bb64415a1c3e..8ac5a1b29ac0 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -53,6 +53,7 @@ struct i915_params i915 __read_mostly = {
.mmio_debug = 0,
.verbose_state_checks = 1,
.nuclear_pageflip = 0,
+ .edp_vswing = 0,
};
module_param_named(modeset, i915.modeset, int, 0400);
@@ -184,3 +185,10 @@ MODULE_PARM_DESC(verbose_state_checks,
module_param_named_unsafe(nuclear_pageflip, i915.nuclear_pageflip, bool, 0600);
MODULE_PARM_DESC(nuclear_pageflip,
"Force atomic modeset functionality; only planes work for now (default: false).");
+
+/* WA to get away with the default setting in VBT for early platforms. Will be removed. */
+module_param_named_unsafe(edp_vswing, i915.edp_vswing, int, 0400);
+MODULE_PARM_DESC(edp_vswing,
+ "Ignore/Override vswing pre-emph table selection from VBT "
+ "(0=use value from vbt [default], 1=low power swing(200mV),"
+ "2=default swing(400mV))");
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e35d7f29d7c2..58627a319416 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -670,6 +670,12 @@ enum skl_disp_power_wells {
#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27
#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000
+#define VLV_TURBO_SOC_OVERRIDE 0x04
+#define VLV_OVERRIDE_EN 1
+#define VLV_SOC_TDP_EN (1 << 1)
+#define VLV_BIAS_CPU_125_SOC_875 (6 << 2)
+#define CHV_BIAS_CPU_50_SOC_50 (3 << 2)
+
#define VLV_CZ_CLOCK_TO_MILLI_SEC 100000
/* vlv2 north clock has */
@@ -955,6 +961,7 @@ enum skl_disp_power_wells {
#define _VLV_PCS_DW11_CH0 0x822c
#define _VLV_PCS_DW11_CH1 0x842c
+#define DPIO_TX2_STAGGER_MASK(x) ((x)<<24)
#define DPIO_LANEDESKEW_STRAP_OVRD (1<<3)
#define DPIO_LEFT_TXFIFO_RST_MASTER (1<<1)
#define DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0)
@@ -967,8 +974,20 @@ enum skl_disp_power_wells {
#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)
#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1)
+#define _VLV_PCS01_DW12_CH0 0x0230
+#define _VLV_PCS23_DW12_CH0 0x0430
+#define _VLV_PCS01_DW12_CH1 0x2630
+#define _VLV_PCS23_DW12_CH1 0x2830
+#define VLV_PCS01_DW12(ch) _PORT(ch, _VLV_PCS01_DW12_CH0, _VLV_PCS01_DW12_CH1)
+#define VLV_PCS23_DW12(ch) _PORT(ch, _VLV_PCS23_DW12_CH0, _VLV_PCS23_DW12_CH1)
+
#define _VLV_PCS_DW12_CH0 0x8230
#define _VLV_PCS_DW12_CH1 0x8430
+#define DPIO_TX2_STAGGER_MULT(x) ((x)<<20)
+#define DPIO_TX1_STAGGER_MULT(x) ((x)<<16)
+#define DPIO_TX1_STAGGER_MASK(x) ((x)<<8)
+#define DPIO_LANESTAGGER_STRAP_OVRD (1<<6)
+#define DPIO_LANESTAGGER_STRAP(x) ((x)<<0)
#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
#define _VLV_PCS_DW14_CH0 0x8238
@@ -2118,7 +2137,10 @@ enum skl_disp_power_wells {
#define DPIO_PHY_STATUS (VLV_DISPLAY_BASE + 0x6240)
#define DPLL_PORTD_READY_MASK (0xf)
#define DISPLAY_PHY_CONTROL (VLV_DISPLAY_BASE + 0x60100)
-#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
+#define PHY_CH_SU_PSR 0x1
+#define PHY_CH_DEEP_PSR 0x7
+#define PHY_CH_POWER_MODE(mode, phy, ch) ((mode) << (6*(phy)+3*(ch)+2))
+#define PHY_COM_LANE_RESET_DEASSERT(phy) (1 << (phy))
#define DISPLAY_PHY_STATUS (VLV_DISPLAY_BASE + 0x60104)
#define PHY_POWERGOOD(phy) (((phy) == DPIO_PHY0) ? (1<<31) : (1<<30))
@@ -3480,6 +3502,18 @@ enum skl_disp_power_wells {
#define UTIL_PIN_CTL 0x48400
#define UTIL_PIN_ENABLE (1 << 31)
+/* BXT backlight register definition. */
+#define BXT_BLC_PWM_CTL1 0xC8250
+#define BXT_BLC_PWM_ENABLE (1 << 31)
+#define BXT_BLC_PWM_POLARITY (1 << 29)
+#define BXT_BLC_PWM_FREQ1 0xC8254
+#define BXT_BLC_PWM_DUTY1 0xC8258
+
+#define BXT_BLC_PWM_CTL2 0xC8350
+#define BXT_BLC_PWM_FREQ2 0xC8354
+#define BXT_BLC_PWM_DUTY2 0xC8358
+
+
#define PCH_GTC_CTL 0xe7000
#define PCH_GTC_ENABLE (1 << 31)
@@ -5700,7 +5734,7 @@ enum skl_disp_power_wells {
#define HSW_NDE_RSTWRN_OPT 0x46408
#define RESET_PCH_HANDSHAKE_ENABLE (1<<4)
-#define FF_SLICE_CS_CHICKEN2 0x02e4
+#define FF_SLICE_CS_CHICKEN2 0x20e4
#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
/* GEN7 chicken */
@@ -6638,15 +6672,20 @@ enum skl_disp_power_wells {
#define GEN6_PCODE_MAILBOX 0x138124
#define GEN6_PCODE_READY (1<<31)
-#define GEN6_READ_OC_PARAMS 0xc
-#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
-#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
+#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
+#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
+#define GEN9_PCODE_READ_MEM_LATENCY 0x6
+#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
+#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
+#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16
+#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24
+#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
+#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
+#define GEN6_READ_OC_PARAMS 0xc
#define GEN6_PCODE_READ_D_COMP 0x10
#define GEN6_PCODE_WRITE_D_COMP 0x11
-#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
-#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
#define DISPLAY_IPS_CONTROL 0x19
#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
@@ -6655,12 +6694,6 @@ enum skl_disp_power_wells {
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
#define GEN6_PCODE_DATA1 0x13812C
-#define GEN9_PCODE_READ_MEM_LATENCY 0x6
-#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
-#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
-#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16
-#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24
-
#define GEN6_GT_CORE_STATUS 0x138060
#define GEN6_CORE_CPD_STATE_MASK (7<<4)
#define GEN6_RCn_MASK 7
@@ -6721,6 +6754,7 @@ enum skl_disp_power_wells {
#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
#define GEN7_MAX_PS_THREAD_DEP (8<<12)
#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
+#define GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE (1<<4)
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
#define GEN9_HALF_SLICE_CHICKEN5 0xe188
@@ -7135,16 +7169,16 @@ enum skl_disp_power_wells {
#define DPLL_CTRL1 0x6C058
#define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5))
#define DPLL_CTRL1_SSC(id) (1<<((id)*6+4))
-#define DPLL_CRTL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
-#define DPLL_CRTL1_LINK_RATE_SHIFT(id) ((id)*6+1)
-#define DPLL_CRTL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1))
+#define DPLL_CTRL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
+#define DPLL_CTRL1_LINK_RATE_SHIFT(id) ((id)*6+1)
+#define DPLL_CTRL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1))
#define DPLL_CTRL1_OVERRIDE(id) (1<<((id)*6))
-#define DPLL_CRTL1_LINK_RATE_2700 0
-#define DPLL_CRTL1_LINK_RATE_1350 1
-#define DPLL_CRTL1_LINK_RATE_810 2
-#define DPLL_CRTL1_LINK_RATE_1620 3
-#define DPLL_CRTL1_LINK_RATE_1080 4
-#define DPLL_CRTL1_LINK_RATE_2160 5
+#define DPLL_CTRL1_LINK_RATE_2700 0
+#define DPLL_CTRL1_LINK_RATE_1350 1
+#define DPLL_CTRL1_LINK_RATE_810 2
+#define DPLL_CTRL1_LINK_RATE_1620 3
+#define DPLL_CTRL1_LINK_RATE_1080 4
+#define DPLL_CTRL1_LINK_RATE_2160 5
/* DPLL control2 */
#define DPLL_CTRL2 0x6C05C
@@ -7204,6 +7238,17 @@ enum skl_disp_power_wells {
#define DC_STATE_EN_UPTO_DC5 (1<<0)
#define DC_STATE_EN_DC9 (1<<3)
+/*
+* SKL DC
+*/
+#define DC_STATE_EN 0x45504
+#define DC_STATE_EN_UPTO_DC5 (1<<0)
+#define DC_STATE_EN_UPTO_DC6 (2<<0)
+#define DC_STATE_EN_UPTO_DC5_DC6_MASK 0x3
+
+#define DC_STATE_DEBUG 0x45520
+#define DC_STATE_DEBUG_MASK_MEMORY_UP (1<<1)
+
/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
* since on HSW we can't write to it using I915_WRITE. */
#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 3c4b7cdeab77..7ed8033aae60 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -169,7 +169,7 @@ int intel_atomic_commit(struct drm_device *dev,
plane->state->state = NULL;
}
- /* swap crtc_state */
+ /* swap crtc_scaler_state */
for (i = 0; i < dev->mode_config.num_crtc; i++) {
struct drm_crtc *crtc = state->crtcs[i];
if (!crtc) {
@@ -178,6 +178,9 @@ int intel_atomic_commit(struct drm_device *dev,
to_intel_crtc(crtc)->config->scaler_state =
to_intel_crtc_state(state->crtc_states[i])->scaler_state;
+
+ if (INTEL_INFO(dev)->gen >= 9)
+ skl_detach_scalers(to_intel_crtc(crtc));
}
drm_atomic_helper_commit_planes(dev, state);
@@ -247,8 +250,12 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc)
crtc_state = kmemdup(intel_crtc->config,
sizeof(*intel_crtc->config), GFP_KERNEL);
- if (crtc_state)
- crtc_state->base.crtc = crtc;
+ if (!crtc_state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);
+
+ crtc_state->base.crtc = crtc;
return &crtc_state->base;
}
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index dc8e1360fb20..86ba4b2c3a65 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -85,8 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
return NULL;
state = &intel_state->base;
- if (state->fb)
- drm_framebuffer_reference(state->fb);
+
+ __drm_atomic_helper_plane_duplicate_state(plane, state);
return state;
}
@@ -111,6 +111,7 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
{
struct drm_crtc *crtc = state->crtc;
struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *crtc_state;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct intel_plane_state *intel_state = to_intel_plane_state(state);
@@ -126,6 +127,17 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
if (!crtc)
return 0;
+ /* FIXME: temporary hack necessary while we still use the plane update
+ * helper. */
+ if (state->state) {
+ crtc_state =
+ intel_atomic_get_crtc_state(state->state, intel_crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ } else {
+ crtc_state = intel_crtc->config;
+ }
+
/*
* The original src/dest coordinates are stored in state->base, but
* we want to keep another copy internal to our driver that we can
@@ -144,9 +156,9 @@ static int intel_plane_atomic_check(struct drm_plane *plane,
intel_state->clip.x1 = 0;
intel_state->clip.y1 = 0;
intel_state->clip.x2 =
- intel_crtc->active ? intel_crtc->config->pipe_src_w : 0;
+ crtc_state->base.active ? crtc_state->pipe_src_w : 0;
intel_state->clip.y2 =
- intel_crtc->active ? intel_crtc->config->pipe_src_h : 0;
+ crtc_state->base.active ? crtc_state->pipe_src_h : 0;
/*
* Disabling a plane is always okay; we just need to update
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index f72e93a45e11..c4312177b0ee 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -269,6 +269,9 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
port_name(port), pipe_name(pipe));
+ if (WARN_ON(port == PORT_A))
+ return;
+
if (HAS_PCH_IBX(dev_priv->dev)) {
aud_config = IBX_AUD_CFG(pipe);
aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
@@ -290,12 +293,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
tmp |= AUD_CONFIG_N_VALUE_INDEX;
I915_WRITE(aud_config, tmp);
- if (WARN_ON(!port)) {
- eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
- IBX_ELD_VALID(PORT_D);
- } else {
- eldv = IBX_ELD_VALID(port);
- }
+ eldv = IBX_ELD_VALID(port);
/* Invalidate ELD */
tmp = I915_READ(aud_cntrl_st2);
@@ -325,6 +323,9 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
port_name(port), pipe_name(pipe), drm_eld_size(eld));
+ if (WARN_ON(port == PORT_A))
+ return;
+
/*
* FIXME: We're supposed to wait for vblank here, but we have vblanks
* disabled during the mode set. The proper fix would be to push the
@@ -349,12 +350,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
}
- if (WARN_ON(!port)) {
- eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
- IBX_ELD_VALID(PORT_D);
- } else {
- eldv = IBX_ELD_VALID(port);
- }
+ eldv = IBX_ELD_VALID(port);
/* Invalidate ELD */
tmp = I915_READ(aud_cntrl_st2);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index c08368c03dad..cee596d0a6a2 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -672,8 +672,13 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
if (bdb->version >= 173) {
uint8_t vswing;
- vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
- dev_priv->vbt.edp_low_vswing = vswing == 0;
+ /* Don't read from VBT if the module parameter has a valid value */
+ if (i915.edp_vswing) {
+ dev_priv->edp_low_vswing = i915.edp_vswing == 1;
+ } else {
+ vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
+ dev_priv->edp_low_vswing = vswing == 0;
+ }
}
}
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
new file mode 100644
index 000000000000..9311cddb86e6
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+#include <linux/firmware.h>
+#include "i915_drv.h"
+#include "i915_reg.h"
+
+#define I915_CSR_SKL "i915/skl_dmc_ver4.bin"
+
+MODULE_FIRMWARE(I915_CSR_SKL);
+
+/*
+* SKL CSR registers for DC5 and DC6
+*/
+#define CSR_PROGRAM_BASE 0x80000
+#define CSR_SSP_BASE_ADDR_GEN9 0x00002FC0
+#define CSR_HTP_ADDR_SKL 0x00500034
+#define CSR_SSP_BASE 0x8F074
+#define CSR_HTP_SKL 0x8F004
+#define CSR_LAST_WRITE 0x8F034
+#define CSR_LAST_WRITE_VALUE 0xc003b400
+/* MMIO address range for CSR program (0x80000 - 0x82FFF) */
+#define CSR_MAX_FW_SIZE 0x2FFF
+#define CSR_DEFAULT_FW_OFFSET 0xFFFFFFFF
+#define CSR_MMIO_START_RANGE 0x80000
+#define CSR_MMIO_END_RANGE 0x8FFFF
+
+struct intel_css_header {
+ /* 0x09 for DMC */
+ uint32_t module_type;
+
+ /* Includes the DMC specific header in dwords */
+ uint32_t header_len;
+
+ /* value is always 0x10000 */
+ uint32_t header_ver;
+
+ /* Not used */
+ uint32_t module_id;
+
+ /* Not used */
+ uint32_t module_vendor;
+
+ /* in YYYYMMDD format */
+ uint32_t date;
+
+ /* Size in dwords (CSS_Headerlen + PackageHeaderLen + dmc FWsLen)/4 */
+ uint32_t size;
+
+ /* Not used */
+ uint32_t key_size;
+
+ /* Not used */
+ uint32_t modulus_size;
+
+ /* Not used */
+ uint32_t exponent_size;
+
+ /* Not used */
+ uint32_t reserved1[12];
+
+ /* Major Minor */
+ uint32_t version;
+
+ /* Not used */
+ uint32_t reserved2[8];
+
+ /* Not used */
+ uint32_t kernel_header_info;
+} __packed;
+
+struct intel_fw_info {
+ uint16_t reserved1;
+
+ /* Stepping (A, B, C, ..., *). * is a wildcard */
+ char stepping;
+
+ /* Sub-stepping (0, 1, ..., *). * is a wildcard */
+ char substepping;
+
+ uint32_t offset;
+ uint32_t reserved2;
+} __packed;
+
+struct intel_package_header {
+ /* DMC container header length in dwords */
+ unsigned char header_len;
+
+ /* value is always 0x01 */
+ unsigned char header_ver;
+
+ unsigned char reserved[10];
+
+ /* Number of valid entries in the FWInfo array below */
+ uint32_t num_entries;
+
+ struct intel_fw_info fw_info[20];
+} __packed;
+
+struct intel_dmc_header {
+ /* value is always 0x40403E3E */
+ uint32_t signature;
+
+ /* DMC binary header length */
+ unsigned char header_len;
+
+ /* 0x01 */
+ unsigned char header_ver;
+
+ /* Reserved */
+ uint16_t dmcc_ver;
+
+ /* Major, Minor */
+ uint32_t project;
+
+ /* Firmware program size (excluding header) in dwords */
+ uint32_t fw_size;
+
+ /* Major Minor version */
+ uint32_t fw_version;
+
+ /* Number of valid MMIO cycles present. */
+ uint32_t mmio_count;
+
+ /* MMIO address */
+ uint32_t mmioaddr[8];
+
+ /* MMIO data */
+ uint32_t mmiodata[8];
+
+ /* FW filename */
+ unsigned char dfile[32];
+
+ uint32_t reserved1[2];
+} __packed;
+
+struct stepping_info {
+ char stepping;
+ char substepping;
+};
+
+static const struct stepping_info skl_stepping_info[] = {
+ {'A', '0'}, {'B', '0'}, {'C', '0'},
+ {'D', '0'}, {'E', '0'}, {'F', '0'},
+ {'G', '0'}, {'H', '0'}, {'I', '0'}
+};
+
+static char intel_get_stepping(struct drm_device *dev)
+{
+ if (IS_SKYLAKE(dev) && (dev->pdev->revision <
+ ARRAY_SIZE(skl_stepping_info)))
+ return skl_stepping_info[dev->pdev->revision].stepping;
+ else
+ return -ENODATA;
+}
+
+static char intel_get_substepping(struct drm_device *dev)
+{
+ if (IS_SKYLAKE(dev) && (dev->pdev->revision <
+ ARRAY_SIZE(skl_stepping_info)))
+ return skl_stepping_info[dev->pdev->revision].substepping;
+ else
+ return -ENODATA;
+}
+
+enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv)
+{
+ enum csr_state state;
+
+ mutex_lock(&dev_priv->csr_lock);
+ state = dev_priv->csr.state;
+ mutex_unlock(&dev_priv->csr_lock);
+
+ return state;
+}
+
+void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
+ enum csr_state state)
+{
+ mutex_lock(&dev_priv->csr_lock);
+ dev_priv->csr.state = state;
+ mutex_unlock(&dev_priv->csr_lock);
+}
+
+void intel_csr_load_program(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ __be32 *payload = dev_priv->csr.dmc_payload;
+ uint32_t i, fw_size;
+
+ if (!IS_GEN9(dev)) {
+ DRM_ERROR("No CSR support available for this platform\n");
+ return;
+ }
+
+ mutex_lock(&dev_priv->csr_lock);
+ fw_size = dev_priv->csr.dmc_fw_size;
+ for (i = 0; i < fw_size; i++)
+ I915_WRITE(CSR_PROGRAM_BASE + i * 4,
+ (u32 __force)payload[i]);
+
+ for (i = 0; i < dev_priv->csr.mmio_count; i++) {
+ I915_WRITE(dev_priv->csr.mmioaddr[i],
+ dev_priv->csr.mmiodata[i]);
+ }
+
+ dev_priv->csr.state = FW_LOADED;
+ mutex_unlock(&dev_priv->csr_lock);
+}
+
+static void finish_csr_load(const struct firmware *fw, void *context)
+{
+ struct drm_i915_private *dev_priv = context;
+ struct drm_device *dev = dev_priv->dev;
+ struct intel_css_header *css_header;
+ struct intel_package_header *package_header;
+ struct intel_dmc_header *dmc_header;
+ struct intel_csr *csr = &dev_priv->csr;
+ char stepping = intel_get_stepping(dev);
+ char substepping = intel_get_substepping(dev);
+ uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
+ uint32_t i;
+ __be32 *dmc_payload;
+ bool fw_loaded = false;
+
+ if (!fw) {
+ i915_firmware_load_error_print(csr->fw_path, 0);
+ goto out;
+ }
+
+ if ((stepping == -ENODATA) || (substepping == -ENODATA)) {
+ DRM_ERROR("Unknown stepping info, firmware loading failed\n");
+ goto out;
+ }
+
+	/* Extract CSS Header information */
+ css_header = (struct intel_css_header *)fw->data;
+ if (sizeof(struct intel_css_header) !=
+ (css_header->header_len * 4)) {
+ DRM_ERROR("Firmware has wrong CSS header length %u bytes\n",
+ (css_header->header_len * 4));
+ goto out;
+ }
+ readcount += sizeof(struct intel_css_header);
+
+	/* Extract Package Header information */
+ package_header = (struct intel_package_header *)
+ &fw->data[readcount];
+ if (sizeof(struct intel_package_header) !=
+ (package_header->header_len * 4)) {
+ DRM_ERROR("Firmware has wrong package header length %u bytes\n",
+ (package_header->header_len * 4));
+ goto out;
+ }
+ readcount += sizeof(struct intel_package_header);
+
+	/* Search for dmc_offset to find the firmware binary. */
+ for (i = 0; i < package_header->num_entries; i++) {
+ if (package_header->fw_info[i].substepping == '*' &&
+ stepping == package_header->fw_info[i].stepping) {
+ dmc_offset = package_header->fw_info[i].offset;
+ break;
+ } else if (stepping == package_header->fw_info[i].stepping &&
+ substepping == package_header->fw_info[i].substepping) {
+ dmc_offset = package_header->fw_info[i].offset;
+ break;
+ } else if (package_header->fw_info[i].stepping == '*' &&
+ package_header->fw_info[i].substepping == '*')
+ dmc_offset = package_header->fw_info[i].offset;
+ }
+ if (dmc_offset == CSR_DEFAULT_FW_OFFSET) {
+ DRM_ERROR("Firmware not supported for %c stepping\n", stepping);
+ goto out;
+ }
+ readcount += dmc_offset;
+
+ /* Extract dmc_header information. */
+ dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
+ if (sizeof(struct intel_dmc_header) != (dmc_header->header_len)) {
+ DRM_ERROR("Firmware has wrong dmc header length %u bytes\n",
+ (dmc_header->header_len));
+ goto out;
+ }
+ readcount += sizeof(struct intel_dmc_header);
+
+ /* Cache the dmc header info. */
+ if (dmc_header->mmio_count > ARRAY_SIZE(csr->mmioaddr)) {
+ DRM_ERROR("Firmware has wrong mmio count %u\n",
+ dmc_header->mmio_count);
+ goto out;
+ }
+ csr->mmio_count = dmc_header->mmio_count;
+ for (i = 0; i < dmc_header->mmio_count; i++) {
+		if (dmc_header->mmioaddr[i] < CSR_MMIO_START_RANGE ||
+		    dmc_header->mmioaddr[i] > CSR_MMIO_END_RANGE) {
+			DRM_ERROR("Firmware has wrong mmio address 0x%x\n",
+ dmc_header->mmioaddr[i]);
+ goto out;
+ }
+ csr->mmioaddr[i] = dmc_header->mmioaddr[i];
+ csr->mmiodata[i] = dmc_header->mmiodata[i];
+ }
+
+ /* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
+ nbytes = dmc_header->fw_size * 4;
+ if (nbytes > CSR_MAX_FW_SIZE) {
+ DRM_ERROR("CSR firmware too big (%u) bytes\n", nbytes);
+ goto out;
+ }
+ csr->dmc_fw_size = dmc_header->fw_size;
+
+ csr->dmc_payload = kmalloc(nbytes, GFP_KERNEL);
+ if (!csr->dmc_payload) {
+ DRM_ERROR("Memory allocation failed for dmc payload\n");
+ goto out;
+ }
+
+ dmc_payload = csr->dmc_payload;
+ for (i = 0; i < dmc_header->fw_size; i++) {
+ uint32_t *tmp = (u32 *)&fw->data[readcount + i * 4];
+ /*
+ * The firmware payload is an array of 32 bit words stored in
+ * little-endian format in the firmware image and programmed
+ * as 32 bit big-endian format to memory.
+ */
+ dmc_payload[i] = cpu_to_be32(*tmp);
+ }
+
+ /* load csr program during system boot, as needed for DC states */
+ intel_csr_load_program(dev);
+ fw_loaded = true;
+
+out:
+ if (fw_loaded)
+ intel_runtime_pm_put(dev_priv);
+ else
+ intel_csr_load_status_set(dev_priv, FW_FAILED);
+
+ release_firmware(fw);
+}
+
+void intel_csr_ucode_init(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_csr *csr = &dev_priv->csr;
+ int ret;
+
+ if (!HAS_CSR(dev))
+ return;
+
+ if (IS_SKYLAKE(dev))
+ csr->fw_path = I915_CSR_SKL;
+ else {
+ DRM_ERROR("Unexpected: no known CSR firmware for platform\n");
+ intel_csr_load_status_set(dev_priv, FW_FAILED);
+ return;
+ }
+
+ /*
+ * Obtain a runtime pm reference, until CSR is loaded,
+ * to avoid entering runtime-suspend.
+ */
+ intel_runtime_pm_get(dev_priv);
+
+ /* CSR supported for platform, load firmware */
+ ret = request_firmware_nowait(THIS_MODULE, true, csr->fw_path,
+ &dev_priv->dev->pdev->dev,
+ GFP_KERNEL, dev_priv,
+ finish_csr_load);
+ if (ret) {
+ i915_firmware_load_error_print(csr->fw_path, ret);
+ intel_csr_load_status_set(dev_priv, FW_FAILED);
+ }
+}
+
+void intel_csr_ucode_fini(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!HAS_CSR(dev))
+ return;
+
+ intel_csr_load_status_set(dev_priv, FW_FAILED);
+ kfree(dev_priv->csr.dmc_payload);
+}
+
+void assert_csr_loaded(struct drm_i915_private *dev_priv)
+{
+ WARN((intel_csr_load_status_get(dev_priv) != FW_LOADED), "CSR is not loaded.\n");
+ WARN(!I915_READ(CSR_PROGRAM_BASE),
+ "CSR program storage start is NULL\n");
+ WARN(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
+ WARN(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
+}
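
The comment in finish_csr_load() above notes that the payload is stored little-endian in the firmware image and programmed big-endian to CSR_PROGRAM_BASE. Below is a minimal user-space sketch of just that conversion, assuming a little-endian host (as on the x86 systems this driver targets); the helper names are invented for the example and are not part of the driver.

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Byte-swap one 32-bit word; on a little-endian host this mirrors cpu_to_be32(). */
static uint32_t to_be32(uint32_t v)
{
	return ((v & 0x000000ffu) << 24) |
	       ((v & 0x0000ff00u) << 8)  |
	       ((v & 0x00ff0000u) >> 8)  |
	       ((v & 0xff000000u) >> 24);
}

/*
 * Convert fw_size little-endian dwords of the payload into the big-endian
 * form that is then written dword-by-dword starting at CSR_PROGRAM_BASE.
 */
static void convert_dmc_payload(const uint8_t *image, size_t fw_size, uint32_t *out)
{
	size_t i;

	for (i = 0; i < fw_size; i++) {
		uint32_t word;

		memcpy(&word, image + i * 4, sizeof(word));
		out[i] = to_be32(word);
	}
}
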
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 9c1e74a3a277..807e15d41a1b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -282,7 +282,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
ddi_translations_fdi = NULL;
ddi_translations_dp = skl_ddi_translations_dp;
n_dp_entries = ARRAY_SIZE(skl_ddi_translations_dp);
- if (dev_priv->vbt.edp_low_vswing) {
+ if (dev_priv->edp_low_vswing) {
ddi_translations_edp = skl_ddi_translations_edp;
n_edp_entries = ARRAY_SIZE(skl_ddi_translations_edp);
} else {
@@ -584,17 +584,18 @@ intel_ddi_get_crtc_new_encoder(struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
struct intel_encoder *ret = NULL;
struct drm_atomic_state *state;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
int num_encoders = 0;
int i;
state = crtc_state->base.state;
- for (i = 0; i < state->num_connector; i++) {
- if (!state->connectors[i] ||
- state->connector_states[i]->crtc != crtc_state->base.crtc)
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->crtc != crtc_state->base.crtc)
continue;
- ret = to_intel_encoder(state->connector_states[i]->best_encoder);
+ ret = to_intel_encoder(connector_state->best_encoder);
num_encoders++;
}
@@ -870,26 +871,26 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(dpll)) {
link_clock = skl_calc_wrpll_link(dev_priv, dpll);
} else {
- link_clock = dpll_ctl1 & DPLL_CRTL1_LINK_RATE_MASK(dpll);
- link_clock >>= DPLL_CRTL1_LINK_RATE_SHIFT(dpll);
+ link_clock = dpll_ctl1 & DPLL_CTRL1_LINK_RATE_MASK(dpll);
+ link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(dpll);
switch (link_clock) {
- case DPLL_CRTL1_LINK_RATE_810:
+ case DPLL_CTRL1_LINK_RATE_810:
link_clock = 81000;
break;
- case DPLL_CRTL1_LINK_RATE_1080:
+ case DPLL_CTRL1_LINK_RATE_1080:
link_clock = 108000;
break;
- case DPLL_CRTL1_LINK_RATE_1350:
+ case DPLL_CTRL1_LINK_RATE_1350:
link_clock = 135000;
break;
- case DPLL_CRTL1_LINK_RATE_1620:
+ case DPLL_CTRL1_LINK_RATE_1620:
link_clock = 162000;
break;
- case DPLL_CRTL1_LINK_RATE_2160:
+ case DPLL_CTRL1_LINK_RATE_2160:
link_clock = 216000;
break;
- case DPLL_CRTL1_LINK_RATE_2700:
+ case DPLL_CTRL1_LINK_RATE_2700:
link_clock = 270000;
break;
default:
@@ -1188,69 +1189,69 @@ found:
if (min_dco_index > 2) {
WARN(1, "No valid values found for the given pixel clock\n");
} else {
- wrpll_params->central_freq = dco_central_freq[min_dco_index];
+ wrpll_params->central_freq = dco_central_freq[min_dco_index];
- switch (dco_central_freq[min_dco_index]) {
- case 9600000000ULL:
+ switch (dco_central_freq[min_dco_index]) {
+ case 9600000000ULL:
wrpll_params->central_freq = 0;
break;
- case 9000000000ULL:
+ case 9000000000ULL:
wrpll_params->central_freq = 1;
break;
- case 8400000000ULL:
+ case 8400000000ULL:
wrpll_params->central_freq = 3;
- }
+ }
- switch (candidate_p0[min_dco_index]) {
- case 1:
+ switch (candidate_p0[min_dco_index]) {
+ case 1:
wrpll_params->pdiv = 0;
break;
- case 2:
+ case 2:
wrpll_params->pdiv = 1;
break;
- case 3:
+ case 3:
wrpll_params->pdiv = 2;
break;
- case 7:
+ case 7:
wrpll_params->pdiv = 4;
break;
- default:
+ default:
WARN(1, "Incorrect PDiv\n");
- }
+ }
- switch (candidate_p2[min_dco_index]) {
- case 5:
+ switch (candidate_p2[min_dco_index]) {
+ case 5:
wrpll_params->kdiv = 0;
break;
- case 2:
+ case 2:
wrpll_params->kdiv = 1;
break;
- case 3:
+ case 3:
wrpll_params->kdiv = 2;
break;
- case 1:
+ case 1:
wrpll_params->kdiv = 3;
break;
- default:
+ default:
WARN(1, "Incorrect KDiv\n");
- }
+ }
- wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
- wrpll_params->qdiv_mode =
+ wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
+ wrpll_params->qdiv_mode =
(wrpll_params->qdiv_ratio == 1) ? 0 : 1;
- dco_freq = candidate_p0[min_dco_index] *
- candidate_p1[min_dco_index] *
- candidate_p2[min_dco_index] * afe_clock;
+ dco_freq = candidate_p0[min_dco_index] *
+ candidate_p1[min_dco_index] *
+ candidate_p2[min_dco_index] * afe_clock;
/*
- * Intermediate values are in Hz.
- * Divide by MHz to match bsepc
- */
- wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
- wrpll_params->dco_fraction =
- div_u64(((div_u64(dco_freq, 24) -
- wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
+ * Intermediate values are in Hz.
+	 * Divide by MHz to match bspec
+ */
+ wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
+ wrpll_params->dco_fraction =
+ div_u64(((div_u64(dco_freq, 24) -
+ wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
}
}
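
The tail of the hunk above converts the chosen DCO frequency into the integer/fractional form the hardware expects: whole multiples of the 24 MHz reference plus a remainder scaled by 0x8000. A standalone sketch of that arithmetic with a hypothetical dco_freq (the real value comes from the candidate divider search above):

#include <stdint.h>
#include <stdio.h>

#define MHZ 1000000ULL

int main(void)
{
	uint64_t dco_freq = 8100000000ULL;	/* hypothetical DCO frequency, in Hz */
	uint64_t dco_integer = dco_freq / (24 * MHZ);
	uint64_t dco_fraction = ((dco_freq / 24 - dco_integer * MHZ) * 0x8000) / MHZ;

	/* For 8.1 GHz: dco_integer = 337, dco_fraction = 0x4000 (a 0.5 MHz remainder). */
	printf("dco_integer=%llu dco_fraction=0x%llx\n",
	       (unsigned long long)dco_integer,
	       (unsigned long long)dco_fraction);
	return 0;
}
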
@@ -1294,13 +1295,13 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
switch (intel_dp->link_bw) {
case DP_LINK_BW_1_62:
- ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810, 0);
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
break;
case DP_LINK_BW_2_7:
- ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, 0);
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, 0);
break;
case DP_LINK_BW_5_4:
- ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700, 0);
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, 0);
break;
}
@@ -1854,7 +1855,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
DPLL_CTRL1_SSC(dpll) |
- DPLL_CRTL1_LINK_RATE_MASK(dpll));
+ DPLL_CTRL1_LINK_RATE_MASK(dpll));
val |= crtc->config->dpll_hw_state.ctrl1 << (dpll * 6);
I915_WRITE(DPLL_CTRL1, val);
@@ -2100,7 +2101,7 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
val = I915_READ(DPLL_CTRL1);
val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) |
- DPLL_CRTL1_LINK_RATE_MASK(dpll));
+ DPLL_CTRL1_LINK_RATE_MASK(dpll));
val |= pll->config.hw_state.ctrl1 << (dpll * 6);
I915_WRITE(DPLL_CTRL1, val);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3094b0807b40..22e6644f755e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -82,8 +82,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
-static int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *old_fb,
+static int intel_set_mode(struct drm_crtc *crtc,
struct drm_atomic_state *state);
static int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
@@ -107,6 +106,8 @@ static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_cr
struct intel_crtc_state *crtc_state);
static int i9xx_get_refclk(const struct intel_crtc_state *crtc_state,
int num_connectors);
+static void intel_crtc_enable_planes(struct drm_crtc *crtc);
+static void intel_crtc_disable_planes(struct drm_crtc *crtc);
static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
{
@@ -451,15 +452,12 @@ static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
int type)
{
struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector *connector;
struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
int i, num_connectors = 0;
- for (i = 0; i < state->num_connector; i++) {
- if (!state->connectors[i])
- continue;
-
- connector_state = state->connector_states[i];
+ for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc_state->base.crtc)
continue;
@@ -1846,7 +1844,8 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
}
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dport)
+ struct intel_digital_port *dport,
+ unsigned int expected_mask)
{
u32 port_mask;
int dpll_reg;
@@ -1859,6 +1858,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
case PORT_C:
port_mask = DPLL_PORTC_READY_MASK;
dpll_reg = DPLL(0);
+ expected_mask <<= 4;
break;
case PORT_D:
port_mask = DPLL_PORTD_READY_MASK;
@@ -1868,9 +1868,9 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
BUG();
}
- if (wait_for((I915_READ(dpll_reg) & port_mask) == 0, 1000))
- WARN(1, "timed out waiting for port %c ready: 0x%08x\n",
- port_name(dport->port), I915_READ(dpll_reg));
+ if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
+ WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
+ port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
@@ -2236,45 +2236,7 @@ static void intel_enable_primary_hw_plane(struct drm_plane *plane,
/* If the pipe isn't enabled, we can't pump pixels and may hang */
assert_pipe_enabled(dev_priv, intel_crtc->pipe);
-
- if (intel_crtc->primary_enabled)
- return;
-
- intel_crtc->primary_enabled = true;
-
- dev_priv->display.update_primary_plane(crtc, plane->fb,
- crtc->x, crtc->y);
-
- /*
- * BDW signals flip done immediately if the plane
- * is disabled, even if the plane enable is already
- * armed to occur at the next vblank :(
- */
- if (IS_BROADWELL(dev))
- intel_wait_for_vblank(dev, intel_crtc->pipe);
-}
-
-/**
- * intel_disable_primary_hw_plane - disable the primary hardware plane
- * @plane: plane to be disabled
- * @crtc: crtc for the plane
- *
- * Disable @plane on @crtc, making sure that the pipe is running first.
- */
-static void intel_disable_primary_hw_plane(struct drm_plane *plane,
- struct drm_crtc *crtc)
-{
- struct drm_device *dev = plane->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- if (WARN_ON(!intel_crtc->active))
- return;
-
- if (!intel_crtc->primary_enabled)
- return;
-
- intel_crtc->primary_enabled = false;
+ to_intel_plane_state(plane->state)->visible = true;
dev_priv->display.update_primary_plane(crtc, plane->fb,
crtc->x, crtc->y);
@@ -2687,6 +2649,8 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_plane *primary = crtc->primary;
+ bool visible = to_intel_plane_state(primary->state)->visible;
struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
unsigned long linear_offset;
@@ -2694,7 +2658,7 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
u32 reg = DSPCNTR(plane);
int pixel_size;
- if (!intel_crtc->primary_enabled) {
+ if (!visible || !fb) {
I915_WRITE(reg, 0);
if (INTEL_INFO(dev)->gen >= 4)
I915_WRITE(DSPSURF(plane), 0);
@@ -2816,6 +2780,8 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_plane *primary = crtc->primary;
+ bool visible = to_intel_plane_state(primary->state)->visible;
struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
unsigned long linear_offset;
@@ -2823,7 +2789,7 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
u32 reg = DSPCNTR(plane);
int pixel_size;
- if (!intel_crtc->primary_enabled) {
+ if (!visible || !fb) {
I915_WRITE(reg, 0);
I915_WRITE(DSPSURF(plane), 0);
POSTING_READ(reg);
@@ -2985,126 +2951,204 @@ void skl_detach_scalers(struct intel_crtc *intel_crtc)
}
}
-static void skylake_update_primary_plane(struct drm_crtc *crtc,
- struct drm_framebuffer *fb,
- int x, int y)
+u32 skl_plane_ctl_format(uint32_t pixel_format)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_gem_object *obj;
- int pipe = intel_crtc->pipe;
- u32 plane_ctl, stride_div, stride;
- u32 tile_height, plane_offset, plane_size;
- unsigned int rotation;
- int x_offset, y_offset;
- unsigned long surf_addr;
- struct drm_plane *plane;
-
- if (!intel_crtc->primary_enabled) {
- I915_WRITE(PLANE_CTL(pipe, 0), 0);
- I915_WRITE(PLANE_SURF(pipe, 0), 0);
- POSTING_READ(PLANE_CTL(pipe, 0));
- return;
- }
-
- plane_ctl = PLANE_CTL_ENABLE |
- PLANE_CTL_PIPE_GAMMA_ENABLE |
- PLANE_CTL_PIPE_CSC_ENABLE;
-
- switch (fb->pixel_format) {
+ u32 plane_ctl_format = 0;
+ switch (pixel_format) {
case DRM_FORMAT_RGB565:
- plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
- break;
- case DRM_FORMAT_XRGB8888:
- plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
- break;
- case DRM_FORMAT_ARGB8888:
- plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
- plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ plane_ctl_format = PLANE_CTL_FORMAT_RGB_565;
break;
case DRM_FORMAT_XBGR8888:
- plane_ctl |= PLANE_CTL_ORDER_RGBX;
- plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
+ plane_ctl_format = PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ plane_ctl_format = PLANE_CTL_FORMAT_XRGB_8888;
break;
+ /*
+	 * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
+ * to be already pre-multiplied. We need to add a knob (or a different
+ * DRM_FORMAT) for user-space to configure that.
+ */
case DRM_FORMAT_ABGR8888:
- plane_ctl |= PLANE_CTL_ORDER_RGBX;
- plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
- plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ plane_ctl_format = PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
+ PLANE_CTL_ALPHA_SW_PREMULTIPLY;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ plane_ctl_format = PLANE_CTL_FORMAT_XRGB_8888 |
+ PLANE_CTL_ALPHA_SW_PREMULTIPLY;
break;
case DRM_FORMAT_XRGB2101010:
- plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
+ plane_ctl_format = PLANE_CTL_FORMAT_XRGB_2101010;
break;
case DRM_FORMAT_XBGR2101010:
- plane_ctl |= PLANE_CTL_ORDER_RGBX;
- plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
+ plane_ctl_format = PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
+ break;
+ case DRM_FORMAT_YUYV:
+ plane_ctl_format = PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
+ break;
+ case DRM_FORMAT_YVYU:
+ plane_ctl_format = PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
+ break;
+ case DRM_FORMAT_UYVY:
+ plane_ctl_format = PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
+ break;
+ case DRM_FORMAT_VYUY:
+ plane_ctl_format = PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
break;
default:
BUG();
}
+ return plane_ctl_format;
+}
- switch (fb->modifier[0]) {
+u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
+{
+ u32 plane_ctl_tiling = 0;
+ switch (fb_modifier) {
case DRM_FORMAT_MOD_NONE:
break;
case I915_FORMAT_MOD_X_TILED:
- plane_ctl |= PLANE_CTL_TILED_X;
+ plane_ctl_tiling = PLANE_CTL_TILED_X;
break;
case I915_FORMAT_MOD_Y_TILED:
- plane_ctl |= PLANE_CTL_TILED_Y;
+ plane_ctl_tiling = PLANE_CTL_TILED_Y;
break;
case I915_FORMAT_MOD_Yf_TILED:
- plane_ctl |= PLANE_CTL_TILED_YF;
+ plane_ctl_tiling = PLANE_CTL_TILED_YF;
break;
default:
- MISSING_CASE(fb->modifier[0]);
+ MISSING_CASE(fb_modifier);
}
+ return plane_ctl_tiling;
+}
- plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
-
- plane = crtc->primary;
- rotation = plane->state->rotation;
+u32 skl_plane_ctl_rotation(unsigned int rotation)
+{
+ u32 plane_ctl_rotation = 0;
switch (rotation) {
+ case BIT(DRM_ROTATE_0):
+ break;
case BIT(DRM_ROTATE_90):
- plane_ctl |= PLANE_CTL_ROTATE_90;
+ plane_ctl_rotation = PLANE_CTL_ROTATE_90;
break;
-
case BIT(DRM_ROTATE_180):
- plane_ctl |= PLANE_CTL_ROTATE_180;
+ plane_ctl_rotation = PLANE_CTL_ROTATE_180;
break;
-
case BIT(DRM_ROTATE_270):
- plane_ctl |= PLANE_CTL_ROTATE_270;
+ plane_ctl_rotation = PLANE_CTL_ROTATE_270;
break;
+ default:
+ MISSING_CASE(rotation);
}
+ return plane_ctl_rotation;
+}
+
+static void skylake_update_primary_plane(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct drm_plane *plane = crtc->primary;
+ bool visible = to_intel_plane_state(plane->state)->visible;
+ struct drm_i915_gem_object *obj;
+ int pipe = intel_crtc->pipe;
+ u32 plane_ctl, stride_div, stride;
+ u32 tile_height, plane_offset, plane_size;
+ unsigned int rotation;
+ int x_offset, y_offset;
+ unsigned long surf_addr;
+ struct intel_crtc_state *crtc_state = intel_crtc->config;
+ struct intel_plane_state *plane_state;
+ int src_x = 0, src_y = 0, src_w = 0, src_h = 0;
+ int dst_x = 0, dst_y = 0, dst_w = 0, dst_h = 0;
+ int scaler_id = -1;
+
+ plane_state = to_intel_plane_state(plane->state);
+
+ if (!visible || !fb) {
+ I915_WRITE(PLANE_CTL(pipe, 0), 0);
+ I915_WRITE(PLANE_SURF(pipe, 0), 0);
+ POSTING_READ(PLANE_CTL(pipe, 0));
+ return;
+ }
+
+ plane_ctl = PLANE_CTL_ENABLE |
+ PLANE_CTL_PIPE_GAMMA_ENABLE |
+ PLANE_CTL_PIPE_CSC_ENABLE;
+
+ plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
+ plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
+ plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
+
+ rotation = plane->state->rotation;
+ plane_ctl |= skl_plane_ctl_rotation(rotation);
+
obj = intel_fb_obj(fb);
stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
fb->pixel_format);
surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj);
+ /*
+ * FIXME: intel_plane_state->src, dst aren't set when transitional
+ * update_plane helpers are called from legacy paths.
+ * Once full atomic crtc is available, below check can be avoided.
+ */
+ if (drm_rect_width(&plane_state->src)) {
+ scaler_id = plane_state->scaler_id;
+ src_x = plane_state->src.x1 >> 16;
+ src_y = plane_state->src.y1 >> 16;
+ src_w = drm_rect_width(&plane_state->src) >> 16;
+ src_h = drm_rect_height(&plane_state->src) >> 16;
+ dst_x = plane_state->dst.x1;
+ dst_y = plane_state->dst.y1;
+ dst_w = drm_rect_width(&plane_state->dst);
+ dst_h = drm_rect_height(&plane_state->dst);
+
+ WARN_ON(x != src_x || y != src_y);
+ } else {
+ src_w = intel_crtc->config->pipe_src_w;
+ src_h = intel_crtc->config->pipe_src_h;
+ }
+
if (intel_rotation_90_or_270(rotation)) {
/* stride = Surface height in tiles */
tile_height = intel_tile_height(dev, fb->bits_per_pixel,
fb->modifier[0]);
stride = DIV_ROUND_UP(fb->height, tile_height);
- x_offset = stride * tile_height - y - (plane->state->src_h >> 16);
+ x_offset = stride * tile_height - y - src_h;
y_offset = x;
- plane_size = ((plane->state->src_w >> 16) - 1) << 16 |
- ((plane->state->src_h >> 16) - 1);
+ plane_size = (src_w - 1) << 16 | (src_h - 1);
} else {
stride = fb->pitches[0] / stride_div;
x_offset = x;
y_offset = y;
- plane_size = ((plane->state->src_h >> 16) - 1) << 16 |
- ((plane->state->src_w >> 16) - 1);
+ plane_size = (src_h - 1) << 16 | (src_w - 1);
}
plane_offset = y_offset << 16 | x_offset;
I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
- I915_WRITE(PLANE_POS(pipe, 0), 0);
I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
+
+ if (scaler_id >= 0) {
+ uint32_t ps_ctrl = 0;
+
+ WARN_ON(!dst_w || !dst_h);
+ ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
+ crtc_state->scaler_state.scalers[scaler_id].mode;
+ I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
+ I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
+ I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
+ I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
+ I915_WRITE(PLANE_POS(pipe, 0), 0);
+ } else {
+ I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
+ }
+
I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
POSTING_READ(PLANE_SURF(pipe, 0));
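
For the 90/270 degree case above, the stride is re-expressed as the surface height in tiles and the source rectangle is swapped into PLANE_SIZE. A small standalone sketch of that arithmetic with made-up framebuffer numbers (the real tile height, stride divisor and source rectangle come from the framebuffer and plane state):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical 1920x1080 Y-tiled framebuffer, scanned out rotated 90 degrees. */
	unsigned int fb_height = 1080, tile_height = 32;
	unsigned int x = 0, y = 0, src_w = 1920, src_h = 1080;

	unsigned int stride = DIV_ROUND_UP(fb_height, tile_height);	/* 34 tiles */
	unsigned int x_offset = stride * tile_height - y - src_h;	/* 1088 - 1080 = 8 */
	unsigned int y_offset = x;
	/* Packed as in the rotated branch above: (src_w - 1) << 16 | (src_h - 1). */
	unsigned int plane_size = (src_w - 1) << 16 | (src_h - 1);

	printf("stride=%u offset=(%u,%u) size=0x%08x\n",
	       stride, x_offset, y_offset, plane_size);
	return 0;
}
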
@@ -3162,6 +3206,19 @@ static void intel_update_primary_planes(struct drm_device *dev)
}
}
+void intel_crtc_reset(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ if (!crtc->active)
+ return;
+
+ intel_crtc_disable_planes(&crtc->base);
+ dev_priv->display.crtc_disable(&crtc->base);
+ dev_priv->display.crtc_enable(&crtc->base);
+ intel_crtc_enable_planes(&crtc->base);
+}
+
void intel_prepare_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3182,8 +3239,11 @@ void intel_prepare_reset(struct drm_device *dev)
* g33 docs say we should at least disable all the planes.
*/
for_each_intel_crtc(dev, crtc) {
- if (crtc->active)
- dev_priv->display.crtc_disable(&crtc->base);
+ if (!crtc->active)
+ continue;
+
+ intel_crtc_disable_planes(&crtc->base);
+ dev_priv->display.crtc_disable(&crtc->base);
}
}
@@ -4383,6 +4443,7 @@ skl_update_scaler_users(
int *scaler_id;
struct drm_framebuffer *fb;
struct intel_crtc_scaler_state *scaler_state;
+ unsigned int rotation;
if (!intel_crtc || !crtc_state)
return 0;
@@ -4398,6 +4459,7 @@ skl_update_scaler_users(
dst_w = drm_rect_width(&plane_state->dst);
dst_h = drm_rect_height(&plane_state->dst);
scaler_id = &plane_state->scaler_id;
+ rotation = plane_state->base.rotation;
} else {
struct drm_display_mode *adjusted_mode =
&crtc_state->base.adjusted_mode;
@@ -4406,8 +4468,12 @@ skl_update_scaler_users(
dst_w = adjusted_mode->hdisplay;
dst_h = adjusted_mode->vdisplay;
scaler_id = &scaler_state->scaler_id;
+ rotation = DRM_ROTATE_0;
}
- need_scaling = (src_w != dst_w || src_h != dst_h);
+
+ need_scaling = intel_rotation_90_or_270(rotation) ?
+ (src_h != dst_w || src_w != dst_h):
+ (src_w != dst_w || src_h != dst_h);
/*
* if plane is being disabled or scaler is no more required or force detach
@@ -4561,38 +4627,6 @@ static void intel_enable_sprite_planes(struct drm_crtc *crtc)
}
}
-/*
- * Disable a plane internally without actually modifying the plane's state.
- * This will allow us to easily restore the plane later by just reprogramming
- * its state.
- */
-static void disable_plane_internal(struct drm_plane *plane)
-{
- struct intel_plane *intel_plane = to_intel_plane(plane);
- struct drm_plane_state *state =
- plane->funcs->atomic_duplicate_state(plane);
- struct intel_plane_state *intel_state = to_intel_plane_state(state);
-
- intel_state->visible = false;
- intel_plane->commit_plane(plane, intel_state);
-
- intel_plane_destroy_state(plane, state);
-}
-
-static void intel_disable_sprite_planes(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- enum pipe pipe = to_intel_crtc(crtc)->pipe;
- struct drm_plane *plane;
- struct intel_plane *intel_plane;
-
- drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
- intel_plane = to_intel_plane(plane);
- if (plane->fb && intel_plane->pipe == pipe)
- disable_plane_internal(plane);
- }
-}
-
void hsw_enable_ips(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -4698,9 +4732,9 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
hsw_enable_ips(intel_crtc);
}
-static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
+static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
{
- if (!enable && intel_crtc->overlay) {
+ if (intel_crtc->overlay) {
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4716,17 +4750,38 @@ static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
*/
}
-static void intel_crtc_enable_planes(struct drm_crtc *crtc)
+/**
+ * intel_post_enable_primary - Perform operations after enabling primary plane
+ * @crtc: the CRTC whose primary plane was just enabled
+ *
+ * Performs potentially sleeping operations that must be done after the primary
+ * plane is enabled, such as updating FBC and IPS. Note that this may be
+ * called due to an explicit primary plane update, or due to an implicit
+ * re-enable that is caused when a sprite plane is updated to no longer
+ * completely hide the primary plane.
+ */
+static void
+intel_post_enable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- intel_enable_primary_hw_plane(crtc->primary, crtc);
- intel_enable_sprite_planes(crtc);
- intel_crtc_update_cursor(crtc, true);
- intel_crtc_dpms_overlay(intel_crtc, true);
+ /*
+ * BDW signals flip done immediately if the plane
+ * is disabled, even if the plane enable is already
+ * armed to occur at the next vblank :(
+ */
+ if (IS_BROADWELL(dev))
+ intel_wait_for_vblank(dev, pipe);
+ /*
+ * FIXME IPS should be fine as long as one plane is
+ * enabled, but in practice it seems to have problems
+ * when going from primary only to sprite only and vice
+ * versa.
+ */
hsw_enable_ips(intel_crtc);
mutex_lock(&dev->struct_mutex);
@@ -4734,31 +4789,102 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
mutex_unlock(&dev->struct_mutex);
/*
- * FIXME: Once we grow proper nuclear flip support out of this we need
- * to compute the mask of flip planes precisely. For the time being
- * consider this a flip from a NULL plane.
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+ * So don't enable underrun reporting before at least some planes
+ * are enabled.
+ * FIXME: Need to fix the logic to work when we turn off all planes
+ * but leave the pipe running.
*/
- intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+ if (IS_GEN2(dev))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
+
+ /* Underruns don't raise interrupts, so check manually. */
+ if (HAS_GMCH_DISPLAY(dev))
+ i9xx_check_fifo_underruns(dev_priv);
}
-static void intel_crtc_disable_planes(struct drm_crtc *crtc)
+/**
+ * intel_pre_disable_primary - Perform operations before disabling primary plane
+ * @crtc: the CRTC whose primary plane is to be disabled
+ *
+ * Performs potentially sleeping operations that must be done before the
+ * primary plane is disabled, such as updating FBC and IPS. Note that this may
+ * be called due to an explicit primary plane update, or due to an implicit
+ * disable that is caused when a sprite plane completely hides the primary
+ * plane.
+ */
+static void
+intel_pre_disable_primary(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- intel_crtc_wait_for_pending_flips(crtc);
+ /*
+ * Gen2 reports pipe underruns whenever all planes are disabled.
+	 * So disable underrun reporting before all the planes get disabled.
+ * FIXME: Need to fix the logic to work when we turn off all planes
+ * but leave the pipe running.
+ */
+ if (IS_GEN2(dev))
+ intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
+
+ /*
+ * Vblank time updates from the shadow to live plane control register
+ * are blocked if the memory self-refresh mode is active at that
+ * moment. So to make sure the plane gets truly disabled, disable
+ * first the self-refresh mode. The self-refresh enable bit in turn
+ * will be checked/applied by the HW only at the next frame start
+ * event which is after the vblank start event, so we need to have a
+ * wait-for-vblank between disabling the plane and the pipe.
+ */
+ if (HAS_GMCH_DISPLAY(dev))
+ intel_set_memory_cxsr(dev_priv, false);
+ mutex_lock(&dev->struct_mutex);
if (dev_priv->fbc.crtc == intel_crtc)
intel_fbc_disable(dev);
+ mutex_unlock(&dev->struct_mutex);
+ /*
+ * FIXME IPS should be fine as long as one plane is
+ * enabled, but in practice it seems to have problems
+ * when going from primary only to sprite only and vice
+ * versa.
+ */
hsw_disable_ips(intel_crtc);
+}
- intel_crtc_dpms_overlay(intel_crtc, false);
- intel_crtc_update_cursor(crtc, false);
- intel_disable_sprite_planes(crtc);
- intel_disable_primary_hw_plane(crtc->primary, crtc);
+static void intel_crtc_enable_planes(struct drm_crtc *crtc)
+{
+ intel_enable_primary_hw_plane(crtc->primary, crtc);
+ intel_enable_sprite_planes(crtc);
+ intel_crtc_update_cursor(crtc, true);
+
+ intel_post_enable_primary(crtc);
+}
+
+static void intel_crtc_disable_planes(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane;
+ int pipe = intel_crtc->pipe;
+
+ intel_crtc_wait_for_pending_flips(crtc);
+
+ intel_pre_disable_primary(crtc);
+
+ intel_crtc_dpms_overlay_disable(intel_crtc);
+ for_each_intel_plane(dev, intel_plane) {
+ if (intel_plane->pipe == pipe) {
+ struct drm_crtc *from = intel_plane->base.crtc;
+
+ intel_plane->disable_plane(&intel_plane->base,
+ from ?: crtc, true);
+ }
+ }
/*
* FIXME: Once we grow proper nuclear flip support out of this we need
@@ -4837,8 +4963,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
if (HAS_PCH_CPT(dev))
cpt_verify_modeset(dev, intel_crtc->pipe);
-
- intel_crtc_enable_planes(crtc);
}
/* IPS only exists on ULT machines and is tied to pipe A. */
@@ -4962,7 +5086,6 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
/* If we change the relative order between pipe/planes enabling, we need
* to change the workaround. */
haswell_mode_set_planes_workaround(intel_crtc);
- intel_crtc_enable_planes(crtc);
}
static void ironlake_pfit_disable(struct intel_crtc *crtc)
@@ -4992,8 +5115,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
if (!intel_crtc->active)
return;
- intel_crtc_disable_planes(crtc);
-
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
@@ -5056,8 +5177,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
if (!intel_crtc->active)
return;
- intel_crtc_disable_planes(crtc);
-
for_each_encoder_on_crtc(dev, crtc, encoder) {
intel_opregion_notify_encoder(encoder, false);
encoder->disable(encoder);
@@ -5398,6 +5517,8 @@ void broxton_init_cdclk(struct drm_device *dev)
broxton_set_cdclk(dev, 624000);
I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
+ POSTING_READ(DBUF_CTL);
+
udelay(10);
if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
@@ -5409,6 +5530,8 @@ void broxton_uninit_cdclk(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
+ POSTING_READ(DBUF_CTL);
+
udelay(10);
if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
@@ -5607,16 +5730,21 @@ static int broxton_calc_cdclk(struct drm_i915_private *dev_priv,
return 144000;
}
-/* compute the max pixel clock for new configuration */
-static int intel_mode_max_pixclk(struct drm_atomic_state *state)
+/* Compute the max pixel clock for new configuration. Uses the atomic state if
+ * that's non-NULL, otherwise looks at the current state. */
+static int intel_mode_max_pixclk(struct drm_device *dev,
+ struct drm_atomic_state *state)
{
- struct drm_device *dev = state->dev;
struct intel_crtc *intel_crtc;
struct intel_crtc_state *crtc_state;
int max_pixclk = 0;
for_each_intel_crtc(dev, intel_crtc) {
- crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+ if (state)
+ crtc_state =
+ intel_atomic_get_crtc_state(state, intel_crtc);
+ else
+ crtc_state = intel_crtc->config;
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
@@ -5630,13 +5758,13 @@ static int intel_mode_max_pixclk(struct drm_atomic_state *state)
return max_pixclk;
}
-static int valleyview_modeset_global_pipes(struct drm_atomic_state *state,
- unsigned *prepare_pipes)
+static int valleyview_modeset_global_pipes(struct drm_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->dev);
- struct intel_crtc *intel_crtc;
- int max_pixclk = intel_mode_max_pixclk(state);
- int cdclk;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int max_pixclk = intel_mode_max_pixclk(state->dev, state);
+ int cdclk, i;
if (max_pixclk < 0)
return max_pixclk;
@@ -5649,10 +5777,20 @@ static int valleyview_modeset_global_pipes(struct drm_atomic_state *state,
if (cdclk == dev_priv->cdclk_freq)
return 0;
+ /* add all active pipes to the state */
+ for_each_crtc(state->dev, crtc) {
+ if (!crtc->state->enable)
+ continue;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
/* disable/enable all currently active pipes while we change cdclk */
- for_each_intel_crtc(state->dev, intel_crtc)
- if (intel_crtc->base.state->enable)
- *prepare_pipes |= (1 << intel_crtc->pipe);
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ if (crtc_state->enable)
+ crtc_state->mode_changed = true;
return 0;
}
@@ -5693,18 +5831,15 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}
-static void valleyview_modeset_global_resources(struct drm_atomic_state *state)
+static void valleyview_modeset_global_resources(struct drm_atomic_state *old_state)
{
- struct drm_device *dev = state->dev;
+ struct drm_device *dev = old_state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int max_pixclk = intel_mode_max_pixclk(state);
+ int max_pixclk = intel_mode_max_pixclk(dev, NULL);
int req_cdclk;
- /* The only reason this can fail is if we fail to add the crtc_state
- * to the atomic state. But that can't happen since the call to
- * intel_mode_max_pixclk() in valleyview_modeset_global_pipes() (which
- * can't have failed otherwise the mode set would be aborted) added all
- * the states already. */
+ /* The path in intel_mode_max_pixclk() with a NULL atomic state should
+ * never fail. */
if (WARN_ON(max_pixclk < 0))
return;
@@ -5801,11 +5936,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
-
- intel_crtc_enable_planes(crtc);
-
- /* Underruns don't raise interrupts, so check manually. */
- i9xx_check_fifo_underruns(dev_priv);
}
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
@@ -5862,21 +5992,6 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
-
- intel_crtc_enable_planes(crtc);
-
- /*
- * Gen2 reports pipe underruns whenever all planes are disabled.
- * So don't enable underrun reporting before at least some planes
- * are enabled.
- * FIXME: Need to fix the logic to work when we turn off all planes
- * but leave the pipe running.
- */
- if (IS_GEN2(dev))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
-
- /* Underruns don't raise interrupts, so check manually. */
- i9xx_check_fifo_underruns(dev_priv);
}
static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -5906,27 +6021,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
return;
/*
- * Gen2 reports pipe underruns whenever all planes are disabled.
- * So diasble underrun reporting before all the planes get disabled.
- * FIXME: Need to fix the logic to work when we turn off all planes
- * but leave the pipe running.
- */
- if (IS_GEN2(dev))
- intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
-
- /*
- * Vblank time updates from the shadow to live plane control register
- * are blocked if the memory self-refresh mode is active at that
- * moment. So to make sure the plane gets truly disabled, disable
- * first the self-refresh mode. The self-refresh enable bit in turn
- * will be checked/applied by the HW only at the next frame start
- * event which is after the vblank start event, so we need to have a
- * wait-for-vblank between disabling the plane and the pipe.
- */
- intel_set_memory_cxsr(dev_priv, false);
- intel_crtc_disable_planes(crtc);
-
- /*
* On gen2 planes are double buffered but the pipe isn't, so we must
* wait for planes to fully turn off before disabling the pipe.
* We also need to wait on all gmch platforms because of the
@@ -5989,9 +6083,11 @@ void intel_crtc_control(struct drm_crtc *crtc, bool enable)
intel_crtc->enabled_power_domains = domains;
dev_priv->display.crtc_enable(crtc);
+ intel_crtc_enable_planes(crtc);
}
} else {
if (intel_crtc->active) {
+ intel_crtc_disable_planes(crtc);
dev_priv->display.crtc_disable(crtc);
domains = intel_crtc->enabled_power_domains;
@@ -6015,6 +6111,8 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc)
enable |= intel_encoder->connectors_active;
intel_crtc_control(crtc, enable);
+
+ crtc->state->active = enable;
}
static void intel_crtc_disable(struct drm_crtc *crtc)
@@ -6026,6 +6124,7 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->state->enable);
+ intel_crtc_disable_planes(crtc);
dev_priv->display.crtc_disable(crtc);
dev_priv->display.off(crtc);
@@ -6383,10 +6482,10 @@ static int skylake_get_display_clock_speed(struct drm_device *dev)
return 540000;
linkrate = (I915_READ(DPLL_CTRL1) &
- DPLL_CRTL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
+ DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
- if (linkrate == DPLL_CRTL1_LINK_RATE_2160 ||
- linkrate == DPLL_CRTL1_LINK_RATE_1080) {
+ if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
+ linkrate == DPLL_CTRL1_LINK_RATE_1080) {
/* vco 8640 */
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
@@ -7374,14 +7473,11 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
struct intel_encoder *encoder;
const intel_limit_t *limit;
struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector *connector;
struct drm_connector_state *connector_state;
int i;
- for (i = 0; i < state->num_connector; i++) {
- if (!state->connectors[i])
- continue;
-
- connector_state = state->connector_states[i];
+ for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != &crtc->base)
continue;
@@ -8065,16 +8161,13 @@ static int ironlake_get_refclk(struct intel_crtc_state *crtc_state)
struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector *connector;
struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
int num_connectors = 0, i;
bool is_lvds = false;
- for (i = 0; i < state->num_connector; i++) {
- if (!state->connectors[i])
- continue;
-
- connector_state = state->connector_states[i];
+ for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc_state->base.crtc)
continue;
@@ -8328,17 +8421,14 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_atomic_state *state = crtc_state->base.state;
+ struct drm_connector *connector;
struct drm_connector_state *connector_state;
struct intel_encoder *encoder;
uint32_t dpll;
int factor, num_connectors = 0, i;
bool is_lvds = false, is_sdvo = false;
- for (i = 0; i < state->num_connector; i++) {
- if (!state->connectors[i])
- continue;
-
- connector_state = state->connector_states[i];
+ for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc_state->base.crtc)
continue;
@@ -9068,11 +9158,11 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
intel_prepare_ddi(dev);
}
-static void broxton_modeset_global_resources(struct drm_atomic_state *state)
+static void broxton_modeset_global_resources(struct drm_atomic_state *old_state)
{
- struct drm_device *dev = state->dev;
+ struct drm_device *dev = old_state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- int max_pixclk = intel_mode_max_pixclk(state);
+ int max_pixclk = intel_mode_max_pixclk(dev, NULL);
int req_cdclk;
/* see the comment in valleyview_modeset_global_resources */
@@ -9625,6 +9715,41 @@ mode_fits_in_fbdev(struct drm_device *dev,
#endif
}
+static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
+ struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_framebuffer *fb,
+ int x, int y)
+{
+ struct drm_plane_state *plane_state;
+ int hdisplay, vdisplay;
+ int ret;
+
+ plane_state = drm_atomic_get_plane_state(state, crtc->primary);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+
+ if (mode)
+ drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
+ else
+ hdisplay = vdisplay = 0;
+
+ ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
+ if (ret)
+ return ret;
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+ plane_state->crtc_x = 0;
+ plane_state->crtc_y = 0;
+ plane_state->crtc_w = hdisplay;
+ plane_state->crtc_h = vdisplay;
+ plane_state->src_x = x << 16;
+ plane_state->src_y = y << 16;
+ plane_state->src_w = hdisplay << 16;
+ plane_state->src_h = vdisplay << 16;
+
+ return 0;
+}
+
bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
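
intel_modeset_setup_plane_state() above fills the plane's crtc rectangle in whole pixels but the source rectangle in the 16.16 fixed-point format DRM uses for plane src coordinates, hence the << 16 shifts. A tiny standalone sketch of that convention with illustrative values:

#include <stdio.h>

int main(void)
{
	/* A 1920x1080 mode scanned out 1:1 from the top-left corner of the fb. */
	int hdisplay = 1920, vdisplay = 1080, x = 0, y = 0;

	unsigned int crtc_w = hdisplay, crtc_h = vdisplay;	/* whole pixels */
	unsigned int src_x = x << 16, src_y = y << 16;		/* 16.16 fixed point */
	unsigned int src_w = hdisplay << 16, src_h = vdisplay << 16;

	printf("crtc %ux%u, src %ux%u (16.16: 0x%x x 0x%x) at (%u,%u)\n",
	       crtc_w, crtc_h, src_w >> 16, src_h >> 16, src_w, src_h,
	       src_x >> 16, src_y >> 16);
	return 0;
}
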
@@ -9641,6 +9766,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_mode_config *config = &dev->mode_config;
struct drm_atomic_state *state = NULL;
struct drm_connector_state *connector_state;
+ struct intel_crtc_state *crtc_state;
int ret, i = -1;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
@@ -9736,6 +9862,14 @@ retry:
connector_state->crtc = crtc;
connector_state->best_encoder = &intel_encoder->base;
+ crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ goto fail;
+ }
+
+ crtc_state->base.enable = true;
+
if (!mode)
mode = &load_detect_mode;
@@ -9758,7 +9892,13 @@ retry:
goto fail;
}
- if (intel_set_mode(crtc, mode, 0, 0, fb, state)) {
+ ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
+ if (ret)
+ goto fail;
+
+ drm_mode_copy(&crtc_state->base.mode, mode);
+
+ if (intel_set_mode(crtc, state)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
@@ -9773,10 +9913,8 @@ retry:
fail:
intel_crtc->new_enabled = crtc->state->enable;
fail_unlock:
- if (state) {
- drm_atomic_state_free(state);
- state = NULL;
- }
+ drm_atomic_state_free(state);
+ state = NULL;
if (ret == -EDEADLK) {
drm_modeset_backoff(ctx);
@@ -9798,6 +9936,8 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_atomic_state *state;
struct drm_connector_state *connector_state;
+ struct intel_crtc_state *crtc_state;
+ int ret;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, connector->name,
@@ -9814,6 +9954,10 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
if (IS_ERR(connector_state))
goto fail;
+ crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+ if (IS_ERR(crtc_state))
+ goto fail;
+
to_intel_connector(connector)->new_encoder = NULL;
intel_encoder->new_crtc = NULL;
intel_crtc->new_enabled = false;
@@ -9821,9 +9965,16 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
connector_state->best_encoder = NULL;
connector_state->crtc = NULL;
- intel_set_mode(crtc, NULL, 0, 0, NULL, state);
+ crtc_state->base.enable = false;
- drm_atomic_state_free(state);
+ ret = intel_modeset_setup_plane_state(state, crtc, NULL, NULL,
+ 0, 0);
+ if (ret)
+ goto fail;
+
+ ret = intel_set_mode(crtc, state);
+ if (ret)
+ goto fail;
if (old->release_fb) {
drm_framebuffer_unregister_private(old->release_fb);
@@ -10107,14 +10258,6 @@ void intel_mark_idle(struct drm_device *dev)
intel_runtime_pm_put(dev_priv);
}
-static void intel_crtc_set_state(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
-{
- kfree(crtc->config);
- crtc->config = crtc_state;
- crtc->base.state = &crtc_state->base;
-}
-
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -10131,7 +10274,6 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
kfree(work);
}
- intel_crtc_set_state(intel_crtc, NULL);
drm_crtc_cleanup(crtc);
kfree(intel_crtc);
@@ -10950,7 +11092,7 @@ out_hang:
return ret;
}
-static struct drm_crtc_helper_funcs intel_helper_funcs = {
+static const struct drm_crtc_helper_funcs intel_helper_funcs = {
.mode_set_base_atomic = intel_pipe_set_base_atomic,
.load_lut = intel_crtc_load_lut,
.atomic_begin = intel_begin_crtc_commit,
@@ -11005,31 +11147,41 @@ static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
}
}
-/**
- * intel_modeset_commit_output_state
- *
- * This function copies the stage display pipe configuration to the real one.
+/* Fixup legacy state after an atomic state swap.
*/
-static void intel_modeset_commit_output_state(struct drm_device *dev)
+static void intel_modeset_fixup_state(struct drm_atomic_state *state)
{
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
- for_each_intel_connector(dev, connector) {
- connector->base.encoder = &connector->new_encoder->base;
+ for_each_intel_connector(state->dev, connector) {
+ connector->base.encoder = connector->base.state->best_encoder;
+ if (connector->base.encoder)
+ connector->base.encoder->crtc =
+ connector->base.state->crtc;
}
- for_each_intel_encoder(dev, encoder) {
- encoder->base.crtc = &encoder->new_crtc->base;
+ /* Update crtc of disabled encoders */
+ for_each_intel_encoder(state->dev, encoder) {
+ int num_connectors = 0;
+
+ for_each_intel_connector(state->dev, connector)
+ if (connector->base.encoder == &encoder->base)
+ num_connectors++;
+
+ if (num_connectors == 0)
+ encoder->base.crtc = NULL;
}
- for_each_intel_crtc(dev, crtc) {
- crtc->base.state->enable = crtc->new_enabled;
- crtc->base.enabled = crtc->new_enabled;
+ for_each_intel_crtc(state->dev, crtc) {
+ crtc->base.enabled = crtc->base.state->enable;
+ crtc->config = to_intel_crtc_state(crtc->base.state);
}
- intel_modeset_update_connector_atomic_state(dev);
+ /* Copy the new configuration to the staged state, to keep the few
+ * pieces of code that haven't been converted yet happy */
+ intel_modeset_update_staged_output_state(state->dev);
}
static void
@@ -11064,7 +11216,8 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_atomic_state *state;
- struct intel_connector *connector;
+ struct drm_connector *connector;
+ struct drm_connector_state *connector_state;
int bpp, i;
if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)))
@@ -11080,15 +11233,12 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc,
state = pipe_config->base.state;
/* Clamp display bpp to EDID value */
- for (i = 0; i < state->num_connector; i++) {
- if (!state->connectors[i])
- continue;
-
- connector = to_intel_connector(state->connectors[i]);
- if (state->connector_states[i]->crtc != &crtc->base)
+ for_each_connector_in_state(state, connector, connector_state, i) {
+ if (connector_state->crtc != &crtc->base)
continue;
- connected_sink_compute_bpp(connector, pipe_config);
+ connected_sink_compute_bpp(to_intel_connector(connector),
+ pipe_config);
}
return bpp;
@@ -11215,14 +11365,11 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state,
struct intel_encoder *encoder)
{
struct intel_encoder *source_encoder;
+ struct drm_connector *connector;
struct drm_connector_state *connector_state;
int i;
- for (i = 0; i < state->num_connector; i++) {
- if (!state->connectors[i])
- continue;
-
- connector_state = state->connector_states[i];
+ for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != &crtc->base)
continue;
@@ -11239,14 +11386,11 @@ static bool check_encoder_cloning(struct drm_atomic_state *state,
struct intel_crtc *crtc)
{
struct intel_encoder *encoder;
+ struct drm_connector *connector;
struct drm_connector_state *connector_state;
int i;
- for (i = 0; i < state->num_connector; i++) {
- if (!state->connectors[i])
- continue;
-
- connector_state = state->connector_states[i];
+ for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != &crtc->base)
continue;
@@ -11262,6 +11406,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct intel_encoder *encoder;
+ struct drm_connector *connector;
struct drm_connector_state *connector_state;
unsigned int used_ports = 0;
int i;
@@ -11271,11 +11416,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
* list to detect the problem on ddi platforms
* where there's just one encoder per digital port.
*/
- for (i = 0; i < state->num_connector; i++) {
- if (!state->connectors[i])
- continue;
-
- connector_state = state->connector_states[i];
+ for_each_connector_in_state(state, connector, connector_state, i) {
if (!connector_state->best_encoder)
continue;
@@ -11311,51 +11452,49 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
struct drm_crtc_state tmp_state;
struct intel_crtc_scaler_state scaler_state;
+ struct intel_dpll_hw_state dpll_hw_state;
+ enum intel_dpll_id shared_dpll;
/* Clear only the intel specific part of the crtc state excluding scalers */
tmp_state = crtc_state->base;
scaler_state = crtc_state->scaler_state;
+ shared_dpll = crtc_state->shared_dpll;
+ dpll_hw_state = crtc_state->dpll_hw_state;
+
memset(crtc_state, 0, sizeof *crtc_state);
+
crtc_state->base = tmp_state;
crtc_state->scaler_state = scaler_state;
+ crtc_state->shared_dpll = shared_dpll;
+ crtc_state->dpll_hw_state = dpll_hw_state;
}
-static struct intel_crtc_state *
+static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_atomic_state *state)
+ struct drm_atomic_state *state,
+ struct intel_crtc_state *pipe_config)
{
struct intel_encoder *encoder;
- struct intel_connector *connector;
+ struct drm_connector *connector;
struct drm_connector_state *connector_state;
- struct intel_crtc_state *pipe_config;
int base_bpp, ret = -EINVAL;
int i;
bool retry = true;
if (!check_encoder_cloning(state, to_intel_crtc(crtc))) {
DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
if (!check_digital_port_conflicts(state)) {
DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
- return ERR_PTR(-EINVAL);
+ return -EINVAL;
}
- pipe_config = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
- if (IS_ERR(pipe_config))
- return pipe_config;
-
clear_intel_crtc_state(pipe_config);
- pipe_config->base.crtc = crtc;
- drm_mode_copy(&pipe_config->base.adjusted_mode, mode);
- drm_mode_copy(&pipe_config->base.mode, mode);
-
pipe_config->cpu_transcoder =
(enum transcoder) to_intel_crtc(crtc)->pipe;
- pipe_config->shared_dpll = DPLL_ID_PRIVATE;
/*
* Sanitize sync polarity flags based on requested ones. If neither
@@ -11404,12 +11543,7 @@ encoder_retry:
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
- for (i = 0; i < state->num_connector; i++) {
- connector = to_intel_connector(state->connectors[i]);
- if (!connector)
- continue;
-
- connector_state = state->connector_states[i];
+ for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc)
continue;
@@ -11448,97 +11582,9 @@ encoder_retry:
DRM_DEBUG_KMS("plane bpp: %i, pipe bpp: %i, dithering: %i\n",
base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
- return pipe_config;
+ return 0;
fail:
- return ERR_PTR(ret);
-}
-
-/* Computes which crtcs are affected and sets the relevant bits in the mask. For
- * simplicity we use the crtc's pipe number (because it's easier to obtain). */
-static void
-intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
- unsigned *prepare_pipes, unsigned *disable_pipes)
-{
- struct intel_crtc *intel_crtc;
- struct drm_device *dev = crtc->dev;
- struct intel_encoder *encoder;
- struct intel_connector *connector;
- struct drm_crtc *tmp_crtc;
-
- *disable_pipes = *modeset_pipes = *prepare_pipes = 0;
-
- /* Check which crtcs have changed outputs connected to them, these need
- * to be part of the prepare_pipes mask. We don't (yet) support global
- * modeset across multiple crtcs, so modeset_pipes will only have one
- * bit set at most. */
- for_each_intel_connector(dev, connector) {
- if (connector->base.encoder == &connector->new_encoder->base)
- continue;
-
- if (connector->base.encoder) {
- tmp_crtc = connector->base.encoder->crtc;
-
- *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
- }
-
- if (connector->new_encoder)
- *prepare_pipes |=
- 1 << connector->new_encoder->new_crtc->pipe;
- }
-
- for_each_intel_encoder(dev, encoder) {
- if (encoder->base.crtc == &encoder->new_crtc->base)
- continue;
-
- if (encoder->base.crtc) {
- tmp_crtc = encoder->base.crtc;
-
- *prepare_pipes |= 1 << to_intel_crtc(tmp_crtc)->pipe;
- }
-
- if (encoder->new_crtc)
- *prepare_pipes |= 1 << encoder->new_crtc->pipe;
- }
-
- /* Check for pipes that will be enabled/disabled ... */
- for_each_intel_crtc(dev, intel_crtc) {
- if (intel_crtc->base.state->enable == intel_crtc->new_enabled)
- continue;
-
- if (!intel_crtc->new_enabled)
- *disable_pipes |= 1 << intel_crtc->pipe;
- else
- *prepare_pipes |= 1 << intel_crtc->pipe;
- }
-
-
- /* set_mode is also used to update properties on life display pipes. */
- intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->new_enabled)
- *prepare_pipes |= 1 << intel_crtc->pipe;
-
- /*
- * For simplicity do a full modeset on any pipe where the output routing
- * changed. We could be more clever, but that would require us to be
- * more careful with calling the relevant encoder->mode_set functions.
- */
- if (*prepare_pipes)
- *modeset_pipes = *prepare_pipes;
-
- /* ... and mask these out. */
- *modeset_pipes &= ~(*disable_pipes);
- *prepare_pipes &= ~(*disable_pipes);
-
- /*
- * HACK: We don't (yet) fully support global modesets. intel_set_config
- * obies this rule, but the modeset restore mode of
- * intel_modeset_setup_hw_state does not.
- */
- *modeset_pipes &= 1 << intel_crtc->pipe;
- *prepare_pipes &= 1 << intel_crtc->pipe;
-
- DRM_DEBUG_KMS("set mode pipe masks: modeset: %x, prepare: %x, disable: %x\n",
- *modeset_pipes, *prepare_pipes, *disable_pipes);
+ return ret;
}
static bool intel_crtc_in_use(struct drm_crtc *crtc)
@@ -11553,13 +11599,22 @@ static bool intel_crtc_in_use(struct drm_crtc *crtc)
return false;
}
+static bool
+needs_modeset(struct drm_crtc_state *state)
+{
+ return state->mode_changed || state->active_changed;
+}
+
static void
-intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
+intel_modeset_update_state(struct drm_atomic_state *state)
{
+ struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
- struct intel_crtc *intel_crtc;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
struct drm_connector *connector;
+ int i;
intel_shared_dpll_commit(dev_priv);
@@ -11567,26 +11622,37 @@ intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
if (!intel_encoder->base.crtc)
continue;
- intel_crtc = to_intel_crtc(intel_encoder->base.crtc);
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ if (crtc == intel_encoder->base.crtc)
+ break;
+
+ if (crtc != intel_encoder->base.crtc)
+ continue;
- if (prepare_pipes & (1 << intel_crtc->pipe))
+ if (crtc_state->enable && needs_modeset(crtc_state))
intel_encoder->connectors_active = false;
}
- intel_modeset_commit_output_state(dev);
+ drm_atomic_helper_swap_state(state->dev, state);
+ intel_modeset_fixup_state(state);
/* Double check state. */
- for_each_intel_crtc(dev, intel_crtc) {
- WARN_ON(intel_crtc->base.state->enable != intel_crtc_in_use(&intel_crtc->base));
+ for_each_crtc(dev, crtc) {
+ WARN_ON(crtc->state->enable != intel_crtc_in_use(crtc));
}
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (!connector->encoder || !connector->encoder->crtc)
continue;
- intel_crtc = to_intel_crtc(connector->encoder->crtc);
+ for_each_crtc_in_state(state, crtc, crtc_state, i)
+ if (crtc == connector->encoder->crtc)
+ break;
+
+ if (crtc != connector->encoder->crtc)
+ continue;
- if (prepare_pipes & (1 << intel_crtc->pipe)) {
+ if (crtc->state->enable && needs_modeset(crtc->state)) {
struct drm_property *dpms_property =
dev->mode_config.dpms_property;
@@ -12125,31 +12191,18 @@ static void update_scanline_offset(struct intel_crtc *crtc)
static struct intel_crtc_state *
intel_modeset_compute_config(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_atomic_state *state,
- unsigned *modeset_pipes,
- unsigned *prepare_pipes,
- unsigned *disable_pipes)
+ struct drm_atomic_state *state)
{
- struct drm_device *dev = crtc->dev;
- struct intel_crtc_state *pipe_config = NULL;
- struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *pipe_config;
int ret = 0;
ret = drm_atomic_add_affected_connectors(state, crtc);
if (ret)
return ERR_PTR(ret);
- intel_modeset_affected_pipes(crtc, modeset_pipes,
- prepare_pipes, disable_pipes);
-
- for_each_intel_crtc_masked(dev, *disable_pipes, intel_crtc) {
- pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
- if (IS_ERR(pipe_config))
- return pipe_config;
-
- pipe_config->base.enable = false;
- }
+ ret = drm_atomic_helper_check_modeset(state->dev, state);
+ if (ret)
+ return ERR_PTR(ret);
/*
* Note this needs changes when we start tracking multiple modes
@@ -12157,52 +12210,78 @@ intel_modeset_compute_config(struct drm_crtc *crtc,
* (i.e. one pipe_config for each crtc) rather than just the one
* for this crtc.
*/
- for_each_intel_crtc_masked(dev, *modeset_pipes, intel_crtc) {
- /* FIXME: For now we still expect modeset_pipes has at most
- * one bit set. */
- if (WARN_ON(&intel_crtc->base != crtc))
- continue;
+ pipe_config = intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));
+ if (IS_ERR(pipe_config))
+ return pipe_config;
+
+ if (!pipe_config->base.enable)
+ return pipe_config;
+
+ ret = intel_modeset_pipe_config(crtc, state, pipe_config);
+ if (ret)
+ return ERR_PTR(ret);
- pipe_config = intel_modeset_pipe_config(crtc, mode, state);
- if (IS_ERR(pipe_config))
- return pipe_config;
+ /* Check things that can only be changed through modeset */
+ if (pipe_config->has_audio !=
+ to_intel_crtc(crtc)->config->has_audio)
+ pipe_config->base.mode_changed = true;
- pipe_config->base.enable = true;
+ /*
+ * Note we have an issue here with infoframes: current code
+ * only updates them on the full mode set path per hw
+ * requirements. So here we should be checking for any
+ * required changes and forcing a mode set.
+ */
- intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
- "[modeset]");
- }
+ intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, "[modeset]");
- return intel_atomic_get_crtc_state(state, to_intel_crtc(crtc));;
+ ret = drm_atomic_helper_check_planes(state->dev, state);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return pipe_config;
}
-static int __intel_set_mode_setup_plls(struct drm_atomic_state *state,
- unsigned modeset_pipes,
- unsigned disable_pipes)
+static int __intel_set_mode_setup_plls(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned clear_pipes = modeset_pipes | disable_pipes;
+ unsigned clear_pipes = 0;
struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *intel_crtc_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
int ret = 0;
+ int i;
if (!dev_priv->display.crtc_compute_clock)
return 0;
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ intel_crtc = to_intel_crtc(crtc);
+ intel_crtc_state = to_intel_crtc_state(crtc_state);
+
+ if (needs_modeset(crtc_state)) {
+ clear_pipes |= 1 << intel_crtc->pipe;
+ intel_crtc_state->shared_dpll = DPLL_ID_PRIVATE;
+ memset(&intel_crtc_state->dpll_hw_state, 0,
+ sizeof(intel_crtc_state->dpll_hw_state));
+ }
+ }
+
ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
if (ret)
goto done;
- for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_crtc_state(state, intel_crtc);
-
- /* Modeset pipes should have a new state by now */
- if (WARN_ON(IS_ERR(crtc_state)))
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (!needs_modeset(crtc_state) || !crtc_state->enable)
continue;
+ intel_crtc = to_intel_crtc(crtc);
+ intel_crtc_state = to_intel_crtc_state(crtc_state);
+
ret = dev_priv->display.crtc_compute_clock(intel_crtc,
- crtc_state);
+ intel_crtc_state);
if (ret) {
intel_shared_dpll_abort_config(dev_priv);
goto done;
@@ -12213,33 +12292,11 @@ done:
return ret;
}
-static int __intel_set_mode(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb,
- struct intel_crtc_state *pipe_config,
- unsigned modeset_pipes,
- unsigned prepare_pipes,
- unsigned disable_pipes)
+/* Code that should eventually be part of atomic_check() */
+static int __intel_set_mode_checks(struct drm_atomic_state *state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_display_mode *saved_mode;
- struct drm_atomic_state *state = pipe_config->base.state;
- struct intel_crtc_state *crtc_state_copy = NULL;
- struct intel_crtc *intel_crtc;
- int ret = 0;
-
- saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
- if (!saved_mode)
- return -ENOMEM;
-
- crtc_state_copy = kmalloc(sizeof(*crtc_state_copy), GFP_KERNEL);
- if (!crtc_state_copy) {
- ret = -ENOMEM;
- goto done;
- }
-
- *saved_mode = crtc->mode;
+ struct drm_device *dev = state->dev;
+ int ret;
/*
* See if the config requires any additional preparation, e.g.
@@ -12249,24 +12306,48 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* adjusted_mode bits in the crtc directly.
*/
if (IS_VALLEYVIEW(dev) || IS_BROXTON(dev)) {
- ret = valleyview_modeset_global_pipes(state, &prepare_pipes);
+ ret = valleyview_modeset_global_pipes(state);
if (ret)
- goto done;
-
- /* may have added more to prepare_pipes than we should */
- prepare_pipes &= ~disable_pipes;
+ return ret;
}
- ret = __intel_set_mode_setup_plls(state, modeset_pipes, disable_pipes);
+ ret = __intel_set_mode_setup_plls(state);
if (ret)
- goto done;
+ return ret;
+
+ return 0;
+}
+
+static int __intel_set_mode(struct drm_crtc *modeset_crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = modeset_crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_atomic_state *state = pipe_config->base.state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int ret = 0;
+ int i;
+
+ ret = __intel_set_mode_checks(state);
+ if (ret < 0)
+ return ret;
+
+ ret = drm_atomic_helper_prepare_planes(dev, state);
+ if (ret)
+ return ret;
- for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
- intel_crtc_disable(&intel_crtc->base);
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (!needs_modeset(crtc_state))
+ continue;
- for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
- if (intel_crtc->base.state->enable)
- dev_priv->display.crtc_disable(&intel_crtc->base);
+ if (!crtc_state->enable) {
+ crtc_state->active = false;
+ intel_crtc_disable(crtc);
+ } else if (crtc->state->enable) {
+ intel_crtc_disable_planes(crtc);
+ dev_priv->display.crtc_disable(crtc);
+ }
}
/* crtc->mode is already used by the ->mode_set callbacks, hence we need
@@ -12276,80 +12357,55 @@ static int __intel_set_mode(struct drm_crtc *crtc,
* pipes; here we assume a single modeset_pipe and only track the
* single crtc and mode.
*/
- if (modeset_pipes) {
- crtc->mode = *mode;
- /* mode_set/enable/disable functions rely on a correct pipe
- * config. */
- intel_crtc_set_state(to_intel_crtc(crtc), pipe_config);
+ if (pipe_config->base.enable && needs_modeset(&pipe_config->base)) {
+ modeset_crtc->mode = pipe_config->base.mode;
/*
* Calculate and store various constants which
* are later needed by vblank and swap-completion
* timestamping. They are derived from true hwmode.
*/
- drm_calc_timestamping_constants(crtc,
+ drm_calc_timestamping_constants(modeset_crtc,
&pipe_config->base.adjusted_mode);
}
/* Only after disabling all output pipelines that will be changed can we
* update the output configuration. */
- intel_modeset_update_state(dev, prepare_pipes);
+ intel_modeset_update_state(state);
- modeset_update_crtc_power_domains(state);
+ /* The state has been swapped above, so state actually contains the
+ * old state now. */
- for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
- struct drm_plane *primary = intel_crtc->base.primary;
- int vdisplay, hdisplay;
+ modeset_update_crtc_power_domains(state);
- drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
- ret = drm_plane_helper_update(primary, &intel_crtc->base,
- fb, 0, 0,
- hdisplay, vdisplay,
- x << 16, y << 16,
- hdisplay << 16, vdisplay << 16);
- }
+ drm_atomic_helper_commit_planes(dev, state);
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc) {
- update_scanline_offset(intel_crtc);
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (!needs_modeset(crtc->state) || !crtc->state->enable)
+ continue;
- dev_priv->display.crtc_enable(&intel_crtc->base);
+ update_scanline_offset(to_intel_crtc(crtc));
+
+ dev_priv->display.crtc_enable(crtc);
+ intel_crtc_enable_planes(crtc);
}
/* FIXME: add subpixel order */
-done:
- if (ret && crtc->state->enable)
- crtc->mode = *saved_mode;
- if (ret == 0 && pipe_config) {
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ drm_atomic_helper_cleanup_planes(dev, state);
- /* The pipe_config will be freed with the atomic state, so
- * make a copy. */
- memcpy(crtc_state_copy, intel_crtc->config,
- sizeof *crtc_state_copy);
- intel_crtc->config = crtc_state_copy;
- intel_crtc->base.state = &crtc_state_copy->base;
- } else {
- kfree(crtc_state_copy);
- }
+ drm_atomic_state_free(state);
- kfree(saved_mode);
- return ret;
+ return 0;
}
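
Reader's note: the rewritten __intel_set_mode() now follows the generic atomic-helper commit sequence instead of per-pipe masks. In outline (each step appears in the hunks above):

/*
 *   __intel_set_mode_checks()          - cdclk/PLL precomputation
 *   drm_atomic_helper_prepare_planes() - pin the new framebuffers
 *   disable CRTCs that need a modeset  - intel_crtc_disable()/crtc_disable()
 *   intel_modeset_update_state()       - swap in the new state
 *   modeset_update_crtc_power_domains()
 *   drm_atomic_helper_commit_planes()
 *   enable modeset CRTCs and planes    - crtc_enable()/intel_crtc_enable_planes()
 *   drm_atomic_helper_cleanup_planes() + drm_atomic_state_free()
 */
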
-static int intel_set_mode_pipes(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb,
- struct intel_crtc_state *pipe_config,
- unsigned modeset_pipes,
- unsigned prepare_pipes,
- unsigned disable_pipes)
+static int intel_set_mode_with_config(struct drm_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
{
int ret;
- ret = __intel_set_mode(crtc, mode, x, y, fb, pipe_config, modeset_pipes,
- prepare_pipes, disable_pipes);
+ ret = __intel_set_mode(crtc, pipe_config);
if (ret == 0)
intel_modeset_check_state(crtc->dev);
@@ -12358,27 +12414,18 @@ static int intel_set_mode_pipes(struct drm_crtc *crtc,
}
static int intel_set_mode(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb,
struct drm_atomic_state *state)
{
struct intel_crtc_state *pipe_config;
- unsigned modeset_pipes, prepare_pipes, disable_pipes;
int ret = 0;
- pipe_config = intel_modeset_compute_config(crtc, mode, state,
- &modeset_pipes,
- &prepare_pipes,
- &disable_pipes);
-
+ pipe_config = intel_modeset_compute_config(crtc, state);
if (IS_ERR(pipe_config)) {
ret = PTR_ERR(pipe_config);
goto out;
}
- ret = intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
- modeset_pipes, prepare_pipes,
- disable_pipes);
+ ret = intel_set_mode_with_config(crtc, pipe_config);
if (ret)
goto out;
@@ -12390,9 +12437,12 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_atomic_state *state;
+ struct intel_crtc *intel_crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
struct drm_connector_state *connector_state;
+ struct intel_crtc_state *crtc_state;
+ int ret;
state = drm_atomic_state_alloc(dev);
if (!state) {
@@ -12430,181 +12480,57 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc)
}
}
- intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb,
- state);
-
- drm_atomic_state_free(state);
-}
-
-#undef for_each_intel_crtc_masked
-
-static void intel_set_config_free(struct intel_set_config *config)
-{
- if (!config)
- return;
-
- kfree(config->save_connector_encoders);
- kfree(config->save_encoder_crtcs);
- kfree(config->save_crtc_enabled);
- kfree(config);
-}
-
-static int intel_set_config_save_state(struct drm_device *dev,
- struct intel_set_config *config)
-{
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- int count;
-
- config->save_crtc_enabled =
- kcalloc(dev->mode_config.num_crtc,
- sizeof(bool), GFP_KERNEL);
- if (!config->save_crtc_enabled)
- return -ENOMEM;
-
- config->save_encoder_crtcs =
- kcalloc(dev->mode_config.num_encoder,
- sizeof(struct drm_crtc *), GFP_KERNEL);
- if (!config->save_encoder_crtcs)
- return -ENOMEM;
+ for_each_intel_crtc(dev, intel_crtc) {
+ if (intel_crtc->new_enabled == intel_crtc->base.enabled)
+ continue;
- config->save_connector_encoders =
- kcalloc(dev->mode_config.num_connector,
- sizeof(struct drm_encoder *), GFP_KERNEL);
- if (!config->save_connector_encoders)
- return -ENOMEM;
+ crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
+ if (IS_ERR(crtc_state)) {
+ DRM_DEBUG_KMS("Failed to add [CRTC:%d] to state: %ld\n",
+ intel_crtc->base.base.id,
+ PTR_ERR(crtc_state));
+ continue;
+ }
- /* Copy data. Note that driver private data is not affected.
- * Should anything bad happen only the expected state is
- * restored, not the drivers personal bookkeeping.
- */
- count = 0;
- for_each_crtc(dev, crtc) {
- config->save_crtc_enabled[count++] = crtc->state->enable;
- }
+ crtc_state->base.enable = intel_crtc->new_enabled;
- count = 0;
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- config->save_encoder_crtcs[count++] = encoder->crtc;
+ if (&intel_crtc->base == crtc)
+ drm_mode_copy(&crtc_state->base.mode, &crtc->mode);
}
- count = 0;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- config->save_connector_encoders[count++] = connector->encoder;
- }
+ intel_modeset_setup_plane_state(state, crtc, &crtc->mode,
+ crtc->primary->fb, crtc->x, crtc->y);
- return 0;
+ ret = intel_set_mode(crtc, state);
+ if (ret)
+ drm_atomic_state_free(state);
}
-static void intel_set_config_restore_state(struct drm_device *dev,
- struct intel_set_config *config)
-{
- struct intel_crtc *crtc;
- struct intel_encoder *encoder;
- struct intel_connector *connector;
- int count;
-
- count = 0;
- for_each_intel_crtc(dev, crtc) {
- crtc->new_enabled = config->save_crtc_enabled[count++];
- }
-
- count = 0;
- for_each_intel_encoder(dev, encoder) {
- encoder->new_crtc =
- to_intel_crtc(config->save_encoder_crtcs[count++]);
- }
-
- count = 0;
- for_each_intel_connector(dev, connector) {
- connector->new_encoder =
- to_intel_encoder(config->save_connector_encoders[count++]);
- }
-}
+#undef for_each_intel_crtc_masked
-static bool
-is_crtc_connector_off(struct drm_mode_set *set)
+static bool intel_connector_in_mode_set(struct intel_connector *connector,
+ struct drm_mode_set *set)
{
- int i;
-
- if (set->num_connectors == 0)
- return false;
-
- if (WARN_ON(set->connectors == NULL))
- return false;
+ int ro;
- for (i = 0; i < set->num_connectors; i++)
- if (set->connectors[i]->encoder &&
- set->connectors[i]->encoder->crtc == set->crtc &&
- set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
+ for (ro = 0; ro < set->num_connectors; ro++)
+ if (set->connectors[ro] == &connector->base)
return true;
return false;
}
-static void
-intel_set_config_compute_mode_changes(struct drm_mode_set *set,
- struct intel_set_config *config)
-{
-
- /* We should be able to check here if the fb has the same properties
- * and then just flip_or_move it */
- if (is_crtc_connector_off(set)) {
- config->mode_changed = true;
- } else if (set->crtc->primary->fb != set->fb) {
- /*
- * If we have no fb, we can only flip as long as the crtc is
- * active, otherwise we need a full mode set. The crtc may
- * be active if we've only disabled the primary plane, or
- * in fastboot situations.
- */
- if (set->crtc->primary->fb == NULL) {
- struct intel_crtc *intel_crtc =
- to_intel_crtc(set->crtc);
-
- if (intel_crtc->active) {
- DRM_DEBUG_KMS("crtc has no fb, will flip\n");
- config->fb_changed = true;
- } else {
- DRM_DEBUG_KMS("inactive crtc, full mode set\n");
- config->mode_changed = true;
- }
- } else if (set->fb == NULL) {
- config->mode_changed = true;
- } else if (set->fb->pixel_format !=
- set->crtc->primary->fb->pixel_format) {
- config->mode_changed = true;
- } else {
- config->fb_changed = true;
- }
- }
-
- if (set->fb && (set->x != set->crtc->x || set->y != set->crtc->y))
- config->fb_changed = true;
-
- if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) {
- DRM_DEBUG_KMS("modes are different, full mode set\n");
- drm_mode_debug_printmodeline(&set->crtc->mode);
- drm_mode_debug_printmodeline(set->mode);
- config->mode_changed = true;
- }
-
- DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
- set->crtc->base.id, config->mode_changed, config->fb_changed);
-}
-
static int
intel_modeset_stage_output_state(struct drm_device *dev,
struct drm_mode_set *set,
- struct intel_set_config *config,
struct drm_atomic_state *state)
{
struct intel_connector *connector;
+ struct drm_connector *drm_connector;
struct drm_connector_state *connector_state;
- struct intel_encoder *encoder;
- struct intel_crtc *crtc;
- int ro;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i, ret;
/* The upper layers ensure that we either disable a crtc or have a list
* of connectors. For paranoia, double-check this. */
@@ -12612,164 +12538,124 @@ intel_modeset_stage_output_state(struct drm_device *dev,
WARN_ON(set->fb && (set->num_connectors == 0));
for_each_intel_connector(dev, connector) {
- /* Otherwise traverse passed in connector list and get encoders
- * for them. */
- for (ro = 0; ro < set->num_connectors; ro++) {
- if (set->connectors[ro] == &connector->base) {
- connector->new_encoder = intel_find_encoder(connector, to_intel_crtc(set->crtc)->pipe);
- break;
- }
+ bool in_mode_set = intel_connector_in_mode_set(connector, set);
+
+ if (!in_mode_set && connector->base.state->crtc != set->crtc)
+ continue;
+
+ connector_state =
+ drm_atomic_get_connector_state(state, &connector->base);
+ if (IS_ERR(connector_state))
+ return PTR_ERR(connector_state);
+
+ if (in_mode_set) {
+ int pipe = to_intel_crtc(set->crtc)->pipe;
+ connector_state->best_encoder =
+ &intel_find_encoder(connector, pipe)->base;
}
+ if (connector->base.state->crtc != set->crtc)
+ continue;
+
/* If we disable the crtc, disable all its connectors. Also, if
* the connector is on the changing crtc but not on the new
* connector list, disable it. */
- if ((!set->fb || ro == set->num_connectors) &&
- connector->base.encoder &&
- connector->base.encoder->crtc == set->crtc) {
- connector->new_encoder = NULL;
+ if (!set->fb || !in_mode_set) {
+ connector_state->best_encoder = NULL;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
connector->base.base.id,
connector->base.name);
}
-
-
- if (&connector->new_encoder->base != connector->base.encoder) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] encoder changed, full mode switch\n",
- connector->base.base.id,
- connector->base.name);
- config->mode_changed = true;
- }
}
/* connector->new_encoder is now updated for all connectors. */
- /* Update crtc of enabled connectors. */
- for_each_intel_connector(dev, connector) {
- struct drm_crtc *new_crtc;
+ for_each_connector_in_state(state, drm_connector, connector_state, i) {
+ connector = to_intel_connector(drm_connector);
+
+ if (!connector_state->best_encoder) {
+ ret = drm_atomic_set_crtc_for_connector(connector_state,
+ NULL);
+ if (ret)
+ return ret;
- if (!connector->new_encoder)
continue;
+ }
+
+ if (intel_connector_in_mode_set(connector, set)) {
+ struct drm_crtc *crtc = connector->base.state->crtc;
- new_crtc = connector->new_encoder->base.crtc;
+ /* If this connector was in a previous crtc, add it
+ * to the state. We might need to disable it. */
+ if (crtc) {
+ crtc_state =
+ drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
- for (ro = 0; ro < set->num_connectors; ro++) {
- if (set->connectors[ro] == &connector->base)
- new_crtc = set->crtc;
+ ret = drm_atomic_set_crtc_for_connector(connector_state,
+ set->crtc);
+ if (ret)
+ return ret;
}
/* Make sure the new CRTC will work with the encoder */
- if (!drm_encoder_crtc_ok(&connector->new_encoder->base,
- new_crtc)) {
+ if (!drm_encoder_crtc_ok(connector_state->best_encoder,
+ connector_state->crtc)) {
return -EINVAL;
}
- connector->new_encoder->new_crtc = to_intel_crtc(new_crtc);
-
- connector_state =
- drm_atomic_get_connector_state(state, &connector->base);
- if (IS_ERR(connector_state))
- return PTR_ERR(connector_state);
-
- connector_state->crtc = new_crtc;
- connector_state->best_encoder = &connector->new_encoder->base;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
connector->base.base.id,
connector->base.name,
- new_crtc->base.id);
- }
+ connector_state->crtc->base.id);
- /* Check for any encoders that needs to be disabled. */
- for_each_intel_encoder(dev, encoder) {
- int num_connectors = 0;
- for_each_intel_connector(dev, connector) {
- if (connector->new_encoder == encoder) {
- WARN_ON(!connector->new_encoder->new_crtc);
- num_connectors++;
- }
- }
+ if (connector_state->best_encoder != &connector->encoder->base)
+ connector->encoder =
+ to_intel_encoder(connector_state->best_encoder);
+ }
- if (num_connectors == 0)
- encoder->new_crtc = NULL;
- else if (num_connectors > 1)
- return -EINVAL;
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ ret = drm_atomic_add_affected_connectors(state, crtc);
+ if (ret)
+ return ret;
- /* Only now check for crtc changes so we don't miss encoders
- * that will be disabled. */
- if (&encoder->new_crtc->base != encoder->base.crtc) {
- DRM_DEBUG_KMS("[ENCODER:%d:%s] crtc changed, full mode switch\n",
- encoder->base.base.id,
- encoder->base.name);
- config->mode_changed = true;
- }
+ crtc_state->enable = drm_atomic_connectors_for_crtc(state, crtc);
}
- /* Now we've also updated encoder->new_crtc for all encoders. */
- for_each_intel_connector(dev, connector) {
- connector_state =
- drm_atomic_get_connector_state(state, &connector->base);
- if (IS_ERR(connector_state))
- return PTR_ERR(connector_state);
- if (connector->new_encoder) {
- if (connector->new_encoder != connector->encoder)
- connector->encoder = connector->new_encoder;
- } else {
- connector_state->crtc = NULL;
- connector_state->best_encoder = NULL;
- }
- }
- for_each_intel_crtc(dev, crtc) {
- crtc->new_enabled = false;
+ ret = intel_modeset_setup_plane_state(state, set->crtc, set->mode,
+ set->fb, set->x, set->y);
+ if (ret)
+ return ret;
- for_each_intel_encoder(dev, encoder) {
- if (encoder->new_crtc == crtc) {
- crtc->new_enabled = true;
- break;
- }
- }
+ crtc_state = drm_atomic_get_crtc_state(state, set->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
- if (crtc->new_enabled != crtc->base.state->enable) {
- DRM_DEBUG_KMS("[CRTC:%d] %sabled, full mode switch\n",
- crtc->base.base.id,
- crtc->new_enabled ? "en" : "dis");
- config->mode_changed = true;
- }
- }
+ if (set->mode)
+ drm_mode_copy(&crtc_state->mode, set->mode);
+
+ if (set->num_connectors)
+ crtc_state->active = true;
return 0;
}
-static void disable_crtc_nofb(struct intel_crtc *crtc)
+static bool primary_plane_visible(struct drm_crtc *crtc)
{
- struct drm_device *dev = crtc->base.dev;
- struct intel_encoder *encoder;
- struct intel_connector *connector;
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(crtc->primary->state);
- DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n",
- pipe_name(crtc->pipe));
-
- for_each_intel_connector(dev, connector) {
- if (connector->new_encoder &&
- connector->new_encoder->new_crtc == crtc)
- connector->new_encoder = NULL;
- }
-
- for_each_intel_encoder(dev, encoder) {
- if (encoder->new_crtc == crtc)
- encoder->new_crtc = NULL;
- }
-
- crtc->new_enabled = false;
+ return plane_state->visible;
}
static int intel_crtc_set_config(struct drm_mode_set *set)
{
struct drm_device *dev;
- struct drm_mode_set save_set;
struct drm_atomic_state *state = NULL;
- struct intel_set_config *config;
struct intel_crtc_state *pipe_config;
- unsigned modeset_pipes, prepare_pipes, disable_pipes;
+ bool primary_plane_was_visible;
int ret;
BUG_ON(!set);
@@ -12790,85 +12676,42 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
dev = set->crtc->dev;
- ret = -ENOMEM;
- config = kzalloc(sizeof(*config), GFP_KERNEL);
- if (!config)
- goto out_config;
-
- ret = intel_set_config_save_state(dev, config);
- if (ret)
- goto out_config;
-
- save_set.crtc = set->crtc;
- save_set.mode = &set->crtc->mode;
- save_set.x = set->crtc->x;
- save_set.y = set->crtc->y;
- save_set.fb = set->crtc->primary->fb;
-
- /* Compute whether we need a full modeset, only an fb base update or no
- * change at all. In the future we might also check whether only the
- * mode changed, e.g. for LVDS where we only change the panel fitter in
- * such cases. */
- intel_set_config_compute_mode_changes(set, config);
-
state = drm_atomic_state_alloc(dev);
- if (!state) {
- ret = -ENOMEM;
- goto out_config;
- }
+ if (!state)
+ return -ENOMEM;
state->acquire_ctx = dev->mode_config.acquire_ctx;
- ret = intel_modeset_stage_output_state(dev, set, config, state);
+ ret = intel_modeset_stage_output_state(dev, set, state);
if (ret)
- goto fail;
+ goto out;
- pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
- state,
- &modeset_pipes,
- &prepare_pipes,
- &disable_pipes);
+ pipe_config = intel_modeset_compute_config(set->crtc, state);
if (IS_ERR(pipe_config)) {
ret = PTR_ERR(pipe_config);
- goto fail;
- } else if (pipe_config) {
- if (pipe_config->has_audio !=
- to_intel_crtc(set->crtc)->config->has_audio)
- config->mode_changed = true;
-
- /*
- * Note we have an issue here with infoframes: current code
- * only updates them on the full mode set path per hw
- * requirements. So here we should be checking for any
- * required changes and forcing a mode set.
- */
+ goto out;
}
intel_update_pipe_size(to_intel_crtc(set->crtc));
- if (config->mode_changed) {
- ret = intel_set_mode_pipes(set->crtc, set->mode,
- set->x, set->y, set->fb, pipe_config,
- modeset_pipes, prepare_pipes,
- disable_pipes);
- } else if (config->fb_changed) {
- struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
- struct drm_plane *primary = set->crtc->primary;
- int vdisplay, hdisplay;
+ primary_plane_was_visible = primary_plane_visible(set->crtc);
- drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay);
- ret = drm_plane_helper_update(primary, set->crtc, set->fb,
- 0, 0, hdisplay, vdisplay,
- set->x << 16, set->y << 16,
- hdisplay << 16, vdisplay << 16);
+ ret = intel_set_mode_with_config(set->crtc, pipe_config);
+
+ if (ret == 0 &&
+ pipe_config->base.enable &&
+ pipe_config->base.planes_changed &&
+ !needs_modeset(&pipe_config->base)) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
/*
* We need to make sure the primary plane is re-enabled if it
* has previously been turned off.
*/
- if (!intel_crtc->primary_enabled && ret == 0) {
+ if (ret == 0 && !primary_plane_was_visible &&
+ primary_plane_visible(set->crtc)) {
WARN_ON(!intel_crtc->active);
- intel_enable_primary_hw_plane(set->crtc->primary, set->crtc);
+ intel_post_enable_primary(set->crtc);
}
/*
@@ -12886,33 +12729,11 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
if (ret) {
DRM_DEBUG_KMS("failed to set mode on [CRTC:%d], err = %d\n",
set->crtc->base.id, ret);
-fail:
- intel_set_config_restore_state(dev, config);
-
- drm_atomic_state_clear(state);
-
- /*
- * HACK: if the pipe was on, but we didn't have a framebuffer,
- * force the pipe off to avoid oopsing in the modeset code
- * due to fb==NULL. This should only happen during boot since
- * we don't yet reconstruct the FB from the hardware state.
- */
- if (to_intel_crtc(save_set.crtc)->new_enabled && !save_set.fb)
- disable_crtc_nofb(to_intel_crtc(save_set.crtc));
-
- /* Try to restore the config */
- if (config->mode_changed &&
- intel_set_mode(save_set.crtc, save_set.mode,
- save_set.x, save_set.y, save_set.fb,
- state))
- DRM_ERROR("failed to restore config after modeset failure\n");
}
-out_config:
- if (state)
+out:
+ if (ret)
drm_atomic_state_free(state);
-
- intel_set_config_free(config);
return ret;
}
@@ -13133,6 +12954,36 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
}
}
+int
+skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
+{
+ int max_scale;
+ struct drm_device *dev;
+ struct drm_i915_private *dev_priv;
+ int crtc_clock, cdclk;
+
+ if (!intel_crtc || !crtc_state)
+ return DRM_PLANE_HELPER_NO_SCALING;
+
+ dev = intel_crtc->base.dev;
+ dev_priv = dev->dev_private;
+ crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
+ cdclk = dev_priv->display.get_display_clock_speed(dev);
+
+ if (!crtc_clock || !cdclk)
+ return DRM_PLANE_HELPER_NO_SCALING;
+
+ /*
+ * skl max scale is the lower of:
+ * just under 3.0 (the -1 keeps it below 3), or
+ * cdclk/crtc_clock
+ */
+ max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
+
+ return max_scale;
+}
+
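
A worked example of the fixed-point math above, with assumed clock values (both in kHz, as elsewhere in the driver); the helper name and the numbers are illustrative only, not taken from the patch. The result is a 16.16 fixed-point scale factor capped just below 3.0:

/* Illustrative sketch: 540000 kHz cdclk and a 148500 kHz pixel clock
 * are assumptions, not values from the patch. */
static int skl_max_scale_example(void)
{
	int cdclk = 540000;
	int crtc_clock = 148500;

	/* cdclk/crtc_clock in 16.16 fixed point: ~3.63 -> 238080 */
	int ratio = (1 << 8) * ((cdclk << 8) / crtc_clock);

	/* cap is (1 << 16) * 3 - 1 = 196607, i.e. just under 3.0x */
	return min(ratio, (1 << 16) * 3 - 1);
}
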
static int
intel_check_primary_plane(struct drm_plane *plane,
struct intel_plane_state *state)
@@ -13141,29 +12992,40 @@ intel_check_primary_plane(struct drm_plane *plane,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = state->base.crtc;
struct intel_crtc *intel_crtc;
+ struct intel_crtc_state *crtc_state;
struct drm_framebuffer *fb = state->base.fb;
struct drm_rect *dest = &state->dst;
struct drm_rect *src = &state->src;
const struct drm_rect *clip = &state->clip;
bool can_position = false;
+ int max_scale = DRM_PLANE_HELPER_NO_SCALING;
+ int min_scale = DRM_PLANE_HELPER_NO_SCALING;
int ret;
crtc = crtc ? crtc : plane->crtc;
intel_crtc = to_intel_crtc(crtc);
+ crtc_state = state->base.state ?
+ intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL;
- if (INTEL_INFO(dev)->gen >= 9)
+ if (INTEL_INFO(dev)->gen >= 9) {
+ min_scale = 1;
+ max_scale = skl_max_scale(intel_crtc, crtc_state);
can_position = true;
+ }
ret = drm_plane_helper_check_update(plane, crtc, fb,
src, dest, clip,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
+ min_scale,
+ max_scale,
can_position, true,
&state->visible);
if (ret)
return ret;
if (intel_crtc->active) {
+ struct intel_plane_state *old_state =
+ to_intel_plane_state(plane->state);
+
intel_crtc->atomic.wait_for_flips = true;
/*
@@ -13176,20 +13038,20 @@ intel_check_primary_plane(struct drm_plane *plane,
* one is done too late. We eventually need to unify
* this.
*/
- if (intel_crtc->primary_enabled &&
+ if (state->visible &&
INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
dev_priv->fbc.crtc == intel_crtc &&
state->base.rotation != BIT(DRM_ROTATE_0)) {
intel_crtc->atomic.disable_fbc = true;
}
- if (state->visible) {
+ if (state->visible && !old_state->visible) {
/*
* BDW signals flip done immediately if the plane
* is disabled, even if the plane enable is already
* armed to occur at the next vblank :(
*/
- if (IS_BROADWELL(dev) && !intel_crtc->primary_enabled)
+ if (IS_BROADWELL(dev))
intel_crtc->atomic.wait_vblank = true;
}
@@ -13202,6 +13064,13 @@ intel_check_primary_plane(struct drm_plane *plane,
intel_crtc->atomic.update_wm = true;
}
+ if (INTEL_INFO(dev)->gen >= 9) {
+ ret = skl_update_scaler_users(intel_crtc, crtc_state,
+ to_intel_plane(plane), state, 0);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -13224,27 +13093,26 @@ intel_commit_primary_plane(struct drm_plane *plane,
crtc->y = src->y1 >> 16;
if (intel_crtc->active) {
- if (state->visible) {
+ if (state->visible)
/* FIXME: kill this fastboot hack */
intel_update_pipe_size(intel_crtc);
- intel_crtc->primary_enabled = true;
-
- dev_priv->display.update_primary_plane(crtc, plane->fb,
- crtc->x, crtc->y);
- } else {
- /*
- * If clipping results in a non-visible primary plane,
- * we'll disable the primary plane. Note that this is
- * a bit different than what happens if userspace
- * explicitly disables the plane by passing fb=0
- * because plane->fb still gets set and pinned.
- */
- intel_disable_primary_hw_plane(plane, crtc);
- }
+ dev_priv->display.update_primary_plane(crtc, plane->fb,
+ crtc->x, crtc->y);
}
}
+static void
+intel_disable_primary_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ bool force)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ dev_priv->display.update_primary_plane(crtc, NULL, 0, 0);
+}
+
static void intel_begin_crtc_commit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@@ -13381,11 +13249,15 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
primary->can_scale = false;
primary->max_downscale = 1;
+ if (INTEL_INFO(dev)->gen >= 9) {
+ primary->can_scale = true;
+ }
state->scaler_id = -1;
primary->pipe = pipe;
primary->plane = pipe;
primary->check_plane = intel_check_primary_plane;
primary->commit_plane = intel_commit_primary_plane;
+ primary->disable_plane = intel_disable_primary_plane;
primary->ckey.flags = I915_SET_COLORKEY_NONE;
if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
primary->plane = !pipe;
@@ -13491,6 +13363,22 @@ finish:
}
static void
+intel_disable_cursor_plane(struct drm_plane *plane,
+ struct drm_crtc *crtc,
+ bool force)
+{
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+ if (!force) {
+ plane->fb = NULL;
+ intel_crtc->cursor_bo = NULL;
+ intel_crtc->cursor_addr = 0;
+ }
+
+ intel_crtc_update_cursor(crtc, false);
+}
+
+static void
intel_commit_cursor_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
@@ -13549,6 +13437,7 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
state->scaler_id = -1;
cursor->check_plane = intel_check_cursor_plane;
cursor->commit_plane = intel_commit_cursor_plane;
+ cursor->disable_plane = intel_disable_cursor_plane;
drm_universal_plane_init(dev, &cursor->base, 0,
&intel_plane_funcs,
@@ -13607,7 +13496,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
if (!crtc_state)
goto fail;
- intel_crtc_set_state(intel_crtc, crtc_state);
+ intel_crtc->config = crtc_state;
+ intel_crtc->base.state = &crtc_state->base;
crtc_state->base.crtc = &intel_crtc->base;
/* initialize shared scalers */
@@ -14684,8 +14574,9 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
* Temporarily change the plane mapping and disable everything
* ... */
plane = crtc->plane;
+ to_intel_plane_state(crtc->base.primary->state)->visible = true;
crtc->plane = !plane;
- crtc->primary_enabled = true;
+ intel_crtc_disable_planes(&crtc->base);
dev_priv->display.crtc_disable(&crtc->base);
crtc->plane = plane;
@@ -14862,6 +14753,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
int i;
for_each_intel_crtc(dev, crtc) {
+ struct drm_plane *primary = crtc->base.primary;
+ struct intel_plane_state *plane_state;
+
memset(crtc->config, 0, sizeof(*crtc->config));
crtc->config->quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
@@ -14871,7 +14765,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->base.state->enable = crtc->active;
crtc->base.enabled = crtc->active;
- crtc->primary_enabled = primary_get_hw_state(crtc);
+
+ plane_state = to_intel_plane_state(primary->state);
+ plane_state->visible = primary_get_hw_state(crtc);
DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
crtc->base.base.id,
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 937ba31d8dde..82dcc6ed8b1d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -41,6 +41,12 @@
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
+/* Compliance test status bits */
+#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
+#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
+#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
+#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
+
struct dp_link_dpll {
int link_bw;
struct dpll dpll;
@@ -84,8 +90,8 @@ static const struct dp_link_dpll chv_dpll[] = {
{ DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */
{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
-/* Skylake supports following rates */
-static const int gen9_rates[] = { 162000, 216000, 270000,
+
+static const int skl_rates[] = { 162000, 216000, 270000,
324000, 432000, 540000 };
static const int chv_rates[] = { 162000, 202500, 210000, 216000,
243000, 270000, 324000, 405000,
@@ -1098,30 +1104,30 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock)
ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
switch (link_clock / 2) {
case 81000:
- ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
SKL_DPLL0);
break;
case 135000:
- ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
SKL_DPLL0);
break;
case 270000:
- ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
SKL_DPLL0);
break;
case 162000:
- ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620,
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
SKL_DPLL0);
break;
/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
results in CDCLK change. Need to handle the change of CDCLK by
disabling pipes and re-enabling them */
case 108000:
- ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080,
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
SKL_DPLL0);
break;
case 216000:
- ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160,
+ ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
SKL_DPLL0);
break;
@@ -1161,9 +1167,9 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
- if (INTEL_INFO(dev)->gen >= 9) {
- *source_rates = gen9_rates;
- return ARRAY_SIZE(gen9_rates);
+ if (IS_SKYLAKE(dev)) {
+ *source_rates = skl_rates;
+ return ARRAY_SIZE(skl_rates);
} else if (IS_CHERRYVIEW(dev)) {
*source_rates = chv_rates;
return ARRAY_SIZE(chv_rates);
@@ -2491,6 +2497,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
uint32_t dp_reg = I915_READ(intel_dp->output_reg);
+ unsigned int lane_mask = 0x0;
if (WARN_ON(dp_reg & DP_PORT_EN))
return;
@@ -2509,7 +2516,8 @@ static void intel_enable_dp(struct intel_encoder *encoder)
pps_unlock(intel_dp);
if (IS_VALLEYVIEW(dev))
- vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
+ vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
+ lane_mask);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
@@ -2726,7 +2734,7 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
to_intel_crtc(encoder->base.crtc);
enum dpio_channel ch = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
- int data, i;
+ int data, i, stagger;
u32 val;
mutex_lock(&dev_priv->dpio_lock);
@@ -2766,7 +2774,38 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
}
/* Data lane stagger programming */
- /* FIXME: Fix up value only after power analysis */
+ if (intel_crtc->config->port_clock > 270000)
+ stagger = 0x18;
+ else if (intel_crtc->config->port_clock > 135000)
+ stagger = 0xd;
+ else if (intel_crtc->config->port_clock > 67500)
+ stagger = 0x7;
+ else if (intel_crtc->config->port_clock > 33750)
+ stagger = 0x4;
+ else
+ stagger = 0x2;
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val |= DPIO_TX2_STAGGER_MASK(0x1f);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val |= DPIO_TX2_STAGGER_MASK(0x1f);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
+ DPIO_LANESTAGGER_STRAP(stagger) |
+ DPIO_LANESTAGGER_STRAP_OVRD |
+ DPIO_TX1_STAGGER_MASK(0x1f) |
+ DPIO_TX1_STAGGER_MULT(6) |
+ DPIO_TX2_STAGGER_MULT(0));
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
+ DPIO_LANESTAGGER_STRAP(stagger) |
+ DPIO_LANESTAGGER_STRAP_OVRD |
+ DPIO_TX1_STAGGER_MASK(0x1f) |
+ DPIO_TX1_STAGGER_MULT(7) |
+ DPIO_TX2_STAGGER_MULT(5));
mutex_unlock(&dev_priv->dpio_lock);
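
A compact way to read the stagger programming above: the value is picked from a threshold table keyed on port_clock (in kHz), with each threshold half of the previous one. Pulled out as an illustrative helper (not part of the patch):

/* Illustrative only; mirrors the if/else chain in the hunk above. */
static int chv_data_lane_stagger(int port_clock)
{
	/* port_clock is in kHz; thresholds halve at each step */
	if (port_clock > 270000)
		return 0x18;
	else if (port_clock > 135000)
		return 0xd;
	else if (port_clock > 67500)
		return 0x7;
	else if (port_clock > 33750)
		return 0x4;
	else
		return 0x2;
}
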
@@ -2894,7 +2933,7 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
if (IS_BROXTON(dev))
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
else if (INTEL_INFO(dev)->gen >= 9) {
- if (dev_priv->vbt.edp_low_vswing && port == PORT_A)
+ if (dev_priv->edp_low_vswing && port == PORT_A)
return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
} else if (IS_VALLEYVIEW(dev))
@@ -3547,7 +3586,8 @@ static bool
intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
uint8_t dp_train_pat)
{
- memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
+ if (!intel_dp->train_set_valid)
+ memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp_set_signal_levels(intel_dp, DP);
return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
}
@@ -3660,6 +3700,23 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
break;
}
+ /*
+ * if we used previously trained voltage and pre-emphasis values
+ * and we don't get clock recovery, reset link training values
+ */
+ if (intel_dp->train_set_valid) {
+ DRM_DEBUG_KMS("clock recovery not ok, reset");
+ /* clear the flag as we are not reusing train set */
+ intel_dp->train_set_valid = false;
+ if (!intel_dp_reset_link_train(intel_dp, &DP,
+ DP_TRAINING_PATTERN_1 |
+ DP_LINK_SCRAMBLING_DISABLE)) {
+ DRM_ERROR("failed to enable link training\n");
+ return;
+ }
+ continue;
+ }
+
/* Check to see if we've tried the max voltage */
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
@@ -3737,6 +3794,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+ intel_dp->train_set_valid = false;
intel_dp_start_link_train(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
@@ -3752,6 +3810,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
/* Try 5 times, then try clock recovery if that fails */
if (tries > 5) {
+ intel_dp->train_set_valid = false;
intel_dp_start_link_train(intel_dp);
intel_dp_set_link_train(intel_dp, &DP,
training_pattern |
@@ -3773,9 +3832,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
intel_dp->DP = DP;
- if (channel_eq)
+ if (channel_eq) {
+ intel_dp->train_set_valid = true;
DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
-
+ }
}
void intel_dp_stop_link_train(struct intel_dp *intel_dp)
@@ -4058,6 +4118,39 @@ static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
uint8_t test_result = DP_TEST_NAK;
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_connector *connector = &intel_connector->base;
+
+ if (intel_connector->detect_edid == NULL ||
+ connector->edid_corrupt ||
+ intel_dp->aux.i2c_defer_count > 6) {
+ /* Check EDID read for NACKs, DEFERs and corruption
+ * (DP CTS 1.2 Core r1.1)
+ * 4.2.2.4 : Failed EDID read, I2C_NAK
+ * 4.2.2.5 : Failed EDID read, I2C_DEFER
+ * 4.2.2.6 : EDID corruption detected
+ * Use failsafe mode for all cases
+ */
+ if (intel_dp->aux.i2c_nack_count > 0 ||
+ intel_dp->aux.i2c_defer_count > 0)
+ DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
+ intel_dp->aux.i2c_nack_count,
+ intel_dp->aux.i2c_defer_count);
+ intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
+ } else {
+ if (!drm_dp_dpcd_write(&intel_dp->aux,
+ DP_TEST_EDID_CHECKSUM,
+ &intel_connector->detect_edid->checksum,
+ 1))
+ DRM_DEBUG_KMS("Failed to write EDID checksum\n");
+
+ test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
+ intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
+ }
+
+ /* Set test active flag here so userspace doesn't interrupt things */
+ intel_dp->compliance_test_active = 1;
+
return test_result;
}
@@ -4073,7 +4166,10 @@ static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
uint8_t rxdata = 0;
int status = 0;
+ intel_dp->compliance_test_active = 0;
intel_dp->compliance_test_type = 0;
+ intel_dp->compliance_test_data = 0;
+
intel_dp->aux.i2c_nack_count = 0;
intel_dp->aux.i2c_defer_count = 0;
@@ -4220,7 +4316,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
sink_irq_vector);
if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
- intel_dp_handle_test_request(intel_dp);
+ DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
@@ -4450,6 +4546,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
enum drm_connector_status status;
enum intel_display_power_domain power_domain;
bool ret;
+ u8 sink_irq_vector;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
@@ -4492,6 +4589,20 @@ intel_dp_detect(struct drm_connector *connector, bool force)
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
status = connector_status_connected;
+ /* Try to read the source of the interrupt */
+ if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
+ intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
+ /* Clear interrupt source */
+ drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_DEVICE_SERVICE_IRQ_VECTOR,
+ sink_irq_vector);
+
+ if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
+ intel_dp_handle_test_request(intel_dp);
+ if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
+ DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
+ }
+
out:
intel_dp_power_put(intel_dp, power_domain);
return status;
@@ -4822,6 +4933,8 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
intel_display_power_get(dev_priv, power_domain);
if (long_hpd) {
+ /* indicate that we need to restart link training */
+ intel_dp->train_set_valid = false;
if (HAS_PCH_SPLIT(dev)) {
if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 3945057c5bbe..6e4cc5334f47 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -40,7 +40,9 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
int bpp, i;
int lane_count, slots, rate;
struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
- struct intel_connector *found = NULL;
+ struct drm_connector *drm_connector;
+ struct intel_connector *connector, *found = NULL;
+ struct drm_connector_state *connector_state;
int mst_pbn;
pipe_config->dp_encoder_is_mst = true;
@@ -70,12 +72,11 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
state = pipe_config->base.state;
- for (i = 0; i < state->num_connector; i++) {
- if (!state->connectors[i])
- continue;
+ for_each_connector_in_state(state, drm_connector, connector_state, i) {
+ connector = to_intel_connector(drm_connector);
- if (state->connector_states[i]->best_encoder == &encoder->base) {
- found = to_intel_connector(state->connectors[i]);
+ if (connector_state->best_encoder == &encoder->base) {
+ found = connector;
break;
}
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 43fe003ba3da..ea3368e83626 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -249,12 +249,6 @@ struct intel_plane_state {
bool visible;
/*
- * used only for sprite planes to determine when to implicitly
- * enable/disable the primary plane
- */
- bool hides_primary;
-
- /*
* scaler_id
* = -1 : not using a scaler
* >= 0 : using a scaler
@@ -285,11 +279,11 @@ struct intel_initial_plane_config {
#define SKL_MIN_SRC_W 8
#define SKL_MAX_SRC_W 4096
#define SKL_MIN_SRC_H 8
-#define SKL_MAX_SRC_H 2304
+#define SKL_MAX_SRC_H 4096
#define SKL_MIN_DST_W 8
#define SKL_MAX_DST_W 4096
#define SKL_MIN_DST_H 8
-#define SKL_MAX_DST_H 2304
+#define SKL_MAX_DST_H 4096
struct intel_scaler {
int id;
@@ -513,7 +507,6 @@ struct intel_crtc {
*/
bool active;
unsigned long enabled_power_domains;
- bool primary_enabled; /* is the primary plane (partially) visible? */
bool lowfreq_avail;
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
@@ -600,7 +593,7 @@ struct intel_plane {
uint32_t x, uint32_t y,
uint32_t src_w, uint32_t src_h);
void (*disable_plane)(struct drm_plane *plane,
- struct drm_crtc *crtc);
+ struct drm_crtc *crtc, bool force);
int (*check_plane)(struct drm_plane *plane,
struct intel_plane_state *state);
void (*commit_plane)(struct drm_plane *plane,
@@ -736,9 +729,12 @@ struct intel_dp {
bool has_aux_irq,
int send_bytes,
uint32_t aux_clock_divider);
+ bool train_set_valid;
/* Displayport compliance testing */
unsigned long compliance_test_type;
+ unsigned long compliance_test_data;
+ bool compliance_test_active;
};
struct intel_digital_port {
@@ -817,15 +813,6 @@ struct intel_unpin_work {
bool enable_stall_check;
};
-struct intel_set_config {
- struct drm_encoder **save_connector_encoders;
- struct drm_crtc **save_encoder_crtcs;
- bool *save_crtc_enabled;
-
- bool fb_changed;
- bool mode_changed;
-};
-
struct intel_load_detect_pipe {
struct drm_framebuffer *release_fb;
bool load_detect_temp;
@@ -998,6 +985,7 @@ void intel_mark_busy(struct drm_device *dev);
void intel_mark_idle(struct drm_device *dev);
void intel_crtc_restore_mode(struct drm_crtc *crtc);
void intel_crtc_control(struct drm_crtc *crtc, bool enable);
+void intel_crtc_reset(struct intel_crtc *crtc);
void intel_crtc_update_dpms(struct drm_crtc *crtc);
void intel_encoder_destroy(struct drm_encoder *encoder);
int intel_connector_init(struct intel_connector *);
@@ -1025,7 +1013,8 @@ intel_wait_for_vblank(struct drm_device *dev, int pipe)
}
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
- struct intel_digital_port *dport);
+ struct intel_digital_port *dport,
+ unsigned int expected_mask);
bool intel_get_load_detect_pipe(struct drm_connector *connector,
struct drm_display_mode *mode,
struct intel_load_detect_pipe *old,
@@ -1145,9 +1134,22 @@ void skl_detach_scalers(struct intel_crtc *intel_crtc);
int skl_update_scaler_users(struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state, struct intel_plane *intel_plane,
struct intel_plane_state *plane_state, int force_detach);
+int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
unsigned long intel_plane_obj_offset(struct intel_plane *intel_plane,
struct drm_i915_gem_object *obj);
+u32 skl_plane_ctl_format(uint32_t pixel_format);
+u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
+u32 skl_plane_ctl_rotation(unsigned int rotation);
+
+/* intel_csr.c */
+void intel_csr_ucode_init(struct drm_device *dev);
+enum csr_state intel_csr_load_status_get(struct drm_i915_private *dev_priv);
+void intel_csr_load_status_set(struct drm_i915_private *dev_priv,
+ enum csr_state state);
+void intel_csr_load_program(struct drm_device *dev);
+void intel_csr_ucode_fini(struct drm_device *dev);
+void assert_csr_loaded(struct drm_i915_private *dev_priv);
/* intel_dp.c */
void intel_dp_init(struct drm_device *dev, int output_reg, enum port port);
@@ -1380,8 +1382,6 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
bool intel_pipe_update_start(struct intel_crtc *crtc,
uint32_t *start_vbl_count);
void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
-void intel_post_enable_primary(struct drm_crtc *crtc);
-void intel_pre_disable_primary(struct drm_crtc *crtc);
/* intel_tv.c */
void intel_tv_init(struct drm_device *dev);
@@ -1406,7 +1406,7 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state;
crtc_state = drm_atomic_get_crtc_state(state, &crtc->base);
if (IS_ERR(crtc_state))
- return ERR_PTR(PTR_ERR(crtc_state));
+ return ERR_CAST(crtc_state);
return to_intel_crtc_state(crtc_state);
}
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 4165ce0644f7..6abb83432d4d 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -457,7 +457,7 @@ static struct drm_crtc *intel_fbc_find_crtc(struct drm_i915_private *dev_priv)
tmp_crtc = dev_priv->pipe_to_crtc_mapping[pipe];
if (intel_crtc_active(tmp_crtc) &&
- to_intel_crtc(tmp_crtc)->primary_enabled) {
+ to_intel_plane_state(tmp_crtc->primary->state)->visible) {
if (one_pipe_only && crtc) {
if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 65bc3867dda2..d04e6dc97fe5 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -964,6 +964,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
struct drm_device *dev = crtc_state->base.crtc->dev;
struct drm_atomic_state *state;
struct intel_encoder *encoder;
+ struct drm_connector *connector;
struct drm_connector_state *connector_state;
int count = 0, count_hdmi = 0;
int i;
@@ -973,11 +974,7 @@ static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
state = crtc_state->base.state;
- for (i = 0; i < state->num_connector; i++) {
- if (!state->connectors[i])
- continue;
-
- connector_state = state->connector_states[i];
+ for_each_connector_in_state(state, connector, connector_state, i) {
if (connector_state->crtc != crtc_state->base.crtc)
continue;
@@ -1327,7 +1324,7 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
intel_enable_hdmi(encoder);
- vlv_wait_port_ready(dev_priv, dport);
+ vlv_wait_port_ready(dev_priv, dport, 0x0);
}
static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
@@ -1490,7 +1487,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
&intel_crtc->config->base.adjusted_mode;
enum dpio_channel ch = vlv_dport_to_channel(dport);
int pipe = intel_crtc->pipe;
- int data, i;
+ int data, i, stagger;
u32 val;
mutex_lock(&dev_priv->dpio_lock);
@@ -1530,7 +1527,38 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
}
/* Data lane stagger programming */
- /* FIXME: Fix up value only after power analysis */
+ if (intel_crtc->config->port_clock > 270000)
+ stagger = 0x18;
+ else if (intel_crtc->config->port_clock > 135000)
+ stagger = 0xd;
+ else if (intel_crtc->config->port_clock > 67500)
+ stagger = 0x7;
+ else if (intel_crtc->config->port_clock > 33750)
+ stagger = 0x4;
+ else
+ stagger = 0x2;
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val |= DPIO_TX2_STAGGER_MASK(0x1f);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+
+ val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val |= DPIO_TX2_STAGGER_MASK(0x1f);
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
+ DPIO_LANESTAGGER_STRAP(stagger) |
+ DPIO_LANESTAGGER_STRAP_OVRD |
+ DPIO_TX1_STAGGER_MASK(0x1f) |
+ DPIO_TX1_STAGGER_MULT(6) |
+ DPIO_TX2_STAGGER_MULT(0));
+
+ vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
+ DPIO_LANESTAGGER_STRAP(stagger) |
+ DPIO_LANESTAGGER_STRAP_OVRD |
+ DPIO_TX1_STAGGER_MASK(0x1f) |
+ DPIO_TX1_STAGGER_MULT(7) |
+ DPIO_TX2_STAGGER_MULT(5));
/* Clear calc init */
val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
@@ -1613,7 +1641,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
intel_enable_hdmi(encoder);
- vlv_wait_port_ready(dev_priv, dport);
+ vlv_wait_port_ready(dev_priv, dport, 0x0);
}
static void intel_hdmi_destroy(struct drm_connector *connector)
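The chv_hdmi_pre_enable() hunk above replaces the fixed data-lane stagger value with one picked from the port clock. A standalone sketch of just that threshold ladder, using the thresholds from the patch; the function name is invented for illustration:

#include <stdio.h>

/* Stagger selection as programmed in the hunk above; thresholds are
 * the port clock in kHz.  Function name is illustrative only. */
static int chv_data_lane_stagger(int port_clock_khz)
{
	if (port_clock_khz > 270000)
		return 0x18;
	else if (port_clock_khz > 135000)
		return 0x0d;
	else if (port_clock_khz > 67500)
		return 0x07;
	else if (port_clock_khz > 33750)
		return 0x04;
	else
		return 0x02;
}

int main(void)
{
	/* 148.5 MHz lands in the 0xd bucket, 74.25 MHz in the 0x7 bucket */
	printf("stagger for 148500 kHz: 0x%x\n", chv_data_lane_stagger(148500));
	printf("stagger for  74250 kHz: 0x%x\n", chv_data_lane_stagger(74250));
	return 0;
}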
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 732fd633e73a..0fa9209ff556 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1895,10 +1895,9 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
context_size = round_up(get_lr_context_size(ring), 4096);
ctx_obj = i915_gem_alloc_object(dev, context_size);
- if (IS_ERR(ctx_obj)) {
- ret = PTR_ERR(ctx_obj);
- DRM_DEBUG_DRIVER("Alloc LRC backing obj failed: %d\n", ret);
- return ret;
+ if (!ctx_obj) {
+ DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
+ return -ENOMEM;
}
if (is_global_default_ctx) {
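The intel_lrc.c change switches the failure test on the LRC backing object from IS_ERR() to a plain NULL check, because i915_gem_alloc_object() reports failure with NULL rather than an encoded errno. A small userspace model of why mixing the two conventions silently misses the failure; the helpers below are stand-ins, not the kernel definitions:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* IS_ERR-style check: only fires for pointers that encode an errno
 * in the top 4095 values of the address space, never for NULL. */
static inline int is_err(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

static void *alloc_object(size_t size)	/* NULL-on-failure convention */
{
	return malloc(size);
}

int main(void)
{
	void *obj = alloc_object((size_t)-1);	/* force an allocation failure */

	if (is_err(obj))
		printf("IS_ERR-style check: missed the failure\n");
	if (!obj)
		printf("NULL check: caught it, would return -ENOMEM (%d)\n", -ENOMEM);

	free(obj);
	return 0;
}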
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 08532d4ffe0a..7d83527f95f7 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -536,6 +536,14 @@ static u32 vlv_get_backlight(struct intel_connector *connector)
return _vlv_get_backlight(dev, pipe);
}
+static u32 bxt_get_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(BXT_BLC_PWM_DUTY1);
+}
+
static u32 intel_panel_get_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
@@ -616,6 +624,14 @@ static void vlv_set_backlight(struct intel_connector *connector, u32 level)
I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
}
+static void bxt_set_backlight(struct intel_connector *connector, u32 level)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(BXT_BLC_PWM_DUTY1, level);
+}
+
static void
intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level)
{
@@ -741,6 +757,18 @@ static void vlv_disable_backlight(struct intel_connector *connector)
I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
}
+static void bxt_disable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp;
+
+ intel_panel_actually_set_backlight(connector, 0);
+
+ tmp = I915_READ(BXT_BLC_PWM_CTL1);
+ I915_WRITE(BXT_BLC_PWM_CTL1, tmp & ~BXT_BLC_PWM_ENABLE);
+}
+
void intel_panel_disable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
@@ -947,6 +975,33 @@ static void vlv_enable_backlight(struct intel_connector *connector)
I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
}
+static void bxt_enable_backlight(struct intel_connector *connector)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 pwm_ctl;
+
+ pwm_ctl = I915_READ(BXT_BLC_PWM_CTL1);
+ if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
+ DRM_DEBUG_KMS("backlight already enabled\n");
+ pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
+ I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl);
+ }
+
+ I915_WRITE(BXT_BLC_PWM_FREQ1, panel->backlight.max);
+
+ intel_panel_actually_set_backlight(connector, panel->backlight.level);
+
+ pwm_ctl = 0;
+ if (panel->backlight.active_low_pwm)
+ pwm_ctl |= BXT_BLC_PWM_POLARITY;
+
+ I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl);
+ POSTING_READ(BXT_BLC_PWM_CTL1);
+ I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl | BXT_BLC_PWM_ENABLE);
+}
+
void intel_panel_enable_backlight(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
@@ -1299,6 +1354,30 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
return 0;
}
+static int
+bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_panel *panel = &connector->panel;
+ u32 pwm_ctl, val;
+
+ pwm_ctl = I915_READ(BXT_BLC_PWM_CTL1);
+ panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
+
+ panel->backlight.max = I915_READ(BXT_BLC_PWM_FREQ1);
+ if (!panel->backlight.max)
+ return -ENODEV;
+
+ val = bxt_get_backlight(connector);
+ panel->backlight.level = intel_panel_compute_brightness(connector, val);
+
+ panel->backlight.enabled = (pwm_ctl & BXT_BLC_PWM_ENABLE) &&
+ panel->backlight.level != 0;
+
+ return 0;
+}
+
int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
{
struct drm_device *dev = connector->dev;
@@ -1350,7 +1429,13 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9)) {
+ if (IS_BROXTON(dev)) {
+ dev_priv->display.setup_backlight = bxt_setup_backlight;
+ dev_priv->display.enable_backlight = bxt_enable_backlight;
+ dev_priv->display.disable_backlight = bxt_disable_backlight;
+ dev_priv->display.set_backlight = bxt_set_backlight;
+ dev_priv->display.get_backlight = bxt_get_backlight;
+ } else if (IS_BROADWELL(dev) || IS_SKYLAKE(dev)) {
dev_priv->display.setup_backlight = bdw_setup_backlight;
dev_priv->display.enable_backlight = bdw_enable_backlight;
dev_priv->display.disable_backlight = pch_disable_backlight;
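The new Broxton backlight hooks drive a single PWM: BXT_BLC_PWM_FREQ1 holds the period (read back as the maximum), BXT_BLC_PWM_DUTY1 the duty cycle, and BXT_BLC_PWM_CTL1 the polarity and enable bits. A userspace sketch of the enable order used by bxt_enable_backlight(), with read32()/write32() standing in for I915_READ/I915_WRITE and the bit positions invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Fake register file standing in for MMIO; offsets and bit positions
 * are illustrative, not the real BXT_BLC_PWM_* layout. */
static uint32_t regs[3];
enum { BLC_PWM_CTL1, BLC_PWM_FREQ1, BLC_PWM_DUTY1 };
#define PWM_ENABLE	(1u << 31)
#define PWM_POLARITY	(1u << 29)

static uint32_t read32(int reg)			{ return regs[reg]; }
static void	write32(int reg, uint32_t v)	{ regs[reg] = v; }

/* Mirrors bxt_enable_backlight() above: drop a stale enable bit,
 * program period and duty, then set polarity and finally the enable
 * bit in a separate write. */
static void bxt_backlight_enable(uint32_t max, uint32_t level, int active_low)
{
	uint32_t ctl = read32(BLC_PWM_CTL1);

	if (ctl & PWM_ENABLE) {
		ctl &= ~PWM_ENABLE;		/* "backlight already enabled" case */
		write32(BLC_PWM_CTL1, ctl);
	}

	write32(BLC_PWM_FREQ1, max);		/* PWM period */
	write32(BLC_PWM_DUTY1, level);		/* brightness */

	ctl = active_low ? PWM_POLARITY : 0;
	write32(BLC_PWM_CTL1, ctl);		/* program polarity first ... */
	write32(BLC_PWM_CTL1, ctl | PWM_ENABLE); /* ... then flip the enable bit */
}

int main(void)
{
	bxt_backlight_enable(1000, 500, 0);
	printf("ctl=%#x freq=%u duty=%u\n",
	       regs[BLC_PWM_CTL1], regs[BLC_PWM_FREQ1], regs[BLC_PWM_DUTY1]);
	return 0;
}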
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a7516ed24eee..7006f94b94c1 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -88,8 +88,7 @@ static void skl_init_clock_gating(struct drm_device *dev)
/* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
I915_WRITE(FF_SLICE_CS_CHICKEN2,
- I915_READ(FF_SLICE_CS_CHICKEN2) |
- GEN9_TSG_BARRIER_ACK_DISABLE);
+ _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
}
if (INTEL_REVID(dev) <= SKL_REVID_E0)
@@ -4295,8 +4294,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
if (dev_priv->rps.min_freq_softlimit == 0) {
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
dev_priv->rps.min_freq_softlimit =
- /* max(RPe, 450 MHz) */
- max(dev_priv->rps.efficient_freq, (u8) 9);
+ max_t(int, dev_priv->rps.efficient_freq,
+ intel_freq_opcode(dev_priv, 450));
else
dev_priv->rps.min_freq_softlimit =
dev_priv->rps.min_freq;
@@ -5082,6 +5081,12 @@ static void cherryview_enable_rps(struct drm_device *dev)
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
+ /* Setting Fixed Bias */
+ val = VLV_OVERRIDE_EN |
+ VLV_SOC_TDP_EN |
+ CHV_BIAS_CPU_50_SOC_50;
+ vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
+
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
/* RPS code assumes GPLL is used */
@@ -5166,6 +5171,12 @@ static void valleyview_enable_rps(struct drm_device *dev)
I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
+ /* Setting Fixed Bias */
+ val = VLV_OVERRIDE_EN |
+ VLV_SOC_TDP_EN |
+ VLV_BIAS_CPU_125_SOC_875;
+ vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val);
+
val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
/* RPS code assumes GPLL is used */
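In skl_init_clock_gating() the read-modify-write of FF_SLICE_CS_CHICKEN2 becomes a single _MASKED_BIT_ENABLE() write. Chicken registers of this kind are masked: the upper 16 bits of the written value select which of the lower 16 bits take effect, so no read is needed. A sketch of that behaviour, assuming the usual i915 encoding of _MASKED_BIT_ENABLE() (the bit duplicated into the mask half):

#include <stdint.h>
#include <stdio.h>

/* Masked-register write model: the high 16 bits are a write-enable
 * mask for the low 16 bits.  Only bits whose mask bit is set change;
 * everything else is preserved by the hardware itself. */
#define MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))	/* assumed i915 encoding */
#define MASKED_BIT_DISABLE(a)	((a) << 16)

static uint16_t masked_write(uint16_t cur, uint32_t val)
{
	uint16_t mask = val >> 16;
	uint16_t bits = val & 0xffff;

	return (cur & ~mask) | (bits & mask);
}

int main(void)
{
	uint16_t chicken = 0x0003;		/* pre-existing contents */
	uint16_t ack_disable = 1u << 5;		/* stand-in for GEN9_TSG_BARRIER_ACK_DISABLE */

	chicken = masked_write(chicken, MASKED_BIT_ENABLE(ack_disable));
	printf("after enable:  %#06x\n", chicken);	/* 0x0023: old bits kept, bit 5 set */

	chicken = masked_write(chicken, MASKED_BIT_DISABLE(ack_disable));
	printf("after disable: %#06x\n", chicken);	/* 0x0003 again */
	return 0;
}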
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index de8c0747aaef..9b96ed7de9bb 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -919,53 +919,45 @@ static int gen9_init_workarounds(struct intel_engine_cs *ring)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- /* WaDisablePartialInstShootdown:skl */
+ /* WaDisablePartialInstShootdown:skl,bxt */
WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
- /* Syncing dependencies between camera and graphics */
+ /* Syncing dependencies between camera and graphics:skl,bxt */
WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
- if (INTEL_REVID(dev) == SKL_REVID_A0 ||
- INTEL_REVID(dev) == SKL_REVID_B0) {
- /* WaDisableDgMirrorFixInHalfSliceChicken5:skl */
+ if ((IS_SKYLAKE(dev) && (INTEL_REVID(dev) == SKL_REVID_A0 ||
+ INTEL_REVID(dev) == SKL_REVID_B0)) ||
+ (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
+ /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_DG_MIRROR_FIX_ENABLE);
}
- if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) {
- /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl */
+ if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
+ (IS_BROXTON(dev) && INTEL_REVID(dev) < BXT_REVID_B0)) {
+ /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
WA_SET_BIT_MASKED(GEN9_SLICE_COMMON_ECO_CHICKEN0,
DISABLE_PIXEL_MASK_CAMMING);
}
- if (INTEL_REVID(dev) >= SKL_REVID_C0) {
- /* WaEnableYV12BugFixInHalfSliceChicken7:skl */
+ if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) >= SKL_REVID_C0) ||
+ IS_BROXTON(dev)) {
+ /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
GEN9_ENABLE_YV12_BUGFIX);
}
- if (INTEL_REVID(dev) <= SKL_REVID_D0) {
- /*
- *Use Force Non-Coherent whenever executing a 3D context. This
- * is a workaround for a possible hang in the unlikely event
- * a TLB invalidation occurs during a PSD flush.
- */
- /* WaForceEnableNonCoherent:skl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_NON_COHERENT);
- }
-
- /* Wa4x4STCOptimizationDisable:skl */
+ /* Wa4x4STCOptimizationDisable:skl,bxt */
WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
- /* WaDisablePartialResolveInVc:skl */
+ /* WaDisablePartialResolveInVc:skl,bxt */
WA_SET_BIT_MASKED(CACHE_MODE_1, GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
- /* WaCcsTlbPrefetchDisable:skl */
+ /* WaCcsTlbPrefetchDisable:skl,bxt */
WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
GEN9_CCS_TLB_PREFETCH_ENABLE);
@@ -1036,13 +1028,42 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
WA_SET_BIT_MASKED(HIZ_CHICKEN,
BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
+ if (INTEL_REVID(dev) <= SKL_REVID_D0) {
+ /*
+ *Use Force Non-Coherent whenever executing a 3D context. This
+ * is a workaround for a possible hang in the unlikely event
+ * a TLB invalidation occurs during a PSD flush.
+ */
+ /* WaForceEnableNonCoherent:skl */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_NON_COHERENT);
+ }
+
return skl_tune_iz_hashing(ring);
}
static int bxt_init_workarounds(struct intel_engine_cs *ring)
{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
gen9_init_workarounds(ring);
+ /* WaDisableThreadStallDopClockGating:bxt */
+ WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
+ STALL_DOP_GATING_DISABLE);
+
+ /* WaDisableSbeCacheDispatchPortSharing:bxt */
+ if (INTEL_REVID(dev) <= BXT_REVID_B0) {
+ WA_SET_BIT_MASKED(
+ GEN7_HALF_SLICE_CHICKEN1,
+ GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+ }
+
+ /* WaForceContextSaveRestoreNonCoherent:bxt */
+ WA_SET_BIT_MASKED(HDC_CHICKEN0,
+ HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
+
return 0;
}
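gen9_init_workarounds() now gates several workarounds on both platform and stepping (SKL A0/B0, BXT before B0, and so on). A compact illustration of one of those predicates with placeholder revision constants; the real SKL and BXT revision macros live in i915_reg.h:

#include <stdio.h>
#include <stdbool.h>

enum platform { SKYLAKE, BROXTON };
enum { REVID_A0, REVID_B0, REVID_C0, REVID_D0 };	/* placeholders */

struct device { enum platform plat; int revid; };

/* Same gating as WaDisableDgMirrorFixInHalfSliceChicken5 above:
 * SKL A0/B0 and BXT steppings before B0 need the clear. */
static bool needs_dg_mirror_fix_disable(const struct device *dev)
{
	return (dev->plat == SKYLAKE &&
		(dev->revid == REVID_A0 || dev->revid == REVID_B0)) ||
	       (dev->plat == BROXTON && dev->revid < REVID_B0);
}

int main(void)
{
	struct device skl_c0 = { SKYLAKE, REVID_C0 };
	struct device bxt_a0 = { BROXTON, REVID_A0 };

	printf("SKL C0: %d\n", needs_dg_mirror_fix_disable(&skl_c0));	/* 0 */
	printf("BXT A0: %d\n", needs_dg_mirror_fix_disable(&bxt_a0));	/* 1 */
	return 0;
}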
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8fe2fdeab652..317b9b43d1c1 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -49,6 +49,9 @@
* present for a given platform.
*/
+#define GEN9_ENABLE_DC5(dev) 0
+#define SKL_ENABLE_DC6(dev) IS_SKYLAKE(dev)
+
#define for_each_power_well(i, power_well, domain_mask, power_domains) \
for (i = 0; \
i < (power_domains)->power_well_count && \
@@ -62,6 +65,9 @@
i--) \
if ((power_well)->domains & (domain_mask))
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+ int power_well_id);
+
/*
* We should only use the power well if we explicitly asked the hardware to
* enable it, so check if it's enabled and also check if we've requested it to
@@ -308,7 +314,9 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_MISC_IO_POWER_DOMAINS ( \
- SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS)
+ SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
+ BIT(POWER_DOMAIN_PLLS) | \
+ BIT(POWER_DOMAIN_INIT))
#define SKL_DISPLAY_ALWAYS_ON_POWER_DOMAINS ( \
(POWER_DOMAIN_MASK & ~(SKL_DISPLAY_POWERWELL_1_POWER_DOMAINS | \
SKL_DISPLAY_POWERWELL_2_POWER_DOMAINS | \
@@ -417,9 +425,148 @@ void bxt_disable_dc9(struct drm_i915_private *dev_priv)
POSTING_READ(DC_STATE_EN);
}
+static void gen9_set_dc_state_debugmask_memory_up(
+ struct drm_i915_private *dev_priv)
+{
+ uint32_t val;
+
+ /* The below bit doesn't need to be cleared ever afterwards */
+ val = I915_READ(DC_STATE_DEBUG);
+ if (!(val & DC_STATE_DEBUG_MASK_MEMORY_UP)) {
+ val |= DC_STATE_DEBUG_MASK_MEMORY_UP;
+ I915_WRITE(DC_STATE_DEBUG, val);
+ POSTING_READ(DC_STATE_DEBUG);
+ }
+}
+
+static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
+ SKL_DISP_PW_2);
+
+ WARN(!IS_SKYLAKE(dev), "Platform doesn't support DC5.\n");
+ WARN(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
+ WARN(pg2_enabled, "PG2 not disabled to enable DC5.\n");
+
+ WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
+ "DC5 already programmed to be enabled.\n");
+ WARN(dev_priv->pm.suspended,
+ "DC5 cannot be enabled, if platform is runtime-suspended.\n");
+
+ assert_csr_loaded(dev_priv);
+}
+
+static void assert_can_disable_dc5(struct drm_i915_private *dev_priv)
+{
+ bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
+ SKL_DISP_PW_2);
+ /*
+ * During initialization, the firmware may not be loaded yet.
+ * We still want to make sure that the DC enabling flag is cleared.
+ */
+ if (dev_priv->power_domains.initializing)
+ return;
+
+ WARN(!pg2_enabled, "PG2 not enabled to disable DC5.\n");
+ WARN(dev_priv->pm.suspended,
+ "Disabling of DC5 while platform is runtime-suspended should never happen.\n");
+}
+
+static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
+{
+ uint32_t val;
+
+ assert_can_enable_dc5(dev_priv);
+
+ DRM_DEBUG_KMS("Enabling DC5\n");
+
+ gen9_set_dc_state_debugmask_memory_up(dev_priv);
+
+ val = I915_READ(DC_STATE_EN);
+ val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
+ val |= DC_STATE_EN_UPTO_DC5;
+ I915_WRITE(DC_STATE_EN, val);
+ POSTING_READ(DC_STATE_EN);
+}
+
+static void gen9_disable_dc5(struct drm_i915_private *dev_priv)
+{
+ uint32_t val;
+
+ assert_can_disable_dc5(dev_priv);
+
+ DRM_DEBUG_KMS("Disabling DC5\n");
+
+ val = I915_READ(DC_STATE_EN);
+ val &= ~DC_STATE_EN_UPTO_DC5;
+ I915_WRITE(DC_STATE_EN, val);
+ POSTING_READ(DC_STATE_EN);
+}
+
+static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+
+ WARN(!IS_SKYLAKE(dev), "Platform doesn't support DC6.\n");
+ WARN(!HAS_RUNTIME_PM(dev), "Runtime PM not enabled.\n");
+ WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ "Backlight is not disabled.\n");
+ WARN((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
+ "DC6 already programmed to be enabled.\n");
+
+ assert_csr_loaded(dev_priv);
+}
+
+static void assert_can_disable_dc6(struct drm_i915_private *dev_priv)
+{
+ /*
+ * During initialization, the firmware may not be loaded yet.
+ * We still want to make sure that the DC enabling flag is cleared.
+ */
+ if (dev_priv->power_domains.initializing)
+ return;
+
+ assert_csr_loaded(dev_priv);
+ WARN(!(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
+ "DC6 already programmed to be disabled.\n");
+}
+
+static void skl_enable_dc6(struct drm_i915_private *dev_priv)
+{
+ uint32_t val;
+
+ assert_can_enable_dc6(dev_priv);
+
+ DRM_DEBUG_KMS("Enabling DC6\n");
+
+ gen9_set_dc_state_debugmask_memory_up(dev_priv);
+
+ val = I915_READ(DC_STATE_EN);
+ val &= ~DC_STATE_EN_UPTO_DC5_DC6_MASK;
+ val |= DC_STATE_EN_UPTO_DC6;
+ I915_WRITE(DC_STATE_EN, val);
+ POSTING_READ(DC_STATE_EN);
+}
+
+static void skl_disable_dc6(struct drm_i915_private *dev_priv)
+{
+ uint32_t val;
+
+ assert_can_disable_dc6(dev_priv);
+
+ DRM_DEBUG_KMS("Disabling DC6\n");
+
+ val = I915_READ(DC_STATE_EN);
+ val &= ~DC_STATE_EN_UPTO_DC6;
+ I915_WRITE(DC_STATE_EN, val);
+ POSTING_READ(DC_STATE_EN);
+}
+
static void skl_set_power_well(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well, bool enable)
{
+ struct drm_device *dev = dev_priv->dev;
uint32_t tmp, fuse_status;
uint32_t req_mask, state_mask;
bool is_enabled, enable_requested, check_fuse_status = false;
@@ -459,6 +606,25 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
if (enable) {
if (!enable_requested) {
+ WARN((tmp & state_mask) &&
+ !I915_READ(HSW_PWR_WELL_BIOS),
+ "Invalid for power well status to be enabled, unless done by the BIOS, \
+ when request is to disable!\n");
+ if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
+ power_well->data == SKL_DISP_PW_2) {
+ if (SKL_ENABLE_DC6(dev)) {
+ skl_disable_dc6(dev_priv);
+ /*
+ * DDI buffer programming unnecessary during driver-load/resume
+ * as it's already done during modeset initialization then.
+ * It's also invalid here as encoder list is still uninitialized.
+ */
+ if (!dev_priv->power_domains.initializing)
+ intel_prepare_ddi(dev);
+ } else {
+ gen9_disable_dc5(dev_priv);
+ }
+ }
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask);
}
@@ -475,6 +641,25 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask);
POSTING_READ(HSW_PWR_WELL_DRIVER);
DRM_DEBUG_KMS("Disabling %s\n", power_well->name);
+
+ if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) &&
+ power_well->data == SKL_DISP_PW_2) {
+ enum csr_state state;
+ /* TODO: wait for a completion event or
+ * similar here instead of busy
+ * waiting using wait_for function.
+ */
+ wait_for((state = intel_csr_load_status_get(dev_priv)) !=
+ FW_UNINITIALIZED, 1000);
+ if (state != FW_LOADED)
+ DRM_ERROR("CSR firmware not ready (%d)\n",
+ state);
+ else
+ if (SKL_ENABLE_DC6(dev))
+ skl_enable_dc6(dev_priv);
+ else
+ gen9_enable_dc5(dev_priv);
+ }
}
}
@@ -764,8 +949,8 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
DRM_ERROR("Display PHY %d is not power up\n", phy);
- I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
- PHY_COM_LANE_RESET_DEASSERT(phy));
+ dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
+ I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
}
static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
@@ -785,8 +970,8 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
assert_pll_disabled(dev_priv, PIPE_C);
}
- I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
- ~PHY_COM_LANE_RESET_DEASSERT(phy));
+ dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
+ I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
vlv_set_power_well(dev_priv, power_well, false);
}
@@ -1279,23 +1464,13 @@ static struct i915_power_well chv_power_wells[] = {
#endif
{
.name = "dpio-common-bc",
- /*
- * XXX: cmnreset for one PHY seems to disturb the other.
- * As a workaround keep both powered on at the same
- * time for now.
- */
- .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
+ .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
.ops = &chv_dpio_cmn_power_well_ops,
},
{
.name = "dpio-common-d",
- /*
- * XXX: cmnreset for one PHY seems to disturb the other.
- * As a workaround keep both powered on at the same
- * time for now.
- */
- .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
+ .domains = CHV_DPIO_CMN_D_POWER_DOMAINS,
.data = PUNIT_POWER_WELL_DPIO_CMN_D,
.ops = &chv_dpio_cmn_power_well_ops,
},
@@ -1346,7 +1521,7 @@ static struct i915_power_well chv_power_wells[] = {
};
static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
- enum punit_power_well power_well_id)
+ int power_well_id)
{
struct i915_power_domains *power_domains = &dev_priv->power_domains;
struct i915_power_well *power_well;
@@ -1360,6 +1535,18 @@ static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_pr
return NULL;
}
+bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
+ int power_well_id)
+{
+ struct i915_power_well *power_well;
+ bool ret;
+
+ power_well = lookup_power_well(dev_priv, power_well_id);
+ ret = power_well->ops->is_enabled(dev_priv, power_well);
+
+ return ret;
+}
+
static struct i915_power_well skl_power_wells[] = {
{
.name = "always-on",
@@ -1522,6 +1709,30 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
mutex_unlock(&power_domains->lock);
}
+static void chv_phy_control_init(struct drm_i915_private *dev_priv)
+{
+ struct i915_power_well *cmn_bc =
+ lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
+ struct i915_power_well *cmn_d =
+ lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D);
+
+ /*
+ * DISPLAY_PHY_CONTROL can get corrupted if read. As a
+ * workaround never ever read DISPLAY_PHY_CONTROL, and
+ * instead maintain a shadow copy ourselves. Use the actual
+ * power well state to reconstruct the expected initial
+ * value.
+ */
+ dev_priv->chv_phy_control =
+ PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY0, DPIO_CH0) |
+ PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY0, DPIO_CH1) |
+ PHY_CH_POWER_MODE(PHY_CH_SU_PSR, DPIO_PHY1, DPIO_CH0);
+ if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc))
+ dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0);
+ if (cmn_d->ops->is_enabled(dev_priv, cmn_d))
+ dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1);
+}
+
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
{
struct i915_power_well *cmn =
@@ -1564,7 +1775,9 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
power_domains->initializing = true;
- if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+ if (IS_CHERRYVIEW(dev)) {
+ chv_phy_control_init(dev_priv);
+ } else if (IS_VALLEYVIEW(dev)) {
mutex_lock(&power_domains->lock);
vlv_cmnlane_wa(dev_priv);
mutex_unlock(&power_domains->lock);
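chv_phy_control_init() records that DISPLAY_PHY_CONTROL can get corrupted if read, so the driver keeps a shadow copy in dev_priv->chv_phy_control, reconstructs its initial value from the power-well state, and from then on only ever writes it. A minimal model of that write-only shadow-register pattern; the layout and helper names are invented for the sketch:

#include <stdint.h>
#include <stdio.h>

/* A write-only shadowed register: the hardware copy is never read,
 * every update goes through a cached value the driver owns. */
struct shadowed_reg {
	uint32_t shadow;	/* driver-owned copy, the only readable state */
	uint32_t hw;		/* what the hardware last latched */
};

static void reg_write(struct shadowed_reg *r, uint32_t val)
{
	r->shadow = val;
	r->hw = val;		/* MMIO write in the real driver */
}

static void set_bits(struct shadowed_reg *r, uint32_t bits)
{
	reg_write(r, r->shadow | bits);		/* never reads r->hw back */
}

static void clear_bits(struct shadowed_reg *r, uint32_t bits)
{
	reg_write(r, r->shadow & ~bits);
}

int main(void)
{
	struct shadowed_reg phy_control = { 0, 0 };
	uint32_t lane_reset_deassert = 1u << 0;	/* stand-in for PHY_COM_LANE_RESET_DEASSERT */

	/* init: reconstruct the expected value instead of reading it back */
	reg_write(&phy_control, 0);
	set_bits(&phy_control, lane_reset_deassert);	/* power well enable path */
	clear_bits(&phy_control, lane_reset_deassert);	/* power well disable path */

	printf("shadow=%#x hw=%#x\n", phy_control.shadow, phy_control.hw);
	return 0;
}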
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 10cd33252838..0a0625761f42 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -242,7 +242,7 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
if (intel_sdvo->sdvo_reg == PCH_SDVOB) {
I915_WRITE(intel_sdvo->sdvo_reg, val);
- I915_READ(intel_sdvo->sdvo_reg);
+ POSTING_READ(intel_sdvo->sdvo_reg);
return;
}
@@ -259,9 +259,9 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
for (i = 0; i < 2; i++)
{
I915_WRITE(GEN3_SDVOB, bval);
- I915_READ(GEN3_SDVOB);
+ POSTING_READ(GEN3_SDVOB);
I915_WRITE(GEN3_SDVOC, cval);
- I915_READ(GEN3_SDVOC);
+ POSTING_READ(GEN3_SDVOC);
}
}
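The SDVO hunk replaces bare I915_READ() calls that were used only for their side effect with POSTING_READ(), which states the intent: read the register back so the preceding write is flushed before continuing, and discard the value. A userspace model of the idiom:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_mmio[2];

static void write32(int reg, uint32_t val)
{
	fake_mmio[reg] = val;	/* a real MMIO write may be posted (buffered) */
}

static uint32_t read32(int reg)
{
	return fake_mmio[reg];	/* a read forces earlier posted writes to land */
}

/* POSTING_READ-style helper: same as read32() but the result is
 * deliberately discarded; it exists only to flush the write above it. */
#define posting_read(reg) ((void)read32(reg))

int main(void)
{
	enum { SDVOB = 0, SDVOC = 1 };

	write32(SDVOB, 0x1234);
	posting_read(SDVOB);	/* ensure SDVOB lands before touching SDVOC */
	write32(SDVOC, 0x5678);
	posting_read(SDVOC);

	printf("SDVOB=%#x SDVOC=%#x\n", fake_mmio[SDVOB], fake_mmio[SDVOC]);
	return 0;
}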
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index e3d41c096dc6..f215e223aa4a 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -33,6 +33,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_rect.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
@@ -165,17 +166,6 @@ void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
pipe_name(pipe), start_vbl_count, end_vbl_count);
}
-static void intel_update_primary_plane(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
- int reg = DSPCNTR(crtc->plane);
-
- if (crtc->primary_enabled)
- I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
- else
- I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
-}
-
static void
skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
@@ -197,80 +187,17 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
u32 tile_height, plane_offset, plane_size;
unsigned int rotation;
int x_offset, y_offset;
+ struct intel_crtc_state *crtc_state = to_intel_crtc(crtc)->config;
+ int scaler_id;
plane_ctl = PLANE_CTL_ENABLE |
PLANE_CTL_PIPE_CSC_ENABLE;
- switch (fb->pixel_format) {
- case DRM_FORMAT_RGB565:
- plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
- break;
- case DRM_FORMAT_XBGR8888:
- plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
- break;
- case DRM_FORMAT_XRGB8888:
- plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
- break;
- /*
- * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
- * to be already pre-multiplied. We need to add a knob (or a different
- * DRM_FORMAT) for user-space to configure that.
- */
- case DRM_FORMAT_ABGR8888:
- plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
- PLANE_CTL_ORDER_RGBX |
- PLANE_CTL_ALPHA_SW_PREMULTIPLY;
- break;
- case DRM_FORMAT_ARGB8888:
- plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
- PLANE_CTL_ALPHA_SW_PREMULTIPLY;
- break;
- case DRM_FORMAT_YUYV:
- plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
- break;
- case DRM_FORMAT_YVYU:
- plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
- break;
- case DRM_FORMAT_UYVY:
- plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
- break;
- case DRM_FORMAT_VYUY:
- plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
- break;
- default:
- BUG();
- }
-
- switch (fb->modifier[0]) {
- case DRM_FORMAT_MOD_NONE:
- break;
- case I915_FORMAT_MOD_X_TILED:
- plane_ctl |= PLANE_CTL_TILED_X;
- break;
- case I915_FORMAT_MOD_Y_TILED:
- plane_ctl |= PLANE_CTL_TILED_Y;
- break;
- case I915_FORMAT_MOD_Yf_TILED:
- plane_ctl |= PLANE_CTL_TILED_YF;
- break;
- default:
- MISSING_CASE(fb->modifier[0]);
- }
+ plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
+ plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
rotation = drm_plane->state->rotation;
- switch (rotation) {
- case BIT(DRM_ROTATE_90):
- plane_ctl |= PLANE_CTL_ROTATE_90;
- break;
-
- case BIT(DRM_ROTATE_180):
- plane_ctl |= PLANE_CTL_ROTATE_180;
- break;
-
- case BIT(DRM_ROTATE_270):
- plane_ctl |= PLANE_CTL_ROTATE_270;
- break;
- }
+ plane_ctl |= skl_plane_ctl_rotation(rotation);
intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
pixel_size, true,
@@ -279,6 +206,8 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
stride_div = intel_fb_stride_alignment(dev, fb->modifier[0],
fb->pixel_format);
+ scaler_id = to_intel_plane_state(drm_plane->state)->scaler_id;
+
/* Sizes are 0 based */
src_w--;
src_h--;
@@ -316,19 +245,38 @@ skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
I915_WRITE(PLANE_OFFSET(pipe, plane), plane_offset);
I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
- I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
I915_WRITE(PLANE_SIZE(pipe, plane), plane_size);
+
+ /* program plane scaler */
+ if (scaler_id >= 0) {
+ uint32_t ps_ctrl = 0;
+
+ DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
+ PS_PLANE_SEL(plane));
+ ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) |
+ crtc_state->scaler_state.scalers[scaler_id].mode;
+ I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
+ I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
+ I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
+ I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id),
+ ((crtc_w + 1) << 16)|(crtc_h + 1));
+
+ I915_WRITE(PLANE_POS(pipe, plane), 0);
+ } else {
+ I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
+ }
+
I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
I915_WRITE(PLANE_SURF(pipe, plane), surf_addr);
POSTING_READ(PLANE_SURF(pipe, plane));
}
static void
-skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
+skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
{
- struct drm_device *dev = drm_plane->dev;
+ struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_plane *intel_plane = to_intel_plane(drm_plane);
+ struct intel_plane *intel_plane = to_intel_plane(dplane);
const int pipe = intel_plane->pipe;
const int plane = intel_plane->plane + 1;
@@ -338,7 +286,7 @@ skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
I915_WRITE(PLANE_SURF(pipe, plane), 0);
POSTING_READ(PLANE_SURF(pipe, plane));
- intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
+ intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false);
}
static void
@@ -479,8 +427,6 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
}
- intel_update_primary_plane(intel_crtc);
-
if (key->flags) {
I915_WRITE(SPKEYMINVAL(pipe, plane), key->min_value);
I915_WRITE(SPKEYMAXVAL(pipe, plane), key->max_value);
@@ -512,7 +458,7 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
}
static void
-vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
+vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc, bool force)
{
struct drm_device *dev = dplane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -521,8 +467,6 @@ vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
int pipe = intel_plane->pipe;
int plane = intel_plane->plane;
- intel_update_primary_plane(intel_crtc);
-
I915_WRITE(SPCNTR(pipe, plane), 0);
/* Activate double buffered register update */
@@ -626,8 +570,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
}
- intel_update_primary_plane(intel_crtc);
-
if (key->flags) {
I915_WRITE(SPRKEYVAL(pipe), key->min_value);
I915_WRITE(SPRKEYMAX(pipe), key->max_value);
@@ -662,7 +604,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
static void
-ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
+ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -670,8 +612,6 @@ ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
- intel_update_primary_plane(intel_crtc);
-
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
if (intel_plane->can_scale)
@@ -766,8 +706,6 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
linear_offset += src_h * fb->pitches[0] + src_w * pixel_size;
}
- intel_update_primary_plane(intel_crtc);
-
if (key->flags) {
I915_WRITE(DVSKEYVAL(pipe), key->min_value);
I915_WRITE(DVSKEYMAX(pipe), key->max_value);
@@ -797,7 +735,7 @@ ilk_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
}
static void
-ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
+ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc, bool force)
{
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -805,8 +743,6 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_plane->pipe;
- intel_update_primary_plane(intel_crtc);
-
I915_WRITE(DVSCNTR(pipe), 0);
/* Disable the scaler */
I915_WRITE(DVSSCALE(pipe), 0);
@@ -817,84 +753,13 @@ ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
intel_flush_primary_plane(dev_priv, intel_crtc->plane);
}
-/**
- * intel_post_enable_primary - Perform operations after enabling primary plane
- * @crtc: the CRTC whose primary plane was just enabled
- *
- * Performs potentially sleeping operations that must be done after the primary
- * plane is enabled, such as updating FBC and IPS. Note that this may be
- * called due to an explicit primary plane update, or due to an implicit
- * re-enable that is caused when a sprite plane is updated to no longer
- * completely hide the primary plane.
- */
-void
-intel_post_enable_primary(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- /*
- * BDW signals flip done immediately if the plane
- * is disabled, even if the plane enable is already
- * armed to occur at the next vblank :(
- */
- if (IS_BROADWELL(dev))
- intel_wait_for_vblank(dev, intel_crtc->pipe);
-
- /*
- * FIXME IPS should be fine as long as one plane is
- * enabled, but in practice it seems to have problems
- * when going from primary only to sprite only and vice
- * versa.
- */
- hsw_enable_ips(intel_crtc);
-
- mutex_lock(&dev->struct_mutex);
- intel_fbc_update(dev);
- mutex_unlock(&dev->struct_mutex);
-}
-
-/**
- * intel_pre_disable_primary - Perform operations before disabling primary plane
- * @crtc: the CRTC whose primary plane is to be disabled
- *
- * Performs potentially sleeping operations that must be done before the
- * primary plane is enabled, such as updating FBC and IPS. Note that this may
- * be called due to an explicit primary plane update, or due to an implicit
- * disable that is caused when a sprite plane completely hides the primary
- * plane.
- */
-void
-intel_pre_disable_primary(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-
- mutex_lock(&dev->struct_mutex);
- if (dev_priv->fbc.crtc == intel_crtc)
- intel_fbc_disable(dev);
- mutex_unlock(&dev->struct_mutex);
-
- /*
- * FIXME IPS should be fine as long as one plane is
- * enabled, but in practice it seems to have problems
- * when going from primary only to sprite only and vice
- * versa.
- */
- hsw_disable_ips(intel_crtc);
-}
-
-static bool colorkey_enabled(struct intel_plane *intel_plane)
-{
- return intel_plane->ckey.flags != I915_SET_COLORKEY_NONE;
-}
-
static int
intel_check_sprite_plane(struct drm_plane *plane,
struct intel_plane_state *state)
{
+ struct drm_device *dev = plane->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
+ struct intel_crtc_state *crtc_state;
struct intel_plane *intel_plane = to_intel_plane(plane);
struct drm_framebuffer *fb = state->base.fb;
int crtc_x, crtc_y;
@@ -906,8 +771,11 @@ intel_check_sprite_plane(struct drm_plane *plane,
int hscale, vscale;
int max_scale, min_scale;
int pixel_size;
+ int ret;
intel_crtc = intel_crtc ? intel_crtc : to_intel_crtc(plane->crtc);
+ crtc_state = state->base.state ?
+ intel_atomic_get_crtc_state(state->base.state, intel_crtc) : NULL;
if (!fb) {
state->visible = false;
@@ -934,6 +802,11 @@ intel_check_sprite_plane(struct drm_plane *plane,
max_scale = intel_plane->max_downscale << 16;
min_scale = intel_plane->can_scale ? 1 : (1 << 16);
+ if (INTEL_INFO(dev)->gen >= 9) {
+ min_scale = 1;
+ max_scale = skl_max_scale(intel_crtc, crtc_state);
+ }
+
drm_rect_rotate(src, fb->width << 16, fb->height << 16,
state->base.rotation);
@@ -1029,8 +902,8 @@ intel_check_sprite_plane(struct drm_plane *plane,
width_bytes = ((src_x * pixel_size) & 63) +
src_w * pixel_size;
- if (src_w > 2048 || src_h > 2048 ||
- width_bytes > 4096 || fb->pitches[0] > 4096) {
+ if (INTEL_INFO(dev)->gen < 9 && (src_w > 2048 || src_h > 2048 ||
+ width_bytes > 4096 || fb->pitches[0] > 4096)) {
DRM_DEBUG_KMS("Source dimensions exceed hardware limits\n");
return -EINVAL;
}
@@ -1053,23 +926,10 @@ finish:
* If the sprite is completely covering the primary plane,
* we can disable the primary and save power.
*/
- state->hides_primary = fb != NULL && drm_rect_equals(dst, clip) &&
- !colorkey_enabled(intel_plane);
- WARN_ON(state->hides_primary && !state->visible && intel_crtc->active);
-
if (intel_crtc->active) {
- if (intel_crtc->primary_enabled == state->hides_primary)
- intel_crtc->atomic.wait_for_flips = true;
-
- if (intel_crtc->primary_enabled && state->hides_primary)
- intel_crtc->atomic.pre_disable_primary = true;
-
intel_crtc->atomic.fb_bits |=
INTEL_FRONTBUFFER_SPRITE(intel_crtc->pipe);
- if (!intel_crtc->primary_enabled && !state->hides_primary)
- intel_crtc->atomic.post_enable_primary = true;
-
if (intel_wm_need_update(plane, &state->base))
intel_crtc->atomic.update_wm = true;
@@ -1084,6 +944,13 @@ finish:
}
}
+ if (INTEL_INFO(dev)->gen >= 9) {
+ ret = skl_update_scaler_users(intel_crtc, crtc_state, intel_plane,
+ state, 0);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -1105,8 +972,6 @@ intel_commit_sprite_plane(struct drm_plane *plane,
plane->fb = fb;
if (intel_crtc->active) {
- intel_crtc->primary_enabled = !state->hides_primary;
-
if (state->visible) {
crtc_x = state->dst.x1;
crtc_y = state->dst.y1;
@@ -1120,7 +985,7 @@ intel_commit_sprite_plane(struct drm_plane *plane,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
} else {
- intel_plane->disable_plane(plane, crtc);
+ intel_plane->disable_plane(plane, crtc, false);
}
}
}
@@ -1150,6 +1015,16 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
}
intel_plane = to_intel_plane(plane);
+
+ if (INTEL_INFO(dev)->gen >= 9) {
+ /* plane scaling and colorkey are mutually exclusive */
+ if (to_intel_plane_state(plane->state)->scaler_id >= 0) {
+ DRM_ERROR("colorkey not allowed with scaler\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
intel_plane->ckey = *set;
/*
@@ -1286,12 +1161,7 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
}
break;
case 9:
- /*
- * FIXME: Skylake planes can be scaled (with some restrictions),
- * but this is for another time.
- */
- intel_plane->can_scale = false;
- intel_plane->max_downscale = 1;
+ intel_plane->can_scale = true;
intel_plane->update_plane = skl_update_plane;
intel_plane->disable_plane = skl_disable_plane;
state->scaler_id = -1;
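The large removal in skl_update_plane() swaps the open-coded pixel-format, tiling and rotation switches for the shared skl_plane_ctl_format()/_tiling()/_rotation() helpers declared in intel_drv.h above. The mapping itself is visible in the deleted lines; here is a standalone version of part of the format switch, with placeholder PLANE_CTL_* bit values (the real encodings live in i915_reg.h):

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit values; real PLANE_CTL_* encodings are in i915_reg.h. */
#define PLANE_CTL_FORMAT_RGB_565	(1u << 24)
#define PLANE_CTL_FORMAT_XRGB_8888	(2u << 24)
#define PLANE_CTL_FORMAT_YUV422		(3u << 24)
#define PLANE_CTL_ORDER_RGBX		(1u << 20)
#define PLANE_CTL_YUV422_YUYV		(0u << 16)

/* FourCC codes spelled out so the sketch needs no drm headers. */
#define fourcc(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
			    ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
#define DRM_FORMAT_RGB565	fourcc('R', 'G', '1', '6')
#define DRM_FORMAT_XRGB8888	fourcc('X', 'R', '2', '4')
#define DRM_FORMAT_XBGR8888	fourcc('X', 'B', '2', '4')
#define DRM_FORMAT_YUYV		fourcc('Y', 'U', 'Y', 'V')

/* Same mapping as the switch removed from skl_update_plane() above;
 * ARGB/ABGR and the remaining YUV orders are omitted for brevity. */
static uint32_t plane_ctl_format(uint32_t pixel_format)
{
	switch (pixel_format) {
	case DRM_FORMAT_RGB565:
		return PLANE_CTL_FORMAT_RGB_565;
	case DRM_FORMAT_XBGR8888:
		return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
	case DRM_FORMAT_XRGB8888:
		return PLANE_CTL_FORMAT_XRGB_8888;
	case DRM_FORMAT_YUYV:
		return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
	default:
		return 0;	/* the driver treats unknown formats as a bug */
	}
}

int main(void)
{
	printf("XBGR8888 -> %#x\n", plane_ctl_format(DRM_FORMAT_XBGR8888));
	return 0;
}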
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index db9a30f10bc4..0a4a040d6bb7 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -647,6 +647,7 @@ struct drm_encoder {
* @audio_latency: audio latency info from ELD, if found
* @null_edid_counter: track sinks that give us all zeros for the EDID
* @bad_edid_counter: track sinks that give us an EDID with invalid checksum
+ * @edid_corrupt: indicates whether the last read EDID was corrupt
* @debugfs_entry: debugfs directory for this connector
* @state: current atomic state for this connector
* @has_tile: is this connector connected to a tiled monitor
@@ -719,6 +720,11 @@ struct drm_connector {
int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
unsigned bad_edid_counter;
+ /* Flag for raw EDID header corruption - used in Displayport
+ * compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
+ */
+ bool edid_corrupt;
+
struct dentry *debugfs_entry;
struct drm_connector_state *state;
@@ -1443,7 +1449,8 @@ extern void drm_set_preferred_mode(struct drm_connector *connector,
int hpref, int vpref);
extern int drm_edid_header_is_valid(const u8 *raw_edid);
-extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
+extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
+ bool *edid_corrupt);
extern bool drm_edid_is_valid(struct edid *edid);
extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
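drm_edid_block_valid() gains a bool *edid_corrupt out-parameter and drm_connector a matching flag, so DisplayPort compliance tests can distinguish a corrupt header or checksum from an otherwise invalid block. A self-contained sketch of how a caller might thread the flag through; the validator below is a toy stand-in, not the DRM implementation:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define EDID_LENGTH 128

struct connector {
	bool edid_corrupt;	/* mirrors drm_connector.edid_corrupt above */
};

/* Toy stand-in for drm_edid_block_valid(): checks only the 8-byte
 * header and the block checksum, reporting header/checksum damage via
 * the out-parameter like the new interface does. */
static bool edid_block_valid(const unsigned char *block, bool *edid_corrupt)
{
	static const unsigned char header[8] =
		{ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
	unsigned char csum = 0;
	int i;

	if (edid_corrupt)
		*edid_corrupt = false;

	if (memcmp(block, header, sizeof(header)) != 0) {
		if (edid_corrupt)
			*edid_corrupt = true;
		return false;
	}

	for (i = 0; i < EDID_LENGTH; i++)
		csum += block[i];
	if (csum != 0) {
		if (edid_corrupt)
			*edid_corrupt = true;
		return false;
	}
	return true;
}

int main(void)
{
	unsigned char block[EDID_LENGTH] = { 0 };	/* all zeros: bad header */
	struct connector conn = { false };

	bool ok = edid_block_valid(block, &conn.edid_corrupt);
	printf("valid=%d corrupt=%d\n", ok, conn.edid_corrupt);
	return 0;
}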