Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpi_dbg.c | 22
-rw-r--r--  drivers/ata/sata_highbank.c | 2
-rw-r--r--  drivers/block/brd.c | 2
-rw-r--r--  drivers/block/rbd.c | 305
-rw-r--r--  drivers/clk/tegra/clk-tegra210.c | 2
-rw-r--r--  drivers/cpufreq/omap-cpufreq.c | 2
-rw-r--r--  drivers/crypto/caam/ctrl.c | 2
-rw-r--r--  drivers/dma/sun4i-dma.c | 16
-rw-r--r--  drivers/gpio/gpio-xlp.c | 2
-rw-r--r--  drivers/gpu/drm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/acp/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 25
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 111
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_dpm.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c | 45
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c | 38
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c | 44
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c | 16
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/hwmgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/drm_dp_dual_mode_helper.c | 366
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_ddi.c | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_dpll_mgr.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_dsi.c | 141
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 104
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c | 146
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_mocs.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_psr.c | 55
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 207
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_vbt_defs.h | 12
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 8
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/kv_dpm.c | 2
-rw-r--r--  drivers/gpu/drm/sti/sti_vtg.c | 4
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_tfp410.c | 2
-rw-r--r--  drivers/gpu/host1x/hw/intr_hw.c | 2
-rw-r--r--  drivers/gpu/ipu-v3/ipu-common.c | 5
-rw-r--r--  drivers/iommu/arm-smmu-v3.c | 18
-rw-r--r--  drivers/iommu/arm-smmu.c | 8
-rw-r--r--  drivers/iommu/intel-iommu.c | 318
-rw-r--r--  drivers/iommu/iova.c | 417
-rw-r--r--  drivers/irqchip/irq-clps711x.c | 2
-rw-r--r--  drivers/irqchip/irq-gic.c | 2
-rw-r--r--  drivers/irqchip/irq-hip04.c | 2
-rw-r--r--  drivers/irqchip/spear-shirq.c | 2
-rw-r--r--  drivers/md/bcache/alloc.c | 2
-rw-r--r--  drivers/md/bcache/btree.c | 2
-rw-r--r--  drivers/md/bcache/writeback.c | 3
-rw-r--r--  drivers/media/i2c/adp1653.c | 10
-rw-r--r--  drivers/media/platform/s5p-tv/mixer_drv.c | 2
-rw-r--r--  drivers/mfd/twl4030-irq.c | 2
-rw-r--r--  drivers/mmc/core/mmc.c | 4
-rw-r--r--  drivers/mmc/host/dw_mmc.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c | 2
-rw-r--r--  drivers/mmc/host/sdhci-of-at91.c | 2
-rw-r--r--  drivers/mmc/host/sdhci.c | 4
-rw-r--r--  drivers/mtd/nand/atmel_nand.c | 35
-rw-r--r--  drivers/mtd/nand/atmel_nand_nfc.h | 3
-rw-r--r--  drivers/mtd/ubi/build.c | 5
-rw-r--r--  drivers/mtd/ubi/debug.c | 3
-rw-r--r--  drivers/mtd/ubi/eba.c | 47
-rw-r--r--  drivers/mtd/ubi/fastmap.c | 1
-rw-r--r--  drivers/mtd/ubi/kapi.c | 2
-rw-r--r--  drivers/mtd/ubi/ubi.h | 2
-rw-r--r--  drivers/mtd/ubi/vmt.c | 2
-rw-r--r--  drivers/mtd/ubi/wl.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_muram.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_muram.h | 4
-rw-r--r--  drivers/net/wireless/ti/wlcore/spi.c | 4
-rw-r--r--  drivers/nvdimm/pmem.c | 10
-rw-r--r--  drivers/nvme/host/core.c | 37
-rw-r--r--  drivers/nvme/host/nvme.h | 1
-rw-r--r--  drivers/nvme/host/pci.c | 23
-rw-r--r--  drivers/nvmem/core.c | 22
-rw-r--r--  drivers/pinctrl/intel/pinctrl-baytrail.c | 10
-rw-r--r--  drivers/platform/x86/Kconfig | 12
-rw-r--r--  drivers/platform/x86/Makefile | 1
-rw-r--r--  drivers/platform/x86/asus-laptop.c | 15
-rw-r--r--  drivers/platform/x86/asus-wmi.c | 5
-rw-r--r--  drivers/platform/x86/dell-rbtn.c | 56
-rw-r--r--  drivers/platform/x86/fujitsu-laptop.c | 63
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c | 19
-rw-r--r--  drivers/platform/x86/intel_menlow.c | 49
-rw-r--r--  drivers/platform/x86/intel_pmc_core.c | 200
-rw-r--r--  drivers/platform/x86/intel_pmc_core.h | 51
-rw-r--r--  drivers/platform/x86/intel_telemetry_core.c | 6
-rw-r--r--  drivers/platform/x86/intel_telemetry_pltdrv.c | 2
-rw-r--r--  drivers/platform/x86/sony-laptop.c | 3
-rw-r--r--  drivers/platform/x86/surfacepro3_button.c | 9
-rw-r--r--  drivers/platform/x86/thinkpad_acpi.c | 43
-rw-r--r--  drivers/s390/block/dcssblk.c | 4
-rw-r--r--  drivers/staging/lustre/lustre/llite/llite_internal.h | 4
-rw-r--r--  drivers/staging/lustre/lustre/llite/xattr.c | 6
-rw-r--r--  drivers/tty/serial/amba-pl011.c | 2
-rw-r--r--  drivers/tty/serial/sprd_serial.c | 2
-rw-r--r--  drivers/video/fbdev/da8xx-fb.c | 4
120 files changed, 2453 insertions(+), 1029 deletions(-)
diff --git a/drivers/acpi/acpi_dbg.c b/drivers/acpi/acpi_dbg.c
index 15e4604efba7..1f4128487dd4 100644
--- a/drivers/acpi/acpi_dbg.c
+++ b/drivers/acpi/acpi_dbg.c
@@ -265,7 +265,7 @@ static int acpi_aml_write_kern(const char *buf, int len)
char *p;
ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
/* sync tail before inserting logs */
smp_mb();
@@ -286,7 +286,7 @@ static int acpi_aml_readb_kern(void)
char *p;
ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
/* sync head before removing cmds */
smp_rmb();
@@ -330,7 +330,7 @@ again:
goto again;
break;
}
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
break;
size += ret;
count -= ret;
@@ -373,7 +373,7 @@ again:
if (ret == 0)
goto again;
}
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
break;
*(msg + size) = (char)ret;
size++;
@@ -526,7 +526,7 @@ static int acpi_aml_open(struct inode *inode, struct file *file)
}
acpi_aml_io.users++;
err_lock:
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
if (acpi_aml_active_reader == file)
acpi_aml_active_reader = NULL;
}
@@ -587,7 +587,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
char *p;
ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
/* sync head before removing logs */
smp_rmb();
@@ -602,7 +602,7 @@ static int acpi_aml_read_user(char __user *buf, int len)
crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
ret = n;
out:
- acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !IS_ERR_VALUE(ret));
+ acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, !ret);
return ret;
}
@@ -634,7 +634,7 @@ again:
goto again;
}
}
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
if (!acpi_aml_running())
ret = 0;
break;
@@ -657,7 +657,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
char *p;
ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
/* sync tail before inserting cmds */
smp_mb();
@@ -672,7 +672,7 @@ static int acpi_aml_write_user(const char __user *buf, int len)
crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
ret = n;
out:
- acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !IS_ERR_VALUE(ret));
+ acpi_aml_unlock_fifo(ACPI_AML_IN_USER, !ret);
return n;
}
@@ -704,7 +704,7 @@ again:
goto again;
}
}
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
if (!acpi_aml_running())
ret = 0;
break;
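
The hunks above (and the matching ones in sata_highbank, clk-tegra210, omap-cpufreq, sun4i-dma and gpio-xlp below) replace IS_ERR_VALUE(ret) tests on plain int error codes with ret < 0. A minimal sketch of the distinction, assuming the macro's rough shape from include/linux/err.h; the helper name is illustrative only:

    /*
     * IS_ERR_VALUE() compares against (unsigned long)-MAX_ERRNO and is
     * meant for unsigned long values that may encode an error pointer.
     * Applied to a plain int it relies on implicit sign extension and
     * breaks for unsigned or narrower types, so an integer errno is
     * tested directly:
     */
    static int example_check(int ret)
    {
            if (ret < 0)            /* -ENOMEM, -EINVAL, ... */
                    return ret;     /* propagate the errno */
            return 0;               /* success */
    }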
diff --git a/drivers/ata/sata_highbank.c b/drivers/ata/sata_highbank.c
index 8638d575b2b9..aafb8cc03523 100644
--- a/drivers/ata/sata_highbank.c
+++ b/drivers/ata/sata_highbank.c
@@ -197,7 +197,7 @@ static void highbank_set_em_messages(struct device *dev,
for (i = 0; i < SGPIO_PINS; i++) {
err = of_get_named_gpio(np, "calxeda,sgpio-gpio", i);
- if (IS_ERR_VALUE(err))
+ if (err < 0)
return;
pdata->sgpio_gpio[i] = err;
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 51a071e32221..c04bd9bc39fd 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -381,7 +381,7 @@ static int brd_rw_page(struct block_device *bdev, sector_t sector,
#ifdef CONFIG_BLK_DEV_RAM_DAX
static long brd_direct_access(struct block_device *bdev, sector_t sector,
- void __pmem **kaddr, pfn_t *pfn)
+ void __pmem **kaddr, pfn_t *pfn, long size)
{
struct brd_device *brd = bdev->bd_disk->private_data;
struct page *page;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 0ede6d7e2568..81666a56415e 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -350,12 +350,12 @@ struct rbd_device {
struct rbd_spec *spec;
struct rbd_options *opts;
- char *header_name;
+ struct ceph_object_id header_oid;
+ struct ceph_object_locator header_oloc;
struct ceph_file_layout layout;
- struct ceph_osd_event *watch_event;
- struct rbd_obj_request *watch_request;
+ struct ceph_osd_linger_request *watch_handle;
struct rbd_spec *parent_spec;
u64 parent_overlap;
@@ -1596,12 +1596,6 @@ static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
return __rbd_obj_request_wait(obj_request, 0);
}
-static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request,
- unsigned long timeout)
-{
- return __rbd_obj_request_wait(obj_request, timeout);
-}
-
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
@@ -1751,12 +1745,6 @@ static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
complete_all(&obj_request->completion);
}
-static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
-{
- dout("%s: obj %p\n", __func__, obj_request);
- obj_request_done_set(obj_request);
-}
-
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request = NULL;
@@ -1828,13 +1816,12 @@ static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
obj_request_done_set(obj_request);
}
-static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
- struct ceph_msg *msg)
+static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
struct rbd_obj_request *obj_request = osd_req->r_priv;
u16 opcode;
- dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
+ dout("%s: osd_req %p\n", __func__, osd_req);
rbd_assert(osd_req == obj_request->osd_req);
if (obj_request_img_data_test(obj_request)) {
rbd_assert(obj_request->img_request);
@@ -1878,10 +1865,6 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
case CEPH_OSD_OP_CALL:
rbd_osd_call_callback(obj_request);
break;
- case CEPH_OSD_OP_NOTIFY_ACK:
- case CEPH_OSD_OP_WATCH:
- rbd_osd_trivial_callback(obj_request);
- break;
default:
rbd_warn(NULL, "%s: unsupported op %hu",
obj_request->object_name, (unsigned short) opcode);
@@ -1896,27 +1879,17 @@ static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
struct rbd_img_request *img_request = obj_request->img_request;
struct ceph_osd_request *osd_req = obj_request->osd_req;
- u64 snap_id;
- rbd_assert(osd_req != NULL);
-
- snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
- ceph_osdc_build_request(osd_req, obj_request->offset,
- NULL, snap_id, NULL);
+ if (img_request)
+ osd_req->r_snapid = img_request->snap_id;
}
static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
- struct rbd_img_request *img_request = obj_request->img_request;
struct ceph_osd_request *osd_req = obj_request->osd_req;
- struct ceph_snap_context *snapc;
- struct timespec mtime = CURRENT_TIME;
- rbd_assert(osd_req != NULL);
-
- snapc = img_request ? img_request->snapc : NULL;
- ceph_osdc_build_request(osd_req, obj_request->offset,
- snapc, CEPH_NOSNAP, &mtime);
+ osd_req->r_mtime = CURRENT_TIME;
+ osd_req->r_data_offset = obj_request->offset;
}
/*
@@ -1954,7 +1927,7 @@ static struct ceph_osd_request *rbd_osd_req_create(
osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
GFP_NOIO);
if (!osd_req)
- return NULL; /* ENOMEM */
+ goto fail;
if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
@@ -1965,9 +1938,18 @@ static struct ceph_osd_request *rbd_osd_req_create(
osd_req->r_priv = obj_request;
osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
- ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
+ if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
+ obj_request->object_name))
+ goto fail;
+
+ if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
+ goto fail;
return osd_req;
+
+fail:
+ ceph_osdc_put_request(osd_req);
+ return NULL;
}
/*
@@ -2003,16 +1985,25 @@ rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
false, GFP_NOIO);
if (!osd_req)
- return NULL; /* ENOMEM */
+ goto fail;
osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
osd_req->r_callback = rbd_osd_req_callback;
osd_req->r_priv = obj_request;
osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
- ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
+ if (ceph_oid_aprintf(&osd_req->r_base_oid, GFP_NOIO, "%s",
+ obj_request->object_name))
+ goto fail;
+
+ if (ceph_osdc_alloc_messages(osd_req, GFP_NOIO))
+ goto fail;
return osd_req;
+
+fail:
+ ceph_osdc_put_request(osd_req);
+ return NULL;
}
@@ -2973,17 +2964,20 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
struct rbd_obj_request *obj_request;
struct rbd_obj_request *next_obj_request;
+ int ret = 0;
dout("%s: img %p\n", __func__, img_request);
- for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
- int ret;
+ rbd_img_request_get(img_request);
+ for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
ret = rbd_img_obj_request_submit(obj_request);
if (ret)
- return ret;
+ goto out_put_ireq;
}
- return 0;
+out_put_ireq:
+ rbd_img_request_put(img_request);
+ return ret;
}
static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
@@ -3090,45 +3084,18 @@ out_err:
obj_request_done_set(obj_request);
}
-static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
-{
- struct rbd_obj_request *obj_request;
- struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
- int ret;
-
- obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
- OBJ_REQUEST_NODATA);
- if (!obj_request)
- return -ENOMEM;
-
- ret = -ENOMEM;
- obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
- obj_request);
- if (!obj_request->osd_req)
- goto out;
-
- osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
- notify_id, 0, 0);
- rbd_osd_req_format_read(obj_request);
+static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev);
+static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev);
- ret = rbd_obj_request_submit(osdc, obj_request);
- if (ret)
- goto out;
- ret = rbd_obj_request_wait(obj_request);
-out:
- rbd_obj_request_put(obj_request);
-
- return ret;
-}
-
-static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
+static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
+ u64 notifier_id, void *data, size_t data_len)
{
- struct rbd_device *rbd_dev = (struct rbd_device *)data;
+ struct rbd_device *rbd_dev = arg;
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
int ret;
- dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
- rbd_dev->header_name, (unsigned long long)notify_id,
- (unsigned int)opcode);
+ dout("%s rbd_dev %p cookie %llu notify_id %llu\n", __func__, rbd_dev,
+ cookie, notify_id);
/*
* Until adequate refresh error handling is in place, there is
@@ -3140,63 +3107,31 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
if (ret)
rbd_warn(rbd_dev, "refresh failed: %d", ret);
- ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
+ ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
+ &rbd_dev->header_oloc, notify_id, cookie,
+ NULL, 0);
if (ret)
rbd_warn(rbd_dev, "notify_ack ret %d", ret);
}
-/*
- * Send a (un)watch request and wait for the ack. Return a request
- * with a ref held on success or error.
- */
-static struct rbd_obj_request *rbd_obj_watch_request_helper(
- struct rbd_device *rbd_dev,
- bool watch)
+static void rbd_watch_errcb(void *arg, u64 cookie, int err)
{
- struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
- struct ceph_options *opts = osdc->client->options;
- struct rbd_obj_request *obj_request;
+ struct rbd_device *rbd_dev = arg;
int ret;
- obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
- OBJ_REQUEST_NODATA);
- if (!obj_request)
- return ERR_PTR(-ENOMEM);
-
- obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
- obj_request);
- if (!obj_request->osd_req) {
- ret = -ENOMEM;
- goto out;
- }
-
- osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
- rbd_dev->watch_event->cookie, 0, watch);
- rbd_osd_req_format_write(obj_request);
+ rbd_warn(rbd_dev, "encountered watch error: %d", err);
- if (watch)
- ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
-
- ret = rbd_obj_request_submit(osdc, obj_request);
- if (ret)
- goto out;
+ __rbd_dev_header_unwatch_sync(rbd_dev);
- ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout);
- if (ret)
- goto out;
-
- ret = obj_request->result;
+ ret = rbd_dev_header_watch_sync(rbd_dev);
if (ret) {
- if (watch)
- rbd_obj_request_end(obj_request);
- goto out;
+ rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
+ return;
}
- return obj_request;
-
-out:
- rbd_obj_request_put(obj_request);
- return ERR_PTR(ret);
+ ret = rbd_dev_refresh(rbd_dev);
+ if (ret)
+ rbd_warn(rbd_dev, "reregisteration refresh failed: %d", ret);
}
/*
@@ -3205,35 +3140,33 @@ out:
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
{
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
- struct rbd_obj_request *obj_request;
- int ret;
+ struct ceph_osd_linger_request *handle;
- rbd_assert(!rbd_dev->watch_event);
- rbd_assert(!rbd_dev->watch_request);
+ rbd_assert(!rbd_dev->watch_handle);
- ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
- &rbd_dev->watch_event);
- if (ret < 0)
- return ret;
+ handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
+ &rbd_dev->header_oloc, rbd_watch_cb,
+ rbd_watch_errcb, rbd_dev);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
- obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
- if (IS_ERR(obj_request)) {
- ceph_osdc_cancel_event(rbd_dev->watch_event);
- rbd_dev->watch_event = NULL;
- return PTR_ERR(obj_request);
- }
+ rbd_dev->watch_handle = handle;
+ return 0;
+}
- /*
- * A watch request is set to linger, so the underlying osd
- * request won't go away until we unregister it. We retain
- * a pointer to the object request during that time (in
- * rbd_dev->watch_request), so we'll keep a reference to it.
- * We'll drop that reference after we've unregistered it in
- * rbd_dev_header_unwatch_sync().
- */
- rbd_dev->watch_request = obj_request;
+static void __rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
+{
+ struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+ int ret;
- return 0;
+ if (!rbd_dev->watch_handle)
+ return;
+
+ ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
+ if (ret)
+ rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
+
+ rbd_dev->watch_handle = NULL;
}
/*
@@ -3241,24 +3174,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
*/
static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
{
- struct rbd_obj_request *obj_request;
-
- rbd_assert(rbd_dev->watch_event);
- rbd_assert(rbd_dev->watch_request);
-
- rbd_obj_request_end(rbd_dev->watch_request);
- rbd_obj_request_put(rbd_dev->watch_request);
- rbd_dev->watch_request = NULL;
-
- obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
- if (!IS_ERR(obj_request))
- rbd_obj_request_put(obj_request);
- else
- rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
- PTR_ERR(obj_request));
-
- ceph_osdc_cancel_event(rbd_dev->watch_event);
- rbd_dev->watch_event = NULL;
+ __rbd_dev_header_unwatch_sync(rbd_dev);
dout("%s flushing notifies\n", __func__);
ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
@@ -3591,7 +3507,7 @@ static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
if (!ondisk)
return -ENOMEM;
- ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_oid.name,
0, size, ondisk);
if (ret < 0)
goto out;
@@ -4033,6 +3949,8 @@ static void rbd_dev_release(struct device *dev)
struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
bool need_put = !!rbd_dev->opts;
+ ceph_oid_destroy(&rbd_dev->header_oid);
+
rbd_put_client(rbd_dev->rbd_client);
rbd_spec_put(rbd_dev->spec);
kfree(rbd_dev->opts);
@@ -4063,6 +3981,9 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
INIT_LIST_HEAD(&rbd_dev->node);
init_rwsem(&rbd_dev->header_rwsem);
+ ceph_oid_init(&rbd_dev->header_oid);
+ ceph_oloc_init(&rbd_dev->header_oloc);
+
rbd_dev->dev.bus = &rbd_bus_type;
rbd_dev->dev.type = &rbd_device_type;
rbd_dev->dev.parent = &rbd_root_dev;
@@ -4111,7 +4032,7 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
__le64 size;
} __attribute__ ((packed)) size_buf = { 0 };
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_size",
&snapid, sizeof (snapid),
&size_buf, sizeof (size_buf));
@@ -4151,7 +4072,7 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
if (!reply_buf)
return -ENOMEM;
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_object_prefix", NULL, 0,
reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
@@ -4186,7 +4107,7 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
u64 unsup;
int ret;
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_features",
&snapid, sizeof (snapid),
&features_buf, sizeof (features_buf));
@@ -4248,7 +4169,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
}
snapid = cpu_to_le64(rbd_dev->spec->snap_id);
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_parent",
&snapid, sizeof (snapid),
reply_buf, size);
@@ -4351,7 +4272,7 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
u64 stripe_count;
int ret;
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_stripe_unit_count", NULL, 0,
(char *)&striping_info_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
@@ -4599,7 +4520,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
if (!reply_buf)
return -ENOMEM;
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_snapcontext", NULL, 0,
reply_buf, size);
dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
@@ -4664,7 +4585,7 @@ static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
return ERR_PTR(-ENOMEM);
snapid = cpu_to_le64(snap_id);
- ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
+ ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_oid.name,
"rbd", "get_snapshot_name",
&snapid, sizeof (snapid),
reply_buf, size);
@@ -4975,13 +4896,13 @@ static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
again:
ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
if (ret == -ENOENT && tries++ < 1) {
- ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
- &newest_epoch);
+ ret = ceph_monc_get_version(&rbdc->client->monc, "osdmap",
+ &newest_epoch);
if (ret < 0)
return ret;
if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
- ceph_monc_request_next_osdmap(&rbdc->client->monc);
+ ceph_osdc_maybe_request_map(&rbdc->client->osdc);
(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
newest_epoch,
opts->mount_timeout);
@@ -5260,35 +5181,26 @@ err_out_unlock:
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
struct rbd_spec *spec = rbd_dev->spec;
- size_t size;
+ int ret;
/* Record the header object name for this rbd image. */
rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
+ rbd_dev->header_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
if (rbd_dev->image_format == 1)
- size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
+ ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
+ spec->image_name, RBD_SUFFIX);
else
- size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
-
- rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
- if (!rbd_dev->header_name)
- return -ENOMEM;
+ ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
+ RBD_HEADER_PREFIX, spec->image_id);
- if (rbd_dev->image_format == 1)
- sprintf(rbd_dev->header_name, "%s%s",
- spec->image_name, RBD_SUFFIX);
- else
- sprintf(rbd_dev->header_name, "%s%s",
- RBD_HEADER_PREFIX, spec->image_id);
- return 0;
+ return ret;
}
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
rbd_dev_unprobe(rbd_dev);
- kfree(rbd_dev->header_name);
- rbd_dev->header_name = NULL;
rbd_dev->image_format = 0;
kfree(rbd_dev->spec->image_id);
rbd_dev->spec->image_id = NULL;
@@ -5327,7 +5239,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
pr_info("image %s/%s does not exist\n",
rbd_dev->spec->pool_name,
rbd_dev->spec->image_name);
- goto out_header_name;
+ goto err_out_format;
}
}
@@ -5373,7 +5285,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
goto err_out_probe;
dout("discovered format %u image, header name is %s\n",
- rbd_dev->image_format, rbd_dev->header_name);
+ rbd_dev->image_format, rbd_dev->header_oid.name);
return 0;
err_out_probe:
@@ -5381,9 +5293,6 @@ err_out_probe:
err_out_watch:
if (!depth)
rbd_dev_header_unwatch_sync(rbd_dev);
-out_header_name:
- kfree(rbd_dev->header_name);
- rbd_dev->header_name = NULL;
err_out_format:
rbd_dev->image_format = 0;
kfree(rbd_dev->spec->image_id);
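
The rbd watch rework above drops the hand-rolled watch object requests in favor of the libceph linger API. Condensed from the hunks above, the register/unregister pattern now reads:

    /* register: one call returns a linger handle */
    handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
                             &rbd_dev->header_oloc,
                             rbd_watch_cb, rbd_watch_errcb, rbd_dev);
    if (IS_ERR(handle))
            return PTR_ERR(handle);
    rbd_dev->watch_handle = handle;

    /* unregister: one call tears down the lingering request */
    ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
    if (ret)
            rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
    rbd_dev->watch_handle = NULL;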
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index b8551813ec43..456cf586d2c2 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -1221,7 +1221,7 @@ static int tegra210_pll_fixed_mdiv_cfg(struct clk_hw *hw,
p = rate >= params->vco_min ? 1 : -EINVAL;
}
- if (IS_ERR_VALUE(p))
+ if (p < 0)
return -EINVAL;
cfg->m = tegra_pll_get_fixed_mdiv(hw, input_rate);
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index cead9bec4843..376e63ca94e8 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -54,7 +54,7 @@ static int omap_target(struct cpufreq_policy *policy, unsigned int index)
freq = new_freq * 1000;
ret = clk_round_rate(policy->clk, freq);
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
dev_warn(mpu_dev,
"CPUfreq: Cannot find matching frequency for %lu\n",
freq);
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 44d30b45f3cc..5ad5f3009ae0 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -402,7 +402,7 @@ int caam_get_era(void)
ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
of_node_put(caam_node);
- return IS_ERR_VALUE(ret) ? -ENOTSUPP : prop;
+ return ret ? -ENOTSUPP : prop;
}
EXPORT_SYMBOL(caam_get_era);
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index e0df233dde92..57aa227bfadb 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -461,25 +461,25 @@ generate_ndma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
/* Source burst */
ret = convert_burst(sconfig->src_maxburst);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
/* Destination burst */
ret = convert_burst(sconfig->dst_maxburst);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
/* Source bus width */
ret = convert_buswidth(sconfig->src_addr_width);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
/* Destination bus width */
ret = convert_buswidth(sconfig->dst_addr_width);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
@@ -518,25 +518,25 @@ generate_ddma_promise(struct dma_chan *chan, dma_addr_t src, dma_addr_t dest,
/* Source burst */
ret = convert_burst(sconfig->src_maxburst);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_BURST_LENGTH(ret);
/* Destination burst */
ret = convert_burst(sconfig->dst_maxburst);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_BURST_LENGTH(ret);
/* Source bus width */
ret = convert_buswidth(sconfig->src_addr_width);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_SRC_DATA_WIDTH(ret);
/* Destination bus width */
ret = convert_buswidth(sconfig->dst_addr_width);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto fail;
promise->cfg |= SUN4I_DMA_CFG_DST_DATA_WIDTH(ret);
diff --git a/drivers/gpio/gpio-xlp.c b/drivers/gpio/gpio-xlp.c
index 08897dc11915..1a33a19d95b9 100644
--- a/drivers/gpio/gpio-xlp.c
+++ b/drivers/gpio/gpio-xlp.c
@@ -393,7 +393,7 @@ static int xlp_gpio_probe(struct platform_device *pdev)
irq_base = irq_alloc_descs(-1, 0, gc->ngpio, 0);
else
irq_base = irq_alloc_descs(-1, XLP_GPIO_IRQ_BASE, gc->ngpio, 0);
- if (IS_ERR_VALUE(irq_base)) {
+ if (irq_base < 0) {
dev_err(&pdev->dev, "Failed to allocate IRQ numbers\n");
return irq_base;
}
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 2bd3e5aa43c6..be43afb08c69 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -23,7 +23,7 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o
drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
- drm_kms_helper_common.o
+ drm_kms_helper_common.o drm_dp_dual_mode_helper.o
drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index ca77ec10147c..e503e3d6d920 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -2,6 +2,7 @@ menu "ACP (Audio CoProcessor) Configuration"
config DRM_AMD_ACP
bool "Enable AMD Audio CoProcessor IP support"
+ depends on DRM_AMDGPU
select MFD_CORE
select PM_GENERIC_DOMAINS if PM
help
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 2a009c398dcb..992f00b65be4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -602,6 +602,8 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync);
void amdgpu_sync_free(struct amdgpu_sync *sync);
int amdgpu_sync_init(void);
void amdgpu_sync_fini(void);
+int amdgpu_fence_slab_init(void);
+void amdgpu_fence_slab_fini(void);
/*
* GART structures, functions & helpers
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 60a0c9ac11b2..cb07da41152b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -194,12 +194,12 @@ int amdgpu_connector_get_monitor_bpc(struct drm_connector *connector)
bpc = 8;
DRM_DEBUG("%s: HDMI deep color 10 bpc exceeds max tmds clock. Using %d bpc.\n",
connector->name, bpc);
- } else if (bpc > 8) {
- /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
- DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
- connector->name);
- bpc = 8;
}
+ } else if (bpc > 8) {
+ /* max_tmds_clock missing, but hdmi spec mandates it for deep color. */
+ DRM_DEBUG("%s: Required max tmds clock for HDMI deep color missing. Using 8 bpc.\n",
+ connector->name);
+ bpc = 8;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 1dab5f2b725b..f888c015f76c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -50,9 +50,11 @@
* KMS wrapper.
* - 3.0.0 - initial driver
* - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP)
+ * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
+ * at the end of IBs.
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 1
+#define KMS_DRIVER_MINOR 2
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit = 0;
@@ -279,14 +281,26 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_STONEY|AMD_IS_APU},
/* Polaris11 */
{0x1002, 0x67E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
- {0x1002, 0x67E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+ {0x1002, 0x67E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
{0x1002, 0x67E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
- {0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
{0x1002, 0x67EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+ {0x1002, 0x67EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
{0x1002, 0x67FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+ {0x1002, 0x67E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+ {0x1002, 0x67E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
+ {0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
/* Polaris10 */
{0x1002, 0x67C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0x1002, 0x67DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ {0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0, 0, 0}
};
@@ -563,9 +577,12 @@ static struct pci_driver amdgpu_kms_pci_driver = {
.driver.pm = &amdgpu_pm_ops,
};
+
+
static int __init amdgpu_init(void)
{
amdgpu_sync_init();
+ amdgpu_fence_slab_init();
if (vgacon_text_force()) {
DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
return -EINVAL;
@@ -576,7 +593,6 @@ static int __init amdgpu_init(void)
driver->driver_features |= DRIVER_MODESET;
driver->num_ioctls = amdgpu_max_kms_ioctl;
amdgpu_register_atpx_handler();
-
/* let modprobe override vga console setting */
return drm_pci_init(driver, pdriver);
}
@@ -587,6 +603,7 @@ static void __exit amdgpu_exit(void)
drm_pci_exit(driver, pdriver);
amdgpu_unregister_atpx_handler();
amdgpu_sync_fini();
+ amdgpu_fence_slab_fini();
}
module_init(amdgpu_init);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index ba9c04283d01..d1558768cfb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -55,8 +55,21 @@ struct amdgpu_fence {
};
static struct kmem_cache *amdgpu_fence_slab;
-static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
+int amdgpu_fence_slab_init(void)
+{
+ amdgpu_fence_slab = kmem_cache_create(
+ "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!amdgpu_fence_slab)
+ return -ENOMEM;
+ return 0;
+}
+
+void amdgpu_fence_slab_fini(void)
+{
+ kmem_cache_destroy(amdgpu_fence_slab);
+}
/*
* Cast helper
*/
@@ -396,13 +409,6 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
*/
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
- if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
- amdgpu_fence_slab = kmem_cache_create(
- "amdgpu_fence", sizeof(struct amdgpu_fence), 0,
- SLAB_HWCACHE_ALIGN, NULL);
- if (!amdgpu_fence_slab)
- return -ENOMEM;
- }
if (amdgpu_debugfs_fence_init(adev))
dev_err(adev->dev, "fence debugfs file creation failed\n");
@@ -437,13 +443,10 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
amd_sched_fini(&ring->sched);
del_timer_sync(&ring->fence_drv.fallback_timer);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
- fence_put(ring->fence_drv.fences[i]);
+ fence_put(ring->fence_drv.fences[j]);
kfree(ring->fence_drv.fences);
ring->fence_drv.initialized = false;
}
-
- if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
- kmem_cache_destroy(amdgpu_fence_slab);
}
/**
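
Creating the fence slab once in amdgpu_init()/amdgpu_exit() replaces the per-device atomic refcount removed above. A minimal sketch of the module-lifetime kmem_cache pattern, with hypothetical names:

    struct example_obj { int id; };

    static struct kmem_cache *example_slab;

    static int __init example_init(void)
    {
            example_slab = kmem_cache_create("example",
                            sizeof(struct example_obj), 0,
                            SLAB_HWCACHE_ALIGN, NULL);
            if (!example_slab)
                    return -ENOMEM;
            return 0;
    }

    static void __exit example_exit(void)
    {
            /* kmem_cache_destroy() is NULL-safe; no refcounting is
             * needed when the cache lives as long as the module. */
            kmem_cache_destroy(example_slab);
    }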
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index ea708cb94862..9f36ed30ba11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -53,6 +53,18 @@
/* Special value that no flush is necessary */
#define AMDGPU_VM_NO_FLUSH (~0ll)
+/* Local structure. Encapsulate some VM table update parameters to reduce
+ * the number of function parameters
+ */
+struct amdgpu_vm_update_params {
+ /* address where to copy page table entries from */
+ uint64_t src;
+ /* DMA addresses to use for mapping */
+ dma_addr_t *pages_addr;
+ /* indirect buffer to fill with commands */
+ struct amdgpu_ib *ib;
+};
+
/**
* amdgpu_vm_num_pde - return the number of page directory entries
*
@@ -389,9 +401,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
* amdgpu_vm_update_pages - helper to call the right asic function
*
* @adev: amdgpu_device pointer
- * @src: address where to copy page table entries from
- * @pages_addr: DMA addresses to use for mapping
- * @ib: indirect buffer to fill with commands
+ * @vm_update_params: see amdgpu_vm_update_params definition
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
@@ -402,29 +412,29 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
* to setup the page table using the DMA.
*/
static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
- uint64_t src,
- dma_addr_t *pages_addr,
- struct amdgpu_ib *ib,
+ struct amdgpu_vm_update_params
+ *vm_update_params,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
uint32_t flags)
{
trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);
- if (src) {
- src += (addr >> 12) * 8;
- amdgpu_vm_copy_pte(adev, ib, pe, src, count);
+ if (vm_update_params->src) {
+ amdgpu_vm_copy_pte(adev, vm_update_params->ib,
+ pe, (vm_update_params->src + (addr >> 12) * 8), count);
- } else if (pages_addr) {
- amdgpu_vm_write_pte(adev, ib, pages_addr, pe, addr,
- count, incr, flags);
+ } else if (vm_update_params->pages_addr) {
+ amdgpu_vm_write_pte(adev, vm_update_params->ib,
+ vm_update_params->pages_addr,
+ pe, addr, count, incr, flags);
} else if (count < 3) {
- amdgpu_vm_write_pte(adev, ib, NULL, pe, addr,
+ amdgpu_vm_write_pte(adev, vm_update_params->ib, NULL, pe, addr,
count, incr, flags);
} else {
- amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
+ amdgpu_vm_set_pte_pde(adev, vm_update_params->ib, pe, addr,
count, incr, flags);
}
}
@@ -444,10 +454,12 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
struct fence *fence = NULL;
struct amdgpu_job *job;
+ struct amdgpu_vm_update_params vm_update_params;
unsigned entries;
uint64_t addr;
int r;
+ memset(&vm_update_params, 0, sizeof(vm_update_params));
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
r = reservation_object_reserve_shared(bo->tbo.resv);
@@ -465,7 +477,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (r)
goto error;
- amdgpu_vm_update_pages(adev, 0, NULL, &job->ibs[0], addr, 0, entries,
+ vm_update_params.ib = &job->ibs[0];
+ amdgpu_vm_update_pages(adev, &vm_update_params, addr, 0, entries,
0, 0);
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
@@ -538,11 +551,12 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
uint64_t last_pde = ~0, last_pt = ~0;
unsigned count = 0, pt_idx, ndw;
struct amdgpu_job *job;
- struct amdgpu_ib *ib;
+ struct amdgpu_vm_update_params vm_update_params;
struct fence *fence = NULL;
int r;
+ memset(&vm_update_params, 0, sizeof(vm_update_params));
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
/* padding, etc. */
@@ -555,7 +569,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
if (r)
return r;
- ib = &job->ibs[0];
+ vm_update_params.ib = &job->ibs[0];
/* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
@@ -575,7 +589,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
((last_pt + incr * count) != pt)) {
if (count) {
- amdgpu_vm_update_pages(adev, 0, NULL, ib,
+ amdgpu_vm_update_pages(adev, &vm_update_params,
last_pde, last_pt,
count, incr,
AMDGPU_PTE_VALID);
@@ -590,14 +604,15 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
}
if (count)
- amdgpu_vm_update_pages(adev, 0, NULL, ib, last_pde, last_pt,
- count, incr, AMDGPU_PTE_VALID);
+ amdgpu_vm_update_pages(adev, &vm_update_params,
+ last_pde, last_pt,
+ count, incr, AMDGPU_PTE_VALID);
- if (ib->length_dw != 0) {
- amdgpu_ring_pad_ib(ring, ib);
+ if (vm_update_params.ib->length_dw != 0) {
+ amdgpu_ring_pad_ib(ring, vm_update_params.ib);
amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
AMDGPU_FENCE_OWNER_VM);
- WARN_ON(ib->length_dw > ndw);
+ WARN_ON(vm_update_params.ib->length_dw > ndw);
r = amdgpu_job_submit(job, ring, &vm->entity,
AMDGPU_FENCE_OWNER_VM, &fence);
if (r)
@@ -623,18 +638,15 @@ error_free:
* amdgpu_vm_frag_ptes - add fragment information to PTEs
*
* @adev: amdgpu_device pointer
- * @src: address where to copy page table entries from
- * @pages_addr: DMA addresses to use for mapping
- * @ib: IB for the update
+ * @vm_update_params: see amdgpu_vm_update_params definition
* @pe_start: first PTE to handle
* @pe_end: last PTE to handle
* @addr: addr those PTEs should point to
* @flags: hw mapping flags
*/
static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
- uint64_t src,
- dma_addr_t *pages_addr,
- struct amdgpu_ib *ib,
+ struct amdgpu_vm_update_params
+ *vm_update_params,
uint64_t pe_start, uint64_t pe_end,
uint64_t addr, uint32_t flags)
{
@@ -671,11 +683,11 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
return;
/* system pages are non continuously */
- if (src || pages_addr || !(flags & AMDGPU_PTE_VALID) ||
- (frag_start >= frag_end)) {
+ if (vm_update_params->src || vm_update_params->pages_addr ||
+ !(flags & AMDGPU_PTE_VALID) || (frag_start >= frag_end)) {
count = (pe_end - pe_start) / 8;
- amdgpu_vm_update_pages(adev, src, pages_addr, ib, pe_start,
+ amdgpu_vm_update_pages(adev, vm_update_params, pe_start,
addr, count, AMDGPU_GPU_PAGE_SIZE,
flags);
return;
@@ -684,21 +696,21 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
/* handle the 4K area at the beginning */
if (pe_start != frag_start) {
count = (frag_start - pe_start) / 8;
- amdgpu_vm_update_pages(adev, 0, NULL, ib, pe_start, addr,
+ amdgpu_vm_update_pages(adev, vm_update_params, pe_start, addr,
count, AMDGPU_GPU_PAGE_SIZE, flags);
addr += AMDGPU_GPU_PAGE_SIZE * count;
}
/* handle the area in the middle */
count = (frag_end - frag_start) / 8;
- amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_start, addr, count,
+ amdgpu_vm_update_pages(adev, vm_update_params, frag_start, addr, count,
AMDGPU_GPU_PAGE_SIZE, flags | frag_flags);
/* handle the 4K area at the end */
if (frag_end != pe_end) {
addr += AMDGPU_GPU_PAGE_SIZE * count;
count = (pe_end - frag_end) / 8;
- amdgpu_vm_update_pages(adev, 0, NULL, ib, frag_end, addr,
+ amdgpu_vm_update_pages(adev, vm_update_params, frag_end, addr,
count, AMDGPU_GPU_PAGE_SIZE, flags);
}
}
@@ -707,8 +719,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
* @adev: amdgpu_device pointer
- * @src: address where to copy page table entries from
- * @pages_addr: DMA addresses to use for mapping
+ * @vm_update_params: see amdgpu_vm_update_params definition
* @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
@@ -718,10 +729,9 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
* Update the page tables in the range @start - @end.
*/
static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
- uint64_t src,
- dma_addr_t *pages_addr,
+ struct amdgpu_vm_update_params
+ *vm_update_params,
struct amdgpu_vm *vm,
- struct amdgpu_ib *ib,
uint64_t start, uint64_t end,
uint64_t dst, uint32_t flags)
{
@@ -747,7 +757,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
if (last_pe_end != pe_start) {
- amdgpu_vm_frag_ptes(adev, src, pages_addr, ib,
+ amdgpu_vm_frag_ptes(adev, vm_update_params,
last_pe_start, last_pe_end,
last_dst, flags);
@@ -762,7 +772,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
dst += nptes * AMDGPU_GPU_PAGE_SIZE;
}
- amdgpu_vm_frag_ptes(adev, src, pages_addr, ib, last_pe_start,
+ amdgpu_vm_frag_ptes(adev, vm_update_params, last_pe_start,
last_pe_end, last_dst, flags);
}
@@ -794,11 +804,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
void *owner = AMDGPU_FENCE_OWNER_VM;
unsigned nptes, ncmds, ndw;
struct amdgpu_job *job;
- struct amdgpu_ib *ib;
+ struct amdgpu_vm_update_params vm_update_params;
struct fence *f = NULL;
int r;
ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+ memset(&vm_update_params, 0, sizeof(vm_update_params));
+ vm_update_params.src = src;
+ vm_update_params.pages_addr = pages_addr;
/* sync to everything on unmapping */
if (!(flags & AMDGPU_PTE_VALID))
@@ -815,11 +828,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
/* padding, etc. */
ndw = 64;
- if (src) {
+ if (vm_update_params.src) {
/* only copy commands needed */
ndw += ncmds * 7;
- } else if (pages_addr) {
+ } else if (vm_update_params.pages_addr) {
/* header for write data commands */
ndw += ncmds * 4;
@@ -838,7 +851,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
return r;
- ib = &job->ibs[0];
+ vm_update_params.ib = &job->ibs[0];
r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
owner);
@@ -849,11 +862,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- amdgpu_vm_update_ptes(adev, src, pages_addr, vm, ib, start,
+ amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
last + 1, addr, flags);
- amdgpu_ring_pad_ib(ring, ib);
- WARN_ON(ib->length_dw > ndw);
+ amdgpu_ring_pad_ib(ring, vm_update_params.ib);
+ WARN_ON(vm_update_params.ib->length_dw > ndw);
r = amdgpu_job_submit(job, ring, &vm->entity,
AMDGPU_FENCE_OWNER_VM, &f);
if (r)
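
The amdgpu_vm hunks above fold the src/pages_addr/ib triple into a single amdgpu_vm_update_params that is threaded through the helpers, shrinking every call site. Condensed from the callers above, the resulting shape is:

    struct amdgpu_vm_update_params vm_update_params;

    memset(&vm_update_params, 0, sizeof(vm_update_params));
    vm_update_params.src = src;               /* copy source, if any */
    vm_update_params.pages_addr = pages_addr; /* DMA addresses, if any */
    vm_update_params.ib = &job->ibs[0];       /* set once the job exists */

    amdgpu_vm_update_ptes(adev, &vm_update_params, vm, start,
                          last + 1, addr, flags);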
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index 845c21b1b2ee..be3d6f79a864 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -103,7 +103,6 @@ static void cik_ih_disable_interrupts(struct amdgpu_device *adev)
*/
static int cik_ih_irq_init(struct amdgpu_device *adev)
{
- int ret = 0;
int rb_bufsz;
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
u64 wptr_off;
@@ -156,7 +155,7 @@ static int cik_ih_irq_init(struct amdgpu_device *adev)
/* enable irqs */
cik_ih_enable_interrupts(adev);
- return ret;
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index fa4449e126e6..933e425a8154 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -1579,7 +1579,6 @@ static int cz_dpm_update_sclk_limit(struct amdgpu_device *adev)
static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev)
{
- int ret = 0;
struct cz_power_info *pi = cz_get_pi(adev);
if (pi->caps_sclk_ds) {
@@ -1588,20 +1587,19 @@ static int cz_dpm_set_deep_sleep_sclk_threshold(struct amdgpu_device *adev)
CZ_MIN_DEEP_SLEEP_SCLK);
}
- return ret;
+ return 0;
}
/* ?? without dal support, is this still needed in setpowerstate list*/
static int cz_dpm_set_watermark_threshold(struct amdgpu_device *adev)
{
- int ret = 0;
struct cz_power_info *pi = cz_get_pi(adev);
cz_send_msg_to_smc_with_parameter(adev,
PPSMC_MSG_SetWatermarkFrequency,
pi->sclk_dpm.soft_max_clk);
- return ret;
+ return 0;
}
static int cz_dpm_enable_nbdpm(struct amdgpu_device *adev)
@@ -1636,7 +1634,6 @@ static void cz_dpm_nbdpm_lm_pstate_enable(struct amdgpu_device *adev,
static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
{
- int ret = 0;
struct cz_power_info *pi = cz_get_pi(adev);
struct cz_ps *ps = &pi->requested_ps;
@@ -1647,21 +1644,19 @@ static int cz_dpm_update_low_memory_pstate(struct amdgpu_device *adev)
cz_dpm_nbdpm_lm_pstate_enable(adev, true);
}
- return ret;
+ return 0;
}
/* with dpm enabled */
static int cz_dpm_set_power_state(struct amdgpu_device *adev)
{
- int ret = 0;
-
cz_dpm_update_sclk_limit(adev);
cz_dpm_set_deep_sleep_sclk_threshold(adev);
cz_dpm_set_watermark_threshold(adev);
cz_dpm_enable_nbdpm(adev);
cz_dpm_update_low_memory_pstate(adev);
- return ret;
+ return 0;
}
static void cz_dpm_post_set_power_state(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index 863cb16f6126..3d23a70b6432 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -103,7 +103,6 @@ static void cz_ih_disable_interrupts(struct amdgpu_device *adev)
*/
static int cz_ih_irq_init(struct amdgpu_device *adev)
{
- int ret = 0;
int rb_bufsz;
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
u64 wptr_off;
@@ -157,7 +156,7 @@ static int cz_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
cz_ih_enable_interrupts(adev);
- return ret;
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index c11b6007af80..af26ec0bc59d 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -137,7 +137,7 @@ static const u32 polaris11_golden_settings_a11[] =
mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
mmFBC_DEBUG1, 0xffffffff, 0x00000008,
- mmFBC_MISC, 0x9f313fff, 0x14300008,
+ mmFBC_MISC, 0x9f313fff, 0x14302008,
mmHDMI_CONTROL, 0x313f031f, 0x00000011,
};
@@ -145,7 +145,7 @@ static const u32 polaris10_golden_settings_a11[] =
{
mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
- mmFBC_MISC, 0x9f313fff, 0x14300008,
+ mmFBC_MISC, 0x9f313fff, 0x14302008,
mmHDMI_CONTROL, 0x313f031f, 0x00000011,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 92647fbf5b8b..f19bab68fd83 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -267,10 +267,13 @@ static const u32 tonga_mgcg_cgcg_init[] =
static const u32 golden_settings_polaris11_a11[] =
{
+ mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+ mmPA_SC_RASTER_CONFIG, 0x3f3fffff, 0x16000012,
+ mmPA_SC_RASTER_CONFIG_1, 0x0000003f, 0x00000000,
mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
mmRLC_CGCG_CGLS_CTRL_3D, 0xffffffff, 0x0001003c,
mmSQ_CONFIG, 0x07f80000, 0x07180000,
@@ -284,8 +287,6 @@ static const u32 golden_settings_polaris11_a11[] =
static const u32 polaris11_golden_common_all[] =
{
mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
- mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x16000012,
- mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
mmGB_ADDR_CONFIG, 0xffffffff, 0x22011002,
mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
@@ -296,6 +297,7 @@ static const u32 polaris11_golden_common_all[] =
static const u32 golden_settings_polaris10_a11[] =
{
mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
+ mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
mmDB_DEBUG2, 0xf00fffff, 0x00000400,
mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
@@ -5725,6 +5727,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
EOP_TC_ACTION_EN |
+ EOP_TC_WB_ACTION_EN |
EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
EVENT_INDEX(5)));
amdgpu_ring_write(ring, addr & 0xfffffffc);
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index 39bfc52d0b42..3b8906ce3511 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -103,7 +103,6 @@ static void iceland_ih_disable_interrupts(struct amdgpu_device *adev)
*/
static int iceland_ih_irq_init(struct amdgpu_device *adev)
{
- int ret = 0;
int rb_bufsz;
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
u64 wptr_off;
@@ -157,7 +156,7 @@ static int iceland_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
iceland_ih_enable_interrupts(adev);
- return ret;
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index b45f54714574..a789a863d677 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2252,7 +2252,7 @@ static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
if (pi->caps_stable_p_state) {
stable_p_state_sclk = (max_limits->sclk * 75) / 100;
- for (i = table->count - 1; i >= 0; i++) {
+ for (i = table->count - 1; i >= 0; i--) {
if (stable_p_state_sclk >= table->entries[i].clk) {
stable_p_state_sclk = table->entries[i].clk;
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 063f08a9957a..31d99b0010f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -109,10 +109,12 @@ static const u32 fiji_mgcg_cgcg_init[] =
static const u32 golden_settings_polaris11_a11[] =
{
mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
+ mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
+ mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index f036af937fbc..c92055805a45 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -99,7 +99,6 @@ static void tonga_ih_disable_interrupts(struct amdgpu_device *adev)
*/
static int tonga_ih_irq_init(struct amdgpu_device *adev)
{
- int ret = 0;
int rb_bufsz;
u32 interrupt_cntl, ih_rb_cntl, ih_doorbell_rtpr;
u64 wptr_off;
@@ -165,7 +164,7 @@ static int tonga_ih_irq_init(struct amdgpu_device *adev)
/* enable interrupts */
tonga_ih_enable_interrupts(adev);
- return ret;
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index c94f9faa220a..24a16e49b571 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -3573,46 +3573,11 @@ static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr)
return 0;
}
-static void fiji_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_clock_voltage_dependency_table *table =
- table_info->vddc_dep_on_dal_pwrl;
- struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
- enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
- uint32_t req_vddc = 0, req_volt, i;
-
- if (!table && !(dal_power_level >= PP_DAL_POWERLEVEL_ULTRALOW &&
- dal_power_level <= PP_DAL_POWERLEVEL_PERFORMANCE))
- return;
-
- for (i= 0; i < table->count; i++) {
- if (dal_power_level == table->entries[i].clk) {
- req_vddc = table->entries[i].v;
- break;
- }
- }
-
- vddc_table = table_info->vdd_dep_on_sclk;
- for (i= 0; i < vddc_table->count; i++) {
- if (req_vddc <= vddc_table->entries[i].vddc) {
- req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE)
- << VDDC_SHIFT;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_VddC_Request, req_volt);
- return;
- }
- }
- printk(KERN_ERR "DAL requested level can not"
- " found a available voltage in VDDC DPM Table \n");
-}
-
static int fiji_upload_dpmlevel_enable_mask(struct pp_hwmgr *hwmgr)
{
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
- fiji_apply_dal_min_voltage_request(hwmgr);
+ phm_apply_dal_min_voltage_request(hwmgr);
if (!data->sclk_dpm_key_disabled) {
if (data->dpm_level_enable_mask.sclk_dpm_enable_mask)
@@ -4349,7 +4314,7 @@ static int fiji_populate_and_upload_sclk_mclk_dpm_levels(
if (data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
- result = fiji_populate_all_memory_levels(hwmgr);
+ result = fiji_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
return result);
@@ -5109,11 +5074,11 @@ static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
if (!data->soft_pp_table) {
- data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+ data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
+ hwmgr->soft_pp_table_size,
+ GFP_KERNEL);
if (!data->soft_pp_table)
return -ENOMEM;
- memcpy(data->soft_pp_table, hwmgr->soft_pp_table,
- hwmgr->soft_pp_table_size);
}
*table = (char *)&data->soft_pp_table;
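
The kmemdup() conversion here (repeated below for polaris10 and tonga) is a pure simplification: kmemdup(src, len, gfp) is the allocate-and-copy pair in one call, and the old kzalloc()'s zeroing was redundant since memcpy() overwrote the whole buffer anyway. Roughly:

	/* What kmemdup(src, len, gfp) does internally, give or take the
	 * allocation-tracking details: */
	void *p = kmalloc(len, gfp);

	if (p)
		memcpy(p, src, len);
	return p;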
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 7d69ed635bc2..1c48917da3cf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -30,6 +30,9 @@
#include "pppcielanes.h"
#include "pp_debug.h"
#include "ppatomctrl.h"
+#include "ppsmc.h"
+
+#define VOLTAGE_SCALE 4
extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr);
extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr);
@@ -566,3 +569,38 @@ uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
return level;
}
+
+void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
+{
+ struct phm_ppt_v1_information *table_info =
+ (struct phm_ppt_v1_information *)hwmgr->pptable;
+ struct phm_clock_voltage_dependency_table *table =
+ table_info->vddc_dep_on_dal_pwrl;
+ struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
+ enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
+ uint32_t req_vddc = 0, req_volt, i;
+
+ if (!table || table->count <= 0
+ || dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
+ || dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
+ return;
+
+ for (i = 0; i < table->count; i++) {
+ if (dal_power_level == table->entries[i].clk) {
+ req_vddc = table->entries[i].v;
+ break;
+ }
+ }
+
+ vddc_table = table_info->vdd_dep_on_sclk;
+ for (i = 0; i < vddc_table->count; i++) {
+ if (req_vddc <= vddc_table->entries[i].vddc) {
+ req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_VddC_Request, req_volt);
+ return;
+ }
+ }
+ printk(KERN_ERR "DAL requested level can not"
+ " found a available voltage in VDDC DPM Table \n");
+}
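
One behavioral detail of the consolidated helper: the VDDC_SHIFT left-shift present in the per-asic copies is gone, so the SMC message parameter is now just the table voltage times VOLTAGE_SCALE. A worked example, assuming the vddc entries are in millivolts and the SMC expects millivolts scaled by VOLTAGE_SCALE:

	uint32_t vddc = 1150;				/* mV, hypothetical vdd_dep_on_sclk entry */
	uint32_t req_volt = vddc * VOLTAGE_SCALE;	/* 1150 * 4 = 4600 */

	smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
					    PPSMC_MSG_VddC_Request, req_volt);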
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
index 93768fa1dcdc..aa6be033f21b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
@@ -189,41 +189,6 @@ int phm_get_current_pcie_lane_number(struct pp_hwmgr *hwmgr)
return decode_pcie_lane_width(link_width);
}
-void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
-{
- struct phm_ppt_v1_information *table_info =
- (struct phm_ppt_v1_information *)hwmgr->pptable;
- struct phm_clock_voltage_dependency_table *table =
- table_info->vddc_dep_on_dal_pwrl;
- struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
- enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
- uint32_t req_vddc = 0, req_volt, i;
-
- if (!table && !(dal_power_level >= PP_DAL_POWERLEVEL_ULTRALOW &&
- dal_power_level <= PP_DAL_POWERLEVEL_PERFORMANCE))
- return;
-
- for (i = 0; i < table->count; i++) {
- if (dal_power_level == table->entries[i].clk) {
- req_vddc = table->entries[i].v;
- break;
- }
- }
-
- vddc_table = table_info->vdd_dep_on_sclk;
- for (i = 0; i < vddc_table->count; i++) {
- if (req_vddc <= vddc_table->entries[i].vddc) {
- req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE)
- << VDDC_SHIFT;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_VddC_Request, req_volt);
- return;
- }
- }
- printk(KERN_ERR "DAL requested level can not"
- " found a available voltage in VDDC DPM Table \n");
-}
-
/**
* Enable voltage control
*
@@ -2091,7 +2056,7 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
"Failed to populate Clock Stretcher Data Table!",
return result);
}
-
+ table->CurrSclkPllRange = 0xff;
table->GraphicsVoltageChangeEnable = 1;
table->GraphicsThermThrottleEnable = 1;
table->GraphicsInterval = 1;
@@ -2184,6 +2149,7 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask1);
CONVERT_FROM_HOST_TO_SMC_UL(table->SmioMask2);
CONVERT_FROM_HOST_TO_SMC_UL(table->SclkStepSize);
+ CONVERT_FROM_HOST_TO_SMC_UL(table->CurrSclkPllRange);
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitHigh);
CONVERT_FROM_HOST_TO_SMC_US(table->TemperatureLimitLow);
CONVERT_FROM_HOST_TO_SMC_US(table->VoltageResponseTime);
@@ -4760,11 +4726,11 @@ static int polaris10_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
if (!data->soft_pp_table) {
- data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+ data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
+ hwmgr->soft_pp_table_size,
+ GFP_KERNEL);
if (!data->soft_pp_table)
return -ENOMEM;
- memcpy(data->soft_pp_table, hwmgr->soft_pp_table,
- hwmgr->soft_pp_table_size);
}
*table = (char *)&data->soft_pp_table;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index 1faad92b50d3..16fed487973b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -5331,7 +5331,7 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
(data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
PP_ASSERT_WITH_CODE(
- true == tonga_is_dpm_running(hwmgr),
+ 0 == tonga_is_dpm_running(hwmgr),
"Trying to freeze SCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(
@@ -5344,7 +5344,7 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
if ((0 == data->mclk_dpm_key_disabled) &&
(data->need_update_smu7_dpm_table &
DPMTABLE_OD_UPDATE_MCLK)) {
- PP_ASSERT_WITH_CODE(true == tonga_is_dpm_running(hwmgr),
+ PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
"Trying to freeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(
@@ -5445,7 +5445,7 @@ static int tonga_populate_and_upload_sclk_mclk_dpm_levels(struct pp_hwmgr *hwmgr
}
if (data->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) {
- result = tonga_populate_all_memory_levels(hwmgr);
+ result = tonga_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
return result);
@@ -5647,7 +5647,7 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
(data->need_update_smu7_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
- PP_ASSERT_WITH_CODE(true == tonga_is_dpm_running(hwmgr),
+ PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr),
"Trying to Unfreeze SCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(
@@ -5661,7 +5661,7 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
PP_ASSERT_WITH_CODE(
- true == tonga_is_dpm_running(hwmgr),
+ 0 == tonga_is_dpm_running(hwmgr),
"Trying to Unfreeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(
@@ -6056,11 +6056,11 @@ static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
if (!data->soft_pp_table) {
- data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
+ data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
+ hwmgr->soft_pp_table_size,
+ GFP_KERNEL);
if (!data->soft_pp_table)
return -ENOMEM;
- memcpy(data->soft_pp_table, hwmgr->soft_pp_table,
- hwmgr->soft_pp_table_size);
}
*table = (char *)&data->soft_pp_table;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index fd4ce7aaeee9..28f571449495 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -673,7 +673,7 @@ extern int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, phm_ppt_v1_volta
extern int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr);
extern int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr);
extern uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask);
-
+extern void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr);
#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
index da18f44fd1c8..87c023e518ab 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/cz_smumgr.c
@@ -639,7 +639,7 @@ static int cz_smu_populate_firmware_entries(struct pp_smumgr *smumgr)
cz_smu->driver_buffer_length = 0;
- for (i = 0; i < sizeof(firmware_list)/sizeof(*firmware_list); i++) {
+ for (i = 0; i < ARRAY_SIZE(firmware_list); i++) {
firmware_type = cz_translate_firmware_enum_to_arg(smumgr,
firmware_list[i]);
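
ARRAY_SIZE() computes the same element count as the open-coded sizeof division, but the kernel's definition (roughly the following, from include/linux/kernel.h) also breaks the build if it is handed a pointer rather than a true array:

	#define ARRAY_SIZE(arr) \
		(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))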
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
new file mode 100644
index 000000000000..a7b2a751f6fe
--- /dev/null
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <drm/drm_dp_dual_mode_helper.h>
+#include <drm/drmP.h>
+
+/**
+ * DOC: dp dual mode helpers
+ *
+ * Helper functions to deal with DP dual mode (aka. DP++) adaptors.
+ *
+ * Type 1:
+ * Adaptor registers (if any) and the sink DDC bus may be accessed via I2C.
+ *
+ * Type 2:
+ * Adaptor registers and sink DDC bus can be accessed either via I2C or
+ * I2C-over-AUX. Source devices may choose to implement either of these
+ * access methods.
+ */
+
+#define DP_DUAL_MODE_SLAVE_ADDRESS 0x40
+
+/**
+ * drm_dp_dual_mode_read - Read from the DP dual mode adaptor register(s)
+ * @adapter: I2C adapter for the DDC bus
+ * @offset: register offset
+ * @buffer: buffer for return data
+ * @size: size of the buffer
+ *
+ * Reads @size bytes from the DP dual mode adaptor registers
+ * starting at @offset.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure
+ */
+ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
+ u8 offset, void *buffer, size_t size)
+{
+ struct i2c_msg msgs[] = {
+ {
+ .addr = DP_DUAL_MODE_SLAVE_ADDRESS,
+ .flags = 0,
+ .len = 1,
+ .buf = &offset,
+ },
+ {
+ .addr = DP_DUAL_MODE_SLAVE_ADDRESS,
+ .flags = I2C_M_RD,
+ .len = size,
+ .buf = buffer,
+ },
+ };
+ int ret;
+
+ ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret < 0)
+ return ret;
+ if (ret != ARRAY_SIZE(msgs))
+ return -EPROTO;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dual_mode_read);
+
+/**
+ * drm_dp_dual_mode_write - Write to the DP dual mode adaptor register(s)
+ * @adapter: I2C adapter for the DDC bus
+ * @offset: register offset
+ * @buffer: buffer for write data
+ * @size: size of the buffer
+ *
+ * Writes @size bytes to the DP dual mode adaptor registers
+ * starting at @offset.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure
+ */
+ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter,
+ u8 offset, const void *buffer, size_t size)
+{
+ struct i2c_msg msg = {
+ .addr = DP_DUAL_MODE_SLAVE_ADDRESS,
+ .flags = 0,
+ .len = 1 + size,
+ .buf = NULL,
+ };
+ void *data;
+ int ret;
+
+ data = kmalloc(msg.len, GFP_TEMPORARY);
+ if (!data)
+ return -ENOMEM;
+
+ msg.buf = data;
+
+ memcpy(data, &offset, 1);
+ memcpy(data + 1, buffer, size);
+
+ ret = i2c_transfer(adapter, &msg, 1);
+
+ kfree(data);
+
+ if (ret < 0)
+ return ret;
+ if (ret != 1)
+ return -EPROTO;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dual_mode_write);
+
+static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN])
+{
+ static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] =
+ "DP-HDMI ADAPTOR\x04";
+
+ return memcmp(hdmi_id, dp_dual_mode_hdmi_id,
+ sizeof(dp_dual_mode_hdmi_id)) == 0;
+}
+
+static bool is_type2_adaptor(uint8_t adaptor_id)
+{
+ return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 |
+ DP_DUAL_MODE_REV_TYPE2);
+}
+
+/**
+ * drm_dp_dual_mode_detect - Identify the DP dual mode adaptor
+ * @adapter: I2C adapter for the DDC bus
+ *
+ * Attempt to identify the type of the DP dual mode adaptor used.
+ *
+ * Note that when the answer is @DRM_DP_DUAL_MODE_UNKNOWN it's not
+ * certain whether we're dealing with a native HDMI port or
+ * a type 1 DVI dual mode adaptor. The driver will have to use
+ * some other hardware/driver specific mechanism to make that
+ * distinction.
+ *
+ * Returns:
+ * The type of the DP dual mode adaptor used
+ */
+enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
+{
+ char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = {};
+ uint8_t adaptor_id = 0x00;
+ ssize_t ret;
+
+ /*
+ * Let's see if the adaptor is there by reading the
+ * HDMI ID registers.
+ *
+ * Note that type 1 DVI adaptors are not required to implement
+ * any registers, and that presents a problem for detection.
+ * If the i2c transfer is nacked, we may or may not be dealing
+ * with a type 1 DVI adaptor. Some other mechanism of detecting
+ * the presence of the adaptor is required. One way would be
+ * to check the state of the CONFIG1 pin; another method would
+ * simply require the driver to know whether the port is a DP++
+ * port or a native HDMI port. Both of these methods are entirely
+ * hardware/driver specific so we can't deal with them here.
+ */
+ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID,
+ hdmi_id, sizeof(hdmi_id));
+ if (ret)
+ return DRM_DP_DUAL_MODE_UNKNOWN;
+
+ /*
+ * Sigh. Some (maybe all?) type 1 adaptors are broken and ack
+ * the offset but ignore it, and instead they just always return
+ * data from the start of the HDMI ID buffer. So for a broken
+ * type 1 HDMI adaptor a single byte read will always give us
+ * 0x44, and for a type 1 DVI adaptor it should give 0x00
+ * (assuming it implements any registers). Fortunately neither
+ * of those values will match the type 2 signature of the
+ * DP_DUAL_MODE_ADAPTOR_ID register so we can proceed with
+ * the type 2 adaptor detection safely even in the presence
+ * of broken type 1 adaptors.
+ */
+ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
+ &adaptor_id, sizeof(adaptor_id));
+ if (ret == 0) {
+ if (is_type2_adaptor(adaptor_id)) {
+ if (is_hdmi_adaptor(hdmi_id))
+ return DRM_DP_DUAL_MODE_TYPE2_HDMI;
+ else
+ return DRM_DP_DUAL_MODE_TYPE2_DVI;
+ }
+ }
+
+ if (is_hdmi_adaptor(hdmi_id))
+ return DRM_DP_DUAL_MODE_TYPE1_HDMI;
+ else
+ return DRM_DP_DUAL_MODE_TYPE1_DVI;
+}
+EXPORT_SYMBOL(drm_dp_dual_mode_detect);
+
+/**
+ * drm_dp_dual_mode_max_tmds_clock - Max TMDS clock for DP dual mode adaptor
+ * @type: DP dual mode adaptor type
+ * @adapter: I2C adapter for the DDC bus
+ *
+ * Determine the max TMDS clock the adaptor supports based on the
+ * type of the dual mode adaptor and the DP_DUAL_MODE_MAX_TMDS_CLOCK
+ * register (on type2 adaptors). As some type 1 adaptors have
+ * problems with registers (see comments in drm_dp_dual_mode_detect())
+ * we don't read the register on those, instead we simply assume
+ * a 165 MHz limit based on the specification.
+ *
+ * Returns:
+ * Maximum supported TMDS clock rate for the DP dual mode adaptor in kHz.
+ */
+int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type,
+ struct i2c_adapter *adapter)
+{
+ uint8_t max_tmds_clock;
+ ssize_t ret;
+
+ /* native HDMI so no limit */
+ if (type == DRM_DP_DUAL_MODE_NONE)
+ return 0;
+
+ /*
+ * Type 1 adaptors are limited to 165 MHz.
+ * Type 2 adaptors can tell us their limit.
+ */
+ if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
+ return 165000;
+
+ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_MAX_TMDS_CLOCK,
+ &max_tmds_clock, sizeof(max_tmds_clock));
+ if (ret || max_tmds_clock == 0x00 || max_tmds_clock == 0xff) {
+ DRM_DEBUG_KMS("Failed to query max TMDS clock\n");
+ return 165000;
+ }
+
+ return max_tmds_clock * 5000 / 2;
+}
+EXPORT_SYMBOL(drm_dp_dual_mode_max_tmds_clock);
+
+/**
+ * drm_dp_dual_mode_get_tmds_output - Get the state of the TMDS output buffers in the DP dual mode adaptor
+ * @type: DP dual mode adaptor type
+ * @adapter: I2C adapter for the DDC bus
+ * @enabled: current state of the TMDS output buffers
+ *
+ * Get the state of the TMDS output buffers in the adaptor. For
+ * type2 adaptors this is queried from the DP_DUAL_MODE_TMDS_OEN
+ * register. As some type 1 adaptors have problems with registers
+ * (see comments in drm_dp_dual_mode_detect()) we don't read the
+ * register on those, instead we simply assume that the buffers
+ * are always enabled.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure
+ */
+int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type,
+ struct i2c_adapter *adapter,
+ bool *enabled)
+{
+ uint8_t tmds_oen;
+ ssize_t ret;
+
+ if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) {
+ *enabled = true;
+ return 0;
+ }
+
+ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
+ &tmds_oen, sizeof(tmds_oen));
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to query state of TMDS output buffers\n");
+ return ret;
+ }
+
+ *enabled = !(tmds_oen & DP_DUAL_MODE_TMDS_DISABLE);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output);
+
+/**
+ * drm_dp_dual_mode_set_tmds_output - Enable/disable TMDS output buffers in the DP dual mode adaptor
+ * @type: DP dual mode adaptor type
+ * @adapter: I2C adapter for the DDC bus
+ * @enable: enable (as opposed to disable) the TMDS output buffers
+ *
+ * Set the state of the TMDS output buffers in the adaptor. For
+ * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. As
+ * some type 1 adaptors have problems with registers (see comments
+ * in drm_dp_dual_mode_detect()) we avoid touching the register,
+ * making this function a no-op on type 1 adaptors.
+ *
+ * Returns:
+ * 0 on success, negative error code on failure
+ */
+int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
+ struct i2c_adapter *adapter, bool enable)
+{
+ uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
+ ssize_t ret;
+
+ if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
+ return 0;
+
+ ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
+ &tmds_oen, sizeof(tmds_oen));
+ if (ret) {
+ DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
+ enable ? "enable" : "disable");
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
+
+/**
+ * drm_dp_get_dual_mode_type_name - Get the name of the DP dual mode adaptor type as a string
+ * @type: DP dual mode adaptor type
+ *
+ * Returns:
+ * String representation of the DP dual mode adaptor type
+ */
+const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type)
+{
+ switch (type) {
+ case DRM_DP_DUAL_MODE_NONE:
+ return "none";
+ case DRM_DP_DUAL_MODE_TYPE1_DVI:
+ return "type 1 DVI";
+ case DRM_DP_DUAL_MODE_TYPE1_HDMI:
+ return "type 1 HDMI";
+ case DRM_DP_DUAL_MODE_TYPE2_DVI:
+ return "type 2 DVI";
+ case DRM_DP_DUAL_MODE_TYPE2_HDMI:
+ return "type 2 HDMI";
+ default:
+ WARN_ON(type != DRM_DP_DUAL_MODE_UNKNOWN);
+ return "unknown";
+ }
+}
+EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name);
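
Taken together, the new helpers give drivers a three-step recipe, which the i915 hunks below follow: detect the adaptor on the DDC bus, query its TMDS clock limit, and gate its TMDS output buffers around port enable/disable. A usage sketch (the i2c "adapter" is assumed to be the port's DDC bus):

	enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter);
	int max_tmds_clock = drm_dp_dual_mode_max_tmds_clock(type, adapter);

	/* The register stores the limit in 2.5 MHz units, hence the
	 * "* 5000 / 2" conversion to kHz: a raw value of 120 decodes to
	 * 120 * 5000 / 2 = 300000 kHz (300 MHz). */

	drm_dp_dual_mode_set_tmds_output(type, adapter, true);	/* before port enable */
	/* ... modeset ... */
	drm_dp_dual_mode_set_tmds_output(type, adapter, false);	/* after port disable */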
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 15615fb9bde6..b3198fcd0536 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1183,6 +1183,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (ret)
return ret;
+ ret = i915_ggtt_enable_hw(dev);
+ if (ret) {
+ DRM_ERROR("failed to enable GGTT\n");
+ goto out_ggtt;
+ }
+
/* WARNING: Apparently we must kick fbdev drivers before vgacon,
* otherwise the vga fbdev driver falls over. */
ret = i915_kick_out_firmware_fb(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index d37c0a671eed..f313b4d8344f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -734,9 +734,14 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
static int i915_drm_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
disable_rpm_wakeref_asserts(dev_priv);
+ ret = i915_ggtt_enable_hw(dev);
+ if (ret)
+ DRM_ERROR("failed to re-enable GGTT\n");
+
intel_csr_ucode_resume(dev_priv);
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b87ca4fae20a..5faacc6e548d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3482,6 +3482,7 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
enum port port);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9b99490e8367..aad26851cee3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1456,7 +1456,10 @@ i915_wait_request(struct drm_i915_gem_request *req)
if (ret)
return ret;
- __i915_gem_request_retire__upto(req);
+ /* If the GPU hung, we want to keep the requests to find the guilty. */
+ if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error))
+ __i915_gem_request_retire__upto(req);
+
return 0;
}
@@ -1513,7 +1516,8 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
else if (obj->last_write_req == req)
i915_gem_object_retire__write(obj);
- __i915_gem_request_retire__upto(req);
+ if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error))
+ __i915_gem_request_retire__upto(req);
}
/* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -4860,9 +4864,6 @@ i915_gem_init_hw(struct drm_device *dev)
struct intel_engine_cs *engine;
int ret, j;
- if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
- return -EIO;
-
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0d666b3f7e9b..92acdff9dad3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3236,6 +3236,14 @@ out_gtt_cleanup:
return ret;
}
+int i915_ggtt_enable_hw(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
+ return -EIO;
+
+ return 0;
+}
+
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index d7dd3d8a8758..0008543d55f6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -514,6 +514,7 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
}
int i915_ggtt_init_hw(struct drm_device *dev);
+int i915_ggtt_enable_hw(struct drm_device *dev);
void i915_gem_init_ggtt(struct drm_device *dev);
void i915_ggtt_cleanup_hw(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index e72dd9a8d6bf..b235b6e88ead 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1578,6 +1578,42 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
return false;
}
+bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port)
+{
+ static const struct {
+ u16 dp, hdmi;
+ } port_mapping[] = {
+ /*
+ * Buggy VBTs may declare DP ports as having
+ * HDMI type dvo_port :( So let's check both.
+ */
+ [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
+ [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
+ [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+ [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+ };
+ int i;
+
+ if (port == PORT_A || port >= ARRAY_SIZE(port_mapping))
+ return false;
+
+ if (!dev_priv->vbt.child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+ const union child_device_config *p_child =
+ &dev_priv->vbt.child_dev[i];
+
+ if ((p_child->common.dvo_port == port_mapping[port].dp ||
+ p_child->common.dvo_port == port_mapping[port].hdmi) &&
+ (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) ==
+ (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS))
+ return true;
+ }
+
+ return false;
+}
+
/**
* intel_bios_is_dsi_present - is DSI present in VBT
* @dev_priv: i915 device instance
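
The device_type test in intel_bios_is_port_dp_dual_mode() above is a match-under-mask: only the bits that define "DP dual mode" are compared, and everything else in the child device's type word is ignored. The idiom, stripped down to a hypothetical helper:

	/* True when "type" agrees with "want" on every bit in "mask". */
	static bool device_type_matches(u16 type, u16 want, u16 mask)
	{
		return (type & mask) == (want & mask);
	}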
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 3fac04602a25..01e523df363b 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1601,6 +1601,12 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
+ if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
+ }
+
intel_prepare_ddi_buffer(intel_encoder);
if (type == INTEL_OUTPUT_EDP) {
@@ -1667,6 +1673,12 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
DPLL_CTRL2_DDI_CLK_OFF(port)));
else if (INTEL_INFO(dev)->gen < 9)
I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+
+ if (type == INTEL_OUTPUT_HDMI) {
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+
+ intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
+ }
}
static void intel_enable_ddi(struct intel_encoder *intel_encoder)
@@ -2180,8 +2192,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config))
pipe_config->has_infoframe = true;
- break;
+ /* fall through */
case TRANS_DDI_MODE_SELECT_DVI:
+ pipe_config->lane_count = 4;
+ break;
case TRANS_DDI_MODE_SELECT_FDI:
break;
case TRANS_DDI_MODE_SELECT_DP_SST:
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 46f9be3ad5a2..2113f401f0ba 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12005,6 +12005,9 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
return ret;
}
+ } else if (dev_priv->display.compute_intermediate_wm) {
+ if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
+ pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk;
}
if (INTEL_INFO(dev)->gen >= 9) {
@@ -15990,6 +15993,9 @@ retry:
state->acquire_ctx = &ctx;
+ /* ignore any reset values/BIOS leftovers in the WM registers */
+ to_intel_atomic_state(state)->skip_intermediate_wm = true;
+
for_each_crtc_in_state(state, crtc, crtc_state, i) {
/*
* Force recalculation even if we restore
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 639bf0209c15..3ac705936b04 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -1702,9 +1702,9 @@ static const struct intel_dpll_mgr hsw_pll_mgr = {
static const struct dpll_info skl_plls[] = {
{ "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON },
- { "DPPL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 },
- { "DPPL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 },
- { "DPPL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 },
+ { "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 },
+ { "DPLL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 },
+ { "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 },
{ NULL, -1, NULL, },
};
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 5da29a02b9e3..a28b4aac1e02 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -33,6 +33,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_atomic.h>
@@ -753,6 +754,10 @@ struct cxsr_latency {
struct intel_hdmi {
i915_reg_t hdmi_reg;
int ddc_bus;
+ struct {
+ enum drm_dp_dual_mode_type type;
+ int max_tmds_clock;
+ } dp_dual_mode;
bool limited_color_range;
bool color_range_auto;
bool has_hdmi_sink;
@@ -1401,6 +1406,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
bool intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
+void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
/* intel_lvds.c */
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 2b22bb9bb86f..366ad6c67ce4 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -46,6 +46,22 @@ static const struct {
},
};
+/* return pixels in terms of txbyteclkhs */
+static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
+ u16 burst_mode_ratio)
+{
+ return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
+ 8 * 100), lane_count);
+}
+
+/* return pixels equivalent to txbyteclkhs */
+static u16 pixels_from_txbyteclkhs(u16 clk_hs, int bpp, int lane_count,
+ u16 burst_mode_ratio)
+{
+ return DIV_ROUND_UP((clk_hs * lane_count * 8 * 100),
+ (bpp * burst_mode_ratio));
+}
+
enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt)
{
/* It just so happens the VBT matches register contents. */
@@ -780,10 +796,19 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode =
&pipe_config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode_sw;
+ struct intel_crtc *intel_crtc;
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
+ unsigned int lane_count = intel_dsi->lane_count;
unsigned int bpp, fmt;
enum port port;
- u16 vfp, vsync, vbp;
+ u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp;
+ u16 hfp_sw, hsync_sw, hbp_sw;
+ u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw,
+ crtc_hblank_start_sw, crtc_hblank_end_sw;
+
+ intel_crtc = to_intel_crtc(encoder->base.crtc);
+ adjusted_mode_sw = &intel_crtc->config->base.adjusted_mode;
/*
* Atleast one port is active as encoder->get_config called only if
@@ -808,26 +833,118 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
adjusted_mode->crtc_vtotal =
I915_READ(BXT_MIPI_TRANS_VTOTAL(port));
+ hactive = adjusted_mode->crtc_hdisplay;
+ hfp = I915_READ(MIPI_HFP_COUNT(port));
+
/*
- * TODO: Retrieve hfp, hsync and hbp. Adjust them for dual link and
- * calculate hsync_start, hsync_end, htotal and hblank_end
+ * Meaningful for video mode non-burst sync pulse mode only,
+ * can be zero for non-burst sync events and burst modes
*/
+ hsync = I915_READ(MIPI_HSYNC_PADDING_COUNT(port));
+ hbp = I915_READ(MIPI_HBP_COUNT(port));
+
+ /* horizontal values are in terms of high-speed byte clock */
+ hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hsync = pixels_from_txbyteclkhs(hsync, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hbp = pixels_from_txbyteclkhs(hbp, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+
+ if (intel_dsi->dual_link) {
+ hfp *= 2;
+ hsync *= 2;
+ hbp *= 2;
+ }
/* vertical values are in terms of lines */
vfp = I915_READ(MIPI_VFP_COUNT(port));
vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port));
vbp = I915_READ(MIPI_VBP_COUNT(port));
+ adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp;
+ adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay;
+ adjusted_mode->crtc_hsync_end = hsync + adjusted_mode->crtc_hsync_start;
adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay;
+ adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal;
- adjusted_mode->crtc_vsync_start =
- vfp + adjusted_mode->crtc_vdisplay;
- adjusted_mode->crtc_vsync_end =
- vsync + adjusted_mode->crtc_vsync_start;
+ adjusted_mode->crtc_vsync_start = vfp + adjusted_mode->crtc_vdisplay;
+ adjusted_mode->crtc_vsync_end = vsync + adjusted_mode->crtc_vsync_start;
adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay;
adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal;
-}
+ /*
+ * On BXT DSI, some horizontal timings are programmed in txbyteclkhs
+ * rather than pixels, so reading them back accumulates round-up
+ * errors from the pixels<==>txbyteclkhs conversions.
+ * Here, for the given adjusted_mode, we calculate the value that
+ * would be programmed to the port and convert it back to a
+ * horizontal timing in pixels. This is the expected value, round-up
+ * errors included, and if it matches the value retrieved from the
+ * port, the (HW state) adjusted_mode's horizontal timings are
+ * corrected to match the SW state, nullifying the errors.
+ */
+ /* Calculating the value programmed to the Port register */
+ hfp_sw = adjusted_mode_sw->crtc_hsync_start -
+ adjusted_mode_sw->crtc_hdisplay;
+ hsync_sw = adjusted_mode_sw->crtc_hsync_end -
+ adjusted_mode_sw->crtc_hsync_start;
+ hbp_sw = adjusted_mode_sw->crtc_htotal -
+ adjusted_mode_sw->crtc_hsync_end;
+
+ if (intel_dsi->dual_link) {
+ hfp_sw /= 2;
+ hsync_sw /= 2;
+ hbp_sw /= 2;
+ }
+
+ hfp_sw = txbyteclkhs(hfp_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hsync_sw = txbyteclkhs(hsync_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hbp_sw = txbyteclkhs(hbp_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+
+ /* Reverse calculating the adjusted mode parameters from port reg vals*/
+ hfp_sw = pixels_from_txbyteclkhs(hfp_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hsync_sw = pixels_from_txbyteclkhs(hsync_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+ hbp_sw = pixels_from_txbyteclkhs(hbp_sw, bpp, lane_count,
+ intel_dsi->burst_mode_ratio);
+
+ if (intel_dsi->dual_link) {
+ hfp_sw *= 2;
+ hsync_sw *= 2;
+ hbp_sw *= 2;
+ }
+
+ crtc_htotal_sw = adjusted_mode_sw->crtc_hdisplay + hfp_sw +
+ hsync_sw + hbp_sw;
+ crtc_hsync_start_sw = hfp_sw + adjusted_mode_sw->crtc_hdisplay;
+ crtc_hsync_end_sw = hsync_sw + crtc_hsync_start_sw;
+ crtc_hblank_start_sw = adjusted_mode_sw->crtc_hdisplay;
+ crtc_hblank_end_sw = crtc_htotal_sw;
+
+ if (adjusted_mode->crtc_htotal == crtc_htotal_sw)
+ adjusted_mode->crtc_htotal = adjusted_mode_sw->crtc_htotal;
+
+ if (adjusted_mode->crtc_hsync_start == crtc_hsync_start_sw)
+ adjusted_mode->crtc_hsync_start =
+ adjusted_mode_sw->crtc_hsync_start;
+
+ if (adjusted_mode->crtc_hsync_end == crtc_hsync_end_sw)
+ adjusted_mode->crtc_hsync_end =
+ adjusted_mode_sw->crtc_hsync_end;
+
+ if (adjusted_mode->crtc_hblank_start == crtc_hblank_start_sw)
+ adjusted_mode->crtc_hblank_start =
+ adjusted_mode_sw->crtc_hblank_start;
+
+ if (adjusted_mode->crtc_hblank_end == crtc_hblank_end_sw)
+ adjusted_mode->crtc_hblank_end =
+ adjusted_mode_sw->crtc_hblank_end;
+}
static void intel_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
@@ -891,14 +1008,6 @@ static u16 txclkesc(u32 divider, unsigned int us)
}
}
-/* return pixels in terms of txbyteclkhs */
-static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
- u16 burst_mode_ratio)
-{
- return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio,
- 8 * 100), lane_count);
-}
-
static void set_dsi_timings(struct drm_encoder *encoder,
const struct drm_display_mode *adjusted_mode)
{
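
A worked example of the round-up error that the readback correction above compensates for, with bpp = 24, lane_count = 4 and burst_mode_ratio = 100:

	txbyteclkhs(101)            = DIV_ROUND_UP(DIV_ROUND_UP(101 * 24 * 100, 800), 4)
	                            = DIV_ROUND_UP(303, 4) = 76
	pixels_from_txbyteclkhs(76) = DIV_ROUND_UP(76 * 4 * 8 * 100, 24 * 100)
	                            = DIV_ROUND_UP(243200, 2400) = 102

101 pixels round-trip to 102, which is why the hardware readback is compared against a software value pushed through the same two conversions before trusting either.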
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 2cdab73046f8..2c3bd9c2573e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -836,6 +836,22 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode);
}
+void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
+ struct i2c_adapter *adapter =
+ intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
+
+ if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI)
+ return;
+
+ DRM_DEBUG_KMS("%s DP dual mode adaptor TMDS output\n",
+ enable ? "Enabling" : "Disabling");
+
+ drm_dp_dual_mode_set_tmds_output(hdmi->dp_dual_mode.type,
+ adapter, enable);
+}
+
static void intel_hdmi_prepare(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
@@ -845,6 +861,8 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
u32 hdmi_val;
+ intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
+
hdmi_val = SDVO_ENCODING_HDMI;
if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range)
hdmi_val |= HDMI_COLOR_RANGE_16_235;
@@ -953,6 +971,8 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
dotclock /= pipe_config->pixel_multiplier;
pipe_config->base.adjusted_mode.crtc_clock = dotclock;
+
+ pipe_config->lane_count = 4;
}
static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
@@ -1140,6 +1160,8 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
}
intel_hdmi->set_infoframes(&encoder->base, false, NULL);
+
+ intel_dp_dual_mode_set_tmds_output(intel_hdmi, false);
}
static void g4x_disable_hdmi(struct intel_encoder *encoder)
@@ -1165,27 +1187,42 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder)
intel_disable_hdmi(encoder);
}
-static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
+static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv)
{
- struct drm_device *dev = intel_hdmi_to_dev(hdmi);
-
- if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
+ if (IS_G4X(dev_priv))
return 165000;
- else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
+ else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)
return 300000;
else
return 225000;
}
+static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
+ bool respect_downstream_limits)
+{
+ struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+ int max_tmds_clock = intel_hdmi_source_max_tmds_clock(to_i915(dev));
+
+ if (respect_downstream_limits) {
+ if (hdmi->dp_dual_mode.max_tmds_clock)
+ max_tmds_clock = min(max_tmds_clock,
+ hdmi->dp_dual_mode.max_tmds_clock);
+ if (!hdmi->has_hdmi_sink)
+ max_tmds_clock = min(max_tmds_clock, 165000);
+ }
+
+ return max_tmds_clock;
+}
+
static enum drm_mode_status
hdmi_port_clock_valid(struct intel_hdmi *hdmi,
- int clock, bool respect_dvi_limit)
+ int clock, bool respect_downstream_limits)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
if (clock < 25000)
return MODE_CLOCK_LOW;
- if (clock > hdmi_port_clock_limit(hdmi, respect_dvi_limit))
+ if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits))
return MODE_CLOCK_HIGH;
/* BXT DPLL can't generate 223-240 MHz */
@@ -1309,7 +1346,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
* within limits.
*/
if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink &&
- hdmi_port_clock_valid(intel_hdmi, clock_12bpc, false) == MODE_OK &&
+ hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true) == MODE_OK &&
hdmi_12bpc_possible(pipe_config)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
@@ -1337,6 +1374,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
/* Set user selected PAR to incoming mode's member */
adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio;
+ pipe_config->lane_count = 4;
+
return true;
}
@@ -1349,10 +1388,57 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
intel_hdmi->has_audio = false;
intel_hdmi->rgb_quant_range_selectable = false;
+ intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE;
+ intel_hdmi->dp_dual_mode.max_tmds_clock = 0;
+
kfree(to_intel_connector(connector)->detect_edid);
to_intel_connector(connector)->detect_edid = NULL;
}
+static void
+intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_hdmi *hdmi = intel_attached_hdmi(connector);
+ enum port port = hdmi_to_dig_port(hdmi)->port;
+ struct i2c_adapter *adapter =
+ intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
+ enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter);
+
+ /*
+ * Type 1 DVI adaptors are not required to implement any
+ * registers, so we can't always detect their presence.
+ * Ideally we should be able to check the state of the
+ * CONFIG1 pin, but no such luck on our hardware.
+ *
+ * The only method left to us is to check the VBT to see
+ * if the port is a dual mode capable DP port. But let's
+ * only do that when we successfully read the EDID, to avoid
+ * confusing log messages about DP dual mode adaptors when
+ * there's nothing connected to the port.
+ */
+ if (type == DRM_DP_DUAL_MODE_UNKNOWN) {
+ if (has_edid &&
+ intel_bios_is_port_dp_dual_mode(dev_priv, port)) {
+ DRM_DEBUG_KMS("Assuming DP dual mode adaptor presence based on VBT\n");
+ type = DRM_DP_DUAL_MODE_TYPE1_DVI;
+ } else {
+ type = DRM_DP_DUAL_MODE_NONE;
+ }
+ }
+
+ if (type == DRM_DP_DUAL_MODE_NONE)
+ return;
+
+ hdmi->dp_dual_mode.type = type;
+ hdmi->dp_dual_mode.max_tmds_clock =
+ drm_dp_dual_mode_max_tmds_clock(type, adapter);
+
+ DRM_DEBUG_KMS("DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
+ drm_dp_get_dual_mode_type_name(type),
+ hdmi->dp_dual_mode.max_tmds_clock);
+}
+
static bool
intel_hdmi_set_edid(struct drm_connector *connector, bool force)
{
@@ -1368,6 +1454,8 @@ intel_hdmi_set_edid(struct drm_connector *connector, bool force)
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
+ intel_hdmi_dp_dual_mode_detect(connector, edid != NULL);
+
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
}
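
The reworked limit logic folds every downstream constraint into one min() chain, and the dual mode adaptor's limit now caps the 12bpc decision too (note respect_downstream_limits becoming true in the 12bpc check above). hdmi_port_clock_limit() restated as worked numbers, e.g. a HSW+ source driving a type 1 adaptor assumed from VBT:

	int max = intel_hdmi_source_max_tmds_clock(dev_priv);	/* HSW+: 300000 kHz */

	if (hdmi->dp_dual_mode.max_tmds_clock)			/* type 1: 165000 kHz */
		max = min(max, hdmi->dp_dual_mode.max_tmds_clock);
	if (!hdmi->has_hdmi_sink)				/* DVI sink: 165000 kHz */
		max = min(max, 165000);
	/* -> 165000 kHz for this port */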
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6179b591ee84..42eac37de047 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -721,48 +721,6 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
return ret;
}
-static int logical_ring_wait_for_space(struct drm_i915_gem_request *req,
- int bytes)
-{
- struct intel_ringbuffer *ringbuf = req->ringbuf;
- struct intel_engine_cs *engine = req->engine;
- struct drm_i915_gem_request *target;
- unsigned space;
- int ret;
-
- if (intel_ring_space(ringbuf) >= bytes)
- return 0;
-
- /* The whole point of reserving space is to not wait! */
- WARN_ON(ringbuf->reserved_in_use);
-
- list_for_each_entry(target, &engine->request_list, list) {
- /*
- * The request queue is per-engine, so can contain requests
- * from multiple ringbuffers. Here, we must ignore any that
- * aren't from the ringbuffer we're considering.
- */
- if (target->ringbuf != ringbuf)
- continue;
-
- /* Would completion of this request free enough space? */
- space = __intel_ring_space(target->postfix, ringbuf->tail,
- ringbuf->size);
- if (space >= bytes)
- break;
- }
-
- if (WARN_ON(&target->list == &engine->request_list))
- return -ENOSPC;
-
- ret = i915_wait_request(target);
- if (ret)
- return ret;
-
- ringbuf->space = space;
- return 0;
-}
-
/*
* intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
* @request: Request to advance the logical ringbuffer of.
@@ -814,92 +772,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
return 0;
}
-static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
-{
- uint32_t __iomem *virt;
- int rem = ringbuf->size - ringbuf->tail;
-
- virt = ringbuf->virtual_start + ringbuf->tail;
- rem /= 4;
- while (rem--)
- iowrite32(MI_NOOP, virt++);
-
- ringbuf->tail = 0;
- intel_ring_update_space(ringbuf);
-}
-
-static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
-{
- struct intel_ringbuffer *ringbuf = req->ringbuf;
- int remain_usable = ringbuf->effective_size - ringbuf->tail;
- int remain_actual = ringbuf->size - ringbuf->tail;
- int ret, total_bytes, wait_bytes = 0;
- bool need_wrap = false;
-
- if (ringbuf->reserved_in_use)
- total_bytes = bytes;
- else
- total_bytes = bytes + ringbuf->reserved_size;
-
- if (unlikely(bytes > remain_usable)) {
- /*
- * Not enough space for the basic request. So need to flush
- * out the remainder and then wait for base + reserved.
- */
- wait_bytes = remain_actual + total_bytes;
- need_wrap = true;
- } else {
- if (unlikely(total_bytes > remain_usable)) {
- /*
- * The base request will fit but the reserved space
- * falls off the end. So don't need an immediate wrap
- * and only need to effectively wait for the reserved
- * size space from the start of ringbuffer.
- */
- wait_bytes = remain_actual + ringbuf->reserved_size;
- } else if (total_bytes > ringbuf->space) {
- /* No wrapping required, just waiting. */
- wait_bytes = total_bytes;
- }
- }
-
- if (wait_bytes) {
- ret = logical_ring_wait_for_space(req, wait_bytes);
- if (unlikely(ret))
- return ret;
-
- if (need_wrap)
- __wrap_ring_buffer(ringbuf);
- }
-
- return 0;
-}
-
-/**
- * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands
- *
- * @req: The request to start some new work for
- * @num_dwords: number of DWORDs that we plan to write to the ringbuffer.
- *
- * The ringbuffer might not be ready to accept the commands right away (maybe it needs to
- * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
- * and also preallocates a request (every workload submission is still mediated through
- * requests, same as it did with legacy ringbuffer submission).
- *
- * Return: non-zero if the ringbuffer is not ready to be written to.
- */
-int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
-{
- int ret;
-
- ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t));
- if (ret)
- return ret;
-
- req->ringbuf->space -= num_dwords * sizeof(uint32_t);
- return 0;
-}
-
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
{
/*
@@ -912,7 +784,7 @@ int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
*/
intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
- return intel_logical_ring_begin(request, 0);
+ return intel_ring_begin(request, 0);
}
/**
@@ -982,7 +854,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
if (engine == &dev_priv->engine[RCS] &&
instp_mode != dev_priv->relative_constants_mode) {
- ret = intel_logical_ring_begin(params->request, 4);
+ ret = intel_ring_begin(params->request, 4);
if (ret)
return ret;
@@ -1178,7 +1050,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
if (ret)
return ret;
- ret = intel_logical_ring_begin(req, w->count * 2 + 2);
+ ret = intel_ring_begin(req, w->count * 2 + 2);
if (ret)
return ret;
@@ -1669,7 +1541,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
int i, ret;
- ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2);
+ ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
if (ret)
return ret;
@@ -1716,7 +1588,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
}
- ret = intel_logical_ring_begin(req, 4);
+ ret = intel_ring_begin(req, 4);
if (ret)
return ret;
@@ -1778,7 +1650,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
uint32_t cmd;
int ret;
- ret = intel_logical_ring_begin(request, 4);
+ ret = intel_ring_begin(request, 4);
if (ret)
return ret;
@@ -1846,7 +1718,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
vf_flush_wa = true;
}
- ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6);
+ ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6);
if (ret)
return ret;
@@ -1920,7 +1792,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
struct intel_ringbuffer *ringbuf = request->ringbuf;
int ret;
- ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS);
+ ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
if (ret)
return ret;
@@ -1944,7 +1816,7 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
struct intel_ringbuffer *ringbuf = request->ringbuf;
int ret;
- ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS);
+ ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
if (ret)
return ret;
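
All of the removed logical-ring plumbing (wait-for-space, wrap, begin) collapses into the legacy intel_ring_begin(), so execlists and ringbuffer submission now share one reservation path. Call sites keep the familiar begin/emit/advance shape; a sketch, emitting NOOP padding purely for illustration:

	ret = intel_ring_begin(req, 4);	/* reserve 4 dwords, waiting or wrapping as needed */
	if (ret)
		return ret;

	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_emit(ringbuf, MI_NOOP);
	intel_logical_ring_advance(ringbuf);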
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 461f1ef9b5c1..60a7385bc531 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -63,7 +63,6 @@ int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
void intel_logical_ring_stop(struct intel_engine_cs *engine);
void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
int intel_logical_rings_init(struct drm_device *dev);
-int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
/**
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 23b8545ad6b0..6ba4bf7f2a89 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -239,11 +239,9 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
- ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
- if (ret) {
- DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+ ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+ if (ret)
return ret;
- }
intel_logical_ring_emit(ringbuf,
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
@@ -305,11 +303,9 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
return -ENODEV;
- ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
- if (ret) {
- DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret);
+ ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+ if (ret)
return ret;
- }
intel_logical_ring_emit(ringbuf,
MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 4b60005cda37..a7ef45da0a9e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3904,6 +3904,8 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
if (IS_HASWELL(dev) || IS_BROADWELL(dev))
hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+ memset(active, 0, sizeof(*active));
+
active->pipe_enabled = intel_crtc->active;
if (active->pipe_enabled) {
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index c3abae4bc596..a788d1e9589b 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -280,7 +280,10 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
* with the 5 or 6 idle patterns.
*/
uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- uint32_t val = 0x0;
+ uint32_t val = EDP_PSR_ENABLE;
+
+ val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
+ val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
if (IS_HASWELL(dev))
val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
@@ -288,14 +291,50 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
if (dev_priv->psr.link_standby)
val |= EDP_PSR_LINK_STANDBY;
- I915_WRITE(EDP_PSR_CTL, val |
- max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
- idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
- EDP_PSR_ENABLE);
+ if (dev_priv->vbt.psr.tp1_wakeup_time > 5)
+ val |= EDP_PSR_TP1_TIME_2500us;
+ else if (dev_priv->vbt.psr.tp1_wakeup_time > 1)
+ val |= EDP_PSR_TP1_TIME_500us;
+ else if (dev_priv->vbt.psr.tp1_wakeup_time > 0)
+ val |= EDP_PSR_TP1_TIME_100us;
+ else
+ val |= EDP_PSR_TP1_TIME_0us;
+
+ if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
+ val |= EDP_PSR_TP2_TP3_TIME_2500us;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
+ val |= EDP_PSR_TP2_TP3_TIME_500us;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
+ val |= EDP_PSR_TP2_TP3_TIME_100us;
+ else
+ val |= EDP_PSR_TP2_TP3_TIME_0us;
+
+ if (intel_dp_source_supports_hbr2(intel_dp) &&
+ drm_dp_tps3_supported(intel_dp->dpcd))
+ val |= EDP_PSR_TP1_TP3_SEL;
+ else
+ val |= EDP_PSR_TP1_TP2_SEL;
+
+ I915_WRITE(EDP_PSR_CTL, val);
+
+ if (!dev_priv->psr.psr2_support)
+ return;
+
+ /* FIXME: selective update is probably totally broken because it doesn't
+ * mesh at all with our frontbuffer tracking. And the hw alone isn't
+ * good enough. */
+ val = EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
+
+ if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5)
+ val |= EDP_PSR2_TP2_TIME_2500;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1)
+ val |= EDP_PSR2_TP2_TIME_500;
+ else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0)
+ val |= EDP_PSR2_TP2_TIME_100;
+ else
+ val |= EDP_PSR2_TP2_TIME_50;
- if (dev_priv->psr.psr2_support)
- I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE |
- EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100);
+ I915_WRITE(EDP_PSR2_CTL, val);
}
static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
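
Read as a table, the new wakeup-time handling buckets the VBT values into the discrete TP times the hardware supports (the same ladder is used for TP1, TP2/TP3 and the PSR2 TP2 field):

	vbt value > 5     ->  2500 us
	vbt value 2 to 5  ->   500 us
	vbt value 1       ->   100 us
	vbt value 0       ->     0 us (50 for the PSR2 TP2 field)

so, for example, a tp1_wakeup_time of 3 selects EDP_PSR_TP1_TIME_500us.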
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 245386e20c52..04402bb9d26b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -53,12 +53,6 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
ringbuf->tail, ringbuf->size);
}
-int intel_ring_space(struct intel_ringbuffer *ringbuf)
-{
- intel_ring_update_space(ringbuf);
- return ringbuf->space;
-}
-
bool intel_engine_stopped(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->dev->dev_private;
@@ -1309,7 +1303,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
intel_ring_emit(signaller, seqno);
intel_ring_emit(signaller, 0);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->id));
+ MI_SEMAPHORE_TARGET(waiter->hw_id));
intel_ring_emit(signaller, 0);
}
@@ -1349,7 +1343,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
intel_ring_emit(signaller, upper_32_bits(gtt_offset));
intel_ring_emit(signaller, seqno);
intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->id));
+ MI_SEMAPHORE_TARGET(waiter->hw_id));
intel_ring_emit(signaller, 0);
}
@@ -1573,6 +1567,8 @@ pc_render_add_request(struct drm_i915_gem_request *req)
static void
gen6_seqno_barrier(struct intel_engine_cs *engine)
{
+ struct drm_i915_private *dev_priv = engine->dev->dev_private;
+
/* Workaround to force correct ordering between irq and seqno writes on
* ivb (and maybe also on snb) by reading from a CS register (like
* ACTHD) before reading the status page.
@@ -1584,9 +1580,13 @@ gen6_seqno_barrier(struct intel_engine_cs *engine)
* the write time to land, but that would incur a delay after every
* batch i.e. much more frequent than a delay when waiting for the
* interrupt (with the same net latency).
+ *
+ * Also note that to prevent whole machine hangs on gen7, we have to
+ * take the spinlock to guard against concurrent cacheline access.
*/
- struct drm_i915_private *dev_priv = engine->dev->dev_private;
+ spin_lock_irq(&dev_priv->uncore.lock);
POSTING_READ_FW(RING_ACTHD(engine->mmio_base));
+ spin_unlock_irq(&dev_priv->uncore.lock);
}
static u32
@@ -2312,51 +2312,6 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
engine->dev = NULL;
}
-static int ring_wait_for_space(struct intel_engine_cs *engine, int n)
-{
- struct intel_ringbuffer *ringbuf = engine->buffer;
- struct drm_i915_gem_request *request;
- unsigned space;
- int ret;
-
- if (intel_ring_space(ringbuf) >= n)
- return 0;
-
- /* The whole point of reserving space is to not wait! */
- WARN_ON(ringbuf->reserved_in_use);
-
- list_for_each_entry(request, &engine->request_list, list) {
- space = __intel_ring_space(request->postfix, ringbuf->tail,
- ringbuf->size);
- if (space >= n)
- break;
- }
-
- if (WARN_ON(&request->list == &engine->request_list))
- return -ENOSPC;
-
- ret = i915_wait_request(request);
- if (ret)
- return ret;
-
- ringbuf->space = space;
- return 0;
-}
-
-static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf)
-{
- uint32_t __iomem *virt;
- int rem = ringbuf->size - ringbuf->tail;
-
- virt = ringbuf->virtual_start + ringbuf->tail;
- rem /= 4;
- while (rem--)
- iowrite32(MI_NOOP, virt++);
-
- ringbuf->tail = 0;
- intel_ring_update_space(ringbuf);
-}
-
int intel_engine_idle(struct intel_engine_cs *engine)
{
struct drm_i915_gem_request *req;
@@ -2398,63 +2353,82 @@ int intel_ring_reserve_space(struct drm_i915_gem_request *request)
void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size)
{
- WARN_ON(ringbuf->reserved_size);
- WARN_ON(ringbuf->reserved_in_use);
-
+ GEM_BUG_ON(ringbuf->reserved_size);
ringbuf->reserved_size = size;
}
void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
{
- WARN_ON(ringbuf->reserved_in_use);
-
+ GEM_BUG_ON(!ringbuf->reserved_size);
ringbuf->reserved_size = 0;
- ringbuf->reserved_in_use = false;
}
void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf)
{
- WARN_ON(ringbuf->reserved_in_use);
-
- ringbuf->reserved_in_use = true;
- ringbuf->reserved_tail = ringbuf->tail;
+ GEM_BUG_ON(!ringbuf->reserved_size);
+ ringbuf->reserved_size = 0;
}
void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf)
{
- WARN_ON(!ringbuf->reserved_in_use);
- if (ringbuf->tail > ringbuf->reserved_tail) {
- WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size,
- "request reserved size too small: %d vs %d!\n",
- ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size);
- } else {
+ GEM_BUG_ON(ringbuf->reserved_size);
+}
+
+static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
+{
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
+ struct intel_engine_cs *engine = req->engine;
+ struct drm_i915_gem_request *target;
+
+ intel_ring_update_space(ringbuf);
+ if (ringbuf->space >= bytes)
+ return 0;
+
+ /*
+ * Space is reserved in the ringbuffer for finalising the request,
+ * as that cannot be allowed to fail. During request finalisation,
+ * reserved_space is set to 0 to stop the overallocation and the
+ * assumption is that then we never need to wait (which has the
+ * risk of failing with EINTR).
+ *
+ * See also i915_gem_request_alloc() and i915_add_request().
+ */
+ GEM_BUG_ON(!ringbuf->reserved_size);
+
+ list_for_each_entry(target, &engine->request_list, list) {
+ unsigned space;
+
/*
- * The ring was wrapped while the reserved space was in use.
- * That means that some unknown amount of the ring tail was
- * no-op filled and skipped. Thus simply adding the ring size
- * to the tail and doing the above space check will not work.
- * Rather than attempt to track how much tail was skipped,
- * it is much simpler to say that also skipping the sanity
- * check every once in a while is not a big issue.
+ * The request queue is per-engine, so can contain requests
+ * from multiple ringbuffers. Here, we must ignore any that
+ * aren't from the ringbuffer we're considering.
*/
+ if (target->ringbuf != ringbuf)
+ continue;
+
+ /* Would completion of this request free enough space? */
+ space = __intel_ring_space(target->postfix, ringbuf->tail,
+ ringbuf->size);
+ if (space >= bytes)
+ break;
}
- ringbuf->reserved_size = 0;
- ringbuf->reserved_in_use = false;
+ if (WARN_ON(&target->list == &engine->request_list))
+ return -ENOSPC;
+
+ return i915_wait_request(target);
}
-static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes)
+int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
{
- struct intel_ringbuffer *ringbuf = engine->buffer;
- int remain_usable = ringbuf->effective_size - ringbuf->tail;
+ struct intel_ringbuffer *ringbuf = req->ringbuf;
int remain_actual = ringbuf->size - ringbuf->tail;
- int ret, total_bytes, wait_bytes = 0;
+ int remain_usable = ringbuf->effective_size - ringbuf->tail;
+ int bytes = num_dwords * sizeof(u32);
+ int total_bytes, wait_bytes;
bool need_wrap = false;
- if (ringbuf->reserved_in_use)
- total_bytes = bytes;
- else
- total_bytes = bytes + ringbuf->reserved_size;
+ total_bytes = bytes + ringbuf->reserved_size;
if (unlikely(bytes > remain_usable)) {
/*
@@ -2463,44 +2437,42 @@ static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes)
*/
wait_bytes = remain_actual + total_bytes;
need_wrap = true;
+ } else if (unlikely(total_bytes > remain_usable)) {
+ /*
+ * The base request will fit but the reserved space
+ * falls off the end. So we don't need an immediate wrap
+ * and only need to effectively wait for the reserved
+ * size space from the start of ringbuffer.
+ */
+ wait_bytes = remain_actual + ringbuf->reserved_size;
} else {
- if (unlikely(total_bytes > remain_usable)) {
- /*
- * The base request will fit but the reserved space
- * falls off the end. So don't need an immediate wrap
- * and only need to effectively wait for the reserved
- * size space from the start of ringbuffer.
- */
- wait_bytes = remain_actual + ringbuf->reserved_size;
- } else if (total_bytes > ringbuf->space) {
- /* No wrapping required, just waiting. */
- wait_bytes = total_bytes;
- }
+ /* No wrapping required, just waiting. */
+ wait_bytes = total_bytes;
}
- if (wait_bytes) {
- ret = ring_wait_for_space(engine, wait_bytes);
+ if (wait_bytes > ringbuf->space) {
+ int ret = wait_for_space(req, wait_bytes);
if (unlikely(ret))
return ret;
- if (need_wrap)
- __wrap_ring_buffer(ringbuf);
+ intel_ring_update_space(ringbuf);
+ if (unlikely(ringbuf->space < wait_bytes))
+ return -EAGAIN;
}
- return 0;
-}
+ if (unlikely(need_wrap)) {
+ GEM_BUG_ON(remain_actual > ringbuf->space);
+ GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size);
-int intel_ring_begin(struct drm_i915_gem_request *req,
- int num_dwords)
-{
- struct intel_engine_cs *engine = req->engine;
- int ret;
-
- ret = __intel_ring_prepare(engine, num_dwords * sizeof(uint32_t));
- if (ret)
- return ret;
+ /* Fill the tail with MI_NOOP */
+ memset(ringbuf->virtual_start + ringbuf->tail,
+ 0, remain_actual);
+ ringbuf->tail = 0;
+ ringbuf->space -= remain_actual;
+ }
- engine->buffer->space -= num_dwords * sizeof(uint32_t);
+ ringbuf->space -= bytes;
+ GEM_BUG_ON(ringbuf->space < 0);
return 0;
}
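For reference, the free-space computation that intel_ring_update_space() and wait_for_space() lean on is the usual circular-buffer distance from tail to head, minus a little slack so a completely full ring is never confused with an empty one. A sketch consistent with this era of the driver (the slack constant I915_RING_FREE_SPACE is an assumption here; the signature matches the declaration kept in intel_ringbuffer.h below):

int __intel_ring_space(int head, int tail, int size)
{
	/* Free bytes between tail (CPU write pointer) and head
	 * (GPU read pointer) in a circular buffer of 'size' bytes. */
	int space = head - tail;

	if (space <= 0)
		space += size;	/* tail has wrapped past head */

	/* Keep some slack so head == tail always means "empty". */
	return space - I915_RING_FREE_SPACE;
}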
@@ -2772,6 +2744,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
engine->name = "render ring";
engine->id = RCS;
engine->exec_id = I915_EXEC_RENDER;
+ engine->hw_id = 0;
engine->mmio_base = RENDER_RING_BASE;
if (INTEL_INFO(dev)->gen >= 8) {
@@ -2923,6 +2896,7 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
engine->name = "bsd ring";
engine->id = VCS;
engine->exec_id = I915_EXEC_BSD;
+ engine->hw_id = 1;
engine->write_tail = ring_write_tail;
if (INTEL_INFO(dev)->gen >= 6) {
@@ -3001,6 +2975,7 @@ int intel_init_bsd2_ring_buffer(struct drm_device *dev)
engine->name = "bsd2 ring";
engine->id = VCS2;
engine->exec_id = I915_EXEC_BSD;
+ engine->hw_id = 4;
engine->write_tail = ring_write_tail;
engine->mmio_base = GEN8_BSD2_RING_BASE;
@@ -3033,6 +3008,7 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
engine->name = "blitter ring";
engine->id = BCS;
engine->exec_id = I915_EXEC_BLT;
+ engine->hw_id = 2;
engine->mmio_base = BLT_RING_BASE;
engine->write_tail = ring_write_tail;
@@ -3092,6 +3068,7 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
engine->name = "video enhancement ring";
engine->id = VECS;
engine->exec_id = I915_EXEC_VEBOX;
+ engine->hw_id = 3;
engine->mmio_base = VEBOX_RING_BASE;
engine->write_tail = ring_write_tail;
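Collected from the five init functions above, the new ids are: render 0, bsd 1, blitter 2, vebox 3, bsd2 4. The point of the field is that the driver's own engine id and the id gen8 semaphores expect in MI_SEMAPHORE_TARGET() need not agree, which is why the signalling paths earlier in this file switch from waiter->id to waiter->hw_id. As a summary array (illustrative only, not kernel code; the RCS..VCS2 enumerators are the driver ids from intel_ringbuffer.h):

static const unsigned int engine_hw_id[] = {
	[RCS]  = 0,	/* render ring */
	[VCS]  = 1,	/* bsd ring */
	[BCS]  = 2,	/* blitter ring */
	[VECS] = 3,	/* video enhancement ring */
	[VCS2] = 4,	/* bsd2 ring */
};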
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2ade194bbea9..ff126485d398 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -108,8 +108,6 @@ struct intel_ringbuffer {
int size;
int effective_size;
int reserved_size;
- int reserved_tail;
- bool reserved_in_use;
/** We track the position of the requests in the ring buffer, and
* when each is retired we increment last_retired_head as the GPU
@@ -156,7 +154,8 @@ struct intel_engine_cs {
#define I915_NUM_ENGINES 5
#define _VCS(n) (VCS + (n))
unsigned int exec_id;
- unsigned int guc_id;
+ unsigned int hw_id;
+ unsigned int guc_id; /* XXX same as hw_id? */
u32 mmio_base;
struct drm_device *dev;
struct intel_ringbuffer *buffer;
@@ -459,7 +458,6 @@ static inline void intel_ring_advance(struct intel_engine_cs *engine)
}
int __intel_ring_space(int head, int tail, int size);
void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
-int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_engine_stopped(struct intel_engine_cs *engine);
int __must_check intel_engine_idle(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index 9ff1e960d617..c15051de8023 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -740,6 +740,7 @@ struct bdb_psr {
#define DEVICE_TYPE_INT_TV 0x1009
#define DEVICE_TYPE_HDMI 0x60D2
#define DEVICE_TYPE_DP 0x68C6
+#define DEVICE_TYPE_DP_DUAL_MODE 0x60D6
#define DEVICE_TYPE_eDP 0x78C6
#define DEVICE_TYPE_CLASS_EXTENSION (1 << 15)
@@ -774,6 +775,17 @@ struct bdb_psr {
DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
DEVICE_TYPE_ANALOG_OUTPUT)
+#define DEVICE_TYPE_DP_DUAL_MODE_BITS \
+ (DEVICE_TYPE_INTERNAL_CONNECTOR | \
+ DEVICE_TYPE_MIPI_OUTPUT | \
+ DEVICE_TYPE_COMPOSITE_OUTPUT | \
+ DEVICE_TYPE_LVDS_SINGALING | \
+ DEVICE_TYPE_TMDS_DVI_SIGNALING | \
+ DEVICE_TYPE_VIDEO_SIGNALING | \
+ DEVICE_TYPE_DISPLAYPORT_OUTPUT | \
+ DEVICE_TYPE_DIGITAL_OUTPUT | \
+ DEVICE_TYPE_ANALOG_OUTPUT)
+
/* define the DVO port for HDMI output type */
#define DVO_B 1
#define DVO_C 2
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 1080019e7b17..1f14b602882b 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -25,6 +25,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_of.h>
+#include <video/imx-ipu-v3.h>
#include "imx-drm.h"
@@ -437,6 +438,13 @@ static int compare_of(struct device *dev, void *data)
{
struct device_node *np = data;
+ /* Special case for DI, dev->of_node may not be set yet */
+ if (strcmp(dev->driver->name, "imx-ipuv3-crtc") == 0) {
+ struct ipu_client_platformdata *pdata = dev->platform_data;
+
+ return pdata->of_node == np;
+ }
+
/* Special case for LDB, one device for two channels */
if (of_node_cmp(np->name, "lvds-channel") == 0) {
np = of_get_parent(np);
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index dee8e8b3523b..b2c30b8d9816 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -473,7 +473,7 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
ret = imx_drm_add_crtc(drm, &ipu_crtc->base, &ipu_crtc->imx_crtc,
&ipu_crtc->plane[0]->base, &ipu_crtc_helper_funcs,
- ipu_crtc->dev->of_node);
+ pdata->of_node);
if (ret) {
dev_err(ipu_crtc->dev, "adding crtc failed with %d.\n", ret);
goto err_put_resources;
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index d0240743a17c..a7e978677937 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2164,7 +2164,7 @@ static void kv_apply_state_adjust_rules(struct radeon_device *rdev,
if (pi->caps_stable_p_state) {
stable_p_state_sclk = (max_limits->sclk * 75) / 100;
- for (i = table->count - 1; i >= 0; i++) {
+ for (i = table->count - 1; i >= 0; i--) {
if (stable_p_state_sclk >= table->entries[i].clk) {
stable_p_state_sclk = table->entries[i].clk;
break;
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 32c7986b63ab..6bf4ce466d20 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -437,7 +437,7 @@ static int vtg_probe(struct platform_device *pdev)
return -EPROBE_DEFER;
} else {
vtg->irq = platform_get_irq(pdev, 0);
- if (IS_ERR_VALUE(vtg->irq)) {
+ if (vtg->irq < 0) {
DRM_ERROR("Failed to get VTG interrupt\n");
return vtg->irq;
}
@@ -447,7 +447,7 @@ static int vtg_probe(struct platform_device *pdev)
ret = devm_request_threaded_irq(dev, vtg->irq, vtg_irq,
vtg_irq_thread, IRQF_ONESHOT,
dev_name(dev), vtg);
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
DRM_ERROR("Failed to register VTG interrupt\n");
return ret;
}
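This IS_ERR_VALUE() to "< 0" conversion recurs throughout the rest of the series (tilcdc, host1x, the SMMU drivers, irqchip, media, mfd and mmc below), so it is worth spelling out why. IS_ERR_VALUE() compares against the top 4095 values of an unsigned long and is meant for pointer-sized error encodings; applied to a 32-bit value, the result depends on how the integer promotion happens. A standalone demonstration of the failure mode (userspace C, for illustration only; the macro is an approximation of the kernel's):

#include <stdio.h>

#define MAX_ERRNO 4095
/* The kernel macro, approximately: */
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	unsigned int ret = -22;		/* -EINVAL stored in a u32 */

	/*
	 * On 64-bit, the u32 zero-extends to 0xffffffea, far below
	 * (unsigned long)-4095, so IS_ERR_VALUE() is silently false,
	 * while a plain sign check on the signed value still works.
	 */
	printf("IS_ERR_VALUE: %d, (int)ret < 0: %d\n",
	       (int)IS_ERR_VALUE(ret), (int)ret < 0);
	return 0;
}

A plain signed int happens to sign-extend correctly, but the type dependence is exactly the trap these hunks remove.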
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
index 7716f42f8aab..6b8c5b3bf588 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
@@ -342,7 +342,7 @@ static int tfp410_probe(struct platform_device *pdev)
tfp410_mod->gpio = of_get_named_gpio_flags(node, "powerdn-gpio",
0, NULL);
- if (IS_ERR_VALUE(tfp410_mod->gpio)) {
+ if (tfp410_mod->gpio < 0) {
dev_warn(&pdev->dev, "No power down GPIO\n");
} else {
ret = gpio_request(tfp410_mod->gpio, "DVI_PDn");
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index 498b37e39058..e1e31e9e67cd 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -85,7 +85,7 @@ static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
err = devm_request_irq(host->dev, host->intr_syncpt_irq,
syncpt_thresh_isr, IRQF_SHARED,
"host1x_syncpt", host);
- if (IS_ERR_VALUE(err)) {
+ if (err < 0) {
WARN_ON(1);
return err;
}
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index abb98c77bad2..99dcacf05b99 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -997,7 +997,7 @@ struct ipu_platform_reg {
};
/* These must be in the order of the corresponding device tree port nodes */
-static const struct ipu_platform_reg client_reg[] = {
+static struct ipu_platform_reg client_reg[] = {
{
.pdata = {
.csi = 0,
@@ -1048,7 +1048,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
mutex_unlock(&ipu_client_id_mutex);
for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
- const struct ipu_platform_reg *reg = &client_reg[i];
+ struct ipu_platform_reg *reg = &client_reg[i];
struct platform_device *pdev;
struct device_node *of_node;
@@ -1070,6 +1070,7 @@ static int ipu_add_client_devices(struct ipu_soc *ipu, unsigned long ipu_base)
pdev->dev.parent = dev;
+ reg->pdata.of_node = of_node;
ret = platform_device_add_data(pdev, &reg->pdata,
sizeof(reg->pdata));
if (!ret)
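Three hunks cooperate here: ipu-common records each client's DT port node in its platform data, ipuv3-crtc registers the CRTC with that node, and the compare_of() change in imx-drm-core matches against it, because dev->of_node is deliberately left unset on IPU client devices. A condensed call-path sketch (commentary, not additional kernel code):

/*
 * How the three hunks fit together:
 *
 *   ipu-common:    reg->pdata.of_node = of_node;
 *                  platform_device_add_data(pdev, &reg->pdata, ...);
 *   ipuv3-crtc:    imx_drm_add_crtc(..., pdata->of_node);
 *   imx-drm-core:  compare_of(dev, np)
 *                      -> pdata = dev->platform_data;
 *                      -> return pdata->of_node == np;
 *
 * Since dev->of_node stays NULL for IPU clients, component matching
 * has to go through the platform data instead.
 */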
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index ebab33e77d67..94b68213c50d 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1477,7 +1477,7 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;
asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
- if (IS_ERR_VALUE(asid))
+ if (asid < 0)
return asid;
cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
@@ -1508,7 +1508,7 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
- if (IS_ERR_VALUE(vmid))
+ if (vmid < 0)
return vmid;
cfg->vmid = (u16)vmid;
@@ -1569,7 +1569,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
smmu_domain->pgtbl_ops = pgtbl_ops;
ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
free_io_pgtable_ops(pgtbl_ops);
return ret;
@@ -1642,7 +1642,7 @@ static void arm_smmu_detach_dev(struct device *dev)
struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev);
smmu_group->ste.bypass = true;
- if (IS_ERR_VALUE(arm_smmu_install_ste_for_group(smmu_group)))
+ if (arm_smmu_install_ste_for_group(smmu_group) < 0)
dev_warn(dev, "failed to install bypass STE\n");
smmu_group->domain = NULL;
@@ -1694,7 +1694,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
smmu_group->ste.bypass = domain->type == IOMMU_DOMAIN_DMA;
ret = arm_smmu_install_ste_for_group(smmu_group);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
smmu_group->domain = NULL;
out_unlock:
@@ -2235,7 +2235,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
arm_smmu_evtq_handler,
arm_smmu_evtq_thread,
0, "arm-smmu-v3-evtq", smmu);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
dev_warn(smmu->dev, "failed to enable evtq irq\n");
}
@@ -2244,7 +2244,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
ret = devm_request_irq(smmu->dev, irq,
arm_smmu_cmdq_sync_handler, 0,
"arm-smmu-v3-cmdq-sync", smmu);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
}
@@ -2252,7 +2252,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
if (irq) {
ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
0, "arm-smmu-v3-gerror", smmu);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
dev_warn(smmu->dev, "failed to enable gerror irq\n");
}
@@ -2264,7 +2264,7 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
arm_smmu_priq_thread,
0, "arm-smmu-v3-priq",
smmu);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
dev_warn(smmu->dev,
"failed to enable priq irq\n");
else
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index e206ce7a4e4b..9345a3fcb706 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -950,7 +950,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
smmu->num_context_banks);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
goto out_unlock;
cfg->cbndx = ret;
@@ -989,7 +989,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
"arm-smmu-context-fault", domain);
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
cfg->irptndx, irq);
cfg->irptndx = INVALID_IRPTNDX;
@@ -1099,7 +1099,7 @@ static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
for (i = 0; i < cfg->num_streamids; ++i) {
int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
smmu->num_mapping_groups);
- if (IS_ERR_VALUE(idx)) {
+ if (idx < 0) {
dev_err(smmu->dev, "failed to allocate free SMR\n");
goto err_free_smrs;
}
@@ -1233,7 +1233,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
/* Ensure that the domain is finalised */
ret = arm_smmu_init_domain_context(domain, smmu);
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
/*
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index b2bfb9594508..a644d0cec2d8 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -33,6 +33,7 @@
#include <linux/dma-mapping.h>
#include <linux/mempool.h>
#include <linux/memory.h>
+#include <linux/cpu.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/iova.h>
@@ -390,6 +391,7 @@ struct dmar_domain {
* domain ids are 16 bit wide according
* to VT-d spec, section 9.3 */
+ bool has_iotlb_device;
struct list_head devices; /* all devices' list */
struct iova_domain iovad; /* iova's that belong to this domain */
@@ -456,27 +458,32 @@ static LIST_HEAD(dmar_rmrr_units);
static void flush_unmaps_timeout(unsigned long data);
-static DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0);
+struct deferred_flush_entry {
+ unsigned long iova_pfn;
+ unsigned long nrpages;
+ struct dmar_domain *domain;
+ struct page *freelist;
+};
#define HIGH_WATER_MARK 250
-struct deferred_flush_tables {
+struct deferred_flush_table {
int next;
- struct iova *iova[HIGH_WATER_MARK];
- struct dmar_domain *domain[HIGH_WATER_MARK];
- struct page *freelist[HIGH_WATER_MARK];
+ struct deferred_flush_entry entries[HIGH_WATER_MARK];
+};
+
+struct deferred_flush_data {
+ spinlock_t lock;
+ int timer_on;
+ struct timer_list timer;
+ long size;
+ struct deferred_flush_table *tables;
};
-static struct deferred_flush_tables *deferred_flush;
+DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush);
/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;
-static DEFINE_SPINLOCK(async_umap_flush_lock);
-static LIST_HEAD(unmaps_to_do);
-
-static int timer_on;
-static long list_size;
-
static void domain_exit(struct dmar_domain *domain);
static void domain_remove_dev_info(struct dmar_domain *domain);
static void dmar_remove_one_dev_info(struct dmar_domain *domain,
@@ -1458,10 +1465,35 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
return NULL;
}
+static void domain_update_iotlb(struct dmar_domain *domain)
+{
+ struct device_domain_info *info;
+ bool has_iotlb_device = false;
+
+ assert_spin_locked(&device_domain_lock);
+
+ list_for_each_entry(info, &domain->devices, link) {
+ struct pci_dev *pdev;
+
+ if (!info->dev || !dev_is_pci(info->dev))
+ continue;
+
+ pdev = to_pci_dev(info->dev);
+ if (pdev->ats_enabled) {
+ has_iotlb_device = true;
+ break;
+ }
+ }
+
+ domain->has_iotlb_device = has_iotlb_device;
+}
+
static void iommu_enable_dev_iotlb(struct device_domain_info *info)
{
struct pci_dev *pdev;
+ assert_spin_locked(&device_domain_lock);
+
if (!info || !dev_is_pci(info->dev))
return;
@@ -1481,6 +1513,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
#endif
if (info->ats_supported && !pci_enable_ats(pdev, VTD_PAGE_SHIFT)) {
info->ats_enabled = 1;
+ domain_update_iotlb(info->domain);
info->ats_qdep = pci_ats_queue_depth(pdev);
}
}
@@ -1489,6 +1522,8 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
{
struct pci_dev *pdev;
+ assert_spin_locked(&device_domain_lock);
+
if (!dev_is_pci(info->dev))
return;
@@ -1497,6 +1532,7 @@ static void iommu_disable_dev_iotlb(struct device_domain_info *info)
if (info->ats_enabled) {
pci_disable_ats(pdev);
info->ats_enabled = 0;
+ domain_update_iotlb(info->domain);
}
#ifdef CONFIG_INTEL_IOMMU_SVM
if (info->pri_enabled) {
@@ -1517,6 +1553,9 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
unsigned long flags;
struct device_domain_info *info;
+ if (!domain->has_iotlb_device)
+ return;
+
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry(info, &domain->devices, link) {
if (!info->ats_enabled)
@@ -1734,6 +1773,7 @@ static struct dmar_domain *alloc_domain(int flags)
memset(domain, 0, sizeof(*domain));
domain->nid = -1;
domain->flags = flags;
+ domain->has_iotlb_device = false;
INIT_LIST_HEAD(&domain->devices);
return domain;
@@ -1918,8 +1958,12 @@ static void domain_exit(struct dmar_domain *domain)
return;
/* Flush any lazy unmaps that may reference this domain */
- if (!intel_iommu_strict)
- flush_unmaps_timeout(0);
+ if (!intel_iommu_strict) {
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ flush_unmaps_timeout(cpu);
+ }
/* Remove associated devices and clear attached or cached domains */
rcu_read_lock();
@@ -3077,7 +3121,7 @@ static int __init init_dmars(void)
bool copied_tables = false;
struct device *dev;
struct intel_iommu *iommu;
- int i, ret;
+ int i, ret, cpu;
/*
* for each drhd
@@ -3110,11 +3154,20 @@ static int __init init_dmars(void)
goto error;
}
- deferred_flush = kzalloc(g_num_of_iommus *
- sizeof(struct deferred_flush_tables), GFP_KERNEL);
- if (!deferred_flush) {
- ret = -ENOMEM;
- goto free_g_iommus;
+ for_each_possible_cpu(cpu) {
+ struct deferred_flush_data *dfd = per_cpu_ptr(&deferred_flush,
+ cpu);
+
+ dfd->tables = kzalloc(g_num_of_iommus *
+ sizeof(struct deferred_flush_table),
+ GFP_KERNEL);
+ if (!dfd->tables) {
+ ret = -ENOMEM;
+ goto free_g_iommus;
+ }
+
+ spin_lock_init(&dfd->lock);
+ setup_timer(&dfd->timer, flush_unmaps_timeout, cpu);
}
for_each_active_iommu(iommu, drhd) {
@@ -3291,19 +3344,20 @@ free_iommu:
disable_dmar_iommu(iommu);
free_dmar_iommu(iommu);
}
- kfree(deferred_flush);
free_g_iommus:
+ for_each_possible_cpu(cpu)
+ kfree(per_cpu_ptr(&deferred_flush, cpu)->tables);
kfree(g_iommus);
error:
return ret;
}
/* This takes a number of _MM_ pages, not VTD pages */
-static struct iova *intel_alloc_iova(struct device *dev,
+static unsigned long intel_alloc_iova(struct device *dev,
struct dmar_domain *domain,
unsigned long nrpages, uint64_t dma_mask)
{
- struct iova *iova = NULL;
+ unsigned long iova_pfn = 0;
/* Restrict dma_mask to the width that the iommu can handle */
dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
@@ -3316,19 +3370,19 @@ static struct iova *intel_alloc_iova(struct device *dev,
* DMA_BIT_MASK(32) and if that fails then try allocating
* from higher range
*/
- iova = alloc_iova(&domain->iovad, nrpages,
- IOVA_PFN(DMA_BIT_MASK(32)), 1);
- if (iova)
- return iova;
+ iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
+ IOVA_PFN(DMA_BIT_MASK(32)));
+ if (iova_pfn)
+ return iova_pfn;
}
- iova = alloc_iova(&domain->iovad, nrpages, IOVA_PFN(dma_mask), 1);
- if (unlikely(!iova)) {
+ iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
+ if (unlikely(!iova_pfn)) {
pr_err("Allocating %ld-page iova for %s failed",
nrpages, dev_name(dev));
- return NULL;
+ return 0;
}
- return iova;
+ return iova_pfn;
}
static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev)
@@ -3426,7 +3480,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
{
struct dmar_domain *domain;
phys_addr_t start_paddr;
- struct iova *iova;
+ unsigned long iova_pfn;
int prot = 0;
int ret;
struct intel_iommu *iommu;
@@ -3444,8 +3498,8 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
iommu = domain_get_iommu(domain);
size = aligned_nrpages(paddr, size);
- iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
- if (!iova)
+ iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size), dma_mask);
+ if (!iova_pfn)
goto error;
/*
@@ -3463,7 +3517,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
* might have two guest_addr mapping to the same host paddr, but this
* is not a big problem
*/
- ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova->pfn_lo),
+ ret = domain_pfn_mapping(domain, mm_to_dma_pfn(iova_pfn),
mm_to_dma_pfn(paddr_pfn), size, prot);
if (ret)
goto error;
@@ -3471,18 +3525,18 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
/* it's a non-present to present mapping. Only flush if caching mode */
if (cap_caching_mode(iommu->cap))
iommu_flush_iotlb_psi(iommu, domain,
- mm_to_dma_pfn(iova->pfn_lo),
+ mm_to_dma_pfn(iova_pfn),
size, 0, 1);
else
iommu_flush_write_buffer(iommu);
- start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT;
+ start_paddr = (phys_addr_t)iova_pfn << PAGE_SHIFT;
start_paddr += paddr & ~PAGE_MASK;
return start_paddr;
error:
- if (iova)
- __free_iova(&domain->iovad, iova);
+ if (iova_pfn)
+ free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
pr_err("Device %s request: %zx@%llx dir %d --- failed\n",
dev_name(dev), size, (unsigned long long)paddr, dir);
return 0;
@@ -3497,91 +3551,120 @@ static dma_addr_t intel_map_page(struct device *dev, struct page *page,
dir, *dev->dma_mask);
}
-static void flush_unmaps(void)
+static void flush_unmaps(struct deferred_flush_data *flush_data)
{
int i, j;
- timer_on = 0;
+ flush_data->timer_on = 0;
/* just flush them all */
for (i = 0; i < g_num_of_iommus; i++) {
struct intel_iommu *iommu = g_iommus[i];
+ struct deferred_flush_table *flush_table =
+ &flush_data->tables[i];
if (!iommu)
continue;
- if (!deferred_flush[i].next)
+ if (!flush_table->next)
continue;
/* In caching mode, global flushes turn emulation expensive */
if (!cap_caching_mode(iommu->cap))
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH);
- for (j = 0; j < deferred_flush[i].next; j++) {
+ for (j = 0; j < flush_table->next; j++) {
unsigned long mask;
- struct iova *iova = deferred_flush[i].iova[j];
- struct dmar_domain *domain = deferred_flush[i].domain[j];
+ struct deferred_flush_entry *entry =
+ &flush_table->entries[j];
+ unsigned long iova_pfn = entry->iova_pfn;
+ unsigned long nrpages = entry->nrpages;
+ struct dmar_domain *domain = entry->domain;
+ struct page *freelist = entry->freelist;
/* On real hardware multiple invalidations are expensive */
if (cap_caching_mode(iommu->cap))
iommu_flush_iotlb_psi(iommu, domain,
- iova->pfn_lo, iova_size(iova),
- !deferred_flush[i].freelist[j], 0);
+ mm_to_dma_pfn(iova_pfn),
+ nrpages, !freelist, 0);
else {
- mask = ilog2(mm_to_dma_pfn(iova_size(iova)));
- iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
- (uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
+ mask = ilog2(nrpages);
+ iommu_flush_dev_iotlb(domain,
+ (uint64_t)iova_pfn << PAGE_SHIFT, mask);
}
- __free_iova(&deferred_flush[i].domain[j]->iovad, iova);
- if (deferred_flush[i].freelist[j])
- dma_free_pagelist(deferred_flush[i].freelist[j]);
+ free_iova_fast(&domain->iovad, iova_pfn, nrpages);
+ if (freelist)
+ dma_free_pagelist(freelist);
}
- deferred_flush[i].next = 0;
+ flush_table->next = 0;
}
- list_size = 0;
+ flush_data->size = 0;
}
-static void flush_unmaps_timeout(unsigned long data)
+static void flush_unmaps_timeout(unsigned long cpuid)
{
+ struct deferred_flush_data *flush_data = per_cpu_ptr(&deferred_flush, cpuid);
unsigned long flags;
- spin_lock_irqsave(&async_umap_flush_lock, flags);
- flush_unmaps();
- spin_unlock_irqrestore(&async_umap_flush_lock, flags);
+ spin_lock_irqsave(&flush_data->lock, flags);
+ flush_unmaps(flush_data);
+ spin_unlock_irqrestore(&flush_data->lock, flags);
}
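The timer plumbing is easy to miss here: back in init_dmars() each per-CPU timer was armed with its own CPU number as the callback argument, so a firing timer drains only its own CPU's batch. The resulting call path, as a comment sketch drawn from the hunks in this file:

/*
 * Per-CPU flush path set up by this patch:
 *
 *   init_dmars():
 *       setup_timer(&dfd->timer, flush_unmaps_timeout, cpu);
 *   add_unmap() on CPU n:
 *       mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10));
 *           -> flush_unmaps_timeout(n)
 *               -> flush_unmaps(per_cpu_ptr(&deferred_flush, n))
 *
 * Only the high-water-mark case in add_unmap() still touches other
 * CPUs' batches, by calling flush_unmaps_timeout() per online CPU.
 */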
-static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *freelist)
+static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn,
+ unsigned long nrpages, struct page *freelist)
{
unsigned long flags;
- int next, iommu_id;
+ int entry_id, iommu_id;
struct intel_iommu *iommu;
+ struct deferred_flush_entry *entry;
+ struct deferred_flush_data *flush_data;
+ unsigned int cpuid;
- spin_lock_irqsave(&async_umap_flush_lock, flags);
- if (list_size == HIGH_WATER_MARK)
- flush_unmaps();
+ cpuid = get_cpu();
+ flush_data = per_cpu_ptr(&deferred_flush, cpuid);
+
+	/* Flush all CPUs' entries to avoid deferring too much. If
+	 * this becomes a bottleneck, we could flush only this CPU's
+	 * entries and rely on the flush timer for the rest.
+	 */
+ if (flush_data->size == HIGH_WATER_MARK) {
+ int cpu;
+
+ for_each_online_cpu(cpu)
+ flush_unmaps_timeout(cpu);
+ }
+
+ spin_lock_irqsave(&flush_data->lock, flags);
iommu = domain_get_iommu(dom);
iommu_id = iommu->seq_id;
- next = deferred_flush[iommu_id].next;
- deferred_flush[iommu_id].domain[next] = dom;
- deferred_flush[iommu_id].iova[next] = iova;
- deferred_flush[iommu_id].freelist[next] = freelist;
- deferred_flush[iommu_id].next++;
+ entry_id = flush_data->tables[iommu_id].next;
+ ++(flush_data->tables[iommu_id].next);
- if (!timer_on) {
- mod_timer(&unmap_timer, jiffies + msecs_to_jiffies(10));
- timer_on = 1;
+ entry = &flush_data->tables[iommu_id].entries[entry_id];
+ entry->domain = dom;
+ entry->iova_pfn = iova_pfn;
+ entry->nrpages = nrpages;
+ entry->freelist = freelist;
+
+ if (!flush_data->timer_on) {
+ mod_timer(&flush_data->timer, jiffies + msecs_to_jiffies(10));
+ flush_data->timer_on = 1;
}
- list_size++;
- spin_unlock_irqrestore(&async_umap_flush_lock, flags);
+ flush_data->size++;
+ spin_unlock_irqrestore(&flush_data->lock, flags);
+
+ put_cpu();
}
-static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
+static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
{
struct dmar_domain *domain;
unsigned long start_pfn, last_pfn;
- struct iova *iova;
+ unsigned long nrpages;
+ unsigned long iova_pfn;
struct intel_iommu *iommu;
struct page *freelist;
@@ -3593,13 +3676,11 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
iommu = domain_get_iommu(domain);
- iova = find_iova(&domain->iovad, IOVA_PFN(dev_addr));
- if (WARN_ONCE(!iova, "Driver unmaps unmatched page at PFN %llx\n",
- (unsigned long long)dev_addr))
- return;
+ iova_pfn = IOVA_PFN(dev_addr);
- start_pfn = mm_to_dma_pfn(iova->pfn_lo);
- last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
+ nrpages = aligned_nrpages(dev_addr, size);
+ start_pfn = mm_to_dma_pfn(iova_pfn);
+ last_pfn = start_pfn + nrpages - 1;
pr_debug("Device %s unmapping: pfn %lx-%lx\n",
dev_name(dev), start_pfn, last_pfn);
@@ -3608,12 +3689,12 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
if (intel_iommu_strict) {
iommu_flush_iotlb_psi(iommu, domain, start_pfn,
- last_pfn - start_pfn + 1, !freelist, 0);
+ nrpages, !freelist, 0);
/* free iova */
- __free_iova(&domain->iovad, iova);
+ free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(nrpages));
dma_free_pagelist(freelist);
} else {
- add_unmap(domain, iova, freelist);
+ add_unmap(domain, iova_pfn, nrpages, freelist);
/*
* queue up the release of the unmap to save the 1/6th of the
* cpu used up by the iotlb flush operation...
@@ -3625,7 +3706,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- intel_unmap(dev, dev_addr);
+ intel_unmap(dev, dev_addr, size);
}
static void *intel_alloc_coherent(struct device *dev, size_t size,
@@ -3684,7 +3765,7 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
size = PAGE_ALIGN(size);
order = get_order(size);
- intel_unmap(dev, dma_handle);
+ intel_unmap(dev, dma_handle, size);
if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
__free_pages(page, order);
}
@@ -3693,7 +3774,16 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- intel_unmap(dev, sglist[0].dma_address);
+ dma_addr_t startaddr = sg_dma_address(sglist) & PAGE_MASK;
+ unsigned long nrpages = 0;
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sglist, sg, nelems, i) {
+ nrpages += aligned_nrpages(sg_dma_address(sg), sg_dma_len(sg));
+ }
+
+ intel_unmap(dev, startaddr, nrpages << VTD_PAGE_SHIFT);
}
static int intel_nontranslate_map_sg(struct device *hddev,
@@ -3717,7 +3807,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
struct dmar_domain *domain;
size_t size = 0;
int prot = 0;
- struct iova *iova = NULL;
+ unsigned long iova_pfn;
int ret;
struct scatterlist *sg;
unsigned long start_vpfn;
@@ -3736,9 +3826,9 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
for_each_sg(sglist, sg, nelems, i)
size += aligned_nrpages(sg->offset, sg->length);
- iova = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
+ iova_pfn = intel_alloc_iova(dev, domain, dma_to_mm_pfn(size),
*dev->dma_mask);
- if (!iova) {
+ if (!iova_pfn) {
sglist->dma_length = 0;
return 0;
}
@@ -3753,13 +3843,13 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
prot |= DMA_PTE_WRITE;
- start_vpfn = mm_to_dma_pfn(iova->pfn_lo);
+ start_vpfn = mm_to_dma_pfn(iova_pfn);
ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
if (unlikely(ret)) {
dma_pte_free_pagetable(domain, start_vpfn,
start_vpfn + size - 1);
- __free_iova(&domain->iovad, iova);
+ free_iova_fast(&domain->iovad, iova_pfn, dma_to_mm_pfn(size));
return 0;
}
@@ -4498,6 +4588,46 @@ static struct notifier_block intel_iommu_memory_nb = {
.priority = 0
};
+static void free_all_cpu_cached_iovas(unsigned int cpu)
+{
+ int i;
+
+ for (i = 0; i < g_num_of_iommus; i++) {
+ struct intel_iommu *iommu = g_iommus[i];
+ struct dmar_domain *domain;
+ u16 did;
+
+ if (!iommu)
+ continue;
+
+ for (did = 0; did < 0xffff; did++) {
+ domain = get_iommu_domain(iommu, did);
+
+ if (!domain)
+ continue;
+ free_cpu_cached_iovas(cpu, &domain->iovad);
+ }
+ }
+}
+
+static int intel_iommu_cpu_notifier(struct notifier_block *nfb,
+ unsigned long action, void *v)
+{
+ unsigned int cpu = (unsigned long)v;
+
+ switch (action) {
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ free_all_cpu_cached_iovas(cpu);
+ flush_unmaps_timeout(cpu);
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block intel_iommu_cpu_nb = {
+ .notifier_call = intel_iommu_cpu_notifier,
+};
static ssize_t intel_iommu_show_version(struct device *dev,
struct device_attribute *attr,
@@ -4631,7 +4761,6 @@ int __init intel_iommu_init(void)
up_write(&dmar_global_lock);
pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
- init_timer(&unmap_timer);
#ifdef CONFIG_SWIOTLB
swiotlb = 0;
#endif
@@ -4648,6 +4777,7 @@ int __init intel_iommu_init(void)
bus_register_notifier(&pci_bus_type, &device_nb);
if (si_domain && !hw_pass_through)
register_memory_notifier(&intel_iommu_memory_nb);
+ register_hotcpu_notifier(&intel_iommu_cpu_nb);
intel_iommu_enabled = 1;
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index fa0adef32bd6..ba764a0835d3 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -20,6 +20,17 @@
#include <linux/iova.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/bitops.h>
+
+static bool iova_rcache_insert(struct iova_domain *iovad,
+ unsigned long pfn,
+ unsigned long size);
+static unsigned long iova_rcache_get(struct iova_domain *iovad,
+ unsigned long size,
+ unsigned long limit_pfn);
+static void init_iova_rcaches(struct iova_domain *iovad);
+static void free_iova_rcaches(struct iova_domain *iovad);
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
@@ -38,6 +49,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad->granule = granule;
iovad->start_pfn = start_pfn;
iovad->dma_32bit_pfn = pfn_32bit;
+ init_iova_rcaches(iovad);
}
EXPORT_SYMBOL_GPL(init_iova_domain);
@@ -291,33 +303,18 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
}
EXPORT_SYMBOL_GPL(alloc_iova);
-/**
- * find_iova - find's an iova for a given pfn
- * @iovad: - iova domain in question.
- * @pfn: - page frame number
- * This function finds and returns an iova belonging to the
- * given doamin which matches the given pfn.
- */
-struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
+static struct iova *
+private_find_iova(struct iova_domain *iovad, unsigned long pfn)
{
- unsigned long flags;
- struct rb_node *node;
+ struct rb_node *node = iovad->rbroot.rb_node;
+
+ assert_spin_locked(&iovad->iova_rbtree_lock);
- /* Take the lock so that no other thread is manipulating the rbtree */
- spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
- node = iovad->rbroot.rb_node;
while (node) {
struct iova *iova = container_of(node, struct iova, node);
/* If pfn falls within iova's range, return iova */
if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- /* We are not holding the lock while this iova
- * is referenced by the caller as the same thread
- * which called this function also calls __free_iova()
- * and it is by design that only one thread can possibly
- * reference a particular iova and hence no conflict.
- */
return iova;
}
@@ -327,9 +324,35 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
node = node->rb_right;
}
- spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
return NULL;
}
+
+static void private_free_iova(struct iova_domain *iovad, struct iova *iova)
+{
+ assert_spin_locked(&iovad->iova_rbtree_lock);
+ __cached_rbnode_delete_update(iovad, iova);
+ rb_erase(&iova->node, &iovad->rbroot);
+ free_iova_mem(iova);
+}
+
+/**
+ * find_iova - finds an iova for a given pfn
+ * @iovad: - iova domain in question.
+ * @pfn: - page frame number
+ * This function finds and returns an iova belonging to the
+ * given domain which matches the given pfn.
+ */
+struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
+{
+ unsigned long flags;
+ struct iova *iova;
+
+ /* Take the lock so that no other thread is manipulating the rbtree */
+ spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+ iova = private_find_iova(iovad, pfn);
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+ return iova;
+}
EXPORT_SYMBOL_GPL(find_iova);
/**
@@ -344,10 +367,8 @@ __free_iova(struct iova_domain *iovad, struct iova *iova)
unsigned long flags;
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
- __cached_rbnode_delete_update(iovad, iova);
- rb_erase(&iova->node, &iovad->rbroot);
+ private_free_iova(iovad, iova);
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
- free_iova_mem(iova);
}
EXPORT_SYMBOL_GPL(__free_iova);
@@ -370,6 +391,63 @@ free_iova(struct iova_domain *iovad, unsigned long pfn)
EXPORT_SYMBOL_GPL(free_iova);
/**
+ * alloc_iova_fast - allocates an iova from rcache
+ * @iovad: - iova domain in question
+ * @size: - size of page frames to allocate
+ * @limit_pfn: - max limit address
+ * This function tries to satisfy an iova allocation from the rcache,
+ * and falls back to regular allocation on failure.
+*/
+unsigned long
+alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
+ unsigned long limit_pfn)
+{
+ bool flushed_rcache = false;
+ unsigned long iova_pfn;
+ struct iova *new_iova;
+
+ iova_pfn = iova_rcache_get(iovad, size, limit_pfn);
+ if (iova_pfn)
+ return iova_pfn;
+
+retry:
+ new_iova = alloc_iova(iovad, size, limit_pfn, true);
+ if (!new_iova) {
+ unsigned int cpu;
+
+ if (flushed_rcache)
+ return 0;
+
+ /* Try replenishing IOVAs by flushing rcache. */
+ flushed_rcache = true;
+ for_each_online_cpu(cpu)
+ free_cpu_cached_iovas(cpu, iovad);
+ goto retry;
+ }
+
+ return new_iova->pfn_lo;
+}
+EXPORT_SYMBOL_GPL(alloc_iova_fast);
+
+/**
+ * free_iova_fast - free iova pfn range into rcache
+ * @iovad: - iova domain in question.
+ * @pfn: - pfn that is allocated previously
+ * @size: - # of pages in range
+ * This function frees an iova range by trying to put it into the rcache,
+ * falling back to regular iova deallocation via free_iova() if this fails.
+ */
+void
+free_iova_fast(struct iova_domain *iovad, unsigned long pfn, unsigned long size)
+{
+ if (iova_rcache_insert(iovad, pfn, size))
+ return;
+
+ free_iova(iovad, pfn);
+}
+EXPORT_SYMBOL_GPL(free_iova_fast);
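Callers are expected to pair the two and hand the same size back on free, since the size (rounded up to a power of two) selects which per-order cache the range lands in. A minimal usage sketch, assuming an iova_domain already set up with init_iova_domain(); map_range() is hypothetical:

/* Hypothetical caller: grab a 4-page IOVA below 4 GiB, use it,
 * and return it to the rcache with the same size argument. */
static void map_range(struct iova_domain *iovad)
{
	unsigned long pfn;

	pfn = alloc_iova_fast(iovad, 4, IOVA_PFN(DMA_BIT_MASK(32)));
	if (!pfn)
		return;		/* even the rbtree fallback failed */

	/* ... program the IOMMU page tables for [pfn, pfn + 4) ... */

	free_iova_fast(iovad, pfn, 4);	/* same size as the alloc */
}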
+
+/**
 * put_iova_domain - destroys the iova domain
* @iovad: - iova domain in question.
* All the iova's in that domain are destroyed.
@@ -379,6 +457,7 @@ void put_iova_domain(struct iova_domain *iovad)
struct rb_node *node;
unsigned long flags;
+ free_iova_rcaches(iovad);
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
node = rb_first(&iovad->rbroot);
while (node) {
@@ -550,5 +629,295 @@ error:
return NULL;
}
+/*
+ * Magazine caches for IOVA ranges. For an introduction to magazines,
+ * see the USENIX 2001 paper "Magazines and Vmem: Extending the Slab
+ * Allocator to Many CPUs and Arbitrary Resources" by Bonwick and Adams.
+ * For simplicity, we use a static magazine size and don't implement the
+ * dynamic size tuning described in the paper.
+ */
+
+#define IOVA_MAG_SIZE 128
+
+struct iova_magazine {
+ unsigned long size;
+ unsigned long pfns[IOVA_MAG_SIZE];
+};
+
+struct iova_cpu_rcache {
+ spinlock_t lock;
+ struct iova_magazine *loaded;
+ struct iova_magazine *prev;
+};
+
+static struct iova_magazine *iova_magazine_alloc(gfp_t flags)
+{
+ return kzalloc(sizeof(struct iova_magazine), flags);
+}
+
+static void iova_magazine_free(struct iova_magazine *mag)
+{
+ kfree(mag);
+}
+
+static void
+iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
+{
+ unsigned long flags;
+ int i;
+
+ if (!mag)
+ return;
+
+ spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+
+ for (i = 0 ; i < mag->size; ++i) {
+ struct iova *iova = private_find_iova(iovad, mag->pfns[i]);
+
+ BUG_ON(!iova);
+ private_free_iova(iovad, iova);
+ }
+
+ spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+
+ mag->size = 0;
+}
+
+static bool iova_magazine_full(struct iova_magazine *mag)
+{
+ return (mag && mag->size == IOVA_MAG_SIZE);
+}
+
+static bool iova_magazine_empty(struct iova_magazine *mag)
+{
+ return (!mag || mag->size == 0);
+}
+
+static unsigned long iova_magazine_pop(struct iova_magazine *mag,
+ unsigned long limit_pfn)
+{
+ BUG_ON(iova_magazine_empty(mag));
+
+ if (mag->pfns[mag->size - 1] >= limit_pfn)
+ return 0;
+
+ return mag->pfns[--mag->size];
+}
+
+static void iova_magazine_push(struct iova_magazine *mag, unsigned long pfn)
+{
+ BUG_ON(iova_magazine_full(mag));
+
+ mag->pfns[mag->size++] = pfn;
+}
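With those primitives in place, the per-CPU fast paths below (__iova_rcache_insert() and __iova_rcache_get()) are essentially a two-slot cache: operate on 'loaded', and when it runs dry (or fills up) swap in 'prev' before falling back to the global depot. A lock-free distillation of the get side (rcache_pop() is a hypothetical helper; the real code holds cpu_rcache->lock and handles the depot):

/* Illustration only: the loaded/prev dance without locking or the
 * global depot. A miss here sends the real code to rcache->depot,
 * and past that to the rbtree via alloc_iova(). */
static unsigned long rcache_pop(struct iova_cpu_rcache *cpu_rcache,
				unsigned long limit_pfn)
{
	if (iova_magazine_empty(cpu_rcache->loaded) &&
	    !iova_magazine_empty(cpu_rcache->prev))
		swap(cpu_rcache->loaded, cpu_rcache->prev);

	if (iova_magazine_empty(cpu_rcache->loaded))
		return 0;	/* miss */

	return iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
}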
+
+static void init_iova_rcaches(struct iova_domain *iovad)
+{
+ struct iova_cpu_rcache *cpu_rcache;
+ struct iova_rcache *rcache;
+ unsigned int cpu;
+ int i;
+
+ for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+ rcache = &iovad->rcaches[i];
+ spin_lock_init(&rcache->lock);
+ rcache->depot_size = 0;
+ rcache->cpu_rcaches = __alloc_percpu(sizeof(*cpu_rcache), cache_line_size());
+ if (WARN_ON(!rcache->cpu_rcaches))
+ continue;
+ for_each_possible_cpu(cpu) {
+ cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+ spin_lock_init(&cpu_rcache->lock);
+ cpu_rcache->loaded = iova_magazine_alloc(GFP_KERNEL);
+ cpu_rcache->prev = iova_magazine_alloc(GFP_KERNEL);
+ }
+ }
+}
+
+/*
+ * Try inserting IOVA range starting with 'iova_pfn' into 'rcache', and
+ * return true on success. Can fail if rcache is full and we can't free
+ * space, and free_iova_fast() (our only caller) will then return the
+ * IOVA range to the rbtree instead.
+ */
+static bool __iova_rcache_insert(struct iova_domain *iovad,
+ struct iova_rcache *rcache,
+ unsigned long iova_pfn)
+{
+ struct iova_magazine *mag_to_free = NULL;
+ struct iova_cpu_rcache *cpu_rcache;
+ bool can_insert = false;
+ unsigned long flags;
+
+ cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
+ spin_lock_irqsave(&cpu_rcache->lock, flags);
+
+ if (!iova_magazine_full(cpu_rcache->loaded)) {
+ can_insert = true;
+ } else if (!iova_magazine_full(cpu_rcache->prev)) {
+ swap(cpu_rcache->prev, cpu_rcache->loaded);
+ can_insert = true;
+ } else {
+ struct iova_magazine *new_mag = iova_magazine_alloc(GFP_ATOMIC);
+
+ if (new_mag) {
+ spin_lock(&rcache->lock);
+ if (rcache->depot_size < MAX_GLOBAL_MAGS) {
+ rcache->depot[rcache->depot_size++] =
+ cpu_rcache->loaded;
+ } else {
+ mag_to_free = cpu_rcache->loaded;
+ }
+ spin_unlock(&rcache->lock);
+
+ cpu_rcache->loaded = new_mag;
+ can_insert = true;
+ }
+ }
+
+ if (can_insert)
+ iova_magazine_push(cpu_rcache->loaded, iova_pfn);
+
+ spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+
+ if (mag_to_free) {
+ iova_magazine_free_pfns(mag_to_free, iovad);
+ iova_magazine_free(mag_to_free);
+ }
+
+ return can_insert;
+}
+
+static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn,
+ unsigned long size)
+{
+ unsigned int log_size = order_base_2(size);
+
+ if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
+ return false;
+
+ return __iova_rcache_insert(iovad, &iovad->rcaches[log_size], pfn);
+}
+
+/*
+ * Caller wants to allocate a new IOVA range from 'rcache'. If we can
+ * satisfy the request, return a matching non-NULL range and remove
+ * it from the 'rcache'.
+ */
+static unsigned long __iova_rcache_get(struct iova_rcache *rcache,
+ unsigned long limit_pfn)
+{
+ struct iova_cpu_rcache *cpu_rcache;
+ unsigned long iova_pfn = 0;
+ bool has_pfn = false;
+ unsigned long flags;
+
+ cpu_rcache = this_cpu_ptr(rcache->cpu_rcaches);
+ spin_lock_irqsave(&cpu_rcache->lock, flags);
+
+ if (!iova_magazine_empty(cpu_rcache->loaded)) {
+ has_pfn = true;
+ } else if (!iova_magazine_empty(cpu_rcache->prev)) {
+ swap(cpu_rcache->prev, cpu_rcache->loaded);
+ has_pfn = true;
+ } else {
+ spin_lock(&rcache->lock);
+ if (rcache->depot_size > 0) {
+ iova_magazine_free(cpu_rcache->loaded);
+ cpu_rcache->loaded = rcache->depot[--rcache->depot_size];
+ has_pfn = true;
+ }
+ spin_unlock(&rcache->lock);
+ }
+
+ if (has_pfn)
+ iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
+
+ spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+
+ return iova_pfn;
+}
+
+/*
+ * Try to satisfy IOVA allocation range from rcache. Fail if requested
+ * size is too big or the DMA limit we are given isn't satisfied by the
+ * top element in the magazine.
+ */
+static unsigned long iova_rcache_get(struct iova_domain *iovad,
+ unsigned long size,
+ unsigned long limit_pfn)
+{
+ unsigned int log_size = order_base_2(size);
+
+ if (log_size >= IOVA_RANGE_CACHE_MAX_SIZE)
+ return 0;
+
+ return __iova_rcache_get(&iovad->rcaches[log_size], limit_pfn);
+}
+
+/*
+ * Free a cpu's rcache.
+ */
+static void free_cpu_iova_rcache(unsigned int cpu, struct iova_domain *iovad,
+ struct iova_rcache *rcache)
+{
+ struct iova_cpu_rcache *cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+ unsigned long flags;
+
+ spin_lock_irqsave(&cpu_rcache->lock, flags);
+
+ iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
+ iova_magazine_free(cpu_rcache->loaded);
+
+ iova_magazine_free_pfns(cpu_rcache->prev, iovad);
+ iova_magazine_free(cpu_rcache->prev);
+
+ spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+}
+
+/*
+ * free rcache data structures.
+ */
+static void free_iova_rcaches(struct iova_domain *iovad)
+{
+ struct iova_rcache *rcache;
+ unsigned long flags;
+ unsigned int cpu;
+ int i, j;
+
+ for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+ rcache = &iovad->rcaches[i];
+ for_each_possible_cpu(cpu)
+ free_cpu_iova_rcache(cpu, iovad, rcache);
+ spin_lock_irqsave(&rcache->lock, flags);
+ free_percpu(rcache->cpu_rcaches);
+ for (j = 0; j < rcache->depot_size; ++j) {
+ iova_magazine_free_pfns(rcache->depot[j], iovad);
+ iova_magazine_free(rcache->depot[j]);
+ }
+ spin_unlock_irqrestore(&rcache->lock, flags);
+ }
+}
+
+/*
+ * free all the IOVA ranges cached by a cpu (used when cpu is unplugged)
+ */
+void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad)
+{
+ struct iova_cpu_rcache *cpu_rcache;
+ struct iova_rcache *rcache;
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+ rcache = &iovad->rcaches[i];
+ cpu_rcache = per_cpu_ptr(rcache->cpu_rcaches, cpu);
+ spin_lock_irqsave(&cpu_rcache->lock, flags);
+ iova_magazine_free_pfns(cpu_rcache->loaded, iovad);
+ iova_magazine_free_pfns(cpu_rcache->prev, iovad);
+ spin_unlock_irqrestore(&cpu_rcache->lock, flags);
+ }
+}
+
MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
index eb5eb0cd414d..2223b3f15d68 100644
--- a/drivers/irqchip/irq-clps711x.c
+++ b/drivers/irqchip/irq-clps711x.c
@@ -182,7 +182,7 @@ static int __init _clps711x_intc_init(struct device_node *np,
writel_relaxed(0, clps711x_intc->intmr[2]);
err = irq_alloc_descs(-1, 0, ARRAY_SIZE(clps711x_irqs), numa_node_id());
- if (IS_ERR_VALUE(err))
+ if (err < 0)
goto out_iounmap;
clps711x_intc->ops.map = clps711x_intc_irq_map;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index b4e647179346..fbc4ae2afd29 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1123,7 +1123,7 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
numa_node_id());
- if (IS_ERR_VALUE(irq_base)) {
+ if (irq_base < 0) {
WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
irq_start);
irq_base = irq_start;
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 9688d2e2a636..9e25d8ce08e5 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -402,7 +402,7 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
nr_irqs -= hwirq_base; /* calculate # of irqs to allocate */
irq_base = irq_alloc_descs(-1, hwirq_base, nr_irqs, numa_node_id());
- if (IS_ERR_VALUE(irq_base)) {
+ if (irq_base < 0) {
pr_err("failed to allocate IRQ numbers\n");
return -EINVAL;
}
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index 1ccd2abed65f..1518ba31a80c 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -232,7 +232,7 @@ static int __init shirq_init(struct spear_shirq **shirq_blocks, int block_nr,
nr_irqs += shirq_blocks[i]->nr_irqs;
virq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
- if (IS_ERR_VALUE(virq_base)) {
+ if (virq_base < 0) {
pr_err("%s: irq desc alloc failed\n", __func__);
goto err_unmap;
}
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 8eeab72b93e2..ca4abe1ccd8d 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -64,7 +64,6 @@
#include "btree.h"
#include <linux/blkdev.h>
-#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <trace/events/bcache.h>
@@ -288,7 +287,6 @@ do { \
if (kthread_should_stop()) \
return 0; \
\
- try_to_freeze(); \
schedule(); \
mutex_lock(&(ca)->set->bucket_lock); \
} \
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 22b9e34ceb75..eab505ee0027 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -27,7 +27,6 @@
#include <linux/slab.h>
#include <linux/bitops.h>
-#include <linux/freezer.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
@@ -1787,7 +1786,6 @@ again:
mutex_unlock(&c->bucket_lock);
- try_to_freeze();
schedule();
}
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index b9346cd9cda1..60123677b382 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -12,7 +12,6 @@
#include "writeback.h"
#include <linux/delay.h>
-#include <linux/freezer.h>
#include <linux/kthread.h>
#include <trace/events/bcache.h>
@@ -228,7 +227,6 @@ static void read_dirty(struct cached_dev *dc)
*/
while (!kthread_should_stop()) {
- try_to_freeze();
w = bch_keybuf_next(&dc->writeback_keys);
if (!w)
@@ -433,7 +431,6 @@ static int bch_writeback_thread(void *arg)
if (kthread_should_stop())
return 0;
- try_to_freeze();
schedule();
continue;
}
diff --git a/drivers/media/i2c/adp1653.c b/drivers/media/i2c/adp1653.c
index 9e1731c565e7..e191e295c951 100644
--- a/drivers/media/i2c/adp1653.c
+++ b/drivers/media/i2c/adp1653.c
@@ -95,7 +95,7 @@ static int adp1653_get_fault(struct adp1653_flash *flash)
int rval;
fault = i2c_smbus_read_byte_data(client, ADP1653_REG_FAULT);
- if (IS_ERR_VALUE(fault))
+ if (fault < 0)
return fault;
flash->fault |= fault;
@@ -105,13 +105,13 @@ static int adp1653_get_fault(struct adp1653_flash *flash)
/* Clear faults. */
rval = i2c_smbus_write_byte_data(client, ADP1653_REG_OUT_SEL, 0);
- if (IS_ERR_VALUE(rval))
+ if (rval < 0)
return rval;
flash->led_mode->val = V4L2_FLASH_LED_MODE_NONE;
rval = adp1653_update_hw(flash);
- if (IS_ERR_VALUE(rval))
+ if (rval)
return rval;
return flash->fault;
@@ -158,7 +158,7 @@ static int adp1653_get_ctrl(struct v4l2_ctrl *ctrl)
int rval;
rval = adp1653_get_fault(flash);
- if (IS_ERR_VALUE(rval))
+ if (rval)
return rval;
ctrl->cur.val = 0;
@@ -184,7 +184,7 @@ static int adp1653_set_ctrl(struct v4l2_ctrl *ctrl)
int rval;
rval = adp1653_get_fault(flash);
- if (IS_ERR_VALUE(rval))
+ if (rval)
return rval;
if ((rval & (ADP1653_REG_FAULT_FLT_SCP |
ADP1653_REG_FAULT_FLT_OT |
diff --git a/drivers/media/platform/s5p-tv/mixer_drv.c b/drivers/media/platform/s5p-tv/mixer_drv.c
index 5ef67774971d..8a5d19469ddc 100644
--- a/drivers/media/platform/s5p-tv/mixer_drv.c
+++ b/drivers/media/platform/s5p-tv/mixer_drv.c
@@ -146,7 +146,7 @@ int mxr_power_get(struct mxr_device *mdev)
/* returning 1 means that power is already enabled,
 * so zero should be returned for success */
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
return ret;
return 0;
}
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 40e51b0baa46..b46c0cfc27d9 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -696,7 +696,7 @@ int twl4030_init_irq(struct device *dev, int irq_num)
nr_irqs = TWL4030_PWR_NR_IRQS + TWL4030_CORE_NR_IRQS;
irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
- if (IS_ERR_VALUE(irq_base)) {
+ if (irq_base < 0) {
dev_err(dev, "Fail to allocate IRQ descs\n");
return irq_base;
}
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index b81b08f81325..c984321d1881 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1276,7 +1276,7 @@ static int mmc_select_hs200(struct mmc_card *card)
* switch to HS200 mode if bus width is set successfully.
*/
err = mmc_select_bus_width(card);
- if (!IS_ERR_VALUE(err)) {
+ if (!err) {
val = EXT_CSD_TIMING_HS200 |
card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1583,7 +1583,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
} else if (mmc_card_hs(card)) {
/* Select the desired bus width optionally */
err = mmc_select_bus_width(card);
- if (!IS_ERR_VALUE(err)) {
+ if (!err) {
err = mmc_select_hs_ddr(card);
if (err)
goto free_card;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 829a6eebcdce..2cc6123b1df9 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1431,7 +1431,7 @@ static int dw_mci_get_ro(struct mmc_host *mmc)
int gpio_ro = mmc_gpio_get_ro(mmc);
/* Use platform get_ro function, else try on board write protect */
- if (!IS_ERR_VALUE(gpio_ro))
+ if (gpio_ro >= 0)
read_only = gpio_ro;
else
read_only =
@@ -1454,7 +1454,7 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
if ((mmc->caps & MMC_CAP_NEEDS_POLL) ||
(mmc->caps & MMC_CAP_NONREMOVABLE))
present = 1;
- else if (!IS_ERR_VALUE(gpio_cd))
+ else if (gpio_cd >= 0)
present = gpio_cd;
else
present = (mci_readl(slot->host, CDETECT) & (1 << slot->id))
@@ -2927,7 +2927,7 @@ static void dw_mci_enable_cd(struct dw_mci *host)
if (slot->mmc->caps & MMC_CAP_NEEDS_POLL)
return;
- if (IS_ERR_VALUE(mmc_gpio_get_cd(slot->mmc)))
+ if (mmc_gpio_get_cd(slot->mmc) < 0)
break;
}
if (i == host->num_slots)
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 2d300d87cda8..9d3ae1f4bd3c 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -1011,7 +1011,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
if (ret)
return ret;
- if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
+ if (mmc_gpio_get_cd(host->mmc) >= 0)
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
return 0;
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 25f779e09d8e..d4cef713d246 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -289,7 +289,7 @@ static int sdhci_at91_probe(struct platform_device *pdev)
* to enable polling via device tree with broken-cd property.
*/
if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE) &&
- IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) {
+ mmc_gpio_get_cd(host->mmc) < 0) {
host->mmc->caps |= MMC_CAP_NEEDS_POLL;
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index e010ea4eb6f5..0e3d7c056cb1 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1624,7 +1624,7 @@ static int sdhci_get_cd(struct mmc_host *mmc)
* Try slot gpio detect, if defined it takes precedence
* over built-in controller functionality
*/
- if (!IS_ERR_VALUE(gpio_cd))
+ if (gpio_cd >= 0)
return !!gpio_cd;
/* If polling, assume that the card is always present. */
@@ -3077,7 +3077,7 @@ int sdhci_add_host(struct sdhci_host *host)
if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
!(mmc->caps & MMC_CAP_NONREMOVABLE) &&
- IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
+ mmc_gpio_get_cd(host->mmc) < 0)
mmc->caps |= MMC_CAP_NEEDS_POLL;
/* If there are external regulators, get them */
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index efc8ea250c1d..68b9160108c9 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -67,10 +67,6 @@ struct atmel_nand_caps {
uint8_t pmecc_max_correction;
};
-struct atmel_nand_nfc_caps {
- uint32_t rb_mask;
-};
-
/*
* oob layout for large page size
* bad block info is on bytes 0 and 1
@@ -129,7 +125,6 @@ struct atmel_nfc {
/* Points to the SRAM bank which includes data read via NFC */
void *data_in_sram;
bool will_write_sram;
- const struct atmel_nand_nfc_caps *caps;
};
static struct atmel_nfc nand_nfc;
@@ -1715,9 +1710,9 @@ static irqreturn_t hsmc_interrupt(int irq, void *dev_id)
nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE);
ret = IRQ_HANDLED;
}
- if (pending & host->nfc->caps->rb_mask) {
+ if (pending & NFC_SR_RB_EDGE) {
complete(&host->nfc->comp_ready);
- nfc_writel(host->nfc->hsmc_regs, IDR, host->nfc->caps->rb_mask);
+ nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE);
ret = IRQ_HANDLED;
}
if (pending & NFC_SR_CMD_DONE) {
@@ -1735,7 +1730,7 @@ static void nfc_prepare_interrupt(struct atmel_nand_host *host, u32 flag)
if (flag & NFC_SR_XFR_DONE)
init_completion(&host->nfc->comp_xfer_done);
- if (flag & host->nfc->caps->rb_mask)
+ if (flag & NFC_SR_RB_EDGE)
init_completion(&host->nfc->comp_ready);
if (flag & NFC_SR_CMD_DONE)
@@ -1753,7 +1748,7 @@ static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag)
if (flag & NFC_SR_XFR_DONE)
comp[index++] = &host->nfc->comp_xfer_done;
- if (flag & host->nfc->caps->rb_mask)
+ if (flag & NFC_SR_RB_EDGE)
comp[index++] = &host->nfc->comp_ready;
if (flag & NFC_SR_CMD_DONE)
@@ -1821,7 +1816,7 @@ static int nfc_device_ready(struct mtd_info *mtd)
dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n",
mask & status);
- return status & host->nfc->caps->rb_mask;
+ return status & NFC_SR_RB_EDGE;
}
static void nfc_select_chip(struct mtd_info *mtd, int chip)
@@ -1994,8 +1989,8 @@ static void nfc_nand_command(struct mtd_info *mtd, unsigned int command,
}
/* fall through */
default:
- nfc_prepare_interrupt(host, host->nfc->caps->rb_mask);
- nfc_wait_interrupt(host, host->nfc->caps->rb_mask);
+ nfc_prepare_interrupt(host, NFC_SR_RB_EDGE);
+ nfc_wait_interrupt(host, NFC_SR_RB_EDGE);
}
}
@@ -2426,11 +2421,6 @@ static int atmel_nand_nfc_probe(struct platform_device *pdev)
}
}
- nfc->caps = (const struct atmel_nand_nfc_caps *)
- of_device_get_match_data(&pdev->dev);
- if (!nfc->caps)
- return -ENODEV;
-
nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff);
nfc_readl(nfc->hsmc_regs, SR); /* clear the NFC_SR */
@@ -2459,17 +2449,8 @@ static int atmel_nand_nfc_remove(struct platform_device *pdev)
return 0;
}
-static const struct atmel_nand_nfc_caps sama5d3_nfc_caps = {
- .rb_mask = NFC_SR_RB_EDGE0,
-};
-
-static const struct atmel_nand_nfc_caps sama5d4_nfc_caps = {
- .rb_mask = NFC_SR_RB_EDGE3,
-};
-
static const struct of_device_id atmel_nand_nfc_match[] = {
- { .compatible = "atmel,sama5d3-nfc", .data = &sama5d3_nfc_caps },
- { .compatible = "atmel,sama5d4-nfc", .data = &sama5d4_nfc_caps },
+ { .compatible = "atmel,sama5d3-nfc" },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match);
diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h
index 0bbc1fa97dba..4d5d26221a7e 100644
--- a/drivers/mtd/nand/atmel_nand_nfc.h
+++ b/drivers/mtd/nand/atmel_nand_nfc.h
@@ -42,8 +42,7 @@
#define NFC_SR_UNDEF (1 << 21)
#define NFC_SR_AWB (1 << 22)
#define NFC_SR_ASE (1 << 23)
-#define NFC_SR_RB_EDGE0 (1 << 24)
-#define NFC_SR_RB_EDGE3 (1 << 27)
+#define NFC_SR_RB_EDGE (1 << 24)
#define ATMEL_HSMC_NFC_IER 0x0c
#define ATMEL_HSMC_NFC_IDR 0x10
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index a7d1febf667a..16baeb51b2bd 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -149,6 +149,8 @@ static struct device_attribute dev_bgt_enabled =
__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);
+static struct device_attribute dev_ro_mode =
+ __ATTR(ro_mode, S_IRUGO, dev_attribute_show, NULL);
/**
* ubi_volume_notify - send a volume change notification.
@@ -385,6 +387,8 @@ static ssize_t dev_attribute_show(struct device *dev,
ret = sprintf(buf, "%d\n", ubi->thread_enabled);
else if (attr == &dev_mtd_num)
ret = sprintf(buf, "%d\n", ubi->mtd->index);
+ else if (attr == &dev_ro_mode)
+ ret = sprintf(buf, "%d\n", ubi->ro_mode);
else
ret = -EINVAL;
@@ -404,6 +408,7 @@ static struct attribute *ubi_dev_attrs[] = {
&dev_min_io_size.attr,
&dev_bgt_enabled.attr,
&dev_mtd_num.attr,
+ &dev_ro_mode.attr,
NULL
};
ATTRIBUTE_GROUPS(ubi_dev);
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index c4cb15a3098c..f101a4985a7c 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -352,7 +352,8 @@ static ssize_t dfs_file_write(struct file *file, const char __user *user_buf,
} else if (dent == d->dfs_emulate_power_cut) {
if (kstrtoint(buf, 0, &val) != 0)
count = -EINVAL;
- d->emulate_power_cut = val;
+ else
+ d->emulate_power_cut = val;
goto out;
}
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 5b9834cf2820..5780dd1ba79d 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -426,8 +426,25 @@ retry:
pnum, vol_id, lnum);
err = -EBADMSG;
} else {
- err = -EINVAL;
- ubi_ro_mode(ubi);
+ /*
+ * Ending up here in the non-Fastmap case
+ * is a clear bug as the VID header had to
+ * be present at scan time to have it referenced.
+ * With fastmap the story is more complicated.
+ * Fastmap has the mapping info without the need
+ * of a full scan. So the LEB could have been
+ * unmapped, Fastmap cannot know this and keeps
+ * the LEB referenced.
+ * This is valid and works as the layer above UBI
+ * has to do bookkeeping about used/referenced
+ * LEBs in any case.
+ */
+ if (ubi->fast_attach) {
+ err = -EBADMSG;
+ } else {
+ err = -EINVAL;
+ ubi_ro_mode(ubi);
+ }
}
}
goto out_free;
@@ -1202,32 +1219,6 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
}
cond_resched();
-
- /*
- * We've written the data and are going to read it back to make
- * sure it was written correctly.
- */
- memset(ubi->peb_buf, 0xFF, aldata_size);
- err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
- if (err) {
- if (err != UBI_IO_BITFLIPS) {
- ubi_warn(ubi, "error %d while reading data back from PEB %d",
- err, to);
- if (is_error_sane(err))
- err = MOVE_TARGET_RD_ERR;
- } else
- err = MOVE_TARGET_BITFLIPS;
- goto out_unlock_buf;
- }
-
- cond_resched();
-
- if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
- ubi_warn(ubi, "read data back from PEB %d and it is different",
- to);
- err = -EINVAL;
- goto out_unlock_buf;
- }
}
ubi_assert(vol->eba_tbl[lnum] == from);
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index 263b439e21a8..990898b9dc72 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -1058,6 +1058,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
ubi_msg(ubi, "fastmap WL pool size: %d",
ubi->fm_wl_pool.max_size);
ubi->fm_disabled = 0;
+ ubi->fast_attach = 1;
ubi_free_vid_hdr(ubi, vh);
kfree(ech);
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 437757c89b9e..348dbbcbedc8 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -705,7 +705,7 @@ int ubi_leb_map(struct ubi_volume_desc *desc, int lnum)
struct ubi_volume *vol = desc->vol;
struct ubi_device *ubi = vol->ubi;
- dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum);
+ dbg_gen("map LEB %d:%d", vol->vol_id, lnum);
if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
return -EROFS;
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index dadc6a9d5755..61d4e99755a4 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -466,6 +466,7 @@ struct ubi_debug_info {
* @fm_eba_sem: allows ubi_update_fastmap() to block EBA table changes
* @fm_work: fastmap work queue
* @fm_work_scheduled: non-zero if fastmap work was scheduled
+ * @fast_attach: non-zero if UBI was attached by fastmap
*
* @used: RB-tree of used physical eraseblocks
* @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -574,6 +575,7 @@ struct ubi_device {
size_t fm_size;
struct work_struct fm_work;
int fm_work_scheduled;
+ int fast_attach;
/* Wear-leveling sub-system's stuff */
struct rb_root used;
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 1ae17bb9b889..10059dfdc1b6 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -405,7 +405,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
if (!no_vtbl)
self_check_volumes(ubi);
- return err;
+ return 0;
out_err:
ubi_err(ubi, "cannot remove volume %d, error %d", vol_id, err);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 17ec948ac40e..959c7b12e0b1 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1534,6 +1534,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
INIT_LIST_HEAD(&ubi->pq[i]);
ubi->pq_head = 0;
+ ubi->free_count = 0;
list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
cond_resched();
@@ -1552,7 +1553,6 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
found_pebs++;
}
- ubi->free_count = 0;
list_for_each_entry(aeb, &ai->free, u.list) {
cond_resched();
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index bcb9dccada4d..1de2e1e51c2b 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -615,7 +615,7 @@ struct fman {
struct fman_cfg *cfg;
struct muram_info *muram;
/* cam section in muram */
- int cam_offset;
+ unsigned long cam_offset;
size_t cam_size;
/* Fifo in MURAM */
int fifo_offset;
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index 4eb0e9ac7182..47394c45b6e8 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -129,7 +129,7 @@ unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
*
* Return: offset of the allocated memory; -ENOMEM (as unsigned long) otherwise.
*/
-int fman_muram_alloc(struct muram_info *muram, size_t size)
+unsigned long fman_muram_alloc(struct muram_info *muram, size_t size)
{
unsigned long vaddr;
@@ -150,7 +150,7 @@ int fman_muram_alloc(struct muram_info *muram, size_t size)
*
* Free an allocated memory from FM-MURAM partition.
*/
-void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size)
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size)
{
unsigned long addr = fman_muram_offset_to_vbase(muram, offset);
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
index dbf0af9e5bb5..889649ad8931 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.h
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -44,8 +44,8 @@ struct muram_info *fman_muram_init(phys_addr_t base, size_t size);
unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
unsigned long offset);
-int fman_muram_alloc(struct muram_info *muram, size_t size);
+unsigned long fman_muram_alloc(struct muram_info *muram, size_t size);
-void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size);
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size);
#endif /* __FM_MURAM_EXT */
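Widening the MURAM offset from int/u32 to unsigned long keeps large offsets from being truncated or misread as negative. Callers can still detect failure with IS_ERR_VALUE(), which is appropriate here precisely because the error is encoded in an unsigned long; a hedged caller sketch, with the size chosen for illustration:

	#include <linux/err.h>

	/* sketch: allocate and later free a MURAM region by offset */
	static int example_muram_use(struct muram_info *muram)
	{
		size_t cam_size = 128;	/* illustrative size */
		unsigned long cam_offset = fman_muram_alloc(muram, cam_size);

		if (IS_ERR_VALUE(cam_offset))	/* a -errno in disguise */
			return -ENOMEM;

		/* ... use the region ... */
		fman_muram_free_mem(muram, cam_offset, cam_size);
		return 0;
	}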
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 020ac1a4b408..cea9443c22a6 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -382,7 +382,7 @@ static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue,
ret = of_property_read_u32(dt_node, "ref-clock-frequency",
&pdev_data->ref_clock_freq);
- if (IS_ERR_VALUE(ret)) {
+ if (ret) {
dev_err(glue->dev,
"can't get reference clock frequency (%d)\n", ret);
return ret;
@@ -425,7 +425,7 @@ static int wl1271_probe(struct spi_device *spi)
}
ret = wlcore_probe_of(spi, glue, &pdev_data);
- if (IS_ERR_VALUE(ret)) {
+ if (ret) {
dev_err(glue->dev,
"can't get device tree parameters (%d)\n", ret);
return ret;
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 042baec56931..608fc4464574 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -164,14 +164,22 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
}
static long pmem_direct_access(struct block_device *bdev, sector_t sector,
- void __pmem **kaddr, pfn_t *pfn)
+ void __pmem **kaddr, pfn_t *pfn, long size)
{
struct pmem_device *pmem = bdev->bd_queue->queuedata;
resource_size_t offset = sector * 512 + pmem->data_offset;
+ if (unlikely(is_bad_pmem(&pmem->bb, sector, size)))
+ return -EIO;
*kaddr = pmem->virt_addr + offset;
*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+ /*
+ * If badblocks are present, limit known good range to the
+ * requested range.
+ */
+ if (unlikely(pmem->bb.count))
+ return size;
return pmem->size - pmem->pfn_pad - offset;
}
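With the extra size argument, ->direct_access() can refuse a range that overlaps known bad blocks (-EIO) and, when the device has any badblocks at all, promises validity only for the requested span. A hedged sketch of a caller honoring that contract (the fops signature follows the dcssblk change later in this series):

	#include <linux/blkdev.h>
	#include <linux/pfn_t.h>
	#include <linux/kernel.h>

	/* sketch: map 'size' bytes of persistent memory for direct access */
	static long example_map_pmem(struct block_device *bdev, sector_t sector,
				     long size)
	{
		void __pmem *kaddr;
		pfn_t pfn;
		long avail;

		avail = bdev->bd_disk->fops->direct_access(bdev, sector,
							   &kaddr, &pfn, size);
		if (avail < 0)
			return avail;	/* e.g. -EIO across a bad block */

		return min(avail, size);	/* trust no more than requested */
	}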
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2de248bd462b..1a51584a382b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -95,6 +95,15 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
break;
}
break;
+ case NVME_CTRL_DEAD:
+ switch (old_state) {
+ case NVME_CTRL_DELETING:
+ changed = true;
+ /* FALLTHRU */
+ default:
+ break;
+ }
+ break;
default:
break;
}
@@ -720,10 +729,14 @@ static void nvme_init_integrity(struct nvme_ns *ns)
switch (ns->pi_type) {
case NVME_NS_DPS_PI_TYPE3:
integrity.profile = &t10_pi_type3_crc;
+ integrity.tag_size = sizeof(u16) + sizeof(u32);
+ integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
break;
case NVME_NS_DPS_PI_TYPE1:
case NVME_NS_DPS_PI_TYPE2:
integrity.profile = &t10_pi_type1_crc;
+ integrity.tag_size = sizeof(u16);
+ integrity.flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
break;
default:
integrity.profile = NULL;
@@ -1212,6 +1225,9 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd,
return ctrl->ops->reset_ctrl(ctrl);
case NVME_IOCTL_SUBSYS_RESET:
return nvme_reset_subsystem(ctrl);
+ case NVME_IOCTL_RESCAN:
+ nvme_queue_scan(ctrl);
+ return 0;
default:
return -ENOTTY;
}
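NVME_IOCTL_RESCAN gives userspace a way to force a namespace rescan on the controller character device, mirroring the new rescan_controller sysfs attribute below. A hedged userspace sketch, assuming the updated <linux/nvme_ioctl.h> uapi header is installed:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/nvme_ioctl.h>

	/* sketch: ask the kernel to rescan namespaces, e.g. on /dev/nvme0 */
	int nvme_rescan(const char *ctrl_dev)
	{
		int fd = open(ctrl_dev, O_RDWR);
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, NVME_IOCTL_RESCAN);
		close(fd);
		return ret;
	}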
@@ -1239,6 +1255,17 @@ static ssize_t nvme_sysfs_reset(struct device *dev,
}
static DEVICE_ATTR(reset_controller, S_IWUSR, NULL, nvme_sysfs_reset);
+static ssize_t nvme_sysfs_rescan(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
+ nvme_queue_scan(ctrl);
+ return count;
+}
+static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan);
+
static ssize_t wwid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -1342,6 +1369,7 @@ nvme_show_int_function(cntlid);
static struct attribute *nvme_dev_attrs[] = {
&dev_attr_reset_controller.attr,
+ &dev_attr_rescan_controller.attr,
&dev_attr_model.attr,
&dev_attr_serial.attr,
&dev_attr_firmware_rev.attr,
@@ -1580,6 +1608,15 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns, *next;
+ /*
+ * The dead state indicates that the controller was not gracefully
+ * disconnected. In that case, we won't be able to flush any data while
+ * removing the namespaces' disks; fail all the queues now to avoid
+ * potentially having to clean up the failed sync later.
+ */
+ if (ctrl->state == NVME_CTRL_DEAD)
+ nvme_kill_queues(ctrl);
+
mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
nvme_ns_remove(ns);
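Putting the pieces together: a controller that vanishes from the bus is moved DELETING -> DEAD, and teardown then fails all queues instead of trying to flush through hardware that is gone. A condensed sketch of the surprise-removal flow as wired up across the hunks in this series:

	/* sketch: surprise removal, condensed from this series */
	static void example_surprise_remove(struct nvme_dev *dev,
					    struct pci_dev *pdev)
	{
		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);

		if (!pci_device_is_present(pdev))	/* no graceful disconnect */
			nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);

		/* nvme_remove_namespaces() later checks the state: */
		if (dev->ctrl.state == NVME_CTRL_DEAD)
			nvme_kill_queues(&dev->ctrl);	/* fail I/O at once */
	}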
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 114b92873894..1daa0482de0e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -72,6 +72,7 @@ enum nvme_ctrl_state {
NVME_CTRL_LIVE,
NVME_CTRL_RESETTING,
NVME_CTRL_DELETING,
+ NVME_CTRL_DEAD,
};
struct nvme_ctrl {
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 0f093f14d348..78dca3193ca4 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1394,7 +1394,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
struct pci_dev *pdev = to_pci_dev(dev->dev);
int result, i, vecs, nr_io_queues, size;
- nr_io_queues = num_possible_cpus();
+ nr_io_queues = num_online_cpus();
result = nvme_set_queue_count(&dev->ctrl, &nr_io_queues);
if (result < 0)
return result;
@@ -1551,12 +1551,12 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode)
static void nvme_disable_io_queues(struct nvme_dev *dev)
{
- int pass;
+ int pass, queues = dev->online_queues - 1;
unsigned long timeout;
u8 opcode = nvme_admin_delete_sq;
for (pass = 0; pass < 2; pass++) {
- int sent = 0, i = dev->queue_count - 1;
+ int sent = 0, i = queues;
reinit_completion(&dev->ioq_wait);
retry:
@@ -1857,7 +1857,7 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
nvme_kill_queues(&dev->ctrl);
if (pci_get_drvdata(pdev))
- pci_stop_and_remove_bus_device_locked(pdev);
+ device_release_driver(&pdev->dev);
nvme_put_ctrl(&dev->ctrl);
}
@@ -2017,6 +2017,10 @@ static void nvme_remove(struct pci_dev *pdev)
nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DELETING);
pci_set_drvdata(pdev, NULL);
+
+ if (!pci_device_is_present(pdev))
+ nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
+
flush_work(&dev->reset_work);
nvme_uninit_ctrl(&dev->ctrl);
nvme_dev_disable(dev, true);
@@ -2060,14 +2064,17 @@ static pci_ers_result_t nvme_error_detected(struct pci_dev *pdev,
* shutdown the controller to quiesce. The controller will be restarted
* after the slot reset through driver's slot_reset callback.
*/
- dev_warn(dev->ctrl.device, "error detected: state:%d\n", state);
switch (state) {
case pci_channel_io_normal:
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
+ dev_warn(dev->ctrl.device,
+ "frozen state error detected, reset controller\n");
nvme_dev_disable(dev, false);
return PCI_ERS_RESULT_NEED_RESET;
case pci_channel_io_perm_failure:
+ dev_warn(dev->ctrl.device,
+ "failure state error detected, request disconnect\n");
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_NEED_RESET;
@@ -2102,6 +2109,12 @@ static const struct pci_device_id nvme_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x0953),
.driver_data = NVME_QUIRK_STRIPE_SIZE |
NVME_QUIRK_DISCARD_ZEROES, },
+ { PCI_VDEVICE(INTEL, 0x0a53),
+ .driver_data = NVME_QUIRK_STRIPE_SIZE |
+ NVME_QUIRK_DISCARD_ZEROES, },
+ { PCI_VDEVICE(INTEL, 0x0a54),
+ .driver_data = NVME_QUIRK_STRIPE_SIZE |
+ NVME_QUIRK_DISCARD_ZEROES, },
{ PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
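Two notes on the pci.c hunks: nr_io_queues now starts from the CPUs actually online, and nvme_disable_io_queues() snapshots the queue count up front because online_queues drops as each queue is torn down, so iterating on the live counter would skip queues. A minimal sketch of the snapshot idiom (the queue lookup is illustrative):

	/* sketch: capture the count once before tearing queues down */
	static void example_disable_io_queues(struct nvme_dev *dev)
	{
		int queues = dev->online_queues - 1;	/* exclude admin queue */
		int i;

		for (i = queues; i > 0; i--)
			nvme_delete_queue(dev->queues[i], nvme_admin_delete_sq);
	}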
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index bb4ea123547f..965911d9b36a 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -113,7 +113,7 @@ static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
rc = nvmem_reg_read(nvmem, pos, buf, count);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
return count;
@@ -147,7 +147,7 @@ static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
rc = nvmem_reg_write(nvmem, pos, buf, count);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
return count;
@@ -366,7 +366,7 @@ static int nvmem_add_cells(struct nvmem_device *nvmem,
}
rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
- if (IS_ERR_VALUE(rval)) {
+ if (rval) {
kfree(cells[i]);
goto err;
}
@@ -963,7 +963,7 @@ static int __nvmem_cell_read(struct nvmem_device *nvmem,
rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
/* shift bits in-place */
@@ -998,7 +998,7 @@ void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
return ERR_PTR(-ENOMEM);
rc = __nvmem_cell_read(nvmem, cell, buf, len);
- if (IS_ERR_VALUE(rc)) {
+ if (rc) {
kfree(buf);
return ERR_PTR(rc);
}
@@ -1083,7 +1083,7 @@ int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
if (cell->bit_offset || cell->nbits)
kfree(buf);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
return len;
@@ -1111,11 +1111,11 @@ ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
return -EINVAL;
rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
return len;
@@ -1141,7 +1141,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem,
return -EINVAL;
rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
return nvmem_cell_write(&cell, buf, cell.bytes);
@@ -1170,7 +1170,7 @@ int nvmem_device_read(struct nvmem_device *nvmem,
rc = nvmem_reg_read(nvmem, offset, buf, bytes);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
return bytes;
@@ -1198,7 +1198,7 @@ int nvmem_device_write(struct nvmem_device *nvmem,
rc = nvmem_reg_write(nvmem, offset, buf, bytes);
- if (IS_ERR_VALUE(rc))
+ if (rc)
return rc;
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 55182fc58c6a..677a811b3a6f 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -153,8 +153,10 @@ struct byt_community {
.name = (n), \
.pins = (p), \
.npins = ARRAY_SIZE((p)), \
- .has_simple_funcs = 1, \
- .simple_funcs = (f), \
+ .has_simple_funcs = 1, \
+ { \
+ .simple_funcs = (f), \
+ }, \
.nfuncs = ARRAY_SIZE((f)), \
}
#define PIN_GROUP_MIXED(n, p, f) \
@@ -163,7 +165,9 @@ struct byt_community {
.pins = (p), \
.npins = ARRAY_SIZE((p)), \
.has_simple_funcs = 0, \
- .mixed_funcs = (f), \
+ { \
+ .mixed_funcs = (f), \
+ }, \
.nfuncs = ARRAY_SIZE((f)), \
}
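The extra brace level in the PIN_GROUP macros exists because simple_funcs and mixed_funcs sit in an anonymous union: some older compilers reject a bare designated initializer for a union member mixed into the struct's initializer list, while an explicitly braced sub-initializer is accepted everywhere. A standalone illustration with hypothetical types:

	struct example_group {
		const char *name;
		union {
			const unsigned int *simple_funcs;
			const unsigned int *mixed_funcs;
		};
	};

	static const unsigned int funcs[] = { 1, 2 };

	/* the braced sub-initializer selects one member of the union */
	static const struct example_group grp = {
		.name = "grp0",
		{
			.simple_funcs = funcs,
		},
	};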
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index ed2004be13cf..c06bb85c2839 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -846,6 +846,18 @@ config INTEL_IMR
If you are running on a Galileo/Quark say Y here.
+config INTEL_PMC_CORE
+ bool "Intel PMC Core driver"
+ depends on X86 && PCI
+ ---help---
+ The Intel Platform Controller Hub for Intel Core SoCs provides access
+ to Power Management Controller registers via a PCI interface. This
+ driver exposes the debugging capabilities and supported features of
+ the Power Management Controller.
+
+ Supported features:
+ - SLP_S0_RESIDENCY counter.
+
config IBM_RTL
tristate "Device driver to enable PRTL support"
depends on X86 && PCI
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 448443c3baba..9b11b4073e03 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -69,3 +69,4 @@ obj-$(CONFIG_INTEL_PUNIT_IPC) += intel_punit_ipc.o
obj-$(CONFIG_INTEL_TELEMETRY) += intel_telemetry_core.o \
intel_telemetry_pltdrv.o \
intel_telemetry_debugfs.o
+obj-$(CONFIG_INTEL_PMC_CORE) += intel_pmc_core.o
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index f2b5d0a8adf0..15f131146501 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -771,12 +771,14 @@ static int asus_read_brightness(struct backlight_device *bd)
{
struct asus_laptop *asus = bl_get_data(bd);
unsigned long long value;
- acpi_status rv = AE_OK;
+ acpi_status rv;
rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET,
NULL, &value);
- if (ACPI_FAILURE(rv))
+ if (ACPI_FAILURE(rv)) {
pr_warn("Error reading brightness\n");
+ return 0;
+ }
return value;
}
@@ -865,7 +867,7 @@ static ssize_t infos_show(struct device *dev, struct device_attribute *attr,
int len = 0;
unsigned long long temp;
char buf[16]; /* enough for all info */
- acpi_status rv = AE_OK;
+ acpi_status rv;
/*
* We use the easy way, we don't care of off and count,
@@ -946,11 +948,10 @@ static ssize_t sysfs_acpi_set(struct asus_laptop *asus,
const char *method)
{
int rv, value;
- int out = 0;
rv = parse_arg(buf, count, &value);
- if (rv > 0)
- out = value ? 1 : 0;
+ if (rv <= 0)
+ return rv;
if (write_acpi_int(asus->handle, method, value))
return -ENODEV;
@@ -1265,7 +1266,7 @@ static DEVICE_ATTR_RO(ls_value);
static int asus_gps_status(struct asus_laptop *asus)
{
unsigned long long status;
- acpi_status rv = AE_OK;
+ acpi_status rv;
rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS,
NULL, &status);
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index a96630d52346..a26dca3640ea 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -114,6 +114,7 @@ MODULE_LICENSE("GPL");
#define ASUS_WMI_DEVID_LED6 0x00020016
/* Backlight and Brightness */
+#define ASUS_WMI_DEVID_ALS_ENABLE 0x00050001 /* Ambient Light Sensor */
#define ASUS_WMI_DEVID_BACKLIGHT 0x00050011
#define ASUS_WMI_DEVID_BRIGHTNESS 0x00050012
#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
@@ -1730,6 +1731,7 @@ ASUS_WMI_CREATE_DEVICE_ATTR(touchpad, 0644, ASUS_WMI_DEVID_TOUCHPAD);
ASUS_WMI_CREATE_DEVICE_ATTR(camera, 0644, ASUS_WMI_DEVID_CAMERA);
ASUS_WMI_CREATE_DEVICE_ATTR(cardr, 0644, ASUS_WMI_DEVID_CARDREADER);
ASUS_WMI_CREATE_DEVICE_ATTR(lid_resume, 0644, ASUS_WMI_DEVID_LID_RESUME);
+ASUS_WMI_CREATE_DEVICE_ATTR(als_enable, 0644, ASUS_WMI_DEVID_ALS_ENABLE);
static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -1756,6 +1758,7 @@ static struct attribute *platform_attributes[] = {
&dev_attr_cardr.attr,
&dev_attr_touchpad.attr,
&dev_attr_lid_resume.attr,
+ &dev_attr_als_enable.attr,
NULL
};
@@ -1776,6 +1779,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
devid = ASUS_WMI_DEVID_TOUCHPAD;
else if (attr == &dev_attr_lid_resume.attr)
devid = ASUS_WMI_DEVID_LID_RESUME;
+ else if (attr == &dev_attr_als_enable.attr)
+ devid = ASUS_WMI_DEVID_ALS_ENABLE;
if (devid != -1)
ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
diff --git a/drivers/platform/x86/dell-rbtn.c b/drivers/platform/x86/dell-rbtn.c
index b51a2008d782..dcd9f40a4b18 100644
--- a/drivers/platform/x86/dell-rbtn.c
+++ b/drivers/platform/x86/dell-rbtn.c
@@ -28,6 +28,7 @@ struct rbtn_data {
enum rbtn_type type;
struct rfkill *rfkill;
struct input_dev *input_dev;
+ bool suspended;
};
@@ -235,9 +236,55 @@ static const struct acpi_device_id rbtn_ids[] = {
{ "", 0 },
};
+#ifdef CONFIG_PM_SLEEP
+static void ACPI_SYSTEM_XFACE rbtn_clear_suspended_flag(void *context)
+{
+ struct rbtn_data *rbtn_data = context;
+
+ rbtn_data->suspended = false;
+}
+
+static int rbtn_suspend(struct device *dev)
+{
+ struct acpi_device *device = to_acpi_device(dev);
+ struct rbtn_data *rbtn_data = acpi_driver_data(device);
+
+ rbtn_data->suspended = true;
+
+ return 0;
+}
+
+static int rbtn_resume(struct device *dev)
+{
+ struct acpi_device *device = to_acpi_device(dev);
+ struct rbtn_data *rbtn_data = acpi_driver_data(device);
+ acpi_status status;
+
+ /*
+ * Upon resume, some BIOSes send an ACPI notification that triggers
+ * an unwanted input event. In order to ignore it, we use a flag
+ * that we set at suspend and clear once we have received the extra
+ * ACPI notification. Since ACPI notifications are delivered
+ * asynchronously to drivers, we clear the flag from the workqueue
+ * used to deliver the notifications. This should be enough
+ * to have the flag cleared only after we received the extra
+ * notification, if any.
+ */
+ status = acpi_os_execute(OSL_NOTIFY_HANDLER,
+ rbtn_clear_suspended_flag, rbtn_data);
+ if (ACPI_FAILURE(status))
+ rbtn_clear_suspended_flag(rbtn_data);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(rbtn_pm_ops, rbtn_suspend, rbtn_resume);
+
static struct acpi_driver rbtn_driver = {
.name = "dell-rbtn",
.ids = rbtn_ids,
+ .drv.pm = &rbtn_pm_ops,
.ops = {
.add = rbtn_add,
.remove = rbtn_remove,
@@ -399,6 +446,15 @@ static void rbtn_notify(struct acpi_device *device, u32 event)
{
struct rbtn_data *rbtn_data = device->driver_data;
+ /*
+ * Some BIOSes send a notification at resume.
+ * Ignore it to prevent unwanted input events.
+ */
+ if (rbtn_data->suspended) {
+ dev_dbg(&device->dev, "ACPI notification ignored\n");
+ return;
+ }
+
if (event != 0x80) {
dev_info(&device->dev, "Received unknown event (0x%x)\n",
event);
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index ffc84cc7b1c7..ce41bc34288d 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -69,7 +69,7 @@
#include <linux/kfifo.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
#include <linux/leds.h>
#endif
#include <acpi/video.h>
@@ -100,13 +100,14 @@
/* FUNC interface - responses */
#define UNSUPPORTED_CMD 0x80000000
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
/* FUNC interface - LED control */
#define FUNC_LED_OFF 0x1
#define FUNC_LED_ON 0x30001
#define KEYBOARD_LAMPS 0x100
#define LOGOLAMP_POWERON 0x2000
#define LOGOLAMP_ALWAYS 0x4000
+#define RADIO_LED_ON 0x20
#endif
/* Hotkey details */
@@ -174,13 +175,14 @@ struct fujitsu_hotkey_t {
int rfkill_state;
int logolamp_registered;
int kblamps_registered;
+ int radio_led_registered;
};
static struct fujitsu_hotkey_t *fujitsu_hotkey;
static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event);
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
static enum led_brightness logolamp_get(struct led_classdev *cdev);
static void logolamp_set(struct led_classdev *cdev,
enum led_brightness brightness);
@@ -200,6 +202,16 @@ static struct led_classdev kblamps_led = {
.brightness_get = kblamps_get,
.brightness_set = kblamps_set
};
+
+static enum led_brightness radio_led_get(struct led_classdev *cdev);
+static void radio_led_set(struct led_classdev *cdev,
+ enum led_brightness brightness);
+
+static struct led_classdev radio_led = {
+ .name = "fujitsu::radio_led",
+ .brightness_get = radio_led_get,
+ .brightness_set = radio_led_set
+};
#endif
#ifdef CONFIG_FUJITSU_LAPTOP_DEBUG
@@ -249,7 +261,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
return value;
}
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
/* LED class callbacks */
static void logolamp_set(struct led_classdev *cdev,
@@ -275,6 +287,15 @@ static void kblamps_set(struct led_classdev *cdev,
call_fext_func(FUNC_LEDS, 0x1, KEYBOARD_LAMPS, FUNC_LED_OFF);
}
+static void radio_led_set(struct led_classdev *cdev,
+ enum led_brightness brightness)
+{
+ if (brightness >= LED_FULL)
+ call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON);
+ else
+ call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0);
+}
+
static enum led_brightness logolamp_get(struct led_classdev *cdev)
{
enum led_brightness brightness = LED_OFF;
@@ -299,6 +320,16 @@ static enum led_brightness kblamps_get(struct led_classdev *cdev)
return brightness;
}
+
+static enum led_brightness radio_led_get(struct led_classdev *cdev)
+{
+ enum led_brightness brightness = LED_OFF;
+
+ if (call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0) & RADIO_LED_ON)
+ brightness = LED_FULL;
+
+ return brightness;
+}
#endif
/* Hardware access for LCD brightness control */
@@ -872,7 +903,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
/* Suspect this is a keymap of the application panel, print it */
pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
result = led_classdev_register(&fujitsu->pf_device->dev,
&logolamp_led);
@@ -895,6 +926,23 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
result);
}
}
+
+ /*
+ * BTNI bit 24 seems to indicate the presence of a radio toggle
+ * button in place of a slide switch, and all such machines appear
+ * to also have an RF LED. Therefore use bit 24 as an indicator
+ * that an RF LED is present.
+ */
+ if (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) {
+ result = led_classdev_register(&fujitsu->pf_device->dev,
+ &radio_led);
+ if (result == 0) {
+ fujitsu_hotkey->radio_led_registered = 1;
+ } else {
+ pr_err("Could not register LED handler for radio LED, error %i\n",
+ result);
+ }
+ }
#endif
return result;
@@ -915,12 +963,15 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device)
struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device);
struct input_dev *input = fujitsu_hotkey->input;
-#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
+#if IS_ENABLED(CONFIG_LEDS_CLASS)
if (fujitsu_hotkey->logolamp_registered)
led_classdev_unregister(&logolamp_led);
if (fujitsu_hotkey->kblamps_registered)
led_classdev_unregister(&kblamps_led);
+
+ if (fujitsu_hotkey->radio_led_registered)
+ led_classdev_unregister(&radio_led);
#endif
input_unregister_device(input);
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index be3bc2f4edd4..4a23fbc66b71 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -48,7 +48,10 @@
#define CFG_CAMERA_BIT (19)
#if IS_ENABLED(CONFIG_ACPI_WMI)
-static const char ideapad_wmi_fnesc_event[] = "26CAB2E5-5CF1-46AE-AAC3-4A12B6BA50E6";
+static const char *const ideapad_wmi_fnesc_events[] = {
+ "26CAB2E5-5CF1-46AE-AAC3-4A12B6BA50E6", /* Yoga 3 */
+ "56322276-8493-4CE8-A783-98C991274F5E", /* Yoga 700 */
+};
#endif
enum {
@@ -93,6 +96,7 @@ struct ideapad_private {
struct dentry *debug;
unsigned long cfg;
bool has_hw_rfkill_switch;
+ const char *fnesc_guid;
};
static bool no_bt_rfkill;
@@ -989,8 +993,16 @@ static int ideapad_acpi_add(struct platform_device *pdev)
ACPI_DEVICE_NOTIFY, ideapad_acpi_notify, priv);
if (ret)
goto notification_failed;
+
#if IS_ENABLED(CONFIG_ACPI_WMI)
- ret = wmi_install_notify_handler(ideapad_wmi_fnesc_event, ideapad_wmi_notify, priv);
+ for (i = 0; i < ARRAY_SIZE(ideapad_wmi_fnesc_events); i++) {
+ ret = wmi_install_notify_handler(ideapad_wmi_fnesc_events[i],
+ ideapad_wmi_notify, priv);
+ if (ret == AE_OK) {
+ priv->fnesc_guid = ideapad_wmi_fnesc_events[i];
+ break;
+ }
+ }
if (ret != AE_OK && ret != AE_NOT_EXIST)
goto notification_failed_wmi;
#endif
@@ -1020,7 +1032,8 @@ static int ideapad_acpi_remove(struct platform_device *pdev)
int i;
#if IS_ENABLED(CONFIG_ACPI_WMI)
- wmi_remove_notify_handler(ideapad_wmi_fnesc_event);
+ if (priv->fnesc_guid)
+ wmi_remove_notify_handler(priv->fnesc_guid);
#endif
acpi_remove_notify_handler(priv->adev->handle,
ACPI_DEVICE_NOTIFY, ideapad_acpi_notify);
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index 0a919d81662c..cbe01021c939 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -306,33 +306,32 @@ static int sensor_set_auxtrip(acpi_handle handle, int index, int value)
#define to_intel_menlow_attr(_attr) \
container_of(_attr, struct intel_menlow_attribute, attr)
-static ssize_t aux0_show(struct device *dev,
- struct device_attribute *dev_attr, char *buf)
+static ssize_t aux_show(struct device *dev, struct device_attribute *dev_attr,
+ char *buf, int idx)
{
struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
unsigned long long value;
int result;
- result = sensor_get_auxtrip(attr->handle, 0, &value);
+ result = sensor_get_auxtrip(attr->handle, idx, &value);
return result ? result : sprintf(buf, "%lu", DECI_KELVIN_TO_CELSIUS(value));
}
-static ssize_t aux1_show(struct device *dev,
+static ssize_t aux0_show(struct device *dev,
struct device_attribute *dev_attr, char *buf)
{
- struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
- unsigned long long value;
- int result;
-
- result = sensor_get_auxtrip(attr->handle, 1, &value);
+ return aux_show(dev, dev_attr, buf, 0);
+}
- return result ? result : sprintf(buf, "%lu", DECI_KELVIN_TO_CELSIUS(value));
+static ssize_t aux1_show(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ return aux_show(dev, dev_attr, buf, 1);
}
-static ssize_t aux0_store(struct device *dev,
- struct device_attribute *dev_attr,
- const char *buf, size_t count)
+static ssize_t aux_store(struct device *dev, struct device_attribute *dev_attr,
+ const char *buf, size_t count, int idx)
{
struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
int value;
@@ -345,27 +344,23 @@ static ssize_t aux0_store(struct device *dev,
if (value < 0)
return -EINVAL;
- result = sensor_set_auxtrip(attr->handle, 0, CELSIUS_TO_DECI_KELVIN(value));
+ result = sensor_set_auxtrip(attr->handle, idx,
+ CELSIUS_TO_DECI_KELVIN(value));
return result ? result : count;
}
-static ssize_t aux1_store(struct device *dev,
+static ssize_t aux0_store(struct device *dev,
struct device_attribute *dev_attr,
const char *buf, size_t count)
{
- struct intel_menlow_attribute *attr = to_intel_menlow_attr(dev_attr);
- int value;
- int result;
-
- /*Sanity check; should be a positive integer */
- if (!sscanf(buf, "%d", &value))
- return -EINVAL;
-
- if (value < 0)
- return -EINVAL;
+ return aux_store(dev, dev_attr, buf, count, 0);
+}
- result = sensor_set_auxtrip(attr->handle, 1, CELSIUS_TO_DECI_KELVIN(value));
- return result ? result : count;
+static ssize_t aux1_store(struct device *dev,
+ struct device_attribute *dev_attr,
+ const char *buf, size_t count)
+{
+ return aux_store(dev, dev_attr, buf, count, 1);
}
/* BIOS can enable/disable the thermal user application in dabney platform */
diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
new file mode 100644
index 000000000000..2776bec89c88
--- /dev/null
+++ b/drivers/platform/x86/intel_pmc_core.c
@@ -0,0 +1,200 @@
+/*
+ * Intel Core SoC Power Management Controller Driver
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
+ * Vishwanath Somayaji <vishwanath.somayaji@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+
+#include <asm/cpu_device_id.h>
+#include <asm/pmc_core.h>
+
+#include "intel_pmc_core.h"
+
+static struct pmc_dev pmc;
+
+static const struct pci_device_id pmc_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), (kernel_ulong_t)NULL },
+ { 0, },
+};
+
+static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset)
+{
+ return readl(pmcdev->regbase + reg_offset);
+}
+
+static inline u32 pmc_core_adjust_slp_s0_step(u32 value)
+{
+ return value * SPT_PMC_SLP_S0_RES_COUNTER_STEP;
+}
+
+/**
+ * intel_pmc_slp_s0_counter_read() - Read SLP_S0 residency.
+ * @data: Out param that contains current SLP_S0 count.
+ *
+ * This API currently supports Intel Skylake SoC and Sunrise
+ * Point Platform Controller Hub. Future platform support
+ * should be added for platforms that support low power modes
+ * beyond Package C10 state.
+ *
+ * The SLP_S0_RESIDENCY counter counts in steps of 100 us, so the
+ * function stores the step-adjusted (multiplied) value in the out
+ * parameter @data.
+ *
+ * Return: an error code or 0 on success.
+ */
+int intel_pmc_slp_s0_counter_read(u32 *data)
+{
+ struct pmc_dev *pmcdev = &pmc;
+ u32 value;
+
+ if (!pmcdev->has_slp_s0_res)
+ return -EACCES;
+
+ value = pmc_core_reg_read(pmcdev, SPT_PMC_SLP_S0_RES_COUNTER_OFFSET);
+ *data = pmc_core_adjust_slp_s0_step(value);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(intel_pmc_slp_s0_counter_read);
+
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+static int pmc_core_dev_state_show(struct seq_file *s, void *unused)
+{
+ struct pmc_dev *pmcdev = s->private;
+ u32 counter_val;
+
+ counter_val = pmc_core_reg_read(pmcdev,
+ SPT_PMC_SLP_S0_RES_COUNTER_OFFSET);
+ seq_printf(s, "%u\n", pmc_core_adjust_slp_s0_step(counter_val));
+
+ return 0;
+}
+
+static int pmc_core_dev_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pmc_core_dev_state_show, inode->i_private);
+}
+
+static const struct file_operations pmc_core_dev_state_ops = {
+ .open = pmc_core_dev_state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
+{
+ debugfs_remove_recursive(pmcdev->dbgfs_dir);
+}
+
+static int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
+{
+ struct dentry *dir, *file;
+
+ dir = debugfs_create_dir("pmc_core", NULL);
+ if (!dir)
+ return -ENOMEM;
+
+ pmcdev->dbgfs_dir = dir;
+ file = debugfs_create_file("slp_s0_residency_usec", S_IFREG | S_IRUGO,
+ dir, pmcdev, &pmc_core_dev_state_ops);
+
+ if (!file) {
+ pmc_core_dbgfs_unregister(pmcdev);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+#else
+static inline int pmc_core_dbgfs_register(struct pmc_dev *pmcdev)
+{
+ return 0;
+}
+
+static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static const struct x86_cpu_id intel_pmc_core_ids[] = {
+ { X86_VENDOR_INTEL, 6, 0x4e, X86_FEATURE_MWAIT,
+ (kernel_ulong_t)NULL}, /* Skylake CPUID Signature */
+ { X86_VENDOR_INTEL, 6, 0x5e, X86_FEATURE_MWAIT,
+ (kernel_ulong_t)NULL}, /* Skylake CPUID Signature */
+ {}
+};
+
+static int pmc_core_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+ struct device *ptr_dev = &dev->dev;
+ struct pmc_dev *pmcdev = &pmc;
+ const struct x86_cpu_id *cpu_id;
+ int err;
+
+ cpu_id = x86_match_cpu(intel_pmc_core_ids);
+ if (!cpu_id) {
+ dev_dbg(&dev->dev, "PMC Core: cpuid mismatch.\n");
+ return -EINVAL;
+ }
+
+ err = pcim_enable_device(dev);
+ if (err < 0) {
+ dev_dbg(&dev->dev, "PMC Core: failed to enable Power Management Controller.\n");
+ return err;
+ }
+
+ err = pci_read_config_dword(dev,
+ SPT_PMC_BASE_ADDR_OFFSET,
+ &pmcdev->base_addr);
+ if (err < 0) {
+ dev_dbg(&dev->dev, "PMC Core: failed to read PCI config space.\n");
+ return err;
+ }
+ dev_dbg(&dev->dev, "PMC Core: PWRMBASE is %#x\n", pmcdev->base_addr);
+
+ pmcdev->regbase = devm_ioremap_nocache(ptr_dev,
+ pmcdev->base_addr,
+ SPT_PMC_MMIO_REG_LEN);
+ if (!pmcdev->regbase) {
+ dev_dbg(&dev->dev, "PMC Core: ioremap failed.\n");
+ return -ENOMEM;
+ }
+
+ err = pmc_core_dbgfs_register(pmcdev);
+ if (err < 0) {
+ dev_err(&dev->dev, "PMC Core: debugfs register failed.\n");
+ return err;
+ }
+
+ pmc.has_slp_s0_res = true;
+ return 0;
+}
+
+static struct pci_driver intel_pmc_core_driver = {
+ .name = "intel_pmc_core",
+ .id_table = pmc_pci_ids,
+ .probe = pmc_core_probe,
+};
+
+builtin_pci_driver(intel_pmc_core_driver);
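The counter is reachable two ways: the debugfs file created above and the exported helper for in-kernel users. A hedged sketch of an in-kernel caller (the caller itself is illustrative):

	#include <linux/kernel.h>
	#include <asm/pmc_core.h>

	/* sketch: read the adjusted SLP_S0 residency in microseconds */
	static void example_report_slp_s0(void)
	{
		u32 usec;

		/* -EACCES until pmc_core_probe() has mapped the registers */
		if (intel_pmc_slp_s0_counter_read(&usec))
			return;

		pr_info("SLP_S0 residency: %u usec\n", usec);
	}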
diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
new file mode 100644
index 000000000000..a9dadaf787c1
--- /dev/null
+++ b/drivers/platform/x86/intel_pmc_core.h
@@ -0,0 +1,51 @@
+/*
+ * Intel Core SoC Power Management Controller Header File
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ * All Rights Reserved.
+ *
+ * Authors: Rajneesh Bhardwaj <rajneesh.bhardwaj@intel.com>
+ * Vishwanath Somayaji <vishwanath.somayaji@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef PMC_CORE_H
+#define PMC_CORE_H
+
+/* Sunrise Point Power Management Controller PCI Device ID */
+#define SPT_PMC_PCI_DEVICE_ID 0x9d21
+#define SPT_PMC_BASE_ADDR_OFFSET 0x48
+#define SPT_PMC_SLP_S0_RES_COUNTER_OFFSET 0x13c
+#define SPT_PMC_MMIO_REG_LEN 0x100
+#define SPT_PMC_SLP_S0_RES_COUNTER_STEP 0x64
+
+/**
+ * struct pmc_dev - pmc device structure
+ * @base_addr: contains the PMC base address
+ * @regbase: pointer to io-remapped memory location
+ * @dbgfs_dir: path to debug fs interface
+ * @has_slp_s0_res: flag indicating whether the SLP_S0_RESIDENCY
+ * counter is available on this platform
+ *
+ * pmc_dev contains info about power management controller device.
+ */
+struct pmc_dev {
+ u32 base_addr;
+ void __iomem *regbase;
+#if IS_ENABLED(CONFIG_DEBUG_FS)
+ struct dentry *dbgfs_dir;
+#endif /* CONFIG_DEBUG_FS */
+ bool has_slp_s0_res;
+};
+
+#endif /* PMC_CORE_H */
diff --git a/drivers/platform/x86/intel_telemetry_core.c b/drivers/platform/x86/intel_telemetry_core.c
index a695a436a1c3..0d4c3808a6d8 100644
--- a/drivers/platform/x86/intel_telemetry_core.c
+++ b/drivers/platform/x86/intel_telemetry_core.c
@@ -25,7 +25,7 @@
struct telemetry_core_config {
struct telemetry_plt_config *plt_config;
- struct telemetry_core_ops *telem_ops;
+ const struct telemetry_core_ops *telem_ops;
};
static struct telemetry_core_config telm_core_conf;
@@ -95,7 +95,7 @@ static int telemetry_def_reset_events(void)
return 0;
}
-static struct telemetry_core_ops telm_defpltops = {
+static const struct telemetry_core_ops telm_defpltops = {
.set_sampling_period = telemetry_def_set_sampling_period,
.get_sampling_period = telemetry_def_get_sampling_period,
.get_trace_verbosity = telemetry_def_get_trace_verbosity,
@@ -332,7 +332,7 @@ EXPORT_SYMBOL_GPL(telemetry_set_trace_verbosity);
*
* Return: 0 success, < 0 for failure
*/
-int telemetry_set_pltdata(struct telemetry_core_ops *ops,
+int telemetry_set_pltdata(const struct telemetry_core_ops *ops,
struct telemetry_plt_config *pltconfig)
{
if (ops)
diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c
index 781bd10ca7ac..09c84a2b1c2c 100644
--- a/drivers/platform/x86/intel_telemetry_pltdrv.c
+++ b/drivers/platform/x86/intel_telemetry_pltdrv.c
@@ -1081,7 +1081,7 @@ out:
return ret;
}
-static struct telemetry_core_ops telm_pltops = {
+static const struct telemetry_core_ops telm_pltops = {
.get_trace_verbosity = telemetry_plt_get_trace_verbosity,
.set_trace_verbosity = telemetry_plt_set_trace_verbosity,
.set_sampling_period = telemetry_plt_set_sampling_period,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index e9caa347a9bf..1dba3598cfcb 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1446,6 +1446,9 @@ static void sony_nc_function_cleanup(struct platform_device *pd)
{
unsigned int i, result, bitmask, handle;
+ if (!handles)
+ return;
+
/* get enabled events and disable them */
sony_nc_int_call(sony_nc_acpi_handle, "SN01", NULL, &bitmask);
sony_nc_int_call(sony_nc_acpi_handle, "SN03", &bitmask, &result);
diff --git a/drivers/platform/x86/surfacepro3_button.c b/drivers/platform/x86/surfacepro3_button.c
index 700e0fa0eec2..6505c97705e1 100644
--- a/drivers/platform/x86/surfacepro3_button.c
+++ b/drivers/platform/x86/surfacepro3_button.c
@@ -24,6 +24,8 @@
#define SURFACE_BUTTON_OBJ_NAME "VGBI"
#define SURFACE_BUTTON_DEVICE_NAME "Surface Pro 3/4 Buttons"
+#define SURFACE_BUTTON_NOTIFY_TABLET_MODE 0xc8
+
#define SURFACE_BUTTON_NOTIFY_PRESS_POWER 0xc6
#define SURFACE_BUTTON_NOTIFY_RELEASE_POWER 0xc7
@@ -33,7 +35,7 @@
#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_UP 0xc0
#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_UP 0xc1
-#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN 0xc2
+#define SURFACE_BUTTON_NOTIFY_PRESS_VOLUME_DOWN 0xc2
#define SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN 0xc3
ACPI_MODULE_NAME("surface pro 3 button");
@@ -105,9 +107,12 @@ static void surface_button_notify(struct acpi_device *device, u32 event)
case SURFACE_BUTTON_NOTIFY_RELEASE_VOLUME_DOWN:
key_code = KEY_VOLUMEDOWN;
break;
+ case SURFACE_BUTTON_NOTIFY_TABLET_MODE:
+ dev_warn_once(&device->dev, "Tablet mode is not supported\n");
+ break;
default:
dev_info_ratelimited(&device->dev,
- "Unsupported event [0x%x]\n", event);
+ "Unsupported event [0x%x]\n", event);
break;
}
input = button->input;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 9255ff3ee81a..c3bfa1fe95bf 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -5001,6 +5001,8 @@ static int kbdlight_set_level(int level)
return 0;
}
+static int kbdlight_set_level_and_update(int level);
+
static int kbdlight_get_level(void)
{
int status = 0;
@@ -5068,7 +5070,7 @@ static void kbdlight_set_worker(struct work_struct *work)
container_of(work, struct tpacpi_led_classdev, work);
if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
- kbdlight_set_level(data->new_state);
+ kbdlight_set_level_and_update(data->new_state);
}
static void kbdlight_sysfs_set(struct led_classdev *led_cdev,
@@ -5099,7 +5101,6 @@ static struct tpacpi_led_classdev tpacpi_led_kbdlight = {
.max_brightness = 2,
.brightness_set = &kbdlight_sysfs_set,
.brightness_get = &kbdlight_sysfs_get,
- .flags = LED_CORE_SUSPENDRESUME,
}
};
@@ -5137,6 +5138,20 @@ static void kbdlight_exit(void)
flush_workqueue(tpacpi_wq);
}
+static int kbdlight_set_level_and_update(int level)
+{
+ int ret;
+ struct led_classdev *led_cdev;
+
+ ret = kbdlight_set_level(level);
+ led_cdev = &tpacpi_led_kbdlight.led_classdev;
+
+ if (ret == 0 && !(led_cdev->flags & LED_SUSPENDED))
+ led_cdev->brightness = level;
+
+ return ret;
+}
+
static int kbdlight_read(struct seq_file *m)
{
int level;
@@ -5177,13 +5192,35 @@ static int kbdlight_write(char *buf)
if (level == -1)
return -EINVAL;
- return kbdlight_set_level(level);
+ return kbdlight_set_level_and_update(level);
+}
+
+static void kbdlight_suspend(void)
+{
+ struct led_classdev *led_cdev;
+
+ if (!tp_features.kbdlight)
+ return;
+
+ led_cdev = &tpacpi_led_kbdlight.led_classdev;
+ led_update_brightness(led_cdev);
+ led_classdev_suspend(led_cdev);
+}
+
+static void kbdlight_resume(void)
+{
+ if (!tp_features.kbdlight)
+ return;
+
+ led_classdev_resume(&tpacpi_led_kbdlight.led_classdev);
}
static struct ibm_struct kbdlight_driver_data = {
.name = "kbdlight",
.read = kbdlight_read,
.write = kbdlight_write,
+ .suspend = kbdlight_suspend,
+ .resume = kbdlight_resume,
.exit = kbdlight_exit,
};
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index b83908670a9a..bed53c46dd90 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -31,7 +31,7 @@ static void dcssblk_release(struct gendisk *disk, fmode_t mode);
static blk_qc_t dcssblk_make_request(struct request_queue *q,
struct bio *bio);
static long dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
- void __pmem **kaddr, pfn_t *pfn);
+ void __pmem **kaddr, pfn_t *pfn, long size);
static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
@@ -884,7 +884,7 @@ fail:
static long
dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
- void __pmem **kaddr, pfn_t *pfn)
+ void __pmem **kaddr, pfn_t *pfn, long size)
{
struct dcssblk_dev_info *dev_info;
unsigned long offset, dev_sz;
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index ce1f949430f1..3f2f30b6542c 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -976,8 +976,8 @@ static inline __u64 ll_file_maxbytes(struct inode *inode)
}
/* llite/xattr.c */
-int ll_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags);
+int ll_setxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value, size_t size, int flags);
ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
const char *name, void *buffer, size_t size);
ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index ed4de04381c3..608014b0dbcd 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -211,11 +211,9 @@ int ll_setxattr_common(struct inode *inode, const char *name,
return 0;
}
-int ll_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t size, int flags)
+int ll_setxattr(struct dentry *dentry, struct inode *inode,
+ const char *name, const void *value, size_t size, int flags)
{
- struct inode *inode = d_inode(dentry);
-
LASSERT(inode);
LASSERT(name);
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index a2aa655f56c4..1b7331e40d79 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2360,7 +2360,7 @@ static int pl011_probe_dt_alias(int index, struct device *dev)
return ret;
ret = of_alias_get_id(np, "serial");
- if (IS_ERR_VALUE(ret)) {
+ if (ret < 0) {
seen_dev_without_alias = true;
ret = index;
} else {
diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c
index 18971063f95f..699447aa8b43 100644
--- a/drivers/tty/serial/sprd_serial.c
+++ b/drivers/tty/serial/sprd_serial.c
@@ -654,7 +654,7 @@ static int sprd_probe_dt_alias(int index, struct device *dev)
return ret;
ret = of_alias_get_id(np, "serial");
- if (IS_ERR_VALUE(ret))
+ if (ret < 0)
ret = index;
else if (ret >= ARRAY_SIZE(sprd_port) || sprd_port[ret] != NULL) {
dev_warn(dev, "requested serial port %d not available.\n", ret);
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index d8d583d32a37..c229b1a0d13b 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -713,7 +713,7 @@ static int da8xx_fb_config_clk_divider(struct da8xx_fb_par *par,
if (par->lcdc_clk_rate != lcdc_clk_rate) {
ret = clk_set_rate(par->lcdc_clk, lcdc_clk_rate);
- if (IS_ERR_VALUE(ret)) {
+ if (ret) {
dev_err(par->dev,
"unable to set clock rate at %u\n",
lcdc_clk_rate);
@@ -784,7 +784,7 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg,
int ret = 0;
ret = da8xx_fb_calc_config_clk_divider(par, panel);
- if (IS_ERR_VALUE(ret)) {
+ if (ret) {
dev_err(par->dev, "unable to configure clock\n");
return ret;
}