Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/arm64/iort.c | 2
-rw-r--r--  drivers/ata/libata-core.c | 6
-rw-r--r--  drivers/ata/sata_mv.c | 3
-rw-r--r--  drivers/base/firmware_class.c | 5
-rw-r--r--  drivers/base/memory.c | 12
-rw-r--r--  drivers/bcma/bcma_private.h | 3
-rw-r--r--  drivers/bcma/driver_chipcommon.c | 11
-rw-r--r--  drivers/bcma/driver_mips.c | 3
-rw-r--r--  drivers/dma/cppi41.c | 69
-rw-r--r--  drivers/dma/pl330.c | 24
-rw-r--r--  drivers/firmware/efi/libstub/fdt.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 4
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 13
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 9
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 23
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 16
-rw-r--r--  drivers/gpu/drm/i915/intel_atomic_plane.c | 20
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 125
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c | 52
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_sprite.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/hw.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.h | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_led.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_usif.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv84_fence.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 4
-rw-r--r--  drivers/hid/hid-cp2112.c | 28
-rw-r--r--  drivers/hid/hid-ids.h | 3
-rw-r--r--  drivers/hid/hid-lg.c | 2
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 1
-rw-r--r--  drivers/hid/wacom_wac.c | 28
-rw-r--r--  drivers/hv/ring_buffer.c | 1
-rw-r--r--  drivers/iio/adc/palmas_gpadc.c | 4
-rw-r--r--  drivers/iio/health/afe4403.c | 4
-rw-r--r--  drivers/iio/health/afe4404.c | 4
-rw-r--r--  drivers/iio/health/max30100.c | 2
-rw-r--r--  drivers/iio/humidity/dht11.c | 6
-rw-r--r--  drivers/input/rmi4/rmi_driver.c | 4
-rw-r--r--  drivers/input/touchscreen/wm97xx-core.c | 2
-rw-r--r--  drivers/iommu/Kconfig | 3
-rw-r--r--  drivers/iommu/amd_iommu.c | 72
-rw-r--r--  drivers/iommu/amd_iommu_init.c | 11
-rw-r--r--  drivers/iommu/amd_iommu_types.h | 4
-rw-r--r--  drivers/iommu/arm-smmu-v3.c | 90
-rw-r--r--  drivers/iommu/arm-smmu.c | 135
-rw-r--r--  drivers/iommu/dma-iommu.c | 183
-rw-r--r--  drivers/iommu/dmar.c | 20
-rw-r--r--  drivers/iommu/exynos-iommu.c | 55
-rw-r--r--  drivers/iommu/intel-iommu.c | 111
-rw-r--r--  drivers/iommu/io-pgtable-arm-v7s.c | 6
-rw-r--r--  drivers/iommu/io-pgtable-arm.c | 5
-rw-r--r--  drivers/iommu/iommu-sysfs.c | 61
-rw-r--r--  drivers/iommu/iommu.c | 285
-rw-r--r--  drivers/iommu/iova.c | 23
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c | 2
-rw-r--r--  drivers/iommu/msm_iommu.c | 73
-rw-r--r--  drivers/iommu/msm_iommu.h | 3
-rw-r--r--  drivers/iommu/mtk_iommu.c | 27
-rw-r--r--  drivers/iommu/mtk_iommu.h | 2
-rw-r--r--  drivers/iommu/of_iommu.c | 4
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 1
-rw-r--r--  drivers/mmc/host/sdhci.c | 3
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c | 45
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 188
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 20
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_xcv.c | 3
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 33
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/catas.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/intf.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 202
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 4
-rw-r--r--  drivers/net/hyperv/netvsc.c | 6
-rw-r--r--  drivers/net/phy/micrel.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-8000.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/tt.c | 12
-rw-r--r--  drivers/pci/pcie/aspm.c | 19
-rw-r--r--  drivers/pinctrl/berlin/berlin-bg4ct.c | 2
-rw-r--r--  drivers/pinctrl/intel/pinctrl-baytrail.c | 27
-rw-r--r--  drivers/pinctrl/intel/pinctrl-merrifield.c | 3
-rw-r--r--  drivers/pinctrl/sunxi/pinctrl-sunxi.c | 3
-rw-r--r--  drivers/regulator/axp20x-regulator.c | 2
-rw-r--r--  drivers/regulator/fixed.c | 46
-rw-r--r--  drivers/regulator/twl6030-regulator.c | 2
-rw-r--r--  drivers/rtc/Kconfig | 5
-rw-r--r--  drivers/rtc/rtc-jz4740.c | 12
-rw-r--r--  drivers/scsi/virtio_scsi.c | 11
-rw-r--r--  drivers/staging/greybus/timesync_platform.c | 6
-rw-r--r--  drivers/usb/core/quirks.c | 4
-rw-r--r--  drivers/usb/gadget/function/f_fs.c | 13
-rw-r--r--  drivers/usb/musb/musb_core.c | 26
-rw-r--r--  drivers/usb/musb/musb_core.h | 1
-rw-r--r--  drivers/usb/serial/option.c | 1
-rw-r--r--  drivers/usb/serial/pl2303.c | 1
-rw-r--r--  drivers/usb/serial/pl2303.h | 1
-rw-r--r--  drivers/usb/serial/qcserial.c | 1
-rw-r--r--  drivers/vfio/vfio_iommu_spapr_tce.c | 11
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c | 40
-rw-r--r--  drivers/vhost/vhost.c | 10
-rw-r--r--  drivers/virtio/virtio_ring.c | 7
123 files changed, 1761 insertions(+), 911 deletions(-)
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index e0d2e6e6e40c..3752521c62ab 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -536,7 +536,7 @@ static const struct iommu_ops *iort_iommu_xlate(struct device *dev,
if (!iort_fwnode)
return NULL;
- ops = iommu_get_instance(iort_fwnode);
+ ops = iommu_ops_from_fwnode(iort_fwnode);
if (!ops)
return NULL;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 9cd0a2d41816..c2d3785ec227 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1702,6 +1702,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
if (qc->err_mask & ~AC_ERR_OTHER)
qc->err_mask &= ~AC_ERR_OTHER;
+ } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
+ qc->result_tf.command |= ATA_SENSE;
}
/* finish up */
@@ -4356,10 +4358,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
/*
- * Device times out with higher max sects.
+ * These devices time out with higher max sects.
* https://bugzilla.kernel.org/show_bug.cgi?id=121671
*/
- { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
+ { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
/* Devices we expect to fail diagnostics */
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 823e938c9a78..2f32782cea6d 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4132,6 +4132,9 @@ static int mv_platform_probe(struct platform_device *pdev)
host->iomap = NULL;
hpriv->base = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
+ if (!hpriv->base)
+ return -ENOMEM;
+
hpriv->base -= SATAHC0_REG_BASE;
hpriv->clk = clk_get(&pdev->dev, NULL);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 4497d263209f..ac350c518e0c 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -558,9 +558,6 @@ static void fw_load_abort(struct firmware_priv *fw_priv)
struct firmware_buf *buf = fw_priv->buf;
__fw_load_abort(buf);
-
- /* avoid user action after loading abort */
- fw_priv->buf = NULL;
}
static LIST_HEAD(pending_fw_head);
@@ -713,7 +710,7 @@ static ssize_t firmware_loading_store(struct device *dev,
mutex_lock(&fw_lock);
fw_buf = fw_priv->buf;
- if (!fw_buf)
+ if (fw_state_is_aborted(&fw_buf->fw_st))
goto out;
switch (loading) {
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index dacb6a8418aa..fa26ffd25fa6 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -389,33 +389,33 @@ static ssize_t show_valid_zones(struct device *dev,
{
struct memory_block *mem = to_memory_block(dev);
unsigned long start_pfn, end_pfn;
+ unsigned long valid_start, valid_end, valid_pages;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
- struct page *first_page;
struct zone *zone;
int zone_shift = 0;
start_pfn = section_nr_to_pfn(mem->start_section_nr);
end_pfn = start_pfn + nr_pages;
- first_page = pfn_to_page(start_pfn);
/* The block contains more than one zone can not be offlined. */
- if (!test_pages_in_a_zone(start_pfn, end_pfn))
+ if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
return sprintf(buf, "none\n");
- zone = page_zone(first_page);
+ zone = page_zone(pfn_to_page(valid_start));
+ valid_pages = valid_end - valid_start;
/* MMOP_ONLINE_KEEP */
sprintf(buf, "%s", zone->name);
/* MMOP_ONLINE_KERNEL */
- zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
+ zone_can_shift(valid_start, valid_pages, ZONE_NORMAL, &zone_shift);
if (zone_shift) {
strcat(buf, " ");
strcat(buf, (zone + zone_shift)->name);
}
/* MMOP_ONLINE_MOVABLE */
- zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
+ zone_can_shift(valid_start, valid_pages, ZONE_MOVABLE, &zone_shift);
if (zone_shift) {
strcat(buf, " ");
strcat(buf, (zone + zone_shift)->name);
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index f642c4264c27..168fa175d65a 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -45,6 +45,9 @@ int bcma_sprom_get(struct bcma_bus *bus);
void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc);
void bcma_core_chipcommon_init(struct bcma_drv_cc *cc);
void bcma_chipco_bcm4331_ext_pa_lines_ctl(struct bcma_drv_cc *cc, bool enable);
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
/* driver_chipcommon_b.c */
int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb);
diff --git a/drivers/bcma/driver_chipcommon.c b/drivers/bcma/driver_chipcommon.c
index b4f6520e74f0..62f5bfa5065d 100644
--- a/drivers/bcma/driver_chipcommon.c
+++ b/drivers/bcma/driver_chipcommon.c
@@ -15,8 +15,6 @@
#include <linux/platform_device.h>
#include <linux/bcma/bcma.h>
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc);
-
static inline u32 bcma_cc_write32_masked(struct bcma_drv_cc *cc, u16 offset,
u32 mask, u32 value)
{
@@ -186,9 +184,6 @@ void bcma_core_chipcommon_early_init(struct bcma_drv_cc *cc)
if (cc->capabilities & BCMA_CC_CAP_PMU)
bcma_pmu_early_init(cc);
- if (IS_BUILTIN(CONFIG_BCM47XX) && bus->hosttype == BCMA_HOSTTYPE_SOC)
- bcma_chipco_serial_init(cc);
-
if (bus->hosttype == BCMA_HOSTTYPE_SOC)
bcma_core_chipcommon_flash_detect(cc);
@@ -378,9 +373,9 @@ u32 bcma_chipco_gpio_pulldown(struct bcma_drv_cc *cc, u32 mask, u32 value)
return res;
}
-static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
+#ifdef CONFIG_BCMA_DRIVER_MIPS
+void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
{
-#if IS_BUILTIN(CONFIG_BCM47XX)
unsigned int irq;
u32 baud_base;
u32 i;
@@ -422,5 +417,5 @@ static void bcma_chipco_serial_init(struct bcma_drv_cc *cc)
ports[i].baud_base = baud_base;
ports[i].reg_shift = 0;
}
-#endif /* CONFIG_BCM47XX */
}
+#endif /* CONFIG_BCMA_DRIVER_MIPS */
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 96f171328200..89af807cf29c 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -278,9 +278,12 @@ static void bcma_core_mips_nvram_init(struct bcma_drv_mips *mcore)
void bcma_core_mips_early_init(struct bcma_drv_mips *mcore)
{
+ struct bcma_bus *bus = mcore->core->bus;
+
if (mcore->early_setup_done)
return;
+ bcma_chipco_serial_init(&bus->drv_cc);
bcma_core_mips_nvram_init(mcore);
mcore->early_setup_done = true;
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index d5ba43a87a68..200828c60db9 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -153,6 +153,8 @@ struct cppi41_dd {
/* context for suspend/resume */
unsigned int dma_tdfdq;
+
+ bool is_suspended;
};
#define FIST_COMPLETION_QUEUE 93
@@ -257,6 +259,10 @@ static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
BUG_ON(desc_num >= ALLOC_DECS_NUM);
c = cdd->chan_busy[desc_num];
cdd->chan_busy[desc_num] = NULL;
+
+ /* Usecount for chan_busy[], paired with push_desc_queue() */
+ pm_runtime_put(cdd->ddev.dev);
+
return c;
}
@@ -317,12 +323,12 @@ static irqreturn_t cppi41_irq(int irq, void *data)
while (val) {
u32 desc, len;
- int error;
- error = pm_runtime_get(cdd->ddev.dev);
- if (error < 0)
- dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
- __func__, error);
+ /*
+ * This should never trigger, see the comments in
+ * push_desc_queue()
+ */
+ WARN_ON(cdd->is_suspended);
q_num = __fls(val);
val &= ~(1 << q_num);
@@ -343,9 +349,6 @@ static irqreturn_t cppi41_irq(int irq, void *data)
c->residue = pd_trans_len(c->desc->pd6) - len;
dma_cookie_complete(&c->txd);
dmaengine_desc_get_callback_invoke(&c->txd, NULL);
-
- pm_runtime_mark_last_busy(cdd->ddev.dev);
- pm_runtime_put_autosuspend(cdd->ddev.dev);
}
}
return IRQ_HANDLED;
@@ -447,6 +450,15 @@ static void push_desc_queue(struct cppi41_channel *c)
*/
__iowmb();
+ /*
+ * DMA transfers can take at least 200ms to complete with USB mass
+ * storage connected. To prevent autosuspend timeouts, we must use
+ * pm_runtime_get/put() when chan_busy[] is modified. This will get
+ * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
+ * outcome of the transfer.
+ */
+ pm_runtime_get(cdd->ddev.dev);
+
desc_phys = lower_32_bits(c->desc_phys);
desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
WARN_ON(cdd->chan_busy[desc_num]);
@@ -457,20 +469,26 @@ static void push_desc_queue(struct cppi41_channel *c)
cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
}
-static void pending_desc(struct cppi41_channel *c)
+/*
+ * Caller must hold cdd->lock to prevent push_desc_queue()
+ * getting called out of order. We have both cppi41_dma_issue_pending()
+ * and cppi41_runtime_resume() call this function.
+ */
+static void cppi41_run_queue(struct cppi41_dd *cdd)
{
- struct cppi41_dd *cdd = c->cdd;
- unsigned long flags;
+ struct cppi41_channel *c, *_c;
- spin_lock_irqsave(&cdd->lock, flags);
- list_add_tail(&c->node, &cdd->pending);
- spin_unlock_irqrestore(&cdd->lock, flags);
+ list_for_each_entry_safe(c, _c, &cdd->pending, node) {
+ push_desc_queue(c);
+ list_del(&c->node);
+ }
}
static void cppi41_dma_issue_pending(struct dma_chan *chan)
{
struct cppi41_channel *c = to_cpp41_chan(chan);
struct cppi41_dd *cdd = c->cdd;
+ unsigned long flags;
int error;
error = pm_runtime_get(cdd->ddev.dev);
@@ -482,10 +500,11 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan)
return;
}
- if (likely(pm_runtime_active(cdd->ddev.dev)))
- push_desc_queue(c);
- else
- pending_desc(c);
+ spin_lock_irqsave(&cdd->lock, flags);
+ list_add_tail(&c->node, &cdd->pending);
+ if (!cdd->is_suspended)
+ cppi41_run_queue(cdd);
+ spin_unlock_irqrestore(&cdd->lock, flags);
pm_runtime_mark_last_busy(cdd->ddev.dev);
pm_runtime_put_autosuspend(cdd->ddev.dev);
@@ -705,6 +724,9 @@ static int cppi41_stop_chan(struct dma_chan *chan)
WARN_ON(!cdd->chan_busy[desc_num]);
cdd->chan_busy[desc_num] = NULL;
+ /* Usecount for chan_busy[], paired with push_desc_queue() */
+ pm_runtime_put(cdd->ddev.dev);
+
return 0;
}
@@ -1150,8 +1172,12 @@ static int __maybe_unused cppi41_resume(struct device *dev)
static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
{
struct cppi41_dd *cdd = dev_get_drvdata(dev);
+ unsigned long flags;
+ spin_lock_irqsave(&cdd->lock, flags);
+ cdd->is_suspended = true;
WARN_ON(!list_empty(&cdd->pending));
+ spin_unlock_irqrestore(&cdd->lock, flags);
return 0;
}
@@ -1159,14 +1185,11 @@ static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
static int __maybe_unused cppi41_runtime_resume(struct device *dev)
{
struct cppi41_dd *cdd = dev_get_drvdata(dev);
- struct cppi41_channel *c, *_c;
unsigned long flags;
spin_lock_irqsave(&cdd->lock, flags);
- list_for_each_entry_safe(c, _c, &cdd->pending, node) {
- push_desc_queue(c);
- list_del(&c->node);
- }
+ cdd->is_suspended = false;
+ cppi41_run_queue(cdd);
spin_unlock_irqrestore(&cdd->lock, flags);
return 0;
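
The cppi41 hunks above tie a pm_runtime reference to each descriptor's stay in chan_busy[]: push_desc_queue() takes the reference, desc_to_chan() or cppi41_stop_chan() drops it, and an is_suspended flag checked under cdd->lock defers queueing until runtime resume flushes the pending list. The following is a minimal userspace sketch of that pairing, using pthreads and a plain counter in place of the pm_runtime usecount; all names are illustrative, not kernel API.

/*
 * Userspace sketch of the cppi41 pattern: hold a "runtime PM"
 * reference for every queued descriptor, and defer queueing while
 * the device is suspended.
 */
#include <pthread.h>
#include <stdio.h>

#define MAX_PENDING 8

struct fake_dev {
	pthread_mutex_t lock;
	int is_suspended;
	int usage_count;          /* stands in for the pm_runtime usecount */
	int pending[MAX_PENDING]; /* descriptors waiting for resume */
	int npending;
};

/* Paired with complete_desc(); mirrors push_desc_queue() taking a ref. */
static void push_desc(struct fake_dev *d, int desc)
{
	d->usage_count++;         /* pm_runtime_get() in the driver */
	printf("queued desc %d (usage=%d)\n", desc, d->usage_count);
}

/* Caller holds d->lock, like cppi41_run_queue(). */
static void run_queue(struct fake_dev *d)
{
	while (d->npending)
		push_desc(d, d->pending[--d->npending]);
}

static void issue_pending(struct fake_dev *d, int desc)
{
	pthread_mutex_lock(&d->lock);
	d->pending[d->npending++] = desc;
	if (!d->is_suspended)     /* otherwise resume will flush it */
		run_queue(d);
	pthread_mutex_unlock(&d->lock);
}

/* Mirrors desc_to_chan()/cppi41_stop_chan() dropping the ref. */
static void complete_desc(struct fake_dev *d, int desc)
{
	pthread_mutex_lock(&d->lock);
	d->usage_count--;         /* pm_runtime_put() in the driver */
	printf("completed desc %d (usage=%d)\n", desc, d->usage_count);
	pthread_mutex_unlock(&d->lock);
}

static void runtime_resume(struct fake_dev *d)
{
	pthread_mutex_lock(&d->lock);
	d->is_suspended = 0;
	run_queue(d);             /* flush work queued while suspended */
	pthread_mutex_unlock(&d->lock);
}

int main(void)
{
	struct fake_dev d = { .lock = PTHREAD_MUTEX_INITIALIZER,
			      .is_suspended = 1 };

	issue_pending(&d, 1);     /* deferred: device suspended */
	runtime_resume(&d);       /* desc 1 actually queued here */
	complete_desc(&d, 1);
	return 0;
}

The key property, as in the driver, is that the reference count stays nonzero for exactly as long as a descriptor is in flight, so autosuspend cannot fire mid-transfer.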
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 740bbb942594..f37f4978dabb 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -1699,7 +1699,6 @@ static bool _chan_ns(const struct pl330_dmac *pl330, int i)
static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
{
struct pl330_thread *thrd = NULL;
- unsigned long flags;
int chans, i;
if (pl330->state == DYING)
@@ -1707,8 +1706,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
chans = pl330->pcfg.num_chan;
- spin_lock_irqsave(&pl330->lock, flags);
-
for (i = 0; i < chans; i++) {
thrd = &pl330->channels[i];
if ((thrd->free) && (!_manager_ns(thrd) ||
@@ -1726,8 +1723,6 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
thrd = NULL;
}
- spin_unlock_irqrestore(&pl330->lock, flags);
-
return thrd;
}
@@ -1745,7 +1740,6 @@ static inline void _free_event(struct pl330_thread *thrd, int ev)
static void pl330_release_channel(struct pl330_thread *thrd)
{
struct pl330_dmac *pl330;
- unsigned long flags;
if (!thrd || thrd->free)
return;
@@ -1757,10 +1751,8 @@ static void pl330_release_channel(struct pl330_thread *thrd)
pl330 = thrd->dmac;
- spin_lock_irqsave(&pl330->lock, flags);
_free_event(thrd, thrd->ev);
thrd->free = true;
- spin_unlock_irqrestore(&pl330->lock, flags);
}
/* Initialize the structure for PL330 configuration, that can be used
@@ -1867,9 +1859,10 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330)
* Alloc MicroCode buffer for 'chans' Channel threads.
* A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
*/
- pl330->mcode_cpu = dma_alloc_coherent(pl330->ddma.dev,
+ pl330->mcode_cpu = dma_alloc_attrs(pl330->ddma.dev,
chans * pl330->mcbufsz,
- &pl330->mcode_bus, GFP_KERNEL);
+ &pl330->mcode_bus, GFP_KERNEL,
+ DMA_ATTR_PRIVILEGED);
if (!pl330->mcode_cpu) {
dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
__func__, __LINE__);
@@ -2122,20 +2115,20 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan)
struct pl330_dmac *pl330 = pch->dmac;
unsigned long flags;
- spin_lock_irqsave(&pch->lock, flags);
+ spin_lock_irqsave(&pl330->lock, flags);
dma_cookie_init(chan);
pch->cyclic = false;
pch->thread = pl330_request_channel(pl330);
if (!pch->thread) {
- spin_unlock_irqrestore(&pch->lock, flags);
+ spin_unlock_irqrestore(&pl330->lock, flags);
return -ENOMEM;
}
tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);
- spin_unlock_irqrestore(&pch->lock, flags);
+ spin_unlock_irqrestore(&pl330->lock, flags);
return 1;
}
@@ -2238,12 +2231,13 @@ static int pl330_pause(struct dma_chan *chan)
static void pl330_free_chan_resources(struct dma_chan *chan)
{
struct dma_pl330_chan *pch = to_pchan(chan);
+ struct pl330_dmac *pl330 = pch->dmac;
unsigned long flags;
tasklet_kill(&pch->task);
pm_runtime_get_sync(pch->dmac->ddma.dev);
- spin_lock_irqsave(&pch->lock, flags);
+ spin_lock_irqsave(&pl330->lock, flags);
pl330_release_channel(pch->thread);
pch->thread = NULL;
@@ -2251,7 +2245,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
if (pch->cyclic)
list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
- spin_unlock_irqrestore(&pch->lock, flags);
+ spin_unlock_irqrestore(&pl330->lock, flags);
pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
}
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index 921dfa047202..260c4b4b492e 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -187,6 +187,7 @@ static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
struct exit_boot_struct {
efi_memory_desc_t *runtime_map;
int *runtime_entry_count;
+ void *new_fdt_addr;
};
static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
@@ -202,7 +203,7 @@ static efi_status_t exit_boot_func(efi_system_table_t *sys_table_arg,
efi_get_virtmap(*map->map, *map->map_size, *map->desc_size,
p->runtime_map, p->runtime_entry_count);
- return EFI_SUCCESS;
+ return update_fdt_memmap(p->new_fdt_addr, map);
}
/*
@@ -300,22 +301,13 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
priv.runtime_map = runtime_map;
priv.runtime_entry_count = &runtime_entry_count;
+ priv.new_fdt_addr = (void *)*new_fdt_addr;
status = efi_exit_boot_services(sys_table, handle, &map, &priv,
exit_boot_func);
if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam;
- status = update_fdt_memmap((void *)*new_fdt_addr, &map);
- if (status != EFI_SUCCESS) {
- /*
- * The kernel won't get far without the memory map, but
- * may still be able to print something meaningful so
- * return success here.
- */
- return EFI_SUCCESS;
- }
-
/* Install the new virtual address map */
svam = sys_table->runtime->set_virtual_address_map;
status = svam(runtime_entry_count * desc_size, desc_size,
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index e2b0b1646f99..0635829b18cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -254,6 +254,9 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
}
WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);
+ if (adev->mode_info.num_crtc)
+ amdgpu_display_set_vga_render_state(adev, false);
+
gmc_v6_0_mc_stop(adev, &save);
if (gmc_v6_0_wait_for_idle((void *)adev)) {
@@ -283,7 +286,6 @@ static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
dev_warn(adev->dev, "Wait for MC idle timedout !\n");
}
gmc_v6_0_mc_resume(adev, &save);
- amdgpu_display_set_vga_render_state(adev, false);
}
static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 50f5cf7b69d1..fdfb1ec17e66 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -2032,13 +2032,16 @@ static void complete_crtc_signaling(struct drm_device *dev,
}
for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ struct drm_pending_vblank_event *event = crtc_state->event;
/*
- * TEST_ONLY and PAGE_FLIP_EVENT are mutually
- * exclusive, if they weren't, this code should be
- * called on success for TEST_ONLY too.
+ * Free the allocated event. drm_atomic_helper_setup_commit
+ * can allocate an event too, so only free it if it's ours
+ * to prevent a double free in drm_atomic_state_clear.
*/
- if (crtc_state->event)
- drm_event_cancel_free(dev, &crtc_state->event->base);
+ if (event && (event->base.fence || event->base.file_priv)) {
+ drm_event_cancel_free(dev, &event->base);
+ crtc_state->event = NULL;
+ }
}
if (!fence_state)
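
The drm_atomic hunk frees a vblank event on the failure path only when the event carries a fence or file_priv, i.e. only when this ioctl path allocated it, so events installed by drm_atomic_helper_setup_commit are not freed twice in drm_atomic_state_clear. Below is a minimal sketch of that ownership rule; the types and the "ours" flag are illustrative stand-ins for the driver's test.

/*
 * Sketch of ownership-tagged cleanup: on error, free only the
 * events this path created, never ones a helper installed.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct event {
	bool ours; /* stands in for "has fence/file_priv" in the driver */
};

struct crtc_state {
	struct event *event;
};

static void cleanup_on_error(struct crtc_state *states, int n)
{
	for (int i = 0; i < n; i++) {
		struct event *e = states[i].event;

		if (e && e->ours) {             /* only free what we created */
			free(e);
			states[i].event = NULL; /* avoid a later double free */
		}
	}
}

int main(void)
{
	struct event *mine = malloc(sizeof(*mine));
	struct event helper_owned = { .ours = false };
	struct crtc_state states[2] = {
		{ .event = mine },
		{ .event = &helper_owned },
	};

	mine->ours = true;
	cleanup_on_error(states, 2);
	printf("helper event kept: %d\n", states[1].event != NULL);
	return 0;
}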
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 34f757bcabae..4594477dee00 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1666,9 +1666,6 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
funcs = plane->helper_private;
- if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
- continue;
-
if (funcs->prepare_fb) {
ret = funcs->prepare_fb(plane, plane_state);
if (ret)
@@ -1685,9 +1682,6 @@ fail:
if (j >= i)
continue;
- if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc))
- continue;
-
funcs = plane->helper_private;
if (funcs->cleanup_fb)
@@ -1954,9 +1948,6 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
for_each_plane_in_state(old_state, plane, plane_state, i) {
const struct drm_plane_helper_funcs *funcs;
- if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc))
- continue;
-
funcs = plane->helper_private;
if (funcs->cleanup_fb)
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 5a4526289392..7a7019ac9388 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -225,6 +225,7 @@ int drm_connector_init(struct drm_device *dev,
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
+ mutex_init(&connector->mutex);
connector->edid_blob_ptr = NULL;
connector->status = connector_status_unknown;
@@ -359,6 +360,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
connector->funcs->atomic_destroy_state(connector,
connector->state);
+ mutex_destroy(&connector->mutex);
+
memset(connector, 0, sizeof(*connector));
}
EXPORT_SYMBOL(drm_connector_cleanup);
@@ -374,14 +377,18 @@ EXPORT_SYMBOL(drm_connector_cleanup);
*/
int drm_connector_register(struct drm_connector *connector)
{
- int ret;
+ int ret = 0;
- if (connector->registered)
+ if (!connector->dev->registered)
return 0;
+ mutex_lock(&connector->mutex);
+ if (connector->registered)
+ goto unlock;
+
ret = drm_sysfs_connector_add(connector);
if (ret)
- return ret;
+ goto unlock;
ret = drm_debugfs_connector_add(connector);
if (ret) {
@@ -397,12 +404,14 @@ int drm_connector_register(struct drm_connector *connector)
drm_mode_object_register(connector->dev, &connector->base);
connector->registered = true;
- return 0;
+ goto unlock;
err_debugfs:
drm_debugfs_connector_remove(connector);
err_sysfs:
drm_sysfs_connector_remove(connector);
+unlock:
+ mutex_unlock(&connector->mutex);
return ret;
}
EXPORT_SYMBOL(drm_connector_register);
@@ -415,8 +424,11 @@ EXPORT_SYMBOL(drm_connector_register);
*/
void drm_connector_unregister(struct drm_connector *connector)
{
- if (!connector->registered)
+ mutex_lock(&connector->mutex);
+ if (!connector->registered) {
+ mutex_unlock(&connector->mutex);
return;
+ }
if (connector->funcs->early_unregister)
connector->funcs->early_unregister(connector);
@@ -425,6 +437,7 @@ void drm_connector_unregister(struct drm_connector *connector)
drm_debugfs_connector_remove(connector);
connector->registered = false;
+ mutex_unlock(&connector->mutex);
}
EXPORT_SYMBOL(drm_connector_unregister);
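
The drm_connector hunks serialize register/unregister with a per-connector mutex around the registered flag, and gate registration on the new device-level dev->registered flag so that connectors created before drm_dev_register() completes are registered later, not racily. A small pthread sketch of the same idempotent, mutex-guarded transition, with illustrative names:

/*
 * Sketch of the locking added to drm_connector_register()/
 * drm_connector_unregister(): a per-object mutex makes the flag
 * transitions atomic and both paths idempotent.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

struct connector {
	pthread_mutex_t mutex;
	bool dev_registered; /* stands in for connector->dev->registered */
	bool registered;
};

static int connector_register(struct connector *c)
{
	int ret = 0;

	if (!c->dev_registered)   /* too early: device not up yet */
		return 0;

	pthread_mutex_lock(&c->mutex);
	if (c->registered)        /* already done: idempotent */
		goto unlock;

	printf("registering sysfs/debugfs entries\n");
	c->registered = true;
unlock:
	pthread_mutex_unlock(&c->mutex);
	return ret;
}

static void connector_unregister(struct connector *c)
{
	pthread_mutex_lock(&c->mutex);
	if (c->registered) {
		printf("removing sysfs/debugfs entries\n");
		c->registered = false;
	}
	pthread_mutex_unlock(&c->mutex);
}

int main(void)
{
	struct connector c = { .mutex = PTHREAD_MUTEX_INITIALIZER,
			       .dev_registered = true };

	connector_register(&c);
	connector_register(&c);   /* second call is a no-op */
	connector_unregister(&c);
	return 0;
}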
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index a525751b4559..6594b4088f11 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -745,6 +745,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
if (ret)
goto err_minors;
+ dev->registered = true;
+
if (dev->driver->load) {
ret = dev->driver->load(dev, flags);
if (ret)
@@ -785,6 +787,8 @@ void drm_dev_unregister(struct drm_device *dev)
drm_lastclose(dev);
+ dev->registered = false;
+
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_modeset_unregister_all(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 69bc3b0c4390..8493e19b563a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1012,6 +1012,8 @@ struct intel_fbc {
struct work_struct underrun_work;
struct intel_fbc_state_cache {
+ struct i915_vma *vma;
+
struct {
unsigned int mode_flags;
uint32_t hsw_bdw_pixel_rate;
@@ -1025,15 +1027,14 @@ struct intel_fbc {
} plane;
struct {
- u64 ilk_ggtt_offset;
uint32_t pixel_format;
unsigned int stride;
- int fence_reg;
- unsigned int tiling_mode;
} fb;
} state_cache;
struct intel_fbc_reg_params {
+ struct i915_vma *vma;
+
struct {
enum pipe pipe;
enum plane plane;
@@ -1041,10 +1042,8 @@ struct intel_fbc {
} crtc;
struct {
- u64 ggtt_offset;
uint32_t pixel_format;
unsigned int stride;
- int fence_reg;
} fb;
int cfb_size;
@@ -3168,13 +3167,6 @@ i915_gem_object_to_ggtt(struct drm_i915_gem_object *obj,
return i915_gem_obj_to_vma(obj, &to_i915(obj->base.dev)->ggtt.base, view);
}
-static inline unsigned long
-i915_gem_object_ggtt_offset(struct drm_i915_gem_object *o,
- const struct i915_ggtt_view *view)
-{
- return i915_ggtt_offset(i915_gem_object_to_ggtt(o, view));
-}
-
/* i915_gem_fence_reg.c */
int __must_check i915_vma_get_fence(struct i915_vma *vma);
int __must_check i915_vma_put_fence(struct i915_vma *vma);
diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c
index dbe9fb41ae53..8d3e515f27ba 100644
--- a/drivers/gpu/drm/i915/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/intel_atomic_plane.c
@@ -85,6 +85,8 @@ intel_plane_duplicate_state(struct drm_plane *plane)
__drm_atomic_helper_plane_duplicate_state(plane, state);
+ intel_state->vma = NULL;
+
return state;
}
@@ -100,6 +102,24 @@ void
intel_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ struct i915_vma *vma;
+
+ vma = fetch_and_zero(&to_intel_plane_state(state)->vma);
+
+ /*
+ * FIXME: Normally intel_cleanup_plane_fb handles destruction of vma.
+ * We currently don't clear all planes during driver unload, so we have
+ * to be able to unpin vma here for now.
+ *
+ * Normally this can only happen during unload when kmscon is disabled
+ * and userspace doesn't attempt to set a framebuffer at all.
+ */
+ if (vma) {
+ mutex_lock(&plane->dev->struct_mutex);
+ intel_unpin_fb_vma(vma);
+ mutex_unlock(&plane->dev->struct_mutex);
+ }
+
drm_atomic_helper_plane_destroy_state(plane, state);
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f0b9aa7a0483..f1e4a21d4664 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2235,27 +2235,22 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
i915_vma_pin_fence(vma);
}
+ i915_vma_get(vma);
err:
intel_runtime_pm_put(dev_priv);
return vma;
}
-void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
+void intel_unpin_fb_vma(struct i915_vma *vma)
{
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct i915_ggtt_view view;
- struct i915_vma *vma;
-
- WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
-
- intel_fill_fb_ggtt_view(&view, fb, rotation);
- vma = i915_gem_object_to_ggtt(obj, &view);
+ lockdep_assert_held(&vma->vm->dev->struct_mutex);
if (WARN_ON_ONCE(!vma))
return;
i915_vma_unpin_fence(vma);
i915_gem_object_unpin_from_display_plane(vma);
+ i915_vma_put(vma);
}
static int intel_fb_pitch(const struct drm_framebuffer *fb, int plane,
@@ -2750,7 +2745,6 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_crtc *c;
- struct intel_crtc *i;
struct drm_i915_gem_object *obj;
struct drm_plane *primary = intel_crtc->base.primary;
struct drm_plane_state *plane_state = primary->state;
@@ -2775,20 +2769,20 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
* an fb with another CRTC instead
*/
for_each_crtc(dev, c) {
- i = to_intel_crtc(c);
+ struct intel_plane_state *state;
if (c == &intel_crtc->base)
continue;
- if (!i->active)
+ if (!to_intel_crtc(c)->active)
continue;
- fb = c->primary->fb;
- if (!fb)
+ state = to_intel_plane_state(c->primary->state);
+ if (!state->vma)
continue;
- obj = intel_fb_obj(fb);
- if (i915_gem_object_ggtt_offset(obj, NULL) == plane_config->base) {
+ if (intel_plane_ggtt_offset(state) == plane_config->base) {
+ fb = c->primary->fb;
drm_framebuffer_reference(fb);
goto valid_fb;
}
@@ -2809,6 +2803,19 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
return;
valid_fb:
+ mutex_lock(&dev->struct_mutex);
+ intel_state->vma =
+ intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
+ mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR(intel_state->vma)) {
+ DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
+ intel_crtc->pipe, PTR_ERR(intel_state->vma));
+
+ intel_state->vma = NULL;
+ drm_framebuffer_unreference(fb);
+ return;
+ }
+
plane_state->src_x = 0;
plane_state->src_y = 0;
plane_state->src_w = fb->width << 16;
@@ -3104,13 +3111,13 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_GEN(dev_priv) >= 4) {
I915_WRITE(DSPSURF(plane),
- intel_fb_gtt_offset(fb, rotation) +
+ intel_plane_ggtt_offset(plane_state) +
intel_crtc->dspaddr_offset);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
} else {
I915_WRITE(DSPADDR(plane),
- intel_fb_gtt_offset(fb, rotation) +
+ intel_plane_ggtt_offset(plane_state) +
intel_crtc->dspaddr_offset);
}
POSTING_READ(reg);
@@ -3207,7 +3214,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_WRITE(DSPSURF(plane),
- intel_fb_gtt_offset(fb, rotation) +
+ intel_plane_ggtt_offset(plane_state) +
intel_crtc->dspaddr_offset);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
@@ -3230,23 +3237,6 @@ u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
}
}
-u32 intel_fb_gtt_offset(struct drm_framebuffer *fb,
- unsigned int rotation)
-{
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct i915_ggtt_view view;
- struct i915_vma *vma;
-
- intel_fill_fb_ggtt_view(&view, fb, rotation);
-
- vma = i915_gem_object_to_ggtt(obj, &view);
- if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
- view.type))
- return -1;
-
- return i915_ggtt_offset(vma);
-}
-
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
struct drm_device *dev = intel_crtc->base.dev;
@@ -3441,7 +3431,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
}
I915_WRITE(PLANE_SURF(pipe, 0),
- intel_fb_gtt_offset(fb, rotation) + surf_addr);
+ intel_plane_ggtt_offset(plane_state) + surf_addr);
POSTING_READ(PLANE_SURF(pipe, 0));
}
@@ -11536,7 +11526,7 @@ static void intel_unpin_work_fn(struct work_struct *__work)
flush_work(&work->mmio_work);
mutex_lock(&dev->struct_mutex);
- intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
+ intel_unpin_fb_vma(work->old_vma);
i915_gem_object_put(work->pending_flip_obj);
mutex_unlock(&dev->struct_mutex);
@@ -12246,8 +12236,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto cleanup_pending;
}
- work->gtt_offset = intel_fb_gtt_offset(fb, primary->state->rotation);
- work->gtt_offset += intel_crtc->dspaddr_offset;
+ work->old_vma = to_intel_plane_state(primary->state)->vma;
+ to_intel_plane_state(primary->state)->vma = vma;
+
+ work->gtt_offset = i915_ggtt_offset(vma) + intel_crtc->dspaddr_offset;
work->rotation = crtc->primary->state->rotation;
/*
@@ -12301,7 +12293,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_request:
i915_add_request_no_flush(request);
cleanup_unpin:
- intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
+ to_intel_plane_state(primary->state)->vma = work->old_vma;
+ intel_unpin_fb_vma(vma);
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
unlock:
@@ -14794,6 +14787,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
DRM_DEBUG_KMS("failed to pin object\n");
return PTR_ERR(vma);
}
+
+ to_intel_plane_state(new_state)->vma = vma;
}
return 0;
@@ -14812,19 +14807,12 @@ void
intel_cleanup_plane_fb(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct intel_plane_state *old_intel_state;
- struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
- struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
-
- old_intel_state = to_intel_plane_state(old_state);
-
- if (!obj && !old_obj)
- return;
+ struct i915_vma *vma;
- if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
- !INTEL_INFO(dev_priv)->cursor_needs_physical))
- intel_unpin_fb_obj(old_state->fb, old_state->rotation);
+ /* Should only be called after a successful intel_prepare_plane_fb()! */
+ vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma);
+ if (vma)
+ intel_unpin_fb_vma(vma);
}
int
@@ -15166,7 +15154,7 @@ intel_update_cursor_plane(struct drm_plane *plane,
if (!obj)
addr = 0;
else if (!INTEL_INFO(dev_priv)->cursor_needs_physical)
- addr = i915_gem_object_ggtt_offset(obj, NULL);
+ addr = intel_plane_ggtt_offset(state);
else
addr = obj->phys_handle->busaddr;
@@ -17066,41 +17054,12 @@ void intel_display_resume(struct drm_device *dev)
void intel_modeset_gem_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_crtc *c;
- struct drm_i915_gem_object *obj;
intel_init_gt_powersave(dev_priv);
intel_modeset_init_hw(dev);
intel_setup_overlay(dev_priv);
-
- /*
- * Make sure any fbs we allocated at startup are properly
- * pinned & fenced. When we do the allocation it's too early
- * for this.
- */
- for_each_crtc(dev, c) {
- struct i915_vma *vma;
-
- obj = intel_fb_obj(c->primary->fb);
- if (obj == NULL)
- continue;
-
- mutex_lock(&dev->struct_mutex);
- vma = intel_pin_and_fence_fb_obj(c->primary->fb,
- c->primary->state->rotation);
- mutex_unlock(&dev->struct_mutex);
- if (IS_ERR(vma)) {
- DRM_ERROR("failed to pin boot fb on pipe %d\n",
- to_intel_crtc(c)->pipe);
- drm_framebuffer_unreference(c->primary->fb);
- c->primary->fb = NULL;
- c->primary->crtc = c->primary->state->crtc = NULL;
- update_state_fb(c->primary);
- c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
- }
- }
}
int intel_connector_register(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cd72ae171eeb..03a2112004f9 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -377,6 +377,7 @@ struct intel_atomic_state {
struct intel_plane_state {
struct drm_plane_state base;
struct drm_rect clip;
+ struct i915_vma *vma;
struct {
u32 offset;
@@ -1046,6 +1047,7 @@ struct intel_flip_work {
struct work_struct mmio_work;
struct drm_crtc *crtc;
+ struct i915_vma *old_vma;
struct drm_framebuffer *old_fb;
struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
@@ -1273,7 +1275,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
struct drm_modeset_acquire_ctx *ctx);
struct i915_vma *
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
-void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
+void intel_unpin_fb_vma(struct i915_vma *vma);
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd2 *mode_cmd,
@@ -1362,7 +1364,10 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state);
int skl_max_scale(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state);
-u32 intel_fb_gtt_offset(struct drm_framebuffer *fb, unsigned int rotation);
+static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state)
+{
+ return i915_ggtt_offset(state->vma);
+}
u32 skl_plane_ctl_format(uint32_t pixel_format);
u32 skl_plane_ctl_tiling(uint64_t fb_modifier);
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 62f215b12eb5..f3a1d6a5cabe 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -173,7 +173,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
if (IS_I945GM(dev_priv))
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
- fbc_ctl |= params->fb.fence_reg;
+ fbc_ctl |= params->vma->fence->id;
I915_WRITE(FBC_CONTROL, fbc_ctl);
}
@@ -193,8 +193,8 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
else
dpfc_ctl |= DPFC_CTL_LIMIT_1X;
- if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
- dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fb.fence_reg;
+ if (params->vma->fence) {
+ dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id;
I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
} else {
I915_WRITE(DPFC_FENCE_YOFF, 0);
@@ -251,13 +251,14 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
break;
}
- if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+ if (params->vma->fence) {
dpfc_ctl |= DPFC_CTL_FENCE_EN;
if (IS_GEN5(dev_priv))
- dpfc_ctl |= params->fb.fence_reg;
+ dpfc_ctl |= params->vma->fence->id;
if (IS_GEN6(dev_priv)) {
I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+ SNB_CPU_FENCE_ENABLE |
+ params->vma->fence->id);
I915_WRITE(DPFC_CPU_FENCE_OFFSET,
params->crtc.fence_y_offset);
}
@@ -269,7 +270,8 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
}
I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
- I915_WRITE(ILK_FBC_RT_BASE, params->fb.ggtt_offset | ILK_FBC_RT_VALID);
+ I915_WRITE(ILK_FBC_RT_BASE,
+ i915_ggtt_offset(params->vma) | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
@@ -319,10 +321,11 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
break;
}
- if (params->fb.fence_reg != I915_FENCE_REG_NONE) {
+ if (params->vma->fence) {
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE | params->fb.fence_reg);
+ SNB_CPU_FENCE_ENABLE |
+ params->vma->fence->id);
I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
} else {
I915_WRITE(SNB_DPFC_CTL_SA,0);
@@ -727,14 +730,6 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
return effective_w <= max_w && effective_h <= max_h;
}
-/* XXX replace me when we have VMA tracking for intel_plane_state */
-static int get_fence_id(struct drm_framebuffer *fb)
-{
- struct i915_vma *vma = i915_gem_object_to_ggtt(intel_fb_obj(fb), NULL);
-
- return vma && vma->fence ? vma->fence->id : I915_FENCE_REG_NONE;
-}
-
static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
@@ -743,7 +738,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
struct drm_framebuffer *fb = plane_state->base.fb;
- struct drm_i915_gem_object *obj;
+
+ cache->vma = NULL;
cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
@@ -758,16 +754,10 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
if (!cache->plane.visible)
return;
- obj = intel_fb_obj(fb);
-
- /* FIXME: We lack the proper locking here, so only run this on the
- * platforms that need. */
- if (IS_GEN(dev_priv, 5, 6))
- cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL);
cache->fb.pixel_format = fb->pixel_format;
cache->fb.stride = fb->pitches[0];
- cache->fb.fence_reg = get_fence_id(fb);
- cache->fb.tiling_mode = i915_gem_object_get_tiling(obj);
+
+ cache->vma = plane_state->vma;
}
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
@@ -784,7 +774,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
- if (!cache->plane.visible) {
+ if (!cache->vma) {
fbc->no_fbc_reason = "primary plane not visible";
return false;
}
@@ -807,8 +797,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
* so have no fence associated with it) due to aperture constaints
* at the time of pinning.
*/
- if (cache->fb.tiling_mode != I915_TILING_X ||
- cache->fb.fence_reg == I915_FENCE_REG_NONE) {
+ if (!cache->vma->fence) {
fbc->no_fbc_reason = "framebuffer not tiled or fenced";
return false;
}
@@ -888,17 +877,16 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
* zero. */
memset(params, 0, sizeof(*params));
+ params->vma = cache->vma;
+
params->crtc.pipe = crtc->pipe;
params->crtc.plane = crtc->plane;
params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc);
params->fb.pixel_format = cache->fb.pixel_format;
params->fb.stride = cache->fb.stride;
- params->fb.fence_reg = cache->fb.fence_reg;
params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
-
- params->fb.ggtt_offset = cache->fb.ilk_ggtt_offset;
}
static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 8cf2d80f2254..f4a8c4fc57c4 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -284,7 +284,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
out_destroy_fbi:
drm_fb_helper_release_fbi(helper);
out_unpin:
- intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+ intel_unpin_fb_vma(vma);
out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -549,7 +549,7 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
if (ifbdev->fb) {
mutex_lock(&ifbdev->helper.dev->struct_mutex);
- intel_unpin_fb_obj(&ifbdev->fb->base, DRM_ROTATE_0);
+ intel_unpin_fb_vma(ifbdev->vma);
mutex_unlock(&ifbdev->helper.dev->struct_mutex);
drm_framebuffer_remove(&ifbdev->fb->base);
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 8f131a08d440..242a73e66d82 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -273,7 +273,7 @@ skl_update_plane(struct drm_plane *drm_plane,
I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
I915_WRITE(PLANE_SURF(pipe, plane),
- intel_fb_gtt_offset(fb, rotation) + surf_addr);
+ intel_plane_ggtt_offset(plane_state) + surf_addr);
POSTING_READ(PLANE_SURF(pipe, plane));
}
@@ -458,7 +458,7 @@ vlv_update_plane(struct drm_plane *dplane,
I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
I915_WRITE(SPCNTR(pipe, plane), sprctl);
I915_WRITE(SPSURF(pipe, plane),
- intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
+ intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
POSTING_READ(SPSURF(pipe, plane));
}
@@ -594,7 +594,7 @@ ivb_update_plane(struct drm_plane *plane,
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
I915_WRITE(SPRSURF(pipe),
- intel_fb_gtt_offset(fb, rotation) + sprsurf_offset);
+ intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
}
@@ -721,7 +721,7 @@ ilk_update_plane(struct drm_plane *plane,
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_WRITE(DVSSURF(pipe),
- intel_fb_gtt_offset(fb, rotation) + dvssurf_offset);
+ intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c
index 74856a8b8f35..e64f52464ecf 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/hw.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c
@@ -222,6 +222,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
uint32_t mpllP;
pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
+ mpllP = (mpllP >> 8) & 0xf;
if (!mpllP)
mpllP = 4;
@@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
uint32_t clock;
pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
- return clock;
+ return clock / 1000;
}
ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index ccdce1b4eec4..d5e58a38f160 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -99,6 +99,7 @@ struct nv84_fence_priv {
struct nouveau_bo *bo;
struct nouveau_bo *bo_gart;
u32 *suspend;
+ struct mutex mutex;
};
int nv84_fence_context_new(struct nouveau_channel *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.h b/drivers/gpu/drm/nouveau/nouveau_led.h
index 187ecdb82002..21a5775028cc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_led.h
+++ b/drivers/gpu/drm/nouveau/nouveau_led.h
@@ -42,7 +42,7 @@ nouveau_led(struct drm_device *dev)
}
/* nouveau_led.c */
-#if IS_ENABLED(CONFIG_LEDS_CLASS)
+#if IS_REACHABLE(CONFIG_LEDS_CLASS)
int nouveau_led_init(struct drm_device *dev);
void nouveau_led_suspend(struct drm_device *dev);
void nouveau_led_resume(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 08f9c6fa0f7f..1fba38622744 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -313,7 +313,8 @@ usif_ioctl(struct drm_file *filp, void __user *user, u32 argc)
if (!(ret = nvif_unpack(-ENOSYS, &data, &size, argv->v0, 0, 0, true))) {
/* block access to objects not created via this interface */
owner = argv->v0.owner;
- if (argv->v0.object == 0ULL)
+ if (argv->v0.object == 0ULL &&
+ argv->v0.type != NVIF_IOCTL_V0_DEL)
argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
else
argv->v0.owner = NVDRM_OBJECT_USIF;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 2c2c64507661..32097fd615fd 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -4052,6 +4052,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
}
}
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc->state->event)
+ drm_crtc_vblank_get(crtc);
+ }
+
/* Update plane(s). */
for_each_plane_in_state(state, plane, plane_state, i) {
struct nv50_wndw_atom *asyw = nv50_wndw_atom(plane->state);
@@ -4101,6 +4106,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
drm_crtc_send_vblank_event(crtc, crtc->state->event);
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
crtc->state->event = NULL;
+ drm_crtc_vblank_put(crtc);
}
}
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 52b87ae83e7b..f0b322bec7df 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -107,8 +107,10 @@ nv84_fence_context_del(struct nouveau_channel *chan)
struct nv84_fence_chan *fctx = chan->fence;
nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
+ mutex_lock(&priv->mutex);
nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
nouveau_bo_vma_del(priv->bo, &fctx->vma);
+ mutex_unlock(&priv->mutex);
nouveau_fence_context_del(&fctx->base);
chan->fence = NULL;
nouveau_fence_context_free(&fctx->base);
@@ -134,11 +136,13 @@ nv84_fence_context_new(struct nouveau_channel *chan)
fctx->base.sync32 = nv84_fence_sync32;
fctx->base.sequence = nv84_fence_read(chan);
+ mutex_lock(&priv->mutex);
ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
if (ret == 0) {
ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
&fctx->vma_gart);
}
+ mutex_unlock(&priv->mutex);
if (ret)
nv84_fence_context_del(chan);
@@ -212,6 +216,8 @@ nv84_fence_create(struct nouveau_drm *drm)
priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
priv->base.uevent = true;
+ mutex_init(&priv->mutex);
+
/* Use VRAM if there is any ; otherwise fallback to system memory */
domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
/*
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
index 6f0436df0219..f8f2f16c22a2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/hdagt215.c
@@ -59,7 +59,7 @@ gt215_hda_eld(NV50_DISP_MTHD_V1)
);
}
for (i = 0; i < size; i++)
- nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[0]);
+ nvkm_wr32(device, 0x61c440 + soff, (i << 8) | args->v0.data[i]);
for (; i < 0x60; i++)
nvkm_wr32(device, 0x61c440 + soff, (i << 8));
nvkm_mask(device, 0x61c448 + soff, 0x80000003, 0x80000003);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index 567466f93cd5..0db8efbf1c2e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -433,8 +433,6 @@ nv50_disp_dptmds_war(struct nvkm_device *device)
case 0x94:
case 0x96:
case 0x98:
- case 0xaa:
- case 0xac:
return true;
default:
break;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e0c143b865f3..30bd4a6a9d46 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -97,9 +97,10 @@
* 2.46.0 - Add PFP_SYNC_ME support on evergreen
* 2.47.0 - Add UVD_NO_OP register support
* 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI
+ * 2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 48
+#define KMS_DRIVER_MINOR 49
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 0bcffd8a7bd3..96683f5b2b1b 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -220,8 +220,8 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
man = &rdev->mman.bdev.man[TTM_PL_VRAM];
- args->vram_size = rdev->mc.real_vram_size;
- args->vram_visible = (u64)man->size << PAGE_SHIFT;
+ args->vram_size = (u64)man->size << PAGE_SHIFT;
+ args->vram_visible = rdev->mc.visible_vram_size;
args->vram_visible -= rdev->vram_pin_size;
args->gart_size = rdev->mc.gtt_size;
args->gart_size -= rdev->gart_pin_size;
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index f31a778b0851..b22d0f83f8e3 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -168,7 +168,7 @@ struct cp2112_device {
atomic_t xfer_avail;
struct gpio_chip gc;
u8 *in_out_buffer;
- spinlock_t lock;
+ struct mutex lock;
struct gpio_desc *desc[8];
bool gpio_poll;
@@ -186,10 +186,9 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
- unsigned long flags;
int ret;
- spin_lock_irqsave(&dev->lock, flags);
+ mutex_lock(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -213,8 +212,8 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
ret = 0;
exit:
- spin_unlock_irqrestore(&dev->lock, flags);
- return ret <= 0 ? ret : -EIO;
+ mutex_unlock(&dev->lock);
+ return ret < 0 ? ret : -EIO;
}
static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
@@ -222,10 +221,9 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
- unsigned long flags;
int ret;
- spin_lock_irqsave(&dev->lock, flags);
+ mutex_lock(&dev->lock);
buf[0] = CP2112_GPIO_SET;
buf[1] = value ? 0xff : 0;
@@ -237,7 +235,7 @@ static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
if (ret < 0)
hid_err(hdev, "error setting GPIO values: %d\n", ret);
- spin_unlock_irqrestore(&dev->lock, flags);
+ mutex_unlock(&dev->lock);
}
static int cp2112_gpio_get_all(struct gpio_chip *chip)
@@ -245,10 +243,9 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
- unsigned long flags;
int ret;
- spin_lock_irqsave(&dev->lock, flags);
+ mutex_lock(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
@@ -262,7 +259,7 @@ static int cp2112_gpio_get_all(struct gpio_chip *chip)
ret = buf[1];
exit:
- spin_unlock_irqrestore(&dev->lock, flags);
+ mutex_unlock(&dev->lock);
return ret;
}
@@ -284,10 +281,9 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
u8 *buf = dev->in_out_buffer;
- unsigned long flags;
int ret;
- spin_lock_irqsave(&dev->lock, flags);
+ mutex_lock(&dev->lock);
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
@@ -308,7 +304,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
goto fail;
}
- spin_unlock_irqrestore(&dev->lock, flags);
+ mutex_unlock(&dev->lock);
/*
* Set gpio value when output direction is already set,
@@ -319,7 +315,7 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip,
return 0;
fail:
- spin_unlock_irqrestore(&dev->lock, flags);
+ mutex_unlock(&dev->lock);
return ret < 0 ? ret : -EIO;
}
@@ -1235,7 +1231,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!dev->in_out_buffer)
return -ENOMEM;
- spin_lock_init(&dev->lock);
+ mutex_init(&dev->lock);
ret = hid_parse(hdev);
if (ret) {
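
The cp2112 hunks replace a spinlock with a mutex because the lock is held across hid_hw_raw_request(), which can sleep; sleeping with a spinlock held and interrupts disabled is not allowed, while a mutex may be held across blocking calls. A userspace sketch of the resulting shape, serializing a shared transfer buffer around a blocking operation; the command byte and helper are hypothetical stand-ins:

/*
 * Sketch of why cp2112 needs a mutex: the lock must stay held
 * across a call that blocks (hid_hw_raw_request() in the driver,
 * a sleeping "transfer" here).
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char in_out_buffer[8]; /* shared request/response buffer */

/* Stand-in for hid_hw_raw_request(): it blocks, so a spinning lock
 * (or IRQs-off context) would be the wrong tool around it. */
static int blocking_transfer(unsigned char *buf, size_t len)
{
	usleep(1000);    /* pretend USB round trip */
	buf[1] = 0x5a;   /* pretend device response */
	return (int)len;
}

static int gpio_get_all(void)
{
	int ret;

	pthread_mutex_lock(&lock);   /* sleeping while held is fine */
	in_out_buffer[0] = 0x02;     /* hypothetical GPIO_GET command */
	ret = blocking_transfer(in_out_buffer, sizeof(in_out_buffer));
	if (ret >= 2)
		ret = in_out_buffer[1];
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	printf("gpio state: 0x%02x\n", gpio_get_all());
	return 0;
}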
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index f46f2c5117fa..350accfee8e8 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -76,6 +76,9 @@
#define USB_VENDOR_ID_ALPS_JP 0x044E
#define HID_DEVICE_ID_ALPS_U1_DUAL 0x120B
+#define USB_VENDOR_ID_AMI 0x046b
+#define USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE 0xff10
+
#define USB_VENDOR_ID_ANTON 0x1130
#define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c
index c5c5fbe9d605..52026dc94d5c 100644
--- a/drivers/hid/hid-lg.c
+++ b/drivers/hid/hid-lg.c
@@ -872,7 +872,7 @@ static const struct hid_device_id lg_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG),
.driver_data = LG_NOGET | LG_FF4 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2),
- .driver_data = LG_FF2 },
+ .driver_data = LG_NOGET | LG_FF2 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FLIGHT_SYSTEM_G940),
.driver_data = LG_FF3 },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR),
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index e9d6cc7cdfc5..30a2977e2645 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -57,6 +57,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_AKAI, USB_DEVICE_ID_AKAI_MPKMINI2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_AKAI_09E8, USB_DEVICE_ID_AKAI_09E8_MIDIMIX, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_AMI, USB_DEVICE_ID_AMI_VIRT_KEYBOARD_AND_MOUSE, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 0884dc9554fd..672145b0d8f5 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -166,19 +166,21 @@ static int wacom_pl_irq(struct wacom_wac *wacom)
wacom->id[0] = STYLUS_DEVICE_ID;
}
- pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
- if (features->pressure_max > 255)
- pressure = (pressure << 1) | ((data[4] >> 6) & 1);
- pressure += (features->pressure_max + 1) / 2;
-
- input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
- input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
- input_report_abs(input, ABS_PRESSURE, pressure);
-
- input_report_key(input, BTN_TOUCH, data[4] & 0x08);
- input_report_key(input, BTN_STYLUS, data[4] & 0x10);
- /* Only allow the stylus2 button to be reported for the pen tool. */
- input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
+ if (prox) {
+ pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1));
+ if (features->pressure_max > 255)
+ pressure = (pressure << 1) | ((data[4] >> 6) & 1);
+ pressure += (features->pressure_max + 1) / 2;
+
+ input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14));
+ input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14));
+ input_report_abs(input, ABS_PRESSURE, pressure);
+
+ input_report_key(input, BTN_TOUCH, data[4] & 0x08);
+ input_report_key(input, BTN_STYLUS, data[4] & 0x10);
+ /* Only allow the stylus2 button to be reported for the pen tool. */
+ input_report_key(input, BTN_STYLUS2, (wacom->tool[0] == BTN_TOOL_PEN) && (data[4] & 0x20));
+ }
if (!prox)
wacom->id[0] = 0;
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index cd49cb17eb7f..308dbda700eb 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -383,6 +383,7 @@ int hv_ringbuffer_read(struct vmbus_channel *channel,
return ret;
}
+ init_cached_read_index(channel);
next_read_location = hv_get_next_read_location(inring_info);
next_read_location = hv_copyfrom_ringbuffer(inring_info, &desc,
sizeof(desc),
diff --git a/drivers/iio/adc/palmas_gpadc.c b/drivers/iio/adc/palmas_gpadc.c
index 2bbf0c521beb..7d61b566e148 100644
--- a/drivers/iio/adc/palmas_gpadc.c
+++ b/drivers/iio/adc/palmas_gpadc.c
@@ -775,7 +775,7 @@ static int palmas_adc_wakeup_reset(struct palmas_gpadc *adc)
static int palmas_gpadc_suspend(struct device *dev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct palmas_gpadc *adc = iio_priv(indio_dev);
int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
int ret;
@@ -798,7 +798,7 @@ static int palmas_gpadc_suspend(struct device *dev)
static int palmas_gpadc_resume(struct device *dev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct palmas_gpadc *adc = iio_priv(indio_dev);
int wakeup = adc->wakeup1_enable || adc->wakeup2_enable;
int ret;
diff --git a/drivers/iio/health/afe4403.c b/drivers/iio/health/afe4403.c
index 9a081465c42f..6bb23a49e81e 100644
--- a/drivers/iio/health/afe4403.c
+++ b/drivers/iio/health/afe4403.c
@@ -422,7 +422,7 @@ MODULE_DEVICE_TABLE(of, afe4403_of_match);
static int __maybe_unused afe4403_suspend(struct device *dev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
struct afe4403_data *afe = iio_priv(indio_dev);
int ret;
@@ -443,7 +443,7 @@ static int __maybe_unused afe4403_suspend(struct device *dev)
static int __maybe_unused afe4403_resume(struct device *dev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev *indio_dev = spi_get_drvdata(to_spi_device(dev));
struct afe4403_data *afe = iio_priv(indio_dev);
int ret;
diff --git a/drivers/iio/health/afe4404.c b/drivers/iio/health/afe4404.c
index 45266404f7e3..964f5231a831 100644
--- a/drivers/iio/health/afe4404.c
+++ b/drivers/iio/health/afe4404.c
@@ -428,7 +428,7 @@ MODULE_DEVICE_TABLE(of, afe4404_of_match);
static int __maybe_unused afe4404_suspend(struct device *dev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct afe4404_data *afe = iio_priv(indio_dev);
int ret;
@@ -449,7 +449,7 @@ static int __maybe_unused afe4404_suspend(struct device *dev)
static int __maybe_unused afe4404_resume(struct device *dev)
{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
struct afe4404_data *afe = iio_priv(indio_dev);
int ret;
diff --git a/drivers/iio/health/max30100.c b/drivers/iio/health/max30100.c
index 90ab8a2d2846..183c14329d6e 100644
--- a/drivers/iio/health/max30100.c
+++ b/drivers/iio/health/max30100.c
@@ -238,7 +238,7 @@ static irqreturn_t max30100_interrupt_handler(int irq, void *private)
mutex_lock(&data->lock);
- while (cnt || (cnt = max30100_fifo_count(data) > 0)) {
+ while (cnt || (cnt = max30100_fifo_count(data)) > 0) {
ret = max30100_read_measurement(data);
if (ret)
break;
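
The max30100 change is a one-character operator-precedence fix: '>' binds tighter than '=', so the old code assigned the boolean result of the comparison to cnt instead of the FIFO count. A standalone illustration (fifo_count() is a hypothetical stand-in for max30100_fifo_count()):

#include <stdio.h>

/* Hypothetical stand-in: pretend three samples are queued. */
static int fifo_count(void) { return 3; }

int main(void)
{
	int buggy, fixed;

	buggy = fifo_count() > 0;	/* comparison first: buggy == 1 */
	fixed = fifo_count();		/* assignment only:  fixed == 3 */

	printf("buggy cnt = %d, fixed cnt = %d\n", buggy, fixed);
	return 0;
}

With the added parentheses, the loop condition reads (cnt = max30100_fifo_count()) > 0, so cnt holds the real sample count and the interrupt handler drains the whole FIFO.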
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index 9c47bc98f3ac..2a22ad920333 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -71,7 +71,8 @@
* a) select an implementation using busy loop polling on those systems
* b) use the checksum to do some probabilistic decoding
*/
-#define DHT11_START_TRANSMISSION 18 /* ms */
+#define DHT11_START_TRANSMISSION_MIN 18000 /* us */
+#define DHT11_START_TRANSMISSION_MAX 20000 /* us */
#define DHT11_MIN_TIMERES 34000 /* ns */
#define DHT11_THRESHOLD 49000 /* ns */
#define DHT11_AMBIG_LOW 23000 /* ns */
@@ -228,7 +229,8 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
ret = gpio_direction_output(dht11->gpio, 0);
if (ret)
goto err;
- msleep(DHT11_START_TRANSMISSION);
+ usleep_range(DHT11_START_TRANSMISSION_MIN,
+ DHT11_START_TRANSMISSION_MAX);
ret = gpio_direction_input(dht11->gpio);
if (ret)
goto err;
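
msleep() is jiffies-based and routinely oversleeps short delays, while usleep_range() is hrtimer-based and lets the scheduler coalesce wakeups inside the given window; the DHT11 start pulse needs at least 18 ms but should not stretch much beyond that. A sketch of the pattern, assuming kernel context (the constant names mirror the patch):

#include <linux/delay.h>

#define DHT11_START_TRANSMISSION_MIN	18000	/* us, datasheet minimum */
#define DHT11_START_TRANSMISSION_MAX	20000	/* us, slack for coalescing */

static void dht11_start_pulse_delay(void)
{
	/*
	 * For delays up to ~20 ms, prefer usleep_range() over msleep():
	 * msleep() rounds up to jiffies and can sleep well past the
	 * requested time, especially on HZ=100 systems.
	 */
	usleep_range(DHT11_START_TRANSMISSION_MIN,
		     DHT11_START_TRANSMISSION_MAX);
}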
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 11447ab1055c..bf5c36e229ba 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -901,7 +901,7 @@ void rmi_enable_irq(struct rmi_device *rmi_dev, bool clear_wake)
data->enabled = true;
if (clear_wake && device_may_wakeup(rmi_dev->xport->dev)) {
retval = disable_irq_wake(irq);
- if (!retval)
+ if (retval)
dev_warn(&rmi_dev->dev,
"Failed to disable irq for wake: %d\n",
retval);
@@ -936,7 +936,7 @@ void rmi_disable_irq(struct rmi_device *rmi_dev, bool enable_wake)
disable_irq(irq);
if (enable_wake && device_may_wakeup(rmi_dev->xport->dev)) {
retval = enable_irq_wake(irq);
- if (!retval)
+ if (retval)
dev_warn(&rmi_dev->dev,
"Failed to enable irq for wake: %d\n",
retval);
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index 83cf11312fd9..c9d1c91e1887 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -682,7 +682,7 @@ static int wm97xx_probe(struct device *dev)
}
platform_set_drvdata(wm->battery_dev, wm);
wm->battery_dev->dev.parent = dev;
- wm->battery_dev->dev.platform_data = pdata->batt_pdata;
+ wm->battery_dev->dev.platform_data = pdata ? pdata->batt_pdata : NULL;
ret = platform_device_add(wm->battery_dev);
if (ret < 0)
goto batt_reg_err;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 8ee54d71c7eb..37e204f3d9be 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -352,9 +352,6 @@ config MTK_IOMMU_V1
select IOMMU_API
select MEMORY
select MTK_SMI
- select COMMON_CLK_MT2701_MMSYS
- select COMMON_CLK_MT2701_IMGSYS
- select COMMON_CLK_MT2701_VDECSYS
help
Support for the M4U on certain Mediatek SoCs. M4U generation 1 HW is
 Multimedia Memory Management Unit. This option enables remapping of
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 3ef0f42984f2..1b5b8c5361c5 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -112,7 +112,7 @@ static struct timer_list queue_timer;
* Domain for untranslated devices - only allocated
* if iommu=pt passed on kernel cmd line.
*/
-static const struct iommu_ops amd_iommu_ops;
+const struct iommu_ops amd_iommu_ops;
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;
@@ -445,6 +445,7 @@ static void init_iommu_group(struct device *dev)
static int iommu_init_device(struct device *dev)
{
struct iommu_dev_data *dev_data;
+ struct amd_iommu *iommu;
int devid;
if (dev->archdata.iommu)
@@ -454,6 +455,8 @@ static int iommu_init_device(struct device *dev)
if (devid < 0)
return devid;
+ iommu = amd_iommu_rlookup_table[devid];
+
dev_data = find_dev_data(devid);
if (!dev_data)
return -ENOMEM;
@@ -469,8 +472,7 @@ static int iommu_init_device(struct device *dev)
dev->archdata.iommu = dev_data;
- iommu_device_link(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
- dev);
+ iommu_device_link(&iommu->iommu, dev);
return 0;
}
@@ -495,13 +497,16 @@ static void iommu_ignore_device(struct device *dev)
static void iommu_uninit_device(struct device *dev)
{
- int devid;
struct iommu_dev_data *dev_data;
+ struct amd_iommu *iommu;
+ int devid;
devid = get_device_id(dev);
if (devid < 0)
return;
+ iommu = amd_iommu_rlookup_table[devid];
+
dev_data = search_dev_data(devid);
if (!dev_data)
return;
@@ -509,8 +514,7 @@ static void iommu_uninit_device(struct device *dev)
if (dev_data->domain)
detach_device(dev);
- iommu_device_unlink(amd_iommu_rlookup_table[dev_data->devid]->iommu_dev,
- dev);
+ iommu_device_unlink(&iommu->iommu, dev);
iommu_group_remove_device(dev);
@@ -3161,9 +3165,10 @@ static bool amd_iommu_capable(enum iommu_cap cap)
return false;
}
-static void amd_iommu_get_dm_regions(struct device *dev,
- struct list_head *head)
+static void amd_iommu_get_resv_regions(struct device *dev,
+ struct list_head *head)
{
+ struct iommu_resv_region *region;
struct unity_map_entry *entry;
int devid;
@@ -3172,41 +3177,56 @@ static void amd_iommu_get_dm_regions(struct device *dev,
return;
list_for_each_entry(entry, &amd_iommu_unity_map, list) {
- struct iommu_dm_region *region;
+ size_t length;
+ int prot = 0;
if (devid < entry->devid_start || devid > entry->devid_end)
continue;
- region = kzalloc(sizeof(*region), GFP_KERNEL);
+ length = entry->address_end - entry->address_start;
+ if (entry->prot & IOMMU_PROT_IR)
+ prot |= IOMMU_READ;
+ if (entry->prot & IOMMU_PROT_IW)
+ prot |= IOMMU_WRITE;
+
+ region = iommu_alloc_resv_region(entry->address_start,
+ length, prot,
+ IOMMU_RESV_DIRECT);
if (!region) {
pr_err("Out of memory allocating dm-regions for %s\n",
dev_name(dev));
return;
}
-
- region->start = entry->address_start;
- region->length = entry->address_end - entry->address_start;
- if (entry->prot & IOMMU_PROT_IR)
- region->prot |= IOMMU_READ;
- if (entry->prot & IOMMU_PROT_IW)
- region->prot |= IOMMU_WRITE;
-
list_add_tail(&region->list, head);
}
+
+ region = iommu_alloc_resv_region(MSI_RANGE_START,
+ MSI_RANGE_END - MSI_RANGE_START + 1,
+ 0, IOMMU_RESV_RESERVED);
+ if (!region)
+ return;
+ list_add_tail(&region->list, head);
+
+ region = iommu_alloc_resv_region(HT_RANGE_START,
+ HT_RANGE_END - HT_RANGE_START + 1,
+ 0, IOMMU_RESV_RESERVED);
+ if (!region)
+ return;
+ list_add_tail(&region->list, head);
}
-static void amd_iommu_put_dm_regions(struct device *dev,
+static void amd_iommu_put_resv_regions(struct device *dev,
struct list_head *head)
{
- struct iommu_dm_region *entry, *next;
+ struct iommu_resv_region *entry, *next;
list_for_each_entry_safe(entry, next, head, list)
kfree(entry);
}
-static void amd_iommu_apply_dm_region(struct device *dev,
+static void amd_iommu_apply_resv_region(struct device *dev,
struct iommu_domain *domain,
- struct iommu_dm_region *region)
+ struct iommu_resv_region *region)
{
struct dma_ops_domain *dma_dom = to_dma_ops_domain(to_pdomain(domain));
unsigned long start, end;
@@ -3217,7 +3237,7 @@ static void amd_iommu_apply_dm_region(struct device *dev,
WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
}
-static const struct iommu_ops amd_iommu_ops = {
+const struct iommu_ops amd_iommu_ops = {
.capable = amd_iommu_capable,
.domain_alloc = amd_iommu_domain_alloc,
.domain_free = amd_iommu_domain_free,
@@ -3230,9 +3250,9 @@ static const struct iommu_ops amd_iommu_ops = {
.add_device = amd_iommu_add_device,
.remove_device = amd_iommu_remove_device,
.device_group = amd_iommu_device_group,
- .get_dm_regions = amd_iommu_get_dm_regions,
- .put_dm_regions = amd_iommu_put_dm_regions,
- .apply_dm_region = amd_iommu_apply_dm_region,
+ .get_resv_regions = amd_iommu_get_resv_regions,
+ .put_resv_regions = amd_iommu_put_resv_regions,
+ .apply_resv_region = amd_iommu_apply_resv_region,
.pgsize_bitmap = AMD_IOMMU_PGSIZES,
};
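
With the hunks above, AMD's unity-map and MSI/HT ranges are published through the generic iommu_resv_region API rather than the AMD-only dm-region callbacks. A hedged consumer-side sketch, assuming the iommu_get_resv_regions()/iommu_put_resv_regions() wrappers introduced alongside this series:

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/list.h>

/* Sketch: walk a device's reserved regions via the generic API. */
static void dump_resv_regions(struct device *dev)
{
	struct iommu_resv_region *region;
	LIST_HEAD(head);

	iommu_get_resv_regions(dev, &head);	/* ->get_resv_regions() */

	list_for_each_entry(region, &head, list)
		dev_info(dev, "resv [%pa + %zu] prot 0x%x type %d\n",
			 &region->start, region->length,
			 region->prot, region->type);

	iommu_put_resv_regions(dev, &head);	/* ->put_resv_regions() */
}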
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 6799cf9713f7..04cdac7ab3e3 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -94,6 +94,8 @@
* out of it.
*/
+extern const struct iommu_ops amd_iommu_ops;
+
/*
* structure describing one IOMMU in the ACPI table. Typically followed by one
* or more ivhd_entrys.
@@ -1635,9 +1637,10 @@ static int iommu_init_pci(struct amd_iommu *iommu)
amd_iommu_erratum_746_workaround(iommu);
amd_iommu_ats_write_check_workaround(iommu);
- iommu->iommu_dev = iommu_device_create(&iommu->dev->dev, iommu,
- amd_iommu_groups, "ivhd%d",
- iommu->index);
+ iommu_device_sysfs_add(&iommu->iommu, &iommu->dev->dev,
+ amd_iommu_groups, "ivhd%d", iommu->index);
+ iommu_device_set_ops(&iommu->iommu, &amd_iommu_ops);
+ iommu_device_register(&iommu->iommu);
return pci_enable_device(iommu->dev);
}
@@ -2230,7 +2233,7 @@ static int __init early_amd_iommu_init(void)
*/
ret = check_ivrs_checksum(ivrs_base);
if (ret)
- return ret;
+ goto out;
amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 0d91785ebdc3..af00f381a7b1 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -535,8 +535,8 @@ struct amd_iommu {
/* if one, we need to send a completion wait command */
bool need_sync;
- /* IOMMU sysfs device */
- struct device *iommu_dev;
+ /* Handle for IOMMU core code */
+ struct iommu_device iommu;
/*
* We can't rely on the BIOS to restore all values on reinit, so we
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 4d6ec444a9d6..5806a6acc94e 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -269,9 +269,6 @@
#define STRTAB_STE_1_SHCFG_INCOMING 1UL
#define STRTAB_STE_1_SHCFG_SHIFT 44
-#define STRTAB_STE_1_PRIVCFG_UNPRIV 2UL
-#define STRTAB_STE_1_PRIVCFG_SHIFT 48
-
#define STRTAB_STE_2_S2VMID_SHIFT 0
#define STRTAB_STE_2_S2VMID_MASK 0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT 32
@@ -412,6 +409,9 @@
/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US 100
+#define MSI_IOVA_BASE 0x8000000
+#define MSI_IOVA_LENGTH 0x100000
+
static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
@@ -616,6 +616,9 @@ struct arm_smmu_device {
unsigned int sid_bits;
struct arm_smmu_strtab_cfg strtab_cfg;
+
+ /* IOMMU core code handle */
+ struct iommu_device iommu;
};
/* SMMU private data for each master */
@@ -1042,13 +1045,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
}
}
- /* Nuke the existing Config, as we're going to rewrite it */
- val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);
-
- if (ste->valid)
- val |= STRTAB_STE_0_V;
- else
- val &= ~STRTAB_STE_0_V;
+ /* Nuke the existing STE_0 value, as we're going to rewrite it */
+ val = ste->valid ? STRTAB_STE_0_V : 0;
if (ste->bypass) {
val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
@@ -1073,9 +1071,7 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
#ifdef CONFIG_PCI_ATS
STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
- STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT |
- STRTAB_STE_1_PRIVCFG_UNPRIV <<
- STRTAB_STE_1_PRIVCFG_SHIFT);
+ STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT);
if (smmu->features & ARM_SMMU_FEAT_STALLS)
dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);
@@ -1083,7 +1079,6 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
<< STRTAB_STE_0_S1CTXPTR_SHIFT) |
STRTAB_STE_0_CFG_S1_TRANS;
-
}
if (ste->s2_cfg) {
@@ -1372,8 +1367,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
switch (cap) {
case IOMMU_CAP_CACHE_COHERENCY:
return true;
- case IOMMU_CAP_INTR_REMAP:
- return true; /* MSIs are just memory writes */
case IOMMU_CAP_NOEXEC:
return true;
default:
@@ -1795,8 +1788,10 @@ static int arm_smmu_add_device(struct device *dev)
}
group = iommu_group_get_for_dev(dev);
- if (!IS_ERR(group))
+ if (!IS_ERR(group)) {
iommu_group_put(group);
+ iommu_device_link(&smmu->iommu, dev);
+ }
return PTR_ERR_OR_ZERO(group);
}
@@ -1805,14 +1800,17 @@ static void arm_smmu_remove_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
struct arm_smmu_master_data *master;
+ struct arm_smmu_device *smmu;
if (!fwspec || fwspec->ops != &arm_smmu_ops)
return;
master = fwspec->iommu_priv;
+ smmu = master->smmu;
if (master && master->ste.valid)
arm_smmu_detach_dev(dev);
iommu_group_remove_device(dev);
+ iommu_device_unlink(&smmu->iommu, dev);
kfree(master);
iommu_fwspec_free(dev);
}
@@ -1883,6 +1881,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
return iommu_fwspec_add_ids(dev, args->args, 1);
}
+static void arm_smmu_get_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_resv_region *region;
+ int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+ region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
+ prot, IOMMU_RESV_MSI);
+ if (!region)
+ return;
+
+ list_add_tail(&region->list, head);
+}
+
+static void arm_smmu_put_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_resv_region *entry, *next;
+
+ list_for_each_entry_safe(entry, next, head, list)
+ kfree(entry);
+}
+
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@@ -1898,6 +1919,8 @@ static struct iommu_ops arm_smmu_ops = {
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
.of_xlate = arm_smmu_of_xlate,
+ .get_resv_regions = arm_smmu_get_resv_regions,
+ .put_resv_regions = arm_smmu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
@@ -1983,17 +2006,9 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
u32 size, l1size;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
- /*
- * If we can resolve everything with a single L2 table, then we
- * just need a single L1 descriptor. Otherwise, calculate the L1
- * size, capped to the SIDSIZE.
- */
- if (smmu->sid_bits < STRTAB_SPLIT) {
- size = 0;
- } else {
- size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
- size = min(size, smmu->sid_bits - STRTAB_SPLIT);
- }
+ /* Calculate the L1 size, capped to the SIDSIZE. */
+ size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
+ size = min(size, smmu->sid_bits - STRTAB_SPLIT);
cfg->num_l1_ents = 1 << size;
size += STRTAB_SPLIT;
@@ -2504,6 +2519,13 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;
+ /*
+ * If the SMMU supports fewer bits than would fill a single L2 stream
+ * table, use a linear table instead.
+ */
+ if (smmu->sid_bits <= STRTAB_SPLIT)
+ smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;
+
/* IDR5 */
reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
@@ -2613,6 +2635,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
{
int irq, ret;
struct resource *res;
+ resource_size_t ioaddr;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
bool bypass;
@@ -2630,6 +2653,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
dev_err(dev, "MMIO region too small (%pr)\n", res);
return -EINVAL;
}
+ ioaddr = res->start;
smmu->base = devm_ioremap_resource(dev, res);
if (IS_ERR(smmu->base))
@@ -2682,7 +2706,15 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
return ret;
/* And we're up. Go go go! */
- iommu_register_instance(dev->fwnode, &arm_smmu_ops);
+ ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
+ "smmu3.%pa", &ioaddr);
+ if (ret)
+ return ret;
+
+ iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
+ iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
+
+ ret = iommu_device_register(&smmu->iommu);
#ifdef CONFIG_PCI
if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
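
The probe path now registers with the IOMMU core in three steps: create the sysfs node, attach ops and fwnode, then register the instance. A condensed sketch of that sequence with unwinding on failure (the helper name and the sysfs unwind are illustrative; the called functions come from this series):

static int arm_smmu_register_iommu(struct arm_smmu_device *smmu,
				   struct device *dev,
				   resource_size_t ioaddr)
{
	int ret;

	/* 1) create /sys/class/iommu/smmu3.<mmio-address> */
	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
				     "smmu3.%pa", &ioaddr);
	if (ret)
		return ret;

	/* 2) attach ops plus the fwnode used for of_xlate() matching */
	iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	/* 3) make the instance visible to the IOMMU core */
	ret = iommu_device_register(&smmu->iommu);
	if (ret)
		iommu_device_sysfs_remove(&smmu->iommu);

	return ret;
}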
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index a60cded8a6ed..abf6496843a6 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -24,6 +24,7 @@
* - v7/v8 long-descriptor format
* - Non-secure access to the SMMU
* - Context fault reporting
+ * - Extended Stream ID (16 bit)
*/
#define pr_fmt(fmt) "arm-smmu: " fmt
@@ -87,6 +88,7 @@
#define sCR0_CLIENTPD (1 << 0)
#define sCR0_GFRE (1 << 1)
#define sCR0_GFIE (1 << 2)
+#define sCR0_EXIDENABLE (1 << 3)
#define sCR0_GCFGFRE (1 << 4)
#define sCR0_GCFGFIE (1 << 5)
#define sCR0_USFCFG (1 << 10)
@@ -126,6 +128,7 @@
#define ID0_NUMIRPT_MASK 0xff
#define ID0_NUMSIDB_SHIFT 9
#define ID0_NUMSIDB_MASK 0xf
+#define ID0_EXIDS (1 << 8)
#define ID0_NUMSMRG_SHIFT 0
#define ID0_NUMSMRG_MASK 0xff
@@ -169,6 +172,7 @@
#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT 0
#define S2CR_CBNDX_MASK 0xff
+#define S2CR_EXIDVALID (1 << 10)
#define S2CR_TYPE_SHIFT 16
#define S2CR_TYPE_MASK 0x3
enum arm_smmu_s2cr_type {
@@ -260,6 +264,7 @@ enum arm_smmu_s2cr_privcfg {
#define TTBCR2_SEP_SHIFT 15
#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
+#define TTBCR2_AS (1 << 4)
#define TTBRn_ASID_SHIFT 48
@@ -281,6 +286,9 @@ enum arm_smmu_s2cr_privcfg {
#define FSYNR0_WNR (1 << 4)
+#define MSI_IOVA_BASE 0x8000000
+#define MSI_IOVA_LENGTH 0x100000
+
static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
@@ -351,6 +359,7 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
+#define ARM_SMMU_FEAT_EXIDS (1 << 12)
u32 features;
#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
@@ -380,6 +389,9 @@ struct arm_smmu_device {
unsigned int *irqs;
u32 cavium_id_base; /* Specific to Cavium */
+
+ /* IOMMU core code handle */
+ struct iommu_device iommu;
};
enum arm_smmu_context_fmt {
@@ -778,6 +790,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
reg2 |= TTBCR2_SEP_UPSTREAM;
+ if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
+ reg2 |= TTBCR2_AS;
}
if (smmu->version > ARM_SMMU_V1)
writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
@@ -1048,7 +1062,7 @@ static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
struct arm_smmu_smr *smr = smmu->smrs + idx;
u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
- if (smr->valid)
+ if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
reg |= SMR_VALID;
writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}
@@ -1060,6 +1074,9 @@ static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
(s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
(s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
+ if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
+ smmu->smrs[idx].valid)
+ reg |= S2CR_EXIDVALID;
writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}
@@ -1070,6 +1087,34 @@ static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
arm_smmu_write_smr(smmu, idx);
}
+/*
+ * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
+ * should be called after sCR0 is written.
+ */
+static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
+{
+ void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+ u32 smr;
+
+ if (!smmu->smrs)
+ return;
+
+ /*
+ * SMR.ID bits may not be preserved if the corresponding MASK
+ * bits are set, so check each one separately. We can reject
+ * masters later if they try to claim IDs outside these masks.
+ */
+ smr = smmu->streamid_mask << SMR_ID_SHIFT;
+ writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+ smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+ smmu->streamid_mask = smr >> SMR_ID_SHIFT;
+
+ smr = smmu->streamid_mask << SMR_MASK_SHIFT;
+ writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+ smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
+ smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
+}
+
static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
struct arm_smmu_smr *smrs = smmu->smrs;
@@ -1214,7 +1259,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
continue;
s2cr[idx].type = type;
- s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
+ s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
s2cr[idx].cbndx = cbndx;
arm_smmu_write_s2cr(smmu, idx);
}
@@ -1371,8 +1416,6 @@ static bool arm_smmu_capable(enum iommu_cap cap)
* requests.
*/
return true;
- case IOMMU_CAP_INTR_REMAP:
- return true; /* MSIs are just memory writes */
case IOMMU_CAP_NOEXEC:
return true;
default:
@@ -1444,6 +1487,8 @@ static int arm_smmu_add_device(struct device *dev)
if (ret)
goto out_free;
+ iommu_device_link(&smmu->iommu, dev);
+
return 0;
out_free:
@@ -1456,10 +1501,17 @@ out_free:
static void arm_smmu_remove_device(struct device *dev)
{
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
+ struct arm_smmu_master_cfg *cfg;
+ struct arm_smmu_device *smmu;
+
if (!fwspec || fwspec->ops != &arm_smmu_ops)
return;
+ cfg = fwspec->iommu_priv;
+ smmu = cfg->smmu;
+
+ iommu_device_unlink(&smmu->iommu, dev);
arm_smmu_master_free_smes(fwspec);
iommu_group_remove_device(dev);
kfree(fwspec->iommu_priv);
@@ -1549,6 +1601,29 @@ static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
return iommu_fwspec_add_ids(dev, &fwid, 1);
}
+static void arm_smmu_get_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_resv_region *region;
+ int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+
+ region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
+ prot, IOMMU_RESV_MSI);
+ if (!region)
+ return;
+
+ list_add_tail(&region->list, head);
+}
+
+static void arm_smmu_put_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_resv_region *entry, *next;
+
+ list_for_each_entry_safe(entry, next, head, list)
+ kfree(entry);
+}
+
static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
@@ -1564,6 +1639,8 @@ static struct iommu_ops arm_smmu_ops = {
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
.of_xlate = arm_smmu_of_xlate,
+ .get_resv_regions = arm_smmu_get_resv_regions,
+ .put_resv_regions = arm_smmu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during device attach */
};
@@ -1648,6 +1725,9 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
if (smmu->features & ARM_SMMU_FEAT_VMID16)
reg |= sCR0_VMID16EN;
+ if (smmu->features & ARM_SMMU_FEAT_EXIDS)
+ reg |= sCR0_EXIDENABLE;
+
/* Push the button */
__arm_smmu_tlb_sync(smmu);
writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
@@ -1735,11 +1815,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
"\t(IDR0.CTTW overridden by FW configuration)\n");
/* Max. number of entries we have for stream matching/indexing */
- size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
+ if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
+ smmu->features |= ARM_SMMU_FEAT_EXIDS;
+ size = 1 << 16;
+ } else {
+ size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
+ }
smmu->streamid_mask = size - 1;
if (id & ID0_SMS) {
- u32 smr;
-
smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
if (size == 0) {
@@ -1748,21 +1831,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return -ENODEV;
}
- /*
- * SMR.ID bits may not be preserved if the corresponding MASK
- * bits are set, so check each one separately. We can reject
- * masters later if they try to claim IDs outside these masks.
- */
- smr = smmu->streamid_mask << SMR_ID_SHIFT;
- writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
- smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
- smmu->streamid_mask = smr >> SMR_ID_SHIFT;
-
- smr = smmu->streamid_mask << SMR_MASK_SHIFT;
- writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
- smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
- smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
-
/* Zero-initialised to mark as invalid */
smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
GFP_KERNEL);
@@ -1770,8 +1838,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
return -ENOMEM;
dev_notice(smmu->dev,
- "\tstream matching with %lu register groups, mask 0x%x",
- size, smmu->smr_mask_mask);
+ "\tstream matching with %lu register groups", size);
}
/* s2cr->type == 0 means translation, so initialise explicitly */
smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
@@ -2011,6 +2078,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
static int arm_smmu_device_probe(struct platform_device *pdev)
{
struct resource *res;
+ resource_size_t ioaddr;
struct arm_smmu_device *smmu;
struct device *dev = &pdev->dev;
int num_irqs, i, err;
@@ -2031,6 +2099,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
return err;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ioaddr = res->start;
smmu->base = devm_ioremap_resource(dev, res);
if (IS_ERR(smmu->base))
return PTR_ERR(smmu->base);
@@ -2091,9 +2160,25 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
}
}
- iommu_register_instance(dev->fwnode, &arm_smmu_ops);
+ err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
+ "smmu.%pa", &ioaddr);
+ if (err) {
+ dev_err(dev, "Failed to register iommu in sysfs\n");
+ return err;
+ }
+
+ iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
+ iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
+
+ err = iommu_device_register(&smmu->iommu);
+ if (err) {
+ dev_err(dev, "Failed to register iommu\n");
+ return err;
+ }
+
platform_set_drvdata(pdev, smmu);
arm_smmu_device_reset(smmu);
+ arm_smmu_test_smr_masks(smmu);
/* Oh, for a proper bus abstraction */
if (!iommu_present(&platform_bus_type))
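
arm_smmu_test_smr_masks() discovers which SMR ID and MASK bits are actually implemented by writing all-ones patterns and reading back what sticks; because sCR0_EXIDENABLE changes those field widths, the probe has to run after arm_smmu_device_reset(), which is exactly where the hunks above move it. A generic write/readback sketch (probe_field_mask() is an illustrative helper, not a driver symbol):

#include <linux/io.h>
#include <linux/types.h>

/* Probe the implemented bits of a register field at the given shift. */
static u32 probe_field_mask(void __iomem *reg, u32 all_ones, int shift)
{
	u32 val;

	writel_relaxed(all_ones << shift, reg);	/* write test pattern */
	val = readl_relaxed(reg);		/* read back what stuck */

	return val >> shift;			/* implemented bits only */
}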
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 2db0d641cf45..48d36ce59efb 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -37,15 +37,50 @@ struct iommu_dma_msi_page {
phys_addr_t phys;
};
+enum iommu_dma_cookie_type {
+ IOMMU_DMA_IOVA_COOKIE,
+ IOMMU_DMA_MSI_COOKIE,
+};
+
struct iommu_dma_cookie {
- struct iova_domain iovad;
- struct list_head msi_page_list;
- spinlock_t msi_lock;
+ enum iommu_dma_cookie_type type;
+ union {
+ /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
+ struct iova_domain iovad;
+ /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
+ dma_addr_t msi_iova;
+ };
+ struct list_head msi_page_list;
+ spinlock_t msi_lock;
};
+static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
+{
+ if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
+ return cookie->iovad.granule;
+ return PAGE_SIZE;
+}
+
static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
{
- return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+
+ if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
+ return &cookie->iovad;
+ return NULL;
+}
+
+static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
+{
+ struct iommu_dma_cookie *cookie;
+
+ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ if (cookie) {
+ spin_lock_init(&cookie->msi_lock);
+ INIT_LIST_HEAD(&cookie->msi_page_list);
+ cookie->type = type;
+ }
+ return cookie;
}
int iommu_dma_init(void)
@@ -62,25 +97,53 @@ int iommu_dma_init(void)
*/
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
+ if (domain->iova_cookie)
+ return -EEXIST;
+
+ domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
+ if (!domain->iova_cookie)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL(iommu_get_dma_cookie);
+
+/**
+ * iommu_get_msi_cookie - Acquire just MSI remapping resources
+ * @domain: IOMMU domain to prepare
+ * @base: Start address of IOVA region for MSI mappings
+ *
+ * Users who manage their own IOVA allocation and do not want DMA API support,
+ * but would still like to take advantage of automatic MSI remapping, can use
+ * this to initialise their own domain appropriately. Users should reserve a
+ * contiguous IOVA region, starting at @base, large enough to accommodate the
+ * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
+ * used by the devices attached to @domain.
+ */
+int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
+{
struct iommu_dma_cookie *cookie;
+ if (domain->type != IOMMU_DOMAIN_UNMANAGED)
+ return -EINVAL;
+
if (domain->iova_cookie)
return -EEXIST;
- cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
if (!cookie)
return -ENOMEM;
- spin_lock_init(&cookie->msi_lock);
- INIT_LIST_HEAD(&cookie->msi_page_list);
+ cookie->msi_iova = base;
domain->iova_cookie = cookie;
return 0;
}
-EXPORT_SYMBOL(iommu_get_dma_cookie);
+EXPORT_SYMBOL(iommu_get_msi_cookie);
/**
* iommu_put_dma_cookie - Release a domain's DMA mapping resources
- * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
+ * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
+ * iommu_get_msi_cookie()
*
* IOMMU drivers should normally call this from their domain_free callback.
*/
@@ -92,7 +155,7 @@ void iommu_put_dma_cookie(struct iommu_domain *domain)
if (!cookie)
return;
- if (cookie->iovad.granule)
+ if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
put_iova_domain(&cookie->iovad);
list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
@@ -137,11 +200,13 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
u64 size, struct device *dev)
{
- struct iova_domain *iovad = cookie_iovad(domain);
+ struct iommu_dma_cookie *cookie = domain->iova_cookie;
+ struct iova_domain *iovad = &cookie->iovad;
unsigned long order, base_pfn, end_pfn;
+ bool pci = dev && dev_is_pci(dev);
- if (!iovad)
- return -ENODEV;
+ if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
+ return -EINVAL;
/* Use the smallest supported page size for IOVA granularity */
order = __ffs(domain->pgsize_bitmap);
@@ -161,19 +226,31 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
end_pfn = min_t(unsigned long, end_pfn,
domain->geometry.aperture_end >> order);
}
+ /*
+ * PCI devices may have larger DMA masks, but still prefer allocating
+ * within a 32-bit mask to avoid DAC addressing. Such limitations don't
+ * apply to the typical platform device, so for those we may as well
+ * leave the cache limit at the top of their range to save an rb_last()
+ * traversal on every allocation.
+ */
+ if (pci)
+ end_pfn &= DMA_BIT_MASK(32) >> order;
- /* All we can safely do with an existing domain is enlarge it */
+ /* start_pfn is always nonzero for an already-initialised domain */
if (iovad->start_pfn) {
if (1UL << order != iovad->granule ||
- base_pfn != iovad->start_pfn ||
- end_pfn < iovad->dma_32bit_pfn) {
+ base_pfn != iovad->start_pfn) {
pr_warn("Incompatible range for DMA domain\n");
return -EFAULT;
}
- iovad->dma_32bit_pfn = end_pfn;
+ /*
+ * If we have devices with different DMA masks, move the free
+ * area cache limit down for the benefit of the smaller one.
+ */
+ iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);
} else {
init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
- if (dev && dev_is_pci(dev))
+ if (pci)
iova_reserve_pci_windows(to_pci_dev(dev), iovad);
}
return 0;
@@ -181,16 +258,22 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
EXPORT_SYMBOL(iommu_dma_init_domain);
/**
- * dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags
+ * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
+ * page flags.
* @dir: Direction of DMA transfer
* @coherent: Is the DMA master cache-coherent?
+ * @attrs: DMA attributes for the mapping
*
* Return: corresponding IOMMU API page protection flags
*/
-int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
+int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
+ unsigned long attrs)
{
int prot = coherent ? IOMMU_CACHE : 0;
+ if (attrs & DMA_ATTR_PRIVILEGED)
+ prot |= IOMMU_PRIV;
+
switch (dir) {
case DMA_BIDIRECTIONAL:
return prot | IOMMU_READ | IOMMU_WRITE;
@@ -204,19 +287,28 @@ int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
}
static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
- dma_addr_t dma_limit)
+ dma_addr_t dma_limit, struct device *dev)
{
struct iova_domain *iovad = cookie_iovad(domain);
unsigned long shift = iova_shift(iovad);
unsigned long length = iova_align(iovad, size) >> shift;
+ struct iova *iova = NULL;
if (domain->geometry.force_aperture)
dma_limit = min(dma_limit, domain->geometry.aperture_end);
+
+ /* Try to get PCI devices a SAC address */
+ if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
+ iova = alloc_iova(iovad, length, DMA_BIT_MASK(32) >> shift,
+ true);
/*
* Enforce size-alignment to be safe - there could perhaps be an
* attribute to control this per-device, or at least per-domain...
*/
- return alloc_iova(iovad, length, dma_limit >> shift, true);
+ if (!iova)
+ iova = alloc_iova(iovad, length, dma_limit >> shift, true);
+
+ return iova;
}
/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
@@ -369,7 +461,7 @@ struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
if (!pages)
return NULL;
- iova = __alloc_iova(domain, size, dev->coherent_dma_mask);
+ iova = __alloc_iova(domain, size, dev->coherent_dma_mask, dev);
if (!iova)
goto out_free_pages;
@@ -440,7 +532,7 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
struct iova_domain *iovad = cookie_iovad(domain);
size_t iova_off = iova_offset(iovad, phys);
size_t len = iova_align(iovad, size + iova_off);
- struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));
+ struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev), dev);
if (!iova)
return DMA_ERROR_CODE;
@@ -598,7 +690,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
prev = s;
}
- iova = __alloc_iova(domain, iova_len, dma_get_mask(dev));
+ iova = __alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
if (!iova)
goto out_restore_sg;
@@ -633,7 +725,7 @@ dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_dma_map(dev, phys, size,
- dma_direction_to_prot(dir, false) | IOMMU_MMIO);
+ dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}
void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
@@ -642,16 +734,6 @@ void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}
-int iommu_dma_supported(struct device *dev, u64 mask)
-{
- /*
- * 'Special' IOMMUs which don't have the same addressing capability
- * as the CPU will have to wait until we have some way to query that
- * before they'll be able to use this framework.
- */
- return 1;
-}
-
int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr == DMA_ERROR_CODE;
@@ -662,11 +744,12 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iommu_dma_msi_page *msi_page;
- struct iova_domain *iovad = &cookie->iovad;
+ struct iova_domain *iovad = cookie_iovad(domain);
struct iova *iova;
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
+ size_t size = cookie_msi_granule(cookie);
- msi_addr &= ~(phys_addr_t)iova_mask(iovad);
+ msi_addr &= ~(phys_addr_t)(size - 1);
list_for_each_entry(msi_page, &cookie->msi_page_list, list)
if (msi_page->phys == msi_addr)
return msi_page;
@@ -675,13 +758,18 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
if (!msi_page)
return NULL;
- iova = __alloc_iova(domain, iovad->granule, dma_get_mask(dev));
- if (!iova)
- goto out_free_page;
-
msi_page->phys = msi_addr;
- msi_page->iova = iova_dma_addr(iovad, iova);
- if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
+ if (iovad) {
+ iova = __alloc_iova(domain, size, dma_get_mask(dev), dev);
+ if (!iova)
+ goto out_free_page;
+ msi_page->iova = iova_dma_addr(iovad, iova);
+ } else {
+ msi_page->iova = cookie->msi_iova;
+ cookie->msi_iova += size;
+ }
+
+ if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
goto out_free_iova;
INIT_LIST_HEAD(&msi_page->list);
@@ -689,7 +777,10 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
return msi_page;
out_free_iova:
- __free_iova(iovad, iova);
+ if (iovad)
+ __free_iova(iovad, iova);
+ else
+ cookie->msi_iova -= size;
out_free_page:
kfree(msi_page);
return NULL;
@@ -730,7 +821,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
msg->data = ~0U;
} else {
msg->address_hi = upper_32_bits(msi_page->iova);
- msg->address_lo &= iova_mask(&cookie->iovad);
+ msg->address_lo &= cookie_msi_granule(cookie) - 1;
msg->address_lo += lower_32_bits(msi_page->iova);
}
}
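
iommu_get_msi_cookie() gives an unmanaged domain a trivial linear allocator that only hands out IOVA space for MSI doorbell pages, leaving all other address management to the caller. A hedged sketch of a VFIO-style user, assuming an arbitrary example base address:

#include <linux/dma-iommu.h>
#include <linux/iommu.h>

#define EXAMPLE_MSI_BASE	0x8000000UL	/* platform-specific choice */

static int setup_msi_only_domain(struct iommu_domain *domain)
{
	int ret;

	/* Only caller-managed domains qualify. */
	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	ret = iommu_get_msi_cookie(domain, EXAMPLE_MSI_BASE);
	if (ret)
		return ret;

	/*
	 * MSI mappings now come from a bump allocator starting at
	 * EXAMPLE_MSI_BASE; the caller still owns the rest of the
	 * IOVA space and must keep that window free.
	 */
	return 0;
}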
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 8ccbd7023194..d9c0decfc91a 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -74,6 +74,8 @@ static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);
+extern const struct iommu_ops intel_iommu_ops;
+
static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
/*
@@ -1078,14 +1080,17 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
raw_spin_lock_init(&iommu->register_lock);
if (intel_iommu_enabled) {
- iommu->iommu_dev = iommu_device_create(NULL, iommu,
- intel_iommu_groups,
- "%s", iommu->name);
+ err = iommu_device_sysfs_add(&iommu->iommu, NULL,
+ intel_iommu_groups,
+ "%s", iommu->name);
+ if (err)
+ goto err_unmap;
- if (IS_ERR(iommu->iommu_dev)) {
- err = PTR_ERR(iommu->iommu_dev);
+ iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
+
+ err = iommu_device_register(&iommu->iommu);
+ if (err)
goto err_unmap;
- }
}
drhd->iommu = iommu;
@@ -1103,7 +1108,8 @@ error:
static void free_iommu(struct intel_iommu *iommu)
{
- iommu_device_destroy(iommu->iommu_dev);
+ iommu_device_sysfs_remove(&iommu->iommu);
+ iommu_device_unregister(&iommu->iommu);
if (iommu->irq) {
if (iommu->pr_irq) {
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 57ba0d3091ea..a7e0821c9967 100644
--- a/drivers/iommu/exynos-iommu.c
+++ b/drivers/iommu/exynos-iommu.c
@@ -276,6 +276,8 @@ struct sysmmu_drvdata {
struct list_head owner_node; /* node for owner controllers list */
phys_addr_t pgtable; /* assigned page table structure */
unsigned int version; /* our version */
+
+ struct iommu_device iommu; /* IOMMU core handle */
};
static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
@@ -381,13 +383,14 @@ static void show_fault_information(struct sysmmu_drvdata *data,
{
sysmmu_pte_t *ent;
- dev_err(data->sysmmu, "%s FAULT occurred at %#x (page table base: %pa)\n",
- finfo->name, fault_addr, &data->pgtable);
+ dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
+ dev_name(data->master), finfo->name, fault_addr);
+ dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
- dev_err(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
+ dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
if (lv1ent_page(ent)) {
ent = page_entry(ent, fault_addr);
- dev_err(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
+ dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
}
}
@@ -611,6 +614,18 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
data->sysmmu = dev;
spin_lock_init(&data->lock);
+ ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
+ dev_name(data->sysmmu));
+ if (ret)
+ return ret;
+
+ iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
+ iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);
+
+ ret = iommu_device_register(&data->iommu);
+ if (ret)
+ return ret;
+
platform_set_drvdata(pdev, data);
__sysmmu_get_version(data);
@@ -628,8 +643,6 @@ static int __init exynos_sysmmu_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
- of_iommu_set_ops(dev->of_node, &exynos_iommu_ops);
-
return 0;
}
@@ -743,6 +756,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
DMA_TO_DEVICE);
/* For mapping page table entries we rely on dma == phys */
BUG_ON(handle != virt_to_phys(domain->pgtable));
+ if (dma_mapping_error(dma_dev, handle))
+ goto err_lv2ent;
spin_lock_init(&domain->lock);
spin_lock_init(&domain->pgtablelock);
@@ -754,6 +769,8 @@ static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
return &domain->domain;
+err_lv2ent:
+ free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter:
free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie:
@@ -897,6 +914,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
}
if (lv1ent_fault(sent)) {
+ dma_addr_t handle;
sysmmu_pte_t *pent;
bool need_flush_flpd_cache = lv1ent_zero(sent);
@@ -908,7 +926,12 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
kmemleak_ignore(pent);
*pgcounter = NUM_LV2ENTRIES;
- dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE);
+ handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, handle)) {
+ kmem_cache_free(lv2table_kmem_cache, pent);
+ return ERR_PTR(-EADDRINUSE);
+ }
/*
* If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
@@ -1231,9 +1254,21 @@ static int exynos_iommu_add_device(struct device *dev)
static void exynos_iommu_remove_device(struct device *dev)
{
+ struct exynos_iommu_owner *owner = dev->archdata.iommu;
+
if (!has_sysmmu(dev))
return;
+ if (owner->domain) {
+ struct iommu_group *group = iommu_group_get(dev);
+
+ if (group) {
+ WARN_ON(owner->domain !=
+ iommu_group_default_domain(group));
+ exynos_iommu_detach_device(owner->domain, dev);
+ iommu_group_put(group);
+ }
+ }
iommu_group_remove_device(dev);
}
@@ -1242,7 +1277,7 @@ static int exynos_iommu_of_xlate(struct device *dev,
{
struct exynos_iommu_owner *owner = dev->archdata.iommu;
struct platform_device *sysmmu = of_find_device_by_node(spec->np);
- struct sysmmu_drvdata *data;
+ struct sysmmu_drvdata *data, *entry;
if (!sysmmu)
return -ENODEV;
@@ -1261,6 +1296,10 @@ static int exynos_iommu_of_xlate(struct device *dev,
dev->archdata.iommu = owner;
}
+ list_for_each_entry(entry, &owner->controllers, owner_node)
+ if (entry == data)
+ return 0;
+
list_add_tail(&data->owner_node, &owner->controllers);
data->master = dev;
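
The exynos hunks add the dma_mapping_error() checks that were missing after dma_map_single(); a returned handle must never be used before that check. A minimal sketch of the pattern (map_pgtable() is an illustrative name):

#include <linux/dma-mapping.h>

static int map_pgtable(struct device *dma_dev, void *table, size_t size)
{
	dma_addr_t handle;

	handle = dma_map_single(dma_dev, table, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, handle))
		return -ENOMEM;	/* mapping failed: handle is unusable */

	/* ... program the hardware with handle, unmap on teardown ... */
	return 0;
}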
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 5d179c8765c6..f5e02f8e7371 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -440,6 +440,7 @@ struct dmar_rmrr_unit {
u64 end_address; /* reserved end address */
struct dmar_dev_scope *devices; /* target devices */
int devices_cnt; /* target device count */
+ struct iommu_resv_region *resv; /* reserved region handle */
};
struct dmar_atsr_unit {
@@ -547,7 +548,7 @@ EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
static DEFINE_SPINLOCK(device_domain_lock);
static LIST_HEAD(device_domain_list);
-static const struct iommu_ops intel_iommu_ops;
+const struct iommu_ops intel_iommu_ops;
static bool translation_pre_enabled(struct intel_iommu *iommu)
{
@@ -4247,27 +4248,40 @@ static inline void init_iommu_pm_ops(void) {}
int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
{
struct acpi_dmar_reserved_memory *rmrr;
+ int prot = DMA_PTE_READ|DMA_PTE_WRITE;
struct dmar_rmrr_unit *rmrru;
+ size_t length;
rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL);
if (!rmrru)
- return -ENOMEM;
+ goto out;
rmrru->hdr = header;
rmrr = (struct acpi_dmar_reserved_memory *)header;
rmrru->base_address = rmrr->base_address;
rmrru->end_address = rmrr->end_address;
+
+ length = rmrr->end_address - rmrr->base_address + 1;
+ rmrru->resv = iommu_alloc_resv_region(rmrr->base_address, length, prot,
+ IOMMU_RESV_DIRECT);
+ if (!rmrru->resv)
+ goto free_rmrru;
+
rmrru->devices = dmar_alloc_dev_scope((void *)(rmrr + 1),
((void *)rmrr) + rmrr->header.length,
&rmrru->devices_cnt);
- if (rmrru->devices_cnt && rmrru->devices == NULL) {
- kfree(rmrru);
- return -ENOMEM;
- }
+ if (rmrru->devices_cnt && rmrru->devices == NULL)
+ goto free_all;
list_add(&rmrru->list, &dmar_rmrr_units);
return 0;
+free_all:
+ kfree(rmrru->resv);
+free_rmrru:
+ kfree(rmrru);
+out:
+ return -ENOMEM;
}
static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
@@ -4481,6 +4495,7 @@ static void intel_iommu_free_dmars(void)
list_for_each_entry_safe(rmrru, rmrr_n, &dmar_rmrr_units, list) {
list_del(&rmrru->list);
dmar_free_dev_scope(&rmrru->devices, &rmrru->devices_cnt);
+ kfree(rmrru->resv);
kfree(rmrru);
}
@@ -4854,10 +4869,13 @@ int __init intel_iommu_init(void)
init_iommu_pm_ops();
- for_each_active_iommu(iommu, drhd)
- iommu->iommu_dev = iommu_device_create(NULL, iommu,
- intel_iommu_groups,
- "%s", iommu->name);
+ for_each_active_iommu(iommu, drhd) {
+ iommu_device_sysfs_add(&iommu->iommu, NULL,
+ intel_iommu_groups,
+ "%s", iommu->name);
+ iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
+ iommu_device_register(&iommu->iommu);
+ }
bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
bus_register_notifier(&pci_bus_type, &device_nb);
@@ -5179,7 +5197,7 @@ static int intel_iommu_add_device(struct device *dev)
if (!iommu)
return -ENODEV;
- iommu_device_link(iommu->iommu_dev, dev);
+ iommu_device_link(&iommu->iommu, dev);
group = iommu_group_get_for_dev(dev);
@@ -5201,7 +5219,46 @@ static void intel_iommu_remove_device(struct device *dev)
iommu_group_remove_device(dev);
- iommu_device_unlink(iommu->iommu_dev, dev);
+ iommu_device_unlink(&iommu->iommu, dev);
+}
+
+static void intel_iommu_get_resv_regions(struct device *device,
+ struct list_head *head)
+{
+ struct iommu_resv_region *reg;
+ struct dmar_rmrr_unit *rmrr;
+ struct device *i_dev;
+ int i;
+
+ rcu_read_lock();
+ for_each_rmrr_units(rmrr) {
+ for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
+ i, i_dev) {
+ if (i_dev != device)
+ continue;
+
+ list_add_tail(&rmrr->resv->list, head);
+ }
+ }
+ rcu_read_unlock();
+
+ reg = iommu_alloc_resv_region(IOAPIC_RANGE_START,
+ IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1,
+ 0, IOMMU_RESV_RESERVED);
+ if (!reg)
+ return;
+ list_add_tail(&reg->list, head);
+}
+
+static void intel_iommu_put_resv_regions(struct device *dev,
+ struct list_head *head)
+{
+ struct iommu_resv_region *entry, *next;
+
+ list_for_each_entry_safe(entry, next, head, list) {
+ if (entry->type == IOMMU_RESV_RESERVED)
+ kfree(entry);
+ }
}
#ifdef CONFIG_INTEL_IOMMU_SVM
@@ -5333,20 +5390,22 @@ struct intel_iommu *intel_svm_device_to_iommu(struct device *dev)
}
#endif /* CONFIG_INTEL_IOMMU_SVM */
-static const struct iommu_ops intel_iommu_ops = {
- .capable = intel_iommu_capable,
- .domain_alloc = intel_iommu_domain_alloc,
- .domain_free = intel_iommu_domain_free,
- .attach_dev = intel_iommu_attach_device,
- .detach_dev = intel_iommu_detach_device,
- .map = intel_iommu_map,
- .unmap = intel_iommu_unmap,
- .map_sg = default_iommu_map_sg,
- .iova_to_phys = intel_iommu_iova_to_phys,
- .add_device = intel_iommu_add_device,
- .remove_device = intel_iommu_remove_device,
- .device_group = pci_device_group,
- .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
+const struct iommu_ops intel_iommu_ops = {
+ .capable = intel_iommu_capable,
+ .domain_alloc = intel_iommu_domain_alloc,
+ .domain_free = intel_iommu_domain_free,
+ .attach_dev = intel_iommu_attach_device,
+ .detach_dev = intel_iommu_detach_device,
+ .map = intel_iommu_map,
+ .unmap = intel_iommu_unmap,
+ .map_sg = default_iommu_map_sg,
+ .iova_to_phys = intel_iommu_iova_to_phys,
+ .add_device = intel_iommu_add_device,
+ .remove_device = intel_iommu_remove_device,
+ .get_resv_regions = intel_iommu_get_resv_regions,
+ .put_resv_regions = intel_iommu_put_resv_regions,
+ .device_group = pci_device_group,
+ .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
static void quirk_iommu_g4x_gfx(struct pci_dev *dev)
diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
index 0769276c0537..1c049e2e12bf 100644
--- a/drivers/iommu/io-pgtable-arm-v7s.c
+++ b/drivers/iommu/io-pgtable-arm-v7s.c
@@ -265,7 +265,9 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
if (!(prot & IOMMU_MMIO))
pte |= ARM_V7S_ATTR_TEX(1);
if (ap) {
- pte |= ARM_V7S_PTE_AF | ARM_V7S_PTE_AP_UNPRIV;
+ pte |= ARM_V7S_PTE_AF;
+ if (!(prot & IOMMU_PRIV))
+ pte |= ARM_V7S_PTE_AP_UNPRIV;
if (!(prot & IOMMU_WRITE))
pte |= ARM_V7S_PTE_AP_RDONLY;
}
@@ -288,6 +290,8 @@ static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
if (!(attr & ARM_V7S_PTE_AP_RDONLY))
prot |= IOMMU_WRITE;
+ if (!(attr & ARM_V7S_PTE_AP_UNPRIV))
+ prot |= IOMMU_PRIV;
if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
prot |= IOMMU_MMIO;
else if (pte & ARM_V7S_ATTR_C)
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index a40ce3406fef..feacc54bec68 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -350,11 +350,14 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
if (data->iop.fmt == ARM_64_LPAE_S1 ||
data->iop.fmt == ARM_32_LPAE_S1) {
- pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;
+ pte = ARM_LPAE_PTE_nG;
if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
pte |= ARM_LPAE_PTE_AP_RDONLY;
+ if (!(prot & IOMMU_PRIV))
+ pte |= ARM_LPAE_PTE_AP_UNPRIV;
+
if (prot & IOMMU_MMIO)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
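
Both page-table formats now set their unprivileged-access bit only when IOMMU_PRIV is absent, making privileged-only mappings expressible; dma_info_to_prot() earlier in this series derives that flag from DMA_ATTR_PRIVILEGED. A small sketch of the flag flow (example_prot() is illustrative):

#include <linux/iommu.h>
#include <linux/types.h>

/* Sketch: build IOMMU prot flags the way dma_info_to_prot() does. */
static int example_prot(bool privileged, bool writable)
{
	int prot = IOMMU_READ;

	if (writable)
		prot |= IOMMU_WRITE;
	if (privileged)
		prot |= IOMMU_PRIV;	/* page-table code then leaves the
					 * UNPRIV bit clear, restricting the
					 * mapping to privileged transactions */

	return prot;
}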
diff --git a/drivers/iommu/iommu-sysfs.c b/drivers/iommu/iommu-sysfs.c
index 39b2d9127dbf..c58351ed61c1 100644
--- a/drivers/iommu/iommu-sysfs.c
+++ b/drivers/iommu/iommu-sysfs.c
@@ -50,85 +50,76 @@ static int __init iommu_dev_init(void)
postcore_initcall(iommu_dev_init);
/*
- * Create an IOMMU device and return a pointer to it. IOMMU specific
- * attributes can be provided as an attribute group, allowing a unique
- * namespace per IOMMU type.
+ * Init the struct device for the IOMMU. IOMMU specific attributes can
+ * be provided as an attribute group, allowing a unique namespace per
+ * IOMMU type.
*/
-struct device *iommu_device_create(struct device *parent, void *drvdata,
- const struct attribute_group **groups,
- const char *fmt, ...)
+int iommu_device_sysfs_add(struct iommu_device *iommu,
+ struct device *parent,
+ const struct attribute_group **groups,
+ const char *fmt, ...)
{
- struct device *dev;
va_list vargs;
int ret;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return ERR_PTR(-ENOMEM);
+ device_initialize(&iommu->dev);
- device_initialize(dev);
-
- dev->class = &iommu_class;
- dev->parent = parent;
- dev->groups = groups;
- dev_set_drvdata(dev, drvdata);
+ iommu->dev.class = &iommu_class;
+ iommu->dev.parent = parent;
+ iommu->dev.groups = groups;
va_start(vargs, fmt);
- ret = kobject_set_name_vargs(&dev->kobj, fmt, vargs);
+ ret = kobject_set_name_vargs(&iommu->dev.kobj, fmt, vargs);
va_end(vargs);
if (ret)
goto error;
- ret = device_add(dev);
+ ret = device_add(&iommu->dev);
if (ret)
goto error;
- return dev;
+ return 0;
error:
- put_device(dev);
- return ERR_PTR(ret);
+ put_device(&iommu->dev);
+ return ret;
}
-void iommu_device_destroy(struct device *dev)
+void iommu_device_sysfs_remove(struct iommu_device *iommu)
{
- if (!dev || IS_ERR(dev))
- return;
-
- device_unregister(dev);
+ device_unregister(&iommu->dev);
}
-
/*
* IOMMU drivers can indicate a device is managed by a given IOMMU using
* this interface. A link to the device will be created in the "devices"
* directory of the IOMMU device in sysfs and an "iommu" link will be
* created under the linked device, pointing back at the IOMMU device.
*/
-int iommu_device_link(struct device *dev, struct device *link)
+int iommu_device_link(struct iommu_device *iommu, struct device *link)
{
int ret;
- if (!dev || IS_ERR(dev))
+ if (!iommu || IS_ERR(iommu))
return -ENODEV;
- ret = sysfs_add_link_to_group(&dev->kobj, "devices",
+ ret = sysfs_add_link_to_group(&iommu->dev.kobj, "devices",
&link->kobj, dev_name(link));
if (ret)
return ret;
- ret = sysfs_create_link_nowarn(&link->kobj, &dev->kobj, "iommu");
+ ret = sysfs_create_link_nowarn(&link->kobj, &iommu->dev.kobj, "iommu");
if (ret)
- sysfs_remove_link_from_group(&dev->kobj, "devices",
+ sysfs_remove_link_from_group(&iommu->dev.kobj, "devices",
dev_name(link));
return ret;
}
-void iommu_device_unlink(struct device *dev, struct device *link)
+void iommu_device_unlink(struct iommu_device *iommu, struct device *link)
{
- if (!dev || IS_ERR(dev))
+ if (!iommu || IS_ERR(iommu))
return;
sysfs_remove_link(&link->kobj, "iommu");
- sysfs_remove_link_from_group(&dev->kobj, "devices", dev_name(link));
+ sysfs_remove_link_from_group(&iommu->dev.kobj, "devices", dev_name(link));
}
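The net effect for drivers: instead of asking the core to kzalloc an opaque struct device, they embed a struct iommu_device in their own per-instance structure and hand it to these helpers. A minimal probe-time sketch, with hypothetical type and label names:

	struct my_iommu {
		void __iomem *base;
		struct iommu_device iommu;	/* embedded, not core-allocated */
	};

	static int my_iommu_probe(struct my_iommu *m, struct device *parent)
	{
		int ret;

		ret = iommu_device_sysfs_add(&m->iommu, parent, NULL,
					     "my-iommu.%s", dev_name(parent));
		if (ret)
			return ret;
		/* teardown calls iommu_device_sysfs_remove(&m->iommu) */
		return 0;
	}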
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index dbe7f653bb7c..8ea14f41a979 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -55,7 +55,7 @@ struct iommu_group {
struct iommu_domain *domain;
};
-struct iommu_device {
+struct group_device {
struct list_head list;
struct device *dev;
char *name;
@@ -68,6 +68,12 @@ struct iommu_group_attribute {
const char *buf, size_t count);
};
+static const char * const iommu_group_resv_type_string[] = {
+ [IOMMU_RESV_DIRECT] = "direct",
+ [IOMMU_RESV_RESERVED] = "reserved",
+ [IOMMU_RESV_MSI] = "msi",
+};
+
#define IOMMU_GROUP_ATTR(_name, _mode, _show, _store) \
struct iommu_group_attribute iommu_group_attr_##_name = \
__ATTR(_name, _mode, _show, _store)
@@ -77,6 +83,25 @@ struct iommu_group_attribute iommu_group_attr_##_name = \
#define to_iommu_group(_kobj) \
container_of(_kobj, struct iommu_group, kobj)
+static LIST_HEAD(iommu_device_list);
+static DEFINE_SPINLOCK(iommu_device_lock);
+
+int iommu_device_register(struct iommu_device *iommu)
+{
+ spin_lock(&iommu_device_lock);
+ list_add_tail(&iommu->list, &iommu_device_list);
+ spin_unlock(&iommu_device_lock);
+
+ return 0;
+}
+
+void iommu_device_unregister(struct iommu_device *iommu)
+{
+ spin_lock(&iommu_device_lock);
+ list_del(&iommu->list);
+ spin_unlock(&iommu_device_lock);
+}
+
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
@@ -133,8 +158,131 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf)
return sprintf(buf, "%s\n", group->name);
}
+/**
+ * iommu_insert_resv_region - Insert a new region in the
+ * list of reserved regions.
+ * @new: new region to insert
+ * @regions: list of regions
+ *
+ * The new region is inserted in address order among regions of the
+ * same type. If it overlaps a region of the same type, the two are
+ * merged; regions of different types are never merged, even when
+ * they overlap.
+ */
+static int iommu_insert_resv_region(struct iommu_resv_region *new,
+ struct list_head *regions)
+{
+ struct iommu_resv_region *region;
+ phys_addr_t start = new->start;
+ phys_addr_t end = new->start + new->length - 1;
+ struct list_head *pos = regions->next;
+
+ while (pos != regions) {
+ struct iommu_resv_region *entry =
+ list_entry(pos, struct iommu_resv_region, list);
+ phys_addr_t a = entry->start;
+ phys_addr_t b = entry->start + entry->length - 1;
+ int type = entry->type;
+
+ if (end < a) {
+ goto insert;
+ } else if (start > b) {
+ pos = pos->next;
+ } else if ((start >= a) && (end <= b)) {
+ if (new->type == type)
+ goto done;
+ else
+ pos = pos->next;
+ } else {
+ if (new->type == type) {
+ phys_addr_t new_start = min(a, start);
+ phys_addr_t new_end = max(b, end);
+
+ list_del(&entry->list);
+ entry->start = new_start;
+ entry->length = new_end - new_start + 1;
+ iommu_insert_resv_region(entry, regions);
+ } else {
+ pos = pos->next;
+ }
+ }
+ }
+insert:
+ region = iommu_alloc_resv_region(new->start, new->length,
+ new->prot, new->type);
+ if (!region)
+ return -ENOMEM;
+
+ list_add_tail(&region->list, pos);
+done:
+ return 0;
+}
+
+static int
+iommu_insert_device_resv_regions(struct list_head *dev_resv_regions,
+ struct list_head *group_resv_regions)
+{
+ struct iommu_resv_region *entry;
+ int ret = 0;
+
+ list_for_each_entry(entry, dev_resv_regions, list) {
+ ret = iommu_insert_resv_region(entry, group_resv_regions);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+int iommu_get_group_resv_regions(struct iommu_group *group,
+ struct list_head *head)
+{
+ struct group_device *device;
+ int ret = 0;
+
+ mutex_lock(&group->mutex);
+ list_for_each_entry(device, &group->devices, list) {
+ struct list_head dev_resv_regions;
+
+ INIT_LIST_HEAD(&dev_resv_regions);
+ iommu_get_resv_regions(device->dev, &dev_resv_regions);
+ ret = iommu_insert_device_resv_regions(&dev_resv_regions, head);
+ iommu_put_resv_regions(device->dev, &dev_resv_regions);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&group->mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_get_group_resv_regions);
+
+static ssize_t iommu_group_show_resv_regions(struct iommu_group *group,
+ char *buf)
+{
+ struct iommu_resv_region *region, *next;
+ struct list_head group_resv_regions;
+ char *str = buf;
+
+ INIT_LIST_HEAD(&group_resv_regions);
+ iommu_get_group_resv_regions(group, &group_resv_regions);
+
+ list_for_each_entry_safe(region, next, &group_resv_regions, list) {
+ str += sprintf(str, "0x%016llx 0x%016llx %s\n",
+ (long long int)region->start,
+ (long long int)(region->start +
+ region->length - 1),
+ iommu_group_resv_type_string[region->type]);
+ kfree(region);
+ }
+
+ return (str - buf);
+}
+
static IOMMU_GROUP_ATTR(name, S_IRUGO, iommu_group_show_name, NULL);
+static IOMMU_GROUP_ATTR(reserved_regions, 0444,
+ iommu_group_show_resv_regions, NULL);
+
static void iommu_group_release(struct kobject *kobj)
{
struct iommu_group *group = to_iommu_group(kobj);
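A concrete walk-through of the insertion rules: with an existing direct region [0x1000, 0x1fff] on the list, inserting a same-type region [0x1800, 0x2fff] removes the old entry, widens it to [0x1000, 0x2fff] and re-inserts the result, whereas inserting an MSI region over the same range is placed alongside untouched, since regions of different types never merge. Note also the change to iommu_group_create_direct_mappings below: only IOMMU_RESV_DIRECT entries are identity-mapped; the other types merely exclude their range. Reading the new attribute then yields one line per merged region, in the exact sprintf format above (group number and addresses illustrative; 0xfee00000-0xfeefffff is the usual x86 MSI window):

	$ cat /sys/kernel/iommu_groups/7/reserved_regions
	0x0000000000001000 0x0000000000002fff direct
	0x00000000fee00000 0x00000000feefffff msi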
@@ -212,6 +360,11 @@ struct iommu_group *iommu_group_alloc(void)
*/
kobject_put(&group->kobj);
+ ret = iommu_group_create_file(group,
+ &iommu_group_attr_reserved_regions);
+ if (ret)
+ return ERR_PTR(ret);
+
pr_debug("Allocated group %d\n", group->id);
return group;
@@ -318,7 +471,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
struct device *dev)
{
struct iommu_domain *domain = group->default_domain;
- struct iommu_dm_region *entry;
+ struct iommu_resv_region *entry;
struct list_head mappings;
unsigned long pg_size;
int ret = 0;
@@ -331,18 +484,21 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
pg_size = 1UL << __ffs(domain->pgsize_bitmap);
INIT_LIST_HEAD(&mappings);
- iommu_get_dm_regions(dev, &mappings);
+ iommu_get_resv_regions(dev, &mappings);
/* We need to consider overlapping regions for different devices */
list_for_each_entry(entry, &mappings, list) {
dma_addr_t start, end, addr;
- if (domain->ops->apply_dm_region)
- domain->ops->apply_dm_region(dev, domain, entry);
+ if (domain->ops->apply_resv_region)
+ domain->ops->apply_resv_region(dev, domain, entry);
start = ALIGN(entry->start, pg_size);
end = ALIGN(entry->start + entry->length, pg_size);
+ if (entry->type != IOMMU_RESV_DIRECT)
+ continue;
+
for (addr = start; addr < end; addr += pg_size) {
phys_addr_t phys_addr;
@@ -358,7 +514,7 @@ static int iommu_group_create_direct_mappings(struct iommu_group *group,
}
out:
- iommu_put_dm_regions(dev, &mappings);
+ iommu_put_resv_regions(dev, &mappings);
return ret;
}
@@ -374,7 +530,7 @@ out:
int iommu_group_add_device(struct iommu_group *group, struct device *dev)
{
int ret, i = 0;
- struct iommu_device *device;
+ struct group_device *device;
device = kzalloc(sizeof(*device), GFP_KERNEL);
if (!device)
@@ -383,36 +539,30 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
device->dev = dev;
ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
- if (ret) {
- kfree(device);
- return ret;
- }
+ if (ret)
+ goto err_free_device;
device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
if (!device->name) {
- sysfs_remove_link(&dev->kobj, "iommu_group");
- kfree(device);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_remove_link;
}
ret = sysfs_create_link_nowarn(group->devices_kobj,
&dev->kobj, device->name);
if (ret) {
- kfree(device->name);
if (ret == -EEXIST && i >= 0) {
/*
* Account for the slim chance of collision
* and append an instance to the name.
*/
+ kfree(device->name);
device->name = kasprintf(GFP_KERNEL, "%s.%d",
kobject_name(&dev->kobj), i++);
goto rename;
}
-
- sysfs_remove_link(&dev->kobj, "iommu_group");
- kfree(device);
- return ret;
+ goto err_free_name;
}
kobject_get(group->devices_kobj);
@@ -424,8 +574,10 @@ rename:
mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices);
if (group->domain)
- __iommu_attach_device(group->domain, dev);
+ ret = __iommu_attach_device(group->domain, dev);
mutex_unlock(&group->mutex);
+ if (ret)
+ goto err_put_group;
/* Notify any listeners about change to group. */
blocking_notifier_call_chain(&group->notifier,
@@ -436,6 +588,21 @@ rename:
pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);
return 0;
+
+err_put_group:
+ mutex_lock(&group->mutex);
+ list_del(&device->list);
+ mutex_unlock(&group->mutex);
+ dev->iommu_group = NULL;
+ kobject_put(group->devices_kobj);
+err_free_name:
+ kfree(device->name);
+err_remove_link:
+ sysfs_remove_link(&dev->kobj, "iommu_group");
+err_free_device:
+ kfree(device);
+ pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
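The reworked error handling replaces per-site cleanup with a single unwind ladder: each label undoes exactly one earlier step, in reverse order of acquisition, and every failure jumps to the deepest label whose resources it holds. The skeleton of the idiom, with hypothetical resource names:

	ret = take_a();
	if (ret)
		return ret;
	ret = take_b();
	if (ret)
		goto err_release_a;
	ret = take_c();
	if (ret)
		goto err_release_b;
	return 0;

	err_release_b:
		release_b();
	err_release_a:
		release_a();
		return ret;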
@@ -449,7 +616,7 @@ EXPORT_SYMBOL_GPL(iommu_group_add_device);
void iommu_group_remove_device(struct device *dev)
{
struct iommu_group *group = dev->iommu_group;
- struct iommu_device *tmp_device, *device = NULL;
+ struct group_device *tmp_device, *device = NULL;
pr_info("Removing device %s from group %d\n", dev_name(dev), group->id);
@@ -484,7 +651,7 @@ EXPORT_SYMBOL_GPL(iommu_group_remove_device);
static int iommu_group_device_count(struct iommu_group *group)
{
- struct iommu_device *entry;
+ struct group_device *entry;
int ret = 0;
list_for_each_entry(entry, &group->devices, list)
@@ -507,7 +674,7 @@ static int iommu_group_device_count(struct iommu_group *group)
static int __iommu_group_for_each_dev(struct iommu_group *group, void *data,
int (*fn)(struct device *, void *))
{
- struct iommu_device *device;
+ struct group_device *device;
int ret = 0;
list_for_each_entry(device, &group->devices, list) {
@@ -1559,20 +1726,38 @@ int iommu_domain_set_attr(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
-void iommu_get_dm_regions(struct device *dev, struct list_head *list)
+void iommu_get_resv_regions(struct device *dev, struct list_head *list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
- if (ops && ops->get_dm_regions)
- ops->get_dm_regions(dev, list);
+ if (ops && ops->get_resv_regions)
+ ops->get_resv_regions(dev, list);
}
-void iommu_put_dm_regions(struct device *dev, struct list_head *list)
+void iommu_put_resv_regions(struct device *dev, struct list_head *list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;
- if (ops && ops->put_dm_regions)
- ops->put_dm_regions(dev, list);
+ if (ops && ops->put_resv_regions)
+ ops->put_resv_regions(dev, list);
+}
+
+struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start,
+ size_t length,
+ int prot, int type)
+{
+ struct iommu_resv_region *region;
+
+ region = kzalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return NULL;
+
+ INIT_LIST_HEAD(&region->list);
+ region->start = start;
+ region->length = length;
+ region->prot = prot;
+ region->type = type;
+ return region;
}
/* Request that a device is direct mapped by the IOMMU */
@@ -1628,43 +1813,18 @@ out:
return ret;
}
-struct iommu_instance {
- struct list_head list;
- struct fwnode_handle *fwnode;
- const struct iommu_ops *ops;
-};
-static LIST_HEAD(iommu_instance_list);
-static DEFINE_SPINLOCK(iommu_instance_lock);
-
-void iommu_register_instance(struct fwnode_handle *fwnode,
- const struct iommu_ops *ops)
+const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
{
- struct iommu_instance *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
-
- if (WARN_ON(!iommu))
- return;
-
- of_node_get(to_of_node(fwnode));
- INIT_LIST_HEAD(&iommu->list);
- iommu->fwnode = fwnode;
- iommu->ops = ops;
- spin_lock(&iommu_instance_lock);
- list_add_tail(&iommu->list, &iommu_instance_list);
- spin_unlock(&iommu_instance_lock);
-}
-
-const struct iommu_ops *iommu_get_instance(struct fwnode_handle *fwnode)
-{
- struct iommu_instance *instance;
const struct iommu_ops *ops = NULL;
+ struct iommu_device *iommu;
- spin_lock(&iommu_instance_lock);
- list_for_each_entry(instance, &iommu_instance_list, list)
- if (instance->fwnode == fwnode) {
- ops = instance->ops;
+ spin_lock(&iommu_device_lock);
+ list_for_each_entry(iommu, &iommu_device_list, list)
+ if (iommu->fwnode == fwnode) {
+ ops = iommu->ops;
break;
}
- spin_unlock(&iommu_instance_lock);
+ spin_unlock(&iommu_device_lock);
return ops;
}
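With struct iommu_device now carrying both fwnode and ops (populated via iommu_device_set_fwnode/iommu_device_set_ops before registration, as the msm and mtk hunks below show), the separate iommu_instance bookkeeping becomes redundant: the list of registered IOMMUs doubles as the fwnode-to-ops map. A sketch of the caller side (the of_iommu.c hunk below is the real one):

	/* Resolve the ops published for a DT node's IOMMU instance;
	 * NULL means the IOMMU driver has not registered (yet).
	 */
	const struct iommu_ops *ops = iommu_ops_from_fwnode(&np->fwnode);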
@@ -1714,13 +1874,14 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids)
fwspec = krealloc(dev->iommu_fwspec, size, GFP_KERNEL);
if (!fwspec)
return -ENOMEM;
+
+ dev->iommu_fwspec = fwspec;
}
for (i = 0; i < num_ids; i++)
fwspec->ids[fwspec->num_ids + i] = ids[i];
fwspec->num_ids += num_ids;
- dev->iommu_fwspec = fwspec;
return 0;
}
EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids);
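Moving the dev->iommu_fwspec store inside the branch is the krealloc() discipline: krealloc may return a different pointer, so the published copy must be updated at the point of reallocation, not at some later common exit. The general idiom, sketched with a hypothetical payload type:

	struct foo;	/* placeholder payload type */

	static int grow(struct foo **slot, size_t new_size)
	{
		struct foo *tmp = krealloc(*slot, new_size, GFP_KERNEL);

		if (!tmp)
			return -ENOMEM;	/* *slot is still valid, unchanged */
		*slot = tmp;		/* publish at once; old pointer may be gone */
		return 0;
	}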
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 080beca0197d..b7268a14184f 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -62,7 +62,7 @@ __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
else {
struct rb_node *prev_node = rb_prev(iovad->cached32_node);
struct iova *curr_iova =
- container_of(iovad->cached32_node, struct iova, node);
+ rb_entry(iovad->cached32_node, struct iova, node);
*limit_pfn = curr_iova->pfn_lo - 1;
return prev_node;
}
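This and the remaining iova.c hunks are purely mechanical: rb_entry() is defined in <linux/rbtree.h> as a thin alias for container_of(), so the generated code is identical and only the intent reads better:

	/* include/linux/rbtree.h */
	#define rb_entry(ptr, type, member) container_of(ptr, type, member)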
@@ -86,11 +86,11 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
if (!iovad->cached32_node)
return;
curr = iovad->cached32_node;
- cached_iova = container_of(curr, struct iova, node);
+ cached_iova = rb_entry(curr, struct iova, node);
if (free->pfn_lo >= cached_iova->pfn_lo) {
struct rb_node *node = rb_next(&free->node);
- struct iova *iova = container_of(node, struct iova, node);
+ struct iova *iova = rb_entry(node, struct iova, node);
/* only cache if it's below 32bit pfn */
if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
@@ -125,7 +125,7 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
curr = __get_cached_rbnode(iovad, &limit_pfn);
prev = curr;
while (curr) {
- struct iova *curr_iova = container_of(curr, struct iova, node);
+ struct iova *curr_iova = rb_entry(curr, struct iova, node);
if (limit_pfn < curr_iova->pfn_lo)
goto move_left;
@@ -171,8 +171,7 @@ move_left:
/* Figure out where to put new node */
while (*entry) {
- struct iova *this = container_of(*entry,
- struct iova, node);
+ struct iova *this = rb_entry(*entry, struct iova, node);
parent = *entry;
if (new->pfn_lo < this->pfn_lo)
@@ -201,7 +200,7 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
struct rb_node **new = &(root->rb_node), *parent = NULL;
/* Figure out where to put new node */
while (*new) {
- struct iova *this = container_of(*new, struct iova, node);
+ struct iova *this = rb_entry(*new, struct iova, node);
parent = *new;
@@ -311,7 +310,7 @@ private_find_iova(struct iova_domain *iovad, unsigned long pfn)
assert_spin_locked(&iovad->iova_rbtree_lock);
while (node) {
- struct iova *iova = container_of(node, struct iova, node);
+ struct iova *iova = rb_entry(node, struct iova, node);
/* If pfn falls within iova's range, return iova */
if ((pfn >= iova->pfn_lo) && (pfn <= iova->pfn_hi)) {
@@ -463,7 +462,7 @@ void put_iova_domain(struct iova_domain *iovad)
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
node = rb_first(&iovad->rbroot);
while (node) {
- struct iova *iova = container_of(node, struct iova, node);
+ struct iova *iova = rb_entry(node, struct iova, node);
rb_erase(node, &iovad->rbroot);
free_iova_mem(iova);
@@ -477,7 +476,7 @@ static int
__is_range_overlap(struct rb_node *node,
unsigned long pfn_lo, unsigned long pfn_hi)
{
- struct iova *iova = container_of(node, struct iova, node);
+ struct iova *iova = rb_entry(node, struct iova, node);
if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo))
return 1;
@@ -541,7 +540,7 @@ reserve_iova(struct iova_domain *iovad,
spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
- iova = container_of(node, struct iova, node);
+ iova = rb_entry(node, struct iova, node);
__adjust_overlap_range(iova, &pfn_lo, &pfn_hi);
if ((pfn_lo >= iova->pfn_lo) &&
(pfn_hi <= iova->pfn_hi))
@@ -578,7 +577,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
spin_lock_irqsave(&from->iova_rbtree_lock, flags);
for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
- struct iova *iova = container_of(node, struct iova, node);
+ struct iova *iova = rb_entry(node, struct iova, node);
struct iova *new_iova;
new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi);
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index ace331da6459..b7e14ee863f9 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -313,6 +313,8 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
domain->cfg.ias = 32;
domain->cfg.oas = 40;
domain->cfg.tlb = &ipmmu_gather_ops;
+ domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
+ domain->io_domain.geometry.force_aperture = true;
/*
* TODO: Add support for coherent walk through CCI with DVM and remove
* cache handling. For now, delegate it to the io-pgtable code.
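Publishing the geometry lets generic consumers, the common DMA-IOMMU layer in particular, learn that this domain only translates 32-bit IOVAs and that addresses outside the aperture must never be handed out. The kind of check this enables, sketched:

	/* Only allocate IOVAs the hardware can actually translate. */
	static bool iova_in_aperture(struct iommu_domain *d, dma_addr_t iova)
	{
		return !d->geometry.force_aperture ||
		       (iova >= d->geometry.aperture_start &&
			iova <= d->geometry.aperture_end);
	}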
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index b09692bb5b0a..d0448353d501 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -371,6 +371,58 @@ static int msm_iommu_domain_config(struct msm_priv *priv)
return 0;
}
+/* Must be called under msm_iommu_lock */
+static struct msm_iommu_dev *find_iommu_for_dev(struct device *dev)
+{
+ struct msm_iommu_dev *iommu, *ret = NULL;
+ struct msm_iommu_ctx_dev *master;
+
+ list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
+ master = list_first_entry(&iommu->ctx_list,
+ struct msm_iommu_ctx_dev,
+ list);
+ if (master->of_node == dev->of_node) {
+ ret = iommu;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int msm_iommu_add_device(struct device *dev)
+{
+ struct msm_iommu_dev *iommu;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+
+ iommu = find_iommu_for_dev(dev);
+ if (iommu)
+ iommu_device_link(&iommu->iommu, dev);
+ else
+ ret = -ENODEV;
+
+ spin_unlock_irqrestore(&msm_iommu_lock, flags);
+
+ return ret;
+}
+
+static void msm_iommu_remove_device(struct device *dev)
+{
+ struct msm_iommu_dev *iommu;
+ unsigned long flags;
+
+ spin_lock_irqsave(&msm_iommu_lock, flags);
+
+ iommu = find_iommu_for_dev(dev);
+ if (iommu)
+ iommu_device_unlink(&iommu->iommu, dev);
+
+ spin_unlock_irqrestore(&msm_iommu_lock, flags);
+}
+
static int msm_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
int ret = 0;
@@ -646,6 +698,8 @@ static struct iommu_ops msm_iommu_ops = {
.unmap = msm_iommu_unmap,
.map_sg = default_iommu_map_sg,
.iova_to_phys = msm_iommu_iova_to_phys,
+ .add_device = msm_iommu_add_device,
+ .remove_device = msm_iommu_remove_device,
.pgsize_bitmap = MSM_IOMMU_PGSIZES,
.of_xlate = qcom_iommu_of_xlate,
};
@@ -653,6 +707,7 @@ static struct iommu_ops msm_iommu_ops = {
static int msm_iommu_probe(struct platform_device *pdev)
{
struct resource *r;
+ resource_size_t ioaddr;
struct msm_iommu_dev *iommu;
int ret, par, val;
@@ -696,6 +751,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
ret = PTR_ERR(iommu->base);
goto fail;
}
+ ioaddr = r->start;
iommu->irq = platform_get_irq(pdev, 0);
if (iommu->irq < 0) {
@@ -737,7 +793,22 @@ static int msm_iommu_probe(struct platform_device *pdev)
}
list_add(&iommu->dev_node, &qcom_iommu_devices);
- of_iommu_set_ops(pdev->dev.of_node, &msm_iommu_ops);
+
+ ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
+ "msm-smmu.%pa", &ioaddr);
+ if (ret) {
+ pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
+ goto fail;
+ }
+
+ iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
+ iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);
+
+ ret = iommu_device_register(&iommu->iommu);
+ if (ret) {
+ pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
+ goto fail;
+ }
pr_info("device mapped at %p, irq %d with %d ctx banks\n",
iommu->base, iommu->irq, iommu->ncb);
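The probe path now follows the registration sequence this series establishes for every driver: expose the instance in sysfs, attach the ops, attach the firmware node, then register, at which point iommu_ops_from_fwnode() lookups start succeeding. Condensed from the hunk above:

	/* 1) sysfs  2) ops  3) fwnode  4) register */
	ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
				     "msm-smmu.%pa", &ioaddr);
	if (!ret) {
		iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
		iommu_device_set_fwnode(&iommu->iommu,
					&pdev->dev.of_node->fwnode);
		ret = iommu_device_register(&iommu->iommu);
	}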
diff --git a/drivers/iommu/msm_iommu.h b/drivers/iommu/msm_iommu.h
index 4ca25d50d679..ae92d2779c42 100644
--- a/drivers/iommu/msm_iommu.h
+++ b/drivers/iommu/msm_iommu.h
@@ -19,6 +19,7 @@
#define MSM_IOMMU_H
#include <linux/interrupt.h>
+#include <linux/iommu.h>
#include <linux/clk.h>
/* Sharability attributes of MSM IOMMU mappings */
@@ -68,6 +69,8 @@ struct msm_iommu_dev {
struct list_head dom_node;
struct list_head ctx_list;
DECLARE_BITMAP(context_map, IOMMU_MAX_CBS);
+
+ struct iommu_device iommu;
};
/**
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index 1479c76ece9e..5d14cd15198d 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -360,11 +360,15 @@ static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
static int mtk_iommu_add_device(struct device *dev)
{
+ struct mtk_iommu_data *data;
struct iommu_group *group;
if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return -ENODEV; /* Not an IOMMU client device */
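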
+ data = dev->iommu_fwspec->iommu_priv;
+ iommu_device_link(&data->iommu, dev);
+
group = iommu_group_get_for_dev(dev);
if (IS_ERR(group))
return PTR_ERR(group);
@@ -375,9 +379,14 @@ static int mtk_iommu_add_device(struct device *dev)
static void mtk_iommu_remove_device(struct device *dev)
{
+ struct mtk_iommu_data *data;
+
if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
return;
+ data = dev->iommu_fwspec->iommu_priv;
+ iommu_device_unlink(&data->iommu, dev);
+
iommu_group_remove_device(dev);
iommu_fwspec_free(dev);
}
@@ -497,6 +506,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
struct mtk_iommu_data *data;
struct device *dev = &pdev->dev;
struct resource *res;
+ resource_size_t ioaddr;
struct component_match *match = NULL;
void *protect;
int i, larb_nr, ret;
@@ -519,6 +529,7 @@ static int mtk_iommu_probe(struct platform_device *pdev)
data->base = devm_ioremap_resource(dev, res);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
+ ioaddr = res->start;
data->irq = platform_get_irq(pdev, 0);
if (data->irq < 0)
@@ -567,6 +578,18 @@ static int mtk_iommu_probe(struct platform_device *pdev)
if (ret)
return ret;
+ ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
+ "mtk-iommu.%pa", &ioaddr);
+ if (ret)
+ return ret;
+
+ iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
+ iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);
+
+ ret = iommu_device_register(&data->iommu);
+ if (ret)
+ return ret;
+
if (!iommu_present(&platform_bus_type))
bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);
@@ -577,6 +600,9 @@ static int mtk_iommu_remove(struct platform_device *pdev)
{
struct mtk_iommu_data *data = platform_get_drvdata(pdev);
+ iommu_device_sysfs_remove(&data->iommu);
+ iommu_device_unregister(&data->iommu);
+
if (iommu_present(&platform_bus_type))
bus_set_iommu(&platform_bus_type, NULL);
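The remove path drops the same two pieces of state. A strict mirror of the setup order would unregister before removing the sysfs node, though the two calls are independent here, so either ordering is safe; sketched with hypothetical driver names:

	static void my_iommu_remove(struct my_iommu *m)
	{
		iommu_device_unregister(&m->iommu);	/* stop fwnode lookups */
		iommu_device_sysfs_remove(&m->iommu);
	}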
@@ -655,7 +681,6 @@ static int mtk_iommu_init_fn(struct device_node *np)
return ret;
}
- of_iommu_set_ops(np, &mtk_iommu_ops);
return 0;
}
diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h
index 50177f738e4e..2a28eadeea0e 100644
--- a/drivers/iommu/mtk_iommu.h
+++ b/drivers/iommu/mtk_iommu.h
@@ -47,6 +47,8 @@ struct mtk_iommu_data {
struct iommu_group *m4u_group;
struct mtk_smi_iommu smi_imu; /* SMI larb iommu info */
bool enable_4GB;
+
+ struct iommu_device iommu;
};
static inline int compare_of(struct device *dev, void *data)
diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c
index 0f57ddc4ecc2..2683e9fc0dcf 100644
--- a/drivers/iommu/of_iommu.c
+++ b/drivers/iommu/of_iommu.c
@@ -127,7 +127,7 @@ static const struct iommu_ops
"iommu-map-mask", &iommu_spec.np, iommu_spec.args))
return NULL;
- ops = of_iommu_get_ops(iommu_spec.np);
+ ops = iommu_ops_from_fwnode(&iommu_spec.np->fwnode);
if (!ops || !ops->of_xlate ||
iommu_fwspec_init(&pdev->dev, &iommu_spec.np->fwnode, ops) ||
ops->of_xlate(&pdev->dev, &iommu_spec))
@@ -157,7 +157,7 @@ const struct iommu_ops *of_iommu_configure(struct device *dev,
"#iommu-cells", idx,
&iommu_spec)) {
np = iommu_spec.np;
- ops = of_iommu_get_ops(np);
+ ops = iommu_ops_from_fwnode(&np->fwnode);
if (!ops || !ops->of_xlate ||
iommu_fwspec_init(dev, &np->fwnode, ops) ||
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 69b040f47d56..9d4fefc59827 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1642,6 +1642,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
inner_domain->parent = its_parent;
inner_domain->bus_token = DOMAIN_BUS_NEXUS;
+ inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;
info->ops = &its_msi_domain_ops;
info->data = its;
inner_domain->host_data = info;
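The new flag advertises that this MSI controller isolates and remaps MSIs in hardware, which the GICv3 ITS inherently does via its per-DeviceID translation tables. Consumers such as VFIO can then treat device assignment as interrupt-safe even when the IOMMU itself offers no interrupt remapping; a hedged sketch of such a check (irq_domain_check_msi_remap() is the helper introduced alongside this flag in the same series — treat its use here as an assumption):

	if (!irq_domain_check_msi_remap() && !allow_unsafe_interrupts)
		return -EPERM;	/* refuse an unsafe assignment */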
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 23909804ffb8..0def99590d16 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2733,7 +2733,8 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
if (intmask & SDHCI_INT_RETUNE)
mmc_retune_needed(host->mmc);
- if (intmask & SDHCI_INT_CARD_INT) {
+ if ((intmask & SDHCI_INT_CARD_INT) &&
+ (host->ier & SDHCI_INT_CARD_INT)) {
sdhci_enable_sdio_irq_nolock(host, false);
host->thread_isr |= SDHCI_INT_CARD_INT;
result = IRQ_WAKE_THREAD;
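The added test closes a race where a card interrupt fires while SDIO interrupts are masked in host->ier; without it the handler wakes the threaded ISR for an interrupt nobody enabled. The general guard, sketched with hypothetical register names:

	/* Service only sources that are both pending and currently enabled. */
	pending = readl(base + INT_STATUS) & readl(base + INT_ENABLE);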
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index c12d2618eebf..3872ab96b80a 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1152,6 +1152,12 @@ static void init_ring(struct net_device *dev)
if (skb == NULL)
break;
np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->rx_info[i].mapping)) {
+ dev_kfree_skb(skb);
+ np->rx_info[i].skb = NULL;
+ break;
+ }
/* Grrr, we cannot offset to correctly align the IP header. */
np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
}
@@ -1182,8 +1188,9 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
struct netdev_private *np = netdev_priv(dev);
unsigned int entry;
+ unsigned int prev_tx;
u32 status;
- int i;
+ int i, j;
/*
* be cautious here, wrapping the queue has weird semantics
@@ -1201,6 +1208,7 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
}
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
+ prev_tx = np->cur_tx;
entry = np->cur_tx % TX_RING_SIZE;
for (i = 0; i < skb_num_frags(skb); i++) {
int wrap_ring = 0;
@@ -1234,6 +1242,11 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
skb_frag_size(this_frag),
PCI_DMA_TODEVICE);
}
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->tx_info[entry].mapping)) {
+ dev->stats.tx_dropped++;
+ goto err_out;
+ }
np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1268,8 +1281,30 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
return NETDEV_TX_OK;
-}
+err_out:
+ entry = prev_tx % TX_RING_SIZE;
+ np->tx_info[entry].skb = NULL;
+ if (i > 0) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[entry].mapping,
+ skb_first_frag_len(skb),
+ PCI_DMA_TODEVICE);
+ np->tx_info[entry].mapping = 0;
+ entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
+ for (j = 1; j < i; j++) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[entry].mapping,
+ skb_frag_size(
+ &skb_shinfo(skb)->frags[j-1]),
+ PCI_DMA_TODEVICE);
+ entry++;
+ }
+ }
+ dev_kfree_skb_any(skb);
+ np->cur_tx = prev_tx;
+ return NETDEV_TX_OK;
+}
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
@@ -1569,6 +1604,12 @@ static void refill_rx_ring(struct net_device *dev)
break; /* Better luck next round. */
np->rx_info[entry].mapping =
pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->rx_info[entry].mapping)) {
+ dev_kfree_skb(skb);
+ np->rx_info[entry].skb = NULL;
+ break;
+ }
np->rx_ring[entry].rxaddr =
cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
}
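All three buffer-fill paths gain the mandatory mapping-error check: with an IOMMU or SWIOTLB in the path, pci_map_single() can fail, and the returned handle must be validated before it is written into a hardware descriptor. The canonical shape:

	dma_addr_t mapping = pci_map_single(pdev, skb->data, len,
					    PCI_DMA_FROMDEVICE);

	if (pci_dma_mapping_error(pdev, mapping)) {
		dev_kfree_skb(skb);	/* never hand a bogus address to HW */
		/* drop the packet or break out of the refill loop */
	}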
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index c0fb80acc2da..baba2db9d9c2 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -43,13 +43,13 @@
#define DEFAULT_RX_RING_SIZE 512 /* must be power of 2 */
#define MIN_RX_RING_SIZE 64
#define MAX_RX_RING_SIZE 8192
-#define RX_RING_BYTES(bp) (sizeof(struct macb_dma_desc) \
+#define RX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
* (bp)->rx_ring_size)
#define DEFAULT_TX_RING_SIZE 512 /* must be power of 2 */
#define MIN_TX_RING_SIZE 64
#define MAX_TX_RING_SIZE 4096
-#define TX_RING_BYTES(bp) (sizeof(struct macb_dma_desc) \
+#define TX_RING_BYTES(bp) (macb_dma_desc_get_size(bp) \
* (bp)->tx_ring_size)
/* level of occupied TX descriptors under which we wake up TX process */
@@ -78,6 +78,37 @@
*/
#define MACB_HALT_TIMEOUT 1230
+/* The DMA buffer descriptor size may differ depending on the
+ * hardware configuration.
+ */
+static unsigned int macb_dma_desc_get_size(struct macb *bp)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ return sizeof(struct macb_dma_desc) + sizeof(struct macb_dma_desc_64);
+#endif
+ return sizeof(struct macb_dma_desc);
+}
+
+static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int idx)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ /* The DMA buffer descriptor is 4 words long (instead of 2 words)
+ * for 64-bit GEM.
+ */
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ idx <<= 1;
+#endif
+ return idx;
+}
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
+{
+ return (struct macb_dma_desc_64 *)((void *)desc + sizeof(struct macb_dma_desc));
+}
+#endif
+
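The trick that keeps a single ring declaration for both layouts lives in macb_adj_dma_desc_idx(): the ring stays typed as an array of 2-word macb_dma_desc, so doubling the logical index makes the pointer arithmetic land on 4-word boundaries whenever the 64-bit trailer is present. Worked out in bytes:

	/* 32-bit layout: descriptor i at i * 8   (addr, ctrl)
	 * 64-bit layout: descriptor i at i * 16  (addr, ctrl, addrh, resvd)
	 *
	 * &ring[i << 1] yields (i << 1) * sizeof(struct macb_dma_desc)
	 *             = i * 16, matching macb_dma_desc_get_size().
	 */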
/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
@@ -87,7 +118,9 @@ static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
unsigned int index)
{
- return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)];
+ index = macb_tx_ring_wrap(queue->bp, index);
+ index = macb_adj_dma_desc_idx(queue->bp, index);
+ return &queue->tx_ring[index];
}
static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
@@ -101,7 +134,7 @@ static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
dma_addr_t offset;
offset = macb_tx_ring_wrap(queue->bp, index) *
- sizeof(struct macb_dma_desc);
+ macb_dma_desc_get_size(queue->bp);
return queue->tx_ring_dma + offset;
}
@@ -113,7 +146,9 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
{
- return &bp->rx_ring[macb_rx_ring_wrap(bp, index)];
+ index = macb_rx_ring_wrap(bp, index);
+ index = macb_adj_dma_desc_idx(bp, index);
+ return &bp->rx_ring[index];
}
static void *macb_rx_buffer(struct macb *bp, unsigned int index)
@@ -560,12 +595,32 @@ static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
}
}
-static inline void macb_set_addr(struct macb_dma_desc *desc, dma_addr_t addr)
+static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
- desc->addr = (u32)addr;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- desc->addrh = (u32)(addr >> 32);
+ struct macb_dma_desc_64 *desc_64;
+
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+ desc_64 = macb_64b_desc(bp, desc);
+ desc_64->addrh = upper_32_bits(addr);
+ }
#endif
+ desc->addr = lower_32_bits(addr);
+}
+
+static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
+{
+ dma_addr_t addr = 0;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ struct macb_dma_desc_64 *desc_64;
+
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B) {
+ desc_64 = macb_64b_desc(bp, desc);
+ addr = ((u64)(desc_64->addrh) << 32);
+ }
+#endif
+ addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+ return addr;
}
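macb_set_addr()/macb_get_addr() now hide the split entirely: the low word always lands in desc->addr, the high word only in the trailer and only on hardware that has one, and upper_32_bits()/lower_32_bits() keep the code warning-free on builds where dma_addr_t is 32 bits wide. The round trip, sketched:

	/* addr = 0x123456780ULL (an illustrative 44-bit bus address)
	 *
	 * set: desc->addr = lower_32_bits(addr) = 0x23456780
	 *      addrh      = upper_32_bits(addr) = 0x00000001
	 * get: ((u64)addrh << 32) | (desc->addr with the low RX flag
	 *      bits masked off via MACB_BF(RX_WADDR, ...)) == addr
	 */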
static void macb_tx_error_task(struct work_struct *work)
@@ -649,16 +704,17 @@ static void macb_tx_error_task(struct work_struct *work)
/* Set end of TX queue */
desc = macb_tx_desc(queue, 0);
- macb_set_addr(desc, 0);
+ macb_set_addr(bp, desc, 0);
desc->ctrl = MACB_BIT(TX_USED);
/* Make descriptor updates visible to hardware */
wmb();
/* Reinitialize the TX desc queue */
- queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+ queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
/* Make TX ring reflect state of hardware */
queue->tx_head = 0;
@@ -750,6 +806,7 @@ static void gem_rx_refill(struct macb *bp)
unsigned int entry;
struct sk_buff *skb;
dma_addr_t paddr;
+ struct macb_dma_desc *desc;
while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
bp->rx_ring_size) > 0) {
@@ -759,6 +816,7 @@ static void gem_rx_refill(struct macb *bp)
rmb();
bp->rx_prepared_head++;
+ desc = macb_rx_desc(bp, entry);
if (!bp->rx_skbuff[entry]) {
/* allocate sk_buff for this free entry in ring */
@@ -782,14 +840,14 @@ static void gem_rx_refill(struct macb *bp)
if (entry == bp->rx_ring_size - 1)
paddr |= MACB_BIT(RX_WRAP);
- macb_set_addr(&(bp->rx_ring[entry]), paddr);
- bp->rx_ring[entry].ctrl = 0;
+ macb_set_addr(bp, desc, paddr);
+ desc->ctrl = 0;
/* properly align Ethernet header */
skb_reserve(skb, NET_IP_ALIGN);
} else {
- bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
- bp->rx_ring[entry].ctrl = 0;
+ desc->addr &= ~MACB_BIT(RX_USED);
+ desc->ctrl = 0;
}
}
@@ -835,16 +893,13 @@ static int gem_rx(struct macb *bp, int budget)
bool rxused;
entry = macb_rx_ring_wrap(bp, bp->rx_tail);
- desc = &bp->rx_ring[entry];
+ desc = macb_rx_desc(bp, entry);
/* Make hw descriptor updates visible to CPU */
rmb();
rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
- addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- addr |= ((u64)(desc->addrh) << 32);
-#endif
+ addr = macb_get_addr(bp, desc);
ctrl = desc->ctrl;
if (!rxused)
@@ -987,15 +1042,17 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
static inline void macb_init_rx_ring(struct macb *bp)
{
dma_addr_t addr;
+ struct macb_dma_desc *desc = NULL;
int i;
addr = bp->rx_buffers_dma;
for (i = 0; i < bp->rx_ring_size; i++) {
- bp->rx_ring[i].addr = addr;
- bp->rx_ring[i].ctrl = 0;
+ desc = macb_rx_desc(bp, i);
+ macb_set_addr(bp, desc, addr);
+ desc->ctrl = 0;
addr += bp->rx_buffer_size;
}
- bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
+ desc->addr |= MACB_BIT(RX_WRAP);
bp->rx_tail = 0;
}
@@ -1008,15 +1065,14 @@ static int macb_rx(struct macb *bp, int budget)
for (tail = bp->rx_tail; budget > 0; tail++) {
struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
- u32 addr, ctrl;
+ u32 ctrl;
/* Make hw descriptor updates visible to CPU */
rmb();
- addr = desc->addr;
ctrl = desc->ctrl;
- if (!(addr & MACB_BIT(RX_USED)))
+ if (!(desc->addr & MACB_BIT(RX_USED)))
break;
if (ctrl & MACB_BIT(RX_SOF)) {
@@ -1336,7 +1392,7 @@ static unsigned int macb_tx_map(struct macb *bp,
i = tx_head;
entry = macb_tx_ring_wrap(bp, i);
ctrl = MACB_BIT(TX_USED);
- desc = &queue->tx_ring[entry];
+ desc = macb_tx_desc(queue, entry);
desc->ctrl = ctrl;
if (lso_ctrl) {
@@ -1358,7 +1414,7 @@ static unsigned int macb_tx_map(struct macb *bp,
i--;
entry = macb_tx_ring_wrap(bp, i);
tx_skb = &queue->tx_skb[entry];
- desc = &queue->tx_ring[entry];
+ desc = macb_tx_desc(queue, entry);
ctrl = (u32)tx_skb->size;
if (eof) {
@@ -1379,7 +1435,7 @@ static unsigned int macb_tx_map(struct macb *bp,
ctrl |= MACB_BF(MSS_MFS, mss_mfs);
/* Set TX buffer descriptor */
- macb_set_addr(desc, tx_skb->mapping);
+ macb_set_addr(bp, desc, tx_skb->mapping);
/* desc->addr must be visible to hardware before clearing
* 'TX_USED' bit in desc->ctrl.
*/
@@ -1586,11 +1642,9 @@ static void gem_free_rx_buffers(struct macb *bp)
if (!skb)
continue;
- desc = &bp->rx_ring[i];
- addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
-#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- addr |= ((u64)(desc->addrh) << 32);
-#endif
+ desc = macb_rx_desc(bp, i);
+ addr = macb_get_addr(bp, desc);
+
dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
@@ -1711,15 +1765,17 @@ out_err:
static void gem_init_rings(struct macb *bp)
{
struct macb_queue *queue;
+ struct macb_dma_desc *desc = NULL;
unsigned int q;
int i;
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
for (i = 0; i < bp->tx_ring_size; i++) {
- queue->tx_ring[i].addr = 0;
- queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ desc = macb_tx_desc(queue, i);
+ macb_set_addr(bp, desc, 0);
+ desc->ctrl = MACB_BIT(TX_USED);
}
- queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+ desc->ctrl |= MACB_BIT(TX_WRAP);
queue->tx_head = 0;
queue->tx_tail = 0;
}
@@ -1733,16 +1789,18 @@ static void gem_init_rings(struct macb *bp)
static void macb_init_rings(struct macb *bp)
{
int i;
+ struct macb_dma_desc *desc = NULL;
macb_init_rx_ring(bp);
for (i = 0; i < bp->tx_ring_size; i++) {
- bp->queues[0].tx_ring[i].addr = 0;
- bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
+ desc = macb_tx_desc(&bp->queues[0], i);
+ macb_set_addr(bp, desc, 0);
+ desc->ctrl = MACB_BIT(TX_USED);
}
bp->queues[0].tx_head = 0;
bp->queues[0].tx_tail = 0;
- bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
+ desc->ctrl |= MACB_BIT(TX_WRAP);
}
static void macb_reset_hw(struct macb *bp)
@@ -1863,7 +1921,8 @@ static void macb_configure_dma(struct macb *bp)
dmacfg &= ~GEM_BIT(TXCOEN);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- dmacfg |= GEM_BIT(ADDR64);
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ dmacfg |= GEM_BIT(ADDR64);
#endif
netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
dmacfg);
@@ -1910,14 +1969,16 @@ static void macb_init_hw(struct macb *bp)
macb_configure_dma(bp);
/* Initialize TX and RX buffers */
- macb_writel(bp, RBQP, (u32)(bp->rx_ring_dma));
+ macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- macb_writel(bp, RBQPH, (u32)(bp->rx_ring_dma >> 32));
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
#endif
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue_writel(queue, TBQP, (u32)(queue->tx_ring_dma));
+ queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- queue_writel(queue, TBQPH, (u32)(queue->tx_ring_dma >> 32));
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
/* Enable interrupts */
@@ -2627,7 +2688,8 @@ static int macb_init(struct platform_device *pdev)
queue->IMR = GEM_IMR(hw_q - 1);
queue->TBQP = GEM_TBQP(hw_q - 1);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- queue->TBQPH = GEM_TBQPH(hw_q -1);
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ queue->TBQPH = GEM_TBQPH(hw_q - 1);
#endif
} else {
/* queue0 uses legacy registers */
@@ -2637,7 +2699,8 @@ static int macb_init(struct platform_device *pdev)
queue->IMR = MACB_IMR;
queue->TBQP = MACB_TBQP;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- queue->TBQPH = MACB_TBQPH;
+ if (bp->hw_dma_cap == HW_DMA_CAP_64B)
+ queue->TBQPH = MACB_TBQPH;
#endif
}
@@ -2730,13 +2793,14 @@ static int macb_init(struct platform_device *pdev)
static int at91ether_start(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
+ struct macb_dma_desc *desc;
dma_addr_t addr;
u32 ctl;
int i;
lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
(AT91ETHER_MAX_RX_DESCR *
- sizeof(struct macb_dma_desc)),
+ macb_dma_desc_get_size(lp)),
&lp->rx_ring_dma, GFP_KERNEL);
if (!lp->rx_ring)
return -ENOMEM;
@@ -2748,7 +2812,7 @@ static int at91ether_start(struct net_device *dev)
if (!lp->rx_buffers) {
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
- sizeof(struct macb_dma_desc),
+ macb_dma_desc_get_size(lp),
lp->rx_ring, lp->rx_ring_dma);
lp->rx_ring = NULL;
return -ENOMEM;
@@ -2756,13 +2820,14 @@ static int at91ether_start(struct net_device *dev)
addr = lp->rx_buffers_dma;
for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
- lp->rx_ring[i].addr = addr;
- lp->rx_ring[i].ctrl = 0;
+ desc = macb_rx_desc(lp, i);
+ macb_set_addr(lp, desc, addr);
+ desc->ctrl = 0;
addr += AT91ETHER_MAX_RBUFF_SZ;
}
/* Set the Wrap bit on the last descriptor */
- lp->rx_ring[AT91ETHER_MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
+ desc->addr |= MACB_BIT(RX_WRAP);
/* Reset buffer index */
lp->rx_tail = 0;
@@ -2834,7 +2899,7 @@ static int at91ether_close(struct net_device *dev)
dma_free_coherent(&lp->pdev->dev,
AT91ETHER_MAX_RX_DESCR *
- sizeof(struct macb_dma_desc),
+ macb_dma_desc_get_size(lp),
lp->rx_ring, lp->rx_ring_dma);
lp->rx_ring = NULL;
@@ -2885,13 +2950,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void at91ether_rx(struct net_device *dev)
{
struct macb *lp = netdev_priv(dev);
+ struct macb_dma_desc *desc;
unsigned char *p_recv;
struct sk_buff *skb;
unsigned int pktlen;
- while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
+ desc = macb_rx_desc(lp, lp->rx_tail);
+ while (desc->addr & MACB_BIT(RX_USED)) {
p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
- pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
+ pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
skb = netdev_alloc_skb(dev, pktlen + 2);
if (skb) {
skb_reserve(skb, 2);
@@ -2905,17 +2972,19 @@ static void at91ether_rx(struct net_device *dev)
lp->stats.rx_dropped++;
}
- if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
+ if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
lp->stats.multicast++;
/* reset ownership bit */
- lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
+ desc->addr &= ~MACB_BIT(RX_USED);
/* wrap after last buffer */
if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
lp->rx_tail = 0;
else
lp->rx_tail++;
+
+ desc = macb_rx_desc(lp, lp->rx_tail);
}
}
@@ -3211,8 +3280,11 @@ static int macb_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- if (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1)) > GEM_DBW32)
+ if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
+ bp->hw_dma_cap = HW_DMA_CAP_64B;
+ } else
+ bp->hw_dma_cap = HW_DMA_CAP_32B;
#endif
spin_lock_init(&bp->lock);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d67adad67be1..fc8550a5d47f 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -385,6 +385,8 @@
/* Bitfields in DCFG6. */
#define GEM_PBUF_LSO_OFFSET 27
#define GEM_PBUF_LSO_SIZE 1
+#define GEM_DAW64_OFFSET 23
+#define GEM_DAW64_SIZE 1
/* Constants for CLK */
#define MACB_CLK_DIV8 0
@@ -487,12 +489,20 @@
struct macb_dma_desc {
u32 addr;
u32 ctrl;
+};
+
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- u32 addrh;
- u32 resvd;
-#endif
+enum macb_hw_dma_cap {
+ HW_DMA_CAP_32B,
+ HW_DMA_CAP_64B,
};
+struct macb_dma_desc_64 {
+ u32 addrh;
+ u32 resvd;
+};
+#endif
+
/* DMA descriptor bitfields */
#define MACB_RX_USED_OFFSET 0
#define MACB_RX_USED_SIZE 1
@@ -874,6 +884,10 @@ struct macb {
unsigned int jumbo_max_len;
u32 wol;
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ enum macb_hw_dma_cap hw_dma_cap;
+#endif
};
static inline bool macb_is_gem(struct macb *bp)
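Making the extra words a separate trailing struct, rather than conditional members, lets one binary drive both descriptor layouts at run time; macb_64b_desc() in macb.c simply points just past the 2-word base. One ring slot on 64-bit capable GEM:

	/*   +0x0  u32 addr    \  struct macb_dma_desc
	 *   +0x4  u32 ctrl    /
	 *   +0x8  u32 addrh   \  struct macb_dma_desc_64 (trailer)
	 *   +0xc  u32 resvd   /
	 */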
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
index 67befedef709..578c7f8f11bf 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_xcv.c
@@ -116,8 +116,7 @@ void xcv_setup_link(bool link_up, int link_speed)
int speed = 2;
if (!xcv) {
- dev_err(&xcv->pdev->dev,
- "XCV init not done, probe may have failed\n");
+ pr_err("XCV init not done, probe may have failed\n");
return;
}
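The removed error path dereferenced xcv in the very branch that had just established xcv is NULL; device-prefixed logging must wait until a valid device is in hand, hence the fall back to pr_err(). The anti-pattern, for emphasis:

	if (!ptr) {
		dev_err(&ptr->pdev->dev, "...");	/* NULL dereference */
		return;
	}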
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 1a7f8ad7b9c6..cd49a54c538d 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -362,8 +362,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
status = -EPERM;
goto err;
}
-done:
+
+ /* Remember currently programmed MAC */
ether_addr_copy(adapter->dev_mac, addr->sa_data);
+done:
ether_addr_copy(netdev->dev_addr, addr->sa_data);
dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
return 0;
@@ -3618,8 +3620,10 @@ static void be_disable_if_filters(struct be_adapter *adapter)
{
/* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
- check_privilege(adapter, BE_PRIV_FILTMGMT))
+ check_privilege(adapter, BE_PRIV_FILTMGMT)) {
be_dev_mac_del(adapter, adapter->pmac_id[0]);
+ eth_zero_addr(adapter->dev_mac);
+ }
be_clear_uc_list(adapter);
be_clear_mc_list(adapter);
@@ -3773,12 +3777,27 @@ static int be_enable_if_filters(struct be_adapter *adapter)
if (status)
return status;
- /* Don't add MAC on BE3 VFs without FILTMGMT privilege */
- if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
- check_privilege(adapter, BE_PRIV_FILTMGMT)) {
+ /* This condition is normally true, as ->dev_mac starts out zeroed.
+ * But on BE3 VFs the initial MAC is pre-programmed by the PF and
+ * a subsequent be_dev_mac_add() can fail (after a fresh boot).
+ */
+ if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
+ int old_pmac_id = -1;
+
+ /* Remember old programmed MAC if any - can happen on BE3 VF */
+ if (!is_zero_ether_addr(adapter->dev_mac))
+ old_pmac_id = adapter->pmac_id[0];
+
status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
if (status)
return status;
+
+ /* Delete the old programmed MAC as we have successfully
+ * programmed a new one.
+ */
+ if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
+ be_dev_mac_del(adapter, old_pmac_id);
+
ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
}
@@ -4552,6 +4571,10 @@ static int be_mac_setup(struct be_adapter *adapter)
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+
+ /* Initial MAC for BE3 VFs is already programmed by PF */
+ if (BEx_chip(adapter) && be_virtfn(adapter))
+ memcpy(adapter->dev_mac, mac, ETH_ALEN);
}
return 0;
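Taken together, the be_main.c hunks make adapter->dev_mac a faithful mirror of what is actually programmed in hardware; a sketch of the lifecycle they implement:

	/* be_mac_setup():          dev_mac = permanent MAC   (BE3 VF only)
	 * be_mac_addr_set():       dev_mac = new MAC         (after success)
	 * be_enable_if_filters():  if dev_mac != netdev MAC:
	 *                              add new, then delete old pmac entry
	 * be_disable_if_filters(): delete MAC, zero dev_mac
	 */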
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index c1b671667920..957bfc220978 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2010,8 +2010,8 @@ static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
if (!rxb->page)
continue;
- dma_unmap_single(rx_queue->dev, rxb->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_page(rx_queue->dev, rxb->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(rxb->page);
rxb->page = NULL;
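Mapping and unmapping primitives must match: these receive buffers are pages mapped with dma_map_page(), and unmapping them through dma_unmap_single() is flagged as an API violation by CONFIG_DMA_API_DEBUG. The pairing rule:

	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	/* ... hardware fills the buffer ... */
	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);	/* never _single */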
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index c7e939945259..53daa6ca5d83 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -158,7 +158,7 @@ static int mlx4_reset_slave(struct mlx4_dev *dev)
return -ETIMEDOUT;
}
-static int mlx4_comm_internal_err(u32 slave_read)
+int mlx4_comm_internal_err(u32 slave_read)
{
return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
(slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 0e8b7c44931f..8258d08acd8c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -222,6 +222,18 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
return;
mlx4_stop_catas_poll(dev);
+ if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
+ mlx4_is_slave(dev)) {
+ /* In mlx4_remove_one on a VF */
+ u32 slave_read =
+ swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
+
+ if (mlx4_comm_internal_err(slave_read)) {
+ mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
+ __func__);
+ mlx4_enter_error_state(dev->persist);
+ }
+ }
mutex_lock(&intf_mutex);
list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 88ee7d8a5923..086920b615af 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1220,6 +1220,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type);
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
+int mlx4_comm_internal_err(u32 slave_read);
int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
enum mlx4_port_type *type);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 3797cc7c1288..caa837e5e2b9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -1728,7 +1728,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
if (cmd->cmdif_rev > CMD_IF_REV) {
dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
CMD_IF_REV, cmd->cmdif_rev);
- err = -ENOTSUPP;
+ err = -EOPNOTSUPP;
goto err_free_page;
}
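These errno conversions are not cosmetic: ENOTSUPP is a kernel-internal value with no userspace definition, so returning it from an ioctl or ethtool operation surfaces as the puzzling "Unknown error 524"; EOPNOTSUPP is the errno userspace actually decodes as "Operation not supported".

	/* include/linux/errno.h:             #define ENOTSUPP   524  (in-kernel) */
	/* include/uapi/asm-generic/errno.h:  #define EOPNOTSUPP  95  (uapi)     */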
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 951dbd58594d..d5ecb8f53fd4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -791,7 +791,8 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+ enum mlx5e_traffic_types tt);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
@@ -863,12 +864,12 @@ static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
#else
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index f0b460f47f29..0523ed47f597 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -89,7 +89,7 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
int i;
if (!MLX5_CAP_GEN(priv->mdev, ets))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
for (i = 0; i < ets->ets_cap; i++) {
@@ -236,7 +236,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
int err;
if (!MLX5_CAP_GEN(priv->mdev, ets))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
err = mlx5e_dbcnl_validate_ets(netdev, ets);
if (err)
@@ -402,7 +402,7 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
struct mlx5_core_dev *mdev = priv->mdev;
struct ieee_ets ets;
struct ieee_pfc pfc;
- int err = -ENOTSUPP;
+ int err = -EOPNOTSUPP;
int i;
if (!MLX5_CAP_GEN(mdev, ets))
@@ -511,6 +511,11 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ if (!MLX5_CAP_GEN(priv->mdev, ets)) {
+ netdev_err(netdev, "%s, ets is not supported\n", __func__);
+ return;
+ }
+
if (priority >= CEE_DCBX_MAX_PRIO) {
netdev_err(netdev,
"%s, priority is out of range\n", __func__);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 5197817e4b2f..bb67863aa361 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -595,7 +595,7 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec;
coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
@@ -620,7 +620,7 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
int i;
if (!MLX5_CAP_GEN(mdev, cq_moderation))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
mutex_lock(&priv->state_lock);
@@ -980,15 +980,18 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
{
- struct mlx5_core_dev *mdev = priv->mdev;
void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
- int i;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int ctxlen = MLX5_ST_SZ_BYTES(tirc);
+ int tt;
MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
- mlx5e_build_tir_ctx_hash(tirc, priv);
- for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
- mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ memset(tirc, 0, ctxlen);
+ mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
+ mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
+ }
}
static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -996,6 +999,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
{
struct mlx5e_priv *priv = netdev_priv(dev);
int inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+ bool hash_changed = false;
void *in;
if ((hfunc != ETH_RSS_HASH_NO_CHANGE) &&
@@ -1017,14 +1021,21 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
}
- if (key)
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ hfunc != priv->params.rss_hfunc) {
+ priv->params.rss_hfunc = hfunc;
+ hash_changed = true;
+ }
+
+ if (key) {
memcpy(priv->params.toeplitz_hash_key, key,
sizeof(priv->params.toeplitz_hash_key));
+ hash_changed = hash_changed ||
+ priv->params.rss_hfunc == ETH_RSS_HASH_TOP;
+ }
- if (hfunc != ETH_RSS_HASH_NO_CHANGE)
- priv->params.rss_hfunc = hfunc;
-
- mlx5e_modify_tirs_hash(priv, in, inlen);
+ if (hash_changed)
+ mlx5e_modify_tirs_hash(priv, in, inlen);
mutex_unlock(&priv->state_lock);
@@ -1296,7 +1307,7 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
u32 mlx5_wol_mode;
if (!wol_supported)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (wol->wolopts & ~wol_supported)
return -EINVAL;
@@ -1426,7 +1437,7 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (!rx_mode_changed)
return 0;
@@ -1452,7 +1463,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
bool reset;
if (!MLX5_CAP_GEN(mdev, cqe_compression))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 1fe80de5d68f..a0e5a69402b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -1089,7 +1089,7 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
MLX5_FLOW_NAMESPACE_KERNEL);
if (!priv->fs.ns)
- return -EINVAL;
+ return -EOPNOTSUPP;
err = mlx5e_arfs_create_tables(priv);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index d088effd7160..f33f72d0237c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -92,7 +92,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_ETHTOOL);
if (!ns)
- return ERR_PTR(-ENOTSUPP);
+ return ERR_PTR(-EOPNOTSUPP);
table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
flow_table_properties_nic_receive.log_max_ft_size)),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2b7dd315020c..f14ca3385fdd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2022,8 +2022,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
}
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+ enum mlx5e_traffic_types tt)
{
+ void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_L4_SPORT |\
+ MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
MLX5_SET(tirc, tirc, rx_hash_fn,
mlx5e_rx_hash_fn(priv->params.rss_hfunc));
if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
@@ -2035,6 +2050,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
memcpy(rss_key, priv->params.toeplitz_hash_key, len);
}
+
+ switch (tt) {
+ case MLX5E_TT_IPV4_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV6_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV4_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV6_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_L4PORTS);
+ break;
+
+ case MLX5E_TT_IPV4_IPSEC_AH:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV6_IPSEC_AH:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV4_IPSEC_ESP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV6_IPSEC_ESP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP_IPSEC_SPI);
+ break;
+
+ case MLX5E_TT_IPV4:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+
+ case MLX5E_TT_IPV6:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+ default:
+ WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
+ }
}
static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
@@ -2404,110 +2501,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
enum mlx5e_traffic_types tt)
{
- void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-
MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
-#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP |\
- MLX5_HASH_FIELD_SEL_L4_SPORT |\
- MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
- MLX5_HASH_FIELD_SEL_DST_IP |\
- MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-
mlx5e_build_tir_ctx_lro(tirc, priv);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
- mlx5e_build_tir_ctx_hash(tirc, priv);
-
- switch (tt) {
- case MLX5E_TT_IPV4_TCP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_TCP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV6_TCP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_TCP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV4_UDP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_UDP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV6_UDP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
- MLX5_L4_PROT_TYPE_UDP);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_L4PORTS);
- break;
-
- case MLX5E_TT_IPV4_IPSEC_AH:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
-
- case MLX5E_TT_IPV6_IPSEC_AH:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
-
- case MLX5E_TT_IPV4_IPSEC_ESP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
-
- case MLX5E_TT_IPV6_IPSEC_ESP:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP_IPSEC_SPI);
- break;
-
- case MLX5E_TT_IPV4:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV4);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP);
- break;
-
- case MLX5E_TT_IPV6:
- MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
- MLX5_L3_PROT_TYPE_IPV6);
- MLX5_SET(rx_hash_field_select, hfso, selected_fields,
- MLX5_HASH_IP);
- break;
- default:
- WARN_ONCE(true,
- "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
- }
+ mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
}
static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
@@ -3331,7 +3331,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
!MLX5_CAP_GEN(mdev, nic_flow_table) ||
!MLX5_CAP_ETH(mdev, csum_cap) ||
@@ -3343,7 +3343,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
< 3) {
mlx5_core_warn(mdev,
"Not creating net device, some required device capabilities are missing\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 46bef6a26a8c..c5282b6aba8b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -663,6 +663,7 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
__be32 *saddr,
int *out_ttl)
{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct rtable *rt;
struct neighbour *n = NULL;
int ttl;
@@ -677,12 +678,11 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
#else
return -EOPNOTSUPP;
#endif
-
- if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
- pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
- ip_rt_put(rt);
- return -EOPNOTSUPP;
- }
+ /* if the egress device isn't on the same HW e-switch, we use the uplink */
+ if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
+ *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ else
+ *out_dev = rt->dst.dev;
ttl = ip4_dst_hoplimit(&rt->dst);
n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
@@ -693,7 +693,6 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
*out_n = n;
*saddr = fl4->saddr;
*out_ttl = ttl;
- *out_dev = rt->dst.dev;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index f14d9c9ba773..d0c8bf014453 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -133,7 +133,7 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
!MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
vport, vlan, qos, set_flags);
@@ -353,7 +353,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!root_ns) {
esw_warn(dev, "Failed to get FDB flow namespace\n");
- return -ENOMEM;
+ return -EOPNOTSUPP;
}
flow_group_in = mlx5_vzalloc(inlen);
@@ -962,7 +962,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
- return -EIO;
+ return -EOPNOTSUPP;
}
flow_group_in = mlx5_vzalloc(inlen);
@@ -1079,7 +1079,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
if (!root_ns) {
esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
- return -EIO;
+ return -EOPNOTSUPP;
}
flow_group_in = mlx5_vzalloc(inlen);
@@ -1630,7 +1630,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 03293ed1cc22..595f7c7383b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -166,7 +166,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
return 0;
out_notsupp:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
@@ -424,6 +424,7 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
if (!root_ns) {
esw_warn(dev, "Failed to get FDB flow namespace\n");
+ err = -EOPNOTSUPP;
goto ns_err;
}
@@ -535,7 +536,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
if (!ns) {
esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
- return -ENOMEM;
+ return -EOPNOTSUPP;
}
ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
@@ -655,7 +656,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
if (err1)
- esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+ esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
if (mlx5_eswitch_inline_mode_get(esw,
@@ -674,9 +675,14 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
int vport;
int err;
+ /* disable PF RoCE so missed packets don't go through RoCE steering */
+ mlx5_dev_list_lock();
+ mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_dev_list_unlock();
+
err = esw_create_offloads_fdb_table(esw, nvports);
if (err)
- return err;
+ goto create_fdb_err;
err = esw_create_offloads_table(esw);
if (err)
@@ -696,11 +702,6 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
goto err_reps;
}
- /* disable PF RoCE so missed packets don't go through RoCE steering */
- mlx5_dev_list_lock();
- mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- mlx5_dev_list_unlock();
-
return 0;
err_reps:
@@ -717,6 +718,13 @@ create_fg_err:
create_ft_err:
esw_destroy_offloads_fdb_table(esw);
+
+create_fdb_err:
+ /* enable back PF RoCE */
+ mlx5_dev_list_lock();
+ mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_dev_list_unlock();
+
return err;
}
@@ -724,11 +732,6 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
- /* enable back PF RoCE */
- mlx5_dev_list_lock();
- mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- mlx5_dev_list_unlock();
-
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
if (err) {
@@ -738,6 +741,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
}
+ /* enable back PF RoCE */
+ mlx5_dev_list_lock();
+ mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+ mlx5_dev_list_unlock();
+
return err;
}
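The reordering in esw_offloads_init()/esw_offloads_stop() brackets the whole offloads-mode window with the PF RoCE toggle: RoCE is removed before the offloads FDB table is created, so packets that miss the FDB cannot be steered into RoCE, and it is restored both on every error-unwind path and when offloads mode is stopped. A sketch of the toggle as a pair of helpers (the wrapper names are hypothetical; the locked add/remove calls are the ones used in the hunks):

    static void esw_pf_roce_disable(struct mlx5_eswitch *esw)
    {
        mlx5_dev_list_lock();
        mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
        mlx5_dev_list_unlock();
    }

    static void esw_pf_roce_enable(struct mlx5_eswitch *esw)
    {
        mlx5_dev_list_lock();
        mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
        mlx5_dev_list_unlock();
    }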
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index c4478ecd8056..b53fc85a2375 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -322,7 +322,7 @@ int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
flow_table_properties_nic_receive.
flow_modify_en);
if (!atomic_mod_cap)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
opmod = 1;
return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 0ac7a2fc916c..6346a8f5883b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1822,7 +1822,7 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
struct mlx5_flow_table *ft;
ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
- if (!ns)
+ if (WARN_ON(!ns))
return -EINVAL;
ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL, 0);
if (IS_ERR(ft)) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d01e9f21d469..3c315eb8d270 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -807,7 +807,7 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
return 0;
}
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index d2ec9d232a70..fd12e0a377a5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -620,7 +620,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
u32 out[MLX5_ST_SZ_DW(qtct_reg)];
if (!MLX5_CAP_GEN(mdev, ets))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
return mlx5_core_access_reg(mdev, in, inlen, out, sizeof(out),
MLX5_REG_QETCR, 0, 1);
@@ -632,7 +632,7 @@ static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
u32 in[MLX5_ST_SZ_DW(qtct_reg)];
if (!MLX5_CAP_GEN(mdev, ets))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
memset(in, 0, sizeof(in));
return mlx5_core_access_reg(mdev, in, sizeof(in), out, outlen,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 269e4401c342..7129c30a2ab4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -532,7 +532,7 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
if (!MLX5_CAP_GEN(mdev, vport_group_manager))
return -EACCES;
if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
in = mlx5_vzalloc(inlen);
if (!in)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index be3c91c7f211..5484fd726d5a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -305,8 +305,12 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
{
void __iomem *ioaddr = hw->pcsr;
u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+ u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
int ret = 0;
+ /* Discard masked bits */
+ intr_status &= ~intr_mask;
+
/* Not used events (e.g. MMC interrupts) are not handled. */
if ((intr_status & GMAC_INT_STATUS_MMCTIS))
x->mmc_tx_irq_n++;
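The dwmac1000 fix filters the raw interrupt status against the interrupt mask before acting on it, so events the driver has deliberately masked in hardware are no longer treated as pending work. A minimal sketch of the read-and-filter pattern (register names from the hunk; in this IP a set bit in GMAC_INT_MASK disables the corresponding event):

    u32 status = readl(ioaddr + GMAC_INT_STATUS);
    u32 mask   = readl(ioaddr + GMAC_INT_MASK);

    status &= ~mask;    /* keep only events that are actually enabled */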
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 5a1cc089acb7..86e5749226ef 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1295,6 +1295,9 @@ void netvsc_channel_cb(void *context)
ndev = hv_get_drvdata(device);
buffer = get_per_channel_state(channel);
+ /* commit_rd_index() -> hv_signal_on_read() needs this. */
+ init_cached_read_index(channel);
+
do {
desc = get_next_pkt_raw(channel);
if (desc != NULL) {
@@ -1347,6 +1350,9 @@ void netvsc_channel_cb(void *context)
bufferlen = bytes_recvd;
}
+
+ init_cached_read_index(channel);
+
} while (1);
if (bufferlen > NETVSC_PACKET_SIZE)
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index e55809c5beb7..6742070ca676 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1012,7 +1012,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id = PHY_ID_KSZ8795,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Micrel KSZ8795",
- .features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+ .features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.config_init = kszphy_config_init,
.config_aneg = ksz8873mll_config_aneg,
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index d02ca1491d16..8d3e53fac1da 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -91,7 +91,7 @@
#define IWL8000_FW_PRE "iwlwifi-8000C-"
#define IWL8000_MODULE_FIRMWARE(api) \
- IWL8000_FW_PRE "-" __stringify(api) ".ucode"
+ IWL8000_FW_PRE __stringify(api) ".ucode"
#define IWL8265_FW_PRE "iwlwifi-8265-"
#define IWL8265_MODULE_FIRMWARE(api) \
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 636c8b03e318..09e9e2e3ed04 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -1164,9 +1164,10 @@ static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
.frame_limit = IWL_FRAME_LIMIT,
};
- /* Make sure reserved queue is still marked as such (or allocated) */
- mvm->queue_info[mvm_sta->reserved_queue].status =
- IWL_MVM_QUEUE_RESERVED;
+ /* Make sure reserved queue is still marked as such (if allocated) */
+ if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
+ mvm->queue_info[mvm_sta->reserved_queue].status =
+ IWL_MVM_QUEUE_RESERVED;
for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
index 63a051be832e..bec7d9c46087 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
@@ -843,8 +843,10 @@ static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
return;
IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
- thermal_zone_device_unregister(mvm->tz_device.tzone);
- mvm->tz_device.tzone = NULL;
+ if (mvm->tz_device.tzone) {
+ thermal_zone_device_unregister(mvm->tz_device.tzone);
+ mvm->tz_device.tzone = NULL;
+ }
}
static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
@@ -853,8 +855,10 @@ static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
return;
IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
- thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
- mvm->cooling_dev.cdev = NULL;
+ if (mvm->cooling_dev.cdev) {
+ thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
+ mvm->cooling_dev.cdev = NULL;
+ }
}
#endif /* CONFIG_THERMAL */
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 17ac1dce3286..3dd8bcbb3011 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -532,25 +532,32 @@ static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link)
return NULL;
+
INIT_LIST_HEAD(&link->sibling);
INIT_LIST_HEAD(&link->children);
INIT_LIST_HEAD(&link->link);
link->pdev = pdev;
- if (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT) {
+
+ /*
+ * Root Ports and PCI/PCI-X to PCIe Bridges are roots of PCIe
+ * hierarchies.
+ */
+ if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
+ pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE) {
+ link->root = link;
+ } else {
struct pcie_link_state *parent;
+
parent = pdev->bus->parent->self->link_state;
if (!parent) {
kfree(link);
return NULL;
}
+
link->parent = parent;
+ link->root = link->parent->root;
list_add(&link->link, &parent->children);
}
- /* Setup a pointer to the root port link */
- if (!link->parent)
- link->root = link;
- else
- link->root = link->parent->root;
list_add(&link->sibling, &link_list);
pdev->link_state = link;
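The ASPM rework keys the root-of-hierarchy decision on the port type instead of on the absence of a parent link: both Root Ports and PCI/PCI-X to PCIe bridges start a PCIe hierarchy, so both point link->root at themselves, and every downstream link inherits the root through its parent. A condensed view of the resulting invariant (allocation and error handling omitted):

    if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
        pci_pcie_type(pdev) == PCI_EXP_TYPE_PCIE_BRIDGE)
        link->root = link;                  /* this link starts a hierarchy */
    else
        link->root = link->parent->root;    /* inherit the hierarchy root */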
diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c
index 09172043d589..c617ec49e9ed 100644
--- a/drivers/pinctrl/berlin/berlin-bg4ct.c
+++ b/drivers/pinctrl/berlin/berlin-bg4ct.c
@@ -217,7 +217,7 @@ static const struct berlin_desc_group berlin4ct_soc_pinctrl_groups[] = {
BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */
BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */
- BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */
+ BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */
BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18,
BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index c123488266ce..d94aef17348b 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -731,16 +731,23 @@ static void __iomem *byt_gpio_reg(struct byt_gpio *vg, unsigned int offset,
int reg)
{
struct byt_community *comm = byt_get_community(vg, offset);
- u32 reg_offset = 0;
+ u32 reg_offset;
if (!comm)
return NULL;
offset -= comm->pin_base;
- if (reg == BYT_INT_STAT_REG)
+ switch (reg) {
+ case BYT_INT_STAT_REG:
reg_offset = (offset / 32) * 4;
- else
+ break;
+ case BYT_DEBOUNCE_REG:
+ reg_offset = 0;
+ break;
+ default:
reg_offset = comm->pad_map[offset] * 16;
+ break;
+ }
return comm->reg_base + reg_offset + reg;
}
@@ -1243,10 +1250,12 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
debounce = readl(db_reg);
debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
+ if (arg)
+ conf |= BYT_DEBOUNCE_EN;
+ else
+ conf &= ~BYT_DEBOUNCE_EN;
+
switch (arg) {
- case 0:
- conf &= BYT_DEBOUNCE_EN;
- break;
case 375:
debounce |= BYT_DEBOUNCE_PULSE_375US;
break;
@@ -1269,7 +1278,9 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
debounce |= BYT_DEBOUNCE_PULSE_24MS;
break;
default:
- ret = -EINVAL;
+ if (arg)
+ ret = -EINVAL;
+ break;
}
if (!ret)
@@ -1612,7 +1623,9 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
continue;
}
+ raw_spin_lock(&vg->lock);
pending = readl(reg);
+ raw_spin_unlock(&vg->lock);
for_each_set_bit(pin, &pending, 32) {
virq = irq_find_mapping(vg->chip.irqdomain, base + pin);
generic_handle_irq(virq);
diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c
index b21896126f76..4d4ef42a39b5 100644
--- a/drivers/pinctrl/intel/pinctrl-merrifield.c
+++ b/drivers/pinctrl/intel/pinctrl-merrifield.c
@@ -794,6 +794,9 @@ static int mrfld_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
unsigned int i;
int ret;
+ if (!mrfld_buf_available(mp, pin))
+ return -ENOTSUPP;
+
for (i = 0; i < nconfigs; i++) {
switch (pinconf_to_config_param(configs[i])) {
case PIN_CONFIG_BIAS_DISABLE:
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index 0eb51e33cb1b..207a8de4e1ed 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -564,8 +564,7 @@ static int sunxi_pconf_group_set(struct pinctrl_dev *pctldev,
val = arg / 10 - 1;
break;
case PIN_CONFIG_BIAS_DISABLE:
- val = 0;
- break;
+ continue;
case PIN_CONFIG_BIAS_PULL_UP:
if (arg == 0)
return -EINVAL;
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
index e6a512ebeae2..a3ade9e4ef47 100644
--- a/drivers/regulator/axp20x-regulator.c
+++ b/drivers/regulator/axp20x-regulator.c
@@ -272,7 +272,7 @@ static const struct regulator_desc axp806_regulators[] = {
64, AXP806_DCDCD_V_CTRL, 0x3f, AXP806_PWR_OUT_CTRL1,
BIT(3)),
AXP_DESC(AXP806, DCDCE, "dcdce", "vine", 1100, 3400, 100,
- AXP806_DCDCB_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
+ AXP806_DCDCE_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(4)),
AXP_DESC(AXP806, ALDO1, "aldo1", "aldoin", 700, 3300, 100,
AXP806_ALDO1_V_CTRL, 0x1f, AXP806_PWR_OUT_CTRL1, BIT(5)),
AXP_DESC(AXP806, ALDO2, "aldo2", "aldoin", 700, 3400, 100,
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index a43b0e8a438d..988a7472c2ab 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -30,9 +30,6 @@
#include <linux/of_gpio.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/machine.h>
-#include <linux/acpi.h>
-#include <linux/property.h>
-#include <linux/gpio/consumer.h>
struct fixed_voltage_data {
struct regulator_desc desc;
@@ -97,44 +94,6 @@ of_get_fixed_voltage_config(struct device *dev,
return config;
}
-/**
- * acpi_get_fixed_voltage_config - extract fixed_voltage_config structure info
- * @dev: device requesting for fixed_voltage_config
- * @desc: regulator description
- *
- * Populates fixed_voltage_config structure by extracting data through ACPI
- * interface, returns a pointer to the populated structure of NULL if memory
- * alloc fails.
- */
-static struct fixed_voltage_config *
-acpi_get_fixed_voltage_config(struct device *dev,
- const struct regulator_desc *desc)
-{
- struct fixed_voltage_config *config;
- const char *supply_name;
- struct gpio_desc *gpiod;
- int ret;
-
- config = devm_kzalloc(dev, sizeof(*config), GFP_KERNEL);
- if (!config)
- return ERR_PTR(-ENOMEM);
-
- ret = device_property_read_string(dev, "supply-name", &supply_name);
- if (!ret)
- config->supply_name = supply_name;
-
- gpiod = gpiod_get(dev, "gpio", GPIOD_ASIS);
- if (IS_ERR(gpiod))
- return ERR_PTR(-ENODEV);
-
- config->gpio = desc_to_gpio(gpiod);
- config->enable_high = device_property_read_bool(dev,
- "enable-active-high");
- gpiod_put(gpiod);
-
- return config;
-}
-
static struct regulator_ops fixed_voltage_ops = {
};
@@ -155,11 +114,6 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
&drvdata->desc);
if (IS_ERR(config))
return PTR_ERR(config);
- } else if (ACPI_HANDLE(&pdev->dev)) {
- config = acpi_get_fixed_voltage_config(&pdev->dev,
- &drvdata->desc);
- if (IS_ERR(config))
- return PTR_ERR(config);
} else {
config = dev_get_platdata(&pdev->dev);
}
diff --git a/drivers/regulator/twl6030-regulator.c b/drivers/regulator/twl6030-regulator.c
index 4864b9d742c0..716191046a70 100644
--- a/drivers/regulator/twl6030-regulator.c
+++ b/drivers/regulator/twl6030-regulator.c
@@ -452,7 +452,7 @@ static int twl6030smps_map_voltage(struct regulator_dev *rdev, int min_uV,
vsel = 62;
else if ((min_uV > 1800000) && (min_uV <= 1900000))
vsel = 61;
- else if ((min_uV > 1350000) && (min_uV <= 1800000))
+ else if ((min_uV > 1500000) && (min_uV <= 1800000))
vsel = 60;
else if ((min_uV > 1350000) && (min_uV <= 1500000))
vsel = 59;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index c93c5a8fba32..5dc673dc9487 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1551,12 +1551,15 @@ config RTC_DRV_MPC5121
will be called rtc-mpc5121.
config RTC_DRV_JZ4740
- bool "Ingenic JZ4740 SoC"
+ tristate "Ingenic JZ4740 SoC"
depends on MACH_INGENIC || COMPILE_TEST
help
If you say yes here you get support for the Ingenic JZ47xx SoCs RTC
controllers.
+ This driver can also be built as a module. If so, the module
+ will be called rtc-jz4740.
+
config RTC_DRV_LPC24XX
tristate "NXP RTC for LPC178x/18xx/408x/43xx"
depends on ARCH_LPC18XX || COMPILE_TEST
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index 72918c1ba092..64989afffa3d 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -17,6 +17,7 @@
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
@@ -294,7 +295,7 @@ static void jz4740_rtc_power_off(void)
JZ_REG_RTC_RESET_COUNTER, reset_counter_ticks);
jz4740_rtc_poweroff(dev_for_power_off);
- machine_halt();
+ kernel_halt();
}
static const struct of_device_id jz4740_rtc_of_match[] = {
@@ -302,6 +303,7 @@ static const struct of_device_id jz4740_rtc_of_match[] = {
{ .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 },
{},
};
+MODULE_DEVICE_TABLE(of, jz4740_rtc_of_match);
static int jz4740_rtc_probe(struct platform_device *pdev)
{
@@ -429,6 +431,7 @@ static const struct platform_device_id jz4740_rtc_ids[] = {
{ "jz4780-rtc", ID_JZ4780 },
{}
};
+MODULE_DEVICE_TABLE(platform, jz4740_rtc_ids);
static struct platform_driver jz4740_rtc_driver = {
.probe = jz4740_rtc_probe,
@@ -440,4 +443,9 @@ static struct platform_driver jz4740_rtc_driver = {
.id_table = jz4740_rtc_ids,
};
-builtin_platform_driver(jz4740_rtc_driver);
+module_platform_driver(jz4740_rtc_driver);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RTC driver for the JZ4740 SoC\n");
+MODULE_ALIAS("platform:jz4740-rtc");
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index ec91bd07f00a..c680d7641311 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -534,7 +534,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
{
struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+ unsigned long flags;
int req_size;
+ int ret;
BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
@@ -562,8 +564,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
req_size = sizeof(cmd->req.cmd);
}
- if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
+ ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
+ if (ret == -EIO) {
+ cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
+ spin_lock_irqsave(&req_vq->vq_lock, flags);
+ virtscsi_complete_cmd(vscsi, cmd);
+ spin_unlock_irqrestore(&req_vq->vq_lock, flags);
+ } else if (ret != 0) {
return SCSI_MLQUEUE_HOST_BUSY;
+ }
return 0;
}
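The virtio_scsi change separates a submission failure that will never succeed (-EIO from the kick, e.g. a broken virtqueue or a vanished target) from a transiently full queue: the former is completed immediately with VIRTIO_SCSI_S_BAD_TARGET so the midlayer stops retrying, while every other failure still returns SCSI_MLQUEUE_HOST_BUSY for a later retry. A condensed view of the new dispatch, names from the hunk:

    ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
    if (ret == -EIO) {                       /* fail the command permanently */
        cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
        spin_lock_irqsave(&req_vq->vq_lock, flags);
        virtscsi_complete_cmd(vscsi, cmd);
        spin_unlock_irqrestore(&req_vq->vq_lock, flags);
    } else if (ret != 0) {                   /* queue full: ask for a retry */
        return SCSI_MLQUEUE_HOST_BUSY;
    }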
diff --git a/drivers/staging/greybus/timesync_platform.c b/drivers/staging/greybus/timesync_platform.c
index 113f3d6c4b3a..27f75b17679b 100644
--- a/drivers/staging/greybus/timesync_platform.c
+++ b/drivers/staging/greybus/timesync_platform.c
@@ -45,12 +45,18 @@ u32 gb_timesync_platform_get_clock_rate(void)
int gb_timesync_platform_lock_bus(struct gb_timesync_svc *pdata)
{
+ if (!arche_platform_change_state_cb)
+ return 0;
+
return arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_TIME_SYNC,
pdata);
}
void gb_timesync_platform_unlock_bus(void)
{
+ if (!arche_platform_change_state_cb)
+ return;
+
arche_platform_change_state_cb(ARCHE_PLATFORM_STATE_ACTIVE, NULL);
}
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index d2e50a27140c..24f9f98968a5 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -37,6 +37,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* CBM - Flash disk */
{ USB_DEVICE(0x0204, 0x6025), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* WORLDE easy key (easykey.25) MIDI controller */
+ { USB_DEVICE(0x0218, 0x0401), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* HP 5300/5370C scanner */
{ USB_DEVICE(0x03f0, 0x0701), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 5490fc51638e..fd80c1b9c823 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2269,6 +2269,8 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
if (len < sizeof(*d) || h->interface >= ffs->interfaces_count)
return -EINVAL;
length = le32_to_cpu(d->dwSize);
+ if (len < length)
+ return -EINVAL;
type = le32_to_cpu(d->dwPropertyDataType);
if (type < USB_EXT_PROP_UNICODE ||
type > USB_EXT_PROP_UNICODE_MULTI) {
@@ -2277,6 +2279,11 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
return -EINVAL;
}
pnl = le16_to_cpu(d->wPropertyNameLength);
+ if (length < 14 + pnl) {
+ pr_vdebug("invalid os descriptor length: %d pnl:%d (descriptor %d)\n",
+ length, pnl, type);
+ return -EINVAL;
+ }
pdl = le32_to_cpu(*(u32 *)((u8 *)data + 10 + pnl));
if (length != 14 + pnl + pdl) {
pr_vdebug("invalid os descriptor length: %d pnl:%d pdl:%d (descriptor %d)\n",
@@ -2363,6 +2370,9 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
}
}
if (flags & (1 << i)) {
+ if (len < 4) {
+ goto error;
+ }
os_descs_count = get_unaligned_le32(data);
data += 4;
len -= 4;
@@ -2435,7 +2445,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
ENTER();
- if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
+ if (unlikely(len < 16 ||
+ get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
get_unaligned_le32(data + 4) != len))
goto error;
str_count = get_unaligned_le32(data + 8);
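The f_fs hunks are bounds checks on a userspace-supplied descriptor blob: every length field read out of the buffer is validated against the bytes actually available before it is used to index further reads. A condensed sketch of the pattern, field names from the hunks (the 4-byte property-data length lives at offset 10 + pnl, hence the 14 + pnl floor):

    length = le32_to_cpu(d->dwSize);
    if (len < length)        /* descriptor claims more than the buffer holds */
        return -EINVAL;

    pnl = le16_to_cpu(d->wPropertyNameLength);
    if (length < 14 + pnl)   /* property-data length would overrun it */
        return -EINVAL;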
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index fca288bbc800..772f15821242 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -594,11 +594,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
| MUSB_PORT_STAT_RESUME;
musb->rh_timer = jiffies
+ msecs_to_jiffies(USB_RESUME_TIMEOUT);
- musb->need_finish_resume = 1;
-
musb->xceiv->otg->state = OTG_STATE_A_HOST;
musb->is_active = 1;
musb_host_resume_root_hub(musb);
+ schedule_delayed_work(&musb->finish_resume_work,
+ msecs_to_jiffies(USB_RESUME_TIMEOUT));
break;
case OTG_STATE_B_WAIT_ACON:
musb->xceiv->otg->state = OTG_STATE_B_PERIPHERAL;
@@ -1925,6 +1925,14 @@ static void musb_pm_runtime_check_session(struct musb *musb)
static void musb_irq_work(struct work_struct *data)
{
struct musb *musb = container_of(data, struct musb, irq_work.work);
+ int error;
+
+ error = pm_runtime_get_sync(musb->controller);
+ if (error < 0) {
+ dev_err(musb->controller, "Could not enable: %i\n", error);
+
+ return;
+ }
musb_pm_runtime_check_session(musb);
@@ -1932,6 +1940,9 @@ static void musb_irq_work(struct work_struct *data)
musb->xceiv_old_state = musb->xceiv->otg->state;
sysfs_notify(&musb->controller->kobj, NULL, "mode");
}
+
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
}
static void musb_recover_from_babble(struct musb *musb)
@@ -2710,11 +2721,6 @@ static int musb_resume(struct device *dev)
mask = MUSB_DEVCTL_BDEVICE | MUSB_DEVCTL_FSDEV | MUSB_DEVCTL_LSDEV;
if ((devctl & mask) != (musb->context.devctl & mask))
musb->port1_status = 0;
- if (musb->need_finish_resume) {
- musb->need_finish_resume = 0;
- schedule_delayed_work(&musb->finish_resume_work,
- msecs_to_jiffies(USB_RESUME_TIMEOUT));
- }
/*
* The USB HUB code expects the device to be in RPM_ACTIVE once it came
@@ -2766,12 +2772,6 @@ static int musb_runtime_resume(struct device *dev)
musb_restore_context(musb);
- if (musb->need_finish_resume) {
- musb->need_finish_resume = 0;
- schedule_delayed_work(&musb->finish_resume_work,
- msecs_to_jiffies(USB_RESUME_TIMEOUT));
- }
-
spin_lock_irqsave(&musb->lock, flags);
error = musb_run_resume_work(musb);
if (error)
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index ade902ea1221..ce5a18c98c6d 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -410,7 +410,6 @@ struct musb {
/* is_suspended means USB B_PERIPHERAL suspend */
unsigned is_suspended:1;
- unsigned need_finish_resume :1;
/* may_wakeup means remote wakeup is enabled */
unsigned may_wakeup:1;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 7ce31a4c7e7f..42cc72e54c05 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -2007,6 +2007,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_6802, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD300, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d, 0xff, 0xff, 0xff) }, /* HP lt2523 (Novatel E371) */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 46fca6b75846..1db4b61bdf7b 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -49,6 +49,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
+ { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
{ USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID_UCSGT) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index e3b7af8adfb7..09d9be88209e 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -27,6 +27,7 @@
#define ATEN_VENDOR_ID 0x0557
#define ATEN_VENDOR_ID2 0x0547
#define ATEN_PRODUCT_ID 0x2008
+#define ATEN_PRODUCT_ID2 0x2118
#define IODATA_VENDOR_ID 0x04bb
#define IODATA_PRODUCT_ID 0x0a03
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 1bc6089b9008..696458db7e3c 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -124,6 +124,7 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */
{USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */
{USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
+ {USB_DEVICE(0x413c, 0x81a6)}, /* Dell DW5570 QDL (MC8805) */
{USB_DEVICE(0x1199, 0x68a4)}, /* Sierra Wireless QDL */
{USB_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */
{USB_DEVICE(0x1199, 0x68a8)}, /* Sierra Wireless QDL */
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 128d10282d16..7690e5bf3cf1 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1123,12 +1123,11 @@ static long tce_iommu_ioctl(void *iommu_data,
mutex_lock(&container->lock);
ret = tce_iommu_create_default_window(container);
- if (ret)
- return ret;
-
- ret = tce_iommu_create_window(container, create.page_shift,
- create.window_size, create.levels,
- &create.start_addr);
+ if (!ret)
+ ret = tce_iommu_create_window(container,
+ create.page_shift,
+ create.window_size, create.levels,
+ &create.start_addr);
mutex_unlock(&container->lock);
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index b3cc33fa6d26..bd6f293c4ebd 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -38,6 +38,8 @@
#include <linux/workqueue.h>
#include <linux/mdev.h>
#include <linux/notifier.h>
+#include <linux/dma-iommu.h>
+#include <linux/irqdomain.h>
#define DRIVER_VERSION "0.2"
#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
@@ -1179,6 +1181,28 @@ static struct vfio_group *find_iommu_group(struct vfio_domain *domain,
return NULL;
}
+static bool vfio_iommu_has_resv_msi(struct iommu_group *group,
+ phys_addr_t *base)
+{
+ struct list_head group_resv_regions;
+ struct iommu_resv_region *region, *next;
+ bool ret = false;
+
+ INIT_LIST_HEAD(&group_resv_regions);
+ iommu_get_group_resv_regions(group, &group_resv_regions);
+ list_for_each_entry(region, &group_resv_regions, list) {
+ if (region->type & IOMMU_RESV_MSI) {
+ *base = region->start;
+ ret = true;
+ goto out;
+ }
+ }
+out:
+ list_for_each_entry_safe(region, next, &group_resv_regions, list)
+ kfree(region);
+ return ret;
+}
+
static int vfio_iommu_type1_attach_group(void *iommu_data,
struct iommu_group *iommu_group)
{
@@ -1187,6 +1211,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
struct vfio_domain *domain, *d;
struct bus_type *bus = NULL, *mdev_bus;
int ret;
+ bool resv_msi, msi_remap;
+ phys_addr_t resv_msi_base;
mutex_lock(&iommu->lock);
@@ -1256,11 +1282,15 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
if (ret)
goto out_domain;
+ resv_msi = vfio_iommu_has_resv_msi(iommu_group, &resv_msi_base);
+
INIT_LIST_HEAD(&domain->group_list);
list_add(&group->next, &domain->group_list);
- if (!allow_unsafe_interrupts &&
- !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) {
+ msi_remap = resv_msi ? irq_domain_check_msi_remap() :
+ iommu_capable(bus, IOMMU_CAP_INTR_REMAP);
+
+ if (!allow_unsafe_interrupts && !msi_remap) {
pr_warn("%s: No interrupt remapping support. Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
__func__);
ret = -EPERM;
@@ -1302,6 +1332,12 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
if (ret)
goto out_detach;
+ if (resv_msi) {
+ ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
+ if (ret)
+ goto out_detach;
+ }
+
list_add(&domain->next, &iommu->domain_list);
mutex_unlock(&iommu->lock);
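The vfio type1 change accounts for platforms (e.g. ARM with a GIC ITS) where the MSI doorbell is a reserved IOMMU region rather than something covered by an IOMMU capability: when such a region exists, interrupt-remapping safety is asserted by the MSI controller's IRQ domain, and an MSI cookie must be installed so the doorbell can be mapped. The two-sided check, condensed from the hunks (error paths simplified):

    msi_remap = resv_msi ? irq_domain_check_msi_remap() :
                           iommu_capable(bus, IOMMU_CAP_INTR_REMAP);

    if (!allow_unsafe_interrupts && !msi_remap)
        return -EPERM;       /* no interrupt isolation, refuse to attach */

    if (resv_msi)
        ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);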
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index d6432603880c..8f99fe08de02 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -130,14 +130,14 @@ static long vhost_get_vring_endian(struct vhost_virtqueue *vq, u32 idx,
static void vhost_init_is_le(struct vhost_virtqueue *vq)
{
- if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
- vq->is_le = true;
+ vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
+ || virtio_legacy_is_little_endian();
}
#endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
static void vhost_reset_is_le(struct vhost_virtqueue *vq)
{
- vq->is_le = virtio_legacy_is_little_endian();
+ vhost_init_is_le(vq);
}
struct vhost_flush_struct {
@@ -1714,10 +1714,8 @@ int vhost_vq_init_access(struct vhost_virtqueue *vq)
int r;
bool is_le = vq->is_le;
- if (!vq->private_data) {
- vhost_reset_is_le(vq);
+ if (!vq->private_data)
return 0;
- }
vhost_init_is_le(vq);
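The vhost fix makes vhost_init_is_le() compute the complete predicate and turns vhost_reset_is_le() into a caller of it, removing a second, subtly different copy of the logic. The invariant, as one expression from the hunk:

    vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
                || virtio_legacy_is_little_endian();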
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 7e38ed79c3fc..409aeaa49246 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -159,13 +159,6 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
if (xen_domain())
return true;
- /*
- * On ARM-based machines, the DMA ops will do the right thing,
- * so always use them with legacy devices.
- */
- if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
- return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
-
return false;
}