author      Dmitry Torokhov <dmitry.torokhov@gmail.com>    2016-03-04 11:32:40 -0800
committer   Dmitry Torokhov <dmitry.torokhov@gmail.com>    2016-03-04 11:32:40 -0800
commit      52cdce8adb635746f53306ab2599ca64902bb1dc (patch)
tree        0b61680c30eb150796cf01186f15b4845cdee6dd /drivers
parent      3cd47869431d7402d0613cf0f7fbb392f2b97565 (diff)
parent      7dde4e74744772efdc85d7ed13495c7b6a0d881b (diff)
download    blackbird-op-linux-52cdce8adb635746f53306ab2599ca64902bb1dc.tar.gz
            blackbird-op-linux-52cdce8adb635746f53306ab2599ca64902bb1dc.zip
Merge branch 'rotary-encoder' into next
Bring in updates to the rotary encoder driver, switching it away from legacy platform data and over to generic device properties, and adding support for encoders using more than 2 GPIOs.
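For context on the property-based approach mentioned above: instead of consuming a board-specific platform_data structure, a driver reworked this way can query its configuration through the generic device properties API (which covers both DT and ACPI firmware nodes) and request all of its GPIOs as one array. The snippet below is only a minimal sketch of that pattern, not code taken from this merge; the property name, default value, and probe helper are illustrative assumptions.

#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/property.h>

/*
 * Illustrative sketch only (not the driver code in this merge): read
 * configuration via generic device properties and request the encoder
 * GPIOs as an array, which is what allows more than 2 GPIOs.
 */
static int rotary_encoder_probe_sketch(struct device *dev)
{
        struct gpio_descs *gpios;
        u32 steps;

        /* Property name and default value are assumptions for illustration. */
        if (device_property_read_u32(dev, "rotary-encoder,steps", &steps))
                steps = 24;

        /* Grabs however many GPIOs the firmware node describes. */
        gpios = devm_gpiod_get_array(dev, NULL, GPIOD_IN);
        if (IS_ERR(gpios))
                return PTR_ERR(gpios);

        dev_info(dev, "%u steps, %u GPIOs\n", steps, gpios->ndescs);
        return 0;
}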
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/device_sysfs.c  2
-rw-r--r--  drivers/acpi/nfit.c  2
-rw-r--r--  drivers/acpi/processor_driver.c  3
-rw-r--r--  drivers/base/power/domain.c  33
-rw-r--r--  drivers/block/null_blk.c  15
-rw-r--r--  drivers/block/xen-blkback/blkback.c  15
-rw-r--r--  drivers/block/xen-blkback/common.h  8
-rw-r--r--  drivers/bus/sunxi-rsb.c  8
-rw-r--r--  drivers/connector/connector.c  11
-rw-r--r--  drivers/cpufreq/Kconfig.arm  2
-rw-r--r--  drivers/cpufreq/intel_pstate.c  2
-rw-r--r--  drivers/cpufreq/scpi-cpufreq.c  2
-rw-r--r--  drivers/dma/at_xdmac.c  9
-rw-r--r--  drivers/dma/bcm2835-dma.c  78
-rw-r--r--  drivers/dma/edma.c  53
-rw-r--r--  drivers/dma/xgene-dma.c  4
-rw-r--r--  drivers/firmware/dmi_scan.c  6
-rw-r--r--  drivers/gpio/gpio-ath79.c  2
-rw-r--r--  drivers/gpio/gpio-generic.c  4
-rw-r--r--  drivers/gpio/gpiolib.c  8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  63
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c  3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_crtc.c  3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  28
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  111
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c  1
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c  1
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  82
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h  1
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c  7
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c  5
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c  1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c  1
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c  5
-rw-r--r--  drivers/hwmon/Kconfig  1
-rw-r--r--  drivers/hwmon/tmp102.c  16
-rw-r--r--  drivers/i2c/busses/i2c-davinci.c  11
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.c  6
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.h  1
-rw-r--r--  drivers/i2c/busses/i2c-designware-platdrv.c  16
-rw-r--r--  drivers/i2c/busses/i2c-imx.c  4
-rw-r--r--  drivers/i2c/busses/i2c-mv64xxx.c  27
-rw-r--r--  drivers/i2c/busses/i2c-rcar.c  4
-rw-r--r--  drivers/i2c/busses/i2c-rk3x.c  2
-rw-r--r--  drivers/i2c/busses/i2c-st.c  2
-rw-r--r--  drivers/infiniband/core/cma.c  16
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c  2
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma.h  10
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c  49
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.h  4
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c  57
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_sli.h  49
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c  2
-rw-r--r--  drivers/input/joystick/db9.c  1
-rw-r--r--  drivers/input/joystick/gamecon.c  1
-rw-r--r--  drivers/input/joystick/turbografx.c  1
-rw-r--r--  drivers/input/joystick/walkera0701.c  1
-rw-r--r--  drivers/input/misc/arizona-haptics.c  3
-rw-r--r--  drivers/input/misc/rotary_encoder.c  403
-rw-r--r--  drivers/input/mouse/elan_i2c_core.c  3
-rw-r--r--  drivers/input/serio/parkbd.c  1
-rw-r--r--  drivers/input/tablet/aiptek.c  9
-rw-r--r--  drivers/input/touchscreen/atmel_mxt_ts.c  34
-rw-r--r--  drivers/input/touchscreen/elants_i2c.c  21
-rw-r--r--  drivers/iommu/amd_iommu_v2.c  20
-rw-r--r--  drivers/iommu/dma-iommu.c  11
-rw-r--r--  drivers/iommu/intel-iommu.c  4
-rw-r--r--  drivers/iommu/intel-svm.c  20
-rw-r--r--  drivers/iommu/iommu.c  2
-rw-r--r--  drivers/iommu/ipmmu-vmsa.c  2
-rw-r--r--  drivers/isdn/gigaset/ser-gigaset.c  23
-rw-r--r--  drivers/isdn/hardware/mISDN/mISDNipac.c  7
-rw-r--r--  drivers/lightnvm/gennvm.c  2
-rw-r--r--  drivers/md/md.c  33
-rw-r--r--  drivers/md/md.h  8
-rw-r--r--  drivers/md/raid10.c  4
-rw-r--r--  drivers/media/pci/ivtv/ivtv-driver.c  4
-rw-r--r--  drivers/media/usb/airspy/airspy.c  2
-rw-r--r--  drivers/media/usb/hackrf/hackrf.c  13
-rw-r--r--  drivers/memory/fsl_ifc.c  1
-rw-r--r--  drivers/mtd/mtdcore.c  26
-rw-r--r--  drivers/mtd/ofpart.c  12
-rw-r--r--  drivers/mtd/spi-nor/spi-nor.c  10
-rw-r--r--  drivers/mtd/ubi/debug.c  2
-rw-r--r--  drivers/mtd/ubi/io.c  2
-rw-r--r--  drivers/mtd/ubi/wl.c  53
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c  4
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c  38
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.h  4
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c  7
-rw-r--r--  drivers/net/ethernet/aurora/Kconfig  1
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c  22
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c  46
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h  6
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c  2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c  39
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c  4
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h  2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c  8
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.c  36
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.h  4
-rw-r--r--  drivers/net/ethernet/ezchip/nps_enet.c  30
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-fcc.c  2
-rw-r--r--  drivers/net/ethernet/freescale/fsl_pq_mdio.c  2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c  8
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h  1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c  49
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h  11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c  6
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c  11
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.c  6
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c  10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  3
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c  52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_clock.c  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_main.c  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c  5
-rw-r--r--  drivers/net/ethernet/natsemi/natsemi.c  12
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h  3
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c  53
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c  33
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.h  15
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c  56
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h  4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp.h  8
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c  63
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c  5
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c  6
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c  5
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c  5
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c  5
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c  80
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h  58
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c  24
-rw-r--r--  drivers/net/ethernet/sfc/efx.h  5
-rw-r--r--  drivers/net/ethernet/sfc/farch.c  2
-rw-r--r--  drivers/net/ethernet/sfc/txc43128_phy.c  2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c  6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c  9
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c  63
-rw-r--r--  drivers/net/geneve.c  12
-rw-r--r--  drivers/net/hamradio/6pack.c  12
-rw-r--r--  drivers/net/hamradio/mkiss.c  9
-rw-r--r--  drivers/net/phy/mdio-mux.c  7
-rw-r--r--  drivers/net/phy/micrel.c  13
-rw-r--r--  drivers/net/ppp/pppoe.c  14
-rw-r--r--  drivers/net/ppp/pptp.c  6
-rw-r--r--  drivers/net/usb/cdc_mbim.c  28
-rw-r--r--  drivers/net/usb/cdc_ncm.c  59
-rw-r--r--  drivers/net/usb/qmi_wwan.c  1
-rw-r--r--  drivers/net/usb/r8152.c  31
-rw-r--r--  drivers/net/veth.c  6
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c  8
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_int.h  4
-rw-r--r--  drivers/net/vrf.c  10
-rw-r--r--  drivers/net/vxlan.c  75
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-7000.c  49
-rw-r--r--  drivers/net/wireless/iwlwifi/mvm/sta.c  15
-rw-r--r--  drivers/net/xen-netback/netback.c  34
-rw-r--r--  drivers/nvme/host/pci.c  20
-rw-r--r--  drivers/pci/host/Kconfig  1
-rw-r--r--  drivers/pci/host/pcie-hisi.c  4
-rw-r--r--  drivers/phy/Kconfig  1
-rw-r--r--  drivers/phy/phy-bcm-cygnus-pcie.c  16
-rw-r--r--  drivers/phy/phy-berlin-sata.c  20
-rw-r--r--  drivers/phy/phy-brcmstb-sata.c  17
-rw-r--r--  drivers/phy/phy-core.c  21
-rw-r--r--  drivers/phy/phy-miphy28lp.c  16
-rw-r--r--  drivers/phy/phy-miphy365x.c  16
-rw-r--r--  drivers/phy/phy-mt65xx-usb3.c  20
-rw-r--r--  drivers/phy/phy-rockchip-usb.c  17
-rw-r--r--  drivers/pinctrl/bcm/pinctrl-bcm2835.c  13
-rw-r--r--  drivers/pinctrl/freescale/pinctrl-vf610.c  2
-rw-r--r--  drivers/pinctrl/intel/pinctrl-broxton.c  1
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.c  41
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.h  3
-rw-r--r--  drivers/pinctrl/intel/pinctrl-sunrisepoint.c  1
-rw-r--r--  drivers/powercap/intel_rapl.c  7
-rw-r--r--  drivers/rtc/rtc-da9063.c  19
-rw-r--r--  drivers/rtc/rtc-rk808.c  48
-rw-r--r--  drivers/s390/crypto/ap_bus.c  4
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c  62
-rw-r--r--  drivers/scsi/scsi_pm.c  20
-rw-r--r--  drivers/scsi/sd.c  9
-rw-r--r--  drivers/scsi/ses.c  30
-rw-r--r--  drivers/spi/spi-fsl-dspi.c  12
-rw-r--r--  drivers/spi/spi.c  2
-rw-r--r--  drivers/spi/spidev.c  2
-rw-r--r--  drivers/staging/android/ion/ion_chunk_heap.c  4
-rw-r--r--  drivers/tty/n_tty.c  22
-rw-r--r--  drivers/tty/serial/8250/8250_uniphier.c  8
-rw-r--r--  drivers/tty/serial/earlycon.c  2
-rw-r--r--  drivers/tty/serial/sh-sci.c  2
-rw-r--r--  drivers/tty/serial/sunhv.c  12
-rw-r--r--  drivers/tty/tty_buffer.c  2
-rw-r--r--  drivers/usb/core/hub.c  22
-rw-r--r--  drivers/usb/serial/ipaq.c  3
-rw-r--r--  drivers/video/fbdev/fsl-diu-fb.c  13
-rw-r--r--  drivers/video/fbdev/omap2/dss/venc.c  12
-rw-r--r--  drivers/xen/events/events_fifo.c  23
-rw-r--r--  drivers/xen/xen-pciback/pciback.h  1
-rw-r--r--  drivers/xen/xen-pciback/pciback_ops.c  75
-rw-r--r--  drivers/xen/xen-pciback/xenbus.c  4
-rw-r--r--  drivers/xen/xen-scsiback.c  2
207 files changed, 2255 insertions, 1357 deletions
diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
index 707cf6213bc2..b9afb47db7ed 100644
--- a/drivers/acpi/device_sysfs.c
+++ b/drivers/acpi/device_sysfs.c
@@ -104,7 +104,7 @@ static void acpi_expose_nondev_subnodes(struct kobject *kobj,
init_completion(&dn->kobj_done);
ret = kobject_init_and_add(&dn->kobj, &acpi_data_node_ktype,
- kobj, dn->name);
+ kobj, "%s", dn->name);
if (ret)
acpi_handle_err(dn->handle, "Failed to expose (%d)\n", ret);
else
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index e7ed39bab97d..aa45d4802707 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -1810,7 +1810,7 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
if (!dev->driver) {
/* dev->driver may be null if we're being removed */
dev_dbg(dev, "%s: no driver found for dev\n", __func__);
- return;
+ goto out_unlock;
}
if (!acpi_desc) {
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index f4e02ae93f58..11154a330f07 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -200,7 +200,8 @@ static int acpi_pss_perf_init(struct acpi_processor *pr,
goto err_remove_sysfs_thermal;
}
- sysfs_remove_link(&pr->cdev->device.kobj, "device");
+ return 0;
+
err_remove_sysfs_thermal:
sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
err_thermal_unregister:
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 167418e73445..65f50eccd49b 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -390,6 +390,7 @@ static int pm_genpd_runtime_suspend(struct device *dev)
struct generic_pm_domain *genpd;
bool (*stop_ok)(struct device *__dev);
struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+ bool runtime_pm = pm_runtime_enabled(dev);
ktime_t time_start;
s64 elapsed_ns;
int ret;
@@ -400,12 +401,19 @@ static int pm_genpd_runtime_suspend(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
+ /*
+ * A runtime PM centric subsystem/driver may re-use the runtime PM
+ * callbacks for other purposes than runtime PM. In those scenarios
+ * runtime PM is disabled. Under these circumstances, we shall skip
+ * validating/measuring the PM QoS latency.
+ */
stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
- if (stop_ok && !stop_ok(dev))
+ if (runtime_pm && stop_ok && !stop_ok(dev))
return -EBUSY;
/* Measure suspend latency. */
- time_start = ktime_get();
+ if (runtime_pm)
+ time_start = ktime_get();
ret = genpd_save_dev(genpd, dev);
if (ret)
@@ -418,13 +426,15 @@ static int pm_genpd_runtime_suspend(struct device *dev)
}
/* Update suspend latency value if the measured time exceeds it. */
- elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
- if (elapsed_ns > td->suspend_latency_ns) {
- td->suspend_latency_ns = elapsed_ns;
- dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
- elapsed_ns);
- genpd->max_off_time_changed = true;
- td->constraint_changed = true;
+ if (runtime_pm) {
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns > td->suspend_latency_ns) {
+ td->suspend_latency_ns = elapsed_ns;
+ dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
+ elapsed_ns);
+ genpd->max_off_time_changed = true;
+ td->constraint_changed = true;
+ }
}
/*
@@ -453,6 +463,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
{
struct generic_pm_domain *genpd;
struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+ bool runtime_pm = pm_runtime_enabled(dev);
ktime_t time_start;
s64 elapsed_ns;
int ret;
@@ -479,14 +490,14 @@ static int pm_genpd_runtime_resume(struct device *dev)
out:
/* Measure resume latency. */
- if (timed)
+ if (timed && runtime_pm)
time_start = ktime_get();
genpd_start_dev(genpd, dev);
genpd_restore_dev(genpd, dev);
/* Update resume latency value if the measured time exceeds it. */
- if (timed) {
+ if (timed && runtime_pm) {
elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
if (elapsed_ns > td->resume_latency_ns) {
td->resume_latency_ns = elapsed_ns;
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 8162475d96b5..09e3c0d87ecc 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -219,6 +219,9 @@ static void end_cmd(struct nullb_cmd *cmd)
{
struct request_queue *q = NULL;
+ if (cmd->rq)
+ q = cmd->rq->q;
+
switch (queue_mode) {
case NULL_Q_MQ:
blk_mq_end_request(cmd->rq, 0);
@@ -229,23 +232,19 @@ static void end_cmd(struct nullb_cmd *cmd)
break;
case NULL_Q_BIO:
bio_endio(cmd->bio);
- goto free_cmd;
+ break;
}
- if (cmd->rq)
- q = cmd->rq->q;
+ free_cmd(cmd);
/* Restart queue if needed, as we are freeing a tag */
- if (q && !q->mq_ops && blk_queue_stopped(q)) {
+ if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
unsigned long flags;
spin_lock_irqsave(q->queue_lock, flags);
- if (blk_queue_stopped(q))
- blk_start_queue(q);
+ blk_start_queue_async(q);
spin_unlock_irqrestore(q->queue_lock, flags);
}
-free_cmd:
- free_cmd(cmd);
}
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index f9099940c272..41fb1a917b17 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -950,6 +950,8 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
goto unmap;
for (n = 0, i = 0; n < nseg; n++) {
+ uint8_t first_sect, last_sect;
+
if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
/* Map indirect segments */
if (segments)
@@ -957,15 +959,18 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
}
i = n % SEGS_PER_INDIRECT_FRAME;
+
pending_req->segments[n]->gref = segments[i].gref;
- seg[n].nsec = segments[i].last_sect -
- segments[i].first_sect + 1;
- seg[n].offset = (segments[i].first_sect << 9);
- if ((segments[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
- (segments[i].last_sect < segments[i].first_sect)) {
+
+ first_sect = READ_ONCE(segments[i].first_sect);
+ last_sect = READ_ONCE(segments[i].last_sect);
+ if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
rc = -EINVAL;
goto unmap;
}
+
+ seg[n].nsec = last_sect - first_sect + 1;
+ seg[n].offset = first_sect << 9;
preq->nr_sects += seg[n].nsec;
}
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 68e87a037b99..c929ae22764c 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -408,8 +408,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
struct blkif_x86_32_request *src)
{
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
- dst->operation = src->operation;
- switch (src->operation) {
+ dst->operation = READ_ONCE(src->operation);
+ switch (dst->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
@@ -456,8 +456,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
struct blkif_x86_64_request *src)
{
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
- dst->operation = src->operation;
- switch (src->operation) {
+ dst->operation = READ_ONCE(src->operation);
+ switch (dst->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 846bc29c157d..25996e256110 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -342,13 +342,13 @@ static int sunxi_rsb_read(struct sunxi_rsb *rsb, u8 rtaddr, u8 addr,
ret = _sunxi_rsb_run_xfer(rsb);
if (ret)
- goto out;
+ goto unlock;
*buf = readl(rsb->regs + RSB_DATA);
+unlock:
mutex_unlock(&rsb->lock);
-out:
return ret;
}
@@ -527,9 +527,9 @@ static int sunxi_rsb_init_device_mode(struct sunxi_rsb *rsb)
*/
static const struct sunxi_rsb_addr_map sunxi_rsb_addr_maps[] = {
- { 0x3e3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */
+ { 0x3a3, 0x2d }, /* Primary PMIC: AXP223, AXP809, AXP81X, ... */
{ 0x745, 0x3a }, /* Secondary PMIC: AXP806, ... */
- { 0xe89, 0x45 }, /* Peripheral IC: AC100, ... */
+ { 0xe89, 0x4e }, /* Peripheral IC: AC100, ... */
};
static u8 sunxi_rsb_get_rtaddr(u16 hwaddr)
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index d7373ca69c99..25693b045371 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -179,26 +179,21 @@ static int cn_call_callback(struct sk_buff *skb)
*
* It checks skb, netlink header and msg sizes, and calls callback helper.
*/
-static void cn_rx_skb(struct sk_buff *__skb)
+static void cn_rx_skb(struct sk_buff *skb)
{
struct nlmsghdr *nlh;
- struct sk_buff *skb;
int len, err;
- skb = skb_get(__skb);
-
if (skb->len >= NLMSG_HDRLEN) {
nlh = nlmsg_hdr(skb);
len = nlmsg_len(nlh);
if (len < (int)sizeof(struct cn_msg) ||
skb->len < nlh->nlmsg_len ||
- len > CONNECTOR_MAX_MSG_SIZE) {
- kfree_skb(skb);
+ len > CONNECTOR_MAX_MSG_SIZE)
return;
- }
- err = cn_call_callback(skb);
+ err = cn_call_callback(skb_get(skb));
if (err < 0)
kfree_skb(skb);
}
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 235a1ba73d92..b1f8a73e5a94 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -226,7 +226,7 @@ config ARM_TEGRA20_CPUFREQ
config ARM_TEGRA124_CPUFREQ
tristate "Tegra124 CPUFreq support"
- depends on ARCH_TEGRA && CPUFREQ_DT
+ depends on ARCH_TEGRA && CPUFREQ_DT && REGULATOR
default y
help
This adds the CPUFreq driver support for Tegra124 SOCs.
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4d07cbd2b23c..98fb8821382d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1123,7 +1123,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits->max_sysfs_pct);
limits->max_perf_pct = max(limits->min_policy_pct,
limits->max_perf_pct);
- limits->max_perf = round_up(limits->max_perf, 8);
+ limits->max_perf = round_up(limits->max_perf, FRAC_BITS);
/* Make sure min_perf_pct <= max_perf_pct */
limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
index 2c3b16fd3a01..de5e89b2eaaa 100644
--- a/drivers/cpufreq/scpi-cpufreq.c
+++ b/drivers/cpufreq/scpi-cpufreq.c
@@ -31,7 +31,7 @@ static struct scpi_ops *scpi_ops;
static struct scpi_dvfs_info *scpi_get_dvfs_info(struct device *cpu_dev)
{
- u8 domain = topology_physical_package_id(cpu_dev->id);
+ int domain = topology_physical_package_id(cpu_dev->id);
if (domain < 0)
return ERR_PTR(-EINVAL);
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 7f039de143f0..370c661c7d7b 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -156,7 +156,7 @@
#define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */
#define AT_XDMAC_CC_WRIP_DONE (0x0 << 23)
#define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23)
-#define AT_XDMAC_CC_PERID(i) (0x7f & (h) << 24) /* Channel Peripheral Identifier */
+#define AT_XDMAC_CC_PERID(i) (0x7f & (i) << 24) /* Channel Peripheral Identifier */
#define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */
#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */
@@ -965,7 +965,9 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
NULL,
src_addr, dst_addr,
xt, xt->sgl);
- for (i = 0; i < xt->numf; i++)
+
+ /* Length of the block is (BLEN+1) microblocks. */
+ for (i = 0; i < xt->numf - 1; i++)
at_xdmac_increment_block_count(chan, first);
dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
@@ -1086,6 +1088,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
/* Check remaining length and change data width if needed. */
dwidth = at_xdmac_align_width(chan,
src_addr | dst_addr | xfer_size);
+ chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
ublen = xfer_size >> dwidth;
@@ -1333,7 +1336,7 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
* since we don't care about the stride anymore.
*/
if ((i == (sg_len - 1)) &&
- sg_dma_len(ppsg) == sg_dma_len(psg)) {
+ sg_dma_len(psg) == sg_dma_len(sg)) {
dev_dbg(chan2dev(chan),
"%s: desc 0x%p can be merged with desc 0x%p\n",
__func__, desc, pdesc);
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index c92d6a70ccf3..996c4b00d323 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -31,6 +31,7 @@
*/
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -62,6 +63,11 @@ struct bcm2835_dma_cb {
uint32_t pad[2];
};
+struct bcm2835_cb_entry {
+ struct bcm2835_dma_cb *cb;
+ dma_addr_t paddr;
+};
+
struct bcm2835_chan {
struct virt_dma_chan vc;
struct list_head node;
@@ -72,18 +78,18 @@ struct bcm2835_chan {
int ch;
struct bcm2835_desc *desc;
+ struct dma_pool *cb_pool;
void __iomem *chan_base;
int irq_number;
};
struct bcm2835_desc {
+ struct bcm2835_chan *c;
struct virt_dma_desc vd;
enum dma_transfer_direction dir;
- unsigned int control_block_size;
- struct bcm2835_dma_cb *control_block_base;
- dma_addr_t control_block_base_phys;
+ struct bcm2835_cb_entry *cb_list;
unsigned int frames;
size_t size;
@@ -143,10 +149,13 @@ static inline struct bcm2835_desc *to_bcm2835_dma_desc(
static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
{
struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
- dma_free_coherent(desc->vd.tx.chan->device->dev,
- desc->control_block_size,
- desc->control_block_base,
- desc->control_block_base_phys);
+ int i;
+
+ for (i = 0; i < desc->frames; i++)
+ dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
+ desc->cb_list[i].paddr);
+
+ kfree(desc->cb_list);
kfree(desc);
}
@@ -199,7 +208,7 @@ static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
c->desc = d = to_bcm2835_dma_desc(&vd->tx);
- writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR);
+ writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);
writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
}
@@ -232,9 +241,16 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
{
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+ struct device *dev = c->vc.chan.device->dev;
+
+ dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);
- dev_dbg(c->vc.chan.device->dev,
- "Allocating DMA channel %d\n", c->ch);
+ c->cb_pool = dma_pool_create(dev_name(dev), dev,
+ sizeof(struct bcm2835_dma_cb), 0, 0);
+ if (!c->cb_pool) {
+ dev_err(dev, "unable to allocate descriptor pool\n");
+ return -ENOMEM;
+ }
return request_irq(c->irq_number,
bcm2835_dma_callback, 0, "DMA IRQ", c);
@@ -246,6 +262,7 @@ static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)
vchan_free_chan_resources(&c->vc);
free_irq(c->irq_number, c);
+ dma_pool_destroy(c->cb_pool);
dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
}
@@ -261,8 +278,7 @@ static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
size_t size;
for (size = i = 0; i < d->frames; i++) {
- struct bcm2835_dma_cb *control_block =
- &d->control_block_base[i];
+ struct bcm2835_dma_cb *control_block = d->cb_list[i].cb;
size_t this_size = control_block->length;
dma_addr_t dma;
@@ -343,6 +359,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
dma_addr_t dev_addr;
unsigned int es, sync_type;
unsigned int frame;
+ int i;
/* Grab configuration */
if (!is_slave_direction(direction)) {
@@ -374,27 +391,31 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
if (!d)
return NULL;
+ d->c = c;
d->dir = direction;
d->frames = buf_len / period_len;
- /* Allocate memory for control blocks */
- d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb);
- d->control_block_base = dma_zalloc_coherent(chan->device->dev,
- d->control_block_size, &d->control_block_base_phys,
- GFP_NOWAIT);
-
- if (!d->control_block_base) {
+ d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_KERNEL);
+ if (!d->cb_list) {
kfree(d);
return NULL;
}
+ /* Allocate memory for control blocks */
+ for (i = 0; i < d->frames; i++) {
+ struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
+
+ cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC,
+ &cb_entry->paddr);
+ if (!cb_entry->cb)
+ goto error_cb;
+ }
/*
* Iterate over all frames, create a control block
* for each frame and link them together.
*/
for (frame = 0; frame < d->frames; frame++) {
- struct bcm2835_dma_cb *control_block =
- &d->control_block_base[frame];
+ struct bcm2835_dma_cb *control_block = d->cb_list[frame].cb;
/* Setup adresses */
if (d->dir == DMA_DEV_TO_MEM) {
@@ -428,12 +449,21 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
* This DMA engine driver currently only supports cyclic DMA.
* Therefore, wrap around at number of frames.
*/
- control_block->next = d->control_block_base_phys +
- sizeof(struct bcm2835_dma_cb)
- * ((frame + 1) % d->frames);
+ control_block->next = d->cb_list[((frame + 1) % d->frames)].paddr;
}
return vchan_tx_prep(&c->vc, &d->vd, flags);
+error_cb:
+ i--;
+ for (; i >= 0; i--) {
+ struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
+
+ dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr);
+ }
+
+ kfree(d->cb_list);
+ kfree(d);
+ return NULL;
}
static int bcm2835_dma_slave_config(struct dma_chan *chan,
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 0675e268d577..16fe773fb846 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1752,16 +1752,14 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
return ret;
}
-static bool edma_is_memcpy_channel(int ch_num, u16 *memcpy_channels)
+static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
{
- s16 *memcpy_ch = memcpy_channels;
-
if (!memcpy_channels)
return false;
- while (*memcpy_ch != -1) {
- if (*memcpy_ch == ch_num)
+ while (*memcpy_channels != -1) {
+ if (*memcpy_channels == ch_num)
return true;
- memcpy_ch++;
+ memcpy_channels++;
}
return false;
}
@@ -1775,7 +1773,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
{
struct dma_device *s_ddev = &ecc->dma_slave;
struct dma_device *m_ddev = NULL;
- s16 *memcpy_channels = ecc->info->memcpy_channels;
+ s32 *memcpy_channels = ecc->info->memcpy_channels;
int i, j;
dma_cap_zero(s_ddev->cap_mask);
@@ -1996,16 +1994,16 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
if (prop) {
const char pname[] = "ti,edma-memcpy-channels";
- size_t nelm = sz / sizeof(s16);
- s16 *memcpy_ch;
+ size_t nelm = sz / sizeof(s32);
+ s32 *memcpy_ch;
- memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s16),
+ memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
GFP_KERNEL);
if (!memcpy_ch)
return ERR_PTR(-ENOMEM);
- ret = of_property_read_u16_array(dev->of_node, pname,
- (u16 *)memcpy_ch, nelm);
+ ret = of_property_read_u32_array(dev->of_node, pname,
+ (u32 *)memcpy_ch, nelm);
if (ret)
return ERR_PTR(ret);
@@ -2017,31 +2015,50 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
&sz);
if (prop) {
const char pname[] = "ti,edma-reserved-slot-ranges";
+ u32 (*tmp)[2];
s16 (*rsv_slots)[2];
- size_t nelm = sz / sizeof(*rsv_slots);
+ size_t nelm = sz / sizeof(*tmp);
struct edma_rsv_info *rsv_info;
+ int i;
if (!nelm)
return info;
+ tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return ERR_PTR(-ENOMEM);
+
rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
- if (!rsv_info)
+ if (!rsv_info) {
+ kfree(tmp);
return ERR_PTR(-ENOMEM);
+ }
rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
GFP_KERNEL);
- if (!rsv_slots)
+ if (!rsv_slots) {
+ kfree(tmp);
return ERR_PTR(-ENOMEM);
+ }
- ret = of_property_read_u16_array(dev->of_node, pname,
- (u16 *)rsv_slots, nelm * 2);
- if (ret)
+ ret = of_property_read_u32_array(dev->of_node, pname,
+ (u32 *)tmp, nelm * 2);
+ if (ret) {
+ kfree(tmp);
return ERR_PTR(ret);
+ }
+ for (i = 0; i < nelm; i++) {
+ rsv_slots[i][0] = tmp[i][0];
+ rsv_slots[i][1] = tmp[i][1];
+ }
rsv_slots[nelm][0] = -1;
rsv_slots[nelm][1] = -1;
+
info->rsv = rsv_info;
info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;
+
+ kfree(tmp);
}
return info;
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 9dfa2b0fa5da..9cb93c5b655d 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -29,6 +29,7 @@
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of_device.h>
@@ -1610,6 +1611,7 @@ static int xgene_dma_request_irqs(struct xgene_dma *pdma)
/* Register DMA channel rx irq */
for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
chan = &pdma->chan[i];
+ irq_set_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
ret = devm_request_irq(chan->dev, chan->rx_irq,
xgene_dma_chan_ring_isr,
0, chan->name, chan);
@@ -1620,6 +1622,7 @@ static int xgene_dma_request_irqs(struct xgene_dma *pdma)
for (j = 0; j < i; j++) {
chan = &pdma->chan[i];
+ irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
devm_free_irq(chan->dev, chan->rx_irq, chan);
}
@@ -1640,6 +1643,7 @@ static void xgene_dma_free_irqs(struct xgene_dma *pdma)
for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
chan = &pdma->chan[i];
+ irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
devm_free_irq(chan->dev, chan->rx_irq, chan);
}
}
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index ac1ce4a73edf..0e08e665f715 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -521,6 +521,7 @@ static int __init dmi_present(const u8 *buf)
dmi_ver = smbios_ver;
else
dmi_ver = (buf[14] & 0xF0) << 4 | (buf[14] & 0x0F);
+ dmi_ver <<= 8;
dmi_num = get_unaligned_le16(buf + 12);
dmi_len = get_unaligned_le16(buf + 6);
dmi_base = get_unaligned_le32(buf + 8);
@@ -528,15 +529,14 @@ static int __init dmi_present(const u8 *buf)
if (dmi_walk_early(dmi_decode) == 0) {
if (smbios_ver) {
pr_info("SMBIOS %d.%d present.\n",
- dmi_ver >> 8, dmi_ver & 0xFF);
+ dmi_ver >> 16, (dmi_ver >> 8) & 0xFF);
} else {
smbios_entry_point_size = 15;
memcpy(smbios_entry_point, buf,
smbios_entry_point_size);
pr_info("Legacy DMI %d.%d present.\n",
- dmi_ver >> 8, dmi_ver & 0xFF);
+ dmi_ver >> 16, (dmi_ver >> 8) & 0xFF);
}
- dmi_ver <<= 8;
dmi_format_ids(dmi_ids_string, sizeof(dmi_ids_string));
printk(KERN_DEBUG "DMI: %s\n", dmi_ids_string);
return 0;
diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
index e5827a56ff3b..5eaea8b812cf 100644
--- a/drivers/gpio/gpio-ath79.c
+++ b/drivers/gpio/gpio-ath79.c
@@ -113,7 +113,7 @@ static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
__raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_CLEAR);
__raw_writel(
- __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & BIT(offset),
+ __raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & ~BIT(offset),
ctrl->base + AR71XX_GPIO_REG_OE);
spin_unlock_irqrestore(&ctrl->lock, flags);
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index bd5193c67a9c..88ae70ddb127 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -141,9 +141,9 @@ static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
unsigned long pinmask = bgc->pin2mask(bgc, gpio);
if (bgc->dir & pinmask)
- return bgc->read_reg(bgc->reg_set) & pinmask;
+ return !!(bgc->read_reg(bgc->reg_set) & pinmask);
else
- return bgc->read_reg(bgc->reg_dat) & pinmask;
+ return !!(bgc->read_reg(bgc->reg_dat) & pinmask);
}
static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index c8a1e884da74..a3ad3bab2942 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1279,7 +1279,13 @@ static int _gpiod_get_raw_value(const struct gpio_desc *desc)
chip = desc->chip;
offset = gpio_chip_hwgpio(desc);
value = chip->get ? chip->get(chip, offset) : -EIO;
- value = value < 0 ? value : !!value;
+ /*
+ * FIXME: fix all drivers to clamp to [0,1] or return negative,
+ * then change this to:
+ * value = value < 0 ? value : !!value;
+ * so we can properly propagate error codes.
+ */
+ value = !!value;
trace_gpio_value(desc_to_gpio(desc), 1, value);
return value;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 5a5f04d0902d..048cfe073dae 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1264,7 +1264,8 @@ struct amdgpu_cs_parser {
struct ww_acquire_ctx ticket;
/* user fence */
- struct amdgpu_user_fence uf;
+ struct amdgpu_user_fence uf;
+ struct amdgpu_bo_list_entry uf_entry;
};
struct amdgpu_job {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 4f352ec9dec4..25a3e2485cc2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -127,6 +127,37 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
return 0;
}
+static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
+ struct drm_amdgpu_cs_chunk_fence *fence_data)
+{
+ struct drm_gem_object *gobj;
+ uint32_t handle;
+
+ handle = fence_data->handle;
+ gobj = drm_gem_object_lookup(p->adev->ddev, p->filp,
+ fence_data->handle);
+ if (gobj == NULL)
+ return -EINVAL;
+
+ p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+ p->uf.offset = fence_data->offset;
+
+ if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) {
+ drm_gem_object_unreference_unlocked(gobj);
+ return -EINVAL;
+ }
+
+ p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo);
+ p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
+ p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+ p->uf_entry.priority = 0;
+ p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
+ p->uf_entry.tv.shared = true;
+
+ drm_gem_object_unreference_unlocked(gobj);
+ return 0;
+}
+
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
union drm_amdgpu_cs *cs = data;
@@ -207,28 +238,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
case AMDGPU_CHUNK_ID_FENCE:
size = sizeof(struct drm_amdgpu_cs_chunk_fence);
- if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) {
- uint32_t handle;
- struct drm_gem_object *gobj;
- struct drm_amdgpu_cs_chunk_fence *fence_data;
-
- fence_data = (void *)p->chunks[i].kdata;
- handle = fence_data->handle;
- gobj = drm_gem_object_lookup(p->adev->ddev,
- p->filp, handle);
- if (gobj == NULL) {
- ret = -EINVAL;
- goto free_partial_kdata;
- }
-
- p->uf.bo = gem_to_amdgpu_bo(gobj);
- amdgpu_bo_ref(p->uf.bo);
- drm_gem_object_unreference_unlocked(gobj);
- p->uf.offset = fence_data->offset;
- } else {
+ if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
ret = -EINVAL;
goto free_partial_kdata;
}
+
+ ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata);
+ if (ret)
+ goto free_partial_kdata;
+
break;
case AMDGPU_CHUNK_ID_DEPENDENCIES:
@@ -391,6 +409,9 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm,
&p->validated);
+ if (p->uf.bo)
+ list_add(&p->uf_entry.tv.head, &p->validated);
+
if (need_mmap_lock)
down_read(&current->mm->mmap_sem);
@@ -488,8 +509,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
for (i = 0; i < parser->num_ibs; i++)
amdgpu_ib_free(parser->adev, &parser->ibs[i]);
kfree(parser->ibs);
- if (parser->uf.bo)
- amdgpu_bo_unref(&parser->uf.bo);
+ amdgpu_bo_unref(&parser->uf.bo);
+ amdgpu_bo_unref(&parser->uf_entry.robj);
}
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index a18164f2f6d2..f8b5fcfa91a2 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -229,7 +229,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
mode_flags |= DRM_MODE_FLAG_3D_MASK;
list_for_each_entry(mode, &connector->modes, head) {
- mode->status = drm_mode_validate_basic(mode);
+ if (mode->status == MODE_OK)
+ mode->status = drm_mode_validate_basic(mode);
if (mode->status == MODE_OK)
mode->status = drm_mode_validate_size(mode, maxX, maxY);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index b3ba27fd9a6b..e69357172ffb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -55,6 +55,9 @@ static int exynos_crtc_atomic_check(struct drm_crtc *crtc,
{
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+ if (!state->enable)
+ return 0;
+
if (exynos_crtc->ops->atomic_check)
return exynos_crtc->ops->atomic_check(exynos_crtc, state);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a01e51581c4c..f4af19a0d569 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2193,8 +2193,17 @@ struct drm_i915_gem_request {
struct drm_i915_private *i915;
struct intel_engine_cs *ring;
- /** GEM sequence number associated with this request. */
- uint32_t seqno;
+ /** GEM sequence number associated with the previous request,
+ * when the HWS breadcrumb is equal to this the GPU is processing
+ * this request.
+ */
+ u32 previous_seqno;
+
+ /** GEM sequence number associated with this request,
+ * when the HWS breadcrumb is equal or greater than this the GPU
+ * has finished processing this request.
+ */
+ u32 seqno;
/** Position in the ringbuffer of the start of the request */
u32 head;
@@ -2839,6 +2848,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
u32 flags);
+void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
int __must_check i915_vma_unbind(struct i915_vma *vma);
/*
* BEWARE: Do not use the function below unless you can _absolutely_
@@ -2910,15 +2920,17 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
+static inline bool i915_gem_request_started(struct drm_i915_gem_request *req,
+ bool lazy_coherency)
+{
+ u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
+ return i915_seqno_passed(seqno, req->previous_seqno);
+}
+
static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
bool lazy_coherency)
{
- u32 seqno;
-
- BUG_ON(req == NULL);
-
- seqno = req->ring->get_seqno(req->ring, lazy_coherency);
-
+ u32 seqno = req->ring->get_seqno(req->ring, lazy_coherency);
return i915_seqno_passed(seqno, req->seqno);
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 32e6aade6223..f56af0aaafde 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1146,23 +1146,74 @@ static bool missed_irq(struct drm_i915_private *dev_priv,
return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}
-static int __i915_spin_request(struct drm_i915_gem_request *req)
+static unsigned long local_clock_us(unsigned *cpu)
+{
+ unsigned long t;
+
+ /* Cheaply and approximately convert from nanoseconds to microseconds.
+ * The result and subsequent calculations are also defined in the same
+ * approximate microseconds units. The principal source of timing
+ * error here is from the simple truncation.
+ *
+ * Note that local_clock() is only defined wrt to the current CPU;
+ * the comparisons are no longer valid if we switch CPUs. Instead of
+ * blocking preemption for the entire busywait, we can detect the CPU
+ * switch and use that as indicator of system load and a reason to
+ * stop busywaiting, see busywait_stop().
+ */
+ *cpu = get_cpu();
+ t = local_clock() >> 10;
+ put_cpu();
+
+ return t;
+}
+
+static bool busywait_stop(unsigned long timeout, unsigned cpu)
+{
+ unsigned this_cpu;
+
+ if (time_after(local_clock_us(&this_cpu), timeout))
+ return true;
+
+ return this_cpu != cpu;
+}
+
+static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
{
unsigned long timeout;
+ unsigned cpu;
+
+ /* When waiting for high frequency requests, e.g. during synchronous
+ * rendering split between the CPU and GPU, the finite amount of time
+ * required to set up the irq and wait upon it limits the response
+ * rate. By busywaiting on the request completion for a short while we
+ * can service the high frequency waits as quick as possible. However,
+ * if it is a slow request, we want to sleep as quickly as possible.
+ * The tradeoff between waiting and sleeping is roughly the time it
+ * takes to sleep on a request, on the order of a microsecond.
+ */
- if (i915_gem_request_get_ring(req)->irq_refcount)
+ if (req->ring->irq_refcount)
return -EBUSY;
- timeout = jiffies + 1;
+ /* Only spin if we know the GPU is processing this request */
+ if (!i915_gem_request_started(req, true))
+ return -EAGAIN;
+
+ timeout = local_clock_us(&cpu) + 5;
while (!need_resched()) {
if (i915_gem_request_completed(req, true))
return 0;
- if (time_after_eq(jiffies, timeout))
+ if (signal_pending_state(state, current))
+ break;
+
+ if (busywait_stop(timeout, cpu))
break;
cpu_relax_lowlatency();
}
+
if (i915_gem_request_completed(req, false))
return 0;
@@ -1197,6 +1248,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
struct drm_i915_private *dev_priv = dev->dev_private;
const bool irq_test_in_progress =
ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
+ int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(wait);
unsigned long timeout_expire;
s64 before, now;
@@ -1229,7 +1281,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
before = ktime_get_raw_ns();
/* Optimistic spin for the next jiffie before touching IRQs */
- ret = __i915_spin_request(req);
+ ret = __i915_spin_request(req, state);
if (ret == 0)
goto out;
@@ -1241,8 +1293,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
for (;;) {
struct timer_list timer;
- prepare_to_wait(&ring->irq_queue, &wait,
- interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+ prepare_to_wait(&ring->irq_queue, &wait, state);
/* We need to check whether any gpu reset happened in between
* the caller grabbing the seqno and now ... */
@@ -1260,7 +1311,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
break;
}
- if (interruptible && signal_pending(current)) {
+ if (signal_pending_state(state, current)) {
ret = -ERESTARTSYS;
break;
}
@@ -2554,6 +2605,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
request->batch_obj = obj;
request->emitted_jiffies = jiffies;
+ request->previous_seqno = ring->last_submitted_seqno;
ring->last_submitted_seqno = request->seqno;
list_add_tail(&request->list, &ring->request_list);
@@ -4080,6 +4132,29 @@ i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
return false;
}
+void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ bool mappable, fenceable;
+ u32 fence_size, fence_alignment;
+
+ fence_size = i915_gem_get_gtt_size(obj->base.dev,
+ obj->base.size,
+ obj->tiling_mode);
+ fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
+ obj->base.size,
+ obj->tiling_mode,
+ true);
+
+ fenceable = (vma->node.size == fence_size &&
+ (vma->node.start & (fence_alignment - 1)) == 0);
+
+ mappable = (vma->node.start + fence_size <=
+ to_i915(obj->base.dev)->gtt.mappable_end);
+
+ obj->map_and_fenceable = mappable && fenceable;
+}
+
static int
i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
struct i915_address_space *vm,
@@ -4147,25 +4222,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
(bound ^ vma->bound) & GLOBAL_BIND) {
- bool mappable, fenceable;
- u32 fence_size, fence_alignment;
-
- fence_size = i915_gem_get_gtt_size(obj->base.dev,
- obj->base.size,
- obj->tiling_mode);
- fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
- obj->base.size,
- obj->tiling_mode,
- true);
-
- fenceable = (vma->node.size == fence_size &&
- (vma->node.start & (fence_alignment - 1)) == 0);
-
- mappable = (vma->node.start + fence_size <=
- dev_priv->gtt.mappable_end);
-
- obj->map_and_fenceable = mappable && fenceable;
-
+ __i915_vma_set_map_and_fenceable(vma);
WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 8c688a5f1589..02ceb7a4b481 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -141,8 +141,6 @@ static void i915_gem_context_clean(struct intel_context *ctx)
if (!ppgtt)
return;
- WARN_ON(!list_empty(&ppgtt->base.active_list));
-
list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
mm_list) {
if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 43f35d12b677..86c7500454b4 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2676,6 +2676,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
return ret;
}
vma->bound |= GLOBAL_BIND;
+ __i915_vma_set_map_and_fenceable(vma);
list_add_tail(&vma->mm_list, &ggtt_vm->inactive_list);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index cdacf3f5b77a..87e919a06b27 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -687,6 +687,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
}
vma->bound |= GLOBAL_BIND;
+ __i915_vma_set_map_and_fenceable(vma);
list_add_tail(&vma->mm_list, &ggtt->inactive_list);
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 22e86d2e408d..32cf97346978 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -116,6 +116,7 @@ static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
+static void intel_pre_disable_primary(struct drm_crtc *crtc);
typedef struct {
int min, max;
@@ -2607,6 +2608,8 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct drm_i915_gem_object *obj;
struct drm_plane *primary = intel_crtc->base.primary;
struct drm_plane_state *plane_state = primary->state;
+ struct drm_crtc_state *crtc_state = intel_crtc->base.state;
+ struct intel_plane *intel_plane = to_intel_plane(primary);
struct drm_framebuffer *fb;
if (!plane_config->fb)
@@ -2643,6 +2646,18 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
}
}
+ /*
+ * We've failed to reconstruct the BIOS FB. Current display state
+ * indicates that the primary plane is visible, but has a NULL FB,
+ * which will lead to problems later if we don't fix it up. The
+ * simplest solution is to just disable the primary plane now and
+ * pretend the BIOS never had it enabled.
+ */
+ to_intel_plane_state(plane_state)->visible = false;
+ crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
+ intel_pre_disable_primary(&intel_crtc->base);
+ intel_plane->disable_plane(primary, &intel_crtc->base);
+
return;
valid_fb:
@@ -6309,9 +6324,11 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
if (to_intel_plane_state(crtc->primary->state)->visible) {
intel_crtc_wait_for_pending_flips(crtc);
intel_pre_disable_primary(crtc);
+
+ intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
+ to_intel_plane_state(crtc->primary->state)->visible = false;
}
- intel_crtc_disable_planes(crtc, crtc->state->plane_mask);
dev_priv->display.crtc_disable(crtc);
intel_crtc->active = false;
intel_update_watermarks(crtc);
@@ -9908,14 +9925,14 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
return true;
}
-static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
+static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t cntl = 0, size = 0;
- if (base) {
+ if (on) {
unsigned int width = intel_crtc->base.cursor->state->crtc_w;
unsigned int height = intel_crtc->base.cursor->state->crtc_h;
unsigned int stride = roundup_pow_of_two(width) * 4;
@@ -9970,16 +9987,15 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
}
}
-static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
+static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
- uint32_t cntl;
+ uint32_t cntl = 0;
- cntl = 0;
- if (base) {
+ if (on) {
cntl = MCURSOR_GAMMA_ENABLE;
switch (intel_crtc->base.cursor->state->crtc_w) {
case 64:
@@ -10030,18 +10046,17 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
int y = cursor_state->crtc_y;
u32 base = 0, pos = 0;
- if (on)
- base = intel_crtc->cursor_addr;
+ base = intel_crtc->cursor_addr;
if (x >= intel_crtc->config->pipe_src_w)
- base = 0;
+ on = false;
if (y >= intel_crtc->config->pipe_src_h)
- base = 0;
+ on = false;
if (x < 0) {
if (x + cursor_state->crtc_w <= 0)
- base = 0;
+ on = false;
pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
x = -x;
@@ -10050,16 +10065,13 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
if (y < 0) {
if (y + cursor_state->crtc_h <= 0)
- base = 0;
+ on = false;
pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
y = -y;
}
pos |= y << CURSOR_Y_SHIFT;
- if (base == 0 && intel_crtc->cursor_base == 0)
- return;
-
I915_WRITE(CURPOS(pipe), pos);
/* ILK+ do this automagically */
@@ -10070,9 +10082,9 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
}
if (IS_845G(dev) || IS_I865G(dev))
- i845_update_cursor(crtc, base);
+ i845_update_cursor(crtc, base, on);
else
- i9xx_update_cursor(crtc, base);
+ i9xx_update_cursor(crtc, base, on);
}
static bool cursor_size_ok(struct drm_device *dev,
@@ -12111,18 +12123,22 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
- struct intel_encoder *encoder;
struct drm_connector *connector;
- struct drm_connector_state *connector_state;
unsigned int used_ports = 0;
- int i;
/*
* Walk the connector list instead of the encoder
* list to detect the problem on ddi platforms
* where there's just one encoder per digital port.
*/
- for_each_connector_in_state(state, connector, connector_state, i) {
+ drm_for_each_connector(connector, dev) {
+ struct drm_connector_state *connector_state;
+ struct intel_encoder *encoder;
+
+ connector_state = drm_atomic_get_existing_connector_state(state, connector);
+ if (!connector_state)
+ connector_state = connector->state;
+
if (!connector_state->best_encoder)
continue;
@@ -13716,6 +13732,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
struct drm_crtc *crtc = crtc_state->base.crtc;
struct drm_framebuffer *fb = state->base.fb;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ enum pipe pipe = to_intel_plane(plane)->pipe;
unsigned stride;
int ret;
@@ -13749,6 +13766,22 @@ intel_check_cursor_plane(struct drm_plane *plane,
return -EINVAL;
}
+ /*
+ * There's something wrong with the cursor on CHV pipe C.
+ * If it straddles the left edge of the screen then
+ * moving it away from the edge or disabling it often
+ * results in a pipe underrun, and often that can lead to
+ * dead pipe (constant underrun reported, and it scans
+ * out just a solid color). To recover from that, the
+ * display power well must be turned off and on again.
+ * Refuse the put the cursor into that compromised position.
+ */
+ if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
+ state->visible && state->base.crtc_x < 0) {
+ DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -13772,9 +13805,6 @@ intel_commit_cursor_plane(struct drm_plane *plane,
crtc = crtc ? crtc : plane->crtc;
intel_crtc = to_intel_crtc(crtc);
- if (intel_crtc->cursor_bo == obj)
- goto update;
-
if (!obj)
addr = 0;
else if (!INTEL_INFO(dev)->cursor_needs_physical)
@@ -13783,9 +13813,7 @@ intel_commit_cursor_plane(struct drm_plane *plane,
addr = obj->phys_handle->busaddr;
intel_crtc->cursor_addr = addr;
- intel_crtc->cursor_bo = obj;
-update:
if (crtc->state->active)
intel_crtc_update_cursor(crtc, state->visible);
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f2a1142bff34..0d00f07b7163 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -550,7 +550,6 @@ struct intel_crtc {
int adjusted_x;
int adjusted_y;
- struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
uint32_t cursor_cntl;
uint32_t cursor_size;
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 81cdd9ff3892..e6c035b0fc1c 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1374,17 +1374,18 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
bool live_status = false;
- unsigned int retry = 3;
+ unsigned int try;
DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
- while (!live_status && --retry) {
+ for (try = 0; !live_status && try < 9; try++) {
+ if (try)
+ msleep(10);
live_status = intel_digital_port_connected(dev_priv,
hdmi_to_dig_port(intel_hdmi));
- mdelay(10);
}
if (!live_status)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 071a76b9ac52..f091ad12d694 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4782,8 +4782,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
/* 2b: Program RC6 thresholds.*/
/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
- if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) &&
- (INTEL_REVID(dev) <= SKL_REVID_E0)))
+ if (IS_SKYLAKE(dev))
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
else
I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
@@ -4825,7 +4824,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
* WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
*/
if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
- ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
+ ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0)))
I915_WRITE(GEN9_PG_ENABLE, 0);
else
I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
index ffa902ece872..05a895496fc6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv40.c
@@ -156,6 +156,7 @@ nv40_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
return -ENOMEM;
nvkm_object_ctor(&nv40_gr_chan, oclass, &chan->object);
chan->gr = gr;
+ chan->fifo = fifoch;
*pobject = &chan->object;
spin_lock_irqsave(&chan->gr->base.engine.lock, flags);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
index 43006db6fd58..80fed7e78dcb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/fan.c
@@ -83,6 +83,7 @@ nvbios_fan_parse(struct nvkm_bios *bios, struct nvbios_therm_fan *fan)
fan->type = NVBIOS_THERM_FAN_UNK;
}
+ fan->fan_mode = NVBIOS_THERM_FAN_LINEAR;
fan->min_duty = nvbios_rd08(bios, data + 0x02);
fan->max_duty = nvbios_rd08(bios, data + 0x03);
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index b8e4cdec28c3..24f92bea39c7 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -112,11 +112,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
dma_addr_t paddr;
int ret;
- /* only doing ARGB32 since this is what is needed to alpha-blend
- * with video overlays:
- */
sizes->surface_bpp = 32;
- sizes->surface_depth = 32;
+ sizes->surface_depth = 24;
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
sizes->surface_height, sizes->surface_bpp,
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 8f59f057cdf4..80a73bfc1a65 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1217,6 +1217,7 @@ config SENSORS_PWM_FAN
config SENSORS_SHT15
tristate "Sensiron humidity and temperature sensors. SHT15 and compat."
depends on GPIOLIB || COMPILE_TEST
+ select BITREVERSE
help
If you say yes here you get support for the Sensirion SHT10, SHT11,
SHT15, SHT71, SHT75 humidity and temperature sensors.
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 65482624ea2c..5289aa0980a8 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -58,6 +58,7 @@ struct tmp102 {
u16 config_orig;
unsigned long last_update;
int temp[3];
+ bool first_time;
};
/* convert left adjusted 13-bit TMP102 register value to milliCelsius */
@@ -93,6 +94,7 @@ static struct tmp102 *tmp102_update_device(struct device *dev)
tmp102->temp[i] = tmp102_reg_to_mC(status);
}
tmp102->last_update = jiffies;
+ tmp102->first_time = false;
}
mutex_unlock(&tmp102->lock);
return tmp102;
@@ -102,6 +104,12 @@ static int tmp102_read_temp(void *dev, int *temp)
{
struct tmp102 *tmp102 = tmp102_update_device(dev);
+ /* Is it too early even to return a conversion? */
+ if (tmp102->first_time) {
+ dev_dbg(dev, "%s: Conversion not ready yet..\n", __func__);
+ return -EAGAIN;
+ }
+
*temp = tmp102->temp[0];
return 0;
@@ -114,6 +122,10 @@ static ssize_t tmp102_show_temp(struct device *dev,
struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
struct tmp102 *tmp102 = tmp102_update_device(dev);
+ /* Is it too early even to return a read? */
+ if (tmp102->first_time)
+ return -EAGAIN;
+
return sprintf(buf, "%d\n", tmp102->temp[sda->index]);
}
@@ -207,7 +219,9 @@ static int tmp102_probe(struct i2c_client *client,
status = -ENODEV;
goto fail_restore_config;
}
- tmp102->last_update = jiffies - HZ;
+ tmp102->last_update = jiffies;
+ /* Mark that we are not ready with data until conversion is complete */
+ tmp102->first_time = true;
mutex_init(&tmp102->lock);
hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
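Aside (illustration only, not part of the diff): with the first_time flag above, tmp102_read_temp() returns -EAGAIN until the first conversion has completed, so callers are expected to retry. A minimal, hypothetical consumer sketch — the helper name, retry count and delay are made up:

#include <linux/delay.h>
#include <linux/errno.h>

/* Illustrative only: poll a get_temp()-style callback that may return
 * -EAGAIN until the sensor's first conversion has finished, as
 * tmp102_read_temp() now does.
 */
static int read_temp_retry(void *dev, int (*get_temp)(void *dev, int *temp),
			   int *temp_mC)
{
	int i, ret;

	for (i = 0; i < 5; i++) {
		ret = get_temp(dev, temp_mC);
		if (ret != -EAGAIN)
			return ret;	/* success or a real error */
		msleep(50);		/* give the conversion time to finish */
	}
	return -ETIMEDOUT;
}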
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index c5628a42170a..a8bdcb5292f5 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -202,8 +202,15 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
* d is always 6 on Keystone I2C controller
*/
- /* get minimum of 7 MHz clock, but max of 12 MHz */
- psc = (input_clock / 7000000) - 1;
+ /*
+ * Both the Davinci and current Keystone User Guides recommend a
+ * value between 7MHz and 12MHz. In reality a 7MHz module clock
+ * doesn't always produce enough margin between SDA and SCL
+ * transitions. Measurements show that the higher the module clock,
+ * the bigger the margin, providing more reliable communication.
+ * So we'd better target 12MHz.
+ */
+ psc = (input_clock / 12000000) - 1;
if ((input_clock / (psc + 1)) > 12000000)
psc++; /* better to run under spec than over */
d = (psc >= 2) ? 5 : 7 - psc;
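Aside (illustration only, not part of the diff): the prescaler math above, pulled out into a standalone sketch and evaluated for a hypothetical 100 MHz functional clock — the real input clock comes from the platform clock tree:

#include <stdio.h>

/* Mirror of the prescaler selection above: target a ~12 MHz module
 * clock and round the divider up so the limit is never exceeded.
 */
static unsigned int davinci_i2c_psc(unsigned long input_clock)
{
	unsigned int psc = input_clock / 12000000 - 1;

	if (input_clock / (psc + 1) > 12000000)
		psc++;		/* better to run under spec than over */
	return psc;
}

int main(void)
{
	unsigned long input = 100000000UL;	/* hypothetical 100 MHz */
	unsigned int psc = davinci_i2c_psc(input);

	/* 100 MHz / (7 + 1) = 12.5 MHz > 12 MHz, so psc becomes 8 -> ~11.1 MHz */
	printf("psc=%u module clock=%lu Hz\n", psc, input / (psc + 1));
	return 0;
}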
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 8c48b27ba059..de7fbbb374cd 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -813,6 +813,12 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
tx_aborted:
if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
complete(&dev->cmd_complete);
+ else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) {
+ /* workaround to trigger pending interrupt */
+ stat = dw_readl(dev, DW_IC_INTR_MASK);
+ i2c_dw_disable_int(dev);
+ dw_writel(dev, stat, DW_IC_INTR_MASK);
+ }
return IRQ_HANDLED;
}
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 1d50898e7b24..9ffb63a60f95 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -111,6 +111,7 @@ struct dw_i2c_dev {
#define ACCESS_SWAP 0x00000001
#define ACCESS_16BIT 0x00000002
+#define ACCESS_INTR_MASK 0x00000004
extern int i2c_dw_init(struct dw_i2c_dev *dev);
extern void i2c_dw_disable(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 809579ecb5a4..6b00061c3746 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -93,6 +93,7 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
static int dw_i2c_acpi_configure(struct platform_device *pdev)
{
struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
+ const struct acpi_device_id *id;
dev->adapter.nr = -1;
dev->tx_fifo_depth = 32;
@@ -106,6 +107,10 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt,
&dev->sda_hold_time);
+ id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
+ if (id && id->driver_data)
+ dev->accessor_flags |= (u32)id->driver_data;
+
return 0;
}
@@ -116,7 +121,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = {
{ "INT3433", 0 },
{ "80860F41", 0 },
{ "808622C1", 0 },
- { "AMD0010", 0 },
+ { "AMD0010", ACCESS_INTR_MASK },
{ }
};
MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
@@ -240,12 +245,10 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
}
r = i2c_dw_probe(dev);
- if (r) {
+ if (r && !dev->pm_runtime_disabled)
pm_runtime_disable(&pdev->dev);
- return r;
- }
- return 0;
+ return r;
}
static int dw_i2c_plat_remove(struct platform_device *pdev)
@@ -260,7 +263,8 @@ static int dw_i2c_plat_remove(struct platform_device *pdev)
pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
+ if (!dev->pm_runtime_disabled)
+ pm_runtime_disable(&pdev->dev);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 9bb0b056b25f..d4d853680ae4 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -1119,6 +1119,8 @@ static int i2c_imx_probe(struct platform_device *pdev)
i2c_imx, IMX_I2C_I2CR);
imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR);
+ i2c_imx_init_recovery_info(i2c_imx, pdev);
+
/* Add I2C adapter */
ret = i2c_add_numbered_adapter(&i2c_imx->adapter);
if (ret < 0) {
@@ -1126,8 +1128,6 @@ static int i2c_imx_probe(struct platform_device *pdev)
goto clk_disable;
}
- i2c_imx_init_recovery_info(i2c_imx, pdev);
-
/* Set up platform driver data */
platform_set_drvdata(pdev, i2c_imx);
clk_disable_unprepare(i2c_imx->clk);
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 5801227b97ab..43207f52e5a3 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -146,6 +146,8 @@ struct mv64xxx_i2c_data {
bool errata_delay;
struct reset_control *rstc;
bool irq_clear_inverted;
+ /* Clk div is 2 to the power n, not 2 to the power n + 1 */
+ bool clk_n_base_0;
};
static struct mv64xxx_i2c_regs mv64xxx_i2c_regs_mv64xxx = {
@@ -757,25 +759,29 @@ MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
#ifdef CONFIG_OF
#ifdef CONFIG_HAVE_CLK
static int
-mv64xxx_calc_freq(const int tclk, const int n, const int m)
+mv64xxx_calc_freq(struct mv64xxx_i2c_data *drv_data,
+ const int tclk, const int n, const int m)
{
- return tclk / (10 * (m + 1) * (2 << n));
+ if (drv_data->clk_n_base_0)
+ return tclk / (10 * (m + 1) * (1 << n));
+ else
+ return tclk / (10 * (m + 1) * (2 << n));
}
static bool
-mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n,
- u32 *best_m)
+mv64xxx_find_baud_factors(struct mv64xxx_i2c_data *drv_data,
+ const u32 req_freq, const u32 tclk)
{
int freq, delta, best_delta = INT_MAX;
int m, n;
for (n = 0; n <= 7; n++)
for (m = 0; m <= 15; m++) {
- freq = mv64xxx_calc_freq(tclk, n, m);
+ freq = mv64xxx_calc_freq(drv_data, tclk, n, m);
delta = req_freq - freq;
if (delta >= 0 && delta < best_delta) {
- *best_m = m;
- *best_n = n;
+ drv_data->freq_m = m;
+ drv_data->freq_n = n;
best_delta = delta;
}
if (best_delta == 0)
@@ -813,8 +819,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
if (of_property_read_u32(np, "clock-frequency", &bus_freq))
bus_freq = 100000; /* 100kHz by default */
- if (!mv64xxx_find_baud_factors(bus_freq, tclk,
- &drv_data->freq_n, &drv_data->freq_m)) {
+ if (of_device_is_compatible(np, "allwinner,sun4i-a10-i2c") ||
+ of_device_is_compatible(np, "allwinner,sun6i-a31-i2c"))
+ drv_data->clk_n_base_0 = true;
+
+ if (!mv64xxx_find_baud_factors(drv_data, bus_freq, tclk)) {
rc = -EINVAL;
goto out;
}
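Aside (illustration only, not part of the diff): the divider formula in mv64xxx_calc_freq() above, compared for the two clock bases with a hypothetical 24 MHz tclk and arbitrary n/m values:

#include <stdio.h>

/* Same formula as mv64xxx_calc_freq(): base-0 controllers divide by
 * 2^n, the original mv64xxx family divides by 2^(n+1).
 */
static int calc_freq(int clk_n_base_0, int tclk, int n, int m)
{
	if (clk_n_base_0)
		return tclk / (10 * (m + 1) * (1 << n));
	else
		return tclk / (10 * (m + 1) * (2 << n));
}

int main(void)
{
	int tclk = 24000000;	/* hypothetical 24 MHz tclk */
	int n = 2, m = 5;

	printf("base-0: %d Hz, base-1: %d Hz\n",
	       calc_freq(1, tclk, n, m),
	       calc_freq(0, tclk, n, m));
	/* base-0: 24e6 / (10 * 6 * 4) = 100000 Hz
	 * base-1: 24e6 / (10 * 6 * 8) =  50000 Hz
	 */
	return 0;
}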
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index b0ae560b38c3..599c0d7bd906 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -576,7 +576,7 @@ static int rcar_reg_slave(struct i2c_client *slave)
if (slave->flags & I2C_CLIENT_TEN)
return -EAFNOSUPPORT;
- pm_runtime_forbid(rcar_i2c_priv_to_dev(priv));
+ pm_runtime_get_sync(rcar_i2c_priv_to_dev(priv));
priv->slave = slave;
rcar_i2c_write(priv, ICSAR, slave->addr);
@@ -598,7 +598,7 @@ static int rcar_unreg_slave(struct i2c_client *slave)
priv->slave = NULL;
- pm_runtime_allow(rcar_i2c_priv_to_dev(priv));
+ pm_runtime_put(rcar_i2c_priv_to_dev(priv));
return 0;
}
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index c1935ebd6a9c..9096d17beb5b 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -908,7 +908,7 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
&i2c->scl_fall_ns))
i2c->scl_fall_ns = 300;
if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns",
- &i2c->scl_fall_ns))
+ &i2c->sda_fall_ns))
i2c->sda_fall_ns = i2c->scl_fall_ns;
strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
diff --git a/drivers/i2c/busses/i2c-st.c b/drivers/i2c/busses/i2c-st.c
index ea72dca32fdf..25020ec777c9 100644
--- a/drivers/i2c/busses/i2c-st.c
+++ b/drivers/i2c/busses/i2c-st.c
@@ -822,7 +822,7 @@ static int st_i2c_probe(struct platform_device *pdev)
adap = &i2c_dev->adap;
i2c_set_adapdata(adap, i2c_dev);
- snprintf(adap->name, sizeof(adap->name), "ST I2C(0x%pa)", &res->start);
+ snprintf(adap->name, sizeof(adap->name), "ST I2C(%pa)", &res->start);
adap->owner = THIS_MODULE;
adap->timeout = 2 * HZ;
adap->retries = 0;
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d2d5d004f16d..2d762a2ecd81 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1265,15 +1265,17 @@ static bool cma_protocol_roce(const struct rdma_cm_id *id)
return cma_protocol_roce_dev_port(device, port_num);
}
-static bool cma_match_net_dev(const struct rdma_id_private *id_priv,
- const struct net_device *net_dev)
+static bool cma_match_net_dev(const struct rdma_cm_id *id,
+ const struct net_device *net_dev,
+ u8 port_num)
{
- const struct rdma_addr *addr = &id_priv->id.route.addr;
+ const struct rdma_addr *addr = &id->route.addr;
if (!net_dev)
/* This request is an AF_IB request or a RoCE request */
- return addr->src_addr.ss_family == AF_IB ||
- cma_protocol_roce(&id_priv->id);
+ return (!id->port_num || id->port_num == port_num) &&
+ (addr->src_addr.ss_family == AF_IB ||
+ cma_protocol_roce_dev_port(id->device, port_num));
return !addr->dev_addr.bound_dev_if ||
(net_eq(dev_net(net_dev), addr->dev_addr.net) &&
@@ -1295,13 +1297,13 @@ static struct rdma_id_private *cma_find_listener(
hlist_for_each_entry(id_priv, &bind_list->owners, node) {
if (cma_match_private_data(id_priv, ib_event->private_data)) {
if (id_priv->id.device == cm_id->device &&
- cma_match_net_dev(id_priv, net_dev))
+ cma_match_net_dev(&id_priv->id, net_dev, req->port))
return id_priv;
list_for_each_entry(id_priv_dev,
&id_priv->listen_list,
listen_list) {
if (id_priv_dev->id.device == cm_id->device &&
- cma_match_net_dev(id_priv_dev, net_dev))
+ cma_match_net_dev(&id_priv_dev->id, net_dev, req->port))
return id_priv_dev;
}
}
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index 8d133c40fa0e..c394376ebe06 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -286,7 +286,7 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq)
mlx4_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
ib_umem_release(msrq->umem);
} else {
- kfree(msrq->wrid);
+ kvfree(msrq->wrid);
mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
&msrq->buf);
mlx4_db_free(dev->dev, &msrq->db);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index ae80590aabdf..040bb8b5cb15 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -232,6 +232,10 @@ struct phy_info {
u16 interface_type;
};
+enum ocrdma_flags {
+ OCRDMA_FLAGS_LINK_STATUS_INIT = 0x01
+};
+
struct ocrdma_dev {
struct ib_device ibdev;
struct ocrdma_dev_attr attr;
@@ -287,6 +291,7 @@ struct ocrdma_dev {
atomic_t update_sl;
u16 pvid;
u32 asic_id;
+ u32 flags;
ulong last_stats_time;
struct mutex stats_lock; /* provide synch for debugfs operations */
@@ -591,4 +596,9 @@ static inline u8 ocrdma_is_enabled_and_synced(u32 state)
(state & OCRDMA_STATE_FLAG_SYNC);
}
+static inline u8 ocrdma_get_ae_link_state(u32 ae_state)
+{
+ return ((ae_state & OCRDMA_AE_LSC_LS_MASK) >> OCRDMA_AE_LSC_LS_SHIFT);
+}
+
#endif
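Aside (illustration only, not part of the diff): how the new helper is meant to be used together with the link-state masks added further down in ocrdma_sli.h; the sample speed_state_ptn word is made up:

#include <stdio.h>

/* Copies of the constants added by this patch (see ocrdma_sli.h below). */
#define OCRDMA_AE_LSC_LS_SHIFT		0x08
#define OCRDMA_AE_LSC_LS_MASK		(0xFF << OCRDMA_AE_LSC_LS_SHIFT)
#define OCRDMA_AE_LSC_LLINK_MASK	0x02
#define OCRDMA_LINK_ST_MASK		0x01

/* Same as ocrdma_get_ae_link_state(): byte 1 of speed_state_ptn holds
 * the physical/logical link state bits.
 */
static unsigned char get_ae_link_state(unsigned int ae_state)
{
	return (ae_state & OCRDMA_AE_LSC_LS_MASK) >> OCRDMA_AE_LSC_LS_SHIFT;
}

int main(void)
{
	unsigned int speed_state_ptn = 0x00000300;	/* hypothetical CQE word */
	unsigned char lstate = get_ae_link_state(speed_state_ptn);

	if (lstate & OCRDMA_AE_LSC_LLINK_MASK)
		/* logical link event: bit 0 is the up/down state */
		printf("logical link %s\n",
		       (lstate & OCRDMA_LINK_ST_MASK) ? "up" : "down");
	else
		printf("physical link change only, ignored\n");
	return 0;
}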
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 30f67bebffa3..283ca842ff74 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -579,6 +579,8 @@ static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
cmd->async_event_bitmap = BIT(OCRDMA_ASYNC_GRP5_EVE_CODE);
cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_RDMA_EVE_CODE);
+ /* Request link events on this MQ. */
+ cmd->async_event_bitmap |= BIT(OCRDMA_ASYNC_LINK_EVE_CODE);
cmd->async_cqid_ringsize = cq->id;
cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
@@ -819,20 +821,42 @@ static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
}
}
+static void ocrdma_process_link_state(struct ocrdma_dev *dev,
+ struct ocrdma_ae_mcqe *cqe)
+{
+ struct ocrdma_ae_lnkst_mcqe *evt;
+ u8 lstate;
+
+ evt = (struct ocrdma_ae_lnkst_mcqe *)cqe;
+ lstate = ocrdma_get_ae_link_state(evt->speed_state_ptn);
+
+ if (!(lstate & OCRDMA_AE_LSC_LLINK_MASK))
+ return;
+
+ if (dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)
+ ocrdma_update_link_state(dev, (lstate & OCRDMA_LINK_ST_MASK));
+}
+
static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
{
/* async CQE processing */
struct ocrdma_ae_mcqe *cqe = ae_cqe;
u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;
-
- if (evt_code == OCRDMA_ASYNC_RDMA_EVE_CODE)
+ switch (evt_code) {
+ case OCRDMA_ASYNC_LINK_EVE_CODE:
+ ocrdma_process_link_state(dev, cqe);
+ break;
+ case OCRDMA_ASYNC_RDMA_EVE_CODE:
ocrdma_dispatch_ibevent(dev, cqe);
- else if (evt_code == OCRDMA_ASYNC_GRP5_EVE_CODE)
+ break;
+ case OCRDMA_ASYNC_GRP5_EVE_CODE:
ocrdma_process_grp5_aync(dev, cqe);
- else
+ break;
+ default:
pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
dev->id, evt_code);
+ }
}
static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
@@ -1363,7 +1387,8 @@ mbx_err:
return status;
}
-int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
+int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed,
+ u8 *lnk_state)
{
int status = -ENOMEM;
struct ocrdma_get_link_speed_rsp *rsp;
@@ -1384,8 +1409,11 @@ int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
goto mbx_err;
rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
- *lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
- >> OCRDMA_PHY_PS_SHIFT;
+ if (lnk_speed)
+ *lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
+ >> OCRDMA_PHY_PS_SHIFT;
+ if (lnk_state)
+ *lnk_state = (rsp->res_lnk_st & OCRDMA_LINK_ST_MASK);
mbx_err:
kfree(cmd);
@@ -2515,9 +2543,10 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
- if (vlan_id < 0x1000) {
- if (dev->pfc_state) {
- vlan_id = 0;
+ if (vlan_id == 0xFFFF)
+ vlan_id = 0;
+ if (vlan_id || dev->pfc_state) {
+ if (!vlan_id) {
pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
dev->id);
pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index 7ed885c1851e..ebc1f442aec3 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -106,7 +106,8 @@ void ocrdma_ring_cq_db(struct ocrdma_dev *, u16 cq_id, bool armed,
bool solicited, u16 cqe_popped);
/* verbs specific mailbox commands */
-int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed);
+int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed,
+ u8 *lnk_st);
int ocrdma_query_config(struct ocrdma_dev *,
struct ocrdma_mbx_query_config *config);
@@ -153,5 +154,6 @@ char *port_speed_string(struct ocrdma_dev *dev);
void ocrdma_init_service_level(struct ocrdma_dev *);
void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev);
void ocrdma_free_pd_range(struct ocrdma_dev *dev);
+void ocrdma_update_link_state(struct ocrdma_dev *dev, u8 lstate);
#endif /* __OCRDMA_HW_H__ */
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 62b7009daa6c..3afb40b85159 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -290,6 +290,7 @@ static void ocrdma_remove_sysfiles(struct ocrdma_dev *dev)
static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
{
int status = 0, i;
+ u8 lstate = 0;
struct ocrdma_dev *dev;
dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev));
@@ -319,6 +320,11 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
if (status)
goto alloc_err;
+ /* Query Link state and update */
+ status = ocrdma_mbx_get_link_speed(dev, NULL, &lstate);
+ if (!status)
+ ocrdma_update_link_state(dev, lstate);
+
for (i = 0; i < ARRAY_SIZE(ocrdma_attributes); i++)
if (device_create_file(&dev->ibdev.dev, ocrdma_attributes[i]))
goto sysfs_err;
@@ -373,7 +379,7 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
ocrdma_remove_free(dev);
}
-static int ocrdma_open(struct ocrdma_dev *dev)
+static int ocrdma_dispatch_port_active(struct ocrdma_dev *dev)
{
struct ib_event port_event;
@@ -384,32 +390,9 @@ static int ocrdma_open(struct ocrdma_dev *dev)
return 0;
}
-static int ocrdma_close(struct ocrdma_dev *dev)
+static int ocrdma_dispatch_port_error(struct ocrdma_dev *dev)
{
- int i;
- struct ocrdma_qp *qp, **cur_qp;
struct ib_event err_event;
- struct ib_qp_attr attrs;
- int attr_mask = IB_QP_STATE;
-
- attrs.qp_state = IB_QPS_ERR;
- mutex_lock(&dev->dev_lock);
- if (dev->qp_tbl) {
- cur_qp = dev->qp_tbl;
- for (i = 0; i < OCRDMA_MAX_QP; i++) {
- qp = cur_qp[i];
- if (qp && qp->ibqp.qp_type != IB_QPT_GSI) {
- /* change the QP state to ERROR */
- _ocrdma_modify_qp(&qp->ibqp, &attrs, attr_mask);
-
- err_event.event = IB_EVENT_QP_FATAL;
- err_event.element.qp = &qp->ibqp;
- err_event.device = &dev->ibdev;
- ib_dispatch_event(&err_event);
- }
- }
- }
- mutex_unlock(&dev->dev_lock);
err_event.event = IB_EVENT_PORT_ERR;
err_event.element.port_num = 1;
@@ -420,7 +403,7 @@ static int ocrdma_close(struct ocrdma_dev *dev)
static void ocrdma_shutdown(struct ocrdma_dev *dev)
{
- ocrdma_close(dev);
+ ocrdma_dispatch_port_error(dev);
ocrdma_remove(dev);
}
@@ -431,18 +414,28 @@ static void ocrdma_shutdown(struct ocrdma_dev *dev)
static void ocrdma_event_handler(struct ocrdma_dev *dev, u32 event)
{
switch (event) {
- case BE_DEV_UP:
- ocrdma_open(dev);
- break;
- case BE_DEV_DOWN:
- ocrdma_close(dev);
- break;
case BE_DEV_SHUTDOWN:
ocrdma_shutdown(dev);
break;
+ default:
+ break;
}
}
+void ocrdma_update_link_state(struct ocrdma_dev *dev, u8 lstate)
+{
+ if (!(dev->flags & OCRDMA_FLAGS_LINK_STATUS_INIT)) {
+ dev->flags |= OCRDMA_FLAGS_LINK_STATUS_INIT;
+ if (!lstate)
+ return;
+ }
+
+ if (!lstate)
+ ocrdma_dispatch_port_error(dev);
+ else
+ ocrdma_dispatch_port_active(dev);
+}
+
static struct ocrdma_driver ocrdma_drv = {
.name = "ocrdma_driver",
.add = ocrdma_add,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 6a38268bbe9f..99dd6fdf06d7 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -465,8 +465,11 @@ struct ocrdma_ae_qp_mcqe {
u32 valid_ae_event;
};
-#define OCRDMA_ASYNC_RDMA_EVE_CODE 0x14
-#define OCRDMA_ASYNC_GRP5_EVE_CODE 0x5
+enum ocrdma_async_event_code {
+ OCRDMA_ASYNC_LINK_EVE_CODE = 0x01,
+ OCRDMA_ASYNC_GRP5_EVE_CODE = 0x05,
+ OCRDMA_ASYNC_RDMA_EVE_CODE = 0x14
+};
enum ocrdma_async_grp5_events {
OCRDMA_ASYNC_EVENT_QOS_VALUE = 0x01,
@@ -489,6 +492,44 @@ enum OCRDMA_ASYNC_EVENT_TYPE {
OCRDMA_MAX_ASYNC_ERRORS
};
+struct ocrdma_ae_lnkst_mcqe {
+ u32 speed_state_ptn;
+ u32 qos_reason_falut;
+ u32 evt_tag;
+ u32 valid_ae_event;
+};
+
+enum {
+ OCRDMA_AE_LSC_PORT_NUM_MASK = 0x3F,
+ OCRDMA_AE_LSC_PT_SHIFT = 0x06,
+ OCRDMA_AE_LSC_PT_MASK = (0x03 <<
+ OCRDMA_AE_LSC_PT_SHIFT),
+ OCRDMA_AE_LSC_LS_SHIFT = 0x08,
+ OCRDMA_AE_LSC_LS_MASK = (0xFF <<
+ OCRDMA_AE_LSC_LS_SHIFT),
+ OCRDMA_AE_LSC_LD_SHIFT = 0x10,
+ OCRDMA_AE_LSC_LD_MASK = (0xFF <<
+ OCRDMA_AE_LSC_LD_SHIFT),
+ OCRDMA_AE_LSC_PPS_SHIFT = 0x18,
+ OCRDMA_AE_LSC_PPS_MASK = (0xFF <<
+ OCRDMA_AE_LSC_PPS_SHIFT),
+ OCRDMA_AE_LSC_PPF_MASK = 0xFF,
+ OCRDMA_AE_LSC_ER_SHIFT = 0x08,
+ OCRDMA_AE_LSC_ER_MASK = (0xFF <<
+ OCRDMA_AE_LSC_ER_SHIFT),
+ OCRDMA_AE_LSC_QOS_SHIFT = 0x10,
+ OCRDMA_AE_LSC_QOS_MASK = (0xFFFF <<
+ OCRDMA_AE_LSC_QOS_SHIFT)
+};
+
+enum {
+ OCRDMA_AE_LSC_PLINK_DOWN = 0x00,
+ OCRDMA_AE_LSC_PLINK_UP = 0x01,
+ OCRDMA_AE_LSC_LLINK_DOWN = 0x02,
+ OCRDMA_AE_LSC_LLINK_MASK = 0x02,
+ OCRDMA_AE_LSC_LLINK_UP = 0x03
+};
+
/* mailbox command request and responses */
enum {
OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT = 2,
@@ -676,7 +717,7 @@ enum {
OCRDMA_PHY_PFLT_SHIFT = 0x18,
OCRDMA_QOS_LNKSP_MASK = 0xFFFF0000,
OCRDMA_QOS_LNKSP_SHIFT = 0x10,
- OCRDMA_LLST_MASK = 0xFF,
+ OCRDMA_LINK_ST_MASK = 0x01,
OCRDMA_PLFC_MASK = 0x00000400,
OCRDMA_PLFC_SHIFT = 0x8,
OCRDMA_PLRFC_MASK = 0x00000200,
@@ -691,7 +732,7 @@ struct ocrdma_get_link_speed_rsp {
u32 pflt_pps_ld_pnum;
u32 qos_lsp;
- u32 res_lls;
+ u32 res_lnk_st;
};
enum {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 583001bcfb8f..76e96f97b3f6 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -171,7 +171,7 @@ static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
int status;
u8 speed;
- status = ocrdma_mbx_get_link_speed(dev, &speed);
+ status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
if (status)
speed = OCRDMA_PHYS_LINK_SPEED_ZERO;
diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c
index 932d07307454..da326090c2b0 100644
--- a/drivers/input/joystick/db9.c
+++ b/drivers/input/joystick/db9.c
@@ -592,6 +592,7 @@ static void db9_attach(struct parport *pp)
return;
}
+ memset(&db9_parport_cb, 0, sizeof(db9_parport_cb));
db9_parport_cb.flags = PARPORT_FLAG_EXCL;
pd = parport_register_dev_model(pp, "db9", &db9_parport_cb, port_idx);
diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c
index 5a672dcac0d8..eae14d512353 100644
--- a/drivers/input/joystick/gamecon.c
+++ b/drivers/input/joystick/gamecon.c
@@ -951,6 +951,7 @@ static void gc_attach(struct parport *pp)
pads = gc_cfg[port_idx].args + 1;
n_pads = gc_cfg[port_idx].nargs - 1;
+ memset(&gc_parport_cb, 0, sizeof(gc_parport_cb));
gc_parport_cb.flags = PARPORT_FLAG_EXCL;
pd = parport_register_dev_model(pp, "gamecon", &gc_parport_cb,
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index 9f5bca26bd2f..77f575dd0901 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -181,6 +181,7 @@ static void tgfx_attach(struct parport *pp)
n_buttons = tgfx_cfg[port_idx].args + 1;
n_devs = tgfx_cfg[port_idx].nargs - 1;
+ memset(&tgfx_parport_cb, 0, sizeof(tgfx_parport_cb));
tgfx_parport_cb.flags = PARPORT_FLAG_EXCL;
pd = parport_register_dev_model(pp, "turbografx", &tgfx_parport_cb,
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
index 9c07fe911075..70a893a17467 100644
--- a/drivers/input/joystick/walkera0701.c
+++ b/drivers/input/joystick/walkera0701.c
@@ -218,6 +218,7 @@ static void walkera0701_attach(struct parport *pp)
w->parport = pp;
+ memset(&walkera0701_parport_cb, 0, sizeof(walkera0701_parport_cb));
walkera0701_parport_cb.flags = PARPORT_FLAG_EXCL;
walkera0701_parport_cb.irq_func = walkera0701_irq_handler;
walkera0701_parport_cb.private = w;
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index 4bf678541496..d5994a745ffa 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -97,8 +97,7 @@ static void arizona_haptics_work(struct work_struct *work)
ret = regmap_update_bits(arizona->regmap,
ARIZONA_HAPTICS_CONTROL_1,
- ARIZONA_HAP_CTRL_MASK,
- 1 << ARIZONA_HAP_CTRL_SHIFT);
+ ARIZONA_HAP_CTRL_MASK, 0);
if (ret != 0) {
dev_err(arizona->dev, "Failed to stop haptics: %d\n",
ret);
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 8aee71986430..96c486de49e0 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -20,70 +20,78 @@
#include <linux/input.h>
#include <linux/device.h>
#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/rotary_encoder.h>
+#include <linux/gpio/consumer.h>
#include <linux/slab.h>
#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
#include <linux/pm.h>
+#include <linux/property.h>
#define DRV_NAME "rotary-encoder"
struct rotary_encoder {
struct input_dev *input;
- const struct rotary_encoder_platform_data *pdata;
- unsigned int axis;
+ struct mutex access_mutex;
+
+ u32 steps;
+ u32 axis;
+ bool relative_axis;
+ bool rollover;
+
unsigned int pos;
- unsigned int irq_a;
- unsigned int irq_b;
+ struct gpio_descs *gpios;
+
+ unsigned int *irq;
bool armed;
- unsigned char dir; /* 0 - clockwise, 1 - CCW */
+ signed char dir; /* 1 - clockwise, -1 - CCW */
- char last_stable;
+ unsigned last_stable;
};
-static int rotary_encoder_get_state(const struct rotary_encoder_platform_data *pdata)
+static unsigned rotary_encoder_get_state(struct rotary_encoder *encoder)
{
- int a = !!gpio_get_value(pdata->gpio_a);
- int b = !!gpio_get_value(pdata->gpio_b);
+ int i;
+ unsigned ret = 0;
- a ^= pdata->inverted_a;
- b ^= pdata->inverted_b;
+ for (i = 0; i < encoder->gpios->ndescs; ++i) {
+ int val = gpiod_get_value_cansleep(encoder->gpios->desc[i]);
+ /* convert from gray encoding to normal */
+ if (ret & 1)
+ val = !val;
- return ((a << 1) | b);
+ ret = ret << 1 | val;
+ }
+
+ return ret & 3;
}
static void rotary_encoder_report_event(struct rotary_encoder *encoder)
{
- const struct rotary_encoder_platform_data *pdata = encoder->pdata;
-
- if (pdata->relative_axis) {
+ if (encoder->relative_axis) {
input_report_rel(encoder->input,
- pdata->axis, encoder->dir ? -1 : 1);
+ encoder->axis, encoder->dir);
} else {
unsigned int pos = encoder->pos;
- if (encoder->dir) {
+ if (encoder->dir < 0) {
/* turning counter-clockwise */
- if (pdata->rollover)
- pos += pdata->steps;
+ if (encoder->rollover)
+ pos += encoder->steps;
if (pos)
pos--;
} else {
/* turning clockwise */
- if (pdata->rollover || pos < pdata->steps)
+ if (encoder->rollover || pos < encoder->steps)
pos++;
}
- if (pdata->rollover)
- pos %= pdata->steps;
+ if (encoder->rollover)
+ pos %= encoder->steps;
encoder->pos = pos;
- input_report_abs(encoder->input, pdata->axis, encoder->pos);
+ input_report_abs(encoder->input, encoder->axis, encoder->pos);
}
input_sync(encoder->input);
@@ -92,9 +100,11 @@ static void rotary_encoder_report_event(struct rotary_encoder *encoder)
static irqreturn_t rotary_encoder_irq(int irq, void *dev_id)
{
struct rotary_encoder *encoder = dev_id;
- int state;
+ unsigned state;
- state = rotary_encoder_get_state(encoder->pdata);
+ mutex_lock(&encoder->access_mutex);
+
+ state = rotary_encoder_get_state(encoder);
switch (state) {
case 0x0:
@@ -105,334 +115,227 @@ static irqreturn_t rotary_encoder_irq(int irq, void *dev_id)
break;
case 0x1:
- case 0x2:
+ case 0x3:
if (encoder->armed)
- encoder->dir = state - 1;
+ encoder->dir = 2 - state;
break;
- case 0x3:
+ case 0x2:
encoder->armed = true;
break;
}
+ mutex_unlock(&encoder->access_mutex);
+
return IRQ_HANDLED;
}
static irqreturn_t rotary_encoder_half_period_irq(int irq, void *dev_id)
{
struct rotary_encoder *encoder = dev_id;
- int state;
+ unsigned int state;
- state = rotary_encoder_get_state(encoder->pdata);
+ mutex_lock(&encoder->access_mutex);
- switch (state) {
- case 0x00:
- case 0x03:
+ state = rotary_encoder_get_state(encoder);
+
+ if (state & 1) {
+ encoder->dir = ((encoder->last_stable - state + 1) % 4) - 1;
+ } else {
if (state != encoder->last_stable) {
rotary_encoder_report_event(encoder);
encoder->last_stable = state;
}
- break;
-
- case 0x01:
- case 0x02:
- encoder->dir = (encoder->last_stable + state) & 0x01;
- break;
}
+ mutex_unlock(&encoder->access_mutex);
+
return IRQ_HANDLED;
}
static irqreturn_t rotary_encoder_quarter_period_irq(int irq, void *dev_id)
{
struct rotary_encoder *encoder = dev_id;
- unsigned char sum;
- int state;
-
- state = rotary_encoder_get_state(encoder->pdata);
-
- /*
- * We encode the previous and the current state using a byte.
- * The previous state in the MSB nibble, the current state in the LSB
- * nibble. Then use a table to decide the direction of the turn.
- */
- sum = (encoder->last_stable << 4) + state;
- switch (sum) {
- case 0x31:
- case 0x10:
- case 0x02:
- case 0x23:
- encoder->dir = 0; /* clockwise */
- break;
+ unsigned int state;
- case 0x13:
- case 0x01:
- case 0x20:
- case 0x32:
- encoder->dir = 1; /* counter-clockwise */
- break;
+ mutex_lock(&encoder->access_mutex);
- default:
- /*
- * Ignore all other values. This covers the case when the
- * state didn't change (a spurious interrupt) and the
- * cases where the state changed by two steps, making it
- * impossible to tell the direction.
- *
- * In either case, don't report any event and save the
- * state for later.
- */
+ state = rotary_encoder_get_state(encoder);
+
+ if ((encoder->last_stable + 1) % 4 == state)
+ encoder->dir = 1;
+ else if (encoder->last_stable == (state + 1) % 4)
+ encoder->dir = -1;
+ else
goto out;
- }
rotary_encoder_report_event(encoder);
out:
encoder->last_stable = state;
+ mutex_unlock(&encoder->access_mutex);
+
return IRQ_HANDLED;
}
-#ifdef CONFIG_OF
-static const struct of_device_id rotary_encoder_of_match[] = {
- { .compatible = "rotary-encoder", },
- { },
-};
-MODULE_DEVICE_TABLE(of, rotary_encoder_of_match);
-
-static struct rotary_encoder_platform_data *rotary_encoder_parse_dt(struct device *dev)
+static int rotary_encoder_probe(struct platform_device *pdev)
{
- const struct of_device_id *of_id =
- of_match_device(rotary_encoder_of_match, dev);
- struct device_node *np = dev->of_node;
- struct rotary_encoder_platform_data *pdata;
- enum of_gpio_flags flags;
- int error;
-
- if (!of_id || !np)
- return NULL;
-
- pdata = kzalloc(sizeof(struct rotary_encoder_platform_data),
- GFP_KERNEL);
- if (!pdata)
- return ERR_PTR(-ENOMEM);
-
- of_property_read_u32(np, "rotary-encoder,steps", &pdata->steps);
- of_property_read_u32(np, "linux,axis", &pdata->axis);
+ struct device *dev = &pdev->dev;
+ struct rotary_encoder *encoder;
+ struct input_dev *input;
+ irq_handler_t handler;
+ u32 steps_per_period;
+ unsigned int i;
+ int err;
- pdata->gpio_a = of_get_gpio_flags(np, 0, &flags);
- pdata->inverted_a = flags & OF_GPIO_ACTIVE_LOW;
+ encoder = devm_kzalloc(dev, sizeof(struct rotary_encoder), GFP_KERNEL);
+ if (!encoder)
+ return -ENOMEM;
- pdata->gpio_b = of_get_gpio_flags(np, 1, &flags);
- pdata->inverted_b = flags & OF_GPIO_ACTIVE_LOW;
+ mutex_init(&encoder->access_mutex);
- pdata->relative_axis =
- of_property_read_bool(np, "rotary-encoder,relative-axis");
- pdata->rollover = of_property_read_bool(np, "rotary-encoder,rollover");
+ device_property_read_u32(dev, "rotary-encoder,steps", &encoder->steps);
- error = of_property_read_u32(np, "rotary-encoder,steps-per-period",
- &pdata->steps_per_period);
- if (error) {
+ err = device_property_read_u32(dev, "rotary-encoder,steps-per-period",
+ &steps_per_period);
+ if (err) {
/*
- * The 'half-period' property has been deprecated, you must use
- * 'steps-per-period' and set an appropriate value, but we still
- * need to parse it to maintain compatibility.
+ * The 'half-period' property has been deprecated, you must
+ * use 'steps-per-period' and set an appropriate value, but
+ * we still need to parse it to maintain compatibility. If
+ * neither property is present we fall back to the one step
+ * per period behavior.
*/
- if (of_property_read_bool(np, "rotary-encoder,half-period")) {
- pdata->steps_per_period = 2;
- } else {
- /* Fallback to one step per period behavior */
- pdata->steps_per_period = 1;
- }
+ steps_per_period = device_property_read_bool(dev,
+ "rotary-encoder,half-period") ? 2 : 1;
}
- pdata->wakeup_source = of_property_read_bool(np, "wakeup-source");
+ encoder->rollover =
+ device_property_read_bool(dev, "rotary-encoder,rollover");
- return pdata;
-}
-#else
-static inline struct rotary_encoder_platform_data *
-rotary_encoder_parse_dt(struct device *dev)
-{
- return NULL;
-}
-#endif
-
-static int rotary_encoder_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- const struct rotary_encoder_platform_data *pdata = dev_get_platdata(dev);
- struct rotary_encoder *encoder;
- struct input_dev *input;
- irq_handler_t handler;
- int err;
-
- if (!pdata) {
- pdata = rotary_encoder_parse_dt(dev);
- if (IS_ERR(pdata))
- return PTR_ERR(pdata);
+ device_property_read_u32(dev, "linux,axis", &encoder->axis);
+ encoder->relative_axis =
+ device_property_read_bool(dev, "rotary-encoder,relative-axis");
- if (!pdata) {
- dev_err(dev, "missing platform data\n");
- return -EINVAL;
- }
+ encoder->gpios = devm_gpiod_get_array(dev, NULL, GPIOD_IN);
+ if (IS_ERR(encoder->gpios)) {
+ dev_err(dev, "unable to get gpios\n");
+ return PTR_ERR(encoder->gpios);
}
-
- encoder = kzalloc(sizeof(struct rotary_encoder), GFP_KERNEL);
- input = input_allocate_device();
- if (!encoder || !input) {
- err = -ENOMEM;
- goto exit_free_mem;
+ if (encoder->gpios->ndescs < 2) {
+ dev_err(dev, "not enough gpios found\n");
+ return -EINVAL;
}
+ input = devm_input_allocate_device(dev);
+ if (!input)
+ return -ENOMEM;
+
encoder->input = input;
- encoder->pdata = pdata;
input->name = pdev->name;
input->id.bustype = BUS_HOST;
input->dev.parent = dev;
- if (pdata->relative_axis) {
- input->evbit[0] = BIT_MASK(EV_REL);
- input->relbit[0] = BIT_MASK(pdata->axis);
- } else {
- input->evbit[0] = BIT_MASK(EV_ABS);
- input_set_abs_params(encoder->input,
- pdata->axis, 0, pdata->steps, 0, 1);
- }
-
- /* request the GPIOs */
- err = gpio_request_one(pdata->gpio_a, GPIOF_IN, dev_name(dev));
- if (err) {
- dev_err(dev, "unable to request GPIO %d\n", pdata->gpio_a);
- goto exit_free_mem;
- }
-
- err = gpio_request_one(pdata->gpio_b, GPIOF_IN, dev_name(dev));
- if (err) {
- dev_err(dev, "unable to request GPIO %d\n", pdata->gpio_b);
- goto exit_free_gpio_a;
- }
-
- encoder->irq_a = gpio_to_irq(pdata->gpio_a);
- encoder->irq_b = gpio_to_irq(pdata->gpio_b);
+ if (encoder->relative_axis)
+ input_set_capability(input, EV_REL, encoder->axis);
+ else
+ input_set_abs_params(input,
+ encoder->axis, 0, encoder->steps, 0, 1);
- switch (pdata->steps_per_period) {
+ switch (steps_per_period >> (encoder->gpios->ndescs - 2)) {
case 4:
handler = &rotary_encoder_quarter_period_irq;
- encoder->last_stable = rotary_encoder_get_state(pdata);
+ encoder->last_stable = rotary_encoder_get_state(encoder);
break;
case 2:
handler = &rotary_encoder_half_period_irq;
- encoder->last_stable = rotary_encoder_get_state(pdata);
+ encoder->last_stable = rotary_encoder_get_state(encoder);
break;
case 1:
handler = &rotary_encoder_irq;
break;
default:
dev_err(dev, "'%d' is not a valid steps-per-period value\n",
- pdata->steps_per_period);
- err = -EINVAL;
- goto exit_free_gpio_b;
+ steps_per_period);
+ return -EINVAL;
}
- err = request_irq(encoder->irq_a, handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- DRV_NAME, encoder);
- if (err) {
- dev_err(dev, "unable to request IRQ %d\n", encoder->irq_a);
- goto exit_free_gpio_b;
- }
-
- err = request_irq(encoder->irq_b, handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- DRV_NAME, encoder);
- if (err) {
- dev_err(dev, "unable to request IRQ %d\n", encoder->irq_b);
- goto exit_free_irq_a;
+ encoder->irq =
+ devm_kzalloc(dev,
+ sizeof(*encoder->irq) * encoder->gpios->ndescs,
+ GFP_KERNEL);
+ if (!encoder->irq)
+ return -ENOMEM;
+
+ for (i = 0; i < encoder->gpios->ndescs; ++i) {
+ encoder->irq[i] = gpiod_to_irq(encoder->gpios->desc[i]);
+
+ err = devm_request_threaded_irq(dev, encoder->irq[i],
+ NULL, handler,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ DRV_NAME, encoder);
+ if (err) {
+ dev_err(dev, "unable to request IRQ %d (gpio#%d)\n",
+ encoder->irq[i], i);
+ return err;
+ }
}
err = input_register_device(input);
if (err) {
dev_err(dev, "failed to register input device\n");
- goto exit_free_irq_b;
+ return err;
}
- device_init_wakeup(&pdev->dev, pdata->wakeup_source);
+ device_init_wakeup(dev,
+ device_property_read_bool(dev, "wakeup-source"));
platform_set_drvdata(pdev, encoder);
return 0;
-
-exit_free_irq_b:
- free_irq(encoder->irq_b, encoder);
-exit_free_irq_a:
- free_irq(encoder->irq_a, encoder);
-exit_free_gpio_b:
- gpio_free(pdata->gpio_b);
-exit_free_gpio_a:
- gpio_free(pdata->gpio_a);
-exit_free_mem:
- input_free_device(input);
- kfree(encoder);
- if (!dev_get_platdata(&pdev->dev))
- kfree(pdata);
-
- return err;
}
-static int rotary_encoder_remove(struct platform_device *pdev)
-{
- struct rotary_encoder *encoder = platform_get_drvdata(pdev);
- const struct rotary_encoder_platform_data *pdata = encoder->pdata;
-
- device_init_wakeup(&pdev->dev, false);
-
- free_irq(encoder->irq_a, encoder);
- free_irq(encoder->irq_b, encoder);
- gpio_free(pdata->gpio_a);
- gpio_free(pdata->gpio_b);
-
- input_unregister_device(encoder->input);
- kfree(encoder);
-
- if (!dev_get_platdata(&pdev->dev))
- kfree(pdata);
-
- return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int rotary_encoder_suspend(struct device *dev)
+static int __maybe_unused rotary_encoder_suspend(struct device *dev)
{
struct rotary_encoder *encoder = dev_get_drvdata(dev);
+ unsigned int i;
if (device_may_wakeup(dev)) {
- enable_irq_wake(encoder->irq_a);
- enable_irq_wake(encoder->irq_b);
+ for (i = 0; i < encoder->gpios->ndescs; ++i)
+ enable_irq_wake(encoder->irq[i]);
}
return 0;
}
-static int rotary_encoder_resume(struct device *dev)
+static int __maybe_unused rotary_encoder_resume(struct device *dev)
{
struct rotary_encoder *encoder = dev_get_drvdata(dev);
+ unsigned int i;
if (device_may_wakeup(dev)) {
- disable_irq_wake(encoder->irq_a);
- disable_irq_wake(encoder->irq_b);
+ for (i = 0; i < encoder->gpios->ndescs; ++i)
+ disable_irq_wake(encoder->irq[i]);
}
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(rotary_encoder_pm_ops,
- rotary_encoder_suspend, rotary_encoder_resume);
+ rotary_encoder_suspend, rotary_encoder_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id rotary_encoder_of_match[] = {
+ { .compatible = "rotary-encoder", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, rotary_encoder_of_match);
+#endif
static struct platform_driver rotary_encoder_driver = {
.probe = rotary_encoder_probe,
- .remove = rotary_encoder_remove,
.driver = {
.name = DRV_NAME,
.pm = &rotary_encoder_pm_ops,
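Aside (illustration only, not part of the diff): a standalone sketch of the Gray-code decode in rotary_encoder_get_state() and the quarter-period direction rule above, using hard-coded sample GPIO levels instead of real reads:

#include <stdio.h>

/* Gray-to-binary decode as in rotary_encoder_get_state(): each decoded
 * bit conditionally flips the next raw GPIO level, and only the low
 * two bits of the result matter.
 */
static unsigned int decode_state(const int *levels, int ndescs)
{
	unsigned int ret = 0;
	int i;

	for (i = 0; i < ndescs; i++) {
		int val = levels[i];

		if (ret & 1)
			val = !val;
		ret = ret << 1 | val;
	}
	return ret & 3;
}

/* Quarter-period rule: +1 step clockwise, -1 counter-clockwise,
 * 0 for a spurious or ambiguous (two-step) transition.
 */
static int direction(unsigned int last_stable, unsigned int state)
{
	if ((last_stable + 1) % 4 == state)
		return 1;
	if (last_stable == (state + 1) % 4)
		return -1;
	return 0;
}

int main(void)
{
	int levels[2] = { 1, 1 };	/* sample A/B levels */
	unsigned int state = decode_state(levels, 2);

	/* A=1,B=1 decodes to 0b10 = 2; coming from state 1 that is one
	 * step clockwise.
	 */
	printf("state=%u dir=%d\n", state, direction(1, state));
	return 0;
}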
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 5e1665bbaa0b..2f589857a039 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -41,6 +41,7 @@
#define DRIVER_NAME "elan_i2c"
#define ELAN_DRIVER_VERSION "1.6.1"
+#define ELAN_VENDOR_ID 0x04f3
#define ETP_MAX_PRESSURE 255
#define ETP_FWIDTH_REDUCE 90
#define ETP_FINGER_WIDTH 15
@@ -914,6 +915,8 @@ static int elan_setup_input_device(struct elan_tp_data *data)
input->name = "Elan Touchpad";
input->id.bustype = BUS_I2C;
+ input->id.vendor = ELAN_VENDOR_ID;
+ input->id.product = data->product_id;
input_set_drvdata(input, data);
error = input_mt_init_slots(input, ETP_MAX_FINGERS,
diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
index 92c31b8f8fb4..1edfac78d4ac 100644
--- a/drivers/input/serio/parkbd.c
+++ b/drivers/input/serio/parkbd.c
@@ -145,6 +145,7 @@ static int parkbd_getport(struct parport *pp)
{
struct pardev_cb parkbd_parport_cb;
+ memset(&parkbd_parport_cb, 0, sizeof(parkbd_parport_cb));
parkbd_parport_cb.irq_func = parkbd_interrupt;
parkbd_parport_cb.flags = PARPORT_FLAG_EXCL;
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index e7f966da6efa..78ca44840d60 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1819,6 +1819,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0);
input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);
+ /* Verify that a device really has an endpoint */
+ if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+ dev_err(&intf->dev,
+ "interface has %d endpoints, but must have minimum 1\n",
+ intf->altsetting[0].desc.bNumEndpoints);
+ err = -EINVAL;
+ goto fail3;
+ }
endpoint = &intf->altsetting[0].endpoint[0].desc;
/* Go set up our URB, which is called when the tablet receives
@@ -1861,6 +1869,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (i == ARRAY_SIZE(speeds)) {
dev_info(&intf->dev,
"Aiptek tried all speeds, no sane response\n");
+ err = -EINVAL;
goto fail3;
}
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 4a1d218f883b..2160512e861a 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -2471,6 +2471,31 @@ static struct mxt_acpi_platform_data samus_platform_data[] = {
{ }
};
+static unsigned int chromebook_tp_buttons[] = {
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ BTN_LEFT
+};
+
+static struct mxt_acpi_platform_data chromebook_platform_data[] = {
+ {
+ /* Touchpad */
+ .hid = "ATML0000",
+ .pdata = {
+ .t19_num_keys = ARRAY_SIZE(chromebook_tp_buttons),
+ .t19_keymap = chromebook_tp_buttons,
+ },
+ },
+ {
+ /* Touchscreen */
+ .hid = "ATML0001",
+ },
+ { }
+};
+
static const struct dmi_system_id mxt_dmi_table[] = {
{
/* 2015 Google Pixel */
@@ -2481,6 +2506,14 @@ static const struct dmi_system_id mxt_dmi_table[] = {
},
.driver_data = samus_platform_data,
},
+ {
+ /* Other Google Chromebooks */
+ .ident = "Chromebook",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+ },
+ .driver_data = chromebook_platform_data,
+ },
{ }
};
@@ -2685,6 +2718,7 @@ static const struct i2c_device_id mxt_id[] = {
{ "qt602240_ts", 0 },
{ "atmel_mxt_ts", 0 },
{ "atmel_mxt_tp", 0 },
+ { "maxtouch", 0 },
{ "mXT224", 0 },
{ }
};
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 17cc20ef4923..ac09855fa435 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -1316,7 +1316,13 @@ static int __maybe_unused elants_i2c_suspend(struct device *dev)
disable_irq(client->irq);
- if (device_may_wakeup(dev) || ts->keep_power_in_suspend) {
+ if (device_may_wakeup(dev)) {
+ /*
+ * The device will automatically enter idle mode
+ * with reduced power consumption.
+ */
+ ts->wake_irq_enabled = (enable_irq_wake(client->irq) == 0);
+ } else if (ts->keep_power_in_suspend) {
for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
error = elants_i2c_send(client, set_sleep_cmd,
sizeof(set_sleep_cmd));
@@ -1326,10 +1332,6 @@ static int __maybe_unused elants_i2c_suspend(struct device *dev)
dev_err(&client->dev,
"suspend command failed: %d\n", error);
}
-
- if (device_may_wakeup(dev))
- ts->wake_irq_enabled =
- (enable_irq_wake(client->irq) == 0);
} else {
elants_i2c_power_off(ts);
}
@@ -1345,10 +1347,11 @@ static int __maybe_unused elants_i2c_resume(struct device *dev)
int retry_cnt;
int error;
- if (device_may_wakeup(dev) && ts->wake_irq_enabled)
- disable_irq_wake(client->irq);
-
- if (ts->keep_power_in_suspend) {
+ if (device_may_wakeup(dev)) {
+ if (ts->wake_irq_enabled)
+ disable_irq_wake(client->irq);
+ elants_i2c_sw_reset(client);
+ } else if (ts->keep_power_in_suspend) {
for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
error = elants_i2c_send(client, set_active_cmd,
sizeof(set_active_cmd));
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index d21d4edf7236..7caf2fa237f2 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -494,6 +494,22 @@ static void handle_fault_error(struct fault *fault)
}
}
+static bool access_error(struct vm_area_struct *vma, struct fault *fault)
+{
+ unsigned long requested = 0;
+
+ if (fault->flags & PPR_FAULT_EXEC)
+ requested |= VM_EXEC;
+
+ if (fault->flags & PPR_FAULT_READ)
+ requested |= VM_READ;
+
+ if (fault->flags & PPR_FAULT_WRITE)
+ requested |= VM_WRITE;
+
+ return (requested & ~vma->vm_flags) != 0;
+}
+
static void do_fault(struct work_struct *work)
{
struct fault *fault = container_of(work, struct fault, work);
@@ -516,8 +532,8 @@ static void do_fault(struct work_struct *work)
goto out;
}
- if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) {
- /* handle_mm_fault would BUG_ON() */
+ /* Check if we have the right permissions on the vma */
+ if (access_error(vma, fault)) {
up_read(&mm->mmap_sem);
handle_fault_error(fault);
goto out;
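Aside (illustration only, not part of the diff): a simplified userspace model of the access_error() check introduced here — a fault is rejected if it requests any permission the VMA does not grant; the VM_* values are copied from the kernel for illustration:

#include <stdio.h>

#define VM_READ		0x1UL
#define VM_WRITE	0x2UL
#define VM_EXEC		0x4UL

static int access_error(unsigned long vm_flags, unsigned long requested)
{
	return (requested & ~vm_flags) != 0;
}

int main(void)
{
	unsigned long vm_flags = VM_READ;	/* read-only mapping */

	/* A read fault is fine, a write fault must be rejected. */
	printf("read fault : %s\n",
	       access_error(vm_flags, VM_READ) ? "reject" : "allow");
	printf("write fault: %s\n",
	       access_error(vm_flags, VM_WRITE) ? "reject" : "allow");
	return 0;
}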
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 3a20db4f8604..72d6182666cb 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -21,10 +21,13 @@
#include <linux/device.h>
#include <linux/dma-iommu.h>
+#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/vmalloc.h>
int iommu_dma_init(void)
{
@@ -191,6 +194,7 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
{
struct page **pages;
unsigned int i = 0, array_size = count * sizeof(*pages);
+ unsigned int order = MAX_ORDER;
if (array_size <= PAGE_SIZE)
pages = kzalloc(array_size, GFP_KERNEL);
@@ -204,14 +208,15 @@ static struct page **__iommu_dma_alloc_pages(unsigned int count, gfp_t gfp)
while (count) {
struct page *page = NULL;
- int j, order = __fls(count);
+ int j;
/*
* Higher-order allocations are a convenience rather
* than a necessity, hence using __GFP_NORETRY until
* falling back to single-page allocations.
*/
- for (order = min(order, MAX_ORDER); order > 0; order--) {
+ for (order = min_t(unsigned int, order, __fls(count));
+ order > 0; order--) {
page = alloc_pages(gfp | __GFP_NORETRY, order);
if (!page)
continue;
@@ -453,7 +458,7 @@ int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
size_t s_offset = iova_offset(iovad, s->offset);
size_t s_length = s->length;
- sg_dma_address(s) = s->offset;
+ sg_dma_address(s) = s_offset;
sg_dma_len(s) = s_length;
s->offset -= s_offset;
s_length = iova_align(iovad, s_length + s_offset);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f1042daef9ad..ac7387686ddc 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2159,7 +2159,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
sg_res = aligned_nrpages(sg->offset, sg->length);
sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
sg->dma_length = sg->length;
- pteval = (sg_phys(sg) & PAGE_MASK) | prot;
+ pteval = page_to_phys(sg_page(sg)) | prot;
phys_pfn = pteval >> VTD_PAGE_SHIFT;
}
@@ -3704,7 +3704,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
for_each_sg(sglist, sg, nelems, i) {
BUG_ON(!sg_page(sg));
- sg->dma_address = sg_phys(sg);
+ sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
sg->dma_length = sg->length;
}
return nelems;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index c69e3f9ec958..50464833d0b8 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -484,6 +484,23 @@ struct page_req_dsc {
};
#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)
+
+static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
+{
+ unsigned long requested = 0;
+
+ if (req->exe_req)
+ requested |= VM_EXEC;
+
+ if (req->rd_req)
+ requested |= VM_READ;
+
+ if (req->wr_req)
+ requested |= VM_WRITE;
+
+ return (requested & ~vma->vm_flags) != 0;
+}
+
static irqreturn_t prq_event_thread(int irq, void *d)
{
struct intel_iommu *iommu = d;
@@ -539,6 +556,9 @@ static irqreturn_t prq_event_thread(int irq, void *d)
if (!vma || address < vma->vm_start)
goto invalid;
+ if (access_error(vma, req))
+ goto invalid;
+
ret = handle_mm_fault(svm->mm, vma, address,
req->wr_req ? FAULT_FLAG_WRITE : 0);
if (ret & VM_FAULT_ERROR)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index abae363c7b9b..0e3b0092ec92 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1430,7 +1430,7 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
for_each_sg(sg, s, nents, i) {
- phys_addr_t phys = sg_phys(s);
+ phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
/*
* We are mapping on IOMMU page boundaries, so offset within
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 8cf605fa9946..dfb868e2d129 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -295,7 +295,7 @@ static struct iommu_gather_ops ipmmu_gather_ops = {
static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
- phys_addr_t ttbr;
+ u64 ttbr;
/*
* Allocate the page table operations.
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 375be509e95f..2a506fe0c8a4 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -67,8 +67,7 @@ static int write_modem(struct cardstate *cs)
struct sk_buff *skb = bcs->tx_skb;
int sent = -EOPNOTSUPP;
- if (!tty || !tty->driver || !skb)
- return -EINVAL;
+ WARN_ON(!tty || !tty->ops || !skb);
if (!skb->len) {
dev_kfree_skb_any(skb);
@@ -109,8 +108,7 @@ static int send_cb(struct cardstate *cs)
unsigned long flags;
int sent = 0;
- if (!tty || !tty->driver)
- return -EFAULT;
+ WARN_ON(!tty || !tty->ops);
cb = cs->cmdbuf;
if (!cb)
@@ -370,19 +368,18 @@ static void gigaset_freecshw(struct cardstate *cs)
tasklet_kill(&cs->write_tasklet);
if (!cs->hw.ser)
return;
- dev_set_drvdata(&cs->hw.ser->dev.dev, NULL);
platform_device_unregister(&cs->hw.ser->dev);
- kfree(cs->hw.ser);
- cs->hw.ser = NULL;
}
static void gigaset_device_release(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
+ struct cardstate *cs = dev_get_drvdata(dev);
- /* adapted from platform_device_release() in drivers/base/platform.c */
- kfree(dev->platform_data);
- kfree(pdev->resource);
+ if (!cs)
+ return;
+ dev_set_drvdata(dev, NULL);
+ kfree(cs->hw.ser);
+ cs->hw.ser = NULL;
}
/*
@@ -432,7 +429,9 @@ static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
struct tty_struct *tty = cs->hw.ser->tty;
unsigned int set, clear;
- if (!tty || !tty->driver || !tty->ops->tiocmset)
+ WARN_ON(!tty || !tty->ops);
+ /* tiocmset is an optional tty driver method */
+ if (!tty->ops->tiocmset)
return -EINVAL;
set = new_state & ~old_state;
clear = old_state & ~new_state;
diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c
index a77eea594b69..cb428b9ee441 100644
--- a/drivers/isdn/hardware/mISDN/mISDNipac.c
+++ b/drivers/isdn/hardware/mISDN/mISDNipac.c
@@ -1170,7 +1170,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
if (ipac->type & IPAC_TYPE_IPACX) {
ista = ReadIPAC(ipac, ISACX_ISTA);
- while (ista && cnt--) {
+ while (ista && --cnt) {
pr_debug("%s: ISTA %02x\n", ipac->name, ista);
if (ista & IPACX__ICA)
ipac_irq(&ipac->hscx[0], ista);
@@ -1182,7 +1182,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
}
} else if (ipac->type & IPAC_TYPE_IPAC) {
ista = ReadIPAC(ipac, IPAC_ISTA);
- while (ista && cnt--) {
+ while (ista && --cnt) {
pr_debug("%s: ISTA %02x\n", ipac->name, ista);
if (ista & (IPAC__ICD | IPAC__EXD)) {
istad = ReadISAC(isac, ISAC_ISTA);
@@ -1200,7 +1200,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
ista = ReadIPAC(ipac, IPAC_ISTA);
}
} else if (ipac->type & IPAC_TYPE_HSCX) {
- while (cnt) {
+ while (--cnt) {
ista = ReadIPAC(ipac, IPAC_ISTAB + ipac->hscx[1].off);
pr_debug("%s: B2 ISTA %02x\n", ipac->name, ista);
if (ista)
@@ -1211,7 +1211,6 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
mISDNisac_irq(isac, istad);
if (0 == (ista | istad))
break;
- cnt--;
}
}
if (cnt > maxloop) /* only for ISAC/HSCX without PCI IRQ test */
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index f434e89e1c7a..a54b339951a3 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -75,7 +75,7 @@ static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
struct nvm_block *blk;
int i;
- lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun];
+ lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
for (i = 0; i < nr_blocks; i++) {
if (blks[i] == 0)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 807095f4c793..61aacab424cf 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -314,8 +314,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
*/
void mddev_suspend(struct mddev *mddev)
{
- BUG_ON(mddev->suspended);
- mddev->suspended = 1;
+ if (mddev->suspended++)
+ return;
synchronize_rcu();
wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
mddev->pers->quiesce(mddev, 1);
@@ -326,7 +326,8 @@ EXPORT_SYMBOL_GPL(mddev_suspend);
void mddev_resume(struct mddev *mddev)
{
- mddev->suspended = 0;
+ if (--mddev->suspended)
+ return;
wake_up(&mddev->sb_wait);
mddev->pers->quiesce(mddev, 0);
@@ -1652,7 +1653,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
rdev->journal_tail = le64_to_cpu(sb->journal_tail);
if (mddev->recovery_cp == MaxSector)
set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
- rdev->raid_disk = mddev->raid_disks;
+ rdev->raid_disk = 0;
break;
default:
rdev->saved_raid_disk = role;
@@ -2773,6 +2774,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
/* Activating a spare .. or possibly reactivating
* if we ever get bitmaps working here.
*/
+ int err;
if (rdev->raid_disk != -1)
return -EBUSY;
@@ -2794,9 +2796,15 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
rdev->saved_raid_disk = -1;
clear_bit(In_sync, &rdev->flags);
clear_bit(Bitmap_sync, &rdev->flags);
- remove_and_add_spares(rdev->mddev, rdev);
- if (rdev->raid_disk == -1)
- return -EBUSY;
+ err = rdev->mddev->pers->
+ hot_add_disk(rdev->mddev, rdev);
+ if (err) {
+ rdev->raid_disk = -1;
+ return err;
+ } else
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
+ if (sysfs_link_rdev(rdev->mddev, rdev))
+ /* failure here is OK */;
/* don't wakeup anyone, leave that to userspace. */
} else {
if (slot >= rdev->mddev->raid_disks &&
@@ -4318,8 +4326,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
}
mddev_unlock(mddev);
}
- } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
- test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
return -EBUSY;
else if (cmd_match(page, "resync"))
clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
@@ -4332,8 +4339,12 @@ action_store(struct mddev *mddev, const char *page, size_t len)
return -EINVAL;
err = mddev_lock(mddev);
if (!err) {
- clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
- err = mddev->pers->start_reshape(mddev);
+ if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
+ err = -EBUSY;
+ else {
+ clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+ err = mddev->pers->start_reshape(mddev);
+ }
mddev_unlock(mddev);
}
if (err)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 2bea51edfab7..ca0b643fe3c1 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -566,7 +566,9 @@ static inline char * mdname (struct mddev * mddev)
static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
char nm[20];
- if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) {
+ if (!test_bit(Replacement, &rdev->flags) &&
+ !test_bit(Journal, &rdev->flags) &&
+ mddev->kobj.sd) {
sprintf(nm, "rd%d", rdev->raid_disk);
return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
} else
@@ -576,7 +578,9 @@ static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
char nm[20];
- if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) {
+ if (!test_bit(Replacement, &rdev->flags) &&
+ !test_bit(Journal, &rdev->flags) &&
+ mddev->kobj.sd) {
sprintf(nm, "rd%d", rdev->raid_disk);
sysfs_remove_link(&mddev->kobj, nm);
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 41d70bc9ba2f..84e597e1c489 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1946,6 +1946,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
first = i;
fbio = r10_bio->devs[i].bio;
+ fbio->bi_iter.bi_size = r10_bio->sectors << 9;
+ fbio->bi_iter.bi_idx = 0;
vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
/* now find blocks with errors */
@@ -1989,7 +1991,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
bio_reset(tbio);
tbio->bi_vcnt = vcnt;
- tbio->bi_iter.bi_size = r10_bio->sectors << 9;
+ tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;
tbio->bi_rw = WRITE;
tbio->bi_private = r10_bio;
tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 8616fa8193bc..c2e60b4f292d 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -805,11 +805,11 @@ static void ivtv_init_struct2(struct ivtv *itv)
{
int i;
- for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS - 1; i++)
+ for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS; i++)
if (itv->card->video_inputs[i].video_type == 0)
break;
itv->nof_inputs = i;
- for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS - 1; i++)
+ for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS; i++)
if (itv->card->audio_inputs[i].audio_type == 0)
break;
itv->nof_audio_inputs = i;
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index fcbb49757614..565a59310747 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -134,7 +134,7 @@ struct airspy {
int urbs_submitted;
/* USB control message buffer */
- #define BUF_SIZE 24
+ #define BUF_SIZE 128
u8 buf[BUF_SIZE];
/* Current configuration */
diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c
index e05bfec90f46..0fe5cb2c260c 100644
--- a/drivers/media/usb/hackrf/hackrf.c
+++ b/drivers/media/usb/hackrf/hackrf.c
@@ -24,6 +24,15 @@
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
+/*
+ * The Avago MGA-81563 RF amplifier used here can be destroyed fairly easily
+ * by too strong a signal or by transmitting into a bad antenna.
+ * To be safe, set the RF gain controls to the 'grabbed' state by default.
+ */
+static bool hackrf_enable_rf_gain_ctrl;
+module_param_named(enable_rf_gain_ctrl, hackrf_enable_rf_gain_ctrl, bool, 0644);
+MODULE_PARM_DESC(enable_rf_gain_ctrl, "enable RX/TX RF amplifier control (warn: could damage amplifier)");
+
/* HackRF USB API commands (from HackRF Library) */
enum {
CMD_SET_TRANSCEIVER_MODE = 0x01,
@@ -1451,6 +1460,7 @@ static int hackrf_probe(struct usb_interface *intf,
dev_err(dev->dev, "Could not initialize controls\n");
goto err_v4l2_ctrl_handler_free_rx;
}
+ v4l2_ctrl_grab(dev->rx_rf_gain, !hackrf_enable_rf_gain_ctrl);
v4l2_ctrl_handler_setup(&dev->rx_ctrl_handler);
/* Register controls for transmitter */
@@ -1471,6 +1481,7 @@ static int hackrf_probe(struct usb_interface *intf,
dev_err(dev->dev, "Could not initialize controls\n");
goto err_v4l2_ctrl_handler_free_tx;
}
+ v4l2_ctrl_grab(dev->tx_rf_gain, !hackrf_enable_rf_gain_ctrl);
v4l2_ctrl_handler_setup(&dev->tx_ctrl_handler);
/* Register the v4l2_device structure */
@@ -1530,7 +1541,7 @@ err_v4l2_ctrl_handler_free_rx:
err_kfree:
kfree(dev);
err:
- dev_dbg(dev->dev, "failed=%d\n", ret);
+ dev_dbg(&intf->dev, "failed=%d\n", ret);
return ret;
}
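The hackrf hunks above gate the RX/TX RF-gain controls behind a module parameter by "grabbing" them unless the user opts in at load time, and the error path now logs through &intf->dev because the private structure has already been freed by that point. A minimal sketch of the bool-module-parameter plus v4l2_ctrl_grab() pattern; the parameter and control names here are hypothetical, not the driver's:

/* Hypothetical sketch: a bool module parameter that decides whether a
 * V4L2 control is grabbed (visible but unchangeable) by default.
 */
#include <linux/module.h>
#include <media/v4l2-ctrls.h>

static bool enable_risky_ctrl;          /* defaults to false */
module_param(enable_risky_ctrl, bool, 0644);
MODULE_PARM_DESC(enable_risky_ctrl, "allow userspace to change the risky control");

static void my_setup_controls(struct v4l2_ctrl *risky_ctrl)
{
        /* grabbed == true: userspace may read but not change the control */
        v4l2_ctrl_grab(risky_ctrl, !enable_risky_ctrl);
}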
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index e87459f6d686..acd1460cf787 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
+#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/slab.h>
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index 95c13b2ffa79..ffa288474820 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -426,15 +426,6 @@ int add_mtd_device(struct mtd_info *mtd)
mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
- if (mtd->dev.parent) {
- if (!mtd->owner && mtd->dev.parent->driver)
- mtd->owner = mtd->dev.parent->driver->owner;
- if (!mtd->name)
- mtd->name = dev_name(mtd->dev.parent);
- } else {
- pr_debug("mtd device won't show a device symlink in sysfs\n");
- }
-
/* Some chips always power up locked. Unlock them now */
if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
error = mtd_unlock(mtd, 0, mtd->size);
@@ -549,6 +540,21 @@ static int mtd_add_device_partitions(struct mtd_info *mtd,
return 0;
}
+/*
+ * Set a few defaults based on the parent device, if they were not provided
+ * by the driver
+ */
+static void mtd_set_dev_defaults(struct mtd_info *mtd)
+{
+ if (mtd->dev.parent) {
+ if (!mtd->owner && mtd->dev.parent->driver)
+ mtd->owner = mtd->dev.parent->driver->owner;
+ if (!mtd->name)
+ mtd->name = dev_name(mtd->dev.parent);
+ } else {
+ pr_debug("mtd device won't show a device symlink in sysfs\n");
+ }
+}
/**
* mtd_device_parse_register - parse partitions and register an MTD device.
@@ -587,6 +593,8 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
int ret;
struct mtd_partition *real_parts = NULL;
+ mtd_set_dev_defaults(mtd);
+
ret = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
if (ret <= 0 && nr_parts && parts) {
real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c
index 669c3452f278..9ed6038e47d2 100644
--- a/drivers/mtd/ofpart.c
+++ b/drivers/mtd/ofpart.c
@@ -46,10 +46,18 @@ static int parse_ofpart_partitions(struct mtd_info *master,
ofpart_node = of_get_child_by_name(mtd_node, "partitions");
if (!ofpart_node) {
- pr_warn("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n",
- master->name, mtd_node->full_name);
+ /*
+ * We might get here even when ofpart isn't used at all (e.g.,
+ * when using another parser), so don't be louder than
+ * KERN_DEBUG
+ */
+ pr_debug("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n",
+ master->name, mtd_node->full_name);
ofpart_node = mtd_node;
dedicated = false;
+ } else if (!of_device_is_compatible(ofpart_node, "fixed-partitions")) {
+ /* The 'partitions' subnode might be used by another parser */
+ return 0;
}
/* First count the subnodes */
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 49883905a434..32477c4eb421 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -516,8 +516,8 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
status_old = read_sr(nor);
/* Cannot unlock; would unlock larger region than requested */
- if (stm_is_locked_sr(nor, status_old, ofs - mtd->erasesize,
- mtd->erasesize))
+ if (stm_is_locked_sr(nor, ofs - mtd->erasesize, mtd->erasesize,
+ status_old))
return -EINVAL;
/*
@@ -1200,8 +1200,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
if (JEDEC_MFR(info) == SNOR_MFR_ATMEL ||
JEDEC_MFR(info) == SNOR_MFR_INTEL ||
- JEDEC_MFR(info) == SNOR_MFR_SST ||
- JEDEC_MFR(info) == SNOR_MFR_WINBOND) {
+ JEDEC_MFR(info) == SNOR_MFR_SST) {
write_enable(nor);
write_sr(nor, 0);
}
@@ -1217,8 +1216,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
mtd->_read = spi_nor_read;
/* NOR protection support for STmicro/Micron chips and similar */
- if (JEDEC_MFR(info) == SNOR_MFR_MICRON ||
- JEDEC_MFR(info) == SNOR_MFR_WINBOND) {
+ if (JEDEC_MFR(info) == SNOR_MFR_MICRON) {
nor->flash_lock = stm_lock;
nor->flash_unlock = stm_unlock;
nor->flash_is_locked = stm_is_locked;
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index b077e43b5ba9..c4cb15a3098c 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -236,7 +236,7 @@ int ubi_debugfs_init(void)
dfs_rootdir = debugfs_create_dir("ubi", NULL);
if (IS_ERR_OR_NULL(dfs_rootdir)) {
- int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
+ int err = dfs_rootdir ? PTR_ERR(dfs_rootdir) : -ENODEV;
pr_err("UBI error: cannot create \"ubi\" debugfs directory, error %d\n",
err);
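The UBI debugfs fix above had the ternary arms swapped: for a pointer that may be either NULL or an ERR_PTR(), NULL carries no embedded error code (here it is mapped to -ENODEV), while a non-NULL error pointer yields its code via PTR_ERR(). A small sketch of that convention (hypothetical helper name):

/* Hypothetical sketch: derive an errno from a pointer that may be either
 * NULL (no code available, pick a default) or ERR_PTR(-E...).
 */
#include <linux/err.h>

static int my_ptr_to_err(void *p, int default_err)
{
        if (!IS_ERR_OR_NULL(p))
                return 0;                       /* valid pointer */
        return p ? PTR_ERR(p) : default_err;    /* ERR_PTR carries the code */
}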
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 1fc23e48fe8e..10cf3b549959 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -1299,7 +1299,7 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
goto exit;
- crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
+ crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
if (hdr_crc != crc) {
ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index eb4489f9082f..56065632a5b8 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -603,6 +603,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
return 0;
}
+static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
/**
* do_sync_erase - run the erase worker synchronously.
* @ubi: UBI device description object
@@ -615,20 +616,16 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
int vol_id, int lnum, int torture)
{
- struct ubi_work *wl_wrk;
+ struct ubi_work wl_wrk;
dbg_wl("sync erase of PEB %i", e->pnum);
- wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
- if (!wl_wrk)
- return -ENOMEM;
-
- wl_wrk->e = e;
- wl_wrk->vol_id = vol_id;
- wl_wrk->lnum = lnum;
- wl_wrk->torture = torture;
+ wl_wrk.e = e;
+ wl_wrk.vol_id = vol_id;
+ wl_wrk.lnum = lnum;
+ wl_wrk.torture = torture;
- return erase_worker(ubi, wl_wrk, 0);
+ return __erase_worker(ubi, &wl_wrk);
}
/**
@@ -1014,7 +1011,7 @@ out_unlock:
}
/**
- * erase_worker - physical eraseblock erase worker function.
+ * __erase_worker - physical eraseblock erase worker function.
* @ubi: UBI device description object
* @wl_wrk: the work object
* @shutdown: non-zero if the worker has to free memory and exit
@@ -1025,8 +1022,7 @@ out_unlock:
* needed. Returns zero in case of success and a negative error code in case of
* failure.
*/
-static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
- int shutdown)
+static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
{
struct ubi_wl_entry *e = wl_wrk->e;
int pnum = e->pnum;
@@ -1034,21 +1030,11 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int lnum = wl_wrk->lnum;
int err, available_consumed = 0;
- if (shutdown) {
- dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
- kfree(wl_wrk);
- wl_entry_destroy(ubi, e);
- return 0;
- }
-
dbg_wl("erase PEB %d EC %d LEB %d:%d",
pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
err = sync_erase(ubi, e, wl_wrk->torture);
if (!err) {
- /* Fine, we've erased it successfully */
- kfree(wl_wrk);
-
spin_lock(&ubi->wl_lock);
wl_tree_add(e, &ubi->free);
ubi->free_count++;
@@ -1066,7 +1052,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
}
ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
- kfree(wl_wrk);
if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
err == -EBUSY) {
@@ -1075,6 +1060,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
/* Re-schedule the LEB for erasure */
err1 = schedule_erase(ubi, e, vol_id, lnum, 0);
if (err1) {
+ wl_entry_destroy(ubi, e);
err = err1;
goto out_ro;
}
@@ -1150,6 +1136,25 @@ out_ro:
return err;
}
+static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
+ int shutdown)
+{
+ int ret;
+
+ if (shutdown) {
+ struct ubi_wl_entry *e = wl_wrk->e;
+
+ dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
+ kfree(wl_wrk);
+ wl_entry_destroy(ubi, e);
+ return 0;
+ }
+
+ ret = __erase_worker(ubi, wl_wrk);
+ kfree(wl_wrk);
+ return ret;
+}
+
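The refactor above splits the erase worker so the synchronous path can keep its work item on the stack: __erase_worker() does the erase and never frees its argument, while the erase_worker() wrapper owns the kmalloc'd item used on the asynchronous path and frees it exactly once. A stripped-down sketch of that ownership split, with hypothetical types and names standing in for the UBI structures:

/* Hypothetical sketch: let a synchronous caller use an on-stack work item
 * by keeping every kfree() in the asynchronous wrapper.
 */
#include <linux/slab.h>

struct my_ctx;

struct my_work {
        int arg;
};

static int __do_work(struct my_ctx *ctx, struct my_work *w)
{
        /* does the job; never frees w, so w may live on the caller's stack */
        return 0;
}

static int do_work_async(struct my_ctx *ctx, struct my_work *w)
{
        int ret = __do_work(ctx, w);

        kfree(w);               /* async path owns the heap allocation */
        return ret;
}

static int do_work_sync(struct my_ctx *ctx, int arg)
{
        struct my_work w = { .arg = arg };      /* no allocation, no -ENOMEM path */

        return __do_work(ctx, &w);
}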
/**
* ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
* @ubi: UBI device description object
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 970781a9e677..f6a7161e3b85 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1849,7 +1849,7 @@ static int xgbe_exit(struct xgbe_prv_data *pdata)
usleep_range(10, 15);
/* Poll Until Poll Condition */
- while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
+ while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
usleep_range(500, 600);
if (!count)
@@ -1873,7 +1873,7 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
/* Poll Until Poll Condition */
for (i = 0; i < pdata->tx_q_count; i++) {
count = 2000;
- while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
+ while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
MTL_Q_TQOMR, FTQ))
usleep_range(500, 600);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 9147a0107c44..d0ae1a6cc212 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -289,6 +289,7 @@ static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
struct sk_buff *skb)
{
struct device *dev = ndev_to_dev(tx_ring->ndev);
+ struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
struct xgene_enet_raw_desc *raw_desc;
__le64 *exp_desc = NULL, *exp_bufs = NULL;
dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;
@@ -419,6 +420,7 @@ out:
raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
SET_VAL(USERINFO, tx_ring->tail));
tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
+ pdata->tx_level += count;
tx_ring->tail = tail;
return count;
@@ -429,14 +431,13 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
- struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
- u32 tx_level, cq_level;
+ u32 tx_level = pdata->tx_level;
int count;
- tx_level = pdata->ring_ops->len(tx_ring);
- cq_level = pdata->ring_ops->len(cp_ring);
- if (unlikely(tx_level > pdata->tx_qcnt_hi ||
- cq_level > pdata->cp_qcnt_hi)) {
+ if (tx_level < pdata->txc_level)
+ tx_level += ((typeof(pdata->tx_level))~0U);
+
+ if ((tx_level - pdata->txc_level) > pdata->tx_qcnt_hi) {
netif_stop_queue(ndev);
return NETDEV_TX_BUSY;
}
@@ -539,10 +540,13 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
struct xgene_enet_raw_desc *raw_desc, *exp_desc;
u16 head = ring->head;
u16 slots = ring->slots - 1;
- int ret, count = 0, processed = 0;
+ int ret, desc_count, count = 0, processed = 0;
+ bool is_completion;
do {
raw_desc = &ring->raw_desc[head];
+ desc_count = 0;
+ is_completion = false;
exp_desc = NULL;
if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
break;
@@ -559,18 +563,24 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
}
dma_rmb();
count++;
+ desc_count++;
}
- if (is_rx_desc(raw_desc))
+ if (is_rx_desc(raw_desc)) {
ret = xgene_enet_rx_frame(ring, raw_desc);
- else
+ } else {
ret = xgene_enet_tx_completion(ring, raw_desc);
+ is_completion = true;
+ }
xgene_enet_mark_desc_slot_empty(raw_desc);
if (exp_desc)
xgene_enet_mark_desc_slot_empty(exp_desc);
head = (head + 1) & slots;
count++;
+ desc_count++;
processed++;
+ if (is_completion)
+ pdata->txc_level += desc_count;
if (ret)
break;
@@ -580,10 +590,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
pdata->ring_ops->wr_cmd(ring, -count);
ring->head = head;
- if (netif_queue_stopped(ring->ndev)) {
- if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low)
- netif_wake_queue(ring->ndev);
- }
+ if (netif_queue_stopped(ring->ndev))
+ netif_start_queue(ring->ndev);
}
return processed;
@@ -1033,9 +1041,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
pdata->tx_ring->cp_ring = cp_ring;
pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);
- pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2;
- pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
- pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;
+ pdata->tx_qcnt_hi = pdata->tx_ring->slots - 128;
return 0;
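Instead of reading the hardware ring occupancy on every transmit, the xgene driver above now keeps two free-running u16 counters, tx_level (descriptors queued) and txc_level (descriptors completed), and compares their difference against a high-water mark, applying a manual correction when the queued counter has wrapped below the completed one. A generic sketch of the free-running producer/consumer counter idiom, relying on unsigned subtraction rather than an explicit correction; this is not the driver's exact arithmetic and the names are hypothetical:

/* Hypothetical sketch: track ring occupancy with two free-running unsigned
 * counters; unsigned subtraction yields the in-flight count across wrap.
 */
#include <linux/types.h>

struct my_ring {
        u16 queued;             /* incremented by the transmit path */
        u16 completed;          /* incremented by the completion handler */
        u16 high_water;         /* stop the queue above this many in flight */
};

static bool my_ring_congested(const struct my_ring *r)
{
        u16 in_flight = (u16)(r->queued - r->completed);        /* wrap-safe */

        return in_flight > r->high_water;
}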
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index a6e56b88c0a0..1aa72c787f8d 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -155,11 +155,11 @@ struct xgene_enet_pdata {
enum xgene_enet_id enet_id;
struct xgene_enet_desc_ring *tx_ring;
struct xgene_enet_desc_ring *rx_ring;
+ u16 tx_level;
+ u16 txc_level;
char *dev_name;
u32 rx_buff_cnt;
u32 tx_qcnt_hi;
- u32 cp_qcnt_hi;
- u32 cp_qcnt_low;
u32 rx_irq;
u32 txc_irq;
u8 cq_cnt;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 2795d6db10e1..8b5988e210d5 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1016,13 +1016,12 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
8 * 4;
- ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
- &ring_header->dma);
+ ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size,
+ &ring_header->dma, GFP_KERNEL);
if (unlikely(!ring_header->desc)) {
- dev_err(&pdev->dev, "pci_alloc_consistend failed\n");
+ dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
goto err_nomem;
}
- memset(ring_header->desc, 0, ring_header->size);
/* init TPD ring */
tpd_ring[0].dma = roundup(ring_header->dma, 8);
diff --git a/drivers/net/ethernet/aurora/Kconfig b/drivers/net/ethernet/aurora/Kconfig
index a3c7106fdf85..8ba7f8ff3434 100644
--- a/drivers/net/ethernet/aurora/Kconfig
+++ b/drivers/net/ethernet/aurora/Kconfig
@@ -13,6 +13,7 @@ if NET_VENDOR_AURORA
config AURORA_NB8800
tristate "Aurora AU-NB8800 support"
+ depends on HAS_DMA
select PHYLIB
help
Support for the AU-NB8800 gigabit Ethernet controller.
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index f8d7a2f06950..c82ab87fcbe8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3430,25 +3430,29 @@ static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
return rc;
}
-#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+/* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBDs and last BD) */
+#define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS 4
+
+/* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
+#define BNX2X_NUM_TSO_WIN_SUB_BDS 3
+
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
/* check if packet requires linearization (packet is too fragmented)
no need to check fragmentation if page size > 8K (there will be no
violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
u32 xmit_type)
{
- int to_copy = 0;
- int hlen = 0;
- int first_bd_sz = 0;
+ int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
+ int to_copy = 0, hlen = 0;
- /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
- if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
+ if (xmit_type & XMIT_GSO_ENC)
+ num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
+ if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
if (xmit_type & XMIT_GSO) {
unsigned short lso_mss = skb_shinfo(skb)->gso_size;
- /* Check if LSO packet needs to be copied:
- 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
- int wnd_size = MAX_FETCH_BD - 3;
+ int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
/* Number of windows to check */
int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
int wnd_idx = 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index bdf094fb6ef9..07f5f239cb65 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2693,17 +2693,16 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
req.ver_upd = DRV_VER_UPD;
if (BNXT_PF(bp)) {
- unsigned long vf_req_snif_bmap[4];
+ DECLARE_BITMAP(vf_req_snif_bmap, 256);
u32 *data = (u32 *)vf_req_snif_bmap;
- memset(vf_req_snif_bmap, 0, 32);
+ memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
__set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
- for (i = 0; i < 8; i++) {
- req.vf_req_fwd[i] = cpu_to_le32(*data);
- data++;
- }
+ for (i = 0; i < 8; i++)
+ req.vf_req_fwd[i] = cpu_to_le32(data[i]);
+
req.enables |=
cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
}
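The bnxt hunk above replaces a hand-sized unsigned long[4] with DECLARE_BITMAP(, 256), so the buffer is sized from the number of bits rather than a word-count guess (on 32-bit builds the old array was only 16 bytes, smaller than the 32 bytes the memset and copy loop touched), and the copy now uses sizeof() and array indexing instead of magic constants. A short sketch of the same pattern, with a hypothetical bit count and request field:

/* Hypothetical sketch: build a fixed-size bitmap and copy it into a
 * little-endian 32-bit request array without magic byte counts.
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#define MY_NBITS 256

static void my_fill_request(__le32 *req_words, const u8 *ids, int nr_ids)
{
        DECLARE_BITMAP(map, MY_NBITS);          /* sized in bits, not words */
        u32 *data = (u32 *)map;
        int i;

        bitmap_zero(map, MY_NBITS);
        for (i = 0; i < nr_ids; i++)
                __set_bit(ids[i], map);

        for (i = 0; i < MY_NBITS / 32; i++)
                req_words[i] = cpu_to_le32(data[i]);
}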
@@ -4603,7 +4602,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
bp->nge_port_cnt = 1;
}
- bp->state = BNXT_STATE_OPEN;
+ set_bit(BNXT_STATE_OPEN, &bp->state);
bnxt_enable_int(bp);
/* Enable TX queues */
bnxt_tx_enable(bp);
@@ -4679,8 +4678,10 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* Change device state to avoid TX queue wake up's */
bnxt_tx_disable(bp);
- bp->state = BNXT_STATE_CLOSED;
- cancel_work_sync(&bp->sp_task);
+ clear_bit(BNXT_STATE_OPEN, &bp->state);
+ smp_mb__after_atomic();
+ while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
+ msleep(20);
/* Flush rings before disabling interrupts */
bnxt_shutdown_nic(bp, irq_re_init);
@@ -5030,8 +5031,10 @@ static void bnxt_dbg_dump_states(struct bnxt *bp)
static void bnxt_reset_task(struct bnxt *bp)
{
bnxt_dbg_dump_states(bp);
- if (netif_running(bp->dev))
- bnxt_tx_disable(bp); /* prevent tx timout again */
+ if (netif_running(bp->dev)) {
+ bnxt_close_nic(bp, false, false);
+ bnxt_open_nic(bp, false, false);
+ }
}
static void bnxt_tx_timeout(struct net_device *dev)
@@ -5081,8 +5084,12 @@ static void bnxt_sp_task(struct work_struct *work)
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
int rc;
- if (bp->state != BNXT_STATE_OPEN)
+ set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ smp_mb__after_atomic();
+ if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+ clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
return;
+ }
if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
bnxt_cfg_rx_mode(bp);
@@ -5106,8 +5113,19 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_hwrm_tunnel_dst_port_free(
bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
}
- if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
+ if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) {
+ /* bnxt_reset_task() calls bnxt_close_nic() which waits
+ * for BNXT_STATE_IN_SP_TASK to clear.
+ */
+ clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ rtnl_lock();
bnxt_reset_task(bp);
+ set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ rtnl_unlock();
+ }
+
+ smp_mb__before_atomic();
+ clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
@@ -5186,7 +5204,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->timer.function = bnxt_timer;
bp->current_interval = BNXT_TIMER_INTERVAL;
- bp->state = BNXT_STATE_CLOSED;
+ clear_bit(BNXT_STATE_OPEN, &bp->state);
return 0;
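The bnxt changes above convert the plain "int state" into an unsigned long of atomic bit flags: the slow-path work marks itself in progress with BNXT_STATE_IN_SP_TASK, the close path clears BNXT_STATE_OPEN and waits for that flag to drop, and the reset case temporarily releases the flag so it can take rtnl_lock and reopen the NIC. A reduced sketch of the open-flag/in-task-flag handshake; the names follow the diff but the surrounding driver (and the reset re-entry detail) is omitted:

/* Hypothetical sketch: coordinate a work handler with the close path using
 * atomic state bits instead of a plain int state variable.
 */
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/delay.h>

#define ST_OPEN         0
#define ST_IN_TASK      1

static void my_sp_task(unsigned long *state)
{
        set_bit(ST_IN_TASK, state);
        smp_mb__after_atomic();                 /* pairs with the close path */
        if (!test_bit(ST_OPEN, state)) {
                clear_bit(ST_IN_TASK, state);
                return;                         /* device closed, do nothing */
        }

        /* ... slow-path work that must not race with close ... */

        smp_mb__before_atomic();
        clear_bit(ST_IN_TASK, state);
}

static void my_close(unsigned long *state)
{
        clear_bit(ST_OPEN, state);
        smp_mb__after_atomic();
        while (test_bit(ST_IN_TASK, state))     /* wait out an in-flight task */
                msleep(20);

        /* ... safe to tear the device down here ... */
}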
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 674bc5159b91..f199f4cc8ffe 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -925,9 +925,9 @@ struct bnxt {
struct timer_list timer;
- int state;
-#define BNXT_STATE_CLOSED 0
-#define BNXT_STATE_OPEN 1
+ unsigned long state;
+#define BNXT_STATE_OPEN 0
+#define BNXT_STATE_IN_SP_TASK 1
struct bnxt_irq *irq_tbl;
u8 mac_addr[ETH_ALEN];
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 7a9af2887d8e..ea044bbcd384 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -21,7 +21,7 @@
#ifdef CONFIG_BNXT_SRIOV
static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id)
{
- if (bp->state != BNXT_STATE_OPEN) {
+ if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
netdev_err(bp->dev, "vf ndo called though PF is down\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 4b7fd63ae57c..5f24d11cb16a 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -37,7 +37,6 @@ struct nicpf {
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF)
u8 vf_lmac_map[MAX_LMAC];
- u8 lmac_cnt;
struct delayed_work dwork;
struct workqueue_struct *check_link;
u8 link[MAX_LMAC];
@@ -280,7 +279,6 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
u64 lmac_credit;
nic->num_vf_en = 0;
- nic->lmac_cnt = 0;
for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
if (!(bgx_map & (1 << bgx)))
@@ -290,7 +288,6 @@ static void nic_set_lmac_vf_mapping(struct nicpf *nic)
nic->vf_lmac_map[next_bgx_lmac++] =
NIC_SET_VF_LMAC_MAP(bgx, lmac);
nic->num_vf_en += lmac_cnt;
- nic->lmac_cnt += lmac_cnt;
/* Program LMAC credits */
lmac_credit = (1ull << 1); /* channel credit enable */
@@ -618,6 +615,21 @@ static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
return 0;
}
+static void nic_enable_vf(struct nicpf *nic, int vf, bool enable)
+{
+ int bgx, lmac;
+
+ nic->vf_enabled[vf] = enable;
+
+ if (vf >= nic->num_vf_en)
+ return;
+
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable);
+}
+
/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
@@ -717,29 +729,14 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
break;
case NIC_MBOX_MSG_CFG_DONE:
/* Last message of VF config msg sequence */
- nic->vf_enabled[vf] = true;
- if (vf >= nic->lmac_cnt)
- goto unlock;
-
- bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
- lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
-
- bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, true);
+ nic_enable_vf(nic, vf, true);
goto unlock;
case NIC_MBOX_MSG_SHUTDOWN:
/* First msg in VF teardown sequence */
- nic->vf_enabled[vf] = false;
if (vf >= nic->num_vf_en)
nic->sqs_used[vf - nic->num_vf_en] = false;
nic->pqs_vf[vf] = 0;
-
- if (vf >= nic->lmac_cnt)
- break;
-
- bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
- lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
-
- bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, false);
+ nic_enable_vf(nic, vf, false);
break;
case NIC_MBOX_MSG_ALLOC_SQS:
nic_alloc_sqs(nic, &mbx.sqs_alloc);
@@ -958,7 +955,7 @@ static void nic_poll_for_link(struct work_struct *work)
mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
- for (vf = 0; vf < nic->lmac_cnt; vf++) {
+ for (vf = 0; vf < nic->num_vf_en; vf++) {
/* Poll only if VF is UP */
if (!nic->vf_enabled[vf])
continue;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
index c308429dd9c7..11dd91e4db56 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c
@@ -295,6 +295,10 @@ struct clip_tbl *t4_init_clip_tbl(unsigned int clipt_start,
INIT_LIST_HEAD(&ctbl->hash_list[i]);
cl_list = t4_alloc_mem(clipt_size*sizeof(struct clip_entry));
+ if (!cl_list) {
+ t4_free_mem(ctbl);
+ return NULL;
+ }
ctbl->cl_list = (void *)cl_list;
for (i = 0; i < clipt_size; i++) {
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index d463563e1f70..6ee78c203eca 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -848,8 +848,6 @@ void be_roce_dev_remove(struct be_adapter *);
/*
* internal function to open-close roce device during ifup-ifdown.
*/
-void be_roce_dev_open(struct be_adapter *);
-void be_roce_dev_close(struct be_adapter *);
void be_roce_dev_shutdown(struct be_adapter *);
#endif /* BE_H */
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index b6ad02909d6b..8a1d9fffd7d6 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3299,8 +3299,10 @@ static int be_msix_register(struct be_adapter *adapter)
return 0;
err_msix:
- for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
+ for (i--; i >= 0; i--) {
+ eqo = &adapter->eq_obj[i];
free_irq(be_msix_vec_get(adapter, eqo), eqo);
+ }
dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
status);
be_msix_disable(adapter);
@@ -3432,8 +3434,6 @@ static int be_close(struct net_device *netdev)
be_disable_if_filters(adapter);
- be_roce_dev_close(adapter);
-
if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
for_all_evt_queues(adapter, eqo, i) {
napi_disable(&eqo->napi);
@@ -3601,8 +3601,6 @@ static int be_open(struct net_device *netdev)
be_link_status_update(adapter, link_status);
netif_tx_start_all_queues(netdev);
- be_roce_dev_open(adapter);
-
#ifdef CONFIG_BE2NET_VXLAN
if (skyhawk_chip(adapter))
vxlan_get_rx_port(netdev);
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 60368207bf58..4089156a7f5e 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -116,40 +116,6 @@ void be_roce_dev_remove(struct be_adapter *adapter)
}
}
-static void _be_roce_dev_open(struct be_adapter *adapter)
-{
- if (ocrdma_drv && adapter->ocrdma_dev &&
- ocrdma_drv->state_change_handler)
- ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
- BE_DEV_UP);
-}
-
-void be_roce_dev_open(struct be_adapter *adapter)
-{
- if (be_roce_supported(adapter)) {
- mutex_lock(&be_adapter_list_lock);
- _be_roce_dev_open(adapter);
- mutex_unlock(&be_adapter_list_lock);
- }
-}
-
-static void _be_roce_dev_close(struct be_adapter *adapter)
-{
- if (ocrdma_drv && adapter->ocrdma_dev &&
- ocrdma_drv->state_change_handler)
- ocrdma_drv->state_change_handler(adapter->ocrdma_dev,
- BE_DEV_DOWN);
-}
-
-void be_roce_dev_close(struct be_adapter *adapter)
-{
- if (be_roce_supported(adapter)) {
- mutex_lock(&be_adapter_list_lock);
- _be_roce_dev_close(adapter);
- mutex_unlock(&be_adapter_list_lock);
- }
-}
-
void be_roce_dev_shutdown(struct be_adapter *adapter)
{
if (be_roce_supported(adapter)) {
@@ -177,8 +143,6 @@ int be_roce_register_driver(struct ocrdma_driver *drv)
_be_roce_dev_add(dev);
netdev = dev->netdev;
- if (netif_running(netdev) && netif_oper_up(netdev))
- _be_roce_dev_open(dev);
}
mutex_unlock(&be_adapter_list_lock);
return 0;
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index cde6ef905ec4..fde609789483 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -60,9 +60,7 @@ struct ocrdma_driver {
void (*state_change_handler) (struct ocrdma_dev *, u32 new_state);
};
-enum {
- BE_DEV_UP = 0,
- BE_DEV_DOWN = 1,
+enum be_roce_event {
BE_DEV_SHUTDOWN = 2
};
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 63c2bcf8031a..b1026689b78f 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -48,21 +48,15 @@ static void nps_enet_read_rx_fifo(struct net_device *ndev,
*reg = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
else { /* !dst_is_aligned */
for (i = 0; i < len; i++, reg++) {
- u32 buf =
- nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
-
- /* to accommodate word-unaligned address of "reg"
- * we have to do memcpy_toio() instead of simple "=".
- */
- memcpy_toio((void __iomem *)reg, &buf, sizeof(buf));
+ u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
+ put_unaligned(buf, reg);
}
}
/* copy last bytes (if any) */
if (last) {
u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
-
- memcpy_toio((void __iomem *)reg, &buf, last);
+ memcpy((u8*)reg, &buf, last);
}
}
@@ -367,7 +361,7 @@ static void nps_enet_send_frame(struct net_device *ndev,
struct nps_enet_tx_ctl tx_ctrl;
short length = skb->len;
u32 i, len = DIV_ROUND_UP(length, sizeof(u32));
- u32 *src = (u32 *)virt_to_phys(skb->data);
+ u32 *src = (void *)skb->data;
bool src_is_aligned = IS_ALIGNED((unsigned long)src, sizeof(u32));
tx_ctrl.value = 0;
@@ -375,17 +369,11 @@ static void nps_enet_send_frame(struct net_device *ndev,
if (src_is_aligned)
for (i = 0; i < len; i++, src++)
nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, *src);
- else { /* !src_is_aligned */
- for (i = 0; i < len; i++, src++) {
- u32 buf;
-
- /* to accommodate word-unaligned address of "src"
- * we have to do memcpy_fromio() instead of simple "="
- */
- memcpy_fromio(&buf, (void __iomem *)src, sizeof(buf));
- nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF, buf);
- }
- }
+ else /* !src_is_aligned */
+ for (i = 0; i < len; i++, src++)
+ nps_enet_reg_set(priv, NPS_ENET_REG_TX_BUF,
+ get_unaligned(src));
+
/* Write the length of the Frame */
tx_ctrl.nt = length;
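The nps_enet hunks above drop the memcpy_toio()/memcpy_fromio() workarounds (and a stray virt_to_phys() that turned a physical address into a pointer) in favour of get_unaligned()/put_unaligned(), which are the intended helpers for plain-memory accesses through a possibly unaligned pointer; the __iomem copy routines were never appropriate because skb->data is ordinary kernel memory. A minimal sketch, where the FIFO accessors are assumed stand-ins rather than real driver functions:

/* Hypothetical sketch: move 32-bit words between a register FIFO and a
 * possibly unaligned packet buffer using the unaligned-access helpers.
 */
#include <linux/types.h>
#include <asm/unaligned.h>

extern u32 my_read_fifo(void);          /* assumed device accessors */
extern void my_write_fifo(u32 val);

static void my_copy_from_fifo(u32 *dst, int words)
{
        while (words--)
                put_unaligned(my_read_fifo(), dst++);   /* dst may be unaligned */
}

static void my_copy_to_fifo(const u32 *src, int words)
{
        while (words--)
                my_write_fifo(get_unaligned(src++));    /* src may be unaligned */
}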
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index 08f5b911d96b..52e0091b4fb2 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -552,7 +552,7 @@ static void tx_restart(struct net_device *dev)
cbd_t __iomem *prev_bd;
cbd_t __iomem *last_tx_bd;
- last_tx_bd = fep->tx_bd_base + (fpi->tx_ring * sizeof(cbd_t));
+ last_tx_bd = fep->tx_bd_base + ((fpi->tx_ring - 1) * sizeof(cbd_t));
/* get the current bd held in TBPTR and scan back from this point */
recheck_bd = curr_tbptr = (cbd_t __iomem *)
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 55c36230e176..40071dad1c57 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -464,7 +464,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
* address). Print error message but continue anyway.
*/
if ((void *)tbipa > priv->map + resource_size(&res) - 4)
- dev_err(&pdev->dev, "invalid register map (should be at least 0x%04x to contain TBI address)\n",
+ dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n",
((void *)tbipa - priv->map) + 4);
iowrite32be(be32_to_cpup(prop), tbipa);
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 7cf898455e60..3e233d924cce 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -894,7 +894,8 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
FSL_GIANFAR_DEV_HAS_VLAN |
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
- FSL_GIANFAR_DEV_HAS_TIMER;
+ FSL_GIANFAR_DEV_HAS_TIMER |
+ FSL_GIANFAR_DEV_HAS_RX_FILER;
err = of_property_read_string(np, "phy-connection-type", &ctype);
@@ -1396,8 +1397,9 @@ static int gfar_probe(struct platform_device *ofdev)
priv->rx_queue[i]->rxic = DEFAULT_RXIC;
}
- /* always enable rx filer */
- priv->rx_filer_enable = 1;
+ /* Always enable rx filer if available */
+ priv->rx_filer_enable =
+ (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
/* Enable most messages by default */
priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
/* use pritority h/w tx queue scheduling for single queue devices */
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index f266b20f9ef5..cb77667971a7 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -923,6 +923,7 @@ struct gfar {
#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
#define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800
#define FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER 0x00001000
+#define FSL_GIANFAR_DEV_HAS_RX_FILER 0x00002000
#if (MAXGROUPS == 2)
#define DEFAULT_MAPPING 0xAA
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 2a98eba660c0..b674414a4d72 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1259,12 +1259,8 @@ int hns_dsaf_set_mac_uc_entry(
if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
MAC_IS_BROADCAST(mac_entry->addr) ||
MAC_IS_MULTICAST(mac_entry->addr)) {
- dev_err(dsaf_dev->dev,
- "set_uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n",
- dsaf_dev->ae_dev.name, mac_entry->addr[0],
- mac_entry->addr[1], mac_entry->addr[2],
- mac_entry->addr[3], mac_entry->addr[4],
- mac_entry->addr[5]);
+ dev_err(dsaf_dev->dev, "set_uc %s Mac %pM err!\n",
+ dsaf_dev->ae_dev.name, mac_entry->addr);
return -EINVAL;
}
@@ -1331,12 +1327,8 @@ int hns_dsaf_set_mac_mc_entry(
/* mac addr check */
if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
- dev_err(dsaf_dev->dev,
- "set uc %s Mac %02x:%02x:%02x:%02x:%02x:%02x err!\n",
- dsaf_dev->ae_dev.name, mac_entry->addr[0],
- mac_entry->addr[1], mac_entry->addr[2],
- mac_entry->addr[3],
- mac_entry->addr[4], mac_entry->addr[5]);
+ dev_err(dsaf_dev->dev, "set uc %s Mac %pM err!\n",
+ dsaf_dev->ae_dev.name, mac_entry->addr);
return -EINVAL;
}
@@ -1410,11 +1402,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
/* check mac addr */
if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
- dev_err(dsaf_dev->dev,
- "set_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
- mac_entry->addr[0], mac_entry->addr[1],
- mac_entry->addr[2], mac_entry->addr[3],
- mac_entry->addr[4], mac_entry->addr[5]);
+ dev_err(dsaf_dev->dev, "set_entry failed,addr %pM!\n",
+ mac_entry->addr);
return -EINVAL;
}
@@ -1497,9 +1486,8 @@ int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
/*check mac addr */
if (MAC_IS_ALL_ZEROS(addr) || MAC_IS_BROADCAST(addr)) {
- dev_err(dsaf_dev->dev,
- "del_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
- addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+ dev_err(dsaf_dev->dev, "del_entry failed,addr %pM!\n",
+ addr);
return -EINVAL;
}
@@ -1563,11 +1551,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
/*check mac addr */
if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
- dev_err(dsaf_dev->dev,
- "del_port failed, addr %02x:%02x:%02x:%02x:%02x:%02x!\n",
- mac_entry->addr[0], mac_entry->addr[1],
- mac_entry->addr[2], mac_entry->addr[3],
- mac_entry->addr[4], mac_entry->addr[5]);
+ dev_err(dsaf_dev->dev, "del_port failed, addr %pM!\n",
+ mac_entry->addr);
return -EINVAL;
}
@@ -1644,11 +1629,8 @@ int hns_dsaf_get_mac_uc_entry(struct dsaf_device *dsaf_dev,
/* check macaddr */
if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
MAC_IS_BROADCAST(mac_entry->addr)) {
- dev_err(dsaf_dev->dev,
- "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n",
- mac_entry->addr[0], mac_entry->addr[1],
- mac_entry->addr[2], mac_entry->addr[3],
- mac_entry->addr[4], mac_entry->addr[5]);
+ dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n",
+ mac_entry->addr);
return -EINVAL;
}
@@ -1695,11 +1677,8 @@ int hns_dsaf_get_mac_mc_entry(struct dsaf_device *dsaf_dev,
/*check mac addr */
if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
MAC_IS_BROADCAST(mac_entry->addr)) {
- dev_err(dsaf_dev->dev,
- "get_entry failed,addr %02x:%02x:%02x:%02x:%02x:%02x\n",
- mac_entry->addr[0], mac_entry->addr[1],
- mac_entry->addr[2], mac_entry->addr[3],
- mac_entry->addr[4], mac_entry->addr[5]);
+ dev_err(dsaf_dev->dev, "get_entry failed,addr %pM\n",
+ mac_entry->addr);
return -EINVAL;
}
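The hns hunks above replace the six-way "%02x:%02x:..." expansions with the kernel's %pM printk extension, which formats a 6-byte MAC address from a single pointer argument. A one-line usage sketch (the device and helper name are hypothetical):

/* Hypothetical sketch: %pM prints a MAC address from a pointer to 6 bytes. */
#include <linux/device.h>
#include <linux/types.h>

static void my_report_bad_mac(struct device *dev, const u8 *addr)
{
        dev_err(dev, "invalid MAC address %pM\n", addr);
}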
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index b475e1bf2e6f..bdbd80423b17 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -898,7 +898,7 @@
#define XGMAC_PAUSE_CTL_RSP_MODE_B 2
#define XGMAC_PAUSE_CTL_TX_XOFF_B 3
-static inline void dsaf_write_reg(void *base, u32 reg, u32 value)
+static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
{
u8 __iomem *reg_addr = ACCESS_ONCE(base);
@@ -908,7 +908,7 @@ static inline void dsaf_write_reg(void *base, u32 reg, u32 value)
#define dsaf_write_dev(a, reg, value) \
dsaf_write_reg((a)->io_base, (reg), (value))
-static inline u32 dsaf_read_reg(u8 *base, u32 reg)
+static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
{
u8 __iomem *reg_addr = ACCESS_ONCE(base);
@@ -927,8 +927,8 @@ static inline u32 dsaf_read_reg(u8 *base, u32 reg)
#define dsaf_set_bit(origin, shift, val) \
dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
-static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
- u32 val)
+static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
+ u32 shift, u32 val)
{
u32 origin = dsaf_read_reg(base, reg);
@@ -947,7 +947,8 @@ static inline void dsaf_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
#define dsaf_get_bit(origin, shift) \
dsaf_get_field((origin), (1ull << (shift)), (shift))
-static inline u32 dsaf_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
+ u32 shift)
{
u32 origin;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 0ff8f01e57ee..1fd5ea82a9bc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -567,10 +567,6 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
goto init_adminq_exit;
}
- /* initialize locks */
- mutex_init(&hw->aq.asq_mutex);
- mutex_init(&hw->aq.arq_mutex);
-
/* Set up register offsets */
i40e_adminq_init_regs(hw);
@@ -664,8 +660,6 @@ i40e_status i40e_shutdown_adminq(struct i40e_hw *hw)
i40e_shutdown_asq(hw);
i40e_shutdown_arq(hw);
- /* destroy the locks */
-
if (hw->nvm_buff.va)
i40e_free_virt_mem(hw, &hw->nvm_buff);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b825f978d441..4a9873ec28c7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -10295,6 +10295,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* set up a default setting for link flow control */
pf->hw.fc.requested_mode = I40E_FC_NONE;
+ /* set up the locks for the AQ, do this only once in probe
+ * and destroy them only once in remove
+ */
+ mutex_init(&hw->aq.asq_mutex);
+ mutex_init(&hw->aq.arq_mutex);
+
err = i40e_init_adminq(hw);
/* provide nvm, fw, api versions */
@@ -10697,7 +10703,6 @@ static void i40e_remove(struct pci_dev *pdev)
set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
- i40e_fdir_teardown(pf);
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
i40e_free_vfs(pf);
@@ -10740,6 +10745,10 @@ static void i40e_remove(struct pci_dev *pdev)
"Failed to destroy the Admin Queue resources: %d\n",
ret_code);
+ /* destroy the locks only once, here */
+ mutex_destroy(&hw->aq.arq_mutex);
+ mutex_destroy(&hw->aq.asq_mutex);
+
/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
i40e_clear_interrupt_scheme(pf);
for (i = 0; i < pf->num_alloc_vsi; i++) {
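The i40e (and, below, i40evf) hunks move mutex_init()/mutex_destroy() for the admin-queue locks out of init_adminq()/shutdown_adminq(), which can run more than once across resets, and into probe()/remove(), which run exactly once per device; re-initializing a mutex that may still be in use is not safe. A compact sketch of that lifecycle rule, with a hypothetical driver structure:

/* Hypothetical sketch: init/destroy locks once per device lifetime, never in
 * paths that may run repeatedly (reset, reinit).
 */
#include <linux/mutex.h>

struct my_hw {
        struct mutex cmd_lock;
};

static int my_probe(struct my_hw *hw)
{
        mutex_init(&hw->cmd_lock);      /* once, before anything can use it */
        return 0;                       /* ... rest of probe ... */
}

static void my_reset(struct my_hw *hw)
{
        mutex_lock(&hw->cmd_lock);      /* reset paths only take the lock */
        /* ... reinitialize queues ... */
        mutex_unlock(&hw->cmd_lock);
}

static void my_remove(struct my_hw *hw)
{
        mutex_destroy(&hw->cmd_lock);   /* once, after the last user is gone */
}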
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index fd123ca60761..3f65e39b3fe4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -551,10 +551,6 @@ i40e_status i40evf_init_adminq(struct i40e_hw *hw)
goto init_adminq_exit;
}
- /* initialize locks */
- mutex_init(&hw->aq.asq_mutex);
- mutex_init(&hw->aq.arq_mutex);
-
/* Set up register offsets */
i40e_adminq_init_regs(hw);
@@ -596,8 +592,6 @@ i40e_status i40evf_shutdown_adminq(struct i40e_hw *hw)
i40e_shutdown_asq(hw);
i40e_shutdown_arq(hw);
- /* destroy the locks */
-
if (hw->nvm_buff.va)
i40e_free_virt_mem(hw, &hw->nvm_buff);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index d962164dfb0f..99d2cffae0cd 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -2476,6 +2476,12 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->bus.device = PCI_SLOT(pdev->devfn);
hw->bus.func = PCI_FUNC(pdev->devfn);
+ /* set up the locks for the AQ, do this only once in probe
+ * and destroy them only once in remove
+ */
+ mutex_init(&hw->aq.asq_mutex);
+ mutex_init(&hw->aq.arq_mutex);
+
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
@@ -2629,6 +2635,10 @@ static void i40evf_remove(struct pci_dev *pdev)
if (hw->aq.asq.count)
i40evf_shutdown_adminq(hw);
+ /* destroy the locks only once, here */
+ mutex_destroy(&hw->aq.arq_mutex);
+ mutex_destroy(&hw->aq.asq_mutex);
+
iounmap(hw->hw_addr);
pci_release_regions(pdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 47395ff5d908..aed8d029b23d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7920,6 +7920,9 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
*/
if (netif_running(dev))
ixgbe_close(dev);
+ else
+ ixgbe_reset(adapter);
+
ixgbe_clear_interrupt_scheme(adapter);
#ifdef CONFIG_IXGBE_DCB
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index d9884fd15b45..a4beccf1fd46 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -3413,16 +3413,23 @@ static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
}
/* Free all buffers from the pool */
-static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
+static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
+ struct mvpp2_bm_pool *bm_pool)
{
int i;
for (i = 0; i < bm_pool->buf_num; i++) {
+ dma_addr_t buf_phys_addr;
u32 vaddr;
/* Get buffer virtual address (indirect access) */
- mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
+ buf_phys_addr = mvpp2_read(priv,
+ MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
+
+ dma_unmap_single(dev, buf_phys_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
+
if (!vaddr)
break;
dev_kfree_skb_any((struct sk_buff *)vaddr);
@@ -3439,7 +3446,7 @@ static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
{
u32 val;
- mvpp2_bm_bufs_free(priv, bm_pool);
+ mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
if (bm_pool->buf_num) {
WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
return 0;
@@ -3692,7 +3699,8 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
MVPP2_BM_LONG_BUF_NUM :
MVPP2_BM_SHORT_BUF_NUM;
else
- mvpp2_bm_bufs_free(port->priv, new_pool);
+ mvpp2_bm_bufs_free(port->dev->dev.parent,
+ port->priv, new_pool);
new_pool->pkt_size = pkt_size;
@@ -3756,7 +3764,7 @@ static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
/* Update BM pool with new buffer size */
- mvpp2_bm_bufs_free(port->priv, port_pool);
+ mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
if (port_pool->buf_num) {
WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
return -EIO;
@@ -4401,11 +4409,10 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
mvpp2_txq_inc_get(txq_pcpu);
- if (!skb)
- continue;
-
dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
skb_headlen(skb), DMA_TO_DEVICE);
+ if (!skb)
+ continue;
dev_kfree_skb_any(skb);
}
}
@@ -5092,7 +5099,8 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
struct mvpp2_rx_queue *rxq)
{
struct net_device *dev = port->dev;
- int rx_received, rx_filled, i;
+ int rx_received;
+ int rx_done = 0;
u32 rcvd_pkts = 0;
u32 rcvd_bytes = 0;
@@ -5101,17 +5109,18 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
if (rx_todo > rx_received)
rx_todo = rx_received;
- rx_filled = 0;
- for (i = 0; i < rx_todo; i++) {
+ while (rx_done < rx_todo) {
struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
struct mvpp2_bm_pool *bm_pool;
struct sk_buff *skb;
+ dma_addr_t phys_addr;
u32 bm, rx_status;
int pool, rx_bytes, err;
- rx_filled++;
+ rx_done++;
rx_status = rx_desc->status;
rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
+ phys_addr = rx_desc->buf_phys_addr;
bm = mvpp2_bm_cookie_build(rx_desc);
pool = mvpp2_bm_cookie_pool_get(bm);
@@ -5128,8 +5137,10 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
* comprised by the RX descriptor.
*/
if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
+ err_drop_frame:
dev->stats.rx_errors++;
mvpp2_rx_error(port, rx_desc);
+ /* Return the buffer to the pool */
mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
rx_desc->buf_cookie);
continue;
@@ -5137,6 +5148,15 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
skb = (struct sk_buff *)rx_desc->buf_cookie;
+ err = mvpp2_rx_refill(port, bm_pool, bm, 0);
+ if (err) {
+ netdev_err(port->dev, "failed to refill BM pools\n");
+ goto err_drop_frame;
+ }
+
+ dma_unmap_single(dev->dev.parent, phys_addr,
+ bm_pool->buf_size, DMA_FROM_DEVICE);
+
rcvd_pkts++;
rcvd_bytes += rx_bytes;
atomic_inc(&bm_pool->in_use);
@@ -5147,12 +5167,6 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
mvpp2_rx_csum(port, rx_status, skb);
napi_gro_receive(&port->napi, skb);
-
- err = mvpp2_rx_refill(port, bm_pool, bm, 0);
- if (err) {
- netdev_err(port->dev, "failed to refill BM pools\n");
- rx_filled--;
- }
}
if (rcvd_pkts) {
@@ -5166,7 +5180,7 @@ static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
/* Update Rx queue management counters */
wmb();
- mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled);
+ mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
return rx_todo;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 8a083d73efdb..038f9ce391e6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -242,6 +242,13 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
unsigned long flags;
u64 ns, zero = 0;
+ /* mlx4_en_init_timestamp is called for each netdev.
+ * mdev->ptp_clock is common to all ports; skip initialization if it
+ * was already done for another port.
+ */
+ if (mdev->ptp_clock)
+ return;
+
rwlock_init(&mdev->clock_lock);
memset(&mdev->cycles, 0, sizeof(mdev->cycles));
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 005f910ec955..e0ec280a7fa1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -232,9 +232,6 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
if (mdev->pndev[i])
mlx4_en_destroy_netdev(mdev->pndev[i]);
- if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
- mlx4_en_remove_timestamp(mdev);
-
flush_workqueue(mdev->workqueue);
destroy_workqueue(mdev->workqueue);
(void) mlx4_mr_free(dev, &mdev->mr);
@@ -320,10 +317,6 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
mdev->port_cnt++;
- /* Initialize time stamp mechanism */
- if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
- mlx4_en_init_timestamp(mdev);
-
/* Set default number of RX rings*/
mlx4_en_set_num_rx_rings(mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 886e1bc86374..7869f97de5da 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2072,6 +2072,9 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
/* flush any pending task for this netdev */
flush_workqueue(mdev->workqueue);
+ if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
+ mlx4_en_remove_timestamp(mdev);
+
/* Detach the netdev so tasks would not attempt to access it */
mutex_lock(&mdev->state_lock);
mdev->pndev[priv->port] = NULL;
@@ -3058,9 +3061,12 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
}
queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
+ /* Initialize time stamp mechanism */
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
- queue_delayed_work(mdev->workqueue, &priv->service_task,
- SERVICE_TASK_DELAY);
+ mlx4_en_init_timestamp(mdev);
+
+ queue_delayed_work(mdev->workqueue, &priv->service_task,
+ SERVICE_TASK_DELAY);
mlx4_en_set_stats_bitmap(mdev->dev, &priv->stats_bitmap,
mdev->profile.prof[priv->port].rx_ppp,
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 6fec3e993d02..cad6c44df91c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -4306,9 +4306,10 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
return -EOPNOTSUPP;
ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
- ctrl->port = mlx4_slave_convert_port(dev, slave, ctrl->port);
- if (ctrl->port <= 0)
+ err = mlx4_slave_convert_port(dev, slave, ctrl->port);
+ if (err <= 0)
return -EINVAL;
+ ctrl->port = err;
qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err) {
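The mlx4 fix above stops assigning mlx4_slave_convert_port()'s int return value straight into the u8 ctrl->port field before checking it: a negative error code would be truncated to a large positive byte, so the "<= 0" test could never fire. Checking the full-width return first and only then storing it avoids the truncation. A tiny sketch of the pitfall, with a hypothetical converter:

/* Hypothetical sketch: check an int return value at full width before
 * narrowing it into a small unsigned field.
 */
#include <linux/errno.h>
#include <linux/types.h>

extern int my_convert_port(int port);   /* assumed: returns port > 0 or -errno */

struct my_ctrl {
        u8 port;
};

static int my_set_port(struct my_ctrl *ctrl, int port)
{
        int ret = my_convert_port(port);

        if (ret <= 0)           /* still negative here; not yet truncated */
                return -EINVAL;
        ctrl->port = ret;
        return 0;
}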
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index b83f7c0fcf99..122c2ee3dfe2 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -1937,6 +1937,12 @@ static void refill_rx(struct net_device *dev)
break; /* Better luck next round. */
np->rx_dma[entry] = pci_map_single(np->pci_dev,
skb->data, buflen, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->rx_dma[entry])) {
+ dev_kfree_skb_any(skb);
+ np->rx_skbuff[entry] = NULL;
+ break; /* Better luck next round. */
+ }
np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
}
np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
@@ -2093,6 +2099,12 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
np->tx_skbuff[entry] = skb;
np->tx_dma[entry] = pci_map_single(np->pci_dev,
skb->data,skb->len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(np->pci_dev, np->tx_dma[entry])) {
+ np->tx_skbuff[entry] = NULL;
+ dev_kfree_skb_irq(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);
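
Both natsemi hunks add the same guard: after pci_map_single() the returned handle is validated with pci_dma_mapping_error(), and the skb is dropped instead of being programmed into a descriptor with a bogus DMA address. A minimal stand-alone illustration of the pattern (plain C, not kernel code; map_buffer() and BAD_HANDLE are invented stand-ins for the PCI DMA API):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define BAD_HANDLE ((uint64_t)-1)

/* Pretend mapping helper: "fails" for NULL buffers. */
static uint64_t map_buffer(const void *buf)
{
        return buf ? (uint64_t)(uintptr_t)buf : BAD_HANDLE;
}

static int queue_tx(void *buf)
{
        uint64_t handle = map_buffer(buf);

        if (handle == BAD_HANDLE) {
                /* drop the buffer rather than queue a bogus address */
                free(buf);
                return -1;
        }
        printf("descriptor addr = 0x%llx\n", (unsigned long long)handle);
        return 0;
}

int main(void)
{
        void *buf = malloc(64);

        queue_tx(buf);          /* mapping succeeds */
        queue_tx(NULL);         /* simulated mapping failure */
        free(buf);
        return 0;
}
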
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index ac17d8669b1a..1292c360390c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -299,6 +299,7 @@ struct qed_hwfn {
/* Flag indicating whether interrupts are enabled or not*/
bool b_int_enabled;
+ bool b_int_requested;
struct qed_mcp_info *mcp_info;
@@ -491,6 +492,8 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
u32 input_len, u8 *input_buf,
u32 max_size, u8 *unzip_buf);
+int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
+
#define QED_ETH_INTERFACE_VERSION 300
#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 803b190ccada..817bbd5476ff 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1385,52 +1385,63 @@ err0:
return rc;
}
-static u32 qed_hw_bar_size(struct qed_dev *cdev,
- u8 bar_id)
+static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
+ u8 bar_id)
{
- u32 size = pci_resource_len(cdev->pdev, (bar_id > 0) ? 2 : 0);
+ u32 bar_reg = (bar_id == 0 ? PGLUE_B_REG_PF_BAR0_SIZE
+ : PGLUE_B_REG_PF_BAR1_SIZE);
+ u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
- return size / cdev->num_hwfns;
+ /* Get the BAR size (in KB) from hardware given val */
+ return 1 << (val + 15);
}
int qed_hw_prepare(struct qed_dev *cdev,
int personality)
{
- int rc, i;
+ struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
+ int rc;
/* Store the precompiled init data ptrs */
qed_init_iro_array(cdev);
/* Initialize the first hwfn - will learn number of hwfns */
- rc = qed_hw_prepare_single(&cdev->hwfns[0], cdev->regview,
+ rc = qed_hw_prepare_single(p_hwfn,
+ cdev->regview,
cdev->doorbells, personality);
if (rc)
return rc;
- personality = cdev->hwfns[0].hw_info.personality;
+ personality = p_hwfn->hw_info.personality;
/* Initialize the rest of the hwfns */
- for (i = 1; i < cdev->num_hwfns; i++) {
+ if (cdev->num_hwfns > 1) {
void __iomem *p_regview, *p_doorbell;
+ u8 __iomem *addr;
+
+ /* adjust bar offset for second engine */
+ addr = cdev->regview + qed_hw_bar_size(p_hwfn, 0) / 2;
+ p_regview = addr;
- p_regview = cdev->regview +
- i * qed_hw_bar_size(cdev, 0);
- p_doorbell = cdev->doorbells +
- i * qed_hw_bar_size(cdev, 1);
- rc = qed_hw_prepare_single(&cdev->hwfns[i], p_regview,
+ /* adjust doorbell bar offset for second engine */
+ addr = cdev->doorbells + qed_hw_bar_size(p_hwfn, 1) / 2;
+ p_doorbell = addr;
+
+ /* prepare second hw function */
+ rc = qed_hw_prepare_single(&cdev->hwfns[1], p_regview,
p_doorbell, personality);
+
+ /* In case of error, we need to free the previously
+ * initialized hwfn 0.
+ */
if (rc) {
- /* Cleanup previously initialized hwfns */
- while (--i >= 0) {
- qed_init_free(&cdev->hwfns[i]);
- qed_mcp_free(&cdev->hwfns[i]);
- qed_hw_hwfn_free(&cdev->hwfns[i]);
- }
- return rc;
+ qed_init_free(p_hwfn);
+ qed_mcp_free(p_hwfn);
+ qed_hw_hwfn_free(p_hwfn);
}
}
- return 0;
+ return rc;
}
void qed_hw_remove(struct qed_dev *cdev)
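
The reworked qed_hw_bar_size() stops deriving the size from pci_resource_len() and instead reads a per-PF PGLUE_B register, expanding the returned value as 1 << (val + 15); the prepare path then uses half of that size as the offset of the second engine's register and doorbell views. A stand-alone sketch of the arithmetic (plain C, not driver code; the register values are invented):

#include <stdio.h>
#include <stdint.h>

/* Same expansion the patch applies to the PGLUE_B BAR-size registers. */
static uint32_t bar_size_from_reg(uint32_t val)
{
        return 1u << (val + 15);
}

int main(void)
{
        uint32_t val;

        for (val = 0; val < 3; val++) {
                uint32_t size = bar_size_from_reg(val);

                /* the second hwfn's view starts halfway into the BAR */
                printf("reg=%u: size=%u, second-engine offset=%u\n",
                       (unsigned)val, (unsigned)size, (unsigned)(size / 2));
        }
        return 0;
}
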
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index de50e84902af..9cc9d62c1fec 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -783,22 +783,16 @@ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}
-void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum qed_int_mode int_mode)
+int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode)
{
- int i;
-
- p_hwfn->b_int_enabled = 1;
+ int rc, i;
/* Mask non-link attentions */
for (i = 0; i < 9; i++)
qed_wr(p_hwfn, p_ptt,
MISC_REG_AEU_ENABLE1_IGU_OUT_0 + (i << 2), 0);
- /* Enable interrupt Generation */
- qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
-
/* Configure AEU signal change to produce attentions for link */
qed_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
qed_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
@@ -808,6 +802,19 @@ void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
/* Unmask AEU signals toward IGU */
qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
+ if ((int_mode != QED_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_slowpath_irq_req(p_hwfn);
+ if (rc != 0) {
+ DP_NOTICE(p_hwfn, "Slowpath IRQ request failed\n");
+ return -EINVAL;
+ }
+ p_hwfn->b_int_requested = true;
+ }
+ /* Enable interrupt Generation */
+ qed_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
+ p_hwfn->b_int_enabled = 1;
+
+ return rc;
}
void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
@@ -1127,3 +1134,11 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
return info->igu_sb_cnt;
}
+
+void qed_int_disable_post_isr_release(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i)
+ cdev->hwfns[i].b_int_requested = false;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 16b57518e706..51e0b09a7f47 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -169,10 +169,14 @@ int qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
int *p_iov_blks);
/**
- * @file
+ * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR
+ * release. The API needs to be called after releasing all slowpath IRQs
+ * of the device.
+ *
+ * @param cdev
*
- * @brief Interrupt handler
*/
+void qed_int_disable_post_isr_release(struct qed_dev *cdev);
#define QED_CAU_DEF_RX_TIMER_RES 0
#define QED_CAU_DEF_TX_TIMER_RES 0
@@ -366,10 +370,11 @@ void qed_int_setup(struct qed_hwfn *p_hwfn,
* @param p_hwfn
* @param p_ptt
* @param int_mode
+ *
+ * @return int
*/
-void qed_int_igu_enable(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum qed_int_mode int_mode);
+int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ enum qed_int_mode int_mode);
/**
* @brief - Initialize CAU status block entry
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 947c7af72b25..174f7341c5c3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -476,41 +476,22 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
return rc;
}
-static int qed_slowpath_irq_req(struct qed_dev *cdev)
+int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
- int i = 0, rc = 0;
+ struct qed_dev *cdev = hwfn->cdev;
+ int rc = 0;
+ u8 id;
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
- /* Request all the slowpath MSI-X vectors */
- for (i = 0; i < cdev->num_hwfns; i++) {
- snprintf(cdev->hwfns[i].name, NAME_SIZE,
- "sp-%d-%02x:%02x.%02x",
- i, cdev->pdev->bus->number,
- PCI_SLOT(cdev->pdev->devfn),
- cdev->hwfns[i].abs_pf_id);
-
- rc = request_irq(cdev->int_params.msix_table[i].vector,
- qed_msix_sp_int, 0,
- cdev->hwfns[i].name,
- cdev->hwfns[i].sp_dpc);
- if (rc)
- break;
-
- DP_VERBOSE(&cdev->hwfns[i],
- (NETIF_MSG_INTR | QED_MSG_SP),
+ id = hwfn->my_id;
+ snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
+ id, cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
+ rc = request_irq(cdev->int_params.msix_table[id].vector,
+ qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
+ if (!rc)
+ DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
"Requested slowpath MSI-X\n");
- }
-
- if (i != cdev->num_hwfns) {
- /* Free already request MSI-X vectors */
- for (i--; i >= 0; i--) {
- unsigned int vec =
- cdev->int_params.msix_table[i].vector;
- synchronize_irq(vec);
- free_irq(cdev->int_params.msix_table[i].vector,
- cdev->hwfns[i].sp_dpc);
- }
- }
} else {
unsigned long flags = 0;
@@ -534,13 +515,17 @@ static void qed_slowpath_irq_free(struct qed_dev *cdev)
if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
for_each_hwfn(cdev, i) {
+ if (!cdev->hwfns[i].b_int_requested)
+ break;
synchronize_irq(cdev->int_params.msix_table[i].vector);
free_irq(cdev->int_params.msix_table[i].vector,
cdev->hwfns[i].sp_dpc);
}
} else {
- free_irq(cdev->pdev->irq, cdev);
+ if (QED_LEADING_HWFN(cdev)->b_int_requested)
+ free_irq(cdev->pdev->irq, cdev);
}
+ qed_int_disable_post_isr_release(cdev);
}
static int qed_nic_stop(struct qed_dev *cdev)
@@ -765,16 +750,11 @@ static int qed_slowpath_start(struct qed_dev *cdev,
if (rc)
goto err1;
- /* Request the slowpath IRQ */
- rc = qed_slowpath_irq_req(cdev);
- if (rc)
- goto err2;
-
/* Allocate stream for unzipping */
rc = qed_alloc_stream_mem(cdev);
if (rc) {
DP_NOTICE(cdev, "Failed to allocate stream memory\n");
- goto err3;
+ goto err2;
}
/* Start the slowpath */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index 7a5ce5914ace..e8df12335a97 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -363,4 +363,8 @@
0x7 << 0)
#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \
0
+#define PGLUE_B_REG_PF_BAR0_SIZE \
+ 0x2aae60UL
+#define PGLUE_B_REG_PF_BAR1_SIZE \
+ 0x2aae64UL
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 31a1f1eb4f56..287fadfab52d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -124,8 +124,12 @@ struct qed_spq {
dma_addr_t p_phys;
struct qed_spq_entry *p_virt;
- /* Used as index for completions (returns on EQ by FW) */
- u16 echo_idx;
+#define SPQ_RING_SIZE \
+ (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element))
+
+ /* Bitmap for handling out-of-order completions */
+ DECLARE_BITMAP(p_comp_bitmap, SPQ_RING_SIZE);
+ u8 comp_bitmap_idx;
/* Statistics */
u32 unlimited_pending_count;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 7c0b8459666e..3dd548ab8df1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -112,8 +112,6 @@ static int
qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent)
{
- p_ent->elem.hdr.echo = 0;
- p_hwfn->p_spq->echo_idx++;
p_ent->flags = 0;
switch (p_ent->comp_mode) {
@@ -195,10 +193,12 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq,
struct qed_spq_entry *p_ent)
{
- struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
+ struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
+ u16 echo = qed_chain_get_prod_idx(p_chain);
struct slow_path_element *elem;
struct core_db_data db;
+ p_ent->elem.hdr.echo = cpu_to_le16(echo);
elem = qed_chain_produce(p_chain);
if (!elem) {
DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
@@ -437,7 +437,9 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
p_spq->comp_count = 0;
p_spq->comp_sent_count = 0;
p_spq->unlimited_pending_count = 0;
- p_spq->echo_idx = 0;
+
+ bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
+ p_spq->comp_bitmap_idx = 0;
/* SPQ cid, cannot fail */
qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
@@ -582,26 +584,32 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq = p_hwfn->p_spq;
if (p_ent->queue == &p_spq->unlimited_pending) {
- struct qed_spq_entry *p_en2;
if (list_empty(&p_spq->free_pool)) {
list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
p_spq->unlimited_pending_count++;
return 0;
- }
+ } else {
+ struct qed_spq_entry *p_en2;
- p_en2 = list_first_entry(&p_spq->free_pool,
- struct qed_spq_entry,
- list);
- list_del(&p_en2->list);
+ p_en2 = list_first_entry(&p_spq->free_pool,
+ struct qed_spq_entry,
+ list);
+ list_del(&p_en2->list);
+
+ /* Copy the ring element physical pointer to the new
+ * entry, since we are about to overwrite the entire ring
+ * entry and don't want to lose the pointer.
+ */
+ p_ent->elem.data_ptr = p_en2->elem.data_ptr;
- /* Strcut assignment */
- *p_en2 = *p_ent;
+ *p_en2 = *p_ent;
- kfree(p_ent);
+ kfree(p_ent);
- p_ent = p_en2;
+ p_ent = p_en2;
+ }
}
/* entry is to be placed in 'pending' queue */
@@ -777,13 +785,38 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending,
list) {
if (p_ent->elem.hdr.echo == echo) {
+ u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
+
list_del(&p_ent->list);
- qed_chain_return_produced(&p_spq->chain);
+ /* Avoid overwriting SPQ entries when handling
+ * out-of-order completions, by marking the completions
+ * in a bitmap and advancing the chain consumer only
+ * across the leading run of completed entries.
+ */
+ bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);
+
+ while (test_bit(p_spq->comp_bitmap_idx,
+ p_spq->p_comp_bitmap)) {
+ bitmap_clear(p_spq->p_comp_bitmap,
+ p_spq->comp_bitmap_idx,
+ SPQ_RING_SIZE);
+ p_spq->comp_bitmap_idx++;
+ qed_chain_return_produced(&p_spq->chain);
+ }
+
p_spq->comp_count++;
found = p_ent;
break;
}
+
+ /* This is relatively uncommon - depends on scenarios
+ * which have multiple per-PF sent ramrods.
+ */
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
+ le16_to_cpu(echo),
+ le16_to_cpu(p_ent->elem.hdr.echo));
}
/* Release lock before callback, as callback may post
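
The replacement for echo_idx keeps a ring-sized completion bitmap: every EQ completion marks its slot, and qed_chain_return_produced() is called only for the leading run of marked slots, so an out-of-order completion can never release a slot whose earlier entry is still in flight. A stand-alone sketch of that bookkeeping (plain C; a bool array stands in for the bitmap, and the ring size and completion order are made up):

#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE 8

static bool completed[RING_SIZE];       /* stands in for p_comp_bitmap */
static unsigned int consumer;           /* next slot expected to finish */

static unsigned int handle_completion(unsigned int echo)
{
        unsigned int released = 0;

        completed[echo % RING_SIZE] = true;

        /* return ring slots only across the leading run of completions */
        while (completed[consumer % RING_SIZE]) {
                completed[consumer % RING_SIZE] = false;
                consumer++;
                released++;
        }
        return released;
}

int main(void)
{
        unsigned int order[] = { 0, 2, 1, 3 };  /* 2 completes before 1 */
        unsigned int i;

        for (i = 0; i < 4; i++)
                printf("echo %u -> released %u slot(s)\n",
                       order[i], handle_completion(order[i]));
        return 0;
}
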
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
index be7d7a62cc0d..34906750b7e7 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
@@ -246,12 +246,13 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *adapter)
u32 state;
state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
- while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit--) {
+ while (state != QLCNIC_DEV_NPAR_OPER && idc->vnic_wait_limit) {
+ idc->vnic_wait_limit--;
msleep(1000);
state = QLCRDX(ahw, QLC_83XX_VNIC_STATE);
}
- if (!idc->vnic_wait_limit) {
+ if (state != QLCNIC_DEV_NPAR_OPER) {
dev_err(&adapter->pdev->dev,
"vNIC mode not operational, state check timed out.\n");
return -EIO;
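
This hunk, like the qlge and txc43128 changes later in the series, fixes a post-decrement wait loop: with "while (cond && limit--)" the counter ends at -1 on timeout and can reach 0 even on success, so the corrected code decrements inside the loop and decides success from the polled state itself. A stand-alone sketch of the corrected shape (plain C; poll_ready() and the counts are invented):

#include <stdio.h>
#include <stdbool.h>

static int polls_needed;

/* Pretend device poll: becomes ready after polls_needed attempts. */
static bool poll_ready(void)
{
        return --polls_needed <= 0;
}

static int wait_ready(int limit)
{
        bool ready = poll_ready();

        while (!ready && limit) {
                limit--;
                ready = poll_ready();
        }
        /* judge the condition that was waited on, not the counter */
        return ready ? 0 : -1;
}

int main(void)
{
        polls_needed = 3;
        printf("ready after 3 polls, limit 5: %s\n",
               wait_ready(5) == 0 ? "ok" : "timed out");

        polls_needed = 10;
        printf("ready after 10 polls, limit 5: %s\n",
               wait_ready(5) == 0 ? "ok" : "timed out");
        return 0;
}
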
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index a5f422f26cb4..daf05155b732 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -772,8 +772,10 @@ int qlcnic_82xx_config_intrpt(struct qlcnic_adapter *adapter, u8 op_type)
int i, err = 0;
for (i = 0; i < ahw->num_msix; i++) {
- qlcnic_alloc_mbx_args(&cmd, adapter,
- QLCNIC_CMD_MQ_TX_CONFIG_INTR);
+ err = qlcnic_alloc_mbx_args(&cmd, adapter,
+ QLCNIC_CMD_MQ_TX_CONFIG_INTR);
+ if (err)
+ return err;
type = op_type ? QLCNIC_INTRPT_ADD : QLCNIC_INTRPT_DEL;
val = type | (ahw->intr_tbl[i].type << 4);
if (ahw->intr_tbl[i].type == QLCNIC_INTRPT_MSIX)
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 02b7115b6aaa..997976426799 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -4211,8 +4211,9 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
/* Wait for an outstanding reset to complete. */
if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
- int i = 3;
- while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
+ int i = 4;
+
+ while (--i && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
netif_err(qdev, ifup, qdev->ndev,
"Waiting for adapter UP...\n");
ssleep(1);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index ddb2c6c6ec94..689a4a5c8dcf 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -736,9 +736,8 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
jiffies, jiffies - dev->trans_start);
qca->net_dev->stats.tx_errors++;
- /* wake the queue if there is room */
- if (qcaspi_tx_ring_has_space(&qca->txr))
- netif_wake_queue(dev);
+ /* Trigger tx queue flush and QCA7000 reset */
+ qca->sync = QCASPI_SYNC_UNKNOWN;
}
static int
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ed5da4d47668..467d41698fd5 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -905,6 +905,9 @@ static int ravb_phy_init(struct net_device *ndev)
netdev_info(ndev, "limited PHY to 100Mbit/s\n");
}
+ /* 10BASE is not supported */
+ phydev->supported &= ~PHY_10BT_FEATURES;
+
netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
phydev->addr, phydev->irq, phydev->drv->name);
@@ -1037,7 +1040,7 @@ static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
"rx_queue_1_mcast_packets",
"rx_queue_1_errors",
"rx_queue_1_crc_errors",
- "rx_queue_1_frame_errors_",
+ "rx_queue_1_frame_errors",
"rx_queue_1_length_errors",
"rx_queue_1_missed_errors",
"rx_queue_1_over_errors",
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index e7bab7909ed9..6a8fc0f341ff 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -52,6 +52,8 @@
NETIF_MSG_RX_ERR| \
NETIF_MSG_TX_ERR)
+#define SH_ETH_OFFSET_INVALID ((u16)~0)
+
#define SH_ETH_OFFSET_DEFAULTS \
[0 ... SH_ETH_MAX_REGISTER_OFFSET - 1] = SH_ETH_OFFSET_INVALID
@@ -404,6 +406,28 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
static void sh_eth_rcv_snd_disable(struct net_device *ndev);
static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
+static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ u16 offset = mdp->reg_offset[enum_index];
+
+ if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
+ return;
+
+ iowrite32(data, mdp->addr + offset);
+}
+
+static u32 sh_eth_read(struct net_device *ndev, int enum_index)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ u16 offset = mdp->reg_offset[enum_index];
+
+ if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
+ return ~0U;
+
+ return ioread32(mdp->addr + offset);
+}
+
static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -1143,6 +1167,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
dma_addr_t dma_addr;
+ u32 buf_len;
mdp->cur_rx = 0;
mdp->cur_tx = 0;
@@ -1163,16 +1188,16 @@ static void sh_eth_ring_format(struct net_device *ndev)
/* RX descriptor */
rxdesc = &mdp->rx_ring[i];
/* The size of the buffer is a multiple of 32 bytes. */
- rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
- dma_addr = dma_map_single(&ndev->dev, skb->data,
- rxdesc->buffer_length,
+ buf_len = ALIGN(mdp->rx_buf_sz, 32);
+ rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);
+ dma_addr = dma_map_single(&ndev->dev, skb->data, buf_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&ndev->dev, dma_addr)) {
kfree_skb(skb);
break;
}
mdp->rx_skbuff[i] = skb;
- rxdesc->addr = dma_addr;
+ rxdesc->addr = cpu_to_edmac(mdp, dma_addr);
rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
/* Rx descriptor address set */
@@ -1196,7 +1221,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
mdp->tx_skbuff[i] = NULL;
txdesc = &mdp->tx_ring[i];
txdesc->status = cpu_to_edmac(mdp, TD_TFP);
- txdesc->buffer_length = 0;
+ txdesc->len = cpu_to_edmac(mdp, 0);
if (i == 0) {
/* Tx descriptor address set */
sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
@@ -1403,8 +1428,10 @@ static int sh_eth_txfree(struct net_device *ndev)
entry, edmac_to_cpu(mdp, txdesc->status));
/* Free the original skb. */
if (mdp->tx_skbuff[entry]) {
- dma_unmap_single(&ndev->dev, txdesc->addr,
- txdesc->buffer_length, DMA_TO_DEVICE);
+ dma_unmap_single(&ndev->dev,
+ edmac_to_cpu(mdp, txdesc->addr),
+ edmac_to_cpu(mdp, txdesc->len) >> 16,
+ DMA_TO_DEVICE);
dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
mdp->tx_skbuff[entry] = NULL;
free_num++;
@@ -1414,7 +1441,7 @@ static int sh_eth_txfree(struct net_device *ndev)
txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += txdesc->buffer_length;
+ ndev->stats.tx_bytes += edmac_to_cpu(mdp, txdesc->len) >> 16;
}
return free_num;
}
@@ -1433,6 +1460,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
u32 desc_status;
int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
dma_addr_t dma_addr;
+ u32 buf_len;
boguscnt = min(boguscnt, *quota);
limit = boguscnt;
@@ -1441,7 +1469,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
/* RACT bit must be checked before all the following reads */
dma_rmb();
desc_status = edmac_to_cpu(mdp, rxdesc->status);
- pkt_len = rxdesc->frame_length;
+ pkt_len = edmac_to_cpu(mdp, rxdesc->len) & RD_RFL;
if (--boguscnt < 0)
break;
@@ -1462,6 +1490,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (mdp->cd->shift_rd0)
desc_status >>= 16;
+ skb = mdp->rx_skbuff[entry];
if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
RD_RFS5 | RD_RFS6 | RD_RFS10)) {
ndev->stats.rx_errors++;
@@ -1477,16 +1506,16 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
ndev->stats.rx_missed_errors++;
if (desc_status & RD_RFS10)
ndev->stats.rx_over_errors++;
- } else {
+ } else if (skb) {
+ dma_addr = edmac_to_cpu(mdp, rxdesc->addr);
if (!mdp->cd->hw_swap)
sh_eth_soft_swap(
- phys_to_virt(ALIGN(rxdesc->addr, 4)),
+ phys_to_virt(ALIGN(dma_addr, 4)),
pkt_len + 2);
- skb = mdp->rx_skbuff[entry];
mdp->rx_skbuff[entry] = NULL;
if (mdp->cd->rpadir)
skb_reserve(skb, NET_IP_ALIGN);
- dma_unmap_single(&ndev->dev, rxdesc->addr,
+ dma_unmap_single(&ndev->dev, dma_addr,
ALIGN(mdp->rx_buf_sz, 32),
DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
@@ -1506,7 +1535,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
entry = mdp->dirty_rx % mdp->num_rx_ring;
rxdesc = &mdp->rx_ring[entry];
/* The size of the buffer is 32 byte boundary. */
- rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 32);
+ buf_len = ALIGN(mdp->rx_buf_sz, 32);
+ rxdesc->len = cpu_to_edmac(mdp, buf_len << 16);
if (mdp->rx_skbuff[entry] == NULL) {
skb = netdev_alloc_skb(ndev, skbuff_size);
@@ -1514,8 +1544,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
break; /* Better luck next round. */
sh_eth_set_receive_align(skb);
dma_addr = dma_map_single(&ndev->dev, skb->data,
- rxdesc->buffer_length,
- DMA_FROM_DEVICE);
+ buf_len, DMA_FROM_DEVICE);
if (dma_mapping_error(&ndev->dev, dma_addr)) {
kfree_skb(skb);
break;
@@ -1523,7 +1552,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
mdp->rx_skbuff[entry] = skb;
skb_checksum_none_assert(skb);
- rxdesc->addr = dma_addr;
+ rxdesc->addr = cpu_to_edmac(mdp, dma_addr);
}
dma_wmb(); /* RACT bit must be set after all the above writes */
if (entry >= mdp->num_rx_ring - 1)
@@ -2331,8 +2360,8 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < mdp->num_rx_ring; i++) {
rxdesc = &mdp->rx_ring[i];
- rxdesc->status = 0;
- rxdesc->addr = 0xBADF00D0;
+ rxdesc->status = cpu_to_edmac(mdp, 0);
+ rxdesc->addr = cpu_to_edmac(mdp, 0xBADF00D0);
dev_kfree_skb(mdp->rx_skbuff[i]);
mdp->rx_skbuff[i] = NULL;
}
@@ -2350,6 +2379,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_txdesc *txdesc;
+ dma_addr_t dma_addr;
u32 entry;
unsigned long flags;
@@ -2372,15 +2402,15 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
txdesc = &mdp->tx_ring[entry];
/* soft swap. */
if (!mdp->cd->hw_swap)
- sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
- skb->len + 2);
- txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
+ sh_eth_soft_swap(PTR_ALIGN(skb->data, 4), skb->len + 2);
+ dma_addr = dma_map_single(&ndev->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&ndev->dev, dma_addr)) {
kfree_skb(skb);
return NETDEV_TX_OK;
}
- txdesc->buffer_length = skb->len;
+ txdesc->addr = cpu_to_edmac(mdp, dma_addr);
+ txdesc->len = cpu_to_edmac(mdp, skb->len << 16);
dma_wmb(); /* TACT bit must be set after all the above writes */
if (entry >= mdp->num_tx_ring - 1)
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 50382b1c9ddc..72fcfc924589 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -283,7 +283,7 @@ enum DMAC_IM_BIT {
DMAC_M_RINT1 = 0x00000001,
};
-/* Receive descriptor bit */
+/* Receive descriptor 0 bits */
enum RD_STS_BIT {
RD_RACT = 0x80000000, RD_RDLE = 0x40000000,
RD_RFP1 = 0x20000000, RD_RFP0 = 0x10000000,
@@ -298,6 +298,12 @@ enum RD_STS_BIT {
#define RDFEND RD_RFP0
#define RD_RFP (RD_RFP1|RD_RFP0)
+/* Receive descriptor 1 bits */
+enum RD_LEN_BIT {
+ RD_RFL = 0x0000ffff, /* receive frame length */
+ RD_RBL = 0xffff0000, /* receive buffer length */
+};
+
/* FCFTR */
enum FCFTR_BIT {
FCFTR_RFF2 = 0x00040000, FCFTR_RFF1 = 0x00020000,
@@ -307,7 +313,7 @@ enum FCFTR_BIT {
#define DEFAULT_FIFO_F_D_RFF (FCFTR_RFF2 | FCFTR_RFF1 | FCFTR_RFF0)
#define DEFAULT_FIFO_F_D_RFD (FCFTR_RFD2 | FCFTR_RFD1 | FCFTR_RFD0)
-/* Transmit descriptor bit */
+/* Transmit descriptor 0 bits */
enum TD_STS_BIT {
TD_TACT = 0x80000000, TD_TDLE = 0x40000000,
TD_TFP1 = 0x20000000, TD_TFP0 = 0x10000000,
@@ -317,6 +323,11 @@ enum TD_STS_BIT {
#define TDFEND TD_TFP0
#define TD_TFP (TD_TFP1|TD_TFP0)
+/* Transmit descriptor 1 bits */
+enum TD_LEN_BIT {
+ TD_TBL = 0xffff0000, /* transmit buffer length */
+};
+
/* RMCR */
enum RMCR_BIT {
RMCR_RNC = 0x00000001,
@@ -425,15 +436,9 @@ enum TSU_FWSLC_BIT {
*/
struct sh_eth_txdesc {
u32 status; /* TD0 */
-#if defined(__LITTLE_ENDIAN)
- u16 pad0; /* TD1 */
- u16 buffer_length; /* TD1 */
-#else
- u16 buffer_length; /* TD1 */
- u16 pad0; /* TD1 */
-#endif
+ u32 len; /* TD1 */
u32 addr; /* TD2 */
- u32 pad1; /* padding data */
+ u32 pad0; /* padding data */
} __aligned(2) __packed;
/* The sh ether Rx buffer descriptors.
@@ -441,13 +446,7 @@ struct sh_eth_txdesc {
*/
struct sh_eth_rxdesc {
u32 status; /* RD0 */
-#if defined(__LITTLE_ENDIAN)
- u16 frame_length; /* RD1 */
- u16 buffer_length; /* RD1 */
-#else
- u16 buffer_length; /* RD1 */
- u16 frame_length; /* RD1 */
-#endif
+ u32 len; /* RD1 */
u32 addr; /* RD2 */
u32 pad0; /* padding data */
} __aligned(2) __packed;
@@ -546,31 +545,6 @@ static inline void sh_eth_soft_swap(char *src, int len)
#endif
}
-#define SH_ETH_OFFSET_INVALID ((u16) ~0)
-
-static inline void sh_eth_write(struct net_device *ndev, u32 data,
- int enum_index)
-{
- struct sh_eth_private *mdp = netdev_priv(ndev);
- u16 offset = mdp->reg_offset[enum_index];
-
- if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
- return;
-
- iowrite32(data, mdp->addr + offset);
-}
-
-static inline u32 sh_eth_read(struct net_device *ndev, int enum_index)
-{
- struct sh_eth_private *mdp = netdev_priv(ndev);
- u16 offset = mdp->reg_offset[enum_index];
-
- if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
- return ~0U;
-
- return ioread32(mdp->addr + offset);
-}
-
static inline void *sh_eth_tsu_get_offset(struct sh_eth_private *mdp,
int enum_index)
{
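
The descriptor change replaces the endian-dependent u16 pairs in TD1/RD1 with a single 32-bit len word: the buffer length sits in bits 31..16 (TD_TBL/RD_RBL) and the received frame length in bits 15..0 (RD_RFL), which is why the driver now packs with buf_len << 16 and unpacks with >> 16 or the RD_RFL mask. A short sketch of that packing (plain C; the 1536 and 60 byte values are arbitrary):

#include <stdio.h>
#include <stdint.h>

#define RD_RFL 0x0000ffffu      /* receive frame length  */
#define RD_RBL 0xffff0000u      /* receive buffer length */

static uint32_t pack_len(uint16_t buf_len, uint16_t frame_len)
{
        return ((uint32_t)buf_len << 16) | frame_len;
}

int main(void)
{
        uint32_t len = pack_len(1536, 0);       /* as set up at ring init */

        len = (len & RD_RBL) | 60;              /* hardware fills RD_RFL */

        printf("buffer %u bytes, frame %u bytes\n",
               (unsigned)(len >> 16), (unsigned)(len & RD_RFL));
        return 0;
}
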
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index bc6d21b471be..e6a084a6be12 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -3299,7 +3299,8 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
new_spec.priority = EFX_FILTER_PRI_AUTO;
new_spec.flags = (EFX_FILTER_FLAG_RX |
- EFX_FILTER_FLAG_RX_RSS);
+ (efx_rss_enabled(efx) ?
+ EFX_FILTER_FLAG_RX_RSS : 0));
new_spec.dmaq_id = 0;
new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
rc = efx_ef10_filter_push(efx, &new_spec,
@@ -3921,6 +3922,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_ef10_dev_addr *addr_list;
+ enum efx_filter_flags filter_flags;
struct efx_filter_spec spec;
u8 baddr[ETH_ALEN];
unsigned int i, j;
@@ -3935,11 +3937,11 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
addr_count = table->dev_uc_count;
}
+ filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
+
/* Insert/renew filters */
for (i = 0; i < addr_count; i++) {
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
- EFX_FILTER_FLAG_RX_RSS,
- 0);
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
addr_list[i].addr);
rc = efx_ef10_filter_insert(efx, &spec, true);
@@ -3968,9 +3970,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
if (multicast && rollback) {
/* Also need an Ethernet broadcast filter */
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
- EFX_FILTER_FLAG_RX_RSS,
- 0);
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
eth_broadcast_addr(baddr);
efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr);
rc = efx_ef10_filter_insert(efx, &spec, true);
@@ -4000,13 +4000,14 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ enum efx_filter_flags filter_flags;
struct efx_filter_spec spec;
u8 baddr[ETH_ALEN];
int rc;
- efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
- EFX_FILTER_FLAG_RX_RSS,
- 0);
+ filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
+
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
if (multicast)
efx_filter_set_mc_def(&spec);
@@ -4023,8 +4024,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
if (!nic_data->workaround_26807) {
/* Also need an Ethernet broadcast filter */
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
- EFX_FILTER_FLAG_RX_RSS,
- 0);
+ filter_flags, 0);
eth_broadcast_addr(baddr);
efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
baddr);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 1aaf76c1ace8..10827476bc0b 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -76,6 +76,11 @@ void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_35388(efx) ? \
EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
+static inline bool efx_rss_enabled(struct efx_nic *efx)
+{
+ return efx->rss_spread > 1;
+}
+
/* Filters */
void efx_mac_reconfigure(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 5a1c5a8f278a..133e9e35be9e 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2242,7 +2242,7 @@ efx_farch_filter_init_rx_auto(struct efx_nic *efx,
*/
spec->priority = EFX_FILTER_PRI_AUTO;
spec->flags = (EFX_FILTER_FLAG_RX |
- (efx->n_rx_channels > 1 ? EFX_FILTER_FLAG_RX_RSS : 0) |
+ (efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0) |
(efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0));
spec->dmaq_id = 0;
}
diff --git a/drivers/net/ethernet/sfc/txc43128_phy.c b/drivers/net/ethernet/sfc/txc43128_phy.c
index 3d5ee3259885..194f67d9f3bf 100644
--- a/drivers/net/ethernet/sfc/txc43128_phy.c
+++ b/drivers/net/ethernet/sfc/txc43128_phy.c
@@ -418,7 +418,7 @@ static void txc_reset_logic_mmd(struct efx_nic *efx, int mmd)
val |= (1 << TXC_GLCMD_LMTSWRST_LBN);
efx_mdio_write(efx, mmd, TXC_GLRGS_GLCMD, val);
- while (tries--) {
+ while (--tries) {
val = efx_mdio_read(efx, mmd, TXC_GLRGS_GLCMD);
if (!(val & (1 << TXC_GLCMD_LMTSWRST_LBN)))
break;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
index 52b8ed9bd87c..adff46375a32 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -153,7 +153,11 @@ static int sun7i_gmac_probe(struct platform_device *pdev)
if (ret)
return ret;
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ sun7i_gmac_exit(pdev, plat_dat->bsp_priv);
+
+ return ret;
}
static const struct of_device_id sun7i_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 3c6549aee11d..a5b869eb4678 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3046,8 +3046,6 @@ int stmmac_suspend(struct net_device *ndev)
priv->hw->dma->stop_tx(priv->ioaddr);
priv->hw->dma->stop_rx(priv->ioaddr);
- stmmac_clear_descriptors(priv);
-
/* Enable Power down mode by programming the PMT regs */
if (device_may_wakeup(priv->device)) {
priv->hw->mac->pmt(priv->hw, priv->wolopts);
@@ -3105,7 +3103,12 @@ int stmmac_resume(struct net_device *ndev)
netif_device_attach(ndev);
- init_dma_desc_rings(ndev, GFP_ATOMIC);
+ priv->cur_rx = 0;
+ priv->dirty_rx = 0;
+ priv->dirty_tx = 0;
+ priv->cur_tx = 0;
+ stmmac_clear_descriptors(priv);
+
stmmac_hw_setup(ndev, false);
stmmac_init_tx_coalesce(priv);
stmmac_set_rx_mode(ndev);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 48b92c9de12a..fc958067d10a 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2026,45 +2026,54 @@ static int cpsw_probe_dt(struct cpsw_priv *priv,
for_each_child_of_node(node, slave_node) {
struct cpsw_slave_data *slave_data = data->slave_data + i;
const void *mac_addr = NULL;
- u32 phyid;
int lenp;
const __be32 *parp;
- struct device_node *mdio_node;
- struct platform_device *mdio;
/* This is no slave child node, continue */
if (strcmp(slave_node->name, "slave"))
continue;
priv->phy_node = of_parse_phandle(slave_node, "phy-handle", 0);
+ parp = of_get_property(slave_node, "phy_id", &lenp);
if (of_phy_is_fixed_link(slave_node)) {
- struct phy_device *pd;
+ struct device_node *phy_node;
+ struct phy_device *phy_dev;
+ /* In the case of a fixed PHY, the DT node associated
+ * with the PHY is the Ethernet MAC DT node.
+ */
ret = of_phy_register_fixed_link(slave_node);
if (ret)
return ret;
- pd = of_phy_find_device(slave_node);
- if (!pd)
+ phy_node = of_node_get(slave_node);
+ phy_dev = of_phy_find_device(phy_node);
+ if (!phy_dev)
return -ENODEV;
snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
- PHY_ID_FMT, pd->bus->id, pd->phy_id);
- goto no_phy_slave;
- }
- parp = of_get_property(slave_node, "phy_id", &lenp);
- if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
- dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
+ PHY_ID_FMT, phy_dev->bus->id, phy_dev->addr);
+ } else if (parp) {
+ u32 phyid;
+ struct device_node *mdio_node;
+ struct platform_device *mdio;
+
+ if (lenp != (sizeof(__be32) * 2)) {
+ dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
+ goto no_phy_slave;
+ }
+ mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
+ phyid = be32_to_cpup(parp+1);
+ mdio = of_find_device_by_node(mdio_node);
+ of_node_put(mdio_node);
+ if (!mdio) {
+ dev_err(&pdev->dev, "Missing mdio platform device\n");
+ return -EINVAL;
+ }
+ snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+ PHY_ID_FMT, mdio->name, phyid);
+ } else {
+ dev_err(&pdev->dev, "No slave[%d] phy_id or fixed-link property\n", i);
goto no_phy_slave;
}
- mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
- phyid = be32_to_cpup(parp+1);
- mdio = of_find_device_by_node(mdio_node);
- of_node_put(mdio_node);
- if (!mdio) {
- dev_err(&pdev->dev, "Missing mdio platform device\n");
- return -EINVAL;
- }
- snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
- PHY_ID_FMT, mdio->name, phyid);
slave_data->phy_if = of_get_phy_mode(slave_node);
if (slave_data->phy_if < 0) {
dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
@@ -2418,7 +2427,7 @@ static int cpsw_probe(struct platform_device *pdev)
ndev->irq = platform_get_irq(pdev, 1);
if (ndev->irq < 0) {
dev_err(priv->dev, "error getting irq resource\n");
- ret = -ENOENT;
+ ret = ndev->irq;
goto clean_ale_ret;
}
@@ -2439,8 +2448,10 @@ static int cpsw_probe(struct platform_device *pdev)
/* RX IRQ */
irq = platform_get_irq(pdev, 1);
- if (irq < 0)
+ if (irq < 0) {
+ ret = irq;
goto clean_ale_ret;
+ }
priv->irqs_table[0] = irq;
ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt,
@@ -2452,8 +2463,10 @@ static int cpsw_probe(struct platform_device *pdev)
/* TX IRQ */
irq = platform_get_irq(pdev, 2);
- if (irq < 0)
+ if (irq < 0) {
+ ret = irq;
goto clean_ale_ret;
+ }
priv->irqs_table[1] = irq;
ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index de5c30c9f059..58efdec12f30 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -967,8 +967,6 @@ static netdev_tx_t geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
err = udp_tunnel6_xmit_skb(dst, gs6->sock->sk, skb, dev,
&fl6.saddr, &fl6.daddr, prio, ttl,
sport, geneve->dst_port, !udp_csum);
-
- iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
return NETDEV_TX_OK;
tx_error:
@@ -1157,7 +1155,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_dev *t, *geneve = netdev_priv(dev);
bool tun_collect_md, tun_on_same_port;
- int err;
+ int err, encap_len;
if (!remote)
return -EINVAL;
@@ -1189,6 +1187,14 @@ static int geneve_configure(struct net *net, struct net_device *dev,
if (t)
return -EBUSY;
+ /* make enough headroom for basic scenario */
+ encap_len = GENEVE_BASE_HLEN + ETH_HLEN;
+ if (remote->sa.sa_family == AF_INET)
+ encap_len += sizeof(struct iphdr);
+ else
+ encap_len += sizeof(struct ipv6hdr);
+ dev->needed_headroom = encap_len + ETH_HLEN;
+
if (metadata) {
if (tun_on_same_port)
return -EPERM;
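
geneve_configure() now sizes needed_headroom up front: the Geneve base encapsulation plus an IPv4 or IPv6 header, chosen by the remote's address family, plus link-layer overhead. The sketch below mirrors that arithmetic; GENEVE_BASE_HLEN is assumed here to be the UDP header plus the Geneve header, while the real constant comes from the driver headers:

#include <stdio.h>
#include <stdbool.h>

#define ETH_HLEN        14
#define IPV4_HLEN       20
#define IPV6_HLEN       40
#define GENEVE_BASE_HLEN (8 /* UDP */ + 8 /* Geneve */) /* assumed value */

static int needed_headroom(bool ipv6_remote)
{
        int encap_len = GENEVE_BASE_HLEN + ETH_HLEN;

        encap_len += ipv6_remote ? IPV6_HLEN : IPV4_HLEN;
        return encap_len + ETH_HLEN;    /* extra ETH_HLEN, as in the patch */
}

int main(void)
{
        printf("IPv4 remote: %d bytes\n", needed_headroom(false));
        printf("IPv6 remote: %d bytes\n", needed_headroom(true));
        return 0;
}
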
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 7c4a4151ef0f..5a1e98547031 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -683,14 +683,20 @@ static void sixpack_close(struct tty_struct *tty)
if (!atomic_dec_and_test(&sp->refcnt))
down(&sp->dead_sem);
- unregister_netdev(sp->dev);
+ /* We must stop the queue to avoid potentially scribbling
+ * on the free buffers. The sp->dead_sem is not sufficient
+ * to protect us from sp->xbuff access.
+ */
+ netif_stop_queue(sp->dev);
- del_timer(&sp->tx_t);
- del_timer(&sp->resync_t);
+ del_timer_sync(&sp->tx_t);
+ del_timer_sync(&sp->resync_t);
/* Free all 6pack frame buffers. */
kfree(sp->rbuff);
kfree(sp->xbuff);
+
+ unregister_netdev(sp->dev);
}
/* Perform I/O control on an active 6pack channel. */
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 216bfd350169..85828f153445 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -797,14 +797,19 @@ static void mkiss_close(struct tty_struct *tty)
*/
if (!atomic_dec_and_test(&ax->refcnt))
down(&ax->dead_sem);
-
- unregister_netdev(ax->dev);
+ /*
+ * Halt the transmit queue so that a new transmit cannot scribble
+ * on our buffers
+ */
+ netif_stop_queue(ax->dev);
/* Free all AX25 frame buffers. */
kfree(ax->rbuff);
kfree(ax->xbuff);
ax->tty = NULL;
+
+ unregister_netdev(ax->dev);
}
/* Perform I/O control on an active ax25 channel. */
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 908e8d486342..7f8e7662e28c 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -149,9 +149,14 @@ int mdio_mux_init(struct device *dev,
}
cb->bus_number = v;
cb->parent = pb;
+
cb->mii_bus = mdiobus_alloc();
+ if (!cb->mii_bus) {
+ ret_val = -ENOMEM;
+ of_node_put(child_bus_node);
+ break;
+ }
cb->mii_bus->priv = cb;
-
cb->mii_bus->irq = cb->phy_irq;
cb->mii_bus->name = "mdio_mux";
snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%x.%x",
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index cf6312fafea5..e13ad6cdcc22 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -339,9 +339,18 @@ static int ksz9021_config_init(struct phy_device *phydev)
{
const struct device *dev = &phydev->dev;
const struct device_node *of_node = dev->of_node;
+ const struct device *dev_walker;
- if (!of_node && dev->parent->of_node)
- of_node = dev->parent->of_node;
+ /* The Micrel driver has a deprecated option to place phy OF
+ * properties in the MAC node. Walk up the tree of devices to
+ * find a device with an OF node.
+ */
+ dev_walker = &phydev->dev;
+ do {
+ of_node = dev_walker->of_node;
+ dev_walker = dev_walker->parent;
+
+ } while (!of_node && dev_walker);
if (of_node) {
ksz9021_load_values_from_of(phydev, of_node,
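
Rather than consulting only the PHY node and its immediate parent, ksz9021_config_init() now walks up the device hierarchy until it reaches a device that carries an OF node, which also covers the deprecated placement of the PHY properties in the MAC node. A stand-alone sketch of that walk (plain C; struct dev and the node names are toy stand-ins for struct device and real DT nodes):

#include <stdio.h>
#include <stddef.h>

struct dev {
        const char *of_node;    /* NULL if this device has no OF node */
        struct dev *parent;
};

static const char *find_of_node(struct dev *d)
{
        const char *of_node;

        /* same shape as the driver's do/while over dev_walker */
        do {
                of_node = d->of_node;
                d = d->parent;
        } while (!of_node && d);

        return of_node;
}

int main(void)
{
        struct dev mac  = { "ethernet@0", NULL };
        struct dev mdio = { NULL, &mac };
        struct dev phy  = { NULL, &mdio };

        printf("found OF node: %s\n", find_of_node(&phy));
        return 0;
}
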
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 5e0b43283bce..0a37f840fcc5 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -568,6 +568,9 @@ static int pppoe_create(struct net *net, struct socket *sock, int kern)
sk->sk_family = PF_PPPOX;
sk->sk_protocol = PX_PROTO_OE;
+ INIT_WORK(&pppox_sk(sk)->proto.pppoe.padt_work,
+ pppoe_unbind_sock_work);
+
return 0;
}
@@ -632,8 +635,6 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);
- INIT_WORK(&po->proto.pppoe.padt_work, pppoe_unbind_sock_work);
-
error = -EINVAL;
if (sp->sa_protocol != PX_PROTO_OE)
goto end;
@@ -663,8 +664,13 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
po->pppoe_dev = NULL;
}
- memset(sk_pppox(po) + 1, 0,
- sizeof(struct pppox_sock) - sizeof(struct sock));
+ po->pppoe_ifindex = 0;
+ memset(&po->pppoe_pa, 0, sizeof(po->pppoe_pa));
+ memset(&po->pppoe_relay, 0, sizeof(po->pppoe_relay));
+ memset(&po->chan, 0, sizeof(po->chan));
+ po->next = NULL;
+ po->num = 0;
+
sk->sk_state = PPPOX_NONE;
}
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index fc69e41d0950..597c53e0a2ec 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -419,6 +419,9 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
struct pptp_opt *opt = &po->proto.pptp;
int error = 0;
+ if (sockaddr_len < sizeof(struct sockaddr_pppox))
+ return -EINVAL;
+
lock_sock(sk);
opt->src_addr = sp->sa_addr.pptp;
@@ -440,6 +443,9 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
struct flowi4 fl4;
int error = 0;
+ if (sockaddr_len < sizeof(struct sockaddr_pppox))
+ return -EINVAL;
+
if (sp->sa_protocol != PX_PROTO_PPTP)
return -EINVAL;
diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
index bbde9884ab8a..bdd83d95ec0a 100644
--- a/drivers/net/usb/cdc_mbim.c
+++ b/drivers/net/usb/cdc_mbim.c
@@ -100,7 +100,7 @@ static const struct net_device_ops cdc_mbim_netdev_ops = {
.ndo_stop = usbnet_stop,
.ndo_start_xmit = usbnet_start_xmit,
.ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_change_mtu = usbnet_change_mtu,
+ .ndo_change_mtu = cdc_ncm_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_vlan_rx_add_vid = cdc_mbim_rx_add_vid,
@@ -158,7 +158,7 @@ static int cdc_mbim_bind(struct usbnet *dev, struct usb_interface *intf)
if (!cdc_ncm_comm_intf_is_mbim(intf->cur_altsetting))
goto err;
- ret = cdc_ncm_bind_common(dev, intf, data_altsetting, 0);
+ ret = cdc_ncm_bind_common(dev, intf, data_altsetting, dev->driver_info->data);
if (ret)
goto err;
@@ -582,6 +582,26 @@ static const struct driver_info cdc_mbim_info_zlp = {
.tx_fixup = cdc_mbim_tx_fixup,
};
+/* The specification explicitly allows NDPs to be placed anywhere in the
+ * frame, but some devices fail unless the NDP is placed after the IP
+ * packets. Use the CDC_NCM_FLAG_NDP_TO_END flag to force this
+ * behaviour.
+ *
+ * Note: The current implementation of this feature restricts each NTB
+ * to a single NDP, implying that multiplexed sessions cannot share an
+ * NTB. This might affect performance for multiplexed sessions.
+ */
+static const struct driver_info cdc_mbim_info_ndp_to_end = {
+ .description = "CDC MBIM",
+ .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
+ .bind = cdc_mbim_bind,
+ .unbind = cdc_mbim_unbind,
+ .manage_power = cdc_mbim_manage_power,
+ .rx_fixup = cdc_mbim_rx_fixup,
+ .tx_fixup = cdc_mbim_tx_fixup,
+ .data = CDC_NCM_FLAG_NDP_TO_END,
+};
+
static const struct usb_device_id mbim_devs[] = {
/* This duplicate NCM entry is intentional. MBIM devices can
* be disguised as NCM by default, and this is necessary to
@@ -597,6 +617,10 @@ static const struct usb_device_id mbim_devs[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bdb, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info,
},
+ /* Huawei E3372 fails unless NDP comes after the IP packets */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x12d1, 0x157d, USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&cdc_mbim_info_ndp_to_end,
+ },
/* default entry */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_MBIM, USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&cdc_mbim_info_zlp,
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 3b1ba8237768..e8a1144c5a8b 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -41,6 +41,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/ctype.h>
+#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
@@ -689,6 +690,33 @@ static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
kfree(ctx);
}
+/* we need to override the usbnet change_mtu ndo for two reasons:
+ * - respect the negotiated maximum datagram size
+ * - avoid unwanted changes to rx and tx buffers
+ */
+int cdc_ncm_change_mtu(struct net_device *net, int new_mtu)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+ int maxmtu = ctx->max_datagram_size - cdc_ncm_eth_hlen(dev);
+
+ if (new_mtu <= 0 || new_mtu > maxmtu)
+ return -EINVAL;
+ net->mtu = new_mtu;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cdc_ncm_change_mtu);
+
+static const struct net_device_ops cdc_ncm_netdev_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_change_mtu = cdc_ncm_change_mtu,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_altsetting, int drvflags)
{
struct cdc_ncm_ctx *ctx;
@@ -823,6 +851,9 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
/* add our sysfs attrs */
dev->net->sysfs_groups[0] = &cdc_ncm_sysfs_attr_group;
+ /* must handle MTU changes */
+ dev->net->netdev_ops = &cdc_ncm_netdev_ops;
+
return 0;
error2:
@@ -955,10 +986,18 @@ static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_
* NTH16 header as we would normally do. NDP isn't written to the SKB yet, and
* the wNdpIndex field in the header is actually not consistent with reality. It will be later.
*/
- if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END)
+ if (ctx->drvflags & CDC_NCM_FLAG_NDP_TO_END) {
if (ctx->delayed_ndp16->dwSignature == sign)
return ctx->delayed_ndp16;
+ /* We can only push a single NDP to the end. Return
+ * NULL to send what we've already got and queue this
+ * skb for later.
+ */
+ else if (ctx->delayed_ndp16->dwSignature)
+ return NULL;
+ }
+
/* follow the chain of NDPs, looking for a match */
while (ndpoffset) {
ndp16 = (struct usb_cdc_ncm_ndp16 *)(skb->data + ndpoffset);
@@ -1550,6 +1589,24 @@ static const struct usb_device_id cdc_devs[] = {
.driver_info = (unsigned long) &wwan_info,
},
+ /* DW5812 LTE Verizon Mobile Broadband Card
+ * Unlike DW5550 this device requires FLAG_NOARP
+ */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x81bb,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_noarp_info,
+ },
+
+ /* DW5813 LTE AT&T Mobile Broadband Card
+ * Unlike DW5550 this device requires FLAG_NOARP
+ */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x81bc,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&wwan_noarp_info,
+ },
+
/* Dell branded MBM devices like DW5550 */
{ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_VENDOR,
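
cdc_ncm_change_mtu() bounds the requested MTU by the negotiated max_datagram_size minus the Ethernet header, and the same hook is wired into both the NCM and MBIM netdev ops so MTU changes respect what the device advertised. A minimal sketch of the clamp (plain C; ETH_HLEN and the 2048-byte datagram size are example values):

#include <stdio.h>

#define ETH_HLEN                14
#define MAX_DATAGRAM_SIZE       2048    /* example negotiated value */

static int change_mtu(int *mtu, int new_mtu)
{
        int maxmtu = MAX_DATAGRAM_SIZE - ETH_HLEN;

        if (new_mtu <= 0 || new_mtu > maxmtu)
                return -1;      /* -EINVAL in the driver */
        *mtu = new_mtu;
        return 0;
}

int main(void)
{
        int mtu = 1500;
        int rc;

        rc = change_mtu(&mtu, 2000);    /* within the negotiated limit */
        printf("set 2000 -> %d (mtu=%d)\n", rc, mtu);

        rc = change_mtu(&mtu, 4000);    /* rejected, mtu unchanged */
        printf("set 4000 -> %d (mtu=%d)\n", rc, mtu);
        return 0;
}
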
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 9a5be8b85186..5fccc5a8153f 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -742,6 +742,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
+ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d9427ca3dba7..2fb637ad594a 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -3067,17 +3067,6 @@ static int rtl8152_open(struct net_device *netdev)
mutex_lock(&tp->control);
- /* The WORK_ENABLE may be set when autoresume occurs */
- if (test_bit(WORK_ENABLE, &tp->flags)) {
- clear_bit(WORK_ENABLE, &tp->flags);
- usb_kill_urb(tp->intr_urb);
- cancel_delayed_work_sync(&tp->schedule);
-
- /* disable the tx/rx, if the workqueue has enabled them. */
- if (netif_carrier_ok(netdev))
- tp->rtl_ops.disable(tp);
- }
-
tp->rtl_ops.up(tp);
rtl8152_set_speed(tp, AUTONEG_ENABLE,
@@ -3124,12 +3113,6 @@ static int rtl8152_close(struct net_device *netdev)
} else {
mutex_lock(&tp->control);
- /* The autosuspend may have been enabled and wouldn't
- * be disable when autoresume occurs, because the
- * netif_running() would be false.
- */
- rtl_runtime_suspend_enable(tp, false);
-
tp->rtl_ops.down(tp);
mutex_unlock(&tp->control);
@@ -3512,7 +3495,7 @@ static int rtl8152_resume(struct usb_interface *intf)
netif_device_attach(tp->netdev);
}
- if (netif_running(tp->netdev)) {
+ if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
rtl_runtime_suspend_enable(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
@@ -3532,6 +3515,8 @@ static int rtl8152_resume(struct usb_interface *intf)
}
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
} else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ if (tp->netdev->flags & IFF_UP)
+ rtl_runtime_suspend_enable(tp, false);
clear_bit(SELECTIVE_SUSPEND, &tp->flags);
}
@@ -3540,6 +3525,14 @@ static int rtl8152_resume(struct usb_interface *intf)
return 0;
}
+static int rtl8152_reset_resume(struct usb_interface *intf)
+{
+ struct r8152 *tp = usb_get_intfdata(intf);
+
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ return rtl8152_resume(intf);
+}
+
static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct r8152 *tp = netdev_priv(dev);
@@ -4291,7 +4284,7 @@ static struct usb_driver rtl8152_driver = {
.disconnect = rtl8152_disconnect,
.suspend = rtl8152_suspend,
.resume = rtl8152_resume,
- .reset_resume = rtl8152_resume,
+ .reset_resume = rtl8152_reset_resume,
.pre_reset = rtl8152_pre_reset,
.post_reset = rtl8152_post_reset,
.supports_autosuspend = 1,
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 0ef4a5ad5557..ba21d072be31 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -117,12 +117,6 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
kfree_skb(skb);
goto drop;
}
- /* don't change ip_summed == CHECKSUM_PARTIAL, as that
- * will cause bad checksum on forwarded packets
- */
- if (skb->ip_summed == CHECKSUM_NONE &&
- rcv->features & NETIF_F_RXCSUM)
- skb->ip_summed = CHECKSUM_UNNECESSARY;
if (likely(dev_forward_skb(rcv, skb) == NET_RX_SUCCESS)) {
struct pcpu_vstats *stats = this_cpu_ptr(dev->vstats);
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 417903715437..0cbf520cea77 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1380,10 +1380,10 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
skip_page_frags = true;
goto rcd_done;
}
- new_dma_addr = dma_map_page(&adapter->pdev->dev
- , rbi->page,
- 0, PAGE_SIZE,
- PCI_DMA_FROMDEVICE);
+ new_dma_addr = dma_map_page(&adapter->pdev->dev,
+ new_page,
+ 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&adapter->pdev->dev,
new_dma_addr)) {
put_page(new_page);
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 4c58c83dc225..bdb8a6c0f8aa 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.4.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.5.0-k"
/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040400
+#define VMXNET3_DRIVER_VERSION_NUM 0x01040500
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 4f9748457f5a..0a242b200df4 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -800,7 +800,7 @@ static struct rtable *vrf_get_rtable(const struct net_device *dev,
}
/* called under rcu_read_lock */
-static void vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
+static int vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
{
struct fib_result res = { .tclassid = 0 };
struct net *net = dev_net(dev);
@@ -808,9 +808,10 @@ static void vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
u8 flags = fl4->flowi4_flags;
u8 scope = fl4->flowi4_scope;
u8 tos = RT_FL_TOS(fl4);
+ int rc;
if (unlikely(!fl4->daddr))
- return;
+ return 0;
fl4->flowi4_flags |= FLOWI_FLAG_SKIP_NH_OIF;
fl4->flowi4_iif = LOOPBACK_IFINDEX;
@@ -818,7 +819,8 @@ static void vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
- if (!fib_lookup(net, fl4, &res, 0)) {
+ rc = fib_lookup(net, fl4, &res, 0);
+ if (!rc) {
if (res.type == RTN_LOCAL)
fl4->saddr = res.fi->fib_prefsrc ? : fl4->daddr;
else
@@ -828,6 +830,8 @@ static void vrf_get_saddr(struct net_device *dev, struct flowi4 *fl4)
fl4->flowi4_flags = flags;
fl4->flowi4_tos = orig_tos;
fl4->flowi4_scope = scope;
+
+ return rc;
}
#if IS_ENABLED(CONFIG_IPV6)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 6369a5734d4c..ba363cedef80 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1158,7 +1158,6 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
struct pcpu_sw_netstats *stats;
union vxlan_addr saddr;
int err = 0;
- union vxlan_addr *remote_ip;
/* For flow based devices, map all packets to VNI 0 */
if (vs->flags & VXLAN_F_COLLECT_METADATA)
@@ -1169,7 +1168,6 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
if (!vxlan)
goto drop;
- remote_ip = &vxlan->default_dst.remote_ip;
skb_reset_mac_header(skb);
skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
skb->protocol = eth_type_trans(skb, vxlan->dev);
@@ -1179,8 +1177,8 @@ static void vxlan_rcv(struct vxlan_sock *vs, struct sk_buff *skb,
if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
goto drop;
- /* Re-examine inner Ethernet packet */
- if (remote_ip->sa.sa_family == AF_INET) {
+ /* Get data from the outer IP header */
+ if (vxlan_get_sk_family(vs) == AF_INET) {
oip = ip_hdr(skb);
saddr.sin.sin_addr.s_addr = oip->saddr;
saddr.sa.sa_family = AF_INET;
@@ -1848,6 +1846,34 @@ static int vxlan_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *sk
!(vxflags & VXLAN_F_UDP_CSUM));
}
+#if IS_ENABLED(CONFIG_IPV6)
+static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
+ struct sk_buff *skb, int oif,
+ const struct in6_addr *daddr,
+ struct in6_addr *saddr)
+{
+ struct dst_entry *ndst;
+ struct flowi6 fl6;
+ int err;
+
+ memset(&fl6, 0, sizeof(fl6));
+ fl6.flowi6_oif = oif;
+ fl6.daddr = *daddr;
+ fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
+ fl6.flowi6_mark = skb->mark;
+ fl6.flowi6_proto = IPPROTO_UDP;
+
+ err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
+ vxlan->vn6_sock->sock->sk,
+ &ndst, &fl6);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ *saddr = fl6.saddr;
+ return ndst;
+}
+#endif
+
/* Bypass encapsulation if the destination is local */
static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
struct vxlan_dev *dst_vxlan)
@@ -2035,21 +2061,17 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
#if IS_ENABLED(CONFIG_IPV6)
} else {
struct dst_entry *ndst;
- struct flowi6 fl6;
+ struct in6_addr saddr;
u32 rt6i_flags;
if (!vxlan->vn6_sock)
goto drop;
sk = vxlan->vn6_sock->sock->sk;
- memset(&fl6, 0, sizeof(fl6));
- fl6.flowi6_oif = rdst ? rdst->remote_ifindex : 0;
- fl6.daddr = dst->sin6.sin6_addr;
- fl6.saddr = vxlan->cfg.saddr.sin6.sin6_addr;
- fl6.flowi6_mark = skb->mark;
- fl6.flowi6_proto = IPPROTO_UDP;
-
- if (ipv6_stub->ipv6_dst_lookup(vxlan->net, sk, &ndst, &fl6)) {
+ ndst = vxlan6_get_route(vxlan, skb,
+ rdst ? rdst->remote_ifindex : 0,
+ &dst->sin6.sin6_addr, &saddr);
+ if (IS_ERR(ndst)) {
netdev_dbg(dev, "no route to %pI6\n",
&dst->sin6.sin6_addr);
dev->stats.tx_carrier_errors++;
@@ -2081,7 +2103,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
}
ttl = ttl ? : ip6_dst_hoplimit(ndst);
- err = vxlan6_xmit_skb(ndst, sk, skb, dev, &fl6.saddr, &fl6.daddr,
+ err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr,
0, ttl, src_port, dst_port, htonl(vni << 8), md,
!net_eq(vxlan->net, dev_net(vxlan->dev)),
flags);
@@ -2395,9 +2417,30 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
vxlan->cfg.port_max, true);
dport = info->key.tp_dst ? : vxlan->cfg.dst_port;
- if (ip_tunnel_info_af(info) == AF_INET)
+ if (ip_tunnel_info_af(info) == AF_INET) {
+ if (!vxlan->vn4_sock)
+ return -EINVAL;
return egress_ipv4_tun_info(dev, skb, info, sport, dport);
- return -EINVAL;
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct dst_entry *ndst;
+
+ if (!vxlan->vn6_sock)
+ return -EINVAL;
+ ndst = vxlan6_get_route(vxlan, skb, 0,
+ &info->key.u.ipv6.dst,
+ &info->key.u.ipv6.src);
+ if (IS_ERR(ndst))
+ return PTR_ERR(ndst);
+ dst_release(ndst);
+
+ info->key.tp_src = sport;
+ info->key.tp_dst = dport;
+#else /* !CONFIG_IPV6 */
+ return -EPFNOSUPPORT;
+#endif
+ }
+ return 0;
}
static const struct net_device_ops vxlan_netdev_ops = {
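The vxlan hunks above factor the duplicated IPv6 route lookup into vxlan6_get_route(), which reports failure through the kernel's ERR_PTR convention so both callers (the transmit path and vxlan_fill_metadata_dst) share one error path. Below is a minimal user-space sketch of that convention only; the ERR_PTR/IS_ERR helpers are local stand-ins for the kernel's err.h and lookup_route() is a hypothetical placeholder for the real dst lookup.

#include <errno.h>
#include <stdio.h>

/* user-space stand-ins for the kernel's err.h helpers */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
}

struct route { const char *via; };

/* hypothetical shared helper: returns a route or an encoded errno */
static struct route *lookup_route(const char *daddr)
{
	static struct route r = { .via = "eth0" };

	if (!daddr)
		return ERR_PTR(-EHOSTUNREACH);
	return &r;
}

int main(void)
{
	struct route *rt = lookup_route("2001:db8::1");

	if (IS_ERR(rt)) {
		fprintf(stderr, "lookup failed: %ld\n", PTR_ERR(rt));
		return 1;
	}
	printf("route via %s\n", rt->via);
	return 0;
}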
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index bf88ec3a65fa..d9a4aee246a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -69,13 +69,19 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX 19
+#define IWL7260_UCODE_API_MAX 17
+#define IWL7265_UCODE_API_MAX 19
+#define IWL7265D_UCODE_API_MAX 19
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 13
+#define IWL7265_UCODE_API_OK 13
+#define IWL7265D_UCODE_API_OK 13
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 13
+#define IWL7265_UCODE_API_MIN 13
+#define IWL7265D_UCODE_API_MIN 13
/* NVM versions */
#define IWL7260_NVM_VERSION 0x0a1d
@@ -149,10 +155,7 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
};
-#define IWL_DEVICE_7000 \
- .ucode_api_max = IWL7260_UCODE_API_MAX, \
- .ucode_api_ok = IWL7260_UCODE_API_OK, \
- .ucode_api_min = IWL7260_UCODE_API_MIN, \
+#define IWL_DEVICE_7000_COMMON \
.device_family = IWL_DEVICE_FAMILY_7000, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
@@ -163,6 +166,24 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
.dccm_offset = IWL7000_DCCM_OFFSET
+#define IWL_DEVICE_7000 \
+ IWL_DEVICE_7000_COMMON, \
+ .ucode_api_max = IWL7260_UCODE_API_MAX, \
+ .ucode_api_ok = IWL7260_UCODE_API_OK, \
+ .ucode_api_min = IWL7260_UCODE_API_MIN
+
+#define IWL_DEVICE_7005 \
+ IWL_DEVICE_7000_COMMON, \
+ .ucode_api_max = IWL7265_UCODE_API_MAX, \
+ .ucode_api_ok = IWL7265_UCODE_API_OK, \
+ .ucode_api_min = IWL7265_UCODE_API_MIN
+
+#define IWL_DEVICE_7005D \
+ IWL_DEVICE_7000_COMMON, \
+ .ucode_api_max = IWL7265D_UCODE_API_MAX, \
+ .ucode_api_ok = IWL7265D_UCODE_API_OK, \
+ .ucode_api_min = IWL7265D_UCODE_API_MIN
+
const struct iwl_cfg iwl7260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 7260",
.fw_name_pre = IWL7260_FW_PRE,
@@ -266,7 +287,7 @@ static const struct iwl_ht_params iwl7265_ht_params = {
const struct iwl_cfg iwl3165_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 3165",
.fw_name_pre = IWL7265D_FW_PRE,
- IWL_DEVICE_7000,
+ IWL_DEVICE_7005D,
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL3165_NVM_VERSION,
.nvm_calib_ver = IWL3165_TX_POWER_VERSION,
@@ -277,7 +298,7 @@ const struct iwl_cfg iwl3165_2ac_cfg = {
const struct iwl_cfg iwl7265_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 7265",
.fw_name_pre = IWL7265_FW_PRE,
- IWL_DEVICE_7000,
+ IWL_DEVICE_7005,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
@@ -288,7 +309,7 @@ const struct iwl_cfg iwl7265_2ac_cfg = {
const struct iwl_cfg iwl7265_2n_cfg = {
.name = "Intel(R) Dual Band Wireless N 7265",
.fw_name_pre = IWL7265_FW_PRE,
- IWL_DEVICE_7000,
+ IWL_DEVICE_7005,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
@@ -299,7 +320,7 @@ const struct iwl_cfg iwl7265_2n_cfg = {
const struct iwl_cfg iwl7265_n_cfg = {
.name = "Intel(R) Wireless N 7265",
.fw_name_pre = IWL7265_FW_PRE,
- IWL_DEVICE_7000,
+ IWL_DEVICE_7005,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
@@ -310,7 +331,7 @@ const struct iwl_cfg iwl7265_n_cfg = {
const struct iwl_cfg iwl7265d_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 7265",
.fw_name_pre = IWL7265D_FW_PRE,
- IWL_DEVICE_7000,
+ IWL_DEVICE_7005D,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265D_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
@@ -321,7 +342,7 @@ const struct iwl_cfg iwl7265d_2ac_cfg = {
const struct iwl_cfg iwl7265d_2n_cfg = {
.name = "Intel(R) Dual Band Wireless N 7265",
.fw_name_pre = IWL7265D_FW_PRE,
- IWL_DEVICE_7000,
+ IWL_DEVICE_7005D,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265D_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
@@ -332,7 +353,7 @@ const struct iwl_cfg iwl7265d_2n_cfg = {
const struct iwl_cfg iwl7265d_n_cfg = {
.name = "Intel(R) Wireless N 7265",
.fw_name_pre = IWL7265D_FW_PRE,
- IWL_DEVICE_7000,
+ IWL_DEVICE_7005D,
.ht_params = &iwl7265_ht_params,
.nvm_ver = IWL7265D_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
@@ -342,5 +363,5 @@ const struct iwl_cfg iwl7265d_n_cfg = {
MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
-MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
-MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
+MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7265_UCODE_API_OK));
+MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7265D_UCODE_API_OK));
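The iwl-7000 change splits the per-device configuration into a shared IWL_DEVICE_7000_COMMON block plus family-specific macros that carry their own firmware API bounds. A compilable sketch of that macro-composition pattern, using hypothetical field names rather than the real struct iwl_cfg layout:

#include <stdio.h>

struct cfg {
	const char *name;
	int family;
	int api_max, api_ok, api_min;
};

/* fields shared by the whole family */
#define DEVICE_COMMON \
	.family = 7000

/* family-specific firmware API bounds layered on top of the common block */
#define DEVICE_A	DEVICE_COMMON, .api_max = 17, .api_ok = 13, .api_min = 13
#define DEVICE_B	DEVICE_COMMON, .api_max = 19, .api_ok = 13, .api_min = 13

static const struct cfg cfg_a = { .name = "A", DEVICE_A };
static const struct cfg cfg_b = { .name = "B", DEVICE_B };

int main(void)
{
	printf("%s: api %d..%d\n", cfg_a.name, cfg_a.api_min, cfg_a.api_max);
	printf("%s: api %d..%d\n", cfg_b.name, cfg_b.api_min, cfg_b.api_max);
	return 0;
}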
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 354acbde088e..2b976b110207 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -1222,8 +1222,8 @@ static u8 iwl_mvm_get_key_sta_id(struct iwl_mvm *mvm,
mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
u8 sta_id = mvmvif->ap_sta_id;
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
- lockdep_is_held(&mvm->mutex));
+ sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
/*
* It is possible that the 'sta' parameter is NULL,
* for example when a GTK is removed - the sta_id will then
@@ -1590,14 +1590,15 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
u16 *phase1key)
{
struct iwl_mvm_sta *mvm_sta;
- u8 sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta);
+ u8 sta_id;
bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
- if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
- return;
-
rcu_read_lock();
+ sta_id = iwl_mvm_get_key_sta_id(mvm, vif, sta);
+ if (WARN_ON_ONCE(sta_id == IWL_MVM_STATION_COUNT))
+ goto unlock;
+
if (!sta) {
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
if (WARN_ON(IS_ERR_OR_NULL(sta))) {
@@ -1609,6 +1610,8 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
mvm_sta = iwl_mvm_sta_from_mac80211(sta);
iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);
+
+ unlock:
rcu_read_unlock();
}
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index e481f3710bd3..1049c34e7d43 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -258,18 +258,18 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
struct netrx_pending_operations *npo)
{
struct xenvif_rx_meta *meta;
- struct xen_netif_rx_request *req;
+ struct xen_netif_rx_request req;
- req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+ RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
meta = npo->meta + npo->meta_prod++;
meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
meta->gso_size = 0;
meta->size = 0;
- meta->id = req->id;
+ meta->id = req.id;
npo->copy_off = 0;
- npo->copy_gref = req->gref;
+ npo->copy_gref = req.gref;
return meta;
}
@@ -424,7 +424,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
struct xenvif *vif = netdev_priv(skb->dev);
int nr_frags = skb_shinfo(skb)->nr_frags;
int i;
- struct xen_netif_rx_request *req;
+ struct xen_netif_rx_request req;
struct xenvif_rx_meta *meta;
unsigned char *data;
int head = 1;
@@ -443,15 +443,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,
/* Set up a GSO prefix descriptor, if necessary */
if ((1 << gso_type) & vif->gso_prefix_mask) {
- req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+ RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
meta = npo->meta + npo->meta_prod++;
meta->gso_type = gso_type;
meta->gso_size = skb_shinfo(skb)->gso_size;
meta->size = 0;
- meta->id = req->id;
+ meta->id = req.id;
}
- req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
+ RING_COPY_REQUEST(&queue->rx, queue->rx.req_cons++, &req);
meta = npo->meta + npo->meta_prod++;
if ((1 << gso_type) & vif->gso_mask) {
@@ -463,9 +463,9 @@ static int xenvif_gop_skb(struct sk_buff *skb,
}
meta->size = 0;
- meta->id = req->id;
+ meta->id = req.id;
npo->copy_off = 0;
- npo->copy_gref = req->gref;
+ npo->copy_gref = req.gref;
data = skb->data;
while (data < skb_tail_pointer(skb)) {
@@ -679,9 +679,7 @@ static void tx_add_credit(struct xenvif_queue *queue)
* Allow a burst big enough to transmit a jumbo packet of up to 128kB.
* Otherwise the interface can seize up due to insufficient credit.
*/
- max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
- max_burst = min(max_burst, 131072UL);
- max_burst = max(max_burst, queue->credit_bytes);
+ max_burst = max(131072UL, queue->credit_bytes);
/* Take care that adding a new chunk of credit doesn't wrap to zero. */
max_credit = queue->remaining_credit + queue->credit_bytes;
@@ -711,7 +709,7 @@ static void xenvif_tx_err(struct xenvif_queue *queue,
spin_unlock_irqrestore(&queue->response_lock, flags);
if (cons == end)
break;
- txp = RING_GET_REQUEST(&queue->tx, cons++);
+ RING_COPY_REQUEST(&queue->tx, cons++, txp);
} while (1);
queue->tx.req_cons = cons;
}
@@ -778,8 +776,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
if (drop_err)
txp = &dropped_tx;
- memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
- sizeof(*txp));
+ RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
/* If the guest submitted a frame >= 64 KiB then
* first->size overflowed and following slots will
@@ -1112,8 +1109,7 @@ static int xenvif_get_extras(struct xenvif_queue *queue,
return -EBADR;
}
- memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
- sizeof(extra));
+ RING_COPY_REQUEST(&queue->tx, cons, &extra);
if (unlikely(!extra.type ||
extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
queue->tx.req_cons = ++cons;
@@ -1322,7 +1318,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
idx = queue->tx.req_cons;
rmb(); /* Ensure that we see the request before we copy it. */
- memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));
+ RING_COPY_REQUEST(&queue->tx, idx, &txreq);
/* Credit-based scheduling. */
if (txreq.size > queue->remaining_credit &&
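The xen-netback hunks replace every RING_GET_REQUEST() dereference with RING_COPY_REQUEST(), which snapshots the request into private memory so the frontend cannot change fields between validation and use (a double-fetch race on the shared ring). A user-space sketch of the snapshot-then-validate idea; struct req and its fields are illustrative, not the real xen_netif layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct req {
	uint16_t id;
	uint16_t size;
};

/* pretend this lives in a page the untrusted peer can rewrite at any time */
static volatile struct req shared_slot = { .id = 7, .size = 1500 };

static void copy_request(struct req *dst, const volatile struct req *src)
{
	/* one snapshot; everything after this reads only the private copy */
	memcpy(dst, (const void *)src, sizeof(*dst));
}

int main(void)
{
	struct req req;

	copy_request(&req, &shared_slot);
	if (req.size > 4096) {		/* validate the snapshot, not the slot */
		fprintf(stderr, "bad size %u\n", req.size);
		return 1;
	}
	printf("request %u, size %u\n", req.id, req.size);
	return 0;
}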
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 9e294ff4e652..0c67b57be83c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2540,8 +2540,17 @@ static void nvme_ns_remove(struct nvme_ns *ns)
{
bool kill = nvme_io_incapable(ns->dev) && !blk_queue_dying(ns->queue);
- if (kill)
+ if (kill) {
blk_set_queue_dying(ns->queue);
+
+ /*
+	 * The controller was shut down first if we got here through
+ * device removal. The shutdown may requeue outstanding
+ * requests. These need to be aborted immediately so
+ * del_gendisk doesn't block indefinitely for their completion.
+ */
+ blk_mq_abort_requeue_list(ns->queue);
+ }
if (ns->disk->flags & GENHD_FL_UP)
del_gendisk(ns->disk);
if (kill || !blk_queue_dying(ns->queue)) {
@@ -2977,6 +2986,15 @@ static void nvme_dev_remove(struct nvme_dev *dev)
{
struct nvme_ns *ns, *next;
+ if (nvme_io_incapable(dev)) {
+ /*
+ * If the device is not capable of IO (surprise hot-removal,
+ * for example), we need to quiesce prior to deleting the
+ * namespaces. This will end outstanding requests and prevent
+ * attempts to sync dirty data.
+ */
+ nvme_dev_shutdown(dev);
+ }
list_for_each_entry_safe(ns, next, &dev->namespaces, list)
nvme_ns_remove(ns);
}
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index f131ba947dc6..c0ad9aaa16a7 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -5,6 +5,7 @@ config PCI_DRA7XX
bool "TI DRA7xx PCIe controller"
select PCIE_DW
depends on OF && HAS_IOMEM && TI_PIPE3
+ depends on BROKEN
help
Enables support for the PCIe controller in the DRA7xx SoC. There
are two instances of PCIe controller in DRA7xx. This controller can
diff --git a/drivers/pci/host/pcie-hisi.c b/drivers/pci/host/pcie-hisi.c
index 163671a4f798..77f7c669a1b9 100644
--- a/drivers/pci/host/pcie-hisi.c
+++ b/drivers/pci/host/pcie-hisi.c
@@ -61,7 +61,9 @@ static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size,
*val = *(u8 __force *) walker;
else if (size == 2)
*val = *(u16 __force *) walker;
- else if (size != 4)
+ else if (size == 4)
+ *val = reg_val;
+ else
return PCIBIOS_BAD_REGISTER_NUMBER;
return PCIBIOS_SUCCESSFUL;
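The pcie-hisi fix adds the missing 4-byte case: previously reg_val was never stored into *val when size == 4. A small stand-alone sketch of the width dispatch; the status values are local placeholders, and the kernel version reads through byte/halfword pointers into the register window while masking is used here only to keep the sketch self-contained:

#include <stdint.h>
#include <stdio.h>

#define BIOS_SUCCESSFUL		0
#define BIOS_BAD_REGISTER	1

/* return the low 1, 2 or 4 bytes of a 32-bit register read */
static int cfg_read(uint32_t reg_val, int size, uint32_t *val)
{
	if (size == 1)
		*val = reg_val & 0xff;
	else if (size == 2)
		*val = reg_val & 0xffff;
	else if (size == 4)
		*val = reg_val;		/* the case the fix adds */
	else
		return BIOS_BAD_REGISTER;
	return BIOS_SUCCESSFUL;
}

int main(void)
{
	uint32_t v;

	if (cfg_read(0x12345678, 4, &v) == BIOS_SUCCESSFUL)
		printf("0x%08x\n", v);
	return 0;
}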
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 7eb5859dd035..03cb3ea2d2c0 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -233,6 +233,7 @@ config PHY_SUN9I_USB
tristate "Allwinner sun9i SoC USB PHY driver"
depends on ARCH_SUNXI && HAS_IOMEM && OF
depends on RESET_CONTROLLER
+ depends on USB_COMMON
select GENERIC_PHY
help
Enable this to support the transceiver that is part of Allwinner
diff --git a/drivers/phy/phy-bcm-cygnus-pcie.c b/drivers/phy/phy-bcm-cygnus-pcie.c
index 7ad72b7d2b98..082c03f6438f 100644
--- a/drivers/phy/phy-bcm-cygnus-pcie.c
+++ b/drivers/phy/phy-bcm-cygnus-pcie.c
@@ -128,6 +128,7 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
struct phy_provider *provider;
struct resource *res;
unsigned cnt = 0;
+ int ret;
if (of_get_child_count(node) == 0) {
dev_err(dev, "PHY no child node\n");
@@ -154,24 +155,28 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
if (of_property_read_u32(child, "reg", &id)) {
dev_err(dev, "missing reg property for %s\n",
child->name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_child;
}
if (id >= MAX_NUM_PHYS) {
dev_err(dev, "invalid PHY id: %u\n", id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_child;
}
if (core->phys[id].phy) {
dev_err(dev, "duplicated PHY id: %u\n", id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_child;
}
p = &core->phys[id];
p->phy = devm_phy_create(dev, child, &cygnus_pcie_phy_ops);
if (IS_ERR(p->phy)) {
dev_err(dev, "failed to create PHY\n");
- return PTR_ERR(p->phy);
+ ret = PTR_ERR(p->phy);
+ goto put_child;
}
p->core = core;
@@ -191,6 +196,9 @@ static int cygnus_pcie_phy_probe(struct platform_device *pdev)
dev_dbg(dev, "registered %u PCIe PHY(s)\n", cnt);
return 0;
+put_child:
+ of_node_put(child);
+ return ret;
}
static const struct of_device_id cygnus_pcie_phy_match_table[] = {
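This and the following phy driver hunks (berlin-sata, brcmstb-sata, miphy28lp, miphy365x, mt65xx-usb3, rockchip-usb) apply the same fix: the for_each_*_child_of_node() iterators hold a reference on the current child, so any early exit from the loop must drop it with of_node_put() instead of returning directly. A generic sketch of the pattern with a hypothetical refcounted node type, since the OF API itself is not usable outside the kernel:

#include <stdio.h>

struct node {
	const char *name;
	int refcount;
};

static void node_get(struct node *n) { n->refcount++; }
static void node_put(struct node *n) { n->refcount--; }

/* walk the children; a reference is held on the node currently handed out */
static int probe_children(struct node *children, int n)
{
	struct node *child;
	int ret = 0;

	for (int i = 0; i < n; i++) {
		child = &children[i];
		node_get(child);
		if (!child->name) {	/* some per-child probe failure */
			ret = -1;
			goto put_child;	/* early return must not leak the ref */
		}
		printf("registered %s\n", child->name);
		node_put(child);
	}
	return 0;

put_child:
	node_put(child);	/* drop the reference still held on this child */
	return ret;
}

int main(void)
{
	struct node kids[] = { { "phy@0", 0 }, { NULL, 0 } };

	probe_children(kids, 2);
	printf("remaining refs: %d %d\n", kids[0].refcount, kids[1].refcount);
	return 0;
}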
diff --git a/drivers/phy/phy-berlin-sata.c b/drivers/phy/phy-berlin-sata.c
index 77a2e054fdea..f84a33a1bdd9 100644
--- a/drivers/phy/phy-berlin-sata.c
+++ b/drivers/phy/phy-berlin-sata.c
@@ -195,7 +195,7 @@ static int phy_berlin_sata_probe(struct platform_device *pdev)
struct phy_provider *phy_provider;
struct phy_berlin_priv *priv;
struct resource *res;
- int i = 0;
+ int ret, i = 0;
u32 phy_id;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
@@ -237,22 +237,27 @@ static int phy_berlin_sata_probe(struct platform_device *pdev)
if (of_property_read_u32(child, "reg", &phy_id)) {
dev_err(dev, "missing reg property in node %s\n",
child->name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_child;
}
if (phy_id >= ARRAY_SIZE(phy_berlin_power_down_bits)) {
dev_err(dev, "invalid reg in node %s\n", child->name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_child;
}
phy_desc = devm_kzalloc(dev, sizeof(*phy_desc), GFP_KERNEL);
- if (!phy_desc)
- return -ENOMEM;
+ if (!phy_desc) {
+ ret = -ENOMEM;
+ goto put_child;
+ }
phy = devm_phy_create(dev, NULL, &phy_berlin_sata_ops);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create PHY %d\n", phy_id);
- return PTR_ERR(phy);
+ ret = PTR_ERR(phy);
+ goto put_child;
}
phy_desc->phy = phy;
@@ -269,6 +274,9 @@ static int phy_berlin_sata_probe(struct platform_device *pdev)
phy_provider =
devm_of_phy_provider_register(dev, phy_berlin_sata_phy_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
+put_child:
+ of_node_put(child);
+ return ret;
}
static const struct of_device_id phy_berlin_sata_of_match[] = {
diff --git a/drivers/phy/phy-brcmstb-sata.c b/drivers/phy/phy-brcmstb-sata.c
index 8a2cb16a1937..cd9dba820566 100644
--- a/drivers/phy/phy-brcmstb-sata.c
+++ b/drivers/phy/phy-brcmstb-sata.c
@@ -140,7 +140,7 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
struct brcm_sata_phy *priv;
struct resource *res;
struct phy_provider *provider;
- int count = 0;
+ int ret, count = 0;
if (of_get_child_count(dn) == 0)
return -ENODEV;
@@ -163,16 +163,19 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
if (of_property_read_u32(child, "reg", &id)) {
dev_err(dev, "missing reg property in node %s\n",
child->name);
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_child;
}
if (id >= MAX_PORTS) {
dev_err(dev, "invalid reg: %u\n", id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_child;
}
if (priv->phys[id].phy) {
dev_err(dev, "already registered port %u\n", id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto put_child;
}
port = &priv->phys[id];
@@ -182,7 +185,8 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
port->ssc_en = of_property_read_bool(child, "brcm,enable-ssc");
if (IS_ERR(port->phy)) {
dev_err(dev, "failed to create PHY\n");
- return PTR_ERR(port->phy);
+ ret = PTR_ERR(port->phy);
+ goto put_child;
}
phy_set_drvdata(port->phy, port);
@@ -198,6 +202,9 @@ static int brcm_sata_phy_probe(struct platform_device *pdev)
dev_info(dev, "registered %d port(s)\n", count);
return 0;
+put_child:
+ of_node_put(child);
+ return ret;
}
static struct platform_driver brcm_sata_phy_driver = {
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index fc48fac003a6..8c7f27db6ad3 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -636,8 +636,9 @@ EXPORT_SYMBOL_GPL(devm_of_phy_get);
* @np: node containing the phy
* @index: index of the phy
*
- * Gets the phy using _of_phy_get(), and associates a device with it using
- * devres. On driver detach, release function is invoked on the devres data,
+ * Gets the phy using _of_phy_get(), takes a reference to it, and
+ * associates a device with it using devres. On driver detach, the
+ * release function is invoked on the devres data,
* then, devres data is freed.
*
*/
@@ -651,13 +652,21 @@ struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
return ERR_PTR(-ENOMEM);
phy = _of_phy_get(np, index);
- if (!IS_ERR(phy)) {
- *ptr = phy;
- devres_add(dev, ptr);
- } else {
+ if (IS_ERR(phy)) {
devres_free(ptr);
+ return phy;
}
+ if (!try_module_get(phy->ops->owner)) {
+ devres_free(ptr);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
+
+ get_device(&phy->dev);
+
+ *ptr = phy;
+ devres_add(dev, ptr);
+
return phy;
}
EXPORT_SYMBOL_GPL(devm_of_phy_get_by_index);
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c
index c47b56b4a2b8..3acd2a1808df 100644
--- a/drivers/phy/phy-miphy28lp.c
+++ b/drivers/phy/phy-miphy28lp.c
@@ -1226,15 +1226,18 @@ static int miphy28lp_probe(struct platform_device *pdev)
miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy),
GFP_KERNEL);
- if (!miphy_phy)
- return -ENOMEM;
+ if (!miphy_phy) {
+ ret = -ENOMEM;
+ goto put_child;
+ }
miphy_dev->phys[port] = miphy_phy;
phy = devm_phy_create(&pdev->dev, child, &miphy28lp_ops);
if (IS_ERR(phy)) {
dev_err(&pdev->dev, "failed to create PHY\n");
- return PTR_ERR(phy);
+ ret = PTR_ERR(phy);
+ goto put_child;
}
miphy_dev->phys[port]->phy = phy;
@@ -1242,11 +1245,11 @@ static int miphy28lp_probe(struct platform_device *pdev)
ret = miphy28lp_of_probe(child, miphy_phy);
if (ret)
- return ret;
+ goto put_child;
ret = miphy28lp_probe_resets(child, miphy_dev->phys[port]);
if (ret)
- return ret;
+ goto put_child;
phy_set_drvdata(phy, miphy_dev->phys[port]);
port++;
@@ -1255,6 +1258,9 @@ static int miphy28lp_probe(struct platform_device *pdev)
provider = devm_of_phy_provider_register(&pdev->dev, miphy28lp_xlate);
return PTR_ERR_OR_ZERO(provider);
+put_child:
+ of_node_put(child);
+ return ret;
}
static const struct of_device_id miphy28lp_of_match[] = {
diff --git a/drivers/phy/phy-miphy365x.c b/drivers/phy/phy-miphy365x.c
index 00a686a073ed..e661f3b36eaa 100644
--- a/drivers/phy/phy-miphy365x.c
+++ b/drivers/phy/phy-miphy365x.c
@@ -566,22 +566,25 @@ static int miphy365x_probe(struct platform_device *pdev)
miphy_phy = devm_kzalloc(&pdev->dev, sizeof(*miphy_phy),
GFP_KERNEL);
- if (!miphy_phy)
- return -ENOMEM;
+ if (!miphy_phy) {
+ ret = -ENOMEM;
+ goto put_child;
+ }
miphy_dev->phys[port] = miphy_phy;
phy = devm_phy_create(&pdev->dev, child, &miphy365x_ops);
if (IS_ERR(phy)) {
dev_err(&pdev->dev, "failed to create PHY\n");
- return PTR_ERR(phy);
+ ret = PTR_ERR(phy);
+ goto put_child;
}
miphy_dev->phys[port]->phy = phy;
ret = miphy365x_of_probe(child, miphy_phy);
if (ret)
- return ret;
+ goto put_child;
phy_set_drvdata(phy, miphy_dev->phys[port]);
@@ -591,12 +594,15 @@ static int miphy365x_probe(struct platform_device *pdev)
&miphy_phy->ctrlreg);
if (ret) {
dev_err(&pdev->dev, "No sysconfig offset found\n");
- return ret;
+ goto put_child;
}
}
provider = devm_of_phy_provider_register(&pdev->dev, miphy365x_xlate);
return PTR_ERR_OR_ZERO(provider);
+put_child:
+ of_node_put(child);
+ return ret;
}
static const struct of_device_id miphy365x_of_match[] = {
diff --git a/drivers/phy/phy-mt65xx-usb3.c b/drivers/phy/phy-mt65xx-usb3.c
index f30b28bd41fe..e427c3b788ff 100644
--- a/drivers/phy/phy-mt65xx-usb3.c
+++ b/drivers/phy/phy-mt65xx-usb3.c
@@ -415,7 +415,7 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev)
struct resource *sif_res;
struct mt65xx_u3phy *u3phy;
struct resource res;
- int port;
+ int port, retval;
u3phy = devm_kzalloc(dev, sizeof(*u3phy), GFP_KERNEL);
if (!u3phy)
@@ -447,31 +447,34 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev)
for_each_child_of_node(np, child_np) {
struct mt65xx_phy_instance *instance;
struct phy *phy;
- int retval;
instance = devm_kzalloc(dev, sizeof(*instance), GFP_KERNEL);
- if (!instance)
- return -ENOMEM;
+ if (!instance) {
+ retval = -ENOMEM;
+ goto put_child;
+ }
u3phy->phys[port] = instance;
phy = devm_phy_create(dev, child_np, &mt65xx_u3phy_ops);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create phy\n");
- return PTR_ERR(phy);
+ retval = PTR_ERR(phy);
+ goto put_child;
}
retval = of_address_to_resource(child_np, 0, &res);
if (retval) {
dev_err(dev, "failed to get address resource(id-%d)\n",
port);
- return retval;
+ goto put_child;
}
instance->port_base = devm_ioremap_resource(&phy->dev, &res);
if (IS_ERR(instance->port_base)) {
dev_err(dev, "failed to remap phy regs\n");
- return PTR_ERR(instance->port_base);
+ retval = PTR_ERR(instance->port_base);
+ goto put_child;
}
instance->phy = phy;
@@ -483,6 +486,9 @@ static int mt65xx_u3phy_probe(struct platform_device *pdev)
provider = devm_of_phy_provider_register(dev, mt65xx_phy_xlate);
return PTR_ERR_OR_ZERO(provider);
+put_child:
+ of_node_put(child_np);
+ return retval;
}
static const struct of_device_id mt65xx_u3phy_id_table[] = {
diff --git a/drivers/phy/phy-rockchip-usb.c b/drivers/phy/phy-rockchip-usb.c
index 91d6f342c565..62c43c435194 100644
--- a/drivers/phy/phy-rockchip-usb.c
+++ b/drivers/phy/phy-rockchip-usb.c
@@ -108,13 +108,16 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
for_each_available_child_of_node(dev->of_node, child) {
rk_phy = devm_kzalloc(dev, sizeof(*rk_phy), GFP_KERNEL);
- if (!rk_phy)
- return -ENOMEM;
+ if (!rk_phy) {
+ err = -ENOMEM;
+ goto put_child;
+ }
if (of_property_read_u32(child, "reg", &reg_offset)) {
dev_err(dev, "missing reg property in node %s\n",
child->name);
- return -EINVAL;
+ err = -EINVAL;
+ goto put_child;
}
rk_phy->reg_offset = reg_offset;
@@ -127,18 +130,22 @@ static int rockchip_usb_phy_probe(struct platform_device *pdev)
rk_phy->phy = devm_phy_create(dev, child, &ops);
if (IS_ERR(rk_phy->phy)) {
dev_err(dev, "failed to create PHY\n");
- return PTR_ERR(rk_phy->phy);
+ err = PTR_ERR(rk_phy->phy);
+ goto put_child;
}
phy_set_drvdata(rk_phy->phy, rk_phy);
/* only power up the usb phy when it is in use, so disable it at init */
err = rockchip_usb_phy_power(rk_phy, 1);
if (err)
- return err;
+ goto put_child;
}
phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
return PTR_ERR_OR_ZERO(phy_provider);
+put_child:
+ of_node_put(child);
+ return err;
}
static const struct of_device_id rockchip_usb_phy_dt_ids[] = {
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index a1ea565fcd46..2e6ca69635aa 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -342,12 +342,6 @@ static int bcm2835_gpio_get(struct gpio_chip *chip, unsigned offset)
return bcm2835_gpio_get_bit(pc, GPLEV0, offset);
}
-static int bcm2835_gpio_direction_output(struct gpio_chip *chip,
- unsigned offset, int value)
-{
- return pinctrl_gpio_direction_output(chip->base + offset);
-}
-
static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev);
@@ -355,6 +349,13 @@ static void bcm2835_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
bcm2835_gpio_set_bit(pc, value ? GPSET0 : GPCLR0, offset);
}
+static int bcm2835_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ bcm2835_gpio_set(chip, offset, value);
+ return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
static int bcm2835_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
struct bcm2835_pinctrl *pc = dev_get_drvdata(chip->dev);
diff --git a/drivers/pinctrl/freescale/pinctrl-vf610.c b/drivers/pinctrl/freescale/pinctrl-vf610.c
index 37a037543d29..587d1ff6210e 100644
--- a/drivers/pinctrl/freescale/pinctrl-vf610.c
+++ b/drivers/pinctrl/freescale/pinctrl-vf610.c
@@ -299,7 +299,7 @@ static const struct pinctrl_pin_desc vf610_pinctrl_pads[] = {
static struct imx_pinctrl_soc_info vf610_pinctrl_info = {
.pins = vf610_pinctrl_pads,
.npins = ARRAY_SIZE(vf610_pinctrl_pads),
- .flags = SHARE_MUX_CONF_REG,
+ .flags = SHARE_MUX_CONF_REG | ZERO_OFFSET_VALID,
};
static const struct of_device_id vf610_pinctrl_of_match[] = {
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
index e42d5d4183f5..5979d38c46b2 100644
--- a/drivers/pinctrl/intel/pinctrl-broxton.c
+++ b/drivers/pinctrl/intel/pinctrl-broxton.c
@@ -28,6 +28,7 @@
.padcfglock_offset = BXT_PADCFGLOCK, \
.hostown_offset = BXT_HOSTSW_OWN, \
.ie_offset = BXT_GPI_IE, \
+ .gpp_size = 32, \
.pin_base = (s), \
.npins = ((e) - (s) + 1), \
}
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 392e28d3f48d..26f6b6ffea5b 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -25,9 +25,6 @@
#include "pinctrl-intel.h"
-/* Maximum number of pads in each group */
-#define NPADS_IN_GPP 24
-
/* Offset from regs */
#define PADBAR 0x00c
#define GPI_IS 0x100
@@ -37,6 +34,7 @@
#define PADOWN_BITS 4
#define PADOWN_SHIFT(p) ((p) % 8 * PADOWN_BITS)
#define PADOWN_MASK(p) (0xf << PADOWN_SHIFT(p))
+#define PADOWN_GPP(p) ((p) / 8)
/* Offset from pad_regs */
#define PADCFG0 0x000
@@ -142,7 +140,7 @@ static void __iomem *intel_get_padcfg(struct intel_pinctrl *pctrl, unsigned pin,
static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin)
{
const struct intel_community *community;
- unsigned padno, gpp, gpp_offset, offset;
+ unsigned padno, gpp, offset, group;
void __iomem *padown;
community = intel_get_community(pctrl, pin);
@@ -152,9 +150,9 @@ static bool intel_pad_owned_by_host(struct intel_pinctrl *pctrl, unsigned pin)
return true;
padno = pin_to_padno(community, pin);
- gpp = padno / NPADS_IN_GPP;
- gpp_offset = padno % NPADS_IN_GPP;
- offset = community->padown_offset + gpp * 16 + (gpp_offset / 8) * 4;
+ group = padno / community->gpp_size;
+ gpp = PADOWN_GPP(padno % community->gpp_size);
+ offset = community->padown_offset + 0x10 * group + gpp * 4;
padown = community->regs + offset;
return !(readl(padown) & PADOWN_MASK(padno));
@@ -173,11 +171,11 @@ static bool intel_pad_acpi_mode(struct intel_pinctrl *pctrl, unsigned pin)
return false;
padno = pin_to_padno(community, pin);
- gpp = padno / NPADS_IN_GPP;
+ gpp = padno / community->gpp_size;
offset = community->hostown_offset + gpp * 4;
hostown = community->regs + offset;
- return !(readl(hostown) & BIT(padno % NPADS_IN_GPP));
+ return !(readl(hostown) & BIT(padno % community->gpp_size));
}
static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin)
@@ -193,7 +191,7 @@ static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin)
return false;
padno = pin_to_padno(community, pin);
- gpp = padno / NPADS_IN_GPP;
+ gpp = padno / community->gpp_size;
/*
* If PADCFGLOCK and PADCFGLOCKTX bits are both clear for this pad,
@@ -202,12 +200,12 @@ static bool intel_pad_locked(struct intel_pinctrl *pctrl, unsigned pin)
*/
offset = community->padcfglock_offset + gpp * 8;
value = readl(community->regs + offset);
- if (value & BIT(pin % NPADS_IN_GPP))
+ if (value & BIT(pin % community->gpp_size))
return true;
offset = community->padcfglock_offset + 4 + gpp * 8;
value = readl(community->regs + offset);
- if (value & BIT(pin % NPADS_IN_GPP))
+ if (value & BIT(pin % community->gpp_size))
return true;
return false;
@@ -663,8 +661,8 @@ static void intel_gpio_irq_ack(struct irq_data *d)
community = intel_get_community(pctrl, pin);
if (community) {
unsigned padno = pin_to_padno(community, pin);
- unsigned gpp_offset = padno % NPADS_IN_GPP;
- unsigned gpp = padno / NPADS_IN_GPP;
+ unsigned gpp_offset = padno % community->gpp_size;
+ unsigned gpp = padno / community->gpp_size;
writel(BIT(gpp_offset), community->regs + GPI_IS + gpp * 4);
}
@@ -685,8 +683,8 @@ static void intel_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
community = intel_get_community(pctrl, pin);
if (community) {
unsigned padno = pin_to_padno(community, pin);
- unsigned gpp_offset = padno % NPADS_IN_GPP;
- unsigned gpp = padno / NPADS_IN_GPP;
+ unsigned gpp_offset = padno % community->gpp_size;
+ unsigned gpp = padno / community->gpp_size;
void __iomem *reg;
u32 value;
@@ -780,8 +778,8 @@ static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on)
return -EINVAL;
padno = pin_to_padno(community, pin);
- gpp = padno / NPADS_IN_GPP;
- gpp_offset = padno % NPADS_IN_GPP;
+ gpp = padno / community->gpp_size;
+ gpp_offset = padno % community->gpp_size;
/* Clear the existing wake status */
writel(BIT(gpp_offset), community->regs + GPI_GPE_STS + gpp * 4);
@@ -819,14 +817,14 @@ static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl,
/* Only interrupts that are enabled */
pending &= enabled;
- for_each_set_bit(gpp_offset, &pending, NPADS_IN_GPP) {
+ for_each_set_bit(gpp_offset, &pending, community->gpp_size) {
unsigned padno, irq;
/*
* The last group in a community can have fewer pins
* than gpp_size.
*/
- padno = gpp_offset + gpp * NPADS_IN_GPP;
+ padno = gpp_offset + gpp * community->gpp_size;
if (padno >= community->npins)
break;
@@ -1002,7 +1000,8 @@ int intel_pinctrl_probe(struct platform_device *pdev,
community->regs = regs;
community->pad_regs = regs + padbar;
- community->ngpps = DIV_ROUND_UP(community->npins, NPADS_IN_GPP);
+ community->ngpps = DIV_ROUND_UP(community->npins,
+ community->gpp_size);
}
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h
index 4ec8b572a288..b60215793017 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.h
+++ b/drivers/pinctrl/intel/pinctrl-intel.h
@@ -55,6 +55,8 @@ struct intel_function {
* ACPI).
* @ie_offset: Register offset of GPI_IE from @regs.
* @pin_base: Starting pin of pins in this community
+ * @gpp_size: Maximum number of pads in each group, such as PADCFGLOCK,
+ * HOSTSW_OWN, GPI_IS, GPI_IE, etc.
* @npins: Number of pins in this community
* @regs: Community specific common registers (reserved for core driver)
* @pad_regs: Community specific pad registers (reserved for core driver)
@@ -68,6 +70,7 @@ struct intel_community {
unsigned hostown_offset;
unsigned ie_offset;
unsigned pin_base;
+ unsigned gpp_size;
size_t npins;
void __iomem *regs;
void __iomem *pad_regs;
diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
index 1de9ae5010db..c725a5313b4e 100644
--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
@@ -30,6 +30,7 @@
.padcfglock_offset = SPT_PADCFGLOCK, \
.hostown_offset = SPT_HOSTSW_OWN, \
.ie_offset = SPT_GPI_IE, \
+ .gpp_size = 24, \
.pin_base = (s), \
.npins = ((e) - (s) + 1), \
}
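The pinctrl-intel series replaces the hard-coded 24-pad group size with a per-community gpp_size (32 on Broxton, 24 on Sunrisepoint above) and derives every register offset from it. A quick arithmetic sketch of the PADOWN addressing the new code uses; the base offset and pad numbers below are illustrative:

#include <stdio.h>

/* offset of the PADOWN dword describing pad 'padno', for a community with
 * a given group size; mirrors the hunk above:
 *   group = padno / gpp_size
 *   gpp   = (padno % gpp_size) / 8
 *   off   = padown_offset + 0x10 * group + 4 * gpp
 */
static unsigned padown_offset(unsigned base, unsigned gpp_size, unsigned padno)
{
	unsigned group = padno / gpp_size;
	unsigned gpp = (padno % gpp_size) / 8;

	return base + 0x10 * group + 4 * gpp;
}

int main(void)
{
	/* same pad number, different group size per SoC generation */
	printf("gpp_size 32, pad 40 -> 0x%x\n", padown_offset(0x20, 32, 40));
	printf("gpp_size 24, pad 40 -> 0x%x\n", padown_offset(0x20, 24, 40));
	return 0;
}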
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index cc97f0869791..48747c28a43d 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1341,10 +1341,13 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu)
for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) {
/* check if the domain is locked by BIOS */
- if (rapl_read_data_raw(rd, FW_LOCK, false, &locked)) {
+ ret = rapl_read_data_raw(rd, FW_LOCK, false, &locked);
+ if (ret)
+ return ret;
+ if (locked) {
pr_info("RAPL package %d domain %s locked by BIOS\n",
rp->id, rd->name);
- rd->state |= DOMAIN_STATE_BIOS_LOCKED;
+ rd->state |= DOMAIN_STATE_BIOS_LOCKED;
}
}
diff --git a/drivers/rtc/rtc-da9063.c b/drivers/rtc/rtc-da9063.c
index 284b587da65c..d6c853bbfa9f 100644
--- a/drivers/rtc/rtc-da9063.c
+++ b/drivers/rtc/rtc-da9063.c
@@ -483,24 +483,23 @@ static int da9063_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
+ rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, DA9063_DRVNAME_RTC,
+ &da9063_rtc_ops, THIS_MODULE);
+ if (IS_ERR(rtc->rtc_dev))
+ return PTR_ERR(rtc->rtc_dev);
+
+ da9063_data_to_tm(data, &rtc->alarm_time, rtc);
+ rtc->rtc_sync = false;
+
irq_alarm = platform_get_irq_byname(pdev, "ALARM");
ret = devm_request_threaded_irq(&pdev->dev, irq_alarm, NULL,
da9063_alarm_event,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
"ALARM", rtc);
- if (ret) {
+ if (ret)
dev_err(&pdev->dev, "Failed to request ALARM IRQ %d: %d\n",
irq_alarm, ret);
- return ret;
- }
-
- rtc->rtc_dev = devm_rtc_device_register(&pdev->dev, DA9063_DRVNAME_RTC,
- &da9063_rtc_ops, THIS_MODULE);
- if (IS_ERR(rtc->rtc_dev))
- return PTR_ERR(rtc->rtc_dev);
- da9063_data_to_tm(data, &rtc->alarm_time, rtc);
- rtc->rtc_sync = false;
return ret;
}
diff --git a/drivers/rtc/rtc-rk808.c b/drivers/rtc/rtc-rk808.c
index 91ca0bc1b484..35c9aada07c8 100644
--- a/drivers/rtc/rtc-rk808.c
+++ b/drivers/rtc/rtc-rk808.c
@@ -56,6 +56,42 @@ struct rk808_rtc {
int irq;
};
+/*
+ * The Rockchip calendar used by the RK808 counts November with 31 days. We use
+ * these translation functions to convert its dates to/from the Gregorian
+ * calendar used by the rest of the world. We arbitrarily define Jan 1st, 2016
+ * as the day when both calendars were in sync, and treat all other dates
+ * relative to that.
+ * NOTE: Other system software (e.g. firmware) that reads the same hardware must
+ * implement this exact same conversion algorithm, with the same anchor date.
+ */
+static time64_t nov2dec_transitions(struct rtc_time *tm)
+{
+ return (tm->tm_year + 1900) - 2016 + (tm->tm_mon + 1 > 11 ? 1 : 0);
+}
+
+static void rockchip_to_gregorian(struct rtc_time *tm)
+{
+	/* If it's Nov 31st, rtc_tm_to_time64() will count that as Dec 1st */
+ time64_t time = rtc_tm_to_time64(tm);
+ rtc_time64_to_tm(time + nov2dec_transitions(tm) * 86400, tm);
+}
+
+static void gregorian_to_rockchip(struct rtc_time *tm)
+{
+ time64_t extra_days = nov2dec_transitions(tm);
+ time64_t time = rtc_tm_to_time64(tm);
+ rtc_time64_to_tm(time - extra_days * 86400, tm);
+
+ /* Compensate if we went back over Nov 31st (will work up to 2381) */
+ if (nov2dec_transitions(tm) < extra_days) {
+ if (tm->tm_mon + 1 == 11)
+ tm->tm_mday++; /* This may result in 31! */
+ else
+ rtc_time64_to_tm(time - (extra_days - 1) * 86400, tm);
+ }
+}
+
/* Read current time and date in RTC */
static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm)
{
@@ -101,9 +137,10 @@ static int rk808_rtc_readtime(struct device *dev, struct rtc_time *tm)
tm->tm_mon = (bcd2bin(rtc_data[4] & MONTHS_REG_MSK)) - 1;
tm->tm_year = (bcd2bin(rtc_data[5] & YEARS_REG_MSK)) + 100;
tm->tm_wday = bcd2bin(rtc_data[6] & WEEKS_REG_MSK);
+ rockchip_to_gregorian(tm);
dev_dbg(dev, "RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n",
1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
- tm->tm_wday, tm->tm_hour , tm->tm_min, tm->tm_sec);
+ tm->tm_wday, tm->tm_hour, tm->tm_min, tm->tm_sec);
return ret;
}
@@ -116,6 +153,10 @@ static int rk808_rtc_set_time(struct device *dev, struct rtc_time *tm)
u8 rtc_data[NUM_TIME_REGS];
int ret;
+ dev_dbg(dev, "set RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n",
+ 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
+ tm->tm_wday, tm->tm_hour, tm->tm_min, tm->tm_sec);
+ gregorian_to_rockchip(tm);
rtc_data[0] = bin2bcd(tm->tm_sec);
rtc_data[1] = bin2bcd(tm->tm_min);
rtc_data[2] = bin2bcd(tm->tm_hour);
@@ -123,9 +164,6 @@ static int rk808_rtc_set_time(struct device *dev, struct rtc_time *tm)
rtc_data[4] = bin2bcd(tm->tm_mon + 1);
rtc_data[5] = bin2bcd(tm->tm_year - 100);
rtc_data[6] = bin2bcd(tm->tm_wday);
- dev_dbg(dev, "set RTC date/time %4d-%02d-%02d(%d) %02d:%02d:%02d\n",
- 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday,
- tm->tm_wday, tm->tm_hour , tm->tm_min, tm->tm_sec);
/* Stop RTC while updating the RTC registers */
ret = regmap_update_bits(rk808->regmap, RK808_RTC_CTRL_REG,
@@ -170,6 +208,7 @@ static int rk808_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
alrm->time.tm_mday = bcd2bin(alrm_data[3] & DAYS_REG_MSK);
alrm->time.tm_mon = (bcd2bin(alrm_data[4] & MONTHS_REG_MSK)) - 1;
alrm->time.tm_year = (bcd2bin(alrm_data[5] & YEARS_REG_MSK)) + 100;
+ rockchip_to_gregorian(&alrm->time);
ret = regmap_read(rk808->regmap, RK808_RTC_INT_REG, &int_reg);
if (ret) {
@@ -227,6 +266,7 @@ static int rk808_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
alrm->time.tm_mday, alrm->time.tm_wday, alrm->time.tm_hour,
alrm->time.tm_min, alrm->time.tm_sec);
+ gregorian_to_rockchip(&alrm->time);
alrm_data[0] = bin2bcd(alrm->time.tm_sec);
alrm_data[1] = bin2bcd(alrm->time.tm_min);
alrm_data[2] = bin2bcd(alrm->time.tm_hour);
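The rk808 hunks convert between the Gregorian calendar and the chip's calendar, which gives November 31 days, anchored at Jan 1st 2016. A user-space sketch of the same arithmetic, assuming glibc's timegm()/gmtime() in place of rtc_tm_to_time64()/rtc_time64_to_tm(); the date in main() is just an example:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <time.h>

/* number of Nov->Dec transitions since the Jan 1st 2016 anchor */
static long nov2dec_transitions(const struct tm *tm)
{
	return (tm->tm_year + 1900) - 2016 + (tm->tm_mon + 1 > 11 ? 1 : 0);
}

static void gregorian_to_rockchip(struct tm *tm)
{
	long extra_days = nov2dec_transitions(tm);
	time_t t = timegm(tm);
	time_t shifted = t - extra_days * 86400;

	*tm = *gmtime(&shifted);
	/* compensate if we stepped back over the fictitious Nov 31st */
	if (nov2dec_transitions(tm) < extra_days) {
		if (tm->tm_mon + 1 == 11) {
			tm->tm_mday++;			/* may become 31 */
		} else {
			shifted = t - (extra_days - 1) * 86400;
			*tm = *gmtime(&shifted);
		}
	}
}

int main(void)
{
	struct tm tm = { .tm_year = 2017 - 1900, .tm_mon = 11, .tm_mday = 25 };

	gregorian_to_rockchip(&tm);
	printf("RK808 stores %04d-%02d-%02d\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
	return 0;
}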
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 61f768518a34..24ec282e15d8 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -599,8 +599,10 @@ static enum ap_wait ap_sm_read(struct ap_device *ap_dev)
status = ap_sm_recv(ap_dev);
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
- if (ap_dev->queue_count > 0)
+ if (ap_dev->queue_count > 0) {
+ ap_dev->state = AP_STATE_WORKING;
return AP_WAIT_AGAIN;
+ }
ap_dev->state = AP_STATE_IDLE;
return AP_WAIT_NONE;
case AP_RESPONSE_NO_PENDING_REPLY:
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index b2a1a81e6fc8..1b831598df7c 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -984,6 +984,36 @@ static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
return vq;
}
+static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
+ __u32 activity)
+{
+ if (vcdev->curr_io & activity) {
+ switch (activity) {
+ case VIRTIO_CCW_DOING_READ_FEAT:
+ case VIRTIO_CCW_DOING_WRITE_FEAT:
+ case VIRTIO_CCW_DOING_READ_CONFIG:
+ case VIRTIO_CCW_DOING_WRITE_CONFIG:
+ case VIRTIO_CCW_DOING_WRITE_STATUS:
+ case VIRTIO_CCW_DOING_SET_VQ:
+ case VIRTIO_CCW_DOING_SET_IND:
+ case VIRTIO_CCW_DOING_SET_CONF_IND:
+ case VIRTIO_CCW_DOING_RESET:
+ case VIRTIO_CCW_DOING_READ_VQ_CONF:
+ case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
+ case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
+ vcdev->curr_io &= ~activity;
+ wake_up(&vcdev->wait_q);
+ break;
+ default:
+ /* don't know what to do... */
+ dev_warn(&vcdev->cdev->dev,
+ "Suspicious activity '%08x'\n", activity);
+ WARN_ON(1);
+ break;
+ }
+ }
+}
+
static void virtio_ccw_int_handler(struct ccw_device *cdev,
unsigned long intparm,
struct irb *irb)
@@ -995,6 +1025,12 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
if (!vcdev)
return;
+ if (IS_ERR(irb)) {
+ vcdev->err = PTR_ERR(irb);
+ virtio_ccw_check_activity(vcdev, activity);
+ /* Don't poke around indicators, something's wrong. */
+ return;
+ }
/* Check if it's a notification from the host. */
if ((intparm == 0) &&
(scsw_stctl(&irb->scsw) ==
@@ -1010,31 +1046,7 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
/* Map everything else to -EIO. */
vcdev->err = -EIO;
}
- if (vcdev->curr_io & activity) {
- switch (activity) {
- case VIRTIO_CCW_DOING_READ_FEAT:
- case VIRTIO_CCW_DOING_WRITE_FEAT:
- case VIRTIO_CCW_DOING_READ_CONFIG:
- case VIRTIO_CCW_DOING_WRITE_CONFIG:
- case VIRTIO_CCW_DOING_WRITE_STATUS:
- case VIRTIO_CCW_DOING_SET_VQ:
- case VIRTIO_CCW_DOING_SET_IND:
- case VIRTIO_CCW_DOING_SET_CONF_IND:
- case VIRTIO_CCW_DOING_RESET:
- case VIRTIO_CCW_DOING_READ_VQ_CONF:
- case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
- case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
- vcdev->curr_io &= ~activity;
- wake_up(&vcdev->wait_q);
- break;
- default:
- /* don't know what to do... */
- dev_warn(&cdev->dev, "Suspicious activity '%08x'\n",
- activity);
- WARN_ON(1);
- break;
- }
- }
+ virtio_ccw_check_activity(vcdev, activity);
for_each_set_bit(i, &vcdev->indicators,
sizeof(vcdev->indicators) * BITS_PER_BYTE) {
/* The bit clear must happen before the vring kick. */
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index e4b799837948..459abe1dcc87 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -219,13 +219,13 @@ static int sdev_runtime_suspend(struct device *dev)
struct scsi_device *sdev = to_scsi_device(dev);
int err = 0;
- if (pm && pm->runtime_suspend) {
- err = blk_pre_runtime_suspend(sdev->request_queue);
- if (err)
- return err;
+ err = blk_pre_runtime_suspend(sdev->request_queue);
+ if (err)
+ return err;
+ if (pm && pm->runtime_suspend)
err = pm->runtime_suspend(dev);
- blk_post_runtime_suspend(sdev->request_queue, err);
- }
+ blk_post_runtime_suspend(sdev->request_queue, err);
+
return err;
}
@@ -248,11 +248,11 @@ static int sdev_runtime_resume(struct device *dev)
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int err = 0;
- if (pm && pm->runtime_resume) {
- blk_pre_runtime_resume(sdev->request_queue);
+ blk_pre_runtime_resume(sdev->request_queue);
+ if (pm && pm->runtime_resume)
err = pm->runtime_resume(dev);
- blk_post_runtime_resume(sdev->request_queue, err);
- }
+ blk_post_runtime_resume(sdev->request_queue, err);
+
return err;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3d22fc3e3c1a..4e08d1cd704d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2885,10 +2885,13 @@ static int sd_revalidate_disk(struct gendisk *disk)
/*
* Use the device's preferred I/O size for reads and writes
- * unless the reported value is unreasonably large (or garbage).
+ * unless the reported value is unreasonably small, large, or
+ * garbage.
*/
- if (sdkp->opt_xfer_blocks && sdkp->opt_xfer_blocks <= dev_max &&
- sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS)
+ if (sdkp->opt_xfer_blocks &&
+ sdkp->opt_xfer_blocks <= dev_max &&
+ sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
+ sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
rw_max = q->limits.io_opt =
logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
else
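The sd.c change rejects a reported optimal transfer length that is smaller than the page cache granularity, in addition to the existing upper bounds. A tiny sketch of the sanity check; the SD_DEF_XFER_BLOCKS stand-in, PAGE_SIZE and the 512-byte sector below are hard-coded for illustration:

#include <stdbool.h>
#include <stdio.h>

#define DEF_XFER_BLOCKS	0xffffu		/* stand-in for SD_DEF_XFER_BLOCKS */
#define PAGE_SIZE	4096u

/* accept the device's optimal I/O size only when it is sane */
static bool opt_xfer_ok(unsigned opt_blocks, unsigned dev_max,
			unsigned sector_size)
{
	return opt_blocks &&
	       opt_blocks <= dev_max &&
	       opt_blocks <= DEF_XFER_BLOCKS &&
	       opt_blocks * sector_size >= PAGE_SIZE;	/* new lower bound */
}

int main(void)
{
	printf("%d\n", opt_xfer_ok(1, 0xffff, 512));	/* 512 B: rejected */
	printf("%d\n", opt_xfer_ok(8, 0xffff, 512));	/* 4 KiB: accepted */
	return 0;
}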
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index dcb0d76d7312..044d06410d4c 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -84,6 +84,7 @@ static void init_device_slot_control(unsigned char *dest_desc,
static int ses_recv_diag(struct scsi_device *sdev, int page_code,
void *buf, int bufflen)
{
+ int ret;
unsigned char cmd[] = {
RECEIVE_DIAGNOSTIC,
1, /* Set PCV bit */
@@ -92,9 +93,26 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code,
bufflen & 0xff,
0
};
+ unsigned char recv_page_code;
- return scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
+ ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
NULL, SES_TIMEOUT, SES_RETRIES, NULL);
+ if (unlikely(!ret))
+ return ret;
+
+ recv_page_code = ((unsigned char *)buf)[0];
+
+ if (likely(recv_page_code == page_code))
+ return ret;
+
+	/* successful diagnostic but wrong page code; this happens to some
+	 * USB devices, so just print a message and pretend there was an error */
+
+ sdev_printk(KERN_ERR, sdev,
+ "Wrong diagnostic page; asked for %d got %u\n",
+ page_code, recv_page_code);
+
+ return -EINVAL;
}
static int ses_send_diag(struct scsi_device *sdev, int page_code,
@@ -541,7 +559,15 @@ static void ses_enclosure_data_process(struct enclosure_device *edev,
if (desc_ptr)
desc_ptr += len;
- if (addl_desc_ptr)
+ if (addl_desc_ptr &&
+ /* only find additional descriptions for specific devices */
+ (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
+ type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE ||
+ type_ptr[0] == ENCLOSURE_COMPONENT_SAS_EXPANDER ||
+ /* these elements are optional */
+ type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_TARGET_PORT ||
+ type_ptr[0] == ENCLOSURE_COMPONENT_SCSI_INITIATOR_PORT ||
+ type_ptr[0] == ENCLOSURE_COMPONENT_CONTROLLER_ELECTRONICS))
addl_desc_ptr += addl_desc_ptr[1] + 2;
}
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 59a11437db70..39412c9097c6 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -167,7 +167,7 @@ static inline int is_double_byte_mode(struct fsl_dspi *dspi)
{
unsigned int val;
- regmap_read(dspi->regmap, SPI_CTAR(dspi->cs), &val);
+ regmap_read(dspi->regmap, SPI_CTAR(0), &val);
return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
}
@@ -257,7 +257,7 @@ static u32 dspi_data_to_pushr(struct fsl_dspi *dspi, int tx_word)
return SPI_PUSHR_TXDATA(d16) |
SPI_PUSHR_PCS(dspi->cs) |
- SPI_PUSHR_CTAS(dspi->cs) |
+ SPI_PUSHR_CTAS(0) |
SPI_PUSHR_CONT;
}
@@ -290,7 +290,7 @@ static int dspi_eoq_write(struct fsl_dspi *dspi)
*/
if (tx_word && (dspi->len == 1)) {
dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
- regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
+ regmap_update_bits(dspi->regmap, SPI_CTAR(0),
SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
tx_word = 0;
}
@@ -339,7 +339,7 @@ static int dspi_tcfq_write(struct fsl_dspi *dspi)
if (tx_word && (dspi->len == 1)) {
dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
- regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
+ regmap_update_bits(dspi->regmap, SPI_CTAR(0),
SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
tx_word = 0;
}
@@ -407,7 +407,7 @@ static int dspi_transfer_one_message(struct spi_master *master,
regmap_update_bits(dspi->regmap, SPI_MCR,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
- regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
+ regmap_write(dspi->regmap, SPI_CTAR(0),
dspi->cur_chip->ctar_val);
trans_mode = dspi->devtype_data->trans_mode;
@@ -566,7 +566,7 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
if (!dspi->len) {
if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM) {
regmap_update_bits(dspi->regmap,
- SPI_CTAR(dspi->cs),
+ SPI_CTAR(0),
SPI_FRAME_BITS_MASK,
SPI_FRAME_BITS(16));
dspi->dataflags &= ~TRAN_STATE_WORD_ODD_NUM;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 2b0a8ec3affb..dee1cb87d24f 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1705,7 +1705,7 @@ struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
master->bus_num = -1;
master->num_chipselect = 1;
master->dev.class = &spi_master_class;
- master->dev.parent = get_device(dev);
+ master->dev.parent = dev;
spi_master_set_devdata(master, &master[1]);
return master;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 91a0fcd72423..d0e7dfc647cf 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -651,11 +651,11 @@ static int spidev_release(struct inode *inode, struct file *filp)
kfree(spidev->rx_buffer);
spidev->rx_buffer = NULL;
+ spin_lock_irq(&spidev->spi_lock);
if (spidev->spi)
spidev->speed_hz = spidev->spi->max_speed_hz;
/* ... after we unbound from the underlying device? */
- spin_lock_irq(&spidev->spi_lock);
dofree = (spidev->spi == NULL);
spin_unlock_irq(&spidev->spi_lock);
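The spidev change moves the spi_lock acquisition above the spidev->spi dereference, so the pointer is read under the same lock the unbind path uses when clearing it. A minimal pthread sketch of reading a tear-down-able pointer only under its lock; the types and names are illustrative, and no second thread is actually started here:

#include <pthread.h>
#include <stdio.h>

struct spi_dev { int max_speed_hz; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct spi_dev dev = { 1000000 };
static struct spi_dev *shared = &dev;	/* an unbind thread may NULL this */

static int read_speed(void)
{
	int speed = 0;

	pthread_mutex_lock(&lock);
	if (shared)			/* check and use under the same lock */
		speed = shared->max_speed_hz;
	pthread_mutex_unlock(&lock);
	return speed;
}

int main(void)
{
	printf("%d\n", read_speed());
	return 0;
}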
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 195c41d7bd53..0813163f962f 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -81,7 +81,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
err:
sg = table->sgl;
for (i -= 1; i >= 0; i--) {
- gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
+ gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
sg->length);
sg = sg_next(sg);
}
@@ -109,7 +109,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
DMA_BIDIRECTIONAL);
for_each_sg(table->sgl, sg, table->nents, i) {
- gen_pool_free(chunk_heap->pool, sg_phys(sg) & PAGE_MASK,
+ gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
sg->length);
}
chunk_heap->allocated -= allocated_size;
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index ed776149261e..e49c2bce551d 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2054,13 +2054,13 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
size_t eol;
size_t tail;
int ret, found = 0;
- bool eof_push = 0;
/* N.B. avoid overrun if nr == 0 */
- n = min(*nr, smp_load_acquire(&ldata->canon_head) - ldata->read_tail);
- if (!n)
+ if (!*nr)
return 0;
+ n = min(*nr + 1, smp_load_acquire(&ldata->canon_head) - ldata->read_tail);
+
tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1);
size = min_t(size_t, tail + n, N_TTY_BUF_SIZE);
@@ -2081,12 +2081,11 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
n = eol - tail;
if (n > N_TTY_BUF_SIZE)
n += N_TTY_BUF_SIZE;
- n += found;
- c = n;
+ c = n + found;
- if (found && !ldata->push && read_buf(ldata, eol) == __DISABLED_CHAR) {
- n--;
- eof_push = !n && ldata->read_tail != ldata->line_start;
+ if (!found || read_buf(ldata, eol) != __DISABLED_CHAR) {
+ c = min(*nr, c);
+ n = c;
}
n_tty_trace("%s: eol:%zu found:%d n:%zu c:%zu size:%zu more:%zu\n",
@@ -2116,7 +2115,7 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
ldata->push = 0;
tty_audit_push(tty);
}
- return eof_push ? -EAGAIN : 0;
+ return 0;
}
extern ssize_t redirected_tty_write(struct file *, const char __user *,
@@ -2273,10 +2272,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
if (ldata->icanon && !L_EXTPROC(tty)) {
retval = canon_copy_from_read_buf(tty, &b, &nr);
- if (retval == -EAGAIN) {
- retval = 0;
- continue;
- } else if (retval)
+ if (retval)
break;
} else {
int uncopied;
diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
index d11621e2cf1d..245edbb68d4b 100644
--- a/drivers/tty/serial/8250/8250_uniphier.c
+++ b/drivers/tty/serial/8250/8250_uniphier.c
@@ -115,12 +115,16 @@ static void uniphier_serial_out(struct uart_port *p, int offset, int value)
*/
static int uniphier_serial_dl_read(struct uart_8250_port *up)
{
- return readl(up->port.membase + UNIPHIER_UART_DLR);
+ int offset = UNIPHIER_UART_DLR << up->port.regshift;
+
+ return readl(up->port.membase + offset);
}
static void uniphier_serial_dl_write(struct uart_8250_port *up, int value)
{
- writel(value, up->port.membase + UNIPHIER_UART_DLR);
+ int offset = UNIPHIER_UART_DLR << up->port.regshift;
+
+ writel(value, up->port.membase + offset);
}
static int uniphier_of_serial_setup(struct device *dev, struct uart_port *port,
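The 8250_uniphier fix scales the divisor-latch register number by the port's regshift before adding it to the MMIO base, so the divisor latch is addressed with the same register spacing as the rest of the block. A one-line sketch of that scaling; the register number used here is illustrative:

#include <stdio.h>

#define UART_DLR	9	/* illustrative register number */

static unsigned reg_offset(unsigned reg, unsigned regshift)
{
	return reg << regshift;	/* 4-byte-spaced registers when regshift == 2 */
}

int main(void)
{
	printf("DLR at byte offset 0x%x\n", reg_offset(UART_DLR, 2));
	return 0;
}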
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
index f09636083426..b5b2f2be6be7 100644
--- a/drivers/tty/serial/earlycon.c
+++ b/drivers/tty/serial/earlycon.c
@@ -115,6 +115,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match)
if (buf && !parse_options(&early_console_dev, buf))
buf = NULL;
+ spin_lock_init(&port->lock);
port->uartclk = BASE_BAUD * 16;
if (port->mapbase)
port->membase = earlycon_map(port->mapbase, 64);
@@ -202,6 +203,7 @@ int __init of_setup_earlycon(unsigned long addr,
int err;
struct uart_port *port = &early_console_dev.port;
+ spin_lock_init(&port->lock);
port->iotype = UPIO_MEM;
port->mapbase = addr;
port->uartclk = BASE_BAUD * 16;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 960e50a97558..51c7507b0444 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1437,7 +1437,7 @@ static void sci_request_dma(struct uart_port *port)
sg_init_table(sg, 1);
s->rx_buf[i] = buf;
sg_dma_address(sg) = dma;
- sg->length = s->buf_len_rx;
+ sg_dma_len(sg) = s->buf_len_rx;
buf += s->buf_len_rx;
dma += s->buf_len_rx;
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index 064031870ba0..ca0d3802f2af 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -148,8 +148,10 @@ static int receive_chars_read(struct uart_port *port)
uart_handle_dcd_change(port, 1);
}
- for (i = 0; i < bytes_read; i++)
- uart_handle_sysrq_char(port, con_read_page[i]);
+ if (port->sysrq != 0 && *con_read_page) {
+ for (i = 0; i < bytes_read; i++)
+ uart_handle_sysrq_char(port, con_read_page[i]);
+ }
if (port->state == NULL)
continue;
@@ -168,17 +170,17 @@ struct sunhv_ops {
int (*receive_chars)(struct uart_port *port);
};
-static struct sunhv_ops bychar_ops = {
+static const struct sunhv_ops bychar_ops = {
.transmit_chars = transmit_chars_putchar,
.receive_chars = receive_chars_getchar,
};
-static struct sunhv_ops bywrite_ops = {
+static const struct sunhv_ops bywrite_ops = {
.transmit_chars = transmit_chars_write,
.receive_chars = receive_chars_read,
};
-static struct sunhv_ops *sunhv_ops = &bychar_ops;
+static const struct sunhv_ops *sunhv_ops = &bychar_ops;
static struct tty_port *receive_chars(struct uart_port *port)
{
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index 9a479e61791a..3cd31e0d4bd9 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -450,7 +450,7 @@ receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count)
count = disc->ops->receive_buf2(tty, p, f, count);
else {
count = min_t(int, count, tty->receive_room);
- if (count)
+ if (count && disc->ops->receive_buf)
disc->ops->receive_buf(tty, p, f, count);
}
return count;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index a5cc032ef77a..ddbf32d599cb 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1035,10 +1035,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
unsigned delay;
/* Continue a partial initialization */
- if (type == HUB_INIT2)
- goto init2;
- if (type == HUB_INIT3)
+ if (type == HUB_INIT2 || type == HUB_INIT3) {
+ device_lock(hub->intfdev);
+
+ /* Was the hub disconnected while we were waiting? */
+ if (hub->disconnected) {
+ device_unlock(hub->intfdev);
+ kref_put(&hub->kref, hub_release);
+ return;
+ }
+ if (type == HUB_INIT2)
+ goto init2;
goto init3;
+ }
+ kref_get(&hub->kref);
/* A SuperSpeed hub, other than the root hub, has to use the Hub Depth
* value as an offset into the route string to locate the bits
@@ -1236,6 +1246,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
queue_delayed_work(system_power_efficient_wq,
&hub->init_work,
msecs_to_jiffies(delay));
+ device_unlock(hub->intfdev);
return; /* Continues at init3: below */
} else {
msleep(delay);
@@ -1257,6 +1268,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
/* Allow autosuspend if it was suppressed */
if (type <= HUB_INIT3)
usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
+
+ if (type == HUB_INIT2 || type == HUB_INIT3)
+ device_unlock(hub->intfdev);
+
+ kref_put(&hub->kref, hub_release);
}
/* Implement the continuations for the delays above */
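
The hub.c change above guards the deferred HUB_INIT2/HUB_INIT3 stages against a concurrent disconnect: a reference is taken before the remaining work is queued, the later stage re-checks hub->disconnected under the device lock before touching the hub, and every exit path drops the reference. A rough userspace sketch of that shape, with a made-up struct hub_like and pthread locking standing in for the kernel's kref and device_lock():

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct hub_like {
	pthread_mutex_t lock;
	int ref;
	bool disconnected;
};

static void hub_get(struct hub_like *h)
{
	pthread_mutex_lock(&h->lock);
	h->ref++;
	pthread_mutex_unlock(&h->lock);
}

static void hub_put(struct hub_like *h)
{
	pthread_mutex_lock(&h->lock);
	int last = (--h->ref == 0);
	pthread_mutex_unlock(&h->lock);
	if (last)
		free(h);			/* release, as hub_release() would */
}

/* Deferred stage run later from a worker; mirrors the init2:/init3: paths. */
static void init_later_stage(struct hub_like *h)
{
	pthread_mutex_lock(&h->lock);		/* device_lock() in the driver */
	if (h->disconnected) {			/* hub went away while we waited */
		pthread_mutex_unlock(&h->lock);
		hub_put(h);			/* balance the get taken before queueing */
		return;
	}
	/* ... continue initialization ... */
	pthread_mutex_unlock(&h->lock);
	hub_put(h);
}

int main(void)
{
	struct hub_like *h = calloc(1, sizeof(*h));

	if (!h)
		return 1;
	pthread_mutex_init(&h->lock, NULL);
	h->ref = 1;			/* creation reference */

	hub_get(h);			/* taken before "queueing" the deferred stage */
	init_later_stage(h);		/* worker runs the deferred stage */

	hub_put(h);			/* drop the creation reference */
	return 0;
}
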
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index f51a5d52c0ed..ec1b8f2c1183 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -531,7 +531,8 @@ static int ipaq_open(struct tty_struct *tty,
* through. Since this has a reasonably high failure rate, we retry
* several times.
*/
- while (retries--) {
+ while (retries) {
+ retries--;
result = usb_control_msg(serial->dev,
usb_sndctrlpipe(serial->dev, 0), 0x22, 0x21,
0x1, 0, NULL, 0, 100);
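
The ipaq.c change moves the decrement out of the loop condition so that retries ends at 0, not -1, once all attempts are used up, which matters to any later test of the counter outside this hunk. A small stand-alone C program showing the difference in the post-loop value:

#include <stdio.h>

int main(void)
{
	int retries;

	/* Post-decrement in the condition: the counter is -1 once exhausted. */
	retries = 3;
	while (retries--)
		;	/* attempt would go here */
	printf("while (retries--)              -> retries == %d\n", retries);

	/* Decrement inside the body: the counter is 0 once exhausted. */
	retries = 3;
	while (retries) {
		retries--;
		/* attempt would go here */
	}
	printf("while (retries) { retries--; } -> retries == %d\n", retries);

	return 0;
}
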
diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c
index b335c1ae8625..fe00a07c122e 100644
--- a/drivers/video/fbdev/fsl-diu-fb.c
+++ b/drivers/video/fbdev/fsl-diu-fb.c
@@ -479,7 +479,10 @@ static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s)
port = FSL_DIU_PORT_DLVDS;
}
- return diu_ops.valid_monitor_port(port);
+ if (diu_ops.valid_monitor_port)
+ port = diu_ops.valid_monitor_port(port);
+
+ return port;
}
/*
@@ -1915,6 +1918,14 @@ static int __init fsl_diu_init(void)
#else
monitor_port = fsl_diu_name_to_port(monitor_string);
#endif
+
+ /*
+ * Verify that set_pixel_clock is implemented; if it is not, there is
+ * no platform support for the DIU.
+ */
+ if (!diu_ops.set_pixel_clock)
+ return -ENODEV;
+
pr_info("Freescale Display Interface Unit (DIU) framebuffer driver\n");
#ifdef CONFIG_NOT_COHERENT_CACHE
diff --git a/drivers/video/fbdev/omap2/dss/venc.c b/drivers/video/fbdev/omap2/dss/venc.c
index 99ca268c1cdd..d05a54922ba6 100644
--- a/drivers/video/fbdev/omap2/dss/venc.c
+++ b/drivers/video/fbdev/omap2/dss/venc.c
@@ -275,6 +275,12 @@ const struct omap_video_timings omap_dss_pal_timings = {
.vbp = 41,
.interlace = true,
+
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
};
EXPORT_SYMBOL(omap_dss_pal_timings);
@@ -290,6 +296,12 @@ const struct omap_video_timings omap_dss_ntsc_timings = {
.vbp = 31,
.interlace = true,
+
+ .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
+ .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
+ .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
+ .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE,
};
EXPORT_SYMBOL(omap_dss_ntsc_timings);
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index e3e9e3d46d1b..96a1b8da5371 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -281,7 +281,8 @@ static void handle_irq_for_port(unsigned port)
static void consume_one_event(unsigned cpu,
struct evtchn_fifo_control_block *control_block,
- unsigned priority, unsigned long *ready)
+ unsigned priority, unsigned long *ready,
+ bool drop)
{
struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
uint32_t head;
@@ -313,13 +314,17 @@ static void consume_one_event(unsigned cpu,
if (head == 0)
clear_bit(priority, ready);
- if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port))
- handle_irq_for_port(port);
+ if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
+ if (unlikely(drop))
+ pr_warn("Dropping pending event for port %u\n", port);
+ else
+ handle_irq_for_port(port);
+ }
q->head[priority] = head;
}
-static void evtchn_fifo_handle_events(unsigned cpu)
+static void __evtchn_fifo_handle_events(unsigned cpu, bool drop)
{
struct evtchn_fifo_control_block *control_block;
unsigned long ready;
@@ -331,11 +336,16 @@ static void evtchn_fifo_handle_events(unsigned cpu)
while (ready) {
q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
- consume_one_event(cpu, control_block, q, &ready);
+ consume_one_event(cpu, control_block, q, &ready, drop);
ready |= xchg(&control_block->ready, 0);
}
}
+static void evtchn_fifo_handle_events(unsigned cpu)
+{
+ __evtchn_fifo_handle_events(cpu, false);
+}
+
static void evtchn_fifo_resume(void)
{
unsigned cpu;
@@ -420,6 +430,9 @@ static int evtchn_fifo_cpu_notification(struct notifier_block *self,
if (!per_cpu(cpu_control_block, cpu))
ret = evtchn_fifo_alloc_control_block(cpu);
break;
+ case CPU_DEAD:
+ __evtchn_fifo_handle_events(cpu, true);
+ break;
default:
break;
}
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index 58e38d586f52..4d529f3e40df 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -37,6 +37,7 @@ struct xen_pcibk_device {
struct xen_pci_sharedinfo *sh_info;
unsigned long flags;
struct work_struct op_work;
+ struct xen_pci_op op;
};
struct xen_pcibk_dev_data {
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index c4a0666de6f5..73dafdc494aa 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -70,6 +70,13 @@ static void xen_pcibk_control_isr(struct pci_dev *dev, int reset)
enable ? "enable" : "disable");
if (enable) {
+ /*
+ * MSI or MSI-X must not have an IRQ handler installed here; otherwise,
+ * if the guest terminates, we hit the BUG_ON in free_msi_irqs().
+ */
+ if (dev->msi_enabled || dev->msix_enabled)
+ goto out;
+
rc = request_irq(dev_data->irq,
xen_pcibk_guest_interrupt, IRQF_SHARED,
dev_data->irq_name, dev);
@@ -144,7 +151,12 @@ int xen_pcibk_enable_msi(struct xen_pcibk_device *pdev,
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: enable MSI\n", pci_name(dev));
- status = pci_enable_msi(dev);
+ if (dev->msi_enabled)
+ status = -EALREADY;
+ else if (dev->msix_enabled)
+ status = -ENXIO;
+ else
+ status = pci_enable_msi(dev);
if (status) {
pr_warn_ratelimited("%s: error enabling MSI for guest %u: err %d\n",
@@ -173,20 +185,23 @@ static
int xen_pcibk_disable_msi(struct xen_pcibk_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
- struct xen_pcibk_dev_data *dev_data;
-
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: disable MSI\n",
pci_name(dev));
- pci_disable_msi(dev);
+ if (dev->msi_enabled) {
+ struct xen_pcibk_dev_data *dev_data;
+
+ pci_disable_msi(dev);
+
+ dev_data = pci_get_drvdata(dev);
+ if (dev_data)
+ dev_data->ack_intr = 1;
+ }
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: MSI: %d\n", pci_name(dev),
op->value);
- dev_data = pci_get_drvdata(dev);
- if (dev_data)
- dev_data->ack_intr = 1;
return 0;
}
@@ -197,13 +212,26 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
struct xen_pcibk_dev_data *dev_data;
int i, result;
struct msix_entry *entries;
+ u16 cmd;
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: enable MSI-X\n",
pci_name(dev));
+
if (op->value > SH_INFO_MAX_VEC)
return -EINVAL;
+ if (dev->msix_enabled)
+ return -EALREADY;
+
+ /*
+ * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
+ * to access the BARs where the MSI-X entries reside.
+ */
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
+ return -ENXIO;
+
entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
if (entries == NULL)
return -ENOMEM;
@@ -245,23 +273,27 @@ static
int xen_pcibk_disable_msix(struct xen_pcibk_device *pdev,
struct pci_dev *dev, struct xen_pci_op *op)
{
- struct xen_pcibk_dev_data *dev_data;
if (unlikely(verbose_request))
printk(KERN_DEBUG DRV_NAME ": %s: disable MSI-X\n",
pci_name(dev));
- pci_disable_msix(dev);
+ if (dev->msix_enabled) {
+ struct xen_pcibk_dev_data *dev_data;
+
+ pci_disable_msix(dev);
+
+ dev_data = pci_get_drvdata(dev);
+ if (dev_data)
+ dev_data->ack_intr = 1;
+ }
/*
* SR-IOV devices (which don't have any legacy IRQ) have
* an undefined IRQ value of zero.
*/
op->value = dev->irq ? xen_pirq_from_irq(dev->irq) : 0;
if (unlikely(verbose_request))
- printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n", pci_name(dev),
- op->value);
- dev_data = pci_get_drvdata(dev);
- if (dev_data)
- dev_data->ack_intr = 1;
+ printk(KERN_DEBUG DRV_NAME ": %s: MSI-X: %d\n",
+ pci_name(dev), op->value);
return 0;
}
#endif
@@ -298,9 +330,11 @@ void xen_pcibk_do_op(struct work_struct *data)
container_of(data, struct xen_pcibk_device, op_work);
struct pci_dev *dev;
struct xen_pcibk_dev_data *dev_data = NULL;
- struct xen_pci_op *op = &pdev->sh_info->op;
+ struct xen_pci_op *op = &pdev->op;
int test_intx = 0;
+ *op = pdev->sh_info->op;
+ barrier();
dev = xen_pcibk_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
if (dev == NULL)
@@ -342,6 +376,17 @@ void xen_pcibk_do_op(struct work_struct *data)
if ((dev_data->enable_intx != test_intx))
xen_pcibk_control_isr(dev, 0 /* no reset */);
}
+ pdev->sh_info->op.err = op->err;
+ pdev->sh_info->op.value = op->value;
+#ifdef CONFIG_PCI_MSI
+ if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
+ unsigned int i;
+
+ for (i = 0; i < op->value; i++)
+ pdev->sh_info->op.msix_entries[i].vector =
+ op->msix_entries[i].vector;
+ }
+#endif
/* Tell the driver domain that we're done. */
wmb();
clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
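
The pciback_ops.c change stops working directly on the request that lives in the page shared with the frontend: the request is copied once into the backend-private pdev->op, validation and processing use only that private copy, and just the result fields (err, value and, for MSI-X, the vectors) are written back. The xen-scsiback hunk further below applies the same idea by switching from dereferencing RING_GET_REQUEST to RING_COPY_REQUEST. A minimal userspace sketch of the single-fetch pattern (struct pci_op here and the bound of 64 are illustrative only):

#include <stdint.h>
#include <stdio.h>

struct pci_op {
	uint32_t cmd;
	uint32_t value;
	int32_t  err;
};

/* Stands in for the request slot in the page shared with an untrusted frontend. */
static struct pci_op shared_op;

static void handle_op(void)
{
	/* Fetch the request exactly once into backend-private memory; the kernel
	 * hunk additionally uses barrier() so the compiler cannot re-read the
	 * shared copy after validation. */
	struct pci_op op = shared_op;

	if (op.value > 64)		/* validate the private copy ... */
		op.err = -1;
	else
		op.err = 0;		/* ... and act on it; it cannot change underneath us */

	/* Publish only the result fields; never hand back pointers into private state. */
	shared_op.err = op.err;
	shared_op.value = op.value;
}

int main(void)
{
	shared_op.cmd = 1;
	shared_op.value = 8;
	handle_op();
	printf("err=%d value=%u\n", (int)shared_op.err, (unsigned)shared_op.value);
	return 0;
}
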
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 98bc345f296e..4843741e703a 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -44,7 +44,6 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev);
pdev->xdev = xdev;
- dev_set_drvdata(&xdev->dev, pdev);
mutex_init(&pdev->dev_lock);
@@ -58,6 +57,9 @@ static struct xen_pcibk_device *alloc_pdev(struct xenbus_device *xdev)
kfree(pdev);
pdev = NULL;
}
+
+ dev_set_drvdata(&xdev->dev, pdev);
+
out:
return pdev;
}
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 43bcae852546..ad4eb1024d1f 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -726,7 +726,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info)
if (!pending_req)
return 1;
- ring_req = *RING_GET_REQUEST(ring, rc);
+ RING_COPY_REQUEST(ring, rc, &ring_req);
ring->req_cons = ++rc;
err = prepare_pending_reqs(info, &ring_req, pending_req);
OpenPOWER on IntegriCloud