author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2018-02-26 15:32:00 +0100
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2018-02-26 15:32:00 +0100
commit    36e9f7203e05090031a5356be043a9be342e383c (patch)
tree      27877737c3bb3c00ee9caeece7d671f65cf91252 /drivers
parent    721dfe4133a9a41e4b4a74e5b41089b7dac8f539 (diff)
parent    4a3928c6f8a53fa1aed28ccba227742486e8ddcb (diff)
Merge 4.16-rc3 into staging-next
We want the IIO/Staging fixes in here, and to resolve a merge problem with the move of the fsl-mc code.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers')
-rw-r--r-- drivers/acpi/bus.c | 75
-rw-r--r-- drivers/acpi/ec.c | 6
-rw-r--r-- drivers/acpi/property.c | 4
-rw-r--r-- drivers/acpi/spcr.c | 1
-rw-r--r-- drivers/android/binder.c | 29
-rw-r--r-- drivers/base/core.c | 3
-rw-r--r-- drivers/base/power/wakeirq.c | 6
-rw-r--r-- drivers/base/property.c | 5
-rw-r--r-- drivers/char/hw_random/via-rng.c | 2
-rw-r--r-- drivers/clocksource/mips-gic-timer.c | 4
-rw-r--r-- drivers/clocksource/timer-sun5i.c | 2
-rw-r--r-- drivers/cpufreq/acpi-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/longhaul.c | 6
-rw-r--r-- drivers/cpufreq/p4-clockmod.c | 2
-rw-r--r-- drivers/cpufreq/powernow-k7.c | 2
-rw-r--r-- drivers/cpufreq/speedstep-centrino.c | 4
-rw-r--r-- drivers/cpufreq/speedstep-lib.c | 6
-rw-r--r-- drivers/crypto/caam/ctrl.c | 8
-rw-r--r-- drivers/crypto/padlock-aes.c | 2
-rw-r--r-- drivers/crypto/s5p-sss.c | 12
-rw-r--r-- drivers/crypto/sunxi-ss/sun4i-ss-prng.c | 6
-rw-r--r-- drivers/crypto/talitos.c | 4
-rw-r--r-- drivers/edac/amd64_edac.c | 2
-rw-r--r-- drivers/extcon/extcon-axp288.c | 36
-rw-r--r-- drivers/extcon/extcon-intel-int3496.c | 3
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 1
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 58
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 2
-rw-r--r-- drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 2
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_mode.c | 40
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c | 15
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 21
-rw-r--r-- drivers/gpu/drm/drm_mm.c | 21
-rw-r--r-- drivers/gpu/drm/drm_probe_helper.c | 20
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_g2d.c | 12
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_rotator.h | 19
-rw-r--r-- drivers/gpu/drm/exynos/exynos_hdmi.c | 7
-rw-r--r-- drivers/gpu/drm/exynos/regs-fimc.h | 2
-rw-r--r-- drivers/gpu/drm/exynos/regs-hdmi.h | 2
-rw-r--r-- drivers/gpu/drm/i915/gvt/kvmgt.c | 51
-rw-r--r-- drivers/gpu/drm/i915/gvt/mmio_context.c | 1
-rw-r--r-- drivers/gpu/drm/i915/gvt/trace.h | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 14
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_context.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_cflgt3.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_oa_cnl.c | 4
-rw-r--r-- drivers/gpu/drm/i915/i915_pmu.c | 231
-rw-r--r-- drivers/gpu/drm/i915/i915_pmu.h | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.c | 105
-rw-r--r-- drivers/gpu/drm/i915/intel_breadcrumbs.c | 29
-rw-r--r-- drivers/gpu/drm/i915/intel_cdclk.c | 8
-rw-r--r-- drivers/gpu/drm/i915/intel_engine_cs.c | 24
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.h | 14
-rw-r--r-- drivers/gpu/drm/meson/meson_crtc.c | 6
-rw-r--r-- drivers/gpu/drm/meson/meson_drv.h | 3
-rw-r--r-- drivers/gpu/drm/meson/meson_plane.c | 7
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_connector.c | 18
-rw-r--r-- drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 74
-rw-r--r-- drivers/gpu/drm/scheduler/gpu_scheduler.c | 2
-rw-r--r-- drivers/gpu/ipu-v3/ipu-common.c | 4
-rw-r--r-- drivers/gpu/ipu-v3/ipu-cpmem.c | 2
-rw-r--r-- drivers/gpu/ipu-v3/ipu-csi.c | 2
-rw-r--r-- drivers/gpu/ipu-v3/ipu-pre.c | 3
-rw-r--r-- drivers/gpu/ipu-v3/ipu-prg.c | 3
-rw-r--r-- drivers/hid/hid-ids.h | 3
-rw-r--r-- drivers/hid/hid-quirks.c | 3
-rw-r--r-- drivers/hwmon/coretemp.c | 6
-rw-r--r-- drivers/hwmon/hwmon-vid.c | 2
-rw-r--r-- drivers/hwmon/k10temp.c | 7
-rw-r--r-- drivers/hwmon/k8temp.c | 2
-rw-r--r-- drivers/i2c/busses/Kconfig | 2
-rw-r--r-- drivers/i2c/busses/i2c-bcm2835.c | 21
-rw-r--r-- drivers/i2c/busses/i2c-designware-master.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-i801.c | 1
-rw-r--r-- drivers/i2c/busses/i2c-sirf.c | 4
-rw-r--r-- drivers/iio/adc/aspeed_adc.c | 7
-rw-r--r-- drivers/iio/adc/stm32-adc.c | 7
-rw-r--r-- drivers/iio/imu/adis_trigger.c | 7
-rw-r--r-- drivers/iio/industrialio-buffer.c | 2
-rw-r--r-- drivers/iio/proximity/Kconfig | 2
-rw-r--r-- drivers/infiniband/core/core_priv.h | 7
-rw-r--r-- drivers/infiniband/core/rdma_core.c | 38
-rw-r--r-- drivers/infiniband/core/restrack.c | 23
-rw-r--r-- drivers/infiniband/core/uverbs_cmd.c | 50
-rw-r--r-- drivers/infiniband/core/uverbs_ioctl.c | 3
-rw-r--r-- drivers/infiniband/core/uverbs_ioctl_merge.c | 18
-rw-r--r-- drivers/infiniband/core/uverbs_main.c | 29
-rw-r--r-- drivers/infiniband/core/uverbs_std_types.c | 12
-rw-r--r-- drivers/infiniband/core/verbs.c | 3
-rw-r--r-- drivers/infiniband/hw/bnxt_re/bnxt_re.h | 2
-rw-r--r-- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 54
-rw-r--r-- drivers/infiniband/hw/bnxt_re/ib_verbs.h | 2
-rw-r--r-- drivers/infiniband/hw/bnxt_re/main.c | 12
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_fp.c | 21
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_fp.h | 4
-rw-r--r-- drivers/infiniband/hw/bnxt_re/qplib_sp.c | 14
-rw-r--r-- drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c | 4
-rw-r--r-- drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c | 4
-rw-r--r-- drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c | 4
-rw-r--r-- drivers/infiniband/ulp/ipoib/ipoib_fs.c | 2
-rw-r--r-- drivers/iommu/intel-svm.c | 2
-rw-r--r-- drivers/irqchip/irq-bcm7038-l1.c | 3
-rw-r--r-- drivers/irqchip/irq-bcm7120-l2.c | 3
-rw-r--r-- drivers/irqchip/irq-brcmstb-l2.c | 3
-rw-r--r-- drivers/irqchip/irq-gic-v2m.c | 46
-rw-r--r-- drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c | 2
-rw-r--r-- drivers/irqchip/irq-gic-v3-its-pci-msi.c | 2
-rw-r--r-- drivers/irqchip/irq-gic-v3-its-platform-msi.c | 2
-rw-r--r-- drivers/irqchip/irq-gic-v3-its.c | 2
-rw-r--r-- drivers/irqchip/irq-gic-v3.c | 4
-rw-r--r-- drivers/irqchip/irq-mips-gic.c | 2
-rw-r--r-- drivers/macintosh/macio_asic.c | 1
-rw-r--r-- drivers/md/dm.c | 3
-rw-r--r-- drivers/md/raid1.c | 2
-rw-r--r-- drivers/message/fusion/mptctl.c | 2
-rw-r--r-- drivers/misc/mei/bus.c | 6
-rw-r--r-- drivers/misc/mei/client.c | 6
-rw-r--r-- drivers/misc/mei/hw-me-regs.h | 5
-rw-r--r-- drivers/misc/mei/pci-me.c | 5
-rw-r--r-- drivers/misc/ocxl/file.c | 8
-rw-r--r-- drivers/mmc/host/bcm2835.c | 3
-rw-r--r-- drivers/mmc/host/meson-gx-mmc.c | 19
-rw-r--r-- drivers/mtd/nand/Kconfig | 2
-rw-r--r-- drivers/mtd/nand/vf610_nfc.c | 6
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 2
-rw-r--r-- drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | 14
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 35
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.h | 5
-rw-r--r-- drivers/net/ethernet/cavium/common/cavium_ptp.c | 2
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_main.c | 110
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 11
-rw-r--r-- drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 4
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 25
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 10
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 23
-rw-r--r-- drivers/net/ethernet/ibm/ibmvnic.c | 59
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2.c | 11
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 49
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 35
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c | 68
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c | 5
-rw-r--r-- drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c | 2
-rw-r--r-- drivers/net/ethernet/renesas/ravb_main.c | 6
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 18
-rw-r--r-- drivers/net/ethernet/smsc/Kconfig | 2
-rw-r--r-- drivers/net/macvlan.c | 2
-rw-r--r-- drivers/net/phy/phy_device.c | 2
-rw-r--r-- drivers/net/thunderbolt.c | 19
-rw-r--r-- drivers/net/tun.c | 16
-rw-r--r-- drivers/net/usb/smsc75xx.c | 7
-rw-r--r-- drivers/net/virtio_net.c | 58
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 2
-rw-r--r-- drivers/nvme/host/core.c | 45
-rw-r--r-- drivers/nvme/host/fabrics.h | 9
-rw-r--r-- drivers/nvme/host/fc.c | 157
-rw-r--r-- drivers/nvme/host/nvme.h | 3
-rw-r--r-- drivers/nvme/host/pci.c | 39
-rw-r--r-- drivers/nvme/host/rdma.c | 16
-rw-r--r-- drivers/nvme/target/io-cmd.c | 7
-rw-r--r-- drivers/of/property.c | 4
-rw-r--r-- drivers/opp/cpu.c | 2
-rw-r--r-- drivers/pci/quirks.c | 39
-rw-r--r-- drivers/perf/arm_pmu.c | 138
-rw-r--r-- drivers/perf/arm_pmu_acpi.c | 61
-rw-r--r-- drivers/perf/arm_pmu_platform.c | 37
-rw-r--r-- drivers/platform/x86/dell-laptop.c | 20
-rw-r--r-- drivers/platform/x86/ideapad-laptop.c | 2
-rw-r--r-- drivers/platform/x86/wmi.c | 2
-rw-r--r-- drivers/s390/virtio/virtio_ccw.c | 29
-rw-r--r-- drivers/scsi/Makefile | 1
-rw-r--r-- drivers/scsi/aacraid/linit.c | 4
-rw-r--r-- drivers/scsi/aic7xxx/aiclib.c | 34
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_io.c | 1
-rw-r--r-- drivers/scsi/csiostor/csio_lnode.c | 2
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_alua.c | 5
-rw-r--r-- drivers/scsi/ibmvscsi/ibmvfc.h | 2
-rw-r--r-- drivers/scsi/mpt3sas/mpt3sas_base.c | 5
-rw-r--r-- drivers/scsi/qedi/qedi_main.c | 55
-rw-r--r-- drivers/scsi/qla2xxx/qla_init.c | 23
-rw-r--r-- drivers/scsi/qla2xxx/qla_iocb.c | 7
-rw-r--r-- drivers/scsi/qla2xxx/qla_isr.c | 6
-rw-r--r-- drivers/scsi/qla2xxx/qla_os.c | 2
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c | 2
-rw-r--r-- drivers/scsi/qla4xxx/ql4_def.h | 2
-rw-r--r-- drivers/scsi/qla4xxx/ql4_os.c | 46
-rw-r--r-- drivers/scsi/storvsc_drv.c | 2
-rw-r--r-- drivers/scsi/sym53c8xx_2/sym_hipd.c | 2
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 2
-rw-r--r-- drivers/soc/imx/gpc.c | 2
-rw-r--r-- drivers/staging/android/ashmem.c | 19
-rw-r--r-- drivers/staging/android/ion/ion_cma_heap.c | 17
-rw-r--r-- drivers/staging/iio/adc/ad7192.c | 27
-rw-r--r-- drivers/staging/iio/impedance-analyzer/ad5933.c | 4
-rw-r--r-- drivers/usb/Kconfig | 6
-rw-r--r-- drivers/usb/class/cdc-acm.c | 9
-rw-r--r-- drivers/usb/core/quirks.c | 3
-rw-r--r-- drivers/usb/dwc2/gadget.c | 26
-rw-r--r-- drivers/usb/dwc3/core.c | 86
-rw-r--r-- drivers/usb/dwc3/core.h | 21
-rw-r--r-- drivers/usb/dwc3/dwc3-of-simple.c | 1
-rw-r--r-- drivers/usb/dwc3/dwc3-omap.c | 16
-rw-r--r-- drivers/usb/dwc3/ep0.c | 7
-rw-r--r-- drivers/usb/dwc3/gadget.c | 2
-rw-r--r-- drivers/usb/gadget/function/f_fs.c | 44
-rw-r--r-- drivers/usb/gadget/function/f_uac2.c | 2
-rw-r--r-- drivers/usb/gadget/udc/Kconfig | 1
-rw-r--r-- drivers/usb/gadget/udc/bdc/bdc_pci.c | 1
-rw-r--r-- drivers/usb/gadget/udc/core.c | 2
-rw-r--r-- drivers/usb/gadget/udc/fsl_udc_core.c | 4
-rw-r--r-- drivers/usb/gadget/udc/renesas_usb3.c | 2
-rw-r--r-- drivers/usb/host/Kconfig | 8
-rw-r--r-- drivers/usb/host/ehci-hub.c | 4
-rw-r--r-- drivers/usb/host/ehci-q.c | 12
-rw-r--r-- drivers/usb/host/ohci-hcd.c | 10
-rw-r--r-- drivers/usb/host/ohci-hub.c | 4
-rw-r--r-- drivers/usb/host/ohci-q.c | 17
-rw-r--r-- drivers/usb/host/pci-quirks.c | 109
-rw-r--r-- drivers/usb/host/pci-quirks.h | 5
-rw-r--r-- drivers/usb/host/xhci-debugfs.c | 4
-rw-r--r-- drivers/usb/host/xhci-hub.c | 25
-rw-r--r-- drivers/usb/host/xhci-pci.c | 11
-rw-r--r-- drivers/usb/host/xhci.c | 10
-rw-r--r-- drivers/usb/host/xhci.h | 2
-rw-r--r-- drivers/usb/misc/ldusb.c | 6
-rw-r--r-- drivers/usb/musb/musb_core.c | 3
-rw-r--r-- drivers/usb/musb/musb_host.c | 8
-rw-r--r-- drivers/usb/phy/phy-mxs-usb.c | 3
-rw-r--r-- drivers/usb/renesas_usbhs/fifo.c | 5
-rw-r--r-- drivers/usb/serial/option.c | 7
-rw-r--r-- drivers/usb/usbip/stub_dev.c | 3
-rw-r--r-- drivers/usb/usbip/vhci_hcd.c | 2
-rw-r--r-- drivers/video/fbdev/geode/video_gx.c | 2
-rw-r--r-- drivers/xen/pvcalls-front.c | 197
-rw-r--r-- drivers/xen/tmem.c | 4
-rw-r--r-- drivers/xen/xenbus/xenbus.h | 1
-rw-r--r-- drivers/xen/xenbus/xenbus_comms.c | 1
-rw-r--r-- drivers/xen/xenbus/xenbus_xs.c | 3
251 files changed, 2281 insertions, 1568 deletions
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 676c9788e1c8..0dad0bd9327b 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -660,13 +660,15 @@ struct acpi_device *acpi_companion_match(const struct device *dev)
* acpi_of_match_device - Match device object using the "compatible" property.
* @adev: ACPI device object to match.
* @of_match_table: List of device IDs to match against.
+ * @of_id: OF ID if matched
*
* If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of
* identifiers and a _DSD object with the "compatible" property, use that
* property to match against the given list of identifiers.
*/
static bool acpi_of_match_device(struct acpi_device *adev,
- const struct of_device_id *of_match_table)
+ const struct of_device_id *of_match_table,
+ const struct of_device_id **of_id)
{
const union acpi_object *of_compatible, *obj;
int i, nval;
@@ -690,8 +692,11 @@ static bool acpi_of_match_device(struct acpi_device *adev,
const struct of_device_id *id;
for (id = of_match_table; id->compatible[0]; id++)
- if (!strcasecmp(obj->string.pointer, id->compatible))
+ if (!strcasecmp(obj->string.pointer, id->compatible)) {
+ if (of_id)
+ *of_id = id;
return true;
+ }
}
return false;
@@ -762,10 +767,11 @@ static bool __acpi_match_device_cls(const struct acpi_device_id *id,
return true;
}
-static const struct acpi_device_id *__acpi_match_device(
- struct acpi_device *device,
- const struct acpi_device_id *ids,
- const struct of_device_id *of_ids)
+static bool __acpi_match_device(struct acpi_device *device,
+ const struct acpi_device_id *acpi_ids,
+ const struct of_device_id *of_ids,
+ const struct acpi_device_id **acpi_id,
+ const struct of_device_id **of_id)
{
const struct acpi_device_id *id;
struct acpi_hardware_id *hwid;
@@ -775,30 +781,32 @@ static const struct acpi_device_id *__acpi_match_device(
* driver for it.
*/
if (!device || !device->status.present)
- return NULL;
+ return false;
list_for_each_entry(hwid, &device->pnp.ids, list) {
/* First, check the ACPI/PNP IDs provided by the caller. */
- for (id = ids; id->id[0] || id->cls; id++) {
- if (id->id[0] && !strcmp((char *) id->id, hwid->id))
- return id;
- else if (id->cls && __acpi_match_device_cls(id, hwid))
- return id;
+ if (acpi_ids) {
+ for (id = acpi_ids; id->id[0] || id->cls; id++) {
+ if (id->id[0] && !strcmp((char *)id->id, hwid->id))
+ goto out_acpi_match;
+ if (id->cls && __acpi_match_device_cls(id, hwid))
+ goto out_acpi_match;
+ }
}
/*
* Next, check ACPI_DT_NAMESPACE_HID and try to match the
* "compatible" property if found.
- *
- * The id returned by the below is not valid, but the only
- * caller passing non-NULL of_ids here is only interested in
- * whether or not the return value is NULL.
*/
- if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id)
- && acpi_of_match_device(device, of_ids))
- return id;
+ if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id))
+ return acpi_of_match_device(device, of_ids, of_id);
}
- return NULL;
+ return false;
+
+out_acpi_match:
+ if (acpi_id)
+ *acpi_id = id;
+ return true;
}
/**
@@ -815,32 +823,29 @@ static const struct acpi_device_id *__acpi_match_device(
const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
const struct device *dev)
{
- return __acpi_match_device(acpi_companion_match(dev), ids, NULL);
+ const struct acpi_device_id *id = NULL;
+
+ __acpi_match_device(acpi_companion_match(dev), ids, NULL, &id, NULL);
+ return id;
}
EXPORT_SYMBOL_GPL(acpi_match_device);
-void *acpi_get_match_data(const struct device *dev)
+const void *acpi_device_get_match_data(const struct device *dev)
{
const struct acpi_device_id *match;
- if (!dev->driver)
- return NULL;
-
- if (!dev->driver->acpi_match_table)
- return NULL;
-
match = acpi_match_device(dev->driver->acpi_match_table, dev);
if (!match)
return NULL;
- return (void *)match->driver_data;
+ return (const void *)match->driver_data;
}
-EXPORT_SYMBOL_GPL(acpi_get_match_data);
+EXPORT_SYMBOL_GPL(acpi_device_get_match_data);
int acpi_match_device_ids(struct acpi_device *device,
const struct acpi_device_id *ids)
{
- return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT;
+ return __acpi_match_device(device, ids, NULL, NULL, NULL) ? 0 : -ENOENT;
}
EXPORT_SYMBOL(acpi_match_device_ids);
@@ -849,10 +854,12 @@ bool acpi_driver_match_device(struct device *dev,
{
if (!drv->acpi_match_table)
return acpi_of_match_device(ACPI_COMPANION(dev),
- drv->of_match_table);
+ drv->of_match_table,
+ NULL);
- return !!__acpi_match_device(acpi_companion_match(dev),
- drv->acpi_match_table, drv->of_match_table);
+ return __acpi_match_device(acpi_companion_match(dev),
+ drv->acpi_match_table, drv->of_match_table,
+ NULL, NULL);
}
EXPORT_SYMBOL_GPL(acpi_driver_match_device);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d9f38c645e4a..30a572956557 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1927,6 +1927,9 @@ static int acpi_ec_suspend_noirq(struct device *dev)
ec->reference_count >= 1)
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
+ if (acpi_sleep_no_ec_events())
+ acpi_ec_enter_noirq(ec);
+
return 0;
}
@@ -1934,6 +1937,9 @@ static int acpi_ec_resume_noirq(struct device *dev)
{
struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
+ if (acpi_sleep_no_ec_events())
+ acpi_ec_leave_noirq(ec);
+
if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
ec->reference_count >= 1)
acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
index 466d1503aba0..5815356ea6ad 100644
--- a/drivers/acpi/property.c
+++ b/drivers/acpi/property.c
@@ -1271,11 +1271,11 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
return 0;
}
-static void *
+static const void *
acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
const struct device *dev)
{
- return acpi_get_match_data(dev);
+ return acpi_device_get_match_data(dev);
}
#define DECLARE_ACPI_FWNODE_OPS(ops) \
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 89e97d21a89c..9d52743080a4 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -115,6 +115,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
table->serial_port.access_width))) {
default:
pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n");
+ /* fall through */
case 8:
iotype = "mmio";
break;
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 15e3d3c2260d..764b63a5aade 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1991,8 +1991,14 @@ static void binder_send_failed_reply(struct binder_transaction *t,
&target_thread->reply_error.work);
wake_up_interruptible(&target_thread->wait);
} else {
- WARN(1, "Unexpected reply error: %u\n",
- target_thread->reply_error.cmd);
+ /*
+ * Cannot get here for normal operation, but
+ * we can if multiple synchronous transactions
+ * are sent without blocking for responses.
+ * Just ignore the 2nd error in this case.
+ */
+ pr_warn("Unexpected reply error: %u\n",
+ target_thread->reply_error.cmd);
}
binder_inner_proc_unlock(target_thread->proc);
binder_thread_dec_tmpref(target_thread);
@@ -2193,7 +2199,7 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
int debug_id = buffer->debug_id;
binder_debug(BINDER_DEBUG_TRANSACTION,
- "%d buffer release %d, size %zd-%zd, failed at %p\n",
+ "%d buffer release %d, size %zd-%zd, failed at %pK\n",
proc->pid, buffer->debug_id,
buffer->data_size, buffer->offsets_size, failed_at);
@@ -3705,7 +3711,7 @@ static int binder_thread_write(struct binder_proc *proc,
}
}
binder_debug(BINDER_DEBUG_DEAD_BINDER,
- "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
+ "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
proc->pid, thread->pid, (u64)cookie,
death);
if (death == NULL) {
@@ -4376,6 +4382,15 @@ static int binder_thread_release(struct binder_proc *proc,
binder_inner_proc_unlock(thread->proc);
+ /*
+ * This is needed to avoid races between wake_up_poll() above and
+ * and ep_remove_waitqueue() called for other reasons (eg the epoll file
+ * descriptor being closed); ep_remove_waitqueue() holds an RCU read
+ * lock, so we can be sure it's done after calling synchronize_rcu().
+ */
+ if (thread->looper & BINDER_LOOPER_STATE_POLL)
+ synchronize_rcu();
+
if (send_reply)
binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
binder_release_work(proc, &thread->todo);
@@ -4391,6 +4406,8 @@ static __poll_t binder_poll(struct file *filp,
bool wait_for_proc_work;
thread = binder_get_thread(proc);
+ if (!thread)
+ return POLLERR;
binder_inner_proc_lock(thread->proc);
thread->looper |= BINDER_LOOPER_STATE_POLL;
@@ -5034,7 +5051,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
spin_lock(&t->lock);
to_proc = t->to_proc;
seq_printf(m,
- "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+ "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
prefix, t->debug_id, t,
t->from ? t->from->proc->pid : 0,
t->from ? t->from->pid : 0,
@@ -5058,7 +5075,7 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
}
if (buffer->target_node)
seq_printf(m, " node %d", buffer->target_node->debug_id);
- seq_printf(m, " size %zd:%zd data %p\n",
+ seq_printf(m, " size %zd:%zd data %pK\n",
buffer->data_size, buffer->offsets_size,
buffer->data);
}
diff --git a/drivers/base/core.c b/drivers/base/core.c
index b2261f92f2f1..5847364f25d9 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -310,6 +310,9 @@ static void __device_link_del(struct device_link *link)
dev_info(link->consumer, "Dropping the link to %s\n",
dev_name(link->supplier));
+ if (link->flags & DL_FLAG_PM_RUNTIME)
+ pm_runtime_drop_link(link->consumer);
+
list_del(&link->s_node);
list_del(&link->c_node);
device_link_free(link);
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index a8ac86e4d79e..6637fc319269 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -321,7 +321,8 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq)
return;
if (device_may_wakeup(wirq->dev)) {
- if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)
+ if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
+ !pm_runtime_status_suspended(wirq->dev))
enable_irq(wirq->irq);
enable_irq_wake(wirq->irq);
@@ -343,7 +344,8 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
if (device_may_wakeup(wirq->dev)) {
disable_irq_wake(wirq->irq);
- if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)
+ if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
+ !pm_runtime_status_suspended(wirq->dev))
disable_irq_nosync(wirq->irq);
}
}
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 302236281d83..8f205f6461ed 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -1410,9 +1410,8 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
}
EXPORT_SYMBOL(fwnode_graph_parse_endpoint);
-void *device_get_match_data(struct device *dev)
+const void *device_get_match_data(struct device *dev)
{
- return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data,
- dev);
+ return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, dev);
}
EXPORT_SYMBOL_GPL(device_get_match_data);
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index d1f5bb534e0e..6e9df558325b 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -162,7 +162,7 @@ static int via_rng_init(struct hwrng *rng)
/* Enable secondary noise source on CPUs where it is present. */
/* Nehemiah stepping 8 and higher */
- if ((c->x86_model == 9) && (c->x86_mask > 7))
+ if ((c->x86_model == 9) && (c->x86_stepping > 7))
lo |= VIA_NOISESRC2;
/* Esther */
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index a04808a21d4e..65e18c86d9b9 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -205,12 +205,12 @@ static int __init gic_clocksource_of_init(struct device_node *node)
} else if (of_property_read_u32(node, "clock-frequency",
&gic_frequency)) {
pr_err("GIC frequency not specified.\n");
- return -EINVAL;;
+ return -EINVAL;
}
gic_timer_irq = irq_of_parse_and_map(node, 0);
if (!gic_timer_irq) {
pr_err("GIC timer IRQ not specified.\n");
- return -EINVAL;;
+ return -EINVAL;
}
ret = __gic_clocksource_init();
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c
index 2a3fe83ec337..3b56ea3f52af 100644
--- a/drivers/clocksource/timer-sun5i.c
+++ b/drivers/clocksource/timer-sun5i.c
@@ -334,7 +334,7 @@ static int __init sun5i_timer_init(struct device_node *node)
timer_base = of_io_request_and_map(node, 0, of_node_full_name(node));
if (IS_ERR(timer_base)) {
pr_err("Can't map registers\n");
- return PTR_ERR(timer_base);;
+ return PTR_ERR(timer_base);
}
irq = irq_of_parse_and_map(node, 0);
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 3a2ca0f79daf..d0c34df0529c 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -629,7 +629,7 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
if (c->x86_vendor == X86_VENDOR_INTEL) {
if ((c->x86 == 15) &&
(c->x86_model == 6) &&
- (c->x86_mask == 8)) {
+ (c->x86_stepping == 8)) {
pr_info("Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq\n");
return -ENODEV;
}
diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c
index 942632a27b50..f730b6528c18 100644
--- a/drivers/cpufreq/longhaul.c
+++ b/drivers/cpufreq/longhaul.c
@@ -775,7 +775,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
break;
case 7:
- switch (c->x86_mask) {
+ switch (c->x86_stepping) {
case 0:
longhaul_version = TYPE_LONGHAUL_V1;
cpu_model = CPU_SAMUEL2;
@@ -787,7 +787,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
break;
case 1 ... 15:
longhaul_version = TYPE_LONGHAUL_V2;
- if (c->x86_mask < 8) {
+ if (c->x86_stepping < 8) {
cpu_model = CPU_SAMUEL2;
cpuname = "C3 'Samuel 2' [C5B]";
} else {
@@ -814,7 +814,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy)
numscales = 32;
memcpy(mults, nehemiah_mults, sizeof(nehemiah_mults));
memcpy(eblcr, nehemiah_eblcr, sizeof(nehemiah_eblcr));
- switch (c->x86_mask) {
+ switch (c->x86_stepping) {
case 0 ... 1:
cpu_model = CPU_NEHEMIAH;
cpuname = "C3 'Nehemiah A' [C5XLOE]";
diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
index fd77812313f3..a25741b1281b 100644
--- a/drivers/cpufreq/p4-clockmod.c
+++ b/drivers/cpufreq/p4-clockmod.c
@@ -168,7 +168,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
#endif
/* Errata workaround */
- cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
+ cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_stepping;
switch (cpuid) {
case 0x0f07:
case 0x0f0a:
diff --git a/drivers/cpufreq/powernow-k7.c b/drivers/cpufreq/powernow-k7.c
index 80ac313e6c59..302e9ce793a0 100644
--- a/drivers/cpufreq/powernow-k7.c
+++ b/drivers/cpufreq/powernow-k7.c
@@ -131,7 +131,7 @@ static int check_powernow(void)
return 0;
}
- if ((c->x86_model == 6) && (c->x86_mask == 0)) {
+ if ((c->x86_model == 6) && (c->x86_stepping == 0)) {
pr_info("K7 660[A0] core detected, enabling errata workarounds\n");
have_a0 = 1;
}
diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
index 41bc5397f4bb..4fa5adf16c70 100644
--- a/drivers/cpufreq/speedstep-centrino.c
+++ b/drivers/cpufreq/speedstep-centrino.c
@@ -37,7 +37,7 @@ struct cpu_id
{
__u8 x86; /* CPU family */
__u8 x86_model; /* model */
- __u8 x86_mask; /* stepping */
+ __u8 x86_stepping; /* stepping */
};
enum {
@@ -277,7 +277,7 @@ static int centrino_verify_cpu_id(const struct cpuinfo_x86 *c,
{
if ((c->x86 == x->x86) &&
(c->x86_model == x->x86_model) &&
- (c->x86_mask == x->x86_mask))
+ (c->x86_stepping == x->x86_stepping))
return 1;
return 0;
}
diff --git a/drivers/cpufreq/speedstep-lib.c b/drivers/cpufreq/speedstep-lib.c
index 8085ec9000d1..e3a9962ee410 100644
--- a/drivers/cpufreq/speedstep-lib.c
+++ b/drivers/cpufreq/speedstep-lib.c
@@ -272,9 +272,9 @@ unsigned int speedstep_detect_processor(void)
ebx = cpuid_ebx(0x00000001);
ebx &= 0x000000FF;
- pr_debug("ebx value is %x, x86_mask is %x\n", ebx, c->x86_mask);
+ pr_debug("ebx value is %x, x86_stepping is %x\n", ebx, c->x86_stepping);
- switch (c->x86_mask) {
+ switch (c->x86_stepping) {
case 4:
/*
* B-stepping [M-P4-M]
@@ -361,7 +361,7 @@ unsigned int speedstep_detect_processor(void)
msr_lo, msr_hi);
if ((msr_hi & (1<<18)) &&
(relaxed_check ? 1 : (msr_hi & (3<<24)))) {
- if (c->x86_mask == 0x01) {
+ if (c->x86_stepping == 0x01) {
pr_debug("early PIII version\n");
return SPEEDSTEP_CPU_PIII_C_EARLY;
} else
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 75d280cb2dc0..e843cf410373 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -228,12 +228,16 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
* without any error (HW optimizations for later
* CAAM eras), then try again.
*/
+ if (ret)
+ break;
+
rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
- !(rdsta_val & (1 << sh_idx)))
+ !(rdsta_val & (1 << sh_idx))) {
ret = -EAGAIN;
- if (ret)
break;
+ }
+
dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
/* Clear the contents before recreating the descriptor */
memset(desc, 0x00, CAAM_CMD_SZ * 7);
diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
index 4b6642a25df5..1c6cbda56afe 100644
--- a/drivers/crypto/padlock-aes.c
+++ b/drivers/crypto/padlock-aes.c
@@ -512,7 +512,7 @@ static int __init padlock_init(void)
printk(KERN_NOTICE PFX "Using VIA PadLock ACE for AES algorithm.\n");
- if (c->x86 == 6 && c->x86_model == 15 && c->x86_mask == 2) {
+ if (c->x86 == 6 && c->x86_model == 15 && c->x86_stepping == 2) {
ecb_fetch_blocks = MAX_ECB_FETCH_BLOCKS;
cbc_fetch_blocks = MAX_CBC_FETCH_BLOCKS;
printk(KERN_NOTICE PFX "VIA Nano stepping 2 detected: enabling workaround.\n");
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 188f44b7eb27..5d64c08b7f47 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -1922,15 +1922,21 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
uint32_t aes_control;
unsigned long flags;
int err;
+ u8 *iv;
aes_control = SSS_AES_KEY_CHANGE_MODE;
if (mode & FLAGS_AES_DECRYPT)
aes_control |= SSS_AES_MODE_DECRYPT;
- if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC)
+ if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
aes_control |= SSS_AES_CHAIN_MODE_CBC;
- else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR)
+ iv = req->info;
+ } else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
aes_control |= SSS_AES_CHAIN_MODE_CTR;
+ iv = req->info;
+ } else {
+ iv = NULL; /* AES_ECB */
+ }
if (dev->ctx->keylen == AES_KEYSIZE_192)
aes_control |= SSS_AES_KEY_SIZE_192;
@@ -1961,7 +1967,7 @@ static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
goto outdata_error;
SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
- s5p_set_aes(dev, dev->ctx->aes_key, req->info, dev->ctx->keylen);
+ s5p_set_aes(dev, dev->ctx->aes_key, iv, dev->ctx->keylen);
s5p_set_dma_indata(dev, dev->sg_src);
s5p_set_dma_outdata(dev, dev->sg_dst);
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
index 0d01d1624252..63d636424161 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-prng.c
@@ -28,7 +28,7 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
algt = container_of(alg, struct sun4i_ss_alg_template, alg.rng);
ss = algt->ss;
- spin_lock(&ss->slock);
+ spin_lock_bh(&ss->slock);
writel(mode, ss->base + SS_CTL);
@@ -51,6 +51,6 @@ int sun4i_ss_prng_generate(struct crypto_rng *tfm, const u8 *src,
}
writel(0, ss->base + SS_CTL);
- spin_unlock(&ss->slock);
- return dlen;
+ spin_unlock_bh(&ss->slock);
+ return 0;
}
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 9c80e0cb1664..6882fa2f8bad 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1138,6 +1138,10 @@ static int talitos_sg_map(struct device *dev, struct scatterlist *src,
struct talitos_private *priv = dev_get_drvdata(dev);
bool is_sec1 = has_ftr_sec1(priv);
+ if (!src) {
+ to_talitos_ptr(ptr, 0, 0, is_sec1);
+ return 1;
+ }
if (sg_count == 1) {
to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
return sg_count;
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 8b16ec595fa7..329cb96f886f 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -3147,7 +3147,7 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
struct amd64_family_type *fam_type = NULL;
pvt->ext_model = boot_cpu_data.x86_model >> 4;
- pvt->stepping = boot_cpu_data.x86_mask;
+ pvt->stepping = boot_cpu_data.x86_stepping;
pvt->model = boot_cpu_data.x86_model;
pvt->fam = boot_cpu_data.x86;
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index 0a44d43802fe..3ec4c715e240 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -1,7 +1,6 @@
/*
* extcon-axp288.c - X-Power AXP288 PMIC extcon cable detection driver
*
- * Copyright (C) 2016-2017 Hans de Goede <hdegoede@redhat.com>
* Copyright (C) 2015 Intel Corporation
* Author: Ramakrishna Pallala <ramakrishna.pallala@intel.com>
*
@@ -98,15 +97,13 @@ struct axp288_extcon_info {
struct device *dev;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irqc;
- struct delayed_work det_work;
int irq[EXTCON_IRQ_END];
struct extcon_dev *edev;
unsigned int previous_cable;
- bool first_detect_done;
};
/* Power up/down reason string array */
-static char *axp288_pwr_up_down_info[] = {
+static const char * const axp288_pwr_up_down_info[] = {
"Last wake caused by user pressing the power button",
"Last wake caused by a charger insertion",
"Last wake caused by a battery insertion",
@@ -124,7 +121,7 @@ static char *axp288_pwr_up_down_info[] = {
*/
static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)
{
- char **rsi;
+ const char * const *rsi;
unsigned int val, i, clear_mask = 0;
int ret;
@@ -140,25 +137,6 @@ static void axp288_extcon_log_rsi(struct axp288_extcon_info *info)
regmap_write(info->regmap, AXP288_PS_BOOT_REASON_REG, clear_mask);
}
-static void axp288_chrg_detect_complete(struct axp288_extcon_info *info)
-{
- /*
- * We depend on other drivers to do things like mux the data lines,
- * enable/disable vbus based on the id-pin, etc. Sometimes the BIOS has
- * not set these things up correctly resulting in the initial charger
- * cable type detection giving a wrong result and we end up not charging
- * or charging at only 0.5A.
- *
- * So we schedule a second cable type detection after 2 seconds to
- * give the other drivers time to load and do their thing.
- */
- if (!info->first_detect_done) {
- queue_delayed_work(system_wq, &info->det_work,
- msecs_to_jiffies(2000));
- info->first_detect_done = true;
- }
-}
-
static int axp288_handle_chrg_det_event(struct axp288_extcon_info *info)
{
int ret, stat, cfg, pwr_stat;
@@ -223,8 +201,6 @@ no_vbus:
info->previous_cable = cable;
}
- axp288_chrg_detect_complete(info);
-
return 0;
dev_det_ret:
@@ -246,11 +222,8 @@ static irqreturn_t axp288_extcon_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static void axp288_extcon_det_work(struct work_struct *work)
+static void axp288_extcon_enable(struct axp288_extcon_info *info)
{
- struct axp288_extcon_info *info =
- container_of(work, struct axp288_extcon_info, det_work.work);
-
regmap_update_bits(info->regmap, AXP288_BC_GLOBAL_REG,
BC_GLOBAL_RUN, 0);
/* Enable the charger detection logic */
@@ -272,7 +245,6 @@ static int axp288_extcon_probe(struct platform_device *pdev)
info->regmap = axp20x->regmap;
info->regmap_irqc = axp20x->regmap_irqc;
info->previous_cable = EXTCON_NONE;
- INIT_DELAYED_WORK(&info->det_work, axp288_extcon_det_work);
platform_set_drvdata(pdev, info);
@@ -318,7 +290,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
}
/* Start charger cable type detection */
- queue_delayed_work(system_wq, &info->det_work, 0);
+ axp288_extcon_enable(info);
return 0;
}
diff --git a/drivers/extcon/extcon-intel-int3496.c b/drivers/extcon/extcon-intel-int3496.c
index c8691b5a9cb0..191e99f06a9a 100644
--- a/drivers/extcon/extcon-intel-int3496.c
+++ b/drivers/extcon/extcon-intel-int3496.c
@@ -153,8 +153,9 @@ static int int3496_probe(struct platform_device *pdev)
return ret;
}
- /* queue initial processing of id-pin */
+ /* process id-pin so that we start with the right status */
queue_delayed_work(system_wq, &data->work, 0);
+ flush_delayed_work(&data->work);
platform_set_drvdata(pdev, data);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index e2c3c5ec42d1..c53095b3b0fb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -568,6 +568,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
/* HG _PR3 doesn't seem to work on this A+A weston board */
{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0, 0, 0, 0, 0 },
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index 8ca3783f2deb..74d2efaec52f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -736,9 +736,11 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (encoder) {
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
@@ -757,8 +759,12 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
/* check acpi lid status ??? */
amdgpu_connector_update_scratch_regs(connector, ret);
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
+
return ret;
}
@@ -868,9 +874,11 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
encoder = amdgpu_connector_best_single_encoder(connector);
if (!encoder)
@@ -924,8 +932,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -988,9 +998,11 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
ret = connector->status;
@@ -1115,8 +1127,10 @@ out:
amdgpu_connector_update_scratch_regs(connector, ret);
exit:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -1359,9 +1373,11 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
struct drm_encoder *encoder = amdgpu_connector_best_single_encoder(connector);
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
ret = connector->status;
@@ -1429,8 +1445,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
amdgpu_connector_update_scratch_regs(connector, ret);
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 61e8c3e02d16..33d91e4474ea 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -718,7 +718,7 @@ static enum link_training_result perform_channel_equalization_sequence(
uint32_t retries_ch_eq;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_align_status_updated dpcd_lane_status_updated = {{0}};
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {{{0}}};
hw_tr_pattern = get_supported_tp(link);
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 4c3223a4d62b..adb6e7b9280c 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -162,7 +162,7 @@ static int pp_hw_init(void *handle)
if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) {
pr_err("smc start failed\n");
hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
- return -EINVAL;;
+ return -EINVAL;
}
if (ret == PP_DPM_DISABLED)
goto exit;
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index cd23b1b28259..c91b9b054e3f 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -294,22 +294,7 @@ static void cirrus_crtc_prepare(struct drm_crtc *crtc)
{
}
-/*
- * This is called after a mode is programmed. It should reverse anything done
- * by the prepare function
- */
-static void cirrus_crtc_commit(struct drm_crtc *crtc)
-{
-}
-
-/*
- * The core can pass us a set of gamma values to program. We actually only
- * use this for 8-bit mode so can't perform smooth fades on deeper modes,
- * but it's a requirement that we provide the function
- */
-static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size,
- struct drm_modeset_acquire_ctx *ctx)
+static void cirrus_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct cirrus_device *cdev = dev->dev_private;
@@ -317,7 +302,7 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
int i;
if (!crtc->enabled)
- return 0;
+ return;
r = crtc->gamma_store;
g = r + crtc->gamma_size;
@@ -330,6 +315,27 @@ static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
WREG8(PALETTE_DATA, *g++ >> 8);
WREG8(PALETTE_DATA, *b++ >> 8);
}
+}
+
+/*
+ * This is called after a mode is programmed. It should reverse anything done
+ * by the prepare function
+ */
+static void cirrus_crtc_commit(struct drm_crtc *crtc)
+{
+ cirrus_crtc_load_lut(crtc);
+}
+
+/*
+ * The core can pass us a set of gamma values to program. We actually only
+ * use this for 8-bit mode so can't perform smooth fades on deeper modes,
+ * but it's a requirement that we provide the function
+ */
+static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ cirrus_crtc_load_lut(crtc);
return 0;
}
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ab4032167094..ae3cbfe9e01c 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1878,6 +1878,8 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
new_crtc_state->event->base.completion = &commit->flip_done;
new_crtc_state->event->base.completion_release = release_crtc_commit;
drm_crtc_commit_get(commit);
+
+ commit->abort_completion = true;
}
for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
@@ -3421,8 +3423,21 @@ EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state)
{
if (state->commit) {
+ /*
+ * In the event that a non-blocking commit returns
+ * -ERESTARTSYS before the commit_tail work is queued, we will
+ * have an extra reference to the commit object. Release it, if
+ * the event has not been consumed by the worker.
+ *
+ * state->event may be freed, so we can't directly look at
+ * state->event->base.completion.
+ */
+ if (state->event && state->commit->abort_completion)
+ drm_crtc_commit_put(state->commit);
+
kfree(state->commit->event);
state->commit->event = NULL;
+
drm_crtc_commit_put(state->commit);
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ddd537914575..4f751a9d71a3 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -113,6 +113,9 @@ static const struct edid_quirk {
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
{ "AEO", 0, EDID_QUIRK_FORCE_6BPC },
+ /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
+ { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
+
/* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
@@ -162,6 +165,24 @@ static const struct edid_quirk {
/* HTC Vive VR Headset */
{ "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
+
+ /* Oculus Rift DK1, DK2, and CV1 VR Headsets */
+ { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
+ { "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP },
+ { "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP },
+
+ /* Windows Mixed Reality Headsets */
+ { "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP },
+ { "HPN", 0x3515, EDID_QUIRK_NON_DESKTOP },
+ { "LEN", 0x0408, EDID_QUIRK_NON_DESKTOP },
+ { "LEN", 0xb800, EDID_QUIRK_NON_DESKTOP },
+ { "FUJ", 0x1970, EDID_QUIRK_NON_DESKTOP },
+ { "DEL", 0x7fce, EDID_QUIRK_NON_DESKTOP },
+ { "SEC", 0x144a, EDID_QUIRK_NON_DESKTOP },
+ { "AUS", 0xc102, EDID_QUIRK_NON_DESKTOP },
+
+ /* Sony PlayStation VR Headset */
+ { "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP },
};
/*
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 186c4e90cc1c..89eef1bb4ddc 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -836,9 +836,24 @@ struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
if (!mm->color_adjust)
return NULL;
- hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack);
- hole_start = __drm_mm_hole_node_start(hole);
- hole_end = hole_start + hole->hole_size;
+ /*
+ * The hole found during scanning should ideally be the first element
+ * in the hole_stack list, but due to side-effects in the driver it
+ * may not be.
+ */
+ list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
+ hole_start = __drm_mm_hole_node_start(hole);
+ hole_end = hole_start + hole->hole_size;
+
+ if (hole_start <= scan->hit_start &&
+ hole_end >= scan->hit_end)
+ break;
+ }
+
+ /* We should only be called after we found the hole previously */
+ DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
+ if (unlikely(&hole->hole_stack == &mm->hole_stack))
+ return NULL;
DRM_MM_BUG_ON(hole_start > scan->hit_start);
DRM_MM_BUG_ON(hole_end < scan->hit_end);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 555fbe54d6e2..00b8445ba819 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -654,6 +654,26 @@ out:
}
/**
+ * drm_kms_helper_is_poll_worker - is %current task an output poll worker?
+ *
+ * Determine if %current task is an output poll worker. This can be used
+ * to select distinct code paths for output polling versus other contexts.
+ *
+ * One use case is to avoid a deadlock between the output poll worker and
+ * the autosuspend worker wherein the latter waits for polling to finish
+ * upon calling drm_kms_helper_poll_disable(), while the former waits for
+ * runtime suspend to finish upon calling pm_runtime_get_sync() in a
+ * connector ->detect hook.
+ */
+bool drm_kms_helper_is_poll_worker(void)
+{
+ struct work_struct *work = current_work();
+
+ return work && work->func == output_poll_execute;
+}
+EXPORT_SYMBOL(drm_kms_helper_is_poll_worker);
+
+/**
* drm_kms_helper_poll_disable - disable output polling
* @dev: drm_device
*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 2b8bf2dd6387..f68ef1b3a28c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -286,7 +286,6 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
if (!node) {
- dev_err(dev, "failed to allocate memory\n");
ret = -ENOMEM;
goto err;
}
@@ -926,7 +925,7 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
struct drm_device *drm_dev = g2d->subdrv.drm_dev;
struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
struct drm_exynos_pending_g2d_event *e;
- struct timeval now;
+ struct timespec64 now;
if (list_empty(&runqueue_node->event_list))
return;
@@ -934,9 +933,9 @@ static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
e = list_first_entry(&runqueue_node->event_list,
struct drm_exynos_pending_g2d_event, base.link);
- do_gettimeofday(&now);
+ ktime_get_ts64(&now);
e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
e->event.cmdlist_no = cmdlist_no;
drm_send_event(drm_dev, &e->base);
@@ -1358,10 +1357,9 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
return -EFAULT;
runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
- if (!runqueue_node) {
- dev_err(dev, "failed to allocate memory\n");
+ if (!runqueue_node)
return -ENOMEM;
- }
+
run_cmdlist = &runqueue_node->run_cmdlist;
event_list = &runqueue_node->event_list;
INIT_LIST_HEAD(run_cmdlist);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.h b/drivers/gpu/drm/exynos/exynos_drm_rotator.h
deleted file mode 100644
index 71a0b4c0c1e8..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/*
- * Copyright (c) 2012 Samsung Electronics Co., Ltd.
- *
- * Authors:
- * YoungJun Cho <yj44.cho@samsung.com>
- * Eunchul Kim <chulspro.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_ROTATOR_H_
-#define _EXYNOS_DRM_ROTATOR_H_
-
-/* TODO */
-
-#endif
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index a4b75a46f946..abd84cbcf1c2 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1068,10 +1068,13 @@ static void hdmi_audio_config(struct hdmi_context *hdata)
/* Configuration I2S input ports. Configure I2S_PIN_SEL_0~4 */
hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_0, HDMI_I2S_SEL_SCLK(5)
| HDMI_I2S_SEL_LRCK(6));
- hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(1)
- | HDMI_I2S_SEL_SDATA2(4));
+
+ hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_1, HDMI_I2S_SEL_SDATA1(3)
+ | HDMI_I2S_SEL_SDATA0(4));
+
hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_2, HDMI_I2S_SEL_SDATA3(1)
| HDMI_I2S_SEL_SDATA2(2));
+
hdmi_reg_writeb(hdata, HDMI_I2S_PIN_SEL_3, HDMI_I2S_SEL_DSD(0));
/* I2S_CON_1 & 2 */
diff --git a/drivers/gpu/drm/exynos/regs-fimc.h b/drivers/gpu/drm/exynos/regs-fimc.h
index 30496134a3d0..d7cbe53c4c01 100644
--- a/drivers/gpu/drm/exynos/regs-fimc.h
+++ b/drivers/gpu/drm/exynos/regs-fimc.h
@@ -569,7 +569,7 @@
#define EXYNOS_CIIMGEFF_FIN_EMBOSSING (4 << 26)
#define EXYNOS_CIIMGEFF_FIN_SILHOUETTE (5 << 26)
#define EXYNOS_CIIMGEFF_FIN_MASK (7 << 26)
-#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff < 13) | (0xff < 0))
+#define EXYNOS_CIIMGEFF_PAT_CBCR_MASK ((0xff << 13) | (0xff << 0))
/* Real input DMA size register */
#define EXYNOS_CIREAL_ISIZE_AUTOLOAD_ENABLE (1 << 31)
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
index 04be0f7e8193..4420c203ac85 100644
--- a/drivers/gpu/drm/exynos/regs-hdmi.h
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -464,7 +464,7 @@
/* I2S_PIN_SEL_1 */
#define HDMI_I2S_SEL_SDATA1(x) (((x) & 0x7) << 4)
-#define HDMI_I2S_SEL_SDATA2(x) ((x) & 0x7)
+#define HDMI_I2S_SEL_SDATA0(x) ((x) & 0x7)
/* I2S_PIN_SEL_2 */
#define HDMI_I2S_SEL_SDATA3(x) (((x) & 0x7) << 4)
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 909499b73d03..021f722e2481 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -733,6 +733,25 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
return ret == 0 ? count : ret;
}
+static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
+{
+ struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+ struct intel_gvt *gvt = vgpu->gvt;
+ int offset;
+
+ /* Only allow MMIO GGTT entry access */
+ if (index != PCI_BASE_ADDRESS_0)
+ return false;
+
+ offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
+ intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
+
+ return (offset >= gvt->device_info.gtt_start_offset &&
+ offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ?
+ true : false;
+}
+
static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
size_t count, loff_t *ppos)
{
@@ -742,7 +761,21 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
while (count) {
size_t filled;
- if (count >= 4 && !(*ppos % 4)) {
+ /* Only support GGTT entry 8 bytes read */
+ if (count >= 8 && !(*ppos % 8) &&
+ gtt_entry(mdev, ppos)) {
+ u64 val;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ppos, false);
+ if (ret <= 0)
+ goto read_err;
+
+ if (copy_to_user(buf, &val, sizeof(val)))
+ goto read_err;
+
+ filled = 8;
+ } else if (count >= 4 && !(*ppos % 4)) {
u32 val;
ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
@@ -802,7 +835,21 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
while (count) {
size_t filled;
- if (count >= 4 && !(*ppos % 4)) {
+ /* Only support GGTT entry 8 bytes write */
+ if (count >= 8 && !(*ppos % 8) &&
+ gtt_entry(mdev, ppos)) {
+ u64 val;
+
+ if (copy_from_user(&val, buf, sizeof(val)))
+ goto write_err;
+
+ ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+ ppos, true);
+ if (ret <= 0)
+ goto write_err;
+
+ filled = 8;
+ } else if (count >= 4 && !(*ppos % 4)) {
u32 val;
if (copy_from_user(&val, buf, sizeof(val)))
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 73ad6e90e49d..256f1bb522b7 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -118,6 +118,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
{RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
{RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
{RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
+ {RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
{RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
{RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
{RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
diff --git a/drivers/gpu/drm/i915/gvt/trace.h b/drivers/gpu/drm/i915/gvt/trace.h
index 7a2511538f34..736bd2bc5127 100644
--- a/drivers/gpu/drm/i915/gvt/trace.h
+++ b/drivers/gpu/drm/i915/gvt/trace.h
@@ -333,7 +333,7 @@ TRACE_EVENT(render_mmio,
TP_PROTO(int old_id, int new_id, char *action, unsigned int reg,
unsigned int old_val, unsigned int new_val),
- TP_ARGS(old_id, new_id, action, reg, new_val, old_val),
+ TP_ARGS(old_id, new_id, action, reg, old_val, new_val),
TP_STRUCT__entry(
__field(int, old_id)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 173d0095e3b2..2f5209de0391 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1433,19 +1433,7 @@ void i915_driver_unload(struct drm_device *dev)
intel_modeset_cleanup(dev);
- /*
- * free the memory space allocated for the child device
- * config parsed from VBT
- */
- if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
- kfree(dev_priv->vbt.child_dev);
- dev_priv->vbt.child_dev = NULL;
- dev_priv->vbt.child_dev_num = 0;
- }
- kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
- dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
- kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
- dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+ intel_bios_cleanup(dev_priv);
vga_switcheroo_unregister_client(pdev);
vga_client_register(pdev, NULL, NULL, NULL);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a42deebedb0f..d307429a5ae0 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1349,6 +1349,7 @@ struct intel_vbt_data {
u32 size;
u8 *data;
const u8 *sequence[MIPI_SEQ_MAX];
+ u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
} dsi;
int crt_ddc_pin;
@@ -3657,6 +3658,7 @@ extern void intel_i2c_reset(struct drm_i915_private *dev_priv);
/* intel_bios.c */
void intel_bios_init(struct drm_i915_private *dev_priv);
+void intel_bios_cleanup(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 648e7536ff51..0c963fcf31ff 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -803,7 +803,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
case I915_CONTEXT_PARAM_PRIORITY:
{
- int priority = args->value;
+ s64 priority = args->value;
if (args->size)
ret = -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_oa_cflgt3.c b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
index 42ff06fe54a3..792facdb6702 100644
--- a/drivers/gpu/drm/i915/i915_oa_cflgt3.c
+++ b/drivers/gpu/drm/i915/i915_oa_cflgt3.c
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"577e8e2c-3fa0-4875-8743-3538d585e3b0",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_oa_cnl.c b/drivers/gpu/drm/i915/i915_oa_cnl.c
index ff0ac3627cc4..ba9140c87cc0 100644
--- a/drivers/gpu/drm/i915/i915_oa_cnl.c
+++ b/drivers/gpu/drm/i915/i915_oa_cnl.c
@@ -96,9 +96,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
void
i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)
{
- strncpy(dev_priv->perf.oa.test_config.uuid,
+ strlcpy(dev_priv->perf.oa.test_config.uuid,
"db41edd4-d8e7-4730-ad11-b9a2d6833503",
- UUID_STRING_LEN);
+ sizeof(dev_priv->perf.oa.test_config.uuid));
dev_priv->perf.oa.test_config.id = 1;
dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 55a8a1e29424..0e9b98c32b62 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -285,26 +285,41 @@ static u64 count_interrupts(struct drm_i915_private *i915)
return sum;
}
-static void i915_pmu_event_destroy(struct perf_event *event)
+static void engine_event_destroy(struct perf_event *event)
{
- WARN_ON(event->parent);
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+ struct intel_engine_cs *engine;
+
+ engine = intel_engine_lookup_user(i915,
+ engine_event_class(event),
+ engine_event_instance(event));
+ if (WARN_ON_ONCE(!engine))
+ return;
+
+ if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
+ intel_engine_supports_stats(engine))
+ intel_disable_engine_stats(engine);
}
-static int engine_event_init(struct perf_event *event)
+static void i915_pmu_event_destroy(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ WARN_ON(event->parent);
- if (!intel_engine_lookup_user(i915, engine_event_class(event),
- engine_event_instance(event)))
- return -ENODEV;
+ if (is_engine_event(event))
+ engine_event_destroy(event);
+}
- switch (engine_event_sample(event)) {
+static int
+engine_event_status(struct intel_engine_cs *engine,
+ enum drm_i915_pmu_engine_sample sample)
+{
+ switch (sample) {
case I915_SAMPLE_BUSY:
case I915_SAMPLE_WAIT:
break;
case I915_SAMPLE_SEMA:
- if (INTEL_GEN(i915) < 6)
+ if (INTEL_GEN(engine->i915) < 6)
return -ENODEV;
break;
default:
@@ -314,6 +329,30 @@ static int engine_event_init(struct perf_event *event)
return 0;
}
+static int engine_event_init(struct perf_event *event)
+{
+ struct drm_i915_private *i915 =
+ container_of(event->pmu, typeof(*i915), pmu.base);
+ struct intel_engine_cs *engine;
+ u8 sample;
+ int ret;
+
+ engine = intel_engine_lookup_user(i915, engine_event_class(event),
+ engine_event_instance(event));
+ if (!engine)
+ return -ENODEV;
+
+ sample = engine_event_sample(event);
+ ret = engine_event_status(engine, sample);
+ if (ret)
+ return ret;
+
+ if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
+ ret = intel_enable_engine_stats(engine);
+
+ return ret;
+}
+
static int i915_pmu_event_init(struct perf_event *event)
{
struct drm_i915_private *i915 =
@@ -370,7 +409,94 @@ static int i915_pmu_event_init(struct perf_event *event)
return 0;
}
-static u64 __i915_pmu_event_read(struct perf_event *event)
+static u64 __get_rc6(struct drm_i915_private *i915)
+{
+ u64 val;
+
+ val = intel_rc6_residency_ns(i915,
+ IS_VALLEYVIEW(i915) ?
+ VLV_GT_RENDER_RC6 :
+ GEN6_GT_GFX_RC6);
+
+ if (HAS_RC6p(i915))
+ val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
+
+ if (HAS_RC6pp(i915))
+ val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
+
+ return val;
+}
+
+static u64 get_rc6(struct drm_i915_private *i915, bool locked)
+{
+#if IS_ENABLED(CONFIG_PM)
+ unsigned long flags;
+ u64 val;
+
+ if (intel_runtime_pm_get_if_in_use(i915)) {
+ val = __get_rc6(i915);
+ intel_runtime_pm_put(i915);
+
+ /*
+ * If we are coming back from being runtime suspended we must
+ * be careful not to report a larger value than returned
+ * previously.
+ */
+
+ if (!locked)
+ spin_lock_irqsave(&i915->pmu.lock, flags);
+
+ if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+ i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
+ i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
+ } else {
+ val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+ }
+
+ if (!locked)
+ spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ } else {
+ struct pci_dev *pdev = i915->drm.pdev;
+ struct device *kdev = &pdev->dev;
+ unsigned long flags2;
+
+ /*
+ * We are runtime suspended.
+ *
+ * Report the delta from when the device was suspended to now,
+ * on top of the last known real value, as the approximated RC6
+ * counter value.
+ */
+ if (!locked)
+ spin_lock_irqsave(&i915->pmu.lock, flags);
+
+ spin_lock_irqsave(&kdev->power.lock, flags2);
+
+ if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+ i915->pmu.suspended_jiffies_last =
+ kdev->power.suspended_jiffies;
+
+ val = kdev->power.suspended_jiffies -
+ i915->pmu.suspended_jiffies_last;
+ val += jiffies - kdev->power.accounting_timestamp;
+
+ spin_unlock_irqrestore(&kdev->power.lock, flags2);
+
+ val = jiffies_to_nsecs(val);
+ val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+ i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+
+ if (!locked)
+ spin_unlock_irqrestore(&i915->pmu.lock, flags);
+ }
+
+ return val;
+#else
+ return __get_rc6(i915);
+#endif
+}
+
+static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)
{
struct drm_i915_private *i915 =
container_of(event->pmu, typeof(*i915), pmu.base);
@@ -387,7 +513,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
if (WARN_ON_ONCE(!engine)) {
/* Do nothing */
} else if (sample == I915_SAMPLE_BUSY &&
- engine->pmu.busy_stats) {
+ intel_engine_supports_stats(engine)) {
val = ktime_to_ns(intel_engine_get_busy_time(engine));
} else {
val = engine->pmu.sample[sample].cur;
@@ -408,18 +534,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
val = count_interrupts(i915);
break;
case I915_PMU_RC6_RESIDENCY:
- intel_runtime_pm_get(i915);
- val = intel_rc6_residency_ns(i915,
- IS_VALLEYVIEW(i915) ?
- VLV_GT_RENDER_RC6 :
- GEN6_GT_GFX_RC6);
- if (HAS_RC6p(i915))
- val += intel_rc6_residency_ns(i915,
- GEN6_GT_GFX_RC6p);
- if (HAS_RC6pp(i915))
- val += intel_rc6_residency_ns(i915,
- GEN6_GT_GFX_RC6pp);
- intel_runtime_pm_put(i915);
+ val = get_rc6(i915, locked);
break;
}
}
@@ -434,7 +549,7 @@ static void i915_pmu_event_read(struct perf_event *event)
again:
prev = local64_read(&hwc->prev_count);
- new = __i915_pmu_event_read(event);
+ new = __i915_pmu_event_read(event, false);
if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
goto again;
@@ -442,12 +557,6 @@ again:
local64_add(new - prev, &event->count);
}
-static bool engine_needs_busy_stats(struct intel_engine_cs *engine)
-{
- return intel_engine_supports_stats(engine) &&
- (engine->pmu.enable & BIT(I915_SAMPLE_BUSY));
-}
-
static void i915_pmu_enable(struct perf_event *event)
{
struct drm_i915_private *i915 =
@@ -487,21 +596,7 @@ static void i915_pmu_enable(struct perf_event *event)
GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
- if (engine->pmu.enable_count[sample]++ == 0) {
- /*
- * Enable engine busy stats tracking if needed or
- * alternatively cancel the scheduled disable.
- *
- * If the delayed disable was pending, cancel it and
- * in this case do not enable since it already is.
- */
- if (engine_needs_busy_stats(engine) &&
- !engine->pmu.busy_stats) {
- engine->pmu.busy_stats = true;
- if (!cancel_delayed_work(&engine->pmu.disable_busy_stats))
- intel_enable_engine_stats(engine);
- }
- }
+ engine->pmu.enable_count[sample]++;
}
/*
@@ -509,19 +604,11 @@ static void i915_pmu_enable(struct perf_event *event)
* for all listeners. Even when the event was already enabled and has
* an existing non-zero value.
*/
- local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
+ local64_set(&event->hw.prev_count, __i915_pmu_event_read(event, true));
spin_unlock_irqrestore(&i915->pmu.lock, flags);
}
-static void __disable_busy_stats(struct work_struct *work)
-{
- struct intel_engine_cs *engine =
- container_of(work, typeof(*engine), pmu.disable_busy_stats.work);
-
- intel_disable_engine_stats(engine);
-}
-
static void i915_pmu_disable(struct perf_event *event)
{
struct drm_i915_private *i915 =
@@ -545,26 +632,8 @@ static void i915_pmu_disable(struct perf_event *event)
* Decrement the reference count and clear the enabled
* bitmask when the last listener on an event goes away.
*/
- if (--engine->pmu.enable_count[sample] == 0) {
+ if (--engine->pmu.enable_count[sample] == 0)
engine->pmu.enable &= ~BIT(sample);
- if (!engine_needs_busy_stats(engine) &&
- engine->pmu.busy_stats) {
- engine->pmu.busy_stats = false;
- /*
- * We request a delayed disable to handle the
- * rapid on/off cycles on events, which can
- * happen when tools like perf stat start, in a
- * nicer way.
- *
- * In addition, this also helps with busy stats
- * accuracy with background CPU offline/online
- * migration events.
- */
- queue_delayed_work(system_wq,
- &engine->pmu.disable_busy_stats,
- round_jiffies_up_relative(HZ));
- }
- }
}
GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
@@ -797,8 +866,6 @@ static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)
void i915_pmu_register(struct drm_i915_private *i915)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
int ret;
if (INTEL_GEN(i915) <= 2) {
@@ -820,10 +887,6 @@ void i915_pmu_register(struct drm_i915_private *i915)
hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
i915->pmu.timer.function = i915_sample;
- for_each_engine(engine, i915, id)
- INIT_DELAYED_WORK(&engine->pmu.disable_busy_stats,
- __disable_busy_stats);
-
ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
if (ret)
goto err;
@@ -843,9 +906,6 @@ err:
void i915_pmu_unregister(struct drm_i915_private *i915)
{
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
-
if (!i915->pmu.base.event_init)
return;
@@ -853,11 +913,6 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
hrtimer_cancel(&i915->pmu.timer);
- for_each_engine(engine, i915, id) {
- GEM_BUG_ON(engine->pmu.busy_stats);
- flush_delayed_work(&engine->pmu.disable_busy_stats);
- }
-
i915_pmu_unregister_cpuhp_state(i915);
perf_pmu_unregister(&i915->pmu.base);
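get_rc6() above keeps the reported RC6 residency monotonic across runtime suspend: real hardware readings are clamped against the last estimate handed out, and while the device is suspended the counter is extrapolated from the last real value plus the time spent suspended. A simplified standalone sketch of that bookkeeping, where suspended_delta_ns stands in for the PM-core jiffies accounting and the locking is omitted:

#include <stdint.h>
#include <stdbool.h>

/* Mirrors __I915_SAMPLE_RC6 and __I915_SAMPLE_RC6_ESTIMATED above. */
struct rc6_counter {
	uint64_t real;		/* last value read from hardware */
	uint64_t estimated;	/* last value reported while suspended */
};

static uint64_t report_rc6(struct rc6_counter *c, bool awake,
			   uint64_t hw_residency_ns,
			   uint64_t suspended_delta_ns)
{
	if (awake) {
		/* Never report less than a previously returned estimate. */
		if (hw_residency_ns >= c->estimated) {
			c->estimated = 0;
			c->real = hw_residency_ns;
			return hw_residency_ns;
		}
		return c->estimated;
	}

	/* Suspended: extrapolate from the last real reading. */
	c->estimated = c->real + suspended_delta_ns;
	return c->estimated;
}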
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 40c154d13565..bb62df15afa4 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -27,6 +27,8 @@
enum {
__I915_SAMPLE_FREQ_ACT = 0,
__I915_SAMPLE_FREQ_REQ,
+ __I915_SAMPLE_RC6,
+ __I915_SAMPLE_RC6_ESTIMATED,
__I915_NUM_PMU_SAMPLERS
};
@@ -94,6 +96,10 @@ struct i915_pmu {
* struct intel_engine_cs.
*/
struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
+ /**
+ * @suspended_jiffies_last: Cached suspend time from PM core.
+ */
+ unsigned long suspended_jiffies_last;
};
#ifdef CONFIG_PERF_EVENTS
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index f7f771749e48..b49a2df44430 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -947,6 +947,86 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
return 0;
}
+/*
+ * Get len of pre-fixed deassert fragment from a v1 init OTP sequence,
+ * skip all delay + gpio operands and stop at the first DSI packet op.
+ */
+static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv)
+{
+ const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ int index, len;
+
+ if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1))
+ return 0;
+
+ /* index = 1 to skip sequence byte */
+ for (index = 1; data[index] != MIPI_SEQ_ELEM_END; index += len) {
+ switch (data[index]) {
+ case MIPI_SEQ_ELEM_SEND_PKT:
+ return index == 1 ? 0 : index;
+ case MIPI_SEQ_ELEM_DELAY:
+ len = 5; /* 1 byte for operand + uint32 */
+ break;
+ case MIPI_SEQ_ELEM_GPIO:
+ len = 3; /* 1 byte for op, 1 for gpio_nr, 1 for value */
+ break;
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence.
+ * The deassert must be done before calling intel_dsi_device_ready, so for
+ * these devices we split the init OTP sequence into a deassert sequence and
+ * the actual init OTP part.
+ */
+static void fixup_mipi_sequences(struct drm_i915_private *dev_priv)
+{
+ u8 *init_otp;
+ int len;
+
+ /* Limit this to VLV for now. */
+ if (!IS_VALLEYVIEW(dev_priv))
+ return;
+
+ /* Limit this to v1 vid-mode sequences */
+ if (dev_priv->vbt.dsi.config->is_cmd_mode ||
+ dev_priv->vbt.dsi.seq_version != 1)
+ return;
+
+ /* Only do this if there are otp and assert seqs and no deassert seq */
+ if (!dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
+ !dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
+ dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
+ return;
+
+ /* The deassert-sequence ends at the first DSI packet */
+ len = get_init_otp_deassert_fragment_len(dev_priv);
+ if (!len)
+ return;
+
+ DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n");
+
+ /* Copy the fragment, update seq byte and terminate it */
+ init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+ dev_priv->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
+ if (!dev_priv->vbt.dsi.deassert_seq)
+ return;
+ dev_priv->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
+ dev_priv->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
+ /* Use the copy for deassert */
+ dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
+ dev_priv->vbt.dsi.deassert_seq;
+ /* Replace the last byte of the fragment with init OTP seq byte */
+ init_otp[len - 1] = MIPI_SEQ_INIT_OTP;
+ /* And make MIPI_MIPI_SEQ_INIT_OTP point to it */
+ dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
+}
+
static void
parse_mipi_sequence(struct drm_i915_private *dev_priv,
const struct bdb_header *bdb)
@@ -1016,6 +1096,8 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
dev_priv->vbt.dsi.size = seq_size;
dev_priv->vbt.dsi.seq_version = sequence->version;
+ fixup_mipi_sequences(dev_priv);
+
DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
return;
@@ -1589,6 +1671,29 @@ out:
}
/**
+ * intel_bios_cleanup - Free any resources allocated by intel_bios_init()
+ * @dev_priv: i915 device instance
+ */
+void intel_bios_cleanup(struct drm_i915_private *dev_priv)
+{
+ kfree(dev_priv->vbt.child_dev);
+ dev_priv->vbt.child_dev = NULL;
+ dev_priv->vbt.child_dev_num = 0;
+ kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
+ dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+ kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
+ dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+ kfree(dev_priv->vbt.dsi.data);
+ dev_priv->vbt.dsi.data = NULL;
+ kfree(dev_priv->vbt.dsi.pps);
+ dev_priv->vbt.dsi.pps = NULL;
+ kfree(dev_priv->vbt.dsi.config);
+ dev_priv->vbt.dsi.config = NULL;
+ kfree(dev_priv->vbt.dsi.deassert_seq);
+ dev_priv->vbt.dsi.deassert_seq = NULL;
+}
+
+/**
* intel_bios_is_tv_present - is integrated TV present in VBT
* @dev_priv: i915 device instance
*
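fixup_mipi_sequences() above splits a v1 init-OTP sequence by duplicating its leading delay/GPIO fragment as a new deassert-reset sequence and re-pointing the init-OTP sequence at the remaining tail of the original buffer. A minimal standalone sketch of that split; the opcode values here are illustrative placeholders, not the real VBT definitions:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative sequence/element ids; the real ones come from the VBT headers. */
enum { ELEM_END = 0, SEQ_DEASSERT_RESET = 1, SEQ_INIT_OTP = 2 };

/*
 * Given the init-OTP buffer and the length of its deassert fragment
 * (the part before the first DSI packet), return a newly allocated
 * deassert sequence and make *new_init_otp point at the shortened
 * init-OTP sequence inside the original buffer.
 */
static uint8_t *split_deassert(uint8_t *init_otp, int frag_len,
			       uint8_t **new_init_otp)
{
	uint8_t *deassert = malloc(frag_len + 1);

	if (!deassert)
		return NULL;

	memcpy(deassert, init_otp, frag_len + 1);
	deassert[0] = SEQ_DEASSERT_RESET;	/* retag the copy */
	deassert[frag_len] = ELEM_END;		/* and terminate it */

	/* Reuse the tail of the original buffer as the init-OTP sequence. */
	init_otp[frag_len - 1] = SEQ_INIT_OTP;
	*new_init_otp = init_otp + frag_len - 1;

	return deassert;
}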
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
index bd40fea16b4f..f54ddda9fdad 100644
--- a/drivers/gpu/drm/i915/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -594,29 +594,16 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
spin_unlock_irq(&b->rb_lock);
}
-static bool signal_valid(const struct drm_i915_gem_request *request)
-{
- return intel_wait_check_request(&request->signaling.wait, request);
-}
-
static bool signal_complete(const struct drm_i915_gem_request *request)
{
if (!request)
return false;
- /* If another process served as the bottom-half it may have already
- * signalled that this wait is already completed.
- */
- if (intel_wait_complete(&request->signaling.wait))
- return signal_valid(request);
-
- /* Carefully check if the request is complete, giving time for the
+ /*
+ * Carefully check if the request is complete, giving time for the
* seqno to be visible or if the GPU hung.
*/
- if (__i915_request_irq_complete(request))
- return true;
-
- return false;
+ return __i915_request_irq_complete(request);
}
static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
@@ -659,9 +646,13 @@ static int intel_breadcrumbs_signaler(void *arg)
request = i915_gem_request_get_rcu(request);
rcu_read_unlock();
if (signal_complete(request)) {
- local_bh_disable();
- dma_fence_signal(&request->fence);
- local_bh_enable(); /* kick start the tasklets */
+ if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &request->fence.flags)) {
+ local_bh_disable();
+ dma_fence_signal(&request->fence);
+ GEM_BUG_ON(!i915_gem_request_completed(request));
+ local_bh_enable(); /* kick start the tasklets */
+ }
spin_lock_irq(&b->rb_lock);
diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
index 5dc118f26b51..1704c8897afd 100644
--- a/drivers/gpu/drm/i915/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/intel_cdclk.c
@@ -1952,6 +1952,14 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
min_cdclk = max(2 * 96000, min_cdclk);
+ /*
+ * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
+ * than 320000KHz.
+ */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
+ IS_VALLEYVIEW(dev_priv))
+ min_cdclk = max(320000, min_cdclk);
+
if (min_cdclk > dev_priv->max_cdclk_freq) {
DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
min_cdclk, dev_priv->max_cdclk_freq);
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index d790bdc227ff..fa960cfd2764 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1458,7 +1458,9 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
struct drm_i915_private *dev_priv = engine->i915;
bool idle = true;
- intel_runtime_pm_get(dev_priv);
+ /* If the whole device is asleep, the engine must be idle */
+ if (!intel_runtime_pm_get_if_in_use(dev_priv))
+ return true;
/* First check that no commands are left in the ring */
if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=
@@ -1943,16 +1945,22 @@ intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
*/
int intel_enable_engine_stats(struct intel_engine_cs *engine)
{
+ struct intel_engine_execlists *execlists = &engine->execlists;
unsigned long flags;
+ int err = 0;
if (!intel_engine_supports_stats(engine))
return -ENODEV;
+ tasklet_disable(&execlists->tasklet);
spin_lock_irqsave(&engine->stats.lock, flags);
- if (engine->stats.enabled == ~0)
- goto busy;
+
+ if (unlikely(engine->stats.enabled == ~0)) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
if (engine->stats.enabled++ == 0) {
- struct intel_engine_execlists *execlists = &engine->execlists;
const struct execlist_port *port = execlists->port;
unsigned int num_ports = execlists_num_ports(execlists);
@@ -1967,14 +1975,12 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
if (engine->stats.active)
engine->stats.start = engine->stats.enabled_at;
}
- spin_unlock_irqrestore(&engine->stats.lock, flags);
-
- return 0;
-busy:
+unlock:
spin_unlock_irqrestore(&engine->stats.lock, flags);
+ tasklet_enable(&execlists->tasklet);
- return -EBUSY;
+ return err;
}
static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c5ff203e42d6..a0e7a6c2a57c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -366,20 +366,6 @@ struct intel_engine_cs {
*/
#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
- /**
- * @busy_stats: Has enablement of engine stats tracking been
- * requested.
- */
- bool busy_stats;
- /**
- * @disable_busy_stats: Work item for busy stats disabling.
- *
- * Same as with @enable_busy_stats action, with the difference
- * that we delay it in case there are rapid enable-disable
- * actions, which can happen during tool startup (like perf
- * stat).
- */
- struct delayed_work disable_busy_stats;
} pmu;
/*
diff --git a/drivers/gpu/drm/meson/meson_crtc.c b/drivers/gpu/drm/meson/meson_crtc.c
index 5155f0179b61..05520202c967 100644
--- a/drivers/gpu/drm/meson/meson_crtc.c
+++ b/drivers/gpu/drm/meson/meson_crtc.c
@@ -36,6 +36,7 @@
#include "meson_venc.h"
#include "meson_vpp.h"
#include "meson_viu.h"
+#include "meson_canvas.h"
#include "meson_registers.h"
/* CRTC definition */
@@ -192,6 +193,11 @@ void meson_crtc_irq(struct meson_drm *priv)
} else
meson_vpp_disable_interlace_vscaler_osd1(priv);
+ meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
+ priv->viu.osd1_addr, priv->viu.osd1_stride,
+ priv->viu.osd1_height, MESON_CANVAS_WRAP_NONE,
+ MESON_CANVAS_BLKMODE_LINEAR);
+
/* Enable OSD1 */
writel_bits_relaxed(VPP_OSD1_POSTBLEND, VPP_OSD1_POSTBLEND,
priv->io_base + _REG(VPP_MISC));
diff --git a/drivers/gpu/drm/meson/meson_drv.h b/drivers/gpu/drm/meson/meson_drv.h
index 5e8b392b9d1f..8450d6ac8c9b 100644
--- a/drivers/gpu/drm/meson/meson_drv.h
+++ b/drivers/gpu/drm/meson/meson_drv.h
@@ -43,6 +43,9 @@ struct meson_drm {
bool osd1_commit;
uint32_t osd1_ctrl_stat;
uint32_t osd1_blk0_cfg[5];
+ uint32_t osd1_addr;
+ uint32_t osd1_stride;
+ uint32_t osd1_height;
} viu;
struct {
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index d0a6ac8390f3..27bd3503e1e4 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -164,10 +164,9 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
/* Update Canvas with buffer address */
gem = drm_fb_cma_get_gem_obj(fb, 0);
- meson_canvas_setup(priv, MESON_CANVAS_ID_OSD1,
- gem->paddr, fb->pitches[0],
- fb->height, MESON_CANVAS_WRAP_NONE,
- MESON_CANVAS_BLKMODE_LINEAR);
+ priv->viu.osd1_addr = gem->paddr;
+ priv->viu.osd1_stride = fb->pitches[0];
+ priv->viu.osd1_height = fb->height;
spin_unlock_irqrestore(&priv->drm->event_lock, flags);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 3e9bba4d6624..6d8e3a9a6fc0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -680,7 +680,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
} else {
dev_info(&pdev->dev,
"no iommu, fallback to phys contig buffers for scanout\n");
- aspace = NULL;;
+ aspace = NULL;
}
pm_runtime_put_sync(&pdev->dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 69d6e61a01ec..6ed9cb053dfa 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -570,9 +570,15 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
nv_connector->edid = NULL;
}
- ret = pm_runtime_get_sync(connector->dev->dev);
- if (ret < 0 && ret != -EACCES)
- return conn_status;
+ /* Outputs are only polled while runtime active, so acquiring a
+ * runtime PM ref here is unnecessary (and would deadlock upon
+ * runtime suspend because it waits for polling to finish).
+ */
+ if (!drm_kms_helper_is_poll_worker()) {
+ ret = pm_runtime_get_sync(connector->dev->dev);
+ if (ret < 0 && ret != -EACCES)
+ return conn_status;
+ }
nv_encoder = nouveau_connector_ddc_detect(connector);
if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
@@ -647,8 +653,10 @@ detect_analog:
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return conn_status;
}
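The same "only take a runtime-PM reference outside the poll worker" pattern repeats in the radeon connector hunks further below. A rough standalone sketch of the shape of ->detect() after the change, with hypothetical stubs in place of drm_kms_helper_is_poll_worker(), pm_runtime_get_sync() and pm_runtime_put_autosuspend():

#include <stdbool.h>

/* Hypothetical stand-ins for the DRM and PM-core helpers named above. */
static bool in_poll_worker;

static bool is_poll_worker(void) { return in_poll_worker; }
static int  runtime_get(void)    { return 0; }
static void runtime_put(void)    { }

/*
 * The output-poll worker only runs while the device is runtime active,
 * and runtime suspend waits for polling to finish, so grabbing another
 * runtime-PM reference from inside the worker is both unnecessary and
 * a potential deadlock. Only user-triggered probes resume the device.
 */
static int detect_connector(void)
{
	int status = 0;			/* 0 = disconnected in this sketch */

	if (!is_poll_worker()) {
		if (runtime_get() < 0)
			return status;
	}

	/* ... DDC/EDID probe or load detection would run here ... */

	if (!is_poll_worker())
		runtime_put();		/* mark busy + autosuspend in the real code */

	return status;
}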
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index bf62303571b3..3695cde669f8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -301,7 +301,7 @@ nvkm_therm_attr_set(struct nvkm_therm *therm,
void
nvkm_therm_clkgate_enable(struct nvkm_therm *therm)
{
- if (!therm->func->clkgate_enable || !therm->clkgating_enabled)
+ if (!therm || !therm->func->clkgate_enable || !therm->clkgating_enabled)
return;
nvkm_debug(&therm->subdev,
@@ -312,7 +312,7 @@ nvkm_therm_clkgate_enable(struct nvkm_therm *therm)
void
nvkm_therm_clkgate_fini(struct nvkm_therm *therm, bool suspend)
{
- if (!therm->func->clkgate_fini || !therm->clkgating_enabled)
+ if (!therm || !therm->func->clkgate_fini || !therm->clkgating_enabled)
return;
nvkm_debug(&therm->subdev,
@@ -395,7 +395,7 @@ void
nvkm_therm_clkgate_init(struct nvkm_therm *therm,
const struct nvkm_therm_clkgate_pack *p)
{
- if (!therm->func->clkgate_init || !therm->clkgating_enabled)
+ if (!therm || !therm->func->clkgate_init || !therm->clkgating_enabled)
return;
therm->func->clkgate_init(therm, p);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 5012f5e47a1e..2e2ca3c6b47d 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -899,9 +899,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -924,8 +926,12 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
/* check acpi lid status ??? */
radeon_connector_update_scratch_regs(connector, ret);
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
+
return ret;
}
@@ -1039,9 +1045,11 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
int r;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
encoder = radeon_best_single_encoder(connector);
if (!encoder)
@@ -1108,8 +1116,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
radeon_connector_update_scratch_regs(connector, ret);
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -1173,9 +1183,11 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
if (!radeon_connector->dac_load_detect)
return ret;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
encoder = radeon_best_single_encoder(connector);
if (!encoder)
@@ -1187,8 +1199,12 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
if (ret == connector_status_connected)
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false);
radeon_connector_update_scratch_regs(connector, ret);
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
+
return ret;
}
@@ -1251,9 +1267,11 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false, broken_edid = false;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (radeon_connector->detected_hpd_without_ddc) {
force = true;
@@ -1436,8 +1454,10 @@ out:
}
exit:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
@@ -1688,9 +1708,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
if (radeon_dig_connector->is_mst)
return connector_status_disconnected;
- r = pm_runtime_get_sync(connector->dev->dev);
- if (r < 0)
- return connector_status_disconnected;
+ if (!drm_kms_helper_is_poll_worker()) {
+ r = pm_runtime_get_sync(connector->dev->dev);
+ if (r < 0)
+ return connector_status_disconnected;
+ }
if (!force && radeon_check_hpd_status_unchanged(connector)) {
ret = connector->status;
@@ -1777,8 +1799,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
}
out:
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
+ if (!drm_kms_helper_is_poll_worker()) {
+ pm_runtime_mark_last_busy(connector->dev->dev);
+ pm_runtime_put_autosuspend(connector->dev->dev);
+ }
return ret;
}
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index 2c18996d59c5..0d95888ccc3e 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -461,7 +461,7 @@ void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched, struct drm_sched_jo
{
struct drm_sched_job *s_job;
struct drm_sched_entity *entity, *tmp;
- int i;;
+ int i;
spin_lock(&sched->job_list_lock);
list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 658fa2d3e40c..48685cddbad1 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -1089,7 +1089,7 @@ static void ipu_irq_handler(struct irq_desc *desc)
{
struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
+ static const int int_reg[] = { 0, 1, 2, 3, 10, 11, 12, 13, 14};
chained_irq_enter(chip, desc);
@@ -1102,7 +1102,7 @@ static void ipu_err_irq_handler(struct irq_desc *desc)
{
struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- const int int_reg[] = { 4, 5, 8, 9};
+ static const int int_reg[] = { 4, 5, 8, 9};
chained_irq_enter(chip, desc);
diff --git a/drivers/gpu/ipu-v3/ipu-cpmem.c b/drivers/gpu/ipu-v3/ipu-cpmem.c
index bb9c087e6c0d..9f2d9ec42add 100644
--- a/drivers/gpu/ipu-v3/ipu-cpmem.c
+++ b/drivers/gpu/ipu-v3/ipu-cpmem.c
@@ -788,12 +788,14 @@ int ipu_cpmem_set_image(struct ipuv3_channel *ch, struct ipu_image *image)
case V4L2_PIX_FMT_SGBRG8:
case V4L2_PIX_FMT_SGRBG8:
case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_GREY:
offset = image->rect.left + image->rect.top * pix->bytesperline;
break;
case V4L2_PIX_FMT_SBGGR16:
case V4L2_PIX_FMT_SGBRG16:
case V4L2_PIX_FMT_SGRBG16:
case V4L2_PIX_FMT_SRGGB16:
+ case V4L2_PIX_FMT_Y16:
offset = image->rect.left * 2 +
image->rect.top * pix->bytesperline;
break;
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index 24e12b87a0cb..caa05b0702e1 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -288,6 +288,7 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
case MEDIA_BUS_FMT_SGBRG10_1X10:
case MEDIA_BUS_FMT_SGRBG10_1X10:
case MEDIA_BUS_FMT_SRGGB10_1X10:
+ case MEDIA_BUS_FMT_Y10_1X10:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->mipi_dt = MIPI_DT_RAW10;
cfg->data_width = IPU_CSI_DATA_WIDTH_10;
@@ -296,6 +297,7 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code)
case MEDIA_BUS_FMT_SGBRG12_1X12:
case MEDIA_BUS_FMT_SGRBG12_1X12:
case MEDIA_BUS_FMT_SRGGB12_1X12:
+ case MEDIA_BUS_FMT_Y12_1X12:
cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
cfg->mipi_dt = MIPI_DT_RAW12;
cfg->data_width = IPU_CSI_DATA_WIDTH_12;
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c
index f1cec3d70498..0f70e8847540 100644
--- a/drivers/gpu/ipu-v3/ipu-pre.c
+++ b/drivers/gpu/ipu-v3/ipu-pre.c
@@ -129,11 +129,14 @@ ipu_pre_lookup_by_phandle(struct device *dev, const char *name, int index)
if (pre_node == pre->dev->of_node) {
mutex_unlock(&ipu_pre_list_mutex);
device_link_add(dev, pre->dev, DL_FLAG_AUTOREMOVE);
+ of_node_put(pre_node);
return pre;
}
}
mutex_unlock(&ipu_pre_list_mutex);
+ of_node_put(pre_node);
+
return NULL;
}
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c
index 067365c733c6..97b99500153d 100644
--- a/drivers/gpu/ipu-v3/ipu-prg.c
+++ b/drivers/gpu/ipu-v3/ipu-prg.c
@@ -102,11 +102,14 @@ ipu_prg_lookup_by_phandle(struct device *dev, const char *name, int ipu_id)
mutex_unlock(&ipu_prg_list_mutex);
device_link_add(dev, prg->dev, DL_FLAG_AUTOREMOVE);
prg->id = ipu_id;
+ of_node_put(prg_node);
return prg;
}
}
mutex_unlock(&ipu_prg_list_mutex);
+ of_node_put(prg_node);
+
return NULL;
}
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 43ddcdfbd0da..9454ac134ce2 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -645,6 +645,9 @@
#define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033
#define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035
#define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038
+#define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040
+#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042
+#define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043
#define USB_DEVICE_ID_LD_JWM 0x1080
#define USB_DEVICE_ID_LD_DMMP 0x1081
#define USB_DEVICE_ID_LD_UMIP 0x1090
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 5f6035a5ce36..e92b77fa574a 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -809,6 +809,9 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 4bdbf77f7197..72c338eb5fae 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -269,13 +269,13 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
for (i = 0; i < ARRAY_SIZE(tjmax_model_table); i++) {
const struct tjmax_model *tm = &tjmax_model_table[i];
if (c->x86_model == tm->model &&
- (tm->mask == ANY || c->x86_mask == tm->mask))
+ (tm->mask == ANY || c->x86_stepping == tm->mask))
return tm->tjmax;
}
/* Early chips have no MSR for TjMax */
- if (c->x86_model == 0xf && c->x86_mask < 4)
+ if (c->x86_model == 0xf && c->x86_stepping < 4)
usemsr_ee = 0;
if (c->x86_model > 0xe && usemsr_ee) {
@@ -426,7 +426,7 @@ static int chk_ucode_version(unsigned int cpu)
* Readings might stop update when processor visited too deep sleep,
* fixed for stepping D0 (6EC).
*/
- if (c->x86_model == 0xe && c->x86_mask < 0xc && c->microcode < 0x39) {
+ if (c->x86_model == 0xe && c->x86_stepping < 0xc && c->microcode < 0x39) {
pr_err("Errata AE18 not fixed, update BIOS or microcode of the CPU!\n");
return -ENODEV;
}
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index ef91b8a67549..84e91286fc4f 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -293,7 +293,7 @@ u8 vid_which_vrm(void)
if (c->x86 < 6) /* Any CPU with family lower than 6 */
return 0; /* doesn't have VID */
- vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_mask, c->x86_vendor);
+ vrm_ret = find_vrm(c->x86, c->x86_model, c->x86_stepping, c->x86_vendor);
if (vrm_ret == 134)
vrm_ret = get_via_model_d_vrm();
if (vrm_ret == 0)
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index 06b4e1c78bd8..051a72eecb24 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -129,7 +129,10 @@ static ssize_t temp1_input_show(struct device *dev,
data->read_tempreg(data->pdev, &regval);
temp = (regval >> 21) * 125;
- temp -= data->temp_offset;
+ if (temp > data->temp_offset)
+ temp -= data->temp_offset;
+ else
+ temp = 0;
return sprintf(buf, "%u\n", temp);
}
@@ -227,7 +230,7 @@ static bool has_erratum_319(struct pci_dev *pdev)
* and AM3 formats, but that's the best we can do.
*/
return boot_cpu_data.x86_model < 4 ||
- (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask <= 2);
+ (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2);
}
static int k10temp_probe(struct pci_dev *pdev,
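The k10temp hunk guards the offset subtraction because temp is unsigned: if the fixed offset exceeds the raw reading, plain subtraction would wrap around to a huge value instead of reporting zero. A tiny standalone example of the clamp:

#include <stdio.h>

/* Subtract a fixed offset from an unsigned millidegree reading, clamping at 0. */
static unsigned int apply_offset(unsigned int temp, unsigned int offset)
{
	return temp > offset ? temp - offset : 0;
}

int main(void)
{
	printf("%u\n", apply_offset(20125, 49000));	/* prints 0, not ~4.29e9 */
	return 0;
}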
diff --git a/drivers/hwmon/k8temp.c b/drivers/hwmon/k8temp.c
index 5a632bcf869b..e59f9113fb93 100644
--- a/drivers/hwmon/k8temp.c
+++ b/drivers/hwmon/k8temp.c
@@ -187,7 +187,7 @@ static int k8temp_probe(struct pci_dev *pdev,
return -ENOMEM;
model = boot_cpu_data.x86_model;
- stepping = boot_cpu_data.x86_mask;
+ stepping = boot_cpu_data.x86_stepping;
/* feature available since SH-C0, exclude older revisions */
if ((model == 4 && stepping == 0) ||
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index a9805c7cb305..e2954fb86d65 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -123,8 +123,10 @@ config I2C_I801
Wildcat Point (PCH)
Wildcat Point-LP (PCH)
BayTrail (SOC)
+ Braswell (SOC)
Sunrise Point-H (PCH)
Sunrise Point-LP (PCH)
+ Kaby Lake-H (PCH)
DNV (SOC)
Broxton (SOC)
Lewisburg (PCH)
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index cd07a69e2e93..44deae78913e 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -50,6 +50,9 @@
#define BCM2835_I2C_S_CLKT BIT(9)
#define BCM2835_I2C_S_LEN BIT(10) /* Fake bit for SW error reporting */
+#define BCM2835_I2C_FEDL_SHIFT 16
+#define BCM2835_I2C_REDL_SHIFT 0
+
#define BCM2835_I2C_CDIV_MIN 0x0002
#define BCM2835_I2C_CDIV_MAX 0xFFFE
@@ -81,7 +84,7 @@ static inline u32 bcm2835_i2c_readl(struct bcm2835_i2c_dev *i2c_dev, u32 reg)
static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev)
{
- u32 divider;
+ u32 divider, redl, fedl;
divider = DIV_ROUND_UP(clk_get_rate(i2c_dev->clk),
i2c_dev->bus_clk_rate);
@@ -100,6 +103,22 @@ static int bcm2835_i2c_set_divider(struct bcm2835_i2c_dev *i2c_dev)
bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DIV, divider);
+ /*
+ * Number of core clocks to wait after falling edge before
+ * outputting the next data bit. Note that both FEDL and REDL
+ * can't be greater than CDIV/2.
+ */
+ fedl = max(divider / 16, 1u);
+
+ /*
+ * Number of core clocks to wait after rising edge before
+ * sampling the next incoming data bit.
+ */
+ redl = max(divider / 4, 1u);
+
+ bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_DEL,
+ (fedl << BCM2835_I2C_FEDL_SHIFT) |
+ (redl << BCM2835_I2C_REDL_SHIFT));
return 0;
}
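The added lines derive both data delays from the clock divider that was just computed and program them with a single register write. A standalone sketch of the arithmetic, redefining the two shift values with the same constants the patch adds:

#include <stdint.h>
#include <stdio.h>

#define FEDL_SHIFT	16
#define REDL_SHIFT	0

/*
 * Falling-edge delay: core clocks to wait before driving the next bit;
 * rising-edge delay: core clocks to wait before sampling the next bit.
 * Both are clamped to at least 1, as in the patch.
 */
static uint32_t del_from_divider(uint32_t divider)
{
	uint32_t fedl = divider / 16 ? divider / 16 : 1;
	uint32_t redl = divider / 4 ? divider / 4 : 1;

	return (fedl << FEDL_SHIFT) | (redl << REDL_SHIFT);
}

int main(void)
{
	/* e.g. a 250 MHz core clock at a 100 kHz bus rate -> divider 2500 */
	printf("DEL = 0x%08x\n", del_from_divider(2500));
	return 0;
}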
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index ae691884d071..05732531829f 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -209,7 +209,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
i2c_dw_disable_int(dev);
/* Enable the adapter */
- __i2c_dw_enable(dev, true);
+ __i2c_dw_enable_and_wait(dev, true);
/* Clear and enable interrupts */
dw_readl(dev, DW_IC_CLR_INTR);
@@ -644,7 +644,7 @@ static int i2c_dw_init_recovery_info(struct dw_i2c_dev *dev)
gpio = devm_gpiod_get(dev->dev, "scl", GPIOD_OUT_HIGH);
if (IS_ERR(gpio)) {
r = PTR_ERR(gpio);
- if (r == -ENOENT)
+ if (r == -ENOENT || r == -ENOSYS)
return 0;
return r;
}
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 8eac00efadc1..692b34125866 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -58,6 +58,7 @@
* Wildcat Point (PCH) 0x8ca2 32 hard yes yes yes
* Wildcat Point-LP (PCH) 0x9ca2 32 hard yes yes yes
* BayTrail (SOC) 0x0f12 32 hard yes yes yes
+ * Braswell (SOC) 0x2292 32 hard yes yes yes
* Sunrise Point-H (PCH) 0xa123 32 hard yes yes yes
* Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes
* DNV (SOC) 0x19df 32 hard yes yes yes
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
index 2fd8b6d00391..87197ece0f90 100644
--- a/drivers/i2c/busses/i2c-sirf.c
+++ b/drivers/i2c/busses/i2c-sirf.c
@@ -341,7 +341,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, adap);
init_completion(&siic->done);
- /* Controller Initalisation */
+ /* Controller initialisation */
writel(SIRFSOC_I2C_RESET, siic->base + SIRFSOC_I2C_CTRL);
while (readl(siic->base + SIRFSOC_I2C_CTRL) & SIRFSOC_I2C_RESET)
@@ -369,7 +369,7 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
* but they start to affect the speed when clock is set to faster
* frequencies.
* Through the actual tests, use the different user_div value(which
- * in the divider formular 'Fio / (Fi2c * user_div)') to adapt
+ * in the divider formula 'Fio / (Fi2c * user_div)') to adapt
* the different ranges of i2c bus clock frequency, to make the SCL
* more accurate.
*/
diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c
index 327a49ba1991..9515ca165dfd 100644
--- a/drivers/iio/adc/aspeed_adc.c
+++ b/drivers/iio/adc/aspeed_adc.c
@@ -243,7 +243,7 @@ static int aspeed_adc_probe(struct platform_device *pdev)
ASPEED_ADC_INIT_POLLING_TIME,
ASPEED_ADC_INIT_TIMEOUT);
if (ret)
- goto scaler_error;
+ goto poll_timeout_error;
}
/* Start all channels in normal mode. */
@@ -274,9 +274,10 @@ iio_register_error:
writel(ASPEED_OPERATION_MODE_POWER_DOWN,
data->base + ASPEED_REG_ENGINE_CONTROL);
clk_disable_unprepare(data->clk_scaler->clk);
-reset_error:
- reset_control_assert(data->rst);
clk_enable_error:
+poll_timeout_error:
+ reset_control_assert(data->rst);
+reset_error:
clk_hw_unregister_divider(data->clk_scaler);
scaler_error:
clk_hw_unregister_divider(data->clk_prescaler);
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 7f5def465340..9a2583caedaa 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -722,8 +722,6 @@ static int stm32h7_adc_enable(struct stm32_adc *adc)
int ret;
u32 val;
- /* Clear ADRDY by writing one, then enable ADC */
- stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY);
stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN);
/* Poll for ADRDY to be set (after adc startup time) */
@@ -731,8 +729,11 @@ static int stm32h7_adc_enable(struct stm32_adc *adc)
val & STM32H7_ADRDY,
100, STM32_ADC_TIMEOUT_US);
if (ret) {
- stm32_adc_clr_bits(adc, STM32H7_ADC_CR, STM32H7_ADEN);
+ stm32_adc_set_bits(adc, STM32H7_ADC_CR, STM32H7_ADDIS);
dev_err(&indio_dev->dev, "Failed to enable ADC\n");
+ } else {
+ /* Clear ADRDY by writing one */
+ stm32_adc_set_bits(adc, STM32H7_ADC_ISR, STM32H7_ADRDY);
}
return ret;
diff --git a/drivers/iio/imu/adis_trigger.c b/drivers/iio/imu/adis_trigger.c
index 0dd5a381be64..457372f36791 100644
--- a/drivers/iio/imu/adis_trigger.c
+++ b/drivers/iio/imu/adis_trigger.c
@@ -46,6 +46,10 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
if (adis->trig == NULL)
return -ENOMEM;
+ adis->trig->dev.parent = &adis->spi->dev;
+ adis->trig->ops = &adis_trigger_ops;
+ iio_trigger_set_drvdata(adis->trig, adis);
+
ret = request_irq(adis->spi->irq,
&iio_trigger_generic_data_rdy_poll,
IRQF_TRIGGER_RISING,
@@ -54,9 +58,6 @@ int adis_probe_trigger(struct adis *adis, struct iio_dev *indio_dev)
if (ret)
goto error_free_trig;
- adis->trig->dev.parent = &adis->spi->dev;
- adis->trig->ops = &adis_trigger_ops;
- iio_trigger_set_drvdata(adis->trig, adis);
ret = iio_trigger_register(adis->trig);
indio_dev->trig = iio_trigger_get(adis->trig);
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index 79abf70a126d..cd5bfe39591b 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -175,7 +175,7 @@ __poll_t iio_buffer_poll(struct file *filp,
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
- if (!indio_dev->info)
+ if (!indio_dev->info || rb == NULL)
return 0;
poll_wait(filp, &rb->pollq, wait);
diff --git a/drivers/iio/proximity/Kconfig b/drivers/iio/proximity/Kconfig
index fcb1c4ba5e41..f726f9427602 100644
--- a/drivers/iio/proximity/Kconfig
+++ b/drivers/iio/proximity/Kconfig
@@ -68,6 +68,8 @@ config SX9500
config SRF08
tristate "Devantech SRF02/SRF08/SRF10 ultrasonic ranger sensor"
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
depends on I2C
help
Say Y here to build a driver for Devantech SRF02/SRF08/SRF10
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index c4560d84dfae..25bb178f6074 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -305,16 +305,21 @@ void nldev_exit(void);
static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
struct ib_pd *pd,
struct ib_qp_init_attr *attr,
- struct ib_udata *udata)
+ struct ib_udata *udata,
+ struct ib_uobject *uobj)
{
struct ib_qp *qp;
+ if (!dev->create_qp)
+ return ERR_PTR(-EOPNOTSUPP);
+
qp = dev->create_qp(pd, attr, udata);
if (IS_ERR(qp))
return qp;
qp->device = dev;
qp->pd = pd;
+ qp->uobject = uobj;
/*
* We don't track XRC QPs for now, because they don't have PD
* and more importantly they are created internaly by driver,
diff --git a/drivers/infiniband/core/rdma_core.c b/drivers/infiniband/core/rdma_core.c
index 85b5ee4defa4..d8eead5d106d 100644
--- a/drivers/infiniband/core/rdma_core.c
+++ b/drivers/infiniband/core/rdma_core.c
@@ -141,7 +141,12 @@ static struct ib_uobject *alloc_uobj(struct ib_ucontext *context,
*/
uobj->context = context;
uobj->type = type;
- atomic_set(&uobj->usecnt, 0);
+ /*
+ * Allocated objects start out as write locked to deny any other
+ * syscalls from accessing them until they are committed. See
+ * rdma_alloc_commit_uobject
+ */
+ atomic_set(&uobj->usecnt, -1);
kref_init(&uobj->ref);
return uobj;
@@ -196,7 +201,15 @@ static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *t
goto free;
}
- uverbs_uobject_get(uobj);
+ /*
+ * The idr_find is guaranteed to return a pointer to something that
+ * isn't freed yet, or NULL, as the free after idr_remove goes through
+ * kfree_rcu(). However the object may still have been released and
+ * kfree() could be called at any time.
+ */
+ if (!kref_get_unless_zero(&uobj->ref))
+ uobj = ERR_PTR(-ENOENT);
+
free:
rcu_read_unlock();
return uobj;
@@ -399,13 +412,13 @@ static int __must_check remove_commit_fd_uobject(struct ib_uobject *uobj,
return ret;
}
-static void lockdep_check(struct ib_uobject *uobj, bool exclusive)
+static void assert_uverbs_usecnt(struct ib_uobject *uobj, bool exclusive)
{
#ifdef CONFIG_LOCKDEP
if (exclusive)
- WARN_ON(atomic_read(&uobj->usecnt) > 0);
+ WARN_ON(atomic_read(&uobj->usecnt) != -1);
else
- WARN_ON(atomic_read(&uobj->usecnt) == -1);
+ WARN_ON(atomic_read(&uobj->usecnt) <= 0);
#endif
}
@@ -444,7 +457,7 @@ int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj)
WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
return 0;
}
- lockdep_check(uobj, true);
+ assert_uverbs_usecnt(uobj, true);
ret = _rdma_remove_commit_uobject(uobj, RDMA_REMOVE_DESTROY);
up_read(&ucontext->cleanup_rwsem);
@@ -474,16 +487,17 @@ int rdma_explicit_destroy(struct ib_uobject *uobject)
WARN(true, "ib_uverbs: Cleanup is running while removing an uobject\n");
return 0;
}
- lockdep_check(uobject, true);
+ assert_uverbs_usecnt(uobject, true);
ret = uobject->type->type_class->remove_commit(uobject,
RDMA_REMOVE_DESTROY);
if (ret)
- return ret;
+ goto out;
uobject->type = &null_obj_type;
+out:
up_read(&ucontext->cleanup_rwsem);
- return 0;
+ return ret;
}
static void alloc_commit_idr_uobject(struct ib_uobject *uobj)
@@ -527,6 +541,10 @@ int rdma_alloc_commit_uobject(struct ib_uobject *uobj)
return ret;
}
+ /* matches atomic_set(-1) in alloc_uobj */
+ assert_uverbs_usecnt(uobj, true);
+ atomic_set(&uobj->usecnt, 0);
+
uobj->type->type_class->alloc_commit(uobj);
up_read(&uobj->context->cleanup_rwsem);
@@ -561,7 +579,7 @@ static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)
void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
{
- lockdep_check(uobj, exclusive);
+ assert_uverbs_usecnt(uobj, exclusive);
uobj->type->type_class->lookup_put(uobj, exclusive);
/*
* In order to unlock an object, either decrease its usecnt for
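The usecnt changes above encode the lock state in one counter: -1 for an exclusive holder (including freshly allocated, not-yet-committed objects), 0 for unlocked, and a positive count for shared readers. A standalone C11-atomics sketch of that convention; this is only the counting scheme, not the kernel implementation, which additionally relies on kref and RCU for lifetime:

#include <stdatomic.h>
#include <stdbool.h>

struct uobj {
	atomic_int usecnt;	/* -1 exclusive, 0 unlocked, >0 shared */
};

static void uobj_init_uncommitted(struct uobj *u)
{
	atomic_init(&u->usecnt, -1);	/* born write-locked until committed */
}

static bool uobj_try_read(struct uobj *u)
{
	int cur = atomic_load(&u->usecnt);

	while (cur >= 0)
		if (atomic_compare_exchange_weak(&u->usecnt, &cur, cur + 1))
			return true;
	return false;			/* exclusively held */
}

static bool uobj_try_write(struct uobj *u)
{
	int expected = 0;

	return atomic_compare_exchange_strong(&u->usecnt, &expected, -1);
}

static void uobj_commit(struct uobj *u)
{
	/* matches the atomic_set(&uobj->usecnt, 0) after the -1 assertion */
	atomic_store(&u->usecnt, 0);
}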
diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
index 857637bf46da..3dbc4e4cca41 100644
--- a/drivers/infiniband/core/restrack.c
+++ b/drivers/infiniband/core/restrack.c
@@ -7,7 +7,6 @@
#include <rdma/restrack.h>
#include <linux/mutex.h>
#include <linux/sched/task.h>
-#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
void rdma_restrack_init(struct rdma_restrack_root *res)
@@ -63,7 +62,6 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
{
enum rdma_restrack_type type = res->type;
struct ib_device *dev;
- struct ib_xrcd *xrcd;
struct ib_pd *pd;
struct ib_cq *cq;
struct ib_qp *qp;
@@ -81,10 +79,6 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
qp = container_of(res, struct ib_qp, res);
dev = qp->device;
break;
- case RDMA_RESTRACK_XRCD:
- xrcd = container_of(res, struct ib_xrcd, res);
- dev = xrcd->device;
- break;
default:
WARN_ONCE(true, "Wrong resource tracking type %u\n", type);
return NULL;
@@ -93,6 +87,21 @@ static struct ib_device *res_to_dev(struct rdma_restrack_entry *res)
return dev;
}
+static bool res_is_user(struct rdma_restrack_entry *res)
+{
+ switch (res->type) {
+ case RDMA_RESTRACK_PD:
+ return container_of(res, struct ib_pd, res)->uobject;
+ case RDMA_RESTRACK_CQ:
+ return container_of(res, struct ib_cq, res)->uobject;
+ case RDMA_RESTRACK_QP:
+ return container_of(res, struct ib_qp, res)->uobject;
+ default:
+ WARN_ONCE(true, "Wrong resource tracking type %u\n", res->type);
+ return false;
+ }
+}
+
void rdma_restrack_add(struct rdma_restrack_entry *res)
{
struct ib_device *dev = res_to_dev(res);
@@ -100,7 +109,7 @@ void rdma_restrack_add(struct rdma_restrack_entry *res)
if (!dev)
return;
- if (!uaccess_kernel()) {
+ if (res_is_user(res)) {
get_task_struct(current);
res->task = current;
res->kern_name = NULL;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 256934d1f64f..a148de35df8d 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -562,9 +562,10 @@ ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
if (f.file)
fdput(f);
+ mutex_unlock(&file->device->xrcd_tree_mutex);
+
uobj_alloc_commit(&obj->uobject);
- mutex_unlock(&file->device->xrcd_tree_mutex);
return in_len;
err_copy:
@@ -603,10 +604,8 @@ ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
file->ucontext);
- if (IS_ERR(uobj)) {
- mutex_unlock(&file->device->xrcd_tree_mutex);
+ if (IS_ERR(uobj))
return PTR_ERR(uobj);
- }
ret = uobj_remove_commit(uobj);
return ret ?: in_len;
@@ -979,6 +978,9 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
struct ib_uverbs_ex_create_cq_resp resp;
struct ib_cq_init_attr attr = {};
+ if (!ib_dev->create_cq)
+ return ERR_PTR(-EOPNOTSUPP);
+
if (cmd->comp_vector >= file->device->num_comp_vectors)
return ERR_PTR(-EINVAL);
@@ -1030,14 +1032,14 @@ static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
resp.response_length = offsetof(typeof(resp), response_length) +
sizeof(resp.response_length);
+ cq->res.type = RDMA_RESTRACK_CQ;
+ rdma_restrack_add(&cq->res);
+
ret = cb(file, obj, &resp, ucore, context);
if (ret)
goto err_cb;
uobj_alloc_commit(&obj->uobject);
- cq->res.type = RDMA_RESTRACK_CQ;
- rdma_restrack_add(&cq->res);
-
return obj;
err_cb:
@@ -1518,7 +1520,8 @@ static int create_qp(struct ib_uverbs_file *file,
if (cmd->qp_type == IB_QPT_XRC_TGT)
qp = ib_create_qp(pd, &attr);
else
- qp = _ib_create_qp(device, pd, &attr, uhw);
+ qp = _ib_create_qp(device, pd, &attr, uhw,
+ &obj->uevent.uobject);
if (IS_ERR(qp)) {
ret = PTR_ERR(qp);
@@ -1550,8 +1553,10 @@ static int create_qp(struct ib_uverbs_file *file,
atomic_inc(&attr.srq->usecnt);
if (ind_tbl)
atomic_inc(&ind_tbl->usecnt);
+ } else {
+ /* It is done in _ib_create_qp for other QP types */
+ qp->uobject = &obj->uevent.uobject;
}
- qp->uobject = &obj->uevent.uobject;
obj->uevent.uobject.object = qp;
@@ -1971,8 +1976,15 @@ static int modify_qp(struct ib_uverbs_file *file,
goto release_qp;
}
+ if ((cmd->base.attr_mask & IB_QP_AV) &&
+ !rdma_is_port_valid(qp->device, cmd->base.dest.port_num)) {
+ ret = -EINVAL;
+ goto release_qp;
+ }
+
if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
- !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) {
+ (!rdma_is_port_valid(qp->device, cmd->base.alt_port_num) ||
+ !rdma_is_port_valid(qp->device, cmd->base.alt_dest.port_num))) {
ret = -EINVAL;
goto release_qp;
}
@@ -2941,6 +2953,11 @@ int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
wq_init_attr.create_flags = cmd.create_flags;
obj->uevent.events_reported = 0;
INIT_LIST_HEAD(&obj->uevent.event_list);
+
+ if (!pd->device->create_wq) {
+ err = -EOPNOTSUPP;
+ goto err_put_cq;
+ }
wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
if (IS_ERR(wq)) {
err = PTR_ERR(wq);
@@ -3084,7 +3101,12 @@ int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
wq_attr.flags = cmd.flags;
wq_attr.flags_mask = cmd.flags_mask;
}
+ if (!wq->device->modify_wq) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
+out:
uobj_put_obj_read(wq);
return ret;
}
@@ -3181,6 +3203,11 @@ int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
init_attr.ind_tbl = wqs;
+
+ if (!ib_dev->create_rwq_ind_table) {
+ err = -EOPNOTSUPP;
+ goto err_uobj;
+ }
rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);
if (IS_ERR(rwq_ind_tbl)) {
@@ -3770,6 +3797,9 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
struct ib_device_attr attr = {0};
int err;
+ if (!ib_dev->query_device)
+ return -EOPNOTSUPP;
+
if (ucore->inlen < sizeof(cmd))
return -EINVAL;
diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
index d96dc1d17be1..339b85145044 100644
--- a/drivers/infiniband/core/uverbs_ioctl.c
+++ b/drivers/infiniband/core/uverbs_ioctl.c
@@ -59,6 +59,9 @@ static int uverbs_process_attr(struct ib_device *ibdev,
return 0;
}
+ if (test_bit(attr_id, attr_bundle_h->valid_bitmap))
+ return -EINVAL;
+
spec = &attr_spec_bucket->attrs[attr_id];
e = &elements[attr_id];
e->uattr = uattr_ptr;
diff --git a/drivers/infiniband/core/uverbs_ioctl_merge.c b/drivers/infiniband/core/uverbs_ioctl_merge.c
index 062485f9300d..62e1eb1d2a28 100644
--- a/drivers/infiniband/core/uverbs_ioctl_merge.c
+++ b/drivers/infiniband/core/uverbs_ioctl_merge.c
@@ -114,6 +114,7 @@ static size_t get_elements_above_id(const void **iters,
short min = SHRT_MAX;
const void *elem;
int i, j, last_stored = -1;
+ unsigned int equal_min = 0;
for_each_element(elem, i, j, elements, num_elements, num_offset,
data_offset) {
@@ -136,6 +137,10 @@ static size_t get_elements_above_id(const void **iters,
*/
iters[last_stored == i ? num_iters - 1 : num_iters++] = elem;
last_stored = i;
+ if (min == GET_ID(id))
+ equal_min++;
+ else
+ equal_min = 1;
min = GET_ID(id);
}
@@ -146,15 +151,10 @@ static size_t get_elements_above_id(const void **iters,
* Therefore, we need to clean the beginning of the array to make sure
* all ids of final elements are equal to min.
*/
- for (i = num_iters - 1; i >= 0 &&
- GET_ID(*(u16 *)(iters[i] + id_offset)) == min; i--)
- ;
-
- num_iters -= i + 1;
- memmove(iters, iters + i + 1, sizeof(*iters) * num_iters);
+ memmove(iters, iters + num_iters - equal_min, sizeof(*iters) * equal_min);
*min_id = min;
- return num_iters;
+ return equal_min;
}
#define find_max_element_entry_id(num_elements, elements, num_objects_fld, \
@@ -322,7 +322,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me
hash = kzalloc(sizeof(*hash) +
ALIGN(sizeof(*hash->attrs) * (attr_max_bucket + 1),
sizeof(long)) +
- BITS_TO_LONGS(attr_max_bucket) * sizeof(long),
+ BITS_TO_LONGS(attr_max_bucket + 1) * sizeof(long),
GFP_KERNEL);
if (!hash) {
res = -ENOMEM;
@@ -509,7 +509,7 @@ static struct uverbs_object_spec *build_object_with_methods(const struct uverbs_
* first handler which != NULL. This also defines the
* set of flags used for this handler.
*/
- for (i = num_object_defs - 1;
+ for (i = num_method_defs - 1;
i >= 0 && !method_defs[i]->handler; i--)
;
hash->methods[min_id++] = method;
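The get_elements_above_id() change above replaces the backward rescan with a running count of how many of the most recently stored entries share the current minimum id, then compacts exactly that run to the front. A toy, self-contained version of the counting idea is sketched below; it works on plain int ids and omits the per-iterator bookkeeping of the real function.

#include <limits.h>
#include <stdio.h>
#include <string.h>

/*
 * While appending candidate entries, track how many of the most recently
 * appended ones carry the current minimum id, then keep only that trailing
 * run by compacting it to the front of the output array.
 */
static size_t keep_min_entries(const int *ids, size_t n, int *out, int *min_id)
{
	size_t num = 0, equal_min = 0;
	int min = INT_MAX;

	for (size_t i = 0; i < n; i++) {
		if (ids[i] > min)		/* larger ids are never kept */
			continue;
		out[num++] = ids[i];
		if (ids[i] == min)
			equal_min++;		/* another entry tied with the minimum */
		else
			equal_min = 1;		/* a smaller minimum starts a new run */
		min = ids[i];
	}
	/* Keep only the trailing run whose id equals the final minimum. */
	memmove(out, out + num - equal_min, equal_min * sizeof(*out));
	*min_id = min;
	return equal_min;
}

int main(void)
{
	int ids[] = { 7, 3, 9, 3, 5, 3 };
	int out[6], min_id;
	size_t kept = keep_min_entries(ids, 6, out, &min_id);

	printf("kept %zu entries with id %d\n", kept, min_id);
	return 0;
}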
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 395a3b091229..b1ca223aa380 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -650,12 +650,21 @@ static int verify_command_mask(struct ib_device *ib_dev, __u32 command)
return -1;
}
+static bool verify_command_idx(u32 command, bool extended)
+{
+ if (extended)
+ return command < ARRAY_SIZE(uverbs_ex_cmd_table);
+
+ return command < ARRAY_SIZE(uverbs_cmd_table);
+}
+
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
size_t count, loff_t *pos)
{
struct ib_uverbs_file *file = filp->private_data;
struct ib_device *ib_dev;
struct ib_uverbs_cmd_hdr hdr;
+ bool extended_command;
__u32 command;
__u32 flags;
int srcu_key;
@@ -688,6 +697,15 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
}
command = hdr.command & IB_USER_VERBS_CMD_COMMAND_MASK;
+ flags = (hdr.command &
+ IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
+
+ extended_command = flags & IB_USER_VERBS_CMD_FLAG_EXTENDED;
+ if (!verify_command_idx(command, extended_command)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (verify_command_mask(ib_dev, command)) {
ret = -EOPNOTSUPP;
goto out;
@@ -699,12 +717,8 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
goto out;
}
- flags = (hdr.command &
- IB_USER_VERBS_CMD_FLAGS_MASK) >> IB_USER_VERBS_CMD_FLAGS_SHIFT;
-
if (!flags) {
- if (command >= ARRAY_SIZE(uverbs_cmd_table) ||
- !uverbs_cmd_table[command]) {
+ if (!uverbs_cmd_table[command]) {
ret = -EINVAL;
goto out;
}
@@ -725,8 +739,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
struct ib_udata uhw;
size_t written_count = count;
- if (command >= ARRAY_SIZE(uverbs_ex_cmd_table) ||
- !uverbs_ex_cmd_table[command]) {
+ if (!uverbs_ex_cmd_table[command]) {
ret = -ENOSYS;
goto out;
}
@@ -942,6 +955,7 @@ static const struct file_operations uverbs_fops = {
.llseek = no_llseek,
#if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS)
.unlocked_ioctl = ib_uverbs_ioctl,
+ .compat_ioctl = ib_uverbs_ioctl,
#endif
};
@@ -954,6 +968,7 @@ static const struct file_operations uverbs_mmap_fops = {
.llseek = no_llseek,
#if IS_ENABLED(CONFIG_INFINIBAND_EXP_USER_ACCESS)
.unlocked_ioctl = ib_uverbs_ioctl,
+ .compat_ioctl = ib_uverbs_ioctl,
#endif
};
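Earlier in this file, ib_uverbs_write() now decodes the flags field before indexing either command table, so verify_command_idx() can bound-check the index against the table that will actually be used. A small standalone sketch of that decode-then-bound-check order follows; the mask, shift and table sizes are illustrative placeholders rather than the uverbs ABI constants.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative values only; the real masks/shifts come from the uverbs ABI. */
#define CMD_COMMAND_MASK   0x00ffu
#define CMD_FLAGS_MASK     0xff000000u
#define CMD_FLAGS_SHIFT    24
#define CMD_FLAG_EXTENDED  0x80u

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static int cmd_table[64];	/* stand-in for uverbs_cmd_table */
static int ex_cmd_table[32];	/* stand-in for uverbs_ex_cmd_table */

static bool verify_command_idx(uint32_t command, bool extended)
{
	/* Pick the bound that matches the table the command will index. */
	if (extended)
		return command < ARRAY_SIZE(ex_cmd_table);
	return command < ARRAY_SIZE(cmd_table);
}

int main(void)
{
	uint32_t hdr_command = (CMD_FLAG_EXTENDED << CMD_FLAGS_SHIFT) | 40;
	uint32_t command = hdr_command & CMD_COMMAND_MASK;
	uint32_t flags = (hdr_command & CMD_FLAGS_MASK) >> CMD_FLAGS_SHIFT;
	bool extended = flags & CMD_FLAG_EXTENDED;

	printf("command %u %s\n", command,
	       verify_command_idx(command, extended) ? "in range" : "rejected");
	return 0;
}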
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index cab0ac3556eb..df1360e6774f 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -234,15 +234,18 @@ static void create_udata(struct uverbs_attr_bundle *ctx,
uverbs_attr_get(ctx, UVERBS_UHW_OUT);
if (!IS_ERR(uhw_in)) {
- udata->inbuf = uhw_in->ptr_attr.ptr;
udata->inlen = uhw_in->ptr_attr.len;
+ if (uverbs_attr_ptr_is_inline(uhw_in))
+ udata->inbuf = &uhw_in->uattr->data;
+ else
+ udata->inbuf = u64_to_user_ptr(uhw_in->ptr_attr.data);
} else {
udata->inbuf = NULL;
udata->inlen = 0;
}
if (!IS_ERR(uhw_out)) {
- udata->outbuf = uhw_out->ptr_attr.ptr;
+ udata->outbuf = u64_to_user_ptr(uhw_out->ptr_attr.data);
udata->outlen = uhw_out->ptr_attr.len;
} else {
udata->outbuf = NULL;
@@ -323,7 +326,8 @@ static int uverbs_create_cq_handler(struct ib_device *ib_dev,
cq->res.type = RDMA_RESTRACK_CQ;
rdma_restrack_add(&cq->res);
- ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe);
+ ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe,
+ sizeof(cq->cqe));
if (ret)
goto err_cq;
@@ -375,7 +379,7 @@ static int uverbs_destroy_cq_handler(struct ib_device *ib_dev,
resp.comp_events_reported = obj->comp_events_reported;
resp.async_events_reported = obj->async_events_reported;
- return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp);
+ return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp, sizeof(resp));
}
static DECLARE_UVERBS_METHOD(
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 16ebc6372c31..93025d2009b8 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -887,7 +887,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
if (qp_init_attr->cap.max_rdma_ctxs)
rdma_rw_init_qp(device, qp_init_attr);
- qp = _ib_create_qp(device, pd, qp_init_attr, NULL);
+ qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL);
if (IS_ERR(qp))
return qp;
@@ -898,7 +898,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
}
qp->real_qp = qp;
- qp->uobject = NULL;
qp->qp_type = qp_init_attr->qp_type;
qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index ca32057e886f..3eb7a8387116 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -120,7 +120,6 @@ struct bnxt_re_dev {
#define BNXT_RE_FLAG_HAVE_L2_REF 3
#define BNXT_RE_FLAG_RCFW_CHANNEL_EN 4
#define BNXT_RE_FLAG_QOS_WORK_REG 5
-#define BNXT_RE_FLAG_TASK_IN_PROG 6
#define BNXT_RE_FLAG_ISSUE_ROCE_STATS 29
struct net_device *netdev;
unsigned int version, major, minor;
@@ -158,6 +157,7 @@ struct bnxt_re_dev {
atomic_t srq_count;
atomic_t mr_count;
atomic_t mw_count;
+ atomic_t sched_count;
/* Max of 2 lossless traffic class supported per port */
u16 cosq[2];
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index ae9e9ff54826..643174d949a8 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -174,10 +174,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->max_pd = dev_attr->max_pd;
ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom;
ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom;
- if (dev_attr->is_atomic) {
- ib_attr->atomic_cap = IB_ATOMIC_HCA;
- ib_attr->masked_atomic_cap = IB_ATOMIC_HCA;
- }
+ ib_attr->atomic_cap = IB_ATOMIC_NONE;
+ ib_attr->masked_atomic_cap = IB_ATOMIC_NONE;
ib_attr->max_ee_rd_atom = 0;
ib_attr->max_res_rd_atom = 0;
@@ -787,20 +785,51 @@ int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
return 0;
}
+static unsigned long bnxt_re_lock_cqs(struct bnxt_re_qp *qp)
+ __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&qp->scq->cq_lock, flags);
+ if (qp->rcq != qp->scq)
+ spin_lock(&qp->rcq->cq_lock);
+ else
+ __acquire(&qp->rcq->cq_lock);
+
+ return flags;
+}
+
+static void bnxt_re_unlock_cqs(struct bnxt_re_qp *qp,
+ unsigned long flags)
+ __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
+{
+ if (qp->rcq != qp->scq)
+ spin_unlock(&qp->rcq->cq_lock);
+ else
+ __release(&qp->rcq->cq_lock);
+ spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
+}
+
/* Queue Pairs */
int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
{
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_re_dev *rdev = qp->rdev;
int rc;
+ unsigned int flags;
bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
- bnxt_qplib_del_flush_qp(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to destroy HW QP");
return rc;
}
+
+ flags = bnxt_re_lock_cqs(qp);
+ bnxt_qplib_clean_qp(&qp->qplib_qp);
+ bnxt_re_unlock_cqs(qp, flags);
+ bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
+
if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
rc = bnxt_qplib_destroy_ah(&rdev->qplib_res,
&rdev->sqp_ah->qplib_ah);
@@ -810,7 +839,7 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
return rc;
}
- bnxt_qplib_del_flush_qp(&qp->qplib_qp);
+ bnxt_qplib_clean_qp(&qp->qplib_qp);
rc = bnxt_qplib_destroy_qp(&rdev->qplib_res,
&rdev->qp1_sqp->qplib_qp);
if (rc) {
@@ -1069,6 +1098,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
goto fail;
}
qp->qplib_qp.scq = &cq->qplib_cq;
+ qp->scq = cq;
}
if (qp_init_attr->recv_cq) {
@@ -1080,6 +1110,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
goto fail;
}
qp->qplib_qp.rcq = &cq->qplib_cq;
+ qp->rcq = cq;
}
if (qp_init_attr->srq) {
@@ -1185,7 +1216,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
rc = bnxt_qplib_create_qp(&rdev->qplib_res, &qp->qplib_qp);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to create HW QP");
- goto fail;
+ goto free_umem;
}
}
@@ -1213,6 +1244,13 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
return &qp->ib_qp;
qp_destroy:
bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
+free_umem:
+ if (udata) {
+ if (qp->rumem)
+ ib_umem_release(qp->rumem);
+ if (qp->sumem)
+ ib_umem_release(qp->sumem);
+ }
fail:
kfree(qp);
return ERR_PTR(rc);
@@ -1603,7 +1641,7 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
dev_dbg(rdev_to_dev(rdev),
"Move QP = %p out of flush list\n",
qp);
- bnxt_qplib_del_flush_qp(&qp->qplib_qp);
+ bnxt_qplib_clean_qp(&qp->qplib_qp);
}
}
if (qp_attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) {
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 423ebe012f95..b88a48d43a9d 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -89,6 +89,8 @@ struct bnxt_re_qp {
/* QP1 */
u32 send_psn;
struct ib_ud_header qp1_hdr;
+ struct bnxt_re_cq *scq;
+ struct bnxt_re_cq *rcq;
};
struct bnxt_re_cq {
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 508d00a5a106..33a448036c2e 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -656,7 +656,6 @@ static void bnxt_re_dev_remove(struct bnxt_re_dev *rdev)
mutex_unlock(&bnxt_re_dev_lock);
synchronize_rcu();
- flush_workqueue(bnxt_re_wq);
ib_dealloc_device(&rdev->ibdev);
/* rdev is gone */
@@ -1441,7 +1440,7 @@ static void bnxt_re_task(struct work_struct *work)
break;
}
smp_mb__before_atomic();
- clear_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
+ atomic_dec(&rdev->sched_count);
kfree(re_work);
}
@@ -1503,7 +1502,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
/* netdev notifier will call NETDEV_UNREGISTER again later since
* we are still holding the reference to the netdev
*/
- if (test_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags))
+ if (atomic_read(&rdev->sched_count) > 0)
goto exit;
bnxt_re_ib_unreg(rdev, false);
bnxt_re_remove_one(rdev);
@@ -1523,7 +1522,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
re_work->vlan_dev = (real_dev == netdev ?
NULL : netdev);
INIT_WORK(&re_work->work, bnxt_re_task);
- set_bit(BNXT_RE_FLAG_TASK_IN_PROG, &rdev->flags);
+ atomic_inc(&rdev->sched_count);
queue_work(bnxt_re_wq, &re_work->work);
}
}
@@ -1578,6 +1577,11 @@ static void __exit bnxt_re_mod_exit(void)
*/
list_for_each_entry_safe_reverse(rdev, next, &to_be_deleted, list) {
dev_info(rdev_to_dev(rdev), "Unregistering Device");
+ /*
+ * Flush out any scheduled tasks before destroying the
+ * resources
+ */
+ flush_workqueue(bnxt_re_wq);
bnxt_re_dev_stop(rdev);
bnxt_re_ib_unreg(rdev, true);
bnxt_re_remove_one(rdev);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index 1b0e94697fe3..3ea5b9624f6b 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -173,7 +173,7 @@ static void __bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
}
}
-void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp)
+void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp)
{
unsigned long flags;
@@ -1419,7 +1419,6 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_destroy_qp req;
struct creq_destroy_qp_resp resp;
- unsigned long flags;
u16 cmd_flags = 0;
int rc;
@@ -1437,19 +1436,12 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
return rc;
}
- /* Must walk the associated CQs to nullified the QP ptr */
- spin_lock_irqsave(&qp->scq->hwq.lock, flags);
-
- __clean_cq(qp->scq, (u64)(unsigned long)qp);
-
- if (qp->rcq && qp->rcq != qp->scq) {
- spin_lock(&qp->rcq->hwq.lock);
- __clean_cq(qp->rcq, (u64)(unsigned long)qp);
- spin_unlock(&qp->rcq->hwq.lock);
- }
-
- spin_unlock_irqrestore(&qp->scq->hwq.lock, flags);
+ return 0;
+}
+void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_qp *qp)
+{
bnxt_qplib_free_qp_hdr_buf(res, qp);
bnxt_qplib_free_hwq(res->pdev, &qp->sq.hwq);
kfree(qp->sq.swq);
@@ -1462,7 +1454,6 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
if (qp->orrq.max_elements)
bnxt_qplib_free_hwq(res->pdev, &qp->orrq);
- return 0;
}
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 211b27a8f9e2..ca0a2ffa3509 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -478,6 +478,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp);
+void bnxt_qplib_clean_qp(struct bnxt_qplib_qp *qp);
+void bnxt_qplib_free_qp_res(struct bnxt_qplib_res *res,
+ struct bnxt_qplib_qp *qp);
void *bnxt_qplib_get_qp1_sq_buf(struct bnxt_qplib_qp *qp,
struct bnxt_qplib_sge *sge);
void *bnxt_qplib_get_qp1_rq_buf(struct bnxt_qplib_qp *qp,
@@ -500,7 +503,6 @@ void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
void bnxt_qplib_add_flush_qp(struct bnxt_qplib_qp *qp);
-void bnxt_qplib_del_flush_qp(struct bnxt_qplib_qp *qp);
void bnxt_qplib_acquire_cq_locks(struct bnxt_qplib_qp *qp,
unsigned long *flags);
void bnxt_qplib_release_cq_locks(struct bnxt_qplib_qp *qp,
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index c015c1861351..03057983341f 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -52,18 +52,6 @@ const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0,
/* Device */
-static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
-{
- int rc;
- u16 pcie_ctl2;
-
- rc = pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2,
- &pcie_ctl2);
- if (rc)
- return false;
- return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
-}
-
static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
char *fw_ver)
{
@@ -165,7 +153,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
}
- attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
+ attr->is_atomic = 0;
bail:
bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
return rc;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index faa9478c14a6..f95b97646c25 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -114,6 +114,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_cq *cmd = &req.create_cq;
struct pvrdma_cmd_create_cq_resp *resp = &rsp.create_cq_resp;
+ struct pvrdma_create_cq_resp cq_resp = {0};
struct pvrdma_create_cq ucmd;
BUILD_BUG_ON(sizeof(struct pvrdma_cqe) != 64);
@@ -197,6 +198,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
cq->ibcq.cqe = resp->cqe;
cq->cq_handle = resp->cq_handle;
+ cq_resp.cqn = resp->cq_handle;
spin_lock_irqsave(&dev->cq_tbl_lock, flags);
dev->cq_tbl[cq->cq_handle % dev->dsr->caps.max_cq] = cq;
spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);
@@ -205,7 +207,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
cq->uar = &(to_vucontext(context)->uar);
/* Copy udata back. */
- if (ib_copy_to_udata(udata, &cq->cq_handle, sizeof(__u32))) {
+ if (ib_copy_to_udata(udata, &cq_resp, sizeof(cq_resp))) {
dev_warn(&dev->pdev->dev,
"failed to copy back udata\n");
pvrdma_destroy_cq(&cq->ibcq);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index 5acebb1ef631..af235967a9c2 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -113,6 +113,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_srq *cmd = &req.create_srq;
struct pvrdma_cmd_create_srq_resp *resp = &rsp.create_srq_resp;
+ struct pvrdma_create_srq_resp srq_resp = {0};
struct pvrdma_create_srq ucmd;
unsigned long flags;
int ret;
@@ -204,12 +205,13 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
}
srq->srq_handle = resp->srqn;
+ srq_resp.srqn = resp->srqn;
spin_lock_irqsave(&dev->srq_tbl_lock, flags);
dev->srq_tbl[srq->srq_handle % dev->dsr->caps.max_srq] = srq;
spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
/* Copy udata back. */
- if (ib_copy_to_udata(udata, &srq->srq_handle, sizeof(__u32))) {
+ if (ib_copy_to_udata(udata, &srq_resp, sizeof(srq_resp))) {
dev_warn(&dev->pdev->dev, "failed to copy back udata\n");
pvrdma_destroy_srq(&srq->ibsrq);
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index 16b96616ef7e..a51463cd2f37 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -447,6 +447,7 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_pd *cmd = &req.create_pd;
struct pvrdma_cmd_create_pd_resp *resp = &rsp.create_pd_resp;
+ struct pvrdma_alloc_pd_resp pd_resp = {0};
int ret;
void *ptr;
@@ -475,9 +476,10 @@ struct ib_pd *pvrdma_alloc_pd(struct ib_device *ibdev,
pd->privileged = !context;
pd->pd_handle = resp->pd_handle;
pd->pdn = resp->pd_handle;
+ pd_resp.pdn = resp->pd_handle;
if (context) {
- if (ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
+ if (ib_copy_to_udata(udata, &pd_resp, sizeof(pd_resp))) {
dev_warn(&dev->pdev->dev,
"failed to copy back protection domain\n");
pvrdma_dealloc_pd(&pd->ibpd);
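The pvrdma hunks above switch from copying a bare __u32 handle to copying a zero-initialized response structure at its full ABI size, so padding and any future fields reach userspace as zeros. A hedged userspace sketch of the pattern follows; the struct layout and copy_to_udata() are illustrative, not the pvrdma uapi.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative response layout; the real one comes from the uapi header. */
struct create_pd_resp {
	uint32_t pdn;
	uint32_t reserved;	/* stays zero, so userspace never sees garbage */
};

/* Stand-in for ib_copy_to_udata(): copy the whole response object. */
static int copy_to_udata(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

int main(void)
{
	struct create_pd_resp resp = {0};	/* zero-init, as in the patch */
	struct create_pd_resp udata;		/* models the userspace buffer */

	resp.pdn = 42;
	if (copy_to_udata(&udata, &resp, sizeof(resp)))
		return 1;
	printf("copied %zu bytes, pdn=%u\n", sizeof(resp), (unsigned)udata.pdn);
	return 0;
}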
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index 11f74cbe6660..ea302b054601 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -281,8 +281,6 @@ void ipoib_delete_debug_files(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
- WARN_ONCE(!priv->mcg_dentry, "null mcg debug file\n");
- WARN_ONCE(!priv->path_dentry, "null path debug file\n");
debugfs_remove(priv->mcg_dentry);
debugfs_remove(priv->path_dentry);
priv->mcg_dentry = priv->path_dentry = NULL;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 35a408d0ae4f..99bc9bd64b9e 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -205,7 +205,7 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
* for example, an "address" value of 0x12345f000 will
* flush from 0x123440000 to 0x12347ffff (256KiB). */
unsigned long last = address + ((unsigned long)(pages - 1) << VTD_PAGE_SHIFT);
- unsigned long mask = __rounddown_pow_of_two(address ^ last);;
+ unsigned long mask = __rounddown_pow_of_two(address ^ last);
desc.high = QI_DEV_EIOTLB_ADDR((address & ~mask) | (mask - 1)) | QI_DEV_EIOTLB_SIZE;
} else {
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 55cfb986225b..faf734ff4cf3 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -339,9 +339,6 @@ int __init bcm7038_l1_of_init(struct device_node *dn,
goto out_unmap;
}
- pr_info("registered BCM7038 L1 intc (mem: 0x%p, IRQs: %d)\n",
- intc->cpus[0]->map_base, IRQS_PER_WORD * intc->n_words);
-
return 0;
out_unmap:
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index 983640eba418..8968e5e93fcb 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -318,9 +318,6 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
}
}
- pr_info("registered %s intc (mem: 0x%p, parent IRQ(s): %d)\n",
- intc_name, data->map_base[0], data->num_parent_irqs);
-
return 0;
out_free_domain:
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 691d20eb0bec..0e65f609352e 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -262,9 +262,6 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
ct->chip.irq_set_wake = irq_gc_set_wake;
}
- pr_info("registered L2 intc (mem: 0x%p, parent irq: %d)\n",
- base, parent_irq);
-
return 0;
out_free_domain:
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 993a8426a453..1ff38aff9f29 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -94,7 +94,7 @@ static struct irq_chip gicv2m_msi_irq_chip = {
static struct msi_domain_info gicv2m_msi_domain_info = {
.flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
- MSI_FLAG_PCI_MSIX),
+ MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
.chip = &gicv2m_msi_irq_chip,
};
@@ -155,18 +155,12 @@ static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
return 0;
}
-static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq)
+static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq,
+ int nr_irqs)
{
- int pos;
-
- pos = hwirq - v2m->spi_start;
- if (pos < 0 || pos >= v2m->nr_spis) {
- pr_err("Failed to teardown msi. Invalid hwirq %d\n", hwirq);
- return;
- }
-
spin_lock(&v2m_lock);
- __clear_bit(pos, v2m->bm);
+ bitmap_release_region(v2m->bm, hwirq - v2m->spi_start,
+ get_count_order(nr_irqs));
spin_unlock(&v2m_lock);
}
@@ -174,13 +168,13 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *args)
{
struct v2m_data *v2m = NULL, *tmp;
- int hwirq, offset, err = 0;
+ int hwirq, offset, i, err = 0;
spin_lock(&v2m_lock);
list_for_each_entry(tmp, &v2m_nodes, entry) {
- offset = find_first_zero_bit(tmp->bm, tmp->nr_spis);
- if (offset < tmp->nr_spis) {
- __set_bit(offset, tmp->bm);
+ offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis,
+ get_count_order(nr_irqs));
+ if (offset >= 0) {
v2m = tmp;
break;
}
@@ -192,16 +186,21 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
hwirq = v2m->spi_start + offset;
- err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq);
- if (err) {
- gicv2m_unalloc_msi(v2m, hwirq);
- return err;
- }
+ for (i = 0; i < nr_irqs; i++) {
+ err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
+ if (err)
+ goto fail;
- irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
- &gicv2m_irq_chip, v2m);
+ irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+ &gicv2m_irq_chip, v2m);
+ }
return 0;
+
+fail:
+ irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+ gicv2m_unalloc_msi(v2m, hwirq, get_count_order(nr_irqs));
+ return err;
}
static void gicv2m_irq_domain_free(struct irq_domain *domain,
@@ -210,8 +209,7 @@ static void gicv2m_irq_domain_free(struct irq_domain *domain,
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
struct v2m_data *v2m = irq_data_get_irq_chip_data(d);
- BUG_ON(nr_irqs != 1);
- gicv2m_unalloc_msi(v2m, d->hwirq);
+ gicv2m_unalloc_msi(v2m, d->hwirq, nr_irqs);
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
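The gicv2m allocator above hands out multi-MSI blocks with bitmap_find_free_region()/bitmap_release_region(), which operate on naturally aligned power-of-two regions; that is why both the allocation and the free path use get_count_order(nr_irqs). The standalone sketch below shows a simplified equivalent of that order calculation (the bitmap helpers themselves are kernel-only).

#include <stdio.h>

/* Order of the smallest power-of-two block that can hold count items,
 * a simplified equivalent of the kernel's get_count_order() for count >= 1. */
static int get_count_order(unsigned int count)
{
	int order = 0;

	while ((1u << order) < count)
		order++;
	return order;
}

int main(void)
{
	/* A request for N MSIs reserves a 2^order aligned block of SPIs. */
	for (unsigned int nr_irqs = 1; nr_irqs <= 8; nr_irqs++)
		printf("nr_irqs=%u -> order=%d (block of %u)\n",
		       nr_irqs, get_count_order(nr_irqs),
		       1u << get_count_order(nr_irqs));
	return 0;
}

For example, a request for three MSIs reserves an aligned block of four SPIs, and the same order is passed on the free side so the whole block is released at once.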
diff --git a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
index 13a5d9a1de96..4eca5c763766 100644
--- a/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
@@ -71,6 +71,8 @@ static int __init its_fsl_mc_msi_init(void)
for (np = of_find_matching_node(NULL, its_device_id); np;
np = of_find_matching_node(np, its_device_id)) {
+ if (!of_device_is_available(np))
+ continue;
if (!of_property_read_bool(np, "msi-controller"))
continue;
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index 14a8c0a7e095..25a98de5cfb2 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -132,6 +132,8 @@ static int __init its_pci_of_msi_init(void)
for (np = of_find_matching_node(NULL, its_device_id); np;
np = of_find_matching_node(np, its_device_id)) {
+ if (!of_device_is_available(np))
+ continue;
if (!of_property_read_bool(np, "msi-controller"))
continue;
diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
index 833a90fe33ae..8881a053c173 100644
--- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
@@ -154,6 +154,8 @@ static void __init its_pmsi_of_init(void)
for (np = of_find_matching_node(NULL, its_device_id); np;
np = of_find_matching_node(np, its_device_id)) {
+ if (!of_device_is_available(np))
+ continue;
if (!of_property_read_bool(np, "msi-controller"))
continue;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 06f025fd5726..1d3056f53747 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -3314,6 +3314,8 @@ static int __init its_of_probe(struct device_node *node)
for (np = of_find_matching_node(node, its_device_id); np;
np = of_find_matching_node(np, its_device_id)) {
+ if (!of_device_is_available(np))
+ continue;
if (!of_property_read_bool(np, "msi-controller")) {
pr_warn("%pOF: no msi-controller property, ITS ignored\n",
np);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index a57c0fbbd34a..d99cc07903ec 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -673,7 +673,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
MPIDR_TO_SGI_RS(cluster_id) |
tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
- pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
+ pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
gic_write_sgi1r(val);
}
@@ -688,7 +688,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
* Ensure that stores to Normal memory are visible to the
* other CPUs before issuing the IPI.
*/
- smp_wmb();
+ wmb();
for_each_cpu(cpu, mask) {
u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index ef92a4d2038e..d32268cc1174 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -424,8 +424,6 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
spin_lock_irqsave(&gic_lock, flags);
write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
- gic_clear_pcpu_masks(intr);
- set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
irq_data_update_effective_affinity(data, cpumask_of(cpu));
spin_unlock_irqrestore(&gic_lock, flags);
diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
index 62f541f968f6..07074820a167 100644
--- a/drivers/macintosh/macio_asic.c
+++ b/drivers/macintosh/macio_asic.c
@@ -375,6 +375,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
dev->ofdev.dev.of_node = np;
dev->ofdev.archdata.dma_mask = 0xffffffffUL;
dev->ofdev.dev.dma_mask = &dev->ofdev.archdata.dma_mask;
+ dev->ofdev.dev.coherent_dma_mask = dev->ofdev.archdata.dma_mask;
dev->ofdev.dev.parent = parent;
dev->ofdev.dev.bus = &macio_bus_type;
dev->ofdev.dev.release = macio_release_dev;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d6de00f367ef..68136806d365 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -903,7 +903,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
queue_io(md, bio);
} else {
/* done with normal IO or empty flush */
- bio->bi_status = io_error;
+ if (io_error)
+ bio->bi_status = io_error;
bio_endio(bio);
}
}
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index b2eae332e1a2..f978eddc7a21 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1108,7 +1108,7 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
bio_copy_data(behind_bio, bio);
skip_copy:
- r1_bio->behind_master_bio = behind_bio;;
+ r1_bio->behind_master_bio = behind_bio;
set_bit(R1BIO_BehindIO, &r1_bio->state);
return;
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 8d12017b9893..4470630dd545 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2687,6 +2687,8 @@ mptctl_hp_targetinfo(unsigned long arg)
__FILE__, __LINE__, iocnum);
return -ENODEV;
}
+ if (karg.hdr.id >= MPT_MAX_FC_DEVICES)
+ return -EINVAL;
dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_hp_targetinfo called.\n",
ioc->name));
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 3e5eabdae8d9..772d02922529 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -548,12 +548,6 @@ int mei_cldev_disable(struct mei_cl_device *cldev)
goto out;
}
- if (bus->dev_state == MEI_DEV_POWER_DOWN) {
- dev_dbg(bus->dev, "Device is powering down, don't bother with disconnection\n");
- err = 0;
- goto out;
- }
-
err = mei_cl_disconnect(cl);
if (err < 0)
dev_err(bus->dev, "Could not disconnect from the ME client\n");
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index be64969d986a..7e60c1817c31 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -945,6 +945,12 @@ int mei_cl_disconnect(struct mei_cl *cl)
return 0;
}
+ if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
+ mei_cl_set_disconnected(cl);
+ return 0;
+ }
+
rets = pm_runtime_get(dev->dev);
if (rets < 0 && rets != -EINPROGRESS) {
pm_runtime_put_noidle(dev->dev);
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 0ccccbaf530d..e4b10b2d1a08 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -132,6 +132,11 @@
#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
+#define MEI_DEV_ID_CNP_LP 0x9DE0 /* Cannon Point LP */
+#define MEI_DEV_ID_CNP_LP_4 0x9DE4 /* Cannon Point LP 4 (iTouch) */
+#define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */
+#define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 4a0ccda4d04b..ea4e152270a3 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -98,6 +98,11 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_LP_4, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c
index d9aa407db06a..337462e1569f 100644
--- a/drivers/misc/ocxl/file.c
+++ b/drivers/misc/ocxl/file.c
@@ -133,8 +133,10 @@ static long afu_ioctl(struct file *file, unsigned int cmd,
if (!rc) {
rc = copy_to_user((u64 __user *) args, &irq_offset,
sizeof(irq_offset));
- if (rc)
+ if (rc) {
ocxl_afu_irq_free(ctx, irq_offset);
+ return -EFAULT;
+ }
}
break;
@@ -277,7 +279,7 @@ static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
struct ocxl_context *ctx = file->private_data;
struct ocxl_kernel_event_header header;
ssize_t rc;
- size_t used = 0;
+ ssize_t used = 0;
DEFINE_WAIT(event_wait);
memset(&header, 0, sizeof(header));
@@ -329,7 +331,7 @@ static ssize_t afu_read(struct file *file, char __user *buf, size_t count,
used += sizeof(header);
- rc = (ssize_t) used;
+ rc = used;
return rc;
}
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 229dc18f0581..768972af8b85 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -1265,7 +1265,8 @@ static int bcm2835_add_host(struct bcm2835_host *host)
char pio_limit_string[20];
int ret;
- mmc->f_max = host->max_clk;
+ if (!mmc->f_max || mmc->f_max > host->max_clk)
+ mmc->f_max = host->max_clk;
mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV;
mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000);
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 22438ebfe4e6..4f972b879fe6 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -717,22 +717,6 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct meson_host *host = mmc_priv(mmc);
- int ret;
-
- /*
- * If this is the initial tuning, try to get a sane Rx starting
- * phase before doing the actual tuning.
- */
- if (!mmc->doing_retune) {
- ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
-
- if (ret)
- return ret;
- }
-
- ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk);
- if (ret)
- return ret;
return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
}
@@ -763,9 +747,8 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
if (!IS_ERR(mmc->supply.vmmc))
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
- /* Reset phases */
+ /* Reset rx phase */
clk_set_phase(host->rx_clk, 0);
- clk_set_phase(host->tx_clk, 270);
break;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index e6b8c59f2c0d..736ac887303c 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -328,7 +328,7 @@ config MTD_NAND_MARVELL
tristate "NAND controller support on Marvell boards"
depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \
COMPILE_TEST
- depends on HAS_IOMEM
+ depends on HAS_IOMEM && HAS_DMA
help
This enables the NAND flash controller driver for Marvell boards,
including:
diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
index 80d31a58e558..f367144f3c6f 100644
--- a/drivers/mtd/nand/vf610_nfc.c
+++ b/drivers/mtd/nand/vf610_nfc.c
@@ -752,10 +752,8 @@ static int vf610_nfc_probe(struct platform_device *pdev)
if (mtd->oobsize > 64)
mtd->oobsize = 64;
- /*
- * mtd->ecclayout is not specified here because we're using the
- * default large page ECC layout defined in NAND core.
- */
+ /* Use default large page ECC layout defined in NAND core */
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
if (chip->ecc.strength == 32) {
nfc->ecc_mode = ECC_60_BYTE;
chip->ecc.bytes = 60;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index 3e5833cf1fab..eb23f9ba1a9a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -426,6 +426,8 @@ static int xgbe_pci_resume(struct pci_dev *pdev)
struct net_device *netdev = pdata->netdev;
int ret = 0;
+ XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
+
pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 22889fc158f2..87c4308b52a7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -226,6 +226,10 @@ static int aq_pci_probe(struct pci_dev *pdev,
goto err_ioremap;
self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
+ if (!self->aq_hw) {
+ err = -ENOMEM;
+ goto err_ioremap;
+ }
self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);
for (bar = 0; bar < 4; ++bar) {
@@ -235,19 +239,19 @@ static int aq_pci_probe(struct pci_dev *pdev,
mmio_pa = pci_resource_start(pdev, bar);
if (mmio_pa == 0U) {
err = -EIO;
- goto err_ioremap;
+ goto err_free_aq_hw;
}
reg_sz = pci_resource_len(pdev, bar);
if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
err = -EIO;
- goto err_ioremap;
+ goto err_free_aq_hw;
}
self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz);
if (!self->aq_hw->mmio) {
err = -EIO;
- goto err_ioremap;
+ goto err_free_aq_hw;
}
break;
}
@@ -255,7 +259,7 @@ static int aq_pci_probe(struct pci_dev *pdev,
if (bar == 4) {
err = -EIO;
- goto err_ioremap;
+ goto err_free_aq_hw;
}
numvecs = min((u8)AQ_CFG_VECS_DEF,
@@ -290,6 +294,8 @@ err_register:
aq_pci_free_irq_vectors(self);
err_hwinit:
iounmap(self->aq_hw->mmio);
+err_free_aq_hw:
+ kfree(self->aq_hw);
err_ioremap:
free_netdev(ndev);
err_pci_func:
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a77ee2f8fb8d..c1841db1b500 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -820,7 +820,7 @@ static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
- udelay(10);
+ usleep_range(10, 20);
timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
}
@@ -922,8 +922,8 @@ static int tg3_ape_send_event(struct tg3 *tp, u32 event)
if (!(apedata & APE_FW_STATUS_READY))
return -EAGAIN;
- /* Wait for up to 1 millisecond for APE to service previous event. */
- err = tg3_ape_event_lock(tp, 1000);
+ /* Wait for up to 20 millisecond for APE to service previous event. */
+ err = tg3_ape_event_lock(tp, 20000);
if (err)
return err;
@@ -946,6 +946,7 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
switch (kind) {
case RESET_KIND_INIT:
+ tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
APE_HOST_SEG_SIG_MAGIC);
tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
@@ -962,13 +963,6 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
event = APE_EVENT_STATUS_STATE_START;
break;
case RESET_KIND_SHUTDOWN:
- /* With the interface we are currently using,
- * APE does not track driver state. Wiping
- * out the HOST SEGMENT SIGNATURE forces
- * the APE to assume OS absent status.
- */
- tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
-
if (device_may_wakeup(&tp->pdev->dev) &&
tg3_flag(tp, WOL_ENABLE)) {
tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
@@ -990,6 +984,18 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
tg3_ape_send_event(tp, event);
}
+static void tg3_send_ape_heartbeat(struct tg3 *tp,
+ unsigned long interval)
+{
+ /* Check if hb interval has exceeded */
+ if (!tg3_flag(tp, ENABLE_APE) ||
+ time_before(jiffies, tp->ape_hb_jiffies + interval))
+ return;
+
+ tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
+ tp->ape_hb_jiffies = jiffies;
+}
+
static void tg3_disable_ints(struct tg3 *tp)
{
int i;
@@ -7262,6 +7268,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
}
}
+ tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
return work_done;
tx_recovery:
@@ -7344,6 +7351,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
}
}
+ tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
return work_done;
tx_recovery:
@@ -10732,7 +10740,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
if (tg3_flag(tp, ENABLE_APE))
/* Write our heartbeat update interval to APE. */
tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
- APE_HOST_HEARTBEAT_INT_DISABLE);
+ APE_HOST_HEARTBEAT_INT_5SEC);
tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
@@ -11077,6 +11085,9 @@ static void tg3_timer(struct timer_list *t)
tp->asf_counter = tp->asf_multiplier;
}
+ /* Update the APE heartbeat every 5 seconds.*/
+ tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
+
spin_unlock(&tp->lock);
restart_timer:
@@ -16653,6 +16664,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
pci_state_reg);
tg3_ape_lock_init(tp);
+ tp->ape_hb_interval =
+ msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
}
/* Set up tp->grc_local_ctrl before calling
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 47f51cc0566d..1d61aa3efda1 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2508,6 +2508,7 @@
#define TG3_APE_LOCK_PHY3 5
#define TG3_APE_LOCK_GPIO 7
+#define TG3_APE_HB_INTERVAL (tp->ape_hb_interval)
#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10
@@ -3423,6 +3424,10 @@ struct tg3 {
struct device *hwmon_dev;
bool link_up;
bool pcierr_recovery;
+
+ u32 ape_hb;
+ unsigned long ape_hb_interval;
+ unsigned long ape_hb_jiffies;
};
/* Accessor macros for chip and asic attributes
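tg3_send_ape_heartbeat(), added above, is a plain rate limiter: it writes the APE heartbeat register only once the configured interval has elapsed since the previous write. The following is a minimal userspace model of that time_before()-style check; jiffies and the register write are only simulated here.

#include <stdbool.h>
#include <stdio.h>

/* Model of jiffies: a monotonically increasing tick counter. */
static unsigned long jiffies;

/* Wraparound-safe "a is earlier than b" comparison, as in the kernel. */
static bool time_before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}

struct nic {
	unsigned long hb_jiffies;	/* time of the last heartbeat */
	unsigned int hb_count;		/* models the APE register write */
};

static void send_heartbeat(struct nic *nic, unsigned long interval)
{
	/* Skip the write if the interval has not elapsed yet. */
	if (time_before(jiffies, nic->hb_jiffies + interval))
		return;
	nic->hb_count++;
	nic->hb_jiffies = jiffies;
}

int main(void)
{
	struct nic nic = {0};

	for (jiffies = 0; jiffies < 20; jiffies++)
		send_heartbeat(&nic, 5);	/* at most one write per 5 ticks */
	printf("heartbeats sent: %u\n", nic.hb_count);
	return 0;
}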
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index c87c9c684a33..d59497a7bdce 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -75,6 +75,8 @@ EXPORT_SYMBOL(cavium_ptp_get);
void cavium_ptp_put(struct cavium_ptp *ptp)
{
+ if (!ptp)
+ return;
pci_dev_put(ptp->pdev);
}
EXPORT_SYMBOL(cavium_ptp_put);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index b68cde9f17d2..7d9c5ffbd041 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -67,11 +67,6 @@ module_param(cpi_alg, int, S_IRUGO);
MODULE_PARM_DESC(cpi_alg,
"PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
-struct nicvf_xdp_tx {
- u64 dma_addr;
- u8 qidx;
-};
-
static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
{
if (nic->sqs_mode)
@@ -507,29 +502,14 @@ static int nicvf_init_resources(struct nicvf *nic)
return 0;
}
-static void nicvf_unmap_page(struct nicvf *nic, struct page *page, u64 dma_addr)
-{
- /* Check if it's a recycled page, if not unmap the DMA mapping.
- * Recycled page holds an extra reference.
- */
- if (page_ref_count(page) == 1) {
- dma_addr &= PAGE_MASK;
- dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
- RCV_FRAG_LEN + XDP_HEADROOM,
- DMA_FROM_DEVICE,
- DMA_ATTR_SKIP_CPU_SYNC);
- }
-}
-
static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
struct rcv_queue *rq, struct sk_buff **skb)
{
struct xdp_buff xdp;
struct page *page;
- struct nicvf_xdp_tx *xdp_tx = NULL;
u32 action;
- u16 len, err, offset = 0;
+ u16 len, offset = 0;
u64 dma_addr, cpu_addr;
void *orig_data;
@@ -543,7 +523,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
cpu_addr = (u64)phys_to_virt(cpu_addr);
page = virt_to_page((void *)cpu_addr);
- xdp.data_hard_start = page_address(page) + RCV_BUF_HEADROOM;
+ xdp.data_hard_start = page_address(page);
xdp.data = (void *)cpu_addr;
xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + len;
@@ -563,7 +543,18 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
switch (action) {
case XDP_PASS:
- nicvf_unmap_page(nic, page, dma_addr);
+ /* Check if it's a recycled page, if not
+ * unmap the DMA mapping.
+ *
+ * Recycled page holds an extra reference.
+ */
+ if (page_ref_count(page) == 1) {
+ dma_addr &= PAGE_MASK;
+ dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+ RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
/* Build SKB and pass on packet to network stack */
*skb = build_skb(xdp.data,
@@ -576,20 +567,6 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
case XDP_TX:
nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
return true;
- case XDP_REDIRECT:
- /* Save DMA address for use while transmitting */
- xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
- xdp_tx->dma_addr = dma_addr;
- xdp_tx->qidx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);
-
- err = xdp_do_redirect(nic->pnicvf->netdev, &xdp, prog);
- if (!err)
- return true;
-
- /* Free the page on error */
- nicvf_unmap_page(nic, page, dma_addr);
- put_page(page);
- break;
default:
bpf_warn_invalid_xdp_action(action);
/* fall through */
@@ -597,7 +574,18 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
trace_xdp_exception(nic->netdev, prog, action);
/* fall through */
case XDP_DROP:
- nicvf_unmap_page(nic, page, dma_addr);
+ /* Check if it's a recycled page, if not
+ * unmap the DMA mapping.
+ *
+ * Recycled page holds an extra reference.
+ */
+ if (page_ref_count(page) == 1) {
+ dma_addr &= PAGE_MASK;
+ dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+ RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+ DMA_FROM_DEVICE,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
put_page(page);
return true;
}
@@ -1864,50 +1852,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
}
}
-static int nicvf_xdp_xmit(struct net_device *netdev, struct xdp_buff *xdp)
-{
- struct nicvf *nic = netdev_priv(netdev);
- struct nicvf *snic = nic;
- struct nicvf_xdp_tx *xdp_tx;
- struct snd_queue *sq;
- struct page *page;
- int err, qidx;
-
- if (!netif_running(netdev) || !nic->xdp_prog)
- return -EINVAL;
-
- page = virt_to_page(xdp->data);
- xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
- qidx = xdp_tx->qidx;
-
- if (xdp_tx->qidx >= nic->xdp_tx_queues)
- return -EINVAL;
-
- /* Get secondary Qset's info */
- if (xdp_tx->qidx >= MAX_SND_QUEUES_PER_QS) {
- qidx = xdp_tx->qidx / MAX_SND_QUEUES_PER_QS;
- snic = (struct nicvf *)nic->snicvf[qidx - 1];
- if (!snic)
- return -EINVAL;
- qidx = xdp_tx->qidx % MAX_SND_QUEUES_PER_QS;
- }
-
- sq = &snic->qs->sq[qidx];
- err = nicvf_xdp_sq_append_pkt(snic, sq, (u64)xdp->data,
- xdp_tx->dma_addr,
- xdp->data_end - xdp->data);
- if (err)
- return -ENOMEM;
-
- nicvf_xdp_sq_doorbell(snic, sq, qidx);
- return 0;
-}
-
-static void nicvf_xdp_flush(struct net_device *dev)
-{
- return;
-}
-
static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
{
struct hwtstamp_config config;
@@ -1986,8 +1930,6 @@ static const struct net_device_ops nicvf_netdev_ops = {
.ndo_fix_features = nicvf_fix_features,
.ndo_set_features = nicvf_set_features,
.ndo_bpf = nicvf_xdp,
- .ndo_xdp_xmit = nicvf_xdp_xmit,
- .ndo_xdp_flush = nicvf_xdp_flush,
.ndo_do_ioctl = nicvf_ioctl,
};
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 3eae9ff9b53a..d42704d07484 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -204,7 +204,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
/* Reserve space for header modifications by BPF program */
if (rbdr->is_xdp)
- buf_len += XDP_HEADROOM;
+ buf_len += XDP_PACKET_HEADROOM;
/* Check if it's recycled */
if (pgcache)
@@ -224,9 +224,8 @@ ret:
nic->rb_page = NULL;
return -ENOMEM;
}
-
if (pgcache)
- pgcache->dma_addr = *rbuf + XDP_HEADROOM;
+ pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;
nic->rb_page_offset += buf_len;
}
@@ -1244,7 +1243,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
int qentry;
if (subdesc_cnt > sq->xdp_free_cnt)
- return -1;
+ return 0;
qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
@@ -1255,7 +1254,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
sq->xdp_desc_cnt += subdesc_cnt;
- return 0;
+ return 1;
}
/* Calculate no of SQ subdescriptors needed to transmit all
@@ -1656,7 +1655,7 @@ static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
if (page_ref_count(page) != 1)
return;
- len += XDP_HEADROOM;
+ len += XDP_PACKET_HEADROOM;
/* Receive buffers in XDP mode are mapped from page start */
dma_addr &= PAGE_MASK;
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index ce1eed7a6d63..5e9a03cf1b4d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -11,7 +11,6 @@
#include <linux/netdevice.h>
#include <linux/iommu.h>
-#include <linux/bpf.h>
#include <net/xdp.h>
#include "q_struct.h"
@@ -94,9 +93,6 @@
#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
-#define RCV_BUF_HEADROOM 128 /* To store dma address for XDP redirect */
-#define XDP_HEADROOM (XDP_PACKET_HEADROOM + RCV_BUF_HEADROOM)
-
#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
MAX_CQE_PER_PKT_XMIT)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 557fd8bfd54e..00a1d2d13169 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -472,7 +472,7 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
if (is_t6(padap->params.chip)) {
size = padap->params.cim_la_size / 10 + 1;
- size *= 11 * sizeof(u32);
+ size *= 10 * sizeof(u32);
} else {
size = padap->params.cim_la_size / 8;
size *= 8 * sizeof(u32);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
index 30485f9a598f..143686c60234 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
@@ -102,7 +102,7 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
case CUDBG_CIM_LA:
if (is_t6(adap->params.chip)) {
len = adap->params.cim_la_size / 10 + 1;
- len *= 11 * sizeof(u32);
+ len *= 10 * sizeof(u32);
} else {
len = adap->params.cim_la_size / 8;
len *= 8 * sizeof(u32);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 56bc626ef006..7b452e85de2a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4982,9 +4982,10 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
pcie_fw = readl(adap->regs + PCIE_FW_A);
/* Check if cxgb4 is the MASTER and fw is initialized */
- if (!(pcie_fw & PCIE_FW_INIT_F) ||
+ if (num_vfs &&
+ (!(pcie_fw & PCIE_FW_INIT_F) ||
!(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
- PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF) {
+ PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF)) {
dev_warn(&pdev->dev,
"cxgb4 driver needs to be MASTER to support SRIOV\n");
return -EOPNOTSUPP;
@@ -5599,24 +5600,24 @@ static void remove_one(struct pci_dev *pdev)
#if IS_ENABLED(CONFIG_IPV6)
t4_cleanup_clip_tbl(adapter);
#endif
- iounmap(adapter->regs);
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
- pci_disable_pcie_error_reporting(pdev);
- if ((adapter->flags & DEV_ENABLED)) {
- pci_disable_device(pdev);
- adapter->flags &= ~DEV_ENABLED;
- }
- pci_release_regions(pdev);
- kfree(adapter->mbox_log);
- synchronize_rcu();
- kfree(adapter);
}
#ifdef CONFIG_PCI_IOV
else {
cxgb4_iov_configure(adapter->pdev, 0);
}
#endif
+ iounmap(adapter->regs);
+ pci_disable_pcie_error_reporting(pdev);
+ if ((adapter->flags & DEV_ENABLED)) {
+ pci_disable_device(pdev);
+ adapter->flags &= ~DEV_ENABLED;
+ }
+ pci_release_regions(pdev);
+ kfree(adapter->mbox_log);
+ synchronize_rcu();
+ kfree(adapter);
}
/* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 047609ef0515..920bccd6bc40 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2637,7 +2637,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
}
#define EEPROM_STAT_ADDR 0x7bfc
-#define VPD_SIZE 0x800
#define VPD_BASE 0x400
#define VPD_BASE_OLD 0
#define VPD_LEN 1024
@@ -2704,15 +2703,6 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
if (!vpd)
return -ENOMEM;
- /* We have two VPD data structures stored in the adapter VPD area.
- * By default, Linux calculates the size of the VPD area by traversing
- * the first VPD area at offset 0x0, so we need to tell the OS what
- * our real VPD size is.
- */
- ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
- if (ret < 0)
- goto out;
-
/* Card information normally starts at VPD_BASE but early cards had
* it at 0.
*/
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 3bdeb295514b..f5c87bd35fa1 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2934,29 +2934,17 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
{
int size = lstatus & BD_LENGTH_MASK;
struct page *page = rxb->page;
- bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
-
- /* Remove the FCS from the packet length */
- if (last)
- size -= ETH_FCS_LEN;
if (likely(first)) {
skb_put(skb, size);
} else {
/* the last fragments' length contains the full frame length */
- if (last)
+ if (lstatus & BD_LFLAG(RXBD_LAST))
size -= skb->len;
- /* Add the last fragment if it contains something other than
- * the FCS, otherwise drop it and trim off any part of the FCS
- * that was already received.
- */
- if (size > 0)
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rxb->page_offset + RXBUF_ALIGNMENT,
- size, GFAR_RXB_TRUESIZE);
- else if (size < 0)
- pskb_trim(skb, skb->len + size);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rxb->page_offset + RXBUF_ALIGNMENT,
+ size, GFAR_RXB_TRUESIZE);
}
/* try reuse page */
@@ -3069,6 +3057,9 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
if (priv->padding)
skb_pull(skb, priv->padding);
+ /* Trim off the FCS */
+ pskb_trim(skb, skb->len - ETH_FCS_LEN);
+
if (ndev->features & NETIF_F_RXCSUM)
gfar_rx_checksum(skb, fcb);
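The gianfar hunks above stop subtracting the FCS per fragment and instead trim it once from the fully assembled frame in gfar_process_frame(). A minimal sketch of that pattern, assuming the MAC leaves the 4-byte FCS on every received frame (hypothetical helper name, not part of the patch):

#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Called once per completed frame, after all fragments were merged. */
static void example_strip_fcs(struct sk_buff *skb)
{
	/* Assumes the frame still carries the trailing 4-byte FCS. */
	pskb_trim(skb, skb->len - ETH_FCS_LEN);
}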
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 27447260215d..1b3cc8bb0705 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -791,6 +791,18 @@ static int ibmvnic_login(struct net_device *netdev)
return 0;
}
+static void release_login_buffer(struct ibmvnic_adapter *adapter)
+{
+ kfree(adapter->login_buf);
+ adapter->login_buf = NULL;
+}
+
+static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
+{
+ kfree(adapter->login_rsp_buf);
+ adapter->login_rsp_buf = NULL;
+}
+
static void release_resources(struct ibmvnic_adapter *adapter)
{
int i;
@@ -813,6 +825,10 @@ static void release_resources(struct ibmvnic_adapter *adapter)
}
}
}
+ kfree(adapter->napi);
+ adapter->napi = NULL;
+
+ release_login_rsp_buffer(adapter);
}
static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
@@ -1057,6 +1073,35 @@ static int ibmvnic_open(struct net_device *netdev)
return rc;
}
+static void clean_rx_pools(struct ibmvnic_adapter *adapter)
+{
+ struct ibmvnic_rx_pool *rx_pool;
+ u64 rx_entries;
+ int rx_scrqs;
+ int i, j;
+
+ if (!adapter->rx_pool)
+ return;
+
+ rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+ rx_entries = adapter->req_rx_add_entries_per_subcrq;
+
+ /* Free any remaining skbs in the rx buffer pools */
+ for (i = 0; i < rx_scrqs; i++) {
+ rx_pool = &adapter->rx_pool[i];
+ if (!rx_pool)
+ continue;
+
+ netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
+ for (j = 0; j < rx_entries; j++) {
+ if (rx_pool->rx_buff[j].skb) {
+ dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
+ rx_pool->rx_buff[j].skb = NULL;
+ }
+ }
+ }
+}
+
static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_tx_pool *tx_pool;
@@ -1134,7 +1179,7 @@ static int __ibmvnic_close(struct net_device *netdev)
}
}
}
-
+ clean_rx_pools(adapter);
clean_tx_pools(adapter);
adapter->state = VNIC_CLOSED;
return rc;
@@ -1670,8 +1715,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
return 0;
}
- netif_carrier_on(netdev);
-
/* kick napi */
for (i = 0; i < adapter->req_rx_queues; i++)
napi_schedule(&adapter->napi[i]);
@@ -1679,6 +1722,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (adapter->reset_reason != VNIC_RESET_FAILOVER)
netdev_notify_peers(netdev);
+ netif_carrier_on(netdev);
+
return 0;
}
@@ -1853,6 +1898,12 @@ restart_poll:
be16_to_cpu(next->rx_comp.rc));
/* free the entry */
next->rx_comp.first = 0;
+ dev_kfree_skb_any(rx_buff->skb);
+ remove_buff_from_pool(adapter, rx_buff);
+ continue;
+ } else if (!rx_buff->skb) {
+ /* free the entry */
+ next->rx_comp.first = 0;
remove_buff_from_pool(adapter, rx_buff);
continue;
}
@@ -3013,6 +3064,7 @@ static void send_login(struct ibmvnic_adapter *adapter)
struct vnic_login_client_data *vlcd;
int i;
+ release_login_rsp_buffer(adapter);
client_data_len = vnic_client_data_len(adapter);
buffer_size =
@@ -3738,6 +3790,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
ibmvnic_remove(adapter->vdev);
return -EIO;
}
+ release_login_buffer(adapter);
complete(&adapter->init_done);
return 0;
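The ibmvnic hunks introduce paired release helpers that free a login buffer and clear the pointer, plus a clean_rx_pools() walk that drops any skbs still sitting in the rx pools before the adapter moves to VNIC_CLOSED. A minimal sketch of the free-and-clear idiom (hypothetical struct and name; kfree(NULL) is a no-op, so the helper is safe to call repeatedly):

#include <linux/slab.h>

struct example_adapter {
	void *login_buf;
};

static void example_release_login_buffer(struct example_adapter *adapter)
{
	/* Free and clear so a second release, or a release before the
	 * buffer was ever allocated, is harmless.
	 */
	kfree(adapter->login_buf);
	adapter->login_buf = NULL;
}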
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index a1d7b88cf083..5a1668cdb461 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -7137,6 +7137,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
int id = port->id;
bool allmulti = dev->flags & IFF_ALLMULTI;
+retry:
mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
@@ -7144,9 +7145,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
/* Remove all port->id's mcast entries */
mvpp2_prs_mcast_del_all(priv, id);
- if (allmulti && !netdev_mc_empty(dev)) {
- netdev_for_each_mc_addr(ha, dev)
- mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
+ if (!allmulti) {
+ netdev_for_each_mc_addr(ha, dev) {
+ if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) {
+ allmulti = true;
+ goto retry;
+ }
+ }
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index 0be4575b58a2..fd509160c8f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -96,10 +96,10 @@ static void print_lyr_2_4_hdrs(struct trace_seq *p,
"%pI4");
} else if (ethertype.v == ETH_P_IPV6) {
static const struct in6_addr full_ones = {
- .in6_u.u6_addr32 = {htonl(0xffffffff),
- htonl(0xffffffff),
- htonl(0xffffffff),
- htonl(0xffffffff)},
+ .in6_u.u6_addr32 = {__constant_htonl(0xffffffff),
+ __constant_htonl(0xffffffff),
+ __constant_htonl(0xffffffff),
+ __constant_htonl(0xffffffff)},
};
DECLARE_MASK_VAL(struct in6_addr, src_ipv6);
DECLARE_MASK_VAL(struct in6_addr, dst_ipv6);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 47bab842c5ee..da94c8cba5ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1768,13 +1768,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
param->wq.linear = 1;
}
-static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
+static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
+ struct mlx5e_rq_param *param)
{
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+
+ param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
}
static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
@@ -2634,6 +2637,9 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
struct mlx5e_cq *cq,
struct mlx5e_cq_param *param)
{
+ param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
+ param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev);
+
return mlx5e_alloc_cq_common(mdev, param, cq);
}
@@ -2645,7 +2651,7 @@ static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
struct mlx5e_cq *cq = &drop_rq->cq;
int err;
- mlx5e_build_drop_rq_param(&rq_param);
+ mlx5e_build_drop_rq_param(mdev, &rq_param);
err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
if (err)
@@ -2994,8 +3000,8 @@ static int mlx5e_setup_tc_block(struct net_device *dev,
}
#endif
-int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
- void *type_data)
+static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
{
switch (type) {
#ifdef CONFIG_MLX5_ESWITCH
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 0d4bb0688faa..e5c3ab46a24a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -36,6 +36,7 @@
#include <linux/tcp.h>
#include <linux/bpf_trace.h>
#include <net/busy_poll.h>
+#include <net/ip6_checksum.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
@@ -546,20 +547,33 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
return true;
}
+static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
+{
+ u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
+ u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
+ (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
+
+ tcp->check = 0;
+ tcp->psh = get_cqe_lro_tcppsh(cqe);
+
+ if (tcp_ack) {
+ tcp->ack = 1;
+ tcp->ack_seq = cqe->lro_ack_seq_num;
+ tcp->window = cqe->lro_tcp_win;
+ }
+}
+
static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
u32 cqe_bcnt)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
struct tcphdr *tcp;
int network_depth = 0;
+ __wsum check;
__be16 proto;
u16 tot_len;
void *ip_p;
- u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
- u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
- (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
-
proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
tot_len = cqe_bcnt - network_depth;
@@ -576,23 +590,30 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
ipv4->check = 0;
ipv4->check = ip_fast_csum((unsigned char *)ipv4,
ipv4->ihl);
+
+ mlx5e_lro_update_tcp_hdr(cqe, tcp);
+ check = csum_partial(tcp, tcp->doff * 4,
+ csum_unfold((__force __sum16)cqe->check_sum));
+ /* Almost done, don't forget the pseudo header */
+ tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
+ tot_len - sizeof(struct iphdr),
+ IPPROTO_TCP, check);
} else {
+ u16 payload_len = tot_len - sizeof(struct ipv6hdr);
struct ipv6hdr *ipv6 = ip_p;
tcp = ip_p + sizeof(struct ipv6hdr);
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
ipv6->hop_limit = cqe->lro_min_ttl;
- ipv6->payload_len = cpu_to_be16(tot_len -
- sizeof(struct ipv6hdr));
- }
-
- tcp->psh = get_cqe_lro_tcppsh(cqe);
-
- if (tcp_ack) {
- tcp->ack = 1;
- tcp->ack_seq = cqe->lro_ack_seq_num;
- tcp->window = cqe->lro_tcp_win;
+ ipv6->payload_len = cpu_to_be16(payload_len);
+
+ mlx5e_lro_update_tcp_hdr(cqe, tcp);
+ check = csum_partial(tcp, tcp->doff * 4,
+ csum_unfold((__force __sum16)cqe->check_sum));
+ /* Almost done, don't forget the pseudo header */
+ tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
+ IPPROTO_TCP, check);
}
}
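The en_rx.c rework recomputes the TCP checksum of an LRO-aggregated frame in three steps: zero tcp->check, checksum the TCP header with csum_partial() seeded by the CQE-reported payload checksum, then fold in the pseudo-header with csum_tcpudp_magic() (csum_ipv6_magic() for IPv6). A minimal IPv4 sketch of that sequence (hypothetical wrapper name; payload_csum stands in for the checksum the hardware reports, and tcp_len is the TCP header plus payload length):

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/checksum.h>

static void example_rebuild_tcp_csum(struct iphdr *iph, struct tcphdr *tcp,
				     u16 tcp_len, __sum16 payload_csum)
{
	__wsum check;

	tcp->check = 0;
	/* Checksum the TCP header, seeded with the payload checksum. */
	check = csum_partial(tcp, tcp->doff * 4, csum_unfold(payload_csum));
	/* Finish with the IPv4 pseudo-header. */
	tcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr, tcp_len,
				       IPPROTO_TCP, check);
}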
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index 5a4608281f38..707976482c09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -216,7 +216,8 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
if (iph->protocol != IPPROTO_UDP)
goto out;
- udph = udp_hdr(skb);
+ /* Don't assume skb_transport_header() was set */
+ udph = (struct udphdr *)((u8 *)iph + 4 * iph->ihl);
if (udph->dest != htons(9))
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index fd98b0dc610f..fa86a1466718 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2529,7 +2529,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
- if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
+ if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
+ tcf_vlan_push_prio(a))
return -EOPNOTSUPP;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 569b42a01026..11b4f1089d1c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -176,7 +176,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
default:
hlen = mlx5e_skb_l2_header_offset(skb);
}
- return min_t(u16, hlen, skb->len);
+ return min_t(u16, hlen, skb_headlen(skb));
}
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 5ecf2cddc16d..c2b1d7d351fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1529,6 +1529,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
+ /* Create steering drop counters for ingress and egress ACLs */
+ if (vport_num && esw->mode == SRIOV_LEGACY)
+ esw_vport_create_drop_counters(vport);
+
/* Restore old vport configuration */
esw_apply_vport_conf(esw, vport);
@@ -1545,10 +1549,6 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
if (!vport_num)
vport->info.trusted = true;
- /* create steering drop counters for ingress and egress ACLs */
- if (vport_num && esw->mode == SRIOV_LEGACY)
- esw_vport_create_drop_counters(vport);
-
esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index c025c98700e4..31fc2cfac3b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1429,7 +1429,8 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_ENCAP |
- MLX5_FLOW_CONTEXT_ACTION_DECAP))
+ MLX5_FLOW_CONTEXT_ACTION_DECAP |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR))
return true;
return false;
@@ -1758,8 +1759,11 @@ search_again_locked:
/* Collect all fgs which has a matching match_criteria */
err = build_match_list(&match_head, ft, spec);
- if (err)
+ if (err) {
+ if (take_write)
+ up_write_ref_node(&ft->node);
return ERR_PTR(err);
+ }
if (!take_write)
up_read_ref_node(&ft->node);
@@ -1768,8 +1772,11 @@ search_again_locked:
dest_num, version);
free_match_list(&match_head);
if (!IS_ERR(rule) ||
- (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN))
+ (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
+ if (take_write)
+ up_write_ref_node(&ft->node);
return rule;
+ }
if (!take_write) {
nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index e159243e0fcf..857035583ccd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -34,6 +34,7 @@
#include <linux/highmem.h>
#include <rdma/mlx5-abi.h>
#include "en.h"
+#include "clock.h"
enum {
MLX5_CYCLES_SHIFT = 23
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 2ef641c91c26..ae391e4b7070 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -551,7 +551,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
MLX5_SET(cmd_hca_cap,
set_hca_cap,
cache_line_128byte,
- cache_line_size() == 128 ? 1 : 0);
+ cache_line_size() >= 128 ? 1 : 0);
if (MLX5_CAP_GEN_MAX(dev, dct))
MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index f0b25baba09a..f7948e983637 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -788,6 +788,9 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
u32 tb_id,
struct netlink_ext_ack *extack)
{
+ struct mlxsw_sp_mr_table *mr4_table;
+ struct mlxsw_sp_fib *fib4;
+ struct mlxsw_sp_fib *fib6;
struct mlxsw_sp_vr *vr;
int err;
@@ -796,29 +799,30 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
return ERR_PTR(-EBUSY);
}
- vr->fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
- if (IS_ERR(vr->fib4))
- return ERR_CAST(vr->fib4);
- vr->fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
- if (IS_ERR(vr->fib6)) {
- err = PTR_ERR(vr->fib6);
+ fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(fib4))
+ return ERR_CAST(fib4);
+ fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
+ if (IS_ERR(fib6)) {
+ err = PTR_ERR(fib6);
goto err_fib6_create;
}
- vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
- MLXSW_SP_L3_PROTO_IPV4);
- if (IS_ERR(vr->mr4_table)) {
- err = PTR_ERR(vr->mr4_table);
+ mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
+ MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(mr4_table)) {
+ err = PTR_ERR(mr4_table);
goto err_mr_table_create;
}
+ vr->fib4 = fib4;
+ vr->fib6 = fib6;
+ vr->mr4_table = mr4_table;
vr->tb_id = tb_id;
return vr;
err_mr_table_create:
- mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
- vr->fib6 = NULL;
+ mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
err_fib6_create:
- mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
- vr->fib4 = NULL;
+ mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
return ERR_PTR(err);
}
@@ -3790,6 +3794,9 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
int i;
+ if (!list_is_singular(&nh_grp->fib_list))
+ return;
+
for (i = 0; i < nh_grp->count; i++) {
struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
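The mlxsw_sp_vr_create() change builds the two FIBs and the MR table into local variables and only publishes them into the vr once all three exist, so the unwind path never has to clear half-set pointers in the shared structure. A self-contained sketch of that commit-on-success shape (hypothetical types; the constructor here just allocates a small struct):

#include <linux/err.h>
#include <linux/slab.h>

struct example_fib { int proto; };

struct example_vr {
	struct example_fib *fib4;
	struct example_fib *fib6;
};

static struct example_fib *example_fib_create(int proto)
{
	struct example_fib *fib = kzalloc(sizeof(*fib), GFP_KERNEL);

	if (!fib)
		return ERR_PTR(-ENOMEM);
	fib->proto = proto;
	return fib;
}

static struct example_vr *example_vr_init(struct example_vr *vr)
{
	struct example_fib *fib4, *fib6;
	int err;

	fib4 = example_fib_create(4);
	if (IS_ERR(fib4))
		return ERR_CAST(fib4);
	fib6 = example_fib_create(6);
	if (IS_ERR(fib6)) {
		err = PTR_ERR(fib6);
		goto err_fib6_create;
	}

	/* Publish only after every step succeeded; the error path never
	 * leaves stale pointers behind in *vr.
	 */
	vr->fib4 = fib4;
	vr->fib6 = fib6;
	return vr;

err_fib6_create:
	kfree(fib4);
	return ERR_PTR(err);
}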
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 7e7704daf5f1..c4949183eef3 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -43,12 +43,6 @@
/* Local Definitions and Declarations */
-struct rmnet_walk_data {
- struct net_device *real_dev;
- struct list_head *head;
- struct rmnet_port *port;
-};
-
static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
{
return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
@@ -112,17 +106,14 @@ static int rmnet_register_real_device(struct net_device *real_dev)
static void rmnet_unregister_bridge(struct net_device *dev,
struct rmnet_port *port)
{
- struct net_device *rmnet_dev, *bridge_dev;
struct rmnet_port *bridge_port;
+ struct net_device *bridge_dev;
if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
return;
/* bridge slave handling */
if (!port->nr_rmnet_devs) {
- rmnet_dev = netdev_master_upper_dev_get_rcu(dev);
- netdev_upper_dev_unlink(dev, rmnet_dev);
-
bridge_dev = port->bridge_ep;
bridge_port = rmnet_get_port_rtnl(bridge_dev);
@@ -132,9 +123,6 @@ static void rmnet_unregister_bridge(struct net_device *dev,
bridge_dev = port->bridge_ep;
bridge_port = rmnet_get_port_rtnl(bridge_dev);
- rmnet_dev = netdev_master_upper_dev_get_rcu(bridge_dev);
- netdev_upper_dev_unlink(bridge_dev, rmnet_dev);
-
rmnet_unregister_real_device(bridge_dev, bridge_port);
}
}
@@ -173,10 +161,6 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
if (err)
goto err1;
- err = netdev_master_upper_dev_link(dev, real_dev, NULL, NULL, extack);
- if (err)
- goto err2;
-
port->rmnet_mode = mode;
hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
@@ -193,8 +177,6 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
return 0;
-err2:
- rmnet_vnd_dellink(mux_id, port, ep);
err1:
rmnet_unregister_real_device(real_dev, port);
err0:
@@ -204,14 +186,13 @@ err0:
static void rmnet_dellink(struct net_device *dev, struct list_head *head)
{
+ struct rmnet_priv *priv = netdev_priv(dev);
struct net_device *real_dev;
struct rmnet_endpoint *ep;
struct rmnet_port *port;
u8 mux_id;
- rcu_read_lock();
- real_dev = netdev_master_upper_dev_get_rcu(dev);
- rcu_read_unlock();
+ real_dev = priv->real_dev;
if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
return;
@@ -219,7 +200,6 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
port = rmnet_get_port_rtnl(real_dev);
mux_id = rmnet_vnd_get_mux(dev);
- netdev_upper_dev_unlink(dev, real_dev);
ep = rmnet_get_endpoint(port, mux_id);
if (ep) {
@@ -233,30 +213,13 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
unregister_netdevice_queue(dev, head);
}
-static int rmnet_dev_walk_unreg(struct net_device *rmnet_dev, void *data)
-{
- struct rmnet_walk_data *d = data;
- struct rmnet_endpoint *ep;
- u8 mux_id;
-
- mux_id = rmnet_vnd_get_mux(rmnet_dev);
- ep = rmnet_get_endpoint(d->port, mux_id);
- if (ep) {
- hlist_del_init_rcu(&ep->hlnode);
- rmnet_vnd_dellink(mux_id, d->port, ep);
- kfree(ep);
- }
- netdev_upper_dev_unlink(rmnet_dev, d->real_dev);
- unregister_netdevice_queue(rmnet_dev, d->head);
-
- return 0;
-}
-
static void rmnet_force_unassociate_device(struct net_device *dev)
{
struct net_device *real_dev = dev;
- struct rmnet_walk_data d;
+ struct hlist_node *tmp_ep;
+ struct rmnet_endpoint *ep;
struct rmnet_port *port;
+ unsigned long bkt_ep;
LIST_HEAD(list);
if (!rmnet_is_real_dev_registered(real_dev))
@@ -264,16 +227,19 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
ASSERT_RTNL();
- d.real_dev = real_dev;
- d.head = &list;
-
port = rmnet_get_port_rtnl(dev);
- d.port = port;
rcu_read_lock();
rmnet_unregister_bridge(dev, port);
- netdev_walk_all_lower_dev_rcu(real_dev, rmnet_dev_walk_unreg, &d);
+ hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
+ unregister_netdevice_queue(ep->egress_dev, &list);
+ rmnet_vnd_dellink(ep->mux_id, port, ep);
+
+ hlist_del_init_rcu(&ep->hlnode);
+ kfree(ep);
+ }
+
rcu_read_unlock();
unregister_netdevice_many(&list);
@@ -422,11 +388,6 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
if (err)
return -EBUSY;
- err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL,
- extack);
- if (err)
- return -EINVAL;
-
slave_port = rmnet_get_port(slave_dev);
slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
slave_port->bridge_ep = real_dev;
@@ -449,7 +410,6 @@ int rmnet_del_bridge(struct net_device *rmnet_dev,
port->rmnet_mode = RMNET_EPMODE_VND;
port->bridge_ep = NULL;
- netdev_upper_dev_unlink(slave_dev, rmnet_dev);
slave_port = rmnet_get_port(slave_dev);
rmnet_unregister_real_device(slave_dev, slave_port);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index 6bc328fb88e1..b0dbca070c00 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -38,6 +38,11 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
}
ep = rmnet_get_endpoint(port, mux_id);
+ if (!ep) {
+ kfree_skb(skb);
+ return RX_HANDLER_CONSUMED;
+ }
+
vnd = ep->egress_dev;
ip_family = cmd->flow_control.ip_family;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 570a227acdd8..346d310914df 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -121,7 +121,7 @@ static void rmnet_get_stats64(struct net_device *dev,
memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
for_each_possible_cpu(cpu) {
- pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+ pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
do {
start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
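The rmnet_vnd.c fix swaps this_cpu_ptr() for per_cpu_ptr(..., cpu) inside the for_each_possible_cpu() loop, so the stats really are summed across every CPU rather than reading the local CPU's counters repeatedly. A minimal sketch of the usual aggregation loop (hypothetical per-cpu stats layout):

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct example_pcpu_stats {
	u64 rx_pkts;
	u64 tx_pkts;
	struct u64_stats_sync syncp;
};

static void example_sum_stats(struct example_pcpu_stats __percpu *pcpu,
			      u64 *rx, u64 *tx)
{
	int cpu;

	*rx = *tx = 0;
	for_each_possible_cpu(cpu) {
		/* per_cpu_ptr(), not this_cpu_ptr(): we want CPU 'cpu',
		 * not whichever CPU this code happens to run on.
		 */
		struct example_pcpu_stats *s = per_cpu_ptr(pcpu, cpu);
		u64 rx_pkts, tx_pkts;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&s->syncp);
			rx_pkts = s->rx_pkts;
			tx_pkts = s->tx_pkts;
		} while (u64_stats_fetch_retry_irq(&s->syncp, start));

		*rx += rx_pkts;
		*tx += tx_pkts;
	}
}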
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index c87f57ca4437..a95fbd5510d9 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2255,9 +2255,6 @@ static int ravb_wol_setup(struct net_device *ndev)
/* Enable MagicPacket */
ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
- /* Increased clock usage so device won't be suspended */
- clk_enable(priv->clk);
-
return enable_irq_wake(priv->emac_irq);
}
@@ -2276,9 +2273,6 @@ static int ravb_wol_restore(struct net_device *ndev)
if (ret < 0)
return ret;
- /* Restore clock usage count */
- clk_disable(priv->clk);
-
return disable_irq_wake(priv->emac_irq);
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index a197e11f3a56..92dcf8717fc6 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -40,7 +40,6 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
-#include <linux/clk.h>
#include <linux/sh_eth.h>
#include <linux/of_mdio.h>
@@ -2304,7 +2303,7 @@ static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
wol->supported = 0;
wol->wolopts = 0;
- if (mdp->cd->magic && mdp->clk) {
+ if (mdp->cd->magic) {
wol->supported = WAKE_MAGIC;
wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0;
}
@@ -2314,7 +2313,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
- if (!mdp->cd->magic || !mdp->clk || wol->wolopts & ~WAKE_MAGIC)
+ if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC)
return -EOPNOTSUPP;
mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
@@ -3153,11 +3152,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
goto out_release;
}
- /* Get clock, if not found that's OK but Wake-On-Lan is unavailable */
- mdp->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(mdp->clk))
- mdp->clk = NULL;
-
ndev->base_addr = res->start;
spin_lock_init(&mdp->lock);
@@ -3278,7 +3272,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
if (ret)
goto out_napi_del;
- if (mdp->cd->magic && mdp->clk)
+ if (mdp->cd->magic)
device_set_wakeup_capable(&pdev->dev, 1);
/* print device information */
@@ -3331,9 +3325,6 @@ static int sh_eth_wol_setup(struct net_device *ndev)
/* Enable MagicPacket */
sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
- /* Increased clock usage so device won't be suspended */
- clk_enable(mdp->clk);
-
return enable_irq_wake(ndev->irq);
}
@@ -3359,9 +3350,6 @@ static int sh_eth_wol_restore(struct net_device *ndev)
if (ret < 0)
return ret;
- /* Restore clock usage count */
- clk_disable(mdp->clk);
-
return disable_irq_wake(ndev->irq);
}
diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig
index 63aca9f847e1..4c2f612e4414 100644
--- a/drivers/net/ethernet/smsc/Kconfig
+++ b/drivers/net/ethernet/smsc/Kconfig
@@ -20,7 +20,7 @@ if NET_VENDOR_SMSC
config SMC9194
tristate "SMC 9194 support"
- depends on (ISA || MAC && BROKEN)
+ depends on ISA
select CRC32
---help---
This is support for the SMC9xxx based Ethernet cards. Choose this
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index a0f2be81d52e..8fc02d9db3d0 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1451,7 +1451,7 @@ destroy_macvlan_port:
/* the macvlan port may be freed by macvlan_uninit when it fails to register.
* so we destroy the macvlan port only when it's valid.
*/
- if (create && macvlan_port_get_rtnl(dev))
+ if (create && macvlan_port_get_rtnl(lowerdev))
macvlan_port_destroy(port->dev);
return err;
}
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b13eed21c87d..d39ae77707ef 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1382,7 +1382,7 @@ int genphy_setup_forced(struct phy_device *phydev)
ctl |= BMCR_FULLDPLX;
return phy_modify(phydev, MII_BMCR,
- BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN, ctl);
+ ~(BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN), ctl);
}
EXPORT_SYMBOL(genphy_setup_forced);
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index ca5e375de27c..e0d6760f3219 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -166,6 +166,8 @@ struct tbnet_ring {
* @connected_work: Worker that finalizes the ThunderboltIP connection
* setup and enables DMA paths for high speed data
* transfers
+ * @disconnect_work: Worker that handles tearing down the ThunderboltIP
+ * connection
* @rx_hdr: Copy of the currently processed Rx frame. Used when a
* network packet consists of multiple Thunderbolt frames.
* In host byte order.
@@ -190,6 +192,7 @@ struct tbnet {
int login_retries;
struct delayed_work login_work;
struct work_struct connected_work;
+ struct work_struct disconnect_work;
struct thunderbolt_ip_frame_header rx_hdr;
struct tbnet_ring rx_ring;
atomic_t frame_id;
@@ -445,7 +448,7 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data)
case TBIP_LOGOUT:
ret = tbnet_logout_response(net, route, sequence, command_id);
if (!ret)
- tbnet_tear_down(net, false);
+ queue_work(system_long_wq, &net->disconnect_work);
break;
default:
@@ -659,6 +662,13 @@ static void tbnet_login_work(struct work_struct *work)
}
}
+static void tbnet_disconnect_work(struct work_struct *work)
+{
+ struct tbnet *net = container_of(work, typeof(*net), disconnect_work);
+
+ tbnet_tear_down(net, false);
+}
+
static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
const struct thunderbolt_ip_frame_header *hdr)
{
@@ -881,6 +891,7 @@ static int tbnet_stop(struct net_device *dev)
napi_disable(&net->napi);
+ cancel_work_sync(&net->disconnect_work);
tbnet_tear_down(net, true);
tb_ring_free(net->rx_ring.ring);
@@ -1195,6 +1206,7 @@ static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
net = netdev_priv(dev);
INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);
INIT_WORK(&net->connected_work, tbnet_connected_work);
+ INIT_WORK(&net->disconnect_work, tbnet_disconnect_work);
mutex_init(&net->connection_lock);
atomic_set(&net->command_id, 0);
atomic_set(&net->frame_id, 0);
@@ -1270,10 +1282,7 @@ static int __maybe_unused tbnet_suspend(struct device *dev)
stop_login(net);
if (netif_running(net->dev)) {
netif_device_detach(net->dev);
- tb_ring_stop(net->rx_ring.ring);
- tb_ring_stop(net->tx_ring.ring);
- tbnet_free_buffers(&net->rx_ring);
- tbnet_free_buffers(&net->tx_ring);
+ tbnet_tear_down(net, true);
}
return 0;
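The thunderbolt driver now defers the ThunderboltIP teardown triggered by a LOGOUT packet to a dedicated work item instead of running it from the packet handler, and tbnet_stop() cancels that work before its own teardown. A minimal sketch of the pattern (hypothetical names; INIT_WORK() would be done once at probe time):

#include <linux/workqueue.h>

struct example_net {
	struct work_struct disconnect_work;
};

static void example_tear_down(struct example_net *net)
{
	/* ... release rings, buffers, DMA paths ... */
}

static void example_disconnect_work(struct work_struct *work)
{
	struct example_net *net = container_of(work, struct example_net,
					       disconnect_work);

	example_tear_down(net);
}

/* Runs in a context that should not perform the teardown directly. */
static void example_handle_logout(struct example_net *net)
{
	queue_work(system_long_wq, &net->disconnect_work);
}

static void example_stop(struct example_net *net)
{
	/* Make sure a queued disconnect cannot race with the final teardown. */
	cancel_work_sync(&net->disconnect_work);
	example_tear_down(net);
}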
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 81e6cc951e7f..b52258c327d2 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1489,27 +1489,23 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
skb->truesize += skb->data_len;
for (i = 1; i < it->nr_segs; i++) {
+ struct page_frag *pfrag = &current->task_frag;
size_t fragsz = it->iov[i].iov_len;
- unsigned long offset;
- struct page *page;
- void *data;
if (fragsz == 0 || fragsz > PAGE_SIZE) {
err = -EINVAL;
goto free;
}
- local_bh_disable();
- data = napi_alloc_frag(fragsz);
- local_bh_enable();
- if (!data) {
+ if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) {
err = -ENOMEM;
goto free;
}
- page = virt_to_head_page(data);
- offset = data - page_address(page);
- skb_fill_page_desc(skb, i - 1, page, offset, fragsz);
+ skb_fill_page_desc(skb, i - 1, pfrag->page,
+ pfrag->offset, fragsz);
+ page_ref_inc(pfrag->page);
+ pfrag->offset += fragsz;
}
return skb;
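The tun.c change allocates fragment memory from the calling task's page_frag with skb_page_frag_refill() instead of napi_alloc_frag(), attaches it with skb_fill_page_desc(), takes an extra page reference for the skb and advances the frag offset by hand. A minimal sketch of that sequence (hypothetical helper; assumes process context, hence GFP_KERNEL):

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/skbuff.h>

/* Append fragsz bytes backed by current->task_frag as fragment i of skb.
 * Returns 0 on success or -ENOMEM if the page_frag could not be refilled.
 */
static int example_add_task_frag(struct sk_buff *skb, int i, size_t fragsz)
{
	struct page_frag *pfrag = &current->task_frag;

	if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL))
		return -ENOMEM;

	skb_fill_page_desc(skb, i, pfrag->page, pfrag->offset, fragsz);
	/* The skb needs its own reference; the task_frag keeps the original. */
	page_ref_inc(pfrag->page);
	pfrag->offset += fragsz;
	return 0;
}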
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index d0a113743195..7a6a1fe79309 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -954,10 +954,11 @@ static int smsc75xx_set_features(struct net_device *netdev,
/* it's racing here! */
ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
- if (ret < 0)
+ if (ret < 0) {
netdev_warn(dev->net, "Error writing RFE_CTL\n");
-
- return ret;
+ return ret;
+ }
+ return 0;
}
static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 626c27352ae2..9bb9e562b893 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -443,12 +443,8 @@ static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC);
- if (unlikely(err)) {
- struct page *page = virt_to_head_page(xdp->data);
-
- put_page(page);
- return false;
- }
+ if (unlikely(err))
+ return false; /* Caller handles free/refcnt */
return true;
}
@@ -456,8 +452,18 @@ static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
{
struct virtnet_info *vi = netdev_priv(dev);
- bool sent = __virtnet_xdp_xmit(vi, xdp);
+ struct receive_queue *rq = vi->rq;
+ struct bpf_prog *xdp_prog;
+ bool sent;
+ /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
+ * indicates XDP resources have been successfully allocated.
+ */
+ xdp_prog = rcu_dereference(rq->xdp_prog);
+ if (!xdp_prog)
+ return -ENXIO;
+
+ sent = __virtnet_xdp_xmit(vi, xdp);
if (!sent)
return -ENOSPC;
return 0;
@@ -546,8 +552,11 @@ static struct sk_buff *receive_small(struct net_device *dev,
unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
struct page *page = virt_to_head_page(buf);
- unsigned int delta = 0, err;
+ unsigned int delta = 0;
struct page *xdp_page;
+ bool sent;
+ int err;
+
len -= vi->hdr_len;
rcu_read_lock();
@@ -558,7 +567,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
void *orig_data;
u32 act;
- if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
+ if (unlikely(hdr->hdr.gso_type))
goto err_xdp;
if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
@@ -596,16 +605,19 @@ static struct sk_buff *receive_small(struct net_device *dev,
delta = orig_data - xdp.data;
break;
case XDP_TX:
- if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
+ sent = __virtnet_xdp_xmit(vi, &xdp);
+ if (unlikely(!sent)) {
trace_xdp_exception(vi->dev, xdp_prog, act);
- else
- *xdp_xmit = true;
+ goto err_xdp;
+ }
+ *xdp_xmit = true;
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
err = xdp_do_redirect(dev, &xdp, xdp_prog);
- if (!err)
- *xdp_xmit = true;
+ if (err)
+ goto err_xdp;
+ *xdp_xmit = true;
rcu_read_unlock();
goto xdp_xmit;
default:
@@ -677,7 +689,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
struct bpf_prog *xdp_prog;
unsigned int truesize;
unsigned int headroom = mergeable_ctx_to_headroom(ctx);
- int err;
+ bool sent;
head_skb = NULL;
@@ -746,20 +758,18 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
break;
case XDP_TX:
- if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
+ sent = __virtnet_xdp_xmit(vi, &xdp);
+ if (unlikely(!sent)) {
trace_xdp_exception(vi->dev, xdp_prog, act);
- else
- *xdp_xmit = true;
+ if (unlikely(xdp_page != page))
+ put_page(xdp_page);
+ goto err_xdp;
+ }
+ *xdp_xmit = true;
if (unlikely(xdp_page != page))
goto err_xdp;
rcu_read_unlock();
goto xdp_xmit;
- case XDP_REDIRECT:
- err = xdp_do_redirect(dev, &xdp, xdp_prog);
- if (!err)
- *xdp_xmit = true;
- rcu_read_unlock();
- goto xdp_xmit;
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 1cf22e62e3dd..6e0af815f25e 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3516,7 +3516,7 @@ static int __init init_mac80211_hwsim(void)
spin_lock_init(&hwsim_radio_lock);
- hwsim_wq = alloc_workqueue("hwsim_wq",WQ_MEM_RECLAIM,0);
+ hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0);
if (!hwsim_wq)
return -ENOMEM;
rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f431c32774f3..0fe7ea35c221 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -120,8 +120,12 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
int ret;
ret = nvme_reset_ctrl(ctrl);
- if (!ret)
+ if (!ret) {
flush_work(&ctrl->reset_work);
+ if (ctrl->state != NVME_CTRL_LIVE)
+ ret = -ENETRESET;
+ }
+
return ret;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);
@@ -265,7 +269,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (new_state) {
case NVME_CTRL_ADMIN_ONLY:
switch (old_state) {
- case NVME_CTRL_RECONNECTING:
+ case NVME_CTRL_CONNECTING:
changed = true;
/* FALLTHRU */
default:
@@ -276,7 +280,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
switch (old_state) {
case NVME_CTRL_NEW:
case NVME_CTRL_RESETTING:
- case NVME_CTRL_RECONNECTING:
+ case NVME_CTRL_CONNECTING:
changed = true;
/* FALLTHRU */
default:
@@ -294,9 +298,9 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
break;
}
break;
- case NVME_CTRL_RECONNECTING:
+ case NVME_CTRL_CONNECTING:
switch (old_state) {
- case NVME_CTRL_LIVE:
+ case NVME_CTRL_NEW:
case NVME_CTRL_RESETTING:
changed = true;
/* FALLTHRU */
@@ -309,7 +313,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
case NVME_CTRL_LIVE:
case NVME_CTRL_ADMIN_ONLY:
case NVME_CTRL_RESETTING:
- case NVME_CTRL_RECONNECTING:
+ case NVME_CTRL_CONNECTING:
changed = true;
/* FALLTHRU */
default:
@@ -518,9 +522,11 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
- range[n].cattr = cpu_to_le32(0);
- range[n].nlb = cpu_to_le32(nlb);
- range[n].slba = cpu_to_le64(slba);
+ if (n < segments) {
+ range[n].cattr = cpu_to_le32(0);
+ range[n].nlb = cpu_to_le32(nlb);
+ range[n].slba = cpu_to_le64(slba);
+ }
n++;
}
@@ -794,13 +800,9 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
- struct nvme_command c;
struct request *rq;
- memset(&c, 0, sizeof(c));
- c.common.opcode = nvme_admin_keep_alive;
-
- rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
+ rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
NVME_QID_ANY);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -832,6 +834,8 @@ void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
return;
INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
+ memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
+ ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
EXPORT_SYMBOL_GPL(nvme_start_keep_alive);
@@ -1117,14 +1121,19 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
static void nvme_update_formats(struct nvme_ctrl *ctrl)
{
- struct nvme_ns *ns;
+ struct nvme_ns *ns, *next;
+ LIST_HEAD(rm_list);
mutex_lock(&ctrl->namespaces_mutex);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- if (ns->disk && nvme_revalidate_disk(ns->disk))
- nvme_ns_remove(ns);
+ if (ns->disk && nvme_revalidate_disk(ns->disk)) {
+ list_move_tail(&ns->list, &rm_list);
+ }
}
mutex_unlock(&ctrl->namespaces_mutex);
+
+ list_for_each_entry_safe(ns, next, &rm_list, list)
+ nvme_ns_remove(ns);
}
static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -2687,7 +2696,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
[NVME_CTRL_LIVE] = "live",
[NVME_CTRL_ADMIN_ONLY] = "only-admin",
[NVME_CTRL_RESETTING] = "resetting",
- [NVME_CTRL_RECONNECTING]= "reconnecting",
+ [NVME_CTRL_CONNECTING] = "connecting",
[NVME_CTRL_DELETING] = "deleting",
[NVME_CTRL_DEAD] = "dead",
};
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index 25b19f722f5b..a3145d90c1d2 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -171,13 +171,14 @@ static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
cmd->common.opcode != nvme_fabrics_command ||
cmd->fabrics.fctype != nvme_fabrics_type_connect) {
/*
- * Reconnecting state means transport disruption, which can take
- * a long time and even might fail permanently, fail fast to
- * give upper layers a chance to failover.
+ * Connecting state means transport disruption or initial
+ * establishment, which can take a long time and even might
+ * fail permanently, fail fast to give upper layers a chance
+ * to failover.
* Deleting state means that the ctrl will never accept commands
* again, fail it permanently.
*/
- if (ctrl->state == NVME_CTRL_RECONNECTING ||
+ if (ctrl->state == NVME_CTRL_CONNECTING ||
ctrl->state == NVME_CTRL_DELETING) {
nvme_req(rq)->status = NVME_SC_ABORT_REQ;
return BLK_STS_IOERR;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index b856d7c919d2..7f51f8414b97 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -55,9 +55,7 @@ struct nvme_fc_queue {
enum nvme_fcop_flags {
FCOP_FLAGS_TERMIO = (1 << 0),
- FCOP_FLAGS_RELEASED = (1 << 1),
- FCOP_FLAGS_COMPLETE = (1 << 2),
- FCOP_FLAGS_AEN = (1 << 3),
+ FCOP_FLAGS_AEN = (1 << 1),
};
struct nvmefc_ls_req_op {
@@ -532,7 +530,7 @@ nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
switch (ctrl->ctrl.state) {
case NVME_CTRL_NEW:
- case NVME_CTRL_RECONNECTING:
+ case NVME_CTRL_CONNECTING:
/*
* As all reconnects were suppressed, schedule a
* connect.
@@ -777,7 +775,7 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
}
break;
- case NVME_CTRL_RECONNECTING:
+ case NVME_CTRL_CONNECTING:
/*
* The association has already been terminated and the
* controller is attempting reconnects. No need to do anything
@@ -1470,7 +1468,6 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
/* *********************** NVME Ctrl Routines **************************** */
-static void __nvme_fc_final_op_cleanup(struct request *rq);
static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
static int
@@ -1512,13 +1509,19 @@ nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
- int state;
+ unsigned long flags;
+ int opstate;
+
+ spin_lock_irqsave(&ctrl->lock, flags);
+ opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
+ if (opstate != FCPOP_STATE_ACTIVE)
+ atomic_set(&op->state, opstate);
+ else if (ctrl->flags & FCCTRL_TERMIO)
+ ctrl->iocnt++;
+ spin_unlock_irqrestore(&ctrl->lock, flags);
- state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
- if (state != FCPOP_STATE_ACTIVE) {
- atomic_set(&op->state, state);
+ if (opstate != FCPOP_STATE_ACTIVE)
return -ECANCELED;
- }
ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
&ctrl->rport->remoteport,
@@ -1532,60 +1535,26 @@ static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
- unsigned long flags;
- int i, ret;
-
- for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
- if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
- continue;
-
- spin_lock_irqsave(&ctrl->lock, flags);
- if (ctrl->flags & FCCTRL_TERMIO) {
- ctrl->iocnt++;
- aen_op->flags |= FCOP_FLAGS_TERMIO;
- }
- spin_unlock_irqrestore(&ctrl->lock, flags);
-
- ret = __nvme_fc_abort_op(ctrl, aen_op);
- if (ret) {
- /*
- * if __nvme_fc_abort_op failed the io wasn't
- * active. Thus this call path is running in
- * parallel to the io complete. Treat as non-error.
- */
+ int i;
- /* back out the flags/counters */
- spin_lock_irqsave(&ctrl->lock, flags);
- if (ctrl->flags & FCCTRL_TERMIO)
- ctrl->iocnt--;
- aen_op->flags &= ~FCOP_FLAGS_TERMIO;
- spin_unlock_irqrestore(&ctrl->lock, flags);
- return;
- }
- }
+ for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
+ __nvme_fc_abort_op(ctrl, aen_op);
}
-static inline int
+static inline void
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
- struct nvme_fc_fcp_op *op)
+ struct nvme_fc_fcp_op *op, int opstate)
{
unsigned long flags;
- bool complete_rq = false;
- spin_lock_irqsave(&ctrl->lock, flags);
- if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
+ if (opstate == FCPOP_STATE_ABORTED) {
+ spin_lock_irqsave(&ctrl->lock, flags);
if (ctrl->flags & FCCTRL_TERMIO) {
if (!--ctrl->iocnt)
wake_up(&ctrl->ioabort_wait);
}
+ spin_unlock_irqrestore(&ctrl->lock, flags);
}
- if (op->flags & FCOP_FLAGS_RELEASED)
- complete_rq = true;
- else
- op->flags |= FCOP_FLAGS_COMPLETE;
- spin_unlock_irqrestore(&ctrl->lock, flags);
-
- return complete_rq;
}
static void
@@ -1601,6 +1570,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
union nvme_result result;
bool terminate_assoc = true;
+ int opstate;
/*
* WARNING:
@@ -1639,11 +1609,12 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
* association to be terminated.
*/
+ opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
+
fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
sizeof(op->rsp_iu), DMA_FROM_DEVICE);
- if (atomic_read(&op->state) == FCPOP_STATE_ABORTED ||
- op->flags & FCOP_FLAGS_TERMIO)
+ if (opstate == FCPOP_STATE_ABORTED)
status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
else if (freq->status)
status = cpu_to_le16(NVME_SC_INTERNAL << 1);
@@ -1708,7 +1679,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
done:
if (op->flags & FCOP_FLAGS_AEN) {
nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
- __nvme_fc_fcpop_chk_teardowns(ctrl, op);
+ __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
atomic_set(&op->state, FCPOP_STATE_IDLE);
op->flags = FCOP_FLAGS_AEN; /* clear other flags */
nvme_fc_ctrl_put(ctrl);
@@ -1722,13 +1693,11 @@ done:
if (status &&
(blk_queue_dying(rq->q) ||
ctrl->ctrl.state == NVME_CTRL_NEW ||
- ctrl->ctrl.state == NVME_CTRL_RECONNECTING))
+ ctrl->ctrl.state == NVME_CTRL_CONNECTING))
status |= cpu_to_le16(NVME_SC_DNR << 1);
- if (__nvme_fc_fcpop_chk_teardowns(ctrl, op))
- __nvme_fc_final_op_cleanup(rq);
- else
- nvme_end_request(rq, status, result);
+ __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
+ nvme_end_request(rq, status, result);
check_error:
if (terminate_assoc)
@@ -2415,46 +2384,16 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg)
}
static void
-__nvme_fc_final_op_cleanup(struct request *rq)
+nvme_fc_complete_rq(struct request *rq)
{
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
struct nvme_fc_ctrl *ctrl = op->ctrl;
atomic_set(&op->state, FCPOP_STATE_IDLE);
- op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
- FCOP_FLAGS_COMPLETE);
nvme_fc_unmap_data(ctrl, rq, op);
nvme_complete_rq(rq);
nvme_fc_ctrl_put(ctrl);
-
-}
-
-static void
-nvme_fc_complete_rq(struct request *rq)
-{
- struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
- struct nvme_fc_ctrl *ctrl = op->ctrl;
- unsigned long flags;
- bool completed = false;
-
- /*
- * the core layer, on controller resets after calling
- * nvme_shutdown_ctrl(), calls complete_rq without our
- * calling blk_mq_complete_request(), thus there may still
- * be live i/o outstanding with the LLDD. Means transport has
- * to track complete calls vs fcpio_done calls to know what
- * path to take on completes and dones.
- */
- spin_lock_irqsave(&ctrl->lock, flags);
- if (op->flags & FCOP_FLAGS_COMPLETE)
- completed = true;
- else
- op->flags |= FCOP_FLAGS_RELEASED;
- spin_unlock_irqrestore(&ctrl->lock, flags);
-
- if (completed)
- __nvme_fc_final_op_cleanup(rq);
}
/*
@@ -2476,35 +2415,11 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
struct nvme_ctrl *nctrl = data;
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
- unsigned long flags;
- int status;
if (!blk_mq_request_started(req))
return;
- spin_lock_irqsave(&ctrl->lock, flags);
- if (ctrl->flags & FCCTRL_TERMIO) {
- ctrl->iocnt++;
- op->flags |= FCOP_FLAGS_TERMIO;
- }
- spin_unlock_irqrestore(&ctrl->lock, flags);
-
- status = __nvme_fc_abort_op(ctrl, op);
- if (status) {
- /*
- * if __nvme_fc_abort_op failed the io wasn't
- * active. Thus this call path is running in
- * parallel to the io complete. Treat as non-error.
- */
-
- /* back out the flags/counters */
- spin_lock_irqsave(&ctrl->lock, flags);
- if (ctrl->flags & FCCTRL_TERMIO)
- ctrl->iocnt--;
- op->flags &= ~FCOP_FLAGS_TERMIO;
- spin_unlock_irqrestore(&ctrl->lock, flags);
- return;
- }
+ __nvme_fc_abort_op(ctrl, op);
}
@@ -2943,7 +2858,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
bool recon = true;
- if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING)
+ if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
return;
if (portptr->port_state == FC_OBJSTATE_ONLINE)
@@ -2991,10 +2906,10 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
/* will block while waiting for io to terminate */
nvme_fc_delete_association(ctrl);
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
dev_err(ctrl->ctrl.device,
"NVME-FC{%d}: error_recovery: Couldn't change state "
- "to RECONNECTING\n", ctrl->cnum);
+ "to CONNECTING\n", ctrl->cnum);
return;
}
@@ -3195,7 +3110,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
* transport errors (frame drop, LS failure) inherently must kill
* the association. The transport is coded so that any command used
* to create the association (prior to a LIVE state transition
- * while NEW or RECONNECTING) will fail if it completes in error or
+ * while NEW or CONNECTING) will fail if it completes in error or
* times out.
*
* As such: as the connect request was most likely due to a
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 8e4550fa08f8..0521e4707d1c 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -123,7 +123,7 @@ enum nvme_ctrl_state {
NVME_CTRL_LIVE,
NVME_CTRL_ADMIN_ONLY, /* Only admin queue live */
NVME_CTRL_RESETTING,
- NVME_CTRL_RECONNECTING,
+ NVME_CTRL_CONNECTING,
NVME_CTRL_DELETING,
NVME_CTRL_DEAD,
};
@@ -183,6 +183,7 @@ struct nvme_ctrl {
struct work_struct scan_work;
struct work_struct async_event_work;
struct delayed_work ka_work;
+ struct nvme_command ka_cmd;
struct work_struct fw_act_work;
/* Power saving configuration */
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 6fe7af00a1f4..73036d2fbbd5 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1141,7 +1141,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
/* If there is a reset/reinit ongoing, we shouldn't reset again. */
switch (dev->ctrl.state) {
case NVME_CTRL_RESETTING:
- case NVME_CTRL_RECONNECTING:
+ case NVME_CTRL_CONNECTING:
return false;
default:
break;
@@ -1215,13 +1215,17 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
* cancellation error. All outstanding requests are completed on
* shutdown, so we return BLK_EH_HANDLED.
*/
- if (dev->ctrl.state == NVME_CTRL_RESETTING) {
+ switch (dev->ctrl.state) {
+ case NVME_CTRL_CONNECTING:
+ case NVME_CTRL_RESETTING:
dev_warn(dev->ctrl.device,
"I/O %d QID %d timeout, disable controller\n",
req->tag, nvmeq->qid);
nvme_dev_disable(dev, false);
nvme_req(req)->flags |= NVME_REQ_CANCELLED;
return BLK_EH_HANDLED;
+ default:
+ break;
}
/*
@@ -1364,18 +1368,14 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
int qid, int depth)
{
- if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
- unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
- dev->ctrl.page_size);
- nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
- nvmeq->sq_cmds_io = dev->cmb + offset;
- } else {
- nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
- &nvmeq->sq_dma_addr, GFP_KERNEL);
- if (!nvmeq->sq_cmds)
- return -ENOMEM;
- }
+ /* CMB SQEs will be mapped before creation */
+ if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS))
+ return 0;
+ nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+ &nvmeq->sq_dma_addr, GFP_KERNEL);
+ if (!nvmeq->sq_cmds)
+ return -ENOMEM;
return 0;
}
@@ -1449,6 +1449,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
struct nvme_dev *dev = nvmeq->dev;
int result;
+ if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
+ unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
+ dev->ctrl.page_size);
+ nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
+ nvmeq->sq_cmds_io = dev->cmb + offset;
+ }
+
nvmeq->cq_vector = qid - 1;
result = adapter_alloc_cq(dev, qid, nvmeq);
if (result < 0)
@@ -2288,12 +2295,12 @@ static void nvme_reset_work(struct work_struct *work)
nvme_dev_disable(dev, false);
/*
- * Introduce RECONNECTING state from nvme-fc/rdma transports to mark the
+ * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
* initializing procedure here.
*/
- if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RECONNECTING)) {
+ if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
dev_warn(dev->ctrl.device,
- "failed to mark controller RECONNECTING\n");
+ "failed to mark controller CONNECTING\n");
goto out;
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 2bc059f7d73c..3a51ed50eff2 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -887,7 +887,7 @@ free_ctrl:
static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
{
/* If we are resetting/deleting then do nothing */
- if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
+ if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
ctrl->ctrl.state == NVME_CTRL_LIVE);
return;
@@ -973,7 +973,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
nvme_start_queues(&ctrl->ctrl);
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure should never happen */
WARN_ON_ONCE(1);
return;
@@ -1756,7 +1756,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
nvme_stop_ctrl(&ctrl->ctrl);
nvme_rdma_shutdown_ctrl(ctrl, false);
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+ if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure should never happen */
WARN_ON_ONCE(1);
return;
@@ -1784,11 +1784,8 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
return;
out_fail:
- dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
- nvme_remove_namespaces(&ctrl->ctrl);
- nvme_rdma_shutdown_ctrl(ctrl, true);
- nvme_uninit_ctrl(&ctrl->ctrl);
- nvme_put_ctrl(&ctrl->ctrl);
+ ++ctrl->ctrl.nr_reconnects;
+ nvme_rdma_reconnect_or_remove(ctrl);
}
static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
@@ -1942,6 +1939,9 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
if (!ctrl->queues)
goto out_uninit_ctrl;
+ changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
+ WARN_ON_ONCE(!changed);
+
ret = nvme_rdma_configure_admin_queue(ctrl, true);
if (ret)
goto out_kfree_queues;
diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c
index 0a4372a016f2..28bbdff4a88b 100644
--- a/drivers/nvme/target/io-cmd.c
+++ b/drivers/nvme/target/io-cmd.c
@@ -105,10 +105,13 @@ static void nvmet_execute_flush(struct nvmet_req *req)
static u16 nvmet_discard_range(struct nvmet_ns *ns,
struct nvme_dsm_range *range, struct bio **bio)
{
- if (__blkdev_issue_discard(ns->bdev,
+ int ret;
+
+ ret = __blkdev_issue_discard(ns->bdev,
le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
- GFP_KERNEL, 0, bio))
+ GFP_KERNEL, 0, bio);
+ if (ret && ret != -EOPNOTSUPP)
return NVME_SC_INTERNAL | NVME_SC_DNR;
return 0;
}
diff --git a/drivers/of/property.c b/drivers/of/property.c
index 36ed84e26d9c..f46828e3b082 100644
--- a/drivers/of/property.c
+++ b/drivers/of/property.c
@@ -977,11 +977,11 @@ static int of_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
return 0;
}
-static void *
+static const void *
of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
const struct device *dev)
{
- return (void *)of_device_get_match_data(dev);
+ return of_device_get_match_data(dev);
}
const struct fwnode_operations of_fwnode_ops = {
diff --git a/drivers/opp/cpu.c b/drivers/opp/cpu.c
index 2d87bc1adf38..0c0910709435 100644
--- a/drivers/opp/cpu.c
+++ b/drivers/opp/cpu.c
@@ -55,7 +55,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
if (max_opps <= 0)
return max_opps ? max_opps : -ENODATA;
- freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
+ freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_KERNEL);
if (!freq_table)
return -ENOMEM;
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index fc734014206f..8b14bd326d4a 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3419,22 +3419,29 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
{
- pci_set_vpd_size(dev, 8192);
-}
-
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd);
+ int chip = (dev->device & 0xf000) >> 12;
+ int func = (dev->device & 0x0f00) >> 8;
+ int prod = (dev->device & 0x00ff) >> 0;
+
+ /*
+ * If this is a T3-based adapter, there's a 1KB VPD area at offset
+ * 0xc00 which contains the preferred VPD values. If this is a T4 or
+ * later based adapter, the special VPD is at offset 0x400 for the
+ * Physical Functions (the SR-IOV Virtual Functions have no VPD
+ * Capabilities). The PCI VPD Access core routines will normally
+ * compute the size of the VPD by parsing the VPD Data Structure at
+ * offset 0x000. This will result in silent failures when attempting
+ * to access these other VPD areas which are beyond those computed
+ * limits.
+ */
+ if (chip == 0x0 && prod >= 0x20)
+ pci_set_vpd_size(dev, 8192);
+ else if (chip >= 0x4 && func < 0x8)
+ pci_set_vpd_size(dev, 2048);
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
+ quirk_chelsio_extend_vpd);
#ifdef CONFIG_ACPI
/*
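
The quirk above derives the chip generation, PCI function and product code from nibbles of the PCI device ID before picking a VPD size. A small stand-alone sketch of that decoding; the example device IDs below are made up for illustration:

/* Sketch only, not part of the patch. */
#include <stdio.h>

static void decode_chelsio_id(unsigned int device)
{
    int chip = (device & 0xf000) >> 12;  /* 0 = T3, >= 4 = T4 and later */
    int func = (device & 0x0f00) >> 8;   /* PCI function within the chip */
    int prod = (device & 0x00ff) >> 0;   /* product code */

    printf("device 0x%04x -> chip %d, func %d, prod 0x%02x\n",
           device, chip, func, prod);
}

int main(void)
{
    decode_chelsio_id(0x0022);  /* T3-style ID: chip 0, prod 0x22 */
    decode_chelsio_id(0x5407);  /* T5-style ID: chip 5, func 4     */
    return 0;
}
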
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 7bc5eee96b31..0c2ed11c0603 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -17,7 +17,6 @@
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/perf/arm_pmu.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/spinlock.h>
@@ -26,6 +25,9 @@
#include <asm/irq_regs.h>
+static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
+static DEFINE_PER_CPU(int, cpu_irq);
+
static int
armpmu_map_cache_event(const unsigned (*cache_map)
[PERF_COUNT_HW_CACHE_MAX]
@@ -320,17 +322,9 @@ validate_group(struct perf_event *event)
return 0;
}
-static struct arm_pmu_platdata *armpmu_get_platdata(struct arm_pmu *armpmu)
-{
- struct platform_device *pdev = armpmu->plat_device;
-
- return pdev ? dev_get_platdata(&pdev->dev) : NULL;
-}
-
static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
{
struct arm_pmu *armpmu;
- struct arm_pmu_platdata *plat;
int ret;
u64 start_clock, finish_clock;
@@ -341,14 +335,11 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev)
* dereference.
*/
armpmu = *(void **)dev;
-
- plat = armpmu_get_platdata(armpmu);
+ if (WARN_ON_ONCE(!armpmu))
+ return IRQ_NONE;
start_clock = sched_clock();
- if (plat && plat->handle_irq)
- ret = plat->handle_irq(irq, armpmu, armpmu->handle_irq);
- else
- ret = armpmu->handle_irq(irq, armpmu);
+ ret = armpmu->handle_irq(irq, armpmu);
finish_clock = sched_clock();
perf_sample_event_took(finish_clock - start_clock);
@@ -531,54 +522,41 @@ int perf_num_counters(void)
}
EXPORT_SYMBOL_GPL(perf_num_counters);
-void armpmu_free_irq(struct arm_pmu *armpmu, int cpu)
+static int armpmu_count_irq_users(const int irq)
{
- struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
- int irq = per_cpu(hw_events->irq, cpu);
+ int cpu, count = 0;
- if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs))
- return;
-
- if (irq_is_percpu_devid(irq)) {
- free_percpu_irq(irq, &hw_events->percpu_pmu);
- cpumask_clear(&armpmu->active_irqs);
- return;
+ for_each_possible_cpu(cpu) {
+ if (per_cpu(cpu_irq, cpu) == irq)
+ count++;
}
- free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+ return count;
}
-void armpmu_free_irqs(struct arm_pmu *armpmu)
+void armpmu_free_irq(int irq, int cpu)
{
- int cpu;
+ if (per_cpu(cpu_irq, cpu) == 0)
+ return;
+ if (WARN_ON(irq != per_cpu(cpu_irq, cpu)))
+ return;
+
+ if (!irq_is_percpu_devid(irq))
+ free_irq(irq, per_cpu_ptr(&cpu_armpmu, cpu));
+ else if (armpmu_count_irq_users(irq) == 1)
+ free_percpu_irq(irq, &cpu_armpmu);
- for_each_cpu(cpu, &armpmu->supported_cpus)
- armpmu_free_irq(armpmu, cpu);
+ per_cpu(cpu_irq, cpu) = 0;
}
-int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
+int armpmu_request_irq(int irq, int cpu)
{
int err = 0;
- struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
const irq_handler_t handler = armpmu_dispatch_irq;
- int irq = per_cpu(hw_events->irq, cpu);
if (!irq)
return 0;
- if (irq_is_percpu_devid(irq) && cpumask_empty(&armpmu->active_irqs)) {
- err = request_percpu_irq(irq, handler, "arm-pmu",
- &hw_events->percpu_pmu);
- } else if (irq_is_percpu_devid(irq)) {
- int other_cpu = cpumask_first(&armpmu->active_irqs);
- int other_irq = per_cpu(hw_events->irq, other_cpu);
-
- if (irq != other_irq) {
- pr_warn("mismatched PPIs detected.\n");
- err = -EINVAL;
- goto err_out;
- }
- } else {
- struct arm_pmu_platdata *platdata = armpmu_get_platdata(armpmu);
+ if (!irq_is_percpu_devid(irq)) {
unsigned long irq_flags;
err = irq_force_affinity(irq, cpumask_of(cpu));
@@ -589,22 +567,22 @@ int armpmu_request_irq(struct arm_pmu *armpmu, int cpu)
goto err_out;
}
- if (platdata && platdata->irq_flags) {
- irq_flags = platdata->irq_flags;
- } else {
- irq_flags = IRQF_PERCPU |
- IRQF_NOBALANCING |
- IRQF_NO_THREAD;
- }
+ irq_flags = IRQF_PERCPU |
+ IRQF_NOBALANCING |
+ IRQF_NO_THREAD;
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
err = request_irq(irq, handler, irq_flags, "arm-pmu",
- per_cpu_ptr(&hw_events->percpu_pmu, cpu));
+ per_cpu_ptr(&cpu_armpmu, cpu));
+ } else if (armpmu_count_irq_users(irq) == 0) {
+ err = request_percpu_irq(irq, handler, "arm-pmu",
+ &cpu_armpmu);
}
if (err)
goto err_out;
- cpumask_set_cpu(cpu, &armpmu->active_irqs);
+ per_cpu(cpu_irq, cpu) = irq;
return 0;
err_out:
@@ -612,19 +590,6 @@ err_out:
return err;
}
-int armpmu_request_irqs(struct arm_pmu *armpmu)
-{
- int cpu, err;
-
- for_each_cpu(cpu, &armpmu->supported_cpus) {
- err = armpmu_request_irq(armpmu, cpu);
- if (err)
- break;
- }
-
- return err;
-}
-
static int armpmu_get_cpu_irq(struct arm_pmu *pmu, int cpu)
{
struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
@@ -647,12 +612,14 @@ static int arm_perf_starting_cpu(unsigned int cpu, struct hlist_node *node)
if (pmu->reset)
pmu->reset(pmu);
+ per_cpu(cpu_armpmu, cpu) = pmu;
+
irq = armpmu_get_cpu_irq(pmu, cpu);
if (irq) {
- if (irq_is_percpu_devid(irq)) {
+ if (irq_is_percpu_devid(irq))
enable_percpu_irq(irq, IRQ_TYPE_NONE);
- return 0;
- }
+ else
+ enable_irq(irq);
}
return 0;
@@ -667,8 +634,14 @@ static int arm_perf_teardown_cpu(unsigned int cpu, struct hlist_node *node)
return 0;
irq = armpmu_get_cpu_irq(pmu, cpu);
- if (irq && irq_is_percpu_devid(irq))
- disable_percpu_irq(irq);
+ if (irq) {
+ if (irq_is_percpu_devid(irq))
+ disable_percpu_irq(irq);
+ else
+ disable_irq(irq);
+ }
+
+ per_cpu(cpu_armpmu, cpu) = NULL;
return 0;
}
@@ -800,18 +773,18 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
&cpu_pmu->node);
}
-struct arm_pmu *armpmu_alloc(void)
+static struct arm_pmu *__armpmu_alloc(gfp_t flags)
{
struct arm_pmu *pmu;
int cpu;
- pmu = kzalloc(sizeof(*pmu), GFP_KERNEL);
+ pmu = kzalloc(sizeof(*pmu), flags);
if (!pmu) {
pr_info("failed to allocate PMU device!\n");
goto out;
}
- pmu->hw_events = alloc_percpu(struct pmu_hw_events);
+ pmu->hw_events = alloc_percpu_gfp(struct pmu_hw_events, flags);
if (!pmu->hw_events) {
pr_info("failed to allocate per-cpu PMU data.\n");
goto out_free_pmu;
@@ -857,6 +830,17 @@ out:
return NULL;
}
+struct arm_pmu *armpmu_alloc(void)
+{
+ return __armpmu_alloc(GFP_KERNEL);
+}
+
+struct arm_pmu *armpmu_alloc_atomic(void)
+{
+ return __armpmu_alloc(GFP_ATOMIC);
+}
+
+
void armpmu_free(struct arm_pmu *pmu)
{
free_percpu(pmu->hw_events);
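
The arm_pmu rework above moves IRQ bookkeeping from a per-PMU cpumask to the per-CPU cpu_irq/cpu_armpmu variables, so a shared percpu_devid IRQ is requested by its first user and freed by its last. A minimal user-space model of that count-by-scanning idea, with per-CPU variables replaced by a plain array and a fixed CPU count (both assumptions of the sketch):

/* Sketch only, not part of the patch. */
#include <stdio.h>

#define NR_CPUS 4

static int cpu_irq[NR_CPUS];   /* stand-in for the per-CPU cpu_irq variable */

static int count_irq_users(int irq)
{
    int cpu, count = 0;

    for (cpu = 0; cpu < NR_CPUS; cpu++)
        if (cpu_irq[cpu] == irq)
            count++;
    return count;
}

static void request_irq_for_cpu(int irq, int cpu)
{
    /* the real code only calls request_percpu_irq() for the first user */
    if (count_irq_users(irq) == 0)
        printf("cpu%d: first user of irq %d, requesting it\n", cpu, irq);
    cpu_irq[cpu] = irq;
}

static void free_irq_for_cpu(int irq, int cpu)
{
    cpu_irq[cpu] = 0;
    /* the real code only calls free_percpu_irq() once the last user is gone */
    if (count_irq_users(irq) == 0)
        printf("cpu%d: last user of irq %d, freeing it\n", cpu, irq);
}

int main(void)
{
    request_irq_for_cpu(23, 0);
    request_irq_for_cpu(23, 1);
    free_irq_for_cpu(23, 0);
    free_irq_for_cpu(23, 1);
    return 0;
}
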
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index 705f1a390e31..0f197516d708 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -11,6 +11,8 @@
#include <linux/acpi.h>
#include <linux/cpumask.h>
#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
#include <linux/percpu.h>
#include <linux/perf/arm_pmu.h>
@@ -87,7 +89,13 @@ static int arm_pmu_acpi_parse_irqs(void)
pr_warn("No ACPI PMU IRQ for CPU%d\n", cpu);
}
+ /*
+ * Log and request the IRQ so the core arm_pmu code can manage
+ * it. We'll have to sanity-check IRQs later when we associate
+ * them with their PMUs.
+ */
per_cpu(pmu_irqs, cpu) = irq;
+ armpmu_request_irq(irq, cpu);
}
return 0;
@@ -127,7 +135,7 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
return pmu;
}
- pmu = armpmu_alloc();
+ pmu = armpmu_alloc_atomic();
if (!pmu) {
pr_warn("Unable to allocate PMU for CPU%d\n",
smp_processor_id());
@@ -140,6 +148,35 @@ static struct arm_pmu *arm_pmu_acpi_find_alloc_pmu(void)
}
/*
+ * Check whether the new IRQ is compatible with those already associated with
+ * the PMU (e.g. we don't have mismatched PPIs).
+ */
+static bool pmu_irq_matches(struct arm_pmu *pmu, int irq)
+{
+ struct pmu_hw_events __percpu *hw_events = pmu->hw_events;
+ int cpu;
+
+ if (!irq)
+ return true;
+
+ for_each_cpu(cpu, &pmu->supported_cpus) {
+ int other_irq = per_cpu(hw_events->irq, cpu);
+ if (!other_irq)
+ continue;
+
+ if (irq == other_irq)
+ continue;
+ if (!irq_is_percpu_devid(irq) && !irq_is_percpu_devid(other_irq))
+ continue;
+
+ pr_warn("mismatched PPIs detected\n");
+ return false;
+ }
+
+ return true;
+}
+
+/*
* This must run before the common arm_pmu hotplug logic, so that we can
* associate a CPU and its interrupt before the common code tries to manage the
* affinity and so on.
@@ -164,19 +201,14 @@ static int arm_pmu_acpi_cpu_starting(unsigned int cpu)
if (!pmu)
return -ENOMEM;
- cpumask_set_cpu(cpu, &pmu->supported_cpus);
-
per_cpu(probed_pmus, cpu) = pmu;
- /*
- * Log and request the IRQ so the core arm_pmu code can manage it. In
- * some situations (e.g. mismatched PPIs), we may fail to request the
- * IRQ. However, it may be too late for us to do anything about it.
- * The common ARM PMU code will log a warning in this case.
- */
- hw_events = pmu->hw_events;
- per_cpu(hw_events->irq, cpu) = irq;
- armpmu_request_irq(pmu, cpu);
+ if (pmu_irq_matches(pmu, irq)) {
+ hw_events = pmu->hw_events;
+ per_cpu(hw_events->irq, cpu) = irq;
+ }
+
+ cpumask_set_cpu(cpu, &pmu->supported_cpus);
/*
* Ideally, we'd probe the PMU here when we find the first matching
@@ -247,11 +279,6 @@ static int arm_pmu_acpi_init(void)
if (acpi_disabled)
return 0;
- /*
- * We can't request IRQs yet, since we don't know the cookie value
- * until we know which CPUs share the same logical PMU. We'll handle
- * that in arm_pmu_acpi_cpu_starting().
- */
ret = arm_pmu_acpi_parse_irqs();
if (ret)
return ret;
diff --git a/drivers/perf/arm_pmu_platform.c b/drivers/perf/arm_pmu_platform.c
index 46501cc79fd7..7729eda5909d 100644
--- a/drivers/perf/arm_pmu_platform.c
+++ b/drivers/perf/arm_pmu_platform.c
@@ -127,13 +127,6 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
pdev->dev.of_node);
}
- /*
- * Some platforms have all PMU IRQs OR'd into a single IRQ, with a
- * special platdata function that attempts to demux them.
- */
- if (dev_get_platdata(&pdev->dev))
- cpumask_setall(&pmu->supported_cpus);
-
for (i = 0; i < num_irqs; i++) {
int cpu, irq;
@@ -164,6 +157,36 @@ static int pmu_parse_irqs(struct arm_pmu *pmu)
return 0;
}
+static int armpmu_request_irqs(struct arm_pmu *armpmu)
+{
+ struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
+ int cpu, err;
+
+ for_each_cpu(cpu, &armpmu->supported_cpus) {
+ int irq = per_cpu(hw_events->irq, cpu);
+ if (!irq)
+ continue;
+
+ err = armpmu_request_irq(irq, cpu);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static void armpmu_free_irqs(struct arm_pmu *armpmu)
+{
+ int cpu;
+ struct pmu_hw_events __percpu *hw_events = armpmu->hw_events;
+
+ for_each_cpu(cpu, &armpmu->supported_cpus) {
+ int irq = per_cpu(hw_events->irq, cpu);
+
+ armpmu_free_irq(irq, cpu);
+ }
+}
+
int arm_pmu_device_probe(struct platform_device *pdev,
const struct of_device_id *of_table,
const struct pmu_probe_info *probe_table)
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 2a68f59d2228..c52c6723374b 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -127,24 +127,6 @@ static const struct dmi_system_id dell_device_table[] __initconst = {
},
},
{
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_CHASSIS_TYPE, "30"), /*Tablet*/
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /*Convertible*/
- },
- },
- {
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_CHASSIS_TYPE, "32"), /*Detachable*/
- },
- },
- {
.ident = "Dell Computer Corporation",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Dell Computer Corporation"),
@@ -1279,7 +1261,7 @@ static int kbd_get_state(struct kbd_state *state)
struct calling_interface_buffer buffer;
int ret;
- dell_fill_request(&buffer, 0, 0, 0, 0);
+ dell_fill_request(&buffer, 0x1, 0, 0, 0);
ret = dell_send_request(&buffer,
CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
if (ret)
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 5b6f18b18801..535199c9e6bc 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -113,7 +113,7 @@ MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
/*
* ACPI Helpers
*/
-#define IDEAPAD_EC_TIMEOUT (100) /* in ms */
+#define IDEAPAD_EC_TIMEOUT (200) /* in ms */
static int read_method_int(acpi_handle handle, const char *method, int *val)
{
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index daa68acbc900..c0c8945603cb 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -933,7 +933,7 @@ static int wmi_dev_probe(struct device *dev)
goto probe_failure;
}
- buf = kmalloc(strlen(wdriver->driver.name) + 4, GFP_KERNEL);
+ buf = kmalloc(strlen(wdriver->driver.name) + 5, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto probe_string_failure;
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index ba2e0856d22c..8f5c1d7f751a 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -1297,6 +1297,9 @@ static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
vcdev->device_lost = true;
rc = NOTIFY_DONE;
break;
+ case CIO_OPER:
+ rc = NOTIFY_OK;
+ break;
default:
rc = NOTIFY_DONE;
break;
@@ -1309,6 +1312,27 @@ static struct ccw_device_id virtio_ids[] = {
{},
};
+#ifdef CONFIG_PM_SLEEP
+static int virtio_ccw_freeze(struct ccw_device *cdev)
+{
+ struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+
+ return virtio_device_freeze(&vcdev->vdev);
+}
+
+static int virtio_ccw_restore(struct ccw_device *cdev)
+{
+ struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+ int ret;
+
+ ret = virtio_ccw_set_transport_rev(vcdev);
+ if (ret)
+ return ret;
+
+ return virtio_device_restore(&vcdev->vdev);
+}
+#endif
+
static struct ccw_driver virtio_ccw_driver = {
.driver = {
.owner = THIS_MODULE,
@@ -1321,6 +1345,11 @@ static struct ccw_driver virtio_ccw_driver = {
.set_online = virtio_ccw_online,
.notify = virtio_ccw_cio_notify,
.int_class = IRQIO_VIR,
+#ifdef CONFIG_PM_SLEEP
+ .freeze = virtio_ccw_freeze,
+ .thaw = virtio_ccw_restore,
+ .restore = virtio_ccw_restore,
+#endif
};
static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index fcfd28d2884c..de1b3fce936d 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -185,7 +185,6 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
CFLAGS_ncr53c8xx.o := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
zalon7xx-objs := zalon.o ncr53c8xx.o
NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
-oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o
# Files generated that shall be removed upon make clean
clean-files := 53c700_d.h 53c700_u.h
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index b3b931ab77eb..2664ea0df35f 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1693,8 +1693,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
* Map in the registers from the adapter.
*/
aac->base_size = AAC_MIN_FOOTPRINT_SIZE;
- if ((*aac_drivers[index].init)(aac))
+ if ((*aac_drivers[index].init)(aac)) {
+ error = -ENODEV;
goto out_unmap;
+ }
if (aac->sync_mode) {
if (aac_sync_mode)
diff --git a/drivers/scsi/aic7xxx/aiclib.c b/drivers/scsi/aic7xxx/aiclib.c
deleted file mode 100644
index 828ae3d9a510..000000000000
--- a/drivers/scsi/aic7xxx/aiclib.c
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Implementation of Utility functions for all SCSI device types.
- *
- * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs.
- * Copyright (c) 1997, 1998 Kenneth D. Merry.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions, and the following disclaimer,
- * without modification, immediately at the beginning of the file.
- * 2. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD: src/sys/cam/scsi/scsi_all.c,v 1.38 2002/09/23 04:56:35 mjacob Exp $
- * $Id$
- */
-
-#include "aiclib.h"
-
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 8e2f767147cb..5a645b8b9af1 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1889,6 +1889,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
/* we will not receive ABTS response for this IO */
BNX2FC_IO_DBG(io_req, "Timer context finished processing "
"this scsi cmd\n");
+ return;
}
/* Cancel the timeout_work, as we received IO completion */
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
index be5ee2d37815..7dbbbb81a1e7 100644
--- a/drivers/scsi/csiostor/csio_lnode.c
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -114,7 +114,7 @@ static enum csio_ln_ev fwevt_to_lnevt[] = {
static struct csio_lnode *
csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid)
{
- struct csio_lnode *ln = hw->rln;
+ struct csio_lnode *ln;
struct list_head *tmp;
/* Match siblings lnode with portid */
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 022e421c2185..4b44325d1a82 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -876,6 +876,11 @@ static void alua_rtpg_work(struct work_struct *work)
/**
* alua_rtpg_queue() - cause RTPG to be submitted asynchronously
+ * @pg: ALUA port group associated with @sdev.
+ * @sdev: SCSI device for which to submit an RTPG.
+ * @qdata: Information about the callback to invoke after the RTPG.
+ * @force: Whether or not to submit an RTPG if a work item that will submit an
+ * RTPG already has been scheduled.
*
* Returns true if and only if alua_rtpg_work() will be called asynchronously.
* That function is responsible for calling @qdata->fn().
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 9a0696f68f37..b81a53c4a9a8 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -367,7 +367,7 @@ enum ibmvfc_fcp_rsp_info_codes {
};
struct ibmvfc_fcp_rsp_info {
- __be16 reserved;
+ u8 reserved[3];
u8 rsp_code;
u8 reserved2[4];
}__attribute__((packed, aligned (2)));
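
The header fix above replaces the 2-byte reserved field with a 3-byte one so that rsp_code lands at byte offset 3 of the FCP response-info block, matching the layout the driver parses. A stand-alone check of the two layouts with offsetof():

/* Sketch only, not part of the patch. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct rsp_info_old {
    uint16_t reserved;         /* 2 bytes: rsp_code ends up at offset 2 */
    uint8_t  rsp_code;
    uint8_t  reserved2[4];
} __attribute__((packed, aligned(2)));

struct rsp_info_new {
    uint8_t  reserved[3];      /* 3 bytes: rsp_code now at offset 3 */
    uint8_t  rsp_code;
    uint8_t  reserved2[4];
} __attribute__((packed, aligned(2)));

int main(void)
{
    printf("old rsp_code offset: %zu\n", offsetof(struct rsp_info_old, rsp_code));
    printf("new rsp_code offset: %zu\n", offsetof(struct rsp_info_new, rsp_code));
    return 0;
}
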
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 13d6e4ec3022..59a87ca328d3 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2410,8 +2410,11 @@ _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
continue;
}
- for_each_cpu(cpu, mask)
+ for_each_cpu_and(cpu, mask, cpu_online_mask) {
+ if (cpu >= ioc->cpu_msix_table_sz)
+ break;
ioc->cpu_msix_table[cpu] = reply_q->msix_index;
+ }
}
return;
}
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 029e2e69b29f..f57a94b4f0d9 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1724,7 +1724,6 @@ static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
{
struct qedi_ctx *qedi = data;
struct nvm_iscsi_initiator *initiator;
- char *str = buf;
int rc = 1;
u32 ipv6_en, dhcp_en, ip_len;
struct nvm_iscsi_block *block;
@@ -1758,32 +1757,32 @@ static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf)
switch (type) {
case ISCSI_BOOT_ETH_IP_ADDR:
- rc = snprintf(str, ip_len, fmt, ip);
+ rc = snprintf(buf, ip_len, fmt, ip);
break;
case ISCSI_BOOT_ETH_SUBNET_MASK:
- rc = snprintf(str, ip_len, fmt, sub);
+ rc = snprintf(buf, ip_len, fmt, sub);
break;
case ISCSI_BOOT_ETH_GATEWAY:
- rc = snprintf(str, ip_len, fmt, gw);
+ rc = snprintf(buf, ip_len, fmt, gw);
break;
case ISCSI_BOOT_ETH_FLAGS:
- rc = snprintf(str, 3, "%hhd\n",
+ rc = snprintf(buf, 3, "%hhd\n",
SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_ETH_INDEX:
- rc = snprintf(str, 3, "0\n");
+ rc = snprintf(buf, 3, "0\n");
break;
case ISCSI_BOOT_ETH_MAC:
- rc = sysfs_format_mac(str, qedi->mac, ETH_ALEN);
+ rc = sysfs_format_mac(buf, qedi->mac, ETH_ALEN);
break;
case ISCSI_BOOT_ETH_VLAN:
- rc = snprintf(str, 12, "%d\n",
+ rc = snprintf(buf, 12, "%d\n",
GET_FIELD2(initiator->generic_cont0,
NVM_ISCSI_CFG_INITIATOR_VLAN));
break;
case ISCSI_BOOT_ETH_ORIGIN:
if (dhcp_en)
- rc = snprintf(str, 3, "3\n");
+ rc = snprintf(buf, 3, "3\n");
break;
default:
rc = 0;
@@ -1819,7 +1818,6 @@ static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf)
{
struct qedi_ctx *qedi = data;
struct nvm_iscsi_initiator *initiator;
- char *str = buf;
int rc;
struct nvm_iscsi_block *block;
@@ -1831,8 +1829,8 @@ static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf)
switch (type) {
case ISCSI_BOOT_INI_INITIATOR_NAME:
- rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
- initiator->initiator_name.byte);
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN,
+ initiator->initiator_name.byte);
break;
default:
rc = 0;
@@ -1860,7 +1858,6 @@ static ssize_t
qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
char *buf, enum qedi_nvm_tgts idx)
{
- char *str = buf;
int rc = 1;
u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len;
struct nvm_iscsi_block *block;
@@ -1899,48 +1896,48 @@ qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type,
switch (type) {
case ISCSI_BOOT_TGT_NAME:
- rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
- block->target[idx].target_name.byte);
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN,
+ block->target[idx].target_name.byte);
break;
case ISCSI_BOOT_TGT_IP_ADDR:
if (ipv6_en)
- rc = snprintf(str, ip_len, "%pI6\n",
+ rc = snprintf(buf, ip_len, "%pI6\n",
block->target[idx].ipv6_addr.byte);
else
- rc = snprintf(str, ip_len, "%pI4\n",
+ rc = snprintf(buf, ip_len, "%pI4\n",
block->target[idx].ipv4_addr.byte);
break;
case ISCSI_BOOT_TGT_PORT:
- rc = snprintf(str, 12, "%d\n",
+ rc = snprintf(buf, 12, "%d\n",
GET_FIELD2(block->target[idx].generic_cont0,
NVM_ISCSI_CFG_TARGET_TCP_PORT));
break;
case ISCSI_BOOT_TGT_LUN:
- rc = snprintf(str, 22, "%.*d\n",
+ rc = snprintf(buf, 22, "%.*d\n",
block->target[idx].lun.value[1],
block->target[idx].lun.value[0]);
break;
case ISCSI_BOOT_TGT_CHAP_NAME:
- rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
- chap_name);
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ chap_name);
break;
case ISCSI_BOOT_TGT_CHAP_SECRET:
- rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
- chap_secret);
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ chap_secret);
break;
case ISCSI_BOOT_TGT_REV_CHAP_NAME:
- rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n",
- mchap_name);
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ mchap_name);
break;
case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
- rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n",
- mchap_secret);
+ rc = sprintf(buf, "%.*s\n", NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN,
+ mchap_secret);
break;
case ISCSI_BOOT_TGT_FLAGS:
- rc = snprintf(str, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
+ rc = snprintf(buf, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT);
break;
case ISCSI_BOOT_TGT_NIC_ASSOC:
- rc = snprintf(str, 3, "0\n");
+ rc = snprintf(buf, 3, "0\n");
break;
default:
rc = 0;
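
The qedi sysfs changes above switch from snprintf(buf, LEN, "%s\n", src) to sprintf(buf, "%.*s\n", LEN, src): the precision bounds how much of the source is read, which is the right guard for firmware strings that may not be NUL-terminated, whereas the old form only bounded the output. A small illustration; the 4-byte field below is a made-up example:

/* Sketch only, not part of the patch. */
#include <stdio.h>

int main(void)
{
    /* a 4-byte field with no terminating NUL, as firmware might provide */
    char name[4] = { 'i', 'q', 'n', '1' };
    char buf[64];

    /* "%.*s" reads at most sizeof(name) bytes, terminator or not */
    int len = sprintf(buf, "%.*s\n", (int)sizeof(name), name);

    printf("wrote %d bytes: %s", len, buf);
    return 0;
}
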
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index aececf664654..2dea1129d396 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -59,8 +59,6 @@ qla2x00_sp_timeout(struct timer_list *t)
req->outstanding_cmds[sp->handle] = NULL;
iocb = &sp->u.iocb_cmd;
iocb->timeout(sp);
- if (sp->type != SRB_ELS_DCMD)
- sp->free(sp);
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
}
@@ -102,7 +100,6 @@ qla2x00_async_iocb_timeout(void *data)
srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
struct srb_iocb *lio = &sp->u.iocb_cmd;
- struct event_arg ea;
if (fcport) {
ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
@@ -117,25 +114,13 @@ qla2x00_async_iocb_timeout(void *data)
switch (sp->type) {
case SRB_LOGIN_CMD:
- if (!fcport)
- break;
/* Retry as needed. */
lio->u.logio.data[0] = MBS_COMMAND_ERROR;
lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
QLA_LOGIO_LOGIN_RETRIED : 0;
- memset(&ea, 0, sizeof(ea));
- ea.event = FCME_PLOGI_DONE;
- ea.fcport = sp->fcport;
- ea.data[0] = lio->u.logio.data[0];
- ea.data[1] = lio->u.logio.data[1];
- ea.sp = sp;
- qla24xx_handle_plogi_done_event(fcport->vha, &ea);
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
break;
case SRB_LOGOUT_CMD:
- if (!fcport)
- break;
- qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
- break;
case SRB_CT_PTHRU_CMD:
case SRB_MB_IOCB:
case SRB_NACK_PLOGI:
@@ -235,12 +220,10 @@ static void
qla2x00_async_logout_sp_done(void *ptr, int res)
{
srb_t *sp = ptr;
- struct srb_iocb *lio = &sp->u.iocb_cmd;
sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
- if (!test_bit(UNLOADING, &sp->vha->dpc_flags))
- qla2x00_post_async_logout_done_work(sp->vha, sp->fcport,
- lio->u.logio.data);
+ sp->fcport->login_gen++;
+ qlt_logo_completion_handler(sp->fcport, res);
sp->free(sp);
}
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 1b62e943ec49..8d00d559bd26 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -3275,12 +3275,11 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
abt_iocb->entry_type = ABORT_IOCB_TYPE;
abt_iocb->entry_count = 1;
- abt_iocb->handle =
- cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
- aio->u.abt.cmd_hndl));
+ abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle));
abt_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
abt_iocb->handle_to_abort =
- cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl));
+ cpu_to_le32(MAKE_HANDLE(aio->u.abt.req_que_no,
+ aio->u.abt.cmd_hndl));
abt_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
abt_iocb->port_id[1] = sp->fcport->d_id.b.area;
abt_iocb->port_id[2] = sp->fcport->d_id.b.domain;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 14109d86c3f6..89f93ebd819d 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -272,7 +272,8 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
/* Read all mbox registers? */
- mboxes = (1 << ha->mbx_count) - 1;
+ WARN_ON_ONCE(ha->mbx_count > 32);
+ mboxes = (1ULL << ha->mbx_count) - 1;
if (!ha->mcp)
ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
else
@@ -2880,7 +2881,8 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
/* Read all mbox registers? */
- mboxes = (1 << ha->mbx_count) - 1;
+ WARN_ON_ONCE(ha->mbx_count > 32);
+ mboxes = (1ULL << ha->mbx_count) - 1;
if (!ha->mcp)
ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
else
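
The fix above widens the mailbox-mask computation to a 64-bit shift and warns if mbx_count exceeds 32, because shifting a 32-bit 1 by 32 positions is undefined behaviour in C. A short demonstration of the corrected expression:

/* Sketch only, not part of the patch. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    unsigned int count = 32;

    /* (1 << 32) on a 32-bit int is undefined; the 64-bit shift is well defined */
    uint64_t mask = (1ULL << count) - 1;   /* 0xffffffff, as intended */

    printf("count=%u -> mask=%#llx\n", count, (unsigned long long)mask);
    return 0;
}
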
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 12ee6e02d146..afcb5567998a 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3625,6 +3625,8 @@ qla2x00_remove_one(struct pci_dev *pdev)
}
qla2x00_wait_for_hba_ready(base_vha);
+ qla2x00_wait_for_sess_deletion(base_vha);
+
/*
* if UNLOAD flag is already set, then continue unload,
* where it was set first.
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index fc89af8fe256..896b2d8bd803 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -4871,8 +4871,6 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
sess);
qlt_send_term_imm_notif(vha, iocb, 1);
res = 0;
- spin_lock_irqsave(&tgt->ha->tgt.sess_lock,
- flags);
break;
}
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index fc233717355f..817f312023a9 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -168,6 +168,8 @@
#define DEV_DB_NON_PERSISTENT 0
#define DEV_DB_PERSISTENT 1
+#define QL4_ISP_REG_DISCONNECT 0xffffffffU
+
#define COPY_ISID(dst_isid, src_isid) { \
int i, j; \
for (i = 0, j = ISID_SIZE - 1; i < ISID_SIZE;) \
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 82e889bbe0ed..fc2c97d9a0d6 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -262,6 +262,24 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
static struct scsi_transport_template *qla4xxx_scsi_transport;
+static int qla4xxx_isp_check_reg(struct scsi_qla_host *ha)
+{
+ u32 reg_val = 0;
+ int rval = QLA_SUCCESS;
+
+ if (is_qla8022(ha))
+ reg_val = readl(&ha->qla4_82xx_reg->host_status);
+ else if (is_qla8032(ha) || is_qla8042(ha))
+ reg_val = qla4_8xxx_rd_direct(ha, QLA8XXX_PEG_ALIVE_COUNTER);
+ else
+ reg_val = readw(&ha->reg->ctrl_status);
+
+ if (reg_val == QL4_ISP_REG_DISCONNECT)
+ rval = QLA_ERROR;
+
+ return rval;
+}
+
static int qla4xxx_send_ping(struct Scsi_Host *shost, uint32_t iface_num,
uint32_t iface_type, uint32_t payload_size,
uint32_t pid, struct sockaddr *dst_addr)
@@ -9186,10 +9204,17 @@ static int qla4xxx_eh_abort(struct scsi_cmnd *cmd)
struct srb *srb = NULL;
int ret = SUCCESS;
int wait = 0;
+ int rval;
ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%llu: Abort command issued cmd=%p, cdb=0x%x\n",
ha->host_no, id, lun, cmd, cmd->cmnd[0]);
+ rval = qla4xxx_isp_check_reg(ha);
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+ return FAILED;
+ }
+
spin_lock_irqsave(&ha->hardware_lock, flags);
srb = (struct srb *) CMD_SP(cmd);
if (!srb) {
@@ -9241,6 +9266,7 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
struct ddb_entry *ddb_entry = cmd->device->hostdata;
int ret = FAILED, stat;
+ int rval;
if (!ddb_entry)
return ret;
@@ -9260,6 +9286,12 @@ static int qla4xxx_eh_device_reset(struct scsi_cmnd *cmd)
cmd, jiffies, cmd->request->timeout / HZ,
ha->dpc_flags, cmd->result, cmd->allowed));
+ rval = qla4xxx_isp_check_reg(ha);
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+ return FAILED;
+ }
+
/* FIXME: wait for hba to go online */
stat = qla4xxx_reset_lun(ha, ddb_entry, cmd->device->lun);
if (stat != QLA_SUCCESS) {
@@ -9303,6 +9335,7 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
struct scsi_qla_host *ha = to_qla_host(cmd->device->host);
struct ddb_entry *ddb_entry = cmd->device->hostdata;
int stat, ret;
+ int rval;
if (!ddb_entry)
return FAILED;
@@ -9320,6 +9353,12 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
ha->host_no, cmd, jiffies, cmd->request->timeout / HZ,
ha->dpc_flags, cmd->result, cmd->allowed));
+ rval = qla4xxx_isp_check_reg(ha);
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+ return FAILED;
+ }
+
stat = qla4xxx_reset_target(ha, ddb_entry);
if (stat != QLA_SUCCESS) {
starget_printk(KERN_INFO, scsi_target(cmd->device),
@@ -9374,9 +9413,16 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
int return_status = FAILED;
struct scsi_qla_host *ha;
+ int rval;
ha = to_qla_host(cmd->device->host);
+ rval = qla4xxx_isp_check_reg(ha);
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_INFO, ha, "PCI/Register disconnect, exiting.\n");
+ return FAILED;
+ }
+
if ((is_qla8032(ha) || is_qla8042(ha)) && ql4xdontresethba)
qla4_83xx_set_idc_dontreset(ha);
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 40fc7a590e81..6be5ab32c94f 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1657,7 +1657,7 @@ static struct scsi_host_template scsi_driver = {
.eh_timed_out = storvsc_eh_timed_out,
.slave_alloc = storvsc_device_alloc,
.slave_configure = storvsc_device_configure,
- .cmd_per_lun = 255,
+ .cmd_per_lun = 2048,
.this_id = -1,
.use_clustering = ENABLE_CLUSTERING,
/* Make sure we dont get a sg segment crosses a page boundary */
diff --git a/drivers/scsi/sym53c8xx_2/sym_hipd.c b/drivers/scsi/sym53c8xx_2/sym_hipd.c
index ca360daa6a25..378af306fda1 100644
--- a/drivers/scsi/sym53c8xx_2/sym_hipd.c
+++ b/drivers/scsi/sym53c8xx_2/sym_hipd.c
@@ -536,7 +536,7 @@ sym_getsync(struct sym_hcb *np, u_char dt, u_char sfac, u_char *divp, u_char *fa
* Look for the greatest clock divisor that allows an
* input speed faster than the period.
*/
- while (div-- > 0)
+ while (--div > 0)
if (kpc >= (div_10M[div] << 2)) break;
/*
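
The one-character change above matters because the old post-decrement loop leaves div at -1 when no divisor qualifies, while the pre-decrement form stops at 0, keeping div usable as an array index afterwards. A tiny demonstration of the two exit values:

/* Sketch only, not part of the patch. */
#include <stdio.h>

int main(void)
{
    int div;

    div = 4;
    while (div-- > 0)
        ;                      /* old form: exits with div == -1 */
    printf("post-decrement leaves div = %d\n", div);

    div = 4;
    while (--div > 0)
        ;                      /* new form: exits with div == 0 */
    printf("pre-decrement leaves div = %d\n", div);

    return 0;
}
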
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index a355d989b414..c7da2c185990 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -4352,6 +4352,8 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev)
/* REPORT SUPPORTED OPERATION CODES is not supported */
sdev->no_report_opcodes = 1;
+ /* WRITE_SAME command is not supported */
+ sdev->no_write_same = 1;
ufshcd_set_queue_depth(sdev);
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
index 53f7275d6cbd..cfb42f5eccb2 100644
--- a/drivers/soc/imx/gpc.c
+++ b/drivers/soc/imx/gpc.c
@@ -348,7 +348,7 @@ static int imx_gpc_old_dt_init(struct device *dev, struct regmap *regmap,
if (i == 1) {
domain->supply = devm_regulator_get(dev, "pu");
if (IS_ERR(domain->supply))
- return PTR_ERR(domain->supply);;
+ return PTR_ERR(domain->supply);
ret = imx_pgc_get_clocks(dev, domain);
if (ret)
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index bbdc53b686dd..6dbba5aff191 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -702,30 +702,32 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
size_t pgstart, pgend;
int ret = -EINVAL;
+ mutex_lock(&ashmem_mutex);
+
if (unlikely(!asma->file))
- return -EINVAL;
+ goto out_unlock;
- if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
- return -EFAULT;
+ if (unlikely(copy_from_user(&pin, p, sizeof(pin)))) {
+ ret = -EFAULT;
+ goto out_unlock;
+ }
/* per custom, you can pass zero for len to mean "everything onward" */
if (!pin.len)
pin.len = PAGE_ALIGN(asma->size) - pin.offset;
if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
- return -EINVAL;
+ goto out_unlock;
if (unlikely(((__u32)-1) - pin.offset < pin.len))
- return -EINVAL;
+ goto out_unlock;
if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
- return -EINVAL;
+ goto out_unlock;
pgstart = pin.offset / PAGE_SIZE;
pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
- mutex_lock(&ashmem_mutex);
-
switch (cmd) {
case ASHMEM_PIN:
ret = ashmem_pin(asma, pgstart, pgend);
@@ -738,6 +740,7 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
break;
}
+out_unlock:
mutex_unlock(&ashmem_mutex);
return ret;
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 94e06925c712..49718c96bf9e 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -12,6 +12,7 @@
#include <linux/err.h>
#include <linux/cma.h>
#include <linux/scatterlist.h>
+#include <linux/highmem.h>
#include "ion.h"
@@ -42,6 +43,22 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
if (!pages)
return -ENOMEM;
+ if (PageHighMem(pages)) {
+ unsigned long nr_clear_pages = nr_pages;
+ struct page *page = pages;
+
+ while (nr_clear_pages > 0) {
+ void *vaddr = kmap_atomic(page);
+
+ memset(vaddr, 0, PAGE_SIZE);
+ kunmap_atomic(vaddr);
+ page++;
+ nr_clear_pages--;
+ }
+ } else {
+ memset(page_address(pages), 0, size);
+ }
+
table = kmalloc(sizeof(*table), GFP_KERNEL);
if (!table)
goto err;
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 079a8410d664..df0499fc4802 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -141,6 +141,8 @@
#define AD7192_GPOCON_P1DAT BIT(1) /* P1 state */
#define AD7192_GPOCON_P0DAT BIT(0) /* P0 state */
+#define AD7192_EXT_FREQ_MHZ_MIN 2457600
+#define AD7192_EXT_FREQ_MHZ_MAX 5120000
#define AD7192_INT_FREQ_MHZ 4915200
/* NOTE:
@@ -218,6 +220,12 @@ static int ad7192_calibrate_all(struct ad7192_state *st)
ARRAY_SIZE(ad7192_calib_arr));
}
+static inline bool ad7192_valid_external_frequency(u32 freq)
+{
+ return (freq >= AD7192_EXT_FREQ_MHZ_MIN &&
+ freq <= AD7192_EXT_FREQ_MHZ_MAX);
+}
+
static int ad7192_setup(struct ad7192_state *st,
const struct ad7192_platform_data *pdata)
{
@@ -243,17 +251,20 @@ static int ad7192_setup(struct ad7192_state *st,
id);
switch (pdata->clock_source_sel) {
- case AD7192_CLK_EXT_MCLK1_2:
- case AD7192_CLK_EXT_MCLK2:
- st->mclk = AD7192_INT_FREQ_MHZ;
- break;
case AD7192_CLK_INT:
case AD7192_CLK_INT_CO:
- if (pdata->ext_clk_hz)
- st->mclk = pdata->ext_clk_hz;
- else
- st->mclk = AD7192_INT_FREQ_MHZ;
+ st->mclk = AD7192_INT_FREQ_MHZ;
break;
+ case AD7192_CLK_EXT_MCLK1_2:
+ case AD7192_CLK_EXT_MCLK2:
+ if (ad7192_valid_external_frequency(pdata->ext_clk_hz)) {
+ st->mclk = pdata->ext_clk_hz;
+ break;
+ }
+ dev_err(&st->sd.spi->dev, "Invalid frequency setting %u\n",
+ pdata->ext_clk_hz);
+ ret = -EINVAL;
+ goto out;
default:
ret = -EINVAL;
goto out;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 2b28fb9c0048..3bcf49466361 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -648,8 +648,6 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
/* Ring buffer functions - here trigger setup related */
indio_dev->setup_ops = &ad5933_ring_setup_ops;
- indio_dev->modes |= INDIO_BUFFER_HARDWARE;
-
return 0;
}
@@ -762,7 +760,7 @@ static int ad5933_probe(struct i2c_client *client,
indio_dev->dev.parent = &client->dev;
indio_dev->info = &ad5933_info;
indio_dev->name = id->name;
- indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->modes = (INDIO_BUFFER_SOFTWARE | INDIO_DIRECT_MODE);
indio_dev->channels = ad5933_channels;
indio_dev->num_channels = ARRAY_SIZE(ad5933_channels);
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index f699abab1787..148f3ee70286 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -19,6 +19,12 @@ config USB_EHCI_BIG_ENDIAN_MMIO
config USB_EHCI_BIG_ENDIAN_DESC
bool
+config USB_UHCI_BIG_ENDIAN_MMIO
+ bool
+
+config USB_UHCI_BIG_ENDIAN_DESC
+ bool
+
menuconfig USB_SUPPORT
bool "USB support"
depends on HAS_IOMEM
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 06b3b54a0e68..7b366a6c0b49 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -174,6 +174,7 @@ static int acm_wb_alloc(struct acm *acm)
wb = &acm->wb[wbn];
if (!wb->use) {
wb->use = 1;
+ wb->len = 0;
return wbn;
}
wbn = (wbn + 1) % ACM_NW;
@@ -805,16 +806,18 @@ static int acm_tty_write(struct tty_struct *tty,
static void acm_tty_flush_chars(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
- struct acm_wb *cur = acm->putbuffer;
+ struct acm_wb *cur;
int err;
unsigned long flags;
+ spin_lock_irqsave(&acm->write_lock, flags);
+
+ cur = acm->putbuffer;
if (!cur) /* nothing to do */
- return;
+ goto out;
acm->putbuffer = NULL;
err = usb_autopm_get_interface_async(acm->control);
- spin_lock_irqsave(&acm->write_lock, flags);
if (err < 0) {
cur->use = 0;
acm->putbuffer = cur;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 4024926c1d68..f4a548471f0f 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -226,6 +226,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ /* Corsair K70 RGB */
+ { USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Corsair Strafe RGB */
{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index e4c3ce0de5de..5bcad1d869b5 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -1917,7 +1917,9 @@ static void dwc2_hsotg_program_zlp(struct dwc2_hsotg *hsotg,
/* Not specific buffer needed for ep0 ZLP */
dma_addr_t dma = hs_ep->desc_list_dma;
- dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
+ if (!index)
+ dwc2_gadget_set_ep0_desc_chain(hsotg, hs_ep);
+
dwc2_gadget_config_nonisoc_xfer_ddma(hs_ep, dma, 0);
} else {
dwc2_writel(DXEPTSIZ_MC(1) | DXEPTSIZ_PKTCNT(1) |
@@ -2974,9 +2976,13 @@ static void dwc2_hsotg_epint(struct dwc2_hsotg *hsotg, unsigned int idx,
if (ints & DXEPINT_STSPHSERCVD) {
dev_dbg(hsotg->dev, "%s: StsPhseRcvd\n", __func__);
- /* Move to STATUS IN for DDMA */
- if (using_desc_dma(hsotg))
- dwc2_hsotg_ep0_zlp(hsotg, true);
+ /* Safety check EP0 state when STSPHSERCVD asserted */
+ if (hsotg->ep0_state == DWC2_EP0_DATA_OUT) {
+ /* Move to STATUS IN for DDMA */
+ if (using_desc_dma(hsotg))
+ dwc2_hsotg_ep0_zlp(hsotg, true);
+ }
+
}
if (ints & DXEPINT_BACK2BACKSETUP)
@@ -3375,12 +3381,6 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
dwc2_writel(dwc2_hsotg_ep0_mps(hsotg->eps_out[0]->ep.maxpacket) |
DXEPCTL_USBACTEP, hsotg->regs + DIEPCTL0);
- dwc2_hsotg_enqueue_setup(hsotg);
-
- dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
- dwc2_readl(hsotg->regs + DIEPCTL0),
- dwc2_readl(hsotg->regs + DOEPCTL0));
-
/* clear global NAKs */
val = DCTL_CGOUTNAK | DCTL_CGNPINNAK;
if (!is_usb_reset)
@@ -3391,6 +3391,12 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
mdelay(3);
hsotg->lx_state = DWC2_L0;
+
+ dwc2_hsotg_enqueue_setup(hsotg);
+
+ dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
+ dwc2_readl(hsotg->regs + DIEPCTL0),
+ dwc2_readl(hsotg->regs + DOEPCTL0));
}
static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg)
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index ade2ab00d37a..f1d838a4acd6 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -100,6 +100,8 @@ static void dwc3_set_prtcap(struct dwc3 *dwc, u32 mode)
reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
reg |= DWC3_GCTL_PRTCAPDIR(mode);
dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+
+ dwc->current_dr_role = mode;
}
static void __dwc3_set_mode(struct work_struct *work)
@@ -133,8 +135,6 @@ static void __dwc3_set_mode(struct work_struct *work)
dwc3_set_prtcap(dwc, dwc->desired_dr_role);
- dwc->current_dr_role = dwc->desired_dr_role;
-
spin_unlock_irqrestore(&dwc->lock, flags);
switch (dwc->desired_dr_role) {
@@ -219,7 +219,7 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
* XHCI driver will reset the host block. If dwc3 was configured for
* host-only mode, then we can return early.
*/
- if (dwc->dr_mode == USB_DR_MODE_HOST)
+ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST)
return 0;
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
@@ -234,6 +234,9 @@ static int dwc3_core_soft_reset(struct dwc3 *dwc)
udelay(1);
} while (--retries);
+ phy_exit(dwc->usb3_generic_phy);
+ phy_exit(dwc->usb2_generic_phy);
+
return -ETIMEDOUT;
}
@@ -483,6 +486,22 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc)
parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);
}
+static int dwc3_core_ulpi_init(struct dwc3 *dwc)
+{
+ int intf;
+ int ret = 0;
+
+ intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3);
+
+ if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI ||
+ (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI &&
+ dwc->hsphy_interface &&
+ !strncmp(dwc->hsphy_interface, "ulpi", 4)))
+ ret = dwc3_ulpi_init(dwc);
+
+ return ret;
+}
+
/**
* dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core
* @dwc: Pointer to our controller context structure
@@ -494,7 +513,6 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc)
static int dwc3_phy_setup(struct dwc3 *dwc)
{
u32 reg;
- int ret;
reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
@@ -565,9 +583,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
}
/* FALLTHROUGH */
case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
- ret = dwc3_ulpi_init(dwc);
- if (ret)
- return ret;
/* FALLTHROUGH */
default:
break;
@@ -724,6 +739,7 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
}
static int dwc3_core_get_phy(struct dwc3 *dwc);
+static int dwc3_core_ulpi_init(struct dwc3 *dwc);
/**
* dwc3_core_init - Low-level initialization of DWC3 Core
@@ -755,17 +771,27 @@ static int dwc3_core_init(struct dwc3 *dwc)
dwc->maximum_speed = USB_SPEED_HIGH;
}
- ret = dwc3_core_get_phy(dwc);
+ ret = dwc3_phy_setup(dwc);
if (ret)
goto err0;
- ret = dwc3_core_soft_reset(dwc);
- if (ret)
- goto err0;
+ if (!dwc->ulpi_ready) {
+ ret = dwc3_core_ulpi_init(dwc);
+ if (ret)
+ goto err0;
+ dwc->ulpi_ready = true;
+ }
- ret = dwc3_phy_setup(dwc);
+ if (!dwc->phys_ready) {
+ ret = dwc3_core_get_phy(dwc);
+ if (ret)
+ goto err0a;
+ dwc->phys_ready = true;
+ }
+
+ ret = dwc3_core_soft_reset(dwc);
if (ret)
- goto err0;
+ goto err0a;
dwc3_core_setup_global_control(dwc);
dwc3_core_num_eps(dwc);
@@ -838,6 +864,9 @@ err1:
phy_exit(dwc->usb2_generic_phy);
phy_exit(dwc->usb3_generic_phy);
+err0a:
+ dwc3_ulpi_exit(dwc);
+
err0:
return ret;
}
@@ -916,7 +945,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
switch (dwc->dr_mode) {
case USB_DR_MODE_PERIPHERAL:
- dwc->current_dr_role = DWC3_GCTL_PRTCAP_DEVICE;
dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE);
if (dwc->usb2_phy)
@@ -932,7 +960,6 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
}
break;
case USB_DR_MODE_HOST:
- dwc->current_dr_role = DWC3_GCTL_PRTCAP_HOST;
dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
if (dwc->usb2_phy)
@@ -1234,7 +1261,6 @@ err4:
err3:
dwc3_free_event_buffers(dwc);
- dwc3_ulpi_exit(dwc);
err2:
pm_runtime_allow(&pdev->dev);
@@ -1284,7 +1310,7 @@ static int dwc3_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int dwc3_suspend_common(struct dwc3 *dwc)
+static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
{
unsigned long flags;
@@ -1296,6 +1322,10 @@ static int dwc3_suspend_common(struct dwc3 *dwc)
dwc3_core_exit(dwc);
break;
case DWC3_GCTL_PRTCAP_HOST:
+ /* do nothing during host runtime_suspend */
+ if (!PMSG_IS_AUTO(msg))
+ dwc3_core_exit(dwc);
+ break;
default:
/* do nothing */
break;
@@ -1304,7 +1334,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc)
return 0;
}
-static int dwc3_resume_common(struct dwc3 *dwc)
+static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
{
unsigned long flags;
int ret;
@@ -1320,6 +1350,13 @@ static int dwc3_resume_common(struct dwc3 *dwc)
spin_unlock_irqrestore(&dwc->lock, flags);
break;
case DWC3_GCTL_PRTCAP_HOST:
+ /* nothing to do on host runtime_resume */
+ if (!PMSG_IS_AUTO(msg)) {
+ ret = dwc3_core_init(dwc);
+ if (ret)
+ return ret;
+ }
+ break;
default:
/* do nothing */
break;
@@ -1331,12 +1368,11 @@ static int dwc3_resume_common(struct dwc3 *dwc)
static int dwc3_runtime_checks(struct dwc3 *dwc)
{
switch (dwc->current_dr_role) {
- case USB_DR_MODE_PERIPHERAL:
- case USB_DR_MODE_OTG:
+ case DWC3_GCTL_PRTCAP_DEVICE:
if (dwc->connected)
return -EBUSY;
break;
- case USB_DR_MODE_HOST:
+ case DWC3_GCTL_PRTCAP_HOST:
default:
/* do nothing */
break;
@@ -1353,7 +1389,7 @@ static int dwc3_runtime_suspend(struct device *dev)
if (dwc3_runtime_checks(dwc))
return -EBUSY;
- ret = dwc3_suspend_common(dwc);
+ ret = dwc3_suspend_common(dwc, PMSG_AUTO_SUSPEND);
if (ret)
return ret;
@@ -1369,7 +1405,7 @@ static int dwc3_runtime_resume(struct device *dev)
device_init_wakeup(dev, false);
- ret = dwc3_resume_common(dwc);
+ ret = dwc3_resume_common(dwc, PMSG_AUTO_RESUME);
if (ret)
return ret;
@@ -1416,7 +1452,7 @@ static int dwc3_suspend(struct device *dev)
struct dwc3 *dwc = dev_get_drvdata(dev);
int ret;
- ret = dwc3_suspend_common(dwc);
+ ret = dwc3_suspend_common(dwc, PMSG_SUSPEND);
if (ret)
return ret;
@@ -1432,7 +1468,7 @@ static int dwc3_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
- ret = dwc3_resume_common(dwc);
+ ret = dwc3_resume_common(dwc, PMSG_RESUME);
if (ret)
return ret;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 03c7aaaac926..860d2bc184d1 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -158,13 +158,15 @@
#define DWC3_GDBGFIFOSPACE_TYPE(n) (((n) << 5) & 0x1e0)
#define DWC3_GDBGFIFOSPACE_SPACE_AVAILABLE(n) (((n) >> 16) & 0xffff)
-#define DWC3_TXFIFOQ 1
-#define DWC3_RXFIFOQ 3
-#define DWC3_TXREQQ 5
-#define DWC3_RXREQQ 7
-#define DWC3_RXINFOQ 9
-#define DWC3_DESCFETCHQ 13
-#define DWC3_EVENTQ 15
+#define DWC3_TXFIFOQ 0
+#define DWC3_RXFIFOQ 1
+#define DWC3_TXREQQ 2
+#define DWC3_RXREQQ 3
+#define DWC3_RXINFOQ 4
+#define DWC3_PSTATQ 5
+#define DWC3_DESCFETCHQ 6
+#define DWC3_EVENTQ 7
+#define DWC3_AUXEVENTQ 8
/* Global RX Threshold Configuration Register */
#define DWC3_GRXTHRCFG_MAXRXBURSTSIZE(n) (((n) & 0x1f) << 19)
@@ -795,7 +797,9 @@ struct dwc3_scratchpad_array {
* @usb3_phy: pointer to USB3 PHY
* @usb2_generic_phy: pointer to USB2 PHY
* @usb3_generic_phy: pointer to USB3 PHY
+ * @phys_ready: flag to indicate that PHYs are ready
* @ulpi: pointer to ulpi interface
+ * @ulpi_ready: flag to indicate that ULPI is initialized
* @u2sel: parameter from Set SEL request.
* @u2pel: parameter from Set SEL request.
* @u1sel: parameter from Set SEL request.
@@ -893,7 +897,10 @@ struct dwc3 {
struct phy *usb2_generic_phy;
struct phy *usb3_generic_phy;
+ bool phys_ready;
+
struct ulpi *ulpi;
+ bool ulpi_ready;
void __iomem *regs;
size_t regs_size;
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 7ae0eefc7cc7..e54c3622eb28 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -143,6 +143,7 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
clk_disable_unprepare(simple->clks[i]);
clk_put(simple->clks[i]);
}
+ simple->num_clocks = 0;
reset_control_assert(simple->resets);
reset_control_put(simple->resets);
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index a4719e853b85..ed8b86517675 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -582,9 +582,25 @@ static int dwc3_omap_resume(struct device *dev)
return 0;
}
+static void dwc3_omap_complete(struct device *dev)
+{
+ struct dwc3_omap *omap = dev_get_drvdata(dev);
+
+ if (extcon_get_state(omap->edev, EXTCON_USB))
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_VALID);
+ else
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_VBUS_OFF);
+
+ if (extcon_get_state(omap->edev, EXTCON_USB_HOST))
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_GROUND);
+ else
+ dwc3_omap_set_mailbox(omap, OMAP_DWC3_ID_FLOAT);
+}
+
static const struct dev_pm_ops dwc3_omap_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dwc3_omap_suspend, dwc3_omap_resume)
+ .complete = dwc3_omap_complete,
};
#define DEV_PM_OPS (&dwc3_omap_dev_pm_ops)
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 9c2e4a17918e..18be31d5743a 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -854,7 +854,12 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
trb++;
trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
trace_dwc3_complete_trb(ep0, trb);
- ep0->trb_enqueue = 0;
+
+ if (r->direction)
+ dwc->eps[1]->trb_enqueue = 0;
+ else
+ dwc->eps[0]->trb_enqueue = 0;
+
dwc->ep0_bounced = false;
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 616ef49ccb49..2bda4eb1e9ac 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2745,6 +2745,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
break;
}
+ dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket;
+
/* Enable USB2 LPM Capability */
if ((dwc->revision > DWC3_REVISION_194A) &&
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 8f2cf3baa19c..c2592d883f67 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1855,44 +1855,20 @@ static int ffs_func_eps_enable(struct ffs_function *func)
spin_lock_irqsave(&func->ffs->eps_lock, flags);
while(count--) {
- struct usb_endpoint_descriptor *ds;
- struct usb_ss_ep_comp_descriptor *comp_desc = NULL;
- int needs_comp_desc = false;
- int desc_idx;
-
- if (ffs->gadget->speed == USB_SPEED_SUPER) {
- desc_idx = 2;
- needs_comp_desc = true;
- } else if (ffs->gadget->speed == USB_SPEED_HIGH)
- desc_idx = 1;
- else
- desc_idx = 0;
-
- /* fall-back to lower speed if desc missing for current speed */
- do {
- ds = ep->descs[desc_idx];
- } while (!ds && --desc_idx >= 0);
-
- if (!ds) {
- ret = -EINVAL;
- break;
- }
-
ep->ep->driver_data = ep;
- ep->ep->desc = ds;
- if (needs_comp_desc) {
- comp_desc = (struct usb_ss_ep_comp_descriptor *)(ds +
- USB_DT_ENDPOINT_SIZE);
- ep->ep->maxburst = comp_desc->bMaxBurst + 1;
- ep->ep->comp_desc = comp_desc;
+ ret = config_ep_by_speed(func->gadget, &func->function, ep->ep);
+ if (ret) {
+ pr_err("%s: config_ep_by_speed(%s) returned %d\n",
+ __func__, ep->ep->name, ret);
+ break;
}
ret = usb_ep_enable(ep->ep);
if (likely(!ret)) {
epfile->ep = ep;
- epfile->in = usb_endpoint_dir_in(ds);
- epfile->isoc = usb_endpoint_xfer_isoc(ds);
+ epfile->in = usb_endpoint_dir_in(ep->ep->desc);
+ epfile->isoc = usb_endpoint_xfer_isoc(ep->ep->desc);
} else {
break;
}
@@ -2979,10 +2955,8 @@ static int _ffs_func_bind(struct usb_configuration *c,
struct ffs_data *ffs = func->ffs;
const int full = !!func->ffs->fs_descs_count;
- const int high = gadget_is_dualspeed(func->gadget) &&
- func->ffs->hs_descs_count;
- const int super = gadget_is_superspeed(func->gadget) &&
- func->ffs->ss_descs_count;
+ const int high = !!func->ffs->hs_descs_count;
+ const int super = !!func->ffs->ss_descs_count;
int fs_len, hs_len, ss_len, ret, i;
struct ffs_ep *eps_ptr;
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 11fe788b4308..d2dc1f00180b 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -524,6 +524,8 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
return ret;
}
+ iad_desc.bFirstInterface = ret;
+
std_ac_if_desc.bInterfaceNumber = ret;
uac2->ac_intf = ret;
uac2->ac_alt = 0;
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 1e9567091d86..0875d38476ee 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -274,7 +274,6 @@ config USB_SNP_UDC_PLAT
tristate "Synopsys USB 2.0 Device controller"
depends on USB_GADGET && OF && HAS_DMA
depends on EXTCON || EXTCON=n
- select USB_GADGET_DUALSPEED
select USB_SNP_CORE
default ARCH_BCM_IPROC
help
diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c
index 1e940f054cb8..6dbc489513cd 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_pci.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_pci.c
@@ -77,6 +77,7 @@ static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
if (ret) {
dev_err(&pci->dev,
"couldn't add resources to bdc device\n");
+ platform_device_put(bdc);
return ret;
}
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 859d5b11ba4c..1f8b19d9cf97 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -180,8 +180,8 @@ EXPORT_SYMBOL_GPL(usb_ep_alloc_request);
void usb_ep_free_request(struct usb_ep *ep,
struct usb_request *req)
{
- ep->ops->free_request(ep, req);
trace_usb_ep_free_request(ep, req, 0);
+ ep->ops->free_request(ep, req);
}
EXPORT_SYMBOL_GPL(usb_ep_free_request);
diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c
index e5b4ee96c4bf..56b517a38865 100644
--- a/drivers/usb/gadget/udc/fsl_udc_core.c
+++ b/drivers/usb/gadget/udc/fsl_udc_core.c
@@ -1305,7 +1305,7 @@ static void udc_reset_ep_queue(struct fsl_udc *udc, u8 pipe)
{
struct fsl_ep *ep = get_ep_by_pipe(udc, pipe);
- if (ep->name)
+ if (ep->ep.name)
nuke(ep, -ESHUTDOWN);
}
@@ -1693,7 +1693,7 @@ static void dtd_complete_irq(struct fsl_udc *udc)
curr_ep = get_ep_by_pipe(udc, i);
/* If the ep is configured */
- if (curr_ep->name == NULL) {
+ if (!curr_ep->ep.name) {
WARNING("Invalid EP?");
continue;
}
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index 6e87af248367..409cde4e6a51 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -2410,7 +2410,7 @@ static int renesas_usb3_remove(struct platform_device *pdev)
__renesas_usb3_ep_free_request(usb3->ep0_req);
if (usb3->phy)
phy_put(usb3->phy);
- pm_runtime_disable(usb3_to_dev(usb3));
+ pm_runtime_disable(&pdev->dev);
return 0;
}
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 6150bed7cfa8..4fcfb3084b36 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -633,14 +633,6 @@ config USB_UHCI_ASPEED
bool
default y if ARCH_ASPEED
-config USB_UHCI_BIG_ENDIAN_MMIO
- bool
- default y if SPARC_LEON
-
-config USB_UHCI_BIG_ENDIAN_DESC
- bool
- default y if SPARC_LEON
-
config USB_FHCI_HCD
tristate "Freescale QE USB Host Controller support"
depends on OF_GPIO && QE_GPIO && QUICC_ENGINE
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index facafdf8fb95..d7641cbdee43 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -774,12 +774,12 @@ static struct urb *request_single_step_set_feature_urb(
atomic_inc(&urb->use_count);
atomic_inc(&urb->dev->urbnum);
urb->setup_dma = dma_map_single(
- hcd->self.controller,
+ hcd->self.sysdev,
urb->setup_packet,
sizeof(struct usb_ctrlrequest),
DMA_TO_DEVICE);
urb->transfer_dma = dma_map_single(
- hcd->self.controller,
+ hcd->self.sysdev,
urb->transfer_buffer,
urb->transfer_buffer_length,
DMA_FROM_DEVICE);
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 88158324dcae..327630405695 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -1188,10 +1188,10 @@ static int submit_single_step_set_feature(
* 15 secs after the setup
*/
if (is_setup) {
- /* SETUP pid */
+ /* SETUP pid, and interrupt after SETUP completion */
qtd_fill(ehci, qtd, urb->setup_dma,
sizeof(struct usb_ctrlrequest),
- token | (2 /* "setup" */ << 8), 8);
+ QTD_IOC | token | (2 /* "setup" */ << 8), 8);
submit_async(ehci, urb, &qtd_list, GFP_ATOMIC);
return 0; /*Return now; we shall come back after 15 seconds*/
@@ -1228,12 +1228,8 @@ static int submit_single_step_set_feature(
qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma);
list_add_tail(&qtd->qtd_list, head);
- /* dont fill any data in such packets */
- qtd_fill(ehci, qtd, 0, 0, token, 0);
-
- /* by default, enable interrupt on urb completion */
- if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT)))
- qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC);
+ /* Interrupt after STATUS completion */
+ qtd_fill(ehci, qtd, 0, 0, token | QTD_IOC, 0);
submit_async(ehci, urb, &qtd_list, GFP_KERNEL);
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index ee9676349333..84f88fa411cd 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -74,6 +74,7 @@ static const char hcd_name [] = "ohci_hcd";
#define STATECHANGE_DELAY msecs_to_jiffies(300)
#define IO_WATCHDOG_DELAY msecs_to_jiffies(275)
+#define IO_WATCHDOG_OFF 0xffffff00
#include "ohci.h"
#include "pci-quirks.h"
@@ -231,7 +232,7 @@ static int ohci_urb_enqueue (
}
/* Start up the I/O watchdog timer, if it's not running */
- if (!timer_pending(&ohci->io_watchdog) &&
+ if (ohci->prev_frame_no == IO_WATCHDOG_OFF &&
list_empty(&ohci->eds_in_use) &&
!(ohci->flags & OHCI_QUIRK_QEMU)) {
ohci->prev_frame_no = ohci_frame_no(ohci);
@@ -501,6 +502,7 @@ static int ohci_init (struct ohci_hcd *ohci)
return 0;
timer_setup(&ohci->io_watchdog, io_watchdog_func, 0);
+ ohci->prev_frame_no = IO_WATCHDOG_OFF;
ohci->hcca = dma_alloc_coherent (hcd->self.controller,
sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
@@ -730,7 +732,7 @@ static void io_watchdog_func(struct timer_list *t)
u32 head;
struct ed *ed;
struct td *td, *td_start, *td_next;
- unsigned frame_no;
+ unsigned frame_no, prev_frame_no = IO_WATCHDOG_OFF;
unsigned long flags;
spin_lock_irqsave(&ohci->lock, flags);
@@ -835,7 +837,7 @@ static void io_watchdog_func(struct timer_list *t)
}
}
if (!list_empty(&ohci->eds_in_use)) {
- ohci->prev_frame_no = frame_no;
+ prev_frame_no = frame_no;
ohci->prev_wdh_cnt = ohci->wdh_cnt;
ohci->prev_donehead = ohci_readl(ohci,
&ohci->regs->donehead);
@@ -845,6 +847,7 @@ static void io_watchdog_func(struct timer_list *t)
}
done:
+ ohci->prev_frame_no = prev_frame_no;
spin_unlock_irqrestore(&ohci->lock, flags);
}
@@ -973,6 +976,7 @@ static void ohci_stop (struct usb_hcd *hcd)
if (quirk_nec(ohci))
flush_work(&ohci->nec_work);
del_timer_sync(&ohci->io_watchdog);
+ ohci->prev_frame_no = IO_WATCHDOG_OFF;
ohci_writel (ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
ohci_usb_reset(ohci);
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index fb7aaa3b9d06..634f3c7bf774 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -311,8 +311,10 @@ static int ohci_bus_suspend (struct usb_hcd *hcd)
rc = ohci_rh_suspend (ohci, 0);
spin_unlock_irq (&ohci->lock);
- if (rc == 0)
+ if (rc == 0) {
del_timer_sync(&ohci->io_watchdog);
+ ohci->prev_frame_no = IO_WATCHDOG_OFF;
+ }
return rc;
}
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index b2ec8c399363..4ccb85a67bb3 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -1019,6 +1019,8 @@ skip_ed:
* have modified this list. normally it's just prepending
* entries (which we'd ignore), but paranoia won't hurt.
*/
+ *last = ed->ed_next;
+ ed->ed_next = NULL;
modified = 0;
/* unlink urbs as requested, but rescan the list after
@@ -1077,21 +1079,22 @@ rescan_this:
goto rescan_this;
/*
- * If no TDs are queued, take ED off the ed_rm_list.
+ * If no TDs are queued, ED is now idle.
* Otherwise, if the HC is running, reschedule.
- * If not, leave it on the list for further dequeues.
+ * If the HC isn't running, add ED back to the
+ * start of the list for later processing.
*/
if (list_empty(&ed->td_list)) {
- *last = ed->ed_next;
- ed->ed_next = NULL;
ed->state = ED_IDLE;
list_del(&ed->in_use_list);
} else if (ohci->rh_state == OHCI_RH_RUNNING) {
- *last = ed->ed_next;
- ed->ed_next = NULL;
ed_schedule(ohci, ed);
} else {
- last = &ed->ed_next;
+ ed->ed_next = ohci->ed_rm_list;
+ ohci->ed_rm_list = ed;
+ /* Don't loop on the same ED */
+ if (last == &ohci->ed_rm_list)
+ last = &ed->ed_next;
}
if (modified)
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 161536717025..67ad4bb6919a 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -66,6 +66,23 @@
#define AX_INDXC 0x30
#define AX_DATAC 0x34
+#define PT_ADDR_INDX 0xE8
+#define PT_READ_INDX 0xE4
+#define PT_SIG_1_ADDR 0xA520
+#define PT_SIG_2_ADDR 0xA521
+#define PT_SIG_3_ADDR 0xA522
+#define PT_SIG_4_ADDR 0xA523
+#define PT_SIG_1_DATA 0x78
+#define PT_SIG_2_DATA 0x56
+#define PT_SIG_3_DATA 0x34
+#define PT_SIG_4_DATA 0x12
+#define PT4_P1_REG 0xB521
+#define PT4_P2_REG 0xB522
+#define PT2_P1_REG 0xD520
+#define PT2_P2_REG 0xD521
+#define PT1_P1_REG 0xD522
+#define PT1_P2_REG 0xD523
+
#define NB_PCIE_INDX_ADDR 0xe0
#define NB_PCIE_INDX_DATA 0xe4
#define PCIE_P_CNTL 0x10040
@@ -513,6 +530,98 @@ void usb_amd_dev_put(void)
EXPORT_SYMBOL_GPL(usb_amd_dev_put);
/*
+ * Check if port is disabled in BIOS on AMD Promontory host.
+ * BIOS Disabled ports may wake on connect/disconnect and need
+ * driver workaround to keep them disabled.
+ * Returns true if port is marked disabled.
+ */
+bool usb_amd_pt_check_port(struct device *device, int port)
+{
+ unsigned char value, port_shift;
+ struct pci_dev *pdev;
+ u16 reg;
+
+ pdev = to_pci_dev(device);
+ pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_1_ADDR);
+
+ pci_read_config_byte(pdev, PT_READ_INDX, &value);
+ if (value != PT_SIG_1_DATA)
+ return false;
+
+ pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_2_ADDR);
+
+ pci_read_config_byte(pdev, PT_READ_INDX, &value);
+ if (value != PT_SIG_2_DATA)
+ return false;
+
+ pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_3_ADDR);
+
+ pci_read_config_byte(pdev, PT_READ_INDX, &value);
+ if (value != PT_SIG_3_DATA)
+ return false;
+
+ pci_write_config_word(pdev, PT_ADDR_INDX, PT_SIG_4_ADDR);
+
+ pci_read_config_byte(pdev, PT_READ_INDX, &value);
+ if (value != PT_SIG_4_DATA)
+ return false;
+
+ /* Check disabled port setting, if bit is set port is enabled */
+ switch (pdev->device) {
+ case 0x43b9:
+ case 0x43ba:
+ /*
+ * device is AMD_PROMONTORYA_4(0x43b9) or PROMONTORYA_3(0x43ba)
+ * PT4_P1_REG bits[7..1] represents USB2.0 ports 6 to 0
+ * PT4_P2_REG bits[6..0] represents ports 13 to 7
+ */
+ if (port > 6) {
+ reg = PT4_P2_REG;
+ port_shift = port - 7;
+ } else {
+ reg = PT4_P1_REG;
+ port_shift = port + 1;
+ }
+ break;
+ case 0x43bb:
+ /*
+ * device is AMD_PROMONTORYA_2(0x43bb)
+ * PT2_P1_REG bits[7..5] represents USB2.0 ports 2 to 0
+ * PT2_P2_REG bits[5..0] represents ports 9 to 3
+ */
+ if (port > 2) {
+ reg = PT2_P2_REG;
+ port_shift = port - 3;
+ } else {
+ reg = PT2_P1_REG;
+ port_shift = port + 5;
+ }
+ break;
+ case 0x43bc:
+ /*
+ * device is AMD_PROMONTORYA_1(0x43bc)
+ * PT1_P1_REG[7..4] represents USB2.0 ports 3 to 0
+ * PT1_P2_REG[5..0] represents ports 9 to 4
+ */
+ if (port > 3) {
+ reg = PT1_P2_REG;
+ port_shift = port - 4;
+ } else {
+ reg = PT1_P1_REG;
+ port_shift = port + 4;
+ }
+ break;
+ default:
+ return false;
+ }
+ pci_write_config_word(pdev, PT_ADDR_INDX, reg);
+ pci_read_config_byte(pdev, PT_READ_INDX, &value);
+
+ return !(value & BIT(port_shift));
+}
+EXPORT_SYMBOL_GPL(usb_amd_pt_check_port);
+
+/*
* Make sure the controller is completely inactive, unable to
* generate interrupts or do DMA.
*/
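
Illustrative note (not part of the patch): the usb_amd_pt_check_port() addition above reads an index/data register pair in PCI config space, checks a four-byte signature, and then maps a port number onto a register and bit position that differ per Promontory variant. The minimal standalone C sketch below models only that port-to-bit mapping; struct pt_port_map and pt_lookup() are hypothetical names, while the 0xB521/0xB522 registers and the split at port 7 follow the PT4 comments in the patch.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mock of the per-variant port-to-bit layout described in the comments. */
    struct pt_port_map {
            uint16_t low_reg;       /* register holding the low-numbered ports  */
            uint16_t high_reg;      /* register holding the high-numbered ports */
            int split;              /* first port that lives in high_reg        */
            int low_shift;          /* bit position of port 0 inside low_reg    */
    };

    static bool pt_lookup(const struct pt_port_map *map, int port,
                          uint16_t *reg, int *shift)
    {
            if (port < 0)
                    return false;
            if (port >= map->split) {
                    *reg = map->high_reg;
                    *shift = port - map->split;
            } else {
                    *reg = map->low_reg;
                    *shift = port + map->low_shift;
            }
            return true;
    }

    int main(void)
    {
            /* Promontory-4 style layout: ports 0..6 in one register, 7..13 in the next. */
            const struct pt_port_map pt4 = {
                    .low_reg = 0xB521, .high_reg = 0xB522, .split = 7, .low_shift = 1,
            };
            uint16_t reg;
            int shift;

            if (pt_lookup(&pt4, 9, &reg, &shift))
                    printf("port 9 -> reg 0x%04X bit %d\n", (unsigned int)reg, shift);
            return 0;
    }

In the driver itself the looked-up register is then written to PT_ADDR_INDX and read back through PT_READ_INDX, and a cleared bit means the port is BIOS-disabled.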
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index b68dcb5dd0fd..4ca0d9b7e463 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -17,6 +17,7 @@ void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev);
void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
void sb800_prefetch(struct device *dev, int on);
bool usb_xhci_needs_pci_reset(struct pci_dev *pdev);
+bool usb_amd_pt_check_port(struct device *device, int port);
#else
struct pci_dev;
static inline void usb_amd_quirk_pll_disable(void) {}
@@ -25,6 +26,10 @@ static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {}
static inline void usb_amd_dev_put(void) {}
static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {}
static inline void sb800_prefetch(struct device *dev, int on) {}
+static inline bool usb_amd_pt_check_port(struct device *device, int port)
+{
+ return false;
+}
#endif /* CONFIG_USB_PCI */
#endif /* __LINUX_USB_PCI_QUIRKS_H */
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index e26e685d8a57..5851052d4668 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -211,7 +211,7 @@ static void xhci_ring_dump_segment(struct seq_file *s,
static int xhci_ring_trb_show(struct seq_file *s, void *unused)
{
int i;
- struct xhci_ring *ring = s->private;
+ struct xhci_ring *ring = *(struct xhci_ring **)s->private;
struct xhci_segment *seg = ring->first_seg;
for (i = 0; i < ring->num_segs; i++) {
@@ -387,7 +387,7 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index);
epriv->root = xhci_debugfs_create_ring_dir(xhci,
- &dev->eps[ep_index].new_ring,
+ &dev->eps[ep_index].ring,
epriv->name,
spriv->root);
spriv->eps[ep_index] = epriv;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 46d5e08f05f1..72ebbc908e19 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1224,17 +1224,17 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp = readl(port_array[wIndex]);
break;
}
-
- /* Software should not attempt to set
- * port link state above '3' (U3) and the port
- * must be enabled.
- */
- if ((temp & PORT_PE) == 0 ||
- (link_state > USB_SS_PORT_LS_U3)) {
- xhci_warn(xhci, "Cannot set link state.\n");
+ /* Port must be enabled */
+ if (!(temp & PORT_PE)) {
+ retval = -ENODEV;
+ break;
+ }
+ /* Can't set port link state above '3' (U3) */
+ if (link_state > USB_SS_PORT_LS_U3) {
+ xhci_warn(xhci, "Cannot set port %d link state %d\n",
+ wIndex, link_state);
goto error;
}
-
if (link_state == USB_SS_PORT_LS_U3) {
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
wIndex + 1);
@@ -1522,6 +1522,13 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
t2 |= PORT_WKOC_E | PORT_WKCONN_E;
t2 &= ~PORT_WKDISC_E;
}
+
+ if ((xhci->quirks & XHCI_U2_DISABLE_WAKE) &&
+ (hcd->speed < HCD_USB3)) {
+ if (usb_amd_pt_check_port(hcd->self.controller,
+ port_index))
+ t2 &= ~PORT_WAKE_BITS;
+ }
} else
t2 &= ~PORT_WAKE_BITS;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 6c79037876db..5262fa571a5d 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -42,6 +42,10 @@
#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
+#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
static const char hcd_name[] = "xhci_hcd";
@@ -125,6 +129,13 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
if (pdev->vendor == PCI_VENDOR_ID_AMD)
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+ if ((pdev->vendor == PCI_VENDOR_ID_AMD) &&
+ ((pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4) ||
+ (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_3) ||
+ (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_2) ||
+ (pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_1)))
+ xhci->quirks |= XHCI_U2_DISABLE_WAKE;
+
if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
xhci->quirks |= XHCI_LPM_SUPPORT;
xhci->quirks |= XHCI_INTEL_HOST;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 1eeb3396300f..25d4b748a56f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -646,8 +646,6 @@ static void xhci_stop(struct usb_hcd *hcd)
return;
}
- xhci_debugfs_exit(xhci);
-
xhci_dbc_exit(xhci);
spin_lock_irq(&xhci->lock);
@@ -680,6 +678,7 @@ static void xhci_stop(struct usb_hcd *hcd)
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "cleaning up memory");
xhci_mem_cleanup(xhci);
+ xhci_debugfs_exit(xhci);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"xhci_stop completed - status = %x",
readl(&xhci->op_regs->status));
@@ -1014,6 +1013,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
+ xhci_debugfs_exit(xhci);
xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
readl(&xhci->op_regs->status));
@@ -3544,12 +3544,10 @@ static void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
virt_dev->eps[i].ep_state &= ~EP_STOP_CMD_PENDING;
del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
}
-
+ xhci_debugfs_remove_slot(xhci, udev->slot_id);
ret = xhci_disable_slot(xhci, udev->slot_id);
- if (ret) {
- xhci_debugfs_remove_slot(xhci, udev->slot_id);
+ if (ret)
xhci_free_virt_device(xhci, udev->slot_id);
- }
}
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 96099a245c69..e4d7d3d06a75 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1822,7 +1822,7 @@ struct xhci_hcd {
/* For controller with a broken Port Disable implementation */
#define XHCI_BROKEN_PORT_PED (1 << 25)
#define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26)
-/* Reserved. It was XHCI_U2_DISABLE_WAKE */
+#define XHCI_U2_DISABLE_WAKE (1 << 27)
#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28)
#define XHCI_HW_LPM_DISABLE (1 << 29)
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 63b9e85dc0e9..236a60f53099 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -42,6 +42,9 @@
#define USB_DEVICE_ID_LD_MICROCASSYTIME 0x1033 /* USB Product ID of Micro-CASSY Time (reserved) */
#define USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE 0x1035 /* USB Product ID of Micro-CASSY Temperature */
#define USB_DEVICE_ID_LD_MICROCASSYPH 0x1038 /* USB Product ID of Micro-CASSY pH */
+#define USB_DEVICE_ID_LD_POWERANALYSERCASSY 0x1040 /* USB Product ID of Power Analyser CASSY */
+#define USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY 0x1042 /* USB Product ID of Converter Controller CASSY */
+#define USB_DEVICE_ID_LD_MACHINETESTCASSY 0x1043 /* USB Product ID of Machine Test CASSY */
#define USB_DEVICE_ID_LD_JWM 0x1080 /* USB Product ID of Joule and Wattmeter */
#define USB_DEVICE_ID_LD_DMMP 0x1081 /* USB Product ID of Digital Multimeter P (reserved) */
#define USB_DEVICE_ID_LD_UMIP 0x1090 /* USB Product ID of UMI P */
@@ -84,6 +87,9 @@ static const struct usb_device_id ld_usb_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTIME) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYTEMPERATURE) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MICROCASSYPH) },
+ { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POWERANALYSERCASSY) },
+ { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CONVERTERCONTROLLERCASSY) },
+ { USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MACHINETESTCASSY) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_JWM) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_DMMP) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_UMIP) },
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 968bf1e8b0fe..eef4ad578b31 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2708,7 +2708,8 @@ static int musb_resume(struct device *dev)
if ((devctl & mask) != (musb->context.devctl & mask))
musb->port1_status = 0;
- musb_start(musb);
+ musb_enable_interrupts(musb);
+ musb_platform_enable(musb);
spin_lock_irqsave(&musb->lock, flags);
error = musb_run_resume_work(musb);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 394b4ac86161..45ed32c2cba9 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -391,13 +391,7 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
}
}
- /*
- * The pipe must be broken if current urb->status is set, so don't
- * start next urb.
- * TODO: to minimize the risk of regression, only check urb->status
- * for RX, until we have a test case to understand the behavior of TX.
- */
- if ((!status || !is_in) && qh && qh->is_ready) {
+ if (qh != NULL && qh->is_ready) {
musb_dbg(musb, "... next ep%d %cX urb %p",
hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
musb_start_urb(musb, is_in, qh);
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index da031c45395a..fbec863350f6 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -602,6 +602,9 @@ static enum usb_charger_type mxs_phy_charger_detect(struct usb_phy *phy)
void __iomem *base = phy->io_priv;
enum usb_charger_type chgr_type = UNKNOWN_TYPE;
+ if (!regmap)
+ return UNKNOWN_TYPE;
+
if (mxs_charger_data_contact_detect(mxs_phy))
return chgr_type;
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index 5925d111bd47..39fa2fc1b8b7 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -982,6 +982,10 @@ static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1))
goto usbhsf_pio_prepare_pop;
+ /* return at this time if the pipe is running */
+ if (usbhs_pipe_is_running(pipe))
+ return 0;
+
usbhs_pipe_config_change_bfre(pipe, 1);
ret = usbhsf_fifo_select(pipe, fifo, 0);
@@ -1172,6 +1176,7 @@ static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt *pkt,
usbhsf_fifo_clear(pipe, fifo);
pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len);
+ usbhs_pipe_running(pipe, 0);
usbhsf_dma_stop(pipe, fifo);
usbhsf_dma_unmap(pkt);
usbhsf_fifo_unselect(pipe, pipe->fifo);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 5db8ed517e0e..2d8d9150da0c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -241,6 +241,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EC21 0x0121
#define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_BG96 0x0296
+#define QUECTEL_PRODUCT_EP06 0x0306
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -689,6 +690,10 @@ static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
.reserved = BIT(1) | BIT(4),
};
+static const struct option_blacklist_info quectel_ep06_blacklist = {
+ .reserved = BIT(4) | BIT(5),
+};
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1203,6 +1208,8 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06),
+ .driver_info = (kernel_ulong_t)&quectel_ep06_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index 49e552472c3f..dd8ef36ab10e 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -73,6 +73,7 @@ static ssize_t usbip_sockfd_store(struct device *dev, struct device_attribute *a
goto err;
sdev->ud.tcp_socket = socket;
+ sdev->ud.sockfd = sockfd;
spin_unlock_irq(&sdev->ud.lock);
@@ -172,6 +173,7 @@ static void stub_shutdown_connection(struct usbip_device *ud)
if (ud->tcp_socket) {
sockfd_put(ud->tcp_socket);
ud->tcp_socket = NULL;
+ ud->sockfd = -1;
}
/* 3. free used data */
@@ -266,6 +268,7 @@ static struct stub_device *stub_device_alloc(struct usb_device *udev)
sdev->ud.status = SDEV_ST_AVAILABLE;
spin_lock_init(&sdev->ud.lock);
sdev->ud.tcp_socket = NULL;
+ sdev->ud.sockfd = -1;
INIT_LIST_HEAD(&sdev->priv_init);
INIT_LIST_HEAD(&sdev->priv_tx);
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index c3e1008aa491..20e3d4609583 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -984,6 +984,7 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
if (vdev->ud.tcp_socket) {
sockfd_put(vdev->ud.tcp_socket);
vdev->ud.tcp_socket = NULL;
+ vdev->ud.sockfd = -1;
}
pr_info("release socket\n");
@@ -1030,6 +1031,7 @@ static void vhci_device_reset(struct usbip_device *ud)
if (ud->tcp_socket) {
sockfd_put(ud->tcp_socket);
ud->tcp_socket = NULL;
+ ud->sockfd = -1;
}
ud->status = VDEV_ST_NULL;
diff --git a/drivers/video/fbdev/geode/video_gx.c b/drivers/video/fbdev/geode/video_gx.c
index 6082f653c68a..67773e8bbb95 100644
--- a/drivers/video/fbdev/geode/video_gx.c
+++ b/drivers/video/fbdev/geode/video_gx.c
@@ -127,7 +127,7 @@ void gx_set_dclk_frequency(struct fb_info *info)
int timeout = 1000;
/* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */
- if (cpu_data(0).x86_mask == 1) {
+ if (cpu_data(0).x86_stepping == 1) {
pll_table = gx_pll_table_14MHz;
pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz);
} else {
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 753d9cb437d0..aedbee3b2838 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -60,6 +60,7 @@ struct sock_mapping {
bool active_socket;
struct list_head list;
struct socket *sock;
+ atomic_t refcount;
union {
struct {
int irq;
@@ -93,6 +94,32 @@ struct sock_mapping {
};
};
+static inline struct sock_mapping *pvcalls_enter_sock(struct socket *sock)
+{
+ struct sock_mapping *map;
+
+ if (!pvcalls_front_dev ||
+ dev_get_drvdata(&pvcalls_front_dev->dev) == NULL)
+ return ERR_PTR(-ENOTCONN);
+
+ map = (struct sock_mapping *)sock->sk->sk_send_head;
+ if (map == NULL)
+ return ERR_PTR(-ENOTSOCK);
+
+ pvcalls_enter();
+ atomic_inc(&map->refcount);
+ return map;
+}
+
+static inline void pvcalls_exit_sock(struct socket *sock)
+{
+ struct sock_mapping *map;
+
+ map = (struct sock_mapping *)sock->sk->sk_send_head;
+ atomic_dec(&map->refcount);
+ pvcalls_exit();
+}
+
static inline int get_request(struct pvcalls_bedata *bedata, int *req_id)
{
*req_id = bedata->ring.req_prod_pvt & (RING_SIZE(&bedata->ring) - 1);
@@ -369,31 +396,23 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
return -EOPNOTSUPP;
- pvcalls_enter();
- if (!pvcalls_front_dev) {
- pvcalls_exit();
- return -ENOTCONN;
- }
+ map = pvcalls_enter_sock(sock);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
- map = (struct sock_mapping *)sock->sk->sk_send_head;
- if (!map) {
- pvcalls_exit();
- return -ENOTSOCK;
- }
-
spin_lock(&bedata->socket_lock);
ret = get_request(bedata, &req_id);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
ret = create_active(map, &evtchn);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
@@ -423,7 +442,7 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
smp_rmb();
ret = bedata->rsp[req_id].ret;
bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
@@ -488,23 +507,15 @@ int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
if (flags & (MSG_CONFIRM|MSG_DONTROUTE|MSG_EOR|MSG_OOB))
return -EOPNOTSUPP;
- pvcalls_enter();
- if (!pvcalls_front_dev) {
- pvcalls_exit();
- return -ENOTCONN;
- }
+ map = pvcalls_enter_sock(sock);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
- map = (struct sock_mapping *) sock->sk->sk_send_head;
- if (!map) {
- pvcalls_exit();
- return -ENOTSOCK;
- }
-
mutex_lock(&map->active.out_mutex);
if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) {
mutex_unlock(&map->active.out_mutex);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return -EAGAIN;
}
if (len > INT_MAX)
@@ -526,7 +537,7 @@ again:
tot_sent = sent;
mutex_unlock(&map->active.out_mutex);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return tot_sent;
}
@@ -591,19 +602,11 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
if (flags & (MSG_CMSG_CLOEXEC|MSG_ERRQUEUE|MSG_OOB|MSG_TRUNC))
return -EOPNOTSUPP;
- pvcalls_enter();
- if (!pvcalls_front_dev) {
- pvcalls_exit();
- return -ENOTCONN;
- }
+ map = pvcalls_enter_sock(sock);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
- map = (struct sock_mapping *) sock->sk->sk_send_head;
- if (!map) {
- pvcalls_exit();
- return -ENOTSOCK;
- }
-
mutex_lock(&map->active.in_mutex);
if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER))
len = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
@@ -623,7 +626,7 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
ret = 0;
mutex_unlock(&map->active.in_mutex);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
@@ -637,24 +640,16 @@ int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
return -EOPNOTSUPP;
- pvcalls_enter();
- if (!pvcalls_front_dev) {
- pvcalls_exit();
- return -ENOTCONN;
- }
+ map = pvcalls_enter_sock(sock);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
- map = (struct sock_mapping *) sock->sk->sk_send_head;
- if (map == NULL) {
- pvcalls_exit();
- return -ENOTSOCK;
- }
-
spin_lock(&bedata->socket_lock);
ret = get_request(bedata, &req_id);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
req = RING_GET_REQUEST(&bedata->ring, req_id);
@@ -684,7 +679,7 @@ int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
map->passive.status = PVCALLS_STATUS_BIND;
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return 0;
}
@@ -695,21 +690,13 @@ int pvcalls_front_listen(struct socket *sock, int backlog)
struct xen_pvcalls_request *req;
int notify, req_id, ret;
- pvcalls_enter();
- if (!pvcalls_front_dev) {
- pvcalls_exit();
- return -ENOTCONN;
- }
+ map = pvcalls_enter_sock(sock);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
- map = (struct sock_mapping *) sock->sk->sk_send_head;
- if (!map) {
- pvcalls_exit();
- return -ENOTSOCK;
- }
-
if (map->passive.status != PVCALLS_STATUS_BIND) {
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return -EOPNOTSUPP;
}
@@ -717,7 +704,7 @@ int pvcalls_front_listen(struct socket *sock, int backlog)
ret = get_request(bedata, &req_id);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
req = RING_GET_REQUEST(&bedata->ring, req_id);
@@ -741,7 +728,7 @@ int pvcalls_front_listen(struct socket *sock, int backlog)
bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
map->passive.status = PVCALLS_STATUS_LISTEN;
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
@@ -753,21 +740,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
struct xen_pvcalls_request *req;
int notify, req_id, ret, evtchn, nonblock;
- pvcalls_enter();
- if (!pvcalls_front_dev) {
- pvcalls_exit();
- return -ENOTCONN;
- }
+ map = pvcalls_enter_sock(sock);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
- map = (struct sock_mapping *) sock->sk->sk_send_head;
- if (!map) {
- pvcalls_exit();
- return -ENOTSOCK;
- }
-
if (map->passive.status != PVCALLS_STATUS_LISTEN) {
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return -EINVAL;
}
@@ -785,13 +764,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
goto received;
}
if (nonblock) {
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return -EAGAIN;
}
if (wait_event_interruptible(map->passive.inflight_accept_req,
!test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags))) {
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return -EINTR;
}
}
@@ -802,7 +781,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
spin_unlock(&bedata->socket_lock);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
@@ -810,7 +789,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
spin_unlock(&bedata->socket_lock);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return -ENOMEM;
}
ret = create_active(map2, &evtchn);
@@ -819,7 +798,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
spin_unlock(&bedata->socket_lock);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
list_add_tail(&map2->list, &bedata->socket_mappings);
@@ -841,13 +820,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
/* We could check if we have received a response before returning. */
if (nonblock) {
WRITE_ONCE(map->passive.inflight_req_id, req_id);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return -EAGAIN;
}
if (wait_event_interruptible(bedata->inflight_req,
READ_ONCE(bedata->rsp[req_id].req_id) == req_id)) {
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return -EINTR;
}
/* read req_id, then the content */
@@ -862,7 +841,7 @@ received:
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
pvcalls_front_free_map(bedata, map2);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return -ENOMEM;
}
newsock->sk->sk_send_head = (void *)map2;
@@ -874,7 +853,7 @@ received:
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags);
wake_up(&map->passive.inflight_accept_req);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
@@ -965,23 +944,16 @@ __poll_t pvcalls_front_poll(struct file *file, struct socket *sock,
struct sock_mapping *map;
__poll_t ret;
- pvcalls_enter();
- if (!pvcalls_front_dev) {
- pvcalls_exit();
+ map = pvcalls_enter_sock(sock);
+ if (IS_ERR(map))
return EPOLLNVAL;
- }
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
- map = (struct sock_mapping *) sock->sk->sk_send_head;
- if (!map) {
- pvcalls_exit();
- return EPOLLNVAL;
- }
if (map->active_socket)
ret = pvcalls_front_poll_active(file, bedata, map, wait);
else
ret = pvcalls_front_poll_passive(file, bedata, map, wait);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
@@ -995,25 +967,20 @@ int pvcalls_front_release(struct socket *sock)
if (sock->sk == NULL)
return 0;
- pvcalls_enter();
- if (!pvcalls_front_dev) {
- pvcalls_exit();
- return -EIO;
+ map = pvcalls_enter_sock(sock);
+ if (IS_ERR(map)) {
+ if (PTR_ERR(map) == -ENOTCONN)
+ return -EIO;
+ else
+ return 0;
}
-
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
- map = (struct sock_mapping *) sock->sk->sk_send_head;
- if (map == NULL) {
- pvcalls_exit();
- return 0;
- }
-
spin_lock(&bedata->socket_lock);
ret = get_request(bedata, &req_id);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
- pvcalls_exit();
+ pvcalls_exit_sock(sock);
return ret;
}
sock->sk->sk_send_head = NULL;
@@ -1043,14 +1010,20 @@ int pvcalls_front_release(struct socket *sock)
/*
* We need to make sure that sendmsg/recvmsg on this socket have
* not started before we've cleared sk_send_head here. The
- * easiest (though not optimal) way to guarantee this is to see
- * that no pvcall (other than us) is in progress.
+ * easiest way to guarantee this is to see that no pvcalls
+ * (other than us) is in progress on this socket.
*/
- while (atomic_read(&pvcalls_refcount) > 1)
+ while (atomic_read(&map->refcount) > 1)
cpu_relax();
pvcalls_front_free_map(bedata, map);
} else {
+ wake_up(&bedata->inflight_req);
+ wake_up(&map->passive.inflight_accept_req);
+
+ while (atomic_read(&map->refcount) > 1)
+ cpu_relax();
+
spin_lock(&bedata->socket_lock);
list_del(&map->list);
spin_unlock(&bedata->socket_lock);
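
Illustrative note (not part of the patch): the pvcalls-front changes above replace checks of the global pvcalls_refcount with a per-socket refcount taken in pvcalls_enter_sock() and dropped in pvcalls_exit_sock(), so the release path only waits for users of that one socket. Below is a minimal standalone sketch of the same idea, assuming hypothetical enter_sock()/exit_sock()/release_sock() helpers and C11 atomics in place of the kernel primitives.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy model of the per-socket refcount; names are hypothetical. */
    struct sock_map {
            atomic_int refcount;
    };

    static void enter_sock(struct sock_map *map)    /* like pvcalls_enter_sock() */
    {
            atomic_fetch_add(&map->refcount, 1);
    }

    static void exit_sock(struct sock_map *map)     /* like pvcalls_exit_sock() */
    {
            atomic_fetch_sub(&map->refcount, 1);
    }

    static void release_sock(struct sock_map *map)
    {
            /* The releaser holds one reference; wait until it is the only one. */
            while (atomic_load(&map->refcount) > 1)
                    ;       /* cpu_relax() in the kernel */
            free(map);
    }

    int main(void)
    {
            struct sock_map *map = malloc(sizeof(*map));

            if (!map)
                    return 1;
            atomic_init(&map->refcount, 0);

            enter_sock(map);        /* an operation (e.g. sendmsg) starts   */
            exit_sock(map);         /* ... and finishes                     */

            enter_sock(map);        /* release path takes its own reference */
            release_sock(map);      /* no other users left, safe to free    */
            printf("released\n");
            return 0;
    }
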
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index bf13d1ec51f3..04e7b3b29bac 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -284,6 +284,10 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset,
int pool = tmem_frontswap_poolid;
int ret;
+ /* THP isn't supported */
+ if (PageTransHuge(page))
+ return -1;
+
if (pool < 0)
return -1;
if (ind64 != ind)
diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
index 149c5e7efc89..092981171df1 100644
--- a/drivers/xen/xenbus/xenbus.h
+++ b/drivers/xen/xenbus/xenbus.h
@@ -76,6 +76,7 @@ struct xb_req_data {
struct list_head list;
wait_queue_head_t wq;
struct xsd_sockmsg msg;
+ uint32_t caller_req_id;
enum xsd_sockmsg_type type;
char *body;
const struct kvec *vec;
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index 5b081a01779d..d239fc3c5e3d 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -309,6 +309,7 @@ static int process_msg(void)
goto out;
if (req->state == xb_req_state_wait_reply) {
+ req->msg.req_id = req->caller_req_id;
req->msg.type = state.msg.type;
req->msg.len = state.msg.len;
req->body = state.body;
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index 3e59590c7254..3f3b29398ab8 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -227,6 +227,8 @@ static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg)
req->state = xb_req_state_queued;
init_waitqueue_head(&req->wq);
+ /* Save the caller req_id and restore it later in the reply */
+ req->caller_req_id = req->msg.req_id;
req->msg.req_id = xs_request_enter(req);
mutex_lock(&xb_write_mutex);
@@ -310,6 +312,7 @@ static void *xs_talkv(struct xenbus_transaction t,
req->num_vecs = num_vecs;
req->cb = xs_wake_up;
+ msg.req_id = 0;
msg.tx_id = t.id;
msg.type = type;
msg.len = 0;
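
Illustrative note (not part of the patch): the xenbus change above stores the caller-supplied req_id in caller_req_id before xs_request_enter() assigns an internal id, and restores it when the reply is processed, so callers see their own id even though the internal one is used on the ring. A small standalone sketch of that save/restore pattern, with hypothetical send_request()/handle_reply() helpers standing in for the xenbus code:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    struct msg {
            uint32_t req_id;
            const char *body;
    };

    struct request {
            struct msg msg;
            uint32_t caller_req_id; /* what the caller originally put in msg.req_id */
    };

    static uint32_t next_internal_id = 1;

    static void send_request(struct request *req)
    {
            /* Remember the caller's id, then use our own id on the wire. */
            req->caller_req_id = req->msg.req_id;
            req->msg.req_id = next_internal_id++;
    }

    static void handle_reply(struct request *req, const struct msg *reply)
    {
            /* Hand the reply back under the id the caller expects. */
            req->msg.req_id = req->caller_req_id;
            req->msg.body = reply->body;
    }

    int main(void)
    {
            struct request req = { .msg = { .req_id = 42, .body = NULL } };
            struct msg reply = { .req_id = 1, .body = "OK" };

            send_request(&req);
            handle_reply(&req, &reply);
            printf("caller sees req_id=%" PRIu32 " body=%s\n",
                   req.msg.req_id, req.msg.body);
            return 0;
    }
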