Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/bus.c12
-rw-r--r--drivers/acpi/ec.c2
-rw-r--r--drivers/acpi/int340x_thermal.c9
-rw-r--r--drivers/acpi/pci_irq.c1
-rw-r--r--drivers/acpi/pci_link.c16
-rw-r--r--drivers/atm/he.c7
-rw-r--r--drivers/atm/solos-pci.c12
-rw-r--r--drivers/base/cacheinfo.c10
-rw-r--r--drivers/base/platform-msi.c18
-rw-r--r--drivers/base/power/opp.c17
-rw-r--r--drivers/block/loop.c11
-rw-r--r--drivers/block/null_blk.c38
-rw-r--r--drivers/block/nvme-core.c52
-rw-r--r--drivers/block/virtio_blk.c2
-rw-r--r--drivers/block/xen-blkback/xenbus.c38
-rw-r--r--drivers/block/xen-blkfront.c19
-rw-r--r--drivers/block/zram/zcomp.c12
-rw-r--r--drivers/char/hw_random/xgene-rng.c7
-rw-r--r--drivers/clk/clk.c3
-rw-r--r--drivers/clk/h8300/clk-h8s2678.c1
-rw-r--r--drivers/clk/hisilicon/Kconfig8
-rw-r--r--drivers/clk/hisilicon/Makefile3
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c9
-rw-r--r--drivers/clk/rockchip/clk-rk3368.c6
-rw-r--r--drivers/clk/shmobile/clk-mstp.c4
-rw-r--r--drivers/clk/st/clkgen-fsyn.c8
-rw-r--r--drivers/clk/st/clkgen-pll.c12
-rw-r--r--drivers/clk/tegra/clk-dfll.c8
-rw-r--r--drivers/clocksource/rockchip_timer.c2
-rw-r--r--drivers/clocksource/timer-keystone.c2
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c3
-rw-r--r--drivers/cpufreq/cpufreq.c4
-rw-r--r--drivers/crypto/Kconfig2
-rw-r--r--drivers/crypto/marvell/cesa.h27
-rw-r--r--drivers/crypto/marvell/cipher.c7
-rw-r--r--drivers/crypto/marvell/hash.c8
-rw-r--r--drivers/crypto/qat/qat_common/adf_aer.c3
-rw-r--r--drivers/crypto/sunxi-ss/sun4i-ss-cipher.c2
-rw-r--r--drivers/devfreq/devfreq.c12
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c3
-rw-r--r--drivers/devfreq/governor_simpleondemand.c33
-rw-r--r--drivers/devfreq/tegra-devfreq.c8
-rw-r--r--drivers/dma/at_xdmac.c15
-rw-r--r--drivers/dma/dmaengine.c10
-rw-r--r--drivers/dma/dw/core.c4
-rw-r--r--drivers/dma/idma64.c16
-rw-r--r--drivers/dma/ipu/ipu_irq.c2
-rw-r--r--drivers/dma/pxa_dma.c31
-rw-r--r--drivers/dma/sun4i-dma.c6
-rw-r--r--drivers/dma/xgene-dma.c46
-rw-r--r--drivers/dma/zx296702_dma.c2
-rw-r--r--drivers/extcon/extcon.c2
-rw-r--r--drivers/firmware/Kconfig8
-rw-r--r--drivers/firmware/Makefile3
-rw-r--r--drivers/firmware/efi/libstub/arm-stub.c88
-rw-r--r--drivers/firmware/efi/libstub/efistub.h4
-rw-r--r--drivers/firmware/qcom_scm-64.c63
-rw-r--r--drivers/gpio/Kconfig2
-rw-r--r--drivers/gpio/gpio-altera.c6
-rw-r--r--drivers/gpio/gpio-bcm-kona.c2
-rw-r--r--drivers/gpio/gpio-brcmstb.c2
-rw-r--r--drivers/gpio/gpio-davinci.c3
-rw-r--r--drivers/gpio/gpio-dwapb.c2
-rw-r--r--drivers/gpio/gpio-ep93xx.c5
-rw-r--r--drivers/gpio/gpio-intel-mid.c2
-rw-r--r--drivers/gpio/gpio-lynxpoint.c2
-rw-r--r--drivers/gpio/gpio-mpc8xxx.c2
-rw-r--r--drivers/gpio/gpio-msic.c2
-rw-r--r--drivers/gpio/gpio-msm-v2.c2
-rw-r--r--drivers/gpio/gpio-mvebu.c2
-rw-r--r--drivers/gpio/gpio-mxc.c16
-rw-r--r--drivers/gpio/gpio-mxs.c15
-rw-r--r--drivers/gpio/gpio-omap.c11
-rw-r--r--drivers/gpio/gpio-pl061.c2
-rw-r--r--drivers/gpio/gpio-pxa.c2
-rw-r--r--drivers/gpio/gpio-sa1100.c3
-rw-r--r--drivers/gpio/gpio-sx150x.c1
-rw-r--r--drivers/gpio/gpio-tegra.c2
-rw-r--r--drivers/gpio/gpio-timberdale.c2
-rw-r--r--drivers/gpio/gpio-tz1090.c4
-rw-r--r--drivers/gpio/gpio-vf610.c6
-rw-r--r--drivers/gpio/gpio-zx.c2
-rw-r--r--drivers/gpio/gpio-zynq.c2
-rw-r--r--drivers/gpio/gpiolib.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c140
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c65
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c27
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_test.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c45
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_smc.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/fiji_smc.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c74
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c79
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_smc.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_smc.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c3
-rw-r--r--drivers/gpu/drm/amd/include/cgs_linux.h17
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h41
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c155
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h41
-rw-r--r--drivers/gpu/drm/amd/scheduler/sched_fence.c4
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c85
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c6
-rw-r--r--drivers/gpu/drm/drm_ioctl.c3
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c19
-rw-r--r--drivers/gpu/drm/exynos/exynos7_drm_decon.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp_core.c23
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c15
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c36
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c14
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c94
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c3
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c26
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c2
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c12
-rw-r--r--drivers/gpu/drm/i915/intel_display.c7
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c9
-rw-r--r--drivers/gpu/drm/i915/intel_hotplug.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c39
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h2
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c3
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c31
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c36
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c1
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c14
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c32
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c29
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c11
-rw-r--r--drivers/gpu/ipu-v3/ipu-common.c7
-rw-r--r--drivers/hv/channel_mgmt.c17
-rw-r--r--drivers/hwmon/Kconfig4
-rw-r--r--drivers/hwmon/abx500.c1
-rw-r--r--drivers/hwmon/gpio-fan.c1
-rw-r--r--drivers/hwmon/nct6775.c64
-rw-r--r--drivers/hwmon/pwm-fan.c1
-rw-r--r--drivers/idle/intel_idle.c12
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/hw/Makefile1
-rw-r--r--drivers/infiniband/hw/mlx5/main.c67
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h2
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c18
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c26
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c5
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h1
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c18
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c21
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c293
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h21
-rw-r--r--drivers/input/joystick/Kconfig1
-rw-r--r--drivers/input/joystick/walkera0701.c4
-rw-r--r--drivers/input/keyboard/omap4-keypad.c2
-rw-r--r--drivers/input/misc/pm8941-pwrkey.c2
-rw-r--r--drivers/input/misc/uinput.c2
-rw-r--r--drivers/input/mouse/elan_i2c.h2
-rw-r--r--drivers/input/mouse/elan_i2c_core.c26
-rw-r--r--drivers/input/mouse/elan_i2c_i2c.c4
-rw-r--r--drivers/input/mouse/elan_i2c_smbus.c4
-rw-r--r--drivers/input/mouse/synaptics.c12
-rw-r--r--drivers/input/serio/libps2.c22
-rw-r--r--drivers/input/serio/parkbd.c1
-rw-r--r--drivers/input/touchscreen/imx6ul_tsc.c34
-rw-r--r--drivers/input/touchscreen/mms114.c4
-rw-r--r--drivers/iommu/Kconfig2
-rw-r--r--drivers/iommu/intel-iommu.c8
-rw-r--r--drivers/iommu/iova.c120
-rw-r--r--drivers/irqchip/exynos-combiner.c8
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c6
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c24
-rw-r--r--drivers/irqchip/irq-bcm2835.c6
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c2
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c2
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c7
-rw-r--r--drivers/irqchip/irq-clps711x.c6
-rw-r--r--drivers/irqchip/irq-dw-apb-ictl.c2
-rw-r--r--drivers/irqchip/irq-gic-v2m.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its-pci-msi.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c9
-rw-r--r--drivers/irqchip/irq-gic-v3.c19
-rw-r--r--drivers/irqchip/irq-gic.c86
-rw-r--r--drivers/irqchip/irq-hip04.c4
-rw-r--r--drivers/irqchip/irq-i8259.c2
-rw-r--r--drivers/irqchip/irq-imgpdc.c4
-rw-r--r--drivers/irqchip/irq-keystone.c4
-rw-r--r--drivers/irqchip/irq-metag-ext.c2
-rw-r--r--drivers/irqchip/irq-metag.c2
-rw-r--r--drivers/irqchip/irq-mips-gic.c14
-rw-r--r--drivers/irqchip/irq-mmp.c5
-rw-r--r--drivers/irqchip/irq-mxs.c1
-rw-r--r--drivers/irqchip/irq-orion.c2
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c11
-rw-r--r--drivers/irqchip/irq-renesas-irqc.c10
-rw-r--r--drivers/irqchip/irq-s3c24xx.c16
-rw-r--r--drivers/irqchip/irq-sun4i.c2
-rw-r--r--drivers/irqchip/irq-sunxi-nmi.c2
-rw-r--r--drivers/irqchip/irq-tb10x.c2
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c10
-rw-r--r--drivers/irqchip/irq-vic.c4
-rw-r--r--drivers/irqchip/irq-vt8500.c1
-rw-r--r--drivers/irqchip/spear-shirq.c3
-rw-r--r--drivers/leds/Kconfig3
-rw-r--r--drivers/leds/leds-aat1290.c3
-rw-r--r--drivers/leds/leds-bcm6328.c1
-rw-r--r--drivers/leds/leds-bcm6358.c1
-rw-r--r--drivers/leds/leds-ktd2692.c1
-rw-r--r--drivers/leds/leds-max77693.c1
-rw-r--r--drivers/leds/leds-ns2.c1
-rw-r--r--drivers/md/bitmap.c3
-rw-r--r--drivers/md/dm-crypt.c17
-rw-r--r--drivers/md/dm-thin.c4
-rw-r--r--drivers/md/md.c5
-rw-r--r--drivers/md/multipath.c3
-rw-r--r--drivers/md/raid0.c12
-rw-r--r--drivers/md/raid1.c11
-rw-r--r--drivers/md/raid10.c9
-rw-r--r--drivers/md/raid5.c11
-rw-r--r--drivers/mfd/asic3.c2
-rw-r--r--drivers/mfd/ezx-pcap.c2
-rw-r--r--drivers/mfd/htc-egpio.c2
-rw-r--r--drivers/mfd/jz4740-adc.c2
-rw-r--r--drivers/mfd/pm8921-core.c2
-rw-r--r--drivers/mfd/t7l66xb.c2
-rw-r--r--drivers/mfd/tc6393xb.c3
-rw-r--r--drivers/mfd/ucb1x00-core.c2
-rw-r--r--drivers/misc/cxl/Makefile2
-rw-r--r--drivers/misc/cxl/pci.c2
-rw-r--r--drivers/misc/cxl/sysfs.c2
-rw-r--r--drivers/misc/cxl/vphb.c6
-rw-r--r--drivers/misc/mei/debugfs.c3
-rw-r--r--drivers/mmc/core/core.c6
-rw-r--r--drivers/mmc/core/host.c4
-rw-r--r--drivers/mmc/host/pxamci.c66
-rw-r--r--drivers/mmc/host/sunxi-mmc.c53
-rw-r--r--drivers/mtd/ubi/io.c5
-rw-r--r--drivers/mtd/ubi/vtbl.c1
-rw-r--r--drivers/mtd/ubi/wl.c1
-rw-r--r--drivers/net/arcnet/arcnet.c2
-rw-r--r--drivers/net/dsa/mv88e6xxx.c3
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c24
-rw-r--r--drivers/net/ethernet/arc/emac_arc.c1
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c20
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c12
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c1
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c13
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_tx_rx.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h1
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c29
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h5
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h1
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c10
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c15
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c1
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c8
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c2
-rw-r--r--drivers/net/ethernet/ibm/emac/core.h6
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c9
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c3
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq.c9
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c22
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c1
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c18
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c111
-rw-r--r--drivers/net/ethernet/realtek/r8169.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c11
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c17
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c74
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c47
-rw-r--r--drivers/net/ethernet/via/Kconfig2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c2
-rw-r--r--drivers/net/fjes/fjes_hw.c8
-rw-r--r--drivers/net/geneve.c32
-rw-r--r--drivers/net/irda/ali-ircc.c6
-rw-r--r--drivers/net/macvtap.c4
-rw-r--r--drivers/net/phy/fixed_phy.c2
-rw-r--r--drivers/net/phy/marvell.c9
-rw-r--r--drivers/net/phy/mdio-bcm-unimac.c1
-rw-r--r--drivers/net/phy/mdio-gpio.c1
-rw-r--r--drivers/net/phy/mdio-mux.c19
-rw-r--r--drivers/net/phy/mdio_bus.c31
-rw-r--r--drivers/net/phy/phy_device.c62
-rw-r--r--drivers/net/phy/vitesse.c14
-rw-r--r--drivers/net/ppp/ppp_generic.c4
-rw-r--r--drivers/net/usb/Kconfig11
-rw-r--r--drivers/net/usb/Makefile2
-rw-r--r--drivers/net/usb/ch9200.c432
-rw-r--r--drivers/net/vrf.c3
-rw-r--r--drivers/net/vxlan.c15
-rw-r--r--drivers/nvdimm/btt_devs.c4
-rw-r--r--drivers/nvdimm/pfn_devs.c4
-rw-r--r--drivers/nvdimm/pmem.c2
-rw-r--r--drivers/of/of_mdio.c27
-rw-r--r--drivers/of/of_pci_irq.c22
-rw-r--r--drivers/parisc/dino.c3
-rw-r--r--drivers/parisc/lba_pci.c1
-rw-r--r--drivers/pci/access.c27
-rw-r--r--drivers/pci/bus.c2
-rw-r--r--drivers/pci/host/pci-keystone.c5
-rw-r--r--drivers/pci/host/pci-rcar-gen2.c1
-rw-r--r--drivers/pci/host/pci-xgene-msi.c2
-rw-r--r--drivers/pci/pci-driver.c7
-rw-r--r--drivers/pci/probe.c23
-rw-r--r--drivers/pci/quirks.c20
-rw-r--r--drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c2
-rw-r--r--drivers/pinctrl/core.c3
-rw-r--r--drivers/pinctrl/intel/pinctrl-baytrail.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-cherryview.c2
-rw-r--r--drivers/pinctrl/intel/pinctrl-intel.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c2
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-nomadik.c4
-rw-r--r--drivers/pinctrl/pinctrl-adi2.c3
-rw-r--r--drivers/pinctrl/pinctrl-amd.c6
-rw-r--r--drivers/pinctrl/pinctrl-at91.c2
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c2
-rw-r--r--drivers/pinctrl/pinctrl-digicolor.c4
-rw-r--r--drivers/pinctrl/pinctrl-pistachio.c10
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c2
-rw-r--r--drivers/pinctrl/pinctrl-single.c2
-rw-r--r--drivers/pinctrl/pinctrl-st.c4
-rw-r--r--drivers/pinctrl/pinmux.c3
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c5
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c4
-rw-r--r--drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c4
-rw-r--r--drivers/pinctrl/samsung/pinctrl-exynos.c4
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c24xx.c10
-rw-r--r--drivers/pinctrl/samsung/pinctrl-s3c64xx.c10
-rw-r--r--drivers/pinctrl/sirf/pinctrl-atlas7.c4
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-plgpio.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c12
-rw-r--r--drivers/platform/x86/asus-nb-wmi.c18
-rw-r--r--drivers/platform/x86/hp-wmi.c31
-rw-r--r--drivers/platform/x86/toshiba_acpi.c10
-rw-r--r--drivers/platform/x86/wmi.c51
-rw-r--r--drivers/power/twl4030_charger.c8
-rw-r--r--drivers/regulator/anatop-regulator.c1
-rw-r--r--drivers/regulator/core.c21
-rw-r--r--drivers/regulator/gpio-regulator.c1
-rw-r--r--drivers/regulator/pbias-regulator.c56
-rw-r--r--drivers/regulator/tps65218-regulator.c2
-rw-r--r--drivers/regulator/vexpress.c1
-rw-r--r--drivers/s390/virtio/virtio_ccw.c10
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/sh/intc/core.c2
-rw-r--r--drivers/sh/intc/internals.h10
-rw-r--r--drivers/sh/intc/virq.c4
-rw-r--r--drivers/sh/pm_runtime.c19
-rw-r--r--drivers/soc/dove/pmu.c6
-rw-r--r--drivers/spi/spi-atmel.c2
-rw-r--r--drivers/spi/spi-bcm2835.c6
-rw-r--r--drivers/spi/spi-meson-spifc.c1
-rw-r--r--drivers/spi/spi-mt65xx.c53
-rw-r--r--drivers/spi/spi-pxa2xx.c4
-rw-r--r--drivers/spi/spi-xtensa-xtfpga.c4
-rw-r--r--drivers/spi/spi.c3
-rw-r--r--drivers/spi/spidev.c3
-rw-r--r--drivers/spmi/spmi-pmic-arb.c2
-rw-r--r--drivers/staging/android/TODO20
-rw-r--r--drivers/staging/android/ion/ion.c6
-rw-r--r--drivers/staging/fbtft/fb_uc1611.c2
-rw-r--r--drivers/staging/fbtft/fb_watterott.c4
-rw-r--r--drivers/staging/fbtft/fbtft-core.c10
-rw-r--r--drivers/staging/fbtft/flexfb.c11
-rw-r--r--drivers/staging/lustre/README.txt16
-rw-r--r--drivers/staging/most/Kconfig1
-rw-r--r--drivers/staging/most/hdm-dim2/Kconfig1
-rw-r--r--drivers/staging/most/hdm-usb/Kconfig2
-rw-r--r--drivers/staging/most/mostcore/Kconfig1
-rw-r--r--drivers/staging/rdma/Kconfig2
-rw-r--r--drivers/staging/rdma/Makefile1
-rw-r--r--drivers/staging/rdma/ehca/Kconfig (renamed from drivers/infiniband/hw/ehca/Kconfig)3
-rw-r--r--drivers/staging/rdma/ehca/Makefile (renamed from drivers/infiniband/hw/ehca/Makefile)0
-rw-r--r--drivers/staging/rdma/ehca/TODO4
-rw-r--r--drivers/staging/rdma/ehca/ehca_av.c (renamed from drivers/infiniband/hw/ehca/ehca_av.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_classes.h (renamed from drivers/infiniband/hw/ehca/ehca_classes.h)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_classes_pSeries.h (renamed from drivers/infiniband/hw/ehca/ehca_classes_pSeries.h)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_cq.c (renamed from drivers/infiniband/hw/ehca/ehca_cq.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_eq.c (renamed from drivers/infiniband/hw/ehca/ehca_eq.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_hca.c (renamed from drivers/infiniband/hw/ehca/ehca_hca.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_irq.c (renamed from drivers/infiniband/hw/ehca/ehca_irq.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_irq.h (renamed from drivers/infiniband/hw/ehca/ehca_irq.h)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_iverbs.h (renamed from drivers/infiniband/hw/ehca/ehca_iverbs.h)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_main.c (renamed from drivers/infiniband/hw/ehca/ehca_main.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_mcast.c (renamed from drivers/infiniband/hw/ehca/ehca_mcast.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_mrmw.c (renamed from drivers/infiniband/hw/ehca/ehca_mrmw.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_mrmw.h (renamed from drivers/infiniband/hw/ehca/ehca_mrmw.h)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_pd.c (renamed from drivers/infiniband/hw/ehca/ehca_pd.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_qes.h (renamed from drivers/infiniband/hw/ehca/ehca_qes.h)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_qp.c (renamed from drivers/infiniband/hw/ehca/ehca_qp.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_reqs.c (renamed from drivers/infiniband/hw/ehca/ehca_reqs.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_sqp.c (renamed from drivers/infiniband/hw/ehca/ehca_sqp.c)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_tools.h (renamed from drivers/infiniband/hw/ehca/ehca_tools.h)0
-rw-r--r--drivers/staging/rdma/ehca/ehca_uverbs.c (renamed from drivers/infiniband/hw/ehca/ehca_uverbs.c)0
-rw-r--r--drivers/staging/rdma/ehca/hcp_if.c (renamed from drivers/infiniband/hw/ehca/hcp_if.c)0
-rw-r--r--drivers/staging/rdma/ehca/hcp_if.h (renamed from drivers/infiniband/hw/ehca/hcp_if.h)0
-rw-r--r--drivers/staging/rdma/ehca/hcp_phyp.c (renamed from drivers/infiniband/hw/ehca/hcp_phyp.c)0
-rw-r--r--drivers/staging/rdma/ehca/hcp_phyp.h (renamed from drivers/infiniband/hw/ehca/hcp_phyp.h)0
-rw-r--r--drivers/staging/rdma/ehca/hipz_fns.h (renamed from drivers/infiniband/hw/ehca/hipz_fns.h)0
-rw-r--r--drivers/staging/rdma/ehca/hipz_fns_core.h (renamed from drivers/infiniband/hw/ehca/hipz_fns_core.h)0
-rw-r--r--drivers/staging/rdma/ehca/hipz_hw.h (renamed from drivers/infiniband/hw/ehca/hipz_hw.h)0
-rw-r--r--drivers/staging/rdma/ehca/ipz_pt_fn.c (renamed from drivers/infiniband/hw/ehca/ipz_pt_fn.c)0
-rw-r--r--drivers/staging/rdma/ehca/ipz_pt_fn.h (renamed from drivers/infiniband/hw/ehca/ipz_pt_fn.h)0
-rw-r--r--drivers/staging/rdma/hfi1/chip.c4
-rw-r--r--drivers/staging/rdma/hfi1/device.c54
-rw-r--r--drivers/staging/rdma/hfi1/device.h3
-rw-r--r--drivers/staging/rdma/hfi1/diag.c36
-rw-r--r--drivers/staging/rdma/hfi1/file_ops.c10
-rw-r--r--drivers/staging/rdma/hfi1/mad.c4
-rw-r--r--drivers/staging/rdma/hfi1/sdma.c6
-rw-r--r--drivers/staging/rdma/hfi1/sdma.h36
-rw-r--r--drivers/staging/rdma/hfi1/verbs.c8
-rw-r--r--drivers/staging/unisys/visorbus/Makefile1
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c13
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c18
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c5
-rw-r--r--drivers/target/target_core_device.c45
-rw-r--r--drivers/target/target_core_hba.c2
-rw-r--r--drivers/target/target_core_iblock.c2
-rw-r--r--drivers/target/target_core_pr.c91
-rw-r--r--drivers/target/target_core_tpg.c5
-rw-r--r--drivers/thermal/Kconfig17
-rw-r--r--drivers/thermal/cpu_cooling.c52
-rw-r--r--drivers/thermal/db8500_cpufreq_cooling.c1
-rw-r--r--drivers/thermal/power_allocator.c253
-rw-r--r--drivers/thermal/thermal_core.c28
-rw-r--r--drivers/thermal/ti-soc-thermal/Kconfig8
-rw-r--r--drivers/thunderbolt/nhi.c2
-rw-r--r--drivers/tty/serial/8250/8250_port.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_imx.c2
-rw-r--r--drivers/usb/chipidea/ci_hdrc_usb2.c25
-rw-r--r--drivers/usb/chipidea/udc.c84
-rw-r--r--drivers/usb/core/config.c5
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c4
-rw-r--r--drivers/usb/dwc3/gadget.c4
-rw-r--r--drivers/usb/gadget/epautoconf.c1
-rw-r--r--drivers/usb/gadget/udc/amd5536udc.c43
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c11
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_core.c3
-rw-r--r--drivers/usb/gadget/udc/dummy_hcd.c46
-rw-r--r--drivers/usb/gadget/udc/gr_udc.c3
-rw-r--r--drivers/usb/gadget/udc/mv_u3d_core.c3
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c3
-rw-r--r--drivers/usb/host/xhci-mem.c17
-rw-r--r--drivers/usb/host/xhci-pci.c90
-rw-r--r--drivers/usb/host/xhci-ring.c13
-rw-r--r--drivers/usb/host/xhci.c24
-rw-r--r--drivers/usb/musb/musb_core.c7
-rw-r--r--drivers/usb/musb/musb_cppi41.c3
-rw-r--r--drivers/usb/musb/musb_dsps.c7
-rw-r--r--drivers/usb/musb/ux500.c2
-rw-r--r--drivers/usb/phy/Kconfig2
-rw-r--r--drivers/usb/phy/phy-generic.c3
-rw-r--r--drivers/usb/phy/phy-isp1301.c1
-rw-r--r--drivers/usb/serial/option.c24
-rw-r--r--drivers/usb/serial/whiteheat.c31
-rw-r--r--drivers/vhost/net.c3
-rw-r--r--drivers/vhost/scsi.c4
-rw-r--r--drivers/vhost/test.c3
-rw-r--r--drivers/vhost/vhost.h4
-rw-r--r--drivers/watchdog/Kconfig3
-rw-r--r--drivers/watchdog/bcm2835_wdt.c10
-rw-r--r--drivers/watchdog/gef_wdt.c1
-rw-r--r--drivers/watchdog/mena21_wdt.c1
-rw-r--r--drivers/watchdog/moxart_wdt.c1
521 files changed, 4332 insertions, 2733 deletions
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 46506e7687cd..a212cefae524 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -315,14 +315,10 @@ static void acpi_bus_osc_support(void)
capbuf[OSC_QUERY_DWORD] = OSC_QUERY_ENABLE;
capbuf[OSC_SUPPORT_DWORD] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
-#if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\
- defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
- capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT;
-#endif
-
-#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
- capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
-#endif
+ if (IS_ENABLED(CONFIG_ACPI_PROCESSOR_AGGREGATOR))
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PAD_SUPPORT;
+ if (IS_ENABLED(CONFIG_ACPI_PROCESSOR))
+ capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_PPC_OST_SUPPORT;
capbuf[OSC_SUPPORT_DWORD] |= OSC_SB_HOTPLUG_OST_SUPPORT;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 2614a839c60d..42c66b64c12c 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1044,8 +1044,10 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data)
goto err_exit;
mutex_lock(&ec->mutex);
+ result = -ENODATA;
list_for_each_entry(handler, &ec->list, node) {
if (value == handler->query_bit) {
+ result = 0;
q->handler = acpi_ec_get_query_handler(handler);
ec_dbg_evt("Query(0x%02x) scheduled",
q->handler->query_bit);
diff --git a/drivers/acpi/int340x_thermal.c b/drivers/acpi/int340x_thermal.c
index 9dcf83682e36..33505c651f62 100644
--- a/drivers/acpi/int340x_thermal.c
+++ b/drivers/acpi/int340x_thermal.c
@@ -33,13 +33,12 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
static int int340x_thermal_handler_attach(struct acpi_device *adev,
const struct acpi_device_id *id)
{
-#if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE)
- acpi_create_platform_device(adev);
-#elif defined(INTEL_SOC_DTS_THERMAL) || defined(INTEL_SOC_DTS_THERMAL_MODULE)
+ if (IS_ENABLED(CONFIG_INT340X_THERMAL))
+ acpi_create_platform_device(adev);
/* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
- if (id->driver_data == INT3401_DEVICE)
+ else if (IS_ENABLED(CONFIG_INTEL_SOC_DTS_THERMAL) &&
+ id->driver_data == INT3401_DEVICE)
acpi_create_platform_device(adev);
-#endif
return 1;
}
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 6da0f9beab19..c9336751e5e3 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -372,6 +372,7 @@ static int acpi_isa_register_gsi(struct pci_dev *dev)
/* Interrupt Line values above 0xF are forbidden */
if (dev->irq > 0 && (dev->irq <= 0xF) &&
+ acpi_isa_irq_available(dev->irq) &&
(acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
dev_warn(&dev->dev, "PCI INT %c: no GSI - using ISA IRQ %d\n",
pin_name(dev->pin), dev->irq);
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 3b4ea98e3ea0..7c8408b946ca 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -498,8 +498,7 @@ int __init acpi_irq_penalty_init(void)
PIRQ_PENALTY_PCI_POSSIBLE;
}
}
- /* Add a penalty for the SCI */
- acpi_irq_penalty[acpi_gbl_FADT.sci_interrupt] += PIRQ_PENALTY_PCI_USING;
+
return 0;
}
@@ -553,6 +552,13 @@ static int acpi_pci_link_allocate(struct acpi_pci_link *link)
irq = link->irq.possible[i];
}
}
+ if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) {
+ printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
+ "Try pci=noacpi or acpi=off\n",
+ acpi_device_name(link->device),
+ acpi_device_bid(link->device));
+ return -ENODEV;
+ }
/* Attempt to enable the link device at this IRQ. */
if (acpi_pci_link_set(link, irq)) {
@@ -821,6 +827,12 @@ void acpi_penalize_isa_irq(int irq, int active)
}
}
+bool acpi_isa_irq_available(int irq)
+{
+ return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) ||
+ acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
+}
+
/*
* Penalize IRQ used by ACPI SCI. If ACPI SCI pin attributes conflict with
* PCI IRQ attributes, mark ACPI SCI as ISA_ALWAYS so it won't be use for
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index a8da3a50e374..0f5cb37636bc 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -1578,9 +1578,7 @@ he_stop(struct he_dev *he_dev)
kfree(he_dev->rbpl_virt);
kfree(he_dev->rbpl_table);
-
- if (he_dev->rbpl_pool)
- dma_pool_destroy(he_dev->rbpl_pool);
+ dma_pool_destroy(he_dev->rbpl_pool);
if (he_dev->rbrq_base)
dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_RBRQ_SIZE * sizeof(struct he_rbrq),
@@ -1594,8 +1592,7 @@ he_stop(struct he_dev *he_dev)
dma_free_coherent(&he_dev->pci_dev->dev, CONFIG_TBRQ_SIZE * sizeof(struct he_tbrq),
he_dev->tpdrq_base, he_dev->tpdrq_phys);
- if (he_dev->tpd_pool)
- dma_pool_destroy(he_dev->tpd_pool);
+ dma_pool_destroy(he_dev->tpd_pool);
if (he_dev->pci_dev) {
pci_read_config_word(he_dev->pci_dev, PCI_COMMAND, &command);
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 74e18b0a6d89..3d7fb6516f74 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -805,7 +805,12 @@ static void solos_bh(unsigned long card_arg)
continue;
}
- skb = alloc_skb(size + 1, GFP_ATOMIC);
+ /* Use netdev_alloc_skb() because it adds NET_SKB_PAD of
+ * headroom, and ensures we can route packets back out an
+ * Ethernet interface (for example) without having to
+ * reallocate. Adding NET_IP_ALIGN also ensures that both
+ * PPPoATM and PPPoEoBR2684 packets end up aligned. */
+ skb = netdev_alloc_skb_ip_align(NULL, size + 1);
if (!skb) {
if (net_ratelimit())
dev_warn(&card->dev->dev, "Failed to allocate sk_buff for RX\n");
@@ -869,7 +874,10 @@ static void solos_bh(unsigned long card_arg)
/* Allocate RX skbs for any ports which need them */
if (card->using_dma && card->atmdev[port] &&
!card->rx_skb[port]) {
- struct sk_buff *skb = alloc_skb(RX_DMA_SIZE, GFP_ATOMIC);
+ /* Unlike the MMIO case (qv) we can't add NET_IP_ALIGN
+ * here; the FPGA can only DMA to addresses which are
+ * aligned to 4 bytes. */
+ struct sk_buff *skb = dev_alloc_skb(RX_DMA_SIZE);
if (skb) {
SKB_CB(skb)->dma_addr =
dma_map_single(&card->dev->dev, skb->data,
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 764280a91776..e9fd32e91668 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -148,7 +148,11 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
if (sibling == cpu) /* skip itself */
continue;
+
sib_cpu_ci = get_cpu_cacheinfo(sibling);
+ if (!sib_cpu_ci->info_list)
+ continue;
+
sib_leaf = sib_cpu_ci->info_list + index;
cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
@@ -159,6 +163,9 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
static void free_cache_attributes(unsigned int cpu)
{
+ if (!per_cpu_cacheinfo(cpu))
+ return;
+
cache_shared_cpu_map_remove(cpu);
kfree(per_cpu_cacheinfo(cpu));
@@ -514,8 +521,7 @@ static int cacheinfo_cpu_callback(struct notifier_block *nfb,
break;
case CPU_DEAD:
cache_remove_dev(cpu);
- if (per_cpu_cacheinfo(cpu))
- free_cache_attributes(cpu);
+ free_cache_attributes(cpu);
break;
}
return notifier_from_errno(rc);
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 1857a5dd0816..134483daac25 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -63,20 +63,8 @@ static int platform_msi_init(struct irq_domain *domain,
unsigned int virq, irq_hw_number_t hwirq,
msi_alloc_info_t *arg)
{
- struct irq_data *data;
-
- irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
- info->chip, info->chip_data);
-
- /*
- * Save the MSI descriptor in handler_data so that the
- * irq_write_msi_msg callback can retrieve it (and the
- * associated device).
- */
- data = irq_domain_get_irq_data(domain, virq);
- data->handler_data = arg->desc;
-
- return 0;
+ return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ info->chip, info->chip_data);
}
#else
#define platform_msi_set_desc NULL
@@ -97,7 +85,7 @@ static void platform_msi_update_dom_ops(struct msi_domain_info *info)
static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
- struct msi_desc *desc = irq_data_get_irq_handler_data(data);
+ struct msi_desc *desc = irq_data_get_msi_desc(data);
struct platform_msi_priv_data *priv_data;
priv_data = desc->platform.msi_priv_data;
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 28cd75c535b0..7ae7cd990fbf 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -892,10 +892,17 @@ static int opp_get_microvolt(struct dev_pm_opp *opp, struct device *dev)
u32 microvolt[3] = {0};
int count, ret;
- count = of_property_count_u32_elems(opp->np, "opp-microvolt");
- if (!count)
+ /* Missing property isn't a problem, but an invalid entry is */
+ if (!of_find_property(opp->np, "opp-microvolt", NULL))
return 0;
+ count = of_property_count_u32_elems(opp->np, "opp-microvolt");
+ if (count < 0) {
+ dev_err(dev, "%s: Invalid opp-microvolt property (%d)\n",
+ __func__, count);
+ return count;
+ }
+
/* There can be one or three elements here */
if (count != 1 && count != 3) {
dev_err(dev, "%s: Invalid number of elements in opp-microvolt property (%d)\n",
@@ -1063,7 +1070,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add);
* share a common logic which is isolated here.
*
* Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
* successful.
*
* Locking: The internal device_opp and opp structures are RCU protected.
@@ -1151,7 +1158,7 @@ unlock:
* mutex locking or synchronize_rcu() blocking calls cannot be used.
*
* Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
* successful.
*/
int dev_pm_opp_enable(struct device *dev, unsigned long freq)
@@ -1177,7 +1184,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
* mutex locking or synchronize_rcu() blocking calls cannot be used.
*
* Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the
- * copy operation, returns 0 if no modifcation was done OR modification was
+ * copy operation, returns 0 if no modification was done OR modification was
* successful.
*/
int dev_pm_opp_disable(struct device *dev, unsigned long freq)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f9889b6bc02c..674f800a3b57 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1486,17 +1486,16 @@ static void loop_handle_cmd(struct loop_cmd *cmd)
{
const bool write = cmd->rq->cmd_flags & REQ_WRITE;
struct loop_device *lo = cmd->rq->q->queuedata;
- int ret = -EIO;
+ int ret = 0;
- if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY))
+ if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
+ ret = -EIO;
goto failed;
+ }
ret = do_req_filebacked(lo, cmd->rq);
-
failed:
- if (ret)
- cmd->rq->errors = -EIO;
- blk_mq_complete_request(cmd->rq);
+ blk_mq_complete_request(cmd->rq, ret ? -EIO : 0);
}
static void loop_queue_write_work(struct work_struct *work)
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 17269a3b85f2..1c9e4fe5aa44 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -289,7 +289,7 @@ static inline void null_handle_cmd(struct nullb_cmd *cmd)
case NULL_IRQ_SOFTIRQ:
switch (queue_mode) {
case NULL_Q_MQ:
- blk_mq_complete_request(cmd->rq);
+ blk_mq_complete_request(cmd->rq, cmd->rq->errors);
break;
case NULL_Q_RQ:
blk_complete_request(cmd->rq);
@@ -406,6 +406,22 @@ static struct blk_mq_ops null_mq_ops = {
.complete = null_softirq_done_fn,
};
+static void cleanup_queue(struct nullb_queue *nq)
+{
+ kfree(nq->tag_map);
+ kfree(nq->cmds);
+}
+
+static void cleanup_queues(struct nullb *nullb)
+{
+ int i;
+
+ for (i = 0; i < nullb->nr_queues; i++)
+ cleanup_queue(&nullb->queues[i]);
+
+ kfree(nullb->queues);
+}
+
static void null_del_dev(struct nullb *nullb)
{
list_del_init(&nullb->list);
@@ -415,6 +431,7 @@ static void null_del_dev(struct nullb *nullb)
if (queue_mode == NULL_Q_MQ)
blk_mq_free_tag_set(&nullb->tag_set);
put_disk(nullb->disk);
+ cleanup_queues(nullb);
kfree(nullb);
}
@@ -459,22 +476,6 @@ static int setup_commands(struct nullb_queue *nq)
return 0;
}
-static void cleanup_queue(struct nullb_queue *nq)
-{
- kfree(nq->tag_map);
- kfree(nq->cmds);
-}
-
-static void cleanup_queues(struct nullb *nullb)
-{
- int i;
-
- for (i = 0; i < nullb->nr_queues; i++)
- cleanup_queue(&nullb->queues[i]);
-
- kfree(nullb->queues);
-}
-
static int setup_queues(struct nullb *nullb)
{
nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
@@ -588,8 +589,7 @@ static int null_add_dev(void)
blk_queue_physical_block_size(nullb->q, bs);
size = gb * 1024 * 1024 * 1024ULL;
- sector_div(size, bs);
- set_capacity(disk, size);
+ set_capacity(disk, size >> 9);
disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO;
disk->major = null_major;
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index b97fc3fe0916..6f04771f1019 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -618,16 +618,15 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
spin_unlock_irqrestore(req->q->queue_lock, flags);
return;
}
+
if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
if (cmd_rq->ctx == CMD_CTX_CANCELLED)
- req->errors = -EINTR;
- else
- req->errors = status;
+ status = -EINTR;
} else {
- req->errors = nvme_error_status(status);
+ status = nvme_error_status(status);
}
- } else
- req->errors = 0;
+ }
+
if (req->cmd_type == REQ_TYPE_DRV_PRIV) {
u32 result = le32_to_cpup(&cqe->result);
req->special = (void *)(uintptr_t)result;
@@ -650,7 +649,7 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
}
nvme_free_iod(nvmeq->dev, iod);
- blk_mq_complete_request(req);
+ blk_mq_complete_request(req, status);
}
/* length is in bytes. gfp flags indicates whether we may sleep. */
@@ -863,8 +862,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ns && ns->ms && !blk_integrity_rq(req)) {
if (!(ns->pi_type && ns->ms == 8) &&
req->cmd_type != REQ_TYPE_DRV_PRIV) {
- req->errors = -EFAULT;
- blk_mq_complete_request(req);
+ blk_mq_complete_request(req, -EFAULT);
return BLK_MQ_RQ_QUEUE_OK;
}
}
@@ -2439,6 +2437,22 @@ static void nvme_scan_namespaces(struct nvme_dev *dev, unsigned nn)
list_sort(NULL, &dev->namespaces, ns_cmp);
}
+static void nvme_set_irq_hints(struct nvme_dev *dev)
+{
+ struct nvme_queue *nvmeq;
+ int i;
+
+ for (i = 0; i < dev->online_queues; i++) {
+ nvmeq = dev->queues[i];
+
+ if (!nvmeq->tags || !(*nvmeq->tags))
+ continue;
+
+ irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
+ blk_mq_tags_cpumask(*nvmeq->tags));
+ }
+}
+
static void nvme_dev_scan(struct work_struct *work)
{
struct nvme_dev *dev = container_of(work, struct nvme_dev, scan_work);
@@ -2450,6 +2464,7 @@ static void nvme_dev_scan(struct work_struct *work)
return;
nvme_scan_namespaces(dev, le32_to_cpup(&ctrl->nn));
kfree(ctrl);
+ nvme_set_irq_hints(dev);
}
/*
@@ -2953,22 +2968,6 @@ static const struct file_operations nvme_dev_fops = {
.compat_ioctl = nvme_dev_ioctl,
};
-static void nvme_set_irq_hints(struct nvme_dev *dev)
-{
- struct nvme_queue *nvmeq;
- int i;
-
- for (i = 0; i < dev->online_queues; i++) {
- nvmeq = dev->queues[i];
-
- if (!nvmeq->tags || !(*nvmeq->tags))
- continue;
-
- irq_set_affinity_hint(dev->entry[nvmeq->cq_vector].vector,
- blk_mq_tags_cpumask(*nvmeq->tags));
- }
-}
-
static int nvme_dev_start(struct nvme_dev *dev)
{
int result;
@@ -3010,8 +3009,6 @@ static int nvme_dev_start(struct nvme_dev *dev)
if (result)
goto free_tags;
- nvme_set_irq_hints(dev);
-
dev->event_limit = 1;
return result;
@@ -3062,7 +3059,6 @@ static int nvme_dev_resume(struct nvme_dev *dev)
} else {
nvme_unfreeze_queues(dev);
nvme_dev_add(dev);
- nvme_set_irq_hints(dev);
}
return 0;
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index e93899cc6f60..6ca35495a5be 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -144,7 +144,7 @@ static void virtblk_done(struct virtqueue *vq)
do {
virtqueue_disable_cb(vq);
while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
- blk_mq_complete_request(vbr->req);
+ blk_mq_complete_request(vbr->req, vbr->req->errors);
req_done = true;
}
if (unlikely(virtqueue_is_broken(vq)))
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index deb3f001791f..767657565de6 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -212,6 +212,9 @@ static int xen_blkif_map(struct xen_blkif *blkif, grant_ref_t *gref,
static int xen_blkif_disconnect(struct xen_blkif *blkif)
{
+ struct pending_req *req, *n;
+ int i = 0, j;
+
if (blkif->xenblkd) {
kthread_stop(blkif->xenblkd);
wake_up(&blkif->shutdown_wq);
@@ -238,13 +241,28 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
/* Remove all persistent grants and the cache of ballooned pages. */
xen_blkbk_free_caches(blkif);
+ /* Check that there is no request in use */
+ list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
+ list_del(&req->free_list);
+
+ for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
+ kfree(req->segments[j]);
+
+ for (j = 0; j < MAX_INDIRECT_PAGES; j++)
+ kfree(req->indirect_pages[j]);
+
+ kfree(req);
+ i++;
+ }
+
+ WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
+ blkif->nr_ring_pages = 0;
+
return 0;
}
static void xen_blkif_free(struct xen_blkif *blkif)
{
- struct pending_req *req, *n;
- int i = 0, j;
xen_blkif_disconnect(blkif);
xen_vbd_free(&blkif->vbd);
@@ -257,22 +275,6 @@ static void xen_blkif_free(struct xen_blkif *blkif)
BUG_ON(!list_empty(&blkif->free_pages));
BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
- /* Check that there is no request in use */
- list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
- list_del(&req->free_list);
-
- for (j = 0; j < MAX_INDIRECT_SEGMENTS; j++)
- kfree(req->segments[j]);
-
- for (j = 0; j < MAX_INDIRECT_PAGES; j++)
- kfree(req->indirect_pages[j]);
-
- kfree(req);
- i++;
- }
-
- WARN_ON(i != (XEN_BLKIF_REQS_PER_PAGE * blkif->nr_ring_pages));
-
kmem_cache_free(xen_blkif_cachep, blkif);
}
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 0823a96902f8..611170896b8c 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1142,6 +1142,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
RING_IDX i, rp;
unsigned long flags;
struct blkfront_info *info = (struct blkfront_info *)dev_id;
+ int error;
spin_lock_irqsave(&info->io_lock, flags);
@@ -1182,37 +1183,37 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
continue;
}
- req->errors = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
+ error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
switch (bret->operation) {
case BLKIF_OP_DISCARD:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
struct request_queue *rq = info->rq;
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- req->errors = -EOPNOTSUPP;
+ error = -EOPNOTSUPP;
info->feature_discard = 0;
info->feature_secdiscard = 0;
queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
}
- blk_mq_complete_request(req);
+ blk_mq_complete_request(req, error);
break;
case BLKIF_OP_FLUSH_DISKCACHE:
case BLKIF_OP_WRITE_BARRIER:
if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
printk(KERN_WARNING "blkfront: %s: %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- req->errors = -EOPNOTSUPP;
+ error = -EOPNOTSUPP;
}
if (unlikely(bret->status == BLKIF_RSP_ERROR &&
info->shadow[id].req.u.rw.nr_segments == 0)) {
printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
info->gd->disk_name, op_name(bret->operation));
- req->errors = -EOPNOTSUPP;
+ error = -EOPNOTSUPP;
}
- if (unlikely(req->errors)) {
- if (req->errors == -EOPNOTSUPP)
- req->errors = 0;
+ if (unlikely(error)) {
+ if (error == -EOPNOTSUPP)
+ error = 0;
info->feature_flush = 0;
xlvbd_flush(info);
}
@@ -1223,7 +1224,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
"request: %x\n", bret->status);
- blk_mq_complete_request(req);
+ blk_mq_complete_request(req, error);
break;
default:
BUG();
diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 965d1afb0eaa..5cb13ca3a3ac 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -330,12 +330,14 @@ void zcomp_destroy(struct zcomp *comp)
* allocate new zcomp and initialize it. return compressing
* backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
* if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
- * case of allocation error.
+ * case of allocation error, or any other error potentially
+ * returned by functions zcomp_strm_{multi,single}_create.
*/
struct zcomp *zcomp_create(const char *compress, int max_strm)
{
struct zcomp *comp;
struct zcomp_backend *backend;
+ int error;
backend = find_backend(compress);
if (!backend)
@@ -347,12 +349,12 @@ struct zcomp *zcomp_create(const char *compress, int max_strm)
comp->backend = backend;
if (max_strm > 1)
- zcomp_strm_multi_create(comp, max_strm);
+ error = zcomp_strm_multi_create(comp, max_strm);
else
- zcomp_strm_single_create(comp);
- if (!comp->stream) {
+ error = zcomp_strm_single_create(comp);
+ if (error) {
kfree(comp);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(error);
}
return comp;
}
diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c
index c37cf754a985..3c77645405e5 100644
--- a/drivers/char/hw_random/xgene-rng.c
+++ b/drivers/char/hw_random/xgene-rng.c
@@ -344,11 +344,12 @@ static int xgene_rng_probe(struct platform_device *pdev)
if (IS_ERR(ctx->csr_base))
return PTR_ERR(ctx->csr_base);
- ctx->irq = platform_get_irq(pdev, 0);
- if (ctx->irq < 0) {
+ rc = platform_get_irq(pdev, 0);
+ if (rc < 0) {
dev_err(&pdev->dev, "No IRQ resource\n");
- return ctx->irq;
+ return rc;
}
+ ctx->irq = rc;
dev_dbg(&pdev->dev, "APM X-Gene RNG BASE %p ALARM IRQ %d",
ctx->csr_base, ctx->irq);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 43e2c3ad6c31..0ebcf449778a 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2437,7 +2437,8 @@ static int __clk_init(struct device *dev, struct clk *clk_user)
hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
if (orphan->num_parents && orphan->ops->get_parent) {
i = orphan->ops->get_parent(orphan->hw);
- if (!strcmp(core->name, orphan->parent_names[i]))
+ if (i >= 0 && i < orphan->num_parents &&
+ !strcmp(core->name, orphan->parent_names[i]))
clk_core_reparent(orphan, core);
continue;
}
diff --git a/drivers/clk/h8300/clk-h8s2678.c b/drivers/clk/h8300/clk-h8s2678.c
index 2a38eb4a2552..6cf38dc1c929 100644
--- a/drivers/clk/h8300/clk-h8s2678.c
+++ b/drivers/clk/h8300/clk-h8s2678.c
@@ -8,6 +8,7 @@
#include <linux/err.h>
#include <linux/device.h>
#include <linux/of_address.h>
+#include <linux/slab.h>
static DEFINE_SPINLOCK(clklock);
diff --git a/drivers/clk/hisilicon/Kconfig b/drivers/clk/hisilicon/Kconfig
index 2c16807341dc..e43485448612 100644
--- a/drivers/clk/hisilicon/Kconfig
+++ b/drivers/clk/hisilicon/Kconfig
@@ -1,6 +1,12 @@
config COMMON_CLK_HI6220
bool "Hi6220 Clock Driver"
- depends on (ARCH_HISI || COMPILE_TEST) && MAILBOX
+ depends on ARCH_HISI || COMPILE_TEST
default ARCH_HISI
help
Build the Hisilicon Hi6220 clock driver based on the common clock framework.
+
+config STUB_CLK_HI6220
+ bool "Hi6220 Stub Clock Driver"
+ depends on COMMON_CLK_HI6220 && MAILBOX
+ help
+ Build the Hisilicon Hi6220 stub clock driver.
diff --git a/drivers/clk/hisilicon/Makefile b/drivers/clk/hisilicon/Makefile
index 4a1001a11f04..74dba31590f9 100644
--- a/drivers/clk/hisilicon/Makefile
+++ b/drivers/clk/hisilicon/Makefile
@@ -7,4 +7,5 @@ obj-y += clk.o clkgate-separated.o clkdivider-hi6220.o
obj-$(CONFIG_ARCH_HI3xxx) += clk-hi3620.o
obj-$(CONFIG_ARCH_HIP04) += clk-hip04.o
obj-$(CONFIG_ARCH_HIX5HD2) += clk-hix5hd2.o
-obj-$(CONFIG_COMMON_CLK_HI6220) += clk-hi6220.o clk-hi6220-stub.o
+obj-$(CONFIG_COMMON_CLK_HI6220) += clk-hi6220.o
+obj-$(CONFIG_STUB_CLK_HI6220) += clk-hi6220-stub.o
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index ed02bbc7b11f..abb47608713b 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -716,6 +716,8 @@ static const char *const rk3188_critical_clocks[] __initconst = {
"aclk_cpu",
"aclk_peri",
"hclk_peri",
+ "pclk_cpu",
+ "pclk_peri",
};
static void __init rk3188_common_clk_init(struct device_node *np)
@@ -744,8 +746,6 @@ static void __init rk3188_common_clk_init(struct device_node *np)
rockchip_clk_register_branches(common_clk_branches,
ARRAY_SIZE(common_clk_branches));
- rockchip_clk_protect_critical(rk3188_critical_clocks,
- ARRAY_SIZE(rk3188_critical_clocks));
rockchip_register_softrst(np, 9, reg_base + RK2928_SOFTRST_CON(0),
ROCKCHIP_SOFTRST_HIWORD_MASK);
@@ -765,6 +765,8 @@ static void __init rk3066a_clk_init(struct device_node *np)
mux_armclk_p, ARRAY_SIZE(mux_armclk_p),
&rk3066_cpuclk_data, rk3066_cpuclk_rates,
ARRAY_SIZE(rk3066_cpuclk_rates));
+ rockchip_clk_protect_critical(rk3188_critical_clocks,
+ ARRAY_SIZE(rk3188_critical_clocks));
}
CLK_OF_DECLARE(rk3066a_cru, "rockchip,rk3066a-cru", rk3066a_clk_init);
@@ -801,6 +803,9 @@ static void __init rk3188a_clk_init(struct device_node *np)
pr_warn("%s: missing clocks to reparent aclk_cpu_pre to gpll\n",
__func__);
}
+
+ rockchip_clk_protect_critical(rk3188_critical_clocks,
+ ARRAY_SIZE(rk3188_critical_clocks));
}
CLK_OF_DECLARE(rk3188a_cru, "rockchip,rk3188a-cru", rk3188a_clk_init);
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index 9c5d61e698ef..7e6b783e6eee 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -818,6 +818,10 @@ static struct rockchip_clk_branch rk3368_clk_branches[] __initdata = {
GATE(0, "sclk_timer00", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 0, GFLAGS),
};
+static const char *const rk3368_critical_clocks[] __initconst = {
+ "pclk_pd_pmu",
+};
+
static void __init rk3368_clk_init(struct device_node *np)
{
void __iomem *reg_base;
@@ -862,6 +866,8 @@ static void __init rk3368_clk_init(struct device_node *np)
RK3368_GRF_SOC_STATUS0);
rockchip_clk_register_branches(rk3368_clk_branches,
ARRAY_SIZE(rk3368_clk_branches));
+ rockchip_clk_protect_critical(rk3368_critical_clocks,
+ ARRAY_SIZE(rk3368_critical_clocks));
rockchip_clk_register_armclk(ARMCLKB, "armclkb",
mux_armclkb_p, ARRAY_SIZE(mux_armclkb_p),
diff --git a/drivers/clk/shmobile/clk-mstp.c b/drivers/clk/shmobile/clk-mstp.c
index b1df7b2f1e97..a0a56e99882a 100644
--- a/drivers/clk/shmobile/clk-mstp.c
+++ b/drivers/clk/shmobile/clk-mstp.c
@@ -259,6 +259,10 @@ int cpg_mstp_attach_dev(struct generic_pm_domain *domain, struct device *dev)
"renesas,cpg-mstp-clocks"))
goto found;
+ /* BSC on r8a73a4/sh73a0 uses zb_clk instead of an mstp clock */
+ if (!strcmp(clkspec.np->name, "zb_clk"))
+ goto found;
+
of_node_put(clkspec.np);
i++;
}
diff --git a/drivers/clk/st/clkgen-fsyn.c b/drivers/clk/st/clkgen-fsyn.c
index 83ccf142ff2a..576cd0354d48 100644
--- a/drivers/clk/st/clkgen-fsyn.c
+++ b/drivers/clk/st/clkgen-fsyn.c
@@ -307,7 +307,7 @@ static const struct clkgen_quadfs_data st_fs660c32_F_416 = {
.get_rate = clk_fs660c32_dig_get_rate,
};
-static const struct clkgen_quadfs_data st_fs660c32_C_407 = {
+static const struct clkgen_quadfs_data st_fs660c32_C = {
.nrst_present = true,
.nrst = { CLKGEN_FIELD(0x2f0, 0x1, 0),
CLKGEN_FIELD(0x2f0, 0x1, 1),
@@ -350,7 +350,7 @@ static const struct clkgen_quadfs_data st_fs660c32_C_407 = {
.get_rate = clk_fs660c32_dig_get_rate,
};
-static const struct clkgen_quadfs_data st_fs660c32_D_407 = {
+static const struct clkgen_quadfs_data st_fs660c32_D = {
.nrst_present = true,
.nrst = { CLKGEN_FIELD(0x2a0, 0x1, 0),
CLKGEN_FIELD(0x2a0, 0x1, 1),
@@ -1077,11 +1077,11 @@ static const struct of_device_id quadfs_of_match[] = {
},
{
.compatible = "st,stih407-quadfs660-C",
- .data = &st_fs660c32_C_407
+ .data = &st_fs660c32_C
},
{
.compatible = "st,stih407-quadfs660-D",
- .data = &st_fs660c32_D_407
+ .data = &st_fs660c32_D
},
{}
};
diff --git a/drivers/clk/st/clkgen-pll.c b/drivers/clk/st/clkgen-pll.c
index 47a38a994cac..b2a332cf8985 100644
--- a/drivers/clk/st/clkgen-pll.c
+++ b/drivers/clk/st/clkgen-pll.c
@@ -193,7 +193,7 @@ static const struct clkgen_pll_data st_pll3200c32_407_a0 = {
.ops = &stm_pll3200c32_ops,
};
-static const struct clkgen_pll_data st_pll3200c32_407_c0_0 = {
+static const struct clkgen_pll_data st_pll3200c32_cx_0 = {
/* 407 C0 PLL0 */
.pdn_status = CLKGEN_FIELD(0x2a0, 0x1, 8),
.locked_status = CLKGEN_FIELD(0x2a0, 0x1, 24),
@@ -205,7 +205,7 @@ static const struct clkgen_pll_data st_pll3200c32_407_c0_0 = {
.ops = &stm_pll3200c32_ops,
};
-static const struct clkgen_pll_data st_pll3200c32_407_c0_1 = {
+static const struct clkgen_pll_data st_pll3200c32_cx_1 = {
/* 407 C0 PLL1 */
.pdn_status = CLKGEN_FIELD(0x2c8, 0x1, 8),
.locked_status = CLKGEN_FIELD(0x2c8, 0x1, 24),
@@ -624,12 +624,12 @@ static const struct of_device_id c32_pll_of_match[] = {
.data = &st_pll3200c32_407_a0,
},
{
- .compatible = "st,stih407-plls-c32-c0_0",
- .data = &st_pll3200c32_407_c0_0,
+ .compatible = "st,plls-c32-cx_0",
+ .data = &st_pll3200c32_cx_0,
},
{
- .compatible = "st,stih407-plls-c32-c0_1",
- .data = &st_pll3200c32_407_c0_1,
+ .compatible = "st,plls-c32-cx_1",
+ .data = &st_pll3200c32_cx_1,
},
{
.compatible = "st,stih407-plls-c32-a9",
diff --git a/drivers/clk/tegra/clk-dfll.c b/drivers/clk/tegra/clk-dfll.c
index c2ff859ee0e8..c4e3a52e225b 100644
--- a/drivers/clk/tegra/clk-dfll.c
+++ b/drivers/clk/tegra/clk-dfll.c
@@ -682,11 +682,17 @@ static int find_lut_index_for_rate(struct tegra_dfll *td, unsigned long rate)
struct dev_pm_opp *opp;
int i, uv;
+ rcu_read_lock();
+
opp = dev_pm_opp_find_freq_ceil(td->soc->dev, &rate);
- if (IS_ERR(opp))
+ if (IS_ERR(opp)) {
+ rcu_read_unlock();
return PTR_ERR(opp);
+ }
uv = dev_pm_opp_get_voltage(opp);
+ rcu_read_unlock();
+
for (i = 0; i < td->i2c_lut_size; i++) {
if (regulator_list_voltage(td->vdd_reg, td->i2c_lut[i]) == uv)
return i;
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c
index bb2c2b050964..d3c1742ded1a 100644
--- a/drivers/clocksource/rockchip_timer.c
+++ b/drivers/clocksource/rockchip_timer.c
@@ -148,7 +148,7 @@ static void __init rk_timer_init(struct device_node *np)
bc_timer.freq = clk_get_rate(timer_clk);
irq = irq_of_parse_and_map(np, 0);
- if (irq == NO_IRQ) {
+ if (!irq) {
pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME);
return;
}
diff --git a/drivers/clocksource/timer-keystone.c b/drivers/clocksource/timer-keystone.c
index edacf3902e10..1cea08cf603e 100644
--- a/drivers/clocksource/timer-keystone.c
+++ b/drivers/clocksource/timer-keystone.c
@@ -152,7 +152,7 @@ static void __init keystone_timer_init(struct device_node *np)
int irq, error;
irq = irq_of_parse_and_map(np, 0);
- if (irq == NO_IRQ) {
+ if (!irq) {
pr_err("%s: failed to map interrupts\n", __func__);
return;
}
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 15b921a9248c..798277227de7 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -375,12 +375,11 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);
- policy = cpufreq_cpu_get(cpu);
+ policy = cpufreq_cpu_get_raw(cpu);
if (unlikely(!policy))
return 0;
data = policy->driver_data;
- cpufreq_cpu_put(policy);
if (unlikely(!data || !data->freq_table))
return 0;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6633b3fa996e..ef5ed9470de9 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -238,13 +238,13 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
-/* Only for cpufreq core internal use */
-static struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
+struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
{
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
}
+EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
unsigned int cpufreq_generic_get(unsigned int cpu)
{
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 07bc7aa6b224..d234719065a5 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -461,7 +461,7 @@ config CRYPTO_DEV_QCE
config CRYPTO_DEV_VMX
bool "Support for VMX cryptographic acceleration instructions"
- depends on PPC64
+ depends on PPC64 && VSX
help
Support for VMX cryptographic acceleration instructions.
diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
index b60698b30d30..bc2a55bc35e4 100644
--- a/drivers/crypto/marvell/cesa.h
+++ b/drivers/crypto/marvell/cesa.h
@@ -687,6 +687,33 @@ static inline u32 mv_cesa_get_int_mask(struct mv_cesa_engine *engine)
int mv_cesa_queue_req(struct crypto_async_request *req);
+/*
+ * Helper function that indicates whether a crypto request needs to be
+ * cleaned up or not after being enqueued using mv_cesa_queue_req().
+ */
+static inline int mv_cesa_req_needs_cleanup(struct crypto_async_request *req,
+ int ret)
+{
+ /*
+ * The queue still had some space, the request was queued
+ * normally, so there's no need to clean it up.
+ */
+ if (ret == -EINPROGRESS)
+ return false;
+
+ /*
+ * The queue had no space left, but since the request is
+ * flagged with CRYPTO_TFM_REQ_MAY_BACKLOG, it was added to
+ * the backlog and will be processed later. There's no need to
+ * clean it up.
+ */
+ if (ret == -EBUSY && req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ return false;
+
+ /* Request wasn't queued, we need to clean it up */
+ return true;
+}
+
/* TDMA functions */
static inline void mv_cesa_req_dma_iter_init(struct mv_cesa_dma_iter *iter,
diff --git a/drivers/crypto/marvell/cipher.c b/drivers/crypto/marvell/cipher.c
index 0745cf3b9c0e..3df2f4e7adb2 100644
--- a/drivers/crypto/marvell/cipher.c
+++ b/drivers/crypto/marvell/cipher.c
@@ -189,7 +189,6 @@ static inline void mv_cesa_ablkcipher_prepare(struct crypto_async_request *req,
{
struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(ablkreq);
-
creq->req.base.engine = engine;
if (creq->req.base.type == CESA_DMA_REQ)
@@ -431,7 +430,7 @@ static int mv_cesa_des_op(struct ablkcipher_request *req,
return ret;
ret = mv_cesa_queue_req(&req->base);
- if (ret && ret != -EINPROGRESS)
+ if (mv_cesa_req_needs_cleanup(&req->base, ret))
mv_cesa_ablkcipher_cleanup(req);
return ret;
@@ -551,7 +550,7 @@ static int mv_cesa_des3_op(struct ablkcipher_request *req,
return ret;
ret = mv_cesa_queue_req(&req->base);
- if (ret && ret != -EINPROGRESS)
+ if (mv_cesa_req_needs_cleanup(&req->base, ret))
mv_cesa_ablkcipher_cleanup(req);
return ret;
@@ -693,7 +692,7 @@ static int mv_cesa_aes_op(struct ablkcipher_request *req,
return ret;
ret = mv_cesa_queue_req(&req->base);
- if (ret && ret != -EINPROGRESS)
+ if (mv_cesa_req_needs_cleanup(&req->base, ret))
mv_cesa_ablkcipher_cleanup(req);
return ret;
diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
index ae9272eb9c1a..e8d0d7128137 100644
--- a/drivers/crypto/marvell/hash.c
+++ b/drivers/crypto/marvell/hash.c
@@ -739,10 +739,8 @@ static int mv_cesa_ahash_update(struct ahash_request *req)
return 0;
ret = mv_cesa_queue_req(&req->base);
- if (ret && ret != -EINPROGRESS) {
+ if (mv_cesa_req_needs_cleanup(&req->base, ret))
mv_cesa_ahash_cleanup(req);
- return ret;
- }
return ret;
}
@@ -766,7 +764,7 @@ static int mv_cesa_ahash_final(struct ahash_request *req)
return 0;
ret = mv_cesa_queue_req(&req->base);
- if (ret && ret != -EINPROGRESS)
+ if (mv_cesa_req_needs_cleanup(&req->base, ret))
mv_cesa_ahash_cleanup(req);
return ret;
@@ -791,7 +789,7 @@ static int mv_cesa_ahash_finup(struct ahash_request *req)
return 0;
ret = mv_cesa_queue_req(&req->base);
- if (ret && ret != -EINPROGRESS)
+ if (mv_cesa_req_needs_cleanup(&req->base, ret))
mv_cesa_ahash_cleanup(req);
return ret;
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index a57b4194de28..0a5ca0ba5d64 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -88,6 +88,9 @@ static void adf_dev_restore(struct adf_accel_dev *accel_dev)
struct pci_dev *parent = pdev->bus->self;
uint16_t bridge_ctl = 0;
+ if (accel_dev->is_vf)
+ return;
+
dev_info(&GET_DEV(accel_dev), "Resetting device qat_dev%d\n",
accel_dev->accel_id);
diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
index e070c316e8b7..a19ee127edca 100644
--- a/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
+++ b/drivers/crypto/sunxi-ss/sun4i-ss-cipher.c
@@ -104,7 +104,7 @@ static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
sg_miter_next(&mo);
oo = 0;
}
- } while (mo.length > 0);
+ } while (oleft > 0);
if (areq->info) {
for (i = 0; i < 4 && i < ivsize / 4; i++) {
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index ca1b362d77e2..3927ed9fdbd5 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -53,7 +53,7 @@ static struct devfreq *find_device_devfreq(struct device *dev)
{
struct devfreq *tmp_devfreq;
- if (unlikely(IS_ERR_OR_NULL(dev))) {
+ if (IS_ERR_OR_NULL(dev)) {
pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
return ERR_PTR(-EINVAL);
}
@@ -133,7 +133,7 @@ static struct devfreq_governor *find_devfreq_governor(const char *name)
{
struct devfreq_governor *tmp_governor;
- if (unlikely(IS_ERR_OR_NULL(name))) {
+ if (IS_ERR_OR_NULL(name)) {
pr_err("DEVFREQ: %s: Invalid parameters\n", __func__);
return ERR_PTR(-EINVAL);
}
@@ -177,10 +177,10 @@ int update_devfreq(struct devfreq *devfreq)
return err;
/*
- * Adjust the freuqency with user freq and QoS.
+ * Adjust the frequency with user freq and QoS.
*
- * List from the highest proiority
- * max_freq (probably called by thermal when it's too hot)
+ * List from the highest priority
+ * max_freq
* min_freq
*/
@@ -482,7 +482,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->profile->max_state *
devfreq->profile->max_state,
GFP_KERNEL);
- devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned int) *
+ devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) *
devfreq->profile->max_state,
GFP_KERNEL);
devfreq->last_stat_updated = jiffies;
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index f9901f52a225..f312485f1451 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -319,7 +319,8 @@ static int exynos_ppmu_v2_get_event(struct devfreq_event_dev *edev,
case PPMU_PMNCNT3:
pmcnt_high = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_HIGH);
pmcnt_low = __raw_readl(info->ppmu.base + PPMU_V2_PMCNT3_LOW);
- load_count = (u64)((pmcnt_high & 0xff) << 32) + (u64)pmcnt_low;
+ load_count = ((u64)((pmcnt_high & 0xff)) << 32)
+ + (u64)pmcnt_low;
break;
}
edata->load_count = load_count;
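
The point of the rewritten expression is that the cast to u64 now happens before the 32-bit shift; in the old form the shift was evaluated in 32-bit arithmetic, so the high byte could never reach the result. A standalone sketch with made-up counter halves:

#include <stdio.h>
#include <stdint.h>

/*
 * The cast must precede the shift: (u64)((hi & 0xff) << 32) shifts a
 * 32-bit value by 32 before the cast, discarding the high half.
 */
int main(void)
{
	uint32_t pmcnt_high = 0x12, pmcnt_low = 0x89abcdef;
	uint64_t load_count = ((uint64_t)(pmcnt_high & 0xff) << 32)
			    + (uint64_t)pmcnt_low;

	printf("%#llx\n", (unsigned long long)load_count); /* 0x1289abcdef */
	return 0;
}
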
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index 0720ba84ca92..ae72ba5e78df 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -21,17 +21,20 @@
static int devfreq_simple_ondemand_func(struct devfreq *df,
unsigned long *freq)
{
- struct devfreq_dev_status stat;
- int err = df->profile->get_dev_status(df->dev.parent, &stat);
+ int err;
+ struct devfreq_dev_status *stat;
unsigned long long a, b;
unsigned int dfso_upthreshold = DFSO_UPTHRESHOLD;
unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL;
struct devfreq_simple_ondemand_data *data = df->data;
unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX;
+ err = devfreq_update_stats(df);
if (err)
return err;
+ stat = &df->last_status;
+
if (data) {
if (data->upthreshold)
dfso_upthreshold = data->upthreshold;
@@ -43,41 +46,41 @@ static int devfreq_simple_ondemand_func(struct devfreq *df,
return -EINVAL;
/* Assume MAX if it is going to be divided by zero */
- if (stat.total_time == 0) {
+ if (stat->total_time == 0) {
*freq = max;
return 0;
}
/* Prevent overflow */
- if (stat.busy_time >= (1 << 24) || stat.total_time >= (1 << 24)) {
- stat.busy_time >>= 7;
- stat.total_time >>= 7;
+ if (stat->busy_time >= (1 << 24) || stat->total_time >= (1 << 24)) {
+ stat->busy_time >>= 7;
+ stat->total_time >>= 7;
}
/* Set MAX if it's busy enough */
- if (stat.busy_time * 100 >
- stat.total_time * dfso_upthreshold) {
+ if (stat->busy_time * 100 >
+ stat->total_time * dfso_upthreshold) {
*freq = max;
return 0;
}
/* Set MAX if we do not know the initial frequency */
- if (stat.current_frequency == 0) {
+ if (stat->current_frequency == 0) {
*freq = max;
return 0;
}
/* Keep the current frequency */
- if (stat.busy_time * 100 >
- stat.total_time * (dfso_upthreshold - dfso_downdifferential)) {
- *freq = stat.current_frequency;
+ if (stat->busy_time * 100 >
+ stat->total_time * (dfso_upthreshold - dfso_downdifferential)) {
+ *freq = stat->current_frequency;
return 0;
}
/* Set the desired frequency based on the load */
- a = stat.busy_time;
- a *= stat.current_frequency;
- b = div_u64(a, stat.total_time);
+ a = stat->busy_time;
+ a *= stat->current_frequency;
+ b = div_u64(a, stat->total_time);
b *= 100;
b = div_u64(b, (dfso_upthreshold - dfso_downdifferential / 2));
*freq = (unsigned long) b;
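
After the conversion to the cached status, the target calculation itself is unchanged: scale the current frequency by the measured load and divide by the threshold window. A plain C sketch of that arithmetic with made-up sample numbers (not kernel code):

#include <stdio.h>

/*
 * Mirror of the ondemand target formula:
 * target = busy/total * current * 100 / (upthreshold - downdifferential/2)
 */
static unsigned long long ondemand_target(unsigned long long busy_time,
					  unsigned long long total_time,
					  unsigned long long cur_freq,
					  unsigned int upthreshold,
					  unsigned int downdifferential)
{
	unsigned long long a = busy_time * cur_freq;
	unsigned long long b = (a / total_time) * 100;

	return b / (upthreshold - downdifferential / 2);
}

int main(void)
{
	/* 60% load at 400000 kHz, thresholds 90/5 -> target 272727 kHz */
	printf("%llu\n", ondemand_target(60, 100, 400000, 90, 5));
	return 0;
}
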
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index 13a1a6e8108c..848b93ee930f 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -541,18 +541,20 @@ static struct devfreq_dev_profile tegra_devfreq_profile = {
static int tegra_governor_get_target(struct devfreq *devfreq,
unsigned long *freq)
{
- struct devfreq_dev_status stat;
+ struct devfreq_dev_status *stat;
struct tegra_devfreq *tegra;
struct tegra_devfreq_device *dev;
unsigned long target_freq = 0;
unsigned int i;
int err;
- err = devfreq->profile->get_dev_status(devfreq->dev.parent, &stat);
+ err = devfreq_update_stats(devfreq);
if (err)
return err;
- tegra = stat.private_data;
+ stat = &devfreq->last_status;
+
+ tegra = stat->private_data;
for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
dev = &tegra->devices[i];
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index a165b4bfd330..dd24375b76dd 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -455,6 +455,15 @@ static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
return desc;
}
+void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
+{
+ memset(&desc->lld, 0, sizeof(desc->lld));
+ INIT_LIST_HEAD(&desc->descs_list);
+ desc->direction = DMA_TRANS_NONE;
+ desc->xfer_size = 0;
+ desc->active_xfer = false;
+}
+
/* Call must be protected by lock. */
static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
{
@@ -466,7 +475,7 @@ static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
desc = list_first_entry(&atchan->free_descs_list,
struct at_xdmac_desc, desc_node);
list_del(&desc->desc_node);
- desc->active_xfer = false;
+ at_xdmac_init_used_desc(desc);
}
return desc;
@@ -875,14 +884,14 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
if (xt->src_inc) {
if (xt->src_sgl)
- chan_cc |= AT_XDMAC_CC_SAM_UBS_DS_AM;
+ chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
else
chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
}
if (xt->dst_inc) {
if (xt->dst_sgl)
- chan_cc |= AT_XDMAC_CC_DAM_UBS_DS_AM;
+ chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
else
chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
}
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 3ff284c8e3d5..09479d4be4db 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -554,10 +554,18 @@ struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
mutex_lock(&dma_list_mutex);
if (chan->client_count == 0) {
+ struct dma_device *device = chan->device;
+
+ dma_cap_set(DMA_PRIVATE, device->cap_mask);
+ device->privatecnt++;
err = dma_chan_get(chan);
- if (err)
+ if (err) {
pr_debug("%s: failed to get %s: (%d)\n",
__func__, dma_chan_name(chan), err);
+ chan = NULL;
+ if (--device->privatecnt == 0)
+ dma_cap_clear(DMA_PRIVATE, device->cap_mask);
+ }
} else
chan = NULL;
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index cf1c87fa1edd..bedce038c6e2 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1591,7 +1591,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
INIT_LIST_HEAD(&dw->dma.channels);
for (i = 0; i < nr_channels; i++) {
struct dw_dma_chan *dwc = &dw->chan[i];
- int r = nr_channels - i - 1;
dwc->chan.device = &dw->dma;
dma_cookie_init(&dwc->chan);
@@ -1603,7 +1602,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* 7 is highest priority & 0 is lowest. */
if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
- dwc->priority = r;
+ dwc->priority = nr_channels - i - 1;
else
dwc->priority = i;
@@ -1622,6 +1621,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
/* Hardware configuration */
if (autocfg) {
unsigned int dwc_params;
+ unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
void __iomem *addr = chip->regs + r * sizeof(u32);
dwc_params = dma_read_byaddr(addr, DWC_PARAMS);
diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c
index 18c14e1f1414..48d6d9e94f67 100644
--- a/drivers/dma/idma64.c
+++ b/drivers/dma/idma64.c
@@ -355,23 +355,23 @@ static size_t idma64_active_desc_size(struct idma64_chan *idma64c)
struct idma64_desc *desc = idma64c->desc;
struct idma64_hw_desc *hw;
size_t bytes = desc->length;
- u64 llp;
- u32 ctlhi;
+ u64 llp = channel_readq(idma64c, LLP);
+ u32 ctlhi = channel_readl(idma64c, CTL_HI);
unsigned int i = 0;
- llp = channel_readq(idma64c, LLP);
do {
hw = &desc->hw[i];
- } while ((hw->llp != llp) && (++i < desc->ndesc));
+ if (hw->llp == llp)
+ break;
+ bytes -= hw->len;
+ } while (++i < desc->ndesc);
if (!i)
return bytes;
- do {
- bytes -= desc->hw[--i].len;
- } while (i);
+ /* The current chunk is not fully transferred yet */
+ bytes += desc->hw[--i].len;
- ctlhi = channel_readl(idma64c, CTL_HI);
return bytes - IDMA64C_CTLH_BLOCK_TS(ctlhi);
}
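
The reworked loop computes the residue as the total descriptor length, minus the chunks that have fully completed, minus the part of the in-flight chunk already reported by IDMA64C_CTLH_BLOCK_TS(). A rough userspace sketch of that accounting with made-up sizes; it does not model the LLP matching itself:

#include <stdio.h>
#include <stdint.h>

/*
 * chunks[] are the hardware descriptor lengths, 'cur' is the index of the
 * chunk the controller is working on, and 'done_in_cur' stands in for
 * IDMA64C_CTLH_BLOCK_TS(ctlhi).
 */
static size_t residue(const size_t *chunks, unsigned int nchunks,
		      unsigned int cur, size_t done_in_cur)
{
	size_t bytes = 0;
	unsigned int i;

	/* Total descriptor size ... */
	for (i = 0; i < nchunks; i++)
		bytes += chunks[i];

	/* ... minus the chunks completed before the current one ... */
	for (i = 0; i < cur; i++)
		bytes -= chunks[i];

	/* ... minus what has already moved out of the current chunk. */
	return bytes - done_in_cur;
}

int main(void)
{
	const size_t chunks[] = { 4096, 4096, 2048 };

	/* Working on chunk 1, 1024 bytes of it already transferred. */
	printf("%zu\n", residue(chunks, 3, 1, 1024));	/* prints 5120 */
	return 0;
}
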
diff --git a/drivers/dma/ipu/ipu_irq.c b/drivers/dma/ipu/ipu_irq.c
index 4768a829253a..2bf37e68ad0f 100644
--- a/drivers/dma/ipu/ipu_irq.c
+++ b/drivers/dma/ipu/ipu_irq.c
@@ -266,7 +266,7 @@ int ipu_irq_unmap(unsigned int source)
}
/* Chained IRQ handler for IPU function and error interrupt */
-static void ipu_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void ipu_irq_handler(struct irq_desc *desc)
{
struct ipu *ipu = irq_desc_get_handler_data(desc);
u32 status;
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 5cb61ce01036..fc4156afa070 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -473,8 +473,10 @@ static void pxad_free_phy(struct pxad_chan *chan)
return;
/* clear the channel mapping in DRCMR */
- reg = pxad_drcmr(chan->drcmr);
- writel_relaxed(0, chan->phy->base + reg);
+ if (chan->drcmr <= DRCMR_CHLNUM) {
+ reg = pxad_drcmr(chan->drcmr);
+ writel_relaxed(0, chan->phy->base + reg);
+ }
spin_lock_irqsave(&pdev->phy_lock, flags);
for (i = 0; i < 32; i++)
@@ -516,8 +518,10 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
"%s(); phy=%p(%d) misaligned=%d\n", __func__,
phy, phy->idx, misaligned);
- reg = pxad_drcmr(phy->vchan->drcmr);
- writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+ if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
+ reg = pxad_drcmr(phy->vchan->drcmr);
+ writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
+ }
dalgn = phy_readl_relaxed(phy, DALGN);
if (misaligned)
@@ -887,6 +891,7 @@ pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
struct dma_async_tx_descriptor *tx;
struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);
+ INIT_LIST_HEAD(&vd->node);
tx = vchan_tx_prep(vc, vd, tx_flags);
tx->tx_submit = pxad_tx_submit;
dev_dbg(&chan->vc.chan.dev->device,
@@ -910,14 +915,18 @@ static void pxad_get_config(struct pxad_chan *chan,
width = chan->cfg.src_addr_width;
dev_addr = chan->cfg.src_addr;
*dev_src = dev_addr;
- *dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC;
+ *dcmd |= PXA_DCMD_INCTRGADDR;
+ if (chan->drcmr <= DRCMR_CHLNUM)
+ *dcmd |= PXA_DCMD_FLOWSRC;
}
if (dir == DMA_MEM_TO_DEV) {
maxburst = chan->cfg.dst_maxburst;
width = chan->cfg.dst_addr_width;
dev_addr = chan->cfg.dst_addr;
*dev_dst = dev_addr;
- *dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG;
+ *dcmd |= PXA_DCMD_INCSRCADDR;
+ if (chan->drcmr <= DRCMR_CHLNUM)
+ *dcmd |= PXA_DCMD_FLOWTRG;
}
if (dir == DMA_MEM_TO_MEM)
*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
@@ -1177,6 +1186,16 @@ static unsigned int pxad_residue(struct pxad_chan *chan,
else
curr = phy_readl_relaxed(chan->phy, DTADR);
+ /*
+ * curr has to be actually read before checking descriptor
+ * completion, so that a curr inside a status updater
+ * descriptor implies the following test returns true, thus
+ * preventing reordering of the curr load and the test.
+ */
+ rmb();
+ if (is_desc_completed(vd))
+ goto out;
+
for (i = 0; i < sw_desc->nb_desc - 1; i++) {
hw_desc = sw_desc->hw_desc[i];
if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c
index a1a500d96ff2..1661d518224a 100644
--- a/drivers/dma/sun4i-dma.c
+++ b/drivers/dma/sun4i-dma.c
@@ -599,13 +599,13 @@ get_next_cyclic_promise(struct sun4i_dma_contract *contract)
static void sun4i_dma_free_contract(struct virt_dma_desc *vd)
{
struct sun4i_dma_contract *contract = to_sun4i_dma_contract(vd);
- struct sun4i_dma_promise *promise;
+ struct sun4i_dma_promise *promise, *tmp;
/* Free all the demands and completed demands */
- list_for_each_entry(promise, &contract->demands, list)
+ list_for_each_entry_safe(promise, tmp, &contract->demands, list)
kfree(promise);
- list_for_each_entry(promise, &contract->completed_demands, list)
+ list_for_each_entry_safe(promise, tmp, &contract->completed_demands, list)
kfree(promise);
kfree(contract);
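
The switch to the _safe iterator matters because the loop body frees the node it is standing on; the plain iterator would have to read the next pointer out of freed memory to advance. A minimal sketch of the pattern, with a stand-in struct name:

#include <linux/list.h>
#include <linux/slab.h>

/* 'struct promise' is only illustrative; any list node freed in-loop
 * needs the _safe variant, which caches the next entry in 'tmp'. */
struct promise {
	struct list_head list;
};

static void free_all(struct list_head *head)
{
	struct promise *p, *tmp;

	list_for_each_entry_safe(p, tmp, head, list)
		kfree(p);
}
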
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index b23e8d52d126..8d57b1b12e41 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -59,7 +59,6 @@
#define XGENE_DMA_RING_MEM_RAM_SHUTDOWN 0xD070
#define XGENE_DMA_RING_BLK_MEM_RDY 0xD074
#define XGENE_DMA_RING_BLK_MEM_RDY_VAL 0xFFFFFFFF
-#define XGENE_DMA_RING_DESC_CNT(v) (((v) & 0x0001FFFE) >> 1)
#define XGENE_DMA_RING_ID_GET(owner, num) (((owner) << 6) | (num))
#define XGENE_DMA_RING_DST_ID(v) ((1 << 10) | (v))
#define XGENE_DMA_RING_CMD_OFFSET 0x2C
@@ -379,14 +378,6 @@ static u8 xgene_dma_encode_xor_flyby(u32 src_cnt)
return flyby_type[src_cnt];
}
-static u32 xgene_dma_ring_desc_cnt(struct xgene_dma_ring *ring)
-{
- u32 __iomem *cmd_base = ring->cmd_base;
- u32 ring_state = ioread32(&cmd_base[1]);
-
- return XGENE_DMA_RING_DESC_CNT(ring_state);
-}
-
static void xgene_dma_set_src_buffer(__le64 *ext8, size_t *len,
dma_addr_t *paddr)
{
@@ -659,15 +650,12 @@ static void xgene_dma_clean_running_descriptor(struct xgene_dma_chan *chan,
dma_pool_free(chan->desc_pool, desc, desc->tx.phys);
}
-static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
- struct xgene_dma_desc_sw *desc_sw)
+static void xgene_chan_xfer_request(struct xgene_dma_chan *chan,
+ struct xgene_dma_desc_sw *desc_sw)
{
+ struct xgene_dma_ring *ring = &chan->tx_ring;
struct xgene_dma_desc_hw *desc_hw;
- /* Check if can push more descriptor to hw for execution */
- if (xgene_dma_ring_desc_cnt(ring) > (ring->slots - 2))
- return -EBUSY;
-
/* Get hw descriptor from DMA tx ring */
desc_hw = &ring->desc_hw[ring->head];
@@ -694,11 +682,13 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
memcpy(desc_hw, &desc_sw->desc2, sizeof(*desc_hw));
}
+ /* Increment the pending transaction count */
+ chan->pending += ((desc_sw->flags &
+ XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
+
/* Notify the hw that we have descriptor ready for execution */
iowrite32((desc_sw->flags & XGENE_DMA_FLAG_64B_DESC) ?
2 : 1, ring->cmd);
-
- return 0;
}
/**
@@ -710,7 +700,6 @@ static int xgene_chan_xfer_request(struct xgene_dma_ring *ring,
static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
{
struct xgene_dma_desc_sw *desc_sw, *_desc_sw;
- int ret;
/*
* If the list of pending descriptors is empty, then we
@@ -735,18 +724,13 @@ static void xgene_chan_xfer_ld_pending(struct xgene_dma_chan *chan)
if (chan->pending >= chan->max_outstanding)
return;
- ret = xgene_chan_xfer_request(&chan->tx_ring, desc_sw);
- if (ret)
- return;
+ xgene_chan_xfer_request(chan, desc_sw);
/*
* Delete this element from ld pending queue and append it to
* ld running queue
*/
list_move_tail(&desc_sw->node, &chan->ld_running);
-
- /* Increment the pending transaction count */
- chan->pending++;
}
}
@@ -821,7 +805,8 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
* Decrement the pending transaction count
* as we have processed one
*/
- chan->pending--;
+ chan->pending -= ((desc_sw->flags &
+ XGENE_DMA_FLAG_64B_DESC) ? 2 : 1);
/*
* Delete this node from ld running queue and append it to
@@ -1421,15 +1406,18 @@ static int xgene_dma_create_ring_one(struct xgene_dma_chan *chan,
struct xgene_dma_ring *ring,
enum xgene_dma_ring_cfgsize cfgsize)
{
+ int ret;
+
/* Setup DMA ring descriptor variables */
ring->pdma = chan->pdma;
ring->cfgsize = cfgsize;
ring->num = chan->pdma->ring_num++;
ring->id = XGENE_DMA_RING_ID_GET(ring->owner, ring->buf_num);
- ring->size = xgene_dma_get_ring_size(chan, cfgsize);
- if (ring->size <= 0)
- return ring->size;
+ ret = xgene_dma_get_ring_size(chan, cfgsize);
+ if (ret <= 0)
+ return ret;
+ ring->size = ret;
/* Allocate memory for DMA ring descriptor */
ring->desc_vaddr = dma_zalloc_coherent(chan->dev, ring->size,
@@ -1482,7 +1470,7 @@ static int xgene_dma_create_chan_rings(struct xgene_dma_chan *chan)
tx_ring->id, tx_ring->num, tx_ring->desc_vaddr);
/* Set the max outstanding request possible to this channel */
- chan->max_outstanding = rx_ring->slots;
+ chan->max_outstanding = tx_ring->slots;
return ret;
}
diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
index 39915a6b7986..c017fcd8e07c 100644
--- a/drivers/dma/zx296702_dma.c
+++ b/drivers/dma/zx296702_dma.c
@@ -739,7 +739,7 @@ static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
struct dma_chan *chan;
struct zx_dma_chan *c;
- if (request > d->dma_requests)
+ if (request >= d->dma_requests)
return NULL;
chan = dma_get_any_slave_channel(&d->slave);
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index a07addde297b..8dd0af1d50bc 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -159,7 +159,7 @@ static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
static bool is_extcon_changed(u32 prev, u32 new, int idx, bool *attached)
{
if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) {
- *attached = new ? true : false;
+ *attached = ((new >> idx) & 0x1) ? true : false;
return true;
}
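
The fix makes 'attached' track the bit at idx in the new state rather than whether the whole state word is non-zero, which went wrong whenever any other cable was still attached. A plain C sketch with made-up state words:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same check as above: report a change on bit 'idx' and whether that
 * particular cable is now attached. */
static bool cable_changed(uint32_t prev, uint32_t new, int idx, bool *attached)
{
	if (((prev >> idx) & 0x1) != ((new >> idx) & 0x1)) {
		*attached = (new >> idx) & 0x1;
		return true;
	}
	return false;
}

int main(void)
{
	bool attached;

	/* Bit 2 went from 1 to 0 while bit 0 stayed set: the cable at
	 * index 2 changed and is detached, even though new != 0. */
	if (cable_changed(0x5, 0x1, 2, &attached))
		printf("changed, attached=%d\n", attached);	/* attached=0 */
	return 0;
}
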
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index d8de6a8dd4de..665efca59487 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -139,6 +139,14 @@ config QCOM_SCM
bool
depends on ARM || ARM64
+config QCOM_SCM_32
+ def_bool y
+ depends on QCOM_SCM && ARM
+
+config QCOM_SCM_64
+ def_bool y
+ depends on QCOM_SCM && ARM64
+
source "drivers/firmware/broadcom/Kconfig"
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 000830fc6707..2ee83474a3c1 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -13,7 +13,8 @@ obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
obj-$(CONFIG_QCOM_SCM) += qcom_scm.o
-obj-$(CONFIG_QCOM_SCM) += qcom_scm-32.o
+obj-$(CONFIG_QCOM_SCM_64) += qcom_scm-64.o
+obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o
CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
obj-y += broadcom/
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index e29560e6b40b..950c87f5d279 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -13,6 +13,7 @@
*/
#include <linux/efi.h>
+#include <linux/sort.h>
#include <asm/efi.h>
#include "efistub.h"
@@ -305,6 +306,44 @@ fail:
*/
#define EFI_RT_VIRTUAL_BASE 0x40000000
+static int cmp_mem_desc(const void *l, const void *r)
+{
+ const efi_memory_desc_t *left = l, *right = r;
+
+ return (left->phys_addr > right->phys_addr) ? 1 : -1;
+}
+
+/*
+ * Returns whether region @left ends exactly where region @right starts,
+ * or false if either argument is NULL.
+ */
+static bool regions_are_adjacent(efi_memory_desc_t *left,
+ efi_memory_desc_t *right)
+{
+ u64 left_end;
+
+ if (left == NULL || right == NULL)
+ return false;
+
+ left_end = left->phys_addr + left->num_pages * EFI_PAGE_SIZE;
+
+ return left_end == right->phys_addr;
+}
+
+/*
+ * Returns whether region @left and region @right have compatible memory type
+ * mapping attributes, and are both EFI_MEMORY_RUNTIME regions.
+ */
+static bool regions_have_compatible_memory_type_attrs(efi_memory_desc_t *left,
+ efi_memory_desc_t *right)
+{
+ static const u64 mem_type_mask = EFI_MEMORY_WB | EFI_MEMORY_WT |
+ EFI_MEMORY_WC | EFI_MEMORY_UC |
+ EFI_MEMORY_RUNTIME;
+
+ return ((left->attribute ^ right->attribute) & mem_type_mask) == 0;
+}
+
/*
* efi_get_virtmap() - create a virtual mapping for the EFI memory map
*
@@ -317,33 +356,52 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
int *count)
{
u64 efi_virt_base = EFI_RT_VIRTUAL_BASE;
- efi_memory_desc_t *out = runtime_map;
+ efi_memory_desc_t *in, *prev = NULL, *out = runtime_map;
int l;
- for (l = 0; l < map_size; l += desc_size) {
- efi_memory_desc_t *in = (void *)memory_map + l;
+ /*
+ * To work around potential issues with the Properties Table feature
+ * introduced in UEFI 2.5, which may split PE/COFF executable images
+ * in memory into several RuntimeServicesCode and RuntimeServicesData
+ * regions, we need to preserve the relative offsets between adjacent
+ * EFI_MEMORY_RUNTIME regions with the same memory type attributes.
+ * The easiest way to find adjacent regions is to sort the memory map
+ * before traversing it.
+ */
+ sort(memory_map, map_size / desc_size, desc_size, cmp_mem_desc, NULL);
+
+ for (l = 0; l < map_size; l += desc_size, prev = in) {
u64 paddr, size;
+ in = (void *)memory_map + l;
if (!(in->attribute & EFI_MEMORY_RUNTIME))
continue;
+ paddr = in->phys_addr;
+ size = in->num_pages * EFI_PAGE_SIZE;
+
/*
* Make the mapping compatible with 64k pages: this allows
* a 4k page size kernel to kexec a 64k page size kernel and
* vice versa.
*/
- paddr = round_down(in->phys_addr, SZ_64K);
- size = round_up(in->num_pages * EFI_PAGE_SIZE +
- in->phys_addr - paddr, SZ_64K);
-
- /*
- * Avoid wasting memory on PTEs by choosing a virtual base that
- * is compatible with section mappings if this region has the
- * appropriate size and physical alignment. (Sections are 2 MB
- * on 4k granule kernels)
- */
- if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
- efi_virt_base = round_up(efi_virt_base, SZ_2M);
+ if (!regions_are_adjacent(prev, in) ||
+ !regions_have_compatible_memory_type_attrs(prev, in)) {
+
+ paddr = round_down(in->phys_addr, SZ_64K);
+ size += in->phys_addr - paddr;
+
+ /*
+ * Avoid wasting memory on PTEs by choosing a virtual
+ * base that is compatible with section mappings if this
+ * region has the appropriate size and physical
+ * alignment. (Sections are 2 MB on 4k granule kernels)
+ */
+ if (IS_ALIGNED(in->phys_addr, SZ_2M) && size >= SZ_2M)
+ efi_virt_base = round_up(efi_virt_base, SZ_2M);
+ else
+ efi_virt_base = round_up(efi_virt_base, SZ_64K);
+ }
in->virt_addr = efi_virt_base + in->phys_addr - paddr;
efi_virt_base += size;
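
The effect of the sort plus the adjacency check is that runs of back-to-back EFI_MEMORY_RUNTIME regions keep their relative offsets in the virtual layout, and only the first region of each run has its base re-rounded. A toy userspace walk-through with made-up regions, using only the 64 KiB rounding (the 2 MB section optimisation is left out):

#include <stdint.h>
#include <stdio.h>

#define SZ_64K 0x10000ULL

struct region { uint64_t phys, size; };

int main(void)
{
	const struct region map[] = {
		{ 0x80001000, 0x2000 },	/* A                        */
		{ 0x80003000, 0x1000 },	/* B, starts where A ends   */
		{ 0x90000000, 0x4000 },	/* C, not adjacent          */
	};
	uint64_t virt_base = 0x40000000, prev_end = 0;

	for (unsigned int i = 0; i < 3; i++) {
		uint64_t paddr = map[i].phys;
		uint64_t size = map[i].size;

		if (prev_end != map[i].phys) {	/* new run: re-round */
			paddr = map[i].phys & ~(SZ_64K - 1);
			size += map[i].phys - paddr;
			virt_base = (virt_base + SZ_64K - 1) & ~(SZ_64K - 1);
		}
		printf("phys %#llx -> virt %#llx\n",
		       (unsigned long long)map[i].phys,
		       (unsigned long long)(virt_base + map[i].phys - paddr));
		virt_base += size;
		prev_end = map[i].phys + map[i].size;
	}
	return 0;	/* A: 0x40001000, B: 0x40003000, C: 0x40010000 */
}
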
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index e334a01cf92f..6b6548fda089 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -5,10 +5,6 @@
/* error code which can't be mistaken for valid address */
#define EFI_ERROR (~0UL)
-#undef memcpy
-#undef memset
-#undef memmove
-
void efi_char16_printk(efi_system_table_t *, efi_char16_t *);
efi_status_t efi_open_volume(efi_system_table_t *sys_table_arg, void *__image,
diff --git a/drivers/firmware/qcom_scm-64.c b/drivers/firmware/qcom_scm-64.c
new file mode 100644
index 000000000000..bb6555f6d63b
--- /dev/null
+++ b/drivers/firmware/qcom_scm-64.c
@@ -0,0 +1,63 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/qcom_scm.h>
+
+/**
+ * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the cold boot address of the cpus. Any cpu outside the supported
+ * range would be removed from the cpu present mask.
+ */
+int __qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
+{
+ return -ENOTSUPP;
+}
+
+/**
+ * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
+ * @entry: Entry point function for the cpus
+ * @cpus: The cpumask of cpus that will use the entry point
+ *
+ * Set the Linux entry point for the SCM to transfer control to when coming
+ * out of a power down. CPU power down may be executed on cpuidle or hotplug.
+ */
+int __qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
+{
+ return -ENOTSUPP;
+}
+
+/**
+ * qcom_scm_cpu_power_down() - Power down the cpu
+ * @flags - Flags to flush cache
+ *
+ * This is an end point to power down cpu. If there was a pending interrupt,
+ * the control would return from this function, otherwise, the cpu jumps to the
+ * warm boot entry point set for this cpu upon reset.
+ */
+void __qcom_scm_cpu_power_down(u32 flags)
+{
+}
+
+int __qcom_scm_is_call_available(u32 svc_id, u32 cmd_id)
+{
+ return -ENOTSUPP;
+}
+
+int __qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
+{
+ return -ENOTSUPP;
+}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index b4fc9e4d24c6..8949b3f6f74d 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -356,7 +356,7 @@ config GPIO_PXA
config GPIO_RCAR
tristate "Renesas R-Car GPIO"
- depends on ARM && (ARCH_SHMOBILE || COMPILE_TEST)
+ depends on ARCH_SHMOBILE || COMPILE_TEST
select GPIOLIB_IRQCHIP
help
Say yes here to support GPIO on Renesas R-Car SoCs.
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 9b7e0b3db387..1b44941574fa 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -201,8 +201,7 @@ static int altera_gpio_direction_output(struct gpio_chip *gc,
return 0;
}
-static void altera_gpio_irq_edge_handler(unsigned int irq,
- struct irq_desc *desc)
+static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
{
struct altera_gpio_chip *altera_gc;
struct irq_chip *chip;
@@ -231,8 +230,7 @@ static void altera_gpio_irq_edge_handler(unsigned int irq,
}
-static void altera_gpio_irq_leveL_high_handler(unsigned int irq,
- struct irq_desc *desc)
+static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
{
struct altera_gpio_chip *altera_gc;
struct irq_chip *chip;
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 31b90ac15204..33a1f9779b86 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -433,7 +433,7 @@ static int bcm_kona_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return 0;
}
-static void bcm_kona_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void bcm_kona_gpio_irq_handler(struct irq_desc *desc)
{
void __iomem *reg_base;
int bit, bank_id;
diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c
index 9ea86d2ac054..4c64627c6bb5 100644
--- a/drivers/gpio/gpio-brcmstb.c
+++ b/drivers/gpio/gpio-brcmstb.c
@@ -236,7 +236,7 @@ static void brcmstb_gpio_irq_bank_handler(struct brcmstb_gpio_bank *bank)
}
/* Each UPG GIO block has one IRQ for all banks */
-static void brcmstb_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void brcmstb_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct brcmstb_gpio_priv *priv = brcmstb_gpio_gc_to_priv(gc);
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 94b0ab709721..5e715388803d 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -326,8 +326,7 @@ static struct irq_chip gpio_irqchip = {
.flags = IRQCHIP_SET_TYPE_MASKED,
};
-static void
-gpio_irq_handler(unsigned __irq, struct irq_desc *desc)
+static void gpio_irq_handler(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
struct davinci_gpio_regs __iomem *g;
diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c
index c5be4b9b8baf..fcd5b0acfc72 100644
--- a/drivers/gpio/gpio-dwapb.c
+++ b/drivers/gpio/gpio-dwapb.c
@@ -147,7 +147,7 @@ static u32 dwapb_do_irq(struct dwapb_gpio *gpio)
return ret;
}
-static void dwapb_irq_handler(u32 irq, struct irq_desc *desc)
+static void dwapb_irq_handler(struct irq_desc *desc)
{
struct dwapb_gpio *gpio = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c
index 9d90366ea259..3e3947b35c83 100644
--- a/drivers/gpio/gpio-ep93xx.c
+++ b/drivers/gpio/gpio-ep93xx.c
@@ -78,7 +78,7 @@ static void ep93xx_gpio_int_debounce(unsigned int irq, bool enable)
EP93XX_GPIO_REG(int_debounce_register_offset[port]));
}
-static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc)
{
unsigned char status;
int i;
@@ -100,8 +100,7 @@ static void ep93xx_gpio_ab_irq_handler(unsigned int irq, struct irq_desc *desc)
}
}
-static void ep93xx_gpio_f_irq_handler(unsigned int __irq,
- struct irq_desc *desc)
+static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc)
{
/*
* map discontiguous hw irq range to continuous sw irq range:
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index aa28c65eb6b4..70097472b02c 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -301,7 +301,7 @@ static const struct pci_device_id intel_gpio_ids[] = {
};
MODULE_DEVICE_TABLE(pci, intel_gpio_ids);
-static void intel_mid_irq_handler(unsigned irq, struct irq_desc *desc)
+static void intel_mid_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct intel_mid_gpio *priv = to_intel_gpio_priv(gc);
diff --git a/drivers/gpio/gpio-lynxpoint.c b/drivers/gpio/gpio-lynxpoint.c
index 153af464c7a7..127c37b380ae 100644
--- a/drivers/gpio/gpio-lynxpoint.c
+++ b/drivers/gpio/gpio-lynxpoint.c
@@ -234,7 +234,7 @@ static int lp_gpio_direction_output(struct gpio_chip *chip,
return 0;
}
-static void lp_gpio_irq_handler(unsigned hwirq, struct irq_desc *desc)
+static void lp_gpio_irq_handler(struct irq_desc *desc)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 8ef7a12de983..48ef368347ab 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -194,7 +194,7 @@ static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
return -ENXIO;
}
-static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc)
+static void mpc8xxx_gpio_irq_cascade(struct irq_desc *desc)
{
struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/gpio/gpio-msic.c b/drivers/gpio/gpio-msic.c
index 7bcfb87a5fa6..22523aae8abe 100644
--- a/drivers/gpio/gpio-msic.c
+++ b/drivers/gpio/gpio-msic.c
@@ -232,7 +232,7 @@ static struct irq_chip msic_irqchip = {
.irq_bus_sync_unlock = msic_bus_sync_unlock,
};
-static void msic_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void msic_gpio_irq_handler(struct irq_desc *desc)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
struct msic_gpio *mg = irq_data_get_irq_handler_data(data);
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index d2012cfb5571..4b4222145f10 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -305,7 +305,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
* which have been set as summary IRQ lines and which are triggered,
* and to call their interrupt handlers.
*/
-static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void msm_summary_irq_handler(struct irq_desc *desc)
{
unsigned long i;
struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index b396bf3bf294..df418b81456d 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -458,7 +458,7 @@ static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type)
return 0;
}
-static void mvebu_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void mvebu_gpio_irq_handler(struct irq_desc *desc)
{
struct mvebu_gpio_chip *mvchip = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index b752b560126e..b8dd847443c5 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -272,7 +272,7 @@ static void mxc_gpio_irq_handler(struct mxc_gpio_port *port, u32 irq_stat)
}
/* MX1 and MX3 has one interrupt *per* gpio port */
-static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc)
+static void mx3_gpio_irq_handler(struct irq_desc *desc)
{
u32 irq_stat;
struct mxc_gpio_port *port = irq_desc_get_handler_data(desc);
@@ -288,7 +288,7 @@ static void mx3_gpio_irq_handler(u32 irq, struct irq_desc *desc)
}
/* MX2 has one interrupt *for all* gpio ports */
-static void mx2_gpio_irq_handler(u32 irq, struct irq_desc *desc)
+static void mx2_gpio_irq_handler(struct irq_desc *desc)
{
u32 irq_msk, irq_stat;
struct mxc_gpio_port *port;
@@ -339,13 +339,15 @@ static int gpio_set_wake_irq(struct irq_data *d, u32 enable)
return 0;
}
-static void mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
+static int mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
gc = irq_alloc_generic_chip("gpio-mxc", 1, irq_base,
port->base, handle_level_irq);
+ if (!gc)
+ return -ENOMEM;
gc->private = port;
ct = gc->chip_types;
@@ -360,6 +362,8 @@ static void mxc_gpio_init_gc(struct mxc_gpio_port *port, int irq_base)
irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK,
IRQ_NOREQUEST, 0);
+
+ return 0;
}
static void mxc_gpio_get_hw(struct platform_device *pdev)
@@ -477,12 +481,16 @@ static int mxc_gpio_probe(struct platform_device *pdev)
}
/* gpio-mxc can be a generic irq chip */
- mxc_gpio_init_gc(port, irq_base);
+ err = mxc_gpio_init_gc(port, irq_base);
+ if (err < 0)
+ goto out_irqdomain_remove;
list_add_tail(&port->node, &mxc_gpio_ports);
return 0;
+out_irqdomain_remove:
+ irq_domain_remove(port->domain);
out_irqdesc_free:
irq_free_descs(irq_base, 32);
out_gpiochip_remove:
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index b7f383eb18d9..a4288f428819 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -154,7 +154,7 @@ static void mxs_flip_edge(struct mxs_gpio_port *port, u32 gpio)
}
/* MXS has one interrupt *per* gpio port */
-static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
+static void mxs_gpio_irq_handler(struct irq_desc *desc)
{
u32 irq_stat;
struct mxs_gpio_port *port = irq_desc_get_handler_data(desc);
@@ -196,13 +196,16 @@ static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable)
return 0;
}
-static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
+static int __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
gc = irq_alloc_generic_chip("gpio-mxs", 1, irq_base,
port->base, handle_level_irq);
+ if (!gc)
+ return -ENOMEM;
+
gc->private = port;
ct = gc->chip_types;
@@ -216,6 +219,8 @@ static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
irq_setup_generic_chip(gc, IRQ_MSK(32), IRQ_GC_INIT_NESTED_LOCK,
IRQ_NOREQUEST, 0);
+
+ return 0;
}
static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
@@ -317,7 +322,9 @@ static int mxs_gpio_probe(struct platform_device *pdev)
}
/* gpio-mxs can be a generic irq chip */
- mxs_gpio_init_gc(port, irq_base);
+ err = mxs_gpio_init_gc(port, irq_base);
+ if (err < 0)
+ goto out_irqdomain_remove;
/* setup one handler for each entry */
irq_set_chained_handler_and_data(port->irq, mxs_gpio_irq_handler,
@@ -343,6 +350,8 @@ static int mxs_gpio_probe(struct platform_device *pdev)
out_bgpio_remove:
bgpio_remove(&port->bgc);
+out_irqdomain_remove:
+ irq_domain_remove(port->domain);
out_irqdesc_free:
irq_free_descs(irq_base, 32);
return err;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 2ae0d47e9554..5236db161e76 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -709,7 +709,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
* line's interrupt handler has been run, we may miss some nested
* interrupts.
*/
-static void omap_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void omap_gpio_irq_handler(struct irq_desc *desc)
{
void __iomem *isr_reg = NULL;
u32 isr;
@@ -1098,7 +1098,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
} else {
bank->chip.label = "gpio";
bank->chip.base = gpio;
- gpio += bank->width;
}
bank->chip.ngpio = bank->width;
@@ -1108,6 +1107,9 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
return ret;
}
+ if (!bank->is_mpuio)
+ gpio += bank->width;
+
#ifdef CONFIG_ARCH_OMAP1
/*
* REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
@@ -1253,8 +1255,11 @@ static int omap_gpio_probe(struct platform_device *pdev)
omap_gpio_mod_init(bank);
ret = omap_gpio_chip_init(bank, irqc);
- if (ret)
+ if (ret) {
+ pm_runtime_put_sync(bank->dev);
+ pm_runtime_disable(bank->dev);
return ret;
+ }
omap_gpio_show_rev(bank);
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 04756130437f..229ef653e0f8 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -187,7 +187,7 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger)
return 0;
}
-static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
+static void pl061_irq_handler(struct irq_desc *desc)
{
unsigned long pending;
int offset;
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 55a11de3d5b7..df2ce550f309 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -401,7 +401,7 @@ static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
return 0;
}
-static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
+static void pxa_gpio_demux_handler(struct irq_desc *desc)
{
struct pxa_gpio_chip *c;
int loop, gpio, gpio_base, n;
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index 67bd2f5d89e8..990fa9023e22 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -172,8 +172,7 @@ static struct irq_domain *sa1100_gpio_irqdomain;
* irq_controller_lock held, and IRQs disabled. Decode the IRQ
* and call the handler.
*/
-static void
-sa1100_gpio_handler(unsigned int __irq, struct irq_desc *desc)
+static void sa1100_gpio_handler(struct irq_desc *desc)
{
unsigned int irq, mask;
diff --git a/drivers/gpio/gpio-sx150x.c b/drivers/gpio/gpio-sx150x.c
index 458d9d7952b8..9c6b96707c9f 100644
--- a/drivers/gpio/gpio-sx150x.c
+++ b/drivers/gpio/gpio-sx150x.c
@@ -706,4 +706,3 @@ module_exit(sx150x_exit);
MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>");
MODULE_DESCRIPTION("Driver for Semtech SX150X I2C GPIO Expanders");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("i2c:sx150x");
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 9b14aafb576d..027e5f47dd28 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -266,7 +266,7 @@ static void tegra_gpio_irq_shutdown(struct irq_data *d)
gpiochip_unlock_as_irq(&tegra_gpio_chip, gpio);
}
-static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void tegra_gpio_irq_handler(struct irq_desc *desc)
{
int port;
int pin;
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index 5a492054589f..30653e6319e9 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -192,7 +192,7 @@ out:
return ret;
}
-static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
+static void timbgpio_irq(struct irq_desc *desc)
{
struct timbgpio *tgpio = irq_desc_get_handler_data(desc);
struct irq_data *data = irq_desc_get_irq_data(desc);
diff --git a/drivers/gpio/gpio-tz1090.c b/drivers/gpio/gpio-tz1090.c
index bbac92ae4c32..87bb1b1eee8d 100644
--- a/drivers/gpio/gpio-tz1090.c
+++ b/drivers/gpio/gpio-tz1090.c
@@ -375,7 +375,7 @@ static int gpio_set_irq_wake(struct irq_data *data, unsigned int on)
#define gpio_set_irq_wake NULL
#endif
-static void tz1090_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void tz1090_gpio_irq_handler(struct irq_desc *desc)
{
irq_hw_number_t hw;
unsigned int irq_stat, irq_no;
@@ -400,7 +400,7 @@ static void tz1090_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
== IRQ_TYPE_EDGE_BOTH)
tz1090_gpio_irq_next_edge(bank, hw);
- generic_handle_irq_desc(irq_no, child_desc);
+ generic_handle_irq_desc(child_desc);
}
}
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index 3d5714d4f405..069f9e4b7daa 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -120,7 +120,7 @@ static int vf610_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
return pinctrl_gpio_direction_output(chip->base + gpio);
}
-static void vf610_gpio_irq_handler(u32 irq, struct irq_desc *desc)
+static void vf610_gpio_irq_handler(struct irq_desc *desc)
{
struct vf610_gpio_port *port = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -176,9 +176,9 @@ static int vf610_gpio_irq_set_type(struct irq_data *d, u32 type)
port->irqc[d->hwirq] = irqc;
if (type & IRQ_TYPE_LEVEL_MASK)
- __irq_set_handler_locked(d->irq, handle_level_irq);
+ irq_set_handler_locked(d, handle_level_irq);
else
- __irq_set_handler_locked(d->irq, handle_edge_irq);
+ irq_set_handler_locked(d, handle_edge_irq);
return 0;
}
diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c
index 12ee1969298c..4b8a26910705 100644
--- a/drivers/gpio/gpio-zx.c
+++ b/drivers/gpio/gpio-zx.c
@@ -177,7 +177,7 @@ static int zx_irq_type(struct irq_data *d, unsigned trigger)
return 0;
}
-static void zx_irq_handler(unsigned irq, struct irq_desc *desc)
+static void zx_irq_handler(struct irq_desc *desc)
{
unsigned long pending;
int offset;
diff --git a/drivers/gpio/gpio-zynq.c b/drivers/gpio/gpio-zynq.c
index 27348e7cb705..1d1a5865ede9 100644
--- a/drivers/gpio/gpio-zynq.c
+++ b/drivers/gpio/gpio-zynq.c
@@ -514,7 +514,7 @@ static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio,
* application for that pin.
* Note: A bug is reported if no handler is set for the gpio pin.
*/
-static void zynq_gpio_irqhandler(unsigned int irq, struct irq_desc *desc)
+static void zynq_gpio_irqhandler(struct irq_desc *desc)
{
u32 int_sts, int_enb;
unsigned int bank_num;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 980c1f87866a..5db3445552b1 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1174,15 +1174,16 @@ EXPORT_SYMBOL_GPL(gpiod_is_active_low);
* that the GPIO was actually requested.
*/
-static bool _gpiod_get_raw_value(const struct gpio_desc *desc)
+static int _gpiod_get_raw_value(const struct gpio_desc *desc)
{
struct gpio_chip *chip;
- bool value;
int offset;
+ int value;
chip = desc->chip;
offset = gpio_chip_hwgpio(desc);
- value = chip->get ? chip->get(chip, offset) : false;
+ value = chip->get ? chip->get(chip, offset) : -EIO;
+ value = value < 0 ? value : !!value;
trace_gpio_value(desc_to_gpio(desc), 1, value);
return value;
}
@@ -1192,7 +1193,7 @@ static bool _gpiod_get_raw_value(const struct gpio_desc *desc)
* @desc: gpio whose value will be returned
*
* Return the GPIO's raw value, i.e. the value of the physical line disregarding
- * its ACTIVE_LOW status.
+ * its ACTIVE_LOW status, or negative errno on failure.
*
* This function should be called from contexts where we cannot sleep, and will
* complain if the GPIO chip functions potentially sleep.
@@ -1212,7 +1213,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_value);
* @desc: gpio whose value will be returned
*
* Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into
- * account.
+ * account, or negative errno on failure.
*
* This function should be called from contexts where we cannot sleep, and will
* complain if the GPIO chip functions potentially sleep.
@@ -1226,6 +1227,9 @@ int gpiod_get_value(const struct gpio_desc *desc)
WARN_ON(desc->chip->can_sleep);
value = _gpiod_get_raw_value(desc);
+ if (value < 0)
+ return value;
+
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
@@ -1548,7 +1552,7 @@ EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq);
* @desc: gpio whose value will be returned
*
* Return the GPIO's raw value, i.e. the value of the physical line disregarding
- * its ACTIVE_LOW status.
+ * its ACTIVE_LOW status, or negative errno on failure.
*
* This function is to be called from contexts that can sleep.
*/
@@ -1566,7 +1570,7 @@ EXPORT_SYMBOL_GPL(gpiod_get_raw_value_cansleep);
* @desc: gpio whose value will be returned
*
* Return the GPIO's logical value, i.e. taking the ACTIVE_LOW status into
- * account.
+ * account, or negative errno on failure.
*
* This function is to be called from contexts that can sleep.
*/
@@ -1579,6 +1583,9 @@ int gpiod_get_value_cansleep(const struct gpio_desc *desc)
return 0;
value = _gpiod_get_raw_value(desc);
+ if (value < 0)
+ return value;
+
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
value = !value;
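
With _gpiod_get_raw_value() now propagating errors, gpiod_get_value() and its cansleep variant can return a negative errno in addition to 0 or 1, so consumers should check for it. A hedged sketch of a caller; the descriptor is assumed to come from gpiod_get() elsewhere and the helper name is illustrative:

#include <linux/gpio/consumer.h>
#include <linux/printk.h>

/*
 * Read a logical GPIO value and propagate failures instead of treating
 * them as "line low".
 */
static int example_read_line(struct gpio_desc *desc)
{
	int value = gpiod_get_value(desc);

	if (value < 0) {
		pr_err("GPIO read failed: %d\n", value);
		return value;
	}

	return value;	/* 0 or 1, ACTIVE_LOW already taken into account */
}
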
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 668939a14206..6647fb26ef25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -82,6 +82,7 @@ extern int amdgpu_vm_block_size;
extern int amdgpu_enable_scheduler;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
+extern int amdgpu_enable_semaphores;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -432,7 +433,7 @@ int amdgpu_fence_driver_init(struct amdgpu_device *adev);
void amdgpu_fence_driver_fini(struct amdgpu_device *adev);
void amdgpu_fence_driver_force_completion(struct amdgpu_device *adev);
-void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring);
int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_irq_src *irq_src,
unsigned irq_type);
@@ -890,7 +891,7 @@ struct amdgpu_ring {
struct amdgpu_device *adev;
const struct amdgpu_ring_funcs *funcs;
struct amdgpu_fence_driver fence_drv;
- struct amd_gpu_scheduler *scheduler;
+ struct amd_gpu_scheduler sched;
spinlock_t fence_lock;
struct mutex *ring_lock;
@@ -1201,8 +1202,6 @@ struct amdgpu_gfx {
struct amdgpu_irq_src priv_inst_irq;
/* gfx status */
uint32_t gfx_current_status;
- /* sync signal for const engine */
- unsigned ce_sync_offs;
/* ce ram size*/
unsigned ce_ram_size;
};
@@ -1274,8 +1273,10 @@ struct amdgpu_job {
uint32_t num_ibs;
struct mutex job_lock;
struct amdgpu_user_fence uf;
- int (*free_job)(struct amdgpu_job *sched_job);
+ int (*free_job)(struct amdgpu_job *job);
};
+#define to_amdgpu_job(sched_job) \
+ container_of((sched_job), struct amdgpu_job, base)
static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
{
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 496ed2192eba..84d68d658f8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -183,7 +183,7 @@ int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
return -ENOMEM;
r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
- AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &(*mem)->bo);
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, NULL, &(*mem)->bo);
if (r) {
dev_err(rdev->dev,
"failed to allocate BO for amdkfd (%d)\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
index 98d59ee640ce..cd639c362df3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -79,7 +79,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
int time;
n = AMDGPU_BENCHMARK_ITERATIONS;
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL, &sobj);
+ r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, sdomain, 0, NULL,
+ NULL, &sobj);
if (r) {
goto out_cleanup;
}
@@ -91,7 +92,8 @@ static void amdgpu_benchmark_move(struct amdgpu_device *adev, unsigned size,
if (r) {
goto out_cleanup;
}
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL, &dobj);
+ r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, ddomain, 0, NULL,
+ NULL, &dobj);
if (r) {
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 6b1243f9f86d..8e995148f56e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -86,7 +86,7 @@ static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
- AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
+ AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
if (ret)
return ret;
ret = amdgpu_bo_reserve(bo, false);
@@ -197,7 +197,8 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
true, domain, flags,
- NULL, &placement, &obj);
+ NULL, &placement, NULL,
+ &obj);
if (ret) {
DRM_ERROR("(%d) bo create failed\n", ret);
return ret;
@@ -207,44 +208,6 @@ static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device,
return ret;
}
-static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd,
- cgs_handle_t *handle)
-{
- CGS_FUNC_ADEV;
- int r;
- uint32_t dma_handle;
- struct drm_gem_object *obj;
- struct amdgpu_bo *bo;
- struct drm_device *dev = adev->ddev;
- struct drm_file *file_priv = NULL, *priv;
-
- mutex_lock(&dev->struct_mutex);
- list_for_each_entry(priv, &dev->filelist, lhead) {
- rcu_read_lock();
- if (priv->pid == get_pid(task_pid(current)))
- file_priv = priv;
- rcu_read_unlock();
- if (file_priv)
- break;
- }
- mutex_unlock(&dev->struct_mutex);
- r = dev->driver->prime_fd_to_handle(dev,
- file_priv, dmabuf_fd,
- &dma_handle);
- spin_lock(&file_priv->table_lock);
-
- /* Check if we currently have a reference on the object */
- obj = idr_find(&file_priv->object_idr, dma_handle);
- if (obj == NULL) {
- spin_unlock(&file_priv->table_lock);
- return -EINVAL;
- }
- spin_unlock(&file_priv->table_lock);
- bo = gem_to_amdgpu_bo(obj);
- *handle = (cgs_handle_t)bo;
- return 0;
-}
-
static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle)
{
struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
@@ -809,7 +772,6 @@ static const struct cgs_ops amdgpu_cgs_ops = {
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
- amdgpu_cgs_import_gpu_mem,
amdgpu_cgs_add_irq_source,
amdgpu_cgs_irq_get,
amdgpu_cgs_irq_put
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 3b355aeb62fd..cb3c274edb0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -154,42 +154,42 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
union drm_amdgpu_cs *cs = data;
uint64_t *chunk_array_user;
- uint64_t *chunk_array = NULL;
+ uint64_t *chunk_array;
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- unsigned size, i;
- int r = 0;
+ unsigned size;
+ int i;
+ int ret;
- if (!cs->in.num_chunks)
- goto out;
+ if (cs->in.num_chunks == 0)
+ return 0;
+
+ chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+ if (!chunk_array)
+ return -ENOMEM;
p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
if (!p->ctx) {
- r = -EINVAL;
- goto out;
+ ret = -EINVAL;
+ goto free_chunk;
}
+
p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
/* get chunks */
INIT_LIST_HEAD(&p->validated);
- chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
- if (chunk_array == NULL) {
- r = -ENOMEM;
- goto out;
- }
-
chunk_array_user = (uint64_t __user *)(cs->in.chunks);
if (copy_from_user(chunk_array, chunk_array_user,
sizeof(uint64_t)*cs->in.num_chunks)) {
- r = -EFAULT;
- goto out;
+ ret = -EFAULT;
+ goto put_bo_list;
}
p->nchunks = cs->in.num_chunks;
p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
GFP_KERNEL);
- if (p->chunks == NULL) {
- r = -ENOMEM;
- goto out;
+ if (!p->chunks) {
+ ret = -ENOMEM;
+ goto put_bo_list;
}
for (i = 0; i < p->nchunks; i++) {
@@ -200,8 +200,9 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
chunk_ptr = (void __user *)chunk_array[i];
if (copy_from_user(&user_chunk, chunk_ptr,
sizeof(struct drm_amdgpu_cs_chunk))) {
- r = -EFAULT;
- goto out;
+ ret = -EFAULT;
+ i--;
+ goto free_partial_kdata;
}
p->chunks[i].chunk_id = user_chunk.chunk_id;
p->chunks[i].length_dw = user_chunk.length_dw;
@@ -212,13 +213,14 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
if (p->chunks[i].kdata == NULL) {
- r = -ENOMEM;
- goto out;
+ ret = -ENOMEM;
+ i--;
+ goto free_partial_kdata;
}
size *= sizeof(uint32_t);
if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
- r = -EFAULT;
- goto out;
+ ret = -EFAULT;
+ goto free_partial_kdata;
}
switch (p->chunks[i].chunk_id) {
@@ -238,15 +240,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
gobj = drm_gem_object_lookup(p->adev->ddev,
p->filp, handle);
if (gobj == NULL) {
- r = -EINVAL;
- goto out;
+ ret = -EINVAL;
+ goto free_partial_kdata;
}
p->uf.bo = gem_to_amdgpu_bo(gobj);
p->uf.offset = fence_data->offset;
} else {
- r = -EINVAL;
- goto out;
+ ret = -EINVAL;
+ goto free_partial_kdata;
}
break;
@@ -254,19 +256,35 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
break;
default:
- r = -EINVAL;
- goto out;
+ ret = -EINVAL;
+ goto free_partial_kdata;
}
}
p->ibs = kcalloc(p->num_ibs, sizeof(struct amdgpu_ib), GFP_KERNEL);
- if (!p->ibs)
- r = -ENOMEM;
+ if (!p->ibs) {
+ ret = -ENOMEM;
+ goto free_all_kdata;
+ }
-out:
kfree(chunk_array);
- return r;
+ return 0;
+
+free_all_kdata:
+ i = p->nchunks - 1;
+free_partial_kdata:
+ for (; i >= 0; i--)
+ drm_free_large(p->chunks[i].kdata);
+ kfree(p->chunks);
+put_bo_list:
+ if (p->bo_list)
+ amdgpu_bo_list_put(p->bo_list);
+ amdgpu_ctx_put(p->ctx);
+free_chunk:
+ kfree(chunk_array);
+
+ return ret;
}
/* Returns how many bytes TTM can move per IB.
@@ -321,25 +339,17 @@ static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
return max(bytes_moved_threshold, 1024*1024ull);
}
-int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
+int amdgpu_cs_list_validate(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct list_head *validated)
{
- struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
- struct amdgpu_vm *vm = &fpriv->vm;
- struct amdgpu_device *adev = p->adev;
struct amdgpu_bo_list_entry *lobj;
- struct list_head duplicates;
struct amdgpu_bo *bo;
u64 bytes_moved = 0, initial_bytes_moved;
u64 bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(adev);
int r;
- INIT_LIST_HEAD(&duplicates);
- r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
- if (unlikely(r != 0)) {
- return r;
- }
-
- list_for_each_entry(lobj, &p->validated, tv.head) {
+ list_for_each_entry(lobj, validated, tv.head) {
bo = lobj->robj;
if (!bo->pin_count) {
u32 domain = lobj->prefered_domains;
@@ -373,7 +383,6 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p)
domain = lobj->allowed_domains;
goto retry;
}
- ttm_eu_backoff_reservation(&p->ticket, &p->validated);
return r;
}
}
@@ -386,6 +395,7 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_cs_buckets buckets;
+ struct list_head duplicates;
bool need_mmap_lock = false;
int i, r;
@@ -405,8 +415,22 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
if (need_mmap_lock)
down_read(&current->mm->mmap_sem);
- r = amdgpu_cs_list_validate(p);
+ INIT_LIST_HEAD(&duplicates);
+ r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
+ if (unlikely(r != 0))
+ goto error_reserve;
+
+ r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
+ if (r)
+ goto error_validate;
+
+ r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &duplicates);
+
+error_validate:
+ if (r)
+ ttm_eu_backoff_reservation(&p->ticket, &p->validated);
+error_reserve:
if (need_mmap_lock)
up_read(&current->mm->mmap_sem);
@@ -772,15 +796,15 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
return 0;
}
-static int amdgpu_cs_free_job(struct amdgpu_job *sched_job)
+static int amdgpu_cs_free_job(struct amdgpu_job *job)
{
int i;
- if (sched_job->ibs)
- for (i = 0; i < sched_job->num_ibs; i++)
- amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
- kfree(sched_job->ibs);
- if (sched_job->uf.bo)
- drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base);
+ if (job->ibs)
+ for (i = 0; i < job->num_ibs; i++)
+ amdgpu_ib_free(job->adev, &job->ibs[i]);
+ kfree(job->ibs);
+ if (job->uf.bo)
+ drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
return 0;
}
@@ -804,7 +828,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
r = amdgpu_cs_parser_init(parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
- amdgpu_cs_parser_fini(parser, r, false);
+ kfree(parser);
up_read(&adev->exclusive_lock);
r = amdgpu_cs_handle_lockup(adev, r);
return r;
@@ -842,7 +866,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
if (!job)
return -ENOMEM;
- job->base.sched = ring->scheduler;
+ job->base.sched = &ring->sched;
job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
job->adev = parser->adev;
job->ibs = parser->ibs;
@@ -857,7 +881,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
job->free_job = amdgpu_cs_free_job;
mutex_lock(&job->job_lock);
- r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+ r = amd_sched_entity_push_job(&job->base);
if (r) {
mutex_unlock(&job->job_lock);
amdgpu_cs_free_job(job);
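
The parser-init rework above replaces the single out: label with a ladder of unwind targets (free_all_kdata, free_partial_kdata, put_bo_list, free_chunk), decrementing i before jumping when the current chunk holds nothing yet, so each failure point releases exactly what was already allocated. A minimal user-space sketch of that unwind pattern, with hypothetical names (parse, load_chunk) rather than the driver's API:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical per-chunk loader; fails on chunk 3 to exercise the unwind. */
static int load_chunk(int *chunk, int idx)
{
	*chunk = idx;
	return idx == 3 ? -1 : 0;
}

static int parse(int nchunks)
{
	int *chunks;
	int i, ret;

	chunks = calloc(nchunks, sizeof(*chunks));
	if (!chunks)
		return -1;

	for (i = 0; i < nchunks; i++) {
		ret = load_chunk(&chunks[i], i);
		if (ret) {
			i--;			/* current chunk holds nothing yet */
			goto free_partial;
		}
	}

	printf("all %d chunks loaded\n", nchunks);
	free(chunks);
	return 0;

free_partial:
	for (; i >= 0; i--)		/* undo only what actually succeeded */
		printf("releasing chunk %d\n", chunks[i]);
	free(chunks);
	return ret;
}

int main(void)
{
	return parse(5) ? 1 : 0;
}

The driver's free_all_kdata label plays the same role as starting the loop from the last index when every chunk was populated.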
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 20cbc4eb5a6f..e0b80ccdfe8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,10 +43,10 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
for (i = 0; i < adev->num_rings; i++) {
struct amd_sched_rq *rq;
if (kernel)
- rq = &adev->rings[i]->scheduler->kernel_rq;
+ rq = &adev->rings[i]->sched.kernel_rq;
else
- rq = &adev->rings[i]->scheduler->sched_rq;
- r = amd_sched_entity_init(adev->rings[i]->scheduler,
+ rq = &adev->rings[i]->sched.sched_rq;
+ r = amd_sched_entity_init(&adev->rings[i]->sched,
&ctx->rings[i].entity,
rq, amdgpu_sched_jobs);
if (r)
@@ -55,7 +55,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
if (i < adev->num_rings) {
for (j = 0; j < i; j++)
- amd_sched_entity_fini(adev->rings[j]->scheduler,
+ amd_sched_entity_fini(&adev->rings[j]->sched,
&ctx->rings[j].entity);
kfree(ctx);
return r;
@@ -75,7 +75,7 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
if (amdgpu_enable_scheduler) {
for (i = 0; i < adev->num_rings; i++)
- amd_sched_entity_fini(adev->rings[i]->scheduler,
+ amd_sched_entity_fini(&adev->rings[i]->sched,
&ctx->rings[i].entity);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6ff6ae945794..6068d8207d10 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -246,7 +246,7 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE,
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, &adev->vram_scratch.robj);
+ NULL, NULL, &adev->vram_scratch.robj);
if (r) {
return r;
}
@@ -449,7 +449,8 @@ static int amdgpu_wb_init(struct amdgpu_device *adev)
if (adev->wb.wb_obj == NULL) {
r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, &adev->wb.wb_obj);
+ AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
+ &adev->wb.wb_obj);
if (r) {
dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
return r;
@@ -1650,9 +1651,11 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
drm_kms_helper_poll_disable(dev);
/* turn off display hw */
+ drm_modeset_lock_all(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
+ drm_modeset_unlock_all(dev);
/* unpin the front buffers */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1747,9 +1750,11 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
if (fbcon) {
drm_helper_resume_force_mode(dev);
/* turn on display hw */
+ drm_modeset_lock_all(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
+ drm_modeset_unlock_all(dev);
}
drm_kms_helper_poll_enable(dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 0fcc0bd1622c..adb48353f2e1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -79,6 +79,7 @@ int amdgpu_exp_hw_support = 0;
int amdgpu_enable_scheduler = 0;
int amdgpu_sched_jobs = 16;
int amdgpu_sched_hw_submission = 2;
+int amdgpu_enable_semaphores = 1;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -152,6 +153,9 @@ module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
+MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable (default), 0 = disable)");
+module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
+
static struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 1be2bd6d07ea..b3fc26c59787 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -609,9 +609,9 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
* Init the fence driver for the requested ring (all asics).
* Helper function for amdgpu_fence_driver_init().
*/
-void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
+int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
{
- int i;
+ int i, r;
ring->fence_drv.cpu_addr = NULL;
ring->fence_drv.gpu_addr = 0;
@@ -625,15 +625,19 @@ void amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
amdgpu_fence_check_lockup);
ring->fence_drv.ring = ring;
+ init_waitqueue_head(&ring->fence_drv.fence_queue);
+
if (amdgpu_enable_scheduler) {
- ring->scheduler = amd_sched_create(&amdgpu_sched_ops,
- ring->idx,
- amdgpu_sched_hw_submission,
- (void *)ring->adev);
- if (!ring->scheduler)
- DRM_ERROR("Failed to create scheduler on ring %d.\n",
- ring->idx);
+ r = amd_sched_init(&ring->sched, &amdgpu_sched_ops,
+ amdgpu_sched_hw_submission, ring->name);
+ if (r) {
+ DRM_ERROR("Failed to create scheduler on ring %s.\n",
+ ring->name);
+ return r;
+ }
}
+
+ return 0;
}
/**
@@ -681,8 +685,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
wake_up_all(&ring->fence_drv.fence_queue);
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
- if (ring->scheduler)
- amd_sched_destroy(ring->scheduler);
+ amd_sched_fini(&ring->sched);
ring->fence_drv.initialized = false;
}
mutex_unlock(&adev->ring_lock);
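
amdgpu_fence_driver_init_ring() now returns an error when amd_sched_init() fails instead of only logging it, and amdgpu_ring_init() (see the amdgpu_ring.c hunk further down) propagates that error. A tiny sketch of the void-to-int conversion pattern, with made-up names (ring_init, scheduler_init), not the driver functions:

#include <stdio.h>

/* Hypothetical scheduler setup that can fail; simulates -ENOMEM on ring 1. */
static int scheduler_init(int ring_idx)
{
	return ring_idx == 1 ? -12 : 0;
}

static int ring_init(int ring_idx)
{
	int r = scheduler_init(ring_idx);

	if (r) {
		fprintf(stderr, "scheduler init failed on ring %d (%d)\n", ring_idx, r);
		return r;	/* propagate instead of continuing half-initialized */
	}
	return 0;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		if (ring_init(i))
			return 1;
	return 0;
}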
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index cbd3a486c5c2..7312d729d300 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -127,7 +127,7 @@ int amdgpu_gart_table_vram_alloc(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, adev->gart.table_size,
PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, &adev->gart.robj);
+ NULL, NULL, &adev->gart.robj);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 5839fab374bf..7297ca3a0ba7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -69,7 +69,8 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
}
}
retry:
- r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain, flags, NULL, &robj);
+ r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
+ flags, NULL, NULL, &robj);
if (r) {
if (r != -ERESTARTSYS) {
if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -426,6 +427,10 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
&args->data.data_size_bytes,
&args->data.flags);
} else if (args->op == AMDGPU_GEM_METADATA_OP_SET_METADATA) {
+ if (args->data.data_size_bytes > sizeof(args->data.data)) {
+ r = -EINVAL;
+ goto unreserve;
+ }
r = amdgpu_bo_set_tiling_flags(robj, args->data.tiling_info);
if (!r)
r = amdgpu_bo_set_metadata(robj, args->data.data,
@@ -433,6 +438,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
args->data.flags);
}
+unreserve:
amdgpu_bo_unreserve(robj);
out:
drm_gem_object_unreference_unlocked(gobj);
@@ -454,11 +460,12 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
struct ttm_validate_buffer tv, *entry;
struct amdgpu_bo_list_entry *vm_bos;
struct ww_acquire_ctx ticket;
- struct list_head list;
+ struct list_head list, duplicates;
unsigned domain;
int r;
INIT_LIST_HEAD(&list);
+ INIT_LIST_HEAD(&duplicates);
tv.bo = &bo_va->bo->tbo;
tv.shared = true;
@@ -468,7 +475,8 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (!vm_bos)
return;
- r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+ /* Provide duplicates to avoid -EALREADY */
+ r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
if (r)
goto error_free;
@@ -651,7 +659,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
int r;
args->pitch = amdgpu_align_pitch(adev, args->width, args->bpp, 0) * ((args->bpp + 1) / 8);
- args->size = args->pitch * args->height;
+ args->size = (u64)args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
r = amdgpu_gem_object_create(adev, args->size, 0,
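
The dumb-buffer size computation above promotes pitch to u64 before multiplying by height, so the product cannot wrap in 32-bit arithmetic (the same idea appears later as (u64)mem->start << PAGE_SHIFT in amdgpu_vm.c). A standalone illustration with arbitrary example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pitch  = 131072;	/* bytes per scanline (example value) */
	uint32_t height = 65536;

	uint64_t wrapped  = pitch * height;		/* 32-bit multiply: wraps to 0 */
	uint64_t promoted = (uint64_t)pitch * height;	/* widen first: 8 GiB */

	printf("wrapped=%llu promoted=%llu\n",
	       (unsigned long long)wrapped, (unsigned long long)promoted);
	return 0;
}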
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
index 5c8a803acedc..534fc04e80fd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -43,7 +43,7 @@ static int amdgpu_ih_ring_alloc(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, adev->irq.ih.ring_size,
PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0,
- NULL, &adev->irq.ih.ring_obj);
+ NULL, NULL, &adev->irq.ih.ring_obj);
if (r) {
DRM_ERROR("amdgpu: failed to create ih ring buffer (%d).\n", r);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 0aba8e9bc8a0..7c42ff670080 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -140,7 +140,7 @@ void amdgpu_irq_preinstall(struct drm_device *dev)
*/
int amdgpu_irq_postinstall(struct drm_device *dev)
{
- dev->max_vblank_count = 0x001fffff;
+ dev->max_vblank_count = 0x00ffffff;
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 22367939ebf1..8c735f544b66 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -390,7 +390,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
min((size_t)size, sizeof(vram_gtt))) ? -EFAULT : 0;
}
case AMDGPU_INFO_READ_MMR_REG: {
- unsigned n, alloc_size = info->read_mmr_reg.count * 4;
+ unsigned n, alloc_size;
uint32_t *regs;
unsigned se_num = (info->read_mmr_reg.instance >>
AMDGPU_INFO_MMR_SE_INDEX_SHIFT) &
@@ -406,9 +406,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
sh_num = 0xffffffff;
- regs = kmalloc(alloc_size, GFP_KERNEL);
+ regs = kmalloc_array(info->read_mmr_reg.count, sizeof(*regs), GFP_KERNEL);
if (!regs)
return -ENOMEM;
+ alloc_size = info->read_mmr_reg.count * sizeof(*regs);
for (i = 0; i < info->read_mmr_reg.count; i++)
if (amdgpu_asic_read_register(adev, se_num, sh_num,
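
AMDGPU_INFO_READ_MMR_REG now sizes its register buffer with kmalloc_array() from the user-supplied count, so an overflowing count * sizeof(*regs) can no longer yield an undersized allocation. A user-space sketch of the overflow-checked allocation idea; alloc_array here is a hypothetical stand-in, not a kernel API:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Refuse the request when n * size would wrap, mirroring kmalloc_array(). */
static void *alloc_array(size_t n, size_t size)
{
	if (size && n > SIZE_MAX / size)
		return NULL;
	return malloc(n * size);
}

int main(void)
{
	size_t user_count = SIZE_MAX / 2;	/* attacker-chosen element count */

	if (!alloc_array(user_count, sizeof(uint32_t)))
		puts("rejected oversized request");
	return 0;
}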
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 08b09d55b96f..1a7708f365f3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -215,6 +215,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
bool kernel, u32 domain, u64 flags,
struct sg_table *sg,
struct ttm_placement *placement,
+ struct reservation_object *resv,
struct amdgpu_bo **bo_ptr)
{
struct amdgpu_bo *bo;
@@ -261,7 +262,7 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
/* Kernel allocation are uninterruptible */
r = ttm_bo_init(&adev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, NULL,
- acc_size, sg, NULL, &amdgpu_ttm_bo_destroy);
+ acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
if (unlikely(r != 0)) {
return r;
}
@@ -275,7 +276,9 @@ int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
int amdgpu_bo_create(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
- struct sg_table *sg, struct amdgpu_bo **bo_ptr)
+ struct sg_table *sg,
+ struct reservation_object *resv,
+ struct amdgpu_bo **bo_ptr)
{
struct ttm_placement placement = {0};
struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
@@ -286,11 +289,9 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
amdgpu_ttm_placement_init(adev, &placement,
placements, domain, flags);
- return amdgpu_bo_create_restricted(adev, size, byte_align,
- kernel, domain, flags,
- sg,
- &placement,
- bo_ptr);
+ return amdgpu_bo_create_restricted(adev, size, byte_align, kernel,
+ domain, flags, sg, &placement,
+ resv, bo_ptr);
}
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
@@ -535,12 +536,10 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
if (metadata == NULL)
return -EINVAL;
- buffer = kzalloc(metadata_size, GFP_KERNEL);
+ buffer = kmemdup(metadata, metadata_size, GFP_KERNEL);
if (buffer == NULL)
return -ENOMEM;
- memcpy(buffer, metadata, metadata_size);
-
kfree(bo->metadata);
bo->metadata_flags = flags;
bo->metadata = buffer;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 6ea18dcec561..3c2ff4567798 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -129,12 +129,14 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
struct sg_table *sg,
+ struct reservation_object *resv,
struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
unsigned long size, int byte_align,
bool kernel, u32 domain, u64 flags,
struct sg_table *sg,
struct ttm_placement *placement,
+ struct reservation_object *resv,
struct amdgpu_bo **bo_ptr);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
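
Both creation helpers now take an optional struct reservation_object: most callers pass NULL and get a private reservation, while prime import and the VM page-table path (amdgpu_prime.c and amdgpu_vm.c below) hand in an existing one so the new BO shares its lock. A small user-space analogue of that share-or-create pattern, using invented names (struct buf, buf_create) rather than the driver types:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
	pthread_mutex_t *lock;	/* the buffer's reservation lock */
	pthread_mutex_t  own;	/* backing storage when none is supplied */
	size_t size;
};

/* resv == NULL: the buffer gets a private lock; otherwise it shares resv. */
static struct buf *buf_create(size_t size, pthread_mutex_t *resv)
{
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return NULL;
	b->size = size;
	if (resv) {
		b->lock = resv;
	} else {
		pthread_mutex_init(&b->own, NULL);
		b->lock = &b->own;
	}
	return b;
}

int main(void)
{
	pthread_mutex_t shared = PTHREAD_MUTEX_INITIALIZER;
	struct buf *a = buf_create(4096, NULL);		/* private reservation */
	struct buf *b = buf_create(4096, &shared);	/* shares the caller's lock */
	struct buf *c = buf_create(4096, &shared);	/* ...and so does this one */

	if (!a || !b || !c)
		return 1;
	printf("b and c share a lock: %d\n", b->lock == c->lock);
	printf("a shares with b:      %d\n", a->lock == b->lock);
	free(a); free(b); free(c);
	return 0;
}

Sharing the reservation is what lets the prime path lock the dma-buf's ww_mutex around creation and lets page tables be fenced through the page directory.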
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index d9652fe32d6a..59f735a933a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -61,12 +61,15 @@ struct drm_gem_object *amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sg)
{
+ struct reservation_object *resv = attach->dmabuf->resv;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_bo *bo;
int ret;
+ ww_mutex_lock(&resv->lock, NULL);
ret = amdgpu_bo_create(adev, attach->dmabuf->size, PAGE_SIZE, false,
- AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo);
+ AMDGPU_GEM_DOMAIN_GTT, 0, sg, resv, &bo);
+ ww_mutex_unlock(&resv->lock);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 9bec91484c24..30dce235ddeb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -357,11 +357,11 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
ring->adev = adev;
ring->idx = adev->num_rings++;
adev->rings[ring->idx] = ring;
- amdgpu_fence_driver_init_ring(ring);
+ r = amdgpu_fence_driver_init_ring(ring);
+ if (r)
+ return r;
}
- init_waitqueue_head(&ring->fence_drv.fence_queue);
-
r = amdgpu_wb_get(adev, &ring->rptr_offs);
if (r) {
dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
@@ -407,7 +407,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
if (ring->ring_obj == NULL) {
r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0,
- NULL, &ring->ring_obj);
+ NULL, NULL, &ring->ring_obj);
if (r) {
dev_err(adev->dev, "(%d) ring create failed\n", r);
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 74dad270362c..e90712443fe9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -64,8 +64,8 @@ int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
INIT_LIST_HEAD(&sa_manager->flist[i]);
}
- r = amdgpu_bo_create(adev, size, align, true,
- domain, 0, NULL, &sa_manager->bo);
+ r = amdgpu_bo_create(adev, size, align, true, domain,
+ 0, NULL, NULL, &sa_manager->bo);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
@@ -145,8 +145,13 @@ static uint32_t amdgpu_sa_get_ring_from_fence(struct fence *f)
struct amd_sched_fence *s_fence;
s_fence = to_amd_sched_fence(f);
- if (s_fence)
- return s_fence->scheduler->ring_id;
+ if (s_fence) {
+ struct amdgpu_ring *ring;
+
+ ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
+ return ring->idx;
+ }
+
a_fence = to_amdgpu_fence(f);
if (a_fence)
return a_fence->ring->idx;
@@ -412,6 +417,26 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
}
#if defined(CONFIG_DEBUG_FS)
+
+static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
+{
+ struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
+ struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);
+
+ if (a_fence)
+ seq_printf(m, " protected by 0x%016llx on ring %d",
+ a_fence->seq, a_fence->ring->idx);
+
+ if (s_fence) {
+ struct amdgpu_ring *ring;
+
+
+ ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
+ seq_printf(m, " protected by 0x%016x on ring %d",
+ s_fence->base.seqno, ring->idx);
+ }
+}
+
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m)
{
@@ -428,18 +453,8 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
}
seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
soffset, eoffset, eoffset - soffset);
- if (i->fence) {
- struct amdgpu_fence *a_fence = to_amdgpu_fence(i->fence);
- struct amd_sched_fence *s_fence = to_amd_sched_fence(i->fence);
- if (a_fence)
- seq_printf(m, " protected by 0x%016llx on ring %d",
- a_fence->seq, a_fence->ring->idx);
- if (s_fence)
- seq_printf(m, " protected by 0x%016x on ring %d",
- s_fence->base.seqno,
- s_fence->scheduler->ring_id);
-
- }
+ if (i->fence)
+ amdgpu_sa_bo_dump_fence(i->fence, m);
seq_printf(m, "\n");
}
spin_unlock(&sa_manager->wq.lock);
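
With the scheduler now embedded in struct amdgpu_ring, the hunks above recover the owning ring from a scheduler fence via container_of() instead of reading a ring_id field. A self-contained sketch of that recovery; the struct names are invented for the example:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct sched {
	const char *name;
};

/* The scheduler is embedded in the ring, as in the patch above. */
struct ring {
	int idx;
	struct sched sched;
};

int main(void)
{
	struct ring r = { .idx = 2, .sched = { .name = "gfx" } };
	struct sched *s = &r.sched;

	/* Recover the enclosing ring from the embedded scheduler. */
	struct ring *owner = container_of(s, struct ring, sched);

	printf("ring %d owns scheduler %s\n", owner->idx, s->name);
	return 0;
}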
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index de98fbd2971e..2e946b2cad88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -27,63 +27,48 @@
#include <drm/drmP.h>
#include "amdgpu.h"
-static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job)
+static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
{
- struct amdgpu_job *sched_job = (struct amdgpu_job *)job;
- return amdgpu_sync_get_fence(&sched_job->ibs->sync);
+ struct amdgpu_job *job = to_amdgpu_job(sched_job);
+ return amdgpu_sync_get_fence(&job->ibs->sync);
}
-static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
+static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
{
- struct amdgpu_job *sched_job;
- struct amdgpu_fence *fence;
+ struct amdgpu_fence *fence = NULL;
+ struct amdgpu_job *job;
int r;
- if (!job) {
+ if (!sched_job) {
DRM_ERROR("job is null\n");
return NULL;
}
- sched_job = (struct amdgpu_job *)job;
- mutex_lock(&sched_job->job_lock);
- r = amdgpu_ib_schedule(sched_job->adev,
- sched_job->num_ibs,
- sched_job->ibs,
- sched_job->base.owner);
- if (r)
+ job = to_amdgpu_job(sched_job);
+ mutex_lock(&job->job_lock);
+ r = amdgpu_ib_schedule(job->adev,
+ job->num_ibs,
+ job->ibs,
+ job->base.owner);
+ if (r) {
+ DRM_ERROR("Error scheduling IBs (%d)\n", r);
goto err;
- fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
-
- if (sched_job->free_job)
- sched_job->free_job(sched_job);
+ }
- mutex_unlock(&sched_job->job_lock);
- return &fence->base;
+ fence = amdgpu_fence_ref(job->ibs[job->num_ibs - 1].fence);
err:
- DRM_ERROR("Run job error\n");
- mutex_unlock(&sched_job->job_lock);
- job->sched->ops->process_job(job);
- return NULL;
-}
+ if (job->free_job)
+ job->free_job(job);
-static void amdgpu_sched_process_job(struct amd_sched_job *job)
-{
- struct amdgpu_job *sched_job;
-
- if (!job) {
- DRM_ERROR("job is null\n");
- return;
- }
- sched_job = (struct amdgpu_job *)job;
- /* after processing job, free memory */
- fence_put(&sched_job->base.s_fence->base);
- kfree(sched_job);
+ mutex_unlock(&job->job_lock);
+ fence_put(&job->base.s_fence->base);
+ kfree(job);
+ return fence ? &fence->base : NULL;
}
struct amd_sched_backend_ops amdgpu_sched_ops = {
.dependency = amdgpu_sched_dependency,
.run_job = amdgpu_sched_run_job,
- .process_job = amdgpu_sched_process_job
};
int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
@@ -100,7 +85,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
if (!job)
return -ENOMEM;
- job->base.sched = ring->scheduler;
+ job->base.sched = &ring->sched;
job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
job->adev = adev;
job->ibs = ibs;
@@ -109,7 +94,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
mutex_init(&job->job_lock);
job->free_job = free_job;
mutex_lock(&job->job_lock);
- r = amd_sched_entity_push_job((struct amd_sched_job *)job);
+ r = amd_sched_entity_push_job(&job->base);
if (r) {
mutex_unlock(&job->job_lock);
kfree(job);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 068aeaff7183..4921de15b451 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -65,8 +65,14 @@ static bool amdgpu_sync_same_dev(struct amdgpu_device *adev, struct fence *f)
if (a_fence)
return a_fence->ring->adev == adev;
- if (s_fence)
- return (struct amdgpu_device *)s_fence->scheduler->priv == adev;
+
+ if (s_fence) {
+ struct amdgpu_ring *ring;
+
+ ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
+ return ring->adev == adev;
+ }
+
return false;
}
@@ -251,6 +257,20 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync)
fence_put(e->fence);
kfree(e);
}
+
+ if (amdgpu_enable_semaphores)
+ return 0;
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_fence *fence = sync->sync_to[i];
+ if (!fence)
+ continue;
+
+ r = fence_wait(&fence->base, false);
+ if (r)
+ return r;
+ }
+
return 0;
}
@@ -285,7 +305,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
return -EINVAL;
}
- if (amdgpu_enable_scheduler || (count >= AMDGPU_NUM_SYNCS)) {
+ if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores ||
+ (count >= AMDGPU_NUM_SYNCS)) {
/* not enough room, wait manually */
r = fence_wait(&fence->base, false);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
index f80b1a43be8a..4865615e9c06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c
@@ -59,8 +59,9 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
goto out_cleanup;
}
- r = amdgpu_bo_create(adev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, 0,
- NULL, &vram_obj);
+ r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
+ AMDGPU_GEM_DOMAIN_VRAM, 0,
+ NULL, NULL, &vram_obj);
if (r) {
DRM_ERROR("Failed to create VRAM object\n");
goto out_cleanup;
@@ -80,7 +81,8 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev)
struct fence *fence = NULL;
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, gtt_obj + i);
+ AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+ NULL, gtt_obj + i);
if (r) {
DRM_ERROR("Failed to create GTT object %d\n", i);
goto out_lclean;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index b5abd5cde413..364cbe975332 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -861,7 +861,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, &adev->stollen_vga_memory);
+ NULL, NULL, &adev->stollen_vga_memory);
if (r) {
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 482e66797ae6..5cc95f1a7dab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -247,7 +247,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
const struct common_firmware_header *header = NULL;
err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL, bo);
+ AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, bo);
if (err) {
dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
err = -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2cf6c6b06e3b..d0312364d950 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -156,7 +156,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, &adev->uvd.vcpu_bo);
+ NULL, NULL, &adev->uvd.vcpu_bo);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
return r;
@@ -543,46 +543,60 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
return -EINVAL;
}
- if (msg_type == 1) {
+ switch (msg_type) {
+ case 0:
+ /* it's a create msg, calc image size (width * height) */
+ amdgpu_bo_kunmap(bo);
+
+ /* try to alloc a new handle */
+ for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+ if (atomic_read(&adev->uvd.handles[i]) == handle) {
+ DRM_ERROR("Handle 0x%x already in use!\n", handle);
+ return -EINVAL;
+ }
+
+ if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
+ adev->uvd.filp[i] = ctx->parser->filp;
+ return 0;
+ }
+ }
+
+ DRM_ERROR("No more free UVD handles!\n");
+ return -EINVAL;
+
+ case 1:
/* it's a decode msg, calc buffer sizes */
r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
amdgpu_bo_kunmap(bo);
if (r)
return r;
- } else if (msg_type == 2) {
+ /* validate the handle */
+ for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
+ if (atomic_read(&adev->uvd.handles[i]) == handle) {
+ if (adev->uvd.filp[i] != ctx->parser->filp) {
+ DRM_ERROR("UVD handle collision detected!\n");
+ return -EINVAL;
+ }
+ return 0;
+ }
+ }
+
+ DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
+ return -ENOENT;
+
+ case 2:
/* it's a destroy msg, free the handle */
for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
amdgpu_bo_kunmap(bo);
return 0;
- } else {
- /* it's a create msg */
- amdgpu_bo_kunmap(bo);
-
- if (msg_type != 0) {
- DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
- return -EINVAL;
- }
-
- /* it's a create msg, no special handling needed */
- }
-
- /* create or decode, validate the handle */
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
- if (atomic_read(&adev->uvd.handles[i]) == handle)
- return 0;
- }
- /* handle not found try to alloc a new one */
- for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
- if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
- adev->uvd.filp[i] = ctx->parser->filp;
- return 0;
- }
+ default:
+ DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+ return -EINVAL;
}
-
- DRM_ERROR("No more free UVD handles!\n");
+ BUG();
return -EINVAL;
}
@@ -805,10 +819,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
}
static int amdgpu_uvd_free_job(
- struct amdgpu_job *sched_job)
+ struct amdgpu_job *job)
{
- amdgpu_ib_free(sched_job->adev, sched_job->ibs);
- kfree(sched_job->ibs);
+ amdgpu_ib_free(job->adev, job->ibs);
+ kfree(job->ibs);
return 0;
}
@@ -905,7 +919,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, &bo);
+ NULL, NULL, &bo);
if (r)
return r;
@@ -954,7 +968,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, &bo);
+ NULL, NULL, &bo);
if (r)
return r;
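
The UVD message check above becomes a switch: create messages (type 0) claim a free handle slot with atomic_cmpxchg and record the owning file, decode messages (type 1) require the handle to exist and belong to the same file, and destroy messages (type 2) release the slot. A user-space sketch of such a handle table using C11 atomics; handle_create, handle_validate and handle_destroy are made-up names, not driver functions:

#include <stdatomic.h>
#include <stdio.h>

#define MAX_HANDLES 4

static atomic_uint handles[MAX_HANDLES];
static const void *owners[MAX_HANDLES];

/* Create: claim a free slot for this handle, remembering the owner. */
static int handle_create(unsigned int handle, const void *owner)
{
	for (int i = 0; i < MAX_HANDLES; i++) {
		unsigned int expected = 0;

		if (atomic_load(&handles[i]) == handle)
			return -1;		/* already in use */
		if (atomic_compare_exchange_strong(&handles[i], &expected, handle)) {
			owners[i] = owner;
			return 0;
		}
	}
	return -1;				/* table full */
}

/* Decode: the handle must exist and belong to the same owner. */
static int handle_validate(unsigned int handle, const void *owner)
{
	for (int i = 0; i < MAX_HANDLES; i++)
		if (atomic_load(&handles[i]) == handle)
			return owners[i] == owner ? 0 : -1;
	return -1;				/* unknown handle */
}

/* Destroy: release whichever slot holds this handle. */
static void handle_destroy(unsigned int handle)
{
	for (int i = 0; i < MAX_HANDLES; i++) {
		unsigned int expected = handle;

		atomic_compare_exchange_strong(&handles[i], &expected, 0);
	}
}

int main(void)
{
	int me, other;

	printf("create: %d\n", handle_create(0x10, &me));
	printf("validate by owner: %d\n", handle_validate(0x10, &me));
	printf("validate by intruder: %d\n", handle_validate(0x10, &other));
	handle_destroy(0x10);
	printf("validate after destroy: %d\n", handle_validate(0x10, &me));
	return 0;
}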
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 3cab96c42aa8..74f2038ac747 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -143,7 +143,7 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
r = amdgpu_bo_create(adev, size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, &adev->vce.vcpu_bo);
+ NULL, NULL, &adev->vce.vcpu_bo);
if (r) {
dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
return r;
@@ -342,10 +342,10 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
}
static int amdgpu_vce_free_job(
- struct amdgpu_job *sched_job)
+ struct amdgpu_job *job)
{
- amdgpu_ib_free(sched_job->adev, sched_job->ibs);
- kfree(sched_job->ibs);
+ amdgpu_ib_free(job->adev, job->ibs);
+ kfree(job->ibs);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f68b7cdc370a..1e14531353e0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -316,12 +316,12 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
}
}
-int amdgpu_vm_free_job(struct amdgpu_job *sched_job)
+int amdgpu_vm_free_job(struct amdgpu_job *job)
{
int i;
- for (i = 0; i < sched_job->num_ibs; i++)
- amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
- kfree(sched_job->ibs);
+ for (i = 0; i < job->num_ibs; i++)
+ amdgpu_ib_free(job->adev, &job->ibs[i]);
+ kfree(job->ibs);
return 0;
}
@@ -686,31 +686,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
}
/**
- * amdgpu_vm_fence_pts - fence page tables after an update
- *
- * @vm: requested vm
- * @start: start of GPU address range
- * @end: end of GPU address range
- * @fence: fence to use
- *
- * Fence the page tables in the range @start - @end (cayman+).
- *
- * Global and local mutex must be locked!
- */
-static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
- uint64_t start, uint64_t end,
- struct fence *fence)
-{
- unsigned i;
-
- start >>= amdgpu_vm_block_size;
- end >>= amdgpu_vm_block_size;
-
- for (i = start; i <= end; ++i)
- amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
-}
-
-/**
* amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
*
* @adev: amdgpu_device pointer
@@ -813,8 +788,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- amdgpu_vm_fence_pts(vm, mapping->it.start,
- mapping->it.last + 1, f);
+ amdgpu_bo_fence(vm->page_directory, f, true);
if (fence) {
fence_put(*fence);
*fence = fence_get(f);
@@ -855,7 +829,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
int r;
if (mem) {
- addr = mem->start << PAGE_SHIFT;
+ addr = (u64)mem->start << PAGE_SHIFT;
if (mem->mem_type != TTM_PL_TT)
addr += adev->vm_manager.vram_base_offset;
} else {
@@ -1089,6 +1063,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* walk over the address space and allocate the page tables */
for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
+ struct reservation_object *resv = vm->page_directory->tbo.resv;
struct amdgpu_bo *pt;
if (vm->page_tables[pt_idx].bo)
@@ -1097,11 +1072,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* drop mutex to allocate and clear page table */
mutex_unlock(&vm->mutex);
+ ww_mutex_lock(&resv->lock, NULL);
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
AMDGPU_GPU_PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
- NULL, &pt);
+ NULL, resv, &pt);
+ ww_mutex_unlock(&resv->lock);
if (r)
goto error_free;
@@ -1303,7 +1280,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
r = amdgpu_bo_create(adev, pd_size, align, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
- NULL, &vm->page_directory);
+ NULL, NULL, &vm->page_directory);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index cd6edc40c9cd..1e0bba29e167 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -1279,8 +1279,7 @@ amdgpu_atombios_encoder_setup_dig(struct drm_encoder *encoder, int action)
amdgpu_atombios_encoder_setup_dig_encoder(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
}
if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
- ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+ amdgpu_atombios_encoder_set_backlight_level(amdgpu_encoder, dig->backlight_level);
if (ext_encoder)
amdgpu_atombios_encoder_setup_external_encoder(encoder, ext_encoder, ATOM_ENABLE);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_smc.c b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
index a72ffc7d6c26..e33180d3314a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_smc.c
@@ -814,7 +814,8 @@ int cz_smu_init(struct amdgpu_device *adev)
* 3. map kernel virtual address
*/
ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, toc_buf);
+ true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
+ toc_buf);
if (ret) {
dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
@@ -822,7 +823,8 @@ int cz_smu_init(struct amdgpu_device *adev)
}
ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
- true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, smu_buf);
+ true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
+ smu_buf);
if (ret) {
dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
index 322edea65857..bda1249eb871 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
@@ -764,7 +764,7 @@ int fiji_smu_init(struct amdgpu_device *adev)
ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, toc_buf);
+ NULL, NULL, toc_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for TOC buffer\n");
return -ENOMEM;
@@ -774,7 +774,7 @@ int fiji_smu_init(struct amdgpu_device *adev)
ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, smu_buf);
+ NULL, NULL, smu_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 4bd1e5cf65ca..e992bf2ff66c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3206,7 +3206,7 @@ static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev,
adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+ AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
&adev->gfx.mec.hpd_eop_obj);
if (r) {
dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
@@ -3373,7 +3373,7 @@ static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev,
sizeof(struct bonaire_mqd),
PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+ AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
&ring->mqd_obj);
if (r) {
dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
@@ -3610,41 +3610,6 @@ static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
return 0;
}
-static void gfx_v7_0_ce_sync_me(struct amdgpu_ring *ring)
-{
- struct amdgpu_device *adev = ring->adev;
- u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4;
-
- /* instruct DE to set a magic number */
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(5)));
- amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, 1);
-
- /* let CE wait till condition satisfied */
- amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
- amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
- WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
- WAIT_REG_MEM_FUNCTION(3) | /* == */
- WAIT_REG_MEM_ENGINE(2))); /* ce */
- amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, 1);
- amdgpu_ring_write(ring, 0xffffffff);
- amdgpu_ring_write(ring, 4); /* poll interval */
-
- /* instruct CE to reset wb of ce_sync to zero */
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
- WRITE_DATA_DST_SEL(5) |
- WR_CONFIRM));
- amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, 0);
-}
-
/*
* vm
* VMID 0 is the physical GPU addresses as used by the kernel.
@@ -3663,6 +3628,13 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ if (usepfp) {
+ /* sync CE with ME to prevent CE fetching CEIB before context switch done */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ }
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -3703,7 +3675,10 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
amdgpu_ring_write(ring, 0x0);
/* sync CE with ME to prevent CE fetching CEIB before context switch done */
- gfx_v7_0_ce_sync_me(ring);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
}
}
@@ -3788,7 +3763,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, &adev->gfx.rlc.save_restore_obj);
+ NULL, NULL,
+ &adev->gfx.rlc.save_restore_obj);
if (r) {
dev_warn(adev->dev, "(%d) create RLC sr bo failed\n", r);
return r;
@@ -3831,7 +3807,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, dws * 4, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, &adev->gfx.rlc.clear_state_obj);
+ NULL, NULL,
+ &adev->gfx.rlc.clear_state_obj);
if (r) {
dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
gfx_v7_0_rlc_fini(adev);
@@ -3870,7 +3847,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, &adev->gfx.rlc.cp_table_obj);
+ NULL, NULL,
+ &adev->gfx.rlc.cp_table_obj);
if (r) {
dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
gfx_v7_0_rlc_fini(adev);
@@ -4802,12 +4780,6 @@ static int gfx_v7_0_sw_init(void *handle)
return r;
}
- r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs);
- if (r) {
- DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r);
- return r;
- }
-
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
ring->ring_obj = NULL;
@@ -4851,21 +4823,21 @@ static int gfx_v7_0_sw_init(void *handle)
r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GDS, 0,
- NULL, &adev->gds.gds_gfx_bo);
+ NULL, NULL, &adev->gds.gds_gfx_bo);
if (r)
return r;
r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GWS, 0,
- NULL, &adev->gds.gws_gfx_bo);
+ NULL, NULL, &adev->gds.gws_gfx_bo);
if (r)
return r;
r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_OA, 0,
- NULL, &adev->gds.oa_gfx_bo);
+ NULL, NULL, &adev->gds.oa_gfx_bo);
if (r)
return r;
@@ -4886,8 +4858,6 @@ static int gfx_v7_0_sw_fini(void *handle)
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
- amdgpu_wb_free(adev, adev->gfx.ce_sync_offs);
-
gfx_v7_0_cp_compute_fini(adev);
gfx_v7_0_rlc_fini(adev);
gfx_v7_0_mec_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 53f07439a512..cb4f68f53f24 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -868,7 +868,7 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev)
r = amdgpu_bo_create(adev,
adev->gfx.mec.num_mec *adev->gfx.mec.num_pipe * MEC_HPD_SIZE * 2,
PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
+ AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
&adev->gfx.mec.hpd_eop_obj);
if (r) {
dev_warn(adev->dev, "(%d) create HDP EOP bo failed\n", r);
@@ -940,12 +940,6 @@ static int gfx_v8_0_sw_init(void *handle)
return r;
}
- r = amdgpu_wb_get(adev, &adev->gfx.ce_sync_offs);
- if (r) {
- DRM_ERROR("(%d) gfx.ce_sync_offs wb alloc failed\n", r);
- return r;
- }
-
/* set up the gfx ring */
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
ring = &adev->gfx.gfx_ring[i];
@@ -995,21 +989,21 @@ static int gfx_v8_0_sw_init(void *handle)
/* reserve GDS, GWS and OA resource for gfx */
r = amdgpu_bo_create(adev, adev->gds.mem.gfx_partition_size,
PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GDS, 0,
+ AMDGPU_GEM_DOMAIN_GDS, 0, NULL,
NULL, &adev->gds.gds_gfx_bo);
if (r)
return r;
r = amdgpu_bo_create(adev, adev->gds.gws.gfx_partition_size,
PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_GWS, 0,
+ AMDGPU_GEM_DOMAIN_GWS, 0, NULL,
NULL, &adev->gds.gws_gfx_bo);
if (r)
return r;
r = amdgpu_bo_create(adev, adev->gds.oa.gfx_partition_size,
PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_OA, 0,
+ AMDGPU_GEM_DOMAIN_OA, 0, NULL,
NULL, &adev->gds.oa_gfx_bo);
if (r)
return r;
@@ -1033,8 +1027,6 @@ static int gfx_v8_0_sw_fini(void *handle)
for (i = 0; i < adev->gfx.num_compute_rings; i++)
amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
- amdgpu_wb_free(adev, adev->gfx.ce_sync_offs);
-
gfx_v8_0_mec_fini(adev);
return 0;
@@ -3106,7 +3098,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
sizeof(struct vi_mqd),
PAGE_SIZE, true,
AMDGPU_GEM_DOMAIN_GTT, 0, NULL,
- &ring->mqd_obj);
+ NULL, &ring->mqd_obj);
if (r) {
dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
return r;
@@ -3965,6 +3957,7 @@ static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
amdgpu_ring_write(ring, lower_32_bits(seq));
amdgpu_ring_write(ring, upper_32_bits(seq));
+
}
/**
@@ -4005,49 +3998,34 @@ static bool gfx_v8_0_ring_emit_semaphore(struct amdgpu_ring *ring,
return true;
}
-static void gfx_v8_0_ce_sync_me(struct amdgpu_ring *ring)
+static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
+ unsigned vm_id, uint64_t pd_addr)
{
- struct amdgpu_device *adev = ring->adev;
- u64 gpu_addr = adev->wb.gpu_addr + adev->gfx.ce_sync_offs * 4;
-
- /* instruct DE to set a magic number */
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
- WRITE_DATA_DST_SEL(5)));
- amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, 1);
+ int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
+ uint64_t addr = ring->fence_drv.gpu_addr;
- /* let CE wait till condition satisfied */
amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
- amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
- WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
- WAIT_REG_MEM_FUNCTION(3) | /* == */
- WAIT_REG_MEM_ENGINE(2))); /* ce */
- amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, 1);
+ amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+ WAIT_REG_MEM_FUNCTION(3))); /* equal */
+ amdgpu_ring_write(ring, addr & 0xfffffffc);
+ amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+ amdgpu_ring_write(ring, seq);
amdgpu_ring_write(ring, 0xffffffff);
amdgpu_ring_write(ring, 4); /* poll interval */
- /* instruct CE to reset wb of ce_sync to zero */
- amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
- amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) |
- WRITE_DATA_DST_SEL(5) |
- WR_CONFIRM));
- amdgpu_ring_write(ring, gpu_addr & 0xfffffffc);
- amdgpu_ring_write(ring, upper_32_bits(gpu_addr) & 0xffffffff);
- amdgpu_ring_write(ring, 0);
-}
-
-static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
- unsigned vm_id, uint64_t pd_addr)
-{
- int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+ if (usepfp) {
+ /* sync CE with ME to prevent CE fetching CEIB before context switch done */
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ }
amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
- WRITE_DATA_DST_SEL(0)));
+ WRITE_DATA_DST_SEL(0)) |
+ WR_CONFIRM);
if (vm_id < 8) {
amdgpu_ring_write(ring,
(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
@@ -4083,9 +4061,10 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
/* sync PFP to ME, otherwise we might get invalid PFP reads */
amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
amdgpu_ring_write(ring, 0x0);
-
- /* synce CE with ME to prevent CE fetch CEIB before context switch done */
- gfx_v8_0_ce_sync_me(ring);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
+ amdgpu_ring_write(ring, 0);
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 774528ab8704..fab5471d25d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1262,6 +1262,12 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+ /* reset addr and status */
+ WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
+
+ if (!addr && !status)
+ return 0;
+
dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
entry->src_id, entry->src_data);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
@@ -1269,8 +1275,6 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);
- /* reset addr and status */
- WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 9a07742620d0..7bc9e9fcf3d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1262,6 +1262,12 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+ /* reset addr and status */
+ WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
+
+ if (!addr && !status)
+ return 0;
+
dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
entry->src_id, entry->src_data);
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
@@ -1269,8 +1275,6 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
status);
gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);
- /* reset addr and status */
- WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);
return 0;
}
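
Both GMC fault handlers (gmc_v7_0 and gmc_v8_0 above) now acknowledge the fault by writing VM_CONTEXT1_CNTL2 before printing, and return quietly when the latched address and status are both zero, which keeps retriggered interrupts from repeating the same report. A toy sketch of that ack-first, report-once flow with stand-in variables instead of registers:

#include <stdio.h>

/* Hypothetical latched fault state shared with an interrupt source. */
static unsigned int fault_addr, fault_status;

static void ack_fault(void)
{
	fault_addr = 0;
	fault_status = 0;	/* stand-in for WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1) */
}

static int handle_fault(void)
{
	unsigned int addr = fault_addr;
	unsigned int status = fault_status;

	ack_fault();		/* acknowledge first so a re-trigger latches fresh data */

	if (!addr && !status)	/* spurious or already-handled: stay quiet */
		return 0;

	fprintf(stderr, "GPU fault at 0x%08x, status 0x%08x\n", addr, status);
	return 0;
}

int main(void)
{
	fault_addr = 0x1000;
	fault_status = 0x3;
	handle_fault();		/* reports once */
	handle_fault();		/* sees cleared state, prints nothing */
	return 0;
}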
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index c900aa942ade..966d4b2ed9da 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -625,7 +625,7 @@ int iceland_smu_init(struct amdgpu_device *adev)
ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, toc_buf);
+ NULL, NULL, toc_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for TOC buffer\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
index 1f5ac941a610..5421309c1862 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
@@ -763,7 +763,7 @@ int tonga_smu_init(struct amdgpu_device *adev)
ret = amdgpu_bo_create(adev, image_size, PAGE_SIZE,
true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, toc_buf);
+ NULL, NULL, toc_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for TOC buffer\n");
return -ENOMEM;
@@ -773,7 +773,7 @@ int tonga_smu_init(struct amdgpu_device *adev)
ret = amdgpu_bo_create(adev, smu_internal_buffer_size, PAGE_SIZE,
true, AMDGPU_GEM_DOMAIN_VRAM,
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
- NULL, smu_buf);
+ NULL, NULL, smu_buf);
if (ret) {
DRM_ERROR("Failed to allocate memory for SMU internal buffer\n");
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 5fac5da694f0..ed50dd725788 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -224,11 +224,11 @@ static int uvd_v4_2_suspend(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = uvd_v4_2_hw_fini(adev);
+ r = amdgpu_uvd_suspend(adev);
if (r)
return r;
- r = amdgpu_uvd_suspend(adev);
+ r = uvd_v4_2_hw_fini(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 2d5c59c318af..9ad8b9906c0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -220,11 +220,11 @@ static int uvd_v5_0_suspend(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = uvd_v5_0_hw_fini(adev);
+ r = amdgpu_uvd_suspend(adev);
if (r)
return r;
- r = amdgpu_uvd_suspend(adev);
+ r = uvd_v5_0_hw_fini(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index d9f553fce531..7e9934fa4193 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -214,14 +214,16 @@ static int uvd_v6_0_suspend(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* Skip this for APU for now */
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = amdgpu_uvd_suspend(adev);
+ if (r)
+ return r;
+ }
r = uvd_v6_0_hw_fini(adev);
if (r)
return r;
- r = amdgpu_uvd_suspend(adev);
- if (r)
- return r;
-
return r;
}
@@ -230,10 +232,12 @@ static int uvd_v6_0_resume(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- r = amdgpu_uvd_resume(adev);
- if (r)
- return r;
-
+ /* Skip this for APU for now */
+ if (!(adev->flags & AMD_IS_APU)) {
+ r = amdgpu_uvd_resume(adev);
+ if (r)
+ return r;
+ }
r = uvd_v6_0_hw_init(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 552d9e75ad1b..b55ceb14fdcd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1400,7 +1400,8 @@ static int vi_common_early_init(void *handle)
case CHIP_CARRIZO:
adev->has_uvd = true;
adev->cg_flags = 0;
- adev->pg_flags = AMDGPU_PG_SUPPORT_UVD | AMDGPU_PG_SUPPORT_VCE;
+ /* Disable UVD pg */
+ adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
adev->external_rev_id = adev->rev_id + 0x1;
if (amdgpu_smc_load_fw && smc_enabled)
adev->firmware.smu_load = true;
diff --git a/drivers/gpu/drm/amd/include/cgs_linux.h b/drivers/gpu/drm/amd/include/cgs_linux.h
index 488642f08267..3b47ae313e36 100644
--- a/drivers/gpu/drm/amd/include/cgs_linux.h
+++ b/drivers/gpu/drm/amd/include/cgs_linux.h
@@ -27,19 +27,6 @@
#include "cgs_common.h"
/**
- * cgs_import_gpu_mem() - Import dmabuf handle
- * @cgs_device: opaque device handle
- * @dmabuf_fd: DMABuf file descriptor
- * @handle: memory handle (output)
- *
- * Must be called in the process context that dmabuf_fd belongs to.
- *
- * Return: 0 on success, -errno otherwise
- */
-typedef int (*cgs_import_gpu_mem_t)(void *cgs_device, int dmabuf_fd,
- cgs_handle_t *handle);
-
-/**
* cgs_irq_source_set_func() - Callback for enabling/disabling interrupt sources
* @private_data: private data provided to cgs_add_irq_source
* @src_id: interrupt source ID
@@ -114,16 +101,12 @@ typedef int (*cgs_irq_get_t)(void *cgs_device, unsigned src_id, unsigned type);
typedef int (*cgs_irq_put_t)(void *cgs_device, unsigned src_id, unsigned type);
struct cgs_os_ops {
- cgs_import_gpu_mem_t import_gpu_mem;
-
/* IRQ handling */
cgs_add_irq_source_t add_irq_source;
cgs_irq_get_t irq_get;
cgs_irq_put_t irq_put;
};
-#define cgs_import_gpu_mem(dev,dmabuf_fd,handle) \
- CGS_OS_CALL(import_gpu_mem,dev,dmabuf_fd,handle)
#define cgs_add_irq_source(dev,src_id,num_types,set,handler,private_data) \
CGS_OS_CALL(add_irq_source,dev,src_id,num_types,set,handler, \
private_data)
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
new file mode 100644
index 000000000000..144f50acc971
--- /dev/null
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -0,0 +1,41 @@
+#if !defined(_GPU_SCHED_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _GPU_SCHED_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drmP.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gpu_sched
+#define TRACE_INCLUDE_FILE gpu_sched_trace
+
+TRACE_EVENT(amd_sched_job,
+ TP_PROTO(struct amd_sched_job *sched_job),
+ TP_ARGS(sched_job),
+ TP_STRUCT__entry(
+ __field(struct amd_sched_entity *, entity)
+ __field(const char *, name)
+ __field(u32, job_count)
+ __field(int, hw_job_count)
+ ),
+
+ TP_fast_assign(
+ __entry->entity = sched_job->s_entity;
+ __entry->name = sched_job->sched->name;
+ __entry->job_count = kfifo_len(
+ &sched_job->s_entity->job_queue) / sizeof(sched_job);
+ __entry->hw_job_count = atomic_read(
+ &sched_job->sched->hw_rq_count);
+ ),
+ TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d",
+ __entry->entity, __entry->name, __entry->job_count,
+ __entry->hw_job_count)
+);
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 9259f1b6664c..3697eeeecf82 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -27,6 +27,9 @@
#include <drm/drmP.h>
#include "gpu_scheduler.h"
+#define CREATE_TRACE_POINTS
+#include "gpu_sched_trace.h"
+
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
@@ -65,29 +68,29 @@ static struct amd_sched_job *
amd_sched_rq_select_job(struct amd_sched_rq *rq)
{
struct amd_sched_entity *entity;
- struct amd_sched_job *job;
+ struct amd_sched_job *sched_job;
spin_lock(&rq->lock);
entity = rq->current_entity;
if (entity) {
list_for_each_entry_continue(entity, &rq->entities, list) {
- job = amd_sched_entity_pop_job(entity);
- if (job) {
+ sched_job = amd_sched_entity_pop_job(entity);
+ if (sched_job) {
rq->current_entity = entity;
spin_unlock(&rq->lock);
- return job;
+ return sched_job;
}
}
}
list_for_each_entry(entity, &rq->entities, list) {
- job = amd_sched_entity_pop_job(entity);
- if (job) {
+ sched_job = amd_sched_entity_pop_job(entity);
+ if (sched_job) {
rq->current_entity = entity;
spin_unlock(&rq->lock);
- return job;
+ return sched_job;
}
if (entity == rq->current_entity)
@@ -115,23 +118,27 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
struct amd_sched_rq *rq,
uint32_t jobs)
{
+ int r;
+
if (!(sched && entity && rq))
return -EINVAL;
memset(entity, 0, sizeof(struct amd_sched_entity));
- entity->belongto_rq = rq;
- entity->scheduler = sched;
- entity->fence_context = fence_context_alloc(1);
- if(kfifo_alloc(&entity->job_queue,
- jobs * sizeof(void *),
- GFP_KERNEL))
- return -EINVAL;
+ INIT_LIST_HEAD(&entity->list);
+ entity->rq = rq;
+ entity->sched = sched;
spin_lock_init(&entity->queue_lock);
+ r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
+ if (r)
+ return r;
+
atomic_set(&entity->fence_seq, 0);
+ entity->fence_context = fence_context_alloc(1);
/* Add the entity to the run queue */
amd_sched_rq_add_entity(rq, entity);
+
return 0;
}
@@ -146,8 +153,8 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity)
{
- return entity->scheduler == sched &&
- entity->belongto_rq != NULL;
+ return entity->sched == sched &&
+ entity->rq != NULL;
}
/**
@@ -177,7 +184,7 @@ static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity)
{
- struct amd_sched_rq *rq = entity->belongto_rq;
+ struct amd_sched_rq *rq = entity->rq;
if (!amd_sched_entity_is_initialized(sched, entity))
return;
@@ -198,22 +205,22 @@ static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
container_of(cb, struct amd_sched_entity, cb);
entity->dependency = NULL;
fence_put(f);
- amd_sched_wakeup(entity->scheduler);
+ amd_sched_wakeup(entity->sched);
}
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
- struct amd_gpu_scheduler *sched = entity->scheduler;
- struct amd_sched_job *job;
+ struct amd_gpu_scheduler *sched = entity->sched;
+ struct amd_sched_job *sched_job;
if (ACCESS_ONCE(entity->dependency))
return NULL;
- if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job)))
+ if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
return NULL;
- while ((entity->dependency = sched->ops->dependency(job))) {
+ while ((entity->dependency = sched->ops->dependency(sched_job))) {
if (fence_add_callback(entity->dependency, &entity->cb,
amd_sched_entity_wakeup))
@@ -222,32 +229,33 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
return NULL;
}
- return job;
+ return sched_job;
}
/**
* Helper to submit a job to the job queue
*
- * @job The pointer to job required to submit
+ * @sched_job The pointer to job required to submit
*
* Returns true if we could submit the job.
*/
-static bool amd_sched_entity_in(struct amd_sched_job *job)
+static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
- struct amd_sched_entity *entity = job->s_entity;
+ struct amd_sched_entity *entity = sched_job->s_entity;
bool added, first = false;
spin_lock(&entity->queue_lock);
- added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job);
+ added = kfifo_in(&entity->job_queue, &sched_job,
+ sizeof(sched_job)) == sizeof(sched_job);
- if (added && kfifo_len(&entity->job_queue) == sizeof(job))
+ if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
first = true;
spin_unlock(&entity->queue_lock);
/* first job wakes up scheduler */
if (first)
- amd_sched_wakeup(job->sched);
+ amd_sched_wakeup(sched_job->sched);
return added;
}
@@ -255,7 +263,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *job)
/**
* Submit a job to the job queue
*
- * @job The pointer to job required to submit
+ * @sched_job The pointer to job required to submit
*
* Returns 0 for success, negative error code otherwise.
*/
@@ -271,9 +279,9 @@ int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
fence_get(&fence->base);
sched_job->s_fence = fence;
- wait_event(entity->scheduler->job_scheduled,
+ wait_event(entity->sched->job_scheduled,
amd_sched_entity_in(sched_job));
-
+ trace_amd_sched_job(sched_job);
return 0;
}
@@ -301,30 +309,28 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
static struct amd_sched_job *
amd_sched_select_job(struct amd_gpu_scheduler *sched)
{
- struct amd_sched_job *job;
+ struct amd_sched_job *sched_job;
if (!amd_sched_ready(sched))
return NULL;
/* Kernel run queue has higher priority than normal run queue*/
- job = amd_sched_rq_select_job(&sched->kernel_rq);
- if (job == NULL)
- job = amd_sched_rq_select_job(&sched->sched_rq);
+ sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
+ if (sched_job == NULL)
+ sched_job = amd_sched_rq_select_job(&sched->sched_rq);
- return job;
+ return sched_job;
}
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
- struct amd_sched_job *sched_job =
- container_of(cb, struct amd_sched_job, cb);
- struct amd_gpu_scheduler *sched;
+ struct amd_sched_fence *s_fence =
+ container_of(cb, struct amd_sched_fence, cb);
+ struct amd_gpu_scheduler *sched = s_fence->sched;
- sched = sched_job->sched;
- amd_sched_fence_signal(sched_job->s_fence);
atomic_dec(&sched->hw_rq_count);
- fence_put(&sched_job->s_fence->base);
- sched->ops->process_job(sched_job);
+ amd_sched_fence_signal(s_fence);
+ fence_put(&s_fence->base);
wake_up_interruptible(&sched->wake_up_worker);
}
@@ -338,87 +344,82 @@ static int amd_sched_main(void *param)
while (!kthread_should_stop()) {
struct amd_sched_entity *entity;
- struct amd_sched_job *job;
+ struct amd_sched_fence *s_fence;
+ struct amd_sched_job *sched_job;
struct fence *fence;
wait_event_interruptible(sched->wake_up_worker,
kthread_should_stop() ||
- (job = amd_sched_select_job(sched)));
+ (sched_job = amd_sched_select_job(sched)));
- if (!job)
+ if (!sched_job)
continue;
- entity = job->s_entity;
+ entity = sched_job->s_entity;
+ s_fence = sched_job->s_fence;
atomic_inc(&sched->hw_rq_count);
- fence = sched->ops->run_job(job);
+ fence = sched->ops->run_job(sched_job);
if (fence) {
- r = fence_add_callback(fence, &job->cb,
+ r = fence_add_callback(fence, &s_fence->cb,
amd_sched_process_job);
if (r == -ENOENT)
- amd_sched_process_job(fence, &job->cb);
+ amd_sched_process_job(fence, &s_fence->cb);
else if (r)
DRM_ERROR("fence add callback failed (%d)\n", r);
fence_put(fence);
+ } else {
+ DRM_ERROR("Failed to run job!\n");
+ amd_sched_process_job(NULL, &s_fence->cb);
}
- count = kfifo_out(&entity->job_queue, &job, sizeof(job));
- WARN_ON(count != sizeof(job));
+ count = kfifo_out(&entity->job_queue, &sched_job,
+ sizeof(sched_job));
+ WARN_ON(count != sizeof(sched_job));
wake_up(&sched->job_scheduled);
}
return 0;
}
/**
- * Create a gpu scheduler
+ * Init a gpu scheduler instance
*
+ * @sched The pointer to the scheduler
* @ops The backend operations for this scheduler.
- * @ring The the ring id for the scheduler.
* @hw_submissions Number of hw submissions to do.
+ * @name Name used for debugging
*
- * Return the pointer to scheduler for success, otherwise return NULL
+ * Return 0 on success, otherwise error code.
*/
-struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
- unsigned ring, unsigned hw_submission,
- void *priv)
+int amd_sched_init(struct amd_gpu_scheduler *sched,
+ struct amd_sched_backend_ops *ops,
+ unsigned hw_submission, const char *name)
{
- struct amd_gpu_scheduler *sched;
-
- sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
- if (!sched)
- return NULL;
-
sched->ops = ops;
- sched->ring_id = ring;
sched->hw_submission_limit = hw_submission;
- sched->priv = priv;
- snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
+ sched->name = name;
amd_sched_rq_init(&sched->sched_rq);
amd_sched_rq_init(&sched->kernel_rq);
init_waitqueue_head(&sched->wake_up_worker);
init_waitqueue_head(&sched->job_scheduled);
atomic_set(&sched->hw_rq_count, 0);
+
/* Each scheduler will run on a separate kernel thread */
sched->thread = kthread_run(amd_sched_main, sched, sched->name);
if (IS_ERR(sched->thread)) {
- DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
- kfree(sched);
- return NULL;
+ DRM_ERROR("Failed to create scheduler for %s.\n", name);
+ return PTR_ERR(sched->thread);
}
- return sched;
+ return 0;
}
/**
* Destroy a gpu scheduler
*
* @sched The pointer to the scheduler
- *
- * return 0 if succeed. -1 if failed.
*/
-int amd_sched_destroy(struct amd_gpu_scheduler *sched)
+void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
kthread_stop(sched->thread);
- kfree(sched);
- return 0;
}
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 2af0e4d4d817..80b64dc22214 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -38,13 +38,15 @@ struct amd_sched_rq;
*/
struct amd_sched_entity {
struct list_head list;
- struct amd_sched_rq *belongto_rq;
- atomic_t fence_seq;
- /* the job_queue maintains the jobs submitted by clients */
- struct kfifo job_queue;
+ struct amd_sched_rq *rq;
+ struct amd_gpu_scheduler *sched;
+
spinlock_t queue_lock;
- struct amd_gpu_scheduler *scheduler;
+ struct kfifo job_queue;
+
+ atomic_t fence_seq;
uint64_t fence_context;
+
struct fence *dependency;
struct fence_cb cb;
};
@@ -62,13 +64,13 @@ struct amd_sched_rq {
struct amd_sched_fence {
struct fence base;
- struct amd_gpu_scheduler *scheduler;
+ struct fence_cb cb;
+ struct amd_gpu_scheduler *sched;
spinlock_t lock;
void *owner;
};
struct amd_sched_job {
- struct fence_cb cb;
struct amd_gpu_scheduler *sched;
struct amd_sched_entity *s_entity;
struct amd_sched_fence *s_fence;
@@ -91,32 +93,29 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
* these functions should be implemented in driver side
*/
struct amd_sched_backend_ops {
- struct fence *(*dependency)(struct amd_sched_job *job);
- struct fence *(*run_job)(struct amd_sched_job *job);
- void (*process_job)(struct amd_sched_job *job);
+ struct fence *(*dependency)(struct amd_sched_job *sched_job);
+ struct fence *(*run_job)(struct amd_sched_job *sched_job);
};
/**
* One scheduler is implemented for each hardware ring
*/
struct amd_gpu_scheduler {
- struct task_struct *thread;
+ struct amd_sched_backend_ops *ops;
+ uint32_t hw_submission_limit;
+ const char *name;
struct amd_sched_rq sched_rq;
struct amd_sched_rq kernel_rq;
- atomic_t hw_rq_count;
- struct amd_sched_backend_ops *ops;
- uint32_t ring_id;
wait_queue_head_t wake_up_worker;
wait_queue_head_t job_scheduled;
- uint32_t hw_submission_limit;
- char name[20];
- void *priv;
+ atomic_t hw_rq_count;
+ struct task_struct *thread;
};
-struct amd_gpu_scheduler *
-amd_sched_create(struct amd_sched_backend_ops *ops,
- uint32_t ring, uint32_t hw_submission, void *priv);
-int amd_sched_destroy(struct amd_gpu_scheduler *sched);
+int amd_sched_init(struct amd_gpu_scheduler *sched,
+ struct amd_sched_backend_ops *ops,
+ uint32_t hw_submission, const char *name);
+void amd_sched_fini(struct amd_gpu_scheduler *sched);
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
struct amd_sched_entity *entity,
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index e62c37920e11..d802638094f4 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -36,7 +36,7 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
if (fence == NULL)
return NULL;
fence->owner = owner;
- fence->scheduler = s_entity->scheduler;
+ fence->sched = s_entity->sched;
spin_lock_init(&fence->lock);
seq = atomic_inc_return(&s_entity->fence_seq);
@@ -63,7 +63,7 @@ static const char *amd_sched_fence_get_driver_name(struct fence *fence)
static const char *amd_sched_fence_get_timeline_name(struct fence *f)
{
struct amd_sched_fence *fence = to_amd_sched_fence(f);
- return (const char *)fence->scheduler->name;
+ return (const char *)fence->sched->name;
}
static bool amd_sched_fence_enable_signaling(struct fence *f)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index e23df5fd3836..bf27a07dbce3 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -53,8 +53,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes);
-static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_branch *mstb);
+static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb,
struct drm_dp_mst_port *port);
@@ -804,8 +804,6 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
struct drm_dp_mst_port *port, *tmp;
bool wake_tx = false;
- cancel_work_sync(&mstb->mgr->work);
-
/*
* destroy all ports - don't need lock
* as there are no more references to the mst branch
@@ -863,29 +861,33 @@ static void drm_dp_destroy_port(struct kref *kref)
{
struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
+
if (!port->input) {
port->vcpi.num_slots = 0;
kfree(port->cached_edid);
- /* we can't destroy the connector here, as
- we might be holding the mode_config.mutex
- from an EDID retrieval */
+ /*
+ * The only time we don't have a connector
+ * on an output port is if the connector init
+ * fails.
+ */
if (port->connector) {
+ /* we can't destroy the connector here, as
+ * we might be holding the mode_config.mutex
+ * from an EDID retrieval */
+
mutex_lock(&mgr->destroy_connector_lock);
list_add(&port->next, &mgr->destroy_connector_list);
mutex_unlock(&mgr->destroy_connector_lock);
schedule_work(&mgr->destroy_connector_work);
return;
}
+ /* no need to clean up vcpi
+ * as if we have no connector we never setup a vcpi */
drm_dp_port_teardown_pdt(port, port->pdt);
-
- if (!port->input && port->vcpi.vcpi > 0)
- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
}
kfree(port);
-
- (*mgr->cbs->hotplug)(mgr);
}
static void drm_dp_put_port(struct drm_dp_mst_port *port)
@@ -1027,8 +1029,8 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
}
}
-static void build_mst_prop_path(struct drm_dp_mst_port *port,
- struct drm_dp_mst_branch *mstb,
+static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
+ int pnum,
char *proppath,
size_t proppath_size)
{
@@ -1041,7 +1043,7 @@ static void build_mst_prop_path(struct drm_dp_mst_port *port,
snprintf(temp, sizeof(temp), "-%d", port_num);
strlcat(proppath, temp, proppath_size);
}
- snprintf(temp, sizeof(temp), "-%d", port->port_num);
+ snprintf(temp, sizeof(temp), "-%d", pnum);
strlcat(proppath, temp, proppath_size);
}
@@ -1105,22 +1107,32 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
drm_dp_port_teardown_pdt(port, old_pdt);
ret = drm_dp_port_setup_pdt(port);
- if (ret == true) {
+ if (ret == true)
drm_dp_send_link_address(mstb->mgr, port->mstb);
- port->mstb->link_address_sent = true;
- }
}
if (created && !port->input) {
char proppath[255];
- build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
- port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
- if (port->port_num >= 8) {
+ build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
+ port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+ if (!port->connector) {
+ /* remove it from the port list */
+ mutex_lock(&mstb->mgr->lock);
+ list_del(&port->next);
+ mutex_unlock(&mstb->mgr->lock);
+ /* drop port list reference */
+ drm_dp_put_port(port);
+ goto out;
+ }
+ if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+ drm_mode_connector_set_tile_property(port->connector);
}
+ (*mstb->mgr->cbs->register_connector)(port->connector);
}
+out:
/* put reference to this port */
drm_dp_put_port(port);
}
@@ -1202,10 +1214,9 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
{
struct drm_dp_mst_port *port;
struct drm_dp_mst_branch *mstb_child;
- if (!mstb->link_address_sent) {
+ if (!mstb->link_address_sent)
drm_dp_send_link_address(mgr, mstb);
- mstb->link_address_sent = true;
- }
+
list_for_each_entry(port, &mstb->ports, next) {
if (port->input)
continue;
@@ -1458,8 +1469,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
mutex_unlock(&mgr->qlock);
}
-static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_branch *mstb)
+static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb)
{
int len;
struct drm_dp_sideband_msg_tx *txmsg;
@@ -1467,11 +1478,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
- return -ENOMEM;
+ return;
txmsg->dst = mstb;
len = build_link_address(txmsg);
+ mstb->link_address_sent = true;
drm_dp_queue_down_tx(mgr, txmsg);
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
@@ -1499,11 +1511,12 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
}
(*mgr->cbs->hotplug)(mgr);
}
- } else
+ } else {
+ mstb->link_address_sent = false;
DRM_DEBUG_KMS("link address failed %d\n", ret);
+ }
kfree(txmsg);
- return 0;
}
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
@@ -1978,6 +1991,8 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UPSTREAM_IS_SRC);
mutex_unlock(&mgr->lock);
+ flush_work(&mgr->work);
+ flush_work(&mgr->destroy_connector_work);
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend);
@@ -2263,10 +2278,10 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
if (port->cached_edid)
edid = drm_edid_duplicate(port->cached_edid);
- else
+ else {
edid = drm_get_edid(connector, &port->aux.ddc);
-
- drm_mode_connector_set_tile_property(connector);
+ drm_mode_connector_set_tile_property(connector);
+ }
drm_dp_put_port(port);
return edid;
}
@@ -2671,7 +2686,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
struct drm_dp_mst_port *port;
-
+ bool send_hotplug = false;
/*
* Not a regular list traverse as we have to drop the destroy
* connector lock before destroying the connector, to avoid AB->BA
@@ -2694,7 +2709,10 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
if (!port->input && port->vcpi.vcpi > 0)
drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
kfree(port);
+ send_hotplug = true;
}
+ if (send_hotplug)
+ (*mgr->cbs->hotplug)(mgr);
}
/**
@@ -2747,6 +2765,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
*/
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
+ flush_work(&mgr->work);
flush_work(&mgr->destroy_connector_work);
mutex_lock(&mgr->payload_lock);
kfree(mgr->payloads);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 418d299f3b12..ca08c472311b 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -345,7 +345,11 @@ static bool restore_fbdev_mode(struct drm_fb_helper *fb_helper)
struct drm_crtc *crtc = mode_set->crtc;
int ret;
- if (crtc->funcs->cursor_set) {
+ if (crtc->funcs->cursor_set2) {
+ ret = crtc->funcs->cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
+ if (ret)
+ error = true;
+ } else if (crtc->funcs->cursor_set) {
ret = crtc->funcs->cursor_set(crtc, NULL, 0, 0, 0);
if (ret)
error = true;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 9a860ca1e9d7..d93e7378c077 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -520,7 +520,8 @@ EXPORT_SYMBOL(drm_ioctl_permit);
/** Ioctl table */
static const struct drm_ioctl_desc drm_ioctls[] = {
- DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
+ DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index d734780b31c0..a18164f2f6d2 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -94,7 +94,18 @@ static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
}
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
-static void __drm_kms_helper_poll_enable(struct drm_device *dev)
+/**
+ * drm_kms_helper_poll_enable_locked - re-enable output polling.
+ * @dev: drm_device
+ *
+ * This function re-enables the output polling work without
+ * locking the mode_config mutex.
+ *
+ * This is like drm_kms_helper_poll_enable() however it is to be
+ * called from a context where the mode_config mutex is locked
+ * already.
+ */
+void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
{
bool poll = false;
struct drm_connector *connector;
@@ -113,6 +124,8 @@ static void __drm_kms_helper_poll_enable(struct drm_device *dev)
if (poll)
schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
}
+EXPORT_SYMBOL(drm_kms_helper_poll_enable_locked);
+
static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
uint32_t maxX, uint32_t maxY, bool merge_type_bits)
@@ -174,7 +187,7 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
/* Re-enable polling in case the global poll config changed. */
if (drm_kms_helper_poll != dev->mode_config.poll_running)
- __drm_kms_helper_poll_enable(dev);
+ drm_kms_helper_poll_enable_locked(dev);
dev->mode_config.poll_running = drm_kms_helper_poll;
@@ -428,7 +441,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_disable);
void drm_kms_helper_poll_enable(struct drm_device *dev)
{
mutex_lock(&dev->mode_config.mutex);
- __drm_kms_helper_poll_enable(dev);
+ drm_kms_helper_poll_enable_locked(dev);
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index cbdb78ef3bac..e6cbaca821a4 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -37,7 +37,6 @@
* DECON stands for Display and Enhancement controller.
*/
-#define DECON_DEFAULT_FRAMERATE 60
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
#define WINDOWS_NR 2
@@ -165,16 +164,6 @@ static u32 decon_calc_clkdiv(struct decon_context *ctx,
return (clkdiv < 0x100) ? clkdiv : 0xff;
}
-static bool decon_mode_fixup(struct exynos_drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- if (adjusted_mode->vrefresh == 0)
- adjusted_mode->vrefresh = DECON_DEFAULT_FRAMERATE;
-
- return true;
-}
-
static void decon_commit(struct exynos_drm_crtc *crtc)
{
struct decon_context *ctx = crtc->ctx;
@@ -637,7 +626,6 @@ static void decon_disable(struct exynos_drm_crtc *crtc)
static const struct exynos_drm_crtc_ops decon_crtc_ops = {
.enable = decon_enable,
.disable = decon_disable,
- .mode_fixup = decon_mode_fixup,
.commit = decon_commit,
.enable_vblank = decon_enable_vblank,
.disable_vblank = decon_disable_vblank,
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index d66ade0efac8..124fb9a56f02 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -1383,28 +1383,6 @@ static int exynos_dp_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int exynos_dp_suspend(struct device *dev)
-{
- struct exynos_dp_device *dp = dev_get_drvdata(dev);
-
- exynos_dp_disable(&dp->encoder);
- return 0;
-}
-
-static int exynos_dp_resume(struct device *dev)
-{
- struct exynos_dp_device *dp = dev_get_drvdata(dev);
-
- exynos_dp_enable(&dp->encoder);
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops exynos_dp_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(exynos_dp_suspend, exynos_dp_resume)
-};
-
static const struct of_device_id exynos_dp_match[] = {
{ .compatible = "samsung,exynos5-dp" },
{},
@@ -1417,7 +1395,6 @@ struct platform_driver dp_driver = {
.driver = {
.name = "exynos-dp",
.owner = THIS_MODULE,
- .pm = &exynos_dp_pm_ops,
.of_match_table = exynos_dp_match,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index c68a6a2a9b57..7f55ba6771c6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -28,7 +28,6 @@ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
return 0;
}
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
{
@@ -39,7 +38,6 @@ int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
return 0;
}
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
int exynos_drm_device_subdrv_probe(struct drm_device *dev)
{
@@ -69,7 +67,6 @@ int exynos_drm_device_subdrv_probe(struct drm_device *dev)
return 0;
}
-EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_probe);
int exynos_drm_device_subdrv_remove(struct drm_device *dev)
{
@@ -87,7 +84,6 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev)
return 0;
}
-EXPORT_SYMBOL_GPL(exynos_drm_device_subdrv_remove);
int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
{
@@ -111,7 +107,6 @@ err:
}
return ret;
}
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open);
void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
{
@@ -122,4 +117,3 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
subdrv->close(dev, subdrv->dev, file);
}
}
-EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 0872aa2f450f..ed28823d3b35 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -41,20 +41,6 @@ static void exynos_drm_crtc_disable(struct drm_crtc *crtc)
exynos_crtc->ops->disable(exynos_crtc);
}
-static bool
-exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
-
- if (exynos_crtc->ops->mode_fixup)
- return exynos_crtc->ops->mode_fixup(exynos_crtc, mode,
- adjusted_mode);
-
- return true;
-}
-
static void
exynos_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
@@ -99,7 +85,6 @@ static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
.enable = exynos_drm_crtc_enable,
.disable = exynos_drm_crtc_disable,
- .mode_fixup = exynos_drm_crtc_mode_fixup,
.mode_set_nofb = exynos_drm_crtc_mode_set_nofb,
.atomic_begin = exynos_crtc_atomic_begin,
.atomic_flush = exynos_crtc_atomic_flush,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 831d2e4cacf9..ae9e6b2d3758 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -304,6 +304,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int exynos_drm_suspend(struct drm_device *dev, pm_message_t state)
{
struct drm_connector *connector;
@@ -340,6 +341,7 @@ static int exynos_drm_resume(struct drm_device *dev)
return 0;
}
+#endif
static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
{
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index b7ba21dfb696..6c717ba672db 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -82,7 +82,6 @@ struct exynos_drm_plane {
*
* @enable: enable the device
* @disable: disable the device
- * @mode_fixup: fix mode data before applying it
* @commit: set current hw specific display mode to hw.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
@@ -103,9 +102,6 @@ struct exynos_drm_crtc;
struct exynos_drm_crtc_ops {
void (*enable)(struct exynos_drm_crtc *crtc);
void (*disable)(struct exynos_drm_crtc *crtc);
- bool (*mode_fixup)(struct exynos_drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
void (*commit)(struct exynos_drm_crtc *crtc);
int (*enable_vblank)(struct exynos_drm_crtc *crtc);
void (*disable_vblank)(struct exynos_drm_crtc *crtc);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 2a652359af64..dd3a5e6d58c8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1206,23 +1206,6 @@ static struct exynos_drm_ipp_ops fimc_dst_ops = {
.set_addr = fimc_dst_set_addr,
};
-static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
-{
- DRM_DEBUG_KMS("enable[%d]\n", enable);
-
- if (enable) {
- clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
- clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
- ctx->suspended = false;
- } else {
- clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
- clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
- ctx->suspended = true;
- }
-
- return 0;
-}
-
static irqreturn_t fimc_irq_handler(int irq, void *dev_id)
{
struct fimc_context *ctx = dev_id;
@@ -1780,6 +1763,24 @@ static int fimc_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable)
+{
+ DRM_DEBUG_KMS("enable[%d]\n", enable);
+
+ if (enable) {
+ clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]);
+ clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]);
+ ctx->suspended = false;
+ } else {
+ clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]);
+ clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]);
+ ctx->suspended = true;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_PM_SLEEP
static int fimc_suspend(struct device *dev)
{
@@ -1806,7 +1807,6 @@ static int fimc_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM
static int fimc_runtime_suspend(struct device *dev)
{
struct fimc_context *ctx = get_fimc_context(dev);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 750a9e6b9e8d..3d1aba67758b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -41,7 +41,6 @@
* CPU Interface.
*/
-#define FIMD_DEFAULT_FRAMERATE 60
#define MIN_FB_WIDTH_FOR_16WORD_BURST 128
/* position control register for hardware window 0, 2 ~ 4.*/
@@ -377,16 +376,6 @@ static u32 fimd_calc_clkdiv(struct fimd_context *ctx,
return (clkdiv < 0x100) ? clkdiv : 0xff;
}
-static bool fimd_mode_fixup(struct exynos_drm_crtc *crtc,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- if (adjusted_mode->vrefresh == 0)
- adjusted_mode->vrefresh = FIMD_DEFAULT_FRAMERATE;
-
- return true;
-}
-
static void fimd_commit(struct exynos_drm_crtc *crtc)
{
struct fimd_context *ctx = crtc->ctx;
@@ -882,13 +871,12 @@ static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
return;
val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
- writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON);
+ writel(val, ctx->regs + DP_MIE_CLKCON);
}
static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
.enable = fimd_enable,
.disable = fimd_disable,
- .mode_fixup = fimd_mode_fixup,
.commit = fimd_commit,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 3734c34aed16..c17efdb238a6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1059,7 +1059,6 @@ int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
return 0;
}
-EXPORT_SYMBOL_GPL(exynos_g2d_get_ver_ioctl);
int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
@@ -1230,7 +1229,6 @@ err:
g2d_put_cmdlist(g2d, node);
return ret;
}
-EXPORT_SYMBOL_GPL(exynos_g2d_set_cmdlist_ioctl);
int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
@@ -1293,7 +1291,6 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
out:
return 0;
}
-EXPORT_SYMBOL_GPL(exynos_g2d_exec_ioctl);
static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index f12fbc36b120..407afedb6003 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -56,39 +56,35 @@ static int exynos_drm_alloc_buf(struct exynos_drm_gem_obj *obj)
nr_pages = obj->size >> PAGE_SHIFT;
if (!is_drm_iommu_supported(dev)) {
- dma_addr_t start_addr;
- unsigned int i = 0;
-
obj->pages = drm_calloc_large(nr_pages, sizeof(struct page *));
if (!obj->pages) {
DRM_ERROR("failed to allocate pages.\n");
return -ENOMEM;
}
+ }
- obj->cookie = dma_alloc_attrs(dev->dev,
- obj->size,
- &obj->dma_addr, GFP_KERNEL,
- &obj->dma_attrs);
- if (!obj->cookie) {
- DRM_ERROR("failed to allocate buffer.\n");
+ obj->cookie = dma_alloc_attrs(dev->dev, obj->size, &obj->dma_addr,
+ GFP_KERNEL, &obj->dma_attrs);
+ if (!obj->cookie) {
+ DRM_ERROR("failed to allocate buffer.\n");
+ if (obj->pages)
drm_free_large(obj->pages);
- return -ENOMEM;
- }
+ return -ENOMEM;
+ }
+
+ if (obj->pages) {
+ dma_addr_t start_addr;
+ unsigned int i = 0;
start_addr = obj->dma_addr;
while (i < nr_pages) {
- obj->pages[i] = phys_to_page(start_addr);
+ obj->pages[i] = pfn_to_page(dma_to_pfn(dev->dev,
+ start_addr));
start_addr += PAGE_SIZE;
i++;
}
} else {
- obj->pages = dma_alloc_attrs(dev->dev, obj->size,
- &obj->dma_addr, GFP_KERNEL,
- &obj->dma_attrs);
- if (!obj->pages) {
- DRM_ERROR("failed to allocate buffer.\n");
- return -ENOMEM;
- }
+ obj->pages = obj->cookie;
}
DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
@@ -110,15 +106,11 @@ static void exynos_drm_free_buf(struct exynos_drm_gem_obj *obj)
DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
(unsigned long)obj->dma_addr, obj->size);
- if (!is_drm_iommu_supported(dev)) {
- dma_free_attrs(dev->dev, obj->size, obj->cookie,
- (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
- drm_free_large(obj->pages);
- } else
- dma_free_attrs(dev->dev, obj->size, obj->pages,
- (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
+ dma_free_attrs(dev->dev, obj->size, obj->cookie,
+ (dma_addr_t)obj->dma_addr, &obj->dma_attrs);
- obj->dma_addr = (dma_addr_t)NULL;
+ if (!is_drm_iommu_supported(dev))
+ drm_free_large(obj->pages);
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -156,18 +148,14 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
* once dmabuf's refcount becomes 0.
*/
if (obj->import_attach)
- goto out;
-
- exynos_drm_free_buf(exynos_gem_obj);
-
-out:
- drm_gem_free_mmap_offset(obj);
+ drm_prime_gem_destroy(obj, exynos_gem_obj->sgt);
+ else
+ exynos_drm_free_buf(exynos_gem_obj);
/* release file pointer to gem object. */
drm_gem_object_release(obj);
kfree(exynos_gem_obj);
- exynos_gem_obj = NULL;
}
unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
@@ -190,8 +178,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
return exynos_gem_obj->size;
}
-
-struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
@@ -212,6 +199,13 @@ struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
return ERR_PTR(ret);
}
+ ret = drm_gem_create_mmap_offset(obj);
+ if (ret < 0) {
+ drm_gem_object_release(obj);
+ kfree(exynos_gem_obj);
+ return ERR_PTR(ret);
+ }
+
DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
return exynos_gem_obj;
@@ -313,7 +307,7 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
drm_gem_object_unreference_unlocked(obj);
}
-int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
+static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
struct vm_area_struct *vma)
{
struct drm_device *drm_dev = exynos_gem_obj->base.dev;
@@ -342,7 +336,8 @@ int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem_obj *exynos_gem_obj,
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
-{ struct exynos_drm_gem_obj *exynos_gem_obj;
+{
+ struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_exynos_gem_info *args = data;
struct drm_gem_object *obj;
@@ -402,6 +397,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
struct drm_mode_create_dumb *args)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
+ unsigned int flags;
int ret;
/*
@@ -413,16 +409,12 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
- if (is_drm_iommu_supported(dev)) {
- exynos_gem_obj = exynos_drm_gem_create(dev,
- EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC,
- args->size);
- } else {
- exynos_gem_obj = exynos_drm_gem_create(dev,
- EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
- args->size);
- }
+ if (is_drm_iommu_supported(dev))
+ flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_WC;
+ else
+ flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC;
+ exynos_gem_obj = exynos_drm_gem_create(dev, flags, args->size);
if (IS_ERR(exynos_gem_obj)) {
dev_warn(dev->dev, "FB allocation failed.\n");
return PTR_ERR(exynos_gem_obj);
@@ -460,14 +452,9 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
goto unlock;
}
- ret = drm_gem_create_mmap_offset(obj);
- if (ret)
- goto out;
-
*offset = drm_vma_node_offset_addr(&obj->vma_node);
DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
-out:
drm_gem_object_unreference(obj);
unlock:
mutex_unlock(&dev->struct_mutex);
@@ -543,7 +530,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
err_close_vm:
drm_gem_vm_close(vma);
- drm_gem_free_mmap_offset(obj);
return ret;
}
@@ -588,6 +574,8 @@ exynos_drm_gem_prime_import_sg_table(struct drm_device *dev,
if (ret < 0)
goto err_free_large;
+ exynos_gem_obj->sgt = sgt;
+
if (sgt->nents == 1) {
/* always physically continuous memory if sgt->nents is 1. */
exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index cd62f8410d1e..b62d1007c0e0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -39,6 +39,7 @@
* - this address could be physical address without IOMMU and
* device address with IOMMU.
* @pages: Array of backing pages.
+ * @sgt: Imported sg_table.
*
* P.S. this object would be transferred to user as kms_bo.handle so
* user can access the buffer through kms_bo.handle.
@@ -52,6 +53,7 @@ struct exynos_drm_gem_obj {
dma_addr_t dma_addr;
struct dma_attrs dma_attrs;
struct page **pages;
+ struct sg_table *sgt;
};
struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
@@ -59,10 +61,6 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
/* destroy a buffer with gem object */
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
-/* create a private gem object and initialize it. */
-struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
- unsigned long size);
-
/* create a new buffer with gem object */
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
unsigned int flags,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 425e70625388..2f5c118f4c8e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -786,6 +786,7 @@ static int rotator_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
static int rotator_clk_crtl(struct rot_context *rot, bool enable)
{
if (enable) {
@@ -822,7 +823,6 @@ static int rotator_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM
static int rotator_runtime_suspend(struct device *dev)
{
struct rot_context *rot = dev_get_drvdata(dev);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 82be6b86a168..d1e300dcd544 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -58,7 +58,8 @@ static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
- unsigned int index, value, ret;
+ unsigned int value;
+ int index, ret;
index = fsl_dcu_drm_plane_index(plane);
if (index < 0)
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5a244ab9395b..39d73dbc1c47 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -640,6 +640,32 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
/*
+ * On HSW, the DSL reg (0x70000) appears to return 0 if we
+ * read it just before the start of vblank. So try it again
+ * so we don't accidentally end up spanning a vblank frame
+ * increment, causing the pipe_update_end() code to squawk at us.
+ *
+ * The nature of this problem means we can't simply check the ISR
+ * bit and return the vblank start value; nor can we use the scanline
+ * debug register in the transcoder as it appears to have the same
+ * problem. We may need to extend this to include other platforms,
+ * but so far testing only shows the problem on HSW.
+ */
+ if (IS_HASWELL(dev) && !position) {
+ int i, temp;
+
+ for (i = 0; i < 100; i++) {
+ udelay(1);
+ temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
+ DSL_LINEMASK_GEN3;
+ if (temp != position) {
+ position = temp;
+ break;
+ }
+ }
+ }
+
+ /*
* See update_scanline_offset() for the details on the
* scanline_offset adjustment.
*/
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 89c1a8ce1f98..2a5c76faf9f8 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -430,7 +430,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
/**
* intel_audio_codec_disable - Disable the audio codec for HD audio
- * @encoder: encoder on which to disable audio
+ * @intel_encoder: encoder on which to disable audio
*
* The disable sequences must be performed before disabling the transcoder or
* port.
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b3e437b3bb54..c19e669ffe50 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -42,7 +42,7 @@ find_section(const void *_bdb, int section_id)
const struct bdb_header *bdb = _bdb;
const u8 *base = _bdb;
int index = 0;
- u16 total, current_size;
+ u32 total, current_size;
u8 current_id;
/* skip to first section */
@@ -57,6 +57,10 @@ find_section(const void *_bdb, int section_id)
current_size = *((const u16 *)(base + index));
index += 2;
+ /* The MIPI Sequence Block v3+ has a separate size field. */
+ if (current_id == BDB_MIPI_SEQUENCE && *(base + index) >= 3)
+ current_size = *((const u32 *)(base + index + 1));
+
if (index + current_size > total)
return NULL;
@@ -799,6 +803,12 @@ parse_mipi(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
return;
}
+ /* Fail gracefully for forward incompatible sequence block. */
+ if (sequence->version >= 3) {
+ DRM_ERROR("Unable to parse MIPI Sequence Block v3+\n");
+ return;
+ }
+
DRM_DEBUG_DRIVER("Found MIPI sequence block\n");
block_size = get_blocksize(sequence);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 8cc9264f7809..cf418be7d30a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -15087,9 +15087,12 @@ static void readout_plane_state(struct intel_crtc *crtc,
plane_state = to_intel_plane_state(p->base.state);
- if (p->base.type == DRM_PLANE_TYPE_PRIMARY)
+ if (p->base.type == DRM_PLANE_TYPE_PRIMARY) {
plane_state->visible = primary_get_hw_state(crtc);
- else {
+ if (plane_state->visible)
+ crtc->base.state->plane_mask |=
+ 1 << drm_plane_index(&p->base);
+ } else {
if (active)
p->disable_plane(&p->base, &crtc->base);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 3e4be5a3becd..6ade06888432 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -462,11 +462,17 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
drm_mode_connector_set_path_property(connector, pathprop);
+ return connector;
+}
+
+static void intel_dp_register_mst_connector(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct drm_device *dev = connector->dev;
drm_modeset_lock_all(dev);
intel_connector_add_to_fbdev(intel_connector);
drm_modeset_unlock_all(dev);
drm_connector_register(&intel_connector->base);
- return connector;
}
static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
@@ -512,6 +518,7 @@ static void intel_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
static struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = intel_dp_add_mst_connector,
+ .register_connector = intel_dp_register_mst_connector,
.destroy_connector = intel_dp_destroy_mst_connector,
.hotplug = intel_dp_mst_hotplug,
};
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index 53c0173a39fe..b17785719598 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -180,7 +180,7 @@ static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
/* Enable polling and queue hotplug re-enabling. */
if (hpd_disabled) {
- drm_kms_helper_poll_enable(dev);
+ drm_kms_helper_poll_enable_locked(dev);
mod_delayed_work(system_wq, &dev_priv->hotplug.reenable_work,
msecs_to_jiffies(HPD_STORM_REENABLE_DELAY));
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 72e0edd7bbde..7412caedcf7f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -484,18 +484,18 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
status_pointer = I915_READ(RING_CONTEXT_STATUS_PTR(ring));
read_pointer = ring->next_context_status_buffer;
- write_pointer = status_pointer & 0x07;
+ write_pointer = status_pointer & GEN8_CSB_PTR_MASK;
if (read_pointer > write_pointer)
- write_pointer += 6;
+ write_pointer += GEN8_CSB_ENTRIES;
spin_lock(&ring->execlist_lock);
while (read_pointer < write_pointer) {
read_pointer++;
status = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
- (read_pointer % 6) * 8);
+ (read_pointer % GEN8_CSB_ENTRIES) * 8);
status_id = I915_READ(RING_CONTEXT_STATUS_BUF(ring) +
- (read_pointer % 6) * 8 + 4);
+ (read_pointer % GEN8_CSB_ENTRIES) * 8 + 4);
if (status & GEN8_CTX_STATUS_IDLE_ACTIVE)
continue;
@@ -521,10 +521,12 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
spin_unlock(&ring->execlist_lock);
WARN(submit_contexts > 2, "More than two context complete events?\n");
- ring->next_context_status_buffer = write_pointer % 6;
+ ring->next_context_status_buffer = write_pointer % GEN8_CSB_ENTRIES;
I915_WRITE(RING_CONTEXT_STATUS_PTR(ring),
- _MASKED_FIELD(0x07 << 8, ((u32)ring->next_context_status_buffer & 0x07) << 8));
+ _MASKED_FIELD(GEN8_CSB_PTR_MASK << 8,
+ ((u32)ring->next_context_status_buffer &
+ GEN8_CSB_PTR_MASK) << 8));
}
static int execlists_context_queue(struct drm_i915_gem_request *request)
@@ -1422,6 +1424,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u8 next_context_status_buffer_hw;
I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@@ -1436,7 +1439,29 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
_MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
_MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
POSTING_READ(RING_MODE_GEN7(ring));
- ring->next_context_status_buffer = 0;
+
+ /*
+ * Instead of resetting the Context Status Buffer (CSB) read pointer to
+ * zero, we need to read the write pointer from hardware and use its
+ * value because "this register is power context save restored".
+ * Effectively, these states have been observed:
+ *
+ * | Suspend-to-idle (freeze) | Suspend-to-RAM (mem) |
+ * BDW | CSB regs not reset | CSB regs reset |
+ * CHT | CSB regs not reset | CSB regs not reset |
+ */
+ next_context_status_buffer_hw = (I915_READ(RING_CONTEXT_STATUS_PTR(ring))
+ & GEN8_CSB_PTR_MASK);
+
+ /*
+ * When the CSB registers are reset (also after power-up / gpu reset),
+ * CSB write pointer is set to all 1's, which is not valid, use '5' in
+ * this special case, so the first element read is CSB[0].
+ */
+ if (next_context_status_buffer_hw == GEN8_CSB_PTR_MASK)
+ next_context_status_buffer_hw = (GEN8_CSB_ENTRIES - 1);
+
+ ring->next_context_status_buffer = next_context_status_buffer_hw;
DRM_DEBUG_DRIVER("Execlists enabled for %s\n", ring->name);
memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 64f89f9982a2..3c63bb32ad81 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -25,6 +25,8 @@
#define _INTEL_LRC_H_
#define GEN8_LR_CONTEXT_ALIGN 4096
+#define GEN8_CSB_ENTRIES 6
+#define GEN8_CSB_PTR_MASK 0x07
/* Execlists regs */
#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index af7fdb3bd663..7401cf90b0db 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -246,7 +246,8 @@ static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
}
if (power_well->data == SKL_DISP_PW_1) {
- intel_prepare_ddi(dev);
+ if (!dev_priv->power_domains.initializing)
+ intel_prepare_ddi(dev);
gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A);
}
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index 87de15ea1f93..b35b5b2db4ec 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -186,17 +186,19 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
sysram = vmalloc(size);
if (!sysram)
- return -ENOMEM;
+ goto err_sysram;
info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info))
- return PTR_ERR(info);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+ goto err_alloc_fbi;
+ }
info->par = mfbdev;
ret = mgag200_framebuffer_init(dev, &mfbdev->mfb, &mode_cmd, gobj);
if (ret)
- return ret;
+ goto err_framebuffer_init;
mfbdev->sysram = sysram;
mfbdev->size = size;
@@ -225,7 +227,17 @@ static int mgag200fb_create(struct drm_fb_helper *helper,
DRM_DEBUG_KMS("allocated %dx%d\n",
fb->width, fb->height);
+
return 0;
+
+err_framebuffer_init:
+ drm_fb_helper_release_fbi(helper);
+err_alloc_fbi:
+ vfree(sysram);
+err_sysram:
+ drm_gem_object_unreference_unlocked(gobj);
+
+ return ret;
}
static int mga_fbdev_destroy(struct drm_device *dev,
@@ -276,23 +288,26 @@ int mgag200_fbdev_init(struct mga_device *mdev)
ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
mdev->num_crtc, MGAG200FB_CONN_LIMIT);
if (ret)
- return ret;
+ goto err_fb_helper;
ret = drm_fb_helper_single_add_all_connectors(&mfbdev->helper);
if (ret)
- goto fini;
+ goto err_fb_setup;
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(mdev->dev);
ret = drm_fb_helper_initial_config(&mfbdev->helper, bpp_sel);
if (ret)
- goto fini;
+ goto err_fb_setup;
return 0;
-fini:
+err_fb_setup:
drm_fb_helper_fini(&mfbdev->helper);
+err_fb_helper:
+ mdev->mfbdev = NULL;
+
return ret;
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index de06388069e7..b1a0f5656175 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -220,7 +220,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
}
r = mgag200_mm_init(mdev);
if (r)
- goto out;
+ goto err_mm;
drm_mode_config_init(dev);
dev->mode_config.funcs = (void *)&mga_mode_funcs;
@@ -233,7 +233,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
r = mgag200_modeset_init(mdev);
if (r) {
dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
- goto out;
+ goto err_modeset;
}
/* Make small buffers to store a hardware cursor (double buffered icon updates) */
@@ -241,20 +241,24 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags)
&mdev->cursor.pixels_1);
mgag200_bo_create(dev, roundup(48*64, PAGE_SIZE), 0, 0,
&mdev->cursor.pixels_2);
- if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1)
- goto cursor_nospace;
- mdev->cursor.pixels_current = mdev->cursor.pixels_1;
- mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
- goto cursor_done;
- cursor_nospace:
- mdev->cursor.pixels_1 = NULL;
- mdev->cursor.pixels_2 = NULL;
- dev_warn(&dev->pdev->dev, "Could not allocate space for cursors. Not doing hardware cursors.\n");
- cursor_done:
-
-out:
- if (r)
- mgag200_driver_unload(dev);
+ if (!mdev->cursor.pixels_2 || !mdev->cursor.pixels_1) {
+ mdev->cursor.pixels_1 = NULL;
+ mdev->cursor.pixels_2 = NULL;
+ dev_warn(&dev->pdev->dev,
+ "Could not allocate space for cursors. Not doing hardware cursors.\n");
+ } else {
+ mdev->cursor.pixels_current = mdev->cursor.pixels_1;
+ mdev->cursor.pixels_prev = mdev->cursor.pixels_2;
+ }
+
+ return 0;
+
+err_modeset:
+ drm_mode_config_cleanup(dev);
+ mgag200_mm_fini(mdev);
+err_mm:
+ dev->dev_private = NULL;
+
return r;
}
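
The reworked mgag200_driver_load() error handling above follows the common kernel shape: each failure jumps to a label that undoes only what has already been set up, in reverse order, and the success path returns directly instead of falling through shared cleanup. A minimal standalone sketch of that shape, using hypothetical setup_a()/setup_b() helpers rather than the mgag200 functions:

#include <stdio.h>

/* Hypothetical init steps standing in for mm init / modeset init. */
static int setup_a(void) { return 0; }
static void teardown_a(void) { puts("teardown_a"); }
static int setup_b(int fail) { return fail ? -1 : 0; }

static int driver_load(int fail_b)
{
        int r;

        r = setup_a();
        if (r)
                goto err_a;

        r = setup_b(fail_b);
        if (r)
                goto err_b;

        return 0;               /* success path returns directly */

err_b:
        teardown_a();           /* unwind in reverse order of setup */
err_a:
        return r;
}

int main(void)
{
        printf("load ok: %d\n", driver_load(0));
        printf("load failed: %d\n", driver_load(1));
        return 0;
}
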
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index b1f73bee1368..b0d4b53b97f4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -178,7 +178,6 @@ static int mdp5_hw_irqdomain_map(struct irq_domain *d,
irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
irq_set_chip_data(irq, mdp5_kms);
- set_irq_flags(irq, IRQF_VALID);
return 0;
}
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 7c6225c84ba6..4649bd2ed340 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -618,7 +618,7 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
adjusted_mode->hdisplay,
adjusted_mode->vdisplay);
- if (qcrtc->index == 0)
+ if (bo->is_primary == false)
recreate_primary = true;
if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
@@ -886,13 +886,15 @@ static enum drm_connector_status qxl_conn_detect(
drm_connector_to_qxl_output(connector);
struct drm_device *ddev = connector->dev;
struct qxl_device *qdev = ddev->dev_private;
- int connected;
+ bool connected = false;
/* The first monitor is always connected */
- connected = (output->index == 0) ||
- (qdev->client_monitors_config &&
- qdev->client_monitors_config->count > output->index &&
- qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]));
+ if (!qdev->client_monitors_config) {
+ if (output->index == 0)
+ connected = true;
+ } else
+ connected = qdev->client_monitors_config->count > output->index &&
+ qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]);
DRM_DEBUG("#%d connected: %d\n", output->index, connected);
if (!connected)
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index c3872598b85a..65adb9c72377 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1624,8 +1624,9 @@ radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
} else
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
- args.ucAction = ATOM_LCD_BLON;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
}
break;
case DRM_MODE_DPMS_STANDBY:
@@ -1706,8 +1707,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
}
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
- atombios_dig_transmitter_setup(encoder,
- ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
+ atombios_set_backlight_level(radeon_encoder, dig->backlight_level);
if (ext_encoder)
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
break;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index d8319dae8358..f3f562f6d848 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1573,10 +1573,12 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
drm_kms_helper_poll_disable(dev);
+ drm_modeset_lock_all(dev);
/* turn off display hw */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
+ drm_modeset_unlock_all(dev);
/* unpin the front buffers and cursors */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1734,9 +1736,11 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
if (fbcon) {
drm_helper_resume_force_mode(dev);
/* turn on display hw */
+ drm_modeset_lock_all(dev);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
+ drm_modeset_unlock_all(dev);
}
drm_kms_helper_poll_enable(dev);
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 5e09c061847f..6cddae44fa6e 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -265,7 +265,6 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
{
struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
struct drm_device *dev = master->base.dev;
- struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector;
struct drm_connector *connector;
@@ -286,12 +285,19 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
drm_mode_connector_set_path_property(connector, pathprop);
+ return connector;
+}
+
+static void radeon_dp_register_mst_connector(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
drm_modeset_lock_all(dev);
radeon_fb_add_connector(rdev, connector);
drm_modeset_unlock_all(dev);
drm_connector_register(connector);
- return connector;
}
static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
@@ -324,6 +330,7 @@ static void radeon_dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = radeon_dp_add_mst_connector,
+ .register_connector = radeon_dp_register_mst_connector,
.destroy_connector = radeon_dp_destroy_mst_connector,
.hotplug = radeon_dp_mst_hotplug,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 7214858ffcea..1aa657fe31cb 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -48,40 +48,10 @@ struct radeon_fbdev {
struct radeon_device *rdev;
};
-/**
- * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev.
- *
- * @info: fbdev info
- *
- * This function hides the cursor on all CRTCs used by fbdev.
- */
-static int radeon_fb_helper_set_par(struct fb_info *info)
-{
- int ret;
-
- ret = drm_fb_helper_set_par(info);
-
- /* XXX: with universal plane support fbdev will automatically disable
- * all non-primary planes (including the cursor)
- */
- if (ret == 0) {
- struct drm_fb_helper *fb_helper = info->par;
- int i;
-
- for (i = 0; i < fb_helper->crtc_count; i++) {
- struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc;
-
- radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
- }
- }
-
- return ret;
-}
-
static struct fb_ops radeonfb_ops = {
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
- .fb_set_par = radeon_fb_helper_set_par,
+ .fb_set_par = drm_fb_helper_set_par,
.fb_fillrect = drm_fb_helper_cfb_fillrect,
.fb_copyarea = drm_fb_helper_cfb_copyarea,
.fb_imageblit = drm_fb_helper_cfb_imageblit,
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 787cd8fd897f..e9115d3f67b0 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2927,6 +2927,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
{ 0, 0, 0, 0 },
};
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 8d9b7de25613..745e996d2dbc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -882,6 +882,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (ret)
return ret;
man = &bdev->man[mem_type];
+ if (!man->has_type || !man->use_type)
+ continue;
type_ok = ttm_bo_mt_compatible(man, mem_type, place,
&cur_flags);
@@ -889,6 +891,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (!type_ok)
continue;
+ type_found = true;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
/*
@@ -901,12 +904,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (mem_type == TTM_PL_SYSTEM)
break;
- if (man->has_type && man->use_type) {
- type_found = true;
- ret = (*man->func->get_node)(man, bo, place, mem);
- if (unlikely(ret))
- return ret;
- }
+ ret = (*man->func->get_node)(man, bo, place, mem);
+ if (unlikely(ret))
+ return ret;
+
if (mem->mm_node)
break;
}
@@ -917,9 +918,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
return 0;
}
- if (!type_found)
- return -EINVAL;
-
for (i = 0; i < placement->num_busy_placement; ++i) {
const struct ttm_place *place = &placement->busy_placement[i];
@@ -927,11 +925,12 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (ret)
return ret;
man = &bdev->man[mem_type];
- if (!man->has_type)
+ if (!man->has_type || !man->use_type)
continue;
if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
continue;
+ type_found = true;
cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
cur_flags);
/*
@@ -957,8 +956,13 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (ret == -ERESTARTSYS)
has_erestartsys = true;
}
- ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
- return ret;
+
+ if (!type_found) {
+ printk(KERN_ERR TTM_PFX "No compatible memory type found.\n");
+ return -EINVAL;
+ }
+
+ return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
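
The ttm_bo_mem_space() change above hoists the usability check (has_type && use_type) to the top of both placement loops and only reports -EINVAL once both the normal and the busy lists have been scanned without seeing any usable memory type; otherwise exhaustion is still reported as -ENOMEM (or -ERESTARTSYS). A simplified standalone sketch of that control flow, using made-up placement data rather than TTM structures:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct mem_type {
        bool has_type;
        bool use_type;
        bool has_space;
};

/* Scan one placement list; record whether any usable type was seen and
 * report whether an allocation actually succeeded. */
static bool scan(const struct mem_type *types, int n, bool *type_found)
{
        for (int i = 0; i < n; i++) {
                if (!types[i].has_type || !types[i].use_type)
                        continue;       /* skip disabled managers up front */
                *type_found = true;
                if (types[i].has_space)
                        return true;
        }
        return false;
}

static int mem_space(const struct mem_type *normal, int nn,
                     const struct mem_type *busy, int nb)
{
        bool type_found = false;

        if (scan(normal, nn, &type_found) || scan(busy, nb, &type_found))
                return 0;
        /* Only after both passes do we know whether the failure was
         * "no usable type at all" or plain exhaustion. */
        return type_found ? -ENOMEM : -EINVAL;
}

int main(void)
{
        struct mem_type none[] = { { false, false, true } };
        struct mem_type full[] = { { true, true, false } };

        printf("%d\n", mem_space(none, 1, none, 1));    /* -EINVAL: no usable type */
        printf("%d\n", mem_space(full, 1, full, 1));    /* -ENOMEM: type exists, no space */
        return 0;
}
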
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 67720f70fe29..b49445df8a7e 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,6 +1,6 @@
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
- depends on DRM && PCI
+ depends on DRM && PCI && X86
select FB_DEFERRED_IO
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 5ae8f921da2a..8a76821177a6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -681,6 +681,14 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
0, 0,
DRM_MM_SEARCH_DEFAULT,
DRM_MM_CREATE_DEFAULT);
+ if (ret) {
+ (void) vmw_cmdbuf_man_process(man);
+ ret = drm_mm_insert_node_generic(&man->mm, info->node,
+ info->page_size, 0, 0,
+ DRM_MM_SEARCH_DEFAULT,
+ DRM_MM_CREATE_DEFAULT);
+ }
+
spin_unlock_bh(&man->lock);
info->done = !ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index ce659a125f2b..092ea81eeff7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -311,7 +311,6 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
struct vmw_private *dev_priv = res->dev_priv;
struct ttm_buffer_object *bo = val_buf->bo;
struct vmw_fence_obj *fence;
- int ret;
if (list_empty(&res->mob_head))
return 0;
@@ -328,7 +327,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
if (likely(fence != NULL))
vmw_fence_obj_unreference(&fence);
- return ret;
+ return 0;
}
/**
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index e13b20bd9908..2c7a25c71af2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -752,12 +752,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
dev_priv->active_master = &dev_priv->fbdev_master;
-
- dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
- dev_priv->mmio_size);
-
- dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
- dev_priv->mmio_size);
+ dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start,
+ dev_priv->mmio_size);
if (unlikely(dev_priv->mmio_virt == NULL)) {
ret = -ENOMEM;
@@ -913,7 +909,6 @@ out_no_device:
out_err4:
iounmap(dev_priv->mmio_virt);
out_err3:
- arch_phys_wc_del(dev_priv->mmio_mtrr);
vmw_ttm_global_release(dev_priv);
out_err0:
for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -964,7 +959,6 @@ static int vmw_driver_unload(struct drm_device *dev)
ttm_object_device_release(&dev_priv->tdev);
iounmap(dev_priv->mmio_virt);
- arch_phys_wc_del(dev_priv->mmio_mtrr);
if (dev_priv->ctx.staged_bindings)
vmw_binding_state_free(dev_priv->ctx.staged_bindings);
vmw_ttm_global_release(dev_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 6d02de6dc36c..f19fd39b43e1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -376,7 +376,6 @@ struct vmw_private {
uint32_t initial_width;
uint32_t initial_height;
u32 __iomem *mmio_virt;
- int mmio_mtrr;
uint32_t capabilities;
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;
@@ -631,7 +630,8 @@ extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
uint32_t size,
bool shareable,
uint32_t *handle,
- struct vmw_dma_buffer **p_dma_buf);
+ struct vmw_dma_buffer **p_dma_buf,
+ struct ttm_base_object **p_base);
extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
struct vmw_dma_buffer *dma_buf,
uint32_t *handle);
@@ -645,7 +645,8 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
- uint32_t id, struct vmw_dma_buffer **out);
+ uint32_t id, struct vmw_dma_buffer **out,
+ struct ttm_base_object **base);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index b56565457c96..5da5de0cb522 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -1236,7 +1236,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+ ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
+ NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use MOB buffer.\n");
ret = -EINVAL;
@@ -1296,7 +1297,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+ ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
+ NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
ret = -EINVAL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 61fb7f3de311..15a6c01cd016 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1685,7 +1685,6 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
struct drm_crtc *crtc;
u32 num_units = 0;
u32 i, k;
- int ret;
dirty->dev_priv = dev_priv;
@@ -1711,7 +1710,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
if (!dirty->cmd) {
DRM_ERROR("Couldn't reserve fifo space "
"for dirty blits.\n");
- return ret;
+ return -ENOMEM;
}
memset(dirty->cmd, 0, dirty->fifo_reserve_size);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index 76069f093ccf..222c9c2123a1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -484,7 +484,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
+ ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
if (ret)
goto out_unlock;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index c1912f852b42..e57667ca7557 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -354,7 +354,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
}
*out_surf = NULL;
- ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+ ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
return ret;
}
@@ -481,7 +481,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
uint32_t size,
bool shareable,
uint32_t *handle,
- struct vmw_dma_buffer **p_dma_buf)
+ struct vmw_dma_buffer **p_dma_buf,
+ struct ttm_base_object **p_base)
{
struct vmw_user_dma_buffer *user_bo;
struct ttm_buffer_object *tmp;
@@ -515,6 +516,10 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
}
*p_dma_buf = &user_bo->dma;
+ if (p_base) {
+ *p_base = &user_bo->prime.base;
+ kref_get(&(*p_base)->refcount);
+ }
*handle = user_bo->prime.base.hash.key;
out_no_base_object:
@@ -631,6 +636,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
struct vmw_dma_buffer *dma_buf;
struct vmw_user_dma_buffer *user_bo;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct ttm_base_object *buffer_base;
int ret;
if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
@@ -643,7 +649,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
switch (arg->op) {
case drm_vmw_synccpu_grab:
- ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+ ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
+ &buffer_base);
if (unlikely(ret != 0))
return ret;
@@ -651,6 +658,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
dma);
ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
vmw_dmabuf_unreference(&dma_buf);
+ ttm_base_object_unref(&buffer_base);
if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
ret != -EBUSY)) {
DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
@@ -692,7 +700,8 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
return ret;
ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
- req->size, false, &handle, &dma_buf);
+ req->size, false, &handle, &dma_buf,
+ NULL);
if (unlikely(ret != 0))
goto out_no_dmabuf;
@@ -721,7 +730,8 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
}
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
- uint32_t handle, struct vmw_dma_buffer **out)
+ uint32_t handle, struct vmw_dma_buffer **out,
+ struct ttm_base_object **p_base)
{
struct vmw_user_dma_buffer *vmw_user_bo;
struct ttm_base_object *base;
@@ -743,7 +753,10 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
prime.base);
(void)ttm_bo_reference(&vmw_user_bo->dma.base);
- ttm_base_object_unref(&base);
+ if (p_base)
+ *p_base = base;
+ else
+ ttm_base_object_unref(&base);
*out = &vmw_user_bo->dma;
return 0;
@@ -1004,7 +1017,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
args->size, false, &args->handle,
- &dma_buf);
+ &dma_buf, NULL);
if (unlikely(ret != 0))
goto out_no_dmabuf;
@@ -1032,7 +1045,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
struct vmw_dma_buffer *out_buf;
int ret;
- ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
+ ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
if (ret != 0)
return -EINVAL;
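
The vmw_user_dmabuf_lookup()/alloc() changes above add an optional out-parameter that transfers the base-object reference to the caller: when p_base is non-NULL, the reference taken during lookup is handed over instead of being dropped, and the caller releases it once the buffer no longer needs to stay pinned (as the synccpu ioctl now does). A minimal reference-counting sketch of that ownership pattern, using a hypothetical object type rather than ttm_base_object:

#include <stdio.h>
#include <stdlib.h>

struct base_obj {
        int refcount;
};

static struct base_obj *obj_get(struct base_obj *o) { o->refcount++; return o; }

static void obj_put(struct base_obj **po)
{
        if (--(*po)->refcount == 0)
                free(*po);
        *po = NULL;
}

/* Lookup takes a reference internally; if the caller passed p_base, the
 * reference is transferred to it, otherwise it is dropped before return. */
static int lookup(struct base_obj *registry, struct base_obj **p_base)
{
        struct base_obj *base = obj_get(registry);

        if (p_base)
                *p_base = base;         /* caller now owns this reference */
        else
                obj_put(&base);
        return 0;
}

int main(void)
{
        struct base_obj *o = calloc(1, sizeof(*o));
        struct base_obj *held = NULL;

        o->refcount = 1;
        lookup(o, NULL);                /* no extra reference kept */
        lookup(o, &held);               /* reference pinned for the caller */
        printf("refcount while held: %d\n", o->refcount);      /* prints 2 */
        obj_put(&held);                 /* caller releases when done */
        obj_put(&o);
        return 0;
}
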
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index bba1ee395478..fd47547b0234 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -855,7 +855,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
if (buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
- &buffer);
+ &buffer, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find buffer for shader "
"creation.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 3361769842f4..64b50409fa07 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -46,6 +46,7 @@ struct vmw_user_surface {
struct vmw_surface srf;
uint32_t size;
struct drm_master *master;
+ struct ttm_base_object *backup_base;
};
/**
@@ -656,6 +657,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
struct vmw_resource *res = &user_srf->srf.res;
*p_base = NULL;
+ ttm_base_object_unref(&user_srf->backup_base);
vmw_resource_unreference(&res);
}
@@ -851,7 +853,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
res->backup_size,
true,
&backup_handle,
- &res->backup);
+ &res->backup,
+ &user_srf->backup_base);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
goto out_unlock;
@@ -1321,7 +1324,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
if (req->buffer_handle != SVGA3D_INVALID_ID) {
ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
- &res->backup);
+ &res->backup,
+ &user_srf->backup_base);
if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
res->backup_size) {
DRM_ERROR("Surface backup buffer is too small.\n");
@@ -1335,7 +1339,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
req->drm_surface_flags &
drm_vmw_surface_flag_shareable,
&backup_handle,
- &res->backup);
+ &res->backup,
+ &user_srf->backup_base);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&res);
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c
index 243f99a80253..e5a38d202a21 100644
--- a/drivers/gpu/ipu-v3/ipu-common.c
+++ b/drivers/gpu/ipu-v3/ipu-common.c
@@ -912,7 +912,7 @@ static void ipu_irq_handle(struct ipu_soc *ipu, const int *regs, int num_regs)
}
}
-static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ipu_irq_handler(struct irq_desc *desc)
{
struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -925,7 +925,7 @@ static void ipu_irq_handler(unsigned int irq, struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void ipu_err_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void ipu_err_irq_handler(struct irq_desc *desc)
{
struct ipu_soc *ipu = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -1099,8 +1099,7 @@ static int ipu_irq_init(struct ipu_soc *ipu)
}
ret = irq_alloc_domain_generic_chips(ipu->domain, 32, 1, "IPU",
- handle_level_irq, 0,
- IRQF_VALID, 0);
+ handle_level_irq, 0, 0, 0);
if (ret < 0) {
dev_err(ipu->dev, "failed to alloc generic irq chips\n");
irq_domain_remove(ipu->domain);
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 2f9aead4ecfc..652afd11a9ef 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -204,6 +204,8 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
list_del(&channel->listentry);
spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
+
+ primary_channel = channel;
} else {
primary_channel = channel->primary_channel;
spin_lock_irqsave(&primary_channel->lock, flags);
@@ -211,6 +213,14 @@ void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid)
primary_channel->num_sc--;
spin_unlock_irqrestore(&primary_channel->lock, flags);
}
+
+ /*
+ * We need to free the bit for init_vp_index() to work in the case
+ * of sub-channel, when we reload drivers like hv_netvsc.
+ */
+ cpumask_clear_cpu(channel->target_cpu,
+ &primary_channel->alloced_cpus_in_node);
+
free_channel(channel);
}
@@ -458,6 +468,13 @@ static void init_vp_index(struct vmbus_channel *channel, const uuid_le *type_gui
continue;
}
+ /*
+ * NOTE: in the case of sub-channel, we clear the sub-channel
+ * related bit(s) in primary->alloced_cpus_in_node in
+ * hv_process_channel_removal(), so when we reload drivers
+ * like hv_netvsc in SMP guest, here we're able to re-allocate
+ * bit from primary->alloced_cpus_in_node.
+ */
if (!cpumask_test_cpu(cur_cpu,
&primary->alloced_cpus_in_node)) {
cpumask_set_cpu(cur_cpu,
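
The two hv hunks above work together: init_vp_index() marks which CPUs in a node already host a channel by setting bits in alloced_cpus_in_node, and hv_process_channel_removal() now clears the matching bit so a reloaded driver such as hv_netvsc can be placed on that CPU again. A toy standalone sketch of that allocate/free cycle, using a plain bitmask in place of a cpumask:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8

/* Pick the first CPU whose bit is clear and mark it allocated. */
static int alloc_cpu(uint32_t *alloced)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                if (!(*alloced & (1u << cpu))) {
                        *alloced |= 1u << cpu;
                        return cpu;
                }
        }
        return -1;
}

/* Counterpart of the new cpumask_clear_cpu() call: without it, every
 * driver reload would keep consuming fresh CPUs until none were left. */
static void free_cpu(uint32_t *alloced, int cpu)
{
        *alloced &= ~(1u << cpu);
}

int main(void)
{
        uint32_t alloced = 0;

        int a = alloc_cpu(&alloced);
        int b = alloc_cpu(&alloced);
        printf("first load: cpus %d and %d\n", a, b);

        free_cpu(&alloced, a);          /* channel removed on driver unload */
        printf("after reload: cpu %d reused\n", alloc_cpu(&alloced));
        return 0;
}
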
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 500b262b89bb..e13c902e8966 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1140,8 +1140,8 @@ config SENSORS_NCT6775
help
If you say yes here you get support for the hardware monitoring
functionality of the Nuvoton NCT6106D, NCT6775F, NCT6776F, NCT6779D,
- NCT6791D, NCT6792D and compatible Super-I/O chips. This driver
- replaces the w83627ehf driver for NCT6775F and NCT6776F.
+ NCT6791D, NCT6792D, NCT6793D, and compatible Super-I/O chips. This
+ driver replaces the w83627ehf driver for NCT6775F and NCT6776F.
This driver can also be built as a module. If so, the module
will be called nct6775.
diff --git a/drivers/hwmon/abx500.c b/drivers/hwmon/abx500.c
index 6cb89c0ebab6..1fd46859ed29 100644
--- a/drivers/hwmon/abx500.c
+++ b/drivers/hwmon/abx500.c
@@ -470,6 +470,7 @@ static const struct of_device_id abx500_temp_match[] = {
{ .compatible = "stericsson,abx500-temp" },
{},
};
+MODULE_DEVICE_TABLE(of, abx500_temp_match);
#endif
static struct platform_driver abx500_temp_driver = {
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index a3dae6d0082a..82de3deeb18a 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -539,6 +539,7 @@ static const struct of_device_id of_gpio_fan_match[] = {
{ .compatible = "gpio-fan", },
{},
};
+MODULE_DEVICE_TABLE(of, of_gpio_fan_match);
#endif /* CONFIG_OF_GPIO */
static int gpio_fan_probe(struct platform_device *pdev)
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index bd1c99deac71..8b4fa55e46c6 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -39,6 +39,7 @@
* nct6779d 15 5 5 2+6 0xc560 0xc1 0x5ca3
* nct6791d 15 6 6 2+6 0xc800 0xc1 0x5ca3
* nct6792d 15 6 6 2+6 0xc910 0xc1 0x5ca3
+ * nct6793d 15 6 6 2+6 0xd120 0xc1 0x5ca3
*
* #temp lists the number of monitored temperature sources (first value) plus
* the number of directly connectable temperature sensors (second value).
@@ -63,7 +64,7 @@
#define USE_ALTERNATE
-enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791, nct6792 };
+enum kinds { nct6106, nct6775, nct6776, nct6779, nct6791, nct6792, nct6793 };
/* used to set data->name = nct6775_device_names[data->sio_kind] */
static const char * const nct6775_device_names[] = {
@@ -73,6 +74,17 @@ static const char * const nct6775_device_names[] = {
"nct6779",
"nct6791",
"nct6792",
+ "nct6793",
+};
+
+static const char * const nct6775_sio_names[] __initconst = {
+ "NCT6106D",
+ "NCT6775F",
+ "NCT6776D/F",
+ "NCT6779D",
+ "NCT6791D",
+ "NCT6792D",
+ "NCT6793D",
};
static unsigned short force_id;
@@ -104,6 +116,7 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
#define SIO_NCT6779_ID 0xc560
#define SIO_NCT6791_ID 0xc800
#define SIO_NCT6792_ID 0xc910
+#define SIO_NCT6793_ID 0xd120
#define SIO_ID_MASK 0xFFF0
enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
@@ -354,6 +367,10 @@ static const u16 NCT6775_REG_TEMP_CRIT[ARRAY_SIZE(nct6775_temp_label) - 1]
/* NCT6776 specific data */
+/* STEP_UP_TIME and STEP_DOWN_TIME regs are swapped for all chips but NCT6775 */
+#define NCT6776_REG_FAN_STEP_UP_TIME NCT6775_REG_FAN_STEP_DOWN_TIME
+#define NCT6776_REG_FAN_STEP_DOWN_TIME NCT6775_REG_FAN_STEP_UP_TIME
+
static const s8 NCT6776_ALARM_BITS[] = {
0, 1, 2, 3, 8, 21, 20, 16, /* in0.. in7 */
17, -1, -1, -1, -1, -1, -1, /* in8..in14 */
@@ -533,7 +550,7 @@ static const s8 NCT6791_ALARM_BITS[] = {
4, 5, 13, -1, -1, -1, /* temp1..temp6 */
12, 9 }; /* intrusion0, intrusion1 */
-/* NCT6792 specific data */
+/* NCT6792/NCT6793 specific data */
static const u16 NCT6792_REG_TEMP_MON[] = {
0x73, 0x75, 0x77, 0x79, 0x7b, 0x7d };
@@ -1056,6 +1073,7 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
case nct6779:
case nct6791:
case nct6792:
+ case nct6793:
return reg == 0x150 || reg == 0x153 || reg == 0x155 ||
((reg & 0xfff0) == 0x4b0 && (reg & 0x000f) < 0x0b) ||
reg == 0x402 ||
@@ -1407,6 +1425,7 @@ static void nct6775_update_pwm_limits(struct device *dev)
case nct6779:
case nct6791:
case nct6792:
+ case nct6793:
reg = nct6775_read_value(data,
data->REG_CRITICAL_PWM_ENABLE[i]);
if (reg & data->CRITICAL_PWM_ENABLE_MASK)
@@ -2822,6 +2841,7 @@ store_auto_pwm(struct device *dev, struct device_attribute *attr,
case nct6779:
case nct6791:
case nct6792:
+ case nct6793:
nct6775_write_value(data, data->REG_CRITICAL_PWM[nr],
val);
reg = nct6775_read_value(data,
@@ -3256,7 +3276,7 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
pwm4pin = false;
pwm5pin = false;
pwm6pin = false;
- } else { /* NCT6779D, NCT6791D, or NCT6792D */
+ } else { /* NCT6779D, NCT6791D, NCT6792D, or NCT6793D */
regval = superio_inb(sioreg, 0x1c);
fan3pin = !(regval & (1 << 5));
@@ -3269,7 +3289,8 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
fan4min = fan4pin;
- if (data->kind == nct6791 || data->kind == nct6792) {
+ if (data->kind == nct6791 || data->kind == nct6792 ||
+ data->kind == nct6793) {
regval = superio_inb(sioreg, 0x2d);
fan6pin = (regval & (1 << 1));
pwm6pin = (regval & (1 << 0));
@@ -3528,8 +3549,8 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_FAN_PULSES = NCT6776_REG_FAN_PULSES;
data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
- data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
- data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
+ data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
+ data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
data->REG_PWM[0] = NCT6775_REG_PWM;
data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
@@ -3600,8 +3621,8 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
- data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
- data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
+ data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
+ data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
data->REG_PWM[0] = NCT6775_REG_PWM;
data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
@@ -3643,6 +3664,7 @@ static int nct6775_probe(struct platform_device *pdev)
break;
case nct6791:
case nct6792:
+ case nct6793:
data->in_num = 15;
data->pwm_num = 6;
data->auto_pwm_num = 4;
@@ -3677,8 +3699,8 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_FAN_PULSES = NCT6779_REG_FAN_PULSES;
data->FAN_PULSE_SHIFT = NCT6775_FAN_PULSE_SHIFT;
data->REG_FAN_TIME[0] = NCT6775_REG_FAN_STOP_TIME;
- data->REG_FAN_TIME[1] = NCT6775_REG_FAN_STEP_UP_TIME;
- data->REG_FAN_TIME[2] = NCT6775_REG_FAN_STEP_DOWN_TIME;
+ data->REG_FAN_TIME[1] = NCT6776_REG_FAN_STEP_UP_TIME;
+ data->REG_FAN_TIME[2] = NCT6776_REG_FAN_STEP_DOWN_TIME;
data->REG_TOLERANCE_H = NCT6776_REG_TOLERANCE_H;
data->REG_PWM[0] = NCT6775_REG_PWM;
data->REG_PWM[1] = NCT6775_REG_FAN_START_OUTPUT;
@@ -3918,6 +3940,7 @@ static int nct6775_probe(struct platform_device *pdev)
case nct6779:
case nct6791:
case nct6792:
+ case nct6793:
break;
}
@@ -3950,6 +3973,7 @@ static int nct6775_probe(struct platform_device *pdev)
break;
case nct6791:
case nct6792:
+ case nct6793:
tmp |= 0x7e;
break;
}
@@ -4047,7 +4071,8 @@ static int __maybe_unused nct6775_resume(struct device *dev)
if (reg != data->sio_reg_enable)
superio_outb(sioreg, SIO_REG_ENABLE, data->sio_reg_enable);
- if (data->kind == nct6791 || data->kind == nct6792)
+ if (data->kind == nct6791 || data->kind == nct6792 ||
+ data->kind == nct6793)
nct6791_enable_io_mapping(sioreg);
superio_exit(sioreg);
@@ -4106,15 +4131,6 @@ static struct platform_driver nct6775_driver = {
.probe = nct6775_probe,
};
-static const char * const nct6775_sio_names[] __initconst = {
- "NCT6106D",
- "NCT6775F",
- "NCT6776D/F",
- "NCT6779D",
- "NCT6791D",
- "NCT6792D",
-};
-
/* nct6775_find() looks for a '627 in the Super-I/O config space */
static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
{
@@ -4150,6 +4166,9 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
case SIO_NCT6792_ID:
sio_data->kind = nct6792;
break;
+ case SIO_NCT6793_ID:
+ sio_data->kind = nct6793;
+ break;
default:
if (val != 0xffff)
pr_debug("unsupported chip ID: 0x%04x\n", val);
@@ -4175,7 +4194,8 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
}
- if (sio_data->kind == nct6791 || sio_data->kind == nct6792)
+ if (sio_data->kind == nct6791 || sio_data->kind == nct6792 ||
+ sio_data->kind == nct6793)
nct6791_enable_io_mapping(sioaddr);
superio_exit(sioaddr);
@@ -4285,7 +4305,7 @@ static void __exit sensors_nct6775_exit(void)
}
MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>");
-MODULE_DESCRIPTION("NCT6106D/NCT6775F/NCT6776F/NCT6779D/NCT6791D/NCT6792D driver");
+MODULE_DESCRIPTION("Driver for NCT6775F and compatible chips");
MODULE_LICENSE("GPL");
module_init(sensors_nct6775_init);
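
The NCT6776_REG_FAN_STEP_*_TIME macros added above simply alias the opposite NCT6775 register, which keeps the REG_FAN_TIME[] assignments readable while encoding the fact that the two registers are swapped on every chip after the NCT6775. A tiny standalone sketch of that aliasing, with hypothetical register addresses rather than the real nct6775 values:

#include <stdio.h>

/* Hypothetical register addresses standing in for the NCT6775 defines. */
#define NCT6775_REG_FAN_STEP_UP_TIME   0x100
#define NCT6775_REG_FAN_STEP_DOWN_TIME 0x101

/* On later chips the two registers are swapped, so the NCT6776 names are
 * aliases for the opposite NCT6775 register. */
#define NCT6776_REG_FAN_STEP_UP_TIME   NCT6775_REG_FAN_STEP_DOWN_TIME
#define NCT6776_REG_FAN_STEP_DOWN_TIME NCT6775_REG_FAN_STEP_UP_TIME

int main(void)
{
        /* REG_FAN_TIME[1]/[2] assignments now read naturally for both. */
        printf("nct6775 up/down: 0x%x 0x%x\n",
               NCT6775_REG_FAN_STEP_UP_TIME, NCT6775_REG_FAN_STEP_DOWN_TIME);
        printf("nct6776 up/down: 0x%x 0x%x\n",
               NCT6776_REG_FAN_STEP_UP_TIME, NCT6776_REG_FAN_STEP_DOWN_TIME);
        return 0;
}
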
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 2d9a712699ff..3e23003f78b0 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -323,6 +323,7 @@ static const struct of_device_id of_pwm_fan_match[] = {
{ .compatible = "pwm-fan", },
{},
};
+MODULE_DEVICE_TABLE(of, of_pwm_fan_match);
static struct platform_driver pwm_fan_driver = {
.probe = pwm_fan_probe,
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 3a3738fe016b..cd4510a63375 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -620,7 +620,7 @@ static struct cpuidle_state skl_cstates[] = {
.name = "C6-SKL",
.desc = "MWAIT 0x20",
.flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 75,
+ .exit_latency = 85,
.target_residency = 200,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
@@ -636,11 +636,19 @@ static struct cpuidle_state skl_cstates[] = {
.name = "C8-SKL",
.desc = "MWAIT 0x40",
.flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED,
- .exit_latency = 174,
+ .exit_latency = 200,
.target_residency = 800,
.enter = &intel_idle,
.enter_freeze = intel_idle_freeze, },
{
+ .name = "C9-SKL",
+ .desc = "MWAIT 0x50",
+ .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED,
+ .exit_latency = 480,
+ .target_residency = 5000,
+ .enter = &intel_idle,
+ .enter_freeze = intel_idle_freeze, },
+ {
.name = "C10-SKL",
.desc = "MWAIT 0x60",
.flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED,
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index da4c6979fbb8..aa26f3c3416b 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -56,7 +56,6 @@ config INFINIBAND_ADDR_TRANS
source "drivers/infiniband/hw/mthca/Kconfig"
source "drivers/infiniband/hw/qib/Kconfig"
-source "drivers/infiniband/hw/ehca/Kconfig"
source "drivers/infiniband/hw/cxgb3/Kconfig"
source "drivers/infiniband/hw/cxgb4/Kconfig"
source "drivers/infiniband/hw/mlx4/Kconfig"
diff --git a/drivers/infiniband/hw/Makefile b/drivers/infiniband/hw/Makefile
index 1bdb9996d371..aded2a5cc2d5 100644
--- a/drivers/infiniband/hw/Makefile
+++ b/drivers/infiniband/hw/Makefile
@@ -1,6 +1,5 @@
obj-$(CONFIG_INFINIBAND_MTHCA) += mthca/
obj-$(CONFIG_INFINIBAND_QIB) += qib/
-obj-$(CONFIG_INFINIBAND_EHCA) += ehca/
obj-$(CONFIG_INFINIBAND_CXGB3) += cxgb3/
obj-$(CONFIG_INFINIBAND_CXGB4) += cxgb4/
obj-$(CONFIG_MLX4_INFINIBAND) += mlx4/
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 41d6911e244e..f1ccd40beae9 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -245,7 +245,6 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
if (MLX5_CAP_GEN(mdev, apm))
props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
- props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
if (MLX5_CAP_GEN(mdev, xrc))
props->device_cap_flags |= IB_DEVICE_XRC;
props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
@@ -795,53 +794,6 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm
return 0;
}
-static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
-{
- struct mlx5_create_mkey_mbox_in *in;
- struct mlx5_mkey_seg *seg;
- struct mlx5_core_mr mr;
- int err;
-
- in = kzalloc(sizeof(*in), GFP_KERNEL);
- if (!in)
- return -ENOMEM;
-
- seg = &in->seg;
- seg->flags = MLX5_PERM_LOCAL_READ | MLX5_ACCESS_MODE_PA;
- seg->flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
- seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
- seg->start_addr = 0;
-
- err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in),
- NULL, NULL, NULL);
- if (err) {
- mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
- goto err_in;
- }
-
- kfree(in);
- *key = mr.key;
-
- return 0;
-
-err_in:
- kfree(in);
-
- return err;
-}
-
-static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key)
-{
- struct mlx5_core_mr mr;
- int err;
-
- memset(&mr, 0, sizeof(mr));
- mr.key = key;
- err = mlx5_core_destroy_mkey(dev->mdev, &mr);
- if (err)
- mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
-}
-
static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
struct ib_ucontext *context,
struct ib_udata *udata)
@@ -867,13 +819,6 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
kfree(pd);
return ERR_PTR(-EFAULT);
}
- } else {
- err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
- if (err) {
- mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
- kfree(pd);
- return ERR_PTR(err);
- }
}
return &pd->ibpd;
@@ -884,9 +829,6 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
struct mlx5_ib_dev *mdev = to_mdev(pd->device);
struct mlx5_ib_pd *mpd = to_mpd(pd);
- if (!pd->uobject)
- free_pa_mkey(mdev, mpd->pa_lkey);
-
mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
kfree(mpd);
@@ -1245,18 +1187,10 @@ static int create_dev_resources(struct mlx5_ib_resources *devr)
struct ib_srq_init_attr attr;
struct mlx5_ib_dev *dev;
struct ib_cq_init_attr cq_attr = {.cqe = 1};
- u32 rsvd_lkey;
int ret = 0;
dev = container_of(devr, struct mlx5_ib_dev, devr);
- ret = mlx5_core_query_special_context(dev->mdev, &rsvd_lkey);
- if (ret) {
- pr_err("Failed to query special context %d\n", ret);
- return ret;
- }
- dev->ib_dev.local_dma_lkey = rsvd_lkey;
-
devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
if (IS_ERR(devr->p0)) {
ret = PTR_ERR(devr->p0);
@@ -1418,6 +1352,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
dev->ib_dev.owner = THIS_MODULE;
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
+ dev->ib_dev.local_dma_lkey = 0 /* not supported for now */;
dev->num_ports = MLX5_CAP_GEN(mdev, num_ports);
dev->ib_dev.phys_port_cnt = dev->num_ports;
dev->ib_dev.num_comp_vectors =
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index bb8cda79e881..22123b79d550 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -103,7 +103,6 @@ static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibuconte
struct mlx5_ib_pd {
struct ib_pd ibpd;
u32 pdn;
- u32 pa_lkey;
};
/* Use macros here so that don't have to duplicate
@@ -213,7 +212,6 @@ struct mlx5_ib_qp {
int uuarn;
int create_type;
- u32 pa_lkey;
/* Store signature errors */
bool signature_en;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index c745c6c5e10d..6f521a3418e8 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -925,8 +925,6 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
if (err)
mlx5_ib_dbg(dev, "err %d\n", err);
- else
- qp->pa_lkey = to_mpd(pd)->pa_lkey;
}
if (err)
@@ -2045,7 +2043,7 @@ static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
dseg->addr = cpu_to_be64(mfrpl->map);
dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
- dseg->lkey = cpu_to_be32(pd->pa_lkey);
+ dseg->lkey = cpu_to_be32(pd->ibpd.local_dma_lkey);
}
static __be32 send_ieth(struct ib_send_wr *wr)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index ca2873698d75..4cd5428a2399 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -80,7 +80,7 @@ enum {
IPOIB_NUM_WC = 4,
IPOIB_MAX_PATH_REC_QUEUE = 3,
- IPOIB_MAX_MCAST_QUEUE = 3,
+ IPOIB_MAX_MCAST_QUEUE = 64,
IPOIB_FLAG_OPER_UP = 0,
IPOIB_FLAG_INITIALIZED = 1,
@@ -548,6 +548,8 @@ void ipoib_path_iter_read(struct ipoib_path_iter *iter,
int ipoib_mcast_attach(struct net_device *dev, u16 mlid,
union ib_gid *mgid, int set_qkey);
+int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast);
+struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid);
int ipoib_init_qp(struct net_device *dev);
int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 36536ce5a3e2..f74316e679d2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1149,6 +1149,9 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
unsigned long dt;
unsigned long flags;
int i;
+ LIST_HEAD(remove_list);
+ struct ipoib_mcast *mcast, *tmcast;
+ struct net_device *dev = priv->dev;
if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags))
return;
@@ -1176,6 +1179,19 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
lockdep_is_held(&priv->lock))) != NULL) {
/* was the neigh idle for two GC periods */
if (time_after(neigh_obsolete, neigh->alive)) {
+ u8 *mgid = neigh->daddr + 4;
+
+ /* Is this multicast ? */
+ if (*mgid == 0xff) {
+ mcast = __ipoib_mcast_find(dev, mgid);
+
+ if (mcast && test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
+ list_del(&mcast->list);
+ rb_erase(&mcast->rb_node, &priv->multicast_tree);
+ list_add_tail(&mcast->list, &remove_list);
+ }
+ }
+
rcu_assign_pointer(*np,
rcu_dereference_protected(neigh->hnext,
lockdep_is_held(&priv->lock)));
@@ -1191,6 +1207,8 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv)
out_unlock:
spin_unlock_irqrestore(&priv->lock, flags);
+ list_for_each_entry_safe(mcast, tmcast, &remove_list, list)
+ ipoib_mcast_leave(dev, mcast);
}
static void ipoib_reap_neigh(struct work_struct *work)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 09a1748f9d13..136cbefe00f8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -153,7 +153,7 @@ static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
return mcast;
}
-static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
+struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, void *mgid)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct rb_node *n = priv->multicast_tree.rb_node;
@@ -508,17 +508,19 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
rec.hop_limit = priv->broadcast->mcmember.hop_limit;
/*
- * Historically Linux IPoIB has never properly supported SEND
- * ONLY join. It emulated it by not providing all the required
- * attributes, which is enough to prevent group creation and
- * detect if there are full members or not. A major problem
- * with supporting SEND ONLY is detecting when the group is
- * auto-destroyed as IPoIB will cache the MLID..
+ * Send-only IB Multicast joins do not work at the core
+ * IB layer yet, so we can't use them here. However,
+ * we are emulating an Ethernet multicast send, which
+ * does not require a multicast subscription and will
+ * still send properly. The most appropriate thing to
+ * do is to create the group if it doesn't exist as that
+ * most closely emulates the behavior, from a user space
+ * application perspective, of Ethernet multicast
+ * operation. For now, we do a full join, maybe later
+ * when the core IB layers support send only joins we
+ * will use them.
*/
-#if 1
- if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
- comp_mask &= ~IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
-#else
+#if 0
if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
rec.join_state = 4;
#endif
@@ -675,7 +677,7 @@ int ipoib_mcast_stop_thread(struct net_device *dev)
return 0;
}
-static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
+int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
int ret = 0;
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 1ace5d83a4d7..f58ff96b6cbb 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -97,6 +97,11 @@ unsigned int iser_max_sectors = ISER_DEF_MAX_SECTORS;
module_param_named(max_sectors, iser_max_sectors, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_sectors, "Max number of sectors in a single scsi command (default:1024");
+bool iser_always_reg = true;
+module_param_named(always_register, iser_always_reg, bool, S_IRUGO);
+MODULE_PARM_DESC(always_register,
+ "Always register memory, even for continuous memory regions (default:true)");
+
bool iser_pi_enable = false;
module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO);
MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)");
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 86f6583485ef..a5edd6ede692 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -611,6 +611,7 @@ extern int iser_debug_level;
extern bool iser_pi_enable;
extern int iser_pi_guard;
extern unsigned int iser_max_sectors;
+extern bool iser_always_reg;
int iser_assign_reg_ops(struct iser_device *device);
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 2493cc748db8..4c46d67d37a1 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -803,11 +803,12 @@ static int
iser_reg_prot_sg(struct iscsi_iser_task *task,
struct iser_data_buf *mem,
struct iser_fr_desc *desc,
+ bool use_dma_key,
struct iser_mem_reg *reg)
{
struct iser_device *device = task->iser_conn->ib_conn.device;
- if (mem->dma_nents == 1)
+ if (use_dma_key)
return iser_reg_dma(device, mem, reg);
return device->reg_ops->reg_mem(task, mem, &desc->pi_ctx->rsc, reg);
@@ -817,11 +818,12 @@ static int
iser_reg_data_sg(struct iscsi_iser_task *task,
struct iser_data_buf *mem,
struct iser_fr_desc *desc,
+ bool use_dma_key,
struct iser_mem_reg *reg)
{
struct iser_device *device = task->iser_conn->ib_conn.device;
- if (mem->dma_nents == 1)
+ if (use_dma_key)
return iser_reg_dma(device, mem, reg);
return device->reg_ops->reg_mem(task, mem, &desc->rsc, reg);
@@ -836,14 +838,17 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
struct iser_mem_reg *reg = &task->rdma_reg[dir];
struct iser_mem_reg *data_reg;
struct iser_fr_desc *desc = NULL;
+ bool use_dma_key;
int err;
err = iser_handle_unaligned_buf(task, mem, dir);
if (unlikely(err))
return err;
- if (mem->dma_nents != 1 ||
- scsi_get_prot_op(task->sc) != SCSI_PROT_NORMAL) {
+ use_dma_key = (mem->dma_nents == 1 && !iser_always_reg &&
+ scsi_get_prot_op(task->sc) == SCSI_PROT_NORMAL);
+
+ if (!use_dma_key) {
desc = device->reg_ops->reg_desc_get(ib_conn);
reg->mem_h = desc;
}
@@ -853,7 +858,7 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
else
data_reg = &task->desc.data_reg;
- err = iser_reg_data_sg(task, mem, desc, data_reg);
+ err = iser_reg_data_sg(task, mem, desc, use_dma_key, data_reg);
if (unlikely(err))
goto err_reg;
@@ -866,7 +871,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *task,
if (unlikely(err))
goto err_reg;
- err = iser_reg_prot_sg(task, mem, desc, prot_reg);
+ err = iser_reg_prot_sg(task, mem, desc,
+ use_dma_key, prot_reg);
if (unlikely(err))
goto err_reg;
}
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index ae70cc1463ac..85132d867bc8 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -133,11 +133,15 @@ static int iser_create_device_ib_res(struct iser_device *device)
(unsigned long)comp);
}
- device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
- IB_ACCESS_REMOTE_WRITE |
- IB_ACCESS_REMOTE_READ);
- if (IS_ERR(device->mr))
- goto dma_mr_err;
+ if (!iser_always_reg) {
+ int access = IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE |
+ IB_ACCESS_REMOTE_READ;
+
+ device->mr = ib_get_dma_mr(device->pd, access);
+ if (IS_ERR(device->mr))
+ goto dma_mr_err;
+ }
INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
iser_event_handler);
@@ -147,7 +151,8 @@ static int iser_create_device_ib_res(struct iser_device *device)
return 0;
handler_err:
- ib_dereg_mr(device->mr);
+ if (device->mr)
+ ib_dereg_mr(device->mr);
dma_mr_err:
for (i = 0; i < device->comps_used; i++)
tasklet_kill(&device->comps[i].tasklet);
@@ -173,7 +178,6 @@ comps_err:
static void iser_free_device_ib_res(struct iser_device *device)
{
int i;
- BUG_ON(device->mr == NULL);
for (i = 0; i < device->comps_used; i++) {
struct iser_comp *comp = &device->comps[i];
@@ -184,7 +188,8 @@ static void iser_free_device_ib_res(struct iser_device *device)
}
(void)ib_unregister_event_handler(&device->event_handler);
- (void)ib_dereg_mr(device->mr);
+ if (device->mr)
+ (void)ib_dereg_mr(device->mr);
ib_dealloc_pd(device->pd);
kfree(device->comps);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 403bd29443b8..aa59037d7504 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -238,8 +238,6 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
rx_sg->lkey = device->pd->local_dma_lkey;
}
- isert_conn->rx_desc_head = 0;
-
return 0;
dma_map_fail:
@@ -634,7 +632,7 @@ static void
isert_init_conn(struct isert_conn *isert_conn)
{
isert_conn->state = ISER_CONN_INIT;
- INIT_LIST_HEAD(&isert_conn->accept_node);
+ INIT_LIST_HEAD(&isert_conn->node);
init_completion(&isert_conn->login_comp);
init_completion(&isert_conn->login_req_comp);
init_completion(&isert_conn->wait);
@@ -762,28 +760,15 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
ret = isert_rdma_post_recvl(isert_conn);
if (ret)
goto out_conn_dev;
- /*
- * Obtain the second reference now before isert_rdma_accept() to
- * ensure that any initiator generated REJECT CM event that occurs
- * asynchronously won't drop the last reference until the error path
- * in iscsi_target_login_sess_out() does it's ->iscsit_free_conn() ->
- * isert_free_conn() -> isert_put_conn() -> kref_put().
- */
- if (!kref_get_unless_zero(&isert_conn->kref)) {
- isert_warn("conn %p connect_release is running\n", isert_conn);
- goto out_conn_dev;
- }
ret = isert_rdma_accept(isert_conn);
if (ret)
goto out_conn_dev;
- mutex_lock(&isert_np->np_accept_mutex);
- list_add_tail(&isert_conn->accept_node, &isert_np->np_accept_list);
- mutex_unlock(&isert_np->np_accept_mutex);
+ mutex_lock(&isert_np->mutex);
+ list_add_tail(&isert_conn->node, &isert_np->accepted);
+ mutex_unlock(&isert_np->mutex);
- isert_info("np %p: Allow accept_np to continue\n", np);
- up(&isert_np->np_sem);
return 0;
out_conn_dev:
@@ -831,13 +816,21 @@ static void
isert_connected_handler(struct rdma_cm_id *cma_id)
{
struct isert_conn *isert_conn = cma_id->qp->qp_context;
+ struct isert_np *isert_np = cma_id->context;
isert_info("conn %p\n", isert_conn);
mutex_lock(&isert_conn->mutex);
- if (isert_conn->state != ISER_CONN_FULL_FEATURE)
- isert_conn->state = ISER_CONN_UP;
+ isert_conn->state = ISER_CONN_UP;
+ kref_get(&isert_conn->kref);
mutex_unlock(&isert_conn->mutex);
+
+ mutex_lock(&isert_np->mutex);
+ list_move_tail(&isert_conn->node, &isert_np->pending);
+ mutex_unlock(&isert_np->mutex);
+
+ isert_info("np %p: Allow accept_np to continue\n", isert_np);
+ up(&isert_np->sem);
}
static void
@@ -903,14 +896,14 @@ isert_np_cma_handler(struct isert_np *isert_np,
switch (event) {
case RDMA_CM_EVENT_DEVICE_REMOVAL:
- isert_np->np_cm_id = NULL;
+ isert_np->cm_id = NULL;
break;
case RDMA_CM_EVENT_ADDR_CHANGE:
- isert_np->np_cm_id = isert_setup_id(isert_np);
- if (IS_ERR(isert_np->np_cm_id)) {
+ isert_np->cm_id = isert_setup_id(isert_np);
+ if (IS_ERR(isert_np->cm_id)) {
isert_err("isert np %p setup id failed: %ld\n",
- isert_np, PTR_ERR(isert_np->np_cm_id));
- isert_np->np_cm_id = NULL;
+ isert_np, PTR_ERR(isert_np->cm_id));
+ isert_np->cm_id = NULL;
}
break;
default:
@@ -929,7 +922,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
struct isert_conn *isert_conn;
bool terminating = false;
- if (isert_np->np_cm_id == cma_id)
+ if (isert_np->cm_id == cma_id)
return isert_np_cma_handler(cma_id->context, event);
isert_conn = cma_id->qp->qp_context;
@@ -945,13 +938,13 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
if (terminating)
goto out;
- mutex_lock(&isert_np->np_accept_mutex);
- if (!list_empty(&isert_conn->accept_node)) {
- list_del_init(&isert_conn->accept_node);
+ mutex_lock(&isert_np->mutex);
+ if (!list_empty(&isert_conn->node)) {
+ list_del_init(&isert_conn->node);
isert_put_conn(isert_conn);
queue_work(isert_release_wq, &isert_conn->release_work);
}
- mutex_unlock(&isert_np->np_accept_mutex);
+ mutex_unlock(&isert_np->mutex);
out:
return 0;
@@ -962,6 +955,7 @@ isert_connect_error(struct rdma_cm_id *cma_id)
{
struct isert_conn *isert_conn = cma_id->qp->qp_context;
+ list_del_init(&isert_conn->node);
isert_conn->cm_id = NULL;
isert_put_conn(isert_conn);
@@ -1006,35 +1000,51 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
}
static int
-isert_post_recv(struct isert_conn *isert_conn, u32 count)
+isert_post_recvm(struct isert_conn *isert_conn, u32 count)
{
struct ib_recv_wr *rx_wr, *rx_wr_failed;
int i, ret;
- unsigned int rx_head = isert_conn->rx_desc_head;
struct iser_rx_desc *rx_desc;
for (rx_wr = isert_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
- rx_desc = &isert_conn->rx_descs[rx_head];
- rx_wr->wr_id = (uintptr_t)rx_desc;
- rx_wr->sg_list = &rx_desc->rx_sg;
- rx_wr->num_sge = 1;
- rx_wr->next = rx_wr + 1;
- rx_head = (rx_head + 1) & (ISERT_QP_MAX_RECV_DTOS - 1);
+ rx_desc = &isert_conn->rx_descs[i];
+ rx_wr->wr_id = (uintptr_t)rx_desc;
+ rx_wr->sg_list = &rx_desc->rx_sg;
+ rx_wr->num_sge = 1;
+ rx_wr->next = rx_wr + 1;
}
-
rx_wr--;
rx_wr->next = NULL; /* mark end of work requests list */
isert_conn->post_recv_buf_count += count;
ret = ib_post_recv(isert_conn->qp, isert_conn->rx_wr,
- &rx_wr_failed);
+ &rx_wr_failed);
if (ret) {
isert_err("ib_post_recv() failed with ret: %d\n", ret);
isert_conn->post_recv_buf_count -= count;
- } else {
- isert_dbg("Posted %d RX buffers\n", count);
- isert_conn->rx_desc_head = rx_head;
}
+
+ return ret;
+}
+
+static int
+isert_post_recv(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc)
+{
+ struct ib_recv_wr *rx_wr_failed, rx_wr;
+ int ret;
+
+ rx_wr.wr_id = (uintptr_t)rx_desc;
+ rx_wr.sg_list = &rx_desc->rx_sg;
+ rx_wr.num_sge = 1;
+ rx_wr.next = NULL;
+
+ isert_conn->post_recv_buf_count++;
+ ret = ib_post_recv(isert_conn->qp, &rx_wr, &rx_wr_failed);
+ if (ret) {
+ isert_err("ib_post_recv() failed with ret: %d\n", ret);
+ isert_conn->post_recv_buf_count--;
+ }
+
return ret;
}
@@ -1205,7 +1215,8 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
if (ret)
return ret;
- ret = isert_post_recv(isert_conn, ISERT_MIN_POSTED_RX);
+ ret = isert_post_recvm(isert_conn,
+ ISERT_QP_MAX_RECV_DTOS);
if (ret)
return ret;
@@ -1278,7 +1289,7 @@ isert_rx_login_req(struct isert_conn *isert_conn)
}
static struct iscsi_cmd
-*isert_allocate_cmd(struct iscsi_conn *conn)
+*isert_allocate_cmd(struct iscsi_conn *conn, struct iser_rx_desc *rx_desc)
{
struct isert_conn *isert_conn = conn->context;
struct isert_cmd *isert_cmd;
@@ -1292,6 +1303,7 @@ static struct iscsi_cmd
isert_cmd = iscsit_priv_cmd(cmd);
isert_cmd->conn = isert_conn;
isert_cmd->iscsi_cmd = cmd;
+ isert_cmd->rx_desc = rx_desc;
return cmd;
}
@@ -1303,9 +1315,9 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
{
struct iscsi_conn *conn = isert_conn->conn;
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)buf;
- struct scatterlist *sg;
int imm_data, imm_data_len, unsol_data, sg_nents, rc;
bool dump_payload = false;
+ unsigned int data_len;
rc = iscsit_setup_scsi_cmd(conn, cmd, buf);
if (rc < 0)
@@ -1314,7 +1326,10 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
imm_data = cmd->immediate_data;
imm_data_len = cmd->first_burst_len;
unsol_data = cmd->unsolicited_data;
+ data_len = cmd->se_cmd.data_length;
+ if (imm_data && imm_data_len == data_len)
+ cmd->se_cmd.se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
rc = iscsit_process_scsi_cmd(conn, cmd, hdr);
if (rc < 0) {
return 0;
@@ -1326,13 +1341,20 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
if (!imm_data)
return 0;
- sg = &cmd->se_cmd.t_data_sg[0];
- sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
-
- isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
- sg, sg_nents, &rx_desc->data[0], imm_data_len);
-
- sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);
+ if (imm_data_len != data_len) {
+ sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
+ sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
+ &rx_desc->data[0], imm_data_len);
+ isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
+ sg_nents, imm_data_len);
+ } else {
+ sg_init_table(&isert_cmd->sg, 1);
+ cmd->se_cmd.t_data_sg = &isert_cmd->sg;
+ cmd->se_cmd.t_data_nents = 1;
+ sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
+ isert_dbg("Transfer Immediate imm_data_len: %d\n",
+ imm_data_len);
+ }
cmd->write_data_done += imm_data_len;
@@ -1407,6 +1429,15 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
if (rc < 0)
return rc;
+ /*
+	 * Multiple data-outs on the same command can arrive,
+	 * so post the buffer beforehand.
+ */
+ rc = isert_post_recv(isert_conn, rx_desc);
+ if (rc) {
+ isert_err("ib_post_recv failed with %d\n", rc);
+ return rc;
+ }
return 0;
}
@@ -1479,7 +1510,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
switch (opcode) {
case ISCSI_OP_SCSI_CMD:
- cmd = isert_allocate_cmd(conn);
+ cmd = isert_allocate_cmd(conn, rx_desc);
if (!cmd)
break;
@@ -1493,7 +1524,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
rx_desc, (unsigned char *)hdr);
break;
case ISCSI_OP_NOOP_OUT:
- cmd = isert_allocate_cmd(conn);
+ cmd = isert_allocate_cmd(conn, rx_desc);
if (!cmd)
break;
@@ -1506,7 +1537,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
(unsigned char *)hdr);
break;
case ISCSI_OP_SCSI_TMFUNC:
- cmd = isert_allocate_cmd(conn);
+ cmd = isert_allocate_cmd(conn, rx_desc);
if (!cmd)
break;
@@ -1514,22 +1545,20 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
(unsigned char *)hdr);
break;
case ISCSI_OP_LOGOUT:
- cmd = isert_allocate_cmd(conn);
+ cmd = isert_allocate_cmd(conn, rx_desc);
if (!cmd)
break;
ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
break;
case ISCSI_OP_TEXT:
- if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) {
+ if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF)
cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
- if (!cmd)
- break;
- } else {
- cmd = isert_allocate_cmd(conn);
- if (!cmd)
- break;
- }
+ else
+ cmd = isert_allocate_cmd(conn, rx_desc);
+
+ if (!cmd)
+ break;
isert_cmd = iscsit_priv_cmd(cmd);
ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
@@ -1589,7 +1618,7 @@ isert_rcv_completion(struct iser_rx_desc *desc,
struct ib_device *ib_dev = isert_conn->cm_id->device;
struct iscsi_hdr *hdr;
u64 rx_dma;
- int rx_buflen, outstanding;
+ int rx_buflen;
if ((char *)desc == isert_conn->login_req_buf) {
rx_dma = isert_conn->login_req_dma;
@@ -1629,22 +1658,6 @@ isert_rcv_completion(struct iser_rx_desc *desc,
DMA_FROM_DEVICE);
isert_conn->post_recv_buf_count--;
- isert_dbg("Decremented post_recv_buf_count: %d\n",
- isert_conn->post_recv_buf_count);
-
- if ((char *)desc == isert_conn->login_req_buf)
- return;
-
- outstanding = isert_conn->post_recv_buf_count;
- if (outstanding + ISERT_MIN_POSTED_RX <= ISERT_QP_MAX_RECV_DTOS) {
- int err, count = min(ISERT_QP_MAX_RECV_DTOS - outstanding,
- ISERT_MIN_POSTED_RX);
- err = isert_post_recv(isert_conn, count);
- if (err) {
- isert_err("isert_post_recv() count: %d failed, %d\n",
- count, err);
- }
- }
}
static int
@@ -2156,6 +2169,12 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
struct ib_send_wr *wr_failed;
int ret;
+ ret = isert_post_recv(isert_conn, isert_cmd->rx_desc);
+ if (ret) {
+ isert_err("ib_post_recv failed with %d\n", ret);
+ return ret;
+ }
+
ret = ib_post_send(isert_conn->qp, &isert_cmd->tx_desc.send_wr,
&wr_failed);
if (ret) {
@@ -2950,6 +2969,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
&isert_cmd->tx_desc.send_wr);
isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
wr->send_wr_num += 1;
+
+ rc = isert_post_recv(isert_conn, isert_cmd->rx_desc);
+ if (rc) {
+ isert_err("ib_post_recv failed with %d\n", rc);
+ return rc;
+ }
}
rc = ib_post_send(isert_conn->qp, wr->send_wr, &wr_failed);
@@ -2999,9 +3024,16 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
static int
isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
{
- int ret;
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+ int ret = 0;
switch (state) {
+ case ISTATE_REMOVE:
+ spin_lock_bh(&conn->cmd_lock);
+ list_del_init(&cmd->i_conn_node);
+ spin_unlock_bh(&conn->cmd_lock);
+ isert_put_cmd(isert_cmd, true);
+ break;
case ISTATE_SEND_NOPIN_WANT_RESPONSE:
ret = isert_put_nopin(cmd, conn, false);
break;
@@ -3106,10 +3138,10 @@ isert_setup_np(struct iscsi_np *np,
isert_err("Unable to allocate struct isert_np\n");
return -ENOMEM;
}
- sema_init(&isert_np->np_sem, 0);
- mutex_init(&isert_np->np_accept_mutex);
- INIT_LIST_HEAD(&isert_np->np_accept_list);
- init_completion(&isert_np->np_login_comp);
+ sema_init(&isert_np->sem, 0);
+ mutex_init(&isert_np->mutex);
+ INIT_LIST_HEAD(&isert_np->accepted);
+ INIT_LIST_HEAD(&isert_np->pending);
isert_np->np = np;
/*
@@ -3125,7 +3157,7 @@ isert_setup_np(struct iscsi_np *np,
goto out;
}
- isert_np->np_cm_id = isert_lid;
+ isert_np->cm_id = isert_lid;
np->np_context = isert_np;
return 0;
@@ -3214,7 +3246,7 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
int ret;
accept_wait:
- ret = down_interruptible(&isert_np->np_sem);
+ ret = down_interruptible(&isert_np->sem);
if (ret)
return -ENODEV;
@@ -3231,15 +3263,15 @@ accept_wait:
}
spin_unlock_bh(&np->np_thread_lock);
- mutex_lock(&isert_np->np_accept_mutex);
- if (list_empty(&isert_np->np_accept_list)) {
- mutex_unlock(&isert_np->np_accept_mutex);
+ mutex_lock(&isert_np->mutex);
+ if (list_empty(&isert_np->pending)) {
+ mutex_unlock(&isert_np->mutex);
goto accept_wait;
}
- isert_conn = list_first_entry(&isert_np->np_accept_list,
- struct isert_conn, accept_node);
- list_del_init(&isert_conn->accept_node);
- mutex_unlock(&isert_np->np_accept_mutex);
+ isert_conn = list_first_entry(&isert_np->pending,
+ struct isert_conn, node);
+ list_del_init(&isert_conn->node);
+ mutex_unlock(&isert_np->mutex);
conn->context = isert_conn;
isert_conn->conn = conn;
@@ -3257,28 +3289,39 @@ isert_free_np(struct iscsi_np *np)
struct isert_np *isert_np = np->np_context;
struct isert_conn *isert_conn, *n;
- if (isert_np->np_cm_id)
- rdma_destroy_id(isert_np->np_cm_id);
+ if (isert_np->cm_id)
+ rdma_destroy_id(isert_np->cm_id);
/*
	 * FIXME: At this point we don't have a good way to ensure
	 * that there are no hanging connections that
	 * completed RDMA establishment but didn't start the iscsi login
	 * process. So work around this by cleaning up whatever piled
- * up in np_accept_list.
+ * up in accepted and pending lists.
*/
- mutex_lock(&isert_np->np_accept_mutex);
- if (!list_empty(&isert_np->np_accept_list)) {
- isert_info("Still have isert connections, cleaning up...\n");
+ mutex_lock(&isert_np->mutex);
+ if (!list_empty(&isert_np->pending)) {
+ isert_info("Still have isert pending connections\n");
+ list_for_each_entry_safe(isert_conn, n,
+ &isert_np->pending,
+ node) {
+ isert_info("cleaning isert_conn %p state (%d)\n",
+ isert_conn, isert_conn->state);
+ isert_connect_release(isert_conn);
+ }
+ }
+
+ if (!list_empty(&isert_np->accepted)) {
+ isert_info("Still have isert accepted connections\n");
list_for_each_entry_safe(isert_conn, n,
- &isert_np->np_accept_list,
- accept_node) {
+ &isert_np->accepted,
+ node) {
isert_info("cleaning isert_conn %p state (%d)\n",
isert_conn, isert_conn->state);
isert_connect_release(isert_conn);
}
}
- mutex_unlock(&isert_np->np_accept_mutex);
+ mutex_unlock(&isert_np->mutex);
np->np_context = NULL;
kfree(isert_np);
@@ -3345,6 +3388,41 @@ isert_wait4flush(struct isert_conn *isert_conn)
wait_for_completion(&isert_conn->wait_comp_err);
}
+/**
+ * isert_put_unsol_pending_cmds() - Drop commands waiting for
+ *     unsolicited dataout
+ * @conn: iscsi connection
+ *
+ * We might still have commands waiting for unsolicited
+ * dataout messages. We must put the extra reference on those
+ * before blocking on target_wait_for_session_cmds.
+ */
+static void
+isert_put_unsol_pending_cmds(struct iscsi_conn *conn)
+{
+ struct iscsi_cmd *cmd, *tmp;
+ static LIST_HEAD(drop_cmd_list);
+
+ spin_lock_bh(&conn->cmd_lock);
+ list_for_each_entry_safe(cmd, tmp, &conn->conn_cmd_list, i_conn_node) {
+ if ((cmd->cmd_flags & ICF_NON_IMMEDIATE_UNSOLICITED_DATA) &&
+ (cmd->write_data_done < conn->sess->sess_ops->FirstBurstLength) &&
+ (cmd->write_data_done < cmd->se_cmd.data_length))
+ list_move_tail(&cmd->i_conn_node, &drop_cmd_list);
+ }
+ spin_unlock_bh(&conn->cmd_lock);
+
+ list_for_each_entry_safe(cmd, tmp, &drop_cmd_list, i_conn_node) {
+ list_del_init(&cmd->i_conn_node);
+ if (cmd->i_state != ISTATE_REMOVE) {
+ struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
+
+ isert_info("conn %p dropping cmd %p\n", conn, cmd);
+ isert_put_cmd(isert_cmd, true);
+ }
+ }
+}
+
static void isert_wait_conn(struct iscsi_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
@@ -3363,8 +3441,9 @@ static void isert_wait_conn(struct iscsi_conn *conn)
isert_conn_terminate(isert_conn);
mutex_unlock(&isert_conn->mutex);
- isert_wait4cmds(conn);
isert_wait4flush(isert_conn);
+ isert_put_unsol_pending_cmds(conn);
+ isert_wait4cmds(conn);
isert_wait4logout(isert_conn);
queue_work(isert_release_wq, &isert_conn->release_work);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 6a04ba3c0f72..c5b99bcecbcf 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -113,7 +113,6 @@ enum {
};
struct isert_rdma_wr {
- struct list_head wr_list;
struct isert_cmd *isert_cmd;
enum iser_ib_op_code iser_ib_op;
struct ib_sge *ib_sge;
@@ -134,14 +133,13 @@ struct isert_cmd {
uint64_t write_va;
u64 pdu_buf_dma;
u32 pdu_buf_len;
- u32 read_va_off;
- u32 write_va_off;
- u32 rdma_wr_num;
struct isert_conn *conn;
struct iscsi_cmd *iscsi_cmd;
struct iser_tx_desc tx_desc;
+ struct iser_rx_desc *rx_desc;
struct isert_rdma_wr rdma_wr;
struct work_struct comp_work;
+ struct scatterlist sg;
};
struct isert_device;
@@ -159,11 +157,10 @@ struct isert_conn {
u64 login_req_dma;
int login_req_len;
u64 login_rsp_dma;
- unsigned int rx_desc_head;
struct iser_rx_desc *rx_descs;
- struct ib_recv_wr rx_wr[ISERT_MIN_POSTED_RX];
+ struct ib_recv_wr rx_wr[ISERT_QP_MAX_RECV_DTOS];
struct iscsi_conn *conn;
- struct list_head accept_node;
+ struct list_head node;
struct completion login_comp;
struct completion login_req_comp;
struct iser_tx_desc login_tx_desc;
@@ -222,9 +219,9 @@ struct isert_device {
struct isert_np {
struct iscsi_np *np;
- struct semaphore np_sem;
- struct rdma_cm_id *np_cm_id;
- struct mutex np_accept_mutex;
- struct list_head np_accept_list;
- struct completion np_login_comp;
+ struct semaphore sem;
+ struct rdma_cm_id *cm_id;
+ struct mutex mutex;
+ struct list_head accepted;
+ struct list_head pending;
};
diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig
index 56eb471b5576..4215b5382092 100644
--- a/drivers/input/joystick/Kconfig
+++ b/drivers/input/joystick/Kconfig
@@ -196,6 +196,7 @@ config JOYSTICK_TWIDJOY
config JOYSTICK_ZHENHUA
tristate "5-byte Zhenhua RC transmitter"
select SERIO
+ select BITREVERSE
help
Say Y here if you have a Zhen Hua PPM-4CH transmitter which is
	  supplied with ready-to-fly micro electric indoor helicopters
diff --git a/drivers/input/joystick/walkera0701.c b/drivers/input/joystick/walkera0701.c
index b76ac580703c..a8bc2fe170dd 100644
--- a/drivers/input/joystick/walkera0701.c
+++ b/drivers/input/joystick/walkera0701.c
@@ -150,7 +150,7 @@ static void walkera0701_irq_handler(void *handler_data)
if (w->counter == 24) { /* full frame */
walkera0701_parse_frame(w);
w->counter = NO_SYNC;
- if (abs(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */
+ if (abs64(pulse_time - SYNC_PULSE) < RESERVE) /* new frame sync */
w->counter = 0;
} else {
if ((pulse_time > (ANALOG_MIN_PULSE - RESERVE)
@@ -161,7 +161,7 @@ static void walkera0701_irq_handler(void *handler_data)
} else
w->counter = NO_SYNC;
}
- } else if (abs(pulse_time - SYNC_PULSE - BIN0_PULSE) <
+ } else if (abs64(pulse_time - SYNC_PULSE - BIN0_PULSE) <
RESERVE + BIN1_PULSE - BIN0_PULSE) /* frame sync .. */
w->counter = 0;
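
The abs() -> abs64() change above matters because the value being tested is a 64-bit time delta, while plain abs() operates on int and silently truncates its argument. Below is a minimal standalone sketch of that failure mode, not part of the patch; it uses llabs() as the userspace stand-in for the kernel's abs64(), and the timestamp values are invented for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical nanosecond timestamps, far larger than INT_MAX */
	int64_t pulse_time = 10000000500LL;
	int64_t sync_pulse = 10000000000LL;
	int64_t delta = pulse_time - sync_pulse;	/* small: safe either way */
	int64_t huge  = pulse_time + sync_pulse;	/* does not fit in an int */

	/* abs() takes an int, so the 64-bit value is truncated before the
	 * absolute value is taken -- the bug class abs64()/llabs() avoids */
	printf("abs((int)huge) = %d\n", abs((int)huge));
	printf("llabs(huge)    = %lld\n", (long long)llabs(huge));
	printf("llabs(delta)   = %lld\n", (long long)llabs(delta));
	return 0;
}
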
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index b052afec9a11..6639b2b8528a 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -266,7 +266,7 @@ static int omap4_keypad_probe(struct platform_device *pdev)
error = omap4_keypad_parse_dt(&pdev->dev, keypad_data);
if (error)
- return error;
+ goto err_free_keypad;
res = request_mem_region(res->start, resource_size(res), pdev->name);
if (!res) {
diff --git a/drivers/input/misc/pm8941-pwrkey.c b/drivers/input/misc/pm8941-pwrkey.c
index 867db8a91372..e317b75357a0 100644
--- a/drivers/input/misc/pm8941-pwrkey.c
+++ b/drivers/input/misc/pm8941-pwrkey.c
@@ -93,7 +93,7 @@ static int pm8941_reboot_notify(struct notifier_block *nb,
default:
reset_type = PON_PS_HOLD_TYPE_HARD_RESET;
break;
- };
+ }
error = regmap_update_bits(pwrkey->regmap,
pwrkey->baseaddr + PON_PS_HOLD_RST_CTL,
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index 345df9b03aed..5adbcedcb81c 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -414,7 +414,7 @@ static int uinput_setup_device(struct uinput_device *udev,
dev->id.product = user_dev->id.product;
dev->id.version = user_dev->id.version;
- for_each_set_bit(i, dev->absbit, ABS_CNT) {
+ for (i = 0; i < ABS_CNT; i++) {
input_abs_set_max(dev, i, user_dev->absmax[i]);
input_abs_set_min(dev, i, user_dev->absmin[i]);
input_abs_set_fuzz(dev, i, user_dev->absfuzz[i]);
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index 73670f2aebfd..c0ec26118732 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -60,7 +60,7 @@ struct elan_transport_ops {
int (*get_sm_version)(struct i2c_client *client,
u8* ic_type, u8 *version);
int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum);
- int (*get_product_id)(struct i2c_client *client, u8 *id);
+ int (*get_product_id)(struct i2c_client *client, u16 *id);
int (*get_max)(struct i2c_client *client,
unsigned int *max_x, unsigned int *max_y);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index fa945304b9a5..5e1665bbaa0b 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -40,7 +40,7 @@
#include "elan_i2c.h"
#define DRIVER_NAME "elan_i2c"
-#define ELAN_DRIVER_VERSION "1.6.0"
+#define ELAN_DRIVER_VERSION "1.6.1"
#define ETP_MAX_PRESSURE 255
#define ETP_FWIDTH_REDUCE 90
#define ETP_FINGER_WIDTH 15
@@ -76,7 +76,7 @@ struct elan_tp_data {
unsigned int x_res;
unsigned int y_res;
- u8 product_id;
+ u16 product_id;
u8 fw_version;
u8 sm_version;
u8 iap_version;
@@ -98,15 +98,25 @@ static int elan_get_fwinfo(u8 iap_version, u16 *validpage_count,
u16 *signature_address)
{
switch (iap_version) {
+ case 0x00:
+ case 0x06:
case 0x08:
*validpage_count = 512;
break;
+ case 0x03:
+ case 0x07:
case 0x09:
+ case 0x0A:
+ case 0x0B:
+ case 0x0C:
*validpage_count = 768;
break;
case 0x0D:
*validpage_count = 896;
break;
+ case 0x0E:
+ *validpage_count = 640;
+ break;
default:
		/* unknown ic type, clear value */
*validpage_count = 0;
@@ -266,11 +276,10 @@ static int elan_query_device_info(struct elan_tp_data *data)
error = elan_get_fwinfo(data->iap_version, &data->fw_validpage_count,
&data->fw_signature_address);
- if (error) {
- dev_err(&data->client->dev,
- "unknown iap version %d\n", data->iap_version);
- return error;
- }
+ if (error)
+ dev_warn(&data->client->dev,
+ "unexpected iap version %#04x (ic type: %#04x), firmware update will not work\n",
+ data->iap_version, data->ic_type);
return 0;
}
@@ -486,6 +495,9 @@ static ssize_t elan_sysfs_update_fw(struct device *dev,
const u8 *fw_signature;
static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF};
+ if (data->fw_validpage_count == 0)
+ return -EINVAL;
+
/* Look for a firmware with the product id appended. */
fw_name = kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id);
if (!fw_name) {
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c
index 683c840c9dd7..a679e56c44cd 100644
--- a/drivers/input/mouse/elan_i2c_i2c.c
+++ b/drivers/input/mouse/elan_i2c_i2c.c
@@ -276,7 +276,7 @@ static int elan_i2c_get_sm_version(struct i2c_client *client,
return 0;
}
-static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id)
+static int elan_i2c_get_product_id(struct i2c_client *client, u16 *id)
{
int error;
u8 val[3];
@@ -287,7 +287,7 @@ static int elan_i2c_get_product_id(struct i2c_client *client, u8 *id)
return error;
}
- *id = val[0];
+ *id = le16_to_cpup((__le16 *)val);
return 0;
}
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index ff36a366b2aa..cb6aecbc1dc2 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -183,7 +183,7 @@ static int elan_smbus_get_sm_version(struct i2c_client *client,
return 0;
}
-static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id)
+static int elan_smbus_get_product_id(struct i2c_client *client, u16 *id)
{
int error;
u8 val[3];
@@ -195,7 +195,7 @@ static int elan_smbus_get_product_id(struct i2c_client *client, u8 *id)
return error;
}
- *id = val[1];
+ *id = be16_to_cpup((__be16 *)val);
return 0;
}
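
For context on the u8 -> u16 product-id change above: the two transports decode the same pair of reply bytes with opposite byte order (le16_to_cpup() on the I2C path, be16_to_cpup() on the SMBus path). A standalone sketch of that decoding follows; it is not part of the patch, and the reply bytes are made up.

#include <stdio.h>
#include <stdint.h>

static uint16_t get_le16(const uint8_t *p)	/* what le16_to_cpup() computes */
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static uint16_t get_be16(const uint8_t *p)	/* what be16_to_cpup() computes */
{
	return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
	uint8_t val[3] = { 0x4f, 0x01, 0x00 };	/* hypothetical reply buffer */

	printf("LE product id: 0x%04x\n", get_le16(val));	/* 0x014f */
	printf("BE product id: 0x%04x\n", get_be16(val));	/* 0x4f01 */
	return 0;
}
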
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 994ae7886156..6025eb430c0a 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -519,18 +519,14 @@ static int synaptics_set_mode(struct psmouse *psmouse)
struct synaptics_data *priv = psmouse->private;
priv->mode = 0;
-
- if (priv->absolute_mode) {
+ if (priv->absolute_mode)
priv->mode |= SYN_BIT_ABSOLUTE_MODE;
- if (SYN_CAP_EXTENDED(priv->capabilities))
- priv->mode |= SYN_BIT_W_MODE;
- }
-
- if (!SYN_MODE_WMODE(priv->mode) && priv->disable_gesture)
+ if (priv->disable_gesture)
priv->mode |= SYN_BIT_DISABLE_GESTURE;
-
if (psmouse->rate >= 80)
priv->mode |= SYN_BIT_HIGH_RATE;
+ if (SYN_CAP_EXTENDED(priv->capabilities))
+ priv->mode |= SYN_BIT_W_MODE;
if (synaptics_mode_cmd(psmouse, priv->mode))
return -1;
diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c
index 75516996db20..316f2c897101 100644
--- a/drivers/input/serio/libps2.c
+++ b/drivers/input/serio/libps2.c
@@ -212,12 +212,17 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
* time before the ACK arrives.
*/
if (ps2_sendbyte(ps2dev, command & 0xff,
- command == PS2_CMD_RESET_BAT ? 1000 : 200))
- goto out;
+ command == PS2_CMD_RESET_BAT ? 1000 : 200)) {
+ serio_pause_rx(ps2dev->serio);
+ goto out_reset_flags;
+ }
- for (i = 0; i < send; i++)
- if (ps2_sendbyte(ps2dev, param[i], 200))
- goto out;
+ for (i = 0; i < send; i++) {
+ if (ps2_sendbyte(ps2dev, param[i], 200)) {
+ serio_pause_rx(ps2dev->serio);
+ goto out_reset_flags;
+ }
+ }
/*
* The reset command takes a long time to execute.
@@ -234,17 +239,18 @@ int __ps2_command(struct ps2dev *ps2dev, unsigned char *param, int command)
!(ps2dev->flags & PS2_FLAG_CMD), timeout);
}
+ serio_pause_rx(ps2dev->serio);
+
if (param)
for (i = 0; i < receive; i++)
param[i] = ps2dev->cmdbuf[(receive - 1) - i];
if (ps2dev->cmdcnt && (command != PS2_CMD_RESET_BAT || ps2dev->cmdcnt != 1))
- goto out;
+ goto out_reset_flags;
rc = 0;
- out:
- serio_pause_rx(ps2dev->serio);
+ out_reset_flags:
ps2dev->flags = 0;
serio_continue_rx(ps2dev->serio);
diff --git a/drivers/input/serio/parkbd.c b/drivers/input/serio/parkbd.c
index 26b45936f9fd..1e8cd6f1fe9e 100644
--- a/drivers/input/serio/parkbd.c
+++ b/drivers/input/serio/parkbd.c
@@ -194,6 +194,7 @@ static int __init parkbd_init(void)
parkbd_port = parkbd_allocate_serio();
if (!parkbd_port) {
parport_release(parkbd_dev);
+ parport_unregister_device(parkbd_dev);
return -ENOMEM;
}
diff --git a/drivers/input/touchscreen/imx6ul_tsc.c b/drivers/input/touchscreen/imx6ul_tsc.c
index ff0b75813daa..8275267eac25 100644
--- a/drivers/input/touchscreen/imx6ul_tsc.c
+++ b/drivers/input/touchscreen/imx6ul_tsc.c
@@ -94,7 +94,7 @@ struct imx6ul_tsc {
 * The TSC module needs the ADC to get the measured value, so
 * the ADC must be initialized before the TSC is configured.
*/
-static void imx6ul_adc_init(struct imx6ul_tsc *tsc)
+static int imx6ul_adc_init(struct imx6ul_tsc *tsc)
{
int adc_hc = 0;
int adc_gc;
@@ -122,17 +122,23 @@ static void imx6ul_adc_init(struct imx6ul_tsc *tsc)
timeout = wait_for_completion_timeout
(&tsc->completion, ADC_TIMEOUT);
- if (timeout == 0)
+ if (timeout == 0) {
dev_err(tsc->dev, "Timeout for adc calibration\n");
+ return -ETIMEDOUT;
+ }
adc_gs = readl(tsc->adc_regs + REG_ADC_GS);
- if (adc_gs & ADC_CALF)
+ if (adc_gs & ADC_CALF) {
dev_err(tsc->dev, "ADC calibration failed\n");
+ return -EINVAL;
+ }
	/* The TSC needs the ADC to work in hardware trigger mode */
adc_cfg = readl(tsc->adc_regs + REG_ADC_CFG);
adc_cfg |= ADC_HARDWARE_TRIGGER;
writel(adc_cfg, tsc->adc_regs + REG_ADC_CFG);
+
+ return 0;
}
/*
@@ -188,11 +194,17 @@ static void imx6ul_tsc_set(struct imx6ul_tsc *tsc)
writel(start, tsc->tsc_regs + REG_TSC_FLOW_CONTROL);
}
-static void imx6ul_tsc_init(struct imx6ul_tsc *tsc)
+static int imx6ul_tsc_init(struct imx6ul_tsc *tsc)
{
- imx6ul_adc_init(tsc);
+ int err;
+
+ err = imx6ul_adc_init(tsc);
+ if (err)
+ return err;
imx6ul_tsc_channel_config(tsc);
imx6ul_tsc_set(tsc);
+
+ return 0;
}
static void imx6ul_tsc_disable(struct imx6ul_tsc *tsc)
@@ -311,9 +323,7 @@ static int imx6ul_tsc_open(struct input_dev *input_dev)
return err;
}
- imx6ul_tsc_init(tsc);
-
- return 0;
+ return imx6ul_tsc_init(tsc);
}
static void imx6ul_tsc_close(struct input_dev *input_dev)
@@ -337,7 +347,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
int tsc_irq;
int adc_irq;
- tsc = devm_kzalloc(&pdev->dev, sizeof(struct imx6ul_tsc), GFP_KERNEL);
+ tsc = devm_kzalloc(&pdev->dev, sizeof(*tsc), GFP_KERNEL);
if (!tsc)
return -ENOMEM;
@@ -345,7 +355,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
if (!input_dev)
return -ENOMEM;
- input_dev->name = "iMX6UL TouchScreen Controller";
+ input_dev->name = "iMX6UL Touchscreen Controller";
input_dev->id.bustype = BUS_HOST;
input_dev->open = imx6ul_tsc_open;
@@ -406,7 +416,7 @@ static int imx6ul_tsc_probe(struct platform_device *pdev)
}
adc_irq = platform_get_irq(pdev, 1);
- if (adc_irq <= 0) {
+ if (adc_irq < 0) {
dev_err(&pdev->dev, "no adc irq resource?\n");
return adc_irq;
}
@@ -491,7 +501,7 @@ static int __maybe_unused imx6ul_tsc_resume(struct device *dev)
goto out;
}
- imx6ul_tsc_init(tsc);
+ retval = imx6ul_tsc_init(tsc);
}
out:
diff --git a/drivers/input/touchscreen/mms114.c b/drivers/input/touchscreen/mms114.c
index 7cce87650fc8..1fafc9f57af6 100644
--- a/drivers/input/touchscreen/mms114.c
+++ b/drivers/input/touchscreen/mms114.c
@@ -394,12 +394,12 @@ static struct mms114_platform_data *mms114_parse_dt(struct device *dev)
if (of_property_read_u32(np, "x-size", &pdata->x_size)) {
dev_err(dev, "failed to get x-size property\n");
return NULL;
- };
+ }
if (of_property_read_u32(np, "y-size", &pdata->y_size)) {
dev_err(dev, "failed to get y-size property\n");
return NULL;
- };
+ }
of_property_read_u32(np, "contact-threshold",
&pdata->contact_threshold);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 4664c2a96c67..d9da766719c8 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -43,7 +43,7 @@ config IOMMU_IO_PGTABLE_LPAE_SELFTEST
endmenu
config IOMMU_IOVA
- bool
+ tristate
config OF_IOMMU
def_bool y
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2d7349a3ee14..041bc1810a86 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3215,6 +3215,8 @@ static struct iova *intel_alloc_iova(struct device *dev,
/* Restrict dma_mask to the width that the iommu can handle */
dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+ /* Ensure we reserve the whole size-aligned region */
+ nrpages = __roundup_pow_of_two(nrpages);
if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
/*
@@ -3711,7 +3713,7 @@ static inline int iommu_devinfo_cache_init(void)
static int __init iommu_init_mempool(void)
{
int ret;
- ret = iommu_iova_cache_init();
+ ret = iova_cache_get();
if (ret)
return ret;
@@ -3725,7 +3727,7 @@ static int __init iommu_init_mempool(void)
kmem_cache_destroy(iommu_domain_cache);
domain_error:
- iommu_iova_cache_destroy();
+ iova_cache_put();
return -ENOMEM;
}
@@ -3734,7 +3736,7 @@ static void __init iommu_exit_mempool(void)
{
kmem_cache_destroy(iommu_devinfo_cache);
kmem_cache_destroy(iommu_domain_cache);
- iommu_iova_cache_destroy();
+ iova_cache_put();
}
static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index b7c3d923f3e1..fa0adef32bd6 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -18,42 +18,9 @@
*/
#include <linux/iova.h>
+#include <linux/module.h>
#include <linux/slab.h>
-static struct kmem_cache *iommu_iova_cache;
-
-int iommu_iova_cache_init(void)
-{
- int ret = 0;
-
- iommu_iova_cache = kmem_cache_create("iommu_iova",
- sizeof(struct iova),
- 0,
- SLAB_HWCACHE_ALIGN,
- NULL);
- if (!iommu_iova_cache) {
- pr_err("Couldn't create iova cache\n");
- ret = -ENOMEM;
- }
-
- return ret;
-}
-
-void iommu_iova_cache_destroy(void)
-{
- kmem_cache_destroy(iommu_iova_cache);
-}
-
-struct iova *alloc_iova_mem(void)
-{
- return kmem_cache_alloc(iommu_iova_cache, GFP_ATOMIC);
-}
-
-void free_iova_mem(struct iova *iova)
-{
- kmem_cache_free(iommu_iova_cache, iova);
-}
-
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn, unsigned long pfn_32bit)
@@ -72,6 +39,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
iovad->start_pfn = start_pfn;
iovad->dma_32bit_pfn = pfn_32bit;
}
+EXPORT_SYMBOL_GPL(init_iova_domain);
static struct rb_node *
__get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn)
@@ -120,19 +88,14 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
}
}
-/* Computes the padding size required, to make the
- * the start address naturally aligned on its size
+/*
+ * Computes the padding size required to make the start address
+ * naturally aligned on the power-of-two order of its size
*/
-static int
-iova_get_pad_size(int size, unsigned int limit_pfn)
+static unsigned int
+iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
{
- unsigned int pad_size = 0;
- unsigned int order = ilog2(size);
-
- if (order)
- pad_size = (limit_pfn + 1) % (1 << order);
-
- return pad_size;
+ return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
}
static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
@@ -242,6 +205,57 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova)
rb_insert_color(&iova->node, root);
}
+static struct kmem_cache *iova_cache;
+static unsigned int iova_cache_users;
+static DEFINE_MUTEX(iova_cache_mutex);
+
+struct iova *alloc_iova_mem(void)
+{
+ return kmem_cache_alloc(iova_cache, GFP_ATOMIC);
+}
+EXPORT_SYMBOL(alloc_iova_mem);
+
+void free_iova_mem(struct iova *iova)
+{
+ kmem_cache_free(iova_cache, iova);
+}
+EXPORT_SYMBOL(free_iova_mem);
+
+int iova_cache_get(void)
+{
+ mutex_lock(&iova_cache_mutex);
+ if (!iova_cache_users) {
+ iova_cache = kmem_cache_create(
+ "iommu_iova", sizeof(struct iova), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!iova_cache) {
+ mutex_unlock(&iova_cache_mutex);
+ printk(KERN_ERR "Couldn't create iova cache\n");
+ return -ENOMEM;
+ }
+ }
+
+ iova_cache_users++;
+ mutex_unlock(&iova_cache_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iova_cache_get);
+
+void iova_cache_put(void)
+{
+ mutex_lock(&iova_cache_mutex);
+ if (WARN_ON(!iova_cache_users)) {
+ mutex_unlock(&iova_cache_mutex);
+ return;
+ }
+ iova_cache_users--;
+ if (!iova_cache_users)
+ kmem_cache_destroy(iova_cache);
+ mutex_unlock(&iova_cache_mutex);
+}
+EXPORT_SYMBOL_GPL(iova_cache_put);
+
/**
* alloc_iova - allocates an iova
* @iovad: - iova domain in question
@@ -265,12 +279,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
if (!new_iova)
return NULL;
- /* If size aligned is set then round the size to
- * to next power of two.
- */
- if (size_aligned)
- size = __roundup_pow_of_two(size);
-
ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
new_iova, size_aligned);
@@ -281,6 +289,7 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
return new_iova;
}
+EXPORT_SYMBOL_GPL(alloc_iova);
/**
 * find_iova - finds an iova for a given pfn
@@ -321,6 +330,7 @@ struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn)
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
return NULL;
}
+EXPORT_SYMBOL_GPL(find_iova);
/**
* __free_iova - frees the given iova
@@ -339,6 +349,7 @@ __free_iova(struct iova_domain *iovad, struct iova *iova)
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
free_iova_mem(iova);
}
+EXPORT_SYMBOL_GPL(__free_iova);
/**
* free_iova - finds and frees the iova for a given pfn
@@ -356,6 +367,7 @@ free_iova(struct iova_domain *iovad, unsigned long pfn)
__free_iova(iovad, iova);
}
+EXPORT_SYMBOL_GPL(free_iova);
/**
 * put_iova_domain - destroys the iova domain
@@ -378,6 +390,7 @@ void put_iova_domain(struct iova_domain *iovad)
}
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
}
+EXPORT_SYMBOL_GPL(put_iova_domain);
static int
__is_range_overlap(struct rb_node *node,
@@ -467,6 +480,7 @@ finish:
spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
return iova;
}
+EXPORT_SYMBOL_GPL(reserve_iova);
/**
 * copy_reserved_iova - copies the reserved iovas between domains
@@ -493,6 +507,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
}
spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
}
+EXPORT_SYMBOL_GPL(copy_reserved_iova);
struct iova *
split_and_remove_iova(struct iova_domain *iovad, struct iova *iova,
@@ -534,3 +549,6 @@ error:
free_iova_mem(prev);
return NULL;
}
+
+MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
+MODULE_LICENSE("GPL");
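
The rewritten iova_get_pad_size() above collapses the old order/modulo logic into a single expression: allocations grow downward from limit_pfn, and the padding is exactly what is needed for the resulting start pfn to land on a boundary of the size rounded up to a power of two. A standalone sketch with made-up numbers, not part of the patch:

#include <stdio.h>

/* simplified stand-in for the kernel's __roundup_pow_of_two() */
static unsigned long roundup_pow_of_two(unsigned long x)
{
	unsigned long r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

/* mirrors the new iova_get_pad_size() */
static unsigned int pad_size(unsigned int size, unsigned int limit_pfn)
{
	return (limit_pfn + 1 - size) & (roundup_pow_of_two(size) - 1);
}

int main(void)
{
	unsigned int limit_pfn = 0xfffff;	/* hypothetical allocation limit, in pages */
	unsigned int size = 5;			/* 5 pages -> aligned to an 8-page boundary */
	unsigned int pad = pad_size(size, limit_pfn);
	/* start pfn of a downward allocation that leaves 'pad' pages free at the top */
	unsigned int pfn_lo = limit_pfn - (size + pad) + 1;

	printf("pad = %u, pfn_lo = 0x%x, pfn_lo %% 8 = %u\n",
	       pad, pfn_lo, pfn_lo % 8);	/* pad = 3, pfn_lo = 0xffff8, remainder 0 */
	return 0;
}
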
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c
index e9c6f2a5b52d..cd7d3bc78e34 100644
--- a/drivers/irqchip/exynos-combiner.c
+++ b/drivers/irqchip/exynos-combiner.c
@@ -65,12 +65,10 @@ static void combiner_unmask_irq(struct irq_data *data)
__raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}
-static void combiner_handle_cascade_irq(unsigned int __irq,
- struct irq_desc *desc)
+static void combiner_handle_cascade_irq(struct irq_desc *desc)
{
struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned int irq = irq_desc_get_irq(desc);
unsigned int cascade_irq, combiner_irq;
unsigned long status;
@@ -88,7 +86,7 @@ static void combiner_handle_cascade_irq(unsigned int __irq,
cascade_irq = irq_find_mapping(combiner_irq_domain, combiner_irq);
if (unlikely(!cascade_irq))
- handle_bad_irq(irq, desc);
+ handle_bad_irq(desc);
else
generic_handle_irq(cascade_irq);
@@ -165,7 +163,7 @@ static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
irq_set_chip_data(irq, &combiner_data[hw >> 3]);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_set_probe(irq);
return 0;
}
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 39b72da0c143..655cb967a1f2 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -200,7 +200,6 @@ static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq,
{
irq_set_chip_and_handler(virq, &armada_370_xp_msi_irq_chip,
handle_simple_irq);
- set_irq_flags(virq, IRQF_VALID);
return 0;
}
@@ -317,7 +316,7 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
handle_level_irq);
}
- set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
+ irq_set_probe(virq);
return 0;
}
@@ -447,8 +446,7 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
static void armada_370_xp_handle_msi_irq(struct pt_regs *r, bool b) {}
#endif
-static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq,
- struct irq_desc *desc)
+static void armada_370_xp_mpic_handle_cascade_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long irqmap, irqn, irqsrc, cpuid;
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index 9da9942ac83c..f6d680485bee 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -88,28 +88,36 @@ static void aic5_mask(struct irq_data *d)
{
struct irq_domain *domain = d->domain;
struct irq_domain_chip_generic *dgc = domain->gc;
- struct irq_chip_generic *gc = dgc->gc[0];
+ struct irq_chip_generic *bgc = dgc->gc[0];
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- /* Disable interrupt on AIC5 */
- irq_gc_lock(gc);
+ /*
+ * Disable interrupt on AIC5. We always take the lock of the
+ * first irq chip as all chips share the same registers.
+ */
+ irq_gc_lock(bgc);
irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
irq_reg_writel(gc, 1, AT91_AIC5_IDCR);
gc->mask_cache &= ~d->mask;
- irq_gc_unlock(gc);
+ irq_gc_unlock(bgc);
}
static void aic5_unmask(struct irq_data *d)
{
struct irq_domain *domain = d->domain;
struct irq_domain_chip_generic *dgc = domain->gc;
- struct irq_chip_generic *gc = dgc->gc[0];
+ struct irq_chip_generic *bgc = dgc->gc[0];
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
- /* Enable interrupt on AIC5 */
- irq_gc_lock(gc);
+ /*
+ * Enable interrupt on AIC5. We always take the lock of the
+ * first irq chip as all chips share the same registers.
+ */
+ irq_gc_lock(bgc);
irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR);
irq_reg_writel(gc, 1, AT91_AIC5_IECR);
gc->mask_cache |= d->mask;
- irq_gc_unlock(gc);
+ irq_gc_unlock(bgc);
}
static int aic5_retrigger(struct irq_data *d)
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index ed4ca9deca70..bf9cc5f2e839 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -96,7 +96,7 @@ struct armctrl_ic {
static struct armctrl_ic intc __read_mostly;
static void __exception_irq_entry bcm2835_handle_irq(
struct pt_regs *regs);
-static void bcm2836_chained_handle_irq(unsigned int irq, struct irq_desc *desc);
+static void bcm2836_chained_handle_irq(struct irq_desc *desc);
static void armctrl_mask_irq(struct irq_data *d)
{
@@ -166,7 +166,7 @@ static int __init armctrl_of_init(struct device_node *node,
BUG_ON(irq <= 0);
irq_set_chip_and_handler(irq, &armctrl_chip,
handle_level_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_set_probe(irq);
}
}
@@ -245,7 +245,7 @@ static void __exception_irq_entry bcm2835_handle_irq(
handle_IRQ(irq_linear_revmap(intc.domain, hwirq), regs);
}
-static void bcm2836_chained_handle_irq(unsigned int irq, struct irq_desc *desc)
+static void bcm2836_chained_handle_irq(struct irq_desc *desc)
{
u32 hwirq;
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index 409bdc6366c2..0fea985ef1dc 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -115,7 +115,7 @@ static inline void l1_writel(u32 val, void __iomem *reg)
writel(val, reg);
}
-static void bcm7038_l1_irq_handle(unsigned int irq, struct irq_desc *desc)
+static void bcm7038_l1_irq_handle(struct irq_desc *desc)
{
struct bcm7038_l1_chip *intc = irq_desc_get_handler_data(desc);
struct bcm7038_l1_cpu *cpu;
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index d3f976913a6f..61b18ab33ad9 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -56,7 +56,7 @@ struct bcm7120_l2_intc_data {
const __be32 *map_mask_prop;
};
-static void bcm7120_l2_intc_irq_handle(unsigned int irq, struct irq_desc *desc)
+static void bcm7120_l2_intc_irq_handle(struct irq_desc *desc)
{
struct bcm7120_l1_intc_data *data = irq_desc_get_handler_data(desc);
struct bcm7120_l2_intc_data *b = data->b;
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index aedda06191eb..65cd341f331a 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -49,13 +49,12 @@ struct brcmstb_l2_intc_data {
u32 saved_mask; /* for suspend/resume */
};
-static void brcmstb_l2_intc_irq_handle(unsigned int __irq,
- struct irq_desc *desc)
+static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
{
struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc);
struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0);
struct irq_chip *chip = irq_desc_get_chip(desc);
- unsigned int irq = irq_desc_get_irq(desc);
+ unsigned int irq;
u32 status;
chained_irq_enter(chip, desc);
@@ -65,7 +64,7 @@ static void brcmstb_l2_intc_irq_handle(unsigned int __irq,
if (status == 0) {
raw_spin_lock(&desc->lock);
- handle_bad_irq(irq, desc);
+ handle_bad_irq(desc);
raw_spin_unlock(&desc->lock);
goto out;
}
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
index 2dd929eed9e0..eb5eb0cd414d 100644
--- a/drivers/irqchip/irq-clps711x.c
+++ b/drivers/irqchip/irq-clps711x.c
@@ -132,14 +132,14 @@ static int __init clps711x_intc_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
irq_flow_handler_t handler = handle_level_irq;
- unsigned int flags = IRQF_VALID | IRQF_PROBE;
+ unsigned int flags = 0;
if (!clps711x_irqs[hw].flags)
return 0;
if (clps711x_irqs[hw].flags & CLPS711X_FLAG_FIQ) {
handler = handle_bad_irq;
- flags |= IRQF_NOAUTOEN;
+ flags |= IRQ_NOAUTOEN;
} else if (clps711x_irqs[hw].eoi) {
handler = handle_fasteoi_irq;
}
@@ -149,7 +149,7 @@ static int __init clps711x_intc_irq_map(struct irq_domain *h, unsigned int virq,
writel_relaxed(0, clps711x_intc->base + clps711x_irqs[hw].eoi);
irq_set_chip_and_handler(virq, &clps711x_intc_chip, handler);
- set_irq_flags(virq, flags);
+ irq_modify_status(virq, IRQ_NOPROBE, flags);
return 0;
}
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
index efd95d9955e7..052f266364c0 100644
--- a/drivers/irqchip/irq-dw-apb-ictl.c
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -26,7 +26,7 @@
#define APB_INT_FINALSTATUS_H 0x34
#define APB_INT_BASE_OFFSET 0x04
-static void dw_apb_ictl_handler(unsigned int irq, struct irq_desc *desc)
+static void dw_apb_ictl_handler(struct irq_desc *desc)
{
struct irq_domain *d = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index db04fc1f56b2..12985daa66ab 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -95,8 +95,8 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS;
- msg->address_hi = (u32) (addr >> 32);
- msg->address_lo = (u32) (addr);
+ msg->address_hi = upper_32_bits(addr);
+ msg->address_lo = lower_32_bits(addr);
msg->data = data->hwirq;
}
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index cf351c637464..a7c8c9ffbafd 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -62,7 +62,7 @@ static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data)
dev_alias->dev_id = alias;
if (pdev != dev_alias->pdev)
- dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev);
+ dev_alias->count += its_pci_msi_vec_count(pdev);
return 0;
}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 26b55c53755f..25ceae9f7348 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -719,6 +719,9 @@ static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids)
out:
spin_unlock(&lpi_lock);
+ if (!bitmap)
+ *base = *nr_ids = 0;
+
return bitmap;
}
@@ -898,8 +901,10 @@ retry_baser:
* non-cacheable as well.
*/
shr = tmp & GITS_BASER_SHAREABILITY_MASK;
- if (!shr)
+ if (!shr) {
cache = GITS_BASER_nC;
+ __flush_dcache_area(base, alloc_size);
+ }
goto retry_baser;
}
@@ -1140,6 +1145,8 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
return NULL;
}
+ __flush_dcache_area(itt, sz);
+
dev->its = its;
dev->itt = itt;
dev->nr_ites = nr_ites;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 7deed6ef54c2..36ecfc870e5a 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -70,11 +70,6 @@ static inline int gic_irq_in_rdist(struct irq_data *d)
return gic_irq(d) < 32;
}
-static inline bool forwarded_irq(struct irq_data *d)
-{
- return d->handler_data != NULL;
-}
-
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */
@@ -249,7 +244,7 @@ static void gic_eoimode1_mask_irq(struct irq_data *d)
* disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (guest is being terminated).
*/
- if (forwarded_irq(d))
+ if (irqd_is_forwarded_to_vcpu(d))
gic_poke_irq(d, GICD_ICACTIVER);
}
@@ -324,7 +319,7 @@ static void gic_eoimode1_eoi_irq(struct irq_data *d)
* No need to deactivate an LPI, or an interrupt that
	 * is getting forwarded to a vcpu.
*/
- if (gic_irq(d) >= 8192 || forwarded_irq(d))
+ if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
return;
gic_write_dir(gic_irq(d));
}
@@ -357,7 +352,10 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
- d->handler_data = vcpu;
+ if (vcpu)
+ irqd_set_forwarded_to_vcpu(d);
+ else
+ irqd_clr_forwarded_to_vcpu(d);
return 0;
}
@@ -754,13 +752,13 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_set_percpu_devid(irq);
irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_percpu_devid_irq, NULL, NULL);
- set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
}
/* SPIs */
if (hw >= 32 && hw < gic_data.irq_nr) {
irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_set_probe(irq);
}
/* LPIs */
if (hw >= 8192 && hw < GIC_ID_NR) {
@@ -768,7 +766,6 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
return -EPERM;
irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
- set_irq_flags(irq, IRQF_VALID);
}
return 0;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index e6b7ed537952..982c09c2d791 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -145,29 +145,10 @@ static inline bool cascading_gic_irq(struct irq_data *d)
void *data = irq_data_get_irq_handler_data(d);
/*
- * If handler_data pointing to one of the secondary GICs, then
- * this is a cascading interrupt, and it cannot possibly be
- * forwarded.
+ * If handler_data is set, this is a cascading interrupt, and
+ * it cannot possibly be forwarded.
*/
- if (data >= (void *)(gic_data + 1) &&
- data < (void *)(gic_data + MAX_GIC_NR))
- return true;
-
- return false;
-}
-
-static inline bool forwarded_irq(struct irq_data *d)
-{
- /*
- * A forwarded interrupt:
- * - is on the primary GIC
- * - has its handler_data set to a value
- * - that isn't a secondary GIC
- */
- if (d->handler_data && !cascading_gic_irq(d))
- return true;
-
- return false;
+ return data != NULL;
}
/*
@@ -201,7 +182,7 @@ static void gic_eoimode1_mask_irq(struct irq_data *d)
* disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (guest is being terminated).
*/
- if (forwarded_irq(d))
+ if (irqd_is_forwarded_to_vcpu(d))
gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR);
}
@@ -218,7 +199,7 @@ static void gic_eoi_irq(struct irq_data *d)
static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
/* Do not deactivate an IRQ forwarded to a vcpu. */
- if (forwarded_irq(d))
+ if (irqd_is_forwarded_to_vcpu(d))
return;
writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
@@ -296,7 +277,10 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
if (cascading_gic_irq(d))
return -EINVAL;
- d->handler_data = vcpu;
+ if (vcpu)
+ irqd_set_forwarded_to_vcpu(d);
+ else
+ irqd_clr_forwarded_to_vcpu(d);
return 0;
}
@@ -357,7 +341,7 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
} while (1);
}
-static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
+static void gic_handle_cascade_irq(struct irq_desc *desc)
{
struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -376,7 +360,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
if (unlikely(gic_irq < 32 || gic_irq > 1020))
- handle_bad_irq(cascade_irq, desc);
+ handle_bad_irq(desc);
else
generic_handle_irq(cascade_irq);
@@ -906,11 +890,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_set_percpu_devid(irq);
irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_percpu_devid_irq, NULL, NULL);
- set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
} else {
irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_set_probe(irq);
}
return 0;
}
@@ -1119,12 +1103,49 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
#ifdef CONFIG_OF
static int gic_cnt __initdata;
+static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
+{
+ struct resource cpuif_res;
+
+ of_address_to_resource(node, 1, &cpuif_res);
+
+ if (!is_hyp_mode_available())
+ return false;
+ if (resource_size(&cpuif_res) < SZ_8K)
+ return false;
+ if (resource_size(&cpuif_res) == SZ_128K) {
+ u32 val_low, val_high;
+
+ /*
+ * Verify that we have the first 4kB of a GIC400
+ * aliased over the first 64kB by checking the
+ * GICC_IIDR register on both ends.
+ */
+ val_low = readl_relaxed(*base + GIC_CPU_IDENT);
+ val_high = readl_relaxed(*base + GIC_CPU_IDENT + 0xf000);
+ if ((val_low & 0xffff0fff) != 0x0202043B ||
+ val_low != val_high)
+ return false;
+
+ /*
+		 * Move the base up by 60kB, so that we have an 8kB
+ * contiguous region, which allows us to use GICC_DIR
+ * at its normal offset. Please pass me that bucket.
+ */
+ *base += 0xf000;
+ cpuif_res.start += 0xf000;
+ pr_warn("GIC: Adjusting CPU interface base to %pa",
+ &cpuif_res.start);
+ }
+
+ return true;
+}
+
static int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *cpu_base;
void __iomem *dist_base;
- struct resource cpu_res;
u32 percpu_offset;
int irq;
@@ -1137,14 +1158,11 @@ gic_of_init(struct device_node *node, struct device_node *parent)
cpu_base = of_iomap(node, 1);
WARN(!cpu_base, "unable to map gic cpu registers\n");
- of_address_to_resource(node, 1, &cpu_res);
-
/*
* Disable split EOI/Deactivate if either HYP is not available
* or the CPU interface is too small.
*/
- if (gic_cnt == 0 && (!is_hyp_mode_available() ||
- resource_size(&cpu_res) < SZ_8K))
+ if (gic_cnt == 0 && !gic_check_eoimode(node, &cpu_base))
static_key_slow_dec(&supports_deactivate);
if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index a0128c7c98dd..8f3ca8f3a62b 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -307,11 +307,11 @@ static int hip04_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_set_percpu_devid(irq);
irq_set_chip_and_handler(irq, &hip04_irq_chip,
handle_percpu_devid_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+ irq_set_status_flags(irq, IRQ_NOAUTOEN);
} else {
irq_set_chip_and_handler(irq, &hip04_irq_chip,
handle_fasteoi_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_set_probe(irq);
}
irq_set_chip_data(irq, d->host_data);
return 0;
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index 4836102ba312..e484fd255321 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -352,7 +352,7 @@ void __init init_i8259_irqs(void)
__init_i8259_irqs(NULL);
}
-static void i8259_irq_dispatch(unsigned int __irq, struct irq_desc *desc)
+static void i8259_irq_dispatch(struct irq_desc *desc)
{
struct irq_domain *domain = irq_desc_get_handler_data(desc);
int hwirq = i8259_irq();
diff --git a/drivers/irqchip/irq-imgpdc.c b/drivers/irqchip/irq-imgpdc.c
index 841604b81004..c02d29c9dc05 100644
--- a/drivers/irqchip/irq-imgpdc.c
+++ b/drivers/irqchip/irq-imgpdc.c
@@ -218,7 +218,7 @@ static int pdc_irq_set_wake(struct irq_data *data, unsigned int on)
return 0;
}
-static void pdc_intc_perip_isr(unsigned int __irq, struct irq_desc *desc)
+static void pdc_intc_perip_isr(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
struct pdc_intc_priv *priv;
@@ -240,7 +240,7 @@ found:
generic_handle_irq(irq_no);
}
-static void pdc_intc_syswake_isr(unsigned int irq, struct irq_desc *desc)
+static void pdc_intc_syswake_isr(struct irq_desc *desc)
{
struct pdc_intc_priv *priv;
unsigned int syswake, irq_no;
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index c1517267b5db..deb89d63a728 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -83,7 +83,7 @@ static void keystone_irq_ack(struct irq_data *d)
/* nothing to do here */
}
-static void keystone_irq_handler(unsigned __irq, struct irq_desc *desc)
+static void keystone_irq_handler(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc);
@@ -127,7 +127,7 @@ static int keystone_irq_map(struct irq_domain *h, unsigned int virq,
irq_set_chip_data(virq, kirq);
irq_set_chip_and_handler(virq, &kirq->chip, handle_level_irq);
- set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
+ irq_set_probe(virq);
return 0;
}
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
index 5f4c52928d16..8c38b3d92e1c 100644
--- a/drivers/irqchip/irq-metag-ext.c
+++ b/drivers/irqchip/irq-metag-ext.c
@@ -446,7 +446,7 @@ static int meta_intc_irq_set_type(struct irq_data *data, unsigned int flow_type)
* Whilst using TR2 to detect external interrupts is a software convention it is
* (hopefully) unlikely to change.
*/
-static void meta_intc_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void meta_intc_irq_demux(struct irq_desc *desc)
{
struct meta_intc_priv *priv = &meta_intc_priv;
irq_hw_number_t hw;
diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c
index 3d23ce3edb5c..a5f053bd2f44 100644
--- a/drivers/irqchip/irq-metag.c
+++ b/drivers/irqchip/irq-metag.c
@@ -220,7 +220,7 @@ static int metag_internal_irq_set_affinity(struct irq_data *data,
* occurred. It is this function's job to demux this irq and
* figure out exactly which trigger needs servicing.
*/
-static void metag_internal_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void metag_internal_irq_demux(struct irq_desc *desc)
{
struct metag_internal_irq_priv *priv = irq_desc_get_handler_data(desc);
irq_hw_number_t hw;
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 1764bcf8ee6b..aeaa061f0dbf 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -320,6 +320,14 @@ static void gic_handle_shared_int(bool chained)
intrmask[i] = gic_read(intrmask_reg);
pending_reg += gic_reg_step;
intrmask_reg += gic_reg_step;
+
+ if (!config_enabled(CONFIG_64BIT) || mips_cm_is64)
+ continue;
+
+ pending[i] |= (u64)gic_read(pending_reg) << 32;
+ intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
+ pending_reg += gic_reg_step;
+ intrmask_reg += gic_reg_step;
}
bitmap_and(pending, pending, intrmask, gic_shared_intrs);
@@ -426,7 +434,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
spin_lock_irqsave(&gic_lock, flags);
/* Re-route this IRQ */
- gic_map_to_vpe(irq, cpumask_first(&tmp));
+ gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));
/* Update the pcpu_masks */
for (i = 0; i < NR_CPUS; i++)
@@ -546,7 +554,7 @@ static void __gic_irq_dispatch(void)
gic_handle_shared_int(false);
}
-static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
+static void gic_irq_dispatch(struct irq_desc *desc)
{
gic_handle_local_int(true);
gic_handle_shared_int(true);
@@ -599,7 +607,7 @@ static __init void gic_ipi_init_one(unsigned int intr, int cpu,
GIC_SHARED_TO_HWIRQ(intr));
int i;
- gic_map_to_vpe(intr, cpu);
+ gic_map_to_vpe(intr, mips_cm_vp_id(cpu));
for (i = 0; i < NR_CPUS; i++)
clear_bit(intr, pcpu_masks[i].pcpu_mask);
set_bit(intr, pcpu_masks[cpu].pcpu_mask);
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 781ed6e71dbb..013fc9659a84 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -129,7 +129,7 @@ struct irq_chip icu_irq_chip = {
.irq_unmask = icu_unmask_irq,
};
-static void icu_mux_irq_demux(unsigned int __irq, struct irq_desc *desc)
+static void icu_mux_irq_demux(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
struct irq_domain *domain;
@@ -164,7 +164,6 @@ static int mmp_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
- set_irq_flags(irq, IRQF_VALID);
return 0;
}
@@ -234,7 +233,6 @@ void __init icu_init_irq(void)
for (irq = 0; irq < 64; irq++) {
icu_mask_irq(irq_get_irq_data(irq));
irq_set_chip_and_handler(irq, &icu_irq_chip, handle_level_irq);
- set_irq_flags(irq, IRQF_VALID);
}
irq_set_default_host(icu_data[0].domain);
set_handle_irq(mmp_handle_irq);
@@ -337,7 +335,6 @@ void __init mmp2_init_icu(void)
irq_set_chip_and_handler(irq, &icu_irq_chip,
handle_level_irq);
}
- set_irq_flags(irq, IRQF_VALID);
}
irq_set_default_host(icu_data[0].domain);
set_handle_irq(mmp2_handle_irq);
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index 1faf812f3dc8..604df63e2edf 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -84,7 +84,6 @@ static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw)
{
irq_set_chip_and_handler(virq, &mxs_icoll_chip, handle_level_irq);
- set_irq_flags(virq, IRQF_VALID);
return 0;
}
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
index 5ea999a724b5..be4c5a8c9659 100644
--- a/drivers/irqchip/irq-orion.c
+++ b/drivers/irqchip/irq-orion.c
@@ -106,7 +106,7 @@ IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);
#define ORION_BRIDGE_IRQ_CAUSE 0x00
#define ORION_BRIDGE_IRQ_MASK 0x04
-static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void orion_bridge_irq_handler(struct irq_desc *desc)
{
struct irq_domain *d = irq_desc_get_handler_data(desc);
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 0670ab4e3897..9525335723f6 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -283,6 +283,9 @@ static int intc_irqpin_irq_set_type(struct irq_data *d, unsigned int type)
static int intc_irqpin_irq_set_wake(struct irq_data *d, unsigned int on)
{
struct intc_irqpin_priv *p = irq_data_get_irq_chip_data(d);
+ int hw_irq = irqd_to_hwirq(d);
+
+ irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
if (!p->clk)
return 0;
@@ -332,6 +335,12 @@ static irqreturn_t intc_irqpin_shared_irq_handler(int irq, void *dev_id)
return status;
}
+/*
+ * This lock class tells lockdep that INTC External IRQ Pin irqs are in a
+ * different category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key intc_irqpin_irq_lock_class;
+
static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
@@ -342,8 +351,8 @@ static int intc_irqpin_irq_domain_map(struct irq_domain *h, unsigned int virq,
intc_irqpin_dbg(&p->irq[hw], "map");
irq_set_chip_data(virq, h->host_data);
+ irq_set_lockdep_class(virq, &intc_irqpin_irq_lock_class);
irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
- set_irq_flags(virq, IRQF_VALID); /* kill me now */
return 0;
}
diff --git a/drivers/irqchip/irq-renesas-irqc.c b/drivers/irqchip/irq-renesas-irqc.c
index 2aa3add711a6..35bf97ba4a3d 100644
--- a/drivers/irqchip/irq-renesas-irqc.c
+++ b/drivers/irqchip/irq-renesas-irqc.c
@@ -121,6 +121,9 @@ static int irqc_irq_set_type(struct irq_data *d, unsigned int type)
static int irqc_irq_set_wake(struct irq_data *d, unsigned int on)
{
struct irqc_priv *p = irq_data_get_irq_chip_data(d);
+ int hw_irq = irqd_to_hwirq(d);
+
+ irq_set_irq_wake(p->irq[hw_irq].requested_irq, on);
if (!p->clk)
return 0;
@@ -150,6 +153,12 @@ static irqreturn_t irqc_irq_handler(int irq, void *dev_id)
return IRQ_NONE;
}
+/*
+ * This lock class tells lockdep that IRQC irqs are in a different
+ * category than their parents, so it won't report false recursion.
+ */
+static struct lock_class_key irqc_irq_lock_class;
+
static int irqc_irq_domain_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
@@ -157,6 +166,7 @@ static int irqc_irq_domain_map(struct irq_domain *h, unsigned int virq,
irqc_dbg(&p->irq[hw], "map");
irq_set_chip_data(virq, h->host_data);
+ irq_set_lockdep_class(virq, &irqc_irq_lock_class);
irq_set_chip_and_handler(virq, &p->irq_chip, handle_level_irq);
return 0;
}
diff --git a/drivers/irqchip/irq-s3c24xx.c b/drivers/irqchip/irq-s3c24xx.c
index 506d9f20ca51..7154b011ddd2 100644
--- a/drivers/irqchip/irq-s3c24xx.c
+++ b/drivers/irqchip/irq-s3c24xx.c
@@ -298,7 +298,7 @@ static struct irq_chip s3c_irq_eint0t4 = {
.irq_set_type = s3c_irqext0_type,
};
-static void s3c_irq_demux(unsigned int __irq, struct irq_desc *desc)
+static void s3c_irq_demux(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct s3c_irq_data *irq_data = irq_desc_get_chip_data(desc);
@@ -466,13 +466,11 @@ static int s3c24xx_irq_map(struct irq_domain *h, unsigned int virq,
irq_set_chip_data(virq, irq_data);
- set_irq_flags(virq, IRQF_VALID);
-
if (parent_intc && irq_data->type != S3C_IRQTYPE_NONE) {
if (irq_data->parent_irq > 31) {
pr_err("irq-s3c24xx: parent irq %lu is out of range\n",
irq_data->parent_irq);
- goto err;
+ return -EINVAL;
}
parent_irq_data = &parent_intc->irqs[irq_data->parent_irq];
@@ -485,18 +483,12 @@ static int s3c24xx_irq_map(struct irq_domain *h, unsigned int virq,
if (!irqno) {
pr_err("irq-s3c24xx: could not find mapping for parent irq %lu\n",
irq_data->parent_irq);
- goto err;
+ return -EINVAL;
}
irq_set_chained_handler(irqno, s3c_irq_demux);
}
return 0;
-
-err:
- set_irq_flags(virq, 0);
-
- /* the only error can result from bad mapping data*/
- return -EINVAL;
}
static const struct irq_domain_ops s3c24xx_irq_ops = {
@@ -1174,8 +1166,6 @@ static int s3c24xx_irq_map_of(struct irq_domain *h, unsigned int virq,
irq_set_chip_data(virq, irq_data);
- set_irq_flags(virq, IRQF_VALID);
-
return 0;
}
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index 4ad3e7c69aa7..0704362f4c82 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -83,7 +83,7 @@ static int sun4i_irq_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw)
{
irq_set_chip_and_handler(virq, &sun4i_irq_chip, handle_fasteoi_irq);
- set_irq_flags(virq, IRQF_VALID | IRQF_PROBE);
+ irq_set_probe(virq);
return 0;
}
diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
index 772a82cacbf7..c143dd58410c 100644
--- a/drivers/irqchip/irq-sunxi-nmi.c
+++ b/drivers/irqchip/irq-sunxi-nmi.c
@@ -58,7 +58,7 @@ static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
return irq_reg_readl(gc, off);
}
-static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc)
+static void sunxi_sc_nmi_handle_irq(struct irq_desc *desc)
{
struct irq_domain *domain = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c
index 331829661366..848d782a2a3b 100644
--- a/drivers/irqchip/irq-tb10x.c
+++ b/drivers/irqchip/irq-tb10x.c
@@ -97,7 +97,7 @@ static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type)
return IRQ_SET_MASK_OK;
}
-static void tb10x_irq_cascade(unsigned int __irq, struct irq_desc *desc)
+static void tb10x_irq_cascade(struct irq_desc *desc)
{
struct irq_domain *domain = irq_desc_get_handler_data(desc);
unsigned int irq = irq_desc_get_irq(desc);
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 16123f688768..598ab3f0e0ac 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -65,19 +65,19 @@ static void fpga_irq_unmask(struct irq_data *d)
writel(mask, f->base + IRQ_ENABLE_SET);
}
-static void fpga_irq_handle(unsigned int __irq, struct irq_desc *desc)
+static void fpga_irq_handle(struct irq_desc *desc)
{
struct fpga_irq_data *f = irq_desc_get_handler_data(desc);
- unsigned int irq = irq_desc_get_irq(desc);
u32 status = readl(f->base + IRQ_STATUS);
if (status == 0) {
- do_bad_IRQ(irq, desc);
+ do_bad_IRQ(desc);
return;
}
do {
- irq = ffs(status) - 1;
+ unsigned int irq = ffs(status) - 1;
+
status &= ~(1 << irq);
generic_handle_irq(irq_find_mapping(f->domain, irq));
} while (status);
@@ -128,7 +128,7 @@ static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_set_chip_data(irq, f);
irq_set_chip_and_handler(irq, &f->chip,
handle_level_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_set_probe(irq);
return 0;
}
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 03846dff4212..b956dfffe78c 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -201,7 +201,7 @@ static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq,
return -EPERM;
irq_set_chip_and_handler(irq, &vic_chip, handle_level_irq);
irq_set_chip_data(irq, v->base);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_set_probe(irq);
return 0;
}
@@ -225,7 +225,7 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
return handled;
}
-static void vic_handle_irq_cascaded(unsigned int irq, struct irq_desc *desc)
+static void vic_handle_irq_cascaded(struct irq_desc *desc)
{
u32 stat, hwirq;
struct irq_chip *host_chip = irq_desc_get_chip(desc);
diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c
index 8371d9978d31..f9af0af21751 100644
--- a/drivers/irqchip/irq-vt8500.c
+++ b/drivers/irqchip/irq-vt8500.c
@@ -167,7 +167,6 @@ static int vt8500_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
irq_set_chip_and_handler(virq, &vt8500_irq_chip, handle_level_irq);
- set_irq_flags(virq, IRQF_VALID);
return 0;
}
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index 4cbd9c5dc1e6..1ccd2abed65f 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -182,7 +182,7 @@ static struct spear_shirq *spear320_shirq_blocks[] = {
&spear320_shirq_intrcomm_ras,
};
-static void shirq_handler(unsigned __irq, struct irq_desc *desc)
+static void shirq_handler(struct irq_desc *desc)
{
struct spear_shirq *shirq = irq_desc_get_handler_data(desc);
u32 pend;
@@ -211,7 +211,6 @@ static void __init spear_shirq_register(struct spear_shirq *shirq,
for (i = 0; i < shirq->nr_irqs; i++) {
irq_set_chip_and_handler(shirq->virq_base + i,
shirq->irq_chip, handle_simple_irq);
- set_irq_flags(shirq->virq_base + i, IRQF_VALID);
irq_set_chip_data(shirq->virq_base + i, shirq);
}
}
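The set_irq_flags() removals scattered through these irqchip patches rely on the same change of default: irq descriptors no longer need to be marked IRQF_VALID explicitly, and only the autoprobe hint still takes a dedicated call, irq_set_probe(). A hedged sketch of a converted irq_domain map callback, with a placeholder chip and function name:

#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_chip example_chip;	/* placeholder chip */

/* Hedged sketch: map callback after dropping set_irq_flags(). */
static int example_irq_map(struct irq_domain *d, unsigned int virq,
			   irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &example_chip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);
	/* was: set_irq_flags(virq, IRQF_VALID | IRQF_PROBE); */
	irq_set_probe(virq);
	return 0;
}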
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 70f4255ff291..42990f2d0317 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -170,6 +170,7 @@ config LEDS_SUNFIRE
config LEDS_IPAQ_MICRO
tristate "LED Support for the Compaq iPAQ h3xxx"
+ depends on LEDS_CLASS
depends on MFD_IPAQ_MICRO
help
Choose this option if you want to use the notification LED on
@@ -229,7 +230,7 @@ config LEDS_LP55XX_COMMON
tristate "Common Driver for TI/National LP5521/5523/55231/5562/8501"
depends on LEDS_LP5521 || LEDS_LP5523 || LEDS_LP5562 || LEDS_LP8501
select FW_LOADER
- select FW_LOADER_USER_HELPER_FALLBACK
+ select FW_LOADER_USER_HELPER
help
This option supports common operations for LP5521/5523/55231/5562/8501
devices.
diff --git a/drivers/leds/leds-aat1290.c b/drivers/leds/leds-aat1290.c
index fd7c25fd29c1..ac77d36b630c 100644
--- a/drivers/leds/leds-aat1290.c
+++ b/drivers/leds/leds-aat1290.c
@@ -331,7 +331,7 @@ static void aat1290_led_validate_mm_current(struct aat1290_led *led,
cfg->max_brightness = b + 1;
}
-int init_mm_current_scale(struct aat1290_led *led,
+static int init_mm_current_scale(struct aat1290_led *led,
struct aat1290_led_config_data *cfg)
{
int max_mm_current_percent[] = { 20, 22, 25, 28, 32, 36, 40, 45, 50, 56,
@@ -559,6 +559,7 @@ static const struct of_device_id aat1290_led_dt_match[] = {
{ .compatible = "skyworks,aat1290" },
{},
};
+MODULE_DEVICE_TABLE(of, aat1290_led_dt_match);
static struct platform_driver aat1290_led_driver = {
.probe = aat1290_led_probe,
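The MODULE_DEVICE_TABLE(of, ...) lines added here and in several LED and ethernet drivers below all fix the same omission: without exporting the match table, the module carries no OF aliases, so udev/modprobe cannot autoload it from a matching devicetree node. A hedged, self-contained sketch with made-up names:

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>

static const struct of_device_id example_dt_match[] = {
	{ .compatible = "vendor,example-device" },	/* placeholder string */
	{ /* sentinel */ },
};
/* Exports the table in the module's alias section so userspace can map a
 * devicetree compatible string back to this module and autoload it.
 */
MODULE_DEVICE_TABLE(of, example_dt_match);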
diff --git a/drivers/leds/leds-bcm6328.c b/drivers/leds/leds-bcm6328.c
index 986fe1e28f84..1793727bc9ae 100644
--- a/drivers/leds/leds-bcm6328.c
+++ b/drivers/leds/leds-bcm6328.c
@@ -395,6 +395,7 @@ static const struct of_device_id bcm6328_leds_of_match[] = {
{ .compatible = "brcm,bcm6328-leds", },
{ },
};
+MODULE_DEVICE_TABLE(of, bcm6328_leds_of_match);
static struct platform_driver bcm6328_leds_driver = {
.probe = bcm6328_leds_probe,
diff --git a/drivers/leds/leds-bcm6358.c b/drivers/leds/leds-bcm6358.c
index 21f96930b3be..7ea3526702e0 100644
--- a/drivers/leds/leds-bcm6358.c
+++ b/drivers/leds/leds-bcm6358.c
@@ -226,6 +226,7 @@ static const struct of_device_id bcm6358_leds_of_match[] = {
{ .compatible = "brcm,bcm6358-leds", },
{ },
};
+MODULE_DEVICE_TABLE(of, bcm6358_leds_of_match);
static struct platform_driver bcm6358_leds_driver = {
.probe = bcm6358_leds_probe,
diff --git a/drivers/leds/leds-ktd2692.c b/drivers/leds/leds-ktd2692.c
index 2ae8c4d17ff8..feca07be85f5 100644
--- a/drivers/leds/leds-ktd2692.c
+++ b/drivers/leds/leds-ktd2692.c
@@ -426,6 +426,7 @@ static const struct of_device_id ktd2692_match[] = {
{ .compatible = "kinetic,ktd2692", },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, ktd2692_match);
static struct platform_driver ktd2692_driver = {
.driver = {
diff --git a/drivers/leds/leds-max77693.c b/drivers/leds/leds-max77693.c
index df348a06d8c7..afbb1409b2e2 100644
--- a/drivers/leds/leds-max77693.c
+++ b/drivers/leds/leds-max77693.c
@@ -1080,6 +1080,7 @@ static const struct of_device_id max77693_led_dt_match[] = {
{ .compatible = "maxim,max77693-led" },
{},
};
+MODULE_DEVICE_TABLE(of, max77693_led_dt_match);
static struct platform_driver max77693_led_driver = {
.probe = max77693_led_probe,
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index b33514d9f427..a95a61220169 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -337,6 +337,7 @@ static const struct of_device_id of_ns2_leds_match[] = {
{ .compatible = "lacie,ns2-leds", },
{},
};
+MODULE_DEVICE_TABLE(of, of_ns2_leds_match);
#endif /* CONFIG_OF_GPIO */
struct ns2_led_priv {
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index e51de52eeb94..48b5890c28e3 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1997,7 +1997,8 @@ int bitmap_resize(struct bitmap *bitmap, sector_t blocks,
if (bitmap->mddev->bitmap_info.offset || bitmap->mddev->bitmap_info.file)
ret = bitmap_storage_alloc(&store, chunks,
!bitmap->mddev->bitmap_info.external,
- bitmap->cluster_slot);
+ mddev_is_clustered(bitmap->mddev)
+ ? bitmap->cluster_slot : 0);
if (ret)
goto err;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index d60c88df5234..4b3b6f8aff0c 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -968,7 +968,8 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
/*
* Generate a new unfragmented bio with the given size
- * This should never violate the device limitations
+ * This should never violate the device limitations (but only because
+ * max_segment_size is being constrained to PAGE_SIZE).
*
* This function may be called concurrently. If we allocate from the mempool
* concurrently, there is a possibility of deadlock. For example, if we have
@@ -2045,9 +2046,20 @@ static int crypt_iterate_devices(struct dm_target *ti,
return fn(ti, cc->dev, cc->start, ti->len, data);
}
+static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ /*
+ * Unfortunate constraint that is required to avoid the potential
+ * for exceeding underlying device's max_segments limits -- due to
+ * crypt_alloc_buffer() possibly allocating pages for the encryption
+ * bio that are not as physically contiguous as the original bio.
+ */
+ limits->max_segment_size = PAGE_SIZE;
+}
+
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 14, 0},
+ .version = {1, 14, 1},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
@@ -2058,6 +2070,7 @@ static struct target_type crypt_target = {
.resume = crypt_resume,
.message = crypt_message,
.iterate_devices = crypt_iterate_devices,
+ .io_hints = crypt_io_hints,
};
static int __init dm_crypt_init(void)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 6578b7bc1fbb..6fcbfb063366 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -4249,6 +4249,10 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
struct thin_c *tc = ti->private;
struct pool *pool = tc->pool;
+ struct queue_limits *pool_limits = dm_get_queue_limits(pool->pool_md);
+
+ if (!pool_limits->discard_granularity)
+ return; /* pool's discard support is disabled */
limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
limits->max_discard_sectors = 2048 * 1024 * 16; /* 16G */
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4f5ecbe94ccb..c702de18207a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5409,9 +5409,13 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
* which will now never happen */
wake_up_process(mddev->sync_thread->tsk);
+ if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags))
+ return -EBUSY;
mddev_unlock(mddev);
wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING,
&mddev->recovery));
+ wait_event(mddev->sb_wait,
+ !test_bit(MD_CHANGE_PENDING, &mddev->flags));
mddev_lock_nointr(mddev);
mutex_lock(&mddev->open_mutex);
@@ -8160,6 +8164,7 @@ void md_check_recovery(struct mddev *mddev)
md_reap_sync_thread(mddev);
clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ clear_bit(MD_CHANGE_PENDING, &mddev->flags);
goto unlock;
}
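The md cleanups that follow (multipath, raid1, raid10 and raid5) drop the same kind of guard: mempool_destroy() and kmem_cache_destroy() already return immediately for a NULL pointer, so the surrounding if checks are redundant. A minimal sketch of the pattern, with placeholder names:

#include <linux/mempool.h>
#include <linux/slab.h>

/* Hedged sketch: both destructors are NULL-safe, so teardown paths can
 * call them unconditionally.
 */
static void example_teardown(mempool_t *pool, struct kmem_cache *cache)
{
	mempool_destroy(pool);		/* no-op when pool is NULL */
	kmem_cache_destroy(cache);	/* no-op when cache is NULL */
}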
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index d222522c52e0..d132f06afdd1 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -470,8 +470,7 @@ static int multipath_run (struct mddev *mddev)
return 0;
out_free_conf:
- if (conf->pool)
- mempool_destroy(conf->pool);
+ mempool_destroy(conf->pool);
kfree(conf->multipaths);
kfree(conf);
mddev->private = NULL;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 63e619b2f44e..f8e5db0cb5aa 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -376,12 +376,6 @@ static int raid0_run(struct mddev *mddev)
struct md_rdev *rdev;
bool discard_supported = false;
- rdev_for_each(rdev, mddev) {
- disk_stack_limits(mddev->gendisk, rdev->bdev,
- rdev->data_offset << 9);
- if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
- discard_supported = true;
- }
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
blk_queue_max_write_same_sectors(mddev->queue, mddev->chunk_sectors);
blk_queue_max_discard_sectors(mddev->queue, mddev->chunk_sectors);
@@ -390,6 +384,12 @@ static int raid0_run(struct mddev *mddev)
blk_queue_io_opt(mddev->queue,
(mddev->chunk_sectors << 9) * mddev->raid_disks);
+ rdev_for_each(rdev, mddev) {
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
+ discard_supported = true;
+ }
if (!discard_supported)
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
else
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4517f06c41ba..049df6c4a8cc 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -881,8 +881,7 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
}
if (bio && bio_data_dir(bio) == WRITE) {
- if (bio->bi_iter.bi_sector >=
- conf->mddev->curr_resync_completed) {
+ if (bio->bi_iter.bi_sector >= conf->next_resync) {
if (conf->start_next_window == MaxSector)
conf->start_next_window =
conf->next_resync +
@@ -1516,7 +1515,7 @@ static void close_sync(struct r1conf *conf)
conf->r1buf_pool = NULL;
spin_lock_irq(&conf->resync_lock);
- conf->next_resync = 0;
+ conf->next_resync = MaxSector - 2 * NEXT_NORMALIO_DISTANCE;
conf->start_next_window = MaxSector;
conf->current_window_requests +=
conf->next_window_requests;
@@ -2843,8 +2842,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
abort:
if (conf) {
- if (conf->r1bio_pool)
- mempool_destroy(conf->r1bio_pool);
+ mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
kfree(conf->poolinfo);
@@ -2946,8 +2944,7 @@ static void raid1_free(struct mddev *mddev, void *priv)
{
struct r1conf *conf = priv;
- if (conf->r1bio_pool)
- mempool_destroy(conf->r1bio_pool);
+ mempool_destroy(conf->r1bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
kfree(conf->poolinfo);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 0fc33eb88855..7c99a4037715 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3486,8 +3486,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
mdname(mddev));
if (conf) {
- if (conf->r10bio_pool)
- mempool_destroy(conf->r10bio_pool);
+ mempool_destroy(conf->r10bio_pool);
kfree(conf->mirrors);
safe_put_page(conf->tmppage);
kfree(conf);
@@ -3682,8 +3681,7 @@ static int run(struct mddev *mddev)
out_free_conf:
md_unregister_thread(&mddev->thread);
- if (conf->r10bio_pool)
- mempool_destroy(conf->r10bio_pool);
+ mempool_destroy(conf->r10bio_pool);
safe_put_page(conf->tmppage);
kfree(conf->mirrors);
kfree(conf);
@@ -3696,8 +3694,7 @@ static void raid10_free(struct mddev *mddev, void *priv)
{
struct r10conf *conf = priv;
- if (conf->r10bio_pool)
- mempool_destroy(conf->r10bio_pool);
+ mempool_destroy(conf->r10bio_pool);
safe_put_page(conf->tmppage);
kfree(conf->mirrors);
kfree(conf->mirrors_old);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 15ef2c641b2b..49bb8d3ff9be 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2271,8 +2271,7 @@ static void shrink_stripes(struct r5conf *conf)
drop_one_stripe(conf))
;
- if (conf->slab_cache)
- kmem_cache_destroy(conf->slab_cache);
+ kmem_cache_destroy(conf->slab_cache);
conf->slab_cache = NULL;
}
@@ -3150,6 +3149,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
spin_unlock_irq(&sh->stripe_lock);
if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
wake_up(&conf->wait_for_overlap);
+ if (bi)
+ s->to_read--;
while (bi && bi->bi_iter.bi_sector <
sh->dev[i].sector + STRIPE_SECTORS) {
struct bio *nextbi =
@@ -3169,6 +3170,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
*/
clear_bit(R5_LOCKED, &sh->dev[i].flags);
}
+ s->to_write = 0;
+ s->written = 0;
if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
if (atomic_dec_and_test(&conf->pending_full_writes))
@@ -3300,7 +3303,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
*/
return 0;
- for (i = 0; i < s->failed; i++) {
+ for (i = 0; i < s->failed && i < 2; i++) {
if (fdev[i]->towrite &&
!test_bit(R5_UPTODATE, &fdev[i]->flags) &&
!test_bit(R5_OVERWRITE, &fdev[i]->flags))
@@ -3324,7 +3327,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
sh->sector < sh->raid_conf->mddev->recovery_cp)
/* reconstruct-write isn't being forced */
return 0;
- for (i = 0; i < s->failed; i++) {
+ for (i = 0; i < s->failed && i < 2; i++) {
if (s->failed_num[i] != sh->pd_idx &&
s->failed_num[i] != sh->qd_idx &&
!test_bit(R5_UPTODATE, &fdev[i]->flags) &&
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 4b54128bc78e..a726f01e3b02 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -138,7 +138,7 @@ static void asic3_irq_flip_edge(struct asic3 *asic,
spin_unlock_irqrestore(&asic->lock, flags);
}
-static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void asic3_irq_demux(struct irq_desc *desc)
{
struct asic3 *asic = irq_desc_get_handler_data(desc);
struct irq_data *data = irq_desc_get_irq_data(desc);
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index a76eb6ef47a0..b279205659a4 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -205,7 +205,7 @@ static void pcap_isr_work(struct work_struct *work)
} while (gpio_get_value(pdata->gpio));
}
-static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void pcap_irq_handler(struct irq_desc *desc)
{
struct pcap_chip *pcap = irq_desc_get_handler_data(desc);
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c
index 9131cdcdc64a..6ccaf90d98fd 100644
--- a/drivers/mfd/htc-egpio.c
+++ b/drivers/mfd/htc-egpio.c
@@ -98,7 +98,7 @@ static struct irq_chip egpio_muxed_chip = {
.irq_unmask = egpio_unmask,
};
-static void egpio_handler(unsigned int irq, struct irq_desc *desc)
+static void egpio_handler(struct irq_desc *desc)
{
struct egpio_info *ei = irq_desc_get_handler_data(desc);
int irqpin;
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index 5bb49f08955d..798e44306382 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -65,7 +65,7 @@ struct jz4740_adc {
spinlock_t lock;
};
-static void jz4740_adc_irq_demux(unsigned int irq, struct irq_desc *desc)
+static void jz4740_adc_irq_demux(struct irq_desc *desc)
{
struct irq_chip_generic *gc = irq_desc_get_handler_data(desc);
uint8_t status;
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c
index 59502d02cd15..1b7ec0870c2a 100644
--- a/drivers/mfd/pm8921-core.c
+++ b/drivers/mfd/pm8921-core.c
@@ -156,7 +156,7 @@ static int pm8xxx_irq_master_handler(struct pm_irq_chip *chip, int master)
return ret;
}
-static void pm8xxx_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void pm8xxx_irq_handler(struct irq_desc *desc)
{
struct pm_irq_chip *chip = irq_desc_get_handler_data(desc);
struct irq_chip *irq_chip = irq_desc_get_chip(desc);
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 16fc1adc4fa3..94bd89cb1f06 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -185,7 +185,7 @@ static struct mfd_cell t7l66xb_cells[] = {
/*--------------------------------------------------------------------------*/
/* Handle the T7L66XB interrupt mux */
-static void t7l66xb_irq(unsigned int irq, struct irq_desc *desc)
+static void t7l66xb_irq(struct irq_desc *desc)
{
struct t7l66xb *t7l66xb = irq_desc_get_handler_data(desc);
unsigned int isr;
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c
index 775b9aca871a..8c84a513016b 100644
--- a/drivers/mfd/tc6393xb.c
+++ b/drivers/mfd/tc6393xb.c
@@ -522,8 +522,7 @@ static int tc6393xb_register_gpio(struct tc6393xb *tc6393xb, int gpio_base)
/*--------------------------------------------------------------------------*/
-static void
-tc6393xb_irq(unsigned int irq, struct irq_desc *desc)
+static void tc6393xb_irq(struct irq_desc *desc)
{
struct tc6393xb *tc6393xb = irq_desc_get_handler_data(desc);
unsigned int isr;
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index 9a2302129711..f691d7ecad52 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -282,7 +282,7 @@ void ucb1x00_adc_disable(struct ucb1x00 *ucb)
* SIBCLK to talk to the chip. We leave the clock running until
* we have finished processing all interrupts from the chip.
*/
-static void ucb1x00_irq(unsigned int __irq, struct irq_desc *desc)
+static void ucb1x00_irq(struct irq_desc *desc)
{
struct ucb1x00 *ucb = irq_desc_get_handler_data(desc);
unsigned int isr, i;
diff --git a/drivers/misc/cxl/Makefile b/drivers/misc/cxl/Makefile
index 6f484dfe78f9..6982f603fadc 100644
--- a/drivers/misc/cxl/Makefile
+++ b/drivers/misc/cxl/Makefile
@@ -1,4 +1,4 @@
-ccflags-y := -Werror
+ccflags-y := -Werror -Wno-unused-const-variable
cxl-y += main.o file.o irq.o fault.o native.o
cxl-y += context.o sysfs.o debugfs.o pci.o trace.o
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 02c85160bfe9..a5e977192b61 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1249,8 +1249,6 @@ static int cxl_probe(struct pci_dev *dev, const struct pci_device_id *id)
int slice;
int rc;
- pci_dev_get(dev);
-
if (cxl_verbose)
dump_cxl_config_space(dev);
diff --git a/drivers/misc/cxl/sysfs.c b/drivers/misc/cxl/sysfs.c
index 25868c2ec03e..02006f7109a8 100644
--- a/drivers/misc/cxl/sysfs.c
+++ b/drivers/misc/cxl/sysfs.c
@@ -592,6 +592,8 @@ int cxl_sysfs_afu_add(struct cxl_afu *afu)
/* conditionally create the binary file for the error info buffer */
if (afu->eb_len) {
+ sysfs_attr_init(&afu->attr_eb.attr);
+
afu->attr_eb.attr.name = "afu_err_buff";
afu->attr_eb.attr.mode = S_IRUGO;
afu->attr_eb.size = afu->eb_len;
diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
index 6dd16a6d153f..94b520896b18 100644
--- a/drivers/misc/cxl/vphb.c
+++ b/drivers/misc/cxl/vphb.c
@@ -48,6 +48,12 @@ static bool cxl_pci_enable_device_hook(struct pci_dev *dev)
phb = pci_bus_to_host(dev->bus);
afu = (struct cxl_afu *)phb->private_data;
+
+ if (!cxl_adapter_link_ok(afu->adapter)) {
+ dev_warn(&dev->dev, "%s: Device link is down, refusing to enable AFU\n", __func__);
+ return false;
+ }
+
set_dma_ops(&dev->dev, &dma_direct_ops);
set_dma_offset(&dev->dev, PAGE_OFFSET);
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index 4b469cf9e60f..8504dbeacd3b 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -204,6 +204,8 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name)
if (!dir)
return -ENOMEM;
+ dev->dbgfs_dir = dir;
+
f = debugfs_create_file("meclients", S_IRUSR, dir,
dev, &mei_dbgfs_fops_meclients);
if (!f) {
@@ -228,7 +230,6 @@ int mei_dbgfs_register(struct mei_device *dev, const char *name)
dev_err(dev->dev, "allow_fixed_address: registration failed\n");
goto err;
}
- dev->dbgfs_dir = dir;
return 0;
err:
mei_dbgfs_deregister(dev);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 0520064dc33b..a3eb20bdcd97 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -134,9 +134,11 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
int err = cmd->error;
/* Flag re-tuning needed on CRC errors */
- if (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
+ if ((cmd->opcode != MMC_SEND_TUNING_BLOCK &&
+ cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200) &&
+ (err == -EILSEQ || (mrq->sbc && mrq->sbc->error == -EILSEQ) ||
(mrq->data && mrq->data->error == -EILSEQ) ||
- (mrq->stop && mrq->stop->error == -EILSEQ))
+ (mrq->stop && mrq->stop->error == -EILSEQ)))
mmc_retune_needed(host);
if (err && cmd->retries && mmc_host_is_spi(host)) {
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index abd933b7029b..5466f25f0281 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -457,7 +457,7 @@ int mmc_of_parse(struct mmc_host *host)
0, &cd_gpio_invert);
if (!ret)
dev_info(host->parent, "Got CD GPIO\n");
- else if (ret != -ENOENT)
+ else if (ret != -ENOENT && ret != -ENOSYS)
return ret;
/*
@@ -481,7 +481,7 @@ int mmc_of_parse(struct mmc_host *host)
ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
if (!ret)
dev_info(host->parent, "Got WP GPIO\n");
- else if (ret != -ENOENT)
+ else if (ret != -ENOENT && ret != -ENOSYS)
return ret;
if (of_property_read_bool(np, "disable-wp"))
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 1420f29628c7..8cadd74e8407 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -28,6 +28,7 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/slot-gpio.h>
#include <linux/io.h>
#include <linux/regulator/consumer.h>
#include <linux/gpio.h>
@@ -454,12 +455,8 @@ static int pxamci_get_ro(struct mmc_host *mmc)
{
struct pxamci_host *host = mmc_priv(mmc);
- if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) {
- if (host->pdata->gpio_card_ro_invert)
- return !gpio_get_value(host->pdata->gpio_card_ro);
- else
- return gpio_get_value(host->pdata->gpio_card_ro);
- }
+ if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro))
+ return mmc_gpio_get_ro(mmc);
if (host->pdata && host->pdata->get_ro)
return !!host->pdata->get_ro(mmc_dev(mmc));
/*
@@ -551,6 +548,7 @@ static void pxamci_enable_sdio_irq(struct mmc_host *host, int enable)
static const struct mmc_host_ops pxamci_ops = {
.request = pxamci_request,
+ .get_cd = mmc_gpio_get_cd,
.get_ro = pxamci_get_ro,
.set_ios = pxamci_set_ios,
.enable_sdio_irq = pxamci_enable_sdio_irq,
@@ -790,37 +788,31 @@ static int pxamci_probe(struct platform_device *pdev)
gpio_power = host->pdata->gpio_power;
}
if (gpio_is_valid(gpio_power)) {
- ret = gpio_request(gpio_power, "mmc card power");
+ ret = devm_gpio_request(&pdev->dev, gpio_power,
+ "mmc card power");
if (ret) {
- dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power);
+ dev_err(&pdev->dev, "Failed requesting gpio_power %d\n",
+ gpio_power);
goto out;
}
gpio_direction_output(gpio_power,
host->pdata->gpio_power_invert);
}
- if (gpio_is_valid(gpio_ro)) {
- ret = gpio_request(gpio_ro, "mmc card read only");
- if (ret) {
- dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
- goto err_gpio_ro;
- }
- gpio_direction_input(gpio_ro);
+ if (gpio_is_valid(gpio_ro))
+ ret = mmc_gpio_request_ro(mmc, gpio_ro);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
+ goto out;
+ } else {
+ mmc->caps |= host->pdata->gpio_card_ro_invert ?
+ MMC_CAP2_RO_ACTIVE_HIGH : 0;
}
- if (gpio_is_valid(gpio_cd)) {
- ret = gpio_request(gpio_cd, "mmc card detect");
- if (ret) {
- dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
- goto err_gpio_cd;
- }
- gpio_direction_input(gpio_cd);
- ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- "mmc card detect", mmc);
- if (ret) {
- dev_err(&pdev->dev, "failed to request card detect IRQ\n");
- goto err_request_irq;
- }
+ if (gpio_is_valid(gpio_cd))
+ ret = mmc_gpio_request_cd(mmc, gpio_cd, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_cd);
+ goto out;
}
if (host->pdata && host->pdata->init)
@@ -835,13 +827,7 @@ static int pxamci_probe(struct platform_device *pdev)
return 0;
-err_request_irq:
- gpio_free(gpio_cd);
-err_gpio_cd:
- gpio_free(gpio_ro);
-err_gpio_ro:
- gpio_free(gpio_power);
- out:
+out:
if (host) {
if (host->dma_chan_rx)
dma_release_channel(host->dma_chan_rx);
@@ -873,14 +859,6 @@ static int pxamci_remove(struct platform_device *pdev)
gpio_ro = host->pdata->gpio_card_ro;
gpio_power = host->pdata->gpio_power;
}
- if (gpio_is_valid(gpio_cd)) {
- free_irq(gpio_to_irq(gpio_cd), mmc);
- gpio_free(gpio_cd);
- }
- if (gpio_is_valid(gpio_ro))
- gpio_free(gpio_ro);
- if (gpio_is_valid(gpio_power))
- gpio_free(gpio_power);
if (host->vcc)
regulator_put(host->vcc);
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index a7b7a6771598..b981b8552e43 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -210,6 +210,16 @@
#define SDXC_IDMAC_DES0_CES BIT(30) /* card error summary */
#define SDXC_IDMAC_DES0_OWN BIT(31) /* 1-idma owns it, 0-host owns it */
+#define SDXC_CLK_400K 0
+#define SDXC_CLK_25M 1
+#define SDXC_CLK_50M 2
+#define SDXC_CLK_50M_DDR 3
+
+struct sunxi_mmc_clk_delay {
+ u32 output;
+ u32 sample;
+};
+
struct sunxi_idma_des {
u32 config;
u32 buf_size;
@@ -229,6 +239,7 @@ struct sunxi_mmc_host {
struct clk *clk_mmc;
struct clk *clk_sample;
struct clk *clk_output;
+ const struct sunxi_mmc_clk_delay *clk_delays;
/* irq */
spinlock_t lock;
@@ -654,25 +665,19 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
/* determine delays */
if (rate <= 400000) {
- oclk_dly = 180;
- sclk_dly = 42;
+ oclk_dly = host->clk_delays[SDXC_CLK_400K].output;
+ sclk_dly = host->clk_delays[SDXC_CLK_400K].sample;
} else if (rate <= 25000000) {
- oclk_dly = 180;
- sclk_dly = 75;
+ oclk_dly = host->clk_delays[SDXC_CLK_25M].output;
+ sclk_dly = host->clk_delays[SDXC_CLK_25M].sample;
} else if (rate <= 50000000) {
if (ios->timing == MMC_TIMING_UHS_DDR50) {
- oclk_dly = 60;
- sclk_dly = 120;
+ oclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].output;
+ sclk_dly = host->clk_delays[SDXC_CLK_50M_DDR].sample;
} else {
- oclk_dly = 90;
- sclk_dly = 150;
+ oclk_dly = host->clk_delays[SDXC_CLK_50M].output;
+ sclk_dly = host->clk_delays[SDXC_CLK_50M].sample;
}
- } else if (rate <= 100000000) {
- oclk_dly = 6;
- sclk_dly = 24;
- } else if (rate <= 200000000) {
- oclk_dly = 3;
- sclk_dly = 12;
} else {
return -EINVAL;
}
@@ -871,6 +876,7 @@ static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq)
static const struct of_device_id sunxi_mmc_of_match[] = {
{ .compatible = "allwinner,sun4i-a10-mmc", },
{ .compatible = "allwinner,sun5i-a13-mmc", },
+ { .compatible = "allwinner,sun9i-a80-mmc", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match);
@@ -884,6 +890,20 @@ static struct mmc_host_ops sunxi_mmc_ops = {
.hw_reset = sunxi_mmc_hw_reset,
};
+static const struct sunxi_mmc_clk_delay sunxi_mmc_clk_delays[] = {
+ [SDXC_CLK_400K] = { .output = 180, .sample = 180 },
+ [SDXC_CLK_25M] = { .output = 180, .sample = 75 },
+ [SDXC_CLK_50M] = { .output = 90, .sample = 120 },
+ [SDXC_CLK_50M_DDR] = { .output = 60, .sample = 120 },
+};
+
+static const struct sunxi_mmc_clk_delay sun9i_mmc_clk_delays[] = {
+ [SDXC_CLK_400K] = { .output = 180, .sample = 180 },
+ [SDXC_CLK_25M] = { .output = 180, .sample = 75 },
+ [SDXC_CLK_50M] = { .output = 150, .sample = 120 },
+ [SDXC_CLK_50M_DDR] = { .output = 90, .sample = 120 },
+};
+
static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
struct platform_device *pdev)
{
@@ -895,6 +915,11 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
else
host->idma_des_size_bits = 16;
+ if (of_device_is_compatible(np, "allwinner,sun9i-a80-mmc"))
+ host->clk_delays = sun9i_mmc_clk_delays;
+ else
+ host->clk_delays = sunxi_mmc_clk_delays;
+
ret = mmc_regulator_get_supply(host->mmc);
if (ret) {
if (ret != -EPROBE_DEFER)
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 5bbd1f094f4e..1fc23e48fe8e 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -926,6 +926,11 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
goto bad;
}
+ if (data_size > ubi->leb_size) {
+ ubi_err(ubi, "bad data_size");
+ goto bad;
+ }
+
if (vol_type == UBI_VID_STATIC) {
/*
* Although from high-level point of view static volumes may
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 80bdd5b88bac..d85c19762160 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -649,6 +649,7 @@ static int init_volumes(struct ubi_device *ubi,
if (ubi->corr_peb_count)
ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi->corr_peb_count);
+ return -ENOSPC;
}
ubi->rsvd_pebs += reserved_pebs;
ubi->avail_pebs -= reserved_pebs;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 275d9fb6fe5c..eb4489f9082f 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1601,6 +1601,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (ubi->corr_peb_count)
ubi_err(ubi, "%d PEBs are corrupted and not used",
ubi->corr_peb_count);
+ err = -ENOSPC;
goto out_free;
}
ubi->avail_pebs -= reserved_pebs;
diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
index 10f71c732b59..816d0e94961c 100644
--- a/drivers/net/arcnet/arcnet.c
+++ b/drivers/net/arcnet/arcnet.c
@@ -326,7 +326,7 @@ static void arcdev_setup(struct net_device *dev)
dev->type = ARPHRD_ARCNET;
dev->netdev_ops = &arcnet_netdev_ops;
dev->header_ops = &arcnet_header_ops;
- dev->hard_header_len = sizeof(struct archdr);
+ dev->hard_header_len = sizeof(struct arc_hardware);
dev->mtu = choose_mtu();
dev->addr_len = ARCNET_ALEN;
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index 6f13f7206762..1f7dd927cc5e 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -2000,6 +2000,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
*/
reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
+ reg &= ~PORT_PCS_CTRL_UNFORCED;
reg |= PORT_PCS_CTRL_FORCE_LINK |
PORT_PCS_CTRL_LINK_UP |
PORT_PCS_CTRL_DUPLEX_FULL |
@@ -2050,6 +2051,8 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
else
reg |= PORT_CONTROL_FRAME_MODE_DSA;
+ reg |= PORT_CONTROL_FORWARD_UNKNOWN |
+ PORT_CONTROL_FORWARD_UNKNOWN_MC;
}
if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index cfa37041ab71..c4bb8027b3fb 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -689,16 +689,24 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
netdev_dbg(ndev, "No phy-handle found in DT\n");
return -ENODEV;
}
- pdata->phy_dev = of_phy_find_device(phy_np);
- }
- phy_dev = pdata->phy_dev;
+ phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
+ 0, pdata->phy_mode);
+ if (!phy_dev) {
+ netdev_err(ndev, "Could not connect to PHY\n");
+ return -ENODEV;
+ }
+
+ pdata->phy_dev = phy_dev;
+ } else {
+ phy_dev = pdata->phy_dev;
- if (!phy_dev ||
- phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
- pdata->phy_mode)) {
- netdev_err(ndev, "Could not connect to PHY\n");
- return -ENODEV;
+ if (!phy_dev ||
+ phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
+ pdata->phy_mode)) {
+ netdev_err(ndev, "Could not connect to PHY\n");
+ return -ENODEV;
+ }
}
pdata->phy_speed = SPEED_UNKNOWN;
diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c
index f9cb99bfb511..ffd180570920 100644
--- a/drivers/net/ethernet/arc/emac_arc.c
+++ b/drivers/net/ethernet/arc/emac_arc.c
@@ -78,6 +78,7 @@ static const struct of_device_id emac_arc_dt_ids[] = {
{ .compatible = "snps,arc-emac" },
{ /* Sentinel */ }
};
+MODULE_DEVICE_TABLE(of, emac_arc_dt_ids);
static struct platform_driver emac_arc_driver = {
.probe = emac_arc_probe,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index b9a5a97ed4dd..f1b5364f3521 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -2079,6 +2079,7 @@ static const struct of_device_id bcm_sysport_of_match[] = {
{ .compatible = "brcm,systemport" },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
static struct platform_driver bcm_sysport_driver = {
.probe = bcm_sysport_probe,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index ba936635322a..b5e64b02200c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1946,6 +1946,7 @@ struct bnx2x {
u16 vlan_cnt;
u16 vlan_credit;
u16 vxlan_dst_port;
+ u8 vxlan_dst_port_count;
bool accept_any_vlan;
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index e3da2bddf143..f1d62d5dbaff 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -3705,16 +3705,14 @@ out:
void bnx2x_update_mfw_dump(struct bnx2x *bp)
{
- struct timeval epoc;
u32 drv_ver;
u32 valid_dump;
if (!SHMEM2_HAS(bp, drv_info))
return;
- /* Update Driver load time */
- do_gettimeofday(&epoc);
- SHMEM2_WR(bp, drv_info.epoc, epoc.tv_sec);
+ /* Update Driver load time, possibly broken in y2038 */
+ SHMEM2_WR(bp, drv_info.epoc, (u32)ktime_get_real_seconds());
drv_ver = bnx2x_update_mng_version_utility(DRV_MODULE_VERSION, true);
SHMEM2_WR(bp, drv_info.drv_ver, drv_ver);
@@ -10110,12 +10108,18 @@ static void __bnx2x_add_vxlan_port(struct bnx2x *bp, u16 port)
if (!netif_running(bp->dev))
return;
- if (bp->vxlan_dst_port || !IS_PF(bp)) {
+ if (bp->vxlan_dst_port_count && bp->vxlan_dst_port == port) {
+ bp->vxlan_dst_port_count++;
+ return;
+ }
+
+ if (bp->vxlan_dst_port_count || !IS_PF(bp)) {
DP(BNX2X_MSG_SP, "Vxlan destination port limit reached\n");
return;
}
bp->vxlan_dst_port = port;
+ bp->vxlan_dst_port_count = 1;
bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_ADD_VXLAN_PORT, 0);
}
@@ -10130,10 +10134,14 @@ static void bnx2x_add_vxlan_port(struct net_device *netdev,
static void __bnx2x_del_vxlan_port(struct bnx2x *bp, u16 port)
{
- if (!bp->vxlan_dst_port || bp->vxlan_dst_port != port || !IS_PF(bp)) {
+ if (!bp->vxlan_dst_port_count || bp->vxlan_dst_port != port ||
+ !IS_PF(bp)) {
DP(BNX2X_MSG_SP, "Invalid vxlan port\n");
return;
}
+ bp->vxlan_dst_port_count--;
+ if (bp->vxlan_dst_port_count)
+ return;
if (netif_running(bp->dev)) {
bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_DEL_VXLAN_PORT, 0);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index c9bd7f16018e..ff702a707a91 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -4319,8 +4319,16 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
/* RSS keys */
if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
- memcpy(&data->rss_key[0], &p->rss_key[0],
- sizeof(data->rss_key));
+ u8 *dst = (u8 *)(data->rss_key) + sizeof(data->rss_key);
+ const u8 *src = (const u8 *)p->rss_key;
+ int i;
+
+ /* Apparently, bnx2x reads this array in reverse order
+ * We need to byte swap rss_key to comply with Toeplitz specs.
+ */
+ for (i = 0; i < sizeof(data->rss_key); i++)
+ *--dst = *src++;
+
caps |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index fadbd0088d3e..3bc701e4c59e 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3155,6 +3155,7 @@ static const struct of_device_id bcmgenet_match[] = {
{ .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
{ },
};
+MODULE_DEVICE_TABLE(of, bcmgenet_match);
static int bcmgenet_probe(struct platform_device *pdev)
{
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index b7a0f7879de2..9e59663a6ead 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1543,7 +1543,7 @@ bfa_flash_cmd_act_check(void __iomem *pci_bar)
}
/* Flush FLI data fifo. */
-static u32
+static int
bfa_flash_fifo_flush(void __iomem *pci_bar)
{
u32 i;
@@ -1573,11 +1573,11 @@ bfa_flash_fifo_flush(void __iomem *pci_bar)
}
/* Read flash status. */
-static u32
+static int
bfa_flash_status_read(void __iomem *pci_bar)
{
union bfa_flash_dev_status_reg dev_status;
- u32 status;
+ int status;
u32 ret_status;
int i;
@@ -1611,11 +1611,11 @@ bfa_flash_status_read(void __iomem *pci_bar)
}
/* Start flash read operation. */
-static u32
+static int
bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
char *buf)
{
- u32 status;
+ int status;
/* len must be a multiple of 4 and must not exceed the fifo size */
if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
@@ -1703,7 +1703,8 @@ static enum bfa_status
bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
u32 len)
{
- u32 n, status;
+ u32 n;
+ int status;
u32 off, l, s, residue, fifo_sz;
residue = len;
diff --git a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
index 5d0753cc7e73..04b0d16b210e 100644
--- a/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
+++ b/drivers/net/ethernet/brocade/bna/bna_tx_rx.c
@@ -2400,6 +2400,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
q0->rcb->id = 0;
q0->rx_packets = q0->rx_bytes = 0;
q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
+ q0->rxbuf_map_failed = 0;
bna_rxq_qpt_setup(q0, rxp, dpage_count, PAGE_SIZE,
&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[i]);
@@ -2428,6 +2429,7 @@ bna_rx_create(struct bna *bna, struct bnad *bnad,
: rx_cfg->q1_buf_size;
q1->rx_packets = q1->rx_bytes = 0;
q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
+ q1->rxbuf_map_failed = 0;
bna_rxq_qpt_setup(q1, rxp, hpage_count, PAGE_SIZE,
&hqpt_mem[i], &hsqpt_mem[i],
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index e0e797f2ea14..c438d032e8bf 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -587,6 +587,7 @@ struct bna_rxq {
u64 rx_bytes;
u64 rx_packets_with_error;
u64 rxbuf_alloc_failed;
+ u64 rxbuf_map_failed;
};
/* RxQ pair */
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 506047c38607..21a0cfc3e7ec 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -399,7 +399,13 @@ bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
}
dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
- unmap_q->map_size, DMA_FROM_DEVICE);
+ unmap_q->map_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
+ put_page(page);
+ BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
+ rcb->rxq->rxbuf_map_failed++;
+ goto finishing;
+ }
unmap->page = page;
unmap->page_offset = page_offset;
@@ -454,8 +460,15 @@ bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
rcb->rxq->rxbuf_alloc_failed++;
goto finishing;
}
+
dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
buff_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
+ rcb->rxq->rxbuf_map_failed++;
+ goto finishing;
+ }
unmap->skb = skb;
dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
@@ -3025,6 +3038,11 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
unmap = head_unmap;
dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
+ dev_kfree_skb_any(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
+ return NETDEV_TX_OK;
+ }
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
txqent->vector[0].length = htons(len);
dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
@@ -3056,6 +3074,15 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
0, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
+ /* Undo the changes starting at tcb->producer_index */
+ bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
+ tcb->producer_index);
+ dev_kfree_skb_any(skb);
+ BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
+ return NETDEV_TX_OK;
+ }
+
dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
txqent->vector[vect_id].length = htons(size);
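The bnad hunks above add the same defensive step at every streaming DMA mapping site: check dma_mapping_error() before handing the address to the hardware, free the buffer on failure and account it in a new map-failure counter. A hedged sketch of the tx-side shape, with placeholder names:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_map_and_send(struct device *dev, struct sk_buff *skb)
{
	dma_addr_t dma_addr = dma_map_single(dev, skb->data,
					     skb_headlen(skb), DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma_addr)) {
		dev_kfree_skb_any(skb);	/* drop instead of posting a bad address */
		return NETDEV_TX_OK;	/* the driver also bumps a map-failure counter */
	}

	/* ... post dma_addr to the tx ring, dma_unmap_single() on completion ... */
	return NETDEV_TX_OK;
}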
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index faedbf24777e..f4ed816b93ee 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -175,6 +175,7 @@ struct bnad_drv_stats {
u64 tx_skb_headlen_zero;
u64 tx_skb_frag_zero;
u64 tx_skb_len_mismatch;
+ u64 tx_skb_map_failed;
u64 hw_stats_updates;
u64 netif_rx_dropped;
@@ -189,6 +190,7 @@ struct bnad_drv_stats {
u64 rx_unmap_q_alloc_failed;
u64 rxbuf_alloc_failed;
+ u64 rxbuf_map_failed;
};
/* Complete driver stats */
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 2bdfc5dff4b1..0e4fdc3dd729 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -90,6 +90,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"tx_skb_headlen_zero",
"tx_skb_frag_zero",
"tx_skb_len_mismatch",
+ "tx_skb_map_failed",
"hw_stats_updates",
"netif_rx_dropped",
@@ -102,6 +103,7 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"tx_unmap_q_alloc_failed",
"rx_unmap_q_alloc_failed",
"rxbuf_alloc_failed",
+ "rxbuf_map_failed",
"mac_stats_clr_cnt",
"mac_frame_64",
@@ -807,6 +809,7 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
rx_packets_with_error;
buf[bi++] = rcb->rxq->
rxbuf_alloc_failed;
+ buf[bi++] = rcb->rxq->rxbuf_map_failed;
buf[bi++] = rcb->producer_index;
buf[bi++] = rcb->consumer_index;
}
@@ -821,6 +824,7 @@ bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
rx_packets_with_error;
buf[bi++] = rcb->rxq->
rxbuf_alloc_failed;
+ buf[bi++] = rcb->rxq->rxbuf_map_failed;
buf[bi++] = rcb->producer_index;
buf[bi++] = rcb->consumer_index;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 8353a6cbfcc2..03ed00c49823 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -157,6 +157,11 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5090), /* Custom T540-CR */
CH_PCI_ID_TABLE_FENTRY(0x5091), /* Custom T522-CR */
CH_PCI_ID_TABLE_FENTRY(0x5092), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5093), /* Custom T580-LP-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5094), /* Custom T540-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5095), /* Custom T540-CR-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5096), /* Custom T580-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x5097), /* Custom T520-KR */
/* T6 adapters:
*/
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 0a27805cbbbd..821540913343 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -582,6 +582,7 @@ struct be_adapter {
u16 pvid;
__be16 vxlan_port;
int vxlan_port_count;
+ int vxlan_port_aliases;
struct phy_info phy;
u8 wol_cap;
bool wol_en;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 12687bf52b95..7bf51a1a0a77 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5176,6 +5176,11 @@ static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
+ if (adapter->vxlan_port == port && adapter->vxlan_port_count) {
+ adapter->vxlan_port_aliases++;
+ return;
+ }
+
if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
dev_info(dev,
"Only one UDP port supported for VxLAN offloads\n");
@@ -5226,6 +5231,11 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
if (adapter->vxlan_port != port)
goto done;
+ if (adapter->vxlan_port_aliases) {
+ adapter->vxlan_port_aliases--;
+ return;
+ }
+
be_disable_vxlan_offloads(adapter);
dev_info(&adapter->pdev->dev,
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 4b69d061d90f..710715fcb23d 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1710,8 +1710,10 @@ static void gfar_configure_serdes(struct net_device *dev)
* everything for us? Resetting it takes the link down and requires
* several seconds for it to come back.
*/
- if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS)
+ if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
+ put_device(&tbiphy->dev);
return;
+ }
/* Single clk mode, mii mode off(for serdes communication) */
phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);
@@ -1723,6 +1725,8 @@ static void gfar_configure_serdes(struct net_device *dev)
phy_write(tbiphy, MII_BMCR,
BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
BMCR_SPEED1000);
+
+ put_device(&tbiphy->dev);
}
static int __gfar_is_rx_idle(struct gfar_private *priv)
@@ -1970,8 +1974,7 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
/* Install our interrupt handlers for Error,
* Transmit, and Receive
*/
- err = request_irq(gfar_irq(grp, ER)->irq, gfar_error,
- IRQF_NO_SUSPEND,
+ err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
gfar_irq(grp, ER)->name, grp);
if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -1979,6 +1982,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
goto err_irq_fail;
}
+ enable_irq_wake(gfar_irq(grp, ER)->irq);
+
err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
gfar_irq(grp, TX)->name, grp);
if (err < 0) {
@@ -1994,14 +1999,14 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
goto rx_irq_fail;
}
} else {
- err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt,
- IRQF_NO_SUSPEND,
+ err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
gfar_irq(grp, TX)->name, grp);
if (err < 0) {
netif_err(priv, intr, dev, "Can't get IRQ %d\n",
gfar_irq(grp, TX)->irq);
goto err_irq_fail;
}
+ enable_irq_wake(gfar_irq(grp, TX)->irq);
}
return 0;
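The gianfar hunks above pair every exit from gfar_configure_serdes() with put_device(&tbiphy->dev). A short sketch of the pattern, assuming (as in gianfar) that the TBI PHY was looked up with of_phy_find_device(), which returns the device with a reference held that each exit path must drop:

#include <linux/device.h>
#include <linux/mii.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

static void configure_serdes_sketch(struct device_node *tbi_node)
{
	struct phy_device *tbiphy = of_phy_find_device(tbi_node);

	if (!tbiphy)
		return;

	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
		put_device(&tbiphy->dev);	/* early exit: drop the reference */
		return;
	}

	/* ... program the TBI PHY for serdes operation ... */

	put_device(&tbiphy->dev);		/* normal exit: drop the reference */
}
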
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 8e3cd77aa347..664d0c261269 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -557,6 +557,7 @@ static const struct of_device_id match_table[] = {
{ .compatible = "fsl,etsec-ptp" },
{},
};
+MODULE_DEVICE_TABLE(of, match_table);
static struct platform_driver gianfar_ptp_driver = {
.driver = {
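Several hunks in this series (gianfar_ptp, ks8851, moxart, mdio-bcm-unimac, mdio-gpio) add only MODULE_DEVICE_TABLE(of, ...). That macro exports the OF match table in the module's alias information so userspace can autoload the module when the device tree describes a matching node; without it the driver works when built in but is never loaded automatically as a module. A minimal stand-alone example (driver name and compatible string are made up):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id example_of_match[] = {
	{ .compatible = "vendor,example-device" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_of_match);

static int example_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_driver = {
	.probe = example_probe,
	.driver = {
		.name = "example",
		.of_match_table = example_of_match,
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");
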
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 4dd40e057f40..650f7888e32b 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1384,6 +1384,8 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
value = phy_read(tbiphy, ENET_TBI_MII_CR);
value &= ~0x1000; /* Turn off autonegotiation */
phy_write(tbiphy, ENET_TBI_MII_CR, value);
+
+ put_device(&tbiphy->dev);
}
init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
@@ -1702,8 +1704,10 @@ static void uec_configure_serdes(struct net_device *dev)
* everything for us? Resetting it takes the link down and requires
* several seconds for it to come back.
*/
- if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS)
+ if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) {
+ put_device(&tbiphy->dev);
return;
+ }
/* Single clk mode, mii mode off(for serdes communication) */
phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS);
@@ -1711,6 +1715,8 @@ static void uec_configure_serdes(struct net_device *dev)
phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);
phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS);
+
+ put_device(&tbiphy->dev);
}
/* Configure the PHY for dev.
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index cc2d8b4b18e3..253f8ed0537a 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -816,7 +816,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
struct net_device *ndev;
struct hip04_priv *priv;
struct resource *res;
- unsigned int irq;
+ int irq;
int ret;
ndev = alloc_etherdev(sizeof(struct hip04_priv));
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 28df37420da9..ac02c675c59c 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -460,8 +460,8 @@ struct emac_ethtool_regs_subhdr {
u32 index;
};
-#define EMAC_ETHTOOL_REGS_VER 0
-#define EMAC4_ETHTOOL_REGS_VER 1
-#define EMAC4SYNC_ETHTOOL_REGS_VER 2
+#define EMAC_ETHTOOL_REGS_VER 3
+#define EMAC4_ETHTOOL_REGS_VER 4
+#define EMAC4SYNC_ETHTOOL_REGS_VER 5
#endif /* __IBM_NEWEMAC_CORE_H */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index 3e0d20037675..62488a67149d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -946,6 +946,13 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
/* take the lock before we start messing with the ring */
mutex_lock(&hw->aq.arq_mutex);
+ if (hw->aq.arq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Admin queue not initialized.\n");
+ ret_code = I40E_ERR_QUEUE_EMPTY;
+ goto clean_arq_element_err;
+ }
+
/* set next_to_use to head */
ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
if (ntu == ntc) {
@@ -1007,6 +1014,8 @@ clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)
*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+
+clean_arq_element_err:
mutex_unlock(&hw->aq.arq_mutex);
if (i40e_is_nvm_update_op(&e->desc)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 851c1a159be8..2fdf978ae6a5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2672,7 +2672,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.lrxqthresh = 2;
rx_ctx.crcstrip = 1;
rx_ctx.l2tsel = 1;
- rx_ctx.showiv = 1;
+ /* this controls whether VLAN is stripped from inner headers */
+ rx_ctx.showiv = 0;
#ifdef I40E_FCOE
rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index f08450b90774..929d47152bf2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -887,6 +887,13 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
/* take the lock before we start messing with the ring */
mutex_lock(&hw->aq.arq_mutex);
+ if (hw->aq.arq.count == 0) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ "AQRX: Admin queue not initialized.\n");
+ ret_code = I40E_ERR_QUEUE_EMPTY;
+ goto clean_arq_element_err;
+ }
+
/* set next_to_use to head */
ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
if (ntu == ntc) {
@@ -948,6 +955,8 @@ clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)
*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+
+clean_arq_element_err:
mutex_unlock(&hw->aq.arq_mutex);
return ret_code;
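Both admin-queue hunks above (PF and VF) add the same guard: once arq_mutex is held, an uninitialized queue must bail out through a label that still releases the lock. The shape of that pattern, reduced to its essentials (names and the error code here are generic, not the i40e ones):

#include <linux/errno.h>
#include <linux/mutex.h>

static int clean_arq_sketch(struct mutex *lock, bool queue_initialized)
{
	int ret = 0;

	mutex_lock(lock);

	if (!queue_initialized) {
		ret = -ENODEV;		/* i40e uses I40E_ERR_QUEUE_EMPTY */
		goto out_unlock;
	}

	/* ... consume admin receive queue entries ... */

out_unlock:
	mutex_unlock(lock);
	return ret;
}
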
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index fe2299ac4f5c..514df76fc70f 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1479,6 +1479,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq);
struct sk_buff *skb;
unsigned char *data;
+ dma_addr_t phys_addr;
u32 rx_status;
int rx_bytes, err;
@@ -1486,6 +1487,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
rx_status = rx_desc->status;
rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
data = (unsigned char *)rx_desc->buf_cookie;
+ phys_addr = rx_desc->buf_phys_addr;
if (!mvneta_rxq_desc_is_first_last(rx_status) ||
(rx_status & MVNETA_RXD_ERR_SUMMARY)) {
@@ -1534,7 +1536,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
if (!skb)
goto err_drop_frame;
- dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr,
+ dma_unmap_single(dev->dev.parent, phys_addr,
MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
rcvd_pkts++;
@@ -3173,6 +3175,8 @@ static int mvneta_probe(struct platform_device *pdev)
struct phy_device *phy = of_phy_find_device(dn);
mvneta_fixed_link_update(pp, phy);
+
+ put_device(&phy->dev);
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 4402a1e48c9b..e7a5000aa12c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1047,13 +1047,15 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
/* If we used up all the quota - we're probably not done yet... */
if (done == budget) {
- int cpu_curr;
const struct cpumask *aff;
+ struct irq_data *idata;
+ int cpu_curr;
INC_PERF_COUNTER(priv->pstats.napi_quota);
cpu_curr = smp_processor_id();
- aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
+ idata = irq_desc_get_irq_data(cq->irq_desc);
+ aff = irq_data_get_affinity_mask(idata);
if (likely(cpumask_test_cpu(cpu_curr, aff)))
return budget;
@@ -1268,8 +1270,6 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
rss_context->hash_fn = MLX4_RSS_HASH_TOP;
memcpy(rss_context->rss_key, priv->rss_key,
MLX4_EN_RSS_KEY_SIZE);
- netdev_rss_key_fill(rss_context->rss_key,
- MLX4_EN_RSS_KEY_SIZE);
} else {
en_err(priv, "Unknown RSS hash function requested\n");
err = -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index bd9ea0d01aae..1d4e2e054647 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1184,10 +1184,11 @@ out:
if (prot == MLX4_PROT_ETH) {
/* manage the steering entry for promisc mode */
if (new_entry)
- new_steering_entry(dev, port, steer, index, qp->qpn);
+ err = new_steering_entry(dev, port, steer,
+ index, qp->qpn);
else
- existing_steering_entry(dev, port, steer,
- index, qp->qpn);
+ err = existing_steering_entry(dev, port, steer,
+ index, qp->qpn);
}
if (err && link && index != -1) {
if (index < dev->caps.num_mgms)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index aa0d5ffe92d8..9335e5ae18cc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -200,25 +200,3 @@ int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev)
return err;
}
-
-int mlx5_core_query_special_context(struct mlx5_core_dev *dev, u32 *rsvd_lkey)
-{
- struct mlx5_cmd_query_special_contexts_mbox_in in;
- struct mlx5_cmd_query_special_contexts_mbox_out out;
- int err;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
- if (err)
- return err;
-
- if (out.hdr.status)
- err = mlx5_cmd_status_to_err(&out.hdr);
-
- *rsvd_lkey = be32_to_cpu(out.resd_lkey);
-
- return err;
-}
-EXPORT_SYMBOL(mlx5_core_query_special_context);
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 66d4ab703f45..60f43ec22175 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -1601,6 +1601,7 @@ static const struct of_device_id ks8851_match_table[] = {
{ .compatible = "micrel,ks8851" },
{ }
};
+MODULE_DEVICE_TABLE(of, ks8851_match_table);
static struct spi_driver ks8851_driver = {
.driver = {
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index becbb5f1f5a7..a10c928bbd6b 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -552,6 +552,7 @@ static const struct of_device_id moxart_mac_match[] = {
{ .compatible = "moxa,moxart-mac" },
{ }
};
+MODULE_DEVICE_TABLE(of, moxart_mac_match);
static struct platform_driver moxart_mac_driver = {
.probe = moxart_mac_probe,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 06bcc734fe8d..d6696cfa11d2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -536,6 +536,7 @@ struct qlcnic_hardware_context {
u8 extend_lb_time;
u8 phys_port_id[ETH_ALEN];
u8 lb_mode;
+ u8 vxlan_port_count;
u16 vxlan_port;
struct device *hwmon_dev;
u32 post_mode;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 8b08b20e8b30..d4481454b5f8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -483,11 +483,17 @@ static void qlcnic_add_vxlan_port(struct net_device *netdev,
/* Adapter supports only one VXLAN port. Use very first port
* for enabling offload
*/
- if (!qlcnic_encap_rx_offload(adapter) || ahw->vxlan_port)
+ if (!qlcnic_encap_rx_offload(adapter))
return;
+ if (!ahw->vxlan_port_count) {
+ ahw->vxlan_port_count = 1;
+ ahw->vxlan_port = ntohs(port);
+ adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
+ return;
+ }
+ if (ahw->vxlan_port == ntohs(port))
+ ahw->vxlan_port_count++;
- ahw->vxlan_port = ntohs(port);
- adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
}
static void qlcnic_del_vxlan_port(struct net_device *netdev,
@@ -496,11 +502,13 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev,
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
- if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port ||
+ if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port_count ||
(ahw->vxlan_port != ntohs(port)))
return;
- adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
+ ahw->vxlan_port_count--;
+ if (!ahw->vxlan_port_count)
+ adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
}
static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index d79e33b3c191..686334f4588d 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -157,6 +157,7 @@ enum {
NWayAdvert = 0x66, /* MII ADVERTISE */
NWayLPAR = 0x68, /* MII LPA */
NWayExpansion = 0x6A, /* MII Expansion */
+ TxDmaOkLowDesc = 0x82, /* Low 16 bit address of a Tx descriptor. */
Config5 = 0xD8, /* Config5 */
TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
@@ -341,6 +342,7 @@ struct cp_private {
unsigned tx_tail;
struct cp_desc *tx_ring;
struct sk_buff *tx_skb[CP_TX_RING_SIZE];
+ u32 tx_opts[CP_TX_RING_SIZE];
unsigned rx_buf_sz;
unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
@@ -665,7 +667,7 @@ static void cp_tx (struct cp_private *cp)
BUG_ON(!skb);
dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
- le32_to_cpu(txd->opts1) & 0xffff,
+ cp->tx_opts[tx_tail] & 0xffff,
PCI_DMA_TODEVICE);
if (status & LastFrag) {
@@ -733,7 +735,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
{
struct cp_private *cp = netdev_priv(dev);
unsigned entry;
- u32 eor, flags;
+ u32 eor, opts1;
unsigned long intr_flags;
__le32 opts2;
int mss = 0;
@@ -753,6 +755,21 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
mss = skb_shinfo(skb)->gso_size;
opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
+ opts1 = DescOwn;
+ if (mss)
+ opts1 |= LargeSend | ((mss & MSSMask) << MSSShift);
+ else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ const struct iphdr *ip = ip_hdr(skb);
+ if (ip->protocol == IPPROTO_TCP)
+ opts1 |= IPCS | TCPCS;
+ else if (ip->protocol == IPPROTO_UDP)
+ opts1 |= IPCS | UDPCS;
+ else {
+ WARN_ONCE(1,
+ "Net bug: asked to checksum invalid Legacy IP packet\n");
+ goto out_dma_error;
+ }
+ }
if (skb_shinfo(skb)->nr_frags == 0) {
struct cp_desc *txd = &cp->tx_ring[entry];
@@ -768,31 +785,20 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
txd->addr = cpu_to_le64(mapping);
wmb();
- flags = eor | len | DescOwn | FirstFrag | LastFrag;
-
- if (mss)
- flags |= LargeSend | ((mss & MSSMask) << MSSShift);
- else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- const struct iphdr *ip = ip_hdr(skb);
- if (ip->protocol == IPPROTO_TCP)
- flags |= IPCS | TCPCS;
- else if (ip->protocol == IPPROTO_UDP)
- flags |= IPCS | UDPCS;
- else
- WARN_ON(1); /* we need a WARN() */
- }
+ opts1 |= eor | len | FirstFrag | LastFrag;
- txd->opts1 = cpu_to_le32(flags);
+ txd->opts1 = cpu_to_le32(opts1);
wmb();
cp->tx_skb[entry] = skb;
- entry = NEXT_TX(entry);
+ cp->tx_opts[entry] = opts1;
+ netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
+ entry, skb->len);
} else {
struct cp_desc *txd;
- u32 first_len, first_eor;
+ u32 first_len, first_eor, ctrl;
dma_addr_t first_mapping;
int frag, first_entry = entry;
- const struct iphdr *ip = ip_hdr(skb);
/* We must give this initial chunk to the device last.
* Otherwise we could race with the device.
@@ -805,14 +811,14 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
goto out_dma_error;
cp->tx_skb[entry] = skb;
- entry = NEXT_TX(entry);
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
u32 len;
- u32 ctrl;
dma_addr_t mapping;
+ entry = NEXT_TX(entry);
+
len = skb_frag_size(this_frag);
mapping = dma_map_single(&cp->pdev->dev,
skb_frag_address(this_frag),
@@ -824,19 +830,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
- ctrl = eor | len | DescOwn;
-
- if (mss)
- ctrl |= LargeSend |
- ((mss & MSSMask) << MSSShift);
- else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (ip->protocol == IPPROTO_TCP)
- ctrl |= IPCS | TCPCS;
- else if (ip->protocol == IPPROTO_UDP)
- ctrl |= IPCS | UDPCS;
- else
- BUG();
- }
+ ctrl = opts1 | eor | len;
if (frag == skb_shinfo(skb)->nr_frags - 1)
ctrl |= LastFrag;
@@ -849,8 +843,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
txd->opts1 = cpu_to_le32(ctrl);
wmb();
+ cp->tx_opts[entry] = ctrl;
cp->tx_skb[entry] = skb;
- entry = NEXT_TX(entry);
}
txd = &cp->tx_ring[first_entry];
@@ -858,27 +852,17 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
txd->addr = cpu_to_le64(first_mapping);
wmb();
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (ip->protocol == IPPROTO_TCP)
- txd->opts1 = cpu_to_le32(first_eor | first_len |
- FirstFrag | DescOwn |
- IPCS | TCPCS);
- else if (ip->protocol == IPPROTO_UDP)
- txd->opts1 = cpu_to_le32(first_eor | first_len |
- FirstFrag | DescOwn |
- IPCS | UDPCS);
- else
- BUG();
- } else
- txd->opts1 = cpu_to_le32(first_eor | first_len |
- FirstFrag | DescOwn);
+ ctrl = opts1 | first_eor | first_len | FirstFrag;
+ txd->opts1 = cpu_to_le32(ctrl);
wmb();
+
+ cp->tx_opts[first_entry] = ctrl;
+ netif_dbg(cp, tx_queued, cp->dev, "tx queued, slots %d-%d, skblen %d\n",
+ first_entry, entry, skb->len);
}
- cp->tx_head = entry;
+ cp->tx_head = NEXT_TX(entry);
netdev_sent_queue(dev, skb->len);
- netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
- entry, skb->len);
if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
netif_stop_queue(dev);
@@ -1115,6 +1099,7 @@ static int cp_init_rings (struct cp_private *cp)
{
memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
+ memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
cp_init_rings_index(cp);
@@ -1151,7 +1136,7 @@ static void cp_clean_rings (struct cp_private *cp)
desc = cp->rx_ring + i;
dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
- dev_kfree_skb(cp->rx_skb[i]);
+ dev_kfree_skb_any(cp->rx_skb[i]);
}
}
@@ -1164,7 +1149,7 @@ static void cp_clean_rings (struct cp_private *cp)
le32_to_cpu(desc->opts1) & 0xffff,
PCI_DMA_TODEVICE);
if (le32_to_cpu(desc->opts1) & LastFrag)
- dev_kfree_skb(skb);
+ dev_kfree_skb_any(skb);
cp->dev->stats.tx_dropped++;
}
}
@@ -1172,6 +1157,7 @@ static void cp_clean_rings (struct cp_private *cp)
memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
+ memset(cp->tx_opts, 0, sizeof(cp->tx_opts));
memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
@@ -1249,7 +1235,7 @@ static void cp_tx_timeout(struct net_device *dev)
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
- int rc;
+ int rc, i;
netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
cpr8(Cmd), cpr16(CpCmd),
@@ -1257,13 +1243,26 @@ static void cp_tx_timeout(struct net_device *dev)
spin_lock_irqsave(&cp->lock, flags);
+ netif_dbg(cp, tx_err, cp->dev, "TX ring head %d tail %d desc %x\n",
+ cp->tx_head, cp->tx_tail, cpr16(TxDmaOkLowDesc));
+ for (i = 0; i < CP_TX_RING_SIZE; i++) {
+ netif_dbg(cp, tx_err, cp->dev,
+ "TX slot %d @%p: %08x (%08x) %08x %llx %p\n",
+ i, &cp->tx_ring[i], le32_to_cpu(cp->tx_ring[i].opts1),
+ cp->tx_opts[i], le32_to_cpu(cp->tx_ring[i].opts2),
+ le64_to_cpu(cp->tx_ring[i].addr),
+ cp->tx_skb[i]);
+ }
+
cp_stop_hw(cp);
cp_clean_rings(cp);
rc = cp_init_rings(cp);
cp_start_hw(cp);
- cp_enable_irq(cp);
+ __cp_set_rx_mode(dev);
+ cpw16_f(IntrMask, cp_norx_intr_mask);
netif_wake_queue(dev);
+ napi_schedule_irqoff(&cp->napi);
spin_unlock_irqrestore(&cp->lock, flags);
}
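The 8139cp rework above computes the checksum/TSO bits of opts1 once per skb, before any descriptor is filled in, instead of repeating the ip_summed checks for every fragment, and it keeps a tx_opts[] copy so the DMA unmap length no longer depends on re-reading the live descriptor. The hoisted flag computation boils down to the following illustrative in-driver helper (DescOwn, LargeSend, MSSMask/MSSShift and IPCS/TCPCS/UDPCS are the driver's own descriptor bits, so this is a fragment, not stand-alone code):

static u32 cp_tx_csum_flags(struct sk_buff *skb)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u32 opts1 = DescOwn;

	if (mss) {
		opts1 |= LargeSend | ((mss & MSSMask) << MSSShift);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			opts1 |= IPCS | TCPCS;
		else if (ip->protocol == IPPROTO_UDP)
			opts1 |= IPCS | UDPCS;
		/* the driver WARNs and drops the skb for any other protocol */
	}

	/* caller ORs in eor | len | FirstFrag | LastFrag per descriptor */
	return opts1;
}
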
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 2b32e0c5a0b4..b4f21232019a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6081,7 +6081,7 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
- u16 rg_saw_cnt;
+ int rg_saw_cnt;
u32 data;
static const struct ephy_info e_info_8168h_1[] = {
{ 0x1e, 0x0800, 0x0001 },
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index b735fa22ac95..ebf6abc4853f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -161,11 +161,16 @@ int stmmac_mdio_reset(struct mii_bus *bus)
if (!gpio_request(reset_gpio, "mdio-reset")) {
gpio_direction_output(reset_gpio, active_low ? 1 : 0);
- udelay(data->delays[0]);
+ if (data->delays[0])
+ msleep(DIV_ROUND_UP(data->delays[0], 1000));
+
gpio_set_value(reset_gpio, active_low ? 0 : 1);
- udelay(data->delays[1]);
+ if (data->delays[1])
+ msleep(DIV_ROUND_UP(data->delays[1], 1000));
+
gpio_set_value(reset_gpio, active_low ? 1 : 0);
- udelay(data->delays[2]);
+ if (data->delays[2])
+ msleep(DIV_ROUND_UP(data->delays[2], 1000));
}
}
#endif
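The stmmac hunk above converts the MDIO reset delays from udelay() to msleep(): the delays come from the device tree in microseconds, and busy-waiting for values in the millisecond range wastes CPU, so they are rounded up to whole milliseconds and slept instead. As a tiny helper sketch (delay_us standing in for data->delays[n]):

#include <linux/delay.h>
#include <linux/kernel.h>

static void mdio_reset_delay(u32 delay_us)
{
	if (!delay_us)
		return;
	/* e.g. 1500 us rounds up to 2 ms */
	msleep(DIV_ROUND_UP(delay_us, 1000));
}
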
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 53fe200e0b79..cc106d892e29 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1756,7 +1756,8 @@ static const struct net_device_ops vnet_ops = {
#endif
};
-static struct vnet *vnet_new(const u64 *local_mac)
+static struct vnet *vnet_new(const u64 *local_mac,
+ struct vio_dev *vdev)
{
struct net_device *dev;
struct vnet *vp;
@@ -1790,6 +1791,8 @@ static struct vnet *vnet_new(const u64 *local_mac)
NETIF_F_HW_CSUM | NETIF_F_SG;
dev->features = dev->hw_features;
+ SET_NETDEV_DEV(dev, &vdev->dev);
+
err = register_netdev(dev);
if (err) {
pr_err("Cannot register net device, aborting\n");
@@ -1808,7 +1811,8 @@ err_out_free_dev:
return ERR_PTR(err);
}
-static struct vnet *vnet_find_or_create(const u64 *local_mac)
+static struct vnet *vnet_find_or_create(const u64 *local_mac,
+ struct vio_dev *vdev)
{
struct vnet *iter, *vp;
@@ -1821,7 +1825,7 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
}
}
if (!vp)
- vp = vnet_new(local_mac);
+ vp = vnet_new(local_mac, vdev);
mutex_unlock(&vnet_list_mutex);
return vp;
@@ -1848,7 +1852,8 @@ static void vnet_cleanup(void)
static const char *local_mac_prop = "local-mac-address";
static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
- u64 port_node)
+ u64 port_node,
+ struct vio_dev *vdev)
{
const u64 *local_mac = NULL;
u64 a;
@@ -1869,7 +1874,7 @@ static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
if (!local_mac)
return ERR_PTR(-ENODEV);
- return vnet_find_or_create(local_mac);
+ return vnet_find_or_create(local_mac, vdev);
}
static struct ldc_channel_config vnet_ldc_cfg = {
@@ -1923,7 +1928,7 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
hp = mdesc_grab();
- vp = vnet_find_parent(hp, vdev->mp);
+ vp = vnet_find_parent(hp, vdev->mp, vdev);
if (IS_ERR(vp)) {
pr_err("Cannot find port parent vnet\n");
err = PTR_ERR(vp);
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 1a5aca55ea9f..9f9832f0dea9 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -291,13 +291,6 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
interface_list) {
struct netcp_intf_modpriv *intf_modpriv;
- /* If interface not registered then register now */
- if (!netcp_intf->netdev_registered)
- ret = netcp_register_interface(netcp_intf);
-
- if (ret)
- return -ENODEV;
-
intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
GFP_KERNEL);
if (!intf_modpriv)
@@ -306,6 +299,11 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
interface = of_parse_phandle(netcp_intf->node_interface,
module->name, 0);
+ if (!interface) {
+ devm_kfree(dev, intf_modpriv);
+ continue;
+ }
+
intf_modpriv->netcp_priv = netcp_intf;
intf_modpriv->netcp_module = module;
list_add_tail(&intf_modpriv->intf_list,
@@ -323,6 +321,18 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
continue;
}
}
+
+ /* Now register the interface with netdev */
+ list_for_each_entry(netcp_intf,
+ &netcp_device->interface_head,
+ interface_list) {
+ /* If interface not registered then register now */
+ if (!netcp_intf->netdev_registered) {
+ ret = netcp_register_interface(netcp_intf);
+ if (ret)
+ return -ENODEV;
+ }
+ }
return 0;
}
@@ -357,7 +367,6 @@ int netcp_register_module(struct netcp_module *module)
if (ret < 0)
goto fail;
}
-
mutex_unlock(&netcp_modules_lock);
return 0;
@@ -796,7 +805,7 @@ static void netcp_rxpool_free(struct netcp_intf *netcp)
netcp->rx_pool = NULL;
}
-static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
+static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
{
struct knav_dma_desc *hwdesc;
unsigned int buf_len, dma_sz;
@@ -810,7 +819,7 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
hwdesc = knav_pool_desc_get(netcp->rx_pool);
if (IS_ERR_OR_NULL(hwdesc)) {
dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
- return;
+ return -ENOMEM;
}
if (likely(fdq == 0)) {
@@ -862,25 +871,26 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
&dma_sz);
knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
- return;
+ return 0;
fail:
knav_pool_desc_put(netcp->rx_pool, hwdesc);
+ return -ENOMEM;
}
/* Refill Rx FDQ with descriptors & attached buffers */
static void netcp_rxpool_refill(struct netcp_intf *netcp)
{
u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
- int i;
+ int i, ret = 0;
/* Calculate the FDQ deficit and refill */
for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
fdq_deficit[i] = netcp->rx_queue_depths[i] -
knav_queue_get_count(netcp->rx_fdq[i]);
- while (fdq_deficit[i]--)
- netcp_allocate_rx_buf(netcp, i);
+ while (fdq_deficit[i]-- && !ret)
+ ret = netcp_allocate_rx_buf(netcp, i);
} /* end for fdqs */
}
@@ -893,12 +903,12 @@ static int netcp_rx_poll(struct napi_struct *napi, int budget)
packets = netcp_process_rx_packets(netcp, budget);
+ netcp_rxpool_refill(netcp);
if (packets < budget) {
napi_complete(&netcp->rx_napi);
knav_queue_enable_notify(netcp->rx_queue);
}
- netcp_rxpool_refill(netcp);
return packets;
}
@@ -1384,7 +1394,6 @@ static void netcp_addr_sweep_del(struct netcp_intf *netcp)
continue;
dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
naddr->addr, naddr->type);
- mutex_lock(&netcp_modules_lock);
for_each_module(netcp, priv) {
module = priv->netcp_module;
if (!module->del_addr)
@@ -1393,7 +1402,6 @@ static void netcp_addr_sweep_del(struct netcp_intf *netcp)
naddr);
WARN_ON(error);
}
- mutex_unlock(&netcp_modules_lock);
netcp_addr_del(netcp, naddr);
}
}
@@ -1410,7 +1418,7 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp)
continue;
dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
naddr->addr, naddr->type);
- mutex_lock(&netcp_modules_lock);
+
for_each_module(netcp, priv) {
module = priv->netcp_module;
if (!module->add_addr)
@@ -1418,7 +1426,6 @@ static void netcp_addr_sweep_add(struct netcp_intf *netcp)
error = module->add_addr(priv->module_priv, naddr);
WARN_ON(error);
}
- mutex_unlock(&netcp_modules_lock);
}
}
@@ -1432,6 +1439,7 @@ static void netcp_set_rx_mode(struct net_device *ndev)
ndev->flags & IFF_ALLMULTI ||
netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);
+ spin_lock(&netcp->lock);
/* first clear all marks */
netcp_addr_clear_mark(netcp);
@@ -1450,6 +1458,7 @@ static void netcp_set_rx_mode(struct net_device *ndev)
/* finally sweep and callout into modules */
netcp_addr_sweep_del(netcp);
netcp_addr_sweep_add(netcp);
+ spin_unlock(&netcp->lock);
}
static void netcp_free_navigator_resources(struct netcp_intf *netcp)
@@ -1614,7 +1623,6 @@ static int netcp_ndo_open(struct net_device *ndev)
goto fail;
}
- mutex_lock(&netcp_modules_lock);
for_each_module(netcp, intf_modpriv) {
module = intf_modpriv->netcp_module;
if (module->open) {
@@ -1625,7 +1633,6 @@ static int netcp_ndo_open(struct net_device *ndev)
}
}
}
- mutex_unlock(&netcp_modules_lock);
napi_enable(&netcp->rx_napi);
napi_enable(&netcp->tx_napi);
@@ -1642,7 +1649,6 @@ fail_open:
if (module->close)
module->close(intf_modpriv->module_priv, ndev);
}
- mutex_unlock(&netcp_modules_lock);
fail:
netcp_free_navigator_resources(netcp);
@@ -1666,7 +1672,6 @@ static int netcp_ndo_stop(struct net_device *ndev)
napi_disable(&netcp->rx_napi);
napi_disable(&netcp->tx_napi);
- mutex_lock(&netcp_modules_lock);
for_each_module(netcp, intf_modpriv) {
module = intf_modpriv->netcp_module;
if (module->close) {
@@ -1675,7 +1680,6 @@ static int netcp_ndo_stop(struct net_device *ndev)
dev_err(netcp->ndev_dev, "Close failed\n");
}
}
- mutex_unlock(&netcp_modules_lock);
/* Recycle Rx descriptors from completion queue */
netcp_empty_rx_queue(netcp);
@@ -1703,7 +1707,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev,
if (!netif_running(ndev))
return -EINVAL;
- mutex_lock(&netcp_modules_lock);
for_each_module(netcp, intf_modpriv) {
module = intf_modpriv->netcp_module;
if (!module->ioctl)
@@ -1719,7 +1722,6 @@ static int netcp_ndo_ioctl(struct net_device *ndev,
}
out:
- mutex_unlock(&netcp_modules_lock);
return (ret == 0) ? 0 : err;
}
@@ -1754,11 +1756,12 @@ static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
struct netcp_intf *netcp = netdev_priv(ndev);
struct netcp_intf_modpriv *intf_modpriv;
struct netcp_module *module;
+ unsigned long flags;
int err = 0;
dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);
- mutex_lock(&netcp_modules_lock);
+ spin_lock_irqsave(&netcp->lock, flags);
for_each_module(netcp, intf_modpriv) {
module = intf_modpriv->netcp_module;
if ((module->add_vid) && (vid != 0)) {
@@ -1770,7 +1773,8 @@ static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
}
}
}
- mutex_unlock(&netcp_modules_lock);
+ spin_unlock_irqrestore(&netcp->lock, flags);
+
return err;
}
@@ -1779,11 +1783,12 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
struct netcp_intf *netcp = netdev_priv(ndev);
struct netcp_intf_modpriv *intf_modpriv;
struct netcp_module *module;
+ unsigned long flags;
int err = 0;
dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);
- mutex_lock(&netcp_modules_lock);
+ spin_lock_irqsave(&netcp->lock, flags);
for_each_module(netcp, intf_modpriv) {
module = intf_modpriv->netcp_module;
if (module->del_vid) {
@@ -1795,7 +1800,7 @@ static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
}
}
}
- mutex_unlock(&netcp_modules_lock);
+ spin_unlock_irqrestore(&netcp->lock, flags);
return err;
}
@@ -2040,7 +2045,6 @@ static int netcp_probe(struct platform_device *pdev)
struct device_node *child, *interfaces;
struct netcp_device *netcp_device;
struct device *dev = &pdev->dev;
- struct netcp_module *module;
int ret;
if (!node) {
@@ -2087,14 +2091,6 @@ static int netcp_probe(struct platform_device *pdev)
/* Add the device instance to the list */
list_add_tail(&netcp_device->device_list, &netcp_devices);
- /* Probe & attach any modules already registered */
- mutex_lock(&netcp_modules_lock);
- for_each_netcp_module(module) {
- ret = netcp_module_probe(netcp_device, module);
- if (ret < 0)
- dev_err(dev, "module(%s) probe failed\n", module->name);
- }
- mutex_unlock(&netcp_modules_lock);
return 0;
probe_quit_interface:
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 6f16d6aaf7b7..6bff8d82ceab 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -77,6 +77,7 @@
#define GBENU_ALE_OFFSET 0x1e000
#define GBENU_HOST_PORT_NUM 0
#define GBENU_NUM_ALE_ENTRIES 1024
+#define GBENU_SGMII_MODULE_SIZE 0x100
/* 10G Ethernet SS defines */
#define XGBE_MODULE_NAME "netcp-xgbe"
@@ -149,8 +150,8 @@
#define XGBE_STATS2_MODULE 2
/* s: 0-based slave_port */
-#define SGMII_BASE(s) \
- (((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs)
+#define SGMII_BASE(d, s) \
+ (((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
#define GBE_TX_QUEUE 648
#define GBE_TXHOOK_ORDER 0
@@ -1997,13 +1998,8 @@ static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
return;
if (!SLAVE_LINK_IS_XGMII(slave)) {
- if (gbe_dev->ss_version == GBE_SS_VERSION_14)
- sgmii_link_state =
- netcp_sgmii_get_port_link(SGMII_BASE(sp), sp);
- else
- sgmii_link_state =
- netcp_sgmii_get_port_link(
- gbe_dev->sgmii_port_regs, sp);
+ sgmii_link_state =
+ netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
}
phy_link_state = gbe_phy_link_status(slave);
@@ -2100,17 +2096,11 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
static void gbe_sgmii_rtreset(struct gbe_priv *priv,
struct gbe_slave *slave, bool set)
{
- void __iomem *sgmii_port_regs;
-
if (SLAVE_LINK_IS_XGMII(slave))
return;
- if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
- sgmii_port_regs = priv->sgmii_port34_regs;
- else
- sgmii_port_regs = priv->sgmii_port_regs;
-
- netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set);
+ netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
+ slave->slave_num, set);
}
static void gbe_slave_stop(struct gbe_intf *intf)
@@ -2136,17 +2126,12 @@ static void gbe_slave_stop(struct gbe_intf *intf)
static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
{
- void __iomem *sgmii_port_regs;
-
- sgmii_port_regs = priv->sgmii_port_regs;
- if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
- sgmii_port_regs = priv->sgmii_port34_regs;
+ if (SLAVE_LINK_IS_XGMII(slave))
+ return;
- if (!SLAVE_LINK_IS_XGMII(slave)) {
- netcp_sgmii_reset(sgmii_port_regs, slave->slave_num);
- netcp_sgmii_config(sgmii_port_regs, slave->slave_num,
- slave->link_interface);
- }
+ netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
+ netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
+ slave->link_interface);
}
static int gbe_slave_open(struct gbe_intf *gbe_intf)
@@ -2997,6 +2982,14 @@ static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
gbe_dev->switch_regs = regs;
gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
+
+ /* Although sgmii modules are mem mapped to one contiguous
+ * region on GBENU devices, setting sgmii_port34_regs allows
+ * consistent code when accessing sgmii api
+ */
+ gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
+ (2 * GBENU_SGMII_MODULE_SIZE);
+
gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
for (i = 0; i < (gbe_dev->max_num_ports); i++)
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index 2f1264b882b9..d3d094742a7e 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -17,7 +17,7 @@ if NET_VENDOR_VIA
config VIA_RHINE
tristate "VIA Rhine support"
- depends on (PCI || OF_IRQ)
+ depends on PCI || (OF_IRQ && GENERIC_PCI_IOMAP)
depends on HAS_DMA
select CRC32
select MII
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 6008eee01a33..cf468c87ce57 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -828,6 +828,8 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
if (!phydev)
dev_info(dev,
"MDIO of the phy is not registered yet\n");
+ else
+ put_device(&phydev->dev);
return 0;
}
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index b5f4a78da828..2d3848c9dc35 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -1011,11 +1011,11 @@ static void fjes_hw_update_zone_task(struct work_struct *work)
set_bit(epidx, &irq_bit);
break;
}
- }
-
- hw->ep_shm_info[epidx].es_status = info[epidx].es_status;
- hw->ep_shm_info[epidx].zone = info[epidx].zone;
+ hw->ep_shm_info[epidx].es_status =
+ info[epidx].es_status;
+ hw->ep_shm_info[epidx].zone = info[epidx].zone;
+ }
break;
}
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index da3259ce7c8d..8f5c02eed47d 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -126,6 +126,8 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
__be32 addr;
int err;
+ iph = ip_hdr(skb); /* outer IP header... */
+
if (gs->collect_md) {
static u8 zero_vni[3];
@@ -133,7 +135,6 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
addr = 0;
} else {
vni = gnvh->vni;
- iph = ip_hdr(skb); /* Still outer IP header... */
addr = iph->saddr;
}
@@ -178,7 +179,6 @@ static void geneve_rx(struct geneve_sock *gs, struct sk_buff *skb)
skb_reset_network_header(skb);
- iph = ip_hdr(skb); /* Now inner IP header... */
err = IP_ECN_decapsulate(iph, skb);
if (unlikely(err)) {
@@ -626,6 +626,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
struct geneve_sock *gs = geneve->sock;
struct ip_tunnel_info *info = NULL;
struct rtable *rt = NULL;
+ const struct iphdr *iip; /* interior IP header */
struct flowi4 fl4;
__u8 tos, ttl;
__be16 sport;
@@ -653,6 +654,8 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
skb_reset_mac_header(skb);
+ iip = ip_hdr(skb);
+
if (info) {
const struct ip_tunnel_key *key = &info->key;
u8 *opts = NULL;
@@ -668,19 +671,16 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(err))
goto err;
- tos = key->tos;
+ tos = ip_tunnel_ecn_encap(key->tos, iip, skb);
ttl = key->ttl;
df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
} else {
- const struct iphdr *iip; /* interior IP header */
-
udp_csum = false;
err = geneve_build_skb(rt, skb, 0, geneve->vni,
0, NULL, udp_csum);
if (unlikely(err))
goto err;
- iip = ip_hdr(skb);
tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, iip, skb);
ttl = geneve->ttl;
if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
@@ -748,12 +748,8 @@ static void geneve_setup(struct net_device *dev)
dev->features |= NETIF_F_RXCSUM;
dev->features |= NETIF_F_GSO_SOFTWARE;
- dev->vlan_features = dev->features;
- dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
-
dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
netif_keep_dst(dev);
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
@@ -819,7 +815,7 @@ static struct geneve_dev *geneve_find_dev(struct geneve_net *gn,
static int geneve_configure(struct net *net, struct net_device *dev,
__be32 rem_addr, __u32 vni, __u8 ttl, __u8 tos,
- __u16 dst_port, bool metadata)
+ __be16 dst_port, bool metadata)
{
struct geneve_net *gn = net_generic(net, geneve_net_id);
struct geneve_dev *t, *geneve = netdev_priv(dev);
@@ -844,10 +840,10 @@ static int geneve_configure(struct net *net, struct net_device *dev,
geneve->ttl = ttl;
geneve->tos = tos;
- geneve->dst_port = htons(dst_port);
+ geneve->dst_port = dst_port;
geneve->collect_md = metadata;
- t = geneve_find_dev(gn, htons(dst_port), rem_addr, geneve->vni,
+ t = geneve_find_dev(gn, dst_port, rem_addr, geneve->vni,
&tun_on_same_port, &tun_collect_md);
if (t)
return -EBUSY;
@@ -871,7 +867,7 @@ static int geneve_configure(struct net *net, struct net_device *dev,
static int geneve_newlink(struct net *net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
- __u16 dst_port = GENEVE_UDP_PORT;
+ __be16 dst_port = htons(GENEVE_UDP_PORT);
__u8 ttl = 0, tos = 0;
bool metadata = false;
__be32 rem_addr;
@@ -890,7 +886,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
if (data[IFLA_GENEVE_PORT])
- dst_port = nla_get_u16(data[IFLA_GENEVE_PORT]);
+ dst_port = nla_get_be16(data[IFLA_GENEVE_PORT]);
if (data[IFLA_GENEVE_COLLECT_METADATA])
metadata = true;
@@ -913,7 +909,7 @@ static size_t geneve_get_size(const struct net_device *dev)
nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */
nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */
- nla_total_size(sizeof(__u16)) + /* IFLA_GENEVE_PORT */
+ nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */
nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */
0;
}
@@ -935,7 +931,7 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
goto nla_put_failure;
- if (nla_put_u16(skb, IFLA_GENEVE_PORT, ntohs(geneve->dst_port)))
+ if (nla_put_be16(skb, IFLA_GENEVE_PORT, geneve->dst_port))
goto nla_put_failure;
if (geneve->collect_md) {
@@ -975,7 +971,7 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name,
if (IS_ERR(dev))
return dev;
- err = geneve_configure(net, dev, 0, 0, 0, 0, dst_port, true);
+ err = geneve_configure(net, dev, 0, 0, 0, 0, htons(dst_port), true);
if (err) {
free_netdev(dev);
return ERR_PTR(err);
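The geneve changes above keep the destination UDP port as __be16 from netlink all the way down, converting with htons() only where a host-order literal enters the picture, so the endianness annotations stay checkable with sparse. A reduced sketch of that convention (struct and function names are illustrative):

#include <linux/types.h>
#include <net/netlink.h>

struct tunnel_cfg {
	__be16 dst_port;		/* always network byte order */
};

static void tunnel_set_port(struct tunnel_cfg *cfg, const struct nlattr *attr)
{
	if (attr)
		cfg->dst_port = nla_get_be16(attr);	/* already __be16 */
	else
		cfg->dst_port = htons(6081);		/* GENEVE default port */
}
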
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 58ae11a14bb6..64bb44d5d867 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -1031,7 +1031,6 @@ static void ali_ircc_fir_change_speed(struct ali_ircc_cb *priv, __u32 baud)
static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
{
struct ali_ircc_cb *self = priv;
- unsigned long flags;
int iobase;
int fcr; /* FIFO control reg */
int lcr; /* Line control reg */
@@ -1061,8 +1060,6 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
/* Update accounting for new speed */
self->io.speed = speed;
- spin_lock_irqsave(&self->lock, flags);
-
divisor = 115200/speed;
fcr = UART_FCR_ENABLE_FIFO;
@@ -1089,9 +1086,6 @@ static void ali_ircc_sir_change_speed(struct ali_ircc_cb *priv, __u32 speed)
/* without this, the connection will be broken after come back from FIR speed,
but with this, the SIR connection is harder to established */
outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase+UART_MCR);
-
- spin_unlock_irqrestore(&self->lock, flags);
-
}
static void ali_ircc_change_dongle_speed(struct ali_ircc_cb *priv, int speed)
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index edd77342773a..248478c6f6e4 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -1111,10 +1111,10 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
return 0;
case TUNSETSNDBUF:
- if (get_user(u, up))
+ if (get_user(s, sp))
return -EFAULT;
- q->sk.sk_sndbuf = u;
+ q->sk.sk_sndbuf = s;
return 0;
case TUNGETVNETHDRSZ:
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
index fb1299c6326e..e23bf5b90e17 100644
--- a/drivers/net/phy/fixed_phy.c
+++ b/drivers/net/phy/fixed_phy.c
@@ -220,7 +220,7 @@ int fixed_phy_update_state(struct phy_device *phydev,
struct fixed_mdio_bus *fmb = &platform_fmb;
struct fixed_phy *fp;
- if (!phydev || !phydev->bus)
+ if (!phydev || phydev->bus != fmb->mii_bus)
return -EINVAL;
list_for_each_entry(fp, &fmb->phys, node) {
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e6897b6a8a53..5de8d5827536 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -785,6 +785,7 @@ static int marvell_read_status(struct phy_device *phydev)
int adv;
int err;
int lpa;
+ int lpagb;
int status = 0;
/* Update the link, but return if there
@@ -802,10 +803,17 @@ static int marvell_read_status(struct phy_device *phydev)
if (lpa < 0)
return lpa;
+ lpagb = phy_read(phydev, MII_STAT1000);
+ if (lpagb < 0)
+ return lpagb;
+
adv = phy_read(phydev, MII_ADVERTISE);
if (adv < 0)
return adv;
+ phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb) |
+ mii_lpa_to_ethtool_lpa_t(lpa);
+
lpa &= adv;
if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
@@ -853,6 +861,7 @@ static int marvell_read_status(struct phy_device *phydev)
phydev->speed = SPEED_10;
phydev->pause = phydev->asym_pause = 0;
+ phydev->lp_advertising = 0;
}
return 0;
diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
index 6a52a7f0fa0d..4bde5e728fe0 100644
--- a/drivers/net/phy/mdio-bcm-unimac.c
+++ b/drivers/net/phy/mdio-bcm-unimac.c
@@ -244,6 +244,7 @@ static const struct of_device_id unimac_mdio_ids[] = {
{ .compatible = "brcm,unimac-mdio", },
{ /* sentinel */ },
};
+MODULE_DEVICE_TABLE(of, unimac_mdio_ids);
static struct platform_driver unimac_mdio_driver = {
.driver = {
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 7dc21e56a7aa..3bc9f03349f3 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -261,6 +261,7 @@ static const struct of_device_id mdio_gpio_of_match[] = {
{ .compatible = "virtual,mdio-gpio", },
{ /* sentinel */ }
};
+MODULE_DEVICE_TABLE(of, mdio_gpio_of_match);
static struct platform_driver mdio_gpio_driver = {
.probe = mdio_gpio_probe,
diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
index 4d4d25efc1e1..280c7c311f72 100644
--- a/drivers/net/phy/mdio-mux.c
+++ b/drivers/net/phy/mdio-mux.c
@@ -113,18 +113,18 @@ int mdio_mux_init(struct device *dev,
if (!parent_bus_node)
return -ENODEV;
- parent_bus = of_mdio_find_bus(parent_bus_node);
- if (parent_bus == NULL) {
- ret_val = -EPROBE_DEFER;
- goto err_parent_bus;
- }
-
pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL);
if (pb == NULL) {
ret_val = -ENOMEM;
goto err_parent_bus;
}
+ parent_bus = of_mdio_find_bus(parent_bus_node);
+ if (parent_bus == NULL) {
+ ret_val = -EPROBE_DEFER;
+ goto err_parent_bus;
+ }
+
pb->switch_data = data;
pb->switch_fn = switch_fn;
pb->current_child = -1;
@@ -173,6 +173,10 @@ int mdio_mux_init(struct device *dev,
dev_info(dev, "Version " DRV_VERSION "\n");
return 0;
}
+
+ /* balance the reference of_mdio_find_bus() took */
+ put_device(&pb->mii_bus->dev);
+
err_parent_bus:
of_node_put(parent_bus_node);
return ret_val;
@@ -189,6 +193,9 @@ void mdio_mux_uninit(void *mux_handle)
mdiobus_free(cb->mii_bus);
cb = cb->next;
}
+
+ /* balance the reference of_mdio_find_bus() in mdio_mux_init() took */
+ put_device(&pb->mii_bus->dev);
}
EXPORT_SYMBOL_GPL(mdio_mux_uninit);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 02a4615b65f8..12f44c53cc8e 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -167,7 +167,9 @@ static int of_mdio_bus_match(struct device *dev, const void *mdio_bus_np)
* of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
* @mdio_bus_np: Pointer to the mii_bus.
*
- * Returns a pointer to the mii_bus, or NULL if none found.
+ * Returns a reference to the mii_bus, or NULL if none found. The
+ * embedded struct device will have its reference count incremented,
+ * and this must be put once the bus is finished with.
*
* Because the association of a device_node and mii_bus is made via
* of_mdiobus_register(), the mii_bus cannot be found before it is
@@ -234,15 +236,18 @@ static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
#endif
/**
- * mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
+ * __mdiobus_register - bring up all the PHYs on a given bus and attach them to bus
* @bus: target mii_bus
+ * @owner: module containing bus accessor functions
*
* Description: Called by a bus driver to bring up all the PHYs
- * on a given bus, and attach them to the bus.
+ * on a given bus, and attach them to the bus. Drivers should use
+ * mdiobus_register() rather than __mdiobus_register() unless they
+ * need to pass a specific owner module.
*
* Returns 0 on success or < 0 on error.
*/
-int mdiobus_register(struct mii_bus *bus)
+int __mdiobus_register(struct mii_bus *bus, struct module *owner)
{
int i, err;
@@ -253,6 +258,7 @@ int mdiobus_register(struct mii_bus *bus)
BUG_ON(bus->state != MDIOBUS_ALLOCATED &&
bus->state != MDIOBUS_UNREGISTERED);
+ bus->owner = owner;
bus->dev.parent = bus->parent;
bus->dev.class = &mdio_bus_class;
bus->dev.groups = NULL;
@@ -288,13 +294,16 @@ int mdiobus_register(struct mii_bus *bus)
error:
while (--i >= 0) {
- if (bus->phy_map[i])
- device_unregister(&bus->phy_map[i]->dev);
+ struct phy_device *phydev = bus->phy_map[i];
+ if (phydev) {
+ phy_device_remove(phydev);
+ phy_device_free(phydev);
+ }
}
device_del(&bus->dev);
return err;
}
-EXPORT_SYMBOL(mdiobus_register);
+EXPORT_SYMBOL(__mdiobus_register);
void mdiobus_unregister(struct mii_bus *bus)
{
@@ -304,9 +313,11 @@ void mdiobus_unregister(struct mii_bus *bus)
bus->state = MDIOBUS_UNREGISTERED;
for (i = 0; i < PHY_MAX_ADDR; i++) {
- if (bus->phy_map[i])
- device_unregister(&bus->phy_map[i]->dev);
- bus->phy_map[i] = NULL;
+ struct phy_device *phydev = bus->phy_map[i];
+ if (phydev) {
+ phy_device_remove(phydev);
+ phy_device_free(phydev);
+ }
}
device_del(&bus->dev);
}
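The mdio_bus change above records the owning module at registration time, and the exported symbol becomes __mdiobus_register(). Existing callers keep working because the usual header-side idiom (presumably mirrored in include/linux/phy.h, as elsewhere in the kernel) wraps it so the caller's module is captured automatically:

/* header-side wrapper: every caller passes its own module implicitly */
int __mdiobus_register(struct mii_bus *bus, struct module *owner);
#define mdiobus_register(bus) __mdiobus_register(bus, THIS_MODULE)
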
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index c0f211127274..f761288abe66 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -384,6 +384,24 @@ int phy_device_register(struct phy_device *phydev)
EXPORT_SYMBOL(phy_device_register);
/**
+ * phy_device_remove - Remove a previously registered phy device from the MDIO bus
+ * @phydev: phy_device structure to remove
+ *
+ * This doesn't free the phy_device itself, it merely reverses the effects
+ * of phy_device_register(). Use phy_device_free() to free the device
+ * after calling this function.
+ */
+void phy_device_remove(struct phy_device *phydev)
+{
+ struct mii_bus *bus = phydev->bus;
+ int addr = phydev->addr;
+
+ device_del(&phydev->dev);
+ bus->phy_map[addr] = NULL;
+}
+EXPORT_SYMBOL(phy_device_remove);
+
+/**
* phy_find_first - finds the first PHY device on the bus
* @bus: the target MII bus
*/
@@ -578,14 +596,22 @@ EXPORT_SYMBOL(phy_init_hw);
* generic driver is used. The phy_device is given a ptr to
* the attaching device, and given a callback for link status
* change. The phy_device is returned to the attaching driver.
+ * This function takes a reference on the phy device.
*/
int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
u32 flags, phy_interface_t interface)
{
+ struct mii_bus *bus = phydev->bus;
struct device *d = &phydev->dev;
- struct module *bus_module;
int err;
+ if (!try_module_get(bus->owner)) {
+ dev_err(&dev->dev, "failed to get the bus module\n");
+ return -EIO;
+ }
+
+ get_device(d);
+
/* Assume that if there is no driver, that it doesn't
* exist, and we should use the genphy driver.
*/
@@ -600,20 +626,13 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
err = device_bind_driver(d);
if (err)
- return err;
+ goto error;
}
if (phydev->attached_dev) {
dev_err(&dev->dev, "PHY already attached\n");
- return -EBUSY;
- }
-
- /* Increment the bus module reference count */
- bus_module = phydev->bus->dev.driver ?
- phydev->bus->dev.driver->owner : NULL;
- if (!try_module_get(bus_module)) {
- dev_err(&dev->dev, "failed to get the bus module\n");
- return -EIO;
+ err = -EBUSY;
+ goto error;
}
phydev->attached_dev = dev;
@@ -636,6 +655,11 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
phy_resume(phydev);
return err;
+
+error:
+ put_device(d);
+ module_put(bus->owner);
+ return err;
}
EXPORT_SYMBOL(phy_attach_direct);
@@ -677,14 +701,15 @@ EXPORT_SYMBOL(phy_attach);
/**
* phy_detach - detach a PHY device from its network device
* @phydev: target phy_device struct
+ *
+ * This detaches the phy device from its network device and the phy
+ * driver, and drops the reference count taken in phy_attach_direct().
*/
void phy_detach(struct phy_device *phydev)
{
+ struct mii_bus *bus;
int i;
- if (phydev->bus->dev.driver)
- module_put(phydev->bus->dev.driver->owner);
-
phydev->attached_dev->phydev = NULL;
phydev->attached_dev = NULL;
phy_suspend(phydev);
@@ -700,6 +725,15 @@ void phy_detach(struct phy_device *phydev)
break;
}
}
+
+ /*
+ * The phydev might go away on the put_device() below, so avoid
+ * a use-after-free bug by reading the underlying bus first.
+ */
+ bus = phydev->bus;
+
+ put_device(&phydev->dev);
+ module_put(bus->owner);
}
EXPORT_SYMBOL(phy_detach);
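Taken together, the phy_device.c hunks above give phy_attach_direct() and phy_detach() a symmetric reference discipline: attach pins the bus-owner module and takes a device reference on the PHY, detach drops both, caching phydev->bus first because the put_device() may drop the final reference. Reduced to its core (illustrative helpers, not the full attach/detach logic):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/phy.h>

static int pin_phy_sketch(struct phy_device *phydev)
{
	if (!try_module_get(phydev->bus->owner))
		return -EIO;		/* bus driver is going away */
	get_device(&phydev->dev);
	return 0;
}

static void unpin_phy_sketch(struct phy_device *phydev)
{
	struct mii_bus *bus = phydev->bus;	/* read before the final put */

	put_device(&phydev->dev);		/* may free phydev */
	module_put(bus->owner);
}
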
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 17cad185169d..76cad712ddb2 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -66,7 +66,6 @@
#define PHY_ID_VSC8244 0x000fc6c0
#define PHY_ID_VSC8514 0x00070670
#define PHY_ID_VSC8574 0x000704a0
-#define PHY_ID_VSC8641 0x00070431
#define PHY_ID_VSC8662 0x00070660
#define PHY_ID_VSC8221 0x000fc550
#define PHY_ID_VSC8211 0x000fc4b0
@@ -273,18 +272,6 @@ static struct phy_driver vsc82xx_driver[] = {
.config_intr = &vsc82xx_config_intr,
.driver = { .owner = THIS_MODULE,},
}, {
- .phy_id = PHY_ID_VSC8641,
- .name = "Vitesse VSC8641",
- .phy_id_mask = 0x000ffff0,
- .features = PHY_GBIT_FEATURES,
- .flags = PHY_HAS_INTERRUPT,
- .config_init = &vsc824x_config_init,
- .config_aneg = &vsc82x4_config_aneg,
- .read_status = &genphy_read_status,
- .ack_interrupt = &vsc824x_ack_interrupt,
- .config_intr = &vsc82xx_config_intr,
- .driver = { .owner = THIS_MODULE,},
-}, {
.phy_id = PHY_ID_VSC8662,
.name = "Vitesse VSC8662",
.phy_id_mask = 0x000ffff0,
@@ -331,7 +318,6 @@ static struct mdio_device_id __maybe_unused vitesse_tbl[] = {
{ PHY_ID_VSC8244, 0x000fffc0 },
{ PHY_ID_VSC8514, 0x000ffff0 },
{ PHY_ID_VSC8574, 0x000ffff0 },
- { PHY_ID_VSC8641, 0x000ffff0 },
{ PHY_ID_VSC8662, 0x000ffff0 },
{ PHY_ID_VSC8221, 0x000ffff0 },
{ PHY_ID_VSC8211, 0x000ffff0 },
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 0481daf9201a..ed00446759b2 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2755,6 +2755,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
*/
dev_net_set(dev, net);
+ rtnl_lock();
mutex_lock(&pn->all_ppp_mutex);
if (unit < 0) {
@@ -2785,7 +2786,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
ppp->file.index = unit;
sprintf(dev->name, "ppp%d", unit);
- ret = register_netdev(dev);
+ ret = register_netdevice(dev);
if (ret != 0) {
unit_put(&pn->units_idr, unit);
netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n",
@@ -2797,6 +2798,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit,
atomic_inc(&ppp_unit_count);
mutex_unlock(&pn->all_ppp_mutex);
+ rtnl_unlock();
*retp = 0;
return ppp;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 1610b79ae386..fbb9325d1f6e 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -583,4 +583,15 @@ config USB_VL600
http://ubuntuforums.org/showpost.php?p=10589647&postcount=17
+config USB_NET_CH9200
+ tristate "QingHeng CH9200 USB ethernet support"
+ depends on USB_USBNET
+ select MII
+ help
+ Choose this option if you have a USB ethernet adapter with a QinHeng
+ CH9200 chipset.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ch9200.
+
endif # USB_NET_DRIVERS
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index cf6a0e610a7f..b5f04068dbe4 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -38,4 +38,4 @@ obj-$(CONFIG_USB_NET_HUAWEI_CDC_NCM) += huawei_cdc_ncm.o
obj-$(CONFIG_USB_VL600) += lg-vl600.o
obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o
obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o
-
+obj-$(CONFIG_USB_NET_CH9200) += ch9200.o
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
new file mode 100644
index 000000000000..5e151e6a3e09
--- /dev/null
+++ b/drivers/net/usb/ch9200.c
@@ -0,0 +1,432 @@
+/*
+ * USB 10M/100M ethernet adapter
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/usbnet.h>
+#include <linux/slab.h>
+
+#define CH9200_VID 0x1A86
+#define CH9200_PID_E092 0xE092
+
+#define CTRL_TIMEOUT_MS 1000
+
+#define CONTROL_TIMEOUT_MS 1000
+
+#define REQUEST_READ 0x0E
+#define REQUEST_WRITE 0x0F
+
+/* Address space:
+ * 00-63 : MII
+ * 64-128: MAC
+ *
+ * Note: all accesses must be 16-bit
+ */
+
+#define MAC_REG_CTRL 64
+#define MAC_REG_STATUS 66
+#define MAC_REG_INTERRUPT_MASK 68
+#define MAC_REG_PHY_COMMAND 70
+#define MAC_REG_PHY_DATA 72
+#define MAC_REG_STATION_L 74
+#define MAC_REG_STATION_M 76
+#define MAC_REG_STATION_H 78
+#define MAC_REG_HASH_L 80
+#define MAC_REG_HASH_M1 82
+#define MAC_REG_HASH_M2 84
+#define MAC_REG_HASH_H 86
+#define MAC_REG_THRESHOLD 88
+#define MAC_REG_FIFO_DEPTH 90
+#define MAC_REG_PAUSE 92
+#define MAC_REG_FLOW_CONTROL 94
+
+/* Control register bits
+ *
+ * Note: bits 13 and 15 are reserved
+ */
+#define LOOPBACK (0x01 << 14)
+#define BASE100X (0x01 << 12)
+#define MBPS_10 (0x01 << 11)
+#define DUPLEX_MODE (0x01 << 10)
+#define PAUSE_FRAME (0x01 << 9)
+#define PROMISCUOUS (0x01 << 8)
+#define MULTICAST (0x01 << 7)
+#define BROADCAST (0x01 << 6)
+#define HASH (0x01 << 5)
+#define APPEND_PAD (0x01 << 4)
+#define APPEND_CRC (0x01 << 3)
+#define TRANSMITTER_ACTION (0x01 << 2)
+#define RECEIVER_ACTION (0x01 << 1)
+#define DMA_ACTION (0x01 << 0)
+
+/* Status register bits
+ *
+ * Note: bits 7-15 are reserved
+ */
+#define ALIGNMENT (0x01 << 6)
+#define FIFO_OVER_RUN (0x01 << 5)
+#define FIFO_UNDER_RUN (0x01 << 4)
+#define RX_ERROR (0x01 << 3)
+#define RX_COMPLETE (0x01 << 2)
+#define TX_ERROR (0x01 << 1)
+#define TX_COMPLETE (0x01 << 0)
+
+/* FIFO depth register bits
+ *
+ * Note: bits 6 and 14 are reserved
+ */
+
+#define ETH_TXBD (0x01 << 15)
+#define ETN_TX_FIFO_DEPTH (0x01 << 8)
+#define ETH_RXBD (0x01 << 7)
+#define ETH_RX_FIFO_DEPTH (0x01 << 0)
+
+static int control_read(struct usbnet *dev,
+ unsigned char request, unsigned short value,
+ unsigned short index, void *data, unsigned short size,
+ int timeout)
+{
+ unsigned char *buf = NULL;
+ unsigned char request_type;
+ int err = 0;
+
+ if (request == REQUEST_READ)
+ request_type = (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_OTHER);
+ else
+ request_type = (USB_DIR_IN | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE);
+
+ netdev_dbg(dev->net, "Control_read() index=0x%02x size=%d\n",
+ index, size);
+
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
+ err = usb_control_msg(dev->udev,
+ usb_rcvctrlpipe(dev->udev, 0),
+ request, request_type, value, index, buf, size,
+ timeout);
+ if (err == size)
+ memcpy(data, buf, size);
+ else if (err >= 0)
+ err = -EINVAL;
+ kfree(buf);
+
+ return err;
+
+err_out:
+ return err;
+}
+
+static int control_write(struct usbnet *dev, unsigned char request,
+ unsigned short value, unsigned short index,
+ void *data, unsigned short size, int timeout)
+{
+ unsigned char *buf = NULL;
+ unsigned char request_type;
+ int err = 0;
+
+ if (request == REQUEST_WRITE)
+ request_type = (USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_OTHER);
+ else
+ request_type = (USB_DIR_OUT | USB_TYPE_VENDOR |
+ USB_RECIP_DEVICE);
+
+ netdev_dbg(dev->net, "Control_write() index=0x%02x size=%d\n",
+ index, size);
+
+ if (data) {
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ memcpy(buf, data, size);
+ }
+
+ err = usb_control_msg(dev->udev,
+ usb_sndctrlpipe(dev->udev, 0),
+ request, request_type, value, index, buf, size,
+ timeout);
+ if (err >= 0 && err < size)
+ err = -EINVAL;
+ kfree(buf);
+
+	return err;
+
+err_out:
+ return err;
+}
+
+static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ unsigned char buff[2];
+
+ netdev_dbg(netdev, "ch9200_mdio_read phy_id:%02x loc:%02x\n",
+ phy_id, loc);
+
+ if (phy_id != 0)
+ return -ENODEV;
+
+ control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02,
+ CONTROL_TIMEOUT_MS);
+
+ return (buff[0] | buff[1] << 8);
+}
+
+static void ch9200_mdio_write(struct net_device *netdev,
+ int phy_id, int loc, int val)
+{
+ struct usbnet *dev = netdev_priv(netdev);
+ unsigned char buff[2];
+
+ netdev_dbg(netdev, "ch9200_mdio_write() phy_id=%02x loc:%02x\n",
+ phy_id, loc);
+
+ if (phy_id != 0)
+ return;
+
+ buff[0] = (unsigned char)val;
+ buff[1] = (unsigned char)(val >> 8);
+
+ control_write(dev, REQUEST_WRITE, 0, loc * 2, buff, 0x02,
+ CONTROL_TIMEOUT_MS);
+}
+
+static int ch9200_link_reset(struct usbnet *dev)
+{
+ struct ethtool_cmd ecmd;
+
+ mii_check_media(&dev->mii, 1, 1);
+ mii_ethtool_gset(&dev->mii, &ecmd);
+
+ netdev_dbg(dev->net, "link_reset() speed:%d duplex:%d\n",
+ ecmd.speed, ecmd.duplex);
+
+ return 0;
+}
+
+static void ch9200_status(struct usbnet *dev, struct urb *urb)
+{
+ int link;
+ unsigned char *buf;
+
+ if (urb->actual_length < 16)
+ return;
+
+ buf = urb->transfer_buffer;
+ link = !!(buf[0] & 0x01);
+
+ if (link) {
+ netif_carrier_on(dev->net);
+ usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+ } else {
+ netif_carrier_off(dev->net);
+ }
+}
+
+static struct sk_buff *ch9200_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+{
+ int i = 0;
+ int len = 0;
+ int tx_overhead = 0;
+
+ tx_overhead = 0x40;
+
+ len = skb->len;
+ if (skb_headroom(skb) < tx_overhead) {
+ struct sk_buff *skb2;
+
+ skb2 = skb_copy_expand(skb, tx_overhead, 0, flags);
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+ if (!skb)
+ return NULL;
+ }
+
+ __skb_push(skb, tx_overhead);
+ /* usbnet adds padding if length is a multiple of packet size
+ * if so, adjust length value in header
+ */
+ if ((skb->len % dev->maxpacket) == 0)
+ len++;
+
+ skb->data[0] = len;
+ skb->data[1] = len >> 8;
+ skb->data[2] = 0x00;
+ skb->data[3] = 0x80;
+
+ for (i = 4; i < 48; i++)
+ skb->data[i] = 0x00;
+
+ skb->data[48] = len;
+ skb->data[49] = len >> 8;
+ skb->data[50] = 0x00;
+ skb->data[51] = 0x80;
+
+ for (i = 52; i < 64; i++)
+ skb->data[i] = 0x00;
+
+ return skb;
+}
+
+static int ch9200_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ int len = 0;
+ int rx_overhead = 0;
+
+ rx_overhead = 64;
+
+ if (unlikely(skb->len < rx_overhead)) {
+ dev_err(&dev->udev->dev, "unexpected tiny rx frame\n");
+ return 0;
+ }
+
+ len = (skb->data[skb->len - 16] | skb->data[skb->len - 15] << 8);
+ skb_trim(skb, len);
+
+ return 1;
+}
+
+static int get_mac_address(struct usbnet *dev, unsigned char *data)
+{
+ int err = 0;
+ unsigned char mac_addr[0x06];
+ int rd_mac_len = 0;
+
+ netdev_dbg(dev->net, "get_mac_address:\n\tusbnet VID:%0x PID:%0x\n",
+ dev->udev->descriptor.idVendor,
+ dev->udev->descriptor.idProduct);
+
+ memset(mac_addr, 0, sizeof(mac_addr));
+ rd_mac_len = control_read(dev, REQUEST_READ, 0,
+ MAC_REG_STATION_L, mac_addr, 0x02,
+ CONTROL_TIMEOUT_MS);
+ rd_mac_len += control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_M,
+ mac_addr + 2, 0x02, CONTROL_TIMEOUT_MS);
+ rd_mac_len += control_read(dev, REQUEST_READ, 0, MAC_REG_STATION_H,
+ mac_addr + 4, 0x02, CONTROL_TIMEOUT_MS);
+ if (rd_mac_len != ETH_ALEN)
+ err = -EINVAL;
+
+ data[0] = mac_addr[5];
+ data[1] = mac_addr[4];
+ data[2] = mac_addr[3];
+ data[3] = mac_addr[2];
+ data[4] = mac_addr[1];
+ data[5] = mac_addr[0];
+
+ return err;
+}
+
+static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ int retval = 0;
+ unsigned char data[2];
+
+ retval = usbnet_get_endpoints(dev, intf);
+ if (retval)
+ return retval;
+
+ dev->mii.dev = dev->net;
+ dev->mii.mdio_read = ch9200_mdio_read;
+ dev->mii.mdio_write = ch9200_mdio_write;
+ dev->mii.reg_num_mask = 0x1f;
+
+ dev->mii.phy_id_mask = 0x1f;
+
+ dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
+ dev->rx_urb_size = 24 * 64 + 16;
+ mii_nway_restart(&dev->mii);
+
+ data[0] = 0x01;
+ data[1] = 0x0F;
+ retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_THRESHOLD, data,
+ 0x02, CONTROL_TIMEOUT_MS);
+
+ data[0] = 0xA0;
+ data[1] = 0x90;
+ retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_FIFO_DEPTH, data,
+ 0x02, CONTROL_TIMEOUT_MS);
+
+ data[0] = 0x30;
+ data[1] = 0x00;
+ retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_PAUSE, data,
+ 0x02, CONTROL_TIMEOUT_MS);
+
+ data[0] = 0x17;
+ data[1] = 0xD8;
+ retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_FLOW_CONTROL,
+ data, 0x02, CONTROL_TIMEOUT_MS);
+
+ /* Undocumented register */
+ data[0] = 0x01;
+ data[1] = 0x00;
+ retval = control_write(dev, REQUEST_WRITE, 0, 254, data, 0x02,
+ CONTROL_TIMEOUT_MS);
+
+ data[0] = 0x5F;
+ data[1] = 0x0D;
+ retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_CTRL, data, 0x02,
+ CONTROL_TIMEOUT_MS);
+
+ retval = get_mac_address(dev, dev->net->dev_addr);
+
+ return retval;
+}
+
+static const struct driver_info ch9200_info = {
+ .description = "CH9200 USB to Network Adaptor",
+ .flags = FLAG_ETHER,
+ .bind = ch9200_bind,
+ .rx_fixup = ch9200_rx_fixup,
+ .tx_fixup = ch9200_tx_fixup,
+ .status = ch9200_status,
+ .link_reset = ch9200_link_reset,
+ .reset = ch9200_link_reset,
+};
+
+static const struct usb_device_id ch9200_products[] = {
+ {
+ USB_DEVICE(0x1A86, 0xE092),
+ .driver_info = (unsigned long)&ch9200_info,
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(usb, ch9200_products);
+
+static struct usb_driver ch9200_driver = {
+ .name = "ch9200",
+ .id_table = ch9200_products,
+ .probe = usbnet_probe,
+ .disconnect = usbnet_disconnect,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+};
+
+module_usb_driver(ch9200_driver);
+
+MODULE_DESCRIPTION("QinHeng CH9200 USB Network device");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index e7094fbd7568..488c6f50df73 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -193,7 +193,8 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
.flowi4_oif = vrf_dev->ifindex,
.flowi4_iif = LOOPBACK_IFINDEX,
.flowi4_tos = RT_TOS(ip4h->tos),
- .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC,
+ .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_VRFSRC |
+ FLOWI_FLAG_SKIP_NH_OIF,
.daddr = ip4h->daddr,
};
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index cf8b7f0473b3..bbac1d35ed4e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2392,10 +2392,6 @@ static void vxlan_setup(struct net_device *dev)
eth_hw_addr_random(dev);
ether_setup(dev);
- if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
- dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
- else
- dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
dev->netdev_ops = &vxlan_netdev_ops;
dev->destructor = free_netdev;
@@ -2640,8 +2636,11 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
dst->remote_ip.sa.sa_family = AF_INET;
if (dst->remote_ip.sa.sa_family == AF_INET6 ||
- vxlan->cfg.saddr.sa.sa_family == AF_INET6)
+ vxlan->cfg.saddr.sa.sa_family == AF_INET6) {
+ if (!IS_ENABLED(CONFIG_IPV6))
+ return -EPFNOSUPPORT;
use_ipv6 = true;
+ }
if (conf->remote_ifindex) {
struct net_device *lowerdev
@@ -2670,8 +2669,12 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
dev->needed_headroom = lowerdev->hard_header_len +
(use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
- } else if (use_ipv6)
+ } else if (use_ipv6) {
vxlan->flags |= VXLAN_F_IPV6;
+ dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
+ } else {
+ dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
+ }
memcpy(&vxlan->cfg, conf, sizeof(*conf));
if (!vxlan->cfg.dst_port)
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 59ad54a63d9f..cb477518dd0e 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -128,13 +128,13 @@ static ssize_t namespace_store(struct device *dev,
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
- nvdimm_bus_lock(dev);
device_lock(dev);
+ nvdimm_bus_lock(dev);
rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
rc, buf, buf[len - 1] == '\n' ? "" : "\n");
- device_unlock(dev);
nvdimm_bus_unlock(dev);
+ device_unlock(dev);
return rc;
}
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 3fd7d0d81a47..71805a1aa0f3 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -148,13 +148,13 @@ static ssize_t namespace_store(struct device *dev,
struct nd_pfn *nd_pfn = to_nd_pfn(dev);
ssize_t rc;
- nvdimm_bus_lock(dev);
device_lock(dev);
+ nvdimm_bus_lock(dev);
rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
rc, buf, buf[len - 1] == '\n' ? "" : "\n");
- device_unlock(dev);
nvdimm_bus_unlock(dev);
+ device_unlock(dev);
return rc;
}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index b9525385c0dc..0ba6a978f227 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -92,6 +92,8 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
struct pmem_device *pmem = bdev->bd_disk->private_data;
pmem_do_bvec(pmem, page, PAGE_CACHE_SIZE, 0, rw, sector);
+ if (rw & WRITE)
+ wmb_pmem();
page_endio(page, rw & WRITE, 0);
return 0;
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 1350fa25cdb0..a87a868fed64 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -197,7 +197,8 @@ static int of_phy_match(struct device *dev, void *phy_np)
 * of_phy_find_device - Given a PHY node, find the phy_device
* @phy_np: Pointer to the phy's device tree node
*
- * Returns a pointer to the phy_device.
+ * If successful, returns a pointer to the phy_device with the embedded
+ * struct device refcount incremented by one, or NULL on failure.
*/
struct phy_device *of_phy_find_device(struct device_node *phy_np)
{
@@ -217,7 +218,9 @@ EXPORT_SYMBOL(of_phy_find_device);
* @hndlr: Link state callback for the network device
* @iface: PHY data interface type
*
- * Returns a pointer to the phy_device if successful. NULL otherwise
+ * If successful, returns a pointer to the phy_device with the embedded
+ * struct device refcount incremented by one, or NULL on failure. The
+ * refcount must be dropped by calling phy_disconnect() or phy_detach().
*/
struct phy_device *of_phy_connect(struct net_device *dev,
struct device_node *phy_np,
@@ -225,13 +228,19 @@ struct phy_device *of_phy_connect(struct net_device *dev,
phy_interface_t iface)
{
struct phy_device *phy = of_phy_find_device(phy_np);
+ int ret;
if (!phy)
return NULL;
phy->dev_flags = flags;
- return phy_connect_direct(dev, phy, hndlr, iface) ? NULL : phy;
+ ret = phy_connect_direct(dev, phy, hndlr, iface);
+
+ /* refcount is held by phy_connect_direct() on success */
+ put_device(&phy->dev);
+
+ return ret ? NULL : phy;
}
EXPORT_SYMBOL(of_phy_connect);
@@ -241,17 +250,27 @@ EXPORT_SYMBOL(of_phy_connect);
* @phy_np: Node pointer for the PHY
* @flags: flags to pass to the PHY
* @iface: PHY data interface type
+ *
+ * If successful, returns a pointer to the phy_device with the embedded
+ * struct device refcount incremented by one, or NULL on failure. The
+ * refcount must be dropped by calling phy_disconnect() or phy_detach().
*/
struct phy_device *of_phy_attach(struct net_device *dev,
struct device_node *phy_np, u32 flags,
phy_interface_t iface)
{
struct phy_device *phy = of_phy_find_device(phy_np);
+ int ret;
if (!phy)
return NULL;
- return phy_attach_direct(dev, phy, flags, iface) ? NULL : phy;
+ ret = phy_attach_direct(dev, phy, flags, iface);
+
+ /* refcount is held by phy_attach_direct() on success */
+ put_device(&phy->dev);
+
+ return ret ? NULL : phy;
}
EXPORT_SYMBOL(of_phy_attach);
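To illustrate the refcount contract spelled out in the kernel-doc added above, here is a minimal sketch with invented foo_* names, assuming <linux/phy.h> and <linux/of_mdio.h>; the reference taken on success is dropped later by phy_disconnect():

/* Hedged sketch only; all foo_* names are invented for illustration. */
struct foo_priv {
	struct net_device *ndev;
	struct phy_device *phydev;
};

static void foo_link_adjust(struct net_device *ndev)
{
	/* react to link up/down, speed and duplex changes */
}

static int foo_attach_phy(struct foo_priv *priv, struct device_node *phy_np)
{
	/* Takes a reference on the PHY's struct device on success. */
	priv->phydev = of_phy_connect(priv->ndev, phy_np, foo_link_adjust,
				      0, PHY_INTERFACE_MODE_RGMII);
	return priv->phydev ? 0 : -ENODEV;
}

static void foo_detach_phy(struct foo_priv *priv)
{
	/* Drops the reference per the kernel-doc above. */
	phy_disconnect(priv->phydev);
	priv->phydev = NULL;
}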
diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c
index 1710d9dc7fc2..2306313c0029 100644
--- a/drivers/of/of_pci_irq.c
+++ b/drivers/of/of_pci_irq.c
@@ -38,8 +38,8 @@ int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq
*/
rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
if (rc != 0)
- return rc;
- /* No pin, exit */
+ goto err;
+ /* No pin, exit with no error message. */
if (pin == 0)
return -ENODEV;
@@ -53,8 +53,10 @@ int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq
ppnode = pci_bus_to_OF_node(pdev->bus);
/* No node for host bridge ? give up */
- if (ppnode == NULL)
- return -EINVAL;
+ if (ppnode == NULL) {
+ rc = -EINVAL;
+ goto err;
+ }
} else {
/* We found a P2P bridge, check if it has a node */
ppnode = pci_device_to_OF_node(ppdev);
@@ -86,7 +88,13 @@ int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq
out_irq->args[0] = pin;
laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
laddr[1] = laddr[2] = cpu_to_be32(0);
- return of_irq_parse_raw(laddr, out_irq);
+ rc = of_irq_parse_raw(laddr, out_irq);
+ if (rc)
+ goto err;
+ return 0;
+err:
+ dev_err(&pdev->dev, "of_irq_parse_pci() failed with rc=%d\n", rc);
+ return rc;
}
EXPORT_SYMBOL_GPL(of_irq_parse_pci);
@@ -105,10 +113,8 @@ int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
int ret;
ret = of_irq_parse_pci(dev, &oirq);
- if (ret) {
- dev_err(&dev->dev, "of_irq_parse_pci() failed with rc=%d\n", ret);
+ if (ret)
return 0; /* Proper return code 0 == NO_IRQ */
- }
return irq_create_of_mapping(&oirq);
}
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index baec33c4e698..a0580afe1713 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -560,6 +560,9 @@ dino_fixup_bus(struct pci_bus *bus)
} else if (bus->parent) {
int i;
+ pci_read_bridge_bases(bus);
+
+
for(i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
if((bus->self->resource[i].flags &
(IORESOURCE_IO | IORESOURCE_MEM)) == 0)
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 7b9e89ba0465..a32c1f6c252c 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -693,6 +693,7 @@ lba_fixup_bus(struct pci_bus *bus)
if (bus->parent) {
int i;
/* PCI-PCI Bridge */
+ pci_read_bridge_bases(bus);
for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++)
pci_claim_bridge_resource(bus->self, i);
} else {
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 769f7e35f1a2..59ac36fe7c42 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -442,7 +442,8 @@ static const struct pci_vpd_ops pci_vpd_pci22_ops = {
static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
void *arg)
{
- struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+ struct pci_dev *tdev = pci_get_slot(dev->bus,
+ PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
ssize_t ret;
if (!tdev)
@@ -456,7 +457,8 @@ static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
const void *arg)
{
- struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
+ struct pci_dev *tdev = pci_get_slot(dev->bus,
+ PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
ssize_t ret;
if (!tdev)
@@ -473,22 +475,6 @@ static const struct pci_vpd_ops pci_vpd_f0_ops = {
.release = pci_vpd_pci22_release,
};
-static int pci_vpd_f0_dev_check(struct pci_dev *dev)
-{
- struct pci_dev *tdev = pci_get_slot(dev->bus, PCI_SLOT(dev->devfn));
- int ret = 0;
-
- if (!tdev)
- return -ENODEV;
- if (!tdev->vpd || !tdev->multifunction ||
- dev->class != tdev->class || dev->vendor != tdev->vendor ||
- dev->device != tdev->device)
- ret = -ENODEV;
-
- pci_dev_put(tdev);
- return ret;
-}
-
int pci_vpd_pci22_init(struct pci_dev *dev)
{
struct pci_vpd_pci22 *vpd;
@@ -497,12 +483,7 @@ int pci_vpd_pci22_init(struct pci_dev *dev)
cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
if (!cap)
return -ENODEV;
- if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0) {
- int ret = pci_vpd_f0_dev_check(dev);
- if (ret)
- return ret;
- }
vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
if (!vpd)
return -ENOMEM;
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 6fbd3f2b5992..d3346d23963b 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -256,6 +256,8 @@ bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
res->start = start;
res->end = end;
+ res->flags &= ~IORESOURCE_UNSET;
+ orig_res.flags &= ~IORESOURCE_UNSET;
dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n",
&orig_res, res);
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
index 81253e70b1c5..0aa81bd3de12 100644
--- a/drivers/pci/host/pci-keystone.c
+++ b/drivers/pci/host/pci-keystone.c
@@ -110,7 +110,7 @@ static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
return -EINVAL;
}
-static void ks_pcie_msi_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void ks_pcie_msi_irq_handler(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
@@ -138,8 +138,7 @@ static void ks_pcie_msi_irq_handler(unsigned int __irq, struct irq_desc *desc)
* Traverse through pending legacy interrupts and invoke handler for each. Also
* takes care of interrupt controller level mask/ack operation.
*/
-static void ks_pcie_legacy_irq_handler(unsigned int __irq,
- struct irq_desc *desc)
+static void ks_pcie_legacy_irq_handler(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
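This signature change, dropping the explicit irq argument from chained handlers, repeats across the PCI host, pinctrl and gpio diffs that follow; everything the handler needs is recovered from the irq_desc. A minimal sketch of the new shape, with an invented foo_* name and assuming <linux/irq.h> and <linux/irqchip/chained_irq.h>:

/* Hedged sketch; foo_* is invented for illustration. */
static void foo_chained_irq_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	void *priv = irq_desc_get_handler_data(desc);
	unsigned int irq = irq_desc_get_irq(desc);	/* only if still needed */

	chained_irq_enter(chip, desc);
	/* demux the hardware status held behind priv/irq and call
	 * generic_handle_irq() for each pending child interrupt */
	chained_irq_exit(chip, desc);
}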
diff --git a/drivers/pci/host/pci-rcar-gen2.c b/drivers/pci/host/pci-rcar-gen2.c
index 367e28fa7564..c4f64bfee551 100644
--- a/drivers/pci/host/pci-rcar-gen2.c
+++ b/drivers/pci/host/pci-rcar-gen2.c
@@ -362,6 +362,7 @@ static int rcar_pci_probe(struct platform_device *pdev)
static struct of_device_id rcar_pci_of_match[] = {
{ .compatible = "renesas,pci-r8a7790", },
{ .compatible = "renesas,pci-r8a7791", },
+ { .compatible = "renesas,pci-r8a7794", },
{ },
};
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index 996327cfa1e1..e491681daf22 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -295,7 +295,7 @@ static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi)
return 0;
}
-static void xgene_msi_isr(unsigned int irq, struct irq_desc *desc)
+static void xgene_msi_isr(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct xgene_msi_group *msi_groups;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index dd652f2ae03d..108a3118ace7 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -299,9 +299,10 @@ static long local_pci_probe(void *_ddi)
* Unbound PCI devices are always put in D0, regardless of
* runtime PM status. During probe, the device is set to
* active and the usage count is incremented. If the driver
- * supports runtime PM, it should call pm_runtime_put_noidle()
- * in its probe routine and pm_runtime_get_noresume() in its
- * remove routine.
+ * supports runtime PM, it should call pm_runtime_put_noidle(),
+ * or any other runtime PM helper function decrementing the usage
+ * count, in its probe routine and pm_runtime_get_noresume() in
+ * its remove routine.
*/
pm_runtime_get_sync(dev);
pci_dev->driver = pci_drv;
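A hedged sketch of the probe/remove pairing the expanded comment describes, with an invented foo_pci_* driver and assuming <linux/pci.h> and <linux/pm_runtime.h>; per the updated wording, any helper that decrements the usage count works in place of pm_runtime_put_noidle():

/* Hedged sketch; foo_pci_* is invented for illustration. */
static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* The PCI core took a usage count via pm_runtime_get_sync() before
	 * calling probe; hand it back so the device may runtime suspend. */
	pm_runtime_put_noidle(&pdev->dev);
	return 0;
}

static void foo_pci_remove(struct pci_dev *pdev)
{
	/* Re-take the count before the core drops its own in remove. */
	pm_runtime_get_noresume(&pdev->dev);
}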
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 0b2be174d981..8361d27e5eca 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -676,15 +676,20 @@ static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
static void pci_set_bus_msi_domain(struct pci_bus *bus)
{
struct irq_domain *d;
+ struct pci_bus *b;
/*
- * Either bus is the root, and we must obtain it from the
- * firmware, or we inherit it from the bridge device.
+ * The bus can be a root bus, a subordinate bus, or a virtual bus
+ * created by an SR-IOV device. Walk up to the first bridge device
+ * found or derive the domain from the host bridge.
*/
- if (pci_is_root_bus(bus))
- d = pci_host_bridge_msi_domain(bus);
- else
- d = dev_get_msi_domain(&bus->self->dev);
+ for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
+ if (b->self)
+ d = dev_get_msi_domain(&b->self->dev);
+ }
+
+ if (!d)
+ d = pci_host_bridge_msi_domain(b);
dev_set_msi_domain(&bus->dev, d);
}
@@ -855,9 +860,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
child->bridge_ctl = bctl;
}
- /* Read and initialize bridge resources */
- pci_read_bridge_bases(child);
-
cmax = pci_scan_child_bus(child);
if (cmax > subordinate)
dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
@@ -918,9 +920,6 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
if (!is_cardbus) {
child->bridge_ctl = bctl;
-
- /* Read and initialize bridge resources */
- pci_read_bridge_bases(child);
max = pci_scan_child_bus(child);
} else {
/*
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 6a30252cd79f..b03373fd05ca 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1907,11 +1907,27 @@ static void quirk_netmos(struct pci_dev *dev)
DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID,
PCI_CLASS_COMMUNICATION_SERIAL, 8, quirk_netmos);
+/*
+ * Quirk non-zero PCI functions to route VPD access through function 0 for
+ * devices that share VPD resources between functions. The functions are
+ * expected to be identical devices.
+ */
static void quirk_f0_vpd_link(struct pci_dev *dev)
{
- if (!dev->multifunction || !PCI_FUNC(dev->devfn))
+ struct pci_dev *f0;
+
+ if (!PCI_FUNC(dev->devfn))
return;
- dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
+
+ f0 = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+ if (!f0)
+ return;
+
+ if (f0->vpd && dev->class == f0->class &&
+ dev->vendor == f0->vendor && dev->device == f0->device)
+ dev->dev_flags |= PCI_DEV_FLAGS_VPD_REF_F0;
+
+ pci_dev_put(f0);
}
DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_INTEL, PCI_ANY_ID,
PCI_CLASS_NETWORK_ETHERNET, 8, quirk_f0_vpd_link);
diff --git a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
index 7d9482bf8252..1ca783098e47 100644
--- a/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
+++ b/drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
@@ -143,7 +143,7 @@ static inline bool cygnus_get_bit(struct cygnus_gpio *chip, unsigned int reg,
return !!(readl(chip->base + offset) & BIT(shift));
}
-static void cygnus_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void cygnus_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct cygnus_gpio *chip = to_cygnus_gpio(gc);
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 69723e07036b..9638a00c67c2 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -349,6 +349,9 @@ static bool pinctrl_ready_for_gpio_range(unsigned gpio)
struct pinctrl_gpio_range *range = NULL;
struct gpio_chip *chip = gpio_to_chip(gpio);
+ if (WARN(!chip, "no gpio_chip for gpio%i?", gpio))
+ return false;
+
mutex_lock(&pinctrldev_list_mutex);
/* Loop over the pin controllers */
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index dac4865f3203..f79ea430f651 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -425,7 +425,7 @@ static void byt_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
}
}
-static void byt_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void byt_gpio_irq_handler(struct irq_desc *desc)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
struct byt_gpio *vg = to_byt_gpio(irq_desc_get_handler_data(desc));
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 2d5d3ddc36e5..270c127e03ea 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1414,7 +1414,7 @@ static struct irq_chip chv_gpio_irqchip = {
.flags = IRQCHIP_SKIP_SET_WAKE,
};
-static void chv_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void chv_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index bb377c110541..54848b8decef 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -836,7 +836,7 @@ static void intel_gpio_community_irq_handler(struct gpio_chip *gc,
}
}
-static void intel_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void intel_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct intel_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 7726c6caaf83..1b22f96ba839 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1190,7 +1190,7 @@ mtk_eint_debounce_process(struct mtk_pinctrl *pctl, int index)
}
}
-static void mtk_eint_irq_handler(unsigned irq, struct irq_desc *desc)
+static void mtk_eint_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct mtk_pinctrl *pctl = irq_desc_get_handler_data(desc);
diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
index 352ede13a9e9..96cf03908e93 100644
--- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c
+++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c
@@ -860,7 +860,7 @@ static void __nmk_gpio_irq_handler(struct irq_desc *desc, u32 status)
chained_irq_exit(host_chip, desc);
}
-static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void nmk_gpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *chip = irq_desc_get_handler_data(desc);
struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
@@ -873,7 +873,7 @@ static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
__nmk_gpio_irq_handler(desc, status);
}
-static void nmk_gpio_latent_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void nmk_gpio_latent_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *chip = irq_desc_get_handler_data(desc);
struct nmk_gpio_chip *nmk_chip = container_of(chip, struct nmk_gpio_chip, chip);
diff --git a/drivers/pinctrl/pinctrl-adi2.c b/drivers/pinctrl/pinctrl-adi2.c
index a5976ebc4482..f6be68518c87 100644
--- a/drivers/pinctrl/pinctrl-adi2.c
+++ b/drivers/pinctrl/pinctrl-adi2.c
@@ -530,8 +530,7 @@ static inline void preflow_handler(struct irq_desc *desc)
static inline void preflow_handler(struct irq_desc *desc) { }
#endif
-static void adi_gpio_handle_pint_irq(unsigned int inta_irq,
- struct irq_desc *desc)
+static void adi_gpio_handle_pint_irq(struct irq_desc *desc)
{
u32 request;
u32 level_mask, hwirq;
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 5e86bb8ca80e..3318f1d6193c 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -492,15 +492,15 @@ static struct irq_chip amd_gpio_irqchip = {
.irq_set_type = amd_gpio_irq_set_type,
};
-static void amd_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void amd_gpio_irq_handler(struct irq_desc *desc)
{
- unsigned int irq = irq_desc_get_irq(desc);
u32 i;
u32 off;
u32 reg;
u32 pin_reg;
u64 reg64;
int handled = 0;
+ unsigned int irq;
unsigned long flags;
struct irq_chip *chip = irq_desc_get_chip(desc);
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -541,7 +541,7 @@ static void amd_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
}
if (handled == 0)
- handle_bad_irq(irq, desc);
+ handle_bad_irq(desc);
spin_lock_irqsave(&gpio_dev->lock, flags);
reg = readl(gpio_dev->base + WAKE_INT_MASTER_REG);
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index bae0012ee356..b0fde0f385e6 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1585,7 +1585,7 @@ static struct irq_chip gpio_irqchip = {
.irq_set_wake = gpio_irq_set_wake,
};
-static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void gpio_irq_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct gpio_chip *gpio_chip = irq_desc_get_handler_data(desc);
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index 3731cc67a88b..9c9b88934bcc 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -519,7 +519,7 @@ static struct irq_chip u300_gpio_irqchip = {
.irq_set_type = u300_gpio_irq_type,
};
-static void u300_gpio_irq_handler(unsigned __irq, struct irq_desc *desc)
+static void u300_gpio_irq_handler(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
struct irq_chip *parent_chip = irq_desc_get_chip(desc);
diff --git a/drivers/pinctrl/pinctrl-digicolor.c b/drivers/pinctrl/pinctrl-digicolor.c
index 461fffc4c62a..11f8b835d3b6 100644
--- a/drivers/pinctrl/pinctrl-digicolor.c
+++ b/drivers/pinctrl/pinctrl-digicolor.c
@@ -337,9 +337,9 @@ static int dc_pinctrl_probe(struct platform_device *pdev)
pmap->dev = &pdev->dev;
pmap->pctl = pinctrl_register(pctl_desc, &pdev->dev, pmap);
- if (!pmap->pctl) {
+ if (IS_ERR(pmap->pctl)) {
dev_err(&pdev->dev, "pinctrl driver registration failed\n");
- return -EINVAL;
+ return PTR_ERR(pmap->pctl);
}
ret = dc_gpiochip_add(pmap, pdev->dev.of_node);
diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
index 3dc2ae15f3a1..952b1c623887 100644
--- a/drivers/pinctrl/pinctrl-pistachio.c
+++ b/drivers/pinctrl/pinctrl-pistachio.c
@@ -1303,20 +1303,18 @@ static int pistachio_gpio_irq_set_type(struct irq_data *data, unsigned int type)
}
if (type & IRQ_TYPE_LEVEL_MASK)
- __irq_set_handler_locked(data->irq, handle_level_irq);
+ irq_set_handler_locked(data, handle_level_irq);
else
- __irq_set_handler_locked(data->irq, handle_edge_irq);
+ irq_set_handler_locked(data, handle_edge_irq);
return 0;
}
-static void pistachio_gpio_irq_handler(unsigned int __irq,
- struct irq_desc *desc)
+static void pistachio_gpio_irq_handler(struct irq_desc *desc)
{
- unsigned int irq = irq_desc_get_irq(desc);
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct pistachio_gpio_bank *bank = gc_to_bank(gc);
- struct irq_chip *chip = irq_get_chip(irq);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned long pending;
unsigned int pin;
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index c5246c05f70c..88bb707e107a 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -1475,7 +1475,7 @@ static const struct gpio_chip rockchip_gpiolib_chip = {
* Interrupt handling
*/
-static void rockchip_irq_demux(unsigned int __irq, struct irq_desc *desc)
+static void rockchip_irq_demux(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct rockchip_pin_bank *bank = irq_desc_get_handler_data(desc);
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index bf548c2a7a9d..ef04b962c3d5 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1679,7 +1679,7 @@ static irqreturn_t pcs_irq_handler(int irq, void *d)
* Use this if you have a separate interrupt for each
* pinctrl-single instance.
*/
-static void pcs_irq_chain_handler(unsigned int irq, struct irq_desc *desc)
+static void pcs_irq_chain_handler(struct irq_desc *desc)
{
struct pcs_soc_data *pcs_soc = irq_desc_get_handler_data(desc);
struct irq_chip *chip;
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index f8338d2e6b6b..389526e704fb 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1460,7 +1460,7 @@ static void __gpio_irq_handler(struct st_gpio_bank *bank)
}
}
-static void st_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void st_gpio_irq_handler(struct irq_desc *desc)
{
/* interrupt dedicated per bank */
struct irq_chip *chip = irq_desc_get_chip(desc);
@@ -1472,7 +1472,7 @@ static void st_gpio_irq_handler(unsigned irq, struct irq_desc *desc)
chained_irq_exit(chip, desc);
}
-static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
+static void st_gpio_irqmux_handler(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct st_pinctrl *info = irq_desc_get_handler_data(desc);
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index 67e08cb315c4..29984b36926a 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -313,8 +313,7 @@ static int pinmux_func_name_to_selector(struct pinctrl_dev *pctldev,
/* See if this pctldev has this function */
while (selector < nfuncs) {
- const char *fname = ops->get_function_name(pctldev,
- selector);
+ const char *fname = ops->get_function_name(pctldev, selector);
if (!strcmp(function, fname))
return selector;
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 492cdd51dc5c..a0c7407c1cac 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -765,9 +765,8 @@ static struct irq_chip msm_gpio_irq_chip = {
.irq_set_wake = msm_gpio_irq_set_wake,
};
-static void msm_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
+static void msm_gpio_irq_handler(struct irq_desc *desc)
{
- unsigned int irq = irq_desc_get_irq(desc);
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
const struct msm_pingroup *g;
struct msm_pinctrl *pctrl = to_msm_pinctrl(gc);
@@ -795,7 +794,7 @@ static void msm_gpio_irq_handler(unsigned int __irq, struct irq_desc *desc)
/* No interrupts were flagged */
if (handled == 0)
- handle_bad_irq(irq, desc);
+ handle_bad_irq(desc);
chained_irq_exit(chip, desc);
}
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
index c978b311031b..e1a3721bc8e5 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c
@@ -723,9 +723,9 @@ static int pm8xxx_gpio_probe(struct platform_device *pdev)
#endif
pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl);
- if (!pctrl->pctrl) {
+ if (IS_ERR(pctrl->pctrl)) {
dev_err(&pdev->dev, "couldn't register pm8xxx gpio driver\n");
- return -ENODEV;
+ return PTR_ERR(pctrl->pctrl);
}
pctrl->chip = pm8xxx_gpio_template;
diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
index 2d1b69f171be..6652b8d7f707 100644
--- a/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-ssbi-mpp.c
@@ -814,9 +814,9 @@ static int pm8xxx_mpp_probe(struct platform_device *pdev)
#endif
pctrl->pctrl = pinctrl_register(&pctrl->desc, &pdev->dev, pctrl);
- if (!pctrl->pctrl) {
+ if (IS_ERR(pctrl->pctrl)) {
dev_err(&pdev->dev, "couldn't register pm8xxx mpp driver\n");
- return -ENODEV;
+ return PTR_ERR(pctrl->pctrl);
}
pctrl->chip = pm8xxx_mpp_template;
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 5f45caaef46d..71ccf6a90b22 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -419,7 +419,7 @@ static const struct of_device_id exynos_wkup_irq_ids[] = {
};
/* interrupt handler for wakeup interrupts 0..15 */
-static void exynos_irq_eint0_15(unsigned int irq, struct irq_desc *desc)
+static void exynos_irq_eint0_15(struct irq_desc *desc)
{
struct exynos_weint_data *eintd = irq_desc_get_handler_data(desc);
struct samsung_pin_bank *bank = eintd->bank;
@@ -451,7 +451,7 @@ static inline void exynos_irq_demux_eint(unsigned long pend,
}
/* interrupt handler for wakeup interrupt 16 */
-static void exynos_irq_demux_eint16_31(unsigned int irq, struct irq_desc *desc)
+static void exynos_irq_demux_eint16_31(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct exynos_muxed_weint_data *eintd = irq_desc_get_handler_data(desc);
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
index 019844d479bb..3d92f827da7a 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c
@@ -240,7 +240,7 @@ static struct irq_chip s3c2410_eint0_3_chip = {
.irq_set_type = s3c24xx_eint_type,
};
-static void s3c2410_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
+static void s3c2410_demux_eint0_3(struct irq_desc *desc)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc);
@@ -295,7 +295,7 @@ static struct irq_chip s3c2412_eint0_3_chip = {
.irq_set_type = s3c24xx_eint_type,
};
-static void s3c2412_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
+static void s3c2412_demux_eint0_3(struct irq_desc *desc)
{
struct s3c24xx_eint_data *eint_data = irq_desc_get_handler_data(desc);
struct irq_data *data = irq_desc_get_irq_data(desc);
@@ -361,7 +361,7 @@ static inline void s3c24xx_demux_eint(struct irq_desc *desc,
u32 offset, u32 range)
{
struct s3c24xx_eint_data *data = irq_desc_get_handler_data(desc);
- struct irq_chip *chip = irq_desc_get_irq_chip(desc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
struct samsung_pinctrl_drv_data *d = data->drvdata;
unsigned int pend, mask;
@@ -388,12 +388,12 @@ static inline void s3c24xx_demux_eint(struct irq_desc *desc,
chained_irq_exit(chip, desc);
}
-static void s3c24xx_demux_eint4_7(unsigned int irq, struct irq_desc *desc)
+static void s3c24xx_demux_eint4_7(struct irq_desc *desc)
{
s3c24xx_demux_eint(desc, 0, 0xf0);
}
-static void s3c24xx_demux_eint8_23(unsigned int irq, struct irq_desc *desc)
+static void s3c24xx_demux_eint8_23(struct irq_desc *desc)
{
s3c24xx_demux_eint(desc, 8, 0xffff00);
}
diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
index f5ea40a69711..43407ab248f5 100644
--- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
+++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c
@@ -407,7 +407,7 @@ static const struct irq_domain_ops s3c64xx_gpio_irqd_ops = {
.xlate = irq_domain_xlate_twocell,
};
-static void s3c64xx_eint_gpio_irq(unsigned int irq, struct irq_desc *desc)
+static void s3c64xx_eint_gpio_irq(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct s3c64xx_eint_gpio_data *data = irq_desc_get_handler_data(desc);
@@ -631,22 +631,22 @@ static inline void s3c64xx_irq_demux_eint(struct irq_desc *desc, u32 range)
chained_irq_exit(chip, desc);
}
-static void s3c64xx_demux_eint0_3(unsigned int irq, struct irq_desc *desc)
+static void s3c64xx_demux_eint0_3(struct irq_desc *desc)
{
s3c64xx_irq_demux_eint(desc, 0xf);
}
-static void s3c64xx_demux_eint4_11(unsigned int irq, struct irq_desc *desc)
+static void s3c64xx_demux_eint4_11(struct irq_desc *desc)
{
s3c64xx_irq_demux_eint(desc, 0xff0);
}
-static void s3c64xx_demux_eint12_19(unsigned int irq, struct irq_desc *desc)
+static void s3c64xx_demux_eint12_19(struct irq_desc *desc)
{
s3c64xx_irq_demux_eint(desc, 0xff000);
}
-static void s3c64xx_demux_eint20_27(unsigned int irq, struct irq_desc *desc)
+static void s3c64xx_demux_eint20_27(struct irq_desc *desc)
{
s3c64xx_irq_demux_eint(desc, 0xff00000);
}
diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
index 9df0c5f25824..0d24d9e4b70c 100644
--- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
+++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
@@ -4489,7 +4489,7 @@ static struct irq_chip atlas7_gpio_irq_chip = {
.irq_set_type = atlas7_gpio_irq_type,
};
-static void atlas7_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc)
+static void atlas7_gpio_handle_irq(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct atlas7_gpio_chip *a7gc = to_atlas7_gpio(gc);
@@ -4512,7 +4512,7 @@ static void atlas7_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc)
if (!status) {
		pr_warn("%s: gpio [%s] status %#x no interrupt is flagged\n",
__func__, gc->label, status);
- handle_bad_irq(irq, desc);
+ handle_bad_irq(desc);
return;
}
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index f8bd9fb52033..2a8d69725de8 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -545,7 +545,7 @@ static struct irq_chip sirfsoc_irq_chip = {
.irq_set_type = sirfsoc_gpio_irq_type,
};
-static void sirfsoc_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc)
+static void sirfsoc_gpio_handle_irq(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
@@ -570,7 +570,7 @@ static void sirfsoc_gpio_handle_irq(unsigned int __irq, struct irq_desc *desc)
printk(KERN_WARNING
"%s: gpio id %d status %#x no interrupt is flagged\n",
__func__, bank->id, status);
- handle_bad_irq(irq, desc);
+ handle_bad_irq(desc);
return;
}
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index ae8f29fb5536..1f0af250dbb5 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -356,7 +356,7 @@ static struct irq_chip plgpio_irqchip = {
.irq_set_type = plgpio_irq_set_type,
};
-static void plgpio_irq_handler(unsigned irq, struct irq_desc *desc)
+static void plgpio_irq_handler(struct irq_desc *desc)
{
struct gpio_chip *gc = irq_desc_get_handler_data(desc);
struct plgpio *plgpio = container_of(gc, struct plgpio, chip);
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index fb4669c0ce0e..38e0c7bdd2ac 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -617,13 +617,11 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, unsigned int type)
spin_lock_irqsave(&pctl->lock, flags);
if (type & IRQ_TYPE_LEVEL_MASK)
- __irq_set_chip_handler_name_locked(d->irq,
- &sunxi_pinctrl_level_irq_chip,
- handle_fasteoi_irq, NULL);
+ irq_set_chip_handler_name_locked(d, &sunxi_pinctrl_level_irq_chip,
+ handle_fasteoi_irq, NULL);
else
- __irq_set_chip_handler_name_locked(d->irq,
- &sunxi_pinctrl_edge_irq_chip,
- handle_edge_irq, NULL);
+ irq_set_chip_handler_name_locked(d, &sunxi_pinctrl_edge_irq_chip,
+ handle_edge_irq, NULL);
regval = readl(pctl->membase + reg);
regval &= ~(IRQ_CFG_IRQ_MASK << index);
@@ -742,7 +740,7 @@ static struct irq_domain_ops sunxi_pinctrl_irq_domain_ops = {
.xlate = sunxi_pinctrl_irq_of_xlate,
};
-static void sunxi_pinctrl_irq_handler(unsigned __irq, struct irq_desc *desc)
+static void sunxi_pinctrl_irq_handler(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index abdaed34c728..131fee2b093e 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -128,6 +128,24 @@ static const struct dmi_system_id asus_quirks[] = {
},
{
.callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X456UA",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X456UA"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
+ .ident = "ASUSTeK COMPUTER INC. X456UF",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X456UF"),
+ },
+ .driver_data = &quirk_asus_wapf4,
+ },
+ {
+ .callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. X501U",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 06697315a088..fb4dd7b3ee71 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -54,8 +54,9 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
#define HPWMI_HARDWARE_QUERY 0x4
#define HPWMI_WIRELESS_QUERY 0x5
#define HPWMI_BIOS_QUERY 0x9
+#define HPWMI_FEATURE_QUERY 0xb
#define HPWMI_HOTKEY_QUERY 0xc
-#define HPWMI_FEATURE_QUERY 0xd
+#define HPWMI_FEATURE2_QUERY 0xd
#define HPWMI_WIRELESS2_QUERY 0x1b
#define HPWMI_POSTCODEERROR_QUERY 0x2a
@@ -295,25 +296,33 @@ static int hp_wmi_tablet_state(void)
return (state & 0x4) ? 1 : 0;
}
-static int __init hp_wmi_bios_2009_later(void)
+static int __init hp_wmi_bios_2008_later(void)
{
int state = 0;
int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, 0, &state,
sizeof(state), sizeof(state));
- if (ret)
- return ret;
+ if (!ret)
+ return 1;
- return (state & 0x10) ? 1 : 0;
+ return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO;
}
-static int hp_wmi_enable_hotkeys(void)
+static int __init hp_wmi_bios_2009_later(void)
{
- int ret;
- int query = 0x6e;
+ int state = 0;
+ int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, 0, &state,
+ sizeof(state), sizeof(state));
+ if (!ret)
+ return 1;
- ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query),
- 0);
+ return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO;
+}
+static int __init hp_wmi_enable_hotkeys(void)
+{
+ int value = 0x6e;
+ int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &value,
+ sizeof(value), 0);
if (ret)
return -EINVAL;
return 0;
@@ -663,7 +672,7 @@ static int __init hp_wmi_input_setup(void)
hp_wmi_tablet_state());
input_sync(hp_wmi_input_dev);
- if (hp_wmi_bios_2009_later() == 4)
+ if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later())
hp_wmi_enable_hotkeys();
status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL);
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 6740c513919c..f2372f400ddb 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -938,7 +938,7 @@ static int toshiba_usb_sleep_music_get(struct toshiba_acpi_dev *dev, u32 *state)
else if (result == TOS_NOT_SUPPORTED)
return -ENODEV;
- return result = TOS_SUCCESS ? 0 : -EIO;
+ return result == TOS_SUCCESS ? 0 : -EIO;
}
static int toshiba_usb_sleep_music_set(struct toshiba_acpi_dev *dev, u32 state)
@@ -2398,11 +2398,9 @@ static int toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
if (error)
return error;
- error = toshiba_hotkey_event_type_get(dev, &events_type);
- if (error) {
- pr_err("Unable to query Hotkey Event Type\n");
- return error;
- }
+ if (toshiba_hotkey_event_type_get(dev, &events_type))
+ pr_notice("Unable to query Hotkey Event Type\n");
+
dev->hotkey_event_type = events_type;
dev->hotkey_dev = input_allocate_device();
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index aac47573f9ed..eb391a281833 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -194,34 +194,6 @@ static bool wmi_parse_guid(const u8 *src, u8 *dest)
return true;
}
-/*
- * Convert a raw GUID to the ACII string representation
- */
-static int wmi_gtoa(const char *in, char *out)
-{
- int i;
-
- for (i = 3; i >= 0; i--)
- out += sprintf(out, "%02X", in[i] & 0xFF);
-
- out += sprintf(out, "-");
- out += sprintf(out, "%02X", in[5] & 0xFF);
- out += sprintf(out, "%02X", in[4] & 0xFF);
- out += sprintf(out, "-");
- out += sprintf(out, "%02X", in[7] & 0xFF);
- out += sprintf(out, "%02X", in[6] & 0xFF);
- out += sprintf(out, "-");
- out += sprintf(out, "%02X", in[8] & 0xFF);
- out += sprintf(out, "%02X", in[9] & 0xFF);
- out += sprintf(out, "-");
-
- for (i = 10; i <= 15; i++)
- out += sprintf(out, "%02X", in[i] & 0xFF);
-
- *out = '\0';
- return 0;
-}
-
static bool find_guid(const char *guid_string, struct wmi_block **out)
{
char tmp[16], guid_input[16];
@@ -457,11 +429,7 @@ EXPORT_SYMBOL_GPL(wmi_set_block);
static void wmi_dump_wdg(const struct guid_block *g)
{
- char guid_string[37];
-
- wmi_gtoa(g->guid, guid_string);
-
- pr_info("%s:\n", guid_string);
+ pr_info("%pUL:\n", g->guid);
pr_info("\tobject_id: %c%c\n", g->object_id[0], g->object_id[1]);
pr_info("\tnotify_id: %02X\n", g->notify_id);
pr_info("\treserved: %02X\n", g->reserved);
@@ -661,7 +629,6 @@ EXPORT_SYMBOL_GPL(wmi_has_guid);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- char guid_string[37];
struct wmi_block *wblock;
wblock = dev_get_drvdata(dev);
@@ -670,9 +637,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
return strlen(buf);
}
- wmi_gtoa(wblock->gblock.guid, guid_string);
-
- return sprintf(buf, "wmi:%s\n", guid_string);
+ return sprintf(buf, "wmi:%pUL\n", wblock->gblock.guid);
}
static DEVICE_ATTR_RO(modalias);
@@ -695,7 +660,7 @@ static int wmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
if (!wblock)
return -ENOMEM;
- wmi_gtoa(wblock->gblock.guid, guid_string);
+ sprintf(guid_string, "%pUL", wblock->gblock.guid);
strcpy(&env->buf[env->buflen - 1], "wmi:");
memcpy(&env->buf[env->buflen - 1 + 4], guid_string, 36);
@@ -721,12 +686,9 @@ static struct class wmi_class = {
static int wmi_create_device(const struct guid_block *gblock,
struct wmi_block *wblock, acpi_handle handle)
{
- char guid_string[37];
-
wblock->dev.class = &wmi_class;
- wmi_gtoa(gblock->guid, guid_string);
- dev_set_name(&wblock->dev, "%s", guid_string);
+ dev_set_name(&wblock->dev, "%pUL", gblock->guid);
dev_set_drvdata(&wblock->dev, wblock);
@@ -877,7 +839,6 @@ static void acpi_wmi_notify(struct acpi_device *device, u32 event)
struct guid_block *block;
struct wmi_block *wblock;
struct list_head *p;
- char guid_string[37];
list_for_each(p, &wmi_block_list) {
wblock = list_entry(p, struct wmi_block, list);
@@ -888,8 +849,8 @@ static void acpi_wmi_notify(struct acpi_device *device, u32 event)
if (wblock->handler)
wblock->handler(event, wblock->handler_data);
if (debug_event) {
- wmi_gtoa(wblock->gblock.guid, guid_string);
- pr_info("DEBUG Event GUID: %s\n", guid_string);
+ pr_info("DEBUG Event GUID: %pUL\n",
+ wblock->gblock.guid);
}
acpi_bus_generate_netlink_event(
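The conversions in this file lean on the printk %pUL extension, which prints a 16-byte GUID stored in the little-endian (ACPI/WMI) byte order as an uppercase UUID string, which is why the hand-rolled formatter removed above becomes redundant. A minimal sketch, with an invented helper name:

/* Hedged sketch; example_* is invented. Prints something like
 * "GUID: 5FB7F034-2C63-45E9-BE91-3D44E2C707E4". */
static void example_dump_guid(const u8 guid[16])
{
	pr_info("GUID: %pUL\n", guid);
}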
diff --git a/drivers/power/twl4030_charger.c b/drivers/power/twl4030_charger.c
index f4f2c1f76c32..74f2d3ff1d7c 100644
--- a/drivers/power/twl4030_charger.c
+++ b/drivers/power/twl4030_charger.c
@@ -91,7 +91,7 @@
#define TWL4030_MSTATEC_COMPLETE1 0x0b
#define TWL4030_MSTATEC_COMPLETE4 0x0e
-#if IS_ENABLED(CONFIG_TWL4030_MADC)
+#if IS_REACHABLE(CONFIG_TWL4030_MADC)
/*
* If AC (Accessory Charger) voltage exceeds 4.5V (MADC 11)
* then AC is available.
@@ -1057,13 +1057,9 @@ static int twl4030_bci_probe(struct platform_device *pdev)
phynode = of_find_compatible_node(bci->dev->of_node->parent,
NULL, "ti,twl4030-usb");
- if (phynode) {
+ if (phynode)
bci->transceiver = devm_usb_get_phy_by_node(
bci->dev, phynode, &bci->usb_nb);
- if (IS_ERR(bci->transceiver) &&
- PTR_ERR(bci->transceiver) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- }
}
/* Enable interrupts now. */
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 738adfa5332b..52ea605f8130 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -318,6 +318,7 @@ static const struct of_device_id of_anatop_regulator_match_tbl[] = {
{ .compatible = "fsl,anatop-regulator", },
{ /* end */ }
};
+MODULE_DEVICE_TABLE(of, of_anatop_regulator_match_tbl);
static struct platform_driver anatop_regulator_driver = {
.driver = {
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7a85ac9e32c5..7849187d91ae 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1394,15 +1394,15 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
return 0;
r = regulator_dev_lookup(dev, rdev->supply_name, &ret);
- if (ret == -ENODEV) {
- /*
- * No supply was specified for this regulator and
- * there will never be one.
- */
- return 0;
- }
-
if (!r) {
+ if (ret == -ENODEV) {
+ /*
+ * No supply was specified for this regulator and
+ * there will never be one.
+ */
+ return 0;
+ }
+
if (have_full_constraints()) {
r = dummy_regulator_rdev;
} else {
@@ -1422,11 +1422,10 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
return ret;
/* Cascade always-on state to supply */
- if (_regulator_is_enabled(rdev)) {
+ if (_regulator_is_enabled(rdev) && rdev->supply) {
ret = regulator_enable(rdev->supply);
if (ret < 0) {
- if (rdev->supply)
- _regulator_put(rdev->supply);
+ _regulator_put(rdev->supply);
return ret;
}
}
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 464018de7e97..7bba8b747f30 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -394,6 +394,7 @@ static const struct of_device_id regulator_gpio_of_match[] = {
{ .compatible = "regulator-gpio", },
{},
};
+MODULE_DEVICE_TABLE(of, regulator_gpio_of_match);
#endif
static struct platform_driver gpio_regulator_driver = {
diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c
index 4fa7bcaf454e..f9d74d63be7c 100644
--- a/drivers/regulator/pbias-regulator.c
+++ b/drivers/regulator/pbias-regulator.c
@@ -45,6 +45,10 @@ struct pbias_regulator_data {
int voltage;
};
+struct pbias_of_data {
+ unsigned int offset;
+};
+
static const unsigned int pbias_volt_table[] = {
1800000,
3000000
@@ -102,8 +106,35 @@ static struct of_regulator_match pbias_matches[] = {
};
#define PBIAS_NUM_REGS ARRAY_SIZE(pbias_matches)
+/* Offset from SCM general area (and syscon) base */
+
+static const struct pbias_of_data pbias_of_data_omap2 = {
+ .offset = 0x230,
+};
+
+static const struct pbias_of_data pbias_of_data_omap3 = {
+ .offset = 0x2b0,
+};
+
+static const struct pbias_of_data pbias_of_data_omap4 = {
+ .offset = 0x60,
+};
+
+static const struct pbias_of_data pbias_of_data_omap5 = {
+ .offset = 0x60,
+};
+
+static const struct pbias_of_data pbias_of_data_dra7 = {
+ .offset = 0xe00,
+};
+
static const struct of_device_id pbias_of_match[] = {
{ .compatible = "ti,pbias-omap", },
+ { .compatible = "ti,pbias-omap2", .data = &pbias_of_data_omap2, },
+ { .compatible = "ti,pbias-omap3", .data = &pbias_of_data_omap3, },
+ { .compatible = "ti,pbias-omap4", .data = &pbias_of_data_omap4, },
+ { .compatible = "ti,pbias-omap5", .data = &pbias_of_data_omap5, },
+ { .compatible = "ti,pbias-dra7", .data = &pbias_of_data_dra7, },
{},
};
MODULE_DEVICE_TABLE(of, pbias_of_match);
@@ -118,6 +149,9 @@ static int pbias_regulator_probe(struct platform_device *pdev)
const struct pbias_reg_info *info;
int ret = 0;
int count, idx, data_idx = 0;
+ const struct of_device_id *match;
+ const struct pbias_of_data *data;
+ unsigned int offset;
count = of_regulator_match(&pdev->dev, np, pbias_matches,
PBIAS_NUM_REGS);
@@ -133,6 +167,20 @@ static int pbias_regulator_probe(struct platform_device *pdev)
if (IS_ERR(syscon))
return PTR_ERR(syscon);
+ match = of_match_device(of_match_ptr(pbias_of_match), &pdev->dev);
+ if (match && match->data) {
+ data = match->data;
+ offset = data->offset;
+ } else {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ offset = res->start;
+ dev_WARN(&pdev->dev,
+ "using legacy dt data for pbias offset\n");
+ }
+
cfg.regmap = syscon;
cfg.dev = &pdev->dev;
@@ -145,10 +193,6 @@ static int pbias_regulator_probe(struct platform_device *pdev)
if (!info)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -EINVAL;
-
drvdata[data_idx].syscon = syscon;
drvdata[data_idx].info = info;
drvdata[data_idx].desc.name = info->name;
@@ -158,9 +202,9 @@ static int pbias_regulator_probe(struct platform_device *pdev)
drvdata[data_idx].desc.volt_table = pbias_volt_table;
drvdata[data_idx].desc.n_voltages = 2;
drvdata[data_idx].desc.enable_time = info->enable_time;
- drvdata[data_idx].desc.vsel_reg = res->start;
+ drvdata[data_idx].desc.vsel_reg = offset;
drvdata[data_idx].desc.vsel_mask = info->vmode;
- drvdata[data_idx].desc.enable_reg = res->start;
+ drvdata[data_idx].desc.enable_reg = offset;
drvdata[data_idx].desc.enable_mask = info->enable_mask;
drvdata[data_idx].desc.enable_val = info->enable;
drvdata[data_idx].desc.disable_val = info->disable_val;
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index 7f97223f95c5..a02c1b961039 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -73,7 +73,7 @@ static const struct regulator_linear_range dcdc4_ranges[] = {
};
static struct tps_info tps65218_pmic_regs[] = {
- TPS65218_INFO(DCDC1, "DCDC1", 850000, 167500),
+ TPS65218_INFO(DCDC1, "DCDC1", 850000, 1675000),
TPS65218_INFO(DCDC2, "DCDC2", 850000, 1675000),
TPS65218_INFO(DCDC3, "DCDC3", 900000, 3400000),
TPS65218_INFO(DCDC4, "DCDC4", 1175000, 3400000),
diff --git a/drivers/regulator/vexpress.c b/drivers/regulator/vexpress.c
index bed9d3ee4198..c810cbbd463f 100644
--- a/drivers/regulator/vexpress.c
+++ b/drivers/regulator/vexpress.c
@@ -103,6 +103,7 @@ static const struct of_device_id vexpress_regulator_of_match[] = {
{ .compatible = "arm,vexpress-volt", },
{ }
};
+MODULE_DEVICE_TABLE(of, vexpress_regulator_of_match);
static struct platform_driver vexpress_regulator_driver = {
.probe = vexpress_regulator_probe,
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index f8d8fdb26b72..e9fae30fafda 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -400,12 +400,16 @@ static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
struct ccw1 *ccw, int index)
{
+ int ret;
+
vcdev->config_block->index = index;
ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
ccw->flags = 0;
ccw->count = sizeof(struct vq_config_block);
ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
- ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
+ ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
+ if (ret)
+ return ret;
return vcdev->config_block->num;
}
@@ -503,6 +507,10 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
goto out_err;
}
info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
+ if (info->num < 0) {
+ err = info->num;
+ goto out_err;
+ }
size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
if (info->queue == NULL) {
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index cbfc5990052b..126a48c6431e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1957,7 +1957,7 @@ static int scsi_mq_prep_fn(struct request *req)
static void scsi_mq_done(struct scsi_cmnd *cmd)
{
trace_scsi_dispatch_cmd_done(cmd);
- blk_mq_complete_request(cmd->request);
+ blk_mq_complete_request(cmd->request, cmd->request->errors);
}
static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 043419dcee92..8e72bcbd3d6d 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -65,7 +65,7 @@ void intc_set_prio_level(unsigned int irq, unsigned int level)
raw_spin_unlock_irqrestore(&intc_big_lock, flags);
}
-static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
+static void intc_redirect_irq(struct irq_desc *desc)
{
generic_handle_irq((unsigned int)irq_desc_get_handler_data(desc));
}
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h
index 7dff08e2a071..6ce7f0d26dcf 100644
--- a/drivers/sh/intc/internals.h
+++ b/drivers/sh/intc/internals.h
@@ -99,15 +99,7 @@ static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
*/
static inline void activate_irq(int irq)
{
-#ifdef CONFIG_ARM
- /* ARM requires an extra step to clear IRQ_NOREQUEST, which it
- * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE.
- */
- set_irq_flags(irq, IRQF_VALID);
-#else
- /* same effect on other architectures */
- irq_set_noprobe(irq);
-#endif
+ irq_modify_status(irq, IRQ_NOREQUEST, IRQ_NOPROBE);
}
static inline int intc_handle_int_cmp(const void *a, const void *b)
diff --git a/drivers/sh/intc/virq.c b/drivers/sh/intc/virq.c
index bafc51c6f0ba..e7899624aa0b 100644
--- a/drivers/sh/intc/virq.c
+++ b/drivers/sh/intc/virq.c
@@ -109,7 +109,7 @@ static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
return 0;
}
-static void intc_virq_handler(unsigned int __irq, struct irq_desc *desc)
+static void intc_virq_handler(struct irq_desc *desc)
{
unsigned int irq = irq_desc_get_irq(desc);
struct irq_data *data = irq_desc_get_irq_data(desc);
@@ -127,7 +127,7 @@ static void intc_virq_handler(unsigned int __irq, struct irq_desc *desc)
handle = (unsigned long)irq_desc_get_handler_data(vdesc);
addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);
if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
- generic_handle_irq_desc(entry->irq, vdesc);
+ generic_handle_irq_desc(vdesc);
}
}
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index d3d1891cda3c..25abd4eb7d10 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -35,20 +35,11 @@ static struct pm_clk_notifier_block platform_bus_notifier = {
static int __init sh_pm_runtime_init(void)
{
if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
- if (!of_machine_is_compatible("renesas,emev2") &&
- !of_machine_is_compatible("renesas,r7s72100") &&
-#ifndef CONFIG_PM_GENERIC_DOMAINS_OF
- !of_machine_is_compatible("renesas,r8a73a4") &&
- !of_machine_is_compatible("renesas,r8a7740") &&
- !of_machine_is_compatible("renesas,sh73a0") &&
-#endif
- !of_machine_is_compatible("renesas,r8a7778") &&
- !of_machine_is_compatible("renesas,r8a7779") &&
- !of_machine_is_compatible("renesas,r8a7790") &&
- !of_machine_is_compatible("renesas,r8a7791") &&
- !of_machine_is_compatible("renesas,r8a7792") &&
- !of_machine_is_compatible("renesas,r8a7793") &&
- !of_machine_is_compatible("renesas,r8a7794"))
+ if (!of_find_compatible_node(NULL, NULL,
+ "renesas,cpg-mstp-clocks"))
+ return 0;
+ if (IS_ENABLED(CONFIG_PM_GENERIC_DOMAINS_OF) &&
+ of_find_node_with_property(NULL, "#power-domain-cells"))
return 0;
}
diff --git a/drivers/soc/dove/pmu.c b/drivers/soc/dove/pmu.c
index 6792aae9e2e5..052aecf29893 100644
--- a/drivers/soc/dove/pmu.c
+++ b/drivers/soc/dove/pmu.c
@@ -222,9 +222,9 @@ static void __pmu_domain_register(struct pmu_domain *domain,
}
/* PMU IRQ controller */
-static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc)
+static void pmu_irq_handler(struct irq_desc *desc)
{
- struct pmu_data *pmu = irq_get_handler_data(irq);
+ struct pmu_data *pmu = irq_desc_get_handler_data(desc);
struct irq_chip_generic *gc = pmu->irq_gc;
struct irq_domain *domain = pmu->irq_domain;
void __iomem *base = gc->reg_base;
@@ -232,7 +232,7 @@ static void pmu_irq_handler(unsigned int irq, struct irq_desc *desc)
u32 done = ~0;
if (stat == 0) {
- handle_bad_irq(irq, desc);
+ handle_bad_irq(desc);
return;
}
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index bf9ed380bb1c..63318e2afba1 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1720,6 +1720,7 @@ static int atmel_spi_runtime_resume(struct device *dev)
return clk_prepare_enable(as->clk);
}
+#ifdef CONFIG_PM_SLEEP
static int atmel_spi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
@@ -1756,6 +1757,7 @@ static int atmel_spi_resume(struct device *dev)
return ret;
}
+#endif
static const struct dev_pm_ops atmel_spi_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume)
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index e7874a6171ec..3e8eeb23d4e9 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -386,14 +386,14 @@ static bool bcm2835_spi_can_dma(struct spi_master *master,
/* otherwise we only allow transfers within the same page
* to avoid wasting time on dma_mapping when it is not practical
*/
- if (((size_t)tfr->tx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) {
+ if (((size_t)tfr->tx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) {
dev_warn_once(&spi->dev,
"Unaligned spi tx-transfer bridging page\n");
return false;
}
- if (((size_t)tfr->rx_buf & PAGE_MASK) + tfr->len > PAGE_SIZE) {
+ if (((size_t)tfr->rx_buf & (PAGE_SIZE - 1)) + tfr->len > PAGE_SIZE) {
dev_warn_once(&spi->dev,
- "Unaligned spi tx-transfer bridging page\n");
+ "Unaligned spi rx-transfer bridging page\n");
return false;
}
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
index 5468fc70dbf8..2465259f6241 100644
--- a/drivers/spi/spi-meson-spifc.c
+++ b/drivers/spi/spi-meson-spifc.c
@@ -444,6 +444,7 @@ static const struct of_device_id meson_spifc_dt_match[] = {
{ .compatible = "amlogic,meson6-spifc", },
{ },
};
+MODULE_DEVICE_TABLE(of, meson_spifc_dt_match);
static struct platform_driver meson_spifc_driver = {
.probe = meson_spifc_probe,
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 5f6315c47920..ecb6c58238c4 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -85,7 +85,7 @@ struct mtk_spi {
void __iomem *base;
u32 state;
u32 pad_sel;
- struct clk *spi_clk, *parent_clk;
+ struct clk *parent_clk, *sel_clk, *spi_clk;
struct spi_transfer *cur_transfer;
u32 xfer_len;
struct scatterlist *tx_sgl, *rx_sgl;
@@ -173,22 +173,6 @@ static void mtk_spi_config(struct mtk_spi *mdata,
writel(mdata->pad_sel, mdata->base + SPI_PAD_SEL_REG);
}
-static int mtk_spi_prepare_hardware(struct spi_master *master)
-{
- struct spi_transfer *trans;
- struct mtk_spi *mdata = spi_master_get_devdata(master);
- struct spi_message *msg = master->cur_msg;
-
- trans = list_first_entry(&msg->transfers, struct spi_transfer,
- transfer_list);
- if (!trans->cs_change) {
- mdata->state = MTK_SPI_IDLE;
- mtk_spi_reset(mdata);
- }
-
- return 0;
-}
-
static int mtk_spi_prepare_message(struct spi_master *master,
struct spi_message *msg)
{
@@ -228,11 +212,15 @@ static void mtk_spi_set_cs(struct spi_device *spi, bool enable)
struct mtk_spi *mdata = spi_master_get_devdata(spi->master);
reg_val = readl(mdata->base + SPI_CMD_REG);
- if (!enable)
+ if (!enable) {
reg_val |= SPI_CMD_PAUSE_EN;
- else
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+ } else {
reg_val &= ~SPI_CMD_PAUSE_EN;
- writel(reg_val, mdata->base + SPI_CMD_REG);
+ writel(reg_val, mdata->base + SPI_CMD_REG);
+ mdata->state = MTK_SPI_IDLE;
+ mtk_spi_reset(mdata);
+ }
}
static void mtk_spi_prepare_transfer(struct spi_master *master,
@@ -509,7 +497,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->set_cs = mtk_spi_set_cs;
- master->prepare_transfer_hardware = mtk_spi_prepare_hardware;
master->prepare_message = mtk_spi_prepare_message;
master->transfer_one = mtk_spi_transfer_one;
master->can_dma = mtk_spi_can_dma;
@@ -576,13 +563,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
goto err_put_master;
}
- mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
- if (IS_ERR(mdata->spi_clk)) {
- ret = PTR_ERR(mdata->spi_clk);
- dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
- goto err_put_master;
- }
-
mdata->parent_clk = devm_clk_get(&pdev->dev, "parent-clk");
if (IS_ERR(mdata->parent_clk)) {
ret = PTR_ERR(mdata->parent_clk);
@@ -590,13 +570,27 @@ static int mtk_spi_probe(struct platform_device *pdev)
goto err_put_master;
}
+ mdata->sel_clk = devm_clk_get(&pdev->dev, "sel-clk");
+ if (IS_ERR(mdata->sel_clk)) {
+ ret = PTR_ERR(mdata->sel_clk);
+ dev_err(&pdev->dev, "failed to get sel-clk: %d\n", ret);
+ goto err_put_master;
+ }
+
+ mdata->spi_clk = devm_clk_get(&pdev->dev, "spi-clk");
+ if (IS_ERR(mdata->spi_clk)) {
+ ret = PTR_ERR(mdata->spi_clk);
+ dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
+ goto err_put_master;
+ }
+
ret = clk_prepare_enable(mdata->spi_clk);
if (ret < 0) {
dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
goto err_put_master;
}
- ret = clk_set_parent(mdata->spi_clk, mdata->parent_clk);
+ ret = clk_set_parent(mdata->sel_clk, mdata->parent_clk);
if (ret < 0) {
dev_err(&pdev->dev, "failed to clk_set_parent (%d)\n", ret);
goto err_disable_clk;
@@ -630,7 +624,6 @@ static int mtk_spi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
mtk_spi_reset(mdata);
- clk_disable_unprepare(mdata->spi_clk);
spi_master_put(master);
return 0;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index fdd791977041..a8ef38ebb9c9 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -654,6 +654,10 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
if (!(sccr1_reg & SSCR1_TIE))
mask &= ~SSSR_TFS;
+ /* Ignore RX timeout interrupt if it is disabled */
+ if (!(sccr1_reg & SSCR1_TINTE))
+ mask &= ~SSSR_TINT;
+
if (!(status & mask))
return IRQ_NONE;
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
index 2e32ea2f194f..be6155cba9de 100644
--- a/drivers/spi/spi-xtensa-xtfpga.c
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -34,13 +34,13 @@ struct xtfpga_spi {
static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi,
unsigned addr, u32 val)
{
- iowrite32(val, spi->regs + addr);
+ __raw_writel(val, spi->regs + addr);
}
static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi,
unsigned addr)
{
- return ioread32(spi->regs + addr);
+ return __raw_readl(spi->regs + addr);
}
static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 3abb3903f2ad..a5f53de813d3 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1610,8 +1610,7 @@ static struct class spi_master_class = {
*
* The caller is responsible for assigning the bus number and initializing
* the master's methods before calling spi_register_master(); and (after errors
- * adding the device) calling spi_master_put() and kfree() to prevent a memory
- * leak.
+ * adding the device) calling spi_master_put() to prevent a memory leak.
*/
struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
{
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index fba92a526531..ef008e52f953 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -651,7 +651,8 @@ static int spidev_release(struct inode *inode, struct file *filp)
kfree(spidev->rx_buffer);
spidev->rx_buffer = NULL;
- spidev->speed_hz = spidev->spi->max_speed_hz;
+ if (spidev->spi)
+ spidev->speed_hz = spidev->spi->max_speed_hz;
/* ... after we unbound from the underlying device? */
spin_lock_irq(&spidev->spi_lock);
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index bdfb3c84c3cb..4a3cf9ba152f 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -451,7 +451,7 @@ static void periph_interrupt(struct spmi_pmic_arb_dev *pa, u8 apid)
}
}
-static void pmic_arb_chained_irq(unsigned int irq, struct irq_desc *desc)
+static void pmic_arb_chained_irq(struct irq_desc *desc)
{
struct spmi_pmic_arb_dev *pa = irq_desc_get_handler_data(desc);
struct irq_chip *chip = irq_desc_get_chip(desc);
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index 20288fc53946..8f3ac37bfe12 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -5,5 +5,25 @@ TODO:
- add proper arch dependencies as needed
- audit userspace interfaces to make sure they are sane
+
+ion/
+ - Remove ION_IOC_SYNC: Flushing for devices should be purely a kernel internal
+ interface on top of dma-buf. flush_for_device needs to be added to dma-buf
+ first.
+ - Remove ION_IOC_CUSTOM: Currently used for cache flushing for cpu access in some
+ vendor trees. Should be replaced with an ioctl on the dma-buf to expose the
+ begin/end_cpu_access hooks to userspace.
+ - Clarify the tricks ion plays with explicitly managing coherency behind the
+ dma api's back (this is absolutely needed for high-perf gpu drivers): Add an
+ explicit coherency management mode to flush_for_device to be used by drivers
+ that want to manage caches themselves, and which indicates whether cpu caches
+ need flushing.
+ - With those removed there's probably no use for ION_IOC_IMPORT anymore either
+ since ion would just be the central allocator for shared buffers.
+ - Add dt-binding to expose cma regions as ion heaps, with the rule that any
+ such cma regions must already be used by some device for dma. I.e. ion only
+ exposes existing cma regions and doesn't unnecessarily reserve memory when
+ booting a system which doesn't use ion.
+
Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com>
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 217aa537c4eb..6e8d8392ca38 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1179,13 +1179,13 @@ struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
mutex_unlock(&client->lock);
goto end;
}
- mutex_unlock(&client->lock);
handle = ion_handle_create(client, buffer);
- if (IS_ERR(handle))
+ if (IS_ERR(handle)) {
+ mutex_unlock(&client->lock);
goto end;
+ }
- mutex_lock(&client->lock);
ret = ion_handle_add(client, handle);
mutex_unlock(&client->lock);
if (ret) {
diff --git a/drivers/staging/fbtft/fb_uc1611.c b/drivers/staging/fbtft/fb_uc1611.c
index 32f3a9d921d6..5cafa50d1fac 100644
--- a/drivers/staging/fbtft/fb_uc1611.c
+++ b/drivers/staging/fbtft/fb_uc1611.c
@@ -76,7 +76,7 @@ static int init_display(struct fbtft_par *par)
/* Set CS active high */
par->spi->mode |= SPI_CS_HIGH;
- ret = par->spi->master->setup(par->spi);
+ ret = spi_setup(par->spi);
if (ret) {
dev_err(par->info->device, "Could not set SPI_CS_HIGH\n");
return ret;
diff --git a/drivers/staging/fbtft/fb_watterott.c b/drivers/staging/fbtft/fb_watterott.c
index 88fb2c0132d5..8eae6ef25846 100644
--- a/drivers/staging/fbtft/fb_watterott.c
+++ b/drivers/staging/fbtft/fb_watterott.c
@@ -169,7 +169,7 @@ static int init_display(struct fbtft_par *par)
/* enable SPI interface by having CS and MOSI low during reset */
save_mode = par->spi->mode;
par->spi->mode |= SPI_CS_HIGH;
- ret = par->spi->master->setup(par->spi); /* set CS inactive low */
+ ret = spi_setup(par->spi); /* set CS inactive low */
if (ret) {
dev_err(par->info->device, "Could not set SPI_CS_HIGH\n");
return ret;
@@ -180,7 +180,7 @@ static int init_display(struct fbtft_par *par)
par->fbtftops.reset(par);
mdelay(1000);
par->spi->mode = save_mode;
- ret = par->spi->master->setup(par->spi);
+ ret = spi_setup(par->spi);
if (ret) {
dev_err(par->info->device, "Could not restore SPI mode\n");
return ret;
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index 23392eb6799e..7f5fa3d1cab0 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -1436,15 +1436,11 @@ int fbtft_probe_common(struct fbtft_display *display,
/* 9-bit SPI setup */
if (par->spi && display->buswidth == 9) {
- par->spi->bits_per_word = 9;
- ret = par->spi->master->setup(par->spi);
- if (ret) {
+ if (par->spi->master->bits_per_word_mask & SPI_BPW_MASK(9)) {
+ par->spi->bits_per_word = 9;
+ } else {
dev_warn(&par->spi->dev,
"9-bit SPI not available, emulating using 8-bit.\n");
- par->spi->bits_per_word = 8;
- ret = par->spi->master->setup(par->spi);
- if (ret)
- goto out_release;
/* allocate buffer with room for dc bits */
par->extra = devm_kzalloc(par->info->device,
par->txbuf.len + (par->txbuf.len / 8) + 8,
diff --git a/drivers/staging/fbtft/flexfb.c b/drivers/staging/fbtft/flexfb.c
index c763efc5de7d..3f380a0086c3 100644
--- a/drivers/staging/fbtft/flexfb.c
+++ b/drivers/staging/fbtft/flexfb.c
@@ -463,15 +463,12 @@ static int flexfb_probe_common(struct spi_device *sdev,
}
par->fbtftops.write_register = fbtft_write_reg8_bus9;
par->fbtftops.write_vmem = fbtft_write_vmem16_bus9;
- sdev->bits_per_word = 9;
- ret = sdev->master->setup(sdev);
- if (ret) {
+ if (par->spi->master->bits_per_word_mask
+ & SPI_BPW_MASK(9)) {
+ par->spi->bits_per_word = 9;
+ } else {
dev_warn(dev,
"9-bit SPI not available, emulating using 8-bit.\n");
- sdev->bits_per_word = 8;
- ret = sdev->master->setup(sdev);
- if (ret)
- goto out_release;
/* allocate buffer with room for dc bits */
par->extra = devm_kzalloc(par->info->device,
par->txbuf.len + (par->txbuf.len / 8) + 8,
diff --git a/drivers/staging/lustre/README.txt b/drivers/staging/lustre/README.txt
index cf0ca50ff83b..0676243eea9e 100644
--- a/drivers/staging/lustre/README.txt
+++ b/drivers/staging/lustre/README.txt
@@ -14,10 +14,8 @@ Unlike shared disk storage cluster filesystems (e.g. OCFS2, GFS, GPFS),
Lustre has independent Metadata and Data servers that clients can access
in parallel to maximize performance.
-In order to use Lustre client you will need to download lustre client
-tools from
-https://downloads.hpdd.intel.com/public/lustre/latest-feature-release/
-the package name is lustre-client.
+In order to use the Lustre client you will need to download the "lustre-client"
+package that contains the userspace tools from http://lustre.org/download/
You will need to install and configure your Lustre servers separately.
@@ -76,12 +74,10 @@ Mount Options
More Information
================
-You can get more information at
-OpenSFS website: http://lustre.opensfs.org/about/
-Intel HPDD wiki: https://wiki.hpdd.intel.com
+You can get more information at the Lustre website: http://wiki.lustre.org/
-Out of tree Lustre client and server code is available at:
-http://git.whamcloud.com/fs/lustre-release.git
+Source for the userspace tools and out-of-tree client and server code
+is available at: http://git.hpdd.intel.com/fs/lustre-release.git
Latest binary packages:
-http://lustre.opensfs.org/download-lustre/
+http://lustre.org/download/
diff --git a/drivers/staging/most/Kconfig b/drivers/staging/most/Kconfig
index d50de03de7b9..0b9b9b539f70 100644
--- a/drivers/staging/most/Kconfig
+++ b/drivers/staging/most/Kconfig
@@ -1,5 +1,6 @@
menuconfig MOST
tristate "MOST driver"
+ depends on HAS_DMA
select MOSTCORE
default n
---help---
diff --git a/drivers/staging/most/hdm-dim2/Kconfig b/drivers/staging/most/hdm-dim2/Kconfig
index 1d4ad1d67758..fc548769479b 100644
--- a/drivers/staging/most/hdm-dim2/Kconfig
+++ b/drivers/staging/most/hdm-dim2/Kconfig
@@ -5,6 +5,7 @@
config HDM_DIM2
tristate "DIM2 HDM"
depends on AIM_NETWORK
+ depends on HAS_IOMEM
---help---
Say Y here if you want to connect via MediaLB to network transceiver.
diff --git a/drivers/staging/most/hdm-usb/Kconfig b/drivers/staging/most/hdm-usb/Kconfig
index a482c3fdf34b..ec1546312ee6 100644
--- a/drivers/staging/most/hdm-usb/Kconfig
+++ b/drivers/staging/most/hdm-usb/Kconfig
@@ -4,7 +4,7 @@
config HDM_USB
tristate "USB HDM"
- depends on USB
+ depends on USB && NET
select AIM_NETWORK
---help---
Say Y here if you want to connect via USB to network transceiver.
diff --git a/drivers/staging/most/mostcore/Kconfig b/drivers/staging/most/mostcore/Kconfig
index 38abf1b21b66..47172546d728 100644
--- a/drivers/staging/most/mostcore/Kconfig
+++ b/drivers/staging/most/mostcore/Kconfig
@@ -4,6 +4,7 @@
config MOSTCORE
tristate "MOST Core"
+ depends on HAS_DMA
---help---
Say Y here if you want to enable MOST support.
diff --git a/drivers/staging/rdma/Kconfig b/drivers/staging/rdma/Kconfig
index cf5fe9bb87a1..d7f62359d743 100644
--- a/drivers/staging/rdma/Kconfig
+++ b/drivers/staging/rdma/Kconfig
@@ -24,6 +24,8 @@ if STAGING_RDMA
source "drivers/staging/rdma/amso1100/Kconfig"
+source "drivers/staging/rdma/ehca/Kconfig"
+
source "drivers/staging/rdma/hfi1/Kconfig"
source "drivers/staging/rdma/ipath/Kconfig"
diff --git a/drivers/staging/rdma/Makefile b/drivers/staging/rdma/Makefile
index cbd915ac7f20..139d78ef2c24 100644
--- a/drivers/staging/rdma/Makefile
+++ b/drivers/staging/rdma/Makefile
@@ -1,4 +1,5 @@
# Entries for RDMA_STAGING tree
obj-$(CONFIG_INFINIBAND_AMSO1100) += amso1100/
+obj-$(CONFIG_INFINIBAND_EHCA) += ehca/
obj-$(CONFIG_INFINIBAND_HFI1) += hfi1/
obj-$(CONFIG_INFINIBAND_IPATH) += ipath/
diff --git a/drivers/infiniband/hw/ehca/Kconfig b/drivers/staging/rdma/ehca/Kconfig
index 59f807d8d58e..3fadd2ad6426 100644
--- a/drivers/infiniband/hw/ehca/Kconfig
+++ b/drivers/staging/rdma/ehca/Kconfig
@@ -2,7 +2,8 @@ config INFINIBAND_EHCA
tristate "eHCA support"
depends on IBMEBUS
---help---
- This driver supports the IBM pSeries eHCA InfiniBand adapter.
+ This driver supports the deprecated IBM pSeries eHCA InfiniBand
+ adapter.
To compile the driver as a module, choose M here. The module
will be called ib_ehca.
diff --git a/drivers/infiniband/hw/ehca/Makefile b/drivers/staging/rdma/ehca/Makefile
index 74d284e46a40..74d284e46a40 100644
--- a/drivers/infiniband/hw/ehca/Makefile
+++ b/drivers/staging/rdma/ehca/Makefile
diff --git a/drivers/staging/rdma/ehca/TODO b/drivers/staging/rdma/ehca/TODO
new file mode 100644
index 000000000000..199a4a600142
--- /dev/null
+++ b/drivers/staging/rdma/ehca/TODO
@@ -0,0 +1,4 @@
+9/2015
+
+The ehca driver has been deprecated and moved to drivers/staging/rdma.
+It will be removed in the 4.6 merge window.
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/staging/rdma/ehca/ehca_av.c
index 465926319f3d..465926319f3d 100644
--- a/drivers/infiniband/hw/ehca/ehca_av.c
+++ b/drivers/staging/rdma/ehca/ehca_av.c
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/staging/rdma/ehca/ehca_classes.h
index bd45e0f3923f..bd45e0f3923f 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/staging/rdma/ehca/ehca_classes.h
diff --git a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h b/drivers/staging/rdma/ehca/ehca_classes_pSeries.h
index 689c35786dd2..689c35786dd2 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
+++ b/drivers/staging/rdma/ehca/ehca_classes_pSeries.h
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/staging/rdma/ehca/ehca_cq.c
index 9b68b175069b..9b68b175069b 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/staging/rdma/ehca/ehca_cq.c
diff --git a/drivers/infiniband/hw/ehca/ehca_eq.c b/drivers/staging/rdma/ehca/ehca_eq.c
index 90da6747d395..90da6747d395 100644
--- a/drivers/infiniband/hw/ehca/ehca_eq.c
+++ b/drivers/staging/rdma/ehca/ehca_eq.c
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/staging/rdma/ehca/ehca_hca.c
index e8b1bb65797a..e8b1bb65797a 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/staging/rdma/ehca/ehca_hca.c
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/staging/rdma/ehca/ehca_irq.c
index 8615d7cf7e01..8615d7cf7e01 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/staging/rdma/ehca/ehca_irq.c
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.h b/drivers/staging/rdma/ehca/ehca_irq.h
index 5370199f08c7..5370199f08c7 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.h
+++ b/drivers/staging/rdma/ehca/ehca_irq.h
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/staging/rdma/ehca/ehca_iverbs.h
index 80e6a3d5df3e..80e6a3d5df3e 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/staging/rdma/ehca/ehca_iverbs.h
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/staging/rdma/ehca/ehca_main.c
index 8246418cd4e0..8246418cd4e0 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/staging/rdma/ehca/ehca_main.c
diff --git a/drivers/infiniband/hw/ehca/ehca_mcast.c b/drivers/staging/rdma/ehca/ehca_mcast.c
index cec181532924..cec181532924 100644
--- a/drivers/infiniband/hw/ehca/ehca_mcast.c
+++ b/drivers/staging/rdma/ehca/ehca_mcast.c
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/staging/rdma/ehca/ehca_mrmw.c
index f914b30999f8..f914b30999f8 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/staging/rdma/ehca/ehca_mrmw.c
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.h b/drivers/staging/rdma/ehca/ehca_mrmw.h
index 50d8b51306dd..50d8b51306dd 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.h
+++ b/drivers/staging/rdma/ehca/ehca_mrmw.h
diff --git a/drivers/infiniband/hw/ehca/ehca_pd.c b/drivers/staging/rdma/ehca/ehca_pd.c
index 351577a6670a..351577a6670a 100644
--- a/drivers/infiniband/hw/ehca/ehca_pd.c
+++ b/drivers/staging/rdma/ehca/ehca_pd.c
diff --git a/drivers/infiniband/hw/ehca/ehca_qes.h b/drivers/staging/rdma/ehca/ehca_qes.h
index 90c4efa67586..90c4efa67586 100644
--- a/drivers/infiniband/hw/ehca/ehca_qes.h
+++ b/drivers/staging/rdma/ehca/ehca_qes.h
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/staging/rdma/ehca/ehca_qp.c
index 2e89356c46fa..2e89356c46fa 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/staging/rdma/ehca/ehca_qp.c
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/staging/rdma/ehca/ehca_reqs.c
index 47f94984353d..47f94984353d 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/staging/rdma/ehca/ehca_reqs.c
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/staging/rdma/ehca/ehca_sqp.c
index 376b031c2c7f..376b031c2c7f 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/staging/rdma/ehca/ehca_sqp.c
diff --git a/drivers/infiniband/hw/ehca/ehca_tools.h b/drivers/staging/rdma/ehca/ehca_tools.h
index d280b12aae64..d280b12aae64 100644
--- a/drivers/infiniband/hw/ehca/ehca_tools.h
+++ b/drivers/staging/rdma/ehca/ehca_tools.h
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/staging/rdma/ehca/ehca_uverbs.c
index 1a1d5d99fcf9..1a1d5d99fcf9 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/staging/rdma/ehca/ehca_uverbs.c
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/staging/rdma/ehca/hcp_if.c
index 89517ffb4389..89517ffb4389 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/staging/rdma/ehca/hcp_if.c
diff --git a/drivers/infiniband/hw/ehca/hcp_if.h b/drivers/staging/rdma/ehca/hcp_if.h
index a46e514c367b..a46e514c367b 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.h
+++ b/drivers/staging/rdma/ehca/hcp_if.h
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/staging/rdma/ehca/hcp_phyp.c
index 077376ff3d28..077376ff3d28 100644
--- a/drivers/infiniband/hw/ehca/hcp_phyp.c
+++ b/drivers/staging/rdma/ehca/hcp_phyp.c
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.h b/drivers/staging/rdma/ehca/hcp_phyp.h
index d1b029910249..d1b029910249 100644
--- a/drivers/infiniband/hw/ehca/hcp_phyp.h
+++ b/drivers/staging/rdma/ehca/hcp_phyp.h
diff --git a/drivers/infiniband/hw/ehca/hipz_fns.h b/drivers/staging/rdma/ehca/hipz_fns.h
index 9dac93d02140..9dac93d02140 100644
--- a/drivers/infiniband/hw/ehca/hipz_fns.h
+++ b/drivers/staging/rdma/ehca/hipz_fns.h
diff --git a/drivers/infiniband/hw/ehca/hipz_fns_core.h b/drivers/staging/rdma/ehca/hipz_fns_core.h
index 868735fd3187..868735fd3187 100644
--- a/drivers/infiniband/hw/ehca/hipz_fns_core.h
+++ b/drivers/staging/rdma/ehca/hipz_fns_core.h
diff --git a/drivers/infiniband/hw/ehca/hipz_hw.h b/drivers/staging/rdma/ehca/hipz_hw.h
index bf996c7acc42..bf996c7acc42 100644
--- a/drivers/infiniband/hw/ehca/hipz_hw.h
+++ b/drivers/staging/rdma/ehca/hipz_hw.h
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/staging/rdma/ehca/ipz_pt_fn.c
index 7ffc748cb973..7ffc748cb973 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/staging/rdma/ehca/ipz_pt_fn.c
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/staging/rdma/ehca/ipz_pt_fn.h
index a801274ea337..a801274ea337 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.h
+++ b/drivers/staging/rdma/ehca/ipz_pt_fn.h
diff --git a/drivers/staging/rdma/hfi1/chip.c b/drivers/staging/rdma/hfi1/chip.c
index 654eafef1d30..aa58e597df06 100644
--- a/drivers/staging/rdma/hfi1/chip.c
+++ b/drivers/staging/rdma/hfi1/chip.c
@@ -2710,7 +2710,7 @@ int acquire_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
if (sleep_ok) {
mutex_lock(&ppd->hls_lock);
} else {
- while (mutex_trylock(&ppd->hls_lock) == EBUSY)
+ while (!mutex_trylock(&ppd->hls_lock))
udelay(1);
}
@@ -2758,7 +2758,7 @@ int release_lcb_access(struct hfi1_devdata *dd, int sleep_ok)
if (sleep_ok) {
mutex_lock(&dd->pport->hls_lock);
} else {
- while (mutex_trylock(&dd->pport->hls_lock) == EBUSY)
+ while (!mutex_trylock(&dd->pport->hls_lock))
udelay(1);
}
diff --git a/drivers/staging/rdma/hfi1/device.c b/drivers/staging/rdma/hfi1/device.c
index 07c87a87775f..bc26a5392712 100644
--- a/drivers/staging/rdma/hfi1/device.c
+++ b/drivers/staging/rdma/hfi1/device.c
@@ -57,11 +57,13 @@
#include "device.h"
static struct class *class;
+static struct class *user_class;
static dev_t hfi1_dev;
int hfi1_cdev_init(int minor, const char *name,
const struct file_operations *fops,
- struct cdev *cdev, struct device **devp)
+ struct cdev *cdev, struct device **devp,
+ bool user_accessible)
{
const dev_t dev = MKDEV(MAJOR(hfi1_dev), minor);
struct device *device = NULL;
@@ -78,7 +80,11 @@ int hfi1_cdev_init(int minor, const char *name,
goto done;
}
- device = device_create(class, NULL, dev, NULL, "%s", name);
+ if (user_accessible)
+ device = device_create(user_class, NULL, dev, NULL, "%s", name);
+ else
+ device = device_create(class, NULL, dev, NULL, "%s", name);
+
if (!IS_ERR(device))
goto done;
ret = PTR_ERR(device);
@@ -110,6 +116,26 @@ const char *class_name(void)
return hfi1_class_name;
}
+static char *hfi1_devnode(struct device *dev, umode_t *mode)
+{
+ if (mode)
+ *mode = 0600;
+ return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
+}
+
+static const char *hfi1_class_name_user = "hfi1_user";
+const char *class_name_user(void)
+{
+ return hfi1_class_name_user;
+}
+
+static char *hfi1_user_devnode(struct device *dev, umode_t *mode)
+{
+ if (mode)
+ *mode = 0666;
+ return kasprintf(GFP_KERNEL, "%s", dev_name(dev));
+}
+
int __init dev_init(void)
{
int ret;
@@ -125,7 +151,22 @@ int __init dev_init(void)
ret = PTR_ERR(class);
pr_err("Could not create device class (err %d)\n", -ret);
unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
+ goto done;
}
+ class->devnode = hfi1_devnode;
+
+ user_class = class_create(THIS_MODULE, class_name_user());
+ if (IS_ERR(user_class)) {
+ ret = PTR_ERR(user_class);
+ pr_err("Could not create device class for user accessible files (err %d)\n",
+ -ret);
+ class_destroy(class);
+ class = NULL;
+ user_class = NULL;
+ unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
+ goto done;
+ }
+ user_class->devnode = hfi1_user_devnode;
done:
return ret;
@@ -133,10 +174,11 @@ done:
void dev_cleanup(void)
{
- if (class) {
- class_destroy(class);
- class = NULL;
- }
+ class_destroy(class);
+ class = NULL;
+
+ class_destroy(user_class);
+ user_class = NULL;
unregister_chrdev_region(hfi1_dev, HFI1_NMINORS);
}
diff --git a/drivers/staging/rdma/hfi1/device.h b/drivers/staging/rdma/hfi1/device.h
index 98caecd3d807..2850ff739d81 100644
--- a/drivers/staging/rdma/hfi1/device.h
+++ b/drivers/staging/rdma/hfi1/device.h
@@ -52,7 +52,8 @@
int hfi1_cdev_init(int minor, const char *name,
const struct file_operations *fops,
- struct cdev *cdev, struct device **devp);
+ struct cdev *cdev, struct device **devp,
+ bool user_accessible);
void hfi1_cdev_cleanup(struct cdev *cdev, struct device **devp);
const char *class_name(void);
int __init dev_init(void);
diff --git a/drivers/staging/rdma/hfi1/diag.c b/drivers/staging/rdma/hfi1/diag.c
index 6777d6b659cf..3e8d5ac4c626 100644
--- a/drivers/staging/rdma/hfi1/diag.c
+++ b/drivers/staging/rdma/hfi1/diag.c
@@ -292,7 +292,7 @@ int hfi1_diag_add(struct hfi1_devdata *dd)
if (atomic_inc_return(&diagpkt_count) == 1) {
ret = hfi1_cdev_init(HFI1_DIAGPKT_MINOR, name,
&diagpkt_file_ops, &diagpkt_cdev,
- &diagpkt_device);
+ &diagpkt_device, false);
}
return ret;
@@ -592,7 +592,8 @@ static int hfi1_snoop_add(struct hfi1_devdata *dd, const char *name)
ret = hfi1_cdev_init(HFI1_SNOOP_CAPTURE_BASE + dd->unit, name,
&snoop_file_ops,
- &dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev);
+ &dd->hfi1_snoop.cdev, &dd->hfi1_snoop.class_dev,
+ false);
if (ret) {
dd_dev_err(dd, "Couldn't create %s device: %d", name, ret);
@@ -1012,11 +1013,10 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
case HFI1_SNOOP_IOCSETLINKSTATE_EXTRA:
memset(&link_info, 0, sizeof(link_info));
- ret = copy_from_user(&link_info,
+ if (copy_from_user(&link_info,
(struct hfi1_link_info __user *)arg,
- sizeof(link_info));
- if (ret)
- break;
+ sizeof(link_info)))
+ ret = -EFAULT;
value = link_info.port_state;
index = link_info.port_number;
@@ -1080,9 +1080,10 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
case HFI1_SNOOP_IOCGETLINKSTATE_EXTRA:
if (cmd == HFI1_SNOOP_IOCGETLINKSTATE_EXTRA) {
memset(&link_info, 0, sizeof(link_info));
- ret = copy_from_user(&link_info,
+ if (copy_from_user(&link_info,
(struct hfi1_link_info __user *)arg,
- sizeof(link_info));
+ sizeof(link_info)))
+ ret = -EFAULT;
index = link_info.port_number;
} else {
ret = __get_user(index, (int __user *) arg);
@@ -1114,9 +1115,10 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
ppd->link_speed_active;
link_info.link_width_active =
ppd->link_width_active;
- ret = copy_to_user(
+ if (copy_to_user(
(struct hfi1_link_info __user *)arg,
- &link_info, sizeof(link_info));
+ &link_info, sizeof(link_info)))
+ ret = -EFAULT;
} else {
ret = __put_user(value, (int __user *)arg);
}
@@ -1142,10 +1144,9 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
snoop_dbg("Setting filter");
/* just copy command structure */
argp = (unsigned long *)arg;
- ret = copy_from_user(&filter_cmd, (void __user *)argp,
- sizeof(filter_cmd));
- if (ret < 0) {
- pr_alert("Error copying filter command\n");
+ if (copy_from_user(&filter_cmd, (void __user *)argp,
+ sizeof(filter_cmd))) {
+ ret = -EFAULT;
break;
}
if (filter_cmd.opcode >= HFI1_MAX_FILTERS) {
@@ -1167,12 +1168,11 @@ static long hfi1_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
break;
}
/* copy remaining data from userspace */
- ret = copy_from_user((u8 *)filter_value,
+ if (copy_from_user((u8 *)filter_value,
(void __user *)filter_cmd.value_ptr,
- filter_cmd.length);
- if (ret < 0) {
+ filter_cmd.length)) {
kfree(filter_value);
- pr_alert("Error copying filter data\n");
+ ret = -EFAULT;
break;
}
/* Drain packets first */
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
index 469861750b76..72d38500d8ce 100644
--- a/drivers/staging/rdma/hfi1/file_ops.c
+++ b/drivers/staging/rdma/hfi1/file_ops.c
@@ -1181,6 +1181,7 @@ static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
struct hfi1_filedata *fd = fp->private_data;
int ret = 0;
+ memset(&cinfo, 0, sizeof(cinfo));
ret = hfi1_get_base_kinfo(uctxt, &cinfo);
if (ret < 0)
goto done;
@@ -2089,14 +2090,16 @@ static int user_add(struct hfi1_devdata *dd)
if (atomic_inc_return(&user_count) == 1) {
ret = hfi1_cdev_init(0, class_name(), &hfi1_file_ops,
- &wildcard_cdev, &wildcard_device);
+ &wildcard_cdev, &wildcard_device,
+ true);
if (ret)
goto done;
}
snprintf(name, sizeof(name), "%s_%d", class_name(), dd->unit);
ret = hfi1_cdev_init(dd->unit + 1, name, &hfi1_file_ops,
- &dd->user_cdev, &dd->user_device);
+ &dd->user_cdev, &dd->user_device,
+ true);
if (ret)
goto done;
@@ -2104,7 +2107,8 @@ static int user_add(struct hfi1_devdata *dd)
snprintf(name, sizeof(name),
"%s_ui%d", class_name(), dd->unit);
ret = hfi1_cdev_init(dd->unit + UI_OFFSET, name, &ui_file_ops,
- &dd->ui_cdev, &dd->ui_device);
+ &dd->ui_cdev, &dd->ui_device,
+ false);
if (ret)
goto done;
}
diff --git a/drivers/staging/rdma/hfi1/mad.c b/drivers/staging/rdma/hfi1/mad.c
index 37269eb90c34..b2c1b72d38ce 100644
--- a/drivers/staging/rdma/hfi1/mad.c
+++ b/drivers/staging/rdma/hfi1/mad.c
@@ -1717,9 +1717,9 @@ static int __subn_get_opa_psi(struct opa_smp *smp, u32 am, u8 *data,
psi->port_states.portphysstate_portstate =
(hfi1_ibphys_portstate(ppd) << 4) | (lstate & 0xf);
psi->link_width_downgrade_tx_active =
- ppd->link_width_downgrade_tx_active;
+ cpu_to_be16(ppd->link_width_downgrade_tx_active);
psi->link_width_downgrade_rx_active =
- ppd->link_width_downgrade_rx_active;
+ cpu_to_be16(ppd->link_width_downgrade_rx_active);
if (resp_len)
*resp_len += sizeof(struct opa_port_state_info);
diff --git a/drivers/staging/rdma/hfi1/sdma.c b/drivers/staging/rdma/hfi1/sdma.c
index a8c903caecce..aecd1a74741c 100644
--- a/drivers/staging/rdma/hfi1/sdma.c
+++ b/drivers/staging/rdma/hfi1/sdma.c
@@ -737,7 +737,7 @@ u16 sdma_get_descq_cnt(void)
*/
if (!is_power_of_2(count))
return SDMA_DESCQ_CNT;
- if (count < 64 && count > 32768)
+ if (count < 64 || count > 32768)
return SDMA_DESCQ_CNT;
return count;
}
@@ -1848,7 +1848,7 @@ static void dump_sdma_state(struct sdma_engine *sde)
dd_dev_err(sde->dd,
"\taidx: %u amode: %u alen: %u\n",
(u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
- >> SDMA_DESC1_HEADER_INDEX_MASK),
+ >> SDMA_DESC1_HEADER_INDEX_SHIFT),
(u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
>> SDMA_DESC1_HEADER_MODE_SHIFT),
(u8)((desc[1] & SDMA_DESC1_HEADER_DWS_SMASK)
@@ -1926,7 +1926,7 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde)
if (desc[0] & SDMA_DESC0_FIRST_DESC_FLAG)
seq_printf(s, "\t\tahgidx: %u ahgmode: %u\n",
(u8)((desc[1] & SDMA_DESC1_HEADER_INDEX_SMASK)
- >> SDMA_DESC1_HEADER_INDEX_MASK),
+ >> SDMA_DESC1_HEADER_INDEX_SHIFT),
(u8)((desc[1] & SDMA_DESC1_HEADER_MODE_SMASK)
>> SDMA_DESC1_HEADER_MODE_SHIFT));
head = (head + 1) & sde->sdma_mask;
diff --git a/drivers/staging/rdma/hfi1/sdma.h b/drivers/staging/rdma/hfi1/sdma.h
index 1e613fcd8f4c..496086903891 100644
--- a/drivers/staging/rdma/hfi1/sdma.h
+++ b/drivers/staging/rdma/hfi1/sdma.h
@@ -109,53 +109,53 @@
/*
* Bits defined in the send DMA descriptor.
*/
-#define SDMA_DESC0_FIRST_DESC_FLAG (1ULL<<63)
-#define SDMA_DESC0_LAST_DESC_FLAG (1ULL<<62)
+#define SDMA_DESC0_FIRST_DESC_FLAG (1ULL << 63)
+#define SDMA_DESC0_LAST_DESC_FLAG (1ULL << 62)
#define SDMA_DESC0_BYTE_COUNT_SHIFT 48
#define SDMA_DESC0_BYTE_COUNT_WIDTH 14
#define SDMA_DESC0_BYTE_COUNT_MASK \
- ((1ULL<<SDMA_DESC0_BYTE_COUNT_WIDTH)-1ULL)
+ ((1ULL << SDMA_DESC0_BYTE_COUNT_WIDTH) - 1)
#define SDMA_DESC0_BYTE_COUNT_SMASK \
- (SDMA_DESC0_BYTE_COUNT_MASK<<SDMA_DESC0_BYTE_COUNT_SHIFT)
+ (SDMA_DESC0_BYTE_COUNT_MASK << SDMA_DESC0_BYTE_COUNT_SHIFT)
#define SDMA_DESC0_PHY_ADDR_SHIFT 0
#define SDMA_DESC0_PHY_ADDR_WIDTH 48
#define SDMA_DESC0_PHY_ADDR_MASK \
- ((1ULL<<SDMA_DESC0_PHY_ADDR_WIDTH)-1ULL)
+ ((1ULL << SDMA_DESC0_PHY_ADDR_WIDTH) - 1)
#define SDMA_DESC0_PHY_ADDR_SMASK \
- (SDMA_DESC0_PHY_ADDR_MASK<<SDMA_DESC0_PHY_ADDR_SHIFT)
+ (SDMA_DESC0_PHY_ADDR_MASK << SDMA_DESC0_PHY_ADDR_SHIFT)
#define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32
#define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32
#define SDMA_DESC1_HEADER_UPDATE1_MASK \
- ((1ULL<<SDMA_DESC1_HEADER_UPDATE1_WIDTH)-1ULL)
+ ((1ULL << SDMA_DESC1_HEADER_UPDATE1_WIDTH) - 1)
#define SDMA_DESC1_HEADER_UPDATE1_SMASK \
- (SDMA_DESC1_HEADER_UPDATE1_MASK<<SDMA_DESC1_HEADER_UPDATE1_SHIFT)
+ (SDMA_DESC1_HEADER_UPDATE1_MASK << SDMA_DESC1_HEADER_UPDATE1_SHIFT)
#define SDMA_DESC1_HEADER_MODE_SHIFT 13
#define SDMA_DESC1_HEADER_MODE_WIDTH 3
#define SDMA_DESC1_HEADER_MODE_MASK \
- ((1ULL<<SDMA_DESC1_HEADER_MODE_WIDTH)-1ULL)
+ ((1ULL << SDMA_DESC1_HEADER_MODE_WIDTH) - 1)
#define SDMA_DESC1_HEADER_MODE_SMASK \
- (SDMA_DESC1_HEADER_MODE_MASK<<SDMA_DESC1_HEADER_MODE_SHIFT)
+ (SDMA_DESC1_HEADER_MODE_MASK << SDMA_DESC1_HEADER_MODE_SHIFT)
#define SDMA_DESC1_HEADER_INDEX_SHIFT 8
#define SDMA_DESC1_HEADER_INDEX_WIDTH 5
#define SDMA_DESC1_HEADER_INDEX_MASK \
- ((1ULL<<SDMA_DESC1_HEADER_INDEX_WIDTH)-1ULL)
+ ((1ULL << SDMA_DESC1_HEADER_INDEX_WIDTH) - 1)
#define SDMA_DESC1_HEADER_INDEX_SMASK \
- (SDMA_DESC1_HEADER_INDEX_MASK<<SDMA_DESC1_HEADER_INDEX_SHIFT)
+ (SDMA_DESC1_HEADER_INDEX_MASK << SDMA_DESC1_HEADER_INDEX_SHIFT)
#define SDMA_DESC1_HEADER_DWS_SHIFT 4
#define SDMA_DESC1_HEADER_DWS_WIDTH 4
#define SDMA_DESC1_HEADER_DWS_MASK \
- ((1ULL<<SDMA_DESC1_HEADER_DWS_WIDTH)-1ULL)
+ ((1ULL << SDMA_DESC1_HEADER_DWS_WIDTH) - 1)
#define SDMA_DESC1_HEADER_DWS_SMASK \
- (SDMA_DESC1_HEADER_DWS_MASK<<SDMA_DESC1_HEADER_DWS_SHIFT)
+ (SDMA_DESC1_HEADER_DWS_MASK << SDMA_DESC1_HEADER_DWS_SHIFT)
#define SDMA_DESC1_GENERATION_SHIFT 2
#define SDMA_DESC1_GENERATION_WIDTH 2
#define SDMA_DESC1_GENERATION_MASK \
- ((1ULL<<SDMA_DESC1_GENERATION_WIDTH)-1ULL)
+ ((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1)
#define SDMA_DESC1_GENERATION_SMASK \
- (SDMA_DESC1_GENERATION_MASK<<SDMA_DESC1_GENERATION_SHIFT)
-#define SDMA_DESC1_INT_REQ_FLAG (1ULL<<1)
-#define SDMA_DESC1_HEAD_TO_HOST_FLAG (1ULL<<0)
+ (SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT)
+#define SDMA_DESC1_INT_REQ_FLAG (1ULL << 1)
+#define SDMA_DESC1_HEAD_TO_HOST_FLAG (1ULL << 0)
enum sdma_states {
sdma_state_s00_hw_down,
diff --git a/drivers/staging/rdma/hfi1/verbs.c b/drivers/staging/rdma/hfi1/verbs.c
index 53ac21431542..41bb59eb001c 100644
--- a/drivers/staging/rdma/hfi1/verbs.c
+++ b/drivers/staging/rdma/hfi1/verbs.c
@@ -749,11 +749,13 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
struct verbs_txreq *tx;
tx = kmem_cache_alloc(dev->verbs_txreq_cache, GFP_ATOMIC);
- if (!tx)
+ if (!tx) {
/* call slow path to get the lock */
tx = __get_txreq(dev, qp);
- if (tx)
- tx->qp = qp;
+ if (IS_ERR(tx))
+ return tx;
+ }
+ tx->qp = qp;
return tx;
}
diff --git a/drivers/staging/unisys/visorbus/Makefile b/drivers/staging/unisys/visorbus/Makefile
index fa27ee5f336c..fc790e7592fc 100644
--- a/drivers/staging/unisys/visorbus/Makefile
+++ b/drivers/staging/unisys/visorbus/Makefile
@@ -10,4 +10,3 @@ visorbus-y += visorchipset.o
visorbus-y += periodic_work.o
ccflags-y += -Idrivers/staging/unisys/include
-ccflags-y += -Idrivers/staging/unisys/visorutil
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index 2309f5f2b238..a272b48bab28 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -37,6 +37,8 @@ static int visorbus_debugref;
#define POLLJIFFIES_TESTWORK 100
#define POLLJIFFIES_NORMALCHANNEL 10
+static int busreg_rc = -ENODEV; /* stores the result from bus registration */
+
static int visorbus_uevent(struct device *xdev, struct kobj_uevent_env *env);
static int visorbus_match(struct device *xdev, struct device_driver *xdrv);
static void fix_vbus_dev_info(struct visor_device *visordev);
@@ -863,6 +865,9 @@ int visorbus_register_visor_driver(struct visor_driver *drv)
{
int rc = 0;
+ if (busreg_rc < 0)
+ return -ENODEV; /*can't register on a nonexistent bus*/
+
drv->driver.name = drv->name;
drv->driver.bus = &visorbus_type;
drv->driver.probe = visordriver_probe_device;
@@ -885,6 +890,8 @@ int visorbus_register_visor_driver(struct visor_driver *drv)
if (rc < 0)
return rc;
rc = register_driver_attributes(drv);
+ if (rc < 0)
+ driver_unregister(&drv->driver);
return rc;
}
EXPORT_SYMBOL_GPL(visorbus_register_visor_driver);
@@ -1260,10 +1267,8 @@ remove_bus_instance(struct visor_device *dev)
static int
create_bus_type(void)
{
- int rc = 0;
-
- rc = bus_register(&visorbus_type);
- return rc;
+ busreg_rc = bus_register(&visorbus_type);
+ return busreg_rc;
}
/** Remove the one-and-only one instance of the visor bus type (visorbus_type).
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index 8c9da7ea7845..9d3c1e282062 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1189,16 +1189,16 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
spin_lock_irqsave(&devdata->priv_lock, flags);
atomic_dec(&devdata->num_rcvbuf_in_iovm);
- /* update rcv stats - call it with priv_lock held */
- devdata->net_stats.rx_packets++;
- devdata->net_stats.rx_bytes = skb->len;
-
/* set length to how much was ACTUALLY received -
* NOTE: rcv_done_len includes actual length of data rcvd
* including ethhdr
*/
skb->len = cmdrsp->net.rcv.rcv_done_len;
+ /* update rcv stats - call it with priv_lock held */
+ devdata->net_stats.rx_packets++;
+ devdata->net_stats.rx_bytes += skb->len;
+
/* test enabled while holding lock */
if (!(devdata->enabled && devdata->enab_dis_acked)) {
/* don't process it unless we're in enable mode and until
@@ -1924,13 +1924,16 @@ static int visornic_probe(struct visor_device *dev)
"%s debugfs_create_dir %s failed\n",
__func__, netdev->name);
err = -ENOMEM;
- goto cleanup_xmit_cmdrsp;
+ goto cleanup_register_netdev;
}
dev_info(&dev->device, "%s success netdev=%s\n",
__func__, netdev->name);
return 0;
+cleanup_register_netdev:
+ unregister_netdev(netdev);
+
cleanup_napi_add:
del_timer_sync(&devdata->irq_poll_timer);
netif_napi_del(&devdata->napi);
@@ -2128,8 +2131,9 @@ static int visornic_init(void)
if (!dev_num_pool)
goto cleanup_workqueue;
- visorbus_register_visor_driver(&visornic_driver);
- return 0;
+ err = visorbus_register_visor_driver(&visornic_driver);
+ if (!err)
+ return 0;
cleanup_workqueue:
if (visornic_timeout_reset_workqueue) {
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index e8a52f7d6204..51d1734d5390 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -407,6 +407,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
TYPERANGE_UTF8, USE_INITIAL_ONLY);
if (!param)
goto out;
+
/*
* Extra parameters for ISER from RFC-5046
*/
@@ -496,9 +497,9 @@ int iscsi_set_keys_to_negotiate(
} else if (!strcmp(param->name, SESSIONTYPE)) {
SET_PSTATE_NEGOTIATE(param);
} else if (!strcmp(param->name, IFMARKER)) {
- SET_PSTATE_NEGOTIATE(param);
+ SET_PSTATE_REJECT(param);
} else if (!strcmp(param->name, OFMARKER)) {
- SET_PSTATE_NEGOTIATE(param);
+ SET_PSTATE_REJECT(param);
} else if (!strcmp(param->name, IFMARKINT)) {
SET_PSTATE_REJECT(param);
} else if (!strcmp(param->name, OFMARKINT)) {
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index dcc424ac35d4..88ea4e4f124b 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -62,22 +62,13 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
struct se_session *se_sess = se_cmd->se_sess;
struct se_node_acl *nacl = se_sess->se_node_acl;
struct se_dev_entry *deve;
+ sense_reason_t ret = TCM_NO_SENSE;
rcu_read_lock();
deve = target_nacl_find_deve(nacl, unpacked_lun);
if (deve) {
atomic_long_inc(&deve->total_cmds);
- if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
- (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
- pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
- " Access for 0x%08llx\n",
- se_cmd->se_tfo->get_fabric_name(),
- unpacked_lun);
- rcu_read_unlock();
- return TCM_WRITE_PROTECTED;
- }
-
if (se_cmd->data_direction == DMA_TO_DEVICE)
atomic_long_add(se_cmd->data_length,
&deve->write_bytes);
@@ -93,6 +84,17 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
percpu_ref_get(&se_lun->lun_ref);
se_cmd->lun_ref_active = true;
+
+ if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
+ (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
+ pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+ " Access for 0x%08llx\n",
+ se_cmd->se_tfo->get_fabric_name(),
+ unpacked_lun);
+ rcu_read_unlock();
+ ret = TCM_WRITE_PROTECTED;
+ goto ref_dev;
+ }
}
rcu_read_unlock();
@@ -109,12 +111,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
unpacked_lun);
return TCM_NON_EXISTENT_LUN;
}
- /*
- * Force WRITE PROTECT for virtual LUN 0
- */
- if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
- (se_cmd->data_direction != DMA_NONE))
- return TCM_WRITE_PROTECTED;
se_lun = se_sess->se_tpg->tpg_virt_lun0;
se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
@@ -123,6 +119,15 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
percpu_ref_get(&se_lun->lun_ref);
se_cmd->lun_ref_active = true;
+
+ /*
+ * Force WRITE PROTECT for virtual LUN 0
+ */
+ if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
+ (se_cmd->data_direction != DMA_NONE)) {
+ ret = TCM_WRITE_PROTECTED;
+ goto ref_dev;
+ }
}
/*
* RCU reference protected by percpu se_lun->lun_ref taken above that
@@ -130,6 +135,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
* pointer can be kfree_rcu() by the final se_lun->lun_group put via
* target_core_fabric_configfs.c:target_fabric_port_release
*/
+ref_dev:
se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
atomic_long_inc(&se_cmd->se_dev->num_cmds);
@@ -140,7 +146,7 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u64 unpacked_lun)
atomic_long_add(se_cmd->data_length,
&se_cmd->se_dev->read_bytes);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
@@ -427,8 +433,6 @@ void core_disable_device_list_for_node(
hlist_del_rcu(&orig->link);
clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
- rcu_assign_pointer(orig->se_lun, NULL);
- rcu_assign_pointer(orig->se_lun_acl, NULL);
orig->lun_flags = 0;
orig->creation_time = 0;
orig->attach_count--;
@@ -439,6 +443,9 @@ void core_disable_device_list_for_node(
kref_put(&orig->pr_kref, target_pr_kref_release);
wait_for_completion(&orig->pr_comp);
+ rcu_assign_pointer(orig->se_lun, NULL);
+ rcu_assign_pointer(orig->se_lun_acl, NULL);
+
kfree_rcu(orig, rcu_head);
core_scsi3_free_pr_reg_from_nacl(dev, nacl);
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index 9522960c7fdd..22390e0e046c 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -187,5 +187,5 @@ core_delete_hba(struct se_hba *hba)
bool target_sense_desc_format(struct se_device *dev)
{
- return dev->transport->get_blocks(dev) > U32_MAX;
+ return (dev) ? dev->transport->get_blocks(dev) > U32_MAX : false;
}
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 5a9982f5d5d6..0f19e11acac2 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -105,6 +105,8 @@ static int iblock_configure_device(struct se_device *dev)
mode = FMODE_READ|FMODE_EXCL;
if (!ib_dev->ibd_readonly)
mode |= FMODE_WRITE;
+ else
+ dev->dev_flags |= DF_READ_ONLY;
bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
if (IS_ERR(bd)) {
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 5ab7100de17e..e7933115087a 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -618,7 +618,7 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
struct se_device *dev,
struct se_node_acl *nacl,
struct se_lun *lun,
- struct se_dev_entry *deve,
+ struct se_dev_entry *dest_deve,
u64 mapped_lun,
unsigned char *isid,
u64 sa_res_key,
@@ -640,7 +640,29 @@ static struct t10_pr_registration *__core_scsi3_do_alloc_registration(
INIT_LIST_HEAD(&pr_reg->pr_reg_atp_mem_list);
atomic_set(&pr_reg->pr_res_holders, 0);
pr_reg->pr_reg_nacl = nacl;
- pr_reg->pr_reg_deve = deve;
+ /*
+ * For destination registrations for ALL_TG_PT=1 and SPEC_I_PT=1,
+ * the se_dev_entry->pr_ref will already have been obtained by
+ * core_get_se_deve_from_rtpi() or __core_scsi3_alloc_registration().
+ *
+ * Otherwise, locate se_dev_entry now and obtain a reference until
+ * registration completes in __core_scsi3_add_registration().
+ */
+ if (dest_deve) {
+ pr_reg->pr_reg_deve = dest_deve;
+ } else {
+ rcu_read_lock();
+ pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun);
+ if (!pr_reg->pr_reg_deve) {
+ rcu_read_unlock();
+ pr_err("Unable to locate PR deve %s mapped_lun: %llu\n",
+ nacl->initiatorname, mapped_lun);
+ kmem_cache_free(t10_pr_reg_cache, pr_reg);
+ return NULL;
+ }
+ kref_get(&pr_reg->pr_reg_deve->pr_kref);
+ rcu_read_unlock();
+ }
pr_reg->pr_res_mapped_lun = mapped_lun;
pr_reg->pr_aptpl_target_lun = lun->unpacked_lun;
pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi;
@@ -936,17 +958,29 @@ static int __core_scsi3_check_aptpl_registration(
!(strcmp(pr_reg->pr_tport, t_port)) &&
(pr_reg->pr_reg_tpgt == tpgt) &&
(pr_reg->pr_aptpl_target_lun == target_lun)) {
+ /*
+ * Obtain the ->pr_reg_deve pointer + reference, that
+ * is released by __core_scsi3_add_registration() below.
+ */
+ rcu_read_lock();
+ pr_reg->pr_reg_deve = target_nacl_find_deve(nacl, mapped_lun);
+ if (!pr_reg->pr_reg_deve) {
+ pr_err("Unable to locate PR APTPL %s mapped_lun:"
+ " %llu\n", nacl->initiatorname, mapped_lun);
+ rcu_read_unlock();
+ continue;
+ }
+ kref_get(&pr_reg->pr_reg_deve->pr_kref);
+ rcu_read_unlock();
pr_reg->pr_reg_nacl = nacl;
pr_reg->tg_pt_sep_rtpi = lun->lun_rtpi;
-
list_del(&pr_reg->pr_reg_aptpl_list);
spin_unlock(&pr_tmpl->aptpl_reg_lock);
/*
* At this point all of the pointers in *pr_reg will
* be setup, so go ahead and add the registration.
*/
-
__core_scsi3_add_registration(dev, nacl, pr_reg, 0, 0);
/*
* If this registration is the reservation holder,
@@ -1044,18 +1078,11 @@ static void __core_scsi3_add_registration(
__core_scsi3_dump_registration(tfo, dev, nacl, pr_reg, register_type);
spin_unlock(&pr_tmpl->registration_lock);
-
- rcu_read_lock();
- deve = pr_reg->pr_reg_deve;
- if (deve)
- set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
- rcu_read_unlock();
-
/*
* Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
*/
if (!pr_reg->pr_reg_all_tg_pt || register_move)
- return;
+ goto out;
/*
* Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
* allocated in __core_scsi3_alloc_registration()
@@ -1075,19 +1102,31 @@ static void __core_scsi3_add_registration(
__core_scsi3_dump_registration(tfo, dev, nacl_tmp, pr_reg_tmp,
register_type);
spin_unlock(&pr_tmpl->registration_lock);
-
+ /*
+ * Drop configfs group dependency reference and deve->pr_kref
+ * obtained from __core_scsi3_alloc_registration() code.
+ */
rcu_read_lock();
deve = pr_reg_tmp->pr_reg_deve;
- if (deve)
+ if (deve) {
set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
+ core_scsi3_lunacl_undepend_item(deve);
+ pr_reg_tmp->pr_reg_deve = NULL;
+ }
rcu_read_unlock();
-
- /*
- * Drop configfs group dependency reference from
- * __core_scsi3_alloc_registration()
- */
- core_scsi3_lunacl_undepend_item(pr_reg_tmp->pr_reg_deve);
}
+out:
+ /*
+ * Drop deve->pr_kref obtained in __core_scsi3_do_alloc_registration()
+ */
+ rcu_read_lock();
+ deve = pr_reg->pr_reg_deve;
+ if (deve) {
+ set_bit(DEF_PR_REG_ACTIVE, &deve->deve_flags);
+ kref_put(&deve->pr_kref, target_pr_kref_release);
+ pr_reg->pr_reg_deve = NULL;
+ }
+ rcu_read_unlock();
}
static int core_scsi3_alloc_registration(
@@ -1785,9 +1824,11 @@ core_scsi3_decode_spec_i_port(
dest_node_acl->initiatorname, i_buf, (dest_se_deve) ?
dest_se_deve->mapped_lun : 0);
- if (!dest_se_deve)
+ if (!dest_se_deve) {
+ kref_put(&local_pr_reg->pr_reg_deve->pr_kref,
+ target_pr_kref_release);
continue;
-
+ }
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
@@ -1823,9 +1864,11 @@ out:
kmem_cache_free(t10_pr_reg_cache, dest_pr_reg);
- if (!dest_se_deve)
+ if (!dest_se_deve) {
+ kref_put(&local_pr_reg->pr_reg_deve->pr_kref,
+ target_pr_kref_release);
continue;
-
+ }
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 2d0381dd105c..5fb9dd7f08bb 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -668,7 +668,10 @@ int core_tpg_add_lun(
list_add_tail(&lun->lun_dev_link, &dev->dev_sep_list);
spin_unlock(&dev->se_port_lock);
- lun->lun_access = lun_access;
+ if (dev->dev_flags & DF_READ_ONLY)
+ lun->lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
+ else
+ lun->lun_access = lun_access;
if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
hlist_add_head_rcu(&lun->link, &tpg->tpg_lun_hlist);
mutex_unlock(&tpg->tpg_lun_mutex);
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 039004400987..5aabc4bc0d75 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -163,7 +163,7 @@ config THERMAL_EMULATION
config HISI_THERMAL
tristate "Hisilicon thermal driver"
- depends on ARCH_HISI && CPU_THERMAL && OF
+ depends on (ARCH_HISI && CPU_THERMAL && OF) || COMPILE_TEST
help
Enable this to plug hisilicon's thermal sensor driver into the Linux
thermal framework. cpufreq is used as the cooling device to throttle
@@ -182,7 +182,7 @@ config IMX_THERMAL
config SPEAR_THERMAL
bool "SPEAr thermal sensor driver"
- depends on PLAT_SPEAR
+ depends on PLAT_SPEAR || COMPILE_TEST
depends on OF
help
Enable this to plug the SPEAr thermal sensor driver into the Linux
@@ -190,7 +190,7 @@ config SPEAR_THERMAL
config ROCKCHIP_THERMAL
tristate "Rockchip thermal driver"
- depends on ARCH_ROCKCHIP
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
depends on RESET_CONTROLLER
help
Rockchip thermal driver provides support for Temperature sensor
@@ -208,7 +208,7 @@ config RCAR_THERMAL
config KIRKWOOD_THERMAL
tristate "Temperature sensor on Marvell Kirkwood SoCs"
- depends on MACH_KIRKWOOD
+ depends on MACH_KIRKWOOD || COMPILE_TEST
depends on OF
help
Support for the Kirkwood thermal sensor driver into the Linux thermal
@@ -216,7 +216,7 @@ config KIRKWOOD_THERMAL
config DOVE_THERMAL
tristate "Temperature sensor on Marvell Dove SoCs"
- depends on ARCH_DOVE || MACH_DOVE
+ depends on ARCH_DOVE || MACH_DOVE || COMPILE_TEST
depends on OF
help
Support for the Dove thermal sensor driver in the Linux thermal
@@ -234,7 +234,7 @@ config DB8500_THERMAL
config ARMADA_THERMAL
tristate "Armada 370/XP thermal management"
- depends on ARCH_MVEBU
+ depends on ARCH_MVEBU || COMPILE_TEST
depends on OF
help
Enable this option if you want to have support for thermal management
@@ -349,11 +349,12 @@ config INTEL_PCH_THERMAL
programmable trip points and other information.
menu "Texas Instruments thermal drivers"
+depends on ARCH_HAS_BANDGAP || COMPILE_TEST
source "drivers/thermal/ti-soc-thermal/Kconfig"
endmenu
menu "Samsung thermal drivers"
-depends on ARCH_EXYNOS
+depends on ARCH_EXYNOS || COMPILE_TEST
source "drivers/thermal/samsung/Kconfig"
endmenu
@@ -364,7 +365,7 @@ endmenu
config QCOM_SPMI_TEMP_ALARM
tristate "Qualcomm SPMI PMIC Temperature Alarm"
- depends on OF && SPMI && IIO
+ depends on OF && (SPMI || COMPILE_TEST) && IIO
select REGMAP_SPMI
help
This enables a thermal sysfs driver for Qualcomm plug-and-play (QPNP)
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 620dcd405ff6..42c6f71bdcc1 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -262,7 +262,9 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
* efficiently. Power is stored in mW, frequency in KHz. The
* resulting table is in ascending order.
*
- * Return: 0 on success, -E* on error.
+ * Return: 0 on success, -EINVAL if there are no OPPs for any CPUs,
+ * -ENOMEM if we run out of memory or -EAGAIN if an OPP was
+ * added/enabled while the function was executing.
*/
static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
u32 capacitance)
@@ -273,8 +275,6 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
int num_opps = 0, cpu, i, ret = 0;
unsigned long freq;
- rcu_read_lock();
-
for_each_cpu(cpu, &cpufreq_device->allowed_cpus) {
dev = get_cpu_device(cpu);
if (!dev) {
@@ -284,24 +284,20 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
}
num_opps = dev_pm_opp_get_opp_count(dev);
- if (num_opps > 0) {
+ if (num_opps > 0)
break;
- } else if (num_opps < 0) {
- ret = num_opps;
- goto unlock;
- }
+ else if (num_opps < 0)
+ return num_opps;
}
- if (num_opps == 0) {
- ret = -EINVAL;
- goto unlock;
- }
+ if (num_opps == 0)
+ return -EINVAL;
power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL);
- if (!power_table) {
- ret = -ENOMEM;
- goto unlock;
- }
+ if (!power_table)
+ return -ENOMEM;
+
+ rcu_read_lock();
for (freq = 0, i = 0;
opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp);
@@ -309,6 +305,12 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
u32 freq_mhz, voltage_mv;
u64 power;
+ if (i >= num_opps) {
+ rcu_read_unlock();
+ ret = -EAGAIN;
+ goto free_power_table;
+ }
+
freq_mhz = freq / 1000000;
voltage_mv = dev_pm_opp_get_voltage(opp) / 1000;
@@ -326,17 +328,22 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device,
power_table[i].power = power;
}
- if (i == 0) {
+ rcu_read_unlock();
+
+ if (i != num_opps) {
ret = PTR_ERR(opp);
- goto unlock;
+ goto free_power_table;
}
cpufreq_device->cpu_dev = dev;
cpufreq_device->dyn_power_table = power_table;
cpufreq_device->dyn_power_table_entries = i;
-unlock:
- rcu_read_unlock();
+ return 0;
+
+free_power_table:
+ kfree(power_table);
+
return ret;
}
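As an aside, the dyn_power_table entries built above follow the usual capacitance-based dynamic power model, roughly P(mW) = C * f(MHz) * V(mV)^2 / 10^9. A minimal userspace sketch with assumed sample values (the capacitance, frequency and voltage below are invented for illustration and are not taken from the patch):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* assumed sample values, not from the patch */
	uint64_t capacitance = 150;	/* dynamic-power-coefficient */
	uint64_t freq_mhz = 1200;
	uint64_t voltage_mv = 1100;

	/* P = C * f * V^2, scaled back down to milliwatts */
	uint64_t power = capacitance * freq_mhz * voltage_mv * voltage_mv;
	power /= 1000000000ULL;

	printf("estimated dynamic power: %llu mW\n",
	       (unsigned long long)power);	/* prints 217 */
	return 0;
}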
@@ -847,7 +854,7 @@ __cpufreq_cooling_register(struct device_node *np,
ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
if (ret) {
cool_dev = ERR_PTR(ret);
- goto free_table;
+ goto free_power_table;
}
snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
@@ -889,6 +896,8 @@ __cpufreq_cooling_register(struct device_node *np,
remove_idr:
release_idr(&cpufreq_idr, cpufreq_dev->id);
+free_power_table:
+ kfree(cpufreq_dev->dyn_power_table);
free_table:
kfree(cpufreq_dev->freq_table);
free_time_in_idle_timestamp:
@@ -1039,6 +1048,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
release_idr(&cpufreq_idr, cpufreq_dev->id);
+ kfree(cpufreq_dev->dyn_power_table);
kfree(cpufreq_dev->time_in_idle_timestamp);
kfree(cpufreq_dev->time_in_idle);
kfree(cpufreq_dev->freq_table);
diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c
index 607b62c7e611..e58bd0b658b5 100644
--- a/drivers/thermal/db8500_cpufreq_cooling.c
+++ b/drivers/thermal/db8500_cpufreq_cooling.c
@@ -72,6 +72,7 @@ static const struct of_device_id db8500_cpufreq_cooling_match[] = {
{ .compatible = "stericsson,db8500-cpufreq-cooling" },
{},
};
+MODULE_DEVICE_TABLE(of, db8500_cpufreq_cooling_match);
#endif
static struct platform_driver db8500_cpufreq_cooling_driver = {
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 9c8a7aad0252..e570ff084add 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -24,6 +24,8 @@
#include "thermal_core.h"
+#define INVALID_TRIP -1
+
#define FRAC_BITS 10
#define int_to_frac(x) ((x) << FRAC_BITS)
#define frac_to_int(x) ((x) >> FRAC_BITS)
@@ -56,16 +58,21 @@ static inline s64 div_frac(s64 x, s64 y)
/**
* struct power_allocator_params - parameters for the power allocator governor
+ * @allocated_tzp: whether we have allocated tzp for this thermal zone and
+ * it needs to be freed on unbind
* @err_integral: accumulated error in the PID controller.
* @prev_err: error in the previous iteration of the PID controller.
* Used to calculate the derivative term.
* @trip_switch_on: first passive trip point of the thermal zone. The
* governor switches on when this trip point is crossed.
+ * If the thermal zone only has one passive trip point,
+ * @trip_switch_on should be INVALID_TRIP.
* @trip_max_desired_temperature: last passive trip point of the thermal
* zone. The temperature we are
* controlling for.
*/
struct power_allocator_params {
+ bool allocated_tzp;
s64 err_integral;
s32 prev_err;
int trip_switch_on;
@@ -73,6 +80,98 @@ struct power_allocator_params {
};
/**
+ * estimate_sustainable_power() - Estimate the sustainable power of a thermal zone
+ * @tz: thermal zone we are operating in
+ *
+ * For thermal zones that don't provide a sustainable_power in their
+ * thermal_zone_params, estimate one. Calculate it using the minimum
+ * power of all the cooling devices as that gives a valid value that
+ * can give some degree of functionality. For optimal performance of
+ * this governor, provide a sustainable_power in the thermal zone's
+ * thermal_zone_params.
+ */
+static u32 estimate_sustainable_power(struct thermal_zone_device *tz)
+{
+ u32 sustainable_power = 0;
+ struct thermal_instance *instance;
+ struct power_allocator_params *params = tz->governor_data;
+
+ list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
+ struct thermal_cooling_device *cdev = instance->cdev;
+ u32 min_power;
+
+ if (instance->trip != params->trip_max_desired_temperature)
+ continue;
+
+ if (power_actor_get_min_power(cdev, tz, &min_power))
+ continue;
+
+ sustainable_power += min_power;
+ }
+
+ return sustainable_power;
+}
+
+/**
+ * estimate_pid_constants() - Estimate the constants for the PID controller
+ * @tz: thermal zone for which to estimate the constants
+ * @sustainable_power: sustainable power for the thermal zone
+ * @trip_switch_on: trip point number for the switch on temperature
+ * @control_temp: target temperature for the power allocator governor
+ * @force: whether to force the update of the constants
+ *
+ * This function is used to update the estimation of the PID
+ * controller constants in struct thermal_zone_parameters.
+ * Sustainable power is provided in case it was estimated. The
+ * estimated sustainable_power should not be stored in the
+ * thermal_zone_parameters so it has to be passed explicitly to this
+ * function.
+ *
+ * If @force is not set, the values in the thermal zone's parameters
+ * are preserved if they are not zero. If @force is set, the values
+ * in thermal zone's parameters are overwritten.
+ */
+static void estimate_pid_constants(struct thermal_zone_device *tz,
+ u32 sustainable_power, int trip_switch_on,
+ int control_temp, bool force)
+{
+ int ret;
+ int switch_on_temp;
+ u32 temperature_threshold;
+
+ ret = tz->ops->get_trip_temp(tz, trip_switch_on, &switch_on_temp);
+ if (ret)
+ switch_on_temp = 0;
+
+ temperature_threshold = control_temp - switch_on_temp;
+ /*
+ * estimate_pid_constants() tries to find appropriate default
+ * values for thermal zones that don't provide them. If a
+ * system integrator has configured a thermal zone with two
+ * passive trip points at the same temperature, that person
+ * hasn't put any effort to set up the thermal zone properly
+ * so just give up.
+ */
+ if (!temperature_threshold)
+ return;
+
+ if (!tz->tzp->k_po || force)
+ tz->tzp->k_po = int_to_frac(sustainable_power) /
+ temperature_threshold;
+
+ if (!tz->tzp->k_pu || force)
+ tz->tzp->k_pu = int_to_frac(2 * sustainable_power) /
+ temperature_threshold;
+
+ if (!tz->tzp->k_i || force)
+ tz->tzp->k_i = int_to_frac(10) / 1000;
+ /*
+ * The default for k_d and integral_cutoff is 0, so we can
+ * leave them as they are.
+ */
+}
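To make the fixed-point arithmetic above concrete, here is a minimal userspace sketch of what estimate_pid_constants() would compute for an assumed zone with sustainable_power = 3000 mW, a switch-on trip at 85000 millicelsius and a control trip at 95000 millicelsius (all values invented for illustration):

#include <stdio.h>

#define FRAC_BITS 10
#define int_to_frac(x) ((x) << FRAC_BITS)

int main(void)
{
	int sustainable_power = 3000;			/* mW, assumed */
	int temperature_threshold = 95000 - 85000;	/* millicelsius */

	int k_po = int_to_frac(sustainable_power) / temperature_threshold;
	int k_pu = int_to_frac(2 * sustainable_power) / temperature_threshold;
	int k_i  = int_to_frac(10) / 1000;

	/* prints k_po=307 k_pu=614 k_i=10 (all in 10-bit fixed point) */
	printf("k_po=%d k_pu=%d k_i=%d\n", k_po, k_pu, k_i);
	return 0;
}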
+
+/**
* pid_controller() - PID controller
* @tz: thermal zone we are operating in
* @current_temp: the current temperature in millicelsius
@@ -98,10 +197,20 @@ static u32 pid_controller(struct thermal_zone_device *tz,
{
s64 p, i, d, power_range;
s32 err, max_power_frac;
+ u32 sustainable_power;
struct power_allocator_params *params = tz->governor_data;
max_power_frac = int_to_frac(max_allocatable_power);
+ if (tz->tzp->sustainable_power) {
+ sustainable_power = tz->tzp->sustainable_power;
+ } else {
+ sustainable_power = estimate_sustainable_power(tz);
+ estimate_pid_constants(tz, sustainable_power,
+ params->trip_switch_on, control_temp,
+ true);
+ }
+
err = control_temp - current_temp;
err = int_to_frac(err);
@@ -139,7 +248,7 @@ static u32 pid_controller(struct thermal_zone_device *tz,
power_range = p + i + d;
/* feed-forward the known sustainable dissipatable power */
- power_range = tz->tzp->sustainable_power + frac_to_int(power_range);
+ power_range = sustainable_power + frac_to_int(power_range);
power_range = clamp(power_range, (s64)0, (s64)max_allocatable_power);
@@ -247,6 +356,11 @@ static int allocate_power(struct thermal_zone_device *tz,
}
}
+ if (!num_actors) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
/*
* We need to allocate five arrays of the same size:
* req_power, max_power, granted_power, extra_actor_power and
@@ -340,43 +454,66 @@ unlock:
return ret;
}
-static int get_governor_trips(struct thermal_zone_device *tz,
- struct power_allocator_params *params)
+/**
+ * get_governor_trips() - get the number of the two trip points that are key for this governor
+ * @tz: thermal zone to operate on
+ * @params: pointer to private data for this governor
+ *
+ * The power allocator governor works optimally with two trips points:
+ * a "switch on" trip point and a "maximum desired temperature". These
+ * are defined as the first and last passive trip points.
+ *
+ * If there is only one trip point, then that's considered to be the
+ * "maximum desired temperature" trip point and the governor is always
+ * on. If there are no passive or active trip points, then the
+ * governor won't do anything. In fact, its throttle function
+ * won't be called at all.
+ */
+static void get_governor_trips(struct thermal_zone_device *tz,
+ struct power_allocator_params *params)
{
- int i, ret, last_passive;
+ int i, last_active, last_passive;
bool found_first_passive;
found_first_passive = false;
- last_passive = -1;
- ret = -EINVAL;
+ last_active = INVALID_TRIP;
+ last_passive = INVALID_TRIP;
for (i = 0; i < tz->trips; i++) {
enum thermal_trip_type type;
+ int ret;
ret = tz->ops->get_trip_type(tz, i, &type);
- if (ret)
- return ret;
+ if (ret) {
+ dev_warn(&tz->device,
+ "Failed to get trip point %d type: %d\n", i,
+ ret);
+ continue;
+ }
- if (!found_first_passive) {
- if (type == THERMAL_TRIP_PASSIVE) {
+ if (type == THERMAL_TRIP_PASSIVE) {
+ if (!found_first_passive) {
params->trip_switch_on = i;
found_first_passive = true;
+ } else {
+ last_passive = i;
}
- } else if (type == THERMAL_TRIP_PASSIVE) {
- last_passive = i;
+ } else if (type == THERMAL_TRIP_ACTIVE) {
+ last_active = i;
} else {
break;
}
}
- if (last_passive != -1) {
+ if (last_passive != INVALID_TRIP) {
params->trip_max_desired_temperature = last_passive;
- ret = 0;
+ } else if (found_first_passive) {
+ params->trip_max_desired_temperature = params->trip_switch_on;
+ params->trip_switch_on = INVALID_TRIP;
} else {
- ret = -EINVAL;
+ params->trip_switch_on = INVALID_TRIP;
+ params->trip_max_desired_temperature = last_active;
}
-
- return ret;
}
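For illustration only, the trip selection above can be exercised in userspace; with an assumed trip layout of { active, passive, passive, critical } it picks trip 1 as the switch-on point and trip 2 as the maximum desired temperature:

#include <stdio.h>

#define INVALID_TRIP -1

enum trip { TRIP_ACTIVE, TRIP_PASSIVE, TRIP_CRITICAL };

int main(void)
{
	enum trip trips[] = { TRIP_ACTIVE, TRIP_PASSIVE, TRIP_PASSIVE,
			      TRIP_CRITICAL };
	int i, found_first_passive = 0;
	int switch_on = INVALID_TRIP, max_desired = INVALID_TRIP;
	int last_active = INVALID_TRIP, last_passive = INVALID_TRIP;

	for (i = 0; i < 4; i++) {
		if (trips[i] == TRIP_PASSIVE) {
			if (!found_first_passive) {
				switch_on = i;
				found_first_passive = 1;
			} else {
				last_passive = i;
			}
		} else if (trips[i] == TRIP_ACTIVE) {
			last_active = i;
		} else {
			break;		/* critical/hot trips stop the scan */
		}
	}

	if (last_passive != INVALID_TRIP) {
		max_desired = last_passive;
	} else if (found_first_passive) {
		max_desired = switch_on;
		switch_on = INVALID_TRIP;
	} else {
		max_desired = last_active;
	}

	/* prints switch_on=1 max_desired=2 */
	printf("switch_on=%d max_desired=%d\n", switch_on, max_desired);
	return 0;
}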
static void reset_pid_controller(struct power_allocator_params *params)
@@ -405,60 +542,45 @@ static void allow_maximum_power(struct thermal_zone_device *tz)
* power_allocator_bind() - bind the power_allocator governor to a thermal zone
* @tz: thermal zone to bind it to
*
- * Check that the thermal zone is valid for this governor, that is, it
- * has two thermal trips. If so, initialize the PID controller
- * parameters and bind it to the thermal zone.
+ * Initialize the PID controller parameters and bind it to the thermal
+ * zone.
*
- * Return: 0 on success, -EINVAL if the trips were invalid or -ENOMEM
- * if we ran out of memory.
+ * Return: 0 on success, or -ENOMEM if we ran out of memory.
*/
static int power_allocator_bind(struct thermal_zone_device *tz)
{
int ret;
struct power_allocator_params *params;
- int switch_on_temp, control_temp;
- u32 temperature_threshold;
-
- if (!tz->tzp || !tz->tzp->sustainable_power) {
- dev_err(&tz->device,
- "power_allocator: missing sustainable_power\n");
- return -EINVAL;
- }
+ int control_temp;
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
- ret = get_governor_trips(tz, params);
- if (ret) {
- dev_err(&tz->device,
- "thermal zone %s has wrong trip setup for power allocator\n",
- tz->type);
- goto free;
- }
+ if (!tz->tzp) {
+ tz->tzp = kzalloc(sizeof(*tz->tzp), GFP_KERNEL);
+ if (!tz->tzp) {
+ ret = -ENOMEM;
+ goto free_params;
+ }
- ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
- &switch_on_temp);
- if (ret)
- goto free;
+ params->allocated_tzp = true;
+ }
- ret = tz->ops->get_trip_temp(tz, params->trip_max_desired_temperature,
- &control_temp);
- if (ret)
- goto free;
+ if (!tz->tzp->sustainable_power)
+ dev_warn(&tz->device, "power_allocator: sustainable_power will be estimated\n");
- temperature_threshold = control_temp - switch_on_temp;
+ get_governor_trips(tz, params);
- tz->tzp->k_po = tz->tzp->k_po ?:
- int_to_frac(tz->tzp->sustainable_power) / temperature_threshold;
- tz->tzp->k_pu = tz->tzp->k_pu ?:
- int_to_frac(2 * tz->tzp->sustainable_power) /
- temperature_threshold;
- tz->tzp->k_i = tz->tzp->k_i ?: int_to_frac(10) / 1000;
- /*
- * The default for k_d and integral_cutoff is 0, so we can
- * leave them as they are.
- */
+ if (tz->trips > 0) {
+ ret = tz->ops->get_trip_temp(tz,
+ params->trip_max_desired_temperature,
+ &control_temp);
+ if (!ret)
+ estimate_pid_constants(tz, tz->tzp->sustainable_power,
+ params->trip_switch_on,
+ control_temp, false);
+ }
reset_pid_controller(params);
@@ -466,14 +588,23 @@ static int power_allocator_bind(struct thermal_zone_device *tz)
return 0;
-free:
+free_params:
kfree(params);
+
return ret;
}
static void power_allocator_unbind(struct thermal_zone_device *tz)
{
+ struct power_allocator_params *params = tz->governor_data;
+
dev_dbg(&tz->device, "Unbinding from thermal zone %d\n", tz->id);
+
+ if (params->allocated_tzp) {
+ kfree(tz->tzp);
+ tz->tzp = NULL;
+ }
+
kfree(tz->governor_data);
tz->governor_data = NULL;
}
@@ -499,13 +630,7 @@ static int power_allocator_throttle(struct thermal_zone_device *tz, int trip)
ret = tz->ops->get_trip_temp(tz, params->trip_switch_on,
&switch_on_temp);
- if (ret) {
- dev_warn(&tz->device,
- "Failed to get switch on temperature: %d\n", ret);
- return ret;
- }
-
- if (current_temp < switch_on_temp) {
+ if (!ret && (current_temp < switch_on_temp)) {
tz->passive = 0;
reset_pid_controller(params);
allow_maximum_power(tz);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 5e5fc7015c7f..d9e525cc9c1c 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1013,6 +1013,34 @@ int power_actor_get_max_power(struct thermal_cooling_device *cdev,
}
/**
+ * power_actor_get_min_power() - get the minimum power that a cdev can consume
+ * @cdev: pointer to &thermal_cooling_device
+ * @tz: a valid thermal zone device pointer
+ * @min_power: pointer in which to store the minimum power
+ *
+ * Calculate the minimum power consumption in milliwatts that the
+ * cooling device can currently consume and store it in @min_power.
+ *
+ * Return: 0 on success, -EINVAL if @cdev doesn't support the
+ * power_actor API or -E* on other error.
+ */
+int power_actor_get_min_power(struct thermal_cooling_device *cdev,
+ struct thermal_zone_device *tz, u32 *min_power)
+{
+ unsigned long max_state;
+ int ret;
+
+ if (!cdev_is_power_actor(cdev))
+ return -EINVAL;
+
+ ret = cdev->ops->get_max_state(cdev, &max_state);
+ if (ret)
+ return ret;
+
+ return cdev->ops->state2power(cdev, tz, max_state, min_power);
+}
+
+/**
* power_actor_set_power() - limit the maximum power that a cooling device can consume
* @cdev: pointer to &thermal_cooling_device
* @instance: thermal instance to update
diff --git a/drivers/thermal/ti-soc-thermal/Kconfig b/drivers/thermal/ti-soc-thermal/Kconfig
index bd4c7beba679..cb6686ff09ae 100644
--- a/drivers/thermal/ti-soc-thermal/Kconfig
+++ b/drivers/thermal/ti-soc-thermal/Kconfig
@@ -1,7 +1,5 @@
config TI_SOC_THERMAL
tristate "Texas Instruments SoCs temperature sensor driver"
- depends on THERMAL
- depends on ARCH_HAS_BANDGAP
help
If you say yes here you get support for the Texas Instruments
OMAP4460+ on die bandgap temperature sensor support. The register
@@ -24,7 +22,7 @@ config TI_THERMAL
config OMAP4_THERMAL
bool "Texas Instruments OMAP4 thermal support"
depends on TI_SOC_THERMAL
- depends on ARCH_OMAP4
+ depends on ARCH_OMAP4 || COMPILE_TEST
help
If you say yes here you get thermal support for the Texas Instruments
OMAP4 SoC family. The current chip supported are:
@@ -38,7 +36,7 @@ config OMAP4_THERMAL
config OMAP5_THERMAL
bool "Texas Instruments OMAP5 thermal support"
depends on TI_SOC_THERMAL
- depends on SOC_OMAP5
+ depends on SOC_OMAP5 || COMPILE_TEST
help
If you say yes here you get thermal support for the Texas Instruments
OMAP5 SoC family. The current chip supported are:
@@ -50,7 +48,7 @@ config OMAP5_THERMAL
config DRA752_THERMAL
bool "Texas Instruments DRA752 thermal support"
depends on TI_SOC_THERMAL
- depends on SOC_DRA7XX
+ depends on SOC_DRA7XX || COMPILE_TEST
help
If you say yes here you get thermal support for the Texas Instruments
DRA752 SoC family. The current chip supported are:
diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
index c68fe1222c16..20a41f7de76f 100644
--- a/drivers/thunderbolt/nhi.c
+++ b/drivers/thunderbolt/nhi.c
@@ -643,7 +643,7 @@ static struct pci_device_id nhi_ids[] = {
{
.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
.vendor = PCI_VENDOR_ID_INTEL, .device = 0x156c,
- .subvendor = 0x2222, .subdevice = 0x1111,
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
},
{ 0,}
};
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 54e6c8ddef5d..b1e0ba3e525b 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2910,3 +2910,5 @@ int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
}
#endif /* CONFIG_SERIAL_8250_CONSOLE */
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 867e9f3f3859..dcc50c878159 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -61,7 +61,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = {
{ .compatible = "fsl,imx27-usb", .data = &imx27_usb_data},
{ .compatible = "fsl,imx6q-usb", .data = &imx6q_usb_data},
{ .compatible = "fsl,imx6sl-usb", .data = &imx6sl_usb_data},
- { .compatible = "fsl,imx6sx-usb", .data = &imx6sl_usb_data},
+ { .compatible = "fsl,imx6sx-usb", .data = &imx6sx_usb_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids);
diff --git a/drivers/usb/chipidea/ci_hdrc_usb2.c b/drivers/usb/chipidea/ci_hdrc_usb2.c
index 9eae1a16cef9..4456d2cf80ff 100644
--- a/drivers/usb/chipidea/ci_hdrc_usb2.c
+++ b/drivers/usb/chipidea/ci_hdrc_usb2.c
@@ -12,6 +12,7 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/usb/chipidea.h>
@@ -30,18 +31,36 @@ static const struct ci_hdrc_platform_data ci_default_pdata = {
.flags = CI_HDRC_DISABLE_STREAMING,
};
+static struct ci_hdrc_platform_data ci_zynq_pdata = {
+ .capoffset = DEF_CAPOFFSET,
+};
+
+static const struct of_device_id ci_hdrc_usb2_of_match[] = {
+ { .compatible = "chipidea,usb2"},
+ { .compatible = "xlnx,zynq-usb-2.20a", .data = &ci_zynq_pdata},
+ { }
+};
+MODULE_DEVICE_TABLE(of, ci_hdrc_usb2_of_match);
+
static int ci_hdrc_usb2_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct ci_hdrc_usb2_priv *priv;
struct ci_hdrc_platform_data *ci_pdata = dev_get_platdata(dev);
int ret;
+ const struct of_device_id *match;
if (!ci_pdata) {
ci_pdata = devm_kmalloc(dev, sizeof(*ci_pdata), GFP_KERNEL);
*ci_pdata = ci_default_pdata; /* struct copy */
}
+ match = of_match_device(ci_hdrc_usb2_of_match, &pdev->dev);
+ if (match && match->data) {
+ /* struct copy */
+ *ci_pdata = *(struct ci_hdrc_platform_data *)match->data;
+ }
+
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -96,12 +115,6 @@ static int ci_hdrc_usb2_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id ci_hdrc_usb2_of_match[] = {
- { .compatible = "chipidea,usb2" },
- { }
-};
-MODULE_DEVICE_TABLE(of, ci_hdrc_usb2_of_match);
-
static struct platform_driver ci_hdrc_usb2_driver = {
.probe = ci_hdrc_usb2_probe,
.remove = ci_hdrc_usb2_remove,
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index a637da25dda0..8223fe73ea85 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -656,6 +656,44 @@ __acquires(hwep->lock)
return 0;
}
+static int _ep_set_halt(struct usb_ep *ep, int value, bool check_transfer)
+{
+ struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
+ int direction, retval = 0;
+ unsigned long flags;
+
+ if (ep == NULL || hwep->ep.desc == NULL)
+ return -EINVAL;
+
+ if (usb_endpoint_xfer_isoc(hwep->ep.desc))
+ return -EOPNOTSUPP;
+
+ spin_lock_irqsave(hwep->lock, flags);
+
+ if (value && hwep->dir == TX && check_transfer &&
+ !list_empty(&hwep->qh.queue) &&
+ !usb_endpoint_xfer_control(hwep->ep.desc)) {
+ spin_unlock_irqrestore(hwep->lock, flags);
+ return -EAGAIN;
+ }
+
+ direction = hwep->dir;
+ do {
+ retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
+
+ if (!value)
+ hwep->wedge = 0;
+
+ if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
+ hwep->dir = (hwep->dir == TX) ? RX : TX;
+
+ } while (hwep->dir != direction);
+
+ spin_unlock_irqrestore(hwep->lock, flags);
+ return retval;
+}
+
+
/**
* _gadget_stop_activity: stops all USB activity, flushes & disables all endpts
* @gadget: gadget
@@ -1051,7 +1089,7 @@ __acquires(ci->lock)
num += ci->hw_ep_max / 2;
spin_unlock(&ci->lock);
- err = usb_ep_set_halt(&ci->ci_hw_ep[num].ep);
+ err = _ep_set_halt(&ci->ci_hw_ep[num].ep, 1, false);
spin_lock(&ci->lock);
if (!err)
isr_setup_status_phase(ci);
@@ -1117,8 +1155,8 @@ delegate:
if (err < 0) {
spin_unlock(&ci->lock);
- if (usb_ep_set_halt(&hwep->ep))
- dev_err(ci->dev, "error: ep_set_halt\n");
+ if (_ep_set_halt(&hwep->ep, 1, false))
+ dev_err(ci->dev, "error: _ep_set_halt\n");
spin_lock(&ci->lock);
}
}
@@ -1149,9 +1187,9 @@ __acquires(ci->lock)
err = isr_setup_status_phase(ci);
if (err < 0) {
spin_unlock(&ci->lock);
- if (usb_ep_set_halt(&hwep->ep))
+ if (_ep_set_halt(&hwep->ep, 1, false))
dev_err(ci->dev,
- "error: ep_set_halt\n");
+ "error: _ep_set_halt\n");
spin_lock(&ci->lock);
}
}
@@ -1397,41 +1435,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
*/
static int ep_set_halt(struct usb_ep *ep, int value)
{
- struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
- int direction, retval = 0;
- unsigned long flags;
-
- if (ep == NULL || hwep->ep.desc == NULL)
- return -EINVAL;
-
- if (usb_endpoint_xfer_isoc(hwep->ep.desc))
- return -EOPNOTSUPP;
-
- spin_lock_irqsave(hwep->lock, flags);
-
-#ifndef STALL_IN
- /* g_file_storage MS compliant but g_zero fails chapter 9 compliance */
- if (value && hwep->type == USB_ENDPOINT_XFER_BULK && hwep->dir == TX &&
- !list_empty(&hwep->qh.queue)) {
- spin_unlock_irqrestore(hwep->lock, flags);
- return -EAGAIN;
- }
-#endif
-
- direction = hwep->dir;
- do {
- retval |= hw_ep_set_halt(hwep->ci, hwep->num, hwep->dir, value);
-
- if (!value)
- hwep->wedge = 0;
-
- if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
- hwep->dir = (hwep->dir == TX) ? RX : TX;
-
- } while (hwep->dir != direction);
-
- spin_unlock_irqrestore(hwep->lock, flags);
- return retval;
+ return _ep_set_halt(ep, value, true);
}
/**
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index b2a540b43f97..b9ddf0c1ffe5 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -112,7 +112,7 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
cfgno, inum, asnum, ep->desc.bEndpointAddress);
ep->ss_ep_comp.bmAttributes = 16;
} else if (usb_endpoint_xfer_isoc(&ep->desc) &&
- desc->bmAttributes > 2) {
+ USB_SS_MULT(desc->bmAttributes) > 3) {
dev_warn(ddev, "Isoc endpoint has Mult of %d in "
"config %d interface %d altsetting %d ep %d: "
"setting to 3\n", desc->bmAttributes + 1,
@@ -121,7 +121,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
}
if (usb_endpoint_xfer_isoc(&ep->desc))
- max_tx = (desc->bMaxBurst + 1) * (desc->bmAttributes + 1) *
+ max_tx = (desc->bMaxBurst + 1) *
+ (USB_SS_MULT(desc->bmAttributes)) *
usb_endpoint_maxp(&ep->desc);
else if (usb_endpoint_xfer_int(&ep->desc))
max_tx = usb_endpoint_maxp(&ep->desc) *
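A small userspace illustration of why masking with USB_SS_MULT() matters: the macro expands to 1 + (p & 0x3) in <linux/usb/ch9.h>, so only the 2-bit Mult field contributes to the bandwidth estimate. The descriptor values below are invented for the example:

#include <stdio.h>

#define USB_SS_MULT(p) (1 + ((p) & 0x3))

int main(void)
{
	unsigned int bMaxBurst = 2;	/* up to 3 packets per burst */
	unsigned int maxp = 1024;	/* wMaxPacketSize */

	/* well-formed companion descriptor: only the Mult field (= 2) is set */
	unsigned int bmAttributes = 0x02;
	printf("old=%u new=%u\n",
	       (bMaxBurst + 1) * (bmAttributes + 1) * maxp,
	       (bMaxBurst + 1) * USB_SS_MULT(bmAttributes) * maxp);
	/* old=9216 new=9216 - identical while only the Mult bits are set */

	/* same Mult field, but a high bit is set on top of it */
	bmAttributes = 0x82;
	printf("old=%u new=%u\n",
	       (bMaxBurst + 1) * (bmAttributes + 1) * maxp,
	       (bMaxBurst + 1) * USB_SS_MULT(bmAttributes) * maxp);
	/* old=402432 new=9216 - masking keeps the bandwidth math sane */
	return 0;
}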
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index a5a1b7c45743..22e9606d8e08 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -514,8 +514,6 @@ static int dwc3_omap_probe(struct platform_device *pdev)
goto err1;
}
- dwc3_omap_enable_irqs(omap);
-
ret = dwc3_omap_extcon_register(omap);
if (ret < 0)
goto err2;
@@ -526,6 +524,8 @@ static int dwc3_omap_probe(struct platform_device *pdev)
goto err3;
}
+ dwc3_omap_enable_irqs(omap);
+
return 0;
err3:
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 0c25704dcb6b..1e8bdf817811 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2665,8 +2665,6 @@ static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
int i;
irqreturn_t ret = IRQ_NONE;
- spin_lock(&dwc->lock);
-
for (i = 0; i < dwc->num_event_buffers; i++) {
irqreturn_t status;
@@ -2675,8 +2673,6 @@ static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
ret = status;
}
- spin_unlock(&dwc->lock);
-
return ret;
}
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 978435a51038..6399c106a3a5 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -186,6 +186,7 @@ void usb_ep_autoconfig_reset (struct usb_gadget *gadget)
list_for_each_entry (ep, &gadget->ep_list, ep_list) {
ep->claimed = false;
+ ep->driver_data = NULL;
}
gadget->in_epnum = 0;
gadget->out_epnum = 0;
diff --git a/drivers/usb/gadget/udc/amd5536udc.c b/drivers/usb/gadget/udc/amd5536udc.c
index fdacddb18c00..175ca93fe5e2 100644
--- a/drivers/usb/gadget/udc/amd5536udc.c
+++ b/drivers/usb/gadget/udc/amd5536udc.c
@@ -3138,8 +3138,8 @@ static void udc_pci_remove(struct pci_dev *pdev)
writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
if (dev->irq_registered)
free_irq(pdev->irq, dev);
- if (dev->regs)
- iounmap(dev->regs);
+ if (dev->virt_addr)
+ iounmap(dev->virt_addr);
if (dev->mem_region)
release_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
@@ -3226,17 +3226,13 @@ static int udc_pci_probe(
/* init */
dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
- if (!dev) {
- retval = -ENOMEM;
- goto finished;
- }
+ if (!dev)
+ return -ENOMEM;
/* pci setup */
if (pci_enable_device(pdev) < 0) {
- kfree(dev);
- dev = NULL;
retval = -ENODEV;
- goto finished;
+ goto err_pcidev;
}
dev->active = 1;
@@ -3246,28 +3242,22 @@ static int udc_pci_probe(
if (!request_mem_region(resource, len, name)) {
dev_dbg(&pdev->dev, "pci device used already\n");
- kfree(dev);
- dev = NULL;
retval = -EBUSY;
- goto finished;
+ goto err_memreg;
}
dev->mem_region = 1;
dev->virt_addr = ioremap_nocache(resource, len);
if (dev->virt_addr == NULL) {
dev_dbg(&pdev->dev, "start address cannot be mapped\n");
- kfree(dev);
- dev = NULL;
retval = -EFAULT;
- goto finished;
+ goto err_ioremap;
}
if (!pdev->irq) {
dev_err(&pdev->dev, "irq not set\n");
- kfree(dev);
- dev = NULL;
retval = -ENODEV;
- goto finished;
+ goto err_irq;
}
spin_lock_init(&dev->lock);
@@ -3283,10 +3273,8 @@ static int udc_pci_probe(
if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
- kfree(dev);
- dev = NULL;
retval = -EBUSY;
- goto finished;
+ goto err_irq;
}
dev->irq_registered = 1;
@@ -3314,8 +3302,17 @@ static int udc_pci_probe(
return 0;
finished:
- if (dev)
- udc_pci_remove(pdev);
+ udc_pci_remove(pdev);
+ return retval;
+
+err_irq:
+ iounmap(dev->virt_addr);
+err_ioremap:
+ release_mem_region(resource, len);
+err_memreg:
+ pci_disable_device(pdev);
+err_pcidev:
+ kfree(dev);
return retval;
}
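The probe rework above follows the usual layered-goto unwind idiom: each error label releases exactly what was acquired before the failure, in reverse order of acquisition. A minimal userspace sketch of the pattern (malloc/free stand in for the real resources; nothing below comes from the driver itself):

#include <stdio.h>
#include <stdlib.h>

static int probe(int fail_irq)
{
	char *dev, *regs;
	int ret;

	dev = malloc(32);		/* stands in for kzalloc(sizeof(*dev)) */
	if (!dev)
		return -1;

	regs = malloc(32);		/* stands in for ioremap_nocache() */
	if (!regs) {
		ret = -1;
		goto err_dev;
	}

	if (fail_irq) {			/* stands in for request_irq() failing */
		ret = -1;
		goto err_regs;
	}

	free(regs);			/* normal teardown on success ... */
	free(dev);			/* ... just to keep the sketch leak-free */
	return 0;

err_regs:				/* unwind in reverse acquisition order */
	free(regs);
err_dev:
	free(dev);
	return ret;
}

int main(void)
{
	printf("probe(ok)=%d probe(irq fails)=%d\n", probe(0), probe(1));
	return 0;
}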
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index 3dfada8d6061..f0f2b066ac08 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -2002,6 +2002,17 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
ep->udc = udc;
INIT_LIST_HEAD(&ep->queue);
+ if (ep->index == 0) {
+ ep->ep.caps.type_control = true;
+ } else {
+ ep->ep.caps.type_iso = ep->can_isoc;
+ ep->ep.caps.type_bulk = true;
+ ep->ep.caps.type_int = true;
+ }
+
+ ep->ep.caps.dir_in = true;
+ ep->ep.caps.dir_out = true;
+
if (i)
list_add_tail(&ep->ep.ep_list, &udc->gadget.ep_list);
diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c
index 5c8f4effb62a..ccb9c213cc9f 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_core.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_core.c
@@ -324,8 +324,7 @@ static void bdc_mem_free(struct bdc *bdc)
bdc->scratchpad.buff, bdc->scratchpad.sp_dma);
/* Destroy the dma pools */
- if (bdc->bd_table_pool)
- dma_pool_destroy(bdc->bd_table_pool);
+ dma_pool_destroy(bdc->bd_table_pool);
/* Free the bdc_ep array */
kfree(bdc->bdc_ep_array);
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 1379ad40d864..27af0f008b57 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -1348,6 +1348,7 @@ static int transfer(struct dummy_hcd *dum_hcd, struct urb *urb,
{
struct dummy *dum = dum_hcd->dum;
struct dummy_request *req;
+ int sent = 0;
top:
/* if there's no request queued, the device is NAKing; return */
@@ -1385,12 +1386,15 @@ top:
if (len == 0)
break;
- /* use an extra pass for the final short packet */
- if (len > ep->ep.maxpacket) {
- rescan = 1;
- len -= (len % ep->ep.maxpacket);
+ /* send multiple of maxpacket first, then remainder */
+ if (len >= ep->ep.maxpacket) {
+ is_short = 0;
+ if (len % ep->ep.maxpacket)
+ rescan = 1;
+ len -= len % ep->ep.maxpacket;
+ } else {
+ is_short = 1;
}
- is_short = (len % ep->ep.maxpacket) != 0;
len = dummy_perform_transfer(urb, req, len);
@@ -1399,6 +1403,7 @@ top:
req->req.status = len;
} else {
limit -= len;
+ sent += len;
urb->actual_length += len;
req->req.actual += len;
}
@@ -1421,7 +1426,7 @@ top:
*status = -EOVERFLOW;
else
*status = 0;
- } else if (!to_host) {
+ } else {
*status = 0;
if (host_len > dev_len)
req->req.status = -EOVERFLOW;
@@ -1429,15 +1434,24 @@ top:
req->req.status = 0;
}
- /* many requests terminate without a short packet */
+ /*
+ * many requests terminate without a short packet.
+ * send a zlp if demanded by flags.
+ */
} else {
- if (req->req.length == req->req.actual
- && !req->req.zero)
- req->req.status = 0;
- if (urb->transfer_buffer_length == urb->actual_length
- && !(urb->transfer_flags
- & URB_ZERO_PACKET))
- *status = 0;
+ if (req->req.length == req->req.actual) {
+ if (req->req.zero && to_host)
+ rescan = 1;
+ else
+ req->req.status = 0;
+ }
+ if (urb->transfer_buffer_length == urb->actual_length) {
+ if (urb->transfer_flags & URB_ZERO_PACKET &&
+ !to_host)
+ rescan = 1;
+ else
+ *status = 0;
+ }
}
/* device side completion --> continuable */
@@ -1460,7 +1474,7 @@ top:
if (rescan)
goto top;
}
- return limit;
+ return sent;
}
static int periodic_bytes(struct dummy *dum, struct dummy_ep *ep)
@@ -1890,7 +1904,7 @@ restart:
default:
treat_control_like_bulk:
ep->last_io = jiffies;
- total = transfer(dum_hcd, urb, ep, limit, &status);
+ total -= transfer(dum_hcd, urb, ep, limit, &status);
break;
}
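For illustration, the reworked splitting in transfer() sends whole-maxpacket chunks first and only then the short remainder; with an assumed 3000-byte request and a 512-byte maxpacket that is one 2560-byte pass followed by a 440-byte short packet:

#include <stdio.h>

int main(void)
{
	unsigned int len = 3000, maxpacket = 512;	/* assumed values */
	int rescan;

	do {
		unsigned int chunk;
		int is_short;

		rescan = 0;
		if (len >= maxpacket) {
			is_short = 0;
			if (len % maxpacket)
				rescan = 1;	/* remainder handled next pass */
			chunk = len - (len % maxpacket);
		} else {
			is_short = 1;
			chunk = len;
		}
		printf("sent %u bytes%s\n", chunk,
		       is_short ? " (short packet)" : "");
		len -= chunk;
	} while (rescan);
	/* prints: sent 2560 bytes / sent 440 bytes (short packet) */
	return 0;
}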
diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c
index 8aa2593c2c36..b9429bc42511 100644
--- a/drivers/usb/gadget/udc/gr_udc.c
+++ b/drivers/usb/gadget/udc/gr_udc.c
@@ -2117,8 +2117,7 @@ static int gr_remove(struct platform_device *pdev)
return -EBUSY;
gr_dfs_delete(dev);
- if (dev->desc_pool)
- dma_pool_destroy(dev->desc_pool);
+ dma_pool_destroy(dev->desc_pool);
platform_set_drvdata(pdev, NULL);
gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req);
diff --git a/drivers/usb/gadget/udc/mv_u3d_core.c b/drivers/usb/gadget/udc/mv_u3d_core.c
index 4c489692745e..dafe74eb9ade 100644
--- a/drivers/usb/gadget/udc/mv_u3d_core.c
+++ b/drivers/usb/gadget/udc/mv_u3d_core.c
@@ -1767,8 +1767,7 @@ static int mv_u3d_remove(struct platform_device *dev)
usb_del_gadget_udc(&u3d->gadget);
/* free memory allocated in probe */
- if (u3d->trb_pool)
- dma_pool_destroy(u3d->trb_pool);
+ dma_pool_destroy(u3d->trb_pool);
if (u3d->ep_context)
dma_free_coherent(&dev->dev, u3d->ep_context_size,
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index 339af51df57d..81b6229c7805 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -2100,8 +2100,7 @@ static int mv_udc_remove(struct platform_device *pdev)
}
/* free memory allocated in probe */
- if (udc->dtd_pool)
- dma_pool_destroy(udc->dtd_pool);
+ dma_pool_destroy(udc->dtd_pool);
if (udc->ep_dqh)
dma_free_coherent(&pdev->dev, udc->ep_dqh_size,
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 9a8c936cd42c..41f841fa6c4d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1498,10 +1498,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
* use Event Data TRBs, and we don't chain in a link TRB on short
* transfers, we're basically dividing by 1.
*
- * xHCI 1.0 specification indicates that the Average TRB Length should
- * be set to 8 for control endpoints.
+ * xHCI 1.0 and 1.1 specification indicates that the Average TRB Length
+ * should be set to 8 for control endpoints.
*/
- if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version == 0x100)
+ if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
ep_ctx->tx_info |= cpu_to_le32(AVG_TRB_LENGTH_FOR_EP(8));
else
ep_ctx->tx_info |=
@@ -1792,8 +1792,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
int size;
int i, j, num_ports;
- if (timer_pending(&xhci->cmd_timer))
- del_timer_sync(&xhci->cmd_timer);
+ del_timer_sync(&xhci->cmd_timer);
/* Free the Event Ring Segment Table and the actual Event Ring */
size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
@@ -2321,6 +2320,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
INIT_LIST_HEAD(&xhci->cmd_list);
+ /* init command timeout timer */
+ setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
+ (unsigned long)xhci);
+
page_size = readl(&xhci->op_regs->page_size);
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Supported page size register = 0x%x", page_size);
@@ -2505,10 +2508,6 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
"Wrote ERST address to ir_set 0.");
xhci_print_ir_set(xhci, 0);
- /* init command timeout timer */
- setup_timer(&xhci->cmd_timer, xhci_handle_command_timeout,
- (unsigned long)xhci);
-
/*
* XXX: Might need to set the Interrupter Moderation Register to
* something other than the default (~1ms minimum between interrupts).
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 5590eac2b22d..c79d33676672 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -180,51 +180,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
"QUIRK: Resetting on resume");
}
-/*
- * In some Intel xHCI controllers, in order to get D3 working,
- * through a vendor specific SSIC CONFIG register at offset 0x883c,
- * SSIC PORT need to be marked as "unused" before putting xHCI
- * into D3. After D3 exit, the SSIC port need to be marked as "used".
- * Without this change, xHCI might not enter D3 state.
- * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
- * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
- */
-static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
-{
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
- u32 val;
- void __iomem *reg;
-
- if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
- pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
-
- reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
-
- /* Notify SSIC that SSIC profile programming is not done */
- val = readl(reg) & ~PROG_DONE;
- writel(val, reg);
-
- /* Mark SSIC port as unused(suspend) or used(resume) */
- val = readl(reg);
- if (suspend)
- val |= SSIC_PORT_UNUSED;
- else
- val &= ~SSIC_PORT_UNUSED;
- writel(val, reg);
-
- /* Notify SSIC that SSIC profile programming is done */
- val = readl(reg) | PROG_DONE;
- writel(val, reg);
- readl(reg);
- }
-
- reg = (void __iomem *) xhci->cap_regs + 0x80a4;
- val = readl(reg);
- writel(val | BIT(28), reg);
- readl(reg);
-}
-
#ifdef CONFIG_ACPI
static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
{
@@ -345,6 +300,51 @@ static void xhci_pci_remove(struct pci_dev *dev)
}
#ifdef CONFIG_PM
+/*
+ * In some Intel xHCI controllers, in order to get D3 working,
+ * through a vendor specific SSIC CONFIG register at offset 0x883c,
+ * SSIC PORT need to be marked as "unused" before putting xHCI
+ * into D3. After D3 exit, the SSIC port need to be marked as "used".
+ * Without this change, xHCI might not enter D3 state.
+ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
+ * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
+ */
+static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+ u32 val;
+ void __iomem *reg;
+
+ if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
+ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
+
+ reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
+
+ /* Notify SSIC that SSIC profile programming is not done */
+ val = readl(reg) & ~PROG_DONE;
+ writel(val, reg);
+
+ /* Mark SSIC port as unused(suspend) or used(resume) */
+ val = readl(reg);
+ if (suspend)
+ val |= SSIC_PORT_UNUSED;
+ else
+ val &= ~SSIC_PORT_UNUSED;
+ writel(val, reg);
+
+ /* Notify SSIC that SSIC profile programming is done */
+ val = readl(reg) | PROG_DONE;
+ writel(val, reg);
+ readl(reg);
+ }
+
+ reg = (void __iomem *) xhci->cap_regs + 0x80a4;
+ val = readl(reg);
+ writel(val | BIT(28), reg);
+ readl(reg);
+}
+
static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a47a1e897086..43291f93afeb 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -302,6 +302,15 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
ret = xhci_handshake(&xhci->op_regs->cmd_ring,
CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
if (ret < 0) {
+ /* we are about to kill xhci, give it one more chance */
+ xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+ &xhci->op_regs->cmd_ring);
+ udelay(1000);
+ ret = xhci_handshake(&xhci->op_regs->cmd_ring,
+ CMD_RING_RUNNING, 0, 3 * 1000 * 1000);
+ if (ret == 0)
+ return 0;
+
xhci_err(xhci, "Stopped the command ring failed, "
"maybe the host is dead\n");
xhci->xhc_state |= XHCI_STATE_DYING;
@@ -3461,8 +3470,8 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (start_cycle == 0)
field |= 0x1;
- /* xHCI 1.0 6.4.1.2.1: Transfer Type field */
- if (xhci->hci_version == 0x100) {
+ /* xHCI 1.0/1.1 6.4.1.2.1: Transfer Type field */
+ if (xhci->hci_version >= 0x100) {
if (urb->transfer_buffer_length > 0) {
if (setup->bRequestType & USB_DIR_IN)
field |= TRB_TX_TYPE(TRB_DATA_IN);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 6b0f4a47e402..9957bd96d4bc 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -146,7 +146,8 @@ static int xhci_start(struct xhci_hcd *xhci)
"waited %u microseconds.\n",
XHCI_MAX_HALT_USEC);
if (!ret)
- xhci->xhc_state &= ~XHCI_STATE_HALTED;
+ xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+
return ret;
}
@@ -654,15 +655,6 @@ int xhci_run(struct usb_hcd *hcd)
}
EXPORT_SYMBOL_GPL(xhci_run);
-static void xhci_only_stop_hcd(struct usb_hcd *hcd)
-{
- struct xhci_hcd *xhci = hcd_to_xhci(hcd);
-
- spin_lock_irq(&xhci->lock);
- xhci_halt(xhci);
- spin_unlock_irq(&xhci->lock);
-}
-
/*
* Stop xHCI driver.
*
@@ -677,12 +669,14 @@ void xhci_stop(struct usb_hcd *hcd)
u32 temp;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
- if (!usb_hcd_is_primary_hcd(hcd)) {
- xhci_only_stop_hcd(xhci->shared_hcd);
+ if (xhci->xhc_state & XHCI_STATE_HALTED)
return;
- }
+ mutex_lock(&xhci->mutex);
spin_lock_irq(&xhci->lock);
+ xhci->xhc_state |= XHCI_STATE_HALTED;
+ xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+
/* Make sure the xHC is halted for a USB3 roothub
* (xhci_stop() could be called as part of failed init).
*/
@@ -717,6 +711,7 @@ void xhci_stop(struct usb_hcd *hcd)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"xhci_stop completed - status = %x",
readl(&xhci->op_regs->status));
+ mutex_unlock(&xhci->mutex);
}
/*
@@ -3793,6 +3788,9 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
mutex_lock(&xhci->mutex);
+ if (xhci->xhc_state) /* dying or halted */
+ goto out;
+
if (!udev->slot_id) {
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Bad Slot ID %d", udev->slot_id);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 514a6cdaeff6..4a518ff12310 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1051,6 +1051,7 @@ void musb_start(struct musb *musb)
* (c) peripheral initiates, using SRP
*/
if (musb->port_mode != MUSB_PORT_MODE_HOST &&
+ musb->xceiv->otg->state != OTG_STATE_A_WAIT_BCON &&
(devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS) {
musb->is_active = 1;
} else {
@@ -2448,6 +2449,9 @@ static int musb_suspend(struct device *dev)
struct musb *musb = dev_to_musb(dev);
unsigned long flags;
+ musb_platform_disable(musb);
+ musb_generic_disable(musb);
+
spin_lock_irqsave(&musb->lock, flags);
if (is_peripheral_active(musb)) {
@@ -2501,6 +2505,9 @@ static int musb_resume(struct device *dev)
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
+
+ musb_start(musb);
+
return 0;
}
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index d07cafb7d5f5..e499b862a946 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -551,6 +551,9 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel)
} else {
cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE);
+ /* delay to drain to cppi dma pipeline for isoch */
+ udelay(250);
+
csr = musb_readw(epio, MUSB_RXCSR);
csr &= ~(MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_DMAENAB);
musb_writew(epio, MUSB_RXCSR, csr);
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index a0cfead6150f..84512d1d5eee 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -225,8 +225,11 @@ static void dsps_musb_enable(struct musb *musb)
dsps_writel(reg_base, wrp->epintr_set, epmask);
dsps_writel(reg_base, wrp->coreintr_set, coremask);
- /* start polling for ID change. */
- mod_timer(&glue->timer, jiffies + msecs_to_jiffies(wrp->poll_timeout));
+ /* start polling for ID change in dual-role idle mode */
+ if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
+ musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
+ mod_timer(&glue->timer, jiffies +
+ msecs_to_jiffies(wrp->poll_timeout));
dsps_musb_try_idle(musb, 0);
}
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index 39168fe9b406..b2685e75a683 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -379,6 +379,8 @@ static const struct of_device_id ux500_match[] = {
{}
};
+MODULE_DEVICE_TABLE(of, ux500_match);
+
static struct platform_driver ux500_driver = {
.probe = ux500_probe,
.remove = ux500_remove,
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 7d3beee2a587..173132416170 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -155,7 +155,7 @@ config USB_MSM_OTG
config USB_QCOM_8X16_PHY
tristate "Qualcomm APQ8016/MSM8916 on-chip USB PHY controller support"
depends on ARCH_QCOM || COMPILE_TEST
- depends on RESET_CONTROLLER
+ depends on RESET_CONTROLLER && EXTCON
select USB_PHY
select USB_ULPI_VIEWPORT
help
diff --git a/drivers/usb/phy/phy-generic.c b/drivers/usb/phy/phy-generic.c
index ec6ecd03269c..5320cb8642cb 100644
--- a/drivers/usb/phy/phy-generic.c
+++ b/drivers/usb/phy/phy-generic.c
@@ -232,7 +232,8 @@ int usb_phy_gen_create_phy(struct device *dev, struct usb_phy_generic *nop,
clk_rate = pdata->clk_rate;
needs_vcc = pdata->needs_vcc;
if (gpio_is_valid(pdata->gpio_reset)) {
- err = devm_gpio_request_one(dev, pdata->gpio_reset, 0,
+ err = devm_gpio_request_one(dev, pdata->gpio_reset,
+ GPIOF_ACTIVE_LOW,
dev_name(dev));
if (!err)
nop->gpiod_reset =
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index 8a55b37d1a02..db68156568e6 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -31,6 +31,7 @@ static const struct i2c_device_id isp1301_id[] = {
{ "isp1301", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, isp1301_id);
static struct i2c_client *isp1301_i2c_client;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 6d1941a2396a..6956c4f62216 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -278,6 +278,10 @@ static void option_instat_callback(struct urb *urb);
#define ZTE_PRODUCT_MF622 0x0001
#define ZTE_PRODUCT_MF628 0x0015
#define ZTE_PRODUCT_MF626 0x0031
+#define ZTE_PRODUCT_ZM8620_X 0x0396
+#define ZTE_PRODUCT_ME3620_MBIM 0x0426
+#define ZTE_PRODUCT_ME3620_X 0x1432
+#define ZTE_PRODUCT_ME3620_L 0x1433
#define ZTE_PRODUCT_AC2726 0xfff1
#define ZTE_PRODUCT_MG880 0xfffd
#define ZTE_PRODUCT_CDMA_TECH 0xfffe
@@ -544,6 +548,18 @@ static const struct option_blacklist_info zte_mc2716_z_blacklist = {
.sendsetup = BIT(1) | BIT(2) | BIT(3),
};
+static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
+ .reserved = BIT(2) | BIT(3) | BIT(4),
+};
+
+static const struct option_blacklist_info zte_me3620_xl_blacklist = {
+ .reserved = BIT(3) | BIT(4) | BIT(5),
+};
+
+static const struct option_blacklist_info zte_zm8620_x_blacklist = {
+ .reserved = BIT(3) | BIT(4) | BIT(5),
+};
+
static const struct option_blacklist_info huawei_cdc12_blacklist = {
.reserved = BIT(1) | BIT(2),
};
@@ -1591,6 +1607,14 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
+ .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
+ .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
+ .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+ { USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
+ .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 6c3734d2b45a..d3ea90bef84d 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -80,6 +80,8 @@ static int whiteheat_firmware_download(struct usb_serial *serial,
static int whiteheat_firmware_attach(struct usb_serial *serial);
/* function prototypes for the Connect Tech WhiteHEAT serial converter */
+static int whiteheat_probe(struct usb_serial *serial,
+ const struct usb_device_id *id);
static int whiteheat_attach(struct usb_serial *serial);
static void whiteheat_release(struct usb_serial *serial);
static int whiteheat_port_probe(struct usb_serial_port *port);
@@ -116,6 +118,7 @@ static struct usb_serial_driver whiteheat_device = {
.description = "Connect Tech - WhiteHEAT",
.id_table = id_table_std,
.num_ports = 4,
+ .probe = whiteheat_probe,
.attach = whiteheat_attach,
.release = whiteheat_release,
.port_probe = whiteheat_port_probe,
@@ -217,6 +220,34 @@ static int whiteheat_firmware_attach(struct usb_serial *serial)
/*****************************************************************************
* Connect Tech's White Heat serial driver functions
*****************************************************************************/
+
+static int whiteheat_probe(struct usb_serial *serial,
+ const struct usb_device_id *id)
+{
+ struct usb_host_interface *iface_desc;
+ struct usb_endpoint_descriptor *endpoint;
+ size_t num_bulk_in = 0;
+ size_t num_bulk_out = 0;
+ size_t min_num_bulk;
+ unsigned int i;
+
+ iface_desc = serial->interface->cur_altsetting;
+
+ for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
+ endpoint = &iface_desc->endpoint[i].desc;
+ if (usb_endpoint_is_bulk_in(endpoint))
+ ++num_bulk_in;
+ if (usb_endpoint_is_bulk_out(endpoint))
+ ++num_bulk_out;
+ }
+
+ min_num_bulk = COMMAND_PORT + 1;
+ if (num_bulk_in < min_num_bulk || num_bulk_out < min_num_bulk)
+ return -ENODEV;
+
+ return 0;
+}
+
static int whiteheat_attach(struct usb_serial *serial)
{
struct usb_serial_port *command_port;
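The probe callback added above runs before attach(), so a device that does not expose at least COMMAND_PORT + 1 bulk IN and OUT endpoints is rejected before attach() ever indexes the command port. A simplified, hypothetical illustration of the access the check protects (not the driver's verbatim code), assuming COMMAND_PORT is 4 as in whiteheat.h:

#include <linux/usb/serial.h>

#define COMMAND_PORT 4	/* assumed per whiteheat.h: ports 0-3 are data, 4 is the command pipe */

/*
 * Simplified sketch: attach() indexes the command port directly, which is
 * only safe because probe() has already confirmed the endpoints exist.
 */
static int example_attach(struct usb_serial *serial)
{
	struct usb_serial_port *command_port = serial->port[COMMAND_PORT];

	return command_port ? 0 : -ENODEV;
}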
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 7d137a43cc86..9eda69e40678 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -61,8 +61,7 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
enum {
VHOST_NET_FEATURES = VHOST_FEATURES |
(1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
- (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
- (1ULL << VIRTIO_F_VERSION_1),
+ (1ULL << VIRTIO_NET_F_MRG_RXBUF)
};
enum {
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index f114a9dbb48f..e25a23692822 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -166,9 +166,7 @@ enum {
/* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
enum {
VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
- (1ULL << VIRTIO_SCSI_F_T10_PI) |
- (1ULL << VIRTIO_F_ANY_LAYOUT) |
- (1ULL << VIRTIO_F_VERSION_1)
+ (1ULL << VIRTIO_SCSI_F_T10_PI)
};
#define VHOST_SCSI_MAX_TARGET 256
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index d9c501eaa6c3..f2882ac98726 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -277,10 +277,13 @@ static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
return -EFAULT;
return 0;
case VHOST_SET_FEATURES:
+ printk(KERN_ERR "1\n");
if (copy_from_user(&features, featurep, sizeof features))
return -EFAULT;
+ printk(KERN_ERR "2\n");
if (features & ~VHOST_FEATURES)
return -EOPNOTSUPP;
+ printk(KERN_ERR "3\n");
return vhost_test_set_features(n, features);
case VHOST_RESET_OWNER:
return vhost_test_reset_owner(n);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index ce6f6da4b09f..4772862b71a7 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -173,7 +173,9 @@ enum {
VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
(1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
(1ULL << VIRTIO_RING_F_EVENT_IDX) |
- (1ULL << VHOST_F_LOG_ALL),
+ (1ULL << VHOST_F_LOG_ALL) |
+ (1ULL << VIRTIO_F_ANY_LAYOUT) |
+ (1ULL << VIRTIO_F_VERSION_1)
};
static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
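With VIRTIO_F_ANY_LAYOUT and VIRTIO_F_VERSION_1 moved into the common VHOST_FEATURES mask, the per-device enums above no longer repeat them; whether a feature is actually in effect remains a per-virtqueue bit test. A minimal sketch of that idea, assuming a trimmed-down virtqueue that only carries the negotiated feature bits (hypothetical names, not the vhost structures themselves):

#include <linux/types.h>

/* Hypothetical, trimmed-down virtqueue: only the negotiated feature bits. */
struct example_virtqueue {
	u64 acked_features;
};

/* Returns true if the guest acked feature bit 'bit' for this queue. */
static inline bool example_has_feature(struct example_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}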
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index c68edc16aa54..79e1aa1b0959 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -817,8 +817,9 @@ config ITCO_WDT
tristate "Intel TCO Timer/Watchdog"
depends on (X86 || IA64) && PCI
select WATCHDOG_CORE
+ depends on I2C || I2C=n
select LPC_ICH if !EXPERT
- select I2C_I801 if !EXPERT
+ select I2C_I801 if !EXPERT && I2C
---help---
Hardware driver for the Intel TCO timer-based watchdog devices.
These drivers are included in the Intel 82801 I/O Controller
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index 66c3e656a616..8a5ce5b5a0b6 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -36,6 +36,13 @@
#define PM_RSTC_WRCFG_FULL_RESET 0x00000020
#define PM_RSTC_RESET 0x00000102
+/*
+ * The Raspberry Pi firmware uses the RSTS register to know which partition
+ * to boot from. The partition value is spread into bits 0, 2, 4, 6, 8, 10.
+ * Partition 63 is a special partition used by the firmware to indicate halt.
+ */
+#define PM_RSTS_RASPBERRYPI_HALT 0x555
+
#define SECS_TO_WDOG_TICKS(x) ((x) << 16)
#define WDOG_TICKS_TO_SECS(x) ((x) >> 16)
@@ -151,8 +158,7 @@ static void bcm2835_power_off(void)
* hard reset.
*/
val = readl_relaxed(wdt->base + PM_RSTS);
- val &= PM_RSTC_WRCFG_CLR;
- val |= PM_PASSWORD | PM_RSTS_HADWRH_SET;
+ val |= PM_PASSWORD | PM_RSTS_RASPBERRYPI_HALT;
writel_relaxed(val, wdt->base + PM_RSTS);
/* Continue with normal reset mechanism */
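The 0x555 value follows directly from the encoding described in the comment: each bit of the 6-bit partition number lands on an even bit of PM_RSTS, so partition 63 (0b111111) becomes 0b0101_0101_0101 = 0x555, which the firmware reads as "halt". A small hedged sketch of that bit spreading, using a hypothetical helper name:

#include <linux/types.h>

/* Spread a 6-bit partition number onto bits 0, 2, 4, 6, 8, 10. */
static u32 rsts_encode_partition(unsigned int part)
{
	u32 val = 0;
	unsigned int i;

	for (i = 0; i < 6; i++)
		if (part & (1u << i))
			val |= 1u << (2 * i);
	return val;
}

/* rsts_encode_partition(63) == 0x555, the halt marker defined above. */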
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index cc1bdfc2ff71..006e2348022c 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -303,6 +303,7 @@ static const struct of_device_id gef_wdt_ids[] = {
},
{},
};
+MODULE_DEVICE_TABLE(of, gef_wdt_ids);
static struct platform_driver gef_wdt_driver = {
.driver = {
diff --git a/drivers/watchdog/mena21_wdt.c b/drivers/watchdog/mena21_wdt.c
index 69013007dc47..098fa9c34d6d 100644
--- a/drivers/watchdog/mena21_wdt.c
+++ b/drivers/watchdog/mena21_wdt.c
@@ -253,6 +253,7 @@ static const struct of_device_id a21_wdt_ids[] = {
{ .compatible = "men,a021-wdt" },
{ },
};
+MODULE_DEVICE_TABLE(of, a21_wdt_ids);
static struct platform_driver a21_wdt_driver = {
.probe = a21_wdt_probe,
diff --git a/drivers/watchdog/moxart_wdt.c b/drivers/watchdog/moxart_wdt.c
index 2789da2c0515..60b0605bd7e6 100644
--- a/drivers/watchdog/moxart_wdt.c
+++ b/drivers/watchdog/moxart_wdt.c
@@ -168,6 +168,7 @@ static const struct of_device_id moxart_watchdog_match[] = {
{ .compatible = "moxa,moxart-watchdog" },
{ },
};
+MODULE_DEVICE_TABLE(of, moxart_watchdog_match);
static struct platform_driver moxart_wdt_driver = {
.probe = moxart_wdt_probe,
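The three MODULE_DEVICE_TABLE(of, ...) additions in this group serve the same purpose as the i2c one earlier: they export the compatible strings as module aliases so the watchdog modules autoload when the matching device-tree node is probed. A minimal sketch of the pattern, with a hypothetical compatible string and driver name:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* Hypothetical match table; "vendor,example-wdt" is a stand-in. */
static const struct of_device_id example_wdt_ids[] = {
	{ .compatible = "vendor,example-wdt" },
	{ },
};
/* Exports the compatible strings as module aliases for autoloading. */
MODULE_DEVICE_TABLE(of, example_wdt_ids);

static int example_wdt_probe(struct platform_device *pdev)
{
	return 0;	/* nothing to set up in this sketch */
}

static struct platform_driver example_wdt_driver = {
	.probe = example_wdt_probe,
	.driver = {
		.name = "example-wdt",
		.of_match_table = example_wdt_ids,
	},
};
module_platform_driver(example_wdt_driver);

MODULE_LICENSE("GPL");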